From d3a7d99ef0de3cb4eb42cfc4ed4a4bd45a47acfc Mon Sep 17 00:00:00 2001 From: Ashwin Sekar Date: Fri, 15 Mar 2024 22:21:52 -0700 Subject: [PATCH 01/28] vote: deprecate unused legacy vote tx plumbing (#274) --- programs/vote/benches/process_vote.rs | 2 +- programs/vote/src/vote_state/mod.rs | 81 ++++++++++++++++++++------- sdk/program/src/vote/state/mod.rs | 23 ++++++-- sdk/src/feature_set.rs | 5 ++ 4 files changed, 87 insertions(+), 24 deletions(-) diff --git a/programs/vote/benches/process_vote.rs b/programs/vote/benches/process_vote.rs index 9008971f086237..a092056c9353ff 100644 --- a/programs/vote/benches/process_vote.rs +++ b/programs/vote/benches/process_vote.rs @@ -48,7 +48,7 @@ fn create_accounts() -> (Slot, SlotHashes, Vec, Vec = vec![0; VoteState::size_of()]; let versioned = VoteStateVersions::new_current(vote_state); diff --git a/programs/vote/src/vote_state/mod.rs b/programs/vote/src/vote_state/mod.rs index c3917085f4f691..ba84fa9bc4790e 100644 --- a/programs/vote/src/vote_state/mod.rs +++ b/programs/vote/src/vote_state/mod.rs @@ -612,6 +612,9 @@ pub fn process_new_vote_state( let timely_vote_credits = feature_set.map_or(false, |f| { f.is_active(&feature_set::timely_vote_credits::id()) }); + let deprecate_unused_legacy_vote_plumbing = feature_set.map_or(false, |f| { + f.is_active(&feature_set::deprecate_unused_legacy_vote_plumbing::id()) + }); let mut earned_credits = if timely_vote_credits { 0_u64 } else { 1_u64 }; if let Some(new_root) = new_root { @@ -621,7 +624,11 @@ pub fn process_new_vote_state( if current_vote.slot() <= new_root { if timely_vote_credits || (current_vote.slot() != new_root) { earned_credits = earned_credits - .checked_add(vote_state.credits_for_vote_at_index(current_vote_state_index)) + .checked_add(vote_state.credits_for_vote_at_index( + current_vote_state_index, + timely_vote_credits, + deprecate_unused_legacy_vote_plumbing, + )) .expect("`earned_credits` does not overflow"); } current_vote_state_index = current_vote_state_index 
@@ -734,11 +741,19 @@ pub fn process_vote_unfiltered( slot_hashes: &[SlotHash], epoch: Epoch, current_slot: Slot, + timely_vote_credits: bool, + deprecate_unused_legacy_vote_plumbing: bool, ) -> Result<(), VoteError> { check_slots_are_valid(vote_state, vote_slots, &vote.hash, slot_hashes)?; - vote_slots - .iter() - .for_each(|s| vote_state.process_next_vote_slot(*s, epoch, current_slot)); + vote_slots.iter().for_each(|s| { + vote_state.process_next_vote_slot( + *s, + epoch, + current_slot, + timely_vote_credits, + deprecate_unused_legacy_vote_plumbing, + ) + }); Ok(()) } @@ -748,6 +763,8 @@ pub fn process_vote( slot_hashes: &[SlotHash], epoch: Epoch, current_slot: Slot, + timely_vote_credits: bool, + deprecate_unused_legacy_vote_plumbing: bool, ) -> Result<(), VoteError> { if vote.slots.is_empty() { return Err(VoteError::EmptySlots); @@ -769,6 +786,8 @@ pub fn process_vote( slot_hashes, epoch, current_slot, + timely_vote_credits, + deprecate_unused_legacy_vote_plumbing, ) } @@ -785,6 +804,8 @@ pub fn process_vote_unchecked(vote_state: &mut VoteState, vote: Vote) -> Result< &slot_hashes, vote_state.current_epoch(), 0, + true, + true, ) } @@ -1067,7 +1088,18 @@ pub fn process_vote_with_account( ) -> Result<(), InstructionError> { let mut vote_state = verify_and_get_vote_state(vote_account, clock, signers)?; - process_vote(&mut vote_state, vote, slot_hashes, clock.epoch, clock.slot)?; + let timely_vote_credits = feature_set.is_active(&feature_set::timely_vote_credits::id()); + let deprecate_unused_legacy_vote_plumbing = + feature_set.is_active(&feature_set::deprecate_unused_legacy_vote_plumbing::id()); + process_vote( + &mut vote_state, + vote, + slot_hashes, + clock.epoch, + clock.slot, + timely_vote_credits, + deprecate_unused_legacy_vote_plumbing, + )?; if let Some(timestamp) = vote.timestamp { vote.slots .iter() @@ -1250,7 +1282,7 @@ mod tests { 134, 135, ] .into_iter() - .for_each(|v| vote_state.process_next_vote_slot(v, 4, 0)); + .for_each(|v| 
vote_state.process_next_vote_slot(v, 4, 0, false, true)); let version1_14_11_serialized = bincode::serialize(&VoteStateVersions::V1_14_11(Box::new( VoteState1_14_11::from(vote_state.clone()), @@ -1732,11 +1764,11 @@ mod tests { let slot_hashes: Vec<_> = vote.slots.iter().rev().map(|x| (*x, vote.hash)).collect(); assert_eq!( - process_vote(&mut vote_state_a, &vote, &slot_hashes, 0, 0), + process_vote(&mut vote_state_a, &vote, &slot_hashes, 0, 0, true, true), Ok(()) ); assert_eq!( - process_vote(&mut vote_state_b, &vote, &slot_hashes, 0, 0), + process_vote(&mut vote_state_b, &vote, &slot_hashes, 0, 0, true, true), Ok(()) ); assert_eq!(recent_votes(&vote_state_a), recent_votes(&vote_state_b)); @@ -1749,12 +1781,12 @@ mod tests { let vote = Vote::new(vec![0], Hash::default()); let slot_hashes: Vec<_> = vec![(0, vote.hash)]; assert_eq!( - process_vote(&mut vote_state, &vote, &slot_hashes, 0, 0), + process_vote(&mut vote_state, &vote, &slot_hashes, 0, 0, true, true), Ok(()) ); let recent = recent_votes(&vote_state); assert_eq!( - process_vote(&mut vote_state, &vote, &slot_hashes, 0, 0), + process_vote(&mut vote_state, &vote, &slot_hashes, 0, 0, true, true), Err(VoteError::VoteTooOld) ); assert_eq!(recent, recent_votes(&vote_state)); @@ -1814,7 +1846,7 @@ mod tests { let vote = Vote::new(vec![0], Hash::default()); let slot_hashes: Vec<_> = vec![(*vote.slots.last().unwrap(), vote.hash)]; assert_eq!( - process_vote(&mut vote_state, &vote, &slot_hashes, 0, 0), + process_vote(&mut vote_state, &vote, &slot_hashes, 0, 0, true, true), Ok(()) ); assert_eq!( @@ -1830,7 +1862,7 @@ mod tests { let vote = Vote::new(vec![0], Hash::default()); let slot_hashes: Vec<_> = vec![(*vote.slots.last().unwrap(), vote.hash)]; assert_eq!( - process_vote(&mut vote_state, &vote, &slot_hashes, 0, 0), + process_vote(&mut vote_state, &vote, &slot_hashes, 0, 0, true, true), Ok(()) ); @@ -1849,7 +1881,7 @@ mod tests { let vote = Vote::new(vec![0], Hash::default()); let slot_hashes: Vec<_> = 
vec![(*vote.slots.last().unwrap(), vote.hash)]; assert_eq!( - process_vote(&mut vote_state, &vote, &slot_hashes, 0, 0), + process_vote(&mut vote_state, &vote, &slot_hashes, 0, 0, true, true), Ok(()) ); @@ -1866,7 +1898,7 @@ mod tests { let vote = Vote::new(vec![], Hash::default()); assert_eq!( - process_vote(&mut vote_state, &vote, &[], 0, 0), + process_vote(&mut vote_state, &vote, &[], 0, 0, true, true), Err(VoteError::EmptySlots) ); } @@ -2163,7 +2195,9 @@ mod tests { &vote, &slot_hashes, 0, - vote_group.1 // vote_group.1 is the slot in which the vote was cast + vote_group.1, // vote_group.1 is the slot in which the vote was cast + true, + true ), Ok(()) ); @@ -3055,7 +3089,7 @@ mod tests { // error with `VotesTooOldAllFiltered` let slot_hashes = vec![(3, Hash::new_unique()), (2, Hash::new_unique())]; assert_eq!( - process_vote(&mut vote_state, &vote, &slot_hashes, 0, 0), + process_vote(&mut vote_state, &vote, &slot_hashes, 0, 0, true, true), Err(VoteError::VotesTooOldAllFiltered) ); @@ -3069,7 +3103,7 @@ mod tests { .1; let vote = Vote::new(vec![old_vote_slot, vote_slot], vote_slot_hash); - process_vote(&mut vote_state, &vote, &slot_hashes, 0, 0).unwrap(); + process_vote(&mut vote_state, &vote, &slot_hashes, 0, 0, true, true).unwrap(); assert_eq!( vote_state .votes @@ -3098,8 +3132,17 @@ mod tests { .unwrap() .1; let vote = Vote::new(vote_slots, vote_hash); - process_vote_unfiltered(&mut vote_state, &vote.slots, &vote, slot_hashes, 0, 0) - .unwrap(); + process_vote_unfiltered( + &mut vote_state, + &vote.slots, + &vote, + slot_hashes, + 0, + 0, + true, + true, + ) + .unwrap(); } vote_state diff --git a/sdk/program/src/vote/state/mod.rs b/sdk/program/src/vote/state/mod.rs index 8853d5de6da143..1bb8c7dc88d91c 100644 --- a/sdk/program/src/vote/state/mod.rs +++ b/sdk/program/src/vote/state/mod.rs @@ -514,6 +514,8 @@ impl VoteState { next_vote_slot: Slot, epoch: Epoch, current_slot: Slot, + timely_vote_credits: bool, + deprecate_unused_legacy_vote_plumbing: bool, ) { 
// Ignore votes for slots earlier than we already have votes for if self @@ -526,13 +528,21 @@ impl VoteState { self.pop_expired_votes(next_vote_slot); let landed_vote = LandedVote { - latency: Self::compute_vote_latency(next_vote_slot, current_slot), + latency: if timely_vote_credits || !deprecate_unused_legacy_vote_plumbing { + Self::compute_vote_latency(next_vote_slot, current_slot) + } else { + 0 + }, lockout: Lockout::new(next_vote_slot), }; // Once the stack is full, pop the oldest lockout and distribute rewards if self.votes.len() == MAX_LOCKOUT_HISTORY { - let credits = self.credits_for_vote_at_index(0); + let credits = self.credits_for_vote_at_index( + 0, + timely_vote_credits, + deprecate_unused_legacy_vote_plumbing, + ); let landed_vote = self.votes.pop_front().unwrap(); self.root_slot = Some(landed_vote.slot()); @@ -577,7 +587,12 @@ impl VoteState { } /// Returns the credits to award for a vote at the given lockout slot index - pub fn credits_for_vote_at_index(&self, index: usize) -> u64 { + pub fn credits_for_vote_at_index( + &self, + index: usize, + timely_vote_credits: bool, + deprecate_unused_legacy_vote_plumbing: bool, + ) -> u64 { let latency = self .votes .get(index) @@ -585,7 +600,7 @@ impl VoteState { // If latency is 0, this means that the Lockout was created and stored from a software version that did not // store vote latencies; in this case, 1 credit is awarded - if latency == 0 { + if latency == 0 || (deprecate_unused_legacy_vote_plumbing && !timely_vote_credits) { 1 } else { match latency.checked_sub(VOTE_CREDITS_GRACE_SLOTS) { diff --git a/sdk/src/feature_set.rs b/sdk/src/feature_set.rs index 8536282cee8efe..bb7c50f460fd81 100644 --- a/sdk/src/feature_set.rs +++ b/sdk/src/feature_set.rs @@ -780,6 +780,10 @@ pub mod remove_rounding_in_fee_calculation { solana_sdk::declare_id!("BtVN7YjDzNE6Dk7kTT7YTDgMNUZTNgiSJgsdzAeTg2jF"); } +pub mod deprecate_unused_legacy_vote_plumbing { + 
solana_sdk::declare_id!("6Uf8S75PVh91MYgPQSHnjRAPQq6an5BDv9vomrCwDqLe"); +} + lazy_static! { /// Map of feature identifiers to user-visible description pub static ref FEATURE_NAMES: HashMap = [ @@ -970,6 +974,7 @@ lazy_static! { (enable_gossip_duplicate_proof_ingestion::id(), "enable gossip duplicate proof ingestion #32963"), (enable_chained_merkle_shreds::id(), "Enable chained Merkle shreds #34916"), (remove_rounding_in_fee_calculation::id(), "Removing unwanted rounding in fee calculation #34982"), + (deprecate_unused_legacy_vote_plumbing::id(), "Deprecate unused legacy vote tx plumbing"), /*************** ADD NEW FEATURES HERE ***************/ ] .iter() From 403225f112b16f3c1a3388da28af8f5655652740 Mon Sep 17 00:00:00 2001 From: Pankaj Garg Date: Sun, 17 Mar 2024 15:29:20 -0700 Subject: [PATCH 02/28] Remove public visibility of program cache from bank (#279) --- core/benches/banking_stage.rs | 5 +---- core/src/replay_stage.rs | 6 +---- ledger-tool/src/program.rs | 11 +++------ ledger/src/blockstore_processor.rs | 6 +---- runtime/src/bank.rs | 36 ++++++++++++++++++++++++++++-- runtime/src/bank_forks.rs | 13 ++--------- unified-scheduler-pool/src/lib.rs | 5 +---- 7 files changed, 43 insertions(+), 39 deletions(-) diff --git a/core/benches/banking_stage.rs b/core/benches/banking_stage.rs index 242d3b0ed6b530..9defba6a02d155 100644 --- a/core/benches/banking_stage.rs +++ b/core/benches/banking_stage.rs @@ -398,10 +398,7 @@ fn simulate_process_entries( let bank_fork = BankForks::new_rw_arc(bank); let bank = bank_fork.read().unwrap().get_with_scheduler(slot).unwrap(); bank.clone_without_scheduler() - .loaded_programs_cache - .write() - .unwrap() - .set_fork_graph(bank_fork.clone()); + .set_fork_graph_in_program_cache(bank_fork.clone()); for i in 0..(num_accounts / 2) { bank.transfer(initial_lamports, mint_keypair, &keypairs[i * 2].pubkey()) diff --git a/core/src/replay_stage.rs b/core/src/replay_stage.rs index 015ec5360448f9..90be2dade6a191 100644 --- 
a/core/src/replay_stage.rs +++ b/core/src/replay_stage.rs @@ -1686,11 +1686,7 @@ impl ReplayStage { root_bank.clear_slot_signatures(slot); // Remove cached entries of the programs that were deployed in this slot. - root_bank - .loaded_programs_cache - .write() - .unwrap() - .prune_by_deployment_slot(slot); + root_bank.prune_program_cache_by_deployment_slot(slot); if let Some(bank_hash) = blockstore.get_bank_hash(slot) { // If a descendant was successfully replayed and chained from a duplicate it must diff --git a/ledger-tool/src/program.rs b/ledger-tool/src/program.rs index af50d59bca0255..24df2168a338bf 100644 --- a/ledger-tool/src/program.rs +++ b/ledger-tool/src/program.rs @@ -514,14 +514,9 @@ pub fn program(ledger_path: &Path, matches: &ArgMatches<'_>) { with_mock_invoke_context!(invoke_context, transaction_context, transaction_accounts); // Adding `DELAY_VISIBILITY_SLOT_OFFSET` to slots to accommodate for delay visibility of the program - let mut loaded_programs = LoadedProgramsForTxBatch::new( - bank.slot() + DELAY_VISIBILITY_SLOT_OFFSET, - bank.loaded_programs_cache - .read() - .unwrap() - .environments - .clone(), - ); + let slot = bank.slot() + DELAY_VISIBILITY_SLOT_OFFSET; + let mut loaded_programs = + LoadedProgramsForTxBatch::new(slot, bank.get_runtime_environments_for_slot(slot)); for key in cached_account_keys { loaded_programs.replenish(key, bank.load_program(&key, false, bank.epoch())); debug!("Loaded program {}", key); diff --git a/ledger/src/blockstore_processor.rs b/ledger/src/blockstore_processor.rs index a76387f7cb2054..9eace1e7c9cd34 100644 --- a/ledger/src/blockstore_processor.rs +++ b/ledger/src/blockstore_processor.rs @@ -1674,11 +1674,7 @@ fn load_frozen_forks( root = new_root_bank.slot(); leader_schedule_cache.set_root(new_root_bank); - new_root_bank - .loaded_programs_cache - .write() - .unwrap() - .prune(root, new_root_bank.epoch()); + new_root_bank.prune_program_cache(root, new_root_bank.epoch()); let _ = 
bank_forks.write().unwrap().set_root( root, accounts_background_request_sender, diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index d1a1805d0d3a20..6d5c2345f92aca 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -98,7 +98,9 @@ use { solana_program_runtime::{ compute_budget_processor::process_compute_budget_instructions, invoke_context::BuiltinFunctionWithContext, - loaded_programs::{LoadedProgram, LoadedProgramType, LoadedPrograms}, + loaded_programs::{ + LoadedProgram, LoadedProgramType, LoadedPrograms, ProgramRuntimeEnvironments, + }, runtime_config::RuntimeConfig, timings::{ExecuteTimingType, ExecuteTimings}, }, @@ -803,7 +805,7 @@ pub struct Bank { pub incremental_snapshot_persistence: Option, - pub loaded_programs_cache: Arc>>, + loaded_programs_cache: Arc>>, epoch_reward_status: EpochRewardStatus, @@ -1467,6 +1469,36 @@ impl Bank { new } + pub fn set_fork_graph_in_program_cache(&self, fork_graph: Arc>) { + self.loaded_programs_cache + .write() + .unwrap() + .set_fork_graph(fork_graph); + } + + pub fn prune_program_cache(&self, new_root_slot: Slot, new_root_epoch: Epoch) { + self.loaded_programs_cache + .write() + .unwrap() + .prune(new_root_slot, new_root_epoch); + } + + pub fn prune_program_cache_by_deployment_slot(&self, deployment_slot: Slot) { + self.loaded_programs_cache + .write() + .unwrap() + .prune_by_deployment_slot(deployment_slot); + } + + pub fn get_runtime_environments_for_slot(&self, slot: Slot) -> ProgramRuntimeEnvironments { + let epoch = self.epoch_schedule.get_epoch(slot); + self.loaded_programs_cache + .read() + .unwrap() + .get_environments_for_epoch(epoch) + .clone() + } + /// Epoch in which the new cooldown warmup rate for stake was activated pub fn new_warmup_cooldown_rate_epoch(&self) -> Option { self.feature_set diff --git a/runtime/src/bank_forks.rs b/runtime/src/bank_forks.rs index 668062c8d31cce..770cd9059a8e57 100644 --- a/runtime/src/bank_forks.rs +++ b/runtime/src/bank_forks.rs @@ -126,12 +126,7 @@ impl 
BankForks { scheduler_pool: None, })); - root_bank - .loaded_programs_cache - .write() - .unwrap() - .set_fork_graph(bank_forks.clone()); - + root_bank.set_fork_graph_in_program_cache(bank_forks.clone()); bank_forks } @@ -451,11 +446,7 @@ impl BankForks { pub fn prune_program_cache(&self, root: Slot) { if let Some(root_bank) = self.banks.get(&root) { - root_bank - .loaded_programs_cache - .write() - .unwrap() - .prune(root, root_bank.epoch()); + root_bank.prune_program_cache(root, root_bank.epoch()); } } diff --git a/unified-scheduler-pool/src/lib.rs b/unified-scheduler-pool/src/lib.rs index 09ded82ee88e7d..81a3506ea28480 100644 --- a/unified-scheduler-pool/src/lib.rs +++ b/unified-scheduler-pool/src/lib.rs @@ -941,10 +941,7 @@ mod tests { let slot = bank.slot(); let bank_fork = BankForks::new_rw_arc(bank); let bank = bank_fork.read().unwrap().get(slot).unwrap(); - bank.loaded_programs_cache - .write() - .unwrap() - .set_fork_graph(bank_fork); + bank.set_fork_graph_in_program_cache(bank_fork); bank } From 928ede1d492a4ed826374cf268c6cf1a9447cf22 Mon Sep 17 00:00:00 2001 From: "Jeff Washington (jwash)" Date: Mon, 18 Mar 2024 09:53:44 -0500 Subject: [PATCH 03/28] add stats for write cache flushing (#233) * add stats for write cache flushing * some renames --- accounts-db/src/accounts_db.rs | 64 ++++++++++++++++++++++++++-------- 1 file changed, 49 insertions(+), 15 deletions(-) diff --git a/accounts-db/src/accounts_db.rs b/accounts-db/src/accounts_db.rs index e706958af8d0f2..34bcdedd2c5499 100644 --- a/accounts-db/src/accounts_db.rs +++ b/accounts-db/src/accounts_db.rs @@ -1719,6 +1719,8 @@ struct FlushStats { num_flushed: usize, num_purged: usize, total_size: u64, + store_accounts_timing: StoreAccountsTiming, + store_accounts_total_us: u64, } impl FlushStats { @@ -1726,6 +1728,9 @@ impl FlushStats { saturating_add_assign!(self.num_flushed, other.num_flushed); saturating_add_assign!(self.num_purged, other.num_purged); saturating_add_assign!(self.total_size, 
other.total_size); + self.store_accounts_timing + .accumulate(&other.store_accounts_timing); + saturating_add_assign!(self.store_accounts_total_us, other.store_accounts_total_us); } } @@ -6050,7 +6055,7 @@ impl AccountsDb { // Note even if force_flush is false, we will still flush all roots <= the // given `requested_flush_root`, even if some of the later roots cannot be used for // cleaning due to an ongoing scan - let (total_new_cleaned_roots, num_cleaned_roots_flushed) = self + let (total_new_cleaned_roots, num_cleaned_roots_flushed, mut flush_stats) = self .flush_rooted_accounts_cache( requested_flush_root, Some((&mut account_bytes_saved, &mut num_accounts_saved)), @@ -6062,7 +6067,7 @@ impl AccountsDb { // banks // If 'should_aggressively_flush_cache', then flush the excess ones to storage - let (total_new_excess_roots, num_excess_roots_flushed) = + let (total_new_excess_roots, num_excess_roots_flushed, flush_stats_aggressively) = if self.should_aggressively_flush_cache() { // Start by flushing the roots // @@ -6071,8 +6076,9 @@ impl AccountsDb { // for `should_clean`. 
self.flush_rooted_accounts_cache(None, None) } else { - (0, 0) + (0, 0, FlushStats::default()) }; + flush_stats.accumulate(&flush_stats_aggressively); let mut excess_slot_count = 0; let mut unflushable_unrooted_slot_count = 0; @@ -6123,6 +6129,26 @@ impl AccountsDb { ), ("account_bytes_saved", account_bytes_saved, i64), ("num_accounts_saved", num_accounts_saved, i64), + ( + "store_accounts_total_us", + flush_stats.store_accounts_total_us, + i64 + ), + ( + "update_index_us", + flush_stats.store_accounts_timing.update_index_elapsed, + i64 + ), + ( + "store_accounts_elapsed_us", + flush_stats.store_accounts_timing.store_accounts_elapsed, + i64 + ), + ( + "handle_reclaims_elapsed_us", + flush_stats.store_accounts_timing.handle_reclaims_elapsed, + i64 + ), ); } @@ -6130,7 +6156,7 @@ impl AccountsDb { &self, requested_flush_root: Option, should_clean: Option<(&mut usize, &mut usize)>, - ) -> (usize, usize) { + ) -> (usize, usize, FlushStats) { let max_clean_root = should_clean.as_ref().and_then(|_| { // If there is a long running scan going on, this could prevent any cleaning // based on updates from slots > `max_clean_root`. @@ -6161,12 +6187,13 @@ impl AccountsDb { // Iterate from highest to lowest so that we don't need to flush earlier // outdated updates in earlier roots let mut num_roots_flushed = 0; + let mut flush_stats = FlushStats::default(); for &root in cached_roots.iter().rev() { - if self - .flush_slot_cache_with_clean(root, should_flush_f.as_mut(), max_clean_root) - .is_some() + if let Some(stats) = + self.flush_slot_cache_with_clean(root, should_flush_f.as_mut(), max_clean_root) { num_roots_flushed += 1; + flush_stats.accumulate(&stats); } // Regardless of whether this slot was *just* flushed from the cache by the above @@ -6183,7 +6210,7 @@ impl AccountsDb { // so that clean will actually be able to clean the slots. 
let num_new_roots = cached_roots.len(); self.accounts_index.add_uncleaned_roots(cached_roots); - (num_new_roots, num_roots_flushed) + (num_new_roots, num_roots_flushed, flush_stats) } fn do_flush_slot_cache( @@ -6246,18 +6273,23 @@ impl AccountsDb { &HashSet::default(), ); + let mut store_accounts_timing = StoreAccountsTiming::default(); + let mut store_accounts_total_us = 0; if !is_dead_slot { // This ensures that all updates are written to an AppendVec, before any // updates to the index happen, so anybody that sees a real entry in the index, // will be able to find the account in storage let flushed_store = self.create_and_insert_store(slot, total_size, "flush_slot_cache"); - self.store_accounts_frozen( - (slot, &accounts[..]), - Some(hashes), - &flushed_store, - None, - StoreReclaims::Default, - ); + let (store_accounts_timing_inner, store_accounts_total_inner_us) = measure_us!(self + .store_accounts_frozen( + (slot, &accounts[..]), + Some(hashes), + &flushed_store, + None, + StoreReclaims::Default, + )); + store_accounts_timing = store_accounts_timing_inner; + store_accounts_total_us = store_accounts_total_inner_us; // If the above sizing function is correct, just one AppendVec is enough to hold // all the data for the slot @@ -6273,6 +6305,8 @@ impl AccountsDb { num_flushed, num_purged, total_size, + store_accounts_timing, + store_accounts_total_us, } } From 6846756926450f7dbeabec4e7e8e30704eb52c49 Mon Sep 17 00:00:00 2001 From: WGB5445 <919603023@qq.com> Date: Mon, 18 Mar 2024 09:58:28 -0700 Subject: [PATCH 04/28] [solana-install-init] Optimize error message for Windows user permission installation (#234) * feat: check user's permissions in Windows * feat: Remove check fun and check os_err * fmt and optimize code --- install/src/command.rs | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/install/src/command.rs b/install/src/command.rs index d7b92c17690bda..e96b94b97d9377 100644 --- a/install/src/command.rs +++ 
b/install/src/command.rs @@ -1171,13 +1171,17 @@ pub fn init_or_update(config_file: &str, is_init: bool, check_only: bool) -> Res release_dir.join("solana-release"), config.active_release_dir(), ) - .map_err(|err| { - format!( + .map_err(|err| match err.raw_os_error() { + #[cfg(windows)] + Some(os_err) if os_err == winapi::shared::winerror::ERROR_PRIVILEGE_NOT_HELD => { + "You need to run this command with administrator privileges.".to_string() + } + _ => format!( "Unable to symlink {:?} to {:?}: {}", release_dir, config.active_release_dir(), err - ) + ), })?; config.save(config_file)?; From f6c22e9796ce2b757b98826c496eeddc5370415c Mon Sep 17 00:00:00 2001 From: ryleung-solana <91908731+ryleung-solana@users.noreply.github.com> Date: Tue, 19 Mar 2024 03:05:00 +0800 Subject: [PATCH 05/28] Make the quic server connection table use an async lock, reducing thrashing (#293) Make the quic server connection table use an async lock, reducing lock contention --- streamer/src/nonblocking/quic.rs | 34 ++++++++++++++++++++++++-------- 1 file changed, 26 insertions(+), 8 deletions(-) diff --git a/streamer/src/nonblocking/quic.rs b/streamer/src/nonblocking/quic.rs index 225412dd08b315..c4969006288dbf 100644 --- a/streamer/src/nonblocking/quic.rs +++ b/streamer/src/nonblocking/quic.rs @@ -33,13 +33,26 @@ use { std::{ iter::repeat_with, net::{IpAddr, SocketAddr, UdpSocket}, + // CAUTION: be careful not to introduce any awaits while holding an RwLock. sync::{ atomic::{AtomicBool, AtomicU64, Ordering}, - Arc, Mutex, MutexGuard, RwLock, + Arc, RwLock, }, time::{Duration, Instant}, }, - tokio::{task::JoinHandle, time::timeout}, + tokio::{ + // CAUTION: It's kind of sketch that we're mixing async and sync locks (see the RwLock above). + // This is done so that sync code can also access the stake table. + // Make sure we don't hold a sync lock across an await - including the await to + // lock an async Mutex. 
This does not happen now and should not happen as long as we + // don't hold an async Mutex and sync RwLock at the same time (currently true) + // but if we do, the scope of the RwLock must always be a subset of the async Mutex + // (i.e. lock order is always async Mutex -> RwLock). Also, be careful not to + // introduce any other awaits while holding the RwLock. + sync::{Mutex, MutexGuard}, + task::JoinHandle, + time::timeout, + }, }; const WAIT_FOR_STREAM_TIMEOUT: Duration = Duration::from_millis(100); @@ -383,7 +396,7 @@ fn handle_and_cache_new_connection( } } -fn prune_unstaked_connections_and_add_new_connection( +async fn prune_unstaked_connections_and_add_new_connection( connection: Connection, connection_table: Arc>, max_connections: usize, @@ -394,7 +407,7 @@ fn prune_unstaked_connections_and_add_new_connection( let stats = params.stats.clone(); if max_connections > 0 { let connection_table_clone = connection_table.clone(); - let mut connection_table = connection_table.lock().unwrap(); + let mut connection_table = connection_table.lock().await; prune_unstaked_connection_table(&mut connection_table, max_connections, stats); handle_and_cache_new_connection( connection, @@ -504,7 +517,8 @@ async fn setup_connection( match params.peer_type { ConnectionPeerType::Staked(stake) => { - let mut connection_table_l = staked_connection_table.lock().unwrap(); + let mut connection_table_l = staked_connection_table.lock().await; + if connection_table_l.total_size >= max_staked_connections { let num_pruned = connection_table_l.prune_random(PRUNE_RANDOM_SAMPLE_SIZE, stake); @@ -535,7 +549,9 @@ async fn setup_connection( ¶ms, wait_for_chunk_timeout, stream_load_ema.clone(), - ) { + ) + .await + { stats .connection_added_from_staked_peer .fetch_add(1, Ordering::Relaxed); @@ -557,7 +573,9 @@ async fn setup_connection( ¶ms, wait_for_chunk_timeout, stream_load_ema.clone(), - ) { + ) + .await + { stats .connection_added_from_unstaked_peer .fetch_add(1, Ordering::Relaxed); @@ 
-800,7 +818,7 @@ async fn handle_connection( } } - let removed_connection_count = connection_table.lock().unwrap().remove_connection( + let removed_connection_count = connection_table.lock().await.remove_connection( ConnectionTableKey::new(remote_addr.ip(), params.remote_pubkey), remote_addr.port(), stable_id, From 62c458e4def2359279a412d58a3f4030d5f0d33f Mon Sep 17 00:00:00 2001 From: Yueh-Hsuan Chiang <93241502+yhchiang-sol@users.noreply.github.com> Date: Mon, 18 Mar 2024 12:24:19 -0700 Subject: [PATCH 06/28] [TieredStorage] TieredStorageFile -> TieredReadableFile and TieredWritableFile (#260) #### Problem The TieredStorageFile struct currently offers new_readonly() and new_writable() to allow both read and write workloads to share the same struct. However, we need the writer to use BufWriter to improve performance as well as enable Hasher on writes, so there is a need to refactor TieredStorageFile to split its usage for read-only and writable. #### Summary of Changes Refactor TieredStorageFile into TieredReadableFile and TieredWritableFile. #### Test Plan Existing tiered-storage tests.
--- accounts-db/src/tiered_storage/file.rs | 84 +++++++++++++++--------- accounts-db/src/tiered_storage/footer.rs | 12 ++-- accounts-db/src/tiered_storage/hot.rs | 24 +++---- accounts-db/src/tiered_storage/index.rs | 16 ++--- accounts-db/src/tiered_storage/owners.rs | 8 +-- 5 files changed, 83 insertions(+), 61 deletions(-) diff --git a/accounts-db/src/tiered_storage/file.rs b/accounts-db/src/tiered_storage/file.rs index 51801c6133e1f7..5bcf5f62efbbbd 100644 --- a/accounts-db/src/tiered_storage/file.rs +++ b/accounts-db/src/tiered_storage/file.rs @@ -9,10 +9,10 @@ use { }; #[derive(Debug)] -pub struct TieredStorageFile(pub File); +pub struct TieredReadableFile(pub File); -impl TieredStorageFile { - pub fn new_readonly(file_path: impl AsRef) -> Self { +impl TieredReadableFile { + pub fn new(file_path: impl AsRef) -> Self { Self( OpenOptions::new() .read(true) @@ -36,30 +36,6 @@ impl TieredStorageFile { )) } - /// Writes `value` to the file. - /// - /// `value` must be plain ol' data. - pub fn write_pod(&self, value: &T) -> IoResult { - // SAFETY: Since T is NoUninit, it does not contain any uninitialized bytes. - unsafe { self.write_type(value) } - } - - /// Writes `value` to the file. - /// - /// Prefer `write_pod` when possible, because `write_value` may cause - /// undefined behavior if `value` contains uninitialized bytes. - /// - /// # Safety - /// - /// Caller must ensure casting T to bytes is safe. - /// Refer to the Safety sections in std::slice::from_raw_parts() - /// and bytemuck's Pod and NoUninit for more information. - pub unsafe fn write_type(&self, value: &T) -> IoResult { - let ptr = value as *const _ as *const u8; - let bytes = unsafe { std::slice::from_raw_parts(ptr, mem::size_of::()) }; - self.write_bytes(bytes) - } - /// Reads a value of type `T` from the file. /// /// Type T must be plain ol' data. 
@@ -95,13 +71,59 @@ impl TieredStorageFile { (&self.0).seek(SeekFrom::End(offset)) } + pub fn read_bytes(&self, buffer: &mut [u8]) -> IoResult<()> { + (&self.0).read_exact(buffer) + } +} + +#[derive(Debug)] +pub struct TieredWritableFile(pub File); + +impl TieredWritableFile { + pub fn new(file_path: impl AsRef) -> IoResult { + Ok(Self( + OpenOptions::new() + .create_new(true) + .write(true) + .open(file_path)?, + )) + } + + /// Writes `value` to the file. + /// + /// `value` must be plain ol' data. + pub fn write_pod(&self, value: &T) -> IoResult { + // SAFETY: Since T is NoUninit, it does not contain any uninitialized bytes. + unsafe { self.write_type(value) } + } + + /// Writes `value` to the file. + /// + /// Prefer `write_pod` when possible, because `write_value` may cause + /// undefined behavior if `value` contains uninitialized bytes. + /// + /// # Safety + /// + /// Caller must ensure casting T to bytes is safe. + /// Refer to the Safety sections in std::slice::from_raw_parts() + /// and bytemuck's Pod and NoUninit for more information. 
+ pub unsafe fn write_type(&self, value: &T) -> IoResult { + let ptr = value as *const _ as *const u8; + let bytes = unsafe { std::slice::from_raw_parts(ptr, mem::size_of::()) }; + self.write_bytes(bytes) + } + + pub fn seek(&self, offset: u64) -> IoResult { + (&self.0).seek(SeekFrom::Start(offset)) + } + + pub fn seek_from_end(&self, offset: i64) -> IoResult { + (&self.0).seek(SeekFrom::End(offset)) + } + pub fn write_bytes(&self, bytes: &[u8]) -> IoResult { (&self.0).write_all(bytes)?; Ok(bytes.len()) } - - pub fn read_bytes(&self, buffer: &mut [u8]) -> IoResult<()> { - (&self.0).read_exact(buffer) - } } diff --git a/accounts-db/src/tiered_storage/footer.rs b/accounts-db/src/tiered_storage/footer.rs index 1eb4fbdb3ff2ec..dd786a4e804189 100644 --- a/accounts-db/src/tiered_storage/footer.rs +++ b/accounts-db/src/tiered_storage/footer.rs @@ -1,7 +1,7 @@ use { crate::tiered_storage::{ error::TieredStorageError, - file::TieredStorageFile, + file::{TieredReadableFile, TieredWritableFile}, index::IndexBlockFormat, mmap_utils::{get_pod, get_type}, owners::OwnersBlockFormat, @@ -186,11 +186,11 @@ impl Default for TieredStorageFooter { impl TieredStorageFooter { pub fn new_from_path(path: impl AsRef) -> TieredStorageResult { - let file = TieredStorageFile::new_readonly(path); + let file = TieredReadableFile::new(path); Self::new_from_footer_block(&file) } - pub fn write_footer_block(&self, file: &TieredStorageFile) -> TieredStorageResult<()> { + pub fn write_footer_block(&self, file: &TieredWritableFile) -> TieredStorageResult<()> { // SAFETY: The footer does not contain any uninitialized bytes. unsafe { file.write_type(self)? 
}; file.write_pod(&TieredStorageMagicNumber::default())?; @@ -198,7 +198,7 @@ impl TieredStorageFooter { Ok(()) } - pub fn new_from_footer_block(file: &TieredStorageFile) -> TieredStorageResult { + pub fn new_from_footer_block(file: &TieredReadableFile) -> TieredStorageResult { file.seek_from_end(-(FOOTER_TAIL_SIZE as i64))?; let mut footer_version: u64 = 0; @@ -326,7 +326,7 @@ mod tests { use { super::*, crate::{ - append_vec::test_utils::get_append_vec_path, tiered_storage::file::TieredStorageFile, + append_vec::test_utils::get_append_vec_path, tiered_storage::file::TieredWritableFile, }, memoffset::offset_of, solana_sdk::hash::Hash, @@ -356,7 +356,7 @@ mod tests { // Persist the expected footer. { - let file = TieredStorageFile::new_writable(&path.path).unwrap(); + let file = TieredWritableFile::new(&path.path).unwrap(); expected_footer.write_footer_block(&file).unwrap(); } diff --git a/accounts-db/src/tiered_storage/hot.rs b/accounts-db/src/tiered_storage/hot.rs index 0e1ce6bf9a5a8e..198eccd724f17b 100644 --- a/accounts-db/src/tiered_storage/hot.rs +++ b/accounts-db/src/tiered_storage/hot.rs @@ -7,7 +7,7 @@ use { accounts_hash::AccountHash, tiered_storage::{ byte_block, - file::TieredStorageFile, + file::TieredWritableFile, footer::{AccountBlockFormat, AccountMetaFormat, TieredStorageFooter}, index::{AccountIndexWriterEntry, AccountOffset, IndexBlockFormat, IndexOffset}, meta::{AccountMetaFlags, AccountMetaOptionalFields, TieredAccountMeta}, @@ -542,7 +542,7 @@ impl HotStorageReader { } fn write_optional_fields( - file: &TieredStorageFile, + file: &TieredWritableFile, opt_fields: &AccountMetaOptionalFields, ) -> TieredStorageResult { let mut size = 0; @@ -558,14 +558,14 @@ fn write_optional_fields( /// The writer that creates a hot accounts file. #[derive(Debug)] pub struct HotStorageWriter { - storage: TieredStorageFile, + storage: TieredWritableFile, } impl HotStorageWriter { /// Create a new HotStorageWriter with the specified path. 
pub fn new(file_path: impl AsRef) -> TieredStorageResult { Ok(Self { - storage: TieredStorageFile::new_writable(file_path)?, + storage: TieredWritableFile::new(file_path)?, }) } @@ -706,7 +706,7 @@ pub mod tests { super::*, crate::tiered_storage::{ byte_block::ByteBlockWriter, - file::TieredStorageFile, + file::TieredWritableFile, footer::{AccountBlockFormat, AccountMetaFormat, TieredStorageFooter, FOOTER_SIZE}, hot::{HotAccountMeta, HotStorageReader}, index::{AccountIndexWriterEntry, IndexBlockFormat, IndexOffset}, @@ -892,7 +892,7 @@ pub mod tests { }; { - let file = TieredStorageFile::new_writable(&path).unwrap(); + let file = TieredWritableFile::new(&path).unwrap(); expected_footer.write_footer_block(&file).unwrap(); } @@ -928,7 +928,7 @@ pub mod tests { ..TieredStorageFooter::default() }; { - let file = TieredStorageFile::new_writable(&path).unwrap(); + let file = TieredWritableFile::new(&path).unwrap(); let mut current_offset = 0; account_offsets = hot_account_metas @@ -971,7 +971,7 @@ pub mod tests { }; { - let file = TieredStorageFile::new_writable(&path).unwrap(); + let file = TieredWritableFile::new(&path).unwrap(); footer.write_footer_block(&file).unwrap(); } @@ -1016,7 +1016,7 @@ pub mod tests { ..TieredStorageFooter::default() }; { - let file = TieredStorageFile::new_writable(&path).unwrap(); + let file = TieredWritableFile::new(&path).unwrap(); let cursor = footer .index_block_format @@ -1059,7 +1059,7 @@ pub mod tests { }; { - let file = TieredStorageFile::new_writable(&path).unwrap(); + let file = TieredWritableFile::new(&path).unwrap(); let mut owners_table = OwnersTable::default(); addresses.iter().for_each(|owner_address| { @@ -1118,7 +1118,7 @@ pub mod tests { let account_offsets: Vec<_>; { - let file = TieredStorageFile::new_writable(&path).unwrap(); + let file = TieredWritableFile::new(&path).unwrap(); let mut current_offset = 0; account_offsets = hot_account_metas @@ -1237,7 +1237,7 @@ pub mod tests { }; { - let file = 
TieredStorageFile::new_writable(&path).unwrap(); + let file = TieredWritableFile::new(&path).unwrap(); let mut current_offset = 0; // write accounts blocks diff --git a/accounts-db/src/tiered_storage/index.rs b/accounts-db/src/tiered_storage/index.rs index c82e65ce6d275a..405866c3f0fb96 100644 --- a/accounts-db/src/tiered_storage/index.rs +++ b/accounts-db/src/tiered_storage/index.rs @@ -1,6 +1,6 @@ use { crate::tiered_storage::{ - file::TieredStorageFile, footer::TieredStorageFooter, mmap_utils::get_pod, + file::TieredWritableFile, footer::TieredStorageFooter, mmap_utils::get_pod, TieredStorageResult, }, bytemuck::{Pod, Zeroable}, @@ -59,7 +59,7 @@ impl IndexBlockFormat { /// the total number of bytes written. pub fn write_index_block( &self, - file: &TieredStorageFile, + file: &TieredWritableFile, index_entries: &[AccountIndexWriterEntry], ) -> TieredStorageResult { match self { @@ -147,7 +147,7 @@ mod tests { use { super::*, crate::tiered_storage::{ - file::TieredStorageFile, + file::TieredWritableFile, hot::{HotAccountOffset, HOT_ACCOUNT_ALIGNMENT}, }, memmap2::MmapOptions, @@ -181,7 +181,7 @@ mod tests { .collect(); { - let file = TieredStorageFile::new_writable(&path).unwrap(); + let file = TieredWritableFile::new(&path).unwrap(); let indexer = IndexBlockFormat::AddressesThenOffsets; let cursor = indexer.write_index_block(&file, &index_entries).unwrap(); footer.owners_block_offset = cursor as u64; @@ -223,7 +223,7 @@ mod tests { { // we only write a footer here as the test should hit an assert // failure before it actually reads the file. - let file = TieredStorageFile::new_writable(&path).unwrap(); + let file = TieredWritableFile::new(&path).unwrap(); footer.write_footer_block(&file).unwrap(); } @@ -259,7 +259,7 @@ mod tests { { // we only write a footer here as the test should hit an assert // failure before it actually reads the file. 
- let file = TieredStorageFile::new_writable(&path).unwrap(); + let file = TieredWritableFile::new(&path).unwrap(); footer.write_footer_block(&file).unwrap(); } @@ -294,7 +294,7 @@ mod tests { { // we only write a footer here as the test should hit an assert // failure before we actually read the file. - let file = TieredStorageFile::new_writable(&path).unwrap(); + let file = TieredWritableFile::new(&path).unwrap(); footer.write_footer_block(&file).unwrap(); } @@ -334,7 +334,7 @@ mod tests { { // we only write a footer here as the test should hit an assert // failure before we actually read the file. - let file = TieredStorageFile::new_writable(&path).unwrap(); + let file = TieredWritableFile::new(&path).unwrap(); footer.write_footer_block(&file).unwrap(); } diff --git a/accounts-db/src/tiered_storage/owners.rs b/accounts-db/src/tiered_storage/owners.rs index ebe60cc6f8ed0f..ccebdd64ad50aa 100644 --- a/accounts-db/src/tiered_storage/owners.rs +++ b/accounts-db/src/tiered_storage/owners.rs @@ -1,6 +1,6 @@ use { crate::tiered_storage::{ - file::TieredStorageFile, footer::TieredStorageFooter, mmap_utils::get_pod, + file::TieredWritableFile, footer::TieredStorageFooter, mmap_utils::get_pod, TieredStorageResult, }, indexmap::set::IndexSet, @@ -47,7 +47,7 @@ impl OwnersBlockFormat { /// Persists the provided owners' addresses into the specified file. 
pub fn write_owners_block( &self, - file: &TieredStorageFile, + file: &TieredWritableFile, owners_table: &OwnersTable, ) -> TieredStorageResult { match self { @@ -116,7 +116,7 @@ impl<'a> OwnersTable<'a> { #[cfg(test)] mod tests { use { - super::*, crate::tiered_storage::file::TieredStorageFile, memmap2::MmapOptions, + super::*, crate::tiered_storage::file::TieredWritableFile, memmap2::MmapOptions, std::fs::OpenOptions, tempfile::TempDir, }; @@ -139,7 +139,7 @@ mod tests { }; { - let file = TieredStorageFile::new_writable(&path).unwrap(); + let file = TieredWritableFile::new(&path).unwrap(); let mut owners_table = OwnersTable::default(); addresses.iter().for_each(|owner_address| { From 9e2768ad5a5390c7767fea8696fff09aef24b0a4 Mon Sep 17 00:00:00 2001 From: Lijun Wang <83639177+lijunwangs@users.noreply.github.com> Date: Mon, 18 Mar 2024 13:54:07 -0700 Subject: [PATCH 07/28] Net script fix for expected shred version (#280) Fix for --expected-shred-version when maybeWaitForSupermajority is on Co-authored-by: Lijun Wang --- multinode-demo/bootstrap-validator.sh | 3 +++ net/remote/remote-node.sh | 3 ++- 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/multinode-demo/bootstrap-validator.sh b/multinode-demo/bootstrap-validator.sh index 5afc543b2f0032..5091597cfae7bc 100755 --- a/multinode-demo/bootstrap-validator.sh +++ b/multinode-demo/bootstrap-validator.sh @@ -88,6 +88,9 @@ while [[ -n $1 ]]; do elif [[ $1 == --expected-bank-hash ]]; then args+=("$1" "$2") shift 2 + elif [[ $1 == --expected-shred-version ]]; then + args+=("$1" "$2") + shift 2 elif [[ $1 == --accounts ]]; then args+=("$1" "$2") shift 2 diff --git a/net/remote/remote-node.sh b/net/remote/remote-node.sh index aeb920bd50bab0..4502440babd94b 100755 --- a/net/remote/remote-node.sh +++ b/net/remote/remote-node.sh @@ -264,7 +264,8 @@ EOF if [[ -n "$maybeWaitForSupermajority" ]]; then bankHash=$(solana-ledger-tool -l config/bootstrap-validator bank-hash --halt-at-slot 0) - 
extraNodeArgs="$extraNodeArgs --expected-bank-hash $bankHash" + shredVersion="$(cat "$SOLANA_CONFIG_DIR"/shred-version)" + extraNodeArgs="$extraNodeArgs --expected-bank-hash $bankHash --expected-shred-version $shredVersion" echo "$bankHash" > config/bank-hash fi fi From fee4d82a567e9cf2d62eacb592ab949e3f010b4c Mon Sep 17 00:00:00 2001 From: Yueh-Hsuan Chiang <93241502+yhchiang-sol@users.noreply.github.com> Date: Mon, 18 Mar 2024 14:27:55 -0700 Subject: [PATCH 08/28] [TieredStorage] Use BufWriter in TieredWritableFile (#261) #### Problem TieredWritableFile currently uses File instead of BufWriter. This will introduce more syscall when doing file writes. #### Summary of Changes This PR makes TieredWritableFile uses BufWriter to allow the write-call to be more optimized to reduce the number of syscalls. #### Test Plan Existing tiered-storage test. Will run experiments to verify its performance improvement. #### Dependency https://github.com/anza-xyz/agave/pull/260 --- accounts-db/src/tiered_storage.rs | 2 +- accounts-db/src/tiered_storage/file.rs | 24 +++++------ accounts-db/src/tiered_storage/footer.rs | 6 +-- accounts-db/src/tiered_storage/hot.rs | 54 ++++++++++++------------ accounts-db/src/tiered_storage/index.rs | 24 ++++++----- accounts-db/src/tiered_storage/owners.rs | 8 ++-- 6 files changed, 60 insertions(+), 58 deletions(-) diff --git a/accounts-db/src/tiered_storage.rs b/accounts-db/src/tiered_storage.rs index e15adb388605c2..cc2776ed178cf6 100644 --- a/accounts-db/src/tiered_storage.rs +++ b/accounts-db/src/tiered_storage.rs @@ -125,7 +125,7 @@ impl TieredStorage { if format == &HOT_FORMAT { let result = { - let writer = HotStorageWriter::new(&self.path)?; + let mut writer = HotStorageWriter::new(&self.path)?; writer.write_accounts(accounts, skip) }; diff --git a/accounts-db/src/tiered_storage/file.rs b/accounts-db/src/tiered_storage/file.rs index 5bcf5f62efbbbd..605e55a0b193a1 100644 --- a/accounts-db/src/tiered_storage/file.rs +++ 
b/accounts-db/src/tiered_storage/file.rs @@ -2,7 +2,7 @@ use { bytemuck::{AnyBitPattern, NoUninit}, std::{ fs::{File, OpenOptions}, - io::{Read, Result as IoResult, Seek, SeekFrom, Write}, + io::{BufWriter, Read, Result as IoResult, Seek, SeekFrom, Write}, mem, path::Path, }, @@ -77,22 +77,22 @@ impl TieredReadableFile { } #[derive(Debug)] -pub struct TieredWritableFile(pub File); +pub struct TieredWritableFile(pub BufWriter); impl TieredWritableFile { pub fn new(file_path: impl AsRef) -> IoResult { - Ok(Self( + Ok(Self(BufWriter::new( OpenOptions::new() .create_new(true) .write(true) .open(file_path)?, - )) + ))) } /// Writes `value` to the file. /// /// `value` must be plain ol' data. - pub fn write_pod(&self, value: &T) -> IoResult { + pub fn write_pod(&mut self, value: &T) -> IoResult { // SAFETY: Since T is NoUninit, it does not contain any uninitialized bytes. unsafe { self.write_type(value) } } @@ -107,22 +107,22 @@ impl TieredWritableFile { /// Caller must ensure casting T to bytes is safe. /// Refer to the Safety sections in std::slice::from_raw_parts() /// and bytemuck's Pod and NoUninit for more information. 
- pub unsafe fn write_type(&self, value: &T) -> IoResult { + pub unsafe fn write_type(&mut self, value: &T) -> IoResult { let ptr = value as *const _ as *const u8; let bytes = unsafe { std::slice::from_raw_parts(ptr, mem::size_of::()) }; self.write_bytes(bytes) } - pub fn seek(&self, offset: u64) -> IoResult { - (&self.0).seek(SeekFrom::Start(offset)) + pub fn seek(&mut self, offset: u64) -> IoResult { + self.0.seek(SeekFrom::Start(offset)) } - pub fn seek_from_end(&self, offset: i64) -> IoResult { - (&self.0).seek(SeekFrom::End(offset)) + pub fn seek_from_end(&mut self, offset: i64) -> IoResult { + self.0.seek(SeekFrom::End(offset)) } - pub fn write_bytes(&self, bytes: &[u8]) -> IoResult { - (&self.0).write_all(bytes)?; + pub fn write_bytes(&mut self, bytes: &[u8]) -> IoResult { + self.0.write_all(bytes)?; Ok(bytes.len()) } diff --git a/accounts-db/src/tiered_storage/footer.rs b/accounts-db/src/tiered_storage/footer.rs index dd786a4e804189..fa885f2394ce63 100644 --- a/accounts-db/src/tiered_storage/footer.rs +++ b/accounts-db/src/tiered_storage/footer.rs @@ -190,7 +190,7 @@ impl TieredStorageFooter { Self::new_from_footer_block(&file) } - pub fn write_footer_block(&self, file: &TieredWritableFile) -> TieredStorageResult<()> { + pub fn write_footer_block(&self, file: &mut TieredWritableFile) -> TieredStorageResult<()> { // SAFETY: The footer does not contain any uninitialized bytes. unsafe { file.write_type(self)? }; file.write_pod(&TieredStorageMagicNumber::default())?; @@ -356,8 +356,8 @@ mod tests { // Persist the expected footer. 
{ - let file = TieredWritableFile::new(&path.path).unwrap(); - expected_footer.write_footer_block(&file).unwrap(); + let mut file = TieredWritableFile::new(&path.path).unwrap(); + expected_footer.write_footer_block(&mut file).unwrap(); } // Reopen the same storage, and expect the persisted footer is diff --git a/accounts-db/src/tiered_storage/hot.rs b/accounts-db/src/tiered_storage/hot.rs index 198eccd724f17b..c00dff302c9cea 100644 --- a/accounts-db/src/tiered_storage/hot.rs +++ b/accounts-db/src/tiered_storage/hot.rs @@ -542,7 +542,7 @@ impl HotStorageReader { } fn write_optional_fields( - file: &TieredWritableFile, + file: &mut TieredWritableFile, opt_fields: &AccountMetaOptionalFields, ) -> TieredStorageResult { let mut size = 0; @@ -572,7 +572,7 @@ impl HotStorageWriter { /// Persists an account with the specified information and returns /// the stored size of the account. fn write_account( - &self, + &mut self, lamports: u64, owner_offset: OwnerOffset, account_data: &[u8], @@ -599,7 +599,7 @@ impl HotStorageWriter { stored_size += self .storage .write_bytes(&PADDING_BUFFER[0..(padding_len as usize)])?; - stored_size += write_optional_fields(&self.storage, &optional_fields)?; + stored_size += write_optional_fields(&mut self.storage, &optional_fields)?; Ok(stored_size) } @@ -614,7 +614,7 @@ impl HotStorageWriter { U: StorableAccounts<'a, T>, V: Borrow, >( - &self, + &mut self, accounts: &StorableAccountsWithHashesAndWriteVersions<'a, 'b, T, U, V>, skip: usize, ) -> TieredStorageResult> { @@ -677,7 +677,7 @@ impl HotStorageWriter { footer.index_block_offset = cursor as u64; cursor += footer .index_block_format - .write_index_block(&self.storage, &index)?; + .write_index_block(&mut self.storage, &index)?; if cursor % HOT_BLOCK_ALIGNMENT != 0 { // In case it is not yet aligned, it is due to the fact that // the index block has an odd number of entries. 
In such case, @@ -692,9 +692,9 @@ impl HotStorageWriter { footer.owner_count = owners_table.len() as u32; footer .owners_block_format - .write_owners_block(&self.storage, &owners_table)?; + .write_owners_block(&mut self.storage, &owners_table)?; - footer.write_footer_block(&self.storage)?; + footer.write_footer_block(&mut self.storage)?; Ok(stored_infos) } @@ -892,8 +892,8 @@ pub mod tests { }; { - let file = TieredWritableFile::new(&path).unwrap(); - expected_footer.write_footer_block(&file).unwrap(); + let mut file = TieredWritableFile::new(&path).unwrap(); + expected_footer.write_footer_block(&mut file).unwrap(); } // Reopen the same storage, and expect the persisted footer is @@ -928,7 +928,7 @@ pub mod tests { ..TieredStorageFooter::default() }; { - let file = TieredWritableFile::new(&path).unwrap(); + let mut file = TieredWritableFile::new(&path).unwrap(); let mut current_offset = 0; account_offsets = hot_account_metas @@ -942,7 +942,7 @@ pub mod tests { // while the test only focuses on account metas, writing a footer // here is necessary to make it a valid tiered-storage file. 
footer.index_block_offset = current_offset as u64; - footer.write_footer_block(&file).unwrap(); + footer.write_footer_block(&mut file).unwrap(); } let hot_storage = HotStorageReader::new_from_path(&path).unwrap(); @@ -971,8 +971,8 @@ pub mod tests { }; { - let file = TieredWritableFile::new(&path).unwrap(); - footer.write_footer_block(&file).unwrap(); + let mut file = TieredWritableFile::new(&path).unwrap(); + footer.write_footer_block(&mut file).unwrap(); } let hot_storage = HotStorageReader::new_from_path(&path).unwrap(); @@ -1016,14 +1016,14 @@ pub mod tests { ..TieredStorageFooter::default() }; { - let file = TieredWritableFile::new(&path).unwrap(); + let mut file = TieredWritableFile::new(&path).unwrap(); let cursor = footer .index_block_format - .write_index_block(&file, &index_writer_entries) + .write_index_block(&mut file, &index_writer_entries) .unwrap(); footer.owners_block_offset = cursor as u64; - footer.write_footer_block(&file).unwrap(); + footer.write_footer_block(&mut file).unwrap(); } let hot_storage = HotStorageReader::new_from_path(&path).unwrap(); @@ -1059,7 +1059,7 @@ pub mod tests { }; { - let file = TieredWritableFile::new(&path).unwrap(); + let mut file = TieredWritableFile::new(&path).unwrap(); let mut owners_table = OwnersTable::default(); addresses.iter().for_each(|owner_address| { @@ -1067,12 +1067,12 @@ pub mod tests { }); footer .owners_block_format - .write_owners_block(&file, &owners_table) + .write_owners_block(&mut file, &owners_table) .unwrap(); // while the test only focuses on account metas, writing a footer // here is necessary to make it a valid tiered-storage file. 
- footer.write_footer_block(&file).unwrap(); + footer.write_footer_block(&mut file).unwrap(); } let hot_storage = HotStorageReader::new_from_path(&path).unwrap(); @@ -1118,7 +1118,7 @@ pub mod tests { let account_offsets: Vec<_>; { - let file = TieredWritableFile::new(&path).unwrap(); + let mut file = TieredWritableFile::new(&path).unwrap(); let mut current_offset = 0; account_offsets = hot_account_metas @@ -1141,12 +1141,12 @@ pub mod tests { }); footer .owners_block_format - .write_owners_block(&file, &owners_table) + .write_owners_block(&mut file, &owners_table) .unwrap(); // while the test only focuses on account metas, writing a footer // here is necessary to make it a valid tiered-storage file. - footer.write_footer_block(&file).unwrap(); + footer.write_footer_block(&mut file).unwrap(); } let hot_storage = HotStorageReader::new_from_path(&path).unwrap(); @@ -1237,7 +1237,7 @@ pub mod tests { }; { - let file = TieredWritableFile::new(&path).unwrap(); + let mut file = TieredWritableFile::new(&path).unwrap(); let mut current_offset = 0; // write accounts blocks @@ -1264,7 +1264,7 @@ pub mod tests { footer.index_block_offset = current_offset as u64; current_offset += footer .index_block_format - .write_index_block(&file, &index_writer_entries) + .write_index_block(&mut file, &index_writer_entries) .unwrap(); // write owners block @@ -1275,10 +1275,10 @@ pub mod tests { }); footer .owners_block_format - .write_owners_block(&file, &owners_table) + .write_owners_block(&mut file, &owners_table) .unwrap(); - footer.write_footer_block(&file).unwrap(); + footer.write_footer_block(&mut file).unwrap(); } let hot_storage = HotStorageReader::new_from_path(&path).unwrap(); @@ -1358,7 +1358,7 @@ pub mod tests { let temp_dir = TempDir::new().unwrap(); let path = temp_dir.path().join("test_write_account_and_index_blocks"); let stored_infos = { - let writer = HotStorageWriter::new(&path).unwrap(); + let mut writer = HotStorageWriter::new(&path).unwrap(); 
writer.write_accounts(&storable_accounts, 0).unwrap() }; diff --git a/accounts-db/src/tiered_storage/index.rs b/accounts-db/src/tiered_storage/index.rs index 405866c3f0fb96..82dbb9332c7550 100644 --- a/accounts-db/src/tiered_storage/index.rs +++ b/accounts-db/src/tiered_storage/index.rs @@ -59,7 +59,7 @@ impl IndexBlockFormat { /// the total number of bytes written. pub fn write_index_block( &self, - file: &TieredWritableFile, + file: &mut TieredWritableFile, index_entries: &[AccountIndexWriterEntry], ) -> TieredStorageResult { match self { @@ -181,9 +181,11 @@ mod tests { .collect(); { - let file = TieredWritableFile::new(&path).unwrap(); + let mut file = TieredWritableFile::new(&path).unwrap(); let indexer = IndexBlockFormat::AddressesThenOffsets; - let cursor = indexer.write_index_block(&file, &index_entries).unwrap(); + let cursor = indexer + .write_index_block(&mut file, &index_entries) + .unwrap(); footer.owners_block_offset = cursor as u64; } @@ -223,8 +225,8 @@ mod tests { { // we only write a footer here as the test should hit an assert // failure before it actually reads the file. - let file = TieredWritableFile::new(&path).unwrap(); - footer.write_footer_block(&file).unwrap(); + let mut file = TieredWritableFile::new(&path).unwrap(); + footer.write_footer_block(&mut file).unwrap(); } let file = OpenOptions::new() @@ -259,8 +261,8 @@ mod tests { { // we only write a footer here as the test should hit an assert // failure before it actually reads the file. - let file = TieredWritableFile::new(&path).unwrap(); - footer.write_footer_block(&file).unwrap(); + let mut file = TieredWritableFile::new(&path).unwrap(); + footer.write_footer_block(&mut file).unwrap(); } let file = OpenOptions::new() @@ -294,8 +296,8 @@ mod tests { { // we only write a footer here as the test should hit an assert // failure before we actually read the file. 
- let file = TieredWritableFile::new(&path).unwrap(); - footer.write_footer_block(&file).unwrap(); + let mut file = TieredWritableFile::new(&path).unwrap(); + footer.write_footer_block(&mut file).unwrap(); } let file = OpenOptions::new() @@ -334,8 +336,8 @@ mod tests { { // we only write a footer here as the test should hit an assert // failure before we actually read the file. - let file = TieredWritableFile::new(&path).unwrap(); - footer.write_footer_block(&file).unwrap(); + let mut file = TieredWritableFile::new(&path).unwrap(); + footer.write_footer_block(&mut file).unwrap(); } let file = OpenOptions::new() diff --git a/accounts-db/src/tiered_storage/owners.rs b/accounts-db/src/tiered_storage/owners.rs index ccebdd64ad50aa..fa42ffaca97dac 100644 --- a/accounts-db/src/tiered_storage/owners.rs +++ b/accounts-db/src/tiered_storage/owners.rs @@ -47,7 +47,7 @@ impl OwnersBlockFormat { /// Persists the provided owners' addresses into the specified file. pub fn write_owners_block( &self, - file: &TieredWritableFile, + file: &mut TieredWritableFile, owners_table: &OwnersTable, ) -> TieredStorageResult { match self { @@ -139,7 +139,7 @@ mod tests { }; { - let file = TieredWritableFile::new(&path).unwrap(); + let mut file = TieredWritableFile::new(&path).unwrap(); let mut owners_table = OwnersTable::default(); addresses.iter().for_each(|owner_address| { @@ -147,12 +147,12 @@ mod tests { }); footer .owners_block_format - .write_owners_block(&file, &owners_table) + .write_owners_block(&mut file, &owners_table) .unwrap(); // while the test only focuses on account metas, writing a footer // here is necessary to make it a valid tiered-storage file. 
- footer.write_footer_block(&file).unwrap(); + footer.write_footer_block(&mut file).unwrap(); } let file = OpenOptions::new().read(true).open(path).unwrap(); From 21eff36754c8aff18bd19d2a8c62b9f2f8584668 Mon Sep 17 00:00:00 2001 From: Justin Starry Date: Tue, 19 Mar 2024 07:28:53 +0800 Subject: [PATCH 09/28] cli: skip no-op program buffer writes (#277) cli: skip no-op program deploy write txs --- cli/src/program.rs | 79 ++++++++++++++++++++++++++++------------------ 1 file changed, 49 insertions(+), 30 deletions(-) diff --git a/cli/src/program.rs b/cli/src/program.rs index 92c3c657adc40a..099da9dbaf2438 100644 --- a/cli/src/program.rs +++ b/cli/src/program.rs @@ -2213,11 +2213,12 @@ fn do_process_program_write_and_deploy( let blockhash = rpc_client.get_latest_blockhash()?; // Initialize buffer account or complete if already partially initialized - let (initial_instructions, balance_needed) = if let Some(account) = rpc_client - .get_account_with_commitment(buffer_pubkey, config.commitment)? - .value + let (initial_instructions, balance_needed, buffer_program_data) = if let Some(mut account) = + rpc_client + .get_account_with_commitment(buffer_pubkey, config.commitment)? + .value { - complete_partial_program_init( + let (ixs, balance_needed) = complete_partial_program_init( loader_id, &fee_payer_signer.pubkey(), buffer_pubkey, @@ -2229,7 +2230,11 @@ fn do_process_program_write_and_deploy( }, min_rent_exempt_program_data_balance, allow_excessive_balance, - )? 
+ )?; + let buffer_program_data = account + .data + .split_off(UpgradeableLoaderState::size_of_buffer_metadata()); + (ixs, balance_needed, buffer_program_data) } else if loader_id == &bpf_loader_upgradeable::id() { ( bpf_loader_upgradeable::create_buffer( @@ -2240,6 +2245,7 @@ fn do_process_program_write_and_deploy( program_len, )?, min_rent_exempt_program_data_balance, + vec![0; program_len], ) } else { ( @@ -2251,6 +2257,7 @@ fn do_process_program_write_and_deploy( loader_id, )], min_rent_exempt_program_data_balance, + vec![0; program_len], ) }; let initial_message = if !initial_instructions.is_empty() { @@ -2281,7 +2288,10 @@ fn do_process_program_write_and_deploy( let mut write_messages = vec![]; let chunk_size = calculate_max_chunk_size(&create_msg); for (chunk, i) in program_data.chunks(chunk_size).zip(0..) { - write_messages.push(create_msg((i * chunk_size) as u32, chunk.to_vec())); + let offset = i * chunk_size; + if chunk != &buffer_program_data[offset..offset + chunk.len()] { + write_messages.push(create_msg(offset as u32, chunk.to_vec())); + } } // Create and add final message @@ -2370,31 +2380,37 @@ fn do_process_program_upgrade( let (initial_message, write_messages, balance_needed) = if let Some(buffer_signer) = buffer_signer { // Check Buffer account to see if partial initialization has occurred - let (initial_instructions, balance_needed) = if let Some(account) = rpc_client - .get_account_with_commitment(&buffer_signer.pubkey(), config.commitment)? - .value - { - complete_partial_program_init( - &bpf_loader_upgradeable::id(), - &fee_payer_signer.pubkey(), - &buffer_signer.pubkey(), - &account, - UpgradeableLoaderState::size_of_buffer(program_len), - min_rent_exempt_program_data_balance, - true, - )? - } else { - ( - bpf_loader_upgradeable::create_buffer( + let (initial_instructions, balance_needed, buffer_program_data) = + if let Some(mut account) = rpc_client + .get_account_with_commitment(&buffer_signer.pubkey(), config.commitment)? 
+ .value + { + let (ixs, balance_needed) = complete_partial_program_init( + &bpf_loader_upgradeable::id(), &fee_payer_signer.pubkey(), - buffer_pubkey, - &upgrade_authority.pubkey(), + &buffer_signer.pubkey(), + &account, + UpgradeableLoaderState::size_of_buffer(program_len), min_rent_exempt_program_data_balance, - program_len, - )?, - min_rent_exempt_program_data_balance, - ) - }; + true, + )?; + let buffer_program_data = account + .data + .split_off(UpgradeableLoaderState::size_of_buffer_metadata()); + (ixs, balance_needed, buffer_program_data) + } else { + ( + bpf_loader_upgradeable::create_buffer( + &fee_payer_signer.pubkey(), + buffer_pubkey, + &upgrade_authority.pubkey(), + min_rent_exempt_program_data_balance, + program_len, + )?, + min_rent_exempt_program_data_balance, + vec![0; program_len], + ) + }; let initial_message = if !initial_instructions.is_empty() { Some(Message::new_with_blockhash( @@ -2426,7 +2442,10 @@ fn do_process_program_upgrade( let mut write_messages = vec![]; let chunk_size = calculate_max_chunk_size(&create_msg); for (chunk, i) in program_data.chunks(chunk_size).zip(0..) { - write_messages.push(create_msg((i * chunk_size) as u32, chunk.to_vec())); + let offset = i * chunk_size; + if chunk != &buffer_program_data[offset..offset + chunk.len()] { + write_messages.push(create_msg(offset as u32, chunk.to_vec())); + } } (initial_message, write_messages, balance_needed) From f35bda506708da74fade2d30d18ae3c0a6912030 Mon Sep 17 00:00:00 2001 From: Greg Cusack Date: Mon, 18 Mar 2024 17:58:11 -0700 Subject: [PATCH 10/28] add in method for building a `TpuClient` for `LocalCluster` tests (#258) * add in method for building a TpuClient for LocalCluster tests * add cluster trait. 
leave dependency on solana_client::tpu_client --- Cargo.lock | 1 + bench-tps/tests/bench_tps.rs | 27 ++++------------- client/src/tpu_client.rs | 2 ++ dos/src/main.rs | 39 +++++------------------- local-cluster/Cargo.toml | 1 + local-cluster/src/cluster.rs | 11 +++++-- local-cluster/src/local_cluster.rs | 48 +++++++++++++++++++++++++++++- 7 files changed, 72 insertions(+), 57 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 89f2cb1a40f9ea..b636fc5b90ea37 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6334,6 +6334,7 @@ dependencies = [ "solana-ledger", "solana-logger", "solana-pubsub-client", + "solana-quic-client", "solana-rpc-client", "solana-rpc-client-api", "solana-runtime", diff --git a/bench-tps/tests/bench_tps.rs b/bench-tps/tests/bench_tps.rs index 7a2b0fe20a5b8d..bfff1f7e1250c4 100644 --- a/bench-tps/tests/bench_tps.rs +++ b/bench-tps/tests/bench_tps.rs @@ -7,13 +7,11 @@ use { cli::{Config, InstructionPaddingConfig}, send_batch::generate_durable_nonce_accounts, }, - solana_client::{ - connection_cache::ConnectionCache, - tpu_client::{TpuClient, TpuClientConfig}, - }, + solana_client::tpu_client::{TpuClient, TpuClientConfig}, solana_core::validator::ValidatorConfig, solana_faucet::faucet::run_local_faucet, solana_local_cluster::{ + cluster::Cluster, local_cluster::{ClusterConfig, LocalCluster}, validator_configs::make_identical_validator_configs, }, @@ -78,24 +76,9 @@ fn test_bench_tps_local_cluster(config: Config) { cluster.transfer(&cluster.funding_keypair, &faucet_pubkey, 100_000_000); - let ConnectionCache::Quic(cache) = &*cluster.connection_cache else { - panic!("Expected a Quic ConnectionCache."); - }; - - let rpc_pubsub_url = format!("ws://{}/", cluster.entry_point_info.rpc_pubsub().unwrap()); - let rpc_url = format!("http://{}", cluster.entry_point_info.rpc().unwrap()); - - let client = Arc::new( - TpuClient::new_with_connection_cache( - Arc::new(RpcClient::new(rpc_url)), - rpc_pubsub_url.as_str(), - TpuClientConfig::default(), - cache.clone(), 
- ) - .unwrap_or_else(|err| { - panic!("Could not create TpuClient {err:?}"); - }), - ); + let client = Arc::new(cluster.build_tpu_quic_client().unwrap_or_else(|err| { + panic!("Could not create TpuClient with Quic Cache {err:?}"); + })); let lamports_per_account = 100; diff --git a/client/src/tpu_client.rs b/client/src/tpu_client.rs index 038dd86774ea98..555d3aad88bcb1 100644 --- a/client/src/tpu_client.rs +++ b/client/src/tpu_client.rs @@ -21,6 +21,8 @@ pub use { solana_tpu_client::tpu_client::{TpuClientConfig, DEFAULT_FANOUT_SLOTS, MAX_FANOUT_SLOTS}, }; +pub type QuicTpuClient = TpuClient; + pub enum TpuClientWrapper { Quic(TpuClient), Udp(TpuClient), diff --git a/dos/src/main.rs b/dos/src/main.rs index 577e4a2d067393..6b07eec6709a40 100644 --- a/dos/src/main.rs +++ b/dos/src/main.rs @@ -818,7 +818,7 @@ fn main() { pub mod test { use { super::*, - solana_client::tpu_client::TpuClient, + solana_client::tpu_client::QuicTpuClient, solana_core::validator::ValidatorConfig, solana_faucet::faucet::run_local_faucet, solana_gossip::contact_info::LegacyContactInfo, @@ -827,10 +827,8 @@ pub mod test { local_cluster::{ClusterConfig, LocalCluster}, validator_configs::make_identical_validator_configs, }, - solana_quic_client::{QuicConfig, QuicConnectionManager, QuicPool}, solana_rpc::rpc::JsonRpcConfig, solana_sdk::timing::timestamp, - solana_tpu_client::tpu_client::TpuClientConfig, }; const TEST_SEND_BATCH_SIZE: usize = 1; @@ -838,32 +836,7 @@ pub mod test { // thin wrapper for the run_dos function // to avoid specifying everywhere generic parameters fn run_dos_no_client(nodes: &[ContactInfo], iterations: usize, params: DosClientParameters) { - run_dos::>( - nodes, iterations, None, params, - ); - } - - fn build_tpu_quic_client( - cluster: &LocalCluster, - ) -> Arc> { - let rpc_pubsub_url = format!("ws://{}/", cluster.entry_point_info.rpc_pubsub().unwrap()); - let rpc_url = format!("http://{}", cluster.entry_point_info.rpc().unwrap()); - - let ConnectionCache::Quic(cache) = 
&*cluster.connection_cache else { - panic!("Expected a Quic ConnectionCache."); - }; - - Arc::new( - TpuClient::new_with_connection_cache( - Arc::new(RpcClient::new(rpc_url)), - rpc_pubsub_url.as_str(), - TpuClientConfig::default(), - cache.clone(), - ) - .unwrap_or_else(|err| { - panic!("Could not create TpuClient with Quic Cache {err:?}"); - }), - ) + run_dos::(nodes, iterations, None, params); } #[test] @@ -1003,7 +976,9 @@ pub mod test { .unwrap(); let nodes_slice = [node]; - let client = build_tpu_quic_client(&cluster); + let client = Arc::new(cluster.build_tpu_quic_client().unwrap_or_else(|err| { + panic!("Could not create TpuClient with Quic Cache {err:?}"); + })); // creates one transaction with 8 valid signatures and sends it 10 times run_dos( @@ -1135,7 +1110,9 @@ pub mod test { .unwrap(); let nodes_slice = [node]; - let client = build_tpu_quic_client(&cluster); + let client = Arc::new(cluster.build_tpu_quic_client().unwrap_or_else(|err| { + panic!("Could not create TpuClient with Quic Cache {err:?}"); + })); // creates one transaction and sends it 10 times // this is done in single thread diff --git a/local-cluster/Cargo.toml b/local-cluster/Cargo.toml index 4248fc02945238..07b30030295e52 100644 --- a/local-cluster/Cargo.toml +++ b/local-cluster/Cargo.toml @@ -24,6 +24,7 @@ solana-gossip = { workspace = true } solana-ledger = { workspace = true } solana-logger = { workspace = true } solana-pubsub-client = { workspace = true } +solana-quic-client = { workspace = true } solana-rpc-client = { workspace = true } solana-rpc-client-api = { workspace = true } solana-runtime = { workspace = true } diff --git a/local-cluster/src/cluster.rs b/local-cluster/src/cluster.rs index 03ec1b7abe13f2..425f65c48e14c5 100644 --- a/local-cluster/src/cluster.rs +++ b/local-cluster/src/cluster.rs @@ -1,11 +1,11 @@ use { - solana_client::thin_client::ThinClient, + solana_client::{thin_client::ThinClient, tpu_client::QuicTpuClient}, solana_core::validator::{Validator, 
ValidatorConfig}, solana_gossip::{cluster_info::Node, contact_info::ContactInfo}, solana_ledger::shred::Shred, - solana_sdk::{pubkey::Pubkey, signature::Keypair}, + solana_sdk::{commitment_config::CommitmentConfig, pubkey::Pubkey, signature::Keypair}, solana_streamer::socket::SocketAddrSpace, - std::{path::PathBuf, sync::Arc}, + std::{io::Result, path::PathBuf, sync::Arc}, }; pub struct ValidatorInfo { @@ -38,6 +38,11 @@ impl ClusterValidatorInfo { pub trait Cluster { fn get_node_pubkeys(&self) -> Vec; fn get_validator_client(&self, pubkey: &Pubkey) -> Option; + fn build_tpu_quic_client(&self) -> Result; + fn build_tpu_quic_client_with_commitment( + &self, + commitment_config: CommitmentConfig, + ) -> Result; fn get_contact_info(&self, pubkey: &Pubkey) -> Option<&ContactInfo>; fn exit_node(&mut self, pubkey: &Pubkey) -> ClusterValidatorInfo; fn restart_node( diff --git a/local-cluster/src/local_cluster.rs b/local-cluster/src/local_cluster.rs index 9d1b483d85fdd3..400f4f73f78c26 100644 --- a/local-cluster/src/local_cluster.rs +++ b/local-cluster/src/local_cluster.rs @@ -7,7 +7,12 @@ use { itertools::izip, log::*, solana_accounts_db::utils::create_accounts_run_and_snapshot_dirs, - solana_client::{connection_cache::ConnectionCache, thin_client::ThinClient}, + solana_client::{ + connection_cache::ConnectionCache, + rpc_client::RpcClient, + thin_client::ThinClient, + tpu_client::{QuicTpuClient, TpuClient, TpuClientConfig}, + }, solana_core::{ consensus::tower_storage::FileTowerStorage, validator::{Validator, ValidatorConfig, ValidatorStartProgress}, @@ -802,6 +807,34 @@ impl LocalCluster { ..SnapshotConfig::new_load_only() } } + + fn build_tpu_client(&self, rpc_client_builder: F) -> Result + where + F: FnOnce(String) -> Arc, + { + let rpc_pubsub_url = format!("ws://{}/", self.entry_point_info.rpc_pubsub().unwrap()); + let rpc_url = format!("http://{}", self.entry_point_info.rpc().unwrap()); + + let cache = match &*self.connection_cache { + ConnectionCache::Quic(cache) 
=> cache, + ConnectionCache::Udp(_) => { + return Err(Error::new( + ErrorKind::Other, + "Expected a Quic ConnectionCache. Got UDP", + )) + } + }; + + let tpu_client = TpuClient::new_with_connection_cache( + rpc_client_builder(rpc_url), + rpc_pubsub_url.as_str(), + TpuClientConfig::default(), + cache.clone(), + ) + .map_err(|err| Error::new(ErrorKind::Other, format!("TpuSenderError: {}", err)))?; + + Ok(tpu_client) + } } impl Cluster for LocalCluster { @@ -820,6 +853,19 @@ impl Cluster for LocalCluster { }) } + fn build_tpu_quic_client(&self) -> Result { + self.build_tpu_client(|rpc_url| Arc::new(RpcClient::new(rpc_url))) + } + + fn build_tpu_quic_client_with_commitment( + &self, + commitment_config: CommitmentConfig, + ) -> Result { + self.build_tpu_client(|rpc_url| { + Arc::new(RpcClient::new_with_commitment(rpc_url, commitment_config)) + }) + } + fn exit_node(&mut self, pubkey: &Pubkey) -> ClusterValidatorInfo { let mut node = self.validators.remove(pubkey).unwrap(); From 01e48239beee55ad793904bf612ebeb066ab014b Mon Sep 17 00:00:00 2001 From: Brennan Date: Mon, 18 Mar 2024 19:37:32 -0700 Subject: [PATCH 11/28] fix polarity for concurrent replay (#297) * fix polarity for concurrent replay --- core/src/replay_stage.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/core/src/replay_stage.rs b/core/src/replay_stage.rs index 90be2dade6a191..8a29d037dedf3c 100644 --- a/core/src/replay_stage.rs +++ b/core/src/replay_stage.rs @@ -655,14 +655,14 @@ impl ReplayStage { }; // Thread pool to (maybe) replay multiple threads in parallel let replay_mode = if replay_slots_concurrently { - ForkReplayMode::Serial - } else { let pool = rayon::ThreadPoolBuilder::new() .num_threads(MAX_CONCURRENT_FORKS_TO_REPLAY) .thread_name(|i| format!("solReplayFork{i:02}")) .build() .expect("new rayon threadpool"); ForkReplayMode::Parallel(pool) + } else { + ForkReplayMode::Serial }; // Thread pool to replay multiple transactions within one block in parallel let 
replay_tx_thread_pool = rayon::ThreadPoolBuilder::new() From e39bd8d11d8f81330209d8cb530196298bcc6f70 Mon Sep 17 00:00:00 2001 From: Jon C Date: Tue, 19 Mar 2024 07:20:27 +0100 Subject: [PATCH 12/28] install: Fix check for windows build (#295) --- install/src/command.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/install/src/command.rs b/install/src/command.rs index e96b94b97d9377..827ba24ad0bf6d 100644 --- a/install/src/command.rs +++ b/install/src/command.rs @@ -1173,7 +1173,7 @@ pub fn init_or_update(config_file: &str, is_init: bool, check_only: bool) -> Res ) .map_err(|err| match err.raw_os_error() { #[cfg(windows)] - Some(os_err) if os_err == winapi::shared::winerror::ERROR_PRIVILEGE_NOT_HELD => { + Some(os_err) if os_err == winapi::shared::winerror::ERROR_PRIVILEGE_NOT_HELD as i32 => { "You need to run this command with administrator privileges.".to_string() } _ => format!( From 170df8328d1d6c2ca1f17ab6633f9a68f7ef2d62 Mon Sep 17 00:00:00 2001 From: Lucas Steuernagel <38472950+LucasSte@users.noreply.github.com> Date: Tue, 19 Mar 2024 13:44:34 -0300 Subject: [PATCH 13/28] SVM integration test (#307) --- svm/src/transaction_processor.rs | 8 +- svm/tests/hello_solana_program.so | Bin 0 -> 35408 bytes svm/tests/integration_test.rs | 272 ++++++++++++++++++++++++++++++ svm/tests/mock_bank.rs | 3 +- svm/tests/test_program.so | Bin 170136 -> 0 bytes 5 files changed, 278 insertions(+), 5 deletions(-) create mode 100755 svm/tests/hello_solana_program.so create mode 100644 svm/tests/integration_test.rs delete mode 100755 svm/tests/test_program.so diff --git a/svm/src/transaction_processor.rs b/svm/src/transaction_processor.rs index c42566fc9876f9..d1d68365d01fc2 100644 --- a/svm/src/transaction_processor.rs +++ b/svm/src/transaction_processor.rs @@ -1215,7 +1215,7 @@ mod tests { fn test_load_program_from_bytes() { let mut dir = env::current_dir().unwrap(); dir.push("tests"); - dir.push("test_program.so"); + dir.push("hello_solana_program.so"); let 
mut file = File::open(dir.clone()).expect("file not found"); let metadata = fs::metadata(dir).expect("Unable to read metadata"); let mut buffer = vec![0; metadata.len() as usize]; @@ -1321,7 +1321,7 @@ mod tests { let mut dir = env::current_dir().unwrap(); dir.push("tests"); - dir.push("test_program.so"); + dir.push("hello_solana_program.so"); let mut file = File::open(dir.clone()).expect("file not found"); let metadata = fs::metadata(dir).expect("Unable to read metadata"); let mut buffer = vec![0; metadata.len() as usize]; @@ -1394,7 +1394,7 @@ mod tests { let mut dir = env::current_dir().unwrap(); dir.push("tests"); - dir.push("test_program.so"); + dir.push("hello_solana_program.so"); let mut file = File::open(dir.clone()).expect("file not found"); let metadata = fs::metadata(dir).expect("Unable to read metadata"); let mut buffer = vec![0; metadata.len() as usize]; @@ -1479,7 +1479,7 @@ mod tests { let mut dir = env::current_dir().unwrap(); dir.push("tests"); - dir.push("test_program.so"); + dir.push("hello_solana_program.so"); let mut file = File::open(dir.clone()).expect("file not found"); let metadata = fs::metadata(dir).expect("Unable to read metadata"); let mut buffer = vec![0; metadata.len() as usize]; diff --git a/svm/tests/hello_solana_program.so b/svm/tests/hello_solana_program.so new file mode 100755 index 0000000000000000000000000000000000000000..a9da4ff47e5d84903c9ade357361930953992ec3 GIT binary patch literal 35408 zcmd^oeQ;gJb>9NyBS<ole=Nf3SbVMmtH$y0yc& zc0zwd==q(G`|ev15@p8|XSzeaeP{RV*|TTQIeT{RzKhR1^zn~WR#Z4k%iP~MKzp^Q zTPxO_FbwCqU8h@4pR3$T#!XRGp!j~icSPD*?$!tl^cD1fa#Y|&`ARLx^`Bo>sxPh+#Nv$P zRW#re3kwUtdzF|8zjAs3HQ~9FlFyvrJgPas`9{JwnE>x1wZYKPp2#d*ga9c;HRoge zo+DBw45Is7V3WH>X*_~Gmn=Qh6d3TTycRaK6sFy%^cBg$zqiR?ml1rellkv~iF5A#wG zj@=OYn+2|-IYPUrS>Wt4Dxef@mKiVA3mob^f^TkDr|l}gD|FXsySQH9l>0Wzo?a$GFSbfq@d3$e*Gq2WyGUXHX66CG2aII;kWDF_CMkbG#N+nW z3A~YD;m|qsn^^}&oI|-Klt-q&q;E*3_m&s8O1bGd^wkpDn_X;#(M?GW%G2s=inmKy 
zF(-NLPRZkO(!7IYCMSe15{z^a^eAOvP?U_$3m{HuU%TYsr)-?LRb1io8RY~&vq$MY zB(U*Sd|Y7Y<(79-e|-X*9HJW0Uz2}Sb3x{v$n}-kqu5``9>w0wev|e!2mMq#1;2r? z#vMmBH@B~-X0e@45}l;oog^6J_zuY`oS78;ofSV}c4y-P+)Q`|+k6&PA7LkuZ`39B zRXihdLHP~v-_>v(CD8w=qf&>@sH-t9U!gWCu>axql@Op_} zK(N_widIG>(EFoba^$rLLE(Q%jHg(urT$w4`~BrrzS(7>=*e+>JHao;fu7LMuM*!W zmJ0brk`MhOdFZ^HMekHj$e5QTy}!5#y^S=iAR&Lye>owS;$1?|){~Mxi{4*Uql;d; z#QGJzv{)}M5Fc32kT8x;%F)kmVmv%2bi%JnyXe??rc?bTMt4QknPmG8ZU0|L-h~$c z(fD6K&FzY>FzDdF(NCBY*w#C&BVf7MFXb3d7k#}b(^!%#!H{cq`6?Gs9?mFV(*#f| zJ}-IrhV)yzM_}`3kU!Q<^Vcc&0Y=#IEU04s&T(FB}egFQd8IA<4t9sQh$11=YDK8U%XIJo=xxzm)qc&Ue&* zTKd@_?W>hu74ZZ7g02tg)cagOaPft5yyA9+b=>(c3!GkMK8BXlgDfFJ+oqf&-X=r1$?e(2hfQaS8zrDuO92?unE>_YWk z!7b^{$G9LobBg&(uiV72*++O*<-A4O<#$THwc-;FB8l}at@V#i@t4AF0>f@Fu1yX2 zgr5F@`+=Rngfr8sm(yJBCa4^k#b*T$&uIS|Z&@6b*ElD?RqzVB&KB+$*zh1f(1TyY z_$_`&xxbP5I4ARwTtBZ6UN|fL7M~M(kl#Nh+X!c_aJy`UC}=n%>vA+r2$Z6{I52A; z)k*s=Nak`g|DD_S>pGm9`7?$81LygfdCt=n-(P@y!r7|~7iz9^9&NiO`AdQq9g}*~ zqo_vs%gtP7y#5-||J;nw@2~kjmnZcL<9#drnoH;t^lLr=LoN>{^sA`*qa9)>=E8!tzZLw?C-9T&`qfco zf-=yzePb`Fw$U%bQBx{5Xnwoo>6NSbOSn_)AYCCFf$)ABAf;@j)TgW7%doA-X77+g zj`&JD8DPo@;pnKehu=VoZWsC~=8o)Sr<9w$7RLn!y)mXI4nE~*k*AcdqDiH6v;H4t zeCa5~yMF3V`;U%Gzu*HHwy(7PMR5vL6Zk7i_*|}Xish)V`AMUju9W_5yuI3AwxWUQ zhdH5J)b(tLhKClJdTCdzXEe8!93QE<1Am;8{Qv{@^IKp0+Sh4zHEB-C3>t}RS$={1V1^n?u zx$O^&Tez94)^!Kt0sYMV(u>KXSIe^%o4LV~^YpN^D?TiAF;5r?<_URmOv=v@FW_&2 z@()wd(()(AOeyT=tW56cDVBTJt;yZyAM$t>^7%Q)|2+vgC*^-fqP!?#bm2N;gp%ze z!Ph59P_Z2)_LUnk-dosLb{}Pu{E^&O)-AEGJY9yrcwdPbuzjTMFK@P=tWMB}f2Csq zj^pk~lw*9qL%Lw&3vbu)tGy@f#_Z(%B#-n8T13Fdu^Y1f&M?UF5Ac%vdFf@kAH zoK)sFbEK@4c)q9(If-?lI3(@rX$~9dtBvm>edlrR7C+{%L-4<-2c4UYUDPglc6lY^nI2hQ^!vG6&9h98%hEY9_((a~m-}?{buzDZu9&ZrhITHQuWR5AqZ*m#ur5$o<~gj>dDZ<* zIHT*$V~plZpPyX!+cFO`QclaCnk2*FT)E@&oiFceaih7!_hX;qoVHrtJTls zb28wVU$sx;H?QZxcHRm5zzC9dx0dWK>)9LXfd~F}Q5&0=*uK`td;q3@E?Ogc5J|DK z^I(i~7wT7^mvO97|8=m3N}cQfA}IcG2d0Nvi(^d>yQIFj zpS$66HSis66F*|-Y0(CZipp)hn`Y2e@01*J+{^WQcSXdBeJRS0a8f|4}?AeIuU0^MhpkaUQU?0Y4uky91p*U3<|o 
zsyQKca%_ro=&h8G%J>Mrk%(WSnw}UBs8P*cX|K;klk+--d9CT=`XpaH;43HnfUjSt zqn;xDM+#?8F@C%c*Yh9Kci^9ao%S($92cP+GlZPTv!8=WBw>_WoZ*)*1w&pp5^{%q z->YbZX8dLe!-67cme>WUM+vyeJ%ne zO3CvG*ugL2J{Kp^KM%P#5nZ$2q})H8D38z6RR7bJ*cB2#@j2T21dh+qa4IM3IOp`i z^He3zRm`u#AD|rl|Mibc{x;3KPS~LO_a)BPEKaa4C*#<{yjAm_@1INOFwv>ZRQ5G8~h2 zK3%E%EtR+JCyXxk9l#7brG51-$x%+MaNGqyV9y=1l|SNfgkz!)=}Ofv2{YMWV_pz1 zqP@*W=Cddj7`O|wE&=aKL=QnV^PsiD-dYP@1#eRsIF@3_~T*$ig%5~gsaAjq@ zZhnLKj^#7W)iFOP|1$JL%d>7ZBiOtr&zb*If)4iQNMT;bul%Rn|6#e9UYcHD{7AO{ zH2tuBcTyh^Kl7Uu-5a9$wZ$fj(V~XI+I+35rr+=HAyE05N&a6W# zS`SojZ$<|iP}+BIF2nDo{yt144^uuxd7nO}mTswv(@ksj=amQ$V+#dva^C~`6`tRXp<@EF}y*GXPberz3$?h<+_mth1O z|KjloKgRFF#Md0PC>&Ec5)9Q#IXRG7nvR^8SUu#zvmtXBP6;j%fSHGSU*z7TB4@K;+;`V}P`?h49PHu7`*=~}is(MoJqvO(l99On$ ze+`<8J91&DeCl)C+Lxt%N3MMRv3nqP&ja@HHSig?mv+_KJ|0)UlmmYN{2dfH`jYeu zzk+0Z0`FxSmtSt@pDFT!NqlPu|MJ+k|NS4J?#9r;hNw>T7Uh3F_7hB%d*kIlTN~Bk zTdBXIm@A!I1zaB1i-5vVDoo!&r&7=6fC#@1JNq2{Uq%0mq*qAb8(*^(y#QS}UG5K} zZ6}$3(3_iJ6#j^|$$k!g0+tqSn`FFbo9zFR^M0D-6K#|IpxqyYona-3w#~)#PY);Q z%YNO?DU$6l6L$Upz5)cqnxbvzg%7*;dPXX+j*deAOpn#qV*Y-EXe^dT8#fH~oJPa6O4Twd@_G>(FfNBK5-rq5r z&y)Q19^<&4)MJeYQGQ-&qrXVw&7|C}lH4HgH|g91Ie`mB^;3)VZU$839t-QgK6gbw zmy>#~_x3CvN%n{HmN;HLr}W4(TtL5P6(;e_1J?Bm@?E1o6&R2Q^bv6&a`1g8eKCAH z*F(9DkH{?)KZ(|`GdVx8`+w1HwXd$zVz-dzO~#WPhd=%@d(}8SiGPy%|7phE_%`ES zw#2x9pZbgEv%|)HjmKyAkL?^K+9vxDJAaF|U17X%EeR0vQhmBeauHbK!;5gbl*}Hm zUIQgOJJ0Ry{#Cr6y-obE-Cu&9!xDTy70%w^_NM3AisuQ7)_dv?6&D#I|GsLPzh#c@ z~}N3G}`@?wBb zT2Ff?OtDIkL+vWd(TaRUq!;Y&Uzu^&D?eeS73bb4Ap%Ml6;9_O6VPY zv~>Y{V6Gr@wNVu7o{BjV2TnTGF9^S5z^7@jXT&hkoLH zoBCh72WR%ODX-@@x_`6hjp+*6pDtym@W0<36<+WO|4S;!@c?q)ezO&_Z?|!UXT@LG zyxKZ%^KE`*)2<28548{5$K-F5coTRSXL|YA0ywsIDT;y!jN{k%dBR3rUpJtZT<=Qhs)o5r8#%o!c{HOzR5qf+Ykc`-c0 zd#CbDSN!kt@il0Fw@Z$A6>`bzKJg+s3c%S43F4xzQ`|nkU-T|KDD84DxAGS|uj$o! 
zjdq`8{OE}joM)FwhjvZ`{G2ExjF0e)jNk6pmgw#hx-HuO zE2aK1Ucr2X8#!>u!%P&^5k{e%5s`U7N?ku_Dh`_)5U#;j21t zXBp&mqloop_XU~XE{%J(N!(-4@xxbFO8eKuUcxWPICCc(xjg^nPR}fYo!N=6&lZ2bo&A`;F0Icp4}WL+Eb8V8dd`LG%+QbMi#+^AzcVa~ zIO5+sVxEA%c-?wjs5A{#J_f*Dy{2!yv~>(lrYnUtujwH({E_L>5b{7@fokH ze0oXx`<&3Rb^{Z6-Xi(v{?qOOzLRyK&C_$p&-k_eVtv>3{TF>*_^I{X zCc_wcfy1pYk!OR zjn{?E+?@S$a-GHch8vVIUXS2`eejc6CVXVtWI(nrh$;fACB1Bt`lyaU7djj&@5t68}!g&M$NxqhnVj?l?BbdDw|o)UN9S!&&|PS=c1wPggv{_2I14 zhx#04i1@_1YWwYKL3iQZjPBqMA<-1?)@i=|xZxGy+n%SH9cP^!{)T!VHOvXUo4cjQ`7uU_$4Q-n zKN~0HeVOEvUe(5UcK?wjp7 zjh#2y^N4U->>_;b8q0ra{=UKWCI0@H;djp8AC%GY`AfNf#r3g%>2v!`9#VpwaW-;Z7wf0eDI z384f(m!aJDIk^`kezkbWewQTvaP1;II~V9BIv9DjLe3?_N9FwhW>4|`3O#QCKICHi z>y*>;ooOnhl(lo67aJH3j|ZIFIh4gK;Th2f`+YV1>>Gqxig)U{UCK%O@VMw>=8NDK z^CWhF=dq|sS6TUoxxQB8!TgJ`R+7_d^}oAi|2McQ%i~V38sd68Z?tpjlpEl3$S*+q zF0rpng`V%J-rN01yHAdB%+pk-+@0J${3+z{Jhd#zK@LR2*WevX=Xv(~*zk<#h3T2a zeUJ+zQXJ)G@%oJMz?@BQ!mmpElHSNZC3(IE3br4_IUtgq1KatN?U$+>xjKHI0`|4! z9v4h+t4~UOcvj?DB>#zI`_Pp8J+{xhOs^XvWy1`9LvxFV2A6+tp-6NABcUi~cDkdMu11aS$GM?GR1hEf&vT;q&xER**lHAqbPnXLtJQ09wtk=*tmdmPg zeKNU}p5qF=l3dO*UnZ9lo{nGrN_bZ6JbXj!9(IW%RGaUt`==@oZKrZ9(Yq@3o_%HA zYg}$}GQYM-&)3Z^-fkQgPo(wt_<$h~^UKC(=Jw-d{iWRV4BK->`yFNRZWgbzb)%^M z3F~J-RTG+v{jPQ4YbYhVo94{fJ-pbD5I&NvPxkv!Tfgi*Lbh(j{>kX+d4=5@F}f+b zg$=sKXNZZRWN}PU?f#KHTukTgE&L_gCi4_^i9V!L4{~{UPRi|hoBbZ4TK6j!2ibXe zdbvK25w}Y3P0{wD^tWmIcK%ZQlE5}kc^#;|uVaX+*)M56NO1$tU!onEv@?7>|CuK^ zlah{;^x?u#f1T^mirOXUkn4->uJ#qKcGW81%z&(;n>y~~ioEXS?7cg_{~V;>S%FWi z>+{qi(<=1B(`vV05qYF5*K$QBkagGm>?XY*8UOx#I$*r;d9io@94_UCxxP5SnXBF+ zxzV%dUp}9Z#~krF{E#T3>5<)su;+7;o&(3_?Lyz4UuA8*nhvKjCpJJK{EEs+^(K=8HqDpDN46e9t-nQWY+W?FNv|M7zX^Uy?K*ueNjjf3p@p2baXzrKFgTVLfqsJ}j%9PQl7=BZ@w z8Ri4=A~f5cD;r<_I%4Zg^t$M^tv9(B8<|e{g6i3cfZ@{mD)ST;q&)U(YA@gic9x85 zAg62vRt50&*X)63g2oTF&OndAx6!xz zqNZQHS|9u6+X5;f{Ix2#w2p5l7jwKprWVJC_PdyHo#L+z7?I;zItMYji`E|@OMYD2 zSzIzL?M)vLe_<6n0ke?NPvXx@bCg*I$?I081|1kS0)vx1vTX%AG 
z;!t~OoS+o@pSnQs?o|0C&fo4+pA7`~MX?iEC&Ib_#Wy{-yVq+6(VConJy&K_Uq}DGO@Pa7Pg5zZJ&`mZ@q^3)Ah^z1?J-d=0oq{ z*nT=&0qrEat>cXS9eT>@KFsVbv+e9XC}GZNq_A5RPuM07ob;Xh0iK7zInj9E<+9%CZ!IaW@m^qU~k~Rf&81{ zC)e?B{wQ&;?H6!dSKek^?_OeDf0P)P?Q?Bjah&p3BtJXJt_j%0j?cB}o4`E*e7xoR!?Kw5# z9Pl|R`3;hrUikeEuhH|MM+L7|+rbZD+}VmW zSM2zR#(i~ySFL!V(zo9aJ)-)$Z(8u|p45)^cXPoa{?-cKV!ot5izhPuvd*U5XSo5& zN#$K;K+fasy$B`zZ*n}8a-U&*zF#peec{uek2LvHGC7Jn@8CGgpO0+CDBGFEp~WGg zW%H9M{5$RM8LqH+r;6lA|D3hUtOtbodWv&<50^a$$*bIad8DfX4lL4DA8_BNmgpCe zbT8%T*ynd^)f2)m)$=}NLp}GGa#&eQ{P=v9_{EPLx!Nm{NwyU_-^6)l$|3BP`@qRIUeYL}D~{H@LBjwa#XekWV4_5eD-%d0*3^NIG5z+lBzw)3Pu z@qSvCGqvrq+IO1WVU~d?Ox>(%V!DuFUhAu`TsTMKRhFTpzxeDEO_ssK|BDt z+WXz?JS%E~w;(yL=gisrg@BI~X?eW<>pa5F=pW0C?>DOtsnzu&v*jgjZ|g!uATB|ar=ceO_ujN>GG4p#lDl$-y_u3W|A zwsUEF-$uGp{I{Ls!1{R{%73*?bIgbB5Ar(BMduncD2ObcpXD+?W)J3n(<|iuCFU6^ z<^C7XpWV+!d;>cE`T7K{v(RfhN5Ff;D=2{m@|621zPkLwoVg(q7q6q5WBmp4V3nS) z6O3f%>tTby_MX@YY6p~a#3!lq!UW}1DkGUh=R(_3#CO9(H(M?91*CLQgRt_P*F! zS$>`MAL)L|-m{DMmMpMmmV94_(4Qc=Kyv$9wLg{5MiQ31*+}yHv>f>GCiO?G(|sk8W2Wv;Snrc|KTl(h2UP zrFMuxQ!@Lp_`~chT^Vq_-Gi?_&LH2v#kXD(FGvh&j<_jle_KW{eyjKuM(1%%(SWOUKUG-q8Lq(Z z3n7Q3zl8!M|6u;WW;lrckr3X$@_l+_tO8AllM?3_cv2izO`bFLvH0S zklc{$z4^)gD)ZYU_0Wqc#*_0Z|9yvcE&_XgtRQXh2|dS@VZT{P=N+*esc?$Qfj_0b zlR(D1o%Yqhhg|+4$h(91{za7cf2H>$+={%f(SF&NxBlGuCi*S@^rzMD36d45gv;!K z)tB^qX}$g)`@@o6%lV1vuj%FAr(U1CMLGZLe;{%eeg4Ih^RZi$^XHRtCS|K9J;V9U z8v2q+2T;5C?=}?Gk9UxNL%qe(_2gd}?qPIS+b6l5gTnvf7gHUS4^WluYwAg@0oRTy z{yE7JcmFr?XVni2JVfo_dT~CpC*d!v4@mvNHnM5FXG&?b_63G z?BmQmQ?-@9AZ{kNyRbF&KNc3|e~U9MDC|Nhd!7eDl6lM{5KN%BX_=#Tvu2QJEi`hWbPRdqf~rUa?BUj)eK&qPRaVi{k*b% zbpGD9pDdGc&nSJnFNSu+-h!=XIKN~59uj=`mqz+p(0-O0hrI_F@grnatK*B;)i;Em z`7Nw_W#{e02NL}7ri7l`^C{c6+d2!pv&8ooq?t3k+*&C};{E1L@a+C#67Tf{Ua0$9 zJEyaIH^%?xs2i{x_rFE@_3sJV?*+i`x02)1{i;2;NBmAAafV+{a{M9jQ?2pG){2c1 z?@&1s^aJr+rVUymF!Ev_J}EY${*e50mJkaNrD#-g%rEF*-a$IENB&;Kr}dt8{2c!_ z!M8ZRTJ<5HmGZ(K$w3cvhS%%9s87mcIpqYl^)}_?{kP_KZe1Q5mXOE0xdY>q?U_pa 
zxcoll-_A3zz9N|)vva8k4u?`KC;HBES}wo0{&bVX1HPOJst+Z3$T;oqBJ@)H$8oMF z51T)e*RH`sheqxR9v?ZBJDjU~RC_~?UOL1C=_zQM!&BLjo? zjurZIhYk%61bvSU7LFVm@9H{ocw#I!+HiMYFmgD^9S-^)8XM~ij^+*>8NC0?u3vihd#cwgtX;SMw)Dd78#cbT=6!c;dVg&o2z7UE z-m>+s`i8r=ePH39%)R$DHZ`}jwzYS3Zs+WSR`{(-@#h7KHj`kCRwBcu7T!uXM+6O-XFk>B`OI689h@HokkXWcoLKvyt4 zI6N{I1|*`9{@nOM65~_3gCyg7f{`QR!N^mJp!kOl?jOsIh4(S~eIPxSo7i`Bu%G8< zG}nJ{JPbyT4vsx_Xk;Q6JT)>loEslMcz7rnAIlva4-O8G9vU1TJUlL4jSY?;89Pi9 zN>j&TUQGX~VdeSmgNJ#(g520p!SMP=#s--ULc4ExWPsQh%N-sX34t*O1K zqp7oLdvjxRQ*(23OLJ>;TXTDJM{{TM_Ljz$rk3WGmX_9*wwCsmj+V}r?X8WiO|8wX zEv>DsZLRIC9j%?M+uIu3n%bJ%TH0FM+S=ONI@&thwzoI7H?=pnx3ssmx3#yoceHo5 zZ|`XAXzFP0Xz6I}XzOV2=;-L|*xuRL+0@zG+0xnC+1AcyKocZi&>+&nKYAEt6$>dNpBP`P;Ka{Mfn2hv>`{;yHF_0=a= zt{97o<8)celYIRJ)dq^Om+G;WB3X{_II}}qs=h&E*QM&O%cIdH+n-SS2dIBJ?JV#A zfUb3?sJ=m1DdX>iuIam(rK0*12xQ({Nd#ThB0WxFOlC9bDC>Fe#Mp z)rqCoZgRr)sH9A!ROEKEa@f<&%3GHxcl}2S<0HfScr|sj#B0Ri3L}U19U2+h=W_c; z#>N?>prbH2zR$k*kB0l)DdSfeyX?VhbR4Endv)9fpZ4HBofxa1^x%^oJnz9bJovo& zBBOW1gZ0k^#Qx`iDuUrpdGJXOzO4FY`12lo-GlA#TN{3l`ci`@J@~u_U-4n`p-9&L zlKM`AuXyk^4{lUnZ1}r8c+P`QZz}I^r5dKS@A2R%A6DOK_?JDnAtyr&#K<-u1xxF=JNKXq?8?CvXvH+t}t2hV$OPh)xe zDG$Et!F_Gz_!rcNn>o?+Kgnk$BK?yLBhn6U-jT_jhC!`uLrN$ zKul7y`V$_!7Xf^tzR!bi)Rfns(v6eRyQJ}!!Ho%dK|b9c+~>guJb2WDCq4MQ2Ve5w z3w7oEUiRPy-FzFr-5%Vi@tW1|_29|7%IVK}@Kuf1to<&Hj|>hp{xNu??p+POzO$VE z_1<#$^4@ZIRM$UiKdW9$^4HMRnr%=hYvuqb=&gORw31Ke4!w}ivqkG+I96) zmS0dr8=v*RM$4gwa6Rg-;&Xx1p&*=BJhTU&)_$XwS5c0>t-YNaSl+F8mZ^9R_y7L@)@JYG literal 0 HcmV?d00001 diff --git a/svm/tests/integration_test.rs b/svm/tests/integration_test.rs new file mode 100644 index 00000000000000..700b9c2f6a0ad1 --- /dev/null +++ b/svm/tests/integration_test.rs @@ -0,0 +1,272 @@ +#![cfg(test)] + +use { + crate::mock_bank::MockBankCallback, + solana_bpf_loader_program::syscalls::{SyscallAbort, SyscallLog, SyscallMemcpy, SyscallMemset}, + solana_program_runtime::{ + compute_budget::ComputeBudget, + invoke_context::InvokeContext, + loaded_programs::{ + BlockRelation, ForkGraph, 
LoadedProgram, LoadedPrograms, ProgramRuntimeEnvironments, + }, + runtime_config::RuntimeConfig, + solana_rbpf::{ + program::{BuiltinFunction, BuiltinProgram, FunctionRegistry}, + vm::Config, + }, + timings::ExecuteTimings, + }, + solana_sdk::{ + account::{AccountSharedData, WritableAccount}, + bpf_loader, + clock::{Epoch, Slot}, + epoch_schedule::EpochSchedule, + fee::FeeStructure, + hash::Hash, + instruction::CompiledInstruction, + message::{Message, MessageHeader}, + native_loader, + pubkey::Pubkey, + signature::Signature, + transaction::{SanitizedTransaction, Transaction}, + }, + solana_svm::{ + account_loader::TransactionCheckResult, + transaction_error_metrics::TransactionErrorMetrics, + transaction_processor::{ExecutionRecordingConfig, TransactionBatchProcessor}, + }, + std::{ + cmp::Ordering, + env, + fs::{self, File}, + io::Read, + sync::{Arc, RwLock}, + }, +}; + +// This module contains the implementation of TransactionProcessingCallback +mod mock_bank; + +const BPF_LOADER_NAME: &str = "solana_bpf_loader_program"; +const DEPLOYMENT_SLOT: u64 = 0; +const EXECUTION_SLOT: u64 = 5; // The execution slot must be greater than the deployment slot +const DEPLOYMENT_EPOCH: u64 = 0; +const EXECUTION_EPOCH: u64 = 2; // The execution epoch must be greater than the deployment epoch + +struct MockForkGraph {} + +impl ForkGraph for MockForkGraph { + fn relationship(&self, a: Slot, b: Slot) -> BlockRelation { + match a.cmp(&b) { + Ordering::Less => BlockRelation::Ancestor, + Ordering::Equal => BlockRelation::Equal, + Ordering::Greater => BlockRelation::Descendant, + } + } + + fn slot_epoch(&self, _slot: Slot) -> Option { + Some(0) + } +} + +fn create_custom_environment<'a>() -> BuiltinProgram> { + let compute_budget = ComputeBudget::default(); + let vm_config = Config { + max_call_depth: compute_budget.max_call_depth, + stack_frame_size: compute_budget.stack_frame_size, + enable_address_translation: true, + enable_stack_frame_gaps: true, + 
instruction_meter_checkpoint_distance: 10000, + enable_instruction_meter: true, + enable_instruction_tracing: true, + enable_symbol_and_section_labels: true, + reject_broken_elfs: true, + noop_instruction_rate: 256, + sanitize_user_provided_values: true, + external_internal_function_hash_collision: false, + reject_callx_r10: false, + enable_sbpf_v1: true, + enable_sbpf_v2: false, + optimize_rodata: false, + new_elf_parser: false, + aligned_memory_mapping: true, + }; + + // These functions are system calls the compile contract calls during execution, so they + // need to be registered. + let mut function_registry = FunctionRegistry::>::default(); + function_registry + .register_function_hashed(*b"abort", SyscallAbort::vm) + .expect("Registration failed"); + function_registry + .register_function_hashed(*b"sol_log_", SyscallLog::vm) + .expect("Registration failed"); + function_registry + .register_function_hashed(*b"sol_memcpy_", SyscallMemcpy::vm) + .expect("Registration failed"); + function_registry + .register_function_hashed(*b"sol_memset_", SyscallMemset::vm) + .expect("Registration failed"); + + BuiltinProgram::new_loader(vm_config, function_registry) +} + +fn create_executable_environment( + mock_bank: &mut MockBankCallback, +) -> (LoadedPrograms, Vec) { + let mut programs_cache = LoadedPrograms::::new(0, 20); + + // We must register the bpf loader account as a loadable account, otherwise programs + // won't execute. 
+ let account_data = native_loader::create_loadable_account_with_fields( + BPF_LOADER_NAME, + (5000, DEPLOYMENT_EPOCH), + ); + mock_bank + .account_shared_data + .insert(bpf_loader::id(), account_data); + + // The bpf loader needs an executable as well + programs_cache.assign_program( + bpf_loader::id(), + Arc::new(LoadedProgram::new_builtin( + DEPLOYMENT_SLOT, + BPF_LOADER_NAME.len(), + solana_bpf_loader_program::Entrypoint::vm, + )), + ); + + programs_cache.environments = ProgramRuntimeEnvironments { + program_runtime_v1: Arc::new(create_custom_environment()), + // We are not using program runtime v2 + program_runtime_v2: Arc::new(BuiltinProgram::new_loader( + Config::default(), + FunctionRegistry::default(), + )), + }; + + programs_cache.fork_graph = Some(Arc::new(RwLock::new(MockForkGraph {}))); + + // Inform SVM of the registered builins + let registered_built_ins = vec![bpf_loader::id()]; + (programs_cache, registered_built_ins) +} + +fn prepare_transactions( + mock_bank: &mut MockBankCallback, +) -> (Vec, Vec) { + let mut all_transactions = Vec::new(); + let mut transaction_checks = Vec::new(); + + // A transaction that works without any account + let key1 = Pubkey::new_unique(); + let fee_payer = Pubkey::new_unique(); + let message = Message { + account_keys: vec![fee_payer, key1], + header: MessageHeader { + num_required_signatures: 1, + num_readonly_signed_accounts: 0, + num_readonly_unsigned_accounts: 0, + }, + instructions: vec![CompiledInstruction { + program_id_index: 1, + accounts: vec![], + data: vec![], + }], + recent_blockhash: Hash::default(), + }; + + let transaction = Transaction { + signatures: vec![Signature::new_unique()], + message, + }; + let sanitized_transaction = + SanitizedTransaction::try_from_legacy_transaction(transaction).unwrap(); + all_transactions.push(sanitized_transaction); + transaction_checks.push((Ok(()), None, Some(20))); + + // Loading the program file + let mut dir = env::current_dir().unwrap(); + dir.push("tests"); + // 
File compiled from + // https://github.com/solana-developers/program-examples/blob/feb82f254a4633ce2107d06060f2d0558dc987f5/basics/hello-solana/native/program/src/lib.rs + dir.push("hello_solana_program.so"); + let mut file = File::open(dir.clone()).expect("file not found"); + let metadata = fs::metadata(dir).expect("Unable to read metadata"); + let mut buffer = vec![0; metadata.len() as usize]; + file.read_exact(&mut buffer).expect("Buffer overflow"); + + // The program account must have funds and hold the executable binary + let mut account_data = AccountSharedData::default(); + // The executable account owner must be one of the loaders. + account_data.set_owner(bpf_loader::id()); + account_data.set_data(buffer); + account_data.set_executable(true); + account_data.set_lamports(25); + mock_bank.account_shared_data.insert(key1, account_data); + + // The transaction fee payer must have enough funds + let mut account_data = AccountSharedData::default(); + account_data.set_lamports(80000); + mock_bank + .account_shared_data + .insert(fee_payer, account_data); + + // TODO: Include these examples as well: + // A simple funds transfer between accounts + // A transaction that fails + // A transaction whose verification has already failed + + (all_transactions, transaction_checks) +} + +#[test] +fn svm_integration() { + let mut mock_bank = MockBankCallback::default(); + let (transactions, mut check_results) = prepare_transactions(&mut mock_bank); + let (programs_cache, builtins) = create_executable_environment(&mut mock_bank); + let programs_cache = Arc::new(RwLock::new(programs_cache)); + let batch_processor = TransactionBatchProcessor::::new( + EXECUTION_SLOT, + EXECUTION_EPOCH, + EpochSchedule::default(), + FeeStructure::default(), + Arc::new(RuntimeConfig::default()), + programs_cache.clone(), + ); + + let mut error_counter = TransactionErrorMetrics::default(); + let recording_config = ExecutionRecordingConfig { + enable_log_recording: true, + 
enable_return_data_recording: false, + enable_cpi_recording: false, + }; + let mut timings = ExecuteTimings::default(); + + let result = batch_processor.load_and_execute_sanitized_transactions( + &mock_bank, + &transactions, + check_results.as_mut_slice(), + &mut error_counter, + recording_config, + &mut timings, + None, + builtins.iter(), + None, + false, + ); + + assert_eq!(result.execution_results.len(), 1); + assert!(result.execution_results[0] + .details() + .unwrap() + .status + .is_ok()); + let logs = result.execution_results[0] + .details() + .unwrap() + .log_messages + .as_ref() + .unwrap(); + assert!(logs.contains(&"Program log: Hello, Solana!".to_string())); +} diff --git a/svm/tests/mock_bank.rs b/svm/tests/mock_bank.rs index 3548b5fbac32da..0c123369e25451 100644 --- a/svm/tests/mock_bank.rs +++ b/svm/tests/mock_bank.rs @@ -35,7 +35,8 @@ impl TransactionProcessingCallback for MockBankCallback { } fn get_last_blockhash_and_lamports_per_signature(&self) -> (Hash, u64) { - todo!() + // Mock a hash and a value + (Hash::new_unique(), 2) } fn get_rent_collector(&self) -> &RentCollector { diff --git a/svm/tests/test_program.so b/svm/tests/test_program.so deleted file mode 100755 index 9336ced24df6b4eec7b37c9530c99003016c28d6..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 170136 zcmeFa51d_9buWHql1l>`XiP$gAVw2luKdwBxtWaNkCEI= z_;WbcOaKM_{rcpOAhotK6Clv)gV&~pzRvTemNsbH_ptU4eYQ2Vwm)8LYg((OR+!)S zyZ)TB@45Gep!jP0aYJU`z4zK{ueJ8tYp?z1oU>>BhBtS1bVMGeMBj~=TJtK+Sz`qc z+}CNpP!_F-rsD66=oEo1prm9d^6`@OTHtXsQ_(Q@B>da-W-ZUhM_#RP9*6a&qUgx5 z*2~A!7ic|?y&kD&drPcdK7K;0MIKMLbamcx`89E_W(0p{6K2_rZ zFQ0m{WDHD!jPT3r6{j@T%iuM?00}dE5RF~i6Gepx2rRv5F4GFf^2?CEPfDYxK2Lu* z9q}yX#M6X-YcYfWap3RmIsRtk_&YD)@5h1O$ArGDKkomj#{PH;Y3~o0du_J%%K=nl z&sC;c|0|k^`XTvKsHNz~K8d0^P~4xjF~XdKfTL!xAG?6F5WPYGr=c?IC*8*;nNfd8 zvZHALJ~lZy8RDz8z&8haedH=)&_Mj30RB85%>Ujiv;3FZADN#2@Z(}yPH%oE*AGX7 
zesDfqK6Ak8(LfGK_fe5svUvJYN`U1%-7JS9uL$~oxA5D(>gJ!~#xRG(z|Ga-E|wO?D&c|{vKx4cYrhImNdRRNu$Jfj9Wn&mmO zp*)l4=*vWBh=+8p59q9)X8i-X(jq1%&-%VBpW`H1x#%c!DE~ttT`QNQ`P&-$Wloy^ zU<17JmvpN>9Ynu#q`JN^%@L4vm!y8(^dlUnJ|~hMj(ft-!FawhXrFxkJa}{cJ_!8- zXq5D!0D)=zkF?GMOj8AuK9*+n>Q`zyx%fe~A1gFnUugN=4@f@8#Rr4-iI1Xny{*?2 zp6Mq*aD2b=ALc)f{0S0)Q0cf;;`rk_j)?z(9R5XsNb&C#@F;%F=)6bM^%0A=X-xP- zuxT7`M?ZDX{X^ZKR=&oxoc8h=j?-~VgO5)m9opBy(?S2(Gg7Z;K2jmlhT^ejB|qtY zLgIM*h^7}mY4Ot%7dtz|UKgT78mdxQ2nWy}NQmeMkUpjv1}wjVdJKi={Ss5o==w|! z0Uu9*Zs8YkJm&N$9bL1eI`}vw^msa1tn^hnZx+9(c=B5XupZzQ`P{1YiT_<7!0E0A za_I(0d-~561TG(!TYRedCG!;jG$<`OPL^q!?VStVtE(7f9@G;4Ev;es&RluVRomXc z(ywm(lme{tDY=(GTCQ(o=}mOG}i)7cdSOLp>(h z9{VdE*YP}L`cZ%{&rluzBhiz1pXzt~km_~(@N=5~pw>$^%@O!y4G<$FOSC_SO4QOQ z8q(~{E-6`Jk62Fox+s_rNZ)Sw6;3DZz_k}zCBRevUu*Ra(7e_ux*$B(F;iT7K7 zjGKKj`x)<3c_k~={`-8H$Cu8JE$sXvwCCffYWQh)#V%RD9F*P($EW(u@tD%*evI2m z_p{i z>{Pt#n2xu3!~%H8lJIUHD^rkx5YN+bmzA4cwSJ*LD)ssVJgRhlQ}NomFe}%3h3N04 zoc->BKVuF)1dfAfl)u!^@O5lm&3sh9Ov`#t)7aO$<;#>$rWw3HT5YzYMQa*KA7ggwZAk^VoFx)}p2!pTFnDjL8S*6zpxob7~EqqqxXH@2d zgHT_u>AK~6KX4o_#d@{AMaxq;SpVel`g*mgpN+42(b~C0<6i5pw4X0gdYby#`L}g% z%D?yX0>#(NzX~!>XX1QX?EVM&J+}qFN%v0CYu7iQ4~X~Kz`iBjccpOTBr(4?>3&r5 z!|{HI6f}b_x>LMFXX@SGVEX7216X~U+p}TX=2y;{+Ez;dSi7fqWU9J zEOzoa%BQMn^7}{NFZqbt$NGMQgPO=bhr!i%X+G`P1Mkwdn8W&C!Md=%*Xp5&rEdSd zo#WO2a;`pA_@&hUyk8y<1hqNCJwj?bsJbnvHyWaXwbf8Z@){)#kz$-BdR-Iod5Edi77S}bG$u-agdI; z({=Wb&k!_GIDgO%5`H=Kx_FZMugNWD2geki?uFRPaZ7^^+Qn9Oa3$>s>?Wzq{LybW z%}?I$F9JTLGq4v+f^pVL56Dq_J;?Ukp4^C5zQBX!aX)opaFqp5)pmMXt%U&GSFkOI+VFuw8Dw zv7M(ubNr~{i666icL-S2^>K}TU&H+$)<4hc&&K{Y*F_8?XD}E1e?tA0`aP12=>N05 zc%SkSFV%ead$_L9{?l^GaV0xEo$2R zv1wRWJPkP<&}`s+jr>}*RT(HmpOLiF*|kmqYv*cQJPDH-g8R93lT&R(%eNaJUA8{) z^~!l@m;6jzsc`e5Buvk}SJNXZwrrdr8)Qxvgx8uMqu{#H$u+h59Hjry_PgjCxB<)ccFEFA?t3P~P;@zjvXw#fCUus39DmTLS-%>z2_VO?;2R5HKd5 zE?akHdi6GaruKz zQr)vaX)i=-U#a&fipo{f+p1KHZbCjG>nhHu>sHP8b-L42Tdw)FevL;iv-tc*c#dD< zV|S8&U&j`r?+U-ZFB8r)&j7A&?be4BALaM6;K}#HD;=uP@tCfck}3cYlA6w6$zHAJ 
z^F?*+ucW;pTNhQwzHaH?li2rNYqoz!Iy-^F?b-kmXwbGzpxom_iF z<6E>m`N%VpuG&7&P`B2vj%m9?-Txxxp}vv-i_fwFs>#6(cgu5uG&;JWswGKx;rTrjWl!NO&e=2|v z;g;m!P73IJsebCqxvsoW{82Kl@+e^K7_KXqAnp3sF82EoZjao53GLC7&u8oH6M_HL z06tlKO!d+Abfun$B^Rr|LOpvjkgxlf;eNnTj>m_D&Fnn?VNLt|&i)VaP(L0H`qTZd zWbsj>$HvRWN)PEc8t8F&-hJ#v=o5c4pik&+LEp0h9@2MrNZ$#_X-+Wiei%80cu2=+ z105z8+uuztw*Fgba=R*%*YrVJBn>Y`UPp5M*-l=e+)3wk4RpS^UafeUm0{ewurraEZFa;yPvg4>-m0UaXOG9#A|gPD|TRU z!}R%R5TVkk<;7_*J_zoA`+P~d=3x96BV8aBB6T>D8@0asyNRvy3PK&*6LgW!@Aci9 zCcd);pE`&I+`3~ZPSbtiR_#w0c-75(et zmL%psxjl8c(LW(RKZoJ@3xnII0c)?)p#qPqinnh5(oNM=Iw3CO4`a@^Ymf8;K-nw5RX;Vz(6-$+j2ttB{zq5HP|oNAK1TSWwu}PYy*7rehGWP_eawC z)#mZgzTOw$r+vk7u&l$zgM9MGCAr&)_;lmn#(Ow!e4P9l;9rh?t*^rWekuD}=fk8r zom}stdK8~!^v>6~GF8W^_b1m?Ea5%~^*gcoAz7;Mw1?7ql?T_gUfv zC`727#9GbvDY;hl{vgO8CCNv0o#Xw|wO6X8`W5d}yB0t6fcksO6>q%H{F~**-#%L} zEm!_{uE1Z;XV-0tH(6eiAIa=R5_@~;eIm0j&KLd32K!vi?AH2ld~8?z3t=~w;HUF< z5Wh)BviJ$1KR@4b-NSO~+2Zfh{wZ8VW($>{@UaU(26ti5uFEqyMfuOq2fn{W{boq6 zeN^ZuL^xbTsIQeMohRzbUgYwq+P#61S&Id*YWJ^3W}R*6rv=~0ER|<+X-V>vW!g{4 zJ=#CXR&s&%jgRWsze&BJS+9}&+SsJwIY-jG?<@B5V(WjuZ(;X&()|FSCFXcc`6@4E zE&2n$F8K0#Ra18ccIjCuPcD8&;$p`eBK|1P_WGX^JU)NsBUZ-)kuFUj|%I*b?CgPr5!A z(C6da^_F&?^tnCqeyaGr5VI$44-Np9@M$@IGx&a0ns4J^=#)d!FGDNUe$=Z9PrgZC zVtU1WVxgPJnSVx3erK?h`4Xk);az>2CcZ1A-0acEOH$DFfR&^AAR4?Jv7EbGJ)ak1 zT^_-{sJ?EVt?jkb)1j+WPbuFUpr6%;HJ!;(^)hdFU2p3qSKoJb`Hh^Gv%jb5cpJG4 z#3Lm4mLyK^d7Wnbj;Or+ydpVG=}(3PT=roo+QjT^A-YV*i;X+3Pwsj*AvBP$D}ciF zx^Ckso~7ln;g4rqyQdl*wqKfDto@#>d{Wx+d6Dz%FNNoseotk;;-w#S7wy5grJ=`< zGF{8)+K>5@c(&-2A_YnJo!ZVVqY|g%(C*O?el*}O>E4vWk?W-U3a#ILwZvh+kD~pg zdyD1=bZsJC)r_v+fMYoUJ!bIxs_8zR?K_A2bn7sFC$d)o(Z|d6ZQQh>jIQ{KX z!PmcBR#Wl|(N{|l&q4bjOK3&3ryyP*=qZ-89+^d;eURAn(ESJRZ_4Ybiw)S|(KV?) z#CJ>eG@bgdpoetOFM9?A6sNBcK;l~y^gG+9-|Fpof3>5lenVq>#CKngz7SvSKGn+? 
z!|Ux6--9`PG+|yI->r7R(}aIG2Y*TfypI3a)=}m3JVnQ|`>XCxxE~(dc#h9ie$(e7 z&>llO`Gj;mo};UXdR`vieN1#CqiaIa7ayDypI^(N0o11 z7rCDLxmlNu;~I%VXxBgCeiHTB^=tEu-w}H6)9P~1a8r)%BLUs{am;;+!dp$4Y#;jn z(}>B(EAZR*8EWHdNBp^l=KY52g@SnO=Y<~J16H|?-EHZ}(jT_;F}cGZkL|VeQAxL= zZ)qF)z6;v(^tIo&QXUJ@_8gsyj4r3Ml%sQwrJc^1l5R!k<~DSG?FXWBFh}Q3qr>S` zyBv?XJk=g&@>KgBk12mi_eEA;`AfRjSX%i@x>s0Q`AfQ&NZRFloy`x@?-1p6-8V}) z^_6yk`cDZ{Z+3nU^MKj?%1N0Y#&W{B9r-yF`FJ5-=8v4N@X~JQl&M3uU&(eZ$h9-X zc4&kVLVwTOAw2Pq2JNKsI78>>c+BKs{#86?@-Y7@9y58EpB0Z?A$%uioF(nXV=DLL z4E0muG3}S+jAd4?{g#};Q$E#K3hKHu&QH^%_qsDml0Jy}Lgb0eyj<6lU(6#6mFYSW z79xG#qh{l1#GhZ-r*O2VoOfui+MT~Y5uAhYJoK(Dq(}qh_ZyJ0_tVSS&ds@YKK&zV zXG^Y~PlfGNRo_N<5dooERlbK*J>dUN*Zwv?E*^_4eMsVXOzoKa?a3z|)pEP1lYHa> zNmGBJ`tYmnvif&wZ2nn1ruNV8HOJ$c?&)K#%+C39KHje1$Jjhe_Z6JJcwFh*JnNXW zBjXl$;&J6C-uJZRZ=R>;=e{q`b9-q|n{Dwt+fpfy$CUmG_74c^_Xt>@>4J<_DF88;VV0`B9FSvhCwLFxY&o70)@8huv@0j65H$FkUK|4ot z_j1H80RN*o{5YyOA^dl?pqu{0k>L4H(tlBI{MAkn|6M1De@PDiaDd^GE(g zdq^`3c>WaA!;$kP-|4CNdnOG30Aj^sdN{J~1mWq8>-@_&S^SL9AL_>yt>lgVdp4&Z zoq>FPeO>9;C-ar>N74>({whSDW^=F)4@i4eTQAp617d{j;J<`;qfVDmb%?pRhqcYh z5tWeMsBA_(^HhF*pV0f4e#HfV3)hqJze4}_B9r+jm-!U!5d+814J@xpam(@=%g3~w z`oncE_4~tZ{Je(&oFvAbUxy}U}hy(YXiyEQY`DYMfe%g@hZ~M!cTQN2+#Ik6VT!Pn3!Lfd^|^o`K!rx zVgVi7B}(N#()~Trm+S1g6yl?Y>gTW_pB(r8d}MOV5vdoJbAIx9EBVN?s)u`R-nH?} zex@E4qIU@)ex4fl8QfVV$***tF8Wsem?WdBt$TgnwP(Ewy4gOW!w?*w%%Z4i9vInv5emCwVH9r4~X?Lz%tO>^If zXYxSoo^$O+CkFn@-emUEDCf5%461^RcZ-!*C z=|do2pC_Kn_1^1&9#T6QQBTMKOK{HQ-92c)&OGutaPZs z=DK-BZlAA&a-I*Y$hC7m6yN#E+WA-I&;7pRw9^?pS491qXYE{_YvdC2bu93c5RkA4!mY0}T7DjhmM#kRi7$~C{z0R=$_ z{kTO)hxpe3f8F-iDHqxomQ$2$|BWd3{ZIEVhdWP}n|8yUT>?McIbYVL#m;k4OV0)5 z2mSyh)_YaK$1&~!O`z_(B{|$VBjQvy+*yin%?0qwIRTp(B8H!8^6Kok$#59{-uO(D{CbC}QmuG+exIOR1+L2q6s5#Z03A0a&NjTWK> zP5JD;r5tzyz!jX?(yHE&liUNuFNwJE4}`lo!>|F=i-QW7eL(3SI2E0gS;pS z<}+mT$$Uw>9d$ZGIqd^qem|)lz1jSDVssNf1N~BxM7rOJ_{8Z}MhaqG(7)q4Z+gF! 
zcWHUOkFB5`^b2!1pO+AOJJ-=@ArvB&gX9A+%U1+1hM)TiCBef{272~NgmukxGS9|) zRWbbiF7aN~&!pSdEg#nP$npAN7y2Q&Ul+iB?=(4`qYLf%`-x7cwPjk{O-=_h2;_@l zB;{8PUtSK!Ykw=+zfaMm`s-=;59xn&KKh-z3HJ6o$KJPjnbBwaRxE%0Re!aJGb`pLZp6)-(U87Xxv9x5%@dB4sDqEYlD31FDYZcGcZj%?e|Ei&yN7W+pS+D zU{SrKu|L0CoDxfh>q*`EscWgi`MNm1RL50(iP;BRKi5tj6~H0Wqx9aPiVb?rSp{?! z)Gn@CP?5Cvb5~W8(a0ql`*WbYuRv1B6Jz&>H_tTty8np!k5iUN#j4$d_Iq`OXrbiS zjGu|3B%^JpPECKxVqfR_du_I(!4k?3Tf7}`lD2rq<%*E?7_xP<&dV$&Vevz157Or+ z*9$o0aE_F(+6sgSzRyN|imHm6`Ru>tn1~qDZ!Dhlr^2tp*VT$!Y-X13#7$MLGIC8^-_k)P+bH1g9O z@KfHZWd-r(Ko8{29G^%0`RbH@G{bV9+lf4;=<|++=huo>X;m8@XeVBsm z!k}OLp76Z}2O#1dn#Wp*&5tTXYUg}ji5B+RrJ(vo`vMU~LwyRLRF%*BG%5PT z9M>zxgb&li7s4~0FVCkrFFzdGC(!XThWFc6pZ-Hw58Hof+V8p0&dteH@d&!`JVR$`7VFz zJN4ZCgmhkICc*$>{;YJ&lXTnXdKPMh@cAo_gCED|dN^bLDCEv5-N&ErLo>ZP$WPZF z=BE`q%|BYJcBI&Wmv10s&kb@F1UXuN9K>(_EKYkhvm5p=n9ugQFfQFsF3N5J$DsP3 z{yd-K^XCbfW_^F|FkVaS>h~}|ya)E`2K+2#J+A7oFBv?q$P&lz>qc)kmT-KjofPkS1L-(1$MPiJTs~9#B6< zfp4OQKM3X1v@O?*ted*Uly`h!*d}>y|Pw=PpM87(Y)1K1d`)W=f z=PlaZ3lNVY9_fBT`q#^MBcB$FDUcFnI885VyTaKl;yWPulj*BNh(B(@E3Zt z6#`^ze&FU=LU6k7pj>A`FETg*kNkOw5tFCS!^r~0SBT02==3)8qx3kv7l5uqs4;Sj z^1*o*DgZjwZ*GrA@2Gn@Hk(f4s}m-`+JPABnx1bpT5{W;+R ztY`@D_5Aq}e}0~L>Trk-m|f27lQQo1G@M7D4D82=%%fKXcFE__c&m+{YKde<`EzK} z^~wf&sdDBz7Nn+rkDvb%{t@so!9s+IQHfG{(Jo*5cVGM*bB}GmW>M0ofXx4!UEimQ zMi1Zp^;45R{>tPW_m!B$bG=6Ie4RH~?=t~k+#lk($Isy(^*WWe`FXU@&s~iI3+``( z^9Q;!y$=z-pY%!U??YOV`wV>U+5IBY@pRB1qytSw2O%!Tl*{K)$KgYHeFgw5XS?m} zzT1VmPT$dX;3@&{VT*_HTqEtM+w;thE8}j*eV%QGdpl}p{s!T&Ol*N~V-p?`J4e=H_@&51VEMbSFe*euL4io~r6a$S*znW;i{^ z3gOOb38#emdCo-OPmw~>O^EjEHFPZT5(u;&mwyo8=Wroi3t7L3&&5P5`O-f6i#KnY z;gsM3)5nxvzgNi02m^@u)6ZjF->8>2<6mZv?hyE}{2LH(R;~yOa+;6*nH!;?2yAy0 zakeks&+HSE3sk5O={oC#` z)_u_s4=uFAhxyd(G(Vt=`2%R2>-&ij{6W|a{iA)N9!mZVk{#`aeI)(?#FD?$@)42F z0sNEvn=K!}G=T$%nLh_cYDw@M9sTSu-F3Uxn*;cU{6&@zIvet>|GBT*kbjxMgU*J0 zq#5X6k)AF+W{3{p_s`((3=#Dwe)V{l^*?2?!(Rhu6-s^kThtT)WlN zx^AqU?`geX@(`3k<`3(C${+5|_hyBg| z@O;+mLYR-Z1A*=E+@AaGY?tdE&co$doAH-=f4Rgz(chowFBRcGj{fQZr#{njk@Krz 
zD7gjBkG}=m;{IE>-odugq`yyu>+l41M*MjLq~8S_c%9jsYp^&W%Ip078sln*6FNBv zw9{;VWC2GNV)LVLA4lrdb-Ih<1tlqu7pj4a?S=gHs$|Q1t^eq48U6HAX_x6Q)8F%X zoc4=Q6IcT=R62D0#&+*F+mE?R+Nb~fY~cSUi+4&s;m`!`N!~2!WbvI!@3^3qeX$22 zr}Q4B*(a{UY3Gy0hf@5D{!Z)guYdn4vF~;H_dr;W^H2JGJ<`X%8P
9FrUgzKH1 zpu^=<-=%ffuRFj%UM`i6a|OWf7Zf{4E!wr`<{b~^awI2jWVMxdr9=I?`lEs_;`tZl zoOPSpHkdtN(!IpL}}!x8i+Uv|P76qK)%d0zK9!eLsxv zS3CVLU0$4(z{iO3V{|({*X#SNU5-QQ^>y}M`~3Sf$;Wg%vt<=uCo{is{rs%(7s~Bf z$c^&e6+9o~_imDlpH}#Xln|~Tc~0o>h4B64CZH@YwzvJzn3(m;}H9A znYKfE{>tb=H0k%{viF`p*u;m7M|-ax`9Agy#l^q=UN_%=4cp~CbC*}RZhnI827Di3 zx~4eU--`lyQ?EHbxNc@l`oDr74j-_k_rY^&Y%S zaq_Q!Hz&D8^+fn;!25vI3-N6dd^#_Ld^|%w0zJ5k=|B%|&glVEU-W>n?<=$Ye+lH8 z-d{xicY^#--qa(v%W)qP2$ha6H_@%~%jD1~zb6IWJf4@y5DmHcMCc&-Va{r!X3hbMD< zvp+-oaCsZP&ky;2y|4|wm0tQ&49Q}p)9;D-eT!uAvqHb$xAW&E<5MAM*0c8u`?*a_TTK6zEb)g7G+wQ0KtXm=J-gOd1UFU1eam3*7 z&mF*T=?{BPBA;(#+VS~!01I;K3Hc=yB6Ml@MKzPB>uvF*Z>yXY4DIOmHXL8inf+4I zLD(0V@3d+shthM!;8%wKmr?f0#9otfUx$=NCTZF_^~uIQIgv@d4ADjg~} zmsfg!)b!EUEv~1elcDFLcPO7rHSOoNwb{DAmhv<8676TJSBN%BJ@@}77HIw0p5yg< zh@1~tCI7sAe5MXS|DF@=tiaiO#J$~#t%^6_uOl`-<9ka2;Qo%oQ(n|8f6gQRm{xGO zmn`QK?q9VRE*HNq=6YYV_m&>OrU3bMKHJSJm5vVxf3fLpb?YkmCi*{p4@V$>Vfqy<+J$hmHRs7Rg9fn*5lF;Qf(4|7`fYAH2VPUQOlH zg6?y2bh8Pk+x?kJhdN@eA0wZrOMk|)@yJKiUJjk2_SC;0!+G`Dz|VC1nq2j)w&&~al}}sy zha?Wy0qPH{pXd80@jjI|{qM)IF`mi6`e{talJy(w%X&B`M|KJ8?{BDYeA;^y(|w$A z!-q z7p4CI{2`9scJ{^X$pC-|SLslPF1|+FsorgNM%^?yM{2M$1=h;!jO%r3Usk$(nJsYS z`@DefBJRO4Fh8^>9GApgoUZtNoW^&T2p+{BTcmN`jwm|#WoqAiJXbp3pzuC^?~!CM zeqSdU-d;K$y}f&^y}*vF2>5XN(*9GjAa zW2UF~*?7EbiQujMyp6|CytMJyZoIwNIQ92a{a+s^=I@W#eO!N@A^DJqA^qN6I!;t9 z>HX8%km`BUIB9!+&!_vKf7d8}@7D=izj2>R#s~gT1yX;9nFzHZn}@x8>gkg2<1?Kn zOn-y+=vrw|5c)9j}t8v63;d<$I)A`Np)fzz*-==udd%R8Kgii%wyc_)~ zy~dyKdt}cuDI+H`p5BlfPps*5XLPZ54BlIgc zkFzL++oW)0k93x&C>`Zqk`IRiy@}y4-l1p?vBx`{f!SS}bFZgJ9K4P_V*d{S4u;qTm$+0a&Z3P_v7RFCda;#0Q&pw3D4S2H^(hZ zR5W7W?c#nDYxsM*eZMZ1&EZh1!ivRl+Ii&wA~>aBc2@+;GOB^@tSc@;aZ*YvoS z$9CU$JBXpF@b!0>f=BV$0v_>sP43%#ZLc56_=4|HS7Rw=0#-w@AIPocuZ+>AsvmK@Rp@da_v8 zXZ{}X0hE*Oit$m9w_L*R+4qKgKC5)ve%_gj1Y>-9N#c4{qA0yDzuwZjHKu(&grCj+ zn4WX`^XIXB53l?om8ZWKWdP%pn(Xs$(cVAEG=s|}+h*TnA^(1j z+4*Z^Pb}=pFda1(|yznB^ibD(_do@7NS0d z`vK(l0BSbN?nk$UgXQVxUPSN)jtNWgn 
ze7p}-;@WJjzYR)12S1;(n0&Ck=KJmcFj+_YI=M^OYTP$qA_X5_l+G-TRoz15h@*@)q2)nRhzFz z`cxiOI|p<*QeF&&=y9p%|K~zaaGa`yCwK_0;t+e#rd9ht;mI|K*t;-;mPtB69KdlJ652JJlaxK7*fo z`gkaxJ|gnV`n#t6{SJk9`%ypJ(j2@9X*e|^I2z&6C>;3 zydy`Sw?6=Kq#tcxvc65o!MI^=c26L&bC)##%?g)XtK-J~`1HB#y(IyNZ`FCj_hHuMh z+TLECC-6Ks<#;Q|>J0sQzhpPf`|cMcpU`!u+p+40>D{2+JFGwNEJ>Q>v=gacG@|)# z$93DQLe7^Si z+wW<)AK9~2J2rk1K54&%KMEb8?fi{0!g)L8Sxmvc$Y0nu#LP+O$=M|-iBGkDD4Lws zO1Ai)FbOHVzqiQ!xA;`8&uR0+_+Gi=_4|dXpP*#Qe4aF zMXe+NjwjWNTVA4G?ALbaPm5mY_CVy%`Lmz-z0eoc3#&)?Ul8~eM~OKeTBGr{i!>hI zB(cvAgum}6dIWv{5$Vx|+Q0SJTYQ3c#>Y##4`t&y)ibj{Zg;Zt5uL}v^ACCc-~4g+ zx7hDrL%+Ly$o!dh_F;)IKzqQD-m9Fa>C{eOP?FU4=T{4*g8Mo2p9o(Yvvtla!-q`4 z)8>A#-#3h_C8?0UA0jyax-Gb8@An{lytwR%ZdyzgJCtphwjFK9iCVn_T;>_-8)8NH;@q+N-48-}j!}@~FW1 zzQ2DTjdGIp3gQy#9kw{>en9cU?lbtlw!d#GvG1}Kr|z$F zUCw%rw;%9dMcUWv@wn1m?`Li7AEEP!D!EGcw`wKUR{w7`V2zfOPC?8E`I&o9e!JY9(QH&Z{^bZz|Wlca$5XB08>8Aum@Ryv-L^4gd_e^49O z@6OM){-Hh)!0{0e>ow!qFL=s#=snQlG+l4TV>*7yHSLd{GhT(-?ANzQ?BmSGm){Sk zUWoo$?Cozyx`djCEspo;c-t|LRJEe(jgnuq_q3<<>@7=v4=)-Z`1b;Qy!t*@dLP{O zxBNTb>3RE@+7X|Bd>*KD+$?zGF_pjT8TCZu3HsSG?d&LGAAi+-`u@2ge?HXCfqh*1 zJYVSq0@j0BA4Ob<^!{G$F?(Obetl2Fs*hhru=G7099C@Czt7Y^ZN5Br9qJYR)qH=X z_we^UBmM05)#;(1OS~EXSFl;orTUci^Ipl0UTQxNL+|ReQ)~ob05N}7I=&$EIR1EC z^{sCGIrgR@tBiUaC#%sHjDdD?s`banVwGElpH>BS-28CvpK=GB&*9I>J%^81 zQD@J&jY&1RSNl@r>GE^ARyxiRM85yfYCMhtqFF9F&X`ZGYvVb+Sg5jSuJ=l9Zr zxSyD3X&5i^UtVteoQekhtnbaPj_dn5eO~qVTTxFLhU~s&A^NKHo6|@AA4Pe2StDIM zw_wfy{xFUodr<0CI_5Oh=Rt3)`Wz`9qY`END|c%;9rtR!BhF)Dhb<;vQobF3N{HR>8OFYx zj86(1AL zSn6M%q2=j&D*DvH@^ipYehUFgIa6dlE`2?e?e}dGgjE|ac{?ieG8iTu19%~a>wC@4 zH@WXJ3JlC=$n;bNnc4S#DQ~v#t0dpw!_;iwXP%IKcfOt9W_zv#79w3Ixg7lcYA=WV z{674av|q=Bt+w_vPp|h+r9A3uBf6}2bU&xJ_s=ZV|PztVA1Rk?5+hWeT6w_ZoX zJrlK){+@+UFZDeMx}Qb&=S1JH@F3<%zxNnESN{a^!~6M1m^M4d_2JdQ`1R)i!sn+S z=Xb>oK0F8cM5qrxhJ4-mB7A+U4PSeH%=vosMfi%_@U<=A%b!Pgdpr$#_D|3YuHQHW z%1ikdc2B1*`92WSm4fE``FC;38zm!LUt;QxE=N5+=iRGjQQjlX`n02mke{EQ+z;XV z+C~uw%h=D^cu$NC`8|Zn$qS`jzsFRU8kh@n^~+NP4D-0c`?|bKvm<-GbTqM-En@wM 
z{D%1y?gBymzQcAfS3>%*#d$tismX(!ShYka(0qO0fB50~Q-yH;3gV?H512xu^3>3jDLD<3P;whUyhp+rpJ|2x%TM)j`X5Wa>xn$M2TLke`$dQ^$d6MvaKlbDK zIgz`Po%&x+cctSI!Q=h39rTotbJ${kZ~KnB6+yoLD%00W{X+DMibscEa%qMF20foCUv(VjQ-w~r^FTY~a( zoBm4iPS<;Gh3Gwk*Yzgf|E?FqxL5p9&TnXkVZhr1pX8u2eY4~fKZAe&3&URYZ`IN( zHGhFbQI{UOMzyM@o9wFLZ5p@MZn_>bdPqO@-07-Jf2$x)=`lFk6WXg)D=w7s6SgDm z=s`b_|CAo28AuP?@p-p$vL2-2{t&xO^65vq++81dZuTJaiIi}N`7`ae4(~VJH}U#S z{bu#pZ=X6YKJAcJa-uvlyW2-{vUzo{B-`8FA3$Exea{oQ@!}kk%2Q;7S z5vqQ9-G7mcZr@c2yGCi+L?`7Sq`J|65WaCc< zLj16{=kI+fc6LjJ`WmfwIU?c5_~>$fS&Jb*oiJ<0Z_~X?>(~6fx^#Yrzg?&P+qm?5 zQtqd7o?vi4y;As_N#;;r7z)u}OFwsQk!1S*r0l-F5_}V!C60H$r^fHV^1Z#R#n`_m zTqDB>P4_zqkNoYBV+pa(8^d^ylF*NG2KNi&MVhYPq47G2QTKE0nGynV{Qg!-Fj{iXDe zf3MR0toSk0a~&2D^_=}vUx7aey{5O$zd!%T_d!m|%&PI=g$jQY*{h>85=+gt)wSKK>#N zVh$np=aGCrFk4ru|LyhseU-j0%PI6mVo3h&RGa6HR+Lbi+t#9%ktxCVa^237r|u976v=QrtJh&_XR8M60DC|ztf z)UUe14bD>dn-Bp-6MmsrI5z#V_6aZgWqNg=qLKYr$|v1#e1{e!7q5_hNmee={y~4! zN|pQ7pQshJktREf>cp3~v%_v*6}`MSkbsp;DuHp-ujRCgAJ|06X((T+ zaLf;%7w7p7hids!g=Lx{x#}YA-|jUU-*SbBlYwgHOyZj`G|U=o{)OszU(OS z!+lxVr`2VWycgi<+&&-e8TVxw`#sU}Jmuf*XR>mqwlk)*(axwGl%HlfJS*@{e@M5= zp&h?ZHnp?!2jue#E}ksV-=m!)pNu*Fzlwd!R{PIi{FmT7!t6=cbk7zH7m^+o|FdC1lk+-H)pm6|U=Cjcc1U-iC-{;wg*W zpW`~_5b)*c+YY!A{vEb>2RjzQ{k{Rni#h53>^dpNeXA|nPxCdsYM#zxKL7JQmbjiR z-=|{z@^Ztg;A#)?+n*m!pKDyA02DK8tlGrc26DMXqR8h3p2Oc6^e^R9M1w9bzvtU^ zxz(GkaTn-f=-sVxUB#NtpFFn{IUz0q#P3V_eNxf|wkF9R;{jxl&I%nb1b?rizt^QW z)xNXA$qQi{V(A}?ef`S$w}kS;7Vo(2pGCgD{`B`Sx;-pTMaLkN&)54Op5MD(vZJ2! 
zNfpPlW)ur5*sir&?)NYz`Ze8T&%r-2w3>g&H|4~@do6^kOj#vq=VziM$*3QtU&T-7 zqgGV9s8_3z&vW5k#1r!rp7*iX;~Os1^gt?KtyhSaN`bdSI*ua8_v5`C@9ms# z^A+GXn#&4KIqjtjcZ2#OX;tA&|i5y$@=d%C)tAAdtWby_UKvM*Yxq_ z{l)W0_6+wAMiILo&3uNTmG2e2^qUDs`7rkP0)+MH;N|PH9#LiWbO1A-J)!O39*5l@ zN&WMSb-r?aPr7eb{>D%VA)CLBN`CYFbxiX8xy$DH>quJf$?b}he_PG_I)7#J%Hi&@O|oy=d{i9;}(H`LhG69vOf%FLGFR70)UP_3M(}lO){Yi9 z9)C`@yj$;0IzIn?Y@AxzvY zk?iBKV`>*^`hZ61^PD|%^!c-lE?r;c<>Y+y%u+e{y#(T8j^Fz$c9@*EYD1py@7Iz2 zb`Vq>((<;?0fSG5CjSHCEg}D~#eUy+$K^_w-}7}lG!1Yp$?oT}_t1Z~j`MlCr(X>- z<+%stoL4Bxn(6$8b*zQ{$0R`vb9Ea!m7}+te-5>NpHyp_7k6+%KybNd{iyUkgfc>G zUZDG=1C};)at_&QUZ9`zU@%T;e{cU*W{3Sep{~m(xu5uC4*s9;nBX_8V)a=ZZ)(xE~nSd-qbU$D6KcyW5-FWzGf)fbnvT{rLj*8$!QA$K{0iTT?;o zuYcqF@_YpGkUp@Itz*a(LgttJA-| z9nq0TI@9~&S-bkatgs&3w1#@GezH|PeP7m85!*M@bsF0{+VT8kd6xPa2gnpcA=3A@ z_B0UC;id-m z`2VK-&jUUGndN`!3CjPEaPQ6MPnW-a&Rf}y9u~}D*X4Dt z1ABa~MDjiw>aowqzVF90!vJEM$OyHQ&F_%<=Rmfb;kG{l^_w zXGf#pYRgBrCKp&ui3 z8ed7b`Y+r!DPqMpMy>Yj_d6?y(Hf1sZy--Br?jgOOoN^-p}XH z106F_?%!{rtR=l(>(7Os$uO}=;{inc5)Z|`jrs5o8RjEqdhW0!8~jJ%-~30yh&-p| z`X6bAYj$dU-AVK02iG%%zZ@|&f_wQ5dSi55V{~5kj((}fc^;1=P4-Xy+T`SsZx@&b zoA5t7q9MfoUT4=|_Pg+fm^4nT7c|j);AML5C7N!~W732226{}t*dEsf?eyT9isHM@ z@cR7)`Zd(k@@zf-P4y4?4ejXgi(a&y3#^^B8vA|?>w&z6{mO7WO@+{xpo)*Dc6|IO z^!b%7^x5BgEPfw4@+bQI6Md#e|3}d0U&Hy*eLDQ`9cKOE&wu#yr|!@2xj+o{?B3;k zgSYP-_4yS5No<=x8|c>fd?tCEXz7sQ+@ zik;I0f7hF}!1wX0L(~Gmp`OB_VFbVL>F0_(FD5;H4qja^V6v{jQ(Niz{eA#ZZr(q| zDa4x1Yn;cn*%<)y{hlD@%|N@$;P(OiyORB#uZsQzxP3p|^ZmPsGZtt)mwWkiN;i}H z0O~Tw-?PATex}Ro)c@*Qg9L*2=Y5(?{UiFXlq>zS=QufA;JEnI(QJL|{uJT+07Lj^ za_~PN!k?~){C>qvpbJ&fdxi_q)W1@m=D+oK%#nRH`gIH(XFUHXpUc@?(~Rp|_!sg8 zeGL8{o~|=A9p9vJvQy(}z(+iO4pScZF4Z>rS^GYSzkjo9w!*t!bZPfSwTkACP=E-; zM-q3Orsu6rPb$}{fQes8eJMoxJ_Emh?RM%WAQ5?3y9*&7k;iha-#^vlVS1CvgS*HW zxBUt?06CE%`dPdO$$fm%$H4fU_3|^-zk`~L{QH?*7ivawxyIBVkr$GzU+(yx%4@l< zpZ$4eKc^n)%jyw+$2#TLpNFG<5`ovFUQsT2xg7uu>vJ5j96uTF@%#0IW=ClUvv71z zc}NB5=l_EPT3X(w4*W)wC-E~Br>gw?J&l7lpA1haNV|k%`#buSU#3Y{PoFmI^JNM* 
zOFJloBQ)*0#>{7M{UHA=@wC(Z_-p}_aizcH@95xiO(k}&E%%yJ`E*4oAL|s(-$yn0 z7EAN61%da-iHi4UCoxyJKJq=}l8;#Ow+Xns|EB{NV5xVVh|B9UJ_+IPD-nBx^8Ts! zM1-dur~C+mXpi6f8U!8&-mlIA`tJtQtNEHcsLkNL6fQ>{|Ge*zh3U~oUS2`)c?I$o z2%cAE0dReqa()YP8yj?FK=^fZPmev%TM+F>d4rxE+tV}k+fsveF9@T9Hs3#P^!UDg z&(wbruwvn;##28p@d2yc$j{8EY-ewUNDhGaQ`Lp&} zj{h^hgM5m%6+Fj#t9+n}eE=Tv$={f@uzupG!Mue0HHeE-teJmY-<$dqBPBm*$@n^yf_c`MyTIDMjU3>Nk)dq07cw zyzhv#dyetJd$G$wPwyfjOYZATg64R?-lrSJI_m}SR*>FMyP}Z%dvs6yh_1H^vYU!} zhTqpysT|P`?Nr##3utE;-;pPNw!`4>ZzMkupU_JCHP|P`vK}M78(l8I%bNb3?SrWI z`>3~Lt3B_xRqL_8Snqlp7ldQIUMt^Y_?zfwM(>%lv#^#3427XY$tfzc%@2<|w$IL4T=)dj33>`{Dkba`AWJ_e|Kh z_4^#$KPSolK7Xbe4810zqpJrDt}te2&R37to8g>~UlP3ZOR{iu zbzRj_*3nnuAi(>^Ugd}-KYKJ-^kHWbf;rq-O+>Q(& z$sAmDlaZO;Q-mF2zUQ+T zk7@nNv~j`b^^A_Sj(4I@3Ed4Msc}QyIv)>!KGN;aEyeqd?~d<@JgZ|0$8p3kbd~no ziH|E{Xa9UGrH@N`$2-^dg-Ay)^4(s){|44VTFprG-dNMlkIR#3wy?QPe`fO0f#>6t z^~m5{+gC~PfluNy`?K2l_Ij0q?gu;nshu+UWN^-Rj}Y$fk)vOS6UajwZk6m*Zxl}} zeg8F;8_*FW?fm5AH&?rUxjwmGRyzJhTJU|O;m%*7xtMGmztL#?ppK8nxi~>6M2|>2 z^-)Q}AJg*Tj(siaWBeQa90z}f=cEeJFDad z9Nl~T`RM|l{v#po$5DTNANK$c2o~&b+@tg|zXJ!${@qx&3yuEvnsw~=KZVY_9nRzN z?~vStHcM>R^ugPixC@yG6DI$s5YsLV;2-_p^gP7)rMy1+z!dm{s+Z~WSwcVTiG4q5 z#&Ti>UW{*s3nYf$W_Z%)gv^g7-djHViuBwhk2kBQggRXp2=*hZdGDE$^Y(@=R(qV7 zo%HXI`FdtMfJ=b)b=Sr-1yQQcTz_$bOBSo$mHvDSThQR^bbrrUa_u4Q*Sodc{d4xu zH-RfTPvKa#^^awpvV&;$P-)^6Y=wR|pd z7UCJUe&GAFuybAV=MuwqX<)ov-xqTI@$&<=^I__n^AWWFhxlE${Xnl5kE>i*?|-t~ z+j8|Lf93kG zu)WXj_=?oyC$U@6`Q}``De1oDUrW93LpQuX()z(TcvDb6^lRV1{Jo?AG33TTx%@Nn zS--@On7*{=pVzX!Zl9-gH`>wZYl!bT;$fk^5OjYcX0zfJw24)t(lzuAdpk4zd=2o!RF-?^8Y(8fq&nBC*Pae z@b@<62jk%RDJ%b+)Si6!A4N}|hb?zLL%Dvt2t(?jBqXV;&IT+etO~} zYXo)TN7at|dm#OuPbl9<$tV6XgzX}Egn3Ti|3SNMXPuAG-hay4KNPf|w(I*OE$DuL z?PF{)RL6AwBA<6*B1-$GrsbiW?+)s{n0|)!<_7%`_V<}VzJDi-bezuosOWeE#&qD>*ef1 zSpPk&kADn)4$SvPm5>(y4Dp1GBl-)>KWy=^tUo`xhHwl6i21Xk$8YeXS4zIu*Y(KG z`Ud06@wZ=(C}RIUQu2{wI$!P;v@*ZEaO-P^nYys}@i(%cHiYu1DqN@!{~IV(u z`{49UfIo&-{4zh!Z~UmzmGS*%g2BG&`jvM0lQ%?nGVhIFKkoyi^Jjzp%$FCU*9zlY 
zFYrgYe_{JIyr<6m=Jmw?$0pK!8Gfh3(~PtA$ql?VoXO!Aiz=y}nH)&(V)*I! zF2?hO^Zum2ii@FpTyC-t@OyV1M<3>~dRkxfVfpo|^ZHO}yB}lc^;JQWo`aIUPm%7u zmLJf4_Ycvpm5x^mLhnc1YmzkAA81aVGvxZ%-yh3-hRlyA6xWkqkeK`84E~9Kn5DQ(>-qFe5U#9imHA`b3kM38o-?yNm+c)1w2*>*uIq#c&8?x`zaC;`{}_n>o3H<#jKT8_|G$Cv z)Q$&qy#Bq9@<=+WzN;kuz;_6@mpO<7{GTD8`FY@0g`=DvW4X<{KJSO~&)*YoZk`~W z&ph-wvHsVb_ntQq&)*U*;NzdJ`@`R1D{KG9Zwh^%WIK0jyCHpF&DHzqzwCdUP}jcW zRFUs}ZR$Ont9S4J_?-0Cdw=Uab<*=++SL0}uHKu{`v|`y?cLF)-WOTV=g~mEr-A2G zK3*R5`){+J&!fhFs7GK`{N$V2uJvyypATSvB#+;pb0?oP<=*erM|Iyctj~!tZQtqJ zN(rm}Jx=;swELbma`-mtwUWa>Gas}tH1c;;7qP&H`wJ@h%OMg&@Nyf zyk3azlKIZc?;jR; z-(SV8wC@x00OZQ{?Hq13(+srN%=h`TAg96^{KSxeHOZOZ(_?7H^PJcy_VbnBI`(%Z(CknWrO zHQRAJV)T-q1*q4k-$xqu@!ki1oPQr*tk=u-e12)n`nBNZx*Hb((c6#uA zJ{dm~3j}>SZYQW<2=x(-^L9_ZZwb1a$KzR2f%cLb&hk$9<*v7fkuD+M@540N>6c z^-!OI{uXm4wn{SnPHy_1GUyS@`=@n8D{!CF{m@2z+A@Xx``2jO?SbQ?zxf5UF<^Kq z(^U|y>VGAupMTHJFxuaVi(I@v&zSGuxCeco+8M*|{!CsT9Cy42)0+Mxdq{t{KO54| z^NEapm0$Qi6qOwRDxS{=e6W9>$3?3=J$e3i?6Q6VUxX)ryk{HI!Fxl*Lsy&h{yKj9 zd2s$7>f_6lD#8(S2uJ;8DWW|-zxusmzo$8PuGVkFJ9E^Z^Y{1l@*a&+PWYLl<&#vg z>T9&V!VEfkw?e&;C`+E)JvMxvwVr9@xDUMhx{3eQ_Gs3 zr|I&5BqP5sm!|u)JbX@q{mpgNB=r|!V$G)eBIt*#UvUrI`hRe%;WIiop9wz1#OL#( zKPR8wH%2>a08{L=d#wFPAo%@V*PD$4nqOb9G0UNrQLB8g&@!J>RV#Rs+$z5mNi@H{4^je9CDB>T~@p_l-@CQ<2Z1cRAwKh_AmCe-P-mkPm;~DANq-cpcWX z`$3gyssIO}7i&;dELcCjL*aODjre?AZQP>yzJ6f2^ka*B^4$tv`mqJP^ka+sQh;Ya zeoH^1-G+WNKGS~O4nTz7ofLq8fm<~Q`C z#uo48QZb4_jskYqn|-dlsXd^1x>%U|rz*t|*gQ~mmXq-5h(1>pBj*e^ccxnJk! 
z!j1EI)RF3Azt*2vqj9mYN`CNp6Sl*3AmfdDwcO9O{d>l}w`%@DjbMj9D8Cw?2ilW9 zx907XFI^&V{@hk-ulf|x#!EDII#YYqB46~pMZV~Hi+s`Z7WpMK>Fw?Z)1v3-=LS7z zWd!Og&pGmO`tc-9#qx9Uw{r-*fwV=M%V)IvSLc3xofROlD%6(eb`@!`JPjk5( z|2CRF(e0pVaCz|k4W!BW+_(e@gp6NR0AFtu9iEL74l!BqbB2PvMM|HSVSZN$e-PY$ zOzhP3?HbAb8|+BpGcqIlKKUMAwK6$Qx_ir(jXNq(9tW z!t-7NH)wM7@&cZ=B&7GJ?{lZ`1LEL9aCF*)-_jyjRvk+L$CR^pRzgp!}bglAd2Kf(xKI%)@4(VWj(;m%ThXjI`GoN;6 zH|mid_BYcrjLr#zXF2KeeAc6VnU9$Eg#6HkIXw6Ksae;XT-e_|fSJ1nKM-7RU4M3i zNXm6P;?;;Zk|~6=pMC#+tLAfjl2fc-M%yr7P=DC3B*pPMysw*Geg_P$y!Jaf z&P5-HU%&gl8~kJ09XUMZ&HZTBV@T<5+h68?%~yD>KRvCN?V zd#>V{SZerCD0apA|GgA8!tF*M<-qdyzfQ8FiK@mM`Yiosi_Pv)p4`{)_q)3sozK+n zktl@ZDm`zY-3!kH2T)Fs3ciHJ;Cy-eyHQBGHdwptH^RRd+yRk)N#<3-YxQU+81TJ4 ziRbQ;D4Jn%Nbzpb{N0G8ebaAme>d=x4yS)&iNaSp&=5kh((aur-cjE-qI?*Jy7j!j z5Y3bH#9E1>8Sej?AClkKt1nVGzn6FzI$lE7j@y(#f1lQj^$O?fydAe`!xLv%eI!}$ zu*L20y8iCiYxUO|UL={*KlM*189QHv=#M7}k;>8Zjq(yXp3hQ!j|S_xJX1N|AjQy! zM!(DNRp$4-{5c(e4#MxP9>#Dg0af}PP0L1oPUw>Hc(PRHot4Hs$@jLfYg-`X^ z`n_uR;b++RcYT|y>KRR3uJsDC>I7d#-{x73^tpT-zA{x2xO`UGbAe9R0ICzi#6rbi z>G%`T2Zv`n)YS6w-%RrfS%`i^()oKF?e6vX{nXf=%c5uh?z!-3EIg*pz)_eY~#@LYv`A2{8|g*?}wN^z>~|E<&d?0-ta*OeR1&iH;b%Q?AncsZP*u~lZW8@IDRhCYx69}1=k~lVeIt#zCpS#V(|W6 z%<#QC>)^;2(ITzxp)YN}fOIFTCgD1p8}lyM9`x@}*9&KfQmFYtOPJaFB&&zWi z`vT?>_cwh#QJHe8RCGMbUNL0Z&N&6|5AP|XBa?C`BbJVe#(vI>G{}fLSg)y{Pvig-DvrH zj9$0H?#KIcksG&2_1rzHXtdnNRymJx)a#xeb9`TPbDt9E`|Vw$gkrhYbAIpB>~y`8 z*t*8|bEzLCz|f8|u3v;d2<|u2zbGMH{}6NWo)42}vRL)e_hHVle(>|yn*dwFzdHJl z;T+RX9~WJBXujXua60N&Ykrqa-1>Yq`UCXg|ET;S{?#<&@`$BvT>5xEhsuEv+jD&9 z&{#`a*L8{Yx8H+ze}1Dq7wqSL=ai&c)ODA|7`F`my+!|SxXbwf%1BY8Klh*Sy=LT- zN3xBu_& znWS6F(gs@SP+ADIO=q7wT_9~~Nogs8E`&9+cS74_$U;*D9Li!4k$_ti@jD<(1i>gS zQ4pdm21SXu5JbNOM2U(LL@|oO?>YB;&ScI*B7xuk_xj(yntPu2dG2$bv!8qJotehH zsphf&x%t!ETlS^gYu38kTRT$QTAEXCI_g_ncDKg%xto%y=Juv^Q>t~v|#?JJ$#)$s%EsW!K$6LBn( zC#w!bu(hMDePvs$xvBlY%C^?T%DqkTn%1^hTU)BtN+KO=+M7zQa(inc5i^ty1>9@g z%^myP#~o*#*0_B&9-@iZUQ%;M&q}p*>}_AQs-t;-Yb;xRLex!e#+uzxDvPL_%AzCn 
zZrjJiv+2gYEwLo!cU2;jO6+NDN>*oOVNmddl{?9uwv~H35_EBUYGq9#*1Ef8Wov48 z6BUMZys0^v>a0n$#wfQnO)V=zX>T~`^Tp#aZ_wlMrdP(>g60ZOO`yhOWi-{CXi26h zukXY4^&RPSs&z+8%l3VgU#leFotqPz+8W!Mb~mS58?)`LmRs%?s-~8-J5E)ZY+L1S z&!pVc4BSm^?*H0OHnq0wD=1j?Xf+{Jp>--9dV0g3{JFZ!T&kqJd2K z8Qq&ox4Tbp@6V72ZF`#%P0hRAQF-Z@X!3N7pWGU2-kox%tOiE~>vp#`?as7E-Ksr! zygq**7z&$_SUizTrO{3|H>X<)`KQ4C)~5DYd~b@^;3+MMj(ya~+g8SQ$1X{&Y)kH0 zi3+!BCnlSHV-wknl9S2uel|Aacf(9$0hqa4_x|wGfrjFdLY}<)Z9+lUg6%{=9X<~ zg?mftzzX-)*uHE_YdgiLfk9NRXiU4?Gb`L@?5Ae;ev?GGQ+lUG>d)DbXxY-yR(D{_ zS*JHNuixqo@7mF@!GHR$jXMrpy#7?*mIGTGc3l!$*Hm}OfxxMCyX$wfoV%@M-ciP3-OV2+0qTM_8H?(f*NN-7Zp3}5t&#u!>IsL#{Tg}!Ddt#~ZzSxHCmjuEc z*=-%`Pv4aEpT4`L&6~c|x9*a2F5PwMIiVKwtjM;uot@j-_H6Q=o<93@Z`v0M9ti9= zL#Le+-?;zc4f}S)63MN5>axD}i?#$B4s6>Q3^XLSMJ{UD(-{bzdFj>z@%YwVr|v)V z%u9BpGl`D%4eKsCYoY_FCVXP5GnMGDny2Wg?^ZZQbY<0og$m02^i+E++YzTBneOLX zTbg%oZ@M(Kx!D@&dEx(mF;pc}7^bGg86o$<`$_ zsJF&woV2d6`d4dgf8!;ogf;wSV+k7l-7S}-TGM-5_Q%}mmezf-_I5EYP_Z<%yPNi9 z_ohU5m8rF++B;gCX}G6>-5QYIZ9lzFv|sW-jjm^8g`<38a7V?|hl$~YCRv1T8r*4I zk}fA&T4{8kiHR((+}DyMt2H(2TWBWTZ)GjEw>1?@9&qooD)Imgu_8*$SgX#<(zKS? 
zBm8W4uc~mzX%H48uN;zT>|ML|n6;EqnsM6n;jO|=Ju2P4%3UG32!O0|yFcV!PJb$v zS6uGCytJuiqQ+2E8F~*lT(t zK9A4q^ZER~fG_9^`NBTa7x8=iUcZmFu>$^}KjaVlO@Acd33vm(fIkoj1OuT!IA8`M zK~K;d^acIFKrk2#1;arz7zueo-jFZk4+TQOP$(1*nW0G76ZVFEVShLf4u(VFaM%n- zOpoa`eWu?Gm_ajShE3CqM99SmB_AQv5xOiwC>`5Nv#>QSw6(`tsa1#>*R9%|jG?Au z%_%7{-N{i2vedS&q=iho+o}$lUo1ha^#1=mtgiBZ^~e5S4+lG(;wVb*yqcPt3*xk# zX%Im>O$yOS?PfPGn-Oav4GWd9_>g%{EeMqZK1f=~U)X0kq|YMc>sz zY2AeG^scB5>NT`9qWPF+Au)N2o?L3PIc!dQNm-fQhU# z_uT)L?*>N>`N(x2|AKSY?74@W;0>(WxM}lg+s;U)&j04O7A!1tRm?bSekii4_e;ai zm4~};>Mg5SeNwvV#+&E1G(Pf|*UpXq?ceWg-|@LyYE~X!z4O-mZC||O?%w;p^U!0Z zGiDxs)T)y=o_W{ZPyIMwcEpj3m!5R;E3dxx&f`xw-N!CHuG(*|syl6K{q~)^8qPic z!e}Cu-qY53`G-Gx$30)^8yLF(E6ptrf8xT$A1ZM;S2)rR+sc~U!J{1BISZXj%8xEt zR&t7S_KCTBN|!j7IICTO8QV5o87`k!;i_7_G2%$L%02T+j&Up~vDKQ+EhQ_R6=mgR zweI7cGs{DcRV7E3IcJvDZw~us`O9it6;~d2)|O?i6XzXy+`_}=mv5s4PMLK?Sw(4` z>-h4H8S76vv2=AwMd_KPwvtLmN$&di(RHqh++7zg-Z-P8bk?D(N-ILko%3_wUX$ED zv#z{i)5ZmLuI;lnmsRB6+*EOt+2;6WU3E*kV@B?= zk8VxOy4q7Y@5Xx$o_gE24@SyPbe>;&T*an}>XJhb_MMm7;*6Bdtrbn;GvltSe|cj0 z?XO(vpJO|!bhguV<;Sjb?kSn&C@-tLIeKb&`YBU;hQc!Z02Dz&n`bA_mL}4 zbzHN4&f!=>HfN^O zq|8;j7H_+9$IPS1myl~Vsg{@JezdgW>QckzC@Co|wU?E;%F5?fESzz~%p+%2&Yn5P zS?M_BkVDJo+2%W|Y)3ebEL&h(XkRqX?O5hmF{8%jad_=M+uimr+3#`Q>-v}dt&;!P z-*&uHeqZN->upH{JZDuYTvj$Dere zhyVNX@7^(-R)Y?&TD^AjX%}4maf-Xb;C_xqDvls@`aawH#TeT#?8r8?&_~S^zb9Ucy{ddtFF2J zj=LUy~kDhdhkDvbIRtGSCxwfcTs6Y>5--9lr1aSRI%Ji zZNuT6vD|rN=?q8i%M`WJd%7cchii>vj$=)k=~`BD@SV9;u9b6FIF6Zf%$(fEoCiO1 z#EiqcK3TG|WHq(4s`A{!i`!@BetG1~lH5Bbxff>s^;So?{L1qW%k{f*KPsuHTJ5MP zHC>xrGfUfN9PK#Qc}{ums;Y$*^UAk6b000e_l}wKo!;A=SN`g_vY90%xw|T_{Ck{#HKGi%BL8UuBsxCNjw`dxY{y0^_*qc=@8v`=!C({0>T5;ZP3G;bU- z-@Rz2JG$t#<@seNdfdxf?iyKc?~SfF`mJb<@wPkkg?FN%|JYs(*(#0+FPZgX_}iO&F~yvX-iQB+U+*yv9_ZZoHt`t zc{%+;g{_?Cnv!LXHLeq@Z0;~!;B-+%l~ve}vaJ%AJ6)twVL#Glw?}AFa@wh3*p9Y4 zY%_#hLJGFS?DJ^KB5ktmvXwb1>_^*H)3r0{%4#xCRvjhOWXkL_EUUsLO4Kgoh4u*f zr&2k}R%dh4?WE1-vYlzOm(6s=ZT9jRWt;5_$ezs>o^7KPOJ>-Xl-tryTPZnZKf>;G 
zR61u-u+%n3d}FBNDEra$uhwoWbJ^@O%5BtKZ5{T-wo4pNd%3OD@jMj*d0Qqd+Fhj; zcAICB*Xbd-#8zEC)9$8B+8icD(uIyyF1x+QVVh+u6A3!(Pt+Q=M~^WaH`twrDLji zYvcF7#1Fm4^|e?^PGvtzeP0|?#rI)zaNj2R?zhyYT_BS2_{OLus2*h&^X?!o?rr=`ZN!b^Eial&`6qlp;o-%P1rz1AL)aF+)7u&dMPqd!$QGDEbs?E3nU@t|< zZ&WpmV!n^K@}Zb#h$}yf`A*_)*evE35|`f?Lw+xw@PcsR7(7k}i?91VAvZKe={i*o z>MWv0@QiG#co!;fs`zeHoQPL>Ew5&|~HD1B9r>MX7pLcKK7_hw>4 z)!0NF#c34t^N2&;Ft-+4q2F0Q&1S3>2I+`V6uD^~KO!!_MT_1KAgQhDv^wnDn%{bRSp#?{1UA}mh#l1$Nsqn~7C)nW}K7az}%Tuiv!hx~|ZiuL+3 z(naY58%N@gWl|1*{D9>x@`UNs*G|^f4-c9*XoPvdZTg-!$F;(`(yoUH< z*gXCWy}TkMt2m)j$@0(R&m(;`vM3LNbgFwyqgt^>Y*H7uWO6 zB&$9luD_LJOwmF#hRI&>Jn|^Tms&CM&wJT>lJtxH=lU;Dyej8%atzay@h{>_k&YY={2H2?pKhVw16A`{J#iH-=Ax@9UmhiWRd)g6stk&GoVb{Wj;8s?1+U6)Bqf1LTeVv?W(=ieyvV0GTIH#b2Z+-?32TQ? z?T^CYVO%NNkKnavNW<_`%;72zP_<6tx%y;Wl=);_tP3XNTc+S*Jw2Je7=kC`MuHyf zx9n@Qo`Dz>PZfDN!z^7b&fmdTw=&NF$C z$>U6RW8zl+<;3Jbhmr@G?81DY;@wQnFu9k>V@x)e>gm-pxrfQaOm-iqUms<1FO!Fu z+>gmtsOzo%e73dXL2`_dzswNzF_5b-A0#c_xoB z+1Q|8A3a5vvm2SbNtcH=>+&d*&C~SwIwlV?d4$PhO!kNc3LUC^>X_WX3V#W$(>9dWbz1;J==79bxf|_uE!5Exqp`)Kg8s|20cD^jxJ}<)8&39 zSK;1Qm2Y2E*B@cBE2hU+C3Lx)$-PYOXY%-N{rc*RE?YN*gp_}tCOy83$%eJcpU|&l zay64{ncTqSERzQ=*3%zi@+gzl4TJK(3O5i+_AoiyrrYv9*9N15#I(Dn079=b%2 zuivlB877Y~Iohe~4=}msfF56UsV>(uIm_fhCXX@M^&#C}1CzU%Jj&#T%k=BJm^{Md z&V#!C$W^**T%*fXOdeu#*R{HS-A8nJh{+9IdVKwLx}0Hh)kpRCVJ7FU*W>#>rpuWd zbh-NDx;)6_?2USSp2^kSdVB+ub4(ujgsz|aq%POqrOTa6?&{U!2bi3_M~@$6a_(L| z-uSXES2MZyYkK@RlcV3%UG9Eem-9@H{#B26zoEEnLNs5<9yx!DkgiFT*u@nlRKH5V{#9Z`+9j?q%`-lZTl+#$;Ec?!TMKCX?%#oMCc~$vsT&WAY%AhnPId zWb4Opipx8y`|n|L9h0L>?qqT|lY5ywz~n(Dk1%t=E_lT9YqF}Z=s876l!xr@m?OzvfJ zKa&TUJj~=VCXX}ONa*=@G1<*zlgaf=&M>)?$z4qDVRA2%`@Jy*9TYT6 zb$Nuz-81$0K_=JE(&KYX9$>P1aX|QEr9WH0e)MQvZa7AlyP4e2-6gz*6VUVldCr9@txSuSN7^o z)#HbloZqa+8>i`Vl*xG}ySC{1StbuKxqqv!UwyhRcQbjM$2g<_F8A!$`=?5FU#?%@)2+(`*e_H1l@I9go!CE9@%7k0 zQ*sphVM;b1)UU66NS6nm(&dpMUC#VemwT{ZrR+zsU!~-6Cg+~lukT~>2$Qof>iT0Z z>2d@1b5wc**uPP7C-!HQY`&pipJ8$xZiIxslm7QIxvzZUdJ*4us4nNix;(^Wx2eZx 
znVe^`Sv0xwWZz4}Hyud+XLy+}?1FrzT`YPx;42F;3(4ceuhz z0`$R{!e=}Bps?RM+YEYDPdlyv?_>NL@Gizb2A*g96W{}ke-3<*@!N+@ILvEpr2>@zkHZjz|-JExQ|nIn!&S7 z{}S*z+{Y>XE5N&%{pxUiL?NZd^MfBM@arlw2 z_?;}FulD(s{?X7s8T!khzXJN|If5H}4Y+!KpzLgco!PMCh5otFuf_8NrQbwccr=Rp zcBmT{gE!1kM4k_TcQJktyqoclf@c~31o$xHw}98HfdR>X5xkS}d%=4ce*k=h@dv@p zxq7}H1&=cRBk&yKKLa0Q{MX<^jK2(S9-^oFDtMIfzk%l%|2Oyu<4ziPM7vVYi&VA` zwYc3-66n&6$}i4p9MY&?gEd2k0~zA(R?HAjH4XJpnqFb zau*D!{VFf{GugUPUS=}})W8WlxumqCIFdKkO<_m;Zi48K|6={?5{o+w_1wS>`_~Y+ z#&e{r>^}uQ0^SGx!>s(!=a3Eqz-b|PmhmOvb&M|uk1`$r&oRCRyo>QuhztMKXI48= zFCQZ=+KGCO=Z2rRKwo_ZR*et81$U!?3_^dIH6PfFI@NH%{oq}UuLd7sd;@W79tT(D zc|G*2=EE>fp8)S<{1)(f#=i)jVfN z68;|wUig3+_yX`MrhhEB$@ntxTE_jvg`euP#`MuG>-Z3HQSa(`k{juM)Z%ty2YrT2 z9BSJ8EcDg$MYk18>q_WvfxcB-6!$IYtLIrt{|V?X#b( zx*uF9x%&Hk@H)nS2tIz4(v;_C!26i~ufVGo>-sM#`^?Utz(<&!H^A$e{=dPqcrKt) zDY4dz)W42VM4o4Z4>CLR!AF_C8+?q}KLNa&aUXa+F6hJxu>};-dYi_bJ@S z#`)mtJqpzh_rQ*8k)Gdn@GMLBa&SW##%0%nyP2IE!3UU~9`H`4e;ar|#b?*W zLDRH#Y@+qF;Nmmy;!yk`_&DP~1h+nX;}Gl0racbc)l0J9uuRTupXF09! zMZOxC{u<)KfAt+G9{6*!rEfQuVt%oj4aMCK{c+ewwDAk#B8ANHFo4rvsMQKFajp2CriJW!8GyW{flaN^lR;KZ3X@hg$HwO{SlQeun9{fcIlQRo)%| z&oljl;C+l=4?fKJC&4S3{V#ymGTsZ`!}!<0yBU|ik4p8<_!BDK6IG7n`Dt)-na-aB zuVeZzfj2PzC-82@{|4UA_}k!BEM1p%e?s?1j2{AC$M{0xqJE>`s<G9|-S`yrX}YnF!@+N7c9s$seyZ=r>4pB) z#D%4?yLsDFgve<=a~M#z{eSPSod|bZdU^pl9>f=uGG_=2R_QUy#J)b~t2lyYU6ITRhG-kUDg0d>(aHl{vqJCC+mD6ajQK?WjR}Fq_P}by|=6Q8gTV~ui{&+0Ige? 
z{axUrj7P!iYIT1u0*^A@4&KT572w8t-A%-cpc-z;Ju8$1fF61Rq!6h-voDW(ESl#Dkth)y~l2;kxC`Fdf#1f zH+UUuCmwM1KDg4aB`#8EVCkL)UdQ+j@Lt9*1Rr2L1MWFR&sQt>7}LKD+|Bf_1U-pzWt_bATnd>!0%ny&vaxUo^^gWwsa{|s?4&R@sInU|p7$Lzcc z-plN~30}KNPgi__ld%5{X1|iSi0NbcM}iMBz688-i*COLy#7?3%Y8|kG0gaS==ap= z`dh&}nf+bh8CEY*@DXO`B5;H0w}X!{{VTw0nZEqq78-9EzZv=sjDH?{kokWXanatY zqBbkTs=a*^++h3(aP_$jrTM*4j+@!}E_jCVLD+v0{qY#s zABO#UW`7iRMwy*Al>IYw|NjG?XS~$fzoz#P>UI4&;3JG50p4(yu751}5VOAmyp!1p zgIAuZ+gS%*&G;5@gW1^$Ud!|w!5bKFQudjhHt;OdzZ|@a@sEJ#nVp*yXZp8*4>0{Z z!TXv1SHOoD|2Ft2wd}@h#xpjPC?Db}A3$xeqv$GKT z>oD%A`_1LhA7}bO=!@?{7RLqFU&Gi4F22)P9P0L9C%E_yV{xpt{u)LKTzsdlI97qb z7NvaAdm8Hf-AeEiVv>)cJo~}Z;3FXk)O(u01g~5nc^3NT#HAe%5>($)w>=?w4*KIL zh);r7V%+O>NJDodsWj~q<}SuYnIR z{w8=Gv;Q`D_8i?#=^`Q1emmo{z%$IB!@$Rw{zC97mhKYpA!cVeaj{-fpKGmxTdS0P zroSFM%F;azJkR)9;GN8$bBT*|)#qzfx*6#AF*{lC%JcMclh0YHoiqD6%f8+E4ry_P z8XvZzomZm1)c1ehNcxkttNW(lPffu`r{E6vI$bEn{!Ou=s=F2-l|ISDn6 zeV4cxpYOzYTVu$>PvtAdjcSy$(!T}cNuKfQwA8NySM~VQX35p(p_HAH7Rjraoo|7Q z_u9naktgF1?#at-<}s7=6;trjh>LRQgZ*mQ{}6HO{wQX%h)VZPi`$K1ln>O6r(tIp zc3g;TvTza6}n>7NJQ&v*iSjPX6-Rm^@najU&G$^_N?ayRrlnSLL5mho?c_cH!{ z*dG8_@7>K=BFkZv>CXqRgg+<-Lw?UEJ@2?s5qYkLzK3xSc!qHkyqocL;A4!R20qI8 zc5pZI^L+4r#*^S?RQG=`_z2T)2k&J1mw{I?ehqjx;~yt3`b!=9i|W_k23Ox-pvHlh zVW&T?`}wM}AJh5Yz-yWQf569?e#ugCJw5ki`m?}?8J`DkGQJ3Wl<{itEaP7ATtfGM z6>-sjE93`@3h$+-EKZ*jLAkkcSvR=)9tTx!4-yysXjs{I$O~SDees(H;+QM3G4D7L zpmIn8;B+*&E2Z<}!Rwg*O7INR4}**Mf{Txn!Fw351Mg#Y&IGSZ7h5rmbHPofp8y|X z`g_5<8Sfx&l^b|3lKC0*&E0ys&qH7RW`rvL)yK2}qUv|A5Eq{2!C}Vu z3-rZrEr>(SQ_IK!VMqO@gyL<)MZ4v{-Eb@P2X2&hM&!wO+R~@@IoUYy7WDg(?l|hJf!hZ+ANcrD}4fDh1pO!4tNc!uf!7JQ89zY5;X^#2at%lO;i)yz)W zGT{Z?x6=Jn@gd&}p>+t|w-p~pK!2F=#o!*smw~&P{Q!6a(_agoWqcEO594Qmk1;O4 z=YjecOE(7nDpn2`f%h~0Ht;&8e;Ifu+Z0!-ga zT;$gkm!7*}?gH>?#;=2&`U6nJ>1J^CJ+dnH7Vs{ne+Rhwo>`@TKXEaBj>G>lMEnZ+ z!_3ah;I$u87UcOa#6|fJvhokn4VuVrC-PN`%i`9J8};kUbUS-3eY^2DR-Qf3uVU%m z2K_gg{*NtA>pi5a>iwVK>id=4NPg)`Sq_WoH*myJ0)D2&sr@iJmlGFP7*{|Mr)$CU 
zm+QP6y!tAge@<~`=ZoNDSL*urf!8zrZ-V#dbo~MF5vKn`@Tw2%`ac67VEVrSuV?%|<>@)r~ z#k+Jn4}lLd{l}I4YjypniHq^)M2zPSM85_7>T7g;r&sE)VDo}}uZ}+7#PnA|U;LJr zIMn!@CN3;>GXJyS4b0C=!Ly8C4L-p5$BB!4tz`N74!Do`^LyACVg9@Z-g~|9cjEXb zxaYbFCPH?f_)G7XGCq^I@F$FPWogj568fD?e;s%?<6FS<%%7d$#z#f^6GtO>J>yNp zg+C@MhfjfDh4EbZ`91Je%>O^bzUyNXX^POB;A0=xxxBHZ^%&DH_X|B*SKpxP9|E3Z z`U}9v7+(rL%FbLEx!4C_F0F_%e08U2{7xR8EyRX>{uD)+jt@k=$r}}2y&K2POpVawB zz$=-4H+b$QUH`M-b)VAt7s30P{+GdBpV0Nc0p7{deFVIo=|2hH$N0~{2N?eic;zSc zbYB6lXZ&^WDC2K|4>Mj86n1G}ot48J@Os9N0PkjeF?eN zz^gv3+n4Wc)B5}~IzI>cT}(egT(sNF=cWEIF1rW%W6aLi!3P+B7(CDH4}xcz{xixB z)BiR2Fyp@mH$JQ9YYcpx@qdB$d`{PQhJ;M}Y0RJ5;MJ^Ns=zxLUku*Q_;T3H#~JSdcipNi z$n$OBm5kp5p8X$P{{ir!yw1N1ZhS%KKLBrFT)t0F`zI{jUqQczrTa2?AG7mk@Nvff z0d6uowy^Mz`r{qCpEJP6nEv75BaANs@5$?SP5`fB+z*~-d@Xp6@l(P37(Wwykn!`t zt8O#2BLyC1yal|M@dMyvj9&%b#rO^2=IvUt#%I8@jNb;HXZ&999OK^rA7uOyaQ7GW zbe{z8W&CI0os9nm+{N0%D@y-P-ToMOKjZ%d@3~9YcbGz^=L<}KCU_Oop9dagTz;P) zeGdlXCqjSVZarN;c;O4Q5WNO`q*vG91ny@14Dcx9=YaPzo&X?q-m-KWmQTmL3 zSn1!Z>t6?M+@tfG!5bL=yt2diUEn>8_kj;E{vB|W`STcYvA+EoTmO$if0XI}6Wsk} z-G6&T$n@Nq@fqO#-_Z3B2d`s%5qRf)x_-5?&$v(7VSEkv*w=MCo4|*d{u$u)U(xl? 
z0XOc~c^o{$_#W^qv(o|I#q>W6-p}-}12_8gbU&f&d`;)Kg7-50yTNN2|0;M7;}3!l zG5$DsHB0wt@DWzd&w89eiVuKy44Jkz(W5_;6$Sh_R7N1308gLg8% zh`6|KQs1{;h2*zDe~{_#1g~O#Ho}hjo^WO7dgz->|5M=IjQi2GzZ<-P@r%Jb8UGOY0OQwy*D^nE1RrPm zpCvBF=LYN#*ijshTb!P2u=0N$JOl2A{!wdWeRVOu8N8SA4&vg9x`$u@rz^mtjDJMw zKcVa21YY@FoqrB|klDEdyo%YmAAE%A_k-t{{qKPfJgletBk*3ve*r$m?7RTp&-fp} zGtAB#;C+n$2Yj5_DO)T2qkVX$KbN?u_bc#xgaqqY3w_rkQdWOY1s`YpOmGk5=PCX@ zWkH@(;0;W_1-ysx1K{of-Og3uwT#~Y-u-=D|1*j+{oBAtnf|@tLrni0;8iSNkAQm~ z)zf_vyn*Te47`)^-zfdZbUUwr4>JAN!ShW2E$~W~uac95UAn(xd=7Zm<9fPBfM=Qh zvEZXD-4)=2Og{`>#mZ+LcrW8yz)faxbvsXi*E9Z0@F8aB1@JM(N5Ngp z&R@ZMf2gPXZ}1+bUwX38qt9hLsq4$%lcs(9r*wWK^!r)4EdlRi`ZeGq%pduC6=a{; zSr7eg#tr0^!-}QpBco( zdT$Wx1WS!H!&ZPkU&->j4t$KIyM?&0qkiAVLpH>*zgGOEeR7uWmEhxyUk^UY_@}^) zXUJU9@jtafpy!W2D}ogKQmv5bK7#2#0N%j(cfs?F|A4s2m--!|DzYk$H)_RS8qZm} z{{bIocFNXC{VJwE7rc-01>l)qi0dYfrQpMiuLM`Wm#VIafV+OKh&*oqH-4$})0G{j ze>QlI@tCsD?Cb&0GyM+Y*18w_63dW$9{T+(U-y7lGy7jx_8ET|Jj3`PaZ!&h{2o+2 z()}B8@myo{SpcUzpRit>X@BfFaGZSL`C*-}2Jd8iBlrO0+rX=seffJn^!;hi>-OW& z?`M23c!t@z1iX&%9C$6W^HK0VrvFLs%3taJ+zOs${BH0(<6i|IV*EjH^*g*OmB+!| z%+IHZi+WVQ1E%`pTj1*Vz!X1zgRn@?Re!B4%d-c(_cuCU1zykeH-P6D-wLjN*H>BF z1>VQ>qu_&#Uj*LI{AnjH`pfFGY?kTo%af4@Uv#Df4M_MY@crOwSvC&)d1l{!inPCX zr?j&Y_G`fZvt0t!U(N+TUH(9Zu?YIt5*L=nnf-3?>i>mjyTfy^;?*unr*6myfKE(KL@Gi!) 
z;I7~4b`F5&89xX<&iM7<^}pBcd=k8x@ms-b8NVC6`xV{JSHZg&e-M0t*?A1SlkuN` zk2C%(c*Cfk?u+1+EZskW8%+Q2;QdVh9dMKBmv0j3(faKVdb)>zyBJ>p-pBY7@F?Re z!0Y~`+Yf=)|54{BgEuh!I`C1(&s26^)$N=Mo?|=#UiBAUe=oTE&pPh_uV(sJfcG>0 z5oPB!-A*@nJ>#DxF6KM+JC}V=c*HDT8}Y1hk#cyI}5>wnf`I$1 z1NsZMNPlhxKMuSO{88{y@OJPg!IhtR@E5>UIXngaFYu+XBfc+5)YmQON7djsKQ}7v_M!qh!N;-g%7H%%-ua@`S3J}x?U;AKPkA!-fRFV`kU=?o72MS> zx%wWAs;IQ%I-v7&!MkC<3U+RfN&Tvaq<#kFBRxi~q<$arHG*<@ z3cUNMO{2zt6<)lBE zKW9(DpF@A|g8fR^IXH#>eefsq58eN-gO9(Z^M}Aknf{~T1C0L=yi!dFF!xjNVWvL} zKK72D?g+St*?9##%lK>HRa7^{$3MVDofRMNfcLyzOzHcfw+Wd(SI+Fu0k8d+u0J2V zit$C@1FSrc2k&R;t^^-rJPba{%I9QogYi?rO~%gv?`8aK@L|Rew#)X}gLb8U@3E2e zCu^S{oq}&fJFiChtM>VgDfCC+PY8HRu8Q%xqRjQ}kp*YjO96ZnXHQ>XH-vHk2((Qj5e2nRTLD_fc`genyjDH2Zmf8On zc$Dcs0^Y^=55Pwlm*4McGu*74pM$<LHX6Fs?VaDH5_Dgg-j(XY7 zqm0i0&m5)e9|}Ii^cR4SGJY(0J!=ok!1Ijz!Mn=zbXS89GW`wUBaClR`pnLD@Jhzd z12-8@fOj6P`?Cjpl{&$0gU4tO`y zcbzHQ`7q;i!N(at61=uT_vcvfLB^MZ_b?s=uRcV#a}s#Np*lYmJj-}Jcr6Le z{Wy3(<9om}tUa`Y*D-!Mcn_g3#(Ti4Sbg0FKF;*-0nf1fJ^-F${2}l@ z#vcdwF#mr7?q>X1rO(E_5%4I}{{wg@<9`JoVEKIue2D2g&ywwPl<`^MX-E-v-{p z?B4_KX7%y__z>d{D*KE-2HwT&KLy^0{L0)K#?O@fWAyU;6}XG>7s0C;A651j>vqP# zYnlEy_yFU^cG*sPnH?9n!T22TI>zUN=NW$+>wFXIXVWc9ViD<2w$2~wkoN^cu%q^! 
zPMbo%1L<}itCz!NiZ9XmRp32L|9bEN#y3W3-}Pz|04J}{b#`Qj6VlH#P|rfd#Rq^QSiRwbUp^&#rQb*Amhdk+1@&mo5nZ600XS^0Xx=c^E4!nW!dhlMx<@cV`_ZKl9 zh5i`h8F0^XJ>4vL9pjzg{fy_pdl~NnuVUrX4L;0x4|pBpdGILXz2N@VCY+ij&sw{t9bl<^Z4 zH+6jQ@hb2f<8E*d^RpU!l<9lG ztC+qC-pP0^xQq3pI`9FeUk^Uc_<;_rD=-e^U2+}LK>Cx7Cs$3u@1KG{HwAZ|J-Pjr zQ}A=A;MYvSH(-B0i}IP zuU)0vFKv+ixYp@>w&G0xaPWG@j{=V}z7)Kd@fF|(OE(BU%JkQQcdpU>+z4LJ`02_% zE6<(EKI0dFyHD2br@*Tj-v=ILyaRlk@yo&c8NUYHU8|>i19+bCPl4Alek=G8<9C9O zFuof7F^6{BjsAEa=}*=lhhb-w)z=H)j_AKkVdsc*CohLnrr<48@Xt-bAD@D+fuFVTUyYMvQ|P})^=R0g zh9{-xcfomvafm^mgE&OA7sD8qC+l}01h1Vf!7wh{5vAXBgii74_fzoO$j{02|4Hv> zOvW##c0L)uZVKK{`I=1MsA*4iwi`9AEy-AW%%~B6Yg$u#36OB0IkvAUVFlU_?33sA zR?;Q~iZp5xE&KMRn(2CqZjZ(3oM~%sl_7aH+FJI;nq!S^@pNNrM_YSiTdcXM{XmT` znTmPSX-_Pe428Wh?~26kjv9~Ac<#1{^@DOpy2nWA5&=$9$_Wr}{8qW=S@m<|Md@lZ14j|7uc zYC$WH0hwZ;F(4`;Kq=O2ZEWmlrWTh2h4QX zXXPa%^AeJ!7m}qHlBE}tr58f!S&n4eTYaHqYfHAVskxE-PozYHFC;|`IvMcAQi)j7 z8;Hk(iG*h&7m+OWpTwS~=G|U@tgS88+TNIsHSJ9$=_+3+Zl;3qP$))CDj7*{)XG#Z z;ZUJm{+q>cDijZT62VX~5(@{?l-=T5GBrsyNRh$+X3-2LyrFo^7c)a~f6zmpvnaOc z_2{Bk7k$F%kZcxmIH1v{rY6-Q2V~b2K$bxOg%gxr z*Bg}M5S?X71Q8!bd>HXz#D|rkFm-HEs3sImD4I|-$&lu_F8cK%@du5jNc@sLO&zJ$ zy~&GW0iT*bv#l+=TVwlb;vsJ!7z}tkF={j3hzIkgmc+XXQVfA!YcP$sw8&9!8k1_A zM$lj??u`UOiDV@3L8mx!l{$OG8>6AbpH9mu&WkaV&a%KUYLYCg65}SxA9SMg1d|?L zD4g_rgNa}`MPq1jA^GJL^ZwTwzCa`$NK0^SnU&tQ|gd)LsEX1mtI#O}%2lOry z(8n735?M`IA8P_(J=Xky6-28OU&I$Qy@8l7oleqhP2-3U;|QI-AJDGp^NN3Q)9(w? 
z0-NUS57t1_o;Y=WT9o>e-n8FjGj&KGxI%jQhV=3c>E#=eZ93i-G()N8L`yQ|^KG=w z>pPnECR43qIkZ`lra#n2ru2e$U*s)~8l6AsPxHUyfrAtyiZ?dHk;l@~VvN1z{_NK&&D`BQX{$wB= zN~dGK0L{Qu8vzg1f;?MO9m#UJ>XD-s$yPI_GtGQb(VBDxu*NY8_*IPGntDW(HKmGx zx>`Ur-mVW1;SUIe>lCa)N!g?zS2dtL*|8}h&z^x9QWocl!M6xVFESpH4SkU@u z#qRZZ-?d~H_o!CmtEr)uS5t##w`6O*!m{Po)NHp}Ff~W0Gk#Y6646YpCLhkEQb{wG z@OvZableQ2ViT)>;fOEzER>@p z8=B$ByBuUCwyCvc-^5B^IvWpoeKa43L$PFtvQGs@Z8#{F7|Qm4yCw0^92pG8JwabQ z7z;50h6?7fVHeeqbzpGx|YiEw}xFM5eZ zrdMKde;^r%`>5KIURnXM67zbj%^<2oF_4R?g6{LIIijZK{oLevf*#Whc>F0^E5|~9 zR=Hk}wG0eeZLBbZc+*YIO>LRR-K{PAsZ04hDSy}(p(aMnBo>cbtt)~%13Fvd2A!?( zgwECt0iETbE&|rzDgxG+Dgu)wJK4nL4XN3Rb|f0J9c>wM$WJpAm3`O~j?f(FrKJuv zcPcR4f=~c=ZxoVuW)!fz6iV`bMJUO8BN37}aw1gF_sY9G!Q_3USJ|fk)F>qNDIo1r zK-w2d@>WZP)a^sT z$t>%m#}o;$-9^)9#=NO?JWM?%6bsUgP;uw=()_N=`ufnz=54QEpE11wtD)=e7SASy zMa(p}HR7}Y44JfMONSzUYWuQv6mGaEpijzPwvwPFpPnI_mqq&uh!(Hql*TQJ@OgrA z2SNm_>JheAx#sXg`f|8(sEC~uw zH_$F(dquSG%i{F~mQU`x2-`BB6ku1-Hj8kWBBVbQV2)B7Bn`g@j*5WZYJJoyp<$JS z(6EX@1b7Z)#fX@~MY3`QlI3k(X4Rw>vI@fr6|D1Y2lQ6yV|Q#mx@CidvOp|r6WJ=H zAa|!ktZY+O$SM~rRPb1?#s!-w9J%JTnAMW3P{Cu_E-Y5CLbrpV4-Y6R#OmZ2Mc))7QAIQZM2o4e5M;Vcu)6gDpbf@ zsE{TbD}2F1dZB`?aN#;OlK3V(mSc+WGhFb~EaWF#$hTRrZx%eH-;#odasU!m%n2(K z*A~2v6f8#y_Gk^Jtgu?3($qR^P``VnMF5LuwHz#X5Sj3x;GbC7AYrekkQ1+mCsHU* zFO4#qKU9ka1BIl#ypvAUs8>$9qL{oitLwJ7T&O#*+!K~pu_1}Kf{FG)8xPVs+P{Er zrWz%%CF=K>jdV|GWx;GzO@j74P(&0$)zDrl3W!1lCKnTyl|l+?^dieBUAm%RkGd67 zQ+cA;f}WfLggw>C$V%Z_)#)h~9fLvz?^UNKhWer$6+A^36uZtcR&7lCDIk5KKp{%b zav~0+DTSmT0?CpPA?b$*$^26Yr7l>(uACKuw54q!5wNZl0c-6Q4qGcEJfu(?%r)7L z_@2~(nwt7pYg?+`65h{(+q%v3#eyN9FOg1)ZLQ)47nEfejEId+J;KVWh|rrPt-3VD za9E%H!Za`x8#1X=#rMYcr2I7zTJ^_Fx-+CFfnlFFh`n-TINjQEX{xy~7H_HvhXP4& zA{B}x;`BHumV}~p*OBr>%w!@Nk9qvzU_2Z^95pCfNLfA?9{;O_M{St*q)kDn4ZHW z)AS(D^oD$?n96XIyuN(%>X?UAQd0t_O|S< ziKoJJV{gVHrk@_CdDJugcuQ+rrY1#C=INnkgdPS5BB_YFPDaLJfq?0WMG_vLN&BLv z^2fR|<_(GGkn|YAOHbQVDv~zzlBpVciy$61<0-nCjfFymDsE5Kgv_9y9`Pk2^aRsS zuNlB5-5U7Y+LQja18shKDIrX)p^==Y+bY^KG5zrX-Qm&RM}nRuQW3|RTbgUA$9U=X 
zKAkjEv_FtYr~Py<8BBQmp-3_kOC)LnG(=m^WvMl_w6@pyg8^?Mod|g1^z0;=Qf(#^ zYs2)aPAvaQ~P%t&&c;PvA@a)#6_EcoQ z_}eJof$-6;dxAD==pH34mj4CseaT2D7Eh+bu~axt&n=R{q@Suc?)RsBNi!X*5&g-^ z=X+|(XC`VBX!@H;L0Tfyt!hMU(8m)YJTp;t0>1a!^TGX~-0bm((k6A*m_JSX{Nc2` zzrkJ)?I(%t0$~}iioBnpE07A(Ry=Ls1XGD{(qlbBm|nqHdoMwK7h$s7`S;$3riVh_ zFzrMq%n-d7kksv)AizIw_ zBuCd;Qy^e= zpJ_UgsO)1O1k{rLaVQM zkY02vDzyJ+Z@1AFyfyFp2_gVv-KKYxcxvj)z~CnN28t-qt=!7gcII0 z75wyTMs7)3ZTh`Snw}!6mJ<+tDZDS$o@q(8(Yp?!fG6Jbqd4mUTPW3*h>?@j@H%!k z((A9Hrm0;dg8{M2OA8gRTvS-SoCb9|%khQ+)(}N!dH+ZOYuKT)TBb3C~GzpQR*sr^DKypqF;!UuFLyi zijud%6bLHE*^}vah#5DXUWP^I>w2LET zby|`6={Ig#T+TNVDGvVpmr8mx}Z&z-e(ygm}heZVBCYlJzer|=ViWEpT1PaL& zGzH{1W0{caZ(_0~Q9zD!mI>Jy1S?#M`xIfs8e1$Qve*SHTp@=k5i1`YSRq-d#N-fa zrD2UnR>&F-txzE~xrHy79GAWHk~fl7J(39WpzfWBATxODjh>0Ct0;hEFyxCk%JH;o zRc=y?tKLTwlY87&QtxAQl)L4Y^T;8&IuSx)5}STQ^O^6X0h?$z*a!2{f zehN?S*Ne;1V#p#|426&(3L%pe5*uC99Bi$3F{Y>MwYK`Fr|Xjs6ufjxfR;zMb7)_* zDJ**#ZUTi8az;uM>JtpDonp}pDO9+5d+%XUJ{a-R({*W_0`H?j>9NIhOVdwvpjKaE z6+Owu6lIp)RG_^$+CB>hV>G&pnZp#T%IT&;CZ?_H#~5Tr4uS%hRgi_pXsg;=y#ibbtPRQ*suwrUE<3n?I- zrGPX_0c4Fr(ldc3-q^A3Vm7t3?u)gzr&_(fY%ED1FlkTGd*ge(K6$g5ZB3<{I>mxF zK-+-<`iw~+6b!`!^6gW4M+Ofvtk6UvvTP_8SrsAKL`A6JD28jn3c0`lFW5vrWYH1B zG$BH=m_$f6c?!u#E|xD7_6lCf6}O0$i(V^K(8CNTtiV&6VyvcTJz@}TkOFW?&3O?u zZBRh2w!IX>h(jT$Tl->`1=2@e)Epx8ksyVT6`F(!mttZeJymd&R>6q8R}iVn8vzkg zMMw-qO`(E4%zdPXxle=&v2tZEtf<1ITuj8GVu_(_C{*xXKFJc-$|qS?2*po1D(J~a zjot{gdRf5Y{R69bCR)1cgrtO_T!aexvTOM0Z6gbbfMr4iWFd->)kiF3m6sJN=qnSG zy@Vort$57?Cg^n(Tv+^K%7h8oF2x1Z!KOP*`D9C@*Kd#u3Rn$55Sbk>y_R5=IlcCw zUV5RBRjmTa=i*kVZ~?rc*a=jyVO59FFA&)v1(NMhgk-n1LIoRgF({b4i?l*;mzdR& z1e5K?3dvF@CLh{cOcp3H`6__L3ckp;B$#}yzzWHN7nY~Lnx%KyMT@eox7wW*n*Izz zpY$qRUnnx!4=hU)xhTZaLvEx z6Jf;`@`AyLu8^27 zQVJnMs>jlmaEan%-xbD?3F?Z4D^&$ir!Ux0RYeS0EOc#Q1sN-JXkv1e}vSLI?R;38NkNY8c7br?bzD*=TXze0Y$d7zc z&x$RS8~Ot2VaA~l`mbOU*A}jjFJ1{Nvq6rx(VjzXtLb zeO@BmK_6A8Pj^MK^zkkFQi#2&U?P)B>=CygF@MbKp^q}q>(8nG)7qKrNRkv`xC8PmEqyxi}_||zS!?$r@Lw#i7;sxCeh-f>Y})GM2VMGMVKD@U`997 
zZ);!@v+;qSRhgeg`--wTxsSR;m=iWxDTG+f;)<78xr-l7c_<-+TpMxbBSlaKa2Sz% zGlFng`EV^Uf^n8-gM>_Sbv;Lc%6@zgvdUF#Xt(p-n3d8zW5*1*c}v)+Qs z*v7(jan0%3$>EY-B@LJ1@ZZGYvl54IEphlnIWrUjTic3{1tx7x$3#n3GyCyAIP?<^ zO$UdU7g%P7FszqqV4Kd$CksR8)lW8xFbptKacDOZFYQ|1 zvQs%;v9bF6@zZB7R!`r5|M`0_R?j~D_&NL(YoIbKI62p{bDkG!5+oVSdHUKFA79@- zZ(l9mnD?7WgLB7nfCObS zK5Tq*C;p6l+>)(AIn(%=nlW5dSNn>|3-WlUjW>mhrH(g#hR!!g!)8XmB1lDl{sazsdMJ#cD=^Glc4z#ZMJ zI-mk>FRr%dO;WXK3V=@mYEm^edYDN?y~#+EEhc?z9nl0S6Bl9C=R9GQ1Z z7jq@wyL5(0FE{FRn-)5-dG-NnOL*yzmb02I(LXI<GA zEKQwP^KO~y+6`4@-*slpyJewkPrO@BIuB;(TB&t4i_-Z%fxo4rYe!UiD5Ux8emsQE zg$hMbBP_mzis$5?R#2j=FMQI~V+j*Jsp^kUC;+EPELJEARutx=vj|Biw6%6^@v-tG zp@KN1WU+KvjWD6Eu!FEqx|+@>wYtNhlXV=}sQT9B+@m>kX-__d=F>S3KB}SP2%Sw% z-f8qup`zrN&-^K55h|M#i54a_7kl^Sqf2;XF?Tmz-VvV=ORe|t(Ulu<_*3j0ggs>U zIlG&$^7Tb`S#pYR-CfxOrLzE;3CoMdv8XUKm7+jMTA=`8#L0HD7D?Aw2@?iO68hO( zqA(b=8fNzBi#IbuUl16Y`RZWc-FE<6;PnCYIAAPEPK|Nj~k*Goij0keBaO zJpFiJf*42(-+d)5qz2NpKJYi*YbCJ~gmT+c`xRMRX3FY#x5$B!&kWp%2#j6_0}S56 z+a9hRPece=y0(>R%maTTM;nb+v2Z#7dACB^Hs(rO8-JtQwlZxajLM0Yh4B`+l`e0>w2egtb{0nAJP56F12fTf zL?}d8b3)gO9yn!e(1ePLOjQ_sbkKNyK+D9Y%3z5O`T#CtjKJ829c9opoLO5mc4(ta zd(*5#22M0{CL5$-aL>%tq6ZE%GZ|wZcC2L98UwSLnTjz3?c6XIOHXAwv>HdGGso6b zknr=^jgDvo&>kj7<8@OQRcu!AJPfoDO+=xG=@;vW#bokb1K1Ihlv6m!lPhyKU@`4n zU$e7%#1KXCcaCN7;pU6+=JEKY2edhvLn`pR-B9IenRIe+|F9Uv=^Q@2zJ7ZSRg2{i z_RS;EyMUCMRjL9KVfz&cw=?z*jKr~R8%(LH6dcb(I(>B?8l$|Wy31TlRpdmUtfqo# zUJWg1Y@HY~?3zjis~@#{jj!yz^s}z2XwR&~kO~O3V^&3{(?4F-d+CGlo^^GdWmQ`b zQ-NXi(JE3~sbgh0+9N%Ew@B&~EZf6pJRqUc7Jxn0p8H)67RY_v{kusRq>+dx>@$Tt8s3v|Fi5B;W$Q zozv~z^%`68i*Y?9JOtp3u2tuG%lcW;1IgWGm5wsZe-_Yk@o;L7UGIX5J(#cXsTb_c7j*~2sE=;{@xSgTi!18)~lS>}X8=aKcik^QOS zVs9^IT?}BLwY9wIQeKPz0dnXtOH2Ez$hEvDfx6ka-t^;k3oV)+_K;9CwOu~|f`AER z!$t>*7>K}1h)VK(M2%7bHA;oj$P{a!E&)oY3u+~s;GYWdco>rn^6Lq30*3re?RU=A zu+EJWU=?Xr^Z+=p*y?NZFtX1`ND*`cgbd)(TWC*!IIRZk@$(M+f?{{9Q^dO8Tzw9v z{I&rd<8pNeqPoNZaBeKd3OLCUV{RQ?hxq}I*Y}&dt6M5-+9b&%eUomIG|09U$OvE` ztS;|vbz=)0y8=VTZmO$cav0tTEvt#*-rije7sJF`JAfdPon3Ff;N|5lw#Gcy`n+G$ 
zm0_5GJR-*oR^AkmPIY&AEtafOe1IodMNW&Bf{pN{rRhX)-+`vU=G*PoH>5{f*y*3O zj)9Is4%<~Y$eVb&6 zd^Y@cK>eo{8~~R)0Peb?JW=^>(I1jz5U|^ecFolVlIKwNMLux@11o+woRvweVVvu>2COfrX$X)#uDB5H5LJ7i~7S@Cv!yVpnxivJCV0 zMn1h7Oi#tO=<;9Ua5YmeWiK@RtW%! z0ymq~4wHhVTQ6fkT?STCB;yF+a)rruhxh`3g67?YWtm3|ShifWF>+H0(g`$kOt%Zd{R#byfg&^46H; zEYYWgwP5fnAv7Pb?bwgHqZb?zl-VLE`<(Gux}@TgjsnhcDwzx`6B!1=!yB3jHF&5K zt07g2=(wl)-@R4>$QPg=SY6&0nC$Lo{l#{D(T@ewq4CQ!_b)4sLooF5Sgk>Fl6z1f zC99wU%dLU6A5dzK=z?EIAluU&~ZVsqu1&1J7 zLt^)^H!_$@D}*rIK>WbysjC9pem9j>r>&=BZ-%F>uwHL_E>T)-kktrG0jbTIjP>Op zcJ@V+wsG0@h<2$B@KSTT zpnh_TLfe9AR+pf_3VFlD=SEd|97)Y!$ll0-?~QeI_#mijmbb5 z(oWY+N3fegZOOW%ZC#>@=ZWOsE&&~i?Q^P?iR%a90Vf~k8l zFk~%*=BWIR%ivxa(h9J;g1x;KeVOT@FN59xA^;hunDIefv$Vrr8rOxjAPRTYjs(~n zGseZXw=Gvu-DAc;*{4joQefK}^txRZNcH&ZxK1xE9G#e7>@6*11sXL6-{ zd3*XtXXg9qPgyzi3`3b{)ALji?htjS_{B!ZRJXZ1DVkaHiY)X-DLdyK=qR{QmBmq`-6I8o5%?d};^6jW)W-nvm3T z3Oh+JwyXI}ws1}XNCySqH9c2x7|#)kRr1t+MG{S!pc4RjEVl4laLJ2Jpbf=aVQxfX zoS(nrXL}DpUrQJmj;@=58kzL9j&068FG~@AK+P~cr@HaI@h}!J?wUms_Rm38(~it} z+=GOJ1}N(r!=s_Sra@2SaU3ZMv>S|RLD=ISFGQ*8F8`l=4zQxw=$5+_=$Tv(=f(pS zXI3n0mG$GIS2hhIjA>+*aqC$xfNVj~?~CRG<6*iN7eNi^F_g_^z1&tbM?{q={KTJK z_svR|0EErRbbCOOxeCbJDH$Dzkvq}C@Oy7l2eXsi{%!y{YbSY8u$n^! 
zYvo5>SM(ET2`+O#E#V*W|W$a01cFxpY$2E1IB`)=1q z+uPaMcW_AvyQcT-6T9~}Jpc2 Date: Tue, 19 Mar 2024 15:32:07 -0400 Subject: [PATCH 14/28] SVM: minor refactoring to improve code readability (#317) --- svm/src/transaction_processor.rs | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/svm/src/transaction_processor.rs b/svm/src/transaction_processor.rs index d1d68365d01fc2..a566802dc12987 100644 --- a/svm/src/transaction_processor.rs +++ b/svm/src/transaction_processor.rs @@ -342,11 +342,11 @@ impl TransactionBatchProcessor { fn filter_executable_program_accounts<'a, CB: TransactionProcessingCallback>( callbacks: &CB, txs: &[SanitizedTransaction], - lock_results: &mut [TransactionCheckResult], + check_results: &mut [TransactionCheckResult], program_owners: &'a [Pubkey], ) -> HashMap { let mut result: HashMap = HashMap::new(); - lock_results.iter_mut().zip(txs).for_each(|etx| { + check_results.iter_mut().zip(txs).for_each(|etx| { if let ((Ok(()), _nonce, lamports_per_signature), tx) = etx { if lamports_per_signature.is_some() { tx.message() @@ -361,9 +361,9 @@ impl TransactionBatchProcessor { if let Some(index) = callbacks.account_matches_owners(key, program_owners) { - program_owners - .get(index) - .map(|owner| entry.insert((owner, 1))); + if let Some(owner) = program_owners.get(index) { + entry.insert((owner, 1)); + } } } }); From 7ad99f3b1126aab2d28983f2783a3d9568429219 Mon Sep 17 00:00:00 2001 From: Ashwin Sekar Date: Tue, 19 Mar 2024 15:26:13 -0700 Subject: [PATCH 15/28] vote: reuse ff to gate tvc constant update from 8 -> 16 (#322) --- programs/vote/src/vote_state/mod.rs | 2 ++ sdk/program/src/vote/state/mod.rs | 12 ++++++++++-- 2 files changed, 12 insertions(+), 2 deletions(-) diff --git a/programs/vote/src/vote_state/mod.rs b/programs/vote/src/vote_state/mod.rs index ba84fa9bc4790e..b95f47e8c1b9c2 100644 --- a/programs/vote/src/vote_state/mod.rs +++ b/programs/vote/src/vote_state/mod.rs @@ -2173,6 +2173,7 @@ mod tests { let mut feature_set = 
FeatureSet::default(); feature_set.activate(&feature_set::timely_vote_credits::id(), 1); + feature_set.activate(&feature_set::deprecate_unused_legacy_vote_plumbing::id(), 1); // For each vote group, process all vote groups leading up to it and it itself, and ensure that the number of // credits earned is correct for both regular votes and vote state updates @@ -2307,6 +2308,7 @@ mod tests { let mut feature_set = FeatureSet::default(); feature_set.activate(&feature_set::timely_vote_credits::id(), 1); + feature_set.activate(&feature_set::deprecate_unused_legacy_vote_plumbing::id(), 1); // Retroactive voting is only possible with VoteStateUpdate transactions, which is why Vote transactions are // not tested here diff --git a/sdk/program/src/vote/state/mod.rs b/sdk/program/src/vote/state/mod.rs index 1bb8c7dc88d91c..d22d5814c2ebd2 100644 --- a/sdk/program/src/vote/state/mod.rs +++ b/sdk/program/src/vote/state/mod.rs @@ -47,6 +47,9 @@ pub const VOTE_CREDITS_GRACE_SLOTS: u8 = 2; // Maximum number of credits to award for a vote; this number of credits is awarded to votes on slots that land within the grace period. After that grace period, vote credits are reduced. 
pub const VOTE_CREDITS_MAXIMUM_PER_SLOT: u8 = 16; +// Previous max per slot +pub const VOTE_CREDITS_MAXIMUM_PER_SLOT_OLD: u8 = 8; + #[frozen_abi(digest = "Ch2vVEwos2EjAVqSHCyJjnN2MNX1yrpapZTGhMSCjWUH")] #[derive(Serialize, Default, Deserialize, Debug, PartialEq, Eq, Clone, AbiExample)] pub struct Vote { @@ -597,6 +600,11 @@ impl VoteState { .votes .get(index) .map_or(0, |landed_vote| landed_vote.latency); + let max_credits = if deprecate_unused_legacy_vote_plumbing { + VOTE_CREDITS_MAXIMUM_PER_SLOT + } else { + VOTE_CREDITS_MAXIMUM_PER_SLOT_OLD + }; // If latency is 0, this means that the Lockout was created and stored from a software version that did not // store vote latencies; in this case, 1 credit is awarded @@ -606,13 +614,13 @@ impl VoteState { match latency.checked_sub(VOTE_CREDITS_GRACE_SLOTS) { None | Some(0) => { // latency was <= VOTE_CREDITS_GRACE_SLOTS, so maximum credits are awarded - VOTE_CREDITS_MAXIMUM_PER_SLOT as u64 + max_credits as u64 } Some(diff) => { // diff = latency - VOTE_CREDITS_GRACE_SLOTS, and diff > 0 // Subtract diff from VOTE_CREDITS_MAXIMUM_PER_SLOT which is the number of credits to award - match VOTE_CREDITS_MAXIMUM_PER_SLOT.checked_sub(diff) { + match max_credits.checked_sub(diff) { // If diff >= VOTE_CREDITS_MAXIMUM_PER_SLOT, 1 credit is awarded None | Some(0) => 1, From dcdce7c9f13cdf669bc1949c5138b42165384000 Mon Sep 17 00:00:00 2001 From: Alessandro Decina Date: Wed, 20 Mar 2024 09:39:33 +1100 Subject: [PATCH 16/28] accounts-db: unpack_archive: unpack accounts straight into their final destination (#289) * accounts-db: unpack_archive: avoid extra iteration on each path We used to do a iterator.clone().any(...) followed by iterator.collect(). Merge the two and avoid an extra iteration and re-parsing of the path. * accounts-db: unpack_archive: unpack accounts straight into their final destination We used to unpack accounts into account_path/accounts/ then rename to account_path/. 
We now unpack them into their final destination directly and avoid the rename syscall. --- accounts-db/src/hardened_unpack.rs | 76 ++++++++++++++++++++---------- 1 file changed, 50 insertions(+), 26 deletions(-) diff --git a/accounts-db/src/hardened_unpack.rs b/accounts-db/src/hardened_unpack.rs index 39eca4f9cdf3d9..ebdafe675f0512 100644 --- a/accounts-db/src/hardened_unpack.rs +++ b/accounts-db/src/hardened_unpack.rs @@ -112,27 +112,26 @@ where // first by ourselves when there are odd paths like including `..` or / // for our clearer pattern matching reasoning: // https://docs.rs/tar/0.4.26/src/tar/entry.rs.html#371 - let parts = path.components().map(|p| match p { - CurDir => Some("."), - Normal(c) => c.to_str(), - _ => None, // Prefix (for Windows) and RootDir are forbidden - }); + let parts = path + .components() + .map(|p| match p { + CurDir => Ok("."), + Normal(c) => c.to_str().ok_or(()), + _ => Err(()), // Prefix (for Windows) and RootDir are forbidden + }) + .collect::, _>>(); // Reject old-style BSD directory entries that aren't explicitly tagged as directories let legacy_dir_entry = entry.header().as_ustar().is_none() && entry.path_bytes().ends_with(b"/"); let kind = entry.header().entry_type(); let reject_legacy_dir_entry = legacy_dir_entry && (kind != Directory); - - if parts.clone().any(|p| p.is_none()) || reject_legacy_dir_entry { + let (Ok(parts), false) = (parts, reject_legacy_dir_entry) else { return Err(UnpackError::Archive(format!( "invalid path found: {path_str:?}" ))); - } + }; - let parts: Vec<_> = parts.map(|p| p.unwrap()).collect(); - let account_filename = - (parts.len() == 2 && parts[0] == "accounts").then(|| PathBuf::from(parts[1])); let unpack_dir = match entry_checker(parts.as_slice(), kind) { UnpackPath::Invalid => { return Err(UnpackError::Archive(format!( @@ -159,13 +158,24 @@ where )?; total_count = checked_total_count_increment(total_count, limit_count)?; - let target = sanitize_path(&entry.path()?, unpack_dir)?; // ? 
handles file system errors - if target.is_none() { + let account_filename = match parts.as_slice() { + ["accounts", account_filename] => Some(PathBuf::from(account_filename)), + _ => None, + }; + let entry_path = if let Some(account) = account_filename { + // Special case account files. We're unpacking an account entry inside one of the + // account_paths returned by `entry_checker`. We want to unpack into + // account_path/ instead of account_path/accounts/ so we strip the + // accounts/ prefix. + sanitize_path(&account, unpack_dir) + } else { + sanitize_path(&path, unpack_dir) + }?; // ? handles file system errors + let Some(entry_path) = entry_path else { continue; // skip it - } - let target = target.unwrap(); + }; - let unpack = entry.unpack(target); + let unpack = entry.unpack(&entry_path); check_unpack_result(unpack.map(|_unpack| true)?, path_str)?; // Sanitize permissions. @@ -173,16 +183,7 @@ where GNUSparse | Regular => 0o644, _ => 0o755, }; - let entry_path_buf = unpack_dir.join(entry.path()?); - set_perms(&entry_path_buf, mode)?; - - let entry_path = if let Some(account_filename) = account_filename { - let stripped_path = unpack_dir.join(account_filename); // strip away "accounts" - fs::rename(&entry_path_buf, &stripped_path)?; - stripped_path - } else { - entry_path_buf - }; + set_perms(&entry_path, mode)?; // Process entry after setting permissions entry_processor(entry_path); @@ -1029,4 +1030,27 @@ mod tests { if message == "too many files in snapshot: 1000000000000" ); } + + #[test] + fn test_archive_unpack_account_path() { + let mut header = Header::new_gnu(); + header.set_path("accounts/123.456").unwrap(); + header.set_size(4); + header.set_cksum(); + let data: &[u8] = &[1, 2, 3, 4]; + + let mut archive = Builder::new(Vec::new()); + archive.append(&header, data).unwrap(); + let result = with_finalize_and_unpack(archive, |ar, tmp| { + unpack_snapshot_with_processors( + ar, + tmp, + &[tmp.join("accounts_dest")], + None, + |_, _| {}, + |path| 
assert_eq!(path, tmp.join("accounts_dest/123.456")), + ) + }); + assert_matches!(result, Ok(())); + } } From 01194377644576e08cb25bf6141307bb9fe0f088 Mon Sep 17 00:00:00 2001 From: Tao Zhu <82401714+tao-stones@users.noreply.github.com> Date: Tue, 19 Mar 2024 19:44:34 -0500 Subject: [PATCH 17/28] qos service should also accumulate executed but errored units (#328) qos service should also accumulated executed but errored units --- core/src/banking_stage/consumer.rs | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/core/src/banking_stage/consumer.rs b/core/src/banking_stage/consumer.rs index c5ed22a34278ce..e66b32c0bda898 100644 --- a/core/src/banking_stage/consumer.rs +++ b/core/src/banking_stage/consumer.rs @@ -772,7 +772,9 @@ impl Consumer { (0, 0), |(units, times), program_timings| { ( - units.saturating_add(program_timings.accumulated_units), + units + .saturating_add(program_timings.accumulated_units) + .saturating_add(program_timings.total_errored_units), times.saturating_add(program_timings.accumulated_us), ) }, From 5871a0e7ed8141a4032481935385c8ddb1d756fc Mon Sep 17 00:00:00 2001 From: Jon C Date: Wed, 20 Mar 2024 13:21:00 +0100 Subject: [PATCH 18/28] CI: Add windows clippy job and fix clippy errors (#330) * CI: Run clippy on windows * Update cargo-clippy-before-script.sh for Windows * Pacify clippy --- .github/scripts/cargo-clippy-before-script.sh | 4 +++ .github/workflows/cargo.yml | 2 ++ accounts-db/src/hardened_unpack.rs | 3 ++ .../src/geyser_plugin_manager.rs | 6 +++- install/src/command.rs | 10 ++---- programs/sbf/benches/bpf_loader.rs | 7 +++-- rpc/src/rpc_service.rs | 31 +++++++++---------- 7 files changed, 36 insertions(+), 27 deletions(-) diff --git a/.github/scripts/cargo-clippy-before-script.sh b/.github/scripts/cargo-clippy-before-script.sh index b9426203aa6ffc..bba03060877434 100755 --- a/.github/scripts/cargo-clippy-before-script.sh +++ b/.github/scripts/cargo-clippy-before-script.sh @@ -6,6 +6,10 @@ os_name="$1" case 
"$os_name" in "Windows") + vcpkg install openssl:x64-windows-static-md + vcpkg integrate install + choco install protoc + export PROTOC='C:\ProgramData\chocolatey\lib\protoc\tools\bin\protoc.exe' ;; "macOS") brew install protobuf diff --git a/.github/workflows/cargo.yml b/.github/workflows/cargo.yml index 3d7b1371b6578b..b52a543e2d9e01 100644 --- a/.github/workflows/cargo.yml +++ b/.github/workflows/cargo.yml @@ -31,6 +31,7 @@ jobs: matrix: os: - macos-latest-large + - windows-latest runs-on: ${{ matrix.os }} steps: - uses: actions/checkout@v4 @@ -53,6 +54,7 @@ jobs: matrix: os: - macos-latest-large + - windows-latest runs-on: ${{ matrix.os }} steps: - uses: actions/checkout@v4 diff --git a/accounts-db/src/hardened_unpack.rs b/accounts-db/src/hardened_unpack.rs index ebdafe675f0512..cff22fde8ab368 100644 --- a/accounts-db/src/hardened_unpack.rs +++ b/accounts-db/src/hardened_unpack.rs @@ -205,6 +205,9 @@ where #[cfg(windows)] fn set_perms(dst: &Path, _mode: u32) -> std::io::Result<()> { let mut perm = fs::metadata(dst)?.permissions(); + // This is OK for Windows, but clippy doesn't realize we're doing this + // only on Windows. 
+ #[allow(clippy::permissions_set_readonly_false)] perm.set_readonly(false); fs::set_permissions(dst, perm) } diff --git a/geyser-plugin-manager/src/geyser_plugin_manager.rs b/geyser-plugin-manager/src/geyser_plugin_manager.rs index d9e556d78a6895..7ccc3aee97f78b 100644 --- a/geyser-plugin-manager/src/geyser_plugin_manager.rs +++ b/geyser-plugin-manager/src/geyser_plugin_manager.rs @@ -451,9 +451,13 @@ mod tests { plugin: P, config_path: &'static str, ) -> (LoadedGeyserPlugin, Library, &'static str) { + #[cfg(unix)] + let library = libloading::os::unix::Library::this(); + #[cfg(windows)] + let library = libloading::os::windows::Library::this().unwrap(); ( LoadedGeyserPlugin::new(Box::new(plugin), None), - Library::from(libloading::os::unix::Library::this()), + Library::from(library), config_path, ) } diff --git a/install/src/command.rs b/install/src/command.rs index 827ba24ad0bf6d..9869e395b342a1 100644 --- a/install/src/command.rs +++ b/install/src/command.rs @@ -333,9 +333,7 @@ pub fn string_from_winreg_value(val: &winreg::RegValue) -> Option { let words = unsafe { slice::from_raw_parts(val.bytes.as_ptr() as *const u16, val.bytes.len() / 2) }; - let mut s = if let Ok(s) = String::from_utf16(words) { - s - } else { + let Ok(mut s) = String::from_utf16(words) else { return None; }; while s.ends_with('\u{0}') { @@ -392,11 +390,9 @@ fn add_to_path(new_path: &str) -> bool { }, }; - let old_path = if let Some(s) = + let Some(old_path) = get_windows_path_var().unwrap_or_else(|err| panic!("Unable to get PATH: {}", err)) - { - s - } else { + else { return false; }; diff --git a/programs/sbf/benches/bpf_loader.rs b/programs/sbf/benches/bpf_loader.rs index 47c55245000df1..1dd827bbeb197b 100644 --- a/programs/sbf/benches/bpf_loader.rs +++ b/programs/sbf/benches/bpf_loader.rs @@ -2,7 +2,10 @@ #![cfg(feature = "sbf_c")] #![allow(clippy::uninlined_format_args)] #![allow(clippy::arithmetic_side_effects)] -#![cfg_attr(not(target_arch = "x86_64"), allow(dead_code, 
unused_imports))] +#![cfg_attr( + any(target_os = "windows", not(target_arch = "x86_64")), + allow(dead_code, unused_imports) +)] use { solana_rbpf::memory_region::MemoryState, @@ -103,7 +106,7 @@ fn bench_program_create_executable(bencher: &mut Bencher) { } #[bench] -#[cfg(target_arch = "x86_64")] +#[cfg(all(not(target_os = "windows"), target_arch = "x86_64"))] fn bench_program_alu(bencher: &mut Bencher) { let ns_per_s = 1000000000; let one_million = 1000000; diff --git a/rpc/src/rpc_service.rs b/rpc/src/rpc_service.rs index 303a1e94b223b2..10580b4711c054 100644 --- a/rpc/src/rpc_service.rs +++ b/rpc/src/rpc_service.rs @@ -878,24 +878,21 @@ mod tests { panic!("Unexpected RequestMiddlewareAction variant"); } - #[cfg(unix)] + std::fs::remove_file(&genesis_path).unwrap(); { - std::fs::remove_file(&genesis_path).unwrap(); - { - let mut file = std::fs::File::create(ledger_path.path().join("wrong")).unwrap(); - file.write_all(b"wrong file").unwrap(); - } - symlink::symlink_file("wrong", &genesis_path).unwrap(); - - // File is a symbolic link => request should fail. - let action = rrm.process_file_get(DEFAULT_GENESIS_DOWNLOAD_PATH); - if let RequestMiddlewareAction::Respond { response, .. } = action { - let response = runtime.block_on(response); - let response = response.unwrap(); - assert_ne!(response.status(), 200); - } else { - panic!("Unexpected RequestMiddlewareAction variant"); - } + let mut file = std::fs::File::create(ledger_path.path().join("wrong")).unwrap(); + file.write_all(b"wrong file").unwrap(); + } + symlink::symlink_file("wrong", &genesis_path).unwrap(); + + // File is a symbolic link => request should fail. + let action = rrm.process_file_get(DEFAULT_GENESIS_DOWNLOAD_PATH); + if let RequestMiddlewareAction::Respond { response, .. 
} = action { + let response = runtime.block_on(response); + let response = response.unwrap(); + assert_ne!(response.status(), 200); + } else { + panic!("Unexpected RequestMiddlewareAction variant"); } } } From 78f033d9e64b895104f43ce0ba76ea2ee8f5d2a7 Mon Sep 17 00:00:00 2001 From: Pankaj Garg Date: Wed, 20 Mar 2024 05:39:07 -0700 Subject: [PATCH 19/28] Move code to check_program_modification_slot out of SVM (#329) * Move code to check_program_modification_slot out of SVM * add documentation for the public function --- runtime/src/bank.rs | 27 ++++++++++++--- svm/src/transaction_processor.rs | 58 +++++++++++--------------------- 2 files changed, 42 insertions(+), 43 deletions(-) diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index 6d5c2345f92aca..b7329724a2558e 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -99,7 +99,8 @@ use { compute_budget_processor::process_compute_budget_instructions, invoke_context::BuiltinFunctionWithContext, loaded_programs::{ - LoadedProgram, LoadedProgramType, LoadedPrograms, ProgramRuntimeEnvironments, + LoadedProgram, LoadedProgramMatchCriteria, LoadedProgramType, LoadedPrograms, + ProgramRuntimeEnvironments, }, runtime_config::RuntimeConfig, timings::{ExecuteTimingType, ExecuteTimings}, @@ -168,7 +169,8 @@ use { account_overrides::AccountOverrides, transaction_error_metrics::TransactionErrorMetrics, transaction_processor::{ - TransactionBatchProcessor, TransactionLogMessages, TransactionProcessingCallback, + ExecutionRecordingConfig, TransactionBatchProcessor, TransactionLogMessages, + TransactionProcessingCallback, }, transaction_results::{ TransactionExecutionDetails, TransactionExecutionResult, TransactionResults, @@ -271,7 +273,6 @@ pub struct BankRc { #[cfg(RUSTC_WITH_SPECIALIZATION)] use solana_frozen_abi::abi_example::AbiExample; -use solana_svm::transaction_processor::ExecutionRecordingConfig; #[cfg(RUSTC_WITH_SPECIALIZATION)] impl AbiExample for BankRc { @@ -550,6 +551,7 @@ impl PartialEq for Bank { 
loaded_programs_cache: _, epoch_reward_status: _, transaction_processor: _, + check_program_modification_slot: _, // Ignore new fields explicitly if they do not impact PartialEq. // Adding ".." will remove compile-time checks that if a new field // is added to the struct, this PartialEq is accordingly updated. @@ -810,6 +812,8 @@ pub struct Bank { epoch_reward_status: EpochRewardStatus, transaction_processor: TransactionBatchProcessor, + + check_program_modification_slot: bool, } struct VoteWithStakeDelegations { @@ -996,6 +1000,7 @@ impl Bank { ))), epoch_reward_status: EpochRewardStatus::default(), transaction_processor: TransactionBatchProcessor::default(), + check_program_modification_slot: false, }; bank.transaction_processor = TransactionBatchProcessor::new( @@ -1314,6 +1319,7 @@ impl Bank { loaded_programs_cache: parent.loaded_programs_cache.clone(), epoch_reward_status: parent.epoch_reward_status.clone(), transaction_processor: TransactionBatchProcessor::default(), + check_program_modification_slot: false, }; new.transaction_processor = TransactionBatchProcessor::new( @@ -1864,6 +1870,7 @@ impl Bank { ))), epoch_reward_status: fields.epoch_reward_status, transaction_processor: TransactionBatchProcessor::default(), + check_program_modification_slot: false, }; bank.transaction_processor = TransactionBatchProcessor::new( @@ -7517,7 +7524,7 @@ impl Bank { } pub fn check_program_modification_slot(&mut self) { - self.transaction_processor.check_program_modification_slot = true; + self.check_program_modification_slot = true; } pub fn load_program( @@ -7579,6 +7586,18 @@ impl TransactionProcessingCallback for Bank { Ok(()) } } + + fn get_program_match_criteria(&self, program: &Pubkey) -> LoadedProgramMatchCriteria { + if self.check_program_modification_slot { + self.transaction_processor + .program_modification_slot(self, program) + .map_or(LoadedProgramMatchCriteria::Tombstone, |slot| { + LoadedProgramMatchCriteria::DeployedOnOrAfterSlot(slot) + }) + } else { + 
LoadedProgramMatchCriteria::NoCriteria + } + } } #[cfg(feature = "dev-context-only-utils")] diff --git a/svm/src/transaction_processor.rs b/svm/src/transaction_processor.rs index a566802dc12987..40ccf81561f26e 100644 --- a/svm/src/transaction_processor.rs +++ b/svm/src/transaction_processor.rs @@ -103,6 +103,10 @@ pub trait TransactionProcessingCallback { ) -> transaction::Result<()> { Ok(()) } + + fn get_program_match_criteria(&self, _program: &Pubkey) -> LoadedProgramMatchCriteria { + LoadedProgramMatchCriteria::NoCriteria + } } #[derive(Debug)] @@ -128,8 +132,6 @@ pub struct TransactionBatchProcessor { /// Transaction fee structure fee_structure: FeeStructure, - pub check_program_modification_slot: bool, - /// Optional config parameters that can override runtime behavior runtime_config: Arc, @@ -145,10 +147,6 @@ impl Debug for TransactionBatchProcessor { .field("epoch", &self.epoch) .field("epoch_schedule", &self.epoch_schedule) .field("fee_structure", &self.fee_structure) - .field( - "check_program_modification_slot", - &self.check_program_modification_slot, - ) .field("runtime_config", &self.runtime_config) .field("sysvar_cache", &self.sysvar_cache) .field("loaded_programs_cache", &self.loaded_programs_cache) @@ -163,7 +161,6 @@ impl Default for TransactionBatchProcessor { epoch: Epoch::default(), epoch_schedule: EpochSchedule::default(), fee_structure: FeeStructure::default(), - check_program_modification_slot: false, runtime_config: Arc::::default(), sysvar_cache: RwLock::::default(), loaded_programs_cache: Arc::new(RwLock::new(LoadedPrograms::new( @@ -188,7 +185,6 @@ impl TransactionBatchProcessor { epoch, epoch_schedule, fee_structure, - check_program_modification_slot: false, runtime_config, sysvar_cache: RwLock::::default(), loaded_programs_cache, @@ -491,30 +487,15 @@ impl TransactionBatchProcessor { limit_to_load_programs: bool, ) -> LoadedProgramsForTxBatch { let mut missing_programs: Vec<(Pubkey, (LoadedProgramMatchCriteria, u64))> = - if 
self.check_program_modification_slot { - program_accounts_map - .iter() - .map(|(pubkey, (_, count))| { - ( - *pubkey, - ( - self.program_modification_slot(callback, pubkey) - .map_or(LoadedProgramMatchCriteria::Tombstone, |slot| { - LoadedProgramMatchCriteria::DeployedOnOrAfterSlot(slot) - }), - *count, - ), - ) - }) - .collect() - } else { - program_accounts_map - .iter() - .map(|(pubkey, (_, count))| { - (*pubkey, (LoadedProgramMatchCriteria::NoCriteria, *count)) - }) - .collect() - }; + program_accounts_map + .iter() + .map(|(pubkey, (_, count))| { + ( + *pubkey, + (callback.get_program_match_criteria(pubkey), *count), + ) + }) + .collect(); let mut loaded_programs_for_txs = None; let mut program_to_store = None; @@ -763,7 +744,11 @@ impl TransactionBatchProcessor { } } - fn program_modification_slot( + /// Find the slot in which the program was most recently modified. + /// Returns slot 0 for programs deployed with v1/v2 loaders, since programs deployed + /// with those loaders do not retain deployment slot information. + /// Returns an error if the program's account state can not be found or parsed. 
+ pub fn program_modification_slot( &self, callbacks: &CB, pubkey: &Pubkey, @@ -1815,10 +1800,7 @@ mod tests { fn test_replenish_program_cache() { // Case 1 let mut mock_bank = MockBankCallback::default(); - let mut batch_processor = TransactionBatchProcessor:: { - check_program_modification_slot: true, - ..TransactionBatchProcessor::default() - }; + let batch_processor = TransactionBatchProcessor::::default(); batch_processor .loaded_programs_cache .write() @@ -1848,8 +1830,6 @@ mod tests { )); // Case 2 - batch_processor.check_program_modification_slot = false; - let result = batch_processor.replenish_program_cache(&mock_bank, &account_maps, true); let program1 = result.find(&key1).unwrap(); From 304333405c58a7c862183a906cf2466e1d829d5e Mon Sep 17 00:00:00 2001 From: HaoranYi Date: Wed, 20 Mar 2024 10:54:15 -0500 Subject: [PATCH 20/28] Revert deprecate executable feature (#309) * revert deprecate executable feature * add native loader account transfer test --------- Co-authored-by: HaoranYi --- cli/src/program.rs | 15 +- cli/tests/program.rs | 44 ++-- ledger-tool/src/program.rs | 1 - program-runtime/src/invoke_context.rs | 14 +- program-runtime/src/message_processor.rs | 16 +- program-test/src/lib.rs | 24 +-- .../address-lookup-table/src/processor.rs | 22 +- programs/bpf_loader/benches/serialization.rs | 49 +---- programs/bpf_loader/src/lib.rs | 188 ++++++------------ programs/bpf_loader/src/serialization.rs | 65 ++---- programs/bpf_loader/src/syscalls/cpi.rs | 84 +++----- programs/config/src/config_processor.rs | 2 +- programs/loader-v4/src/lib.rs | 25 ++- programs/sbf/benches/bpf_loader.rs | 10 +- programs/stake/src/stake_instruction.rs | 37 +--- programs/stake/src/stake_state.rs | 105 ++++------ programs/system/src/system_instruction.rs | 22 +- programs/system/src/system_processor.rs | 62 +++++- programs/vote/src/vote_state/mod.rs | 24 +-- programs/zk-token-proof/src/lib.rs | 14 +- runtime/src/bank.rs | 9 +- runtime/src/bank/tests.rs | 40 ++-- 
sdk/src/account.rs | 94 +-------- sdk/src/feature_set.rs | 5 - sdk/src/transaction_context.rs | 96 +++------ svm/src/account_loader.rs | 15 +- 26 files changed, 353 insertions(+), 729 deletions(-) diff --git a/cli/src/program.rs b/cli/src/program.rs index 099da9dbaf2438..c35871868f0f04 100644 --- a/cli/src/program.rs +++ b/cli/src/program.rs @@ -44,7 +44,7 @@ use { }, solana_rpc_client_nonce_utils::blockhash_query::BlockhashQuery, solana_sdk::{ - account::{is_executable, Account}, + account::Account, account_utils::StateMut, bpf_loader, bpf_loader_deprecated, bpf_loader_upgradeable::{self, UpgradeableLoaderState}, @@ -1066,15 +1066,6 @@ fn get_default_program_keypair(program_location: &Option) -> Keypair { program_keypair } -fn is_account_executable(account: &Account) -> bool { - if account.owner == bpf_loader_deprecated::id() || account.owner == bpf_loader::id() { - account.executable - } else { - let feature_set = FeatureSet::all_enabled(); - is_executable(account, &feature_set) - } -} - /// Deploy program using upgradeable loader. 
It also can process program upgrades #[allow(clippy::too_many_arguments)] fn process_program_deploy( @@ -1131,7 +1122,7 @@ fn process_program_deploy( .into()); } - if !is_account_executable(&account) { + if !account.executable { // Continue an initial deploy true } else if let Ok(UpgradeableLoaderState::Program { @@ -2534,7 +2525,7 @@ fn complete_partial_program_init( ) -> Result<(Vec, u64), Box> { let mut instructions: Vec = vec![]; let mut balance_needed = 0; - if is_account_executable(account) { + if account.executable { return Err("Buffer account is already executable".into()); } if account.owner != *loader_id && !system_program::check_id(&account.owner) { diff --git a/cli/tests/program.rs b/cli/tests/program.rs index 039df1d64b8ae8..6eb281d65b9e35 100644 --- a/cli/tests/program.rs +++ b/cli/tests/program.rs @@ -14,11 +14,9 @@ use { solana_rpc_client::rpc_client::RpcClient, solana_rpc_client_nonce_utils::blockhash_query::BlockhashQuery, solana_sdk::{ - account::is_executable, account_utils::StateMut, bpf_loader_upgradeable::{self, UpgradeableLoaderState}, commitment_config::CommitmentConfig, - feature_set::FeatureSet, pubkey::Pubkey, signature::{Keypair, NullSigner, Signer}, }, @@ -102,7 +100,7 @@ fn test_cli_program_deploy_non_upgradeable() { let account0 = rpc_client.get_account(&program_id).unwrap(); assert_eq!(account0.lamports, minimum_balance_for_program); assert_eq!(account0.owner, bpf_loader_upgradeable::id()); - assert!(is_executable(&account0, &FeatureSet::all_enabled())); + assert!(account0.executable); let (programdata_pubkey, _) = Pubkey::find_program_address(&[program_id.as_ref()], &bpf_loader_upgradeable::id()); @@ -112,10 +110,7 @@ fn test_cli_program_deploy_non_upgradeable() { minimum_balance_for_programdata ); assert_eq!(programdata_account.owner, bpf_loader_upgradeable::id()); - assert!(!is_executable( - &programdata_account, - &FeatureSet::all_enabled() - )); + assert!(!programdata_account.executable); assert_eq!( 
programdata_account.data[UpgradeableLoaderState::size_of_programdata_metadata()..], program_data[..] @@ -143,7 +138,7 @@ fn test_cli_program_deploy_non_upgradeable() { .unwrap(); assert_eq!(account1.lamports, minimum_balance_for_program); assert_eq!(account1.owner, bpf_loader_upgradeable::id()); - assert!(is_executable(&account1, &FeatureSet::all_enabled())); + assert!(account1.executable); let (programdata_pubkey, _) = Pubkey::find_program_address( &[custom_address_keypair.pubkey().as_ref()], &bpf_loader_upgradeable::id(), @@ -154,10 +149,7 @@ fn test_cli_program_deploy_non_upgradeable() { minimum_balance_for_programdata ); assert_eq!(programdata_account.owner, bpf_loader_upgradeable::id()); - assert!(!is_executable( - &programdata_account, - &FeatureSet::all_enabled() - )); + assert!(!programdata_account.executable); assert_eq!( programdata_account.data[UpgradeableLoaderState::size_of_programdata_metadata()..], program_data[..] @@ -385,7 +377,7 @@ fn test_cli_program_deploy_with_authority() { let program_account = rpc_client.get_account(&program_keypair.pubkey()).unwrap(); assert_eq!(program_account.lamports, minimum_balance_for_program); assert_eq!(program_account.owner, bpf_loader_upgradeable::id()); - assert!(is_executable(&program_account, &FeatureSet::all_enabled())); + assert!(program_account.executable); let (programdata_pubkey, _) = Pubkey::find_program_address( &[program_keypair.pubkey().as_ref()], &bpf_loader_upgradeable::id(), @@ -396,10 +388,7 @@ fn test_cli_program_deploy_with_authority() { minimum_balance_for_programdata ); assert_eq!(programdata_account.owner, bpf_loader_upgradeable::id()); - assert!(!is_executable( - &programdata_account, - &FeatureSet::all_enabled() - )); + assert!(!programdata_account.executable); assert_eq!( programdata_account.data[UpgradeableLoaderState::size_of_programdata_metadata()..], program_data[..] 
@@ -433,7 +422,7 @@ fn test_cli_program_deploy_with_authority() { let program_account = rpc_client.get_account(&program_pubkey).unwrap(); assert_eq!(program_account.lamports, minimum_balance_for_program); assert_eq!(program_account.owner, bpf_loader_upgradeable::id()); - assert!(is_executable(&program_account, &FeatureSet::all_enabled())); + assert!(program_account.executable); let (programdata_pubkey, _) = Pubkey::find_program_address(&[program_pubkey.as_ref()], &bpf_loader_upgradeable::id()); let programdata_account = rpc_client.get_account(&programdata_pubkey).unwrap(); @@ -442,10 +431,7 @@ minimum_balance_for_programdata ); assert_eq!(programdata_account.owner, bpf_loader_upgradeable::id()); - assert!(!is_executable( - &programdata_account, - &FeatureSet::all_enabled() - )); + assert!(!programdata_account.executable); assert_eq!( programdata_account.data[UpgradeableLoaderState::size_of_programdata_metadata()..], program_data[..] @@ -470,7 +456,7 @@ fn test_cli_program_deploy_with_authority() { let program_account = rpc_client.get_account(&program_pubkey).unwrap(); assert_eq!(program_account.lamports, minimum_balance_for_program); assert_eq!(program_account.owner, bpf_loader_upgradeable::id()); - assert!(is_executable(&program_account, &FeatureSet::all_enabled())); + assert!(program_account.executable); let (programdata_pubkey, _) = Pubkey::find_program_address(&[program_pubkey.as_ref()], &bpf_loader_upgradeable::id()); let programdata_account = rpc_client.get_account(&programdata_pubkey).unwrap(); @@ -479,10 +465,7 @@ minimum_balance_for_programdata ); assert_eq!(programdata_account.owner, bpf_loader_upgradeable::id()); - assert!(!is_executable( - &programdata_account, - &FeatureSet::all_enabled() - )); + assert!(!programdata_account.executable); assert_eq!( programdata_account.data[UpgradeableLoaderState::size_of_programdata_metadata()..], program_data[..] 
@@ -548,7 +531,7 @@ fn test_cli_program_deploy_with_authority() { let program_account = rpc_client.get_account(&program_pubkey).unwrap(); assert_eq!(program_account.lamports, minimum_balance_for_program); assert_eq!(program_account.owner, bpf_loader_upgradeable::id()); - assert!(is_executable(&program_account, &FeatureSet::all_enabled())); + assert!(program_account.executable); let (programdata_pubkey, _) = Pubkey::find_program_address(&[program_pubkey.as_ref()], &bpf_loader_upgradeable::id()); let programdata_account = rpc_client.get_account(&programdata_pubkey).unwrap(); @@ -557,10 +540,7 @@ minimum_balance_for_programdata ); assert_eq!(programdata_account.owner, bpf_loader_upgradeable::id()); - assert!(!is_executable( - &programdata_account, - &FeatureSet::all_enabled() - )); + assert!(!programdata_account.executable); assert_eq!( programdata_account.data[UpgradeableLoaderState::size_of_programdata_metadata()..], program_data[..] diff --git a/ledger-tool/src/program.rs b/ledger-tool/src/program.rs index 24df2168a338bf..0b4855ccb7f756 100644 --- a/ledger-tool/src/program.rs +++ b/ledger-tool/src/program.rs @@ -540,7 +540,6 @@ pub fn program(ledger_path: &Path, matches: &ArgMatches<'_>) { .get_current_instruction_context() .unwrap(), true, // copy_account_data - &invoke_context.feature_set, ) .unwrap(); diff --git a/program-runtime/src/invoke_context.rs b/program-runtime/src/invoke_context.rs index 7e930fad169627..5b2d417912256f 100644 --- a/program-runtime/src/invoke_context.rs +++ b/program-runtime/src/invoke_context.rs @@ -403,7 +403,7 @@ impl<'a> InvokeContext<'a> { })?; let borrowed_program_account = instruction_context .try_borrow_instruction_account(self.transaction_context, program_account_index)?; - if !borrowed_program_account.is_executable(&self.feature_set) { + if !borrowed_program_account.is_executable() { ic_msg!(self, "Account {} is not executable", callee_program_id); return 
Err(InstructionError::AccountNotExecutable); } @@ -802,17 +802,17 @@ mod tests { MockInstruction::NoopFail => return Err(InstructionError::GenericError), MockInstruction::ModifyOwned => instruction_context .try_borrow_instruction_account(transaction_context, 0)? - .set_data_from_slice(&[1], &invoke_context.feature_set)?, + .set_data_from_slice(&[1])?, MockInstruction::ModifyNotOwned => instruction_context .try_borrow_instruction_account(transaction_context, 1)? - .set_data_from_slice(&[1], &invoke_context.feature_set)?, + .set_data_from_slice(&[1])?, MockInstruction::ModifyReadonly => instruction_context .try_borrow_instruction_account(transaction_context, 2)? - .set_data_from_slice(&[1], &invoke_context.feature_set)?, + .set_data_from_slice(&[1])?, MockInstruction::UnbalancedPush => { instruction_context .try_borrow_instruction_account(transaction_context, 0)? - .checked_add_lamports(1, &invoke_context.feature_set)?; + .checked_add_lamports(1)?; let program_id = *transaction_context.get_key_of_account_at_index(3)?; let metas = vec![ AccountMeta::new_readonly( @@ -843,7 +843,7 @@ mod tests { } MockInstruction::UnbalancedPop => instruction_context .try_borrow_instruction_account(transaction_context, 0)? - .checked_add_lamports(1, &invoke_context.feature_set)?, + .checked_add_lamports(1)?, MockInstruction::ConsumeComputeUnits { compute_units_to_consume, desired_result, @@ -855,7 +855,7 @@ mod tests { } MockInstruction::Resize { new_len } => instruction_context .try_borrow_instruction_account(transaction_context, 0)? 
- .set_data(vec![0; new_len as usize], &invoke_context.feature_set)?, + .set_data(vec![0; new_len as usize])?, } } else { return Err(InstructionError::InvalidInstructionData); diff --git a/program-runtime/src/message_processor.rs b/program-runtime/src/message_processor.rs index 507197298479d9..e307609e096501 100644 --- a/program-runtime/src/message_processor.rs +++ b/program-runtime/src/message_processor.rs @@ -221,16 +221,16 @@ mod tests { MockSystemInstruction::TransferLamports { lamports } => { instruction_context .try_borrow_instruction_account(transaction_context, 0)? - .checked_sub_lamports(lamports, &invoke_context.feature_set)?; + .checked_sub_lamports(lamports)?; instruction_context .try_borrow_instruction_account(transaction_context, 1)? - .checked_add_lamports(lamports, &invoke_context.feature_set)?; + .checked_add_lamports(lamports)?; Ok(()) } MockSystemInstruction::ChangeData { data } => { instruction_context .try_borrow_instruction_account(transaction_context, 1)? - .set_data(vec![data], &invoke_context.feature_set)?; + .set_data(vec![data])?; Ok(()) } } @@ -444,14 +444,14 @@ mod tests { MockSystemInstruction::DoWork { lamports, data } => { let mut dup_account = instruction_context .try_borrow_instruction_account(transaction_context, 2)?; - dup_account.checked_sub_lamports(lamports, &invoke_context.feature_set)?; - to_account.checked_add_lamports(lamports, &invoke_context.feature_set)?; - dup_account.set_data(vec![data], &invoke_context.feature_set)?; + dup_account.checked_sub_lamports(lamports)?; + to_account.checked_add_lamports(lamports)?; + dup_account.set_data(vec![data])?; drop(dup_account); let mut from_account = instruction_context .try_borrow_instruction_account(transaction_context, 0)?; - from_account.checked_sub_lamports(lamports, &invoke_context.feature_set)?; - to_account.checked_add_lamports(lamports, &invoke_context.feature_set)?; + from_account.checked_sub_lamports(lamports)?; + to_account.checked_add_lamports(lamports)?; Ok(()) } } 
diff --git a/program-test/src/lib.rs b/program-test/src/lib.rs index 669cb15a595afb..f4fba5761d1332 100644 --- a/program-test/src/lib.rs +++ b/program-test/src/lib.rs @@ -133,7 +133,6 @@ pub fn invoke_builtin_function( .transaction_context .get_current_instruction_context()?, true, // copy_account_data // There is no VM so direct mapping can not be implemented here - &invoke_context.feature_set, )?; // Deserialize data back into instruction params @@ -164,25 +163,18 @@ pub fn invoke_builtin_function( if borrowed_account.is_writable() { if let Some(account_info) = account_info_map.get(borrowed_account.get_key()) { if borrowed_account.get_lamports() != account_info.lamports() { - borrowed_account - .set_lamports(account_info.lamports(), &invoke_context.feature_set)?; + borrowed_account.set_lamports(account_info.lamports())?; } if borrowed_account .can_data_be_resized(account_info.data_len()) .is_ok() - && borrowed_account - .can_data_be_changed(&invoke_context.feature_set) - .is_ok() + && borrowed_account.can_data_be_changed().is_ok() { - borrowed_account.set_data_from_slice( - &account_info.data.borrow(), - &invoke_context.feature_set, - )?; + borrowed_account.set_data_from_slice(&account_info.data.borrow())?; } if borrowed_account.get_owner() != account_info.owner { - borrowed_account - .set_owner(account_info.owner.as_ref(), &invoke_context.feature_set)?; + borrowed_account.set_owner(account_info.owner.as_ref())?; } } } @@ -293,17 +285,17 @@ impl solana_sdk::program_stubs::SyscallStubs for SyscallStubs { .unwrap(); if borrowed_account.get_lamports() != account_info.lamports() { borrowed_account - .set_lamports(account_info.lamports(), &invoke_context.feature_set) + .set_lamports(account_info.lamports()) .unwrap(); } let account_info_data = account_info.try_borrow_data().unwrap(); // The redundant check helps to avoid the expensive data comparison if we can match borrowed_account .can_data_be_resized(account_info_data.len()) - .and_then(|_| 
borrowed_account.can_data_be_changed(&invoke_context.feature_set)) + .and_then(|_| borrowed_account.can_data_be_changed()) { Ok(()) => borrowed_account - .set_data_from_slice(&account_info_data, &invoke_context.feature_set) + .set_data_from_slice(&account_info_data) .unwrap(), Err(err) if borrowed_account.get_data() != *account_info_data => { panic!("{err:?}"); @@ -313,7 +305,7 @@ impl solana_sdk::program_stubs::SyscallStubs for SyscallStubs { // Change the owner at the end so that we are allowed to change the lamports and data before if borrowed_account.get_owner() != account_info.owner { borrowed_account - .set_owner(account_info.owner.as_ref(), &invoke_context.feature_set) + .set_owner(account_info.owner.as_ref()) .unwrap(); } if instruction_account.is_writable { diff --git a/programs/address-lookup-table/src/processor.rs b/programs/address-lookup-table/src/processor.rs index 643310d316bf83..4db568c71a1a20 100644 --- a/programs/address-lookup-table/src/processor.rs +++ b/programs/address-lookup-table/src/processor.rs @@ -162,10 +162,9 @@ impl Processor { let instruction_context = transaction_context.get_current_instruction_context()?; let mut lookup_table_account = instruction_context.try_borrow_instruction_account(transaction_context, 0)?; - lookup_table_account.set_state( - &ProgramState::LookupTable(LookupTableMeta::new(authority_key)), - &invoke_context.feature_set, - )?; + lookup_table_account.set_state(&ProgramState::LookupTable(LookupTableMeta::new( + authority_key, + )))?; Ok(()) } @@ -214,7 +213,7 @@ impl Processor { let mut lookup_table_meta = lookup_table.meta; lookup_table_meta.authority = None; AddressLookupTable::overwrite_meta_data( - lookup_table_account.get_data_mut(&invoke_context.feature_set)?, + lookup_table_account.get_data_mut()?, lookup_table_meta, )?; @@ -306,12 +305,11 @@ impl Processor { )?; { AddressLookupTable::overwrite_meta_data( - lookup_table_account.get_data_mut(&invoke_context.feature_set)?, + 
lookup_table_account.get_data_mut()?, lookup_table_meta, )?; for new_address in new_addresses { - lookup_table_account - .extend_from_slice(new_address.as_ref(), &invoke_context.feature_set)?; + lookup_table_account.extend_from_slice(new_address.as_ref())?; } } drop(lookup_table_account); @@ -383,7 +381,7 @@ impl Processor { lookup_table_meta.deactivation_slot = clock.slot; AddressLookupTable::overwrite_meta_data( - lookup_table_account.get_data_mut(&invoke_context.feature_set)?, + lookup_table_account.get_data_mut()?, lookup_table_meta, )?; @@ -458,13 +456,13 @@ impl Processor { let mut recipient_account = instruction_context.try_borrow_instruction_account(transaction_context, 2)?; - recipient_account.checked_add_lamports(withdrawn_lamports, &invoke_context.feature_set)?; + recipient_account.checked_add_lamports(withdrawn_lamports)?; drop(recipient_account); let mut lookup_table_account = instruction_context.try_borrow_instruction_account(transaction_context, 0)?; - lookup_table_account.set_data_length(0, &invoke_context.feature_set)?; - lookup_table_account.set_lamports(0, &invoke_context.feature_set)?; + lookup_table_account.set_data_length(0)?; + lookup_table_account.set_lamports(0)?; Ok(()) } diff --git a/programs/bpf_loader/benches/serialization.rs b/programs/bpf_loader/benches/serialization.rs index abd0823b83497e..5d3c55a165e399 100644 --- a/programs/bpf_loader/benches/serialization.rs +++ b/programs/bpf_loader/benches/serialization.rs @@ -7,7 +7,6 @@ use { solana_sdk::{ account::{Account, AccountSharedData}, bpf_loader, bpf_loader_deprecated, - feature_set::FeatureSet, pubkey::Pubkey, sysvar::rent::Rent, transaction_context::{IndexOfAccount, InstructionAccount, TransactionContext}, @@ -127,13 +126,7 @@ fn bench_serialize_unaligned(bencher: &mut Bencher) { .get_current_instruction_context() .unwrap(); bencher.iter(|| { - let _ = serialize_parameters( - &transaction_context, - instruction_context, - false, - &FeatureSet::all_enabled(), - ) - .unwrap(); + let 
_ = serialize_parameters(&transaction_context, instruction_context, false).unwrap(); }); } @@ -144,13 +137,7 @@ fn bench_serialize_unaligned_copy_account_data(bencher: &mut Bencher) { .get_current_instruction_context() .unwrap(); bencher.iter(|| { - let _ = serialize_parameters( - &transaction_context, - instruction_context, - true, - &FeatureSet::all_enabled(), - ) - .unwrap(); + let _ = serialize_parameters(&transaction_context, instruction_context, true).unwrap(); }); } @@ -162,13 +149,7 @@ fn bench_serialize_aligned(bencher: &mut Bencher) { .unwrap(); bencher.iter(|| { - let _ = serialize_parameters( - &transaction_context, - instruction_context, - false, - &FeatureSet::all_enabled(), - ) - .unwrap(); + let _ = serialize_parameters(&transaction_context, instruction_context, false).unwrap(); }); } @@ -180,13 +161,7 @@ fn bench_serialize_aligned_copy_account_data(bencher: &mut Bencher) { .unwrap(); bencher.iter(|| { - let _ = serialize_parameters( - &transaction_context, - instruction_context, - true, - &FeatureSet::all_enabled(), - ) - .unwrap(); + let _ = serialize_parameters(&transaction_context, instruction_context, true).unwrap(); }); } @@ -197,13 +172,7 @@ fn bench_serialize_unaligned_max_accounts(bencher: &mut Bencher) { .get_current_instruction_context() .unwrap(); bencher.iter(|| { - let _ = serialize_parameters( - &transaction_context, - instruction_context, - false, - &FeatureSet::all_enabled(), - ) - .unwrap(); + let _ = serialize_parameters(&transaction_context, instruction_context, false).unwrap(); }); } @@ -215,12 +184,6 @@ fn bench_serialize_aligned_max_accounts(bencher: &mut Bencher) { .unwrap(); bencher.iter(|| { - let _ = serialize_parameters( - &transaction_context, - instruction_context, - false, - &FeatureSet::all_enabled(), - ) - .unwrap(); + let _ = serialize_parameters(&transaction_context, instruction_context, false).unwrap(); }); } diff --git a/programs/bpf_loader/src/lib.rs b/programs/bpf_loader/src/lib.rs index 
a9c34fbabfc6f6..2cae8b502efdb9 100644 --- a/programs/bpf_loader/src/lib.rs +++ b/programs/bpf_loader/src/lib.rs @@ -34,8 +34,7 @@ use { clock::Slot, entrypoint::{MAX_PERMITTED_DATA_INCREASE, SUCCESS}, feature_set::{ - bpf_account_data_direct_mapping, deprecate_executable_meta_update_in_bpf_loader, - enable_bpf_loader_set_authority_checked_ix, FeatureSet, + bpf_account_data_direct_mapping, enable_bpf_loader_set_authority_checked_ix, }, instruction::{AccountMeta, InstructionError}, loader_upgradeable_instruction::UpgradeableLoaderInstruction, @@ -172,7 +171,7 @@ fn write_program_data( let transaction_context = &invoke_context.transaction_context; let instruction_context = transaction_context.get_current_instruction_context()?; let mut program = instruction_context.try_borrow_instruction_account(transaction_context, 0)?; - let data = program.get_data_mut(&invoke_context.feature_set)?; + let data = program.get_data_mut()?; let write_offset = program_data_offset.saturating_add(bytes.len()); if data.len() < write_offset { ic_msg!( @@ -402,7 +401,7 @@ pub fn process_instruction_inner( } // Program Invocation - if !program_account.is_executable(&invoke_context.feature_set) { + if !program_account.is_executable() { ic_logger_msg!(log_collector, "Program is not executable"); return Err(Box::new(InstructionError::IncorrectProgramId)); } @@ -460,12 +459,9 @@ fn process_loader_upgradeable_instruction( instruction_context.get_index_of_instruction_account_in_transaction(1)?, )?); - buffer.set_state( - &UpgradeableLoaderState::Buffer { - authority_address: authority_key, - }, - &invoke_context.feature_set, - )?; + buffer.set_state(&UpgradeableLoaderState::Buffer { + authority_address: authority_key, + })?; } UpgradeableLoaderInstruction::Write { offset, bytes } => { instruction_context.check_number_of_instruction_accounts(2)?; @@ -589,8 +585,8 @@ fn process_loader_upgradeable_instruction( instruction_context.try_borrow_instruction_account(transaction_context, 3)?; let mut payer = 
instruction_context.try_borrow_instruction_account(transaction_context, 0)?; - payer.checked_add_lamports(buffer.get_lamports(), &invoke_context.feature_set)?; - buffer.set_lamports(0, &invoke_context.feature_set)?; + payer.checked_add_lamports(buffer.get_lamports())?; + buffer.set_lamports(0)?; } let owner_id = *program_id; @@ -644,15 +640,12 @@ fn process_loader_upgradeable_instruction( { let mut programdata = instruction_context.try_borrow_instruction_account(transaction_context, 1)?; - programdata.set_state( - &UpgradeableLoaderState::ProgramData { - slot: clock.slot, - upgrade_authority_address: authority_key, - }, - &invoke_context.feature_set, - )?; + programdata.set_state(&UpgradeableLoaderState::ProgramData { + slot: clock.slot, + upgrade_authority_address: authority_key, + })?; let dst_slice = programdata - .get_data_mut(&invoke_context.feature_set)? + .get_data_mut()? .get_mut( programdata_data_offset ..programdata_data_offset.saturating_add(buffer_data_len), @@ -665,30 +658,16 @@ fn process_loader_upgradeable_instruction( .get(buffer_data_offset..) .ok_or(InstructionError::AccountDataTooSmall)?; dst_slice.copy_from_slice(src_slice); - buffer.set_data_length( - UpgradeableLoaderState::size_of_buffer(0), - &invoke_context.feature_set, - )?; + buffer.set_data_length(UpgradeableLoaderState::size_of_buffer(0))?; } // Update the Program account let mut program = instruction_context.try_borrow_instruction_account(transaction_context, 2)?; - program.set_state( - &UpgradeableLoaderState::Program { - programdata_address: programdata_key, - }, - &invoke_context.feature_set, - )?; - - // Skip writing true to executable meta after bpf program deployment when - // `deprecate_executable_meta_update_in_bpf_loader` feature is activated. 
- if !invoke_context - .feature_set - .is_active(&deprecate_executable_meta_update_in_bpf_loader::id()) - { - program.set_executable(true)?; - } + program.set_state(&UpgradeableLoaderState::Program { + programdata_address: programdata_key, + })?; + program.set_executable(true)?; drop(program); ic_logger_msg!(log_collector, "Deployed program {:?}", new_program_id); @@ -710,7 +689,7 @@ fn process_loader_upgradeable_instruction( let program = instruction_context.try_borrow_instruction_account(transaction_context, 1)?; - if !program.is_executable(&invoke_context.feature_set) { + if !program.is_executable() { ic_logger_msg!(log_collector, "Program account not executable"); return Err(InstructionError::AccountNotExecutable); } @@ -841,15 +820,12 @@ fn process_loader_upgradeable_instruction( let mut programdata = instruction_context.try_borrow_instruction_account(transaction_context, 0)?; { - programdata.set_state( - &UpgradeableLoaderState::ProgramData { - slot: clock.slot, - upgrade_authority_address: authority_key, - }, - &invoke_context.feature_set, - )?; + programdata.set_state(&UpgradeableLoaderState::ProgramData { + slot: clock.slot, + upgrade_authority_address: authority_key, + })?; let dst_slice = programdata - .get_data_mut(&invoke_context.feature_set)? + .get_data_mut()? .get_mut( programdata_data_offset ..programdata_data_offset.saturating_add(buffer_data_len), @@ -864,7 +840,7 @@ fn process_loader_upgradeable_instruction( dst_slice.copy_from_slice(src_slice); } programdata - .get_data_mut(&invoke_context.feature_set)? + .get_data_mut()? .get_mut(programdata_data_offset.saturating_add(buffer_data_len)..) .ok_or(InstructionError::AccountDataTooSmall)? 
.fill(0); @@ -879,14 +855,10 @@ fn process_loader_upgradeable_instruction( .get_lamports() .saturating_add(buffer_lamports) .saturating_sub(programdata_balance_required), - &invoke_context.feature_set, - )?; - buffer.set_lamports(0, &invoke_context.feature_set)?; - programdata.set_lamports(programdata_balance_required, &invoke_context.feature_set)?; - buffer.set_data_length( - UpgradeableLoaderState::size_of_buffer(0), - &invoke_context.feature_set, )?; + buffer.set_lamports(0)?; + programdata.set_lamports(programdata_balance_required)?; + buffer.set_data_length(UpgradeableLoaderState::size_of_buffer(0))?; ic_logger_msg!(log_collector, "Upgraded program {:?}", new_program_id); } @@ -922,12 +894,9 @@ fn process_loader_upgradeable_instruction( ic_logger_msg!(log_collector, "Buffer authority did not sign"); return Err(InstructionError::MissingRequiredSignature); } - account.set_state( - &UpgradeableLoaderState::Buffer { - authority_address: new_authority.cloned(), - }, - &invoke_context.feature_set, - )?; + account.set_state(&UpgradeableLoaderState::Buffer { + authority_address: new_authority.cloned(), + })?; } UpgradeableLoaderState::ProgramData { slot, @@ -945,13 +914,10 @@ fn process_loader_upgradeable_instruction( ic_logger_msg!(log_collector, "Upgrade authority did not sign"); return Err(InstructionError::MissingRequiredSignature); } - account.set_state( - &UpgradeableLoaderState::ProgramData { - slot, - upgrade_authority_address: new_authority.cloned(), - }, - &invoke_context.feature_set, - )?; + account.set_state(&UpgradeableLoaderState::ProgramData { + slot, + upgrade_authority_address: new_authority.cloned(), + })?; } _ => { ic_logger_msg!(log_collector, "Account does not support authorities"); @@ -997,12 +963,9 @@ fn process_loader_upgradeable_instruction( ic_logger_msg!(log_collector, "New authority did not sign"); return Err(InstructionError::MissingRequiredSignature); } - account.set_state( - &UpgradeableLoaderState::Buffer { - authority_address: 
Some(*new_authority_key), - }, - &invoke_context.feature_set, - )?; + account.set_state(&UpgradeableLoaderState::Buffer { + authority_address: Some(*new_authority_key), + })?; } UpgradeableLoaderState::ProgramData { slot, @@ -1024,13 +987,10 @@ fn process_loader_upgradeable_instruction( ic_logger_msg!(log_collector, "New authority did not sign"); return Err(InstructionError::MissingRequiredSignature); } - account.set_state( - &UpgradeableLoaderState::ProgramData { - slot, - upgrade_authority_address: Some(*new_authority_key), - }, - &invoke_context.feature_set, - )?; + account.set_state(&UpgradeableLoaderState::ProgramData { + slot, + upgrade_authority_address: Some(*new_authority_key), + })?; } _ => { ic_logger_msg!(log_collector, "Account does not support authorities"); @@ -1055,19 +1015,13 @@ fn process_loader_upgradeable_instruction( instruction_context.try_borrow_instruction_account(transaction_context, 0)?; let close_key = *close_account.get_key(); let close_account_state = close_account.get_state()?; - close_account.set_data_length( - UpgradeableLoaderState::size_of_uninitialized(), - &invoke_context.feature_set, - )?; + close_account.set_data_length(UpgradeableLoaderState::size_of_uninitialized())?; match close_account_state { UpgradeableLoaderState::Uninitialized => { let mut recipient_account = instruction_context .try_borrow_instruction_account(transaction_context, 1)?; - recipient_account.checked_add_lamports( - close_account.get_lamports(), - &invoke_context.feature_set, - )?; - close_account.set_lamports(0, &invoke_context.feature_set)?; + recipient_account.checked_add_lamports(close_account.get_lamports())?; + close_account.set_lamports(0)?; ic_logger_msg!(log_collector, "Closed Uninitialized {}", close_key); } @@ -1079,7 +1033,6 @@ fn process_loader_upgradeable_instruction( transaction_context, instruction_context, &log_collector, - &invoke_context.feature_set, )?; ic_logger_msg!(log_collector, "Closed Buffer {}", close_key); @@ -1126,7 +1079,6 @@ 
fn process_loader_upgradeable_instruction( transaction_context, instruction_context, &log_collector, - &invoke_context.feature_set, )?; let clock = invoke_context.get_sysvar_cache().get_clock()?; invoke_context.programs_modified_by_tx.replenish( @@ -1277,7 +1229,7 @@ fn process_loader_upgradeable_instruction( let instruction_context = transaction_context.get_current_instruction_context()?; let mut programdata_account = instruction_context .try_borrow_instruction_account(transaction_context, PROGRAM_DATA_ACCOUNT_INDEX)?; - programdata_account.set_data_length(new_len, &invoke_context.feature_set)?; + programdata_account.set_data_length(new_len)?; let programdata_data_offset = UpgradeableLoaderState::size_of_programdata_metadata(); @@ -1298,13 +1250,10 @@ fn process_loader_upgradeable_instruction( let mut programdata_account = instruction_context .try_borrow_instruction_account(transaction_context, PROGRAM_DATA_ACCOUNT_INDEX)?; - programdata_account.set_state( - &UpgradeableLoaderState::ProgramData { - slot: clock_slot, - upgrade_authority_address, - }, - &invoke_context.feature_set, - )?; + programdata_account.set_state(&UpgradeableLoaderState::ProgramData { + slot: clock_slot, + upgrade_authority_address, + })?; ic_logger_msg!( log_collector, @@ -1322,7 +1271,6 @@ fn common_close_account( transaction_context: &TransactionContext, instruction_context: &InstructionContext, log_collector: &Option>>, - feature_set: &FeatureSet, ) -> Result<(), InstructionError> { if authority_address.is_none() { ic_logger_msg!(log_collector, "Account is immutable"); @@ -1346,9 +1294,9 @@ fn common_close_account( let mut recipient_account = instruction_context.try_borrow_instruction_account(transaction_context, 1)?; - recipient_account.checked_add_lamports(close_account.get_lamports(), feature_set)?; - close_account.set_lamports(0, feature_set)?; - close_account.set_state(&UpgradeableLoaderState::Uninitialized, feature_set)?; + 
recipient_account.checked_add_lamports(close_account.get_lamports())?; + close_account.set_lamports(0)?; + close_account.set_state(&UpgradeableLoaderState::Uninitialized)?; Ok(()) } @@ -1383,7 +1331,6 @@ fn execute<'a, 'b: 'a>( invoke_context.transaction_context, instruction_context, !direct_mapping, - &invoke_context.feature_set, )?; serialize_time.stop(); @@ -1464,15 +1411,13 @@ fn execute<'a, 'b: 'a>( instruction_account_index as IndexOfAccount, )?; - error = EbpfError::SyscallError(Box::new( - if account.is_executable(&invoke_context.feature_set) { - InstructionError::ExecutableDataModified - } else if account.is_writable() { - InstructionError::ExternalAccountDataModified - } else { - InstructionError::ReadonlyDataModified - }, - )); + error = EbpfError::SyscallError(Box::new(if account.is_executable() { + InstructionError::ExecutableDataModified + } else if account.is_writable() { + InstructionError::ExternalAccountDataModified + } else { + InstructionError::ReadonlyDataModified + })); } } } @@ -1500,7 +1445,6 @@ fn execute<'a, 'b: 'a>( copy_account_data, parameter_bytes, &invoke_context.get_syscall_context()?.accounts_metadata, - &invoke_context.feature_set, ) } @@ -1630,9 +1574,6 @@ mod tests { expected_result, Entrypoint::vm, |invoke_context| { - let mut features = FeatureSet::all_enabled(); - features.deactivate(&deprecate_executable_meta_update_in_bpf_loader::id()); - invoke_context.feature_set = Arc::new(features); test_utils::load_all_invoked_programs(invoke_context); }, |_invoke_context| {}, @@ -1721,9 +1662,6 @@ mod tests { Err(InstructionError::ProgramFailedToComplete), Entrypoint::vm, |invoke_context| { - let mut features = FeatureSet::all_enabled(); - features.deactivate(&deprecate_executable_meta_update_in_bpf_loader::id()); - invoke_context.feature_set = Arc::new(features); invoke_context.mock_set_remaining(0); test_utils::load_all_invoked_programs(invoke_context); }, @@ -2269,11 +2207,7 @@ mod tests { instruction_accounts, expected_result, 
Entrypoint::vm, - |invoke_context| { - let mut features = FeatureSet::all_enabled(); - features.deactivate(&deprecate_executable_meta_update_in_bpf_loader::id()); - invoke_context.feature_set = Arc::new(features); - }, + |_invoke_context| {}, |_invoke_context| {}, ) } diff --git a/programs/bpf_loader/src/serialization.rs b/programs/bpf_loader/src/serialization.rs index d4cbd09642f47c..f9cbc2e752c54d 100644 --- a/programs/bpf_loader/src/serialization.rs +++ b/programs/bpf_loader/src/serialization.rs @@ -11,7 +11,6 @@ use { solana_sdk::{ bpf_loader_deprecated, entrypoint::{BPF_ALIGN_OF_U128, MAX_PERMITTED_DATA_INCREASE, NON_DUP_MARKER}, - feature_set::FeatureSet, instruction::InstructionError, pubkey::Pubkey, system_instruction::MAX_PERMITTED_DATA_LENGTH, @@ -94,7 +93,6 @@ impl Serializer { fn write_account( &mut self, account: &mut BorrowedAccount<'_>, - feature_set: &FeatureSet, ) -> Result { let vm_data_addr = if self.copy_account_data { let vm_data_addr = self.vaddr.saturating_add(self.buffer.len() as u64); @@ -103,7 +101,7 @@ impl Serializer { } else { self.push_region(true); let vaddr = self.vaddr; - self.push_account_data_region(account, feature_set)?; + self.push_account_data_region(account)?; vaddr }; @@ -123,7 +121,7 @@ impl Serializer { .map_err(|_| InstructionError::InvalidArgument)?; self.region_start += BPF_ALIGN_OF_U128.saturating_sub(align_offset); // put the realloc padding in its own region - self.push_region(account.can_data_be_changed(feature_set).is_ok()); + self.push_region(account.can_data_be_changed().is_ok()); } } @@ -133,13 +131,12 @@ impl Serializer { fn push_account_data_region( &mut self, account: &mut BorrowedAccount<'_>, - feature_set: &FeatureSet, ) -> Result<(), InstructionError> { if !account.get_data().is_empty() { - let region = match account_data_region_memory_state(account, feature_set) { + let region = match account_data_region_memory_state(account) { MemoryState::Readable => MemoryRegion::new_readonly(account.get_data(), 
self.vaddr), MemoryState::Writable => { - MemoryRegion::new_writable(account.get_data_mut(feature_set)?, self.vaddr) + MemoryRegion::new_writable(account.get_data_mut()?, self.vaddr) } MemoryState::Cow(index_in_transaction) => { MemoryRegion::new_cow(account.get_data(), self.vaddr, index_in_transaction) @@ -194,7 +191,6 @@ pub fn serialize_parameters( transaction_context: &TransactionContext, instruction_context: &InstructionContext, copy_account_data: bool, - feature_set: &FeatureSet, ) -> Result< ( AlignedMemory, @@ -243,7 +239,6 @@ pub fn serialize_parameters( instruction_context.get_instruction_data(), &program_id, copy_account_data, - feature_set, ) } else { serialize_parameters_aligned( @@ -251,7 +246,6 @@ pub fn serialize_parameters( instruction_context.get_instruction_data(), &program_id, copy_account_data, - feature_set, ) } } @@ -262,7 +256,6 @@ pub fn deserialize_parameters( copy_account_data: bool, buffer: &[u8], accounts_metadata: &[SerializedAccountMetadata], - feature_set: &FeatureSet, ) -> Result<(), InstructionError> { let is_loader_deprecated = *instruction_context .try_borrow_last_program_account(transaction_context)? 
@@ -276,7 +269,6 @@ pub fn deserialize_parameters( copy_account_data, buffer, account_lengths, - feature_set, ) } else { deserialize_parameters_aligned( @@ -285,7 +277,6 @@ pub fn deserialize_parameters( copy_account_data, buffer, account_lengths, - feature_set, ) } } @@ -295,7 +286,6 @@ fn serialize_parameters_unaligned( instruction_data: &[u8], program_id: &Pubkey, copy_account_data: bool, - feature_set: &FeatureSet, ) -> Result< ( AlignedMemory, @@ -346,9 +336,9 @@ fn serialize_parameters_unaligned( let vm_key_addr = s.write_all(account.get_key().as_ref()); let vm_lamports_addr = s.write::(account.get_lamports().to_le()); s.write::((account.get_data().len() as u64).to_le()); - let vm_data_addr = s.write_account(&mut account, feature_set)?; + let vm_data_addr = s.write_account(&mut account)?; let vm_owner_addr = s.write_all(account.get_owner().as_ref()); - s.write::(account.is_executable(feature_set) as u8); + s.write::(account.is_executable() as u8); s.write::((account.get_rent_epoch()).to_le()); accounts_metadata.push(SerializedAccountMetadata { original_data_len: account.get_data().len(), @@ -374,7 +364,6 @@ pub fn deserialize_parameters_unaligned>( copy_account_data: bool, buffer: &[u8], account_lengths: I, - feature_set: &FeatureSet, ) -> Result<(), InstructionError> { let mut start = size_of::(); // number of accounts for (instruction_account_index, pre_len) in (0..instruction_context @@ -396,7 +385,7 @@ pub fn deserialize_parameters_unaligned>( .ok_or(InstructionError::InvalidArgument)?, ); if borrowed_account.get_lamports() != lamports { - borrowed_account.set_lamports(lamports, feature_set)?; + borrowed_account.set_lamports(lamports)?; } start += size_of::() // lamports + size_of::(); // data length @@ -407,9 +396,9 @@ pub fn deserialize_parameters_unaligned>( // The redundant check helps to avoid the expensive data comparison if we can match borrowed_account .can_data_be_resized(data.len()) - .and_then(|_| 
borrowed_account.can_data_be_changed(feature_set)) + .and_then(|_| borrowed_account.can_data_be_changed()) { - Ok(()) => borrowed_account.set_data_from_slice(data, feature_set)?, + Ok(()) => borrowed_account.set_data_from_slice(data)?, Err(err) if borrowed_account.get_data() != data => return Err(err), _ => {} } @@ -428,7 +417,6 @@ fn serialize_parameters_aligned( instruction_data: &[u8], program_id: &Pubkey, copy_account_data: bool, - feature_set: &FeatureSet, ) -> Result< ( AlignedMemory, @@ -478,13 +466,13 @@ fn serialize_parameters_aligned( s.write::(NON_DUP_MARKER); s.write::(borrowed_account.is_signer() as u8); s.write::(borrowed_account.is_writable() as u8); - s.write::(borrowed_account.is_executable(feature_set) as u8); + s.write::(borrowed_account.is_executable() as u8); s.write_all(&[0u8, 0, 0, 0]); let vm_key_addr = s.write_all(borrowed_account.get_key().as_ref()); let vm_owner_addr = s.write_all(borrowed_account.get_owner().as_ref()); let vm_lamports_addr = s.write::(borrowed_account.get_lamports().to_le()); s.write::((borrowed_account.get_data().len() as u64).to_le()); - let vm_data_addr = s.write_account(&mut borrowed_account, feature_set)?; + let vm_data_addr = s.write_account(&mut borrowed_account)?; s.write::((borrowed_account.get_rent_epoch()).to_le()); accounts_metadata.push(SerializedAccountMetadata { original_data_len: borrowed_account.get_data().len(), @@ -515,7 +503,6 @@ pub fn deserialize_parameters_aligned>( copy_account_data: bool, buffer: &[u8], account_lengths: I, - feature_set: &FeatureSet, ) -> Result<(), InstructionError> { let mut start = size_of::(); // number of accounts for (instruction_account_index, pre_len) in (0..instruction_context @@ -545,7 +532,7 @@ pub fn deserialize_parameters_aligned>( .ok_or(InstructionError::InvalidArgument)?, ); if borrowed_account.get_lamports() != lamports { - borrowed_account.set_lamports(lamports, feature_set)?; + borrowed_account.set_lamports(lamports)?; } start += size_of::(); // lamports let 
post_len = LittleEndian::read_u64( @@ -567,9 +554,9 @@ pub fn deserialize_parameters_aligned>( .ok_or(InstructionError::InvalidArgument)?; match borrowed_account .can_data_be_resized(post_len) - .and_then(|_| borrowed_account.can_data_be_changed(feature_set)) + .and_then(|_| borrowed_account.can_data_be_changed()) { - Ok(()) => borrowed_account.set_data_from_slice(data, feature_set)?, + Ok(()) => borrowed_account.set_data_from_slice(data)?, Err(err) if borrowed_account.get_data() != data => return Err(err), _ => {} } @@ -583,14 +570,14 @@ pub fn deserialize_parameters_aligned>( .ok_or(InstructionError::InvalidArgument)?; match borrowed_account .can_data_be_resized(post_len) - .and_then(|_| borrowed_account.can_data_be_changed(feature_set)) + .and_then(|_| borrowed_account.can_data_be_changed()) { Ok(()) => { - borrowed_account.set_data_length(post_len, feature_set)?; + borrowed_account.set_data_length(post_len)?; let allocated_bytes = post_len.saturating_sub(pre_len); if allocated_bytes > 0 { borrowed_account - .get_data_mut(feature_set)? + .get_data_mut()? .get_mut(pre_len..pre_len.saturating_add(allocated_bytes)) .ok_or(InstructionError::InvalidArgument)? 
.copy_from_slice( @@ -608,18 +595,15 @@ pub fn deserialize_parameters_aligned>( start += size_of::(); // rent_epoch if borrowed_account.get_owner().to_bytes() != owner { // Change the owner at the end so that we are allowed to change the lamports and data before - borrowed_account.set_owner(owner, feature_set)?; + borrowed_account.set_owner(owner)?; } } } Ok(()) } -pub(crate) fn account_data_region_memory_state( - account: &BorrowedAccount<'_>, - feature_set: &FeatureSet, -) -> MemoryState { - if account.can_data_be_changed(feature_set).is_ok() { +pub(crate) fn account_data_region_memory_state(account: &BorrowedAccount<'_>) -> MemoryState { + if account.can_data_be_changed().is_ok() { if account.is_shared() { MemoryState::Cow(account.get_index_in_transaction() as u64) } else { @@ -744,7 +728,6 @@ mod tests { invoke_context.transaction_context, instruction_context, copy_account_data, - &invoke_context.feature_set, ); assert_eq!( serialization_result.as_ref().err(), @@ -899,7 +882,6 @@ mod tests { invoke_context.transaction_context, instruction_context, copy_account_data, - &invoke_context.feature_set, ) .unwrap(); @@ -938,7 +920,7 @@ mod tests { assert_eq!(account.lamports(), account_info.lamports()); assert_eq!(account.data(), &account_info.data.borrow()[..]); assert_eq!(account.owner(), account_info.owner); - assert!(account_info.executable); + assert_eq!(account.executable(), account_info.executable); assert_eq!(account.rent_epoch(), account_info.rent_epoch); assert_eq!( @@ -961,7 +943,6 @@ mod tests { copy_account_data, serialized.as_slice(), &accounts_metadata, - &invoke_context.feature_set, ) .unwrap(); for (index_in_transaction, (_key, original_account)) in @@ -992,7 +973,6 @@ mod tests { invoke_context.transaction_context, instruction_context, copy_account_data, - &invoke_context.feature_set, ) .unwrap(); let mut serialized_regions = concat_regions(®ions); @@ -1023,7 +1003,7 @@ mod tests { assert_eq!(account.lamports(), account_info.lamports()); 
assert_eq!(account.data(), &account_info.data.borrow()[..]); assert_eq!(account.owner(), account_info.owner); - assert!(account_info.executable); + assert_eq!(account.executable(), account_info.executable); assert_eq!(account.rent_epoch(), account_info.rent_epoch); } @@ -1033,7 +1013,6 @@ mod tests { copy_account_data, serialized.as_slice(), &account_lengths, - &invoke_context.feature_set, ) .unwrap(); for (index_in_transaction, (_key, original_account)) in diff --git a/programs/bpf_loader/src/syscalls/cpi.rs b/programs/bpf_loader/src/syscalls/cpi.rs index b4368f2172e04f..13f9cbaf905275 100644 --- a/programs/bpf_loader/src/syscalls/cpi.rs +++ b/programs/bpf_loader/src/syscalls/cpi.rs @@ -8,7 +8,7 @@ use { memory_region::{MemoryRegion, MemoryState}, }, solana_sdk::{ - feature_set::{enable_bpf_loader_set_authority_checked_ix, FeatureSet}, + feature_set::enable_bpf_loader_set_authority_checked_ix, stable_layout::stable_instruction::StableInstruction, syscalls::{ MAX_CPI_ACCOUNT_INFOS, MAX_CPI_INSTRUCTION_ACCOUNTS, MAX_CPI_INSTRUCTION_DATA_LEN, @@ -883,7 +883,7 @@ where .transaction_context .get_key_of_account_at_index(instruction_account.index_in_transaction)?; - if callee_account.is_executable(&invoke_context.feature_set) { + if callee_account.is_executable() { // Use the known account consume_compute_meter( invoke_context, @@ -1139,7 +1139,6 @@ fn cpi_common( caller_account, &callee_account, is_loader_deprecated, - &invoke_context.feature_set, )?; } } @@ -1180,7 +1179,7 @@ fn update_callee_account( direct_mapping: bool, ) -> Result<(), Error> { if callee_account.get_lamports() != *caller_account.lamports { - callee_account.set_lamports(*caller_account.lamports, &invoke_context.feature_set)?; + callee_account.set_lamports(*caller_account.lamports)?; } if direct_mapping { @@ -1188,7 +1187,7 @@ fn update_callee_account( let post_len = *caller_account.ref_to_len_in_vm.get()? 
as usize; match callee_account .can_data_be_resized(post_len) - .and_then(|_| callee_account.can_data_be_changed(&invoke_context.feature_set)) + .and_then(|_| callee_account.can_data_be_changed()) { Ok(()) => { let realloc_bytes_used = post_len.saturating_sub(caller_account.original_data_len); @@ -1196,7 +1195,7 @@ fn update_callee_account( if is_loader_deprecated && realloc_bytes_used > 0 { return Err(InstructionError::InvalidRealloc.into()); } - callee_account.set_data_length(post_len, &invoke_context.feature_set)?; + callee_account.set_data_length(post_len)?; if realloc_bytes_used > 0 { let serialized_data = translate_slice::( memory_mapping, @@ -1207,7 +1206,7 @@ fn update_callee_account( invoke_context.get_check_aligned(), )?; callee_account - .get_data_mut(&invoke_context.feature_set)? + .get_data_mut()? .get_mut(caller_account.original_data_len..post_len) .ok_or(SyscallError::InvalidLength)? .copy_from_slice(serialized_data); @@ -1222,10 +1221,9 @@ fn update_callee_account( // The redundant check helps to avoid the expensive data comparison if we can match callee_account .can_data_be_resized(caller_account.serialized_data.len()) - .and_then(|_| callee_account.can_data_be_changed(&invoke_context.feature_set)) + .and_then(|_| callee_account.can_data_be_changed()) { - Ok(()) => callee_account - .set_data_from_slice(caller_account.serialized_data, &invoke_context.feature_set)?, + Ok(()) => callee_account.set_data_from_slice(caller_account.serialized_data)?, Err(err) if callee_account.get_data() != caller_account.serialized_data => { return Err(Box::new(err)); } @@ -1235,7 +1233,7 @@ fn update_callee_account( // Change the owner at the end so that we are allowed to change the lamports and data before if callee_account.get_owner() != caller_account.owner { - callee_account.set_owner(caller_account.owner.as_ref(), &invoke_context.feature_set)?; + callee_account.set_owner(caller_account.owner.as_ref())?; } Ok(()) @@ -1246,7 +1244,6 @@ fn update_caller_account_perms( 
caller_account: &CallerAccount, callee_account: &BorrowedAccount<'_>, is_loader_deprecated: bool, - feature_set: &FeatureSet, ) -> Result<(), Error> { let CallerAccount { original_data_len, @@ -1256,10 +1253,9 @@ fn update_caller_account_perms( let data_region = account_data_region(memory_mapping, *vm_data_addr, *original_data_len)?; if let Some(region) = data_region { - region.state.set(account_data_region_memory_state( - callee_account, - feature_set, - )); + region + .state + .set(account_data_region_memory_state(callee_account)); } let realloc_region = account_realloc_region( memory_mapping, @@ -1270,7 +1266,7 @@ fn update_caller_account_perms( if let Some(region) = realloc_region { region .state - .set(if callee_account.can_data_be_changed(feature_set).is_ok() { + .set(if callee_account.can_data_be_changed().is_ok() { MemoryState::Writable } else { MemoryState::Readable @@ -1818,11 +1814,9 @@ mod tests { let mut callee_account = borrow_instruction_account!(invoke_context, 0); + callee_account.set_lamports(42).unwrap(); callee_account - .set_lamports(42, &invoke_context.feature_set) - .unwrap(); - callee_account - .set_owner(Pubkey::new_unique().as_ref(), &invoke_context.feature_set) + .set_owner(Pubkey::new_unique().as_ref()) .unwrap(); update_caller_account( @@ -1891,9 +1885,7 @@ mod tests { (b"foobazbad".to_vec(), MAX_PERMITTED_DATA_INCREASE - 3), ] { assert_eq!(caller_account.serialized_data, callee_account.get_data()); - callee_account - .set_data_from_slice(&new_value, &invoke_context.feature_set) - .unwrap(); + callee_account.set_data_from_slice(&new_value).unwrap(); update_caller_account( &invoke_context, @@ -1921,10 +1913,7 @@ mod tests { } callee_account - .set_data_length( - original_data_len + MAX_PERMITTED_DATA_INCREASE, - &invoke_context.feature_set, - ) + .set_data_length(original_data_len + MAX_PERMITTED_DATA_INCREASE) .unwrap(); update_caller_account( &invoke_context, @@ -1940,10 +1929,7 @@ mod tests { 
assert!(is_zeroed(&data_slice[data_len..])); callee_account - .set_data_length( - original_data_len + MAX_PERMITTED_DATA_INCREASE + 1, - &invoke_context.feature_set, - ) + .set_data_length(original_data_len + MAX_PERMITTED_DATA_INCREASE + 1) .unwrap(); assert_matches!( update_caller_account( @@ -1958,11 +1944,9 @@ mod tests { ); // close the account + callee_account.set_data_length(0).unwrap(); callee_account - .set_data_length(0, &invoke_context.feature_set) - .unwrap(); - callee_account - .set_owner(system_program::id().as_ref(), &invoke_context.feature_set) + .set_owner(system_program::id().as_ref()) .unwrap(); update_caller_account( &invoke_context, @@ -2031,13 +2015,9 @@ mod tests { (vec![], 0), // check lower bound ] { if change_ptr { - callee_account - .set_data(new_value, &invoke_context.feature_set) - .unwrap(); + callee_account.set_data(new_value).unwrap(); } else { - callee_account - .set_data_from_slice(&new_value, &invoke_context.feature_set) - .unwrap(); + callee_account.set_data_from_slice(&new_value).unwrap(); } update_caller_account( @@ -2107,10 +2087,7 @@ mod tests { } callee_account - .set_data_length( - original_data_len + MAX_PERMITTED_DATA_INCREASE, - &invoke_context.feature_set, - ) + .set_data_length(original_data_len + MAX_PERMITTED_DATA_INCREASE) .unwrap(); update_caller_account( &invoke_context, @@ -2128,10 +2105,7 @@ mod tests { ); callee_account - .set_data_length( - original_data_len + MAX_PERMITTED_DATA_INCREASE + 1, - &invoke_context.feature_set, - ) + .set_data_length(original_data_len + MAX_PERMITTED_DATA_INCREASE + 1) .unwrap(); assert_matches!( update_caller_account( @@ -2146,11 +2120,9 @@ mod tests { ); // close the account + callee_account.set_data_length(0).unwrap(); callee_account - .set_data_length(0, &invoke_context.feature_set) - .unwrap(); - callee_account - .set_owner(system_program::id().as_ref(), &invoke_context.feature_set) + .set_owner(system_program::id().as_ref()) .unwrap(); update_caller_account( &invoke_context, 
@@ -2493,9 +2465,7 @@ mod tests { // this is done when a writable account is mapped, and it ensures // through make_data_mut() that the account is made writable and resized // with enough padding to hold the realloc padding - callee_account - .get_data_mut(&invoke_context.feature_set) - .unwrap(); + callee_account.get_data_mut().unwrap(); let serialized_data = translate_slice_mut::( &memory_mapping, diff --git a/programs/config/src/config_processor.rs b/programs/config/src/config_processor.rs index b85715eb171391..fd4b806567180d 100644 --- a/programs/config/src/config_processor.rs +++ b/programs/config/src/config_processor.rs @@ -127,7 +127,7 @@ declare_process_instruction!(Entrypoint, DEFAULT_COMPUTE_UNITS, |invoke_context| ic_msg!(invoke_context, "instruction data too large"); return Err(InstructionError::InvalidInstructionData); } - config_account.get_data_mut(&invoke_context.feature_set)?[..data.len()].copy_from_slice(data); + config_account.get_data_mut()?[..data.len()].copy_from_slice(data); Ok(()) }); diff --git a/programs/loader-v4/src/lib.rs b/programs/loader-v4/src/lib.rs index 4764b23fe65e50..9573f925085585 100644 --- a/programs/loader-v4/src/lib.rs +++ b/programs/loader-v4/src/lib.rs @@ -247,7 +247,7 @@ pub fn process_instruction_write( } let end_offset = (offset as usize).saturating_add(bytes.len()); program - .get_data_mut(&invoke_context.feature_set)? + .get_data_mut()? 
.get_mut( LoaderV4State::program_data_offset().saturating_add(offset as usize) ..LoaderV4State::program_data_offset().saturating_add(end_offset), @@ -325,20 +325,19 @@ pub fn process_instruction_truncate( return Err(InstructionError::InvalidArgument); } let lamports_to_receive = program.get_lamports().saturating_sub(required_lamports); - program.checked_sub_lamports(lamports_to_receive, &invoke_context.feature_set)?; - recipient.checked_add_lamports(lamports_to_receive, &invoke_context.feature_set)?; + program.checked_sub_lamports(lamports_to_receive)?; + recipient.checked_add_lamports(lamports_to_receive)?; } std::cmp::Ordering::Equal => {} } if new_size == 0 { - program.set_data_length(0, &invoke_context.feature_set)?; + program.set_data_length(0)?; } else { program.set_data_length( LoaderV4State::program_data_offset().saturating_add(new_size as usize), - &invoke_context.feature_set, )?; if is_initialization { - let state = get_state_mut(program.get_data_mut(&invoke_context.feature_set)?)?; + let state = get_state_mut(program.get_data_mut()?)?; state.slot = 0; state.status = LoaderV4Status::Retracted; state.authority_address = *authority_address; @@ -432,12 +431,12 @@ pub fn process_instruction_deploy( let rent = invoke_context.get_sysvar_cache().get_rent()?; let required_lamports = rent.minimum_balance(source_program.get_data().len()); let transfer_lamports = required_lamports.saturating_sub(program.get_lamports()); - program.set_data_from_slice(source_program.get_data(), &invoke_context.feature_set)?; - source_program.set_data_length(0, &invoke_context.feature_set)?; - source_program.checked_sub_lamports(transfer_lamports, &invoke_context.feature_set)?; - program.checked_add_lamports(transfer_lamports, &invoke_context.feature_set)?; + program.set_data_from_slice(source_program.get_data())?; + source_program.set_data_length(0)?; + source_program.checked_sub_lamports(transfer_lamports)?; + program.checked_add_lamports(transfer_lamports)?; } - let state = 
get_state_mut(program.get_data_mut(&invoke_context.feature_set)?)?; + let state = get_state_mut(program.get_data_mut()?)?; state.slot = current_slot; state.status = LoaderV4Status::Deployed; @@ -486,7 +485,7 @@ pub fn process_instruction_retract( ic_logger_msg!(log_collector, "Program is not deployed"); return Err(InstructionError::InvalidArgument); } - let state = get_state_mut(program.get_data_mut(&invoke_context.feature_set)?)?; + let state = get_state_mut(program.get_data_mut()?)?; state.status = LoaderV4Status::Retracted; Ok(()) } @@ -516,7 +515,7 @@ pub fn process_instruction_transfer_authority( ic_logger_msg!(log_collector, "New authority did not sign"); return Err(InstructionError::MissingRequiredSignature); } - let state = get_state_mut(program.get_data_mut(&invoke_context.feature_set)?)?; + let state = get_state_mut(program.get_data_mut()?)?; if let Some(new_authority_address) = new_authority_address { state.authority_address = new_authority_address; } else if matches!(state.status, LoaderV4Status::Deployed) { diff --git a/programs/sbf/benches/bpf_loader.rs b/programs/sbf/benches/bpf_loader.rs index 1dd827bbeb197b..cf8670cc86151a 100644 --- a/programs/sbf/benches/bpf_loader.rs +++ b/programs/sbf/benches/bpf_loader.rs @@ -38,7 +38,7 @@ use { bpf_loader, client::SyncClient, entrypoint::SUCCESS, - feature_set::{self, FeatureSet}, + feature_set::FeatureSet, instruction::{AccountMeta, Instruction}, message::Message, native_loader, @@ -189,15 +189,11 @@ fn bench_program_alu(bencher: &mut Bencher) { #[bench] fn bench_program_execute_noop(bencher: &mut Bencher) { let GenesisConfigInfo { - mut genesis_config, + genesis_config, mint_keypair, .. 
} = create_genesis_config(50); - genesis_config - .accounts - .remove(&feature_set::deprecate_executable_meta_update_in_bpf_loader::id()); - let bank = Bank::new_for_benches(&genesis_config); let (bank, bank_forks) = bank.wrap_with_bank_forks_for_tests(); let mut bank_client = BankClient::new_shared(bank.clone()); @@ -261,7 +257,6 @@ fn bench_create_vm(bencher: &mut Bencher) { .get_current_instruction_context() .unwrap(), !direct_mapping, // copy_account_data, - &invoke_context.feature_set, ) .unwrap(); @@ -296,7 +291,6 @@ fn bench_instruction_count_tuner(_bencher: &mut Bencher) { .get_current_instruction_context() .unwrap(), !direct_mapping, // copy_account_data - &invoke_context.feature_set, ) .unwrap(); diff --git a/programs/stake/src/stake_instruction.rs b/programs/stake/src/stake_instruction.rs index f52f978da324fe..b7ce459a541315 100644 --- a/programs/stake/src/stake_instruction.rs +++ b/programs/stake/src/stake_instruction.rs @@ -73,13 +73,7 @@ declare_process_instruction!(Entrypoint, DEFAULT_COMPUTE_UNITS, |invoke_context| Ok(StakeInstruction::Initialize(authorized, lockup)) => { let mut me = get_stake_account()?; let rent = get_sysvar_with_account_check::rent(invoke_context, instruction_context, 1)?; - initialize( - &mut me, - &authorized, - &lockup, - &rent, - &invoke_context.feature_set, - ) + initialize(&mut me, &authorized, &lockup, &rent) } Ok(StakeInstruction::Authorize(authorized_pubkey, stake_authorize)) => { let mut me = get_stake_account()?; @@ -96,7 +90,6 @@ declare_process_instruction!(Entrypoint, DEFAULT_COMPUTE_UNITS, |invoke_context| stake_authorize, &clock, custodian_pubkey, - &invoke_context.feature_set, ) } Ok(StakeInstruction::AuthorizeWithSeed(args)) => { @@ -118,7 +111,6 @@ declare_process_instruction!(Entrypoint, DEFAULT_COMPUTE_UNITS, |invoke_context| args.stake_authorize, &clock, custodian_pubkey, - &invoke_context.feature_set, ) } Ok(StakeInstruction::DelegateStake) => { @@ -221,7 +213,6 @@ declare_process_instruction!(Entrypoint, 
DEFAULT_COMPUTE_UNITS, |invoke_context| None }, new_warmup_cooldown_rate_epoch(invoke_context), - &invoke_context.feature_set, ) } Ok(StakeInstruction::Deactivate) => { @@ -233,13 +224,7 @@ declare_process_instruction!(Entrypoint, DEFAULT_COMPUTE_UNITS, |invoke_context| Ok(StakeInstruction::SetLockup(lockup)) => { let mut me = get_stake_account()?; let clock = invoke_context.get_sysvar_cache().get_clock()?; - set_lockup( - &mut me, - &lockup, - &signers, - &clock, - &invoke_context.feature_set, - ) + set_lockup(&mut me, &lockup, &signers, &clock) } Ok(StakeInstruction::InitializeChecked) => { let mut me = get_stake_account()?; @@ -260,13 +245,7 @@ declare_process_instruction!(Entrypoint, DEFAULT_COMPUTE_UNITS, |invoke_context| }; let rent = get_sysvar_with_account_check::rent(invoke_context, instruction_context, 1)?; - initialize( - &mut me, - &authorized, - &Lockup::default(), - &rent, - &invoke_context.feature_set, - ) + initialize(&mut me, &authorized, &Lockup::default(), &rent) } Ok(StakeInstruction::AuthorizeChecked(stake_authorize)) => { let mut me = get_stake_account()?; @@ -289,7 +268,6 @@ declare_process_instruction!(Entrypoint, DEFAULT_COMPUTE_UNITS, |invoke_context| stake_authorize, &clock, custodian_pubkey, - &invoke_context.feature_set, ) } Ok(StakeInstruction::AuthorizeCheckedWithSeed(args)) => { @@ -318,7 +296,6 @@ declare_process_instruction!(Entrypoint, DEFAULT_COMPUTE_UNITS, |invoke_context| args.stake_authorize, &clock, custodian_pubkey, - &invoke_context.feature_set, ) } Ok(StakeInstruction::SetLockupChecked(lockup_checked)) => { @@ -332,13 +309,7 @@ declare_process_instruction!(Entrypoint, DEFAULT_COMPUTE_UNITS, |invoke_context| custodian: custodian_pubkey.cloned(), }; let clock = invoke_context.get_sysvar_cache().get_clock()?; - set_lockup( - &mut me, - &lockup, - &signers, - &clock, - &invoke_context.feature_set, - ) + set_lockup(&mut me, &lockup, &signers, &clock) } Ok(StakeInstruction::GetMinimumDelegation) => { let feature_set = 
invoke_context.feature_set.as_ref(); diff --git a/programs/stake/src/stake_state.rs b/programs/stake/src/stake_state.rs index 68b03c9e1429a9..f20283cfbd8582 100644 --- a/programs/stake/src/stake_state.rs +++ b/programs/stake/src/stake_state.rs @@ -149,7 +149,6 @@ pub fn initialize( authorized: &Authorized, lockup: &Lockup, rent: &Rent, - feature_set: &FeatureSet, ) -> Result<(), InstructionError> { if stake_account.get_data().len() != StakeStateV2::size_of() { return Err(InstructionError::InvalidAccountData); @@ -158,14 +157,11 @@ pub fn initialize( if let StakeStateV2::Uninitialized = stake_account.get_state()? { let rent_exempt_reserve = rent.minimum_balance(stake_account.get_data().len()); if stake_account.get_lamports() >= rent_exempt_reserve { - stake_account.set_state( - &StakeStateV2::Initialized(Meta { - rent_exempt_reserve, - authorized: *authorized, - lockup: *lockup, - }), - feature_set, - ) + stake_account.set_state(&StakeStateV2::Initialized(Meta { + rent_exempt_reserve, + authorized: *authorized, + lockup: *lockup, + })) } else { Err(InstructionError::InsufficientFunds) } @@ -184,7 +180,6 @@ pub fn authorize( stake_authorize: StakeAuthorize, clock: &Clock, custodian: Option<&Pubkey>, - feature_set: &FeatureSet, ) -> Result<(), InstructionError> { match stake_account.get_state()? 
{ StakeStateV2::Stake(mut meta, stake, stake_flags) => { @@ -194,7 +189,7 @@ pub fn authorize( stake_authorize, Some((&meta.lockup, clock, custodian)), )?; - stake_account.set_state(&StakeStateV2::Stake(meta, stake, stake_flags), feature_set) + stake_account.set_state(&StakeStateV2::Stake(meta, stake, stake_flags)) } StakeStateV2::Initialized(mut meta) => { meta.authorized.authorize( @@ -203,7 +198,7 @@ pub fn authorize( stake_authorize, Some((&meta.lockup, clock, custodian)), )?; - stake_account.set_state(&StakeStateV2::Initialized(meta), feature_set) + stake_account.set_state(&StakeStateV2::Initialized(meta)) } _ => Err(InstructionError::InvalidAccountData), } @@ -221,7 +216,6 @@ pub fn authorize_with_seed( stake_authorize: StakeAuthorize, clock: &Clock, custodian: Option<&Pubkey>, - feature_set: &FeatureSet, ) -> Result<(), InstructionError> { let mut signers = HashSet::default(); if instruction_context.is_instruction_account_signer(authority_base_index)? { @@ -242,7 +236,6 @@ pub fn authorize_with_seed( stake_authorize, clock, custodian, - feature_set, ) } @@ -280,10 +273,7 @@ pub fn delegate( &vote_state?.convert_to_current(), clock.epoch, ); - stake_account.set_state( - &StakeStateV2::Stake(meta, stake, StakeFlags::empty()), - feature_set, - ) + stake_account.set_state(&StakeStateV2::Stake(meta, stake, StakeFlags::empty())) } StakeStateV2::Stake(meta, mut stake, stake_flags) => { meta.authorized.check(signers, StakeAuthorize::Staker)?; @@ -298,7 +288,7 @@ pub fn delegate( clock, stake_history, )?; - stake_account.set_state(&StakeStateV2::Stake(meta, stake, stake_flags), feature_set) + stake_account.set_state(&StakeStateV2::Stake(meta, stake, stake_flags)) } _ => Err(InstructionError::InvalidAccountData), } @@ -354,10 +344,7 @@ pub fn deactivate( if let StakeStateV2::Stake(meta, mut stake, mut stake_flags) = stake_account.get_state()? 
{ meta.authorized.check(signers, StakeAuthorize::Staker)?; deactivate_stake(invoke_context, &mut stake, &mut stake_flags, clock.epoch)?; - stake_account.set_state( - &StakeStateV2::Stake(meta, stake, stake_flags), - &invoke_context.feature_set, - ) + stake_account.set_state(&StakeStateV2::Stake(meta, stake, stake_flags)) } else { Err(InstructionError::InvalidAccountData) } @@ -368,16 +355,15 @@ pub fn set_lockup( lockup: &LockupArgs, signers: &HashSet, clock: &Clock, - feature_set: &FeatureSet, ) -> Result<(), InstructionError> { match stake_account.get_state()? { StakeStateV2::Initialized(mut meta) => { meta.set_lockup(lockup, signers, clock)?; - stake_account.set_state(&StakeStateV2::Initialized(meta), feature_set) + stake_account.set_state(&StakeStateV2::Initialized(meta)) } StakeStateV2::Stake(mut meta, stake, stake_flags) => { meta.set_lockup(lockup, signers, clock)?; - stake_account.set_state(&StakeStateV2::Stake(meta, stake, stake_flags), feature_set) + stake_account.set_state(&StakeStateV2::Stake(meta, stake, stake_flags)) } _ => Err(InstructionError::InvalidAccountData), } @@ -481,17 +467,11 @@ pub fn split( let mut stake_account = instruction_context .try_borrow_instruction_account(transaction_context, stake_account_index)?; - stake_account.set_state( - &StakeStateV2::Stake(meta, stake, stake_flags), - &invoke_context.feature_set, - )?; + stake_account.set_state(&StakeStateV2::Stake(meta, stake, stake_flags))?; drop(stake_account); let mut split = instruction_context .try_borrow_instruction_account(transaction_context, split_index)?; - split.set_state( - &StakeStateV2::Stake(split_meta, split_stake, stake_flags), - &invoke_context.feature_set, - )?; + split.set_state(&StakeStateV2::Stake(split_meta, split_stake, stake_flags))?; } StakeStateV2::Initialized(meta) => { meta.authorized.check(signers, StakeAuthorize::Staker)?; @@ -510,10 +490,7 @@ pub fn split( split_meta.rent_exempt_reserve = validated_split_info.destination_rent_exempt_reserve; let mut split 
= instruction_context .try_borrow_instruction_account(transaction_context, split_index)?; - split.set_state( - &StakeStateV2::Initialized(split_meta), - &invoke_context.feature_set, - )?; + split.set_state(&StakeStateV2::Initialized(split_meta))?; } StakeStateV2::Uninitialized => { let stake_pubkey = transaction_context.get_key_of_account_at_index( @@ -531,17 +508,17 @@ pub fn split( let mut stake_account = instruction_context .try_borrow_instruction_account(transaction_context, stake_account_index)?; if lamports == stake_account.get_lamports() { - stake_account.set_state(&StakeStateV2::Uninitialized, &invoke_context.feature_set)?; + stake_account.set_state(&StakeStateV2::Uninitialized)?; } drop(stake_account); let mut split = instruction_context.try_borrow_instruction_account(transaction_context, split_index)?; - split.checked_add_lamports(lamports, &invoke_context.feature_set)?; + split.checked_add_lamports(lamports)?; drop(split); let mut stake_account = instruction_context .try_borrow_instruction_account(transaction_context, stake_account_index)?; - stake_account.checked_sub_lamports(lamports, &invoke_context.feature_set)?; + stake_account.checked_sub_lamports(lamports)?; Ok(()) } @@ -597,16 +574,16 @@ pub fn merge( ic_msg!(invoke_context, "Merging stake accounts"); if let Some(merged_state) = stake_merge_kind.merge(invoke_context, source_merge_kind, clock)? 
{ - stake_account.set_state(&merged_state, &invoke_context.feature_set)?; + stake_account.set_state(&merged_state)?; } // Source is about to be drained, deinitialize its state - source_account.set_state(&StakeStateV2::Uninitialized, &invoke_context.feature_set)?; + source_account.set_state(&StakeStateV2::Uninitialized)?; // Drain the source stake account let lamports = source_account.get_lamports(); - source_account.checked_sub_lamports(lamports, &invoke_context.feature_set)?; - stake_account.checked_add_lamports(lamports, &invoke_context.feature_set)?; + source_account.checked_sub_lamports(lamports)?; + stake_account.checked_add_lamports(lamports)?; Ok(()) } @@ -698,9 +675,8 @@ pub fn redelegate( deactivate(invoke_context, stake_account, &clock, signers)?; // transfer the effective stake to the uninitialized stake account - stake_account.checked_sub_lamports(effective_stake, &invoke_context.feature_set)?; - uninitialized_stake_account - .checked_add_lamports(effective_stake, &invoke_context.feature_set)?; + stake_account.checked_sub_lamports(effective_stake)?; + uninitialized_stake_account.checked_add_lamports(effective_stake)?; // initialize and schedule `uninitialized_stake_account` for activation let sysvar_cache = invoke_context.get_sysvar_cache(); @@ -714,19 +690,16 @@ pub fn redelegate( &uninitialized_stake_meta, &invoke_context.feature_set, )?; - uninitialized_stake_account.set_state( - &StakeStateV2::Stake( - uninitialized_stake_meta, - new_stake( - stake_amount, - &vote_pubkey, - &vote_state.convert_to_current(), - clock.epoch, - ), - StakeFlags::MUST_FULLY_ACTIVATE_BEFORE_DEACTIVATION_IS_PERMITTED, + uninitialized_stake_account.set_state(&StakeStateV2::Stake( + uninitialized_stake_meta, + new_stake( + stake_amount, + &vote_pubkey, + &vote_state.convert_to_current(), + clock.epoch, ), - &invoke_context.feature_set, - )?; + StakeFlags::MUST_FULLY_ACTIVATE_BEFORE_DEACTIVATION_IS_PERMITTED, + ))?; Ok(()) } @@ -743,7 +716,6 @@ pub fn withdraw( 
withdraw_authority_index: IndexOfAccount, custodian_index: Option, new_rate_activation_epoch: Option, - feature_set: &FeatureSet, ) -> Result<(), InstructionError> { let withdraw_authority_pubkey = transaction_context.get_key_of_account_at_index( instruction_context @@ -828,14 +800,14 @@ pub fn withdraw( // Deinitialize state upon zero balance if lamports == stake_account.get_lamports() { - stake_account.set_state(&StakeStateV2::Uninitialized, feature_set)?; + stake_account.set_state(&StakeStateV2::Uninitialized)?; } - stake_account.checked_sub_lamports(lamports, feature_set)?; + stake_account.checked_sub_lamports(lamports)?; drop(stake_account); let mut to = instruction_context.try_borrow_instruction_account(transaction_context, to_index)?; - to.checked_add_lamports(lamports, feature_set)?; + to.checked_add_lamports(lamports)?; Ok(()) } @@ -883,10 +855,7 @@ pub(crate) fn deactivate_delinquent( // voted in the last `MINIMUM_DELINQUENT_EPOCHS_FOR_DEACTIVATION` if eligible_for_deactivate_delinquent(&delinquent_vote_state.epoch_credits, current_epoch) { deactivate_stake(invoke_context, &mut stake, &mut stake_flags, current_epoch)?; - stake_account.set_state( - &StakeStateV2::Stake(meta, stake, stake_flags), - &invoke_context.feature_set, - ) + stake_account.set_state(&StakeStateV2::Stake(meta, stake, stake_flags)) } else { Err(StakeError::MinimumDelinquentEpochsForDeactivationNotMet.into()) } diff --git a/programs/system/src/system_instruction.rs b/programs/system/src/system_instruction.rs index 0c9daf22d4b024..95860379fb17a0 100644 --- a/programs/system/src/system_instruction.rs +++ b/programs/system/src/system_instruction.rs @@ -56,10 +56,7 @@ pub fn advance_nonce_account( next_durable_nonce, invoke_context.lamports_per_signature, ); - account.set_state( - &Versions::new(State::Initialized(new_data)), - &invoke_context.feature_set, - ) + account.set_state(&Versions::new(State::Initialized(new_data))) } State::Uninitialized => { ic_msg!( @@ -117,10 +114,7 @@ pub fn 
withdraw_nonce_account( ); return Err(SystemError::NonceBlockhashNotExpired.into()); } - from.set_state( - &Versions::new(State::Uninitialized), - &invoke_context.feature_set, - )?; + from.set_state(&Versions::new(State::Uninitialized))?; } else { let min_balance = rent.minimum_balance(from.get_data().len()); let amount = checked_add(lamports, min_balance)?; @@ -147,11 +141,11 @@ pub fn withdraw_nonce_account( return Err(InstructionError::MissingRequiredSignature); } - from.checked_sub_lamports(lamports, &invoke_context.feature_set)?; + from.checked_sub_lamports(lamports)?; drop(from); let mut to = instruction_context .try_borrow_instruction_account(transaction_context, to_account_index)?; - to.checked_add_lamports(lamports, &invoke_context.feature_set)?; + to.checked_add_lamports(lamports)?; Ok(()) } @@ -190,7 +184,7 @@ pub fn initialize_nonce_account( invoke_context.lamports_per_signature, ); let state = State::Initialized(data); - account.set_state(&Versions::new(state), &invoke_context.feature_set) + account.set_state(&Versions::new(state)) } State::Initialized(_) => { ic_msg!( @@ -221,7 +215,7 @@ pub fn authorize_nonce_account( .get_state::()? 
.authorize(signers, *nonce_authority) { - Ok(versions) => account.set_state(&versions, &invoke_context.feature_set), + Ok(versions) => account.set_state(&versions), Err(AuthorizeNonceError::Uninitialized) => { ic_msg!( invoke_context, @@ -1002,9 +996,7 @@ mod test { let mut nonce_account = instruction_context .try_borrow_instruction_account(transaction_context, NONCE_ACCOUNT_INDEX) .unwrap(); - nonce_account - .checked_sub_lamports(42 * 2, &invoke_context.feature_set) - .unwrap(); + nonce_account.checked_sub_lamports(42 * 2).unwrap(); set_invoke_context_blockhash!(invoke_context, 63); let authorized = *nonce_account.get_key(); let result = diff --git a/programs/system/src/system_processor.rs b/programs/system/src/system_processor.rs index 2a66b388103f9a..57cd8e546f13d8 100644 --- a/programs/system/src/system_processor.rs +++ b/programs/system/src/system_processor.rs @@ -104,7 +104,7 @@ fn allocate( return Err(SystemError::InvalidAccountDataLength.into()); } - account.set_data_length(space as usize, &invoke_context.feature_set)?; + account.set_data_length(space as usize)?; Ok(()) } @@ -126,7 +126,7 @@ fn assign( return Err(InstructionError::MissingRequiredSignature); } - account.set_owner(&owner.to_bytes(), &invoke_context.feature_set) + account.set_owner(&owner.to_bytes()) } fn allocate_and_assign( @@ -203,11 +203,11 @@ fn transfer_verified( return Err(SystemError::ResultWithNegativeLamports.into()); } - from.checked_sub_lamports(lamports, &invoke_context.feature_set)?; + from.checked_sub_lamports(lamports)?; drop(from); let mut to = instruction_context .try_borrow_instruction_account(transaction_context, to_account_index)?; - to.checked_add_lamports(lamports, &invoke_context.feature_set)?; + to.checked_add_lamports(lamports)?; Ok(()) } @@ -481,9 +481,7 @@ declare_process_instruction!(Entrypoint, DEFAULT_COMPUTE_UNITS, |invoke_context| let nonce_versions: nonce::state::Versions = nonce_account.get_state()?; match nonce_versions.upgrade() { None => 
Err(InstructionError::InvalidArgument), - Some(nonce_versions) => { - nonce_account.set_state(&nonce_versions, &invoke_context.feature_set) - } + Some(nonce_versions) => nonce_account.set_state(&nonce_versions), } } SystemInstruction::Allocate { space } => { @@ -2065,4 +2063,54 @@ mod tests { upgraded_nonce_account ); } + + #[test] + fn test_assign_native_loader_and_transfer() { + for size in [0, 10] { + let pubkey = Pubkey::new_unique(); + let account = AccountSharedData::new(100, size, &system_program::id()); + let accounts = process_instruction( + &bincode::serialize(&SystemInstruction::Assign { + owner: solana_sdk::native_loader::id(), + }) + .unwrap(), + vec![(pubkey, account.clone())], + vec![AccountMeta { + pubkey, + is_signer: true, + is_writable: true, + }], + Ok(()), + ); + assert_eq!(accounts[0].owner(), &solana_sdk::native_loader::id()); + assert_eq!(accounts[0].lamports(), 100); + + let pubkey2 = Pubkey::new_unique(); + let accounts = process_instruction( + &bincode::serialize(&SystemInstruction::Transfer { lamports: 50 }).unwrap(), + vec![ + ( + pubkey2, + AccountSharedData::new(100, 0, &system_program::id()), + ), + (pubkey, accounts[0].clone()), + ], + vec![ + AccountMeta { + pubkey: pubkey2, + is_signer: true, + is_writable: true, + }, + AccountMeta { + pubkey, + is_signer: false, + is_writable: true, + }, + ], + Ok(()), + ); + assert_eq!(accounts[1].owner(), &solana_sdk::native_loader::id()); + assert_eq!(accounts[1].lamports(), 150); + } + } } diff --git a/programs/vote/src/vote_state/mod.rs b/programs/vote/src/vote_state/mod.rs index b95f47e8c1b9c2..f5901374d9b6d9 100644 --- a/programs/vote/src/vote_state/mod.rs +++ b/programs/vote/src/vote_state/mod.rs @@ -156,24 +156,22 @@ fn set_vote_account_state( && (!vote_account .is_rent_exempt_at_data_length(VoteStateVersions::vote_state_size_of(true)) || vote_account - .set_data_length(VoteStateVersions::vote_state_size_of(true), feature_set) + 
.set_data_length(VoteStateVersions::vote_state_size_of(true)) .is_err()) { // Account cannot be resized to the size of a vote state as it will not be rent exempt, or failed to be // resized for other reasons. So store the V1_14_11 version. - return vote_account.set_state( - &VoteStateVersions::V1_14_11(Box::new(VoteState1_14_11::from(vote_state))), - feature_set, - ); + return vote_account.set_state(&VoteStateVersions::V1_14_11(Box::new( + VoteState1_14_11::from(vote_state), + ))); } // Vote account is large enough to store the newest version of vote state - vote_account.set_state(&VoteStateVersions::new_current(vote_state), feature_set) + vote_account.set_state(&VoteStateVersions::new_current(vote_state)) // Else when the vote_state_add_vote_latency feature is not enabled, then the V1_14_11 version is stored } else { - vote_account.set_state( - &VoteStateVersions::V1_14_11(Box::new(VoteState1_14_11::from(vote_state))), - feature_set, - ) + vote_account.set_state(&VoteStateVersions::V1_14_11(Box::new( + VoteState1_14_11::from(vote_state), + ))) } } @@ -1023,11 +1021,11 @@ pub fn withdraw( } } - vote_account.checked_sub_lamports(lamports, feature_set)?; + vote_account.checked_sub_lamports(lamports)?; drop(vote_account); let mut to_account = instruction_context .try_borrow_instruction_account(transaction_context, to_account_index)?; - to_account.checked_add_lamports(lamports, feature_set)?; + to_account.checked_add_lamports(lamports)?; Ok(()) } @@ -1373,7 +1371,7 @@ mod tests { // Test that when the feature is enabled, if the vote account does have sufficient lamports, the // new vote state is written out assert_eq!( - borrowed_account.set_lamports(rent.minimum_balance(VoteState::size_of()), &feature_set), + borrowed_account.set_lamports(rent.minimum_balance(VoteState::size_of()),), Ok(()) ); assert_eq!( diff --git a/programs/zk-token-proof/src/lib.rs b/programs/zk-token-proof/src/lib.rs index 21c09b4ef123f2..ba47d13624826e 100644 --- 
a/programs/zk-token-proof/src/lib.rs +++ b/programs/zk-token-proof/src/lib.rs @@ -130,8 +130,7 @@ where return Err(InstructionError::InvalidAccountData); } - proof_context_account - .set_data_from_slice(&context_state_data, &invoke_context.feature_set)?; + proof_context_account.set_data_from_slice(&context_state_data)?; } Ok(()) @@ -173,13 +172,10 @@ fn process_close_proof_context(invoke_context: &mut InvokeContext) -> Result<(), let mut destination_account = instruction_context.try_borrow_instruction_account(transaction_context, 1)?; - destination_account.checked_add_lamports( - proof_context_account.get_lamports(), - &invoke_context.feature_set, - )?; - proof_context_account.set_lamports(0, &invoke_context.feature_set)?; - proof_context_account.set_data_length(0, &invoke_context.feature_set)?; - proof_context_account.set_owner(system_program::id().as_ref(), &invoke_context.feature_set)?; + destination_account.checked_add_lamports(proof_context_account.get_lamports())?; + proof_context_account.set_lamports(0)?; + proof_context_account.set_data_length(0)?; + proof_context_account.set_owner(system_program::id().as_ref())?; Ok(()) } diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index b7329724a2558e..388c2f4a15f529 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -107,9 +107,8 @@ use { }, solana_sdk::{ account::{ - create_account_shared_data_with_fields as create_account, create_executable_meta, - from_account, Account, AccountSharedData, InheritableAccountFields, ReadableAccount, - WritableAccount, + create_account_shared_data_with_fields as create_account, from_account, Account, + AccountSharedData, InheritableAccountFields, ReadableAccount, WritableAccount, }, clock::{ BankId, Epoch, Slot, SlotCount, SlotIndex, UnixTimestamp, DEFAULT_HASHES_PER_TICK, @@ -3955,12 +3954,10 @@ impl Bank { // Add a bogus executable account, which will be loaded and ignored. 
let (lamports, rent_epoch) = self.inherit_specially_retained_account_fields(&None); - // Mock account_data with executable_meta so that the account is executable. - let account_data = create_executable_meta(&owner); let account = AccountSharedData::from(Account { lamports, owner, - data: account_data.to_vec(), + data: vec![], executable: true, rent_epoch, }); diff --git a/runtime/src/bank/tests.rs b/runtime/src/bank/tests.rs index 29dbdc2e5aeacd..6960f220244998 100644 --- a/runtime/src/bank/tests.rs +++ b/runtime/src/bank/tests.rs @@ -1051,7 +1051,6 @@ fn test_rent_exempt_executable_account() { let mut account = AccountSharedData::new(account_balance, 0, &solana_sdk::pubkey::new_rand()); account.set_executable(true); account.set_owner(bpf_loader_upgradeable::id()); - account.set_data(create_executable_meta(account.owner()).to_vec()); bank.store_account(&account_pubkey, &account); let transfer_lamports = 1; @@ -1089,10 +1088,10 @@ fn test_rent_complex() { MockInstruction::Deduction => { instruction_context .try_borrow_instruction_account(transaction_context, 1)? - .checked_add_lamports(1, &invoke_context.feature_set)?; + .checked_add_lamports(1)?; instruction_context .try_borrow_instruction_account(transaction_context, 2)? - .checked_sub_lamports(1, &invoke_context.feature_set)?; + .checked_sub_lamports(1)?; Ok(()) } } @@ -5994,16 +5993,16 @@ fn test_transaction_with_duplicate_accounts_in_instruction() { let lamports = u64::from_le_bytes(instruction_data.try_into().unwrap()); instruction_context .try_borrow_instruction_account(transaction_context, 2)? - .checked_sub_lamports(lamports, &invoke_context.feature_set)?; + .checked_sub_lamports(lamports)?; instruction_context .try_borrow_instruction_account(transaction_context, 1)? - .checked_add_lamports(lamports, &invoke_context.feature_set)?; + .checked_add_lamports(lamports)?; instruction_context .try_borrow_instruction_account(transaction_context, 0)? 
- .checked_sub_lamports(lamports, &invoke_context.feature_set)?; + .checked_sub_lamports(lamports)?; instruction_context .try_borrow_instruction_account(transaction_context, 1)? - .checked_add_lamports(lamports, &invoke_context.feature_set)?; + .checked_add_lamports(lamports)?; Ok(()) }); @@ -6473,25 +6472,26 @@ fn test_bank_hash_consistency() { if bank.slot == 0 { assert_eq!( bank.hash().to_string(), - "3VqF5pMe3XABLqzUaYw2UVXfAokMJgMkrdfvneFQkHbB", + "i5hGiQ3WtEehNrvhbfPFkUdm267t18fSpujcYtkBioW", ); } + if bank.slot == 32 { assert_eq!( bank.hash().to_string(), - "B8GsaBJ9aJrQcbhTTfgNVuV4uwb4v8nKT86HUjDLvNgk", + "7NmBtNvbhoqzatJv8NgBs84qWrm4ZhpuC75DCpbqwiS" ); } if bank.slot == 64 { assert_eq!( bank.hash().to_string(), - "Eg9VRE3zUwarxWyHXhitX9wLkg1vfNeiVqVQxSif6qEC" + "A1jjuUaENeDcsSvwejFGaZ5zWmnJ77doSzqdKtfzpoFk" ); } if bank.slot == 128 { assert_eq!( bank.hash().to_string(), - "5rLmK24zyxdeb8aLn5LDEnHLDQmxRd5gWZDVJGgsFX1c" + "ApnMkFt5Bs4yDJ8S2CCPsQRL1He6vWXw6vMzAyc5i811" ); break; } @@ -6507,7 +6507,7 @@ fn test_same_program_id_uses_unique_executable_accounts() { let instruction_context = transaction_context.get_current_instruction_context()?; instruction_context .try_borrow_program_account(transaction_context, 0)? - .set_data_length(2, &invoke_context.feature_set) + .set_data_length(2) }); let (genesis_config, mint_keypair) = create_genesis_config(50000); @@ -9477,7 +9477,7 @@ fn test_transfer_sysvar() { let instruction_context = transaction_context.get_current_instruction_context()?; instruction_context .try_borrow_instruction_account(transaction_context, 1)? - .set_data(vec![0; 40], &invoke_context.feature_set)?; + .set_data(vec![0; 40])?; Ok(()) }); @@ -10321,10 +10321,10 @@ declare_process_instruction!(MockTransferBuiltin, 1, |invoke_context| { MockTransferInstruction::Transfer(amount) => { instruction_context .try_borrow_instruction_account(transaction_context, 1)? 
- .checked_sub_lamports(amount, &invoke_context.feature_set)?; + .checked_sub_lamports(amount)?; instruction_context .try_borrow_instruction_account(transaction_context, 2)? - .checked_add_lamports(amount, &invoke_context.feature_set)?; + .checked_add_lamports(amount)?; Ok(()) } } @@ -11034,7 +11034,7 @@ declare_process_instruction!(MockReallocBuiltin, 1, |invoke_context| { // Set data length instruction_context .try_borrow_instruction_account(transaction_context, 1)? - .set_data_length(new_size, &invoke_context.feature_set)?; + .set_data_length(new_size)?; // set balance let current_balance = instruction_context @@ -11045,17 +11045,17 @@ declare_process_instruction!(MockReallocBuiltin, 1, |invoke_context| { if diff_balance.is_positive() { instruction_context .try_borrow_instruction_account(transaction_context, 0)? - .checked_sub_lamports(amount, &invoke_context.feature_set)?; + .checked_sub_lamports(amount)?; instruction_context .try_borrow_instruction_account(transaction_context, 1)? - .set_lamports(new_balance, &invoke_context.feature_set)?; + .set_lamports(new_balance)?; } else { instruction_context .try_borrow_instruction_account(transaction_context, 0)? - .checked_add_lamports(amount, &invoke_context.feature_set)?; + .checked_add_lamports(amount)?; instruction_context .try_borrow_instruction_account(transaction_context, 1)? - .set_lamports(new_balance, &invoke_context.feature_set)?; + .set_lamports(new_balance)?; } Ok(()) } diff --git a/sdk/src/account.rs b/sdk/src/account.rs index 96cdd5b90ce99b..f701e868ccf4e2 100644 --- a/sdk/src/account.rs +++ b/sdk/src/account.rs @@ -6,9 +6,8 @@ use { crate::{ bpf_loader, bpf_loader_deprecated, bpf_loader_upgradeable, clock::{Epoch, INITIAL_RENT_EPOCH}, - feature_set::{deprecate_executable_meta_update_in_bpf_loader, FeatureSet}, lamports::LamportsError, - loader_v4, native_loader, + loader_v4, pubkey::Pubkey, }, serde::{ @@ -40,9 +39,6 @@ pub struct Account { /// the program that owns this account. 
If executable, the program that loads this account. pub owner: Pubkey, /// this account's data contains a loaded program (and is now read-only) - /// - /// When feature `deprecate_executable_meta_update_in_bpf_loader` is active, - /// `executable` is deprecated, please use `fn is_executable(&account)` instead. pub executable: bool, /// the epoch at which this account will next owe rent pub rent_epoch: Epoch, @@ -767,94 +763,6 @@ pub const PROGRAM_OWNERS: &[Pubkey] = &[ loader_v4::id(), ]; -const LOADER_V4_STATUS_BYTE_OFFSET: usize = 40; - -/// Create executable account meta data based on account's `owner`. -/// -/// This function is only used for testing and an optimization during -/// transaction loading. -/// -/// When the program account is already present in the program cache, we don't -/// need to load the full account data during transaction loading. Instead, all -/// we need is a minimal executable account meta data, which is what this -/// function returns. -pub fn create_executable_meta(owner: &Pubkey) -> &[u8] { - // For upgradable program account, only `UpgradeableLoaderState::Program` - // variant (i.e. discriminant = 2) should *executable*, which means the - // discriminant for the enum at byte offset 0 in account data is 2. - const EXECUTABLE_META_FOR_BPF_LOADER_UPGRADABLE: [u8; 1] = [2]; - - // For loader v4 program, when LoaderV4Status (byte_offset = 40 in account - // data) is set, the program is executable. - const fn get_executable_meta_for_loader_v4() -> [u8; 41] { - let mut v = [0; LOADER_V4_STATUS_BYTE_OFFSET + 1]; - v[LOADER_V4_STATUS_BYTE_OFFSET] = 1; - v - } - const EXECUTABLE_META_FOR_LOADER_V4: [u8; LOADER_V4_STATUS_BYTE_OFFSET + 1] = - get_executable_meta_for_loader_v4(); - - // For other owners, simple returns a 1 byte array would make it executable. 
- const DEFAULT_EXECUTABLE_META: [u8; 1] = [1]; - - if bpf_loader_upgradeable::check_id(owner) { - &EXECUTABLE_META_FOR_BPF_LOADER_UPGRADABLE - } else if loader_v4::check_id(owner) { - &EXECUTABLE_META_FOR_LOADER_V4 - } else { - &DEFAULT_EXECUTABLE_META - } -} - -/// Return true if the account program is executable. -pub fn is_executable(account: &impl ReadableAccount, feature_set: &FeatureSet) -> bool { - if !feature_set.is_active(&deprecate_executable_meta_update_in_bpf_loader::id()) { - account.executable() - } else { - // First, check if the account is empty. Empty accounts are not executable. - if account.data().is_empty() { - return false; - } - - // bpf_loader/bpf_loader_deprecated still relies on `executable` on the - // program account. When the program account is finalized, the loader will - // mark `executable` flag on the account. We can't emulate `executable` for - // these two loaders. However, when `deprecate_executable` is true, we - // should have already disabled the deployment of bpf_loader and - // bpf_loader_deprecated. Therefore, we can safely assume that all those - // programs are `executable`. - if bpf_loader::check_id(account.owner()) || bpf_loader_deprecated::check_id(account.owner()) - { - return true; - } - - if bpf_loader_upgradeable::check_id(account.owner()) { - // For upgradable program account, only - // `UpgradeableLoaderState::Program` variant (i.e. discriminant = 2) is - // *executable*. - return account.data()[0] == 2; - } - - if loader_v4::check_id(account.owner()) { - // LoaderV4Status (byte_offset = 40) - // return account.data()[LOADER_V4_STATUS_BYTE_OFFSET] != 0; - return false; // TODO: return false for now - } - - false - } -} - -/// Return true if the account program is a builtin program. -/// -/// This function also ensures that all valid builtin programs have non-empty -/// program data. Typically, the program data contains only the "name" for the -/// program. 
If, for some reason, the program account's data is empty, we should -/// exclude such a program from `builtins`. -pub fn is_builtin(account: &impl ReadableAccount) -> bool { - native_loader::check_id(account.owner()) && !account.data().is_empty() -} - #[cfg(test)] pub mod tests { use super::*; diff --git a/sdk/src/feature_set.rs b/sdk/src/feature_set.rs index bb7c50f460fd81..55ce4c1253940a 100644 --- a/sdk/src/feature_set.rs +++ b/sdk/src/feature_set.rs @@ -756,10 +756,6 @@ pub mod disable_bpf_loader_instructions { solana_sdk::declare_id!("7WeS1vfPRgeeoXArLh7879YcB9mgE9ktjPDtajXeWfXn"); } -pub mod deprecate_executable_meta_update_in_bpf_loader { - solana_sdk::declare_id!("k6uR1J9VtKJnTukBV2Eo15BEy434MBg8bT6hHQgmU8v"); -} - pub mod enable_zk_proof_from_account { solana_sdk::declare_id!("zkiTNuzBKxrCLMKehzuQeKZyLtX2yvFcEKMML8nExU8"); } @@ -967,7 +963,6 @@ lazy_static! { (index_erasure_conflict_duplicate_proofs::id(), "generate duplicate proofs for index and erasure conflicts #34360"), (merkle_conflict_duplicate_proofs::id(), "generate duplicate proofs for merkle root conflicts #34270"), (disable_bpf_loader_instructions::id(), "disable bpf loader management instructions #34194"), - (deprecate_executable_meta_update_in_bpf_loader::id(), "deprecate executable meta flag update in bpf loader #34194"), (enable_zk_proof_from_account::id(), "Enable zk token proof program to read proof from accounts instead of instruction data #34750"), (curve25519_restrict_msm_length::id(), "restrict curve25519 multiscalar multiplication vector lengths #34763"), (cost_model_requested_write_lock_cost::id(), "cost model uses number of requested write locks #34819"), diff --git a/sdk/src/transaction_context.rs b/sdk/src/transaction_context.rs index 981f64870f6063..7df7fc96d67933 100644 --- a/sdk/src/transaction_context.rs +++ b/sdk/src/transaction_context.rs @@ -17,8 +17,7 @@ use { }; use { crate::{ - account::{is_builtin, is_executable, AccountSharedData, ReadableAccount}, - 
feature_set::FeatureSet, + account::{AccountSharedData, ReadableAccount}, instruction::InstructionError, pubkey::Pubkey, }, @@ -740,11 +739,7 @@ impl<'a> BorrowedAccount<'a> { /// Assignes the owner of this account (transaction wide) #[cfg(not(target_os = "solana"))] - pub fn set_owner( - &mut self, - pubkey: &[u8], - feature_set: &FeatureSet, - ) -> Result<(), InstructionError> { + pub fn set_owner(&mut self, pubkey: &[u8]) -> Result<(), InstructionError> { // Only the owner can assign a new owner if !self.is_owned_by_current_program() { return Err(InstructionError::ModifiedProgramId); @@ -754,7 +749,7 @@ impl<'a> BorrowedAccount<'a> { return Err(InstructionError::ModifiedProgramId); } // and only if the account is not executable - if self.is_executable(feature_set) { + if self.is_executable() { return Err(InstructionError::ModifiedProgramId); } // and only if the data is zero-initialized or empty @@ -778,11 +773,7 @@ impl<'a> BorrowedAccount<'a> { /// Overwrites the number of lamports of this account (transaction wide) #[cfg(not(target_os = "solana"))] - pub fn set_lamports( - &mut self, - lamports: u64, - feature_set: &FeatureSet, - ) -> Result<(), InstructionError> { + pub fn set_lamports(&mut self, lamports: u64) -> Result<(), InstructionError> { // An account not owned by the program cannot have its balance decrease if !self.is_owned_by_current_program() && lamports < self.get_lamports() { return Err(InstructionError::ExternalAccountLamportSpend); @@ -792,7 +783,7 @@ impl<'a> BorrowedAccount<'a> { return Err(InstructionError::ReadonlyLamportChange); } // The balance of executable accounts may not change - if self.is_executable(feature_set) { + if self.is_executable() { return Err(InstructionError::ExecutableLamportChange); } // don't touch the account if the lamports do not change @@ -806,31 +797,21 @@ impl<'a> BorrowedAccount<'a> { /// Adds lamports to this account (transaction wide) #[cfg(not(target_os = "solana"))] - pub fn checked_add_lamports( - &mut 
self, - lamports: u64, - feature_set: &FeatureSet, - ) -> Result<(), InstructionError> { + pub fn checked_add_lamports(&mut self, lamports: u64) -> Result<(), InstructionError> { self.set_lamports( self.get_lamports() .checked_add(lamports) .ok_or(InstructionError::ArithmeticOverflow)?, - feature_set, ) } /// Subtracts lamports from this account (transaction wide) #[cfg(not(target_os = "solana"))] - pub fn checked_sub_lamports( - &mut self, - lamports: u64, - feature_set: &FeatureSet, - ) -> Result<(), InstructionError> { + pub fn checked_sub_lamports(&mut self, lamports: u64) -> Result<(), InstructionError> { self.set_lamports( self.get_lamports() .checked_sub(lamports) .ok_or(InstructionError::ArithmeticOverflow)?, - feature_set, ) } @@ -842,11 +823,8 @@ impl<'a> BorrowedAccount<'a> { /// Returns a writable slice of the account data (transaction wide) #[cfg(not(target_os = "solana"))] - pub fn get_data_mut( - &mut self, - feature_set: &FeatureSet, - ) -> Result<&mut [u8], InstructionError> { - self.can_data_be_changed(feature_set)?; + pub fn get_data_mut(&mut self) -> Result<&mut [u8], InstructionError> { + self.can_data_be_changed()?; self.touch()?; self.make_data_mut(); Ok(self.account.data_as_mut_slice()) @@ -871,13 +849,9 @@ impl<'a> BorrowedAccount<'a> { not(target_os = "solana"), any(test, feature = "dev-context-only-utils") ))] - pub fn set_data( - &mut self, - data: Vec, - feature_set: &FeatureSet, - ) -> Result<(), InstructionError> { + pub fn set_data(&mut self, data: Vec) -> Result<(), InstructionError> { self.can_data_be_resized(data.len())?; - self.can_data_be_changed(feature_set)?; + self.can_data_be_changed()?; self.touch()?; self.update_accounts_resize_delta(data.len())?; @@ -890,18 +864,14 @@ impl<'a> BorrowedAccount<'a> { /// Call this when you have a slice of data you do not own and want to /// replace the account data with it. 
     #[cfg(not(target_os = "solana"))]
-    pub fn set_data_from_slice(
-        &mut self,
-        data: &[u8],
-        feature_set: &FeatureSet,
-    ) -> Result<(), InstructionError> {
+    pub fn set_data_from_slice(&mut self, data: &[u8]) -> Result<(), InstructionError> {
         self.can_data_be_resized(data.len())?;
-        self.can_data_be_changed(feature_set)?;
+        self.can_data_be_changed()?;
         self.touch()?;
         self.update_accounts_resize_delta(data.len())?;
         // Calling make_data_mut() here guarantees that set_data_from_slice()
         // copies in places, extending the account capacity if necessary but
-        // never reducing it. This is required as the account might be directly
+        // never reducing it. This is required as the account might be directly
         // mapped into a MemoryRegion, and therefore reducing capacity would
         // leave a hole in the vm address space. After CPI or upon program
         // termination, the runtime will zero the extra capacity.
@@ -915,13 +885,9 @@ impl<'a> BorrowedAccount<'a> {
     ///
     /// Fills it with zeros at the end if is extended or truncates at the end otherwise.
     #[cfg(not(target_os = "solana"))]
-    pub fn set_data_length(
-        &mut self,
-        new_length: usize,
-        feature_set: &FeatureSet,
-    ) -> Result<(), InstructionError> {
+    pub fn set_data_length(&mut self, new_length: usize) -> Result<(), InstructionError> {
         self.can_data_be_resized(new_length)?;
-        self.can_data_be_changed(feature_set)?;
+        self.can_data_be_changed()?;
         // don't touch the account if the length does not change
         if self.get_data().len() == new_length {
             return Ok(());
@@ -934,14 +900,10 @@ impl<'a> BorrowedAccount<'a> {

     /// Appends all elements in a slice to the account
     #[cfg(not(target_os = "solana"))]
-    pub fn extend_from_slice(
-        &mut self,
-        data: &[u8],
-        feature_set: &FeatureSet,
-    ) -> Result<(), InstructionError> {
+    pub fn extend_from_slice(&mut self, data: &[u8]) -> Result<(), InstructionError> {
         let new_len = self.get_data().len().saturating_add(data.len());
         self.can_data_be_resized(new_len)?;
-        self.can_data_be_changed(feature_set)?;
+        self.can_data_be_changed()?;

         if data.is_empty() {
             return Ok(());
@@ -995,7 +957,7 @@ impl<'a> BorrowedAccount<'a> {
         // about to write into it. Make the account mutable by copying it in a
         // buffer with MAX_PERMITTED_DATA_INCREASE capacity so that if the
         // transaction reallocs, we don't have to copy the whole account data a
-        // second time to fulfill the realloc.
+        // second time to fulfill the realloc.
         //
         // NOTE: The account memory region CoW code in bpf_loader::create_vm() implements the same
         // logic and must be kept in sync.
@@ -1014,12 +976,8 @@ impl<'a> BorrowedAccount<'a> { /// Serializes a state into the account data #[cfg(not(target_os = "solana"))] - pub fn set_state( - &mut self, - state: &T, - feature_set: &FeatureSet, - ) -> Result<(), InstructionError> { - let data = self.get_data_mut(feature_set)?; + pub fn set_state(&mut self, state: &T) -> Result<(), InstructionError> { + let data = self.get_data_mut()?; let serialized_size = bincode::serialized_size(state).map_err(|_| InstructionError::GenericError)?; if serialized_size > data.len() as u64 { @@ -1040,8 +998,8 @@ impl<'a> BorrowedAccount<'a> { /// Returns whether this account is executable (transaction wide) #[inline] - pub fn is_executable(&self, feature_set: &FeatureSet) -> bool { - is_builtin(&*self.account) || is_executable(&*self.account, feature_set) + pub fn is_executable(&self) -> bool { + self.account.executable() } /// Configures whether this account is executable (transaction wide) @@ -1064,11 +1022,11 @@ impl<'a> BorrowedAccount<'a> { return Err(InstructionError::ExecutableModified); } // one can not clear the executable flag - if self.account.executable() && !is_executable { + if self.is_executable() && !is_executable { return Err(InstructionError::ExecutableModified); } // don't touch the account if the executable flag does not change - if self.account.executable() == is_executable { + if self.is_executable() == is_executable { return Ok(()); } self.touch()?; @@ -1119,9 +1077,9 @@ impl<'a> BorrowedAccount<'a> { /// Returns an error if the account data can not be mutated by the current program #[cfg(not(target_os = "solana"))] - pub fn can_data_be_changed(&self, feature_set: &FeatureSet) -> Result<(), InstructionError> { + pub fn can_data_be_changed(&self) -> Result<(), InstructionError> { // Only non-executable accounts data can be changed - if self.is_executable(feature_set) { + if self.is_executable() { return Err(InstructionError::ExecutableDataModified); } // and only if the account is writable diff --git 
a/svm/src/account_loader.rs b/svm/src/account_loader.rs index ee06dd5fbf2198..374fc756de31da 100644 --- a/svm/src/account_loader.rs +++ b/svm/src/account_loader.rs @@ -11,10 +11,7 @@ use { loaded_programs::LoadedProgramsForTxBatch, }, solana_sdk::{ - account::{ - create_executable_meta, is_builtin, is_executable, Account, AccountSharedData, - ReadableAccount, WritableAccount, - }, + account::{Account, AccountSharedData, ReadableAccount, WritableAccount}, feature_set::{ self, include_loaded_accounts_data_size_in_fee_calculation, remove_rounding_in_fee_calculation, @@ -336,7 +333,7 @@ fn load_transaction_accounts( return Err(TransactionError::ProgramAccountNotFound); } - if !(is_builtin(program_account) || is_executable(program_account, &feature_set)) { + if !program_account.executable() { error_counters.invalid_program_for_execution += 1; return Err(TransactionError::InvalidProgramForExecution); } @@ -356,8 +353,7 @@ fn load_transaction_accounts( let owner_index = accounts.len(); if let Some(owner_account) = callbacks.get_account_shared_data(owner_id) { if !native_loader::check_id(owner_account.owner()) - || !(is_builtin(&owner_account) - || is_executable(&owner_account, &feature_set)) + || !owner_account.executable() { error_counters.invalid_program_for_execution += 1; return Err(TransactionError::InvalidProgramForExecution); @@ -423,7 +419,6 @@ fn account_shared_data_from_program( .ok_or(TransactionError::AccountNotFound)?; program_account.set_owner(**program_owner); program_account.set_executable(true); - program_account.set_data_from_slice(create_executable_meta(program_owner)); Ok(program_account) } @@ -887,7 +882,7 @@ mod tests { let mut account = AccountSharedData::new(40, 1, &Pubkey::default()); account.set_owner(bpf_loader_upgradeable::id()); - account.set_data(create_executable_meta(account.owner()).to_vec()); + account.set_executable(true); accounts.push((key1, account)); let instructions = vec![CompiledInstruction::new(1, &(), vec![0])]; @@ -967,7 +962,6 
@@ mod tests { account.set_executable(true); account.set_rent_epoch(1); account.set_owner(key1); - account.set_data(create_executable_meta(account.owner()).to_vec()); accounts.push((key2, account)); let instructions = vec![ @@ -1428,7 +1422,6 @@ mod tests { let mut expected = AccountSharedData::default(); expected.set_owner(other_key); expected.set_executable(true); - expected.set_data_from_slice(create_executable_meta(&other_key)); assert_eq!(result.unwrap(), expected); } From 81c8ed73bcdd632988fdba5043bb112cc45c858f Mon Sep 17 00:00:00 2001 From: Trent Nelson <490004+t-nelson@users.noreply.github.com> Date: Wed, 20 Mar 2024 11:13:50 -0600 Subject: [PATCH 21/28] rpc-sts: add config options for stake-weighted qos (#197) * rpc-sts: plumb options for swqos config * rpc-sts: send to specific tpu peers when configured --- .../src/send_transaction_service.rs | 22 ++++++++++++--- validator/src/cli.rs | 16 +++++++++++ validator/src/main.rs | 28 +++++++++++++++---- 3 files changed, 57 insertions(+), 9 deletions(-) diff --git a/send-transaction-service/src/send_transaction_service.rs b/send-transaction-service/src/send_transaction_service.rs index dbdcda2f2ff905..abe53b236d2e75 100644 --- a/send-transaction-service/src/send_transaction_service.rs +++ b/send-transaction-service/src/send_transaction_service.rs @@ -115,6 +115,7 @@ pub struct Config { pub batch_send_rate_ms: u64, /// When the retry pool exceeds this max size, new transactions are dropped after their first broadcast attempt pub retry_pool_max_size: usize, + pub tpu_peers: Option>, } impl Default for Config { @@ -127,6 +128,7 @@ impl Default for Config { batch_size: DEFAULT_TRANSACTION_BATCH_SIZE, batch_send_rate_ms: DEFAULT_BATCH_SEND_RATE_MS, retry_pool_max_size: MAX_TRANSACTION_RETRY_POOL_SIZE, + tpu_peers: None, } } } @@ -566,12 +568,18 @@ impl SendTransactionService { stats: &SendTransactionServiceStats, ) { // Processing the transactions in batch - let addresses = Self::get_tpu_addresses_with_slots( + let 
mut addresses = config + .tpu_peers + .as_ref() + .map(|addrs| addrs.iter().map(|a| (a, 0)).collect::>()) + .unwrap_or_default(); + let leader_addresses = Self::get_tpu_addresses_with_slots( tpu_address, leader_info, config, connection_cache.protocol(), ); + addresses.extend(leader_addresses); let wire_transactions = transactions .iter() @@ -584,8 +592,8 @@ impl SendTransactionService { }) .collect::>(); - for address in &addresses { - Self::send_transactions(address.0, &wire_transactions, connection_cache, stats); + for (address, _) in &addresses { + Self::send_transactions(address, &wire_transactions, connection_cache, stats); } } @@ -702,14 +710,20 @@ impl SendTransactionService { let iter = wire_transactions.chunks(config.batch_size); for chunk in iter { + let mut addresses = config + .tpu_peers + .as_ref() + .map(|addrs| addrs.iter().collect::>()) + .unwrap_or_default(); let mut leader_info_provider = leader_info_provider.lock().unwrap(); let leader_info = leader_info_provider.get_leader_info(); - let addresses = Self::get_tpu_addresses( + let leader_addresses = Self::get_tpu_addresses( tpu_address, leader_info, config, connection_cache.protocol(), ); + addresses.extend(leader_addresses); for address in &addresses { Self::send_transactions(address, chunk, connection_cache, stats); diff --git a/validator/src/cli.rs b/validator/src/cli.rs index e9298d9c02928e..f127273c8da2f3 100644 --- a/validator/src/cli.rs +++ b/validator/src/cli.rs @@ -1163,6 +1163,22 @@ pub fn app<'a>(version: &'a str, default_args: &'a DefaultArgs) -> App<'a, 'a> { .default_value(&default_args.rpc_send_transaction_retry_pool_max_size) .help("The maximum size of transactions retry pool."), ) + .arg( + Arg::with_name("rpc_send_transaction_tpu_peer") + .long("rpc-send-transaction-tpu-peer") + .takes_value(true) + .number_of_values(1) + .multiple(true) + .value_name("HOST:PORT") + .validator(solana_net_utils::is_host_port) + .help("Peer(s) to broadcast transactions to instead of the current 
leader") + ) + .arg( + Arg::with_name("rpc_send_transaction_also_leader") + .long("rpc-send-transaction-also-leader") + .requires("rpc_send_transaction_tpu_peer") + .help("With `--rpc-send-transaction-tpu-peer HOST:PORT`, also send to the current leader") + ) .arg( Arg::with_name("rpc_scan_and_fix_roots") .long("rpc-scan-and-fix-roots") diff --git a/validator/src/main.rs b/validator/src/main.rs index fd55bf74d20dde..545ecfda481d35 100644 --- a/validator/src/main.rs +++ b/validator/src/main.rs @@ -1308,6 +1308,27 @@ pub fn main() { ); exit(1); } + let rpc_send_transaction_tpu_peers = matches + .values_of("rpc_send_transaction_tpu_peer") + .map(|values| { + values + .map(solana_net_utils::parse_host_port) + .collect::, String>>() + }) + .transpose() + .unwrap_or_else(|e| { + eprintln!("failed to parse rpc send-transaction-service tpu peer address: {e}"); + exit(1); + }); + let rpc_send_transaction_also_leader = matches.is_present("rpc_send_transaction_also_leader"); + let leader_forward_count = + if rpc_send_transaction_tpu_peers.is_some() && !rpc_send_transaction_also_leader { + // rpc-sts is configured to send only to specific tpu peers. 
disable leader forwards + 0 + } else { + value_t_or_exit!(matches, "rpc_send_transaction_leader_forward_count", u64) + }; + let full_api = matches.is_present("full_rpc_api"); let mut validator_config = ValidatorConfig { @@ -1399,11 +1420,7 @@ pub fn main() { contact_debug_interval, send_transaction_service_config: send_transaction_service::Config { retry_rate_ms: rpc_send_retry_rate_ms, - leader_forward_count: value_t_or_exit!( - matches, - "rpc_send_transaction_leader_forward_count", - u64 - ), + leader_forward_count, default_max_retries: value_t!( matches, "rpc_send_transaction_default_max_retries", @@ -1422,6 +1439,7 @@ pub fn main() { "rpc_send_transaction_retry_pool_max_size", usize ), + tpu_peers: rpc_send_transaction_tpu_peers, }, no_poh_speed_test: matches.is_present("no_poh_speed_test"), no_os_memory_stats_reporting: matches.is_present("no_os_memory_stats_reporting"), From e97d3590c7e8b76840401c8e523ace813bf96835 Mon Sep 17 00:00:00 2001 From: Yueh-Hsuan Chiang <93241502+yhchiang-sol@users.noreply.github.com> Date: Wed, 20 Mar 2024 10:39:25 -0700 Subject: [PATCH 22/28] [TieredStorage] Refactor TieredStorage::new_readonly() code path (#195) #### Problem The TieredStorage::new_readonly() function currently has the following problems: * It opens the file without checking the magic number before checking and loading the footer. * It opens the file twice: first to load the footer, then open again by the reader. #### Summary of Changes This PR refactors TieredStorage::new_readonly() so that it first performs all checks inside the constructor of TieredReadableFile. The TieredReadableFile instance is then passed to the proper reader (currently HotStorageReader) when all checks are passed. #### Test Plan * Added a new test to check MagicNumberMismatch. 
* Existing tiered-storage tests --- accounts-db/src/tiered_storage.rs | 3 +- accounts-db/src/tiered_storage/file.rs | 86 +++++++++++++++++++--- accounts-db/src/tiered_storage/footer.rs | 24 +----- accounts-db/src/tiered_storage/hot.rs | 35 +++++---- accounts-db/src/tiered_storage/readable.rs | 6 +- 5 files changed, 105 insertions(+), 49 deletions(-) diff --git a/accounts-db/src/tiered_storage.rs b/accounts-db/src/tiered_storage.rs index cc2776ed178cf6..70169a59428fe6 100644 --- a/accounts-db/src/tiered_storage.rs +++ b/accounts-db/src/tiered_storage.rs @@ -170,7 +170,8 @@ mod tests { use { super::*, crate::account_storage::meta::StoredMetaWriteVersion, - footer::{TieredStorageFooter, TieredStorageMagicNumber}, + file::TieredStorageMagicNumber, + footer::TieredStorageFooter, hot::HOT_FORMAT, index::IndexOffset, solana_sdk::{ diff --git a/accounts-db/src/tiered_storage/file.rs b/accounts-db/src/tiered_storage/file.rs index 605e55a0b193a1..e6ea4a7c65d15d 100644 --- a/accounts-db/src/tiered_storage/file.rs +++ b/accounts-db/src/tiered_storage/file.rs @@ -1,5 +1,6 @@ use { - bytemuck::{AnyBitPattern, NoUninit}, + super::{error::TieredStorageError, TieredStorageResult}, + bytemuck::{AnyBitPattern, NoUninit, Pod, Zeroable}, std::{ fs::{File, OpenOptions}, io::{BufWriter, Read, Result as IoResult, Seek, SeekFrom, Write}, @@ -8,23 +9,37 @@ use { }, }; +/// The ending 8 bytes of a valid tiered account storage file. 
+pub const FILE_MAGIC_NUMBER: u64 = u64::from_le_bytes(*b"AnzaTech"); + +#[derive(Debug, PartialEq, Eq, Clone, Copy, Pod, Zeroable)] +#[repr(C)] +pub struct TieredStorageMagicNumber(pub u64); + +// Ensure there are no implicit padding bytes +const _: () = assert!(std::mem::size_of::() == 8); + +impl Default for TieredStorageMagicNumber { + fn default() -> Self { + Self(FILE_MAGIC_NUMBER) + } +} + #[derive(Debug)] pub struct TieredReadableFile(pub File); impl TieredReadableFile { - pub fn new(file_path: impl AsRef) -> Self { - Self( + pub fn new(file_path: impl AsRef) -> TieredStorageResult { + let file = Self( OpenOptions::new() .read(true) .create(false) - .open(&file_path) - .unwrap_or_else(|err| { - panic!( - "[TieredStorageError] Unable to open {} as read-only: {err}", - file_path.as_ref().display(), - ); - }), - ) + .open(&file_path)?, + ); + + file.check_magic_number()?; + + Ok(file) } pub fn new_writable(file_path: impl AsRef) -> IoResult { @@ -36,6 +51,19 @@ impl TieredReadableFile { )) } + fn check_magic_number(&self) -> TieredStorageResult<()> { + self.seek_from_end(-(std::mem::size_of::() as i64))?; + let mut magic_number = TieredStorageMagicNumber::zeroed(); + self.read_pod(&mut magic_number)?; + if magic_number != TieredStorageMagicNumber::default() { + return Err(TieredStorageError::MagicNumberMismatch( + TieredStorageMagicNumber::default().0, + magic_number.0, + )); + } + Ok(()) + } + /// Reads a value of type `T` from the file. /// /// Type T must be plain ol' data. 
@@ -127,3 +155,39 @@ impl TieredWritableFile { Ok(bytes.len()) } } + +#[cfg(test)] +mod tests { + use { + crate::tiered_storage::{ + error::TieredStorageError, + file::{TieredReadableFile, TieredWritableFile, FILE_MAGIC_NUMBER}, + }, + std::path::Path, + tempfile::TempDir, + }; + + fn generate_test_file_with_number(path: impl AsRef, number: u64) { + let mut file = TieredWritableFile::new(path).unwrap(); + file.write_pod(&number).unwrap(); + } + + #[test] + fn test_new() { + let temp_dir = TempDir::new().unwrap(); + let path = temp_dir.path().join("test_new"); + generate_test_file_with_number(&path, FILE_MAGIC_NUMBER); + assert!(TieredReadableFile::new(&path).is_ok()); + } + + #[test] + fn test_magic_number_mismatch() { + let temp_dir = TempDir::new().unwrap(); + let path = temp_dir.path().join("test_magic_number_mismatch"); + generate_test_file_with_number(&path, !FILE_MAGIC_NUMBER); + assert!(matches!( + TieredReadableFile::new(&path), + Err(TieredStorageError::MagicNumberMismatch(_, _)) + )); + } +} diff --git a/accounts-db/src/tiered_storage/footer.rs b/accounts-db/src/tiered_storage/footer.rs index fa885f2394ce63..89e671d121cce6 100644 --- a/accounts-db/src/tiered_storage/footer.rs +++ b/accounts-db/src/tiered_storage/footer.rs @@ -1,13 +1,13 @@ use { crate::tiered_storage::{ error::TieredStorageError, - file::{TieredReadableFile, TieredWritableFile}, + file::{TieredReadableFile, TieredStorageMagicNumber, TieredWritableFile}, index::IndexBlockFormat, mmap_utils::{get_pod, get_type}, owners::OwnersBlockFormat, TieredStorageResult, }, - bytemuck::{Pod, Zeroable}, + bytemuck::Zeroable, memmap2::Mmap, num_enum::TryFromPrimitiveError, solana_sdk::{hash::Hash, pubkey::Pubkey}, @@ -26,22 +26,6 @@ static_assertions::const_assert_eq!(mem::size_of::(), 160); /// even when the footer's format changes. pub const FOOTER_TAIL_SIZE: usize = 24; -/// The ending 8 bytes of a valid tiered account storage file. 
-pub const FOOTER_MAGIC_NUMBER: u64 = 0x502A2AB5; // SOLALABS -> SOLANA LABS - -#[derive(Debug, PartialEq, Eq, Clone, Copy, Pod, Zeroable)] -#[repr(C)] -pub struct TieredStorageMagicNumber(pub u64); - -// Ensure there are no implicit padding bytes -const _: () = assert!(std::mem::size_of::() == 8); - -impl Default for TieredStorageMagicNumber { - fn default() -> Self { - Self(FOOTER_MAGIC_NUMBER) - } -} - #[repr(u16)] #[derive( Clone, @@ -133,7 +117,7 @@ pub struct TieredStorageFooter { /// The size of the footer including the magic number. pub footer_size: u64, // This field is persisted in the storage but not in this struct. - // The number should match FOOTER_MAGIC_NUMBER. + // The number should match FILE_MAGIC_NUMBER. // pub magic_number: u64, } @@ -186,7 +170,7 @@ impl Default for TieredStorageFooter { impl TieredStorageFooter { pub fn new_from_path(path: impl AsRef) -> TieredStorageResult { - let file = TieredReadableFile::new(path); + let file = TieredReadableFile::new(path)?; Self::new_from_footer_block(&file) } diff --git a/accounts-db/src/tiered_storage/hot.rs b/accounts-db/src/tiered_storage/hot.rs index c00dff302c9cea..1a5017535cdded 100644 --- a/accounts-db/src/tiered_storage/hot.rs +++ b/accounts-db/src/tiered_storage/hot.rs @@ -7,7 +7,7 @@ use { accounts_hash::AccountHash, tiered_storage::{ byte_block, - file::TieredWritableFile, + file::{TieredReadableFile, TieredWritableFile}, footer::{AccountBlockFormat, AccountMetaFormat, TieredStorageFooter}, index::{AccountIndexWriterEntry, AccountOffset, IndexBlockFormat, IndexOffset}, meta::{AccountMetaFlags, AccountMetaOptionalFields, TieredAccountMeta}, @@ -24,7 +24,7 @@ use { account::ReadableAccount, pubkey::Pubkey, rent_collector::RENT_EXEMPT_RENT_EPOCH, stake_history::Epoch, }, - std::{borrow::Borrow, fs::OpenOptions, option::Option, path::Path}, + std::{borrow::Borrow, option::Option, path::Path}, }; pub const HOT_FORMAT: TieredStorageFormat = TieredStorageFormat { @@ -346,10 +346,8 @@ pub struct 
HotStorageReader { } impl HotStorageReader { - /// Constructs a HotStorageReader from the specified path. - pub fn new_from_path(path: impl AsRef) -> TieredStorageResult { - let file = OpenOptions::new().read(true).open(path)?; - let mmap = unsafe { MmapOptions::new().map(&file)? }; + pub fn new(file: TieredReadableFile) -> TieredStorageResult { + let mmap = unsafe { MmapOptions::new().map(&file.0)? }; // Here we are copying the footer, as accessing any data in a // TieredStorage instance requires accessing its Footer. // This can help improve cache locality and reduce the overhead @@ -899,7 +897,8 @@ pub mod tests { // Reopen the same storage, and expect the persisted footer is // the same as what we have written. { - let hot_storage = HotStorageReader::new_from_path(&path).unwrap(); + let file = TieredReadableFile::new(&path).unwrap(); + let hot_storage = HotStorageReader::new(file).unwrap(); assert_eq!(expected_footer, *hot_storage.footer()); } } @@ -945,7 +944,8 @@ pub mod tests { footer.write_footer_block(&mut file).unwrap(); } - let hot_storage = HotStorageReader::new_from_path(&path).unwrap(); + let file = TieredReadableFile::new(&path).unwrap(); + let hot_storage = HotStorageReader::new(file).unwrap(); for (offset, expected_meta) in account_offsets.iter().zip(hot_account_metas.iter()) { let meta = hot_storage.get_account_meta_from_offset(*offset).unwrap(); @@ -975,7 +975,8 @@ pub mod tests { footer.write_footer_block(&mut file).unwrap(); } - let hot_storage = HotStorageReader::new_from_path(&path).unwrap(); + let file = TieredReadableFile::new(&path).unwrap(); + let hot_storage = HotStorageReader::new(file).unwrap(); let offset = HotAccountOffset::new(footer.index_block_offset as usize).unwrap(); // Read from index_block_offset, which offset doesn't belong to // account blocks. 
Expect assert failure here @@ -1026,7 +1027,8 @@ pub mod tests { footer.write_footer_block(&mut file).unwrap(); } - let hot_storage = HotStorageReader::new_from_path(&path).unwrap(); + let file = TieredReadableFile::new(&path).unwrap(); + let hot_storage = HotStorageReader::new(file).unwrap(); for (i, index_writer_entry) in index_writer_entries.iter().enumerate() { let account_offset = hot_storage .get_account_offset(IndexOffset(i as u32)) @@ -1075,7 +1077,8 @@ pub mod tests { footer.write_footer_block(&mut file).unwrap(); } - let hot_storage = HotStorageReader::new_from_path(&path).unwrap(); + let file = TieredReadableFile::new(&path).unwrap(); + let hot_storage = HotStorageReader::new(file).unwrap(); for (i, address) in addresses.iter().enumerate() { assert_eq!( hot_storage @@ -1149,7 +1152,8 @@ pub mod tests { footer.write_footer_block(&mut file).unwrap(); } - let hot_storage = HotStorageReader::new_from_path(&path).unwrap(); + let file = TieredReadableFile::new(&path).unwrap(); + let hot_storage = HotStorageReader::new(file).unwrap(); // First, verify whether we can find the expected owners. 
let mut owner_candidates = owner_addresses.clone(); @@ -1281,7 +1285,8 @@ pub mod tests { footer.write_footer_block(&mut file).unwrap(); } - let hot_storage = HotStorageReader::new_from_path(&path).unwrap(); + let file = TieredReadableFile::new(&path).unwrap(); + let hot_storage = HotStorageReader::new(file).unwrap(); for i in 0..NUM_ACCOUNTS { let (stored_meta, next) = hot_storage @@ -1362,10 +1367,10 @@ pub mod tests { writer.write_accounts(&storable_accounts, 0).unwrap() }; - let hot_storage = HotStorageReader::new_from_path(&path).unwrap(); + let file = TieredReadableFile::new(&path).unwrap(); + let hot_storage = HotStorageReader::new(file).unwrap(); let num_accounts = account_data_sizes.len(); - for i in 0..num_accounts { let (stored_meta, next) = hot_storage .get_account(IndexOffset(i as u32)) diff --git a/accounts-db/src/tiered_storage/readable.rs b/accounts-db/src/tiered_storage/readable.rs index e3d169d4f6d99e..15d678ffc856fc 100644 --- a/accounts-db/src/tiered_storage/readable.rs +++ b/accounts-db/src/tiered_storage/readable.rs @@ -3,6 +3,7 @@ use { account_storage::meta::StoredAccountMeta, accounts_file::MatchAccountOwnerError, tiered_storage::{ + file::TieredReadableFile, footer::{AccountMetaFormat, TieredStorageFooter}, hot::HotStorageReader, index::IndexOffset, @@ -22,9 +23,10 @@ pub enum TieredStorageReader { impl TieredStorageReader { /// Creates a reader for the specified tiered storage accounts file. 
pub fn new_from_path(path: impl AsRef) -> TieredStorageResult { - let footer = TieredStorageFooter::new_from_path(&path)?; + let file = TieredReadableFile::new(&path)?; + let footer = TieredStorageFooter::new_from_footer_block(&file)?; match footer.account_meta_format { - AccountMetaFormat::Hot => Ok(Self::Hot(HotStorageReader::new_from_path(path)?)), + AccountMetaFormat::Hot => Ok(Self::Hot(HotStorageReader::new(file)?)), } } From 2273098c553a4b6c5f5c926117a608ab2e799231 Mon Sep 17 00:00:00 2001 From: Yueh-Hsuan Chiang <93241502+yhchiang-sol@users.noreply.github.com> Date: Wed, 20 Mar 2024 12:17:12 -0700 Subject: [PATCH 23/28] [TieredStorage] Store account address range (#172) #### Problem The TieredStorageFooter has the min_account_address and max_account_address fields to describe the account address range in its file. But the current implementation hasn't updated the fields yet. #### Summary of Changes This PR enables the TieredStorage to persist address range information into its footer via min_account_address and max_account_address. #### Test Plan Updated tiered-storage test to verify persisted account address range. 
--- accounts-db/src/tiered_storage.rs | 24 +++++- accounts-db/src/tiered_storage/hot.rs | 9 ++- accounts-db/src/tiered_storage/meta.rs | 77 +++++++++++++++++++- accounts-db/src/tiered_storage/test_utils.rs | 12 +++ 4 files changed, 117 insertions(+), 5 deletions(-) diff --git a/accounts-db/src/tiered_storage.rs b/accounts-db/src/tiered_storage.rs index 70169a59428fe6..3f655896a28ed6 100644 --- a/accounts-db/src/tiered_storage.rs +++ b/accounts-db/src/tiered_storage.rs @@ -183,7 +183,7 @@ mod tests { mem::ManuallyDrop, }, tempfile::tempdir, - test_utils::{create_test_account, verify_test_account}, + test_utils::{create_test_account, verify_test_account_with_footer}, }; impl TieredStorage { @@ -368,13 +368,33 @@ mod tests { let mut index_offset = IndexOffset(0); let mut verified_accounts = HashSet::new(); + let footer = reader.footer(); + + const MIN_PUBKEY: Pubkey = Pubkey::new_from_array([0x00u8; 32]); + const MAX_PUBKEY: Pubkey = Pubkey::new_from_array([0xFFu8; 32]); + let mut min_pubkey_ref = &MAX_PUBKEY; + let mut max_pubkey_ref = &MIN_PUBKEY; + while let Some((stored_meta, next)) = reader.get_account(index_offset).unwrap() { if let Some(account) = expected_accounts_map.get(stored_meta.pubkey()) { - verify_test_account(&stored_meta, *account, stored_meta.pubkey()); + verify_test_account_with_footer( + &stored_meta, + *account, + stored_meta.pubkey(), + footer, + ); verified_accounts.insert(stored_meta.pubkey()); + if *min_pubkey_ref > *stored_meta.pubkey() { + min_pubkey_ref = stored_meta.pubkey(); + } + if *max_pubkey_ref < *stored_meta.pubkey() { + max_pubkey_ref = stored_meta.pubkey(); + } } index_offset = next; } + assert_eq!(footer.min_account_address, *min_pubkey_ref); + assert_eq!(footer.max_account_address, *max_pubkey_ref); assert!(!verified_accounts.is_empty()); assert_eq!(verified_accounts.len(), expected_accounts_map.len()) } diff --git a/accounts-db/src/tiered_storage/hot.rs b/accounts-db/src/tiered_storage/hot.rs index 
1a5017535cdded..c1e92e4469b269 100644 --- a/accounts-db/src/tiered_storage/hot.rs +++ b/accounts-db/src/tiered_storage/hot.rs @@ -10,7 +10,9 @@ use { file::{TieredReadableFile, TieredWritableFile}, footer::{AccountBlockFormat, AccountMetaFormat, TieredStorageFooter}, index::{AccountIndexWriterEntry, AccountOffset, IndexBlockFormat, IndexOffset}, - meta::{AccountMetaFlags, AccountMetaOptionalFields, TieredAccountMeta}, + meta::{ + AccountAddressRange, AccountMetaFlags, AccountMetaOptionalFields, TieredAccountMeta, + }, mmap_utils::{get_pod, get_slice}, owners::{OwnerOffset, OwnersBlockFormat, OwnersTable, OWNER_NO_OWNER}, StorableAccounts, StorableAccountsWithHashesAndWriteVersions, TieredStorageError, @@ -620,6 +622,7 @@ impl HotStorageWriter { let mut index = vec![]; let mut owners_table = OwnersTable::default(); let mut cursor = 0; + let mut address_range = AccountAddressRange::default(); // writing accounts blocks let len = accounts.accounts.len(); @@ -631,6 +634,7 @@ impl HotStorageWriter { address, offset: HotAccountOffset::new(cursor)?, }; + address_range.update(address); // Obtain necessary fields from the account, or default fields // for a zero-lamport account in the None case. @@ -691,7 +695,8 @@ impl HotStorageWriter { footer .owners_block_format .write_owners_block(&mut self.storage, &owners_table)?; - + footer.min_account_address = *address_range.min; + footer.max_account_address = *address_range.max; footer.write_footer_block(&mut self.storage)?; Ok(stored_infos) diff --git a/accounts-db/src/tiered_storage/meta.rs b/accounts-db/src/tiered_storage/meta.rs index 2aa53e5a4de1ed..c98fe2efa8b6f6 100644 --- a/accounts-db/src/tiered_storage/meta.rs +++ b/accounts-db/src/tiered_storage/meta.rs @@ -4,7 +4,7 @@ use { crate::tiered_storage::owners::OwnerOffset, bytemuck::{Pod, Zeroable}, modular_bitfield::prelude::*, - solana_sdk::stake_history::Epoch, + solana_sdk::{pubkey::Pubkey, stake_history::Epoch}, }; /// The struct that handles the account meta flags. 
@@ -124,6 +124,38 @@ impl AccountMetaOptionalFields { } } +const MIN_ACCOUNT_ADDRESS: Pubkey = Pubkey::new_from_array([0x00u8; 32]); +const MAX_ACCOUNT_ADDRESS: Pubkey = Pubkey::new_from_array([0xFFu8; 32]); + +#[derive(Debug)] +/// A struct that maintains an address-range using its min and max fields. +pub struct AccountAddressRange<'a> { + /// The minimum address observed via update() + pub min: &'a Pubkey, + /// The maximum address observed via update() + pub max: &'a Pubkey, +} + +impl Default for AccountAddressRange<'_> { + fn default() -> Self { + Self { + min: &MAX_ACCOUNT_ADDRESS, + max: &MIN_ACCOUNT_ADDRESS, + } + } +} + +impl<'a> AccountAddressRange<'a> { + pub fn update(&mut self, address: &'a Pubkey) { + if *self.min > *address { + self.min = address; + } + if *self.max < *address { + self.max = address; + } + } +} + #[cfg(test)] pub mod tests { use super::*; @@ -221,4 +253,47 @@ pub mod tests { ); } } + + #[test] + fn test_pubkey_range_update_single() { + let address = solana_sdk::pubkey::new_rand(); + let mut address_range = AccountAddressRange::default(); + + address_range.update(&address); + // For a single update, the min and max should equal to the address + assert_eq!(*address_range.min, address); + assert_eq!(*address_range.max, address); + } + + #[test] + fn test_pubkey_range_update_multiple() { + const NUM_PUBKEYS: usize = 20; + + let mut address_range = AccountAddressRange::default(); + let mut addresses = Vec::with_capacity(NUM_PUBKEYS); + + let mut min_index = 0; + let mut max_index = 0; + + // Generate random addresses and track expected min and max indices + for i in 0..NUM_PUBKEYS { + let address = solana_sdk::pubkey::new_rand(); + addresses.push(address); + + // Update expected min and max indices + if address < addresses[min_index] { + min_index = i; + } + if address > addresses[max_index] { + max_index = i; + } + } + + addresses + .iter() + .for_each(|address| address_range.update(address)); + + assert_eq!(*address_range.min, 
addresses[min_index]); + assert_eq!(*address_range.max, addresses[max_index]); + } } diff --git a/accounts-db/src/tiered_storage/test_utils.rs b/accounts-db/src/tiered_storage/test_utils.rs index f44f20f77cc5dd..8916ef894cc26e 100644 --- a/accounts-db/src/tiered_storage/test_utils.rs +++ b/accounts-db/src/tiered_storage/test_utils.rs @@ -1,6 +1,7 @@ #![cfg(test)] //! Helper functions for TieredStorage tests use { + super::footer::TieredStorageFooter, crate::{ account_storage::meta::{StoredAccountMeta, StoredMeta}, accounts_hash::AccountHash, @@ -61,3 +62,14 @@ pub(super) fn verify_test_account( assert_eq!(stored_meta.pubkey(), address); assert_eq!(*stored_meta.hash(), AccountHash(Hash::default())); } + +pub(super) fn verify_test_account_with_footer( + stored_meta: &StoredAccountMeta<'_>, + account: Option<&impl ReadableAccount>, + address: &Pubkey, + footer: &TieredStorageFooter, +) { + verify_test_account(stored_meta, account, address); + assert!(footer.min_account_address <= *address); + assert!(footer.max_account_address >= *address); +} From 1d89ea01cc6b597a66c4c5986cb2a34310a307eb Mon Sep 17 00:00:00 2001 From: Dmitri Makarov Date: Wed, 20 Mar 2024 15:26:45 -0400 Subject: [PATCH 24/28] Rename LoadedPrograms to ProgramCache for readability (#339) --- program-runtime/src/loaded_programs.rs | 60 ++++++++++---------- runtime/src/bank.rs | 78 ++++++++++++-------------- runtime/src/bank/tests.rs | 14 ++--- svm/src/transaction_processor.rs | 61 +++++++++----------- svm/tests/integration_test.rs | 20 +++---- 5 files changed, 110 insertions(+), 123 deletions(-) diff --git a/program-runtime/src/loaded_programs.rs b/program-runtime/src/loaded_programs.rs index f6163d63cd738c..8364f7013d65d1 100644 --- a/program-runtime/src/loaded_programs.rs +++ b/program-runtime/src/loaded_programs.rs @@ -145,7 +145,7 @@ pub struct LoadedProgram { pub latest_access_slot: AtomicU64, } -/// Global cache statistics for [LoadedPrograms]. +/// Global cache statistics for [ProgramCache]. 
#[derive(Debug, Default)] pub struct Stats { /// a program was already in the cache @@ -568,7 +568,7 @@ struct SecondLevel { /// - allows for cooperative loading of TX batches which hit the same missing programs simultaneously. /// - enforces that all programs used in a batch are eagerly loaded ahead of execution. /// - is not persisted to disk or a snapshot, so it needs to cold start and warm up first. -pub struct LoadedPrograms { +pub struct ProgramCache { /// A two level index: /// /// The first level is for the address at which programs are deployed and the second level for the slot (and thus also fork). @@ -595,9 +595,9 @@ pub struct LoadedPrograms { pub loading_task_waiter: Arc, } -impl Debug for LoadedPrograms { +impl Debug for ProgramCache { fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { - f.debug_struct("LoadedPrograms") + f.debug_struct("ProgramCache") .field("root slot", &self.latest_root_slot) .field("root epoch", &self.latest_root_epoch) .field("stats", &self.stats) @@ -606,11 +606,11 @@ impl Debug for LoadedPrograms { } } -/// Local view into [LoadedPrograms] which was extracted for a specific TX batch. +/// Local view into [ProgramCache] which was extracted for a specific TX batch. /// -/// This isolation enables the global [LoadedPrograms] to continue to evolve (e.g. evictions), +/// This isolation enables the global [ProgramCache] to continue to evolve (e.g. evictions), /// while the TX batch is guaranteed it will continue to find all the programs it requires. -/// For program management instructions this also buffers them before they are merged back into the global [LoadedPrograms]. +/// For program management instructions this also buffers them before they are merged back into the global [ProgramCache]. #[derive(Clone, Debug, Default)] pub struct LoadedProgramsForTxBatch { /// Pubkey is the address of a program. 
@@ -681,7 +681,7 @@ pub enum LoadedProgramMatchCriteria { NoCriteria, } -impl LoadedPrograms { +impl ProgramCache { pub fn new(root_slot: Slot, root_epoch: Epoch) -> Self { Self { entries: HashMap::new(), @@ -734,7 +734,7 @@ impl LoadedPrograms { (LoadedProgramType::Unloaded(_), LoadedProgramType::TestLoaded(_)) => {} _ => { // Something is wrong, I can feel it ... - error!("LoadedPrograms::assign_program() failed key={:?} existing={:?} entry={:?}", key, slot_versions, entry); + error!("ProgramCache::assign_program() failed key={:?} existing={:?} entry={:?}", key, slot_versions, entry); debug_assert!(false, "Unexpected replacement of an entry"); self.stats.replacements.fetch_add(1, Ordering::Relaxed); return true; @@ -1146,9 +1146,9 @@ impl solana_frozen_abi::abi_example::AbiExample for LoadedProgram { } #[cfg(RUSTC_WITH_SPECIALIZATION)] -impl solana_frozen_abi::abi_example::AbiExample for LoadedPrograms { +impl solana_frozen_abi::abi_example::AbiExample for ProgramCache { fn example() -> Self { - // LoadedPrograms isn't serializable by definition. + // ProgramCache isn't serializable by definition. 
Self::new(Slot::default(), Epoch::default()) } } @@ -1158,7 +1158,7 @@ mod tests { use { crate::loaded_programs::{ BlockRelation, ForkGraph, LoadedProgram, LoadedProgramMatchCriteria, LoadedProgramType, - LoadedPrograms, LoadedProgramsForTxBatch, ProgramRuntimeEnvironment, + LoadedProgramsForTxBatch, ProgramCache, ProgramRuntimeEnvironment, ProgramRuntimeEnvironments, DELAY_VISIBILITY_SLOT_OFFSET, }, assert_matches::assert_matches, @@ -1178,8 +1178,8 @@ mod tests { static MOCK_ENVIRONMENT: std::sync::OnceLock = std::sync::OnceLock::::new(); - fn new_mock_cache() -> LoadedPrograms { - let mut cache = LoadedPrograms::new(0, 0); + fn new_mock_cache() -> ProgramCache { + let mut cache = ProgramCache::new(0, 0); cache.environments.program_runtime_v1 = MOCK_ENVIRONMENT .get_or_init(|| Arc::new(BuiltinProgram::new_mock())) @@ -1220,7 +1220,7 @@ mod tests { } fn set_tombstone( - cache: &mut LoadedPrograms, + cache: &mut ProgramCache, key: Pubkey, slot: Slot, reason: LoadedProgramType, @@ -1231,7 +1231,7 @@ mod tests { } fn insert_unloaded_program( - cache: &mut LoadedPrograms, + cache: &mut ProgramCache, key: Pubkey, slot: Slot, ) -> Arc { @@ -1254,7 +1254,7 @@ mod tests { unloaded } - fn num_matching_entries(cache: &LoadedPrograms, predicate: P) -> usize + fn num_matching_entries(cache: &ProgramCache, predicate: P) -> usize where P: Fn(&LoadedProgramType) -> bool, FG: ForkGraph, @@ -1302,7 +1302,7 @@ mod tests { } fn program_deploy_test_helper( - cache: &mut LoadedPrograms, + cache: &mut ProgramCache, program: Pubkey, deployment_slots: Vec, usage_counters: Vec, @@ -2574,28 +2574,28 @@ mod tests { let tombstone = Arc::new(LoadedProgram::new_tombstone(0, LoadedProgramType::Closed)); assert!( - LoadedPrograms::::matches_loaded_program_criteria( + ProgramCache::::matches_loaded_program_criteria( &tombstone, &LoadedProgramMatchCriteria::NoCriteria ) ); assert!( - LoadedPrograms::::matches_loaded_program_criteria( + ProgramCache::::matches_loaded_program_criteria( &tombstone, 
&LoadedProgramMatchCriteria::Tombstone ) ); assert!( - LoadedPrograms::::matches_loaded_program_criteria( + ProgramCache::::matches_loaded_program_criteria( &tombstone, &LoadedProgramMatchCriteria::DeployedOnOrAfterSlot(0) ) ); assert!( - !LoadedPrograms::::matches_loaded_program_criteria( + !ProgramCache::::matches_loaded_program_criteria( &tombstone, &LoadedProgramMatchCriteria::DeployedOnOrAfterSlot(1) ) @@ -2604,28 +2604,28 @@ mod tests { let program = new_test_loaded_program(0, 1); assert!( - LoadedPrograms::::matches_loaded_program_criteria( + ProgramCache::::matches_loaded_program_criteria( &program, &LoadedProgramMatchCriteria::NoCriteria ) ); assert!( - !LoadedPrograms::::matches_loaded_program_criteria( + !ProgramCache::::matches_loaded_program_criteria( &program, &LoadedProgramMatchCriteria::Tombstone ) ); assert!( - LoadedPrograms::::matches_loaded_program_criteria( + ProgramCache::::matches_loaded_program_criteria( &program, &LoadedProgramMatchCriteria::DeployedOnOrAfterSlot(0) ) ); assert!( - !LoadedPrograms::::matches_loaded_program_criteria( + !ProgramCache::::matches_loaded_program_criteria( &program, &LoadedProgramMatchCriteria::DeployedOnOrAfterSlot(1) ) @@ -2638,28 +2638,28 @@ mod tests { )); assert!( - LoadedPrograms::::matches_loaded_program_criteria( + ProgramCache::::matches_loaded_program_criteria( &program, &LoadedProgramMatchCriteria::NoCriteria ) ); assert!( - !LoadedPrograms::::matches_loaded_program_criteria( + !ProgramCache::::matches_loaded_program_criteria( &program, &LoadedProgramMatchCriteria::Tombstone ) ); assert!( - LoadedPrograms::::matches_loaded_program_criteria( + ProgramCache::::matches_loaded_program_criteria( &program, &LoadedProgramMatchCriteria::DeployedOnOrAfterSlot(0) ) ); assert!( - !LoadedPrograms::::matches_loaded_program_criteria( + !ProgramCache::::matches_loaded_program_criteria( &program, &LoadedProgramMatchCriteria::DeployedOnOrAfterSlot(1) ) diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index 
388c2f4a15f529..e2ab858660361f 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -99,7 +99,7 @@ use { compute_budget_processor::process_compute_budget_instructions, invoke_context::BuiltinFunctionWithContext, loaded_programs::{ - LoadedProgram, LoadedProgramMatchCriteria, LoadedProgramType, LoadedPrograms, + LoadedProgram, LoadedProgramMatchCriteria, LoadedProgramType, ProgramCache, ProgramRuntimeEnvironments, }, runtime_config::RuntimeConfig, @@ -547,7 +547,7 @@ impl PartialEq for Bank { accounts_data_size_delta_off_chain: _, fee_structure: _, incremental_snapshot_persistence: _, - loaded_programs_cache: _, + program_cache: _, epoch_reward_status: _, transaction_processor: _, check_program_modification_slot: _, @@ -806,7 +806,7 @@ pub struct Bank { pub incremental_snapshot_persistence: Option, - loaded_programs_cache: Arc>>, + program_cache: Arc>>, epoch_reward_status: EpochRewardStatus, @@ -993,7 +993,7 @@ impl Bank { accounts_data_size_delta_on_chain: AtomicI64::new(0), accounts_data_size_delta_off_chain: AtomicI64::new(0), fee_structure: FeeStructure::default(), - loaded_programs_cache: Arc::new(RwLock::new(LoadedPrograms::new( + program_cache: Arc::new(RwLock::new(ProgramCache::new( Slot::default(), Epoch::default(), ))), @@ -1008,7 +1008,7 @@ impl Bank { bank.epoch_schedule.clone(), bank.fee_structure.clone(), bank.runtime_config.clone(), - bank.loaded_programs_cache.clone(), + bank.program_cache.clone(), ); let accounts_data_size_initial = bank.get_total_accounts_stats().unwrap().data_len as u64; @@ -1315,7 +1315,7 @@ impl Bank { accounts_data_size_delta_on_chain: AtomicI64::new(0), accounts_data_size_delta_off_chain: AtomicI64::new(0), fee_structure: parent.fee_structure.clone(), - loaded_programs_cache: parent.loaded_programs_cache.clone(), + program_cache: parent.program_cache.clone(), epoch_reward_status: parent.epoch_reward_status.clone(), transaction_processor: TransactionBatchProcessor::default(), check_program_modification_slot: false, @@ 
-1327,7 +1327,7 @@ impl Bank { new.epoch_schedule.clone(), new.fee_structure.clone(), new.runtime_config.clone(), - new.loaded_programs_cache.clone(), + new.program_cache.clone(), ); let (_, ancestors_time_us) = measure_us!({ @@ -1367,13 +1367,12 @@ impl Bank { .min(slots_in_epoch) .checked_div(2) .unwrap(); - let mut loaded_programs_cache = new.loaded_programs_cache.write().unwrap(); - if loaded_programs_cache.upcoming_environments.is_some() { - if let Some((key, program_to_recompile)) = - loaded_programs_cache.programs_to_recompile.pop() + let mut program_cache = new.program_cache.write().unwrap(); + if program_cache.upcoming_environments.is_some() { + if let Some((key, program_to_recompile)) = program_cache.programs_to_recompile.pop() { - let effective_epoch = loaded_programs_cache.latest_root_epoch.saturating_add(1); - drop(loaded_programs_cache); + let effective_epoch = program_cache.latest_root_epoch.saturating_add(1); + drop(program_cache); let recompiled = new.load_program(&key, false, effective_epoch); recompiled .tx_usage_counter @@ -1381,17 +1380,17 @@ impl Bank { recompiled .ix_usage_counter .fetch_add(program_to_recompile.ix_usage_counter.load(Relaxed), Relaxed); - let mut loaded_programs_cache = new.loaded_programs_cache.write().unwrap(); - loaded_programs_cache.assign_program(key, recompiled); + let mut program_cache = new.program_cache.write().unwrap(); + program_cache.assign_program(key, recompiled); } - } else if new.epoch() != loaded_programs_cache.latest_root_epoch + } else if new.epoch() != program_cache.latest_root_epoch || slot_index.saturating_add(slots_in_recompilation_phase) >= slots_in_epoch { // Anticipate the upcoming program runtime environment for the next epoch, // so we can try to recompile loaded programs before the feature transition hits. 
- drop(loaded_programs_cache); + drop(program_cache); let (feature_set, _new_feature_activations) = new.compute_active_feature_set(true); - let mut loaded_programs_cache = new.loaded_programs_cache.write().unwrap(); + let mut program_cache = new.program_cache.write().unwrap(); let program_runtime_environment_v1 = create_program_runtime_environment_v1( &feature_set, &new.runtime_config.compute_budget.unwrap_or_default(), @@ -1403,7 +1402,7 @@ impl Bank { &new.runtime_config.compute_budget.unwrap_or_default(), false, /* debugging_features */ ); - let mut upcoming_environments = loaded_programs_cache.environments.clone(); + let mut upcoming_environments = program_cache.environments.clone(); let changed_program_runtime_v1 = *upcoming_environments.program_runtime_v1 != program_runtime_environment_v1; let changed_program_runtime_v2 = @@ -1416,10 +1415,10 @@ impl Bank { upcoming_environments.program_runtime_v2 = Arc::new(program_runtime_environment_v2); } - loaded_programs_cache.upcoming_environments = Some(upcoming_environments); - loaded_programs_cache.programs_to_recompile = loaded_programs_cache + program_cache.upcoming_environments = Some(upcoming_environments); + program_cache.programs_to_recompile = program_cache .get_flattened_entries(changed_program_runtime_v1, changed_program_runtime_v2); - loaded_programs_cache + program_cache .programs_to_recompile .sort_by_cached_key(|(_id, program)| program.decayed_usage_counter(slot)); } @@ -1464,32 +1463,32 @@ impl Bank { ); parent - .loaded_programs_cache + .program_cache .read() .unwrap() .stats .submit(parent.slot()); - new.loaded_programs_cache.write().unwrap().stats.reset(); + new.program_cache.write().unwrap().stats.reset(); new } pub fn set_fork_graph_in_program_cache(&self, fork_graph: Arc>) { - self.loaded_programs_cache + self.program_cache .write() .unwrap() .set_fork_graph(fork_graph); } pub fn prune_program_cache(&self, new_root_slot: Slot, new_root_epoch: Epoch) { - self.loaded_programs_cache + 
self.program_cache .write() .unwrap() .prune(new_root_slot, new_root_epoch); } pub fn prune_program_cache_by_deployment_slot(&self, deployment_slot: Slot) { - self.loaded_programs_cache + self.program_cache .write() .unwrap() .prune_by_deployment_slot(deployment_slot); @@ -1497,7 +1496,7 @@ impl Bank { pub fn get_runtime_environments_for_slot(&self, slot: Slot) -> ProgramRuntimeEnvironments { let epoch = self.epoch_schedule.get_epoch(slot); - self.loaded_programs_cache + self.program_cache .read() .unwrap() .get_environments_for_epoch(epoch) @@ -1863,10 +1862,7 @@ impl Bank { accounts_data_size_delta_on_chain: AtomicI64::new(0), accounts_data_size_delta_off_chain: AtomicI64::new(0), fee_structure: FeeStructure::default(), - loaded_programs_cache: Arc::new(RwLock::new(LoadedPrograms::new( - fields.slot, - fields.epoch, - ))), + program_cache: Arc::new(RwLock::new(ProgramCache::new(fields.slot, fields.epoch))), epoch_reward_status: fields.epoch_reward_status, transaction_processor: TransactionBatchProcessor::default(), check_program_modification_slot: false, @@ -1878,7 +1874,7 @@ impl Bank { bank.epoch_schedule.clone(), bank.fee_structure.clone(), bank.runtime_config.clone(), - bank.loaded_programs_cache.clone(), + bank.program_cache.clone(), ); bank.finish_init( @@ -4987,7 +4983,7 @@ impl Bank { } = execution_result { if details.status.is_ok() { - let mut cache = self.loaded_programs_cache.write().unwrap(); + let mut cache = self.program_cache.write().unwrap(); cache.merge(programs_modified_by_tx); } } @@ -6013,10 +6009,10 @@ impl Bank { } } - let mut loaded_programs_cache = self.loaded_programs_cache.write().unwrap(); - loaded_programs_cache.latest_root_slot = self.slot(); - loaded_programs_cache.latest_root_epoch = self.epoch(); - loaded_programs_cache.environments.program_runtime_v1 = Arc::new( + let mut program_cache = self.program_cache.write().unwrap(); + program_cache.latest_root_slot = self.slot(); + program_cache.latest_root_epoch = self.epoch(); + 
program_cache.environments.program_runtime_v1 = Arc::new( create_program_runtime_environment_v1( &self.feature_set, &self.runtime_config.compute_budget.unwrap_or_default(), @@ -6025,7 +6021,7 @@ impl Bank { ) .unwrap(), ); - loaded_programs_cache.environments.program_runtime_v2 = + program_cache.environments.program_runtime_v2 = Arc::new(create_program_runtime_environment_v2( &self.runtime_config.compute_budget.unwrap_or_default(), false, /* debugging_features */ @@ -7094,7 +7090,7 @@ impl Bank { debug!("Adding program {} under {:?}", name, program_id); self.add_builtin_account(name.as_str(), &program_id, false); self.builtin_programs.insert(program_id); - self.loaded_programs_cache + self.program_cache .write() .unwrap() .assign_program(program_id, Arc::new(builtin)); @@ -7399,7 +7395,7 @@ impl Bank { self.store_account(new_address, &AccountSharedData::default()); // Unload a program from the bank's cache - self.loaded_programs_cache + self.program_cache .write() .unwrap() .remove_programs([*old_address].into_iter()); diff --git a/runtime/src/bank/tests.rs b/runtime/src/bank/tests.rs index 6960f220244998..f104c8ee2b963d 100644 --- a/runtime/src/bank/tests.rs +++ b/runtime/src/bank/tests.rs @@ -11910,14 +11910,14 @@ fn test_feature_activation_loaded_programs_recompilation_phase() { goto_end_of_slot(bank.clone()); let bank = new_bank_from_parent_with_bank_forks(&bank_forks, bank, &Pubkey::default(), 16); let current_env = bank - .loaded_programs_cache + .program_cache .read() .unwrap() .get_environments_for_epoch(0) .program_runtime_v1 .clone(); let upcoming_env = bank - .loaded_programs_cache + .program_cache .read() .unwrap() .get_environments_for_epoch(1) @@ -11926,9 +11926,8 @@ fn test_feature_activation_loaded_programs_recompilation_phase() { // Advance the bank to recompile the program. 
{ - let loaded_programs_cache = bank.loaded_programs_cache.read().unwrap(); - let slot_versions = - loaded_programs_cache.get_slot_versions_for_tests(&program_keypair.pubkey()); + let program_cache = bank.program_cache.read().unwrap(); + let slot_versions = program_cache.get_slot_versions_for_tests(&program_keypair.pubkey()); assert_eq!(slot_versions.len(), 1); assert!(Arc::ptr_eq( slot_versions[0].program.get_environment().unwrap(), @@ -11938,9 +11937,8 @@ fn test_feature_activation_loaded_programs_recompilation_phase() { goto_end_of_slot(bank.clone()); let bank = new_from_parent_with_fork_next_slot(bank, bank_forks.as_ref()); { - let loaded_programs_cache = bank.loaded_programs_cache.read().unwrap(); - let slot_versions = - loaded_programs_cache.get_slot_versions_for_tests(&program_keypair.pubkey()); + let program_cache = bank.program_cache.read().unwrap(); + let slot_versions = program_cache.get_slot_versions_for_tests(&program_keypair.pubkey()); assert_eq!(slot_versions.len(), 2); assert!(Arc::ptr_eq( slot_versions[0].program.get_environment().unwrap(), diff --git a/svm/src/transaction_processor.rs b/svm/src/transaction_processor.rs index 40ccf81561f26e..f28820c41cceed 100644 --- a/svm/src/transaction_processor.rs +++ b/svm/src/transaction_processor.rs @@ -17,7 +17,7 @@ use { compute_budget::ComputeBudget, loaded_programs::{ ForkGraph, LoadProgramMetrics, LoadedProgram, LoadedProgramMatchCriteria, - LoadedProgramType, LoadedPrograms, LoadedProgramsForTxBatch, ProgramRuntimeEnvironment, + LoadedProgramType, LoadedProgramsForTxBatch, ProgramCache, ProgramRuntimeEnvironment, ProgramRuntimeEnvironments, DELAY_VISIBILITY_SLOT_OFFSET, }, log_collector::LogCollector, @@ -137,7 +137,8 @@ pub struct TransactionBatchProcessor { pub sysvar_cache: RwLock, - pub loaded_programs_cache: Arc>>, + /// Programs required for transaction batch processing + pub program_cache: Arc>>, } impl Debug for TransactionBatchProcessor { @@ -149,7 +150,7 @@ impl Debug for 
TransactionBatchProcessor { .field("fee_structure", &self.fee_structure) .field("runtime_config", &self.runtime_config) .field("sysvar_cache", &self.sysvar_cache) - .field("loaded_programs_cache", &self.loaded_programs_cache) + .field("program_cache", &self.program_cache) .finish() } } @@ -163,7 +164,7 @@ impl Default for TransactionBatchProcessor { fee_structure: FeeStructure::default(), runtime_config: Arc::::default(), sysvar_cache: RwLock::::default(), - loaded_programs_cache: Arc::new(RwLock::new(LoadedPrograms::new( + program_cache: Arc::new(RwLock::new(ProgramCache::new( Slot::default(), Epoch::default(), ))), @@ -178,7 +179,7 @@ impl TransactionBatchProcessor { epoch_schedule: EpochSchedule, fee_structure: FeeStructure, runtime_config: Arc, - loaded_programs_cache: Arc>>, + program_cache: Arc>>, ) -> Self { Self { slot, @@ -187,7 +188,7 @@ impl TransactionBatchProcessor { fee_structure, runtime_config, sysvar_cache: RwLock::::default(), - loaded_programs_cache, + program_cache, } } @@ -308,7 +309,7 @@ impl TransactionBatchProcessor { execution_time.stop(); const SHRINK_LOADED_PROGRAMS_TO_PERCENTAGE: u8 = 90; - self.loaded_programs_cache + self.program_cache .write() .unwrap() .evict_using_2s_random_selection( @@ -374,8 +375,8 @@ impl TransactionBatchProcessor { result } - /// Load program with a specific pubkey from loaded programs - /// cache, and update the program's access slot as a side-effect. + /// Load program with a specific pubkey from program cache, and + /// update the program's access slot as a side-effect. 
pub fn load_program_with_pubkey( &self, callbacks: &CB, @@ -383,8 +384,8 @@ impl TransactionBatchProcessor { reload: bool, effective_epoch: Epoch, ) -> Arc { - let loaded_programs_cache = self.loaded_programs_cache.read().unwrap(); - let environments = loaded_programs_cache.get_environments_for_epoch(effective_epoch); + let program_cache = self.program_cache.read().unwrap(); + let environments = program_cache.get_environments_for_epoch(effective_epoch); let mut load_program_metrics = LoadProgramMetrics { program_id: pubkey.to_string(), ..LoadProgramMetrics::default() @@ -463,10 +464,10 @@ impl TransactionBatchProcessor { load_program_metrics.submit_datapoint(&mut timings); if !Arc::ptr_eq( &environments.program_runtime_v1, - &loaded_programs_cache.environments.program_runtime_v1, + &program_cache.environments.program_runtime_v1, ) || !Arc::ptr_eq( &environments.program_runtime_v2, - &loaded_programs_cache.environments.program_runtime_v2, + &program_cache.environments.program_runtime_v2, ) { // There can be two entries per program when the environment changes. // One for the old environment before the epoch boundary and one for the new environment after the epoch boundary. @@ -502,21 +503,18 @@ impl TransactionBatchProcessor { loop { let (program_to_load, task_cookie, task_waiter) = { // Lock the global cache. - let mut loaded_programs_cache = self.loaded_programs_cache.write().unwrap(); + let mut program_cache = self.program_cache.write().unwrap(); // Initialize our local cache. let is_first_round = loaded_programs_for_txs.is_none(); if is_first_round { loaded_programs_for_txs = Some(LoadedProgramsForTxBatch::new( self.slot, - loaded_programs_cache - .get_environments_for_epoch(self.epoch) - .clone(), + program_cache.get_environments_for_epoch(self.epoch).clone(), )); } // Submit our last completed loading task. 
if let Some((key, program)) = program_to_store.take() { - if loaded_programs_cache - .finish_cooperative_loading_task(self.slot, key, program) + if program_cache.finish_cooperative_loading_task(self.slot, key, program) && limit_to_load_programs { // This branch is taken when there is an error in assigning a program to a @@ -524,21 +522,19 @@ impl TransactionBatchProcessor { // tests purposes. let mut ret = LoadedProgramsForTxBatch::new( self.slot, - loaded_programs_cache - .get_environments_for_epoch(self.epoch) - .clone(), + program_cache.get_environments_for_epoch(self.epoch).clone(), ); ret.hit_max_limit = true; return ret; } } // Figure out which program needs to be loaded next. - let program_to_load = loaded_programs_cache.extract( + let program_to_load = program_cache.extract( &mut missing_programs, loaded_programs_for_txs.as_mut().unwrap(), is_first_round, ); - let task_waiter = Arc::clone(&loaded_programs_cache.loading_task_waiter); + let task_waiter = Arc::clone(&program_cache.loading_task_waiter); (program_to_load, task_waiter.cookie(), task_waiter) // Unlock the global cache again. 
}; @@ -1266,7 +1262,7 @@ mod tests { 0, LoadedProgramType::FailedVerification( batch_processor - .loaded_programs_cache + .program_cache .read() .unwrap() .get_environments_for_epoch(20) @@ -1294,7 +1290,7 @@ mod tests { 0, LoadedProgramType::FailedVerification( batch_processor - .loaded_programs_cache + .program_cache .read() .unwrap() .get_environments_for_epoch(20) @@ -1367,7 +1363,7 @@ mod tests { 0, LoadedProgramType::FailedVerification( batch_processor - .loaded_programs_cache + .program_cache .read() .unwrap() .get_environments_for_epoch(0) @@ -1447,7 +1443,7 @@ mod tests { 0, LoadedProgramType::FailedVerification( batch_processor - .loaded_programs_cache + .program_cache .read() .unwrap() .get_environments_for_epoch(0) @@ -1506,7 +1502,7 @@ mod tests { let batch_processor = TransactionBatchProcessor::::default(); batch_processor - .loaded_programs_cache + .program_cache .write() .unwrap() .upcoming_environments = Some(ProgramRuntimeEnvironments::default()); @@ -1801,11 +1797,8 @@ mod tests { // Case 1 let mut mock_bank = MockBankCallback::default(); let batch_processor = TransactionBatchProcessor::::default(); - batch_processor - .loaded_programs_cache - .write() - .unwrap() - .fork_graph = Some(Arc::new(RwLock::new(TestForkGraph {}))); + batch_processor.program_cache.write().unwrap().fork_graph = + Some(Arc::new(RwLock::new(TestForkGraph {}))); let key1 = Pubkey::new_unique(); let key2 = Pubkey::new_unique(); let owner = Pubkey::new_unique(); diff --git a/svm/tests/integration_test.rs b/svm/tests/integration_test.rs index 700b9c2f6a0ad1..45409a3b146848 100644 --- a/svm/tests/integration_test.rs +++ b/svm/tests/integration_test.rs @@ -7,7 +7,7 @@ use { compute_budget::ComputeBudget, invoke_context::InvokeContext, loaded_programs::{ - BlockRelation, ForkGraph, LoadedProgram, LoadedPrograms, ProgramRuntimeEnvironments, + BlockRelation, ForkGraph, LoadedProgram, ProgramCache, ProgramRuntimeEnvironments, }, runtime_config::RuntimeConfig, solana_rbpf::{ @@ 
-113,8 +113,8 @@ fn create_custom_environment<'a>() -> BuiltinProgram> { fn create_executable_environment( mock_bank: &mut MockBankCallback, -) -> (LoadedPrograms, Vec) { - let mut programs_cache = LoadedPrograms::::new(0, 20); +) -> (ProgramCache, Vec) { + let mut program_cache = ProgramCache::::new(0, 20); // We must register the bpf loader account as a loadable account, otherwise programs // won't execute. @@ -127,7 +127,7 @@ fn create_executable_environment( .insert(bpf_loader::id(), account_data); // The bpf loader needs an executable as well - programs_cache.assign_program( + program_cache.assign_program( bpf_loader::id(), Arc::new(LoadedProgram::new_builtin( DEPLOYMENT_SLOT, @@ -136,7 +136,7 @@ fn create_executable_environment( )), ); - programs_cache.environments = ProgramRuntimeEnvironments { + program_cache.environments = ProgramRuntimeEnvironments { program_runtime_v1: Arc::new(create_custom_environment()), // We are not using program runtime v2 program_runtime_v2: Arc::new(BuiltinProgram::new_loader( @@ -145,11 +145,11 @@ fn create_executable_environment( )), }; - programs_cache.fork_graph = Some(Arc::new(RwLock::new(MockForkGraph {}))); + program_cache.fork_graph = Some(Arc::new(RwLock::new(MockForkGraph {}))); // Inform SVM of the registered builins let registered_built_ins = vec![bpf_loader::id()]; - (programs_cache, registered_built_ins) + (program_cache, registered_built_ins) } fn prepare_transactions( @@ -224,15 +224,15 @@ fn prepare_transactions( fn svm_integration() { let mut mock_bank = MockBankCallback::default(); let (transactions, mut check_results) = prepare_transactions(&mut mock_bank); - let (programs_cache, builtins) = create_executable_environment(&mut mock_bank); - let programs_cache = Arc::new(RwLock::new(programs_cache)); + let (program_cache, builtins) = create_executable_environment(&mut mock_bank); + let program_cache = Arc::new(RwLock::new(program_cache)); let batch_processor = TransactionBatchProcessor::::new( EXECUTION_SLOT, 
EXECUTION_EPOCH, EpochSchedule::default(), FeeStructure::default(), Arc::new(RuntimeConfig::default()), - programs_cache.clone(), + program_cache.clone(), ); let mut error_counter = TransactionErrorMetrics::default(); From 973d05c098b24364ad29a5066e78884c5c3a7537 Mon Sep 17 00:00:00 2001 From: steviez Date: Wed, 20 Mar 2024 15:07:04 -0500 Subject: [PATCH 25/28] Allow configuration of replay thread pools from CLI (#236) Bubble up the constants to the CLI that control the sizes of the following two thread pools: - The thread pool used to replay multiple forks in parallel - The thread pool used to execute transactions in parallel --- core/src/replay_stage.rs | 22 ++--- core/src/tvu.rs | 22 ++++- core/src/validator.rs | 14 ++- local-cluster/src/validator_configs.rs | 3 +- validator/src/cli.rs | 19 ++-- validator/src/cli/thread_args.rs | 115 +++++++++++++++++++++++++ validator/src/main.rs | 8 +- 7 files changed, 179 insertions(+), 24 deletions(-) create mode 100644 validator/src/cli/thread_args.rs diff --git a/core/src/replay_stage.rs b/core/src/replay_stage.rs index 8a29d037dedf3c..48641297f63fcc 100644 --- a/core/src/replay_stage.rs +++ b/core/src/replay_stage.rs @@ -51,7 +51,6 @@ use { solana_measure::measure::Measure, solana_poh::poh_recorder::{PohLeaderStatus, PohRecorder, GRACE_TICKS_FACTOR, MAX_GRACE_SLOTS}, solana_program_runtime::timings::ExecuteTimings, - solana_rayon_threadlimit::get_max_thread_count, solana_rpc::{ optimistically_confirmed_bank_tracker::{BankNotification, BankNotificationSenderConfig}, rpc_subscriptions::RpcSubscriptions, @@ -80,6 +79,7 @@ use { solana_vote_program::vote_state::VoteTransaction, std::{ collections::{HashMap, HashSet}, + num::NonZeroUsize, result, sync::{ atomic::{AtomicBool, AtomicU64, Ordering}, @@ -95,11 +95,9 @@ pub const SUPERMINORITY_THRESHOLD: f64 = 1f64 / 3f64; pub const MAX_UNCONFIRMED_SLOTS: usize = 5; pub const DUPLICATE_LIVENESS_THRESHOLD: f64 = 0.1; pub const DUPLICATE_THRESHOLD: f64 = 1.0 - SWITCH_FORK_THRESHOLD - 
DUPLICATE_LIVENESS_THRESHOLD; + const MAX_VOTE_SIGNATURES: usize = 200; const MAX_VOTE_REFRESH_INTERVAL_MILLIS: usize = 5000; -// Expect this number to be small enough to minimize thread pool overhead while large enough -// to be able to replay all active forks at the same time in most cases. -const MAX_CONCURRENT_FORKS_TO_REPLAY: usize = 4; const MAX_REPAIR_RETRY_LOOP_ATTEMPTS: usize = 10; #[derive(PartialEq, Eq, Debug)] @@ -291,7 +289,8 @@ pub struct ReplayStageConfig { // Stops voting until this slot has been reached. Should be used to avoid // duplicate voting which can lead to slashing. pub wait_to_vote_slot: Option, - pub replay_slots_concurrently: bool, + pub replay_forks_threads: NonZeroUsize, + pub replay_transactions_threads: NonZeroUsize, } /// Timing information for the ReplayStage main processing loop @@ -574,7 +573,8 @@ impl ReplayStage { ancestor_hashes_replay_update_sender, tower_storage, wait_to_vote_slot, - replay_slots_concurrently, + replay_forks_threads, + replay_transactions_threads, } = config; trace!("replay stage"); @@ -654,19 +654,19 @@ impl ReplayStage { ) }; // Thread pool to (maybe) replay multiple threads in parallel - let replay_mode = if replay_slots_concurrently { + let replay_mode = if replay_forks_threads.get() == 1 { + ForkReplayMode::Serial + } else { let pool = rayon::ThreadPoolBuilder::new() - .num_threads(MAX_CONCURRENT_FORKS_TO_REPLAY) + .num_threads(replay_forks_threads.get()) .thread_name(|i| format!("solReplayFork{i:02}")) .build() .expect("new rayon threadpool"); ForkReplayMode::Parallel(pool) - } else { - ForkReplayMode::Serial }; // Thread pool to replay multiple transactions within one block in parallel let replay_tx_thread_pool = rayon::ThreadPoolBuilder::new() - .num_threads(get_max_thread_count()) + .num_threads(replay_transactions_threads.get()) .thread_name(|i| format!("solReplayTx{i:02}")) .build() .expect("new rayon threadpool"); diff --git a/core/src/tvu.rs b/core/src/tvu.rs index 
47bc9a7905da5f..2e64fe0675891b 100644 --- a/core/src/tvu.rs +++ b/core/src/tvu.rs @@ -53,6 +53,7 @@ use { std::{ collections::HashSet, net::{SocketAddr, UdpSocket}, + num::NonZeroUsize, sync::{atomic::AtomicBool, Arc, RwLock}, thread::{self, JoinHandle}, }, @@ -81,7 +82,6 @@ pub struct TvuSockets { pub ancestor_hashes_requests: UdpSocket, } -#[derive(Default)] pub struct TvuConfig { pub max_ledger_shreds: Option, pub shred_version: u16, @@ -90,7 +90,22 @@ pub struct TvuConfig { // Validators which should be given priority when serving repairs pub repair_whitelist: Arc>>, pub wait_for_vote_to_start_leader: bool, - pub replay_slots_concurrently: bool, + pub replay_forks_threads: NonZeroUsize, + pub replay_transactions_threads: NonZeroUsize, +} + +impl Default for TvuConfig { + fn default() -> Self { + Self { + max_ledger_shreds: None, + shred_version: 0, + repair_validators: None, + repair_whitelist: Arc::new(RwLock::new(HashSet::default())), + wait_for_vote_to_start_leader: false, + replay_forks_threads: NonZeroUsize::new(1).expect("1 is non-zero"), + replay_transactions_threads: NonZeroUsize::new(1).expect("1 is non-zero"), + } + } } impl Tvu { @@ -265,7 +280,8 @@ impl Tvu { ancestor_hashes_replay_update_sender, tower_storage: tower_storage.clone(), wait_to_vote_slot, - replay_slots_concurrently: tvu_config.replay_slots_concurrently, + replay_forks_threads: tvu_config.replay_forks_threads, + replay_transactions_threads: tvu_config.replay_transactions_threads, }; let (voting_sender, voting_receiver) = unbounded(); diff --git a/core/src/validator.rs b/core/src/validator.rs index 3d2a93daecba2f..98a267aeafc71a 100644 --- a/core/src/validator.rs +++ b/core/src/validator.rs @@ -74,6 +74,7 @@ use { poh_service::{self, PohService}, }, solana_program_runtime::runtime_config::RuntimeConfig, + solana_rayon_threadlimit::get_max_thread_count, solana_rpc::{ max_slots::MaxSlots, optimistically_confirmed_bank_tracker::{ @@ -123,6 +124,7 @@ use { std::{ collections::{HashMap, 
HashSet}, net::SocketAddr, + num::NonZeroUsize, path::{Path, PathBuf}, sync::{ atomic::{AtomicBool, AtomicU64, Ordering}, @@ -260,7 +262,6 @@ pub struct ValidatorConfig { pub wait_to_vote_slot: Option, pub ledger_column_options: LedgerColumnOptions, pub runtime_config: RuntimeConfig, - pub replay_slots_concurrently: bool, pub banking_trace_dir_byte_limit: banking_trace::DirByteLimit, pub block_verification_method: BlockVerificationMethod, pub block_production_method: BlockProductionMethod, @@ -268,6 +269,8 @@ pub struct ValidatorConfig { pub use_snapshot_archives_at_startup: UseSnapshotArchivesAtStartup, pub wen_restart_proto_path: Option, pub unified_scheduler_handler_threads: Option, + pub replay_forks_threads: NonZeroUsize, + pub replay_transactions_threads: NonZeroUsize, } impl Default for ValidatorConfig { @@ -328,7 +331,6 @@ impl Default for ValidatorConfig { wait_to_vote_slot: None, ledger_column_options: LedgerColumnOptions::default(), runtime_config: RuntimeConfig::default(), - replay_slots_concurrently: false, banking_trace_dir_byte_limit: 0, block_verification_method: BlockVerificationMethod::default(), block_production_method: BlockProductionMethod::default(), @@ -336,6 +338,8 @@ impl Default for ValidatorConfig { use_snapshot_archives_at_startup: UseSnapshotArchivesAtStartup::default(), wen_restart_proto_path: None, unified_scheduler_handler_threads: None, + replay_forks_threads: NonZeroUsize::new(1).expect("1 is non-zero"), + replay_transactions_threads: NonZeroUsize::new(1).expect("1 is non-zero"), } } } @@ -346,6 +350,9 @@ impl ValidatorConfig { enforce_ulimit_nofile: false, rpc_config: JsonRpcConfig::default_for_test(), block_production_method: BlockProductionMethod::ThreadLocalMultiIterator, + replay_forks_threads: NonZeroUsize::new(1).expect("1 is non-zero"), + replay_transactions_threads: NonZeroUsize::new(get_max_thread_count()) + .expect("thread count is non-zero"), ..Self::default() } } @@ -1305,7 +1312,8 @@ impl Validator { 
repair_validators: config.repair_validators.clone(), repair_whitelist: config.repair_whitelist.clone(), wait_for_vote_to_start_leader, - replay_slots_concurrently: config.replay_slots_concurrently, + replay_forks_threads: config.replay_forks_threads, + replay_transactions_threads: config.replay_transactions_threads, }, &max_slots, block_metadata_notifier, diff --git a/local-cluster/src/validator_configs.rs b/local-cluster/src/validator_configs.rs index 33883bb02c1d77..45045203412a73 100644 --- a/local-cluster/src/validator_configs.rs +++ b/local-cluster/src/validator_configs.rs @@ -61,7 +61,6 @@ pub fn safe_clone_config(config: &ValidatorConfig) -> ValidatorConfig { wait_to_vote_slot: config.wait_to_vote_slot, ledger_column_options: config.ledger_column_options.clone(), runtime_config: config.runtime_config.clone(), - replay_slots_concurrently: config.replay_slots_concurrently, banking_trace_dir_byte_limit: config.banking_trace_dir_byte_limit, block_verification_method: config.block_verification_method.clone(), block_production_method: config.block_production_method.clone(), @@ -69,6 +68,8 @@ pub fn safe_clone_config(config: &ValidatorConfig) -> ValidatorConfig { use_snapshot_archives_at_startup: config.use_snapshot_archives_at_startup, wen_restart_proto_path: config.wen_restart_proto_path.clone(), unified_scheduler_handler_threads: config.unified_scheduler_handler_threads, + replay_forks_threads: config.replay_forks_threads, + replay_transactions_threads: config.replay_transactions_threads, } } diff --git a/validator/src/cli.rs b/validator/src/cli.rs index f127273c8da2f3..8cae6667f87a34 100644 --- a/validator/src/cli.rs +++ b/validator/src/cli.rs @@ -52,6 +52,9 @@ use { std::{path::PathBuf, str::FromStr}, }; +pub mod thread_args; +use thread_args::{thread_args, DefaultThreadArgs}; + const EXCLUDE_KEY: &str = "account-index-exclude-key"; const INCLUDE_KEY: &str = "account-index-include-key"; // The default minimal snapshot download speed (bytes/second) @@ -1466,11 
+1469,6 @@ pub fn app<'a>(version: &'a str, default_args: &'a DefaultArgs) -> App<'a, 'a> { .value_name("BYTES") .help("Maximum number of bytes written to the program log before truncation"), ) - .arg( - Arg::with_name("replay_slots_concurrently") - .long("replay-slots-concurrently") - .help("Allow concurrent replay of slots on different forks"), - ) .arg( Arg::with_name("banking_trace_dir_byte_limit") // expose friendly alternative name to cli than internal @@ -1555,6 +1553,7 @@ pub fn app<'a>(version: &'a str, default_args: &'a DefaultArgs) -> App<'a, 'a> { ", ), ) + .args(&thread_args(&default_args.thread_args)) .args(&get_deprecated_arguments()) .after_help("The default subcommand is run") .subcommand( @@ -2073,6 +2072,13 @@ fn deprecated_arguments() -> Vec { .long("no-rocksdb-compaction") .takes_value(false) .help("Disable manual compaction of the ledger database")); + add_arg!( + Arg::with_name("replay_slots_concurrently") + .long("replay-slots-concurrently") + .help("Allow concurrent replay of slots on different forks") + .conflicts_with("replay_forks_threads"), + replaced_by: "replay_forks_threads", + usage_warning: "Equivalent behavior to this flag would be --replay-forks-threads 4"); add_arg!(Arg::with_name("rocksdb_compaction_interval") .long("rocksdb-compaction-interval-slots") .value_name("ROCKSDB_COMPACTION_INTERVAL_SLOTS") @@ -2195,6 +2201,8 @@ pub struct DefaultArgs { pub banking_trace_dir_byte_limit: String, pub wen_restart_path: String, + + pub thread_args: DefaultThreadArgs, } impl DefaultArgs { @@ -2277,6 +2285,7 @@ impl DefaultArgs { wait_for_restart_window_max_delinquent_stake: "5".to_string(), banking_trace_dir_byte_limit: BANKING_TRACE_DIR_DEFAULT_BYTE_LIMIT.to_string(), wen_restart_path: "wen_restart_progress.proto".to_string(), + thread_args: DefaultThreadArgs::default(), } } } diff --git a/validator/src/cli/thread_args.rs b/validator/src/cli/thread_args.rs new file mode 100644 index 00000000000000..53d8cf15d984a0 --- /dev/null +++ 
b/validator/src/cli/thread_args.rs @@ -0,0 +1,115 @@ +//! Arguments for controlling the number of threads allocated for various tasks + +use { + clap::{value_t_or_exit, Arg, ArgMatches}, + solana_clap_utils::{hidden_unless_forced, input_validators::is_within_range}, + solana_rayon_threadlimit::get_max_thread_count, + std::{num::NonZeroUsize, ops::RangeInclusive}, +}; + +// Need this struct to provide &str whose lifetime matches that of the CLAP Arg's +pub struct DefaultThreadArgs { + pub replay_forks_threads: String, + pub replay_transactions_threads: String, +} + +impl Default for DefaultThreadArgs { + fn default() -> Self { + Self { + replay_forks_threads: ReplayForksThreadsArg::default().to_string(), + replay_transactions_threads: ReplayTransactionsThreadsArg::default().to_string(), + } + } +} + +pub fn thread_args<'a>(defaults: &DefaultThreadArgs) -> Vec> { + vec![ + new_thread_arg::(&defaults.replay_forks_threads), + new_thread_arg::(&defaults.replay_transactions_threads), + ] +} + +fn new_thread_arg<'a, T: ThreadArg>(default: &str) -> Arg<'_, 'a> { + Arg::with_name(T::NAME) + .long(T::LONG_NAME) + .takes_value(true) + .value_name("NUMBER") + .default_value(default) + .validator(|num| is_within_range(num, T::range())) + .hidden(hidden_unless_forced()) + .help(T::HELP) +} + +pub struct NumThreadConfig { + pub replay_forks_threads: NonZeroUsize, + pub replay_transactions_threads: NonZeroUsize, +} + +pub fn parse_num_threads_args(matches: &ArgMatches) -> NumThreadConfig { + NumThreadConfig { + replay_forks_threads: if matches.is_present("replay_slots_concurrently") { + NonZeroUsize::new(4).expect("4 is non-zero") + } else { + value_t_or_exit!(matches, ReplayForksThreadsArg::NAME, NonZeroUsize) + }, + replay_transactions_threads: value_t_or_exit!( + matches, + ReplayTransactionsThreadsArg::NAME, + NonZeroUsize + ), + } +} + +/// Configuration for CLAP arguments that control the number of threads for various functions +trait ThreadArg { + /// The argument's name + 
const NAME: &'static str; + /// The argument's long name + const LONG_NAME: &'static str; + /// The argument's help message + const HELP: &'static str; + + /// The default number of threads + fn default() -> usize; + /// The minimum allowed number of threads (inclusive) + fn min() -> usize { + 1 + } + /// The maximum allowed number of threads (inclusive) + fn max() -> usize { + // By default, no thread pool should scale over the number of the machine's threads + get_max_thread_count() + } + /// The range of allowed number of threads (inclusive on both ends) + fn range() -> RangeInclusive { + RangeInclusive::new(Self::min(), Self::max()) + } +} + +struct ReplayForksThreadsArg; +impl ThreadArg for ReplayForksThreadsArg { + const NAME: &'static str = "replay_forks_threads"; + const LONG_NAME: &'static str = "replay-forks-threads"; + const HELP: &'static str = "Number of threads to use for replay of blocks on different forks"; + + fn default() -> usize { + // Default to single threaded fork execution + 1 + } + fn max() -> usize { + // Choose a value that is small enough to limit the overhead of having a large thread pool + // while also being large enough to allow replay of all active forks in most scenarios + 4 + } +} + +struct ReplayTransactionsThreadsArg; +impl ThreadArg for ReplayTransactionsThreadsArg { + const NAME: &'static str = "replay_transactions_threads"; + const LONG_NAME: &'static str = "replay-transactions-threads"; + const HELP: &'static str = "Number of threads to use for transaction replay"; + + fn default() -> usize { + get_max_thread_count() + } +} diff --git a/validator/src/main.rs b/validator/src/main.rs index 545ecfda481d35..cdd631446d68c5 100644 --- a/validator/src/main.rs +++ b/validator/src/main.rs @@ -1331,6 +1331,11 @@ pub fn main() { let full_api = matches.is_present("full_rpc_api"); + let cli::thread_args::NumThreadConfig { + replay_forks_threads, + replay_transactions_threads, + } = cli::thread_args::parse_num_threads_args(&matches); + 
let mut validator_config = ValidatorConfig { require_tower: matches.is_present("require_tower"), tower_storage, @@ -1464,12 +1469,13 @@ pub fn main() { ..RuntimeConfig::default() }, staked_nodes_overrides: staked_nodes_overrides.clone(), - replay_slots_concurrently: matches.is_present("replay_slots_concurrently"), use_snapshot_archives_at_startup: value_t_or_exit!( matches, use_snapshot_archives_at_startup::cli::NAME, UseSnapshotArchivesAtStartup ), + replay_forks_threads, + replay_transactions_threads, ..ValidatorConfig::default() }; From 27eff8408b7223bb3c4ab70523f8a8dca3ca6645 Mon Sep 17 00:00:00 2001 From: "GHA: Update Upstream From Fork" Date: Fri, 22 Mar 2024 15:58:10 -0500 Subject: [PATCH 26/28] Revert "Allow configuration of replay thread pools from CLI (#236)" This reverts commit 973d05c098b24364ad29a5066e78884c5c3a7537. --- core/src/replay_stage.rs | 22 ++--- core/src/tvu.rs | 22 +---- core/src/validator.rs | 14 +-- local-cluster/src/validator_configs.rs | 3 +- validator/src/cli.rs | 19 ++-- validator/src/cli/thread_args.rs | 115 ------------------------- validator/src/main.rs | 8 +- 7 files changed, 24 insertions(+), 179 deletions(-) delete mode 100644 validator/src/cli/thread_args.rs diff --git a/core/src/replay_stage.rs b/core/src/replay_stage.rs index 48641297f63fcc..8a29d037dedf3c 100644 --- a/core/src/replay_stage.rs +++ b/core/src/replay_stage.rs @@ -51,6 +51,7 @@ use { solana_measure::measure::Measure, solana_poh::poh_recorder::{PohLeaderStatus, PohRecorder, GRACE_TICKS_FACTOR, MAX_GRACE_SLOTS}, solana_program_runtime::timings::ExecuteTimings, + solana_rayon_threadlimit::get_max_thread_count, solana_rpc::{ optimistically_confirmed_bank_tracker::{BankNotification, BankNotificationSenderConfig}, rpc_subscriptions::RpcSubscriptions, @@ -79,7 +80,6 @@ use { solana_vote_program::vote_state::VoteTransaction, std::{ collections::{HashMap, HashSet}, - num::NonZeroUsize, result, sync::{ atomic::{AtomicBool, AtomicU64, Ordering}, @@ -95,9 +95,11 @@ pub 
const SUPERMINORITY_THRESHOLD: f64 = 1f64 / 3f64; pub const MAX_UNCONFIRMED_SLOTS: usize = 5; pub const DUPLICATE_LIVENESS_THRESHOLD: f64 = 0.1; pub const DUPLICATE_THRESHOLD: f64 = 1.0 - SWITCH_FORK_THRESHOLD - DUPLICATE_LIVENESS_THRESHOLD; - const MAX_VOTE_SIGNATURES: usize = 200; const MAX_VOTE_REFRESH_INTERVAL_MILLIS: usize = 5000; +// Expect this number to be small enough to minimize thread pool overhead while large enough +// to be able to replay all active forks at the same time in most cases. +const MAX_CONCURRENT_FORKS_TO_REPLAY: usize = 4; const MAX_REPAIR_RETRY_LOOP_ATTEMPTS: usize = 10; #[derive(PartialEq, Eq, Debug)] @@ -289,8 +291,7 @@ pub struct ReplayStageConfig { // Stops voting until this slot has been reached. Should be used to avoid // duplicate voting which can lead to slashing. pub wait_to_vote_slot: Option, - pub replay_forks_threads: NonZeroUsize, - pub replay_transactions_threads: NonZeroUsize, + pub replay_slots_concurrently: bool, } /// Timing information for the ReplayStage main processing loop @@ -573,8 +574,7 @@ impl ReplayStage { ancestor_hashes_replay_update_sender, tower_storage, wait_to_vote_slot, - replay_forks_threads, - replay_transactions_threads, + replay_slots_concurrently, } = config; trace!("replay stage"); @@ -654,19 +654,19 @@ impl ReplayStage { ) }; // Thread pool to (maybe) replay multiple threads in parallel - let replay_mode = if replay_forks_threads.get() == 1 { - ForkReplayMode::Serial - } else { + let replay_mode = if replay_slots_concurrently { let pool = rayon::ThreadPoolBuilder::new() - .num_threads(replay_forks_threads.get()) + .num_threads(MAX_CONCURRENT_FORKS_TO_REPLAY) .thread_name(|i| format!("solReplayFork{i:02}")) .build() .expect("new rayon threadpool"); ForkReplayMode::Parallel(pool) + } else { + ForkReplayMode::Serial }; // Thread pool to replay multiple transactions within one block in parallel let replay_tx_thread_pool = rayon::ThreadPoolBuilder::new() - 
.num_threads(replay_transactions_threads.get()) + .num_threads(get_max_thread_count()) .thread_name(|i| format!("solReplayTx{i:02}")) .build() .expect("new rayon threadpool"); diff --git a/core/src/tvu.rs b/core/src/tvu.rs index 2e64fe0675891b..47bc9a7905da5f 100644 --- a/core/src/tvu.rs +++ b/core/src/tvu.rs @@ -53,7 +53,6 @@ use { std::{ collections::HashSet, net::{SocketAddr, UdpSocket}, - num::NonZeroUsize, sync::{atomic::AtomicBool, Arc, RwLock}, thread::{self, JoinHandle}, }, @@ -82,6 +81,7 @@ pub struct TvuSockets { pub ancestor_hashes_requests: UdpSocket, } +#[derive(Default)] pub struct TvuConfig { pub max_ledger_shreds: Option, pub shred_version: u16, @@ -90,22 +90,7 @@ pub struct TvuConfig { // Validators which should be given priority when serving repairs pub repair_whitelist: Arc>>, pub wait_for_vote_to_start_leader: bool, - pub replay_forks_threads: NonZeroUsize, - pub replay_transactions_threads: NonZeroUsize, -} - -impl Default for TvuConfig { - fn default() -> Self { - Self { - max_ledger_shreds: None, - shred_version: 0, - repair_validators: None, - repair_whitelist: Arc::new(RwLock::new(HashSet::default())), - wait_for_vote_to_start_leader: false, - replay_forks_threads: NonZeroUsize::new(1).expect("1 is non-zero"), - replay_transactions_threads: NonZeroUsize::new(1).expect("1 is non-zero"), - } - } + pub replay_slots_concurrently: bool, } impl Tvu { @@ -280,8 +265,7 @@ impl Tvu { ancestor_hashes_replay_update_sender, tower_storage: tower_storage.clone(), wait_to_vote_slot, - replay_forks_threads: tvu_config.replay_forks_threads, - replay_transactions_threads: tvu_config.replay_transactions_threads, + replay_slots_concurrently: tvu_config.replay_slots_concurrently, }; let (voting_sender, voting_receiver) = unbounded(); diff --git a/core/src/validator.rs b/core/src/validator.rs index 98a267aeafc71a..3d2a93daecba2f 100644 --- a/core/src/validator.rs +++ b/core/src/validator.rs @@ -74,7 +74,6 @@ use { poh_service::{self, PohService}, }, 
solana_program_runtime::runtime_config::RuntimeConfig, - solana_rayon_threadlimit::get_max_thread_count, solana_rpc::{ max_slots::MaxSlots, optimistically_confirmed_bank_tracker::{ @@ -124,7 +123,6 @@ use { std::{ collections::{HashMap, HashSet}, net::SocketAddr, - num::NonZeroUsize, path::{Path, PathBuf}, sync::{ atomic::{AtomicBool, AtomicU64, Ordering}, @@ -262,6 +260,7 @@ pub struct ValidatorConfig { pub wait_to_vote_slot: Option, pub ledger_column_options: LedgerColumnOptions, pub runtime_config: RuntimeConfig, + pub replay_slots_concurrently: bool, pub banking_trace_dir_byte_limit: banking_trace::DirByteLimit, pub block_verification_method: BlockVerificationMethod, pub block_production_method: BlockProductionMethod, @@ -269,8 +268,6 @@ pub struct ValidatorConfig { pub use_snapshot_archives_at_startup: UseSnapshotArchivesAtStartup, pub wen_restart_proto_path: Option, pub unified_scheduler_handler_threads: Option, - pub replay_forks_threads: NonZeroUsize, - pub replay_transactions_threads: NonZeroUsize, } impl Default for ValidatorConfig { @@ -331,6 +328,7 @@ impl Default for ValidatorConfig { wait_to_vote_slot: None, ledger_column_options: LedgerColumnOptions::default(), runtime_config: RuntimeConfig::default(), + replay_slots_concurrently: false, banking_trace_dir_byte_limit: 0, block_verification_method: BlockVerificationMethod::default(), block_production_method: BlockProductionMethod::default(), @@ -338,8 +336,6 @@ impl Default for ValidatorConfig { use_snapshot_archives_at_startup: UseSnapshotArchivesAtStartup::default(), wen_restart_proto_path: None, unified_scheduler_handler_threads: None, - replay_forks_threads: NonZeroUsize::new(1).expect("1 is non-zero"), - replay_transactions_threads: NonZeroUsize::new(1).expect("1 is non-zero"), } } } @@ -350,9 +346,6 @@ impl ValidatorConfig { enforce_ulimit_nofile: false, rpc_config: JsonRpcConfig::default_for_test(), block_production_method: BlockProductionMethod::ThreadLocalMultiIterator, - replay_forks_threads: 
NonZeroUsize::new(1).expect("1 is non-zero"), - replay_transactions_threads: NonZeroUsize::new(get_max_thread_count()) - .expect("thread count is non-zero"), ..Self::default() } } @@ -1312,8 +1305,7 @@ impl Validator { repair_validators: config.repair_validators.clone(), repair_whitelist: config.repair_whitelist.clone(), wait_for_vote_to_start_leader, - replay_forks_threads: config.replay_forks_threads, - replay_transactions_threads: config.replay_transactions_threads, + replay_slots_concurrently: config.replay_slots_concurrently, }, &max_slots, block_metadata_notifier, diff --git a/local-cluster/src/validator_configs.rs b/local-cluster/src/validator_configs.rs index 45045203412a73..33883bb02c1d77 100644 --- a/local-cluster/src/validator_configs.rs +++ b/local-cluster/src/validator_configs.rs @@ -61,6 +61,7 @@ pub fn safe_clone_config(config: &ValidatorConfig) -> ValidatorConfig { wait_to_vote_slot: config.wait_to_vote_slot, ledger_column_options: config.ledger_column_options.clone(), runtime_config: config.runtime_config.clone(), + replay_slots_concurrently: config.replay_slots_concurrently, banking_trace_dir_byte_limit: config.banking_trace_dir_byte_limit, block_verification_method: config.block_verification_method.clone(), block_production_method: config.block_production_method.clone(), @@ -68,8 +69,6 @@ pub fn safe_clone_config(config: &ValidatorConfig) -> ValidatorConfig { use_snapshot_archives_at_startup: config.use_snapshot_archives_at_startup, wen_restart_proto_path: config.wen_restart_proto_path.clone(), unified_scheduler_handler_threads: config.unified_scheduler_handler_threads, - replay_forks_threads: config.replay_forks_threads, - replay_transactions_threads: config.replay_transactions_threads, } } diff --git a/validator/src/cli.rs b/validator/src/cli.rs index 8cae6667f87a34..f127273c8da2f3 100644 --- a/validator/src/cli.rs +++ b/validator/src/cli.rs @@ -52,9 +52,6 @@ use { std::{path::PathBuf, str::FromStr}, }; -pub mod thread_args; -use 
thread_args::{thread_args, DefaultThreadArgs}; - const EXCLUDE_KEY: &str = "account-index-exclude-key"; const INCLUDE_KEY: &str = "account-index-include-key"; // The default minimal snapshot download speed (bytes/second) @@ -1469,6 +1466,11 @@ pub fn app<'a>(version: &'a str, default_args: &'a DefaultArgs) -> App<'a, 'a> { .value_name("BYTES") .help("Maximum number of bytes written to the program log before truncation"), ) + .arg( + Arg::with_name("replay_slots_concurrently") + .long("replay-slots-concurrently") + .help("Allow concurrent replay of slots on different forks"), + ) .arg( Arg::with_name("banking_trace_dir_byte_limit") // expose friendly alternative name to cli than internal @@ -1553,7 +1555,6 @@ pub fn app<'a>(version: &'a str, default_args: &'a DefaultArgs) -> App<'a, 'a> { ", ), ) - .args(&thread_args(&default_args.thread_args)) .args(&get_deprecated_arguments()) .after_help("The default subcommand is run") .subcommand( @@ -2072,13 +2073,6 @@ fn deprecated_arguments() -> Vec { .long("no-rocksdb-compaction") .takes_value(false) .help("Disable manual compaction of the ledger database")); - add_arg!( - Arg::with_name("replay_slots_concurrently") - .long("replay-slots-concurrently") - .help("Allow concurrent replay of slots on different forks") - .conflicts_with("replay_forks_threads"), - replaced_by: "replay_forks_threads", - usage_warning: "Equivalent behavior to this flag would be --replay-forks-threads 4"); add_arg!(Arg::with_name("rocksdb_compaction_interval") .long("rocksdb-compaction-interval-slots") .value_name("ROCKSDB_COMPACTION_INTERVAL_SLOTS") @@ -2201,8 +2195,6 @@ pub struct DefaultArgs { pub banking_trace_dir_byte_limit: String, pub wen_restart_path: String, - - pub thread_args: DefaultThreadArgs, } impl DefaultArgs { @@ -2285,7 +2277,6 @@ impl DefaultArgs { wait_for_restart_window_max_delinquent_stake: "5".to_string(), banking_trace_dir_byte_limit: BANKING_TRACE_DIR_DEFAULT_BYTE_LIMIT.to_string(), wen_restart_path: 
"wen_restart_progress.proto".to_string(), - thread_args: DefaultThreadArgs::default(), } } } diff --git a/validator/src/cli/thread_args.rs b/validator/src/cli/thread_args.rs deleted file mode 100644 index 53d8cf15d984a0..00000000000000 --- a/validator/src/cli/thread_args.rs +++ /dev/null @@ -1,115 +0,0 @@ -//! Arguments for controlling the number of threads allocated for various tasks - -use { - clap::{value_t_or_exit, Arg, ArgMatches}, - solana_clap_utils::{hidden_unless_forced, input_validators::is_within_range}, - solana_rayon_threadlimit::get_max_thread_count, - std::{num::NonZeroUsize, ops::RangeInclusive}, -}; - -// Need this struct to provide &str whose lifetime matches that of the CLAP Arg's -pub struct DefaultThreadArgs { - pub replay_forks_threads: String, - pub replay_transactions_threads: String, -} - -impl Default for DefaultThreadArgs { - fn default() -> Self { - Self { - replay_forks_threads: ReplayForksThreadsArg::default().to_string(), - replay_transactions_threads: ReplayTransactionsThreadsArg::default().to_string(), - } - } -} - -pub fn thread_args<'a>(defaults: &DefaultThreadArgs) -> Vec> { - vec![ - new_thread_arg::(&defaults.replay_forks_threads), - new_thread_arg::(&defaults.replay_transactions_threads), - ] -} - -fn new_thread_arg<'a, T: ThreadArg>(default: &str) -> Arg<'_, 'a> { - Arg::with_name(T::NAME) - .long(T::LONG_NAME) - .takes_value(true) - .value_name("NUMBER") - .default_value(default) - .validator(|num| is_within_range(num, T::range())) - .hidden(hidden_unless_forced()) - .help(T::HELP) -} - -pub struct NumThreadConfig { - pub replay_forks_threads: NonZeroUsize, - pub replay_transactions_threads: NonZeroUsize, -} - -pub fn parse_num_threads_args(matches: &ArgMatches) -> NumThreadConfig { - NumThreadConfig { - replay_forks_threads: if matches.is_present("replay_slots_concurrently") { - NonZeroUsize::new(4).expect("4 is non-zero") - } else { - value_t_or_exit!(matches, ReplayForksThreadsArg::NAME, NonZeroUsize) - }, - 
replay_transactions_threads: value_t_or_exit!( - matches, - ReplayTransactionsThreadsArg::NAME, - NonZeroUsize - ), - } -} - -/// Configuration for CLAP arguments that control the number of threads for various functions -trait ThreadArg { - /// The argument's name - const NAME: &'static str; - /// The argument's long name - const LONG_NAME: &'static str; - /// The argument's help message - const HELP: &'static str; - - /// The default number of threads - fn default() -> usize; - /// The minimum allowed number of threads (inclusive) - fn min() -> usize { - 1 - } - /// The maximum allowed number of threads (inclusive) - fn max() -> usize { - // By default, no thread pool should scale over the number of the machine's threads - get_max_thread_count() - } - /// The range of allowed number of threads (inclusive on both ends) - fn range() -> RangeInclusive { - RangeInclusive::new(Self::min(), Self::max()) - } -} - -struct ReplayForksThreadsArg; -impl ThreadArg for ReplayForksThreadsArg { - const NAME: &'static str = "replay_forks_threads"; - const LONG_NAME: &'static str = "replay-forks-threads"; - const HELP: &'static str = "Number of threads to use for replay of blocks on different forks"; - - fn default() -> usize { - // Default to single threaded fork execution - 1 - } - fn max() -> usize { - // Choose a value that is small enough to limit the overhead of having a large thread pool - // while also being large enough to allow replay of all active forks in most scenarios - 4 - } -} - -struct ReplayTransactionsThreadsArg; -impl ThreadArg for ReplayTransactionsThreadsArg { - const NAME: &'static str = "replay_transactions_threads"; - const LONG_NAME: &'static str = "replay-transactions-threads"; - const HELP: &'static str = "Number of threads to use for transaction replay"; - - fn default() -> usize { - get_max_thread_count() - } -} diff --git a/validator/src/main.rs b/validator/src/main.rs index cdd631446d68c5..545ecfda481d35 100644 --- a/validator/src/main.rs +++ 
b/validator/src/main.rs @@ -1331,11 +1331,6 @@ pub fn main() { let full_api = matches.is_present("full_rpc_api"); - let cli::thread_args::NumThreadConfig { - replay_forks_threads, - replay_transactions_threads, - } = cli::thread_args::parse_num_threads_args(&matches); - let mut validator_config = ValidatorConfig { require_tower: matches.is_present("require_tower"), tower_storage, @@ -1469,13 +1464,12 @@ pub fn main() { ..RuntimeConfig::default() }, staked_nodes_overrides: staked_nodes_overrides.clone(), + replay_slots_concurrently: matches.is_present("replay_slots_concurrently"), use_snapshot_archives_at_startup: value_t_or_exit!( matches, use_snapshot_archives_at_startup::cli::NAME, UseSnapshotArchivesAtStartup ), - replay_forks_threads, - replay_transactions_threads, ..ValidatorConfig::default() }; From de3c79828c6ff1f1870b6cdae0192a73dd9c8c6f Mon Sep 17 00:00:00 2001 From: nickfrosty <75431177+nickfrosty@users.noreply.github.com> Date: Wed, 22 Jan 2025 14:37:45 -0500 Subject: [PATCH 27/28] chore: readme --- README.md | 37 +++++++++++++++++++++++++------------ 1 file changed, 25 insertions(+), 12 deletions(-) diff --git a/README.md b/README.md index c6183f6ab6183e..711af659f5abe5 100644 --- a/README.md +++ b/README.md @@ -1,3 +1,12 @@ +# PLEASE READ: This repo no longer contains the SPL program implementations + +This repo still exists in archived form, feel free to fork any reference +implementations it still contains. + +See Agave, the Solana validator implementation from Anza: https://github.com/anza-xyz/agave + +--- +

Solana @@ -26,20 +35,24 @@ $ rustup update ``` When building a specific release branch, you should check the rust version in `ci/rust-version.sh` and if necessary, install that version by running: + ```bash $ rustup install VERSION ``` + Note that if this is not the latest rust version on your machine, cargo commands may require an [override](https://rust-lang.github.io/rustup/overrides.html) in order to use the correct version. On Linux systems you may need to install libssl-dev, pkg-config, zlib1g-dev, protobuf etc. On Ubuntu: + ```bash $ sudo apt-get update $ sudo apt-get install libssl-dev libudev-dev pkg-config zlib1g-dev llvm clang cmake make libprotobuf-dev protobuf-compiler ``` On Fedora: + ```bash $ sudo dnf install openssl-devel systemd-devel pkg-config zlib-devel llvm clang cmake make protobuf-devel protobuf-compiler perl-core ``` @@ -71,8 +84,8 @@ Start your own testnet locally, instructions are in the [online docs](https://do ### Accessing the remote development cluster -* `devnet` - stable public cluster for development accessible via -devnet.solana.com. Runs 24/7. Learn more about the [public clusters](https://docs.solanalabs.com/clusters) +- `devnet` - stable public cluster for development accessible via + devnet.solana.com. Runs 24/7. Learn more about the [public clusters](https://docs.solanalabs.com/clusters) # Benchmarking @@ -103,10 +116,10 @@ $ open target/cov/lcov-local/index.html ``` Why coverage? While most see coverage as a code quality metric, we see it primarily as a developer -productivity metric. When a developer makes a change to the codebase, presumably it's a *solution* to -some problem. Our unit-test suite is how we encode the set of *problems* the codebase solves. Running -the test suite should indicate that your change didn't *infringe* on anyone else's solutions. Adding a -test *protects* your solution from future changes. Say you don't understand why a line of code exists, +productivity metric. 
When a developer makes a change to the codebase, presumably it's a _solution_ to +some problem. Our unit-test suite is how we encode the set of _problems_ the codebase solves. Running +the test suite should indicate that your change didn't _infringe_ on anyone else's solutions. Adding a +test _protects_ your solution from future changes. Say you don't understand why a line of code exists, try deleting it and running the unit-tests. The nearest test failure should tell you what problem was solved by that code. If no test fails, go ahead and submit a Pull Request that asks, "what problem is solved by this code?" On the other hand, if a test does fail and you can think of a @@ -138,10 +151,10 @@ reader is or is working on behalf of a Specially Designated National (SDN) or a person subject to similar blocking or denied party prohibitions. -The reader should be aware that U.S. export control and sanctions laws prohibit -U.S. persons (and other persons that are subject to such laws) from transacting -with persons in certain countries and territories or that are on the SDN list. -Accordingly, there is a risk to individuals that other persons using any of the -code contained in this repo, or a derivation thereof, may be sanctioned persons -and that transactions with such persons would be a violation of U.S. export +The reader should be aware that U.S. export control and sanctions laws prohibit +U.S. persons (and other persons that are subject to such laws) from transacting +with persons in certain countries and territories or that are on the SDN list. +Accordingly, there is a risk to individuals that other persons using any of the +code contained in this repo, or a derivation thereof, may be sanctioned persons +and that transactions with such persons would be a violation of U.S. export controls and sanctions law. 
From 7700cb3128c1f19820de67b81aa45d18f73d2ac0 Mon Sep 17 00:00:00 2001 From: nickfrosty <75431177+nickfrosty@users.noreply.github.com> Date: Wed, 22 Jan 2025 14:41:49 -0500 Subject: [PATCH 28/28] chore: readme --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 711af659f5abe5..b273ad8f085850 100644 --- a/README.md +++ b/README.md @@ -1,4 +1,4 @@ -# PLEASE READ: This repo no longer contains the SPL program implementations +# PLEASE READ: This repo is now a public archive This repo still exists in archived form, feel free to fork any reference implementations it still contains.