diff --git a/accounts-db/src/accounts_db.rs b/accounts-db/src/accounts_db.rs index b827449fcf1765..bbeee4c01bf602 100644 --- a/accounts-db/src/accounts_db.rs +++ b/accounts-db/src/accounts_db.rs @@ -87,7 +87,6 @@ use { genesis_config::{ClusterType, GenesisConfig}, hash::Hash, pubkey::Pubkey, - rent::Rent, saturating_add_assign, timing::AtomicInterval, transaction::SanitizedTransaction, @@ -100,7 +99,6 @@ use { io::Result as IoResult, ops::{Range, RangeBounds}, path::{Path, PathBuf}, - str::FromStr, sync::{ atomic::{AtomicBool, AtomicU32, AtomicU64, AtomicUsize, Ordering}, Arc, Condvar, Mutex, RwLock, @@ -475,7 +473,6 @@ pub const ACCOUNTS_DB_CONFIG_FOR_TESTING: AccountsDbConfig = AccountsDbConfig { index: Some(ACCOUNTS_INDEX_CONFIG_FOR_TESTING), base_working_path: None, accounts_hash_cache_path: None, - filler_accounts_config: FillerAccountsConfig::const_default(), write_cache_limit_bytes: None, ancient_append_vec_offset: None, skip_initial_hash_calc: false, @@ -488,7 +485,6 @@ pub const ACCOUNTS_DB_CONFIG_FOR_BENCHMARKS: AccountsDbConfig = AccountsDbConfig index: Some(ACCOUNTS_INDEX_CONFIG_FOR_BENCHMARKS), base_working_path: None, accounts_hash_cache_path: None, - filler_accounts_config: FillerAccountsConfig::const_default(), write_cache_limit_bytes: None, ancient_append_vec_offset: None, skip_initial_hash_calc: false, @@ -522,26 +518,6 @@ pub struct AccountsAddRootTiming { pub store_us: u64, } -#[derive(Debug, Clone, Copy)] -pub struct FillerAccountsConfig { - /// Number of filler accounts - pub count: usize, - /// Data size per account, in bytes - pub size: usize, -} - -impl FillerAccountsConfig { - pub const fn const_default() -> Self { - Self { count: 0, size: 0 } - } -} - -impl Default for FillerAccountsConfig { - fn default() -> Self { - Self::const_default() - } -} - const ANCIENT_APPEND_VEC_DEFAULT_OFFSET: Option = Some(-10_000); #[derive(Debug, Default, Clone)] @@ -550,7 +526,6 @@ pub struct AccountsDbConfig { /// Base directory for various necessary files pub base_working_path: Option, pub accounts_hash_cache_path: Option, - pub filler_accounts_config: FillerAccountsConfig, pub write_cache_limit_bytes: Option, /// if None, ancient append vecs are set to ANCIENT_APPEND_VEC_DEFAULT_OFFSET /// Some(offset) means include slots up to (max_slot - (slots_per_epoch - 'offset')) @@ -1537,17 +1512,8 @@ pub struct AccountsDb { /// GeyserPlugin accounts update notifier accounts_update_notifier: Option, - filler_accounts_config: FillerAccountsConfig, - pub filler_account_suffix: Option, - pub(crate) active_stats: ActiveStats, - /// number of filler accounts to add for each slot - pub filler_accounts_per_slot: AtomicU64, - - /// number of slots remaining where filler accounts should be added - pub filler_account_slots_remaining: AtomicU64, - pub verify_accounts_hash_in_bg: VerifyAccountsHashInBackground, /// Used to disable logging dead slots during removal. 
@@ -2385,7 +2351,6 @@ struct ScanState<'a> { bin_range: &'a Range, config: &'a CalcAccountsHashConfig<'a>, mismatch_found: Arc, - filler_account_suffix: Option<&'a Pubkey>, range: usize, sort_time: Arc, pubkey_to_bin_index: usize, @@ -2415,9 +2380,7 @@ impl<'a> AppendVecScan for ScanState<'a> { let mut loaded_hash = loaded_account.loaded_hash(); let hash_is_missing = loaded_hash == AccountHash(Hash::default()); - if (self.config.check_hash || hash_is_missing) - && !AccountsDb::is_filler_account_helper(pubkey, self.filler_account_suffix) - { + if self.config.check_hash || hash_is_missing { let computed_hash = loaded_account.compute_hash(pubkey); if hash_is_missing { loaded_hash = computed_hash; @@ -2498,8 +2461,6 @@ impl AccountsDb { AccountsDb { create_ancient_storage: CreateAncientStorage::Pack, verify_accounts_hash_in_bg: VerifyAccountsHashInBackground::default(), - filler_accounts_per_slot: AtomicU64::default(), - filler_account_slots_remaining: AtomicU64::default(), active_stats: ActiveStats::default(), skip_initial_hash_calc: false, ancient_append_vec_offset: None, @@ -2552,8 +2513,6 @@ impl AccountsDb { dirty_stores: DashMap::default(), zero_lamport_accounts_to_purge_after_full_snapshot: DashSet::default(), accounts_update_notifier: None, - filler_accounts_config: FillerAccountsConfig::default(), - filler_account_suffix: None, log_dead_slots: AtomicBool::new(true), exhaustively_verify_refcounts: false, partitioned_epoch_rewards_config: PartitionedEpochRewardsConfig::default(), @@ -2605,10 +2564,6 @@ impl AccountsDb { let accounts_hash_cache_path = accounts_db_config .as_ref() .and_then(|config| config.accounts_hash_cache_path.clone()); - let filler_accounts_config = accounts_db_config - .as_ref() - .map(|config| config.filler_accounts_config) - .unwrap_or_default(); let skip_initial_hash_calc = accounts_db_config .as_ref() .map(|config| config.skip_initial_hash_calc) @@ -2642,11 +2597,6 @@ impl AccountsDb { let partitioned_epoch_rewards_config: PartitionedEpochRewardsConfig = PartitionedEpochRewardsConfig::new(test_partitioned_epoch_rewards); - let filler_account_suffix = if filler_accounts_config.count > 0 { - Some(solana_sdk::pubkey::new_rand()) - } else { - None - }; let paths_is_empty = paths.is_empty(); let mut new = Self { paths, @@ -2656,8 +2606,6 @@ impl AccountsDb { account_indexes, shrink_ratio, accounts_update_notifier, - filler_accounts_config, - filler_account_suffix, create_ancient_storage, write_cache_limit_bytes: accounts_db_config .as_ref() @@ -2689,20 +2637,6 @@ impl AccountsDb { new } - /// Gradual means filler accounts will be added over the course of an epoch, during cache flush. - /// This is in contrast to adding all the filler accounts immediately before the validator starts. - fn init_gradual_filler_accounts(&self, slots_per_epoch: Slot) { - let count = self.filler_accounts_config.count; - if count > 0 { - // filler accounts are a debug only feature. 
integer division is fine here - let accounts_per_slot = (count as u64) / slots_per_epoch; - self.filler_accounts_per_slot - .store(accounts_per_slot, Ordering::Release); - self.filler_account_slots_remaining - .store(slots_per_epoch, Ordering::Release); - } - } - pub fn set_shrink_paths(&self, paths: Vec) { assert!(!paths.is_empty()); let mut shrink_paths = self.shrink_paths.write().unwrap(); @@ -4422,15 +4356,6 @@ impl AccountsDb { .get_all_less_than(slot) } - fn get_prior_root(&self, slot: Slot) -> Option { - self.accounts_index - .roots_tracker - .read() - .unwrap() - .alive_roots - .get_prior(slot) - } - /// return all slots that are more than one epoch old and thus could already be an ancient append vec /// or which could need to be combined into a new or existing ancient append vec /// offset is used to combine newer slots than we normally would. This is designed to be used for testing. @@ -6576,30 +6501,6 @@ impl AccountsDb { } } - let mut filler_accounts = 0; - if self.filler_accounts_enabled() { - let slots_remaining = self.filler_account_slots_remaining.load(Ordering::Acquire); - if slots_remaining > 0 { - // figure out - let pr = self.get_prior_root(slot); - - if let Some(prior_root) = pr { - let filler_account_slots = - std::cmp::min(slot.saturating_sub(prior_root), slots_remaining); - self.filler_account_slots_remaining - .fetch_sub(filler_account_slots, Ordering::Release); - let filler_accounts_per_slot = - self.filler_accounts_per_slot.load(Ordering::Acquire); - filler_accounts = filler_account_slots * filler_accounts_per_slot; - - // keep space for filler accounts - let addl_size = filler_accounts - * (aligned_stored_size(self.filler_accounts_config.size) as u64); - total_size += addl_size; - } - } - } - let (accounts, hashes): (Vec<(&Pubkey, &AccountSharedData)>, Vec) = iter_items .iter() .filter_map(|iter_item| { @@ -6649,25 +6550,6 @@ impl AccountsDb { StoreReclaims::Default, ); - if filler_accounts > 0 { - // add extra filler accounts at the end of the append vec - let (account, hash) = self.get_filler_account(&Rent::default()); - let mut accounts = Vec::with_capacity(filler_accounts as usize); - let mut hashes = Vec::with_capacity(filler_accounts as usize); - let pubkeys = self.get_filler_account_pubkeys(filler_accounts as usize); - pubkeys.iter().for_each(|key| { - accounts.push((key, &account)); - hashes.push(hash); - }); - self.store_accounts_frozen( - (slot, &accounts[..]), - Some(hashes), - &flushed_store, - None, - StoreReclaims::Ignore, - ); - } - // If the above sizing function is correct, just one AppendVec is enough to hold // all the data for the slot assert!(self.storage.get_slot_storage_entry(slot).is_some()); @@ -7025,9 +6907,6 @@ impl AccountsDb { let result: Vec = pubkeys .iter() .filter_map(|pubkey| { - if self.is_filler_account(pubkey) { - return None; - } if let AccountIndexGetResult::Found(lock, index) = self.accounts_index.get(pubkey, config.ancestors, Some(max_slot)) { @@ -7053,7 +6932,7 @@ impl AccountsDb { let mut loaded_hash = loaded_account.loaded_hash(); let balance = loaded_account.lamports(); let hash_is_missing = loaded_hash == AccountHash(Hash::default()); - if (config.check_hash || hash_is_missing) && !self.is_filler_account(pubkey) { + if config.check_hash || hash_is_missing { let computed_hash = loaded_account.compute_hash(pubkey); if hash_is_missing { @@ -7644,7 +7523,6 @@ impl AccountsDb { bins: usize, bin_range: &Range, config: &CalcAccountsHashConfig<'_>, - filler_account_suffix: Option<&Pubkey>, ) -> Result, 
AccountsHashVerificationError> { assert!(bin_range.start < bins); assert!(bin_range.end <= bins); @@ -7665,7 +7543,6 @@ impl AccountsDb { bin_calculator: &bin_calculator, config, mismatch_found: mismatch_found.clone(), - filler_account_suffix, range, bin_range, sort_time: sort_time.clone(), @@ -7808,11 +7685,6 @@ impl AccountsDb { }; let accounts_hasher = AccountsHasher { - filler_account_suffix: if self.filler_accounts_config.count > 0 { - self.filler_account_suffix - } else { - None - }, zero_lamport_accounts: kind.zero_lamport_accounts(), dir_for_temp_cache_files: transient_accounts_hash_cache_path, active_stats: &self.active_stats, @@ -7826,7 +7698,6 @@ impl AccountsDb { PUBKEY_BINS_FOR_CALCULATING_HASHES, &bounds, config, - accounts_hasher.filler_account_suffix.as_ref(), )?; let cache_hash_data_files = cache_hash_data_file_references @@ -8055,11 +7926,6 @@ impl AccountsDb { hashes.retain(|k| k.0 != ignore); } - if self.filler_accounts_enabled() { - // filler accounts must be added to 'dirty_keys' above but cannot be used to calculate hash - hashes.retain(|(pubkey, _hash)| !self.is_filler_account(pubkey)); - } - let accounts_delta_hash = AccountsDeltaHash(AccountsHasher::accumulate_account_hashes(hashes)); accumulate.stop(); @@ -9111,91 +8977,6 @@ impl AccountsDb { } } - fn filler_unique_id_bytes() -> usize { - std::mem::size_of::() - } - - fn filler_rent_partition_prefix_bytes() -> usize { - std::mem::size_of::() - } - - fn filler_prefix_bytes() -> usize { - Self::filler_unique_id_bytes() + Self::filler_rent_partition_prefix_bytes() - } - - pub fn is_filler_account_helper( - pubkey: &Pubkey, - filler_account_suffix: Option<&Pubkey>, - ) -> bool { - let offset = Self::filler_prefix_bytes(); - filler_account_suffix - .as_ref() - .map(|filler_account_suffix| { - pubkey.as_ref()[offset..] == filler_account_suffix.as_ref()[offset..] - }) - .unwrap_or_default() - } - - /// true if 'pubkey' is a filler account - pub fn is_filler_account(&self, pubkey: &Pubkey) -> bool { - Self::is_filler_account_helper(pubkey, self.filler_account_suffix.as_ref()) - } - - /// true if it is possible that there are filler accounts present - pub fn filler_accounts_enabled(&self) -> bool { - self.filler_account_suffix.is_some() - } - - /// return 'AccountSharedData' and a hash for a filler account - fn get_filler_account(&self, rent: &Rent) -> (AccountSharedData, AccountHash) { - let string = "FiLLERACCoUNTooooooooooooooooooooooooooooooo"; - let hash = AccountHash(Hash::from_str(string).unwrap()); - let owner = Pubkey::from_str(string).unwrap(); - let space = self.filler_accounts_config.size; - let rent_exempt_reserve = rent.minimum_balance(space); - let lamports = rent_exempt_reserve; - let mut account = AccountSharedData::new(lamports, space, &owner); - // just non-zero rent epoch. 
filler accounts are rent-exempt - let dummy_rent_epoch = 2; - account.set_rent_epoch(dummy_rent_epoch); - (account, hash) - } - - fn get_filler_account_pubkeys(&self, count: usize) -> Vec { - (0..count) - .map(|_| { - let subrange = solana_sdk::pubkey::new_rand(); - self.get_filler_account_pubkey(&subrange) - }) - .collect() - } - - fn get_filler_account_pubkey(&self, subrange: &Pubkey) -> Pubkey { - // pubkey begins life as entire filler 'suffix' pubkey - let mut key = self.filler_account_suffix.unwrap(); - let rent_prefix_bytes = Self::filler_rent_partition_prefix_bytes(); - // first bytes are replaced with rent partition range: filler_rent_partition_prefix_bytes - key.as_mut()[0..rent_prefix_bytes] - .copy_from_slice(&subrange.as_ref()[0..rent_prefix_bytes]); - key - } - - /// filler accounts are space-holding accounts which are ignored by hash calculations and rent. - /// They are designed to allow a validator to run against a network successfully while simulating having many more accounts present. - /// All filler accounts share a common pubkey suffix. The suffix is randomly generated per validator on startup. - /// The filler accounts are added to each slot in the snapshot after index generation. - /// The accounts added in a slot are setup to have pubkeys such that rent will be collected from them before (or when?) their slot becomes an epoch old. - /// Thus, the filler accounts are rewritten by rent and the old slot can be thrown away successfully. - pub fn maybe_add_filler_accounts(&self, epoch_schedule: &EpochSchedule, slot: Slot) { - if self.filler_accounts_config.count == 0 { - return; - } - - self.init_gradual_filler_accounts( - epoch_schedule.get_slots_in_epoch(epoch_schedule.get_epoch(slot)), - ); - } - pub fn generate_index( &self, limit_load_slot_count_from_snapshot: Option, @@ -10090,7 +9871,6 @@ pub mod tests { check_hash, ..CalcAccountsHashConfig::default() }, - None, ) .map(|references| { references diff --git a/accounts-db/src/accounts_hash.rs b/accounts-db/src/accounts_hash.rs index 7631ea694635b8..72f74be6f130d1 100644 --- a/accounts-db/src/accounts_hash.rs +++ b/accounts-db/src/accounts_hash.rs @@ -469,7 +469,6 @@ impl CumulativeOffsets { #[derive(Debug)] pub struct AccountsHasher<'a> { - pub filler_account_suffix: Option, pub zero_lamport_accounts: ZeroLamportAccounts, /// The directory where temporary cache files are put pub dir_for_temp_cache_files: PathBuf, @@ -495,11 +494,6 @@ struct ItemLocation<'a> { } impl<'a> AccountsHasher<'a> { - /// true if it is possible that there are filler accounts present - pub fn filler_accounts_enabled(&self) -> bool { - self.filler_account_suffix.is_some() - } - pub fn calculate_hash(hashes: Vec>) -> (Hash, usize) { let cumulative_offsets = CumulativeOffsets::from_raw(&hashes); @@ -1151,7 +1145,6 @@ impl<'a> AccountsHasher<'a> { }; let mut overall_sum = 0; - let filler_accounts_enabled = self.filler_accounts_enabled(); while let Some(pointer) = working_set.pop() { let key = &sorted_data_by_pubkey[pointer.slot_group_index][pointer.offset].pubkey; @@ -1166,13 +1159,10 @@ impl<'a> AccountsHasher<'a> { // add lamports and get hash if item.lamports != 0 { - // do not include filler accounts in the hash - if !(filler_accounts_enabled && self.is_filler_account(&item.pubkey)) { - overall_sum = Self::checked_cast_for_capitalization( - item.lamports as u128 + overall_sum as u128, - ); - hashes.write(&item.hash.0); - } + overall_sum = Self::checked_cast_for_capitalization( + item.lamports as u128 + overall_sum as u128, + ); + 
hashes.write(&item.hash.0); } else { // if lamports == 0, check if they should be included if self.zero_lamport_accounts == ZeroLamportAccounts::Included { @@ -1196,13 +1186,6 @@ impl<'a> AccountsHasher<'a> { (hashes, overall_sum) } - fn is_filler_account(&self, pubkey: &Pubkey) -> bool { - crate::accounts_db::AccountsDb::is_filler_account_helper( - pubkey, - self.filler_account_suffix.as_ref(), - ) - } - /// input: /// vec: group of slot data, ordered by Slot (low to high) /// vec: [..] - items found in that slot range Sorted by: Pubkey, higher Slot, higher Write version (if pubkey =) @@ -1343,7 +1326,6 @@ mod tests { impl<'a> AccountsHasher<'a> { fn new(dir_for_temp_cache_files: PathBuf) -> Self { Self { - filler_account_suffix: None, zero_lamport_accounts: ZeroLamportAccounts::Excluded, dir_for_temp_cache_files, active_stats: &ACTIVE_STATS, diff --git a/accounts-db/src/rent_collector.rs b/accounts-db/src/rent_collector.rs index cea0a07c9883b3..1a72cac88308b3 100644 --- a/accounts-db/src/rent_collector.rs +++ b/accounts-db/src/rent_collector.rs @@ -111,10 +111,9 @@ impl RentCollector { &self, address: &Pubkey, account: &mut AccountSharedData, - filler_account_suffix: Option<&Pubkey>, set_exempt_rent_epoch_max: bool, ) -> CollectedInfo { - match self.calculate_rent_result(address, account, filler_account_suffix) { + match self.calculate_rent_result(address, account) { RentResult::Exempt => { if set_exempt_rent_epoch_max { account.set_rent_epoch(RENT_EXEMPT_RENT_EPOCH); @@ -151,19 +150,13 @@ impl RentCollector { &self, address: &Pubkey, account: &impl ReadableAccount, - filler_account_suffix: Option<&Pubkey>, ) -> RentResult { if account.rent_epoch() == RENT_EXEMPT_RENT_EPOCH || account.rent_epoch() > self.epoch { // potentially rent paying account (or known and already marked exempt) // Maybe collect rent later, leave account alone for now. 
return RentResult::NoRentCollectionNow; } - if !self.should_collect_rent(address, account) - || crate::accounts_db::AccountsDb::is_filler_account_helper( - address, - filler_account_suffix, - ) - { + if !self.should_collect_rent(address, account) { // easy to determine this account should not consider having rent collected from it return RentResult::Exempt; } @@ -230,12 +223,7 @@ mod tests { ) -> CollectedInfo { // initialize rent_epoch as created at this epoch account.set_rent_epoch(self.epoch); - self.collect_from_existing_account( - address, - account, - /*filler_account_suffix:*/ None, - set_exempt_rent_epoch_max, - ) + self.collect_from_existing_account(address, account, set_exempt_rent_epoch_max) } } @@ -246,7 +234,7 @@ mod tests { let mut account = AccountSharedData::default(); assert_matches!( - rent_collector.calculate_rent_result(&Pubkey::default(), &account, None), + rent_collector.calculate_rent_result(&Pubkey::default(), &account), RentResult::NoRentCollectionNow ); { @@ -255,7 +243,6 @@ mod tests { rent_collector.collect_from_existing_account( &Pubkey::default(), &mut account_clone, - None, set_exempt_rent_epoch_max ), CollectedInfo::default() @@ -265,7 +252,7 @@ mod tests { account.set_executable(true); assert_matches!( - rent_collector.calculate_rent_result(&Pubkey::default(), &account, None), + rent_collector.calculate_rent_result(&Pubkey::default(), &account), RentResult::Exempt ); { @@ -278,7 +265,6 @@ mod tests { rent_collector.collect_from_existing_account( &Pubkey::default(), &mut account_clone, - None, set_exempt_rent_epoch_max ), CollectedInfo::default() @@ -288,7 +274,7 @@ mod tests { account.set_executable(false); assert_matches!( - rent_collector.calculate_rent_result(&incinerator::id(), &account, None), + rent_collector.calculate_rent_result(&incinerator::id(), &account), RentResult::Exempt ); { @@ -301,7 +287,6 @@ mod tests { rent_collector.collect_from_existing_account( &incinerator::id(), &mut account_clone, - None, set_exempt_rent_epoch_max ), CollectedInfo::default() @@ -309,49 +294,44 @@ mod tests { assert_eq!(account_clone, account_expected); } - // try a few combinations of rent collector rent epoch and collecting rent with and without filler accounts specified (but we aren't a filler) - let filler_account = solana_sdk::pubkey::new_rand(); - - for filler_accounts in [None, Some(&filler_account)] { - for (rent_epoch, rent_due_expected) in [(2, 2), (3, 5)] { - rent_collector.epoch = rent_epoch; - account.set_lamports(10); - account.set_rent_epoch(1); - let new_rent_epoch_expected = rent_collector.epoch + 1; - assert!( - matches!( - rent_collector.calculate_rent_result(&Pubkey::default(), &account, filler_accounts), - RentResult::CollectRent{ new_rent_epoch, rent_due} if new_rent_epoch == new_rent_epoch_expected && rent_due == rent_due_expected, + // try a few combinations of rent collector rent epoch and collecting rent + for (rent_epoch, rent_due_expected) in [(2, 2), (3, 5)] { + rent_collector.epoch = rent_epoch; + account.set_lamports(10); + account.set_rent_epoch(1); + let new_rent_epoch_expected = rent_collector.epoch + 1; + assert!( + matches!( + rent_collector.calculate_rent_result(&Pubkey::default(), &account), + RentResult::CollectRent{ new_rent_epoch, rent_due} if new_rent_epoch == new_rent_epoch_expected && rent_due == rent_due_expected, + ), + "{:?}", + rent_collector.calculate_rent_result(&Pubkey::default(), &account) + ); + + { + let mut account_clone = account.clone(); + assert_eq!( + rent_collector.collect_from_existing_account( + 
&Pubkey::default(), + &mut account_clone, + set_exempt_rent_epoch_max ), - "{:?}", - rent_collector.calculate_rent_result(&Pubkey::default(), &account, None,) + CollectedInfo { + rent_amount: rent_due_expected, + account_data_len_reclaimed: 0 + } ); - - { - let mut account_clone = account.clone(); - assert_eq!( - rent_collector.collect_from_existing_account( - &Pubkey::default(), - &mut account_clone, - filler_accounts, - set_exempt_rent_epoch_max - ), - CollectedInfo { - rent_amount: rent_due_expected, - account_data_len_reclaimed: 0 - } - ); - let mut account_expected = account.clone(); - account_expected.set_lamports(account.lamports() - rent_due_expected); - account_expected.set_rent_epoch(new_rent_epoch_expected); - assert_eq!(account_clone, account_expected); - } + let mut account_expected = account.clone(); + account_expected.set_lamports(account.lamports() - rent_due_expected); + account_expected.set_rent_epoch(new_rent_epoch_expected); + assert_eq!(account_clone, account_expected); } } // enough lamports to make us exempt account.set_lamports(1_000_000); - let result = rent_collector.calculate_rent_result(&Pubkey::default(), &account, None); + let result = rent_collector.calculate_rent_result(&Pubkey::default(), &account); assert!( matches!(result, RentResult::Exempt), "{result:?}, set_exempt_rent_epoch_max: {set_exempt_rent_epoch_max}", @@ -366,7 +346,6 @@ mod tests { rent_collector.collect_from_existing_account( &Pubkey::default(), &mut account_clone, - None, set_exempt_rent_epoch_max ), CollectedInfo::default() @@ -379,7 +358,7 @@ mod tests { // We don't calculate rent amount vs data if the rent_epoch is already in the future. account.set_rent_epoch(1_000_000); assert_matches!( - rent_collector.calculate_rent_result(&Pubkey::default(), &account, None), + rent_collector.calculate_rent_result(&Pubkey::default(), &account), RentResult::NoRentCollectionNow ); { @@ -388,42 +367,12 @@ mod tests { rent_collector.collect_from_existing_account( &Pubkey::default(), &mut account_clone, - None, set_exempt_rent_epoch_max ), CollectedInfo::default() ); assert_eq!(account_clone, account); } - - // filler accounts are exempt - account.set_rent_epoch(1); - account.set_lamports(10); - assert_matches!( - rent_collector.calculate_rent_result( - &filler_account, - &account, - Some(&filler_account), - ), - RentResult::Exempt - ); - { - let mut account_clone = account.clone(); - let mut account_expected = account.clone(); - if set_exempt_rent_epoch_max { - account_expected.set_rent_epoch(RENT_EXEMPT_RENT_EPOCH); - } - assert_eq!( - rent_collector.collect_from_existing_account( - &filler_account, - &mut account_clone, - Some(&filler_account), - set_exempt_rent_epoch_max - ), - CollectedInfo::default() - ); - assert_eq!(account_clone, account_expected); - } } } @@ -464,7 +413,6 @@ mod tests { let collected = rent_collector.collect_from_existing_account( &solana_sdk::pubkey::new_rand(), &mut existing_account, - None, // filler_account_suffix set_exempt_rent_epoch_max, ); assert!(existing_account.lamports() < old_lamports); @@ -502,7 +450,6 @@ mod tests { let collected = rent_collector.collect_from_existing_account( &pubkey, &mut account, - None, // filler_account_suffix set_exempt_rent_epoch_max, ); assert_eq!(account.lamports(), huge_lamports); @@ -519,7 +466,6 @@ mod tests { let collected = rent_collector.collect_from_existing_account( &pubkey, &mut account, - None, // filler_account_suffix set_exempt_rent_epoch_max, ); assert_eq!(account.lamports(), tiny_lamports - collected.rent_amount); @@ -546,7 
+492,6 @@ mod tests { let collected = rent_collector.collect_from_existing_account( &pubkey, &mut account, - None, // filler_account_suffix set_exempt_rent_epoch_max, ); assert_eq!(account.lamports(), 0); @@ -573,7 +518,6 @@ mod tests { let collected = rent_collector.collect_from_existing_account( &Pubkey::new_unique(), &mut account, - None, // filler_account_suffix set_exempt_rent_epoch_max, ); diff --git a/core/src/validator.rs b/core/src/validator.rs index 700315f4a67c1a..bf59642012448c 100644 --- a/core/src/validator.rs +++ b/core/src/validator.rs @@ -748,13 +748,7 @@ impl Validator { let (snapshot_package_sender, snapshot_packager_service) = if config.snapshot_config.should_generate_snapshots() { - // filler accounts make snapshots invalid for use - // so, do not publish that we have snapshots - let enable_gossip_push = config - .accounts_db_config - .as_ref() - .map(|config| config.filler_accounts_config.count == 0) - .unwrap_or(true); + let enable_gossip_push = true; let (snapshot_package_sender, snapshot_package_receiver) = crossbeam_channel::unbounded(); let snapshot_packager_service = SnapshotPackagerService::new( diff --git a/ledger-tool/src/args.rs b/ledger-tool/src/args.rs index 0bb28e4a2779ca..63198c1c6188fa 100644 --- a/ledger-tool/src/args.rs +++ b/ledger-tool/src/args.rs @@ -2,7 +2,7 @@ use { crate::LEDGER_TOOL_DIRECTORY, clap::{value_t, values_t_or_exit, ArgMatches}, solana_accounts_db::{ - accounts_db::{AccountsDb, AccountsDbConfig, FillerAccountsConfig}, + accounts_db::{AccountsDb, AccountsDbConfig}, accounts_index::{AccountsIndexConfig, IndexLimitMb}, partitioned_rewards::TestPartitionedEpochRewards, }, @@ -53,11 +53,6 @@ pub fn get_accounts_db_config( ..AccountsIndexConfig::default() }; - let filler_accounts_config = FillerAccountsConfig { - count: value_t!(arg_matches, "accounts_filler_count", usize).unwrap_or(0), - size: value_t!(arg_matches, "accounts_filler_size", usize).unwrap_or(0), - }; - let accounts_hash_cache_path = arg_matches .value_of("accounts_hash_cache_path") .map(Into::into) @@ -77,7 +72,6 @@ pub fn get_accounts_db_config( index: Some(accounts_index_config), base_working_path: Some(ledger_tool_ledger_path), accounts_hash_cache_path: Some(accounts_hash_cache_path), - filler_accounts_config, ancient_append_vec_offset: value_t!(arg_matches, "accounts_db_ancient_append_vecs", i64) .ok(), exhaustively_verify_refcounts: arg_matches.is_present("accounts_db_verify_refcounts"), diff --git a/ledger-tool/src/main.rs b/ledger-tool/src/main.rs index a6b34d39d63e76..98f8fc797253fa 100644 --- a/ledger-tool/src/main.rs +++ b/ledger-tool/src/main.rs @@ -1134,23 +1134,6 @@ fn main() { "Debug option to skip rewrites for rent-exempt accounts but still add them in bank delta hash calculation", ) .hidden(hidden_unless_forced()); - let accounts_filler_count = Arg::with_name("accounts_filler_count") - .long("accounts-filler-count") - .value_name("COUNT") - .validator(is_parsable::) - .takes_value(true) - .default_value("0") - .help("How many accounts to add to stress the system. 
Accounts are ignored in operations related to correctness.") - .hidden(hidden_unless_forced()); - let accounts_filler_size = Arg::with_name("accounts_filler_size") - .long("accounts-filler-size") - .value_name("BYTES") - .validator(is_parsable::) - .takes_value(true) - .default_value("0") - .requires("accounts_filler_count") - .help("Size per filler account in bytes.") - .hidden(hidden_unless_forced()); let account_paths_arg = Arg::with_name("account_paths") .long("accounts") .value_name("PATHS") @@ -1619,8 +1602,6 @@ fn main() { .arg(&accountsdb_skip_shrink) .arg(&accountsdb_verify_refcounts) .arg(&accounts_db_test_skip_rewrites_but_include_in_bank_hash) - .arg(&accounts_filler_count) - .arg(&accounts_filler_size) .arg(&verify_index_arg) .arg(&accounts_db_skip_initial_hash_calc_arg) .arg(&ancient_append_vecs) diff --git a/ledger/src/bank_forks_utils.rs b/ledger/src/bank_forks_utils.rs index c75380581fc16d..4f6dd9e98ee6d5 100644 --- a/ledger/src/bank_forks_utils.rs +++ b/ledger/src/bank_forks_utils.rs @@ -160,15 +160,6 @@ pub fn load_bank_forks( ); (bank_forks, Some(starting_snapshot_hashes)) } else { - let maybe_filler_accounts = process_options - .accounts_db_config - .as_ref() - .map(|config| config.filler_accounts_config.count > 0); - - if let Some(true) = maybe_filler_accounts { - panic!("filler accounts specified, but not loading from snapshot"); - } - info!("Processing ledger from genesis"); let bank_forks = blockstore_processor::process_blockstore_for_bank_0( genesis_config, diff --git a/runtime/src/accounts/mod.rs b/runtime/src/accounts/mod.rs index 9995a9251960b8..d8f3d8d1588d4f 100644 --- a/runtime/src/accounts/mod.rs +++ b/runtime/src/accounts/mod.rs @@ -218,7 +218,6 @@ fn load_transaction_accounts( .collect_from_existing_account( key, &mut account, - accounts_db.filler_account_suffix.as_ref(), set_exempt_rent_epoch_max, ) .rent_amount; diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index 70e5d7c2f0d0a6..db85d8b6b1eab5 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -5935,13 +5935,9 @@ impl Bank { let mut skipped_rewrites = Vec::default(); for (pubkey, account, _loaded_slot) in accounts.iter_mut() { let rent_collected_info = if self.should_collect_rent() { - let (rent_collected_info, measure) = - measure!(self.rent_collector.collect_from_existing_account( - pubkey, - account, - self.rc.accounts.accounts_db.filler_account_suffix.as_ref(), - set_exempt_rent_epoch_max, - )); + let (rent_collected_info, measure) = measure!(self + .rent_collector + .collect_from_existing_account(pubkey, account, set_exempt_rent_epoch_max,)); time_collecting_rent_us += measure.as_us(); rent_collected_info } else { diff --git a/runtime/src/bank/tests.rs b/runtime/src/bank/tests.rs index 20fba12fa946c5..4e43c4a81cee5d 100644 --- a/runtime/src/bank/tests.rs +++ b/runtime/src/bank/tests.rs @@ -448,7 +448,6 @@ fn test_credit_debit_rent_no_side_effect_on_hash() { let expected_rent = bank.rent_collector().collect_from_existing_account( &keypairs[4].pubkey(), &mut account_copy, - None, set_exempt_rent_epoch_max, ); assert_eq!(expected_rent.rent_amount, too_few_lamports); @@ -11505,7 +11504,6 @@ fn test_accounts_data_size_and_rent_collection(should_collect_rent: bool) { let info = bank.rent_collector.collect_from_existing_account( &keypair.pubkey(), &mut account, - None, set_exempt_rent_epoch_max, ); assert_eq!(info.account_data_len_reclaimed, data_size as u64); diff --git a/runtime/src/serde_snapshot.rs b/runtime/src/serde_snapshot.rs index 078da133979f64..8ee9f60d553549 100644 --- 
a/runtime/src/serde_snapshot.rs +++ b/runtime/src/serde_snapshot.rs @@ -941,8 +941,6 @@ where .set(rent_paying_accounts_by_partition) .unwrap(); - accounts_db.maybe_add_filler_accounts(&genesis_config.epoch_schedule, snapshot_slot); - handle.join().unwrap(); measure_notify.stop(); diff --git a/runtime/src/serde_snapshot/tests.rs b/runtime/src/serde_snapshot/tests.rs index 278d6d68da8bc1..f638b7e975a776 100644 --- a/runtime/src/serde_snapshot/tests.rs +++ b/runtime/src/serde_snapshot/tests.rs @@ -446,9 +446,6 @@ mod serde_snapshot_tests { let account2 = AccountSharedData::new(some_lamport + 1, no_data, &owner); let pubkey2 = solana_sdk::pubkey::new_rand(); - let filler_account = AccountSharedData::new(some_lamport, no_data, &owner); - let filler_account_pubkey = solana_sdk::pubkey::new_rand(); - let accounts = AccountsDb::new_single_for_tests(); let mut current_slot = 1; @@ -459,12 +456,6 @@ mod serde_snapshot_tests { accounts.store_for_tests(current_slot, &[(&pubkey, &zero_lamport_account)]); accounts.store_for_tests(current_slot, &[(&pubkey2, &account2)]); - // Store the account a few times. - // use to be: store enough accounts such that an additional store for slot 2 is created. - // but we use the write cache now - for _ in 0..3 { - accounts.store_for_tests(current_slot, &[(&filler_account_pubkey, &filler_account)]); - } accounts.add_root_and_flush_write_cache(current_slot); accounts.assert_load_account(current_slot, pubkey, zero_lamport); diff --git a/validator/src/cli.rs b/validator/src/cli.rs index 34b7359b2f1eef..eb680e8069e9e1 100644 --- a/validator/src/cli.rs +++ b/validator/src/cli.rs @@ -1279,23 +1279,6 @@ pub fn app<'a>(version: &'a str, default_args: &'a DefaultArgs) -> App<'a, 'a> { May be specified multiple times. \ [default: [ledger]/accounts_index]"), ) - .arg(Arg::with_name("accounts_filler_count") - .long("accounts-filler-count") - .value_name("COUNT") - .validator(is_parsable::) - .takes_value(true) - .default_value(&default_args.accounts_filler_count) - .help("How many accounts to add to stress the system. 
Accounts are ignored in operations related to correctness.") - .hidden(hidden_unless_forced())) - .arg(Arg::with_name("accounts_filler_size") - .long("accounts-filler-size") - .value_name("BYTES") - .validator(is_parsable::) - .takes_value(true) - .default_value(&default_args.accounts_filler_size) - .requires("accounts_filler_count") - .help("Size per filler account in bytes.") - .hidden(hidden_unless_forced())) .arg( Arg::with_name("accounts_db_test_hash_calculation") .long("accounts-db-test-hash-calculation") @@ -1957,8 +1940,6 @@ pub struct DefaultArgs { pub contact_debug_interval: String, - pub accounts_filler_count: String, - pub accounts_filler_size: String, pub accountsdb_repl_threads: String, pub snapshot_version: SnapshotVersion, @@ -2032,8 +2013,6 @@ impl DefaultArgs { .to_string(), rpc_pubsub_worker_threads: "4".to_string(), accountsdb_repl_threads: num_cpus::get().to_string(), - accounts_filler_count: "0".to_string(), - accounts_filler_size: "0".to_string(), maximum_full_snapshot_archives_to_retain: DEFAULT_MAX_FULL_SNAPSHOT_ARCHIVES_TO_RETAIN .to_string(), maximum_incremental_snapshot_archives_to_retain: diff --git a/validator/src/main.rs b/validator/src/main.rs index 0037fea465d50f..fc39971c3008f4 100644 --- a/validator/src/main.rs +++ b/validator/src/main.rs @@ -8,10 +8,7 @@ use { log::*, rand::{seq::SliceRandom, thread_rng}, solana_accounts_db::{ - accounts_db::{ - AccountShrinkThreshold, AccountsDb, AccountsDbConfig, CreateAncientStorage, - FillerAccountsConfig, - }, + accounts_db::{AccountShrinkThreshold, AccountsDb, AccountsDbConfig, CreateAncientStorage}, accounts_index::{ AccountIndex, AccountSecondaryIndexes, AccountSecondaryIndexesIncludeExclude, AccountsIndexConfig, IndexLimitMb, @@ -1186,16 +1183,10 @@ pub fn main() { .ok() .map(|mb| mb * MB); - let filler_accounts_config = FillerAccountsConfig { - count: value_t_or_exit!(matches, "accounts_filler_count", usize), - size: value_t_or_exit!(matches, "accounts_filler_size", usize), - }; - let accounts_db_config = AccountsDbConfig { index: Some(accounts_index_config), base_working_path: Some(ledger_path.clone()), accounts_hash_cache_path: Some(accounts_hash_cache_path), - filler_accounts_config, write_cache_limit_bytes: value_t!(matches, "accounts_db_cache_limit_mb", u64) .ok() .map(|mb| mb * MB as u64),
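
Below is a minimal sketch (not part of the patch) of what a rent-collection call site looks like after this change, now that the filler_account_suffix parameter is gone from RentCollector::collect_from_existing_account. The three-argument signature is taken from the hunks above; the exact import paths (solana_accounts_db::rent_collector, solana_sdk) are an assumption based on the files this diff touches.

    use solana_accounts_db::rent_collector::{CollectedInfo, RentCollector};
    use solana_sdk::{account::AccountSharedData, pubkey::Pubkey};

    // Hypothetical helper, for illustration only.
    fn collect_rent(
        rent_collector: &RentCollector,
        address: &Pubkey,
        account: &mut AccountSharedData,
        set_exempt_rent_epoch_max: bool,
    ) -> CollectedInfo {
        // Before this patch, callers also threaded an Option<&Pubkey> filler-account
        // suffix through this call; rent is now computed from the address and the
        // account state alone.
        rent_collector.collect_from_existing_account(address, account, set_exempt_rent_epoch_max)
    }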