
Add ability to output Bank hash details #32632

Merged Aug 15, 2023 · 23 commits

Changes from all commits

Commits
fb41d7b
Rework logic for when temp_accounts_hash_cache_path is created
steviez Jul 25, 2023
2f2c259
Store base directory where accountsdb stuff lives
steviez Jul 25, 2023
970bbb9
Write bank hash components to file when node diverges
steviez Jul 26, 2023
96e791d
Add .json suffix to make file contents clear
steviez Jul 26, 2023
fd3e3b2
Rework scan function and type to aggregate data
steviez Jul 26, 2023
7810039
Use serde_json::to_writer instead of explicitly creating byte array
steviez Jul 27, 2023
baab049
Use base64 encoding for account data
steviez Jul 27, 2023
7dc5dba
Add "bank" to hash field names to disambiguate
steviez Jul 28, 2023
997d73f
Add deserialize impl for BankHashAccounts
steviez Jul 29, 2023
07a1571
Move .to_string() conversion into BankHashDetails
steviez Jul 29, 2023
580b2f1
Add encoding field to output file
steviez Jul 31, 2023
4263ab0
Add basic unit test to confirm serialize & deserialize are inverse
steviez Jul 31, 2023
702715c
Add quick module description
steviez Jul 31, 2023
b10c603
Move some impl over to TryFrom trait
steviez Aug 9, 2023
63688bc
Rework function to return Result
steviez Aug 9, 2023
93dd40f
Handle errors for writing files
steviez Aug 9, 2023
e3f7086
Move write function out of impl Bank
steviez Aug 9, 2023
61ebb4d
Do away with tuple struct indexing
steviez Aug 11, 2023
05ec1eb
Rename TempAccount to SerdeAccount
steviez Aug 11, 2023
b4748b8
Fixup command help and include destination directory in message
steviez Aug 11, 2023
3397f45
Use struct with named fields over tuple
steviez Aug 14, 2023
11c5ede
Move PubkeyHashAccount <==> SerdeAccount conversion to From impl's
steviez Aug 14, 2023
6aa7140
Cleanup the rebase - file didn't save the first time
steviez Aug 15, 2023
3 changes: 3 additions & 0 deletions Cargo.lock

(Generated lockfile; diff not rendered.)

99 changes: 83 additions & 16 deletions accounts-db/src/accounts_db.rs
@@ -469,7 +469,7 @@ pub(crate) struct ShrinkCollect<'a, T: ShrinkCollectRefs<'a>> {

pub const ACCOUNTS_DB_CONFIG_FOR_TESTING: AccountsDbConfig = AccountsDbConfig {
index: Some(ACCOUNTS_INDEX_CONFIG_FOR_TESTING),
accounts_hash_cache_path: None,
base_working_path: None,
filler_accounts_config: FillerAccountsConfig::const_default(),
write_cache_limit_bytes: None,
ancient_append_vec_offset: None,
@@ -480,7 +480,7 @@ pub const ACCOUNTS_DB_CONFIG_FOR_TESTING: AccountsDbConfig = AccountsDbConfig {
};
pub const ACCOUNTS_DB_CONFIG_FOR_BENCHMARKS: AccountsDbConfig = AccountsDbConfig {
index: Some(ACCOUNTS_INDEX_CONFIG_FOR_BENCHMARKS),
accounts_hash_cache_path: None,
base_working_path: None,
filler_accounts_config: FillerAccountsConfig::const_default(),
write_cache_limit_bytes: None,
ancient_append_vec_offset: None,
@@ -539,7 +539,8 @@ const ANCIENT_APPEND_VEC_DEFAULT_OFFSET: Option<i64> = Some(-10_000);
#[derive(Debug, Default, Clone)]
pub struct AccountsDbConfig {
pub index: Option<AccountsIndexConfig>,
pub accounts_hash_cache_path: Option<PathBuf>,
/// Base directory for various necessary files
pub base_working_path: Option<PathBuf>,
pub filler_accounts_config: FillerAccountsConfig,
pub write_cache_limit_bytes: Option<u64>,
/// if None, ancient append vecs are set to ANCIENT_APPEND_VEC_DEFAULT_OFFSET
@@ -1467,6 +1468,9 @@ pub struct AccountsDb {
/// Set of storage paths to pick from
pub paths: Vec<PathBuf>,

/// Base directory for various necessary files
base_working_path: PathBuf,
/// Directories for account hash calculations, within base_working_path
full_accounts_hash_cache_path: PathBuf,
incremental_accounts_hash_cache_path: PathBuf,
transient_accounts_hash_cache_path: PathBuf,
@@ -2413,6 +2417,13 @@ impl<'a> AppendVecScan for ScanState<'a> {
}
}

#[derive(Clone, Debug, Eq, PartialEq)]
pub struct PubkeyHashAccount {
pub pubkey: Pubkey,
pub hash: Hash,
pub account: AccountSharedData,
}

impl AccountsDb {
pub const ACCOUNTS_HASH_CACHE_DIR: &str = "accounts_hash_cache";

@@ -2422,20 +2433,34 @@ impl AccountsDb {

fn default_with_accounts_index(
accounts_index: AccountInfoAccountsIndex,
accounts_hash_cache_path: Option<PathBuf>,
base_working_path: Option<PathBuf>,
) -> Self {
let num_threads = get_thread_count();
const MAX_READ_ONLY_CACHE_DATA_SIZE: usize = 400_000_000; // 400M bytes

let mut temp_accounts_hash_cache_path = None;
let accounts_hash_cache_path = accounts_hash_cache_path.unwrap_or_else(|| {
temp_accounts_hash_cache_path = Some(TempDir::new().unwrap());
temp_accounts_hash_cache_path
.as_ref()
.unwrap()
.path()
.to_path_buf()
});
let (base_working_path, accounts_hash_cache_path, temp_accounts_hash_cache_path) =
match base_working_path {
Some(base_working_path) => {
let accounts_hash_cache_path =
base_working_path.join(Self::ACCOUNTS_HASH_CACHE_DIR);
(base_working_path, accounts_hash_cache_path, None)
}
None => {
let temp_accounts_hash_cache_path = Some(TempDir::new().unwrap());
let base_working_path = temp_accounts_hash_cache_path
.as_ref()
.unwrap()
.path()
.to_path_buf();
let accounts_hash_cache_path =
base_working_path.join(Self::ACCOUNTS_HASH_CACHE_DIR);
(
base_working_path,
accounts_hash_cache_path,
temp_accounts_hash_cache_path,
)
}
};

let mut bank_hash_stats = HashMap::new();
bank_hash_stats.insert(0, BankHashStats::default());
@@ -2464,6 +2489,7 @@ impl AccountsDb {
write_cache_limit_bytes: None,
write_version: AtomicU64::new(0),
paths: vec![],
base_working_path,
full_accounts_hash_cache_path: accounts_hash_cache_path.join("full"),
incremental_accounts_hash_cache_path: accounts_hash_cache_path.join("incremental"),
transient_accounts_hash_cache_path: accounts_hash_cache_path.join("transient"),
@@ -2545,9 +2571,9 @@ impl AccountsDb {
accounts_db_config.as_mut().and_then(|x| x.index.take()),
exit,
);
let accounts_hash_cache_path = accounts_db_config
let base_working_path = accounts_db_config
.as_ref()
.and_then(|x| x.accounts_hash_cache_path.clone());
.and_then(|x| x.base_working_path.clone());

let filler_accounts_config = accounts_db_config
.as_ref()
@@ -2603,7 +2629,7 @@ impl AccountsDb {
.and_then(|x| x.write_cache_limit_bytes),
partitioned_epoch_rewards_config,
exhaustively_verify_refcounts,
..Self::default_with_accounts_index(accounts_index, accounts_hash_cache_path)
..Self::default_with_accounts_index(accounts_index, base_working_path)
};
if paths_is_empty {
// Create a temporary set of accounts directories, used primarily
@@ -2650,6 +2676,11 @@ impl AccountsDb {
self.file_size
}

/// Get the base working directory
pub fn get_base_working_path(&self) -> PathBuf {
self.base_working_path.clone()
}

pub fn new_single_for_tests() -> Self {
AccountsDb::new_for_tests(Vec::new(), &ClusterType::Development)
}
@@ -7856,6 +7887,42 @@ impl AccountsDb {
(hashes, scan.as_us(), accumulate)
}

/// Return all of the accounts for a given slot
pub fn get_pubkey_hash_account_for_slot(&self, slot: Slot) -> Vec<PubkeyHashAccount> {
type ScanResult =
ScanStorageResult<PubkeyHashAccount, DashMap<Pubkey, (Hash, AccountSharedData)>>;
let scan_result: ScanResult = self.scan_account_storage(
slot,
|loaded_account: LoadedAccount| {
// Cache only has one version per key, don't need to worry about versioning
Some(PubkeyHashAccount {
pubkey: *loaded_account.pubkey(),
hash: loaded_account.loaded_hash(),
account: loaded_account.take_account(),
})
},
|accum: &DashMap<Pubkey, (Hash, AccountSharedData)>, loaded_account: LoadedAccount| {
// Storage may have duplicates so only keep the latest version for each key
accum.insert(
*loaded_account.pubkey(),
(loaded_account.loaded_hash(), loaded_account.take_account()),
);
},
);

match scan_result {
ScanStorageResult::Cached(cached_result) => cached_result,
ScanStorageResult::Stored(stored_result) => stored_result
.into_iter()
.map(|(pubkey, (hash, account))| PubkeyHashAccount {
pubkey,
hash,
account,
})
.collect(),
}
}

/// Calculate accounts delta hash for `slot`
///
/// As part of calculating the accounts delta hash, get a list of accounts modified this slot
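The scan above yields one PubkeyHashAccount per account touched in the slot. Per the commit history, these records are then mirrored into a serde-friendly type (renamed SerdeAccount during review) whose account data is base64-encoded, with the conversions expressed as From impls. A minimal sketch of that conversion pattern follows; the field names and the stand-in key/hash types are illustrative assumptions, not the PR's exact definitions.

use serde::{Deserialize, Serialize};

// Illustrative stand-in for the runtime's PubkeyHashAccount shown above; the real
// struct holds a Pubkey, a Hash, and an AccountSharedData.
pub struct ScannedAccount {
    pub pubkey: [u8; 32],
    pub hash: [u8; 32],
    pub lamports: u64,
    pub data: Vec<u8>,
}

// Serde-friendly mirror; per the commits, account data is base64-encoded so the
// emitted JSON stays printable. Field names here are assumptions.
#[derive(Serialize, Deserialize)]
pub struct SerdeAccount {
    pub pubkey: String,
    pub hash: String,
    pub lamports: u64,
    pub data: String, // base64-encoded account data
}

impl From<&ScannedAccount> for SerdeAccount {
    fn from(acct: &ScannedAccount) -> Self {
        use base64::Engine as _;
        Self {
            // The real code stringifies Pubkey/Hash (base58 Display); hex keeps this sketch short.
            pubkey: acct.pubkey.iter().map(|b| format!("{b:02x}")).collect(),
            hash: acct.hash.iter().map(|b| format!("{b:02x}")).collect(),
            lamports: acct.lamports,
            data: base64::engine::general_purpose::STANDARD.encode(&acct.data),
        }
    }
}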
3 changes: 2 additions & 1 deletion core/src/replay_stage.rs
@@ -59,7 +59,7 @@ use {
solana_rpc_client_api::response::SlotUpdate,
solana_runtime::{
accounts_background_service::AbsRequestSender,
bank::{Bank, NewBankOptions},
bank::{bank_hash_details, Bank, NewBankOptions},
bank_forks::{BankForks, MAX_ROOT_DISTANCE_FOR_VOTE_ONLY},
commitment::BlockCommitmentCache,
prioritization_fee_cache::PrioritizationFeeCache,
@@ -1500,6 +1500,7 @@ impl ReplayStage {
let bank = w_bank_forks
.remove(*slot)
.expect("BankForks should not have been purged yet");
let _ = bank_hash_details::write_bank_hash_details_file(&bank);
((*slot, bank.bank_id()), bank)
})
.unzip()
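write_bank_hash_details_file returns a Result (see the "Rework function to return Result" and "Handle errors for writing files" commits), and the call site above deliberately discards it with `let _ =`: dumping diagnostics for a divergent bank is best-effort and must not interfere with purging that bank. A rough sketch of the write path, assuming the <LEDGER_DIR>/bank_hash_details/ layout from the CLI help below and an invented <slot>.json file name:

use std::{fs, io::Write, path::Path};

// Best-effort diagnostic dump: ensure <base>/bank_hash_details/ exists, then write one
// JSON file for the bank. Signature and file name are illustrative, not the PR's exact API.
fn write_bank_hash_details_file(base: &Path, slot: u64, json: &str) -> std::io::Result<()> {
    let dir = base.join("bank_hash_details");
    fs::create_dir_all(&dir)?;
    fs::File::create(dir.join(format!("{slot}.json")))?.write_all(json.as_bytes())
}

// Mirroring the replay change above, callers ignore failures rather than propagate them:
// let _ = write_bank_hash_details_file(&base_working_path, slot, &json);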
4 changes: 2 additions & 2 deletions ledger-tool/src/args.rs
@@ -1,7 +1,7 @@
use {
clap::{value_t, values_t_or_exit, ArgMatches},
solana_accounts_db::{
accounts_db::{AccountsDb, AccountsDbConfig, FillerAccountsConfig},
accounts_db::{AccountsDbConfig, FillerAccountsConfig},
accounts_index::{AccountsIndexConfig, IndexLimitMb},
partitioned_rewards::TestPartitionedEpochRewards,
},
@@ -57,7 +57,7 @@ pub fn get_accounts_db_config(

AccountsDbConfig {
index: Some(accounts_index_config),
accounts_hash_cache_path: Some(ledger_path.join(AccountsDb::ACCOUNTS_HASH_CACHE_DIR)),
base_working_path: Some(ledger_path.to_path_buf()),
filler_accounts_config,
ancient_append_vec_offset: value_t!(arg_matches, "accounts_db_ancient_append_vecs", i64)
.ok(),
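Worth noting on the config change above: ledger-tool now hands AccountsDb only the base directory (the ledger path) and no longer pre-joins the accounts_hash_cache subdirectory; AccountsDb derives that path itself from base_working_path (see ACCOUNTS_HASH_CACHE_DIR earlier in the diff), which is why the AccountsDb import could be dropped here. When no base is supplied, as in the test and benchmark configs, AccountsDb falls back to a TempDir that it keeps alive for its own lifetime, and the new get_base_working_path() accessor exposes the chosen directory to callers such as the bank hash details writer.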
15 changes: 14 additions & 1 deletion ledger-tool/src/main.rs
@@ -50,7 +50,7 @@ use {
},
solana_measure::{measure, measure::Measure},
solana_runtime::{
bank::{Bank, RewardCalculationEvent, TotalAccountsStats},
bank::{bank_hash_details, Bank, RewardCalculationEvent, TotalAccountsStats},
bank_forks::BankForks,
runtime_config::RuntimeConfig,
snapshot_archive_info::SnapshotArchiveInfoGetter,
@@ -1663,6 +1663,14 @@ fn main() {
.takes_value(false)
.help("After verifying the ledger, print some information about the account stores"),
)
.arg(
Arg::with_name("write_bank_file")
.long("write-bank-file")
.takes_value(false)
.help("After verifying the ledger, write a file that contains the information \
that went into computing the completed bank's bank hash. The file will be \
written within <LEDGER_DIR>/bank_hash_details/"),
)
).subcommand(
SubCommand::with_name("graph")
.about("Create a Graphviz rendering of the ledger")
@@ -2645,6 +2653,7 @@ fn main() {
..ProcessOptions::default()
};
let print_accounts_stats = arg_matches.is_present("print_accounts_stats");
let write_bank_file = arg_matches.is_present("write_bank_file");
let genesis_config = open_genesis_config_by(&ledger_path, arg_matches);
info!("genesis hash: {}", genesis_config.hash());

Expand All @@ -2671,6 +2680,10 @@ fn main() {
let working_bank = bank_forks.read().unwrap().working_bank();
working_bank.print_accounts_stats();
}
if write_bank_file {
let working_bank = bank_forks.read().unwrap().working_bank();
let _ = bank_hash_details::write_bank_hash_details_file(&working_bank);
}
exit_signal.store(true, Ordering::Relaxed);
system_monitor_service.join().unwrap();
}
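Taken together with the replay hook, this gives an offline path to the same data: a run such as `solana-ledger-tool verify --write-bank-file` (assuming the usual verify subcommand; other flags omitted) replays the ledger and then dumps the working bank's hash inputs to <LEDGER_DIR>/bank_hash_details/. As in replay, the write is best-effort and its Result is discarded with `let _ =`.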
3 changes: 3 additions & 0 deletions programs/sbf/Cargo.lock

(Generated lockfile; diff not rendered.)

3 changes: 3 additions & 0 deletions runtime/Cargo.toml
@@ -11,6 +11,7 @@ edition = { workspace = true }

[dependencies]
arrayref = { workspace = true }
base64 = { workspace = true }
bincode = { workspace = true }
blake3 = { workspace = true }
bv = { workspace = true, features = ["serde"] }
@@ -44,6 +45,7 @@ rayon = { workspace = true }
regex = { workspace = true }
serde = { workspace = true, features = ["rc"] }
serde_derive = { workspace = true }
serde_json = { workspace = true }
siphasher = { workspace = true }
solana-accounts-db = { workspace = true }
solana-address-lookup-table-program = { workspace = true }
@@ -63,6 +65,7 @@ solana-rayon-threadlimit = { workspace = true }
solana-sdk = { workspace = true }
solana-stake-program = { workspace = true }
solana-system-program = { workspace = true }
solana-version = { workspace = true }
solana-vote-program = { workspace = true }
solana-zk-token-proof-program = { workspace = true }
solana-zk-token-sdk = { workspace = true }
1 change: 1 addition & 0 deletions runtime/src/bank.rs
@@ -212,6 +212,7 @@ struct VerifyAccountsHashConfig {
}

mod address_lookup_table;
pub mod bank_hash_details;
mod builtin_programs;
pub mod epoch_accounts_hash_utils;
mod metrics;