diff --git a/Cargo.lock b/Cargo.lock index 02ccdb5080c5bf..662a5e61158443 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6459,6 +6459,7 @@ dependencies = [ "num_cpus", "num_enum 0.7.2", "prost", + "qualifier_attr", "rand 0.8.5", "rand_chacha 0.3.1", "rayon", diff --git a/core/Cargo.toml b/core/Cargo.toml index bc1bd4549f6751..182d3deaba836c 100644 --- a/core/Cargo.toml +++ b/core/Cargo.toml @@ -90,6 +90,7 @@ serde_json = { workspace = true } serial_test = { workspace = true } # See order-crates-for-publishing.py for using this unusual `path = "."` solana-core = { path = ".", features = ["dev-context-only-utils"] } +solana-ledger = { workspace = true, features = ["dev-context-only-utils"] } solana-logger = { workspace = true } solana-poh = { workspace = true, features = ["dev-context-only-utils"] } solana-program-runtime = { workspace = true } diff --git a/core/src/replay_stage.rs b/core/src/replay_stage.rs index ef24ccd6f2d809..b36a58a134f015 100644 --- a/core/src/replay_stage.rs +++ b/core/src/replay_stage.rs @@ -1285,8 +1285,14 @@ impl ReplayStage { ) -> (ProgressMap, HeaviestSubtreeForkChoice) { let (root_bank, frozen_banks, duplicate_slot_hashes) = { let bank_forks = bank_forks.read().unwrap(); + let root_bank = bank_forks.root_bank(); let duplicate_slots = blockstore - .duplicate_slots_iterator(bank_forks.root_bank().slot()) + // It is important that the root bank is not marked as duplicate on initialization. 
+ // Although this bank could contain a duplicate proof, the fact that it was rooted + // either during a previous run or artificially means that we should ignore any + // duplicate proofs for the root slot, thus we start consuming duplicate proofs + // from the root slot + 1 .duplicate_slots_iterator(root_bank.slot().saturating_add(1)) .unwrap(); let duplicate_slot_hashes = duplicate_slots.filter_map(|slot| { let bank = bank_forks.get(slot)?; @@ -1295,7 +1301,7 @@ impl ReplayStage { .then_some((slot, bank.hash())) }); ( - bank_forks.root_bank(), + root_bank, bank_forks.frozen_banks().values().cloned().collect(), duplicate_slot_hashes.collect::<Vec<_>>(), ) @@ -4304,6 +4310,9 @@ pub(crate) mod tests { replay_stage::ReplayStage, vote_simulator::{self, VoteSimulator}, }, + blockstore_processor::{ + confirm_full_slot, fill_blockstore_slot_with_ticks, process_bank_0, ProcessOptions, + }, crossbeam_channel::unbounded, itertools::Itertools, solana_entry::entry::{self, Entry}, @@ -8715,4 +8724,136 @@ pub(crate) mod tests { assert_eq!(tower.vote_state, expected_tower.vote_state); assert_eq!(tower.node_pubkey, expected_tower.node_pubkey); } + + #[test] + fn test_initialize_progress_and_fork_choice_with_duplicates() { + solana_logger::setup(); + let GenesisConfigInfo { + mut genesis_config, .. 
+ } = create_genesis_config(123); + + let ticks_per_slot = 1; + genesis_config.ticks_per_slot = ticks_per_slot; + let (ledger_path, blockhash) = + solana_ledger::create_new_tmp_ledger_auto_delete!(&genesis_config); + let blockstore = Blockstore::open(ledger_path.path()).unwrap(); + + /* + Bank forks with: + slot 0 + | + slot 1 -> Duplicate before restart, the restart slot + | + slot 2 + | + slot 3 -> Duplicate before restart, artificially rooted + | + slot 4 -> Duplicate before restart, artificially rooted + | + slot 5 -> Duplicate before restart + | + slot 6 + */ + + let mut last_hash = blockhash; + for i in 0..6 { + last_hash = + fill_blockstore_slot_with_ticks(&blockstore, ticks_per_slot, i + 1, i, last_hash); + } + // Artifically root 3 and 4 + blockstore.set_roots([3, 4].iter()).unwrap(); + + // Set up bank0 + let bank_forks = BankForks::new_rw_arc(Bank::new_for_tests(&genesis_config)); + let bank0 = bank_forks.read().unwrap().get_with_scheduler(0).unwrap(); + let recyclers = VerifyRecyclers::default(); + let replay_tx_thread_pool = rayon::ThreadPoolBuilder::new() + .num_threads(1) + .thread_name(|i| format!("solReplayTx{i:02}")) + .build() + .expect("new rayon threadpool"); + + process_bank_0( + &bank0, + &blockstore, + &replay_tx_thread_pool, + &ProcessOptions::default(), + &recyclers, + None, + None, + ); + + // Mark block 1, 3, 4, 5 as duplicate + blockstore.store_duplicate_slot(1, vec![], vec![]).unwrap(); + blockstore.store_duplicate_slot(3, vec![], vec![]).unwrap(); + blockstore.store_duplicate_slot(4, vec![], vec![]).unwrap(); + blockstore.store_duplicate_slot(5, vec![], vec![]).unwrap(); + + let bank1 = bank_forks.write().unwrap().insert(Bank::new_from_parent( + bank0.clone_without_scheduler(), + &Pubkey::default(), + 1, + )); + confirm_full_slot( + &blockstore, + &bank1, + &replay_tx_thread_pool, + &ProcessOptions::default(), + &recyclers, + &mut ConfirmationProgress::new(bank0.last_blockhash()), + None, + None, + None, + &mut 
ExecuteTimings::default(), + ) + .unwrap(); + + bank_forks + .write() + .unwrap() + .set_root( + 1, + &solana_runtime::accounts_background_service::AbsRequestSender::default(), + None, + ) + .unwrap(); + + let leader_schedule_cache = LeaderScheduleCache::new_from_bank(&bank1); + + // process_blockstore_from_root() from slot 1 onwards + blockstore_processor::process_blockstore_from_root( + &blockstore, + &bank_forks, + &leader_schedule_cache, + &ProcessOptions::default(), + None, + None, + None, + &AbsRequestSender::default(), + ) + .unwrap(); + + assert_eq!(bank_forks.read().unwrap().root(), 4); + + // Verify that fork choice can be initialized and that the root is not marked duplicate + let (_progress, fork_choice) = + ReplayStage::initialize_progress_and_fork_choice_with_locked_bank_forks( + &bank_forks, + &Pubkey::new_unique(), + &Pubkey::new_unique(), + &blockstore, + ); + + let bank_forks = bank_forks.read().unwrap(); + // 4 (the artificial root) is the tree root and no longer duplicate + assert_eq!(fork_choice.tree_root().0, 4); + assert!(fork_choice + .is_candidate(&(4, bank_forks.bank_hash(4).unwrap())) + .unwrap()); + + // 5 is still considered duplicate, so it is not a valid fork choice candidate + assert!(!fork_choice + .is_candidate(&(5, bank_forks.bank_hash(5).unwrap())) + .unwrap()); + } } diff --git a/ledger/Cargo.toml b/ledger/Cargo.toml index 87ba0c39235a12..c849bba1630647 100644 --- a/ledger/Cargo.toml +++ b/ledger/Cargo.toml @@ -29,6 +29,7 @@ mockall = { workspace = true } num_cpus = { workspace = true } num_enum = { workspace = true } prost = { workspace = true } +qualifier_attr = { workspace = true } rand = { workspace = true } rand_chacha = { workspace = true } rayon = { workspace = true } diff --git a/ledger/src/blockstore_processor.rs b/ledger/src/blockstore_processor.rs index 4e093814b9bb3a..079b7017feeecb 100644 --- a/ledger/src/blockstore_processor.rs +++ b/ledger/src/blockstore_processor.rs @@ -1,3 +1,5 @@ +#[cfg(feature = 
"dev-context-only-utils")] +use qualifier_attr::qualifiers; use { crate::{ block_error::BlockError, @@ -974,6 +976,8 @@ fn verify_ticks( Ok(()) } +#[allow(clippy::too_many_arguments)] +#[cfg_attr(feature = "dev-context-only-utils", qualifiers(pub))] fn confirm_full_slot( blockstore: &Blockstore, bank: &BankWithScheduler, @@ -1378,6 +1385,7 @@ fn confirm_slot_entries( } // Special handling required for processing the entries in slot 0 +#[cfg_attr(feature = "dev-context-only-utils", qualifiers(pub))] fn process_bank_0( bank0: &BankWithScheduler, blockstore: &Blockstore, diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index 5d61ac882b55ae..64401b7e6cf69c 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -5323,6 +5323,7 @@ dependencies = [ "num_cpus", "num_enum 0.7.2", "prost", + "qualifier_attr", "rand 0.8.5", "rand_chacha 0.3.1", "rayon",