From 5981bd52c81895f5d92ea0ac76bb64c59c7f1f8d Mon Sep 17 00:00:00 2001 From: Tiram <18632023+tiram88@users.noreply.github.com> Date: Sun, 16 Jul 2023 13:26:28 +0300 Subject: [PATCH 01/86] Split mempool atomic validate and insert transaction in 3 steps --- mining/src/manager.rs | 50 +++++++++++-- mining/src/manager_tests.rs | 71 ++++++++++--------- .../populate_entries_and_try_validate.rs | 11 ++- .../validate_and_insert_transaction.rs | 36 +++++----- 4 files changed, 109 insertions(+), 59 deletions(-) diff --git a/mining/src/manager.rs b/mining/src/manager.rs index 4832bef4d0..cf383b62a8 100644 --- a/mining/src/manager.rs +++ b/mining/src/manager.rs @@ -8,6 +8,7 @@ use crate::{ errors::MiningManagerResult, mempool::{ config::Config, + populate_entries_and_try_validate::validate_mempool_transaction_and_populate, tx::{Orphan, Priority}, Mempool, }, @@ -20,11 +21,12 @@ use kaspa_consensus_core::{ api::ConsensusApi, block::BlockTemplate, coinbase::MinerData, - errors::block::RuleError, + errors::block::RuleError as BlockRuleError, tx::{MutableTransaction, Transaction, TransactionId, TransactionOutput}, }; use kaspa_consensusmanager::{spawn_blocking, ConsensusProxy}; use kaspa_core::error; +use kaspa_mining_errors::mempool::RuleResult; use parking_lot::{Mutex, RwLock}; pub struct MiningManager { @@ -81,7 +83,7 @@ impl MiningManager { let block_template = cache_lock.set_immutable_cached_template(block_template); return Ok(block_template.as_ref().clone()); } - Err(BuilderError::ConsensusError(RuleError::InvalidTransactionsInNewBlock(invalid_transactions))) => { + Err(BuilderError::ConsensusError(BlockRuleError::InvalidTransactionsInNewBlock(invalid_transactions))) => { let mut mempool_write = self.mempool.write(); invalid_transactions.iter().for_each(|(x, _)| { let removal_result = mempool_write.remove_transaction(x, true); @@ -121,6 +123,7 @@ impl MiningManager { /// added to any block. /// /// The returned transactions are clones of objects owned by the mempool. + #[cfg(test)] pub fn validate_and_insert_transaction( &self, consensus: &dyn ConsensusApi, @@ -128,7 +131,7 @@ impl MiningManager { priority: Priority, orphan: Orphan, ) -> MiningManagerResult>> { - Ok(self.mempool.write().validate_and_insert_transaction(consensus, transaction, priority, orphan)?) + self.validate_and_insert_mutable_transaction(consensus, MutableTransaction::from_tx(transaction), priority, orphan) } /// Exposed only for tests. Ordinary users should let the mempool create the mutable tx internally @@ -140,7 +143,12 @@ impl MiningManager { priority: Priority, orphan: Orphan, ) -> MiningManagerResult>> { - Ok(self.mempool.write().validate_and_insert_mutable_transaction(consensus, transaction, priority, orphan)?) + // read lock on mempool + let mut transaction = self.mempool.read().pre_validate_and_populate_transaction(consensus, transaction)?; + // no lock on mempool + let validation_result = validate_mempool_transaction_and_populate(consensus, &mut transaction); + // write lock on mempool + Ok(self.mempool.write().post_validate_and_insert_transaction(consensus, validation_result, transaction, priority, orphan)?) } /// Try to return a mempool transaction by its id. 
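Taken together, the three new mempool entry points above replace the old atomic call with a staged pattern around the mempool lock. A minimal sketch of that pattern, using only the functions introduced in this patch (error handling elided, so this is a sketch rather than the literal proxy body):

    // 1. read lock on mempool: isolation checks, double-spend check, population of known mempool entries
    let mut transaction = self.mempool.read().pre_validate_and_populate_transaction(consensus, transaction)?;
    // 2. no lock on mempool: the heavy consensus validation runs while other mempool users proceed
    let validation_result = validate_mempool_transaction_and_populate(consensus, &mut transaction);
    // 3. write lock on mempool: insert or reject based on the result computed outside the lock
    self.mempool.write().post_validate_and_insert_transaction(consensus, validation_result, transaction, priority, orphan)

The price of giving up atomicity is that double spends must be re-checked under the write lock in step 3, which is exactly what post_validate_and_insert_transaction does below.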
@@ -208,6 +216,25 @@ impl MiningManager { pub fn is_transaction_output_dust(&self, transaction_output: &TransactionOutput) -> bool { self.mempool.read().is_transaction_output_dust(transaction_output) } + + fn pre_validate_and_populate_transaction( + &self, + consensus: &dyn ConsensusApi, + transaction: MutableTransaction, + ) -> MiningManagerResult { + Ok(self.mempool.read().pre_validate_and_populate_transaction(consensus, transaction)?) + } + + fn post_validate_and_insert_transaction( + &self, + consensus: &dyn ConsensusApi, + transaction: MutableTransaction, + validation_result: RuleResult<()>, + priority: Priority, + orphan: Orphan, + ) -> MiningManagerResult>> { + Ok(self.mempool.write().post_validate_and_insert_transaction(consensus, validation_result, transaction, priority, orphan)?) + } } /// Async proxy for the mining manager @@ -242,7 +269,20 @@ impl MiningManagerProxy { priority: Priority, orphan: Orphan, ) -> MiningManagerResult>> { - consensus.clone().spawn_blocking(move |c| self.inner.validate_and_insert_transaction(c, transaction, priority, orphan)).await + let mut transaction = MutableTransaction::from_tx(transaction); + let inner = self.inner.clone(); + // read lock on mempool + transaction = consensus.clone().spawn_blocking(move |c| inner.pre_validate_and_populate_transaction(c, transaction)).await?; + // no lock on mempool + let (result, transaction) = consensus + .clone() + .spawn_blocking(move |c| (validate_mempool_transaction_and_populate(c, &mut transaction), transaction)) + .await; + // write lock on mempool + consensus + .clone() + .spawn_blocking(move |c| self.inner.post_validate_and_insert_transaction(c, transaction, result, priority, orphan)) + .await } pub async fn handle_new_block_transactions( diff --git a/mining/src/manager_tests.rs b/mining/src/manager_tests.rs index 6a1637e2ce..fbefbcb15d 100644 --- a/mining/src/manager_tests.rs +++ b/mining/src/manager_tests.rs @@ -8,7 +8,6 @@ mod tests { config::{Config, DEFAULT_MINIMUM_RELAY_TRANSACTION_FEE}, errors::RuleError, tx::{Orphan, Priority}, - Mempool, }, model::candidate_tx::CandidateTransaction, testutils::consensus_mock::ConsensusMock, @@ -41,10 +40,10 @@ mod tests { fn test_validate_and_insert_transaction() { const TX_COUNT: u32 = 10; let consensus = Arc::new(ConsensusMock::new()); - let mut mempool = Mempool::new(Config::build_default(TARGET_TIME_PER_BLOCK, false, MAX_BLOCK_MASS)); + let mining_manager = MiningManager::new(TARGET_TIME_PER_BLOCK, false, MAX_BLOCK_MASS, None); let transactions_to_insert = (0..TX_COUNT).map(|i| create_transaction_with_utxo_entry(i, 0)).collect::>(); for transaction in transactions_to_insert.iter() { - let result = mempool.validate_and_insert_mutable_transaction( + let result = mining_manager.validate_and_insert_mutable_transaction( consensus.as_ref(), transaction.clone(), Priority::Low, @@ -55,7 +54,7 @@ mod tests { // The UtxoEntry was filled manually for those transactions, so the transactions won't be considered orphans. // Therefore, all the transactions are expected to be contained in the mempool. - let (transactions_from_pool, _) = mempool.get_all_transactions(true, false); + let (transactions_from_pool, _) = mining_manager.get_all_transactions(true, false); assert_eq!( transactions_to_insert.len(), transactions_from_pool.len(), @@ -92,14 +91,14 @@ mod tests { // The parent's transaction was inserted into the consensus, so we want to verify that // the child transaction is not considered an orphan and is inserted into the mempool.
let transaction_not_an_orphan = create_child_and_parent_txs_and_add_parent_to_consensus(&consensus); - let result = mempool.validate_and_insert_transaction( + let result = mining_manager.validate_and_insert_transaction( consensus.as_ref(), transaction_not_an_orphan.clone(), Priority::Low, Orphan::Allowed, ); assert!(result.is_ok(), "inserting the child transaction {} into the mempool failed", transaction_not_an_orphan.id()); - let (transactions_from_pool, _) = mempool.get_all_transactions(true, false); + let (transactions_from_pool, _) = mining_manager.get_all_transactions(true, false); assert!( contained_by_mtxs(transaction_not_an_orphan.id(), &transactions_from_pool), "missing transaction {} in the mempool", transaction_not_an_orphan.id() ); } @@ -113,7 +112,7 @@ mod tests { #[test] fn test_simulated_error_in_consensus() { let consensus = Arc::new(ConsensusMock::new()); - let mut mempool = Mempool::new(Config::build_default(TARGET_TIME_PER_BLOCK, false, MAX_BLOCK_MASS)); + let mining_manager = MiningManager::new(TARGET_TIME_PER_BLOCK, false, MAX_BLOCK_MASS, None); // Build an invalid transaction with some gas and inform the consensus mock about the result it should return // when the mempool submits this transaction for validation. @@ -123,17 +122,18 @@ mod tests { consensus.set_status(transaction.id(), status.clone()); // Try validate and insert the transaction into the mempool - let result = into_status( - mempool - .validate_and_insert_transaction(consensus.as_ref(), transaction.tx.as_ref().clone(), Priority::Low, Orphan::Allowed) - .map_err(MiningManagerError::from), - ); + let result = into_status(mining_manager.validate_and_insert_transaction( + consensus.as_ref(), + transaction.tx.as_ref().clone(), + Priority::Low, + Orphan::Allowed, + )); assert_eq!( status, result, "Unexpected result when trying to insert an invalid transaction: expected: {status:?}, got: {result:?}", ); - let pool_tx = mempool.get_transaction(&transaction.id(), true, true); + let pool_tx = mining_manager.get_transaction(&transaction.id(), true, true); assert!(pool_tx.is_none(), "Mempool contains a transaction that should have been rejected"); } @@ -142,24 +142,28 @@ mod tests { #[test] fn test_insert_double_transactions_to_mempool() { let consensus = Arc::new(ConsensusMock::new()); - let mut mempool = Mempool::new(Config::build_default(TARGET_TIME_PER_BLOCK, false, MAX_BLOCK_MASS)); + let mining_manager = MiningManager::new(TARGET_TIME_PER_BLOCK, false, MAX_BLOCK_MASS, None); let transaction = create_transaction_with_utxo_entry(0, 0); // submit the transaction to the mempool - let result = - mempool.validate_and_insert_mutable_transaction(consensus.as_ref(), transaction.clone(), Priority::Low, Orphan::Allowed); + let result = mining_manager.validate_and_insert_mutable_transaction( + consensus.as_ref(), + transaction.clone(), + Priority::Low, + Orphan::Allowed, + ); assert!(result.is_ok(), "mempool should have accepted a valid transaction but did not"); // submit the same transaction again to the mempool - let result = mempool.validate_and_insert_transaction( + let result = mining_manager.validate_and_insert_transaction( consensus.as_ref(), transaction.tx.as_ref().clone(), Priority::Low, Orphan::Allowed, ); assert!(result.is_err(), "mempool should refuse a double submit of the same transaction but accepts it"); - if let Err(RuleError::RejectDuplicate(transaction_id)) = result { + if let Err(MiningManagerError::MempoolError(RuleError::RejectDuplicate(transaction_id))) = result { assert_eq!( transaction.id(), transaction_id, @@ -180,7 +184,7 @@
mod tests { #[test] fn test_double_spend_in_mempool() { let consensus = Arc::new(ConsensusMock::new()); - let mut mempool = Mempool::new(Config::build_default(TARGET_TIME_PER_BLOCK, false, MAX_BLOCK_MASS)); + let mining_manager = MiningManager::new(TARGET_TIME_PER_BLOCK, false, MAX_BLOCK_MASS, None); let transaction = create_child_and_parent_txs_and_add_parent_to_consensus(&consensus); assert!( @@ -189,7 +193,8 @@ mod tests { transaction.id() ); - let result = mempool.validate_and_insert_transaction(consensus.as_ref(), transaction.clone(), Priority::Low, Orphan::Allowed); + let result = + mining_manager.validate_and_insert_transaction(consensus.as_ref(), transaction.clone(), Priority::Low, Orphan::Allowed); assert!(result.is_ok(), "the mempool should accept a valid transaction when it is able to populate its UTXO entries"); let mut double_spending_transaction = transaction.clone(); @@ -200,14 +205,14 @@ mod tests { double_spending_transaction.id(), "two transactions differing by only one output value should have different ids" ); - let result = mempool.validate_and_insert_transaction( + let result = mining_manager.validate_and_insert_transaction( consensus.as_ref(), double_spending_transaction.clone(), Priority::Low, Orphan::Allowed, ); assert!(result.is_err(), "mempool should refuse a double spend transaction but accepts it"); - if let Err(RuleError::RejectDoubleSpendInMempool(_, transaction_id)) = result { + if let Err(MiningManagerError::MempoolError(RuleError::RejectDoubleSpendInMempool(_, transaction_id))) = result { assert_eq!( transaction.id(), transaction_id, @@ -227,12 +232,12 @@ mod tests { #[test] fn test_handle_new_block_transactions() { let consensus = Arc::new(ConsensusMock::new()); - let mut mempool = Mempool::new(Config::build_default(TARGET_TIME_PER_BLOCK, false, MAX_BLOCK_MASS)); + let mining_manager = MiningManager::new(TARGET_TIME_PER_BLOCK, false, MAX_BLOCK_MASS, None); const TX_COUNT: u32 = 10; let transactions_to_insert = (0..TX_COUNT).map(|i| create_transaction_with_utxo_entry(i, 0)).collect::>(); for transaction in transactions_to_insert.iter() { - let result = mempool.validate_and_insert_transaction( + let result = mining_manager.validate_and_insert_transaction( consensus.as_ref(), transaction.tx.as_ref().clone(), Priority::Low, @@ -247,14 +252,14 @@ mod tests { let block_with_first_part = build_block_transactions(first_part.iter().map(|mtx| mtx.tx.as_ref())); let block_with_rest = build_block_transactions(rest.iter().map(|mtx| mtx.tx.as_ref())); - let result = mempool.handle_new_block_transactions(consensus.as_ref(), &block_with_first_part); + let result = mining_manager.handle_new_block_transactions(consensus.as_ref(), &block_with_first_part); assert!( result.is_ok(), "the handling by the mempool of the transactions of a block accepted by the consensus should succeed but returned {result:?}" ); for handled_tx_id in first_part.iter().map(|x| x.id()) { assert!( - mempool.get_transaction(&handled_tx_id, true, true).is_none(), + mining_manager.get_transaction(&handled_tx_id, true, true).is_none(), "the transaction {handled_tx_id} should not be in the mempool" ); } @@ -262,20 +267,20 @@ mod tests { // transactions, will still be included in the mempool. for handled_tx_id in rest.iter().map(|x| x.id()) { assert!( - mempool.get_transaction(&handled_tx_id, true, true).is_some(), + mining_manager.get_transaction(&handled_tx_id, true, true).is_some(), "the transaction {handled_tx_id} is lacking from the mempool" ); } // Handle all the other transactions. 
- let result = mempool.handle_new_block_transactions(consensus.as_ref(), &block_with_rest); + let result = mining_manager.handle_new_block_transactions(consensus.as_ref(), &block_with_rest); assert!( result.is_ok(), "the handling by the mempool of the transactions of a block accepted by the consensus should succeed but returned {result:?}" ); for handled_tx_id in rest.iter().map(|x| x.id()) { assert!( - mempool.get_transaction(&handled_tx_id, true, true).is_none(), + mining_manager.get_transaction(&handled_tx_id, true, true).is_none(), "the transaction {handled_tx_id} should no longer be in the mempool" ); } @@ -286,10 +291,10 @@ mod tests { // will be removed from the mempool. fn test_double_spend_with_block() { let consensus = Arc::new(ConsensusMock::new()); - let mut mempool = Mempool::new(Config::build_default(TARGET_TIME_PER_BLOCK, false, MAX_BLOCK_MASS)); + let mining_manager = MiningManager::new(TARGET_TIME_PER_BLOCK, false, MAX_BLOCK_MASS, None); let transaction_in_the_mempool = create_transaction_with_utxo_entry(0, 0); - let result = mempool.validate_and_insert_transaction( + let result = mining_manager.validate_and_insert_transaction( consensus.as_ref(), transaction_in_the_mempool.tx.as_ref().clone(), Priority::Low, @@ -302,11 +307,11 @@ mod tests { transaction_in_the_mempool.tx.inputs[0].previous_outpoint; let block_transactions = build_block_transactions(std::iter::once(double_spend_transaction_in_the_block.tx.as_ref())); - let result = mempool.handle_new_block_transactions(consensus.as_ref(), &block_transactions); + let result = mining_manager.handle_new_block_transactions(consensus.as_ref(), &block_transactions); assert!(result.is_ok()); assert!( - mempool.get_transaction(&transaction_in_the_mempool.id(), true, true).is_none(), + mining_manager.get_transaction(&transaction_in_the_mempool.id(), true, true).is_none(), "the transaction {} shouldn't be in the mempool since at least one output was already spent", transaction_in_the_mempool.id() ); diff --git a/mining/src/mempool/populate_entries_and_try_validate.rs b/mining/src/mempool/populate_entries_and_try_validate.rs index 615bbf491f..112a0622bf 100644 --- a/mining/src/mempool/populate_entries_and_try_validate.rs +++ b/mining/src/mempool/populate_entries_and_try_validate.rs @@ -14,11 +14,11 @@ impl Mempool { // missingOutpoints is reduced to a simple ConsensusError::TxMissingOutpoints. self.populate_mempool_entries(transaction); - consensus.validate_mempool_transaction_and_populate(transaction)?; + validate_mempool_transaction_and_populate(consensus, transaction)?; Ok(()) } - fn populate_mempool_entries(&self, transaction: &mut MutableTransaction) { + pub(super) fn populate_mempool_entries(&self, transaction: &mut MutableTransaction) { for (i, input) in transaction.tx.inputs.iter().enumerate() { if let Some(parent) = self.transaction_pool.get(&input.previous_outpoint.transaction_id) { let output = &parent.mtx.tx.outputs[input.previous_outpoint.index as usize]; @@ -28,3 +28,10 @@ impl Mempool { } } } + +pub(crate) fn validate_mempool_transaction_and_populate( + consensus: &dyn ConsensusApi, + transaction: &mut MutableTransaction, +) -> RuleResult<()> { + Ok(consensus.validate_mempool_transaction_and_populate(transaction)?) 
+} diff --git a/mining/src/mempool/validate_and_insert_transaction.rs b/mining/src/mempool/validate_and_insert_transaction.rs index f4e915de6f..a6115a5cb3 100644 --- a/mining/src/mempool/validate_and_insert_transaction.rs +++ b/mining/src/mempool/validate_and_insert_transaction.rs @@ -16,29 +16,31 @@ use kaspa_utils::vec::VecExtensions; use super::tx::{Orphan, Priority}; impl Mempool { - pub(crate) fn validate_and_insert_transaction( - &mut self, + pub(crate) fn pre_validate_and_populate_transaction( + &self, consensus: &dyn ConsensusApi, - transaction: Transaction, - priority: Priority, - orphan: Orphan, - ) -> RuleResult>> { - self.validate_and_insert_mutable_transaction(consensus, MutableTransaction::from_tx(transaction), priority, orphan) + mut transaction: MutableTransaction, + ) -> RuleResult { + // Populate mass in the beginning, it will be used in multiple places throughout the validation and insertion. + transaction.calculated_mass = Some(consensus.calculate_transaction_mass(&transaction.tx)); + self.validate_transaction_in_isolation(&transaction)?; + self.transaction_pool.check_double_spends(&transaction)?; + self.populate_mempool_entries(&mut transaction); + Ok(transaction) } - pub(crate) fn validate_and_insert_mutable_transaction( + pub(crate) fn post_validate_and_insert_transaction( &mut self, consensus: &dyn ConsensusApi, - mut transaction: MutableTransaction, + validation_result: RuleResult<()>, + transaction: MutableTransaction, priority: Priority, orphan: Orphan, ) -> RuleResult>> { - // Populate mass in the beginning, it will be used in multiple places throughout the validation and insertion. - transaction.calculated_mass = Some(consensus.calculate_transaction_mass(&transaction.tx)); + // Re-check double spends since validate_and_insert_transaction is no longer atomic + self.transaction_pool.check_double_spends(&transaction)?; - self.validate_transaction_pre_utxo_entry(&transaction)?; - - match self.populate_entries_and_try_validate(consensus, &mut transaction) { + match validation_result { Ok(_) => {} Err(RuleError::RejectMissingOutpoint) => { if orphan == Orphan::Forbidden { @@ -67,11 +69,6 @@ impl Mempool { Ok(accepted_transactions) } - fn validate_transaction_pre_utxo_entry(&self, transaction: &MutableTransaction) -> RuleResult<()> { - self.validate_transaction_in_isolation(transaction)?; - self.transaction_pool.check_double_spends(transaction) - } - fn validate_transaction_in_isolation(&self, transaction: &MutableTransaction) -> RuleResult<()> { let transaction_id = transaction.id(); if self.transaction_pool.has(&transaction_id) { @@ -175,6 +172,7 @@ impl Mempool { assert_eq!(transactions.len(), 1, "the list returned by remove_orphan is expected to contain exactly one transaction"); let mut transaction = transactions.pop().unwrap(); + self.transaction_pool.check_double_spends(&transaction.mtx)?; consensus.validate_mempool_transaction_and_populate(&mut transaction.mtx)?; self.validate_transaction_in_context(&transaction.mtx)?; transaction.added_at_daa_score = consensus.get_virtual_daa_score(); From 1b18e1114820f81a2cb4c98a18689d4bc3eb52e1 Mon Sep 17 00:00:00 2001 From: Tiram <18632023+tiram88@users.noreply.github.com> Date: Mon, 17 Jul 2023 21:03:18 +0300 Subject: [PATCH 02/86] Process tx relay flow received txs in batch --- consensus/core/src/api/mod.rs | 8 +- consensus/src/consensus/mod.rs | 4 + .../pipeline/virtual_processor/processor.rs | 45 ++++++++++- mining/src/manager.rs | 76 ++++++++++++++++++- .../populate_entries_and_try_validate.rs | 8 ++ 
mining/src/model/mod.rs | 1 + mining/src/model/txs_stager.rs | 42 ++++++++++ protocol/flows/src/v5/txrelay/flow.rs | 41 +++++----- 8 files changed, 195 insertions(+), 30 deletions(-) create mode 100644 mining/src/model/txs_stager.rs diff --git a/consensus/core/src/api/mod.rs b/consensus/core/src/api/mod.rs index 738fc0df3f..e1e543946d 100644 --- a/consensus/core/src/api/mod.rs +++ b/consensus/core/src/api/mod.rs @@ -40,11 +40,17 @@ pub trait ConsensusApi: Send + Sync { } /// Populates the mempool transaction with maximally found UTXO entry data and proceeds to full transaction - /// validation if all are found. If validation is successful, also [`transaction.calculated_fee`] is expected to be populated + /// validation if all are found. If validation is successful, also [`transaction.calculated_fee`] is expected to be populated. fn validate_mempool_transaction_and_populate(&self, transaction: &mut MutableTransaction) -> TxResult<()> { unimplemented!() } + /// Populates the mempool transactions with maximally found UTXO entry data and proceeds to full transactions + /// validation if all are found. If validation is successful, also [`transaction.calculated_fee`] is expected to be populated. + fn validate_mempool_transactions_in_parallel(&self, transactions: &mut Vec) -> Vec> { + unimplemented!() + } + fn calculate_transaction_mass(&self, transaction: &Transaction) -> u64 { unimplemented!() } diff --git a/consensus/src/consensus/mod.rs b/consensus/src/consensus/mod.rs index 0e42ce9737..8db56657e3 100644 --- a/consensus/src/consensus/mod.rs +++ b/consensus/src/consensus/mod.rs @@ -363,6 +363,10 @@ impl ConsensusApi for Consensus { Ok(()) } + fn validate_mempool_transactions_in_parallel(&self, transactions: &mut Vec) -> Vec> { + self.virtual_processor.validate_mempool_transactions_in_parallel(transactions) + } + fn calculate_transaction_mass(&self, transaction: &Transaction) -> u64 { self.services.mass_calculator.calc_tx_mass(transaction) } diff --git a/consensus/src/pipeline/virtual_processor/processor.rs b/consensus/src/pipeline/virtual_processor/processor.rs index e36d7a4e99..3aa3bab918 100644 --- a/consensus/src/pipeline/virtual_processor/processor.rs +++ b/consensus/src/pipeline/virtual_processor/processor.rs @@ -79,7 +79,10 @@ use itertools::Itertools; use kaspa_utils::binary_heap::BinaryHeapExtensions; use parking_lot::{RwLock, RwLockUpgradableReadGuard}; use rand::seq::SliceRandom; -use rayon::ThreadPool; +use rayon::{ + prelude::{IntoParallelRefMutIterator, ParallelIterator}, + ThreadPool, +}; use rocksdb::WriteBatch; use std::{ cmp::min, @@ -653,6 +656,21 @@ impl VirtualStateProcessor { (virtual_parents, ghostdag_data) } + fn validate_mempool_transaction_and_populate_impl( + &self, + mutable_tx: &mut MutableTransaction, + virtual_utxo_view: &impl UtxoView, + virtual_daa_score: u64, + virtual_past_median_time: u64, + ) -> TxResult<()> { + self.transaction_validator.validate_tx_in_isolation(&mutable_tx.tx)?; + + self.transaction_validator.utxo_free_tx_validation(&mutable_tx.tx, virtual_daa_score, virtual_past_median_time)?; + self.validate_mempool_transaction_in_utxo_context(mutable_tx, virtual_utxo_view, virtual_daa_score)?; + + Ok(()) + } + pub fn validate_mempool_transaction_and_populate(&self, mutable_tx: &mut MutableTransaction) -> TxResult<()> { self.transaction_validator.validate_tx_in_isolation(&mutable_tx.tx)?; @@ -662,10 +680,29 @@ impl VirtualStateProcessor { let virtual_daa_score = virtual_state.daa_score; let virtual_past_median_time = virtual_state.past_median_time; - 
self.transaction_validator.utxo_free_tx_validation(&mutable_tx.tx, virtual_daa_score, virtual_past_median_time)?; - self.validate_mempool_transaction_in_utxo_context(mutable_tx, virtual_utxo_view, virtual_daa_score)?; + self.validate_mempool_transaction_and_populate_impl(mutable_tx, virtual_utxo_view, virtual_daa_score, virtual_past_median_time) + } - Ok(()) + pub fn validate_mempool_transactions_in_parallel(&self, mutable_txs: &mut Vec) -> Vec> { + let virtual_read = self.virtual_stores.read(); + let virtual_state = virtual_read.state.get().unwrap(); + let virtual_utxo_view = &virtual_read.utxo_set; + let virtual_daa_score = virtual_state.daa_score; + let virtual_past_median_time = virtual_state.past_median_time; + + self.thread_pool.install(|| { + mutable_txs + .par_iter_mut() + .map(|mtx| { + self.validate_mempool_transaction_and_populate_impl( + mtx, + &virtual_utxo_view, + virtual_daa_score, + virtual_past_median_time, + ) + }) + .collect::>>() + }) } fn validate_block_template_transaction( diff --git a/mining/src/manager.rs b/mining/src/manager.rs index cf383b62a8..8d9cccf5a8 100644 --- a/mining/src/manager.rs +++ b/mining/src/manager.rs @@ -8,13 +8,14 @@ use crate::{ errors::MiningManagerResult, mempool::{ config::Config, - populate_entries_and_try_validate::validate_mempool_transaction_and_populate, + populate_entries_and_try_validate::{validate_mempool_transaction_and_populate, validate_mempool_transactions_in_parallel}, tx::{Orphan, Priority}, Mempool, }, model::{ candidate_tx::CandidateTransaction, owner_txs::{GroupedOwnerTransactions, ScriptPublicKeySet}, + txs_stager::TransactionsStagger, }, }; use kaspa_consensus_core::{ @@ -257,8 +258,7 @@ impl MiningManagerProxy { self.inner.clear_block_template() } - /// validate_and_insert_transaction validates the given transaction, and - /// adds it to the set of known transactions that have not yet been + /// Validates a transaction and adds it to the set of known transactions that have not yet been /// added to any block. /// /// The returned transactions are clones of objects owned by the mempool. @@ -285,6 +285,76 @@ impl MiningManagerProxy { .await } + /// Validates a batch of transactions, handling iteratively only the independent ones, and + /// adds those to the set of known transactions that have not yet been added to any block. + /// + /// Returns transactions that were unorphaned following the insertion of the provided + /// transactions. The returned transactions are clones of objects owned by the mempool.
+ pub async fn validate_and_insert_transaction_batch( + self, + consensus: &ConsensusProxy, + transactions: Vec, + priority: Priority, + orphan: Orphan, + ) -> MiningManagerResult>> { + let mut batch = TransactionsStagger::new(transactions); + let mut unorphaned_txs: Vec> = vec![]; + while let Some(transactions) = batch.stagger() { + let mut transactions = transactions.into_iter().map(MutableTransaction::from_tx).collect::>(); + + // read lock on mempool + let inner = self.inner.clone(); + transactions = consensus + .clone() + .spawn_blocking(move |c| { + // Here, we simply drop all erroneous transactions since the caller doesn't care about those anyway + transactions.into_iter().filter_map(|tx| inner.clone().pre_validate_and_populate_transaction(c, tx).ok()).collect() + }) + .await; + + // no lock on mempool + let (results, transactions) = consensus + .clone() + .spawn_blocking(move |c| (validate_mempool_transactions_in_parallel(c, &mut transactions), transactions)) + .await; + + // write lock on mempool + // FIXME: should we block on each single transaction or on the full transaction vector? + let mut txs = vec![]; + for (transaction, result) in transactions.into_iter().zip(results) { + let inner = self.inner.clone(); + txs.extend( + consensus + .clone() + .spawn_blocking(move |c| { + inner.post_validate_and_insert_transaction(c, transaction, result, priority, orphan).unwrap_or_default() + }) + .await, + ); + } + // let mut txs = consensus + // .clone() + // .spawn_blocking(move |c| { + // transactions + // .into_iter() + // .zip(results) + // .flat_map(|(transaction, result)| { + // inner + // .clone() + // .post_validate_and_insert_transaction(c, transaction, result, priority, orphan) + // .unwrap_or_default() + // }) + // .collect() + // }) + // .await; + + // TODO: handle RuleError::RejectInvalid errors when a banning process gets implemented + unorphaned_txs.extend(txs); + } + + Ok(unorphaned_txs) + } + pub async fn handle_new_block_transactions( self, consensus: &ConsensusProxy, diff --git a/mining/src/mempool/populate_entries_and_try_validate.rs b/mining/src/mempool/populate_entries_and_try_validate.rs index 112a0622bf..9eaf65c07b 100644 --- a/mining/src/mempool/populate_entries_and_try_validate.rs +++ b/mining/src/mempool/populate_entries_and_try_validate.rs @@ -1,5 +1,6 @@ use crate::mempool::{errors::RuleResult, model::pool::Pool, Mempool}; use kaspa_consensus_core::{api::ConsensusApi, constants::UNACCEPTED_DAA_SCORE, tx::MutableTransaction, tx::UtxoEntry}; +use kaspa_mining_errors::mempool::RuleError; impl Mempool { pub(crate) fn populate_entries_and_try_validate( @@ -35,3 +36,10 @@ pub(crate) fn validate_mempool_transaction_and_populate( ) -> RuleResult<()> { Ok(consensus.validate_mempool_transaction_and_populate(transaction)?)
} + +pub(crate) fn validate_mempool_transactions_in_parallel( + consensus: &dyn ConsensusApi, + transactions: &mut Vec, +) -> Vec> { + consensus.validate_mempool_transactions_in_parallel(transactions).into_iter().map(|x| x.map_err(RuleError::from)).collect() +} diff --git a/mining/src/model/mod.rs b/mining/src/model/mod.rs index c53ad12e12..8f9128e881 100644 --- a/mining/src/model/mod.rs +++ b/mining/src/model/mod.rs @@ -4,6 +4,7 @@ use std::collections::HashSet; pub(crate) mod candidate_tx; pub mod owner_txs; pub mod topological_index; +pub mod txs_stager; /// A set of unique transaction ids pub type TransactionIdSet = HashSet; diff --git a/mining/src/model/txs_stager.rs b/mining/src/model/txs_stager.rs new file mode 100644 index 0000000000..ca3a00d801 --- /dev/null +++ b/mining/src/model/txs_stager.rs @@ -0,0 +1,42 @@ +use super::TransactionIdSet; +use kaspa_consensus_core::tx::{Transaction, TransactionId}; + +pub struct TransactionsStagger { + txs: Vec, + ids: TransactionIdSet, +} + +impl TransactionsStagger { + pub fn new(txs: Vec) -> Self { + let ids = txs.iter().map(|x| x.id()).collect(); + Self { txs, ids } + } + + pub fn is_empty(&self) -> bool { + self.txs.is_empty() + } + + /// Extract and return all independent transactions + pub fn stagger(&mut self) -> Option> { + let mut ready = Vec::with_capacity(self.txs.len()); + let mut dependent = Vec::with_capacity(self.txs.len()); + while let Some(tx) = self.txs.pop() { + if self.is_dependent(&tx) { + dependent.push(tx); + } else { + ready.push(tx); + } + } + self.txs = dependent; + self.ids = self.txs.iter().map(|x| x.id()).collect(); + (!ready.is_empty()).then_some(ready) + } + + pub fn has(&self, transaction_id: &TransactionId) -> bool { + self.ids.contains(transaction_id) + } + + pub fn is_dependent(&self, tx: &Transaction) -> bool { + tx.inputs.iter().any(|x| self.has(&x.previous_outpoint.transaction_id)) + } +} diff --git a/protocol/flows/src/v5/txrelay/flow.rs b/protocol/flows/src/v5/txrelay/flow.rs index 1c75e57106..32a99bb369 100644 --- a/protocol/flows/src/v5/txrelay/flow.rs +++ b/protocol/flows/src/v5/txrelay/flow.rs @@ -165,7 +165,7 @@ impl RelayTransactionsFlow { consensus: ConsensusProxy, requests: Vec>, ) -> Result<(), ProtocolError> { - // trace!("Receive {} transaction ids from {}", requests.len(), self.router.identity()); + let mut transactions: Vec = Vec::with_capacity(requests.len()); for request in requests { let response = self.read_response().await?; let transaction_id = response.transaction_id(); @@ -175,29 +175,26 @@ impl RelayTransactionsFlow { request.req, transaction_id ))); } - let Response::Transaction(transaction) = response else { continue; }; - match self - .ctx - .mining_manager() - .clone() - .validate_and_insert_transaction(&consensus, transaction, Priority::Low, Orphan::Allowed) - .await - { - Ok(accepted_transactions) => { - // trace!("Broadcast {} accepted transaction ids", accepted_transactions.len()); - self.ctx.broadcast_transactions(accepted_transactions.iter().map(|x| x.id())).await?; - } - Err(MiningManagerError::MempoolError(err)) => { - if let RuleError::RejectInvalid(_) = err { - // TODO: discuss a banning process - return Err(ProtocolError::MisbehavingPeer(format!("rejected invalid transaction {}", transaction_id))); - } - continue; - } - Err(_) => {} + if let Response::Transaction(transaction) = response { + transactions.push(transaction); + } + } + match self + .ctx + .mining_manager() + .clone() + .validate_and_insert_transaction_batch(&consensus, transactions, Priority::Low,
Orphan::Allowed) + .await + { + Ok(accepted_transactions) => { + self.ctx.broadcast_transactions(accepted_transactions.iter().map(|x| x.id())).await?; + } + Err(MiningManagerError::MempoolError(RuleError::RejectInvalid(transaction_id))) => { + // TODO: discuss a banning process + return Err(ProtocolError::MisbehavingPeer(format!("rejected invalid transaction {}", transaction_id))); } + Err(_) => {} } - // trace!("Processed {} transactions from {}", requests.len(), self.router.identity()); Ok(()) } } From 84c1743afc9c5ef2964679c3068119ae868589f6 Mon Sep 17 00:00:00 2001 From: Tiram <18632023+tiram88@users.noreply.github.com> Date: Tue, 18 Jul 2023 18:00:25 +0300 Subject: [PATCH 03/86] Use a single blocking task per MiningManagerProxy fn --- mining/src/manager.rs | 142 +++++++++++++++--------------------- 1 file changed, 49 insertions(+), 93 deletions(-) diff --git a/mining/src/manager.rs b/mining/src/manager.rs index 8d9cccf5a8..c80121d500 100644 --- a/mining/src/manager.rs +++ b/mining/src/manager.rs @@ -27,7 +27,6 @@ use kaspa_consensus_core::{ }; use kaspa_consensusmanager::{spawn_blocking, ConsensusProxy}; use kaspa_core::error; -use kaspa_mining_errors::mempool::RuleResult; use parking_lot::{Mutex, RwLock}; pub struct MiningManager { @@ -124,7 +123,6 @@ impl MiningManager { /// added to any block. /// /// The returned transactions are clones of objects owned by the mempool. - #[cfg(test)] pub fn validate_and_insert_transaction( &self, consensus: &dyn ConsensusApi, @@ -135,8 +133,7 @@ impl MiningManager { self.validate_and_insert_mutable_transaction(consensus, MutableTransaction::from_tx(transaction), priority, orphan) } - /// Exposed only for tests. Ordinary users should let the mempool create the mutable tx internally - #[cfg(test)] + /// Exposed only for tests. Ordinary users should call `validate_and_insert_transaction` instead pub fn validate_and_insert_mutable_transaction( &self, consensus: &dyn ConsensusApi, @@ -152,6 +149,49 @@ impl MiningManager { Ok(self.mempool.write().post_validate_and_insert_transaction(consensus, validation_result, transaction, priority, orphan)?) } + /// Validates a batch of transactions, handling iteratively only the independent ones, and + /// adds those to the set of known transactions that have not yet been added to any block. + /// + /// Returns transactions that were unorphaned following the insertion of the provided + /// transactions. The returned transactions are clones of objects owned by the mempool.
+ pub fn validate_and_insert_transaction_batch( + &self, + consensus: &dyn ConsensusApi, + transactions: Vec, + priority: Priority, + orphan: Orphan, + ) -> MiningManagerResult>> { + let mut batch = TransactionsStagger::new(transactions); + let mut unorphaned_txs: Vec> = vec![]; + while let Some(transactions) = batch.stagger() { + let mut transactions = transactions.into_iter().map(MutableTransaction::from_tx).collect::>(); + + // read lock on mempool + let mempool = self.mempool.read(); + // Here, we simply drop all erroneous transactions since the caller doesn't care about those anyway + transactions = + transactions.into_iter().filter_map(|tx| mempool.pre_validate_and_populate_transaction(consensus, tx).ok()).collect(); + + // no lock on mempool + let validation_result = validate_mempool_transactions_in_parallel(consensus, &mut transactions); + + // write lock on mempool + let mut mempool = self.mempool.write(); + let txs = transactions + .into_iter() + .zip(validation_result) + .flat_map(|(transaction, result)| { + mempool.post_validate_and_insert_transaction(consensus, result, transaction, priority, orphan).unwrap_or_default() + }) + .collect::>(); + + // TODO: handle RuleError::RejectInvalid errors when a banning process gets implemented + unorphaned_txs.extend(txs); + } + + Ok(unorphaned_txs) + } + /// Try to return a mempool transaction by its id. /// /// Note: the transaction is an orphan if tx.is_fully_populated() returns false. @@ -217,25 +257,6 @@ impl MiningManager { pub fn is_transaction_output_dust(&self, transaction_output: &TransactionOutput) -> bool { self.mempool.read().is_transaction_output_dust(transaction_output) } - - fn pre_validate_and_populate_transaction( - &self, - consensus: &dyn ConsensusApi, - transaction: MutableTransaction, - ) -> MiningManagerResult { - Ok(self.mempool.read().pre_validate_and_populate_transaction(consensus, transaction)?) - } - - fn post_validate_and_insert_transaction( - &self, - consensus: &dyn ConsensusApi, - transaction: MutableTransaction, - validation_result: RuleResult<()>, - priority: Priority, - orphan: Orphan, - ) -> MiningManagerResult>> { - Ok(self.mempool.write().post_validate_and_insert_transaction(consensus, validation_result, transaction, priority, orphan)?) 
- } } /// Async proxy for the mining manager @@ -269,20 +290,7 @@ impl MiningManagerProxy { priority: Priority, orphan: Orphan, ) -> MiningManagerResult>> { - let mut transaction = MutableTransaction::from_tx(transaction); - let inner = self.inner.clone(); - // read lock on mempool - transaction = consensus.clone().spawn_blocking(move |c| inner.pre_validate_and_populate_transaction(c, transaction)).await?; - // no lock on mempool - let (result, transaction) = consensus - .clone() - .spawn_blocking(move |c| (validate_mempool_transaction_and_populate(c, &mut transaction), transaction)) - .await; - // write lock on mempool - consensus - .clone() - .spawn_blocking(move |c| self.inner.post_validate_and_insert_transaction(c, transaction, result, priority, orphan)) - .await + consensus.clone().spawn_blocking(move |c| self.inner.validate_and_insert_transaction(c, transaction, priority, orphan)).await } /// Validates a batch of transactions, handling iteratively only the independent ones, and @@ -297,62 +305,10 @@ impl MiningManagerProxy { priority: Priority, orphan: Orphan, ) -> MiningManagerResult>> { - let mut batch = TransactionsStagger::new(transactions); - let mut unorphaned_txs: Vec> = vec![]; - while let Some(transactions) = batch.stagger() { - let mut transactions = transactions.into_iter().map(MutableTransaction::from_tx).collect::>(); - - // read lock on mempool - let inner = self.inner.clone(); - transactions = consensus - .clone() - .spawn_blocking(move |c| { - // Here, we simply drop all erroneous transactions since the caller doesn't care about those anyway - transactions.into_iter().filter_map(|tx| inner.clone().pre_validate_and_populate_transaction(c, tx).ok()).collect() - }) - .await; - - // no lock on mempool - let (results, transactions) = consensus - .clone() - .spawn_blocking(move |c| (validate_mempool_transactions_in_parallel(c, &mut transactions), transactions)) - .await; - - // write lock on mempool - // FIXME: should we block on each single transaction or on the full transaction vector?
- let mut txs = vec![]; - for (transaction, result) in transactions.into_iter().zip(results) { - let inner = self.inner.clone(); - txs.extend( - consensus - .clone() - .spawn_blocking(move |c| { - inner.post_validate_and_insert_transaction(c, transaction, result, priority, orphan).unwrap_or_default() - }) - .await, - ); - } - // let mut txs = consensus - // .clone() - // .spawn_blocking(move |c| { - // transactions - // .into_iter() - // .zip(results) - // .flat_map(|(transaction, result)| { - // inner - // .clone() - // .post_validate_and_insert_transaction(c, transaction, result, priority, orphan) - // .unwrap_or_default() - // }) - // .collect() - // }) - // .await; - - // TODO: handle RuleError::RejectInvalid errors when a banning process gets implemented - unorphaned_txs.extend(txs); - } - - Ok(unorphaned_txs) + consensus + .clone() + .spawn_blocking(move |c| self.inner.validate_and_insert_transaction_batch(c, transactions, priority, orphan)) + .await } pub async fn handle_new_block_transactions( From 563baaf8f03ff06c455b2061168b81652ff0542e Mon Sep 17 00:00:00 2001 From: Tiram <18632023+tiram88@users.noreply.github.com> Date: Tue, 18 Jul 2023 20:26:48 +0300 Subject: [PATCH 04/86] Split parallel txs validation in chunks of max block mass --- consensus/core/src/api/mod.rs | 2 +- consensus/src/consensus/mod.rs | 2 +- .../pipeline/virtual_processor/processor.rs | 2 +- mining/src/block_template/builder.rs | 4 +++ mining/src/manager.rs | 25 +++++++++++++++++-- .../populate_entries_and_try_validate.rs | 2 +- 6 files changed, 31 insertions(+), 6 deletions(-) diff --git a/consensus/core/src/api/mod.rs b/consensus/core/src/api/mod.rs index e1e543946d..f34016eb9e 100644 --- a/consensus/core/src/api/mod.rs +++ b/consensus/core/src/api/mod.rs @@ -47,7 +47,7 @@ pub trait ConsensusApi: Send + Sync { /// Populates the mempool transactions with maximally found UTXO entry data and proceeds to full transactions /// validation if all are found. If validation is successful, also [`transaction.calculated_fee`] is expected to be populated. 
- fn validate_mempool_transactions_in_parallel(&self, transactions: &mut Vec) -> Vec> { + fn validate_mempool_transactions_in_parallel(&self, transactions: &mut [MutableTransaction]) -> Vec> { unimplemented!() } diff --git a/consensus/src/consensus/mod.rs b/consensus/src/consensus/mod.rs index 8db56657e3..0f9907a3a0 100644 --- a/consensus/src/consensus/mod.rs +++ b/consensus/src/consensus/mod.rs @@ -363,7 +363,7 @@ impl ConsensusApi for Consensus { Ok(()) } - fn validate_mempool_transactions_in_parallel(&self, transactions: &mut Vec) -> Vec> { + fn validate_mempool_transactions_in_parallel(&self, transactions: &mut [MutableTransaction]) -> Vec> { self.virtual_processor.validate_mempool_transactions_in_parallel(transactions) } diff --git a/consensus/src/pipeline/virtual_processor/processor.rs b/consensus/src/pipeline/virtual_processor/processor.rs index 3aa3bab918..aef1935c42 100644 --- a/consensus/src/pipeline/virtual_processor/processor.rs +++ b/consensus/src/pipeline/virtual_processor/processor.rs @@ -683,7 +683,7 @@ impl VirtualStateProcessor { self.validate_mempool_transaction_and_populate_impl(mutable_tx, virtual_utxo_view, virtual_daa_score, virtual_past_median_time) } - pub fn validate_mempool_transactions_in_parallel(&self, mutable_txs: &mut Vec) -> Vec> { + pub fn validate_mempool_transactions_in_parallel(&self, mutable_txs: &mut [MutableTransaction]) -> Vec> { let virtual_read = self.virtual_stores.read(); let virtual_state = virtual_read.state.get().unwrap(); let virtual_utxo_view = &virtual_read.utxo_set; diff --git a/mining/src/block_template/builder.rs b/mining/src/block_template/builder.rs index feae39d85d..be265d7b7e 100644 --- a/mining/src/block_template/builder.rs +++ b/mining/src/block_template/builder.rs @@ -119,4 +119,8 @@ impl BlockTemplateBuilder { block_template.miner_data = new_miner_data.clone(); Ok(block_template) } + + pub fn max_block_mass(&self) -> u64 { + self.policy.max_block_mass + } } diff --git a/mining/src/manager.rs b/mining/src/manager.rs index c80121d500..be04433d93 100644 --- a/mining/src/manager.rs +++ b/mining/src/manager.rs @@ -173,13 +173,20 @@ impl MiningManager { transactions.into_iter().filter_map(|tx| mempool.pre_validate_and_populate_transaction(consensus, tx).ok()).collect(); // no lock on mempool - let validation_result = validate_mempool_transactions_in_parallel(consensus, &mut transactions); + // We process the transactions by chunks of max block mass to prevent locking the virtual processor for too long. + let mut lower_bound: usize = 0; + let mut validation_results = Vec::with_capacity(transactions.len()); + while let Some(upper_bound) = self.next_transaction_chunk_upper_bound(&transactions, lower_bound) { + validation_results + .extend(validate_mempool_transactions_in_parallel(consensus, &mut transactions[lower_bound..upper_bound])); + lower_bound = upper_bound; + } // write lock on mempool let mut mempool = self.mempool.write(); let txs = transactions .into_iter() - .zip(validation_result) + .zip(validation_results) .flat_map(|(transaction, result)| { mempool.post_validate_and_insert_transaction(consensus, result, transaction, priority, orphan).unwrap_or_default() }) @@ -192,6 +199,20 @@ impl MiningManager { Ok(unorphaned_txs) } + fn next_transaction_chunk_upper_bound(&self, transactions: &[MutableTransaction], lower_bound: usize) -> Option { + if lower_bound >= transactions.len() { + return None; + } + let mut mass = 0; + transactions[lower_bound..] 
+ .iter() + .position(|tx| { + mass += tx.calculated_mass.unwrap(); + mass >= self.block_template_builder.max_block_mass() + }) + .or(Some(transactions.len())) + } + /// Try to return a mempool transaction by its id. /// /// Note: the transaction is an orphan if tx.is_fully_populated() returns false. diff --git a/mining/src/mempool/populate_entries_and_try_validate.rs b/mining/src/mempool/populate_entries_and_try_validate.rs index 9eaf65c07b..637638bdaa 100644 --- a/mining/src/mempool/populate_entries_and_try_validate.rs +++ b/mining/src/mempool/populate_entries_and_try_validate.rs @@ -39,7 +39,7 @@ pub(crate) fn validate_mempool_transaction_and_populate( pub(crate) fn validate_mempool_transactions_in_parallel( consensus: &dyn ConsensusApi, - transactions: &mut Vec, + transactions: &mut [MutableTransaction], ) -> Vec> { consensus.validate_mempool_transactions_in_parallel(transactions).into_iter().map(|x| x.map_err(RuleError::from)).collect() } From 49d74ebdbda979d14764e58278eee40984f5eb0e Mon Sep 17 00:00:00 2001 From: Tiram <18632023+tiram88@users.noreply.github.com> Date: Wed, 26 Jul 2023 17:05:47 +0300 Subject: [PATCH 05/86] Abstract expire_low_priority_transactions into Pool trait --- .../mempool/handle_new_block_transactions.rs | 4 +- mining/src/mempool/model/orphan_pool.rs | 59 +++++++------- mining/src/mempool/model/pool.rs | 12 ++- mining/src/mempool/model/transactions_pool.rs | 78 +++++++++---------- 4 files changed, 79 insertions(+), 74 deletions(-) diff --git a/mining/src/mempool/handle_new_block_transactions.rs b/mining/src/mempool/handle_new_block_transactions.rs index 9fbd7e2d92..7835a097e8 100644 --- a/mining/src/mempool/handle_new_block_transactions.rs +++ b/mining/src/mempool/handle_new_block_transactions.rs @@ -2,6 +2,8 @@ use crate::mempool::{errors::RuleResult, Mempool}; use kaspa_consensus_core::{api::ConsensusApi, tx::Transaction}; use std::{collections::HashSet, sync::Arc}; +use super::model::pool::Pool; + impl Mempool { pub(crate) fn handle_new_block_transactions( &mut self, @@ -17,7 +19,7 @@ impl Mempool { let mut unorphaned_transactions = self.process_orphans_after_accepted_transaction(consensus, transaction)?; accepted_orphans.append(&mut unorphaned_transactions); } - self.orphan_pool.expire_low_priority_transactions(consensus)?; + self.orphan_pool.expire_low_priority_transactions(consensus.get_virtual_daa_score())?; + self.transaction_pool.expire_low_priority_transactions(consensus.get_virtual_daa_score())?; Ok(accepted_orphans) } diff --git a/mining/src/mempool/model/orphan_pool.rs b/mining/src/mempool/model/orphan_pool.rs index 327f831064..4b8935c0fb 100644 --- a/mining/src/mempool/model/orphan_pool.rs +++ b/mining/src/mempool/model/orphan_pool.rs @@ -214,36 +214,6 @@ impl OrphanPool { self.get_redeemer_ids_in_pool(transaction_id).iter().map(|x| self.remove_single_orphan(x)).collect() } - pub(crate) fn expire_low_priority_transactions(&mut self, consensus: &dyn ConsensusApi) -> RuleResult<()> { - let virtual_daa_score = consensus.get_virtual_daa_score(); - if virtual_daa_score < self.last_expire_scan + self.config.orphan_expire_scan_interval_daa_score { - return Ok(()); - } - - // Never expire high priority transactions - // Remove all transactions whose addedAtDAAScore is older than TransactionExpireIntervalDAAScore - let expired_low_priority_transactions: Vec = self - .all_orphans - .values() - .filter_map(|x| { - if (x.priority == Priority::Low) - && virtual_daa_score > x.added_at_daa_score + self.config.orphan_expire_interval_daa_score - { - Some(x.id()) - }
else { - None - } - }) - .collect(); - - for transaction_id in expired_low_priority_transactions.iter() { - self.remove_orphan(transaction_id, false)?; - } - - self.last_expire_scan = virtual_daa_score; - Ok(()) - } - pub(crate) fn update_orphans_after_transaction_removed( &mut self, removed_transaction: &MempoolTransaction, @@ -290,4 +260,33 @@ impl Pool for OrphanPool { fn chained_mut(&mut self) -> &mut TransactionsEdges { &mut self.chained_orphans } + + fn expire_low_priority_transactions(&mut self, virtual_daa_score: u64) -> RuleResult<()> { + if virtual_daa_score < self.last_expire_scan + self.config.orphan_expire_scan_interval_daa_score { + return Ok(()); + } + + // Never expire high priority transactions + // Remove all transactions whose addedAtDAAScore is older than TransactionExpireIntervalDAAScore + let expired_low_priority_transactions: Vec = self + .all_orphans + .values() + .filter_map(|x| { + if (x.priority == Priority::Low) + && virtual_daa_score > x.added_at_daa_score + self.config.orphan_expire_interval_daa_score + { + Some(x.id()) + } else { + None + } + }) + .collect(); + + for transaction_id in expired_low_priority_transactions.iter() { + self.remove_orphan(transaction_id, false)?; + } + + self.last_expire_scan = virtual_daa_score; + Ok(()) + } } diff --git a/mining/src/mempool/model/pool.rs b/mining/src/mempool/model/pool.rs index a974a2c38d..79cdd308ae 100644 --- a/mining/src/mempool/model/pool.rs +++ b/mining/src/mempool/model/pool.rs @@ -1,8 +1,8 @@ -use std::collections::{hash_set::Iter, HashMap, HashSet}; - -use super::{map::MempoolTransactionCollection, tx::MempoolTransaction}; use crate::{ - mempool::tx::Priority, + mempool::{ + model::{map::MempoolTransactionCollection, tx::MempoolTransaction}, + tx::Priority, + }, model::{ owner_txs::{GroupedOwnerTransactions, ScriptPublicKeySet}, topological_index::TopologicalIndex, @@ -10,6 +10,8 @@ use crate::{ }, }; use kaspa_consensus_core::tx::{MutableTransaction, TransactionId}; +use kaspa_mining_errors::mempool::RuleResult; +use std::collections::{hash_set::Iter, HashMap, HashSet}; pub(crate) type TransactionsEdges = HashMap; @@ -115,6 +117,8 @@ pub(crate) trait Pool { }); }); } + + fn expire_low_priority_transactions(&mut self, virtual_daa_score: u64) -> RuleResult<()>; } pub(crate) struct PoolIndex { diff --git a/mining/src/mempool/model/transactions_pool.rs b/mining/src/mempool/model/transactions_pool.rs index 98de0b2d6b..60c325e7f9 100644 --- a/mining/src/mempool/model/transactions_pool.rs +++ b/mining/src/mempool/model/transactions_pool.rs @@ -149,45 +149,6 @@ impl TransactionsPool { self.all_transactions.remove(transaction_id).ok_or(RuleError::RejectMissingTransaction(*transaction_id)) } - pub(crate) fn expire_low_priority_transactions(&mut self, virtual_daa_score: u64) -> RuleResult<()> { - let now = unix_now(); - if virtual_daa_score < self.last_expire_scan_daa_score + self.config.transaction_expire_scan_interval_daa_score - || now < self.last_expire_scan_time + self.config.transaction_expire_scan_interval_milliseconds - { - return Ok(()); - } - - // Never expire high priority transactions - // Remove all transactions whose added_at_daa_score is older than transaction_expire_interval_daa_score - let expired_low_priority_transactions: Vec = self - .all_transactions - .values() - .filter_map(|x| { - if (x.priority == Priority::Low) - && virtual_daa_score > x.added_at_daa_score + self.config.transaction_expire_interval_daa_score - { - debug!( - "Removing transaction {}, because it expired, virtual DAA score is {}
and expire limit is {}", - x.id(), - virtual_daa_score, - x.added_at_daa_score + self.config.transaction_expire_interval_daa_score - ); - Some(x.id()) - } else { - None - } - }) - .collect(); - - for transaction_id in expired_low_priority_transactions.iter() { - self.remove_transaction(transaction_id)?; - } - - self.last_expire_scan_daa_score = virtual_daa_score; - self.last_expire_scan_time = now; - Ok(()) - } - /// Is the mempool transaction identified by `transaction_id` ready for being inserted into a block template? pub(crate) fn is_transaction_ready(&self, transaction_id: &TransactionId) -> bool { if self.all_transactions.contains_key(transaction_id) { @@ -299,4 +260,43 @@ impl Pool for TransactionsPool { fn chained_mut(&mut self) -> &mut TransactionsEdges { &mut self.chained_transactions } + + fn expire_low_priority_transactions(&mut self, virtual_daa_score: u64) -> RuleResult<()> { + let now = unix_now(); + if virtual_daa_score < self.last_expire_scan_daa_score + self.config.transaction_expire_scan_interval_daa_score + || now < self.last_expire_scan_time + self.config.transaction_expire_scan_interval_milliseconds + { + return Ok(()); + } + + // Never expire high priority transactions + // Remove all transactions whose added_at_daa_score is older than transaction_expire_interval_daa_score + let expired_low_priority_transactions: Vec = self + .all_transactions + .values() + .filter_map(|x| { + if (x.priority == Priority::Low) + && virtual_daa_score > x.added_at_daa_score + self.config.transaction_expire_interval_daa_score + { + debug!( + "Removing transaction {}, because it expired, virtual DAA score is {} and expire limit is {}", + x.id(), + virtual_daa_score, + x.added_at_daa_score + self.config.transaction_expire_interval_daa_score + ); + Some(x.id()) + } else { + None + } + }) + .collect(); + + for transaction_id in expired_low_priority_transactions.iter() { + self.remove_transaction(transaction_id)?; + } + + self.last_expire_scan_daa_score = virtual_daa_score; + self.last_expire_scan_time = now; + Ok(()) + } } From 0cb66199110270958bc314f022d41155ef018a84 Mon Sep 17 00:00:00 2001 From: Tiram <18632023+tiram88@users.noreply.github.com> Date: Sat, 26 Aug 2023 23:18:29 +0300 Subject: [PATCH 06/86] Making room in the mempool for a new transaction won't remove chained txs nor parent txs of the new transaction --- mining/src/mempool/model/transactions_pool.rs | 56 ++++++++++++++----- mining/src/mempool/model/tx.rs | 5 ++ .../validate_and_insert_transaction.rs | 2 +- 3 files changed, 47 insertions(+), 16 deletions(-) diff --git a/mining/src/mempool/model/transactions_pool.rs b/mining/src/mempool/model/transactions_pool.rs index 60c325e7f9..907a36d5f7 100644 --- a/mining/src/mempool/model/transactions_pool.rs +++ b/mining/src/mempool/model/transactions_pool.rs @@ -170,35 +170,61 @@ impl TransactionsPool { .collect() } + /// Is the mempool transaction identified by `transaction_id` unchained, thus having no successor? + pub(crate) fn transaction_is_unchained(&self, transaction_id: &TransactionId) -> bool { + if self.all_transactions.contains_key(transaction_id) { + if let Some(chains) = self.chained_transactions.get(transaction_id) { + return chains.is_empty(); + } + return true; + } + false + } /// Returns the exceeding low-priority transactions having the lowest fee rates in order - /// to have room for at least `free_slots` new transactions.
The returned transactions + /// are guaranteed to be unchained (no successor in mempool) and not to be a parent of + /// `transaction`. /// /// An error is returned if the mempool is filled with high priority transactions. - pub(crate) fn limit_transaction_count(&self, free_slots: usize) -> RuleResult> { + pub(crate) fn limit_transaction_count( + &self, + free_slots: usize, + transaction: &MutableTransaction, + ) -> RuleResult> { + assert!(free_slots > 0); // Returns a vector of transactions to be removed, which the caller must actually remove. // The caller is the golang validateAndInsertTransaction equivalent. // This behavior differs from golang impl. - let mut transactions_to_remove = Vec::new(); - if self.len() + free_slots > self.config.maximum_transaction_count as usize { + let trim_size = self.len() + free_slots - usize::min(self.len() + free_slots, self.config.maximum_transaction_count as usize); + let mut transactions_to_remove = Vec::with_capacity(trim_size); + if trim_size > 0 { // TODO: consider introducing an index on all_transactions low-priority items instead. // // Sorting this vector here may be sub-optimal compared with maintaining a sorted // index of all_transactions low-priority items if the proportion of low-priority txs // in all_transactions is important. - let mut low_priority_txs = self.all_transactions.values().filter(|x| x.priority == Priority::Low).collect::>(); - - if !low_priority_txs.is_empty() { - low_priority_txs.sort_by(|a, b| a.fee_rate().partial_cmp(&b.fee_rate()).unwrap()); - transactions_to_remove.extend_from_slice( - &low_priority_txs[0..usize::min( - self.len() + free_slots - self.config.maximum_transaction_count as usize, - low_priority_txs.len(), - )], - ); + let low_priority_txs = self + .all_transactions + .values() + .filter(|x| x.priority == Priority::Low && self.transaction_is_unchained(&x.id()) && !x.is_parent_of(transaction)); + + if trim_size == 1 { + // This is the most likely case. Here we just search the minimum, thus avoiding the need to sort altogether. + if let Some(tx) = low_priority_txs.min_by(|a, b| a.fee_rate().partial_cmp(&b.fee_rate()).unwrap()) { + transactions_to_remove.push(tx); + } + } else { + let mut low_priority_txs = low_priority_txs.collect::>(); + if low_priority_txs.len() > trim_size { + low_priority_txs.sort_by(|a, b| a.fee_rate().partial_cmp(&b.fee_rate()).unwrap()); + transactions_to_remove.extend_from_slice(&low_priority_txs[0..usize::min(trim_size, low_priority_txs.len())]); + } else { + transactions_to_remove = low_priority_txs; + } } } - // An error is returned if the mempool is filled with high priority transactions. + // An error is returned if the mempool is filled with high priority and other unremovable transactions.
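// A quick check of the trim_size arithmetic above (illustrative figures, not from the patch):
// with maximum_transaction_count = 1_000_000, len() = 1_000_000 and free_slots = 1,
// trim_size = 1_000_001 - min(1_000_001, 1_000_000) = 1, so the common single-slot case
// takes the min_by branch and avoids sorting entirely; with len() = 999_999 instead,
// trim_size = 0 and nothing is removed.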
let tx_count = self.len() + free_slots - transactions_to_remove.len(); if tx_count as u64 > self.config.maximum_transaction_count { let err = RuleError::RejectMempoolIsFull(tx_count - free_slots, self.config.maximum_transaction_count); diff --git a/mining/src/mempool/model/tx.rs b/mining/src/mempool/model/tx.rs index 1cc0611745..66e85e3116 100644 --- a/mining/src/mempool/model/tx.rs +++ b/mining/src/mempool/model/tx.rs @@ -21,6 +21,11 @@ impl MempoolTransaction { pub(crate) fn fee_rate(&self) -> f64 { self.mtx.calculated_fee.unwrap() as f64 / self.mtx.calculated_mass.unwrap() as f64 } + + pub(crate) fn is_parent_of(&self, transaction: &MutableTransaction) -> bool { + let parent_id = self.id(); + transaction.tx.inputs.iter().any(|x| x.previous_outpoint.transaction_id == parent_id) + } } impl Ord for MempoolTransaction { diff --git a/mining/src/mempool/validate_and_insert_transaction.rs b/mining/src/mempool/validate_and_insert_transaction.rs index a6115a5cb3..9d2a729ed4 100644 --- a/mining/src/mempool/validate_and_insert_transaction.rs +++ b/mining/src/mempool/validate_and_insert_transaction.rs @@ -57,7 +57,7 @@ impl Mempool { self.validate_transaction_in_context(&transaction)?; // Before adding the transaction, check if there is room in the pool - self.transaction_pool.limit_transaction_count(1)?.iter().try_for_each(|x| self.remove_transaction(x, true))?; + self.transaction_pool.limit_transaction_count(1, &transaction)?.iter().try_for_each(|x| self.remove_transaction(x, true))?; // Here the accepted transaction is cloned in order to prevent having self borrowed immutably for the // transaction reference and mutably for the call to process_orphans_after_accepted_transaction From 6ebde5320ca8afd7425e34c8db7e74e8413ab3f8 Mon Sep 17 00:00:00 2001 From: Tiram <18632023+tiram88@users.noreply.github.com> Date: Tue, 29 Aug 2023 16:48:02 +0300 Subject: [PATCH 07/86] Refine lock granularity on Mempool and Consensus while processing unorphaned transactions (wip) --- mining/src/manager.rs | 116 ++++++++++++++++-- .../mempool/handle_new_block_transactions.rs | 21 ++-- mining/src/mempool/mod.rs | 2 +- .../validate_and_insert_transaction.rs | 65 +++------- 4 files changed, 132 insertions(+), 72 deletions(-) diff --git a/mining/src/manager.rs b/mining/src/manager.rs index be04433d93..4468df56c2 100644 --- a/mining/src/manager.rs +++ b/mining/src/manager.rs @@ -8,6 +8,7 @@ use crate::{ errors::MiningManagerResult, mempool::{ config::Config, + model::tx::MempoolTransaction, populate_entries_and_try_validate::{validate_mempool_transaction_and_populate, validate_mempool_transactions_in_parallel}, tx::{Orphan, Priority}, Mempool, @@ -26,7 +27,7 @@ use kaspa_consensus_core::{ tx::{MutableTransaction, Transaction, TransactionId, TransactionOutput}, }; use kaspa_consensusmanager::{spawn_blocking, ConsensusProxy}; -use kaspa_core::error; +use kaspa_core::{error, info}; use parking_lot::{Mutex, RwLock}; pub struct MiningManager { @@ -146,7 +147,78 @@ impl MiningManager { // no lock on mempool let validation_result = validate_mempool_transaction_and_populate(consensus, &mut transaction); // write lock on mempool - Ok(self.mempool.write().post_validate_and_insert_transaction(consensus, validation_result, transaction, priority, orphan)?) + let mut mempool = self.mempool.write(); + if let Some(accepted_transaction) = + mempool.post_validate_and_insert_transaction(consensus, validation_result, transaction, priority, orphan)? 
+        {
+            let unorphaned_transactions = mempool.get_unorphaned_transactions_after_accepted_transaction(&accepted_transaction);
+            drop(mempool);
+
+            // The capacity used here may be exceeded since accepted unorphaned transactions may themselves unorphan other transactions.
+            let mut accepted_transactions = Vec::with_capacity(unorphaned_transactions.len() + 1);
+            // We include the original accepted transaction as well
+            accepted_transactions.push(accepted_transaction);
+            accepted_transactions.extend(self.validate_and_insert_unorphaned_transactions(consensus, unorphaned_transactions));
+
+            Ok(accepted_transactions)
+        } else {
+            Ok(vec![])
+        }
+    }
+
+    fn validate_and_insert_unorphaned_transactions(
+        &self,
+        consensus: &dyn ConsensusApi,
+        mut incoming_transactions: Vec<MempoolTransaction>,
+    ) -> Vec<Arc<Transaction>> {
+        // The capacity used here may be exceeded (see next comment).
+        let mut accepted_transactions = Vec::with_capacity(incoming_transactions.len());
+        // We loop as long as incoming unorphaned transactions do unorphan other transactions when they
+        // get validated and inserted into the mempool
+        while !incoming_transactions.is_empty() {
+            let (mut transactions, priorities): (Vec<MutableTransaction>, Vec<Priority>) =
+                incoming_transactions.into_iter().map(|x| (x.mtx, x.priority)).unzip();
+
+            // no lock on mempool
+            // We process the transactions by chunks of max block mass to prevent locking the virtual processor for too long.
+            let mut lower_bound: usize = 0;
+            let mut validation_results = Vec::with_capacity(transactions.len());
+            while let Some(upper_bound) = self.next_transaction_chunk_upper_bound(&transactions, lower_bound) {
+                validation_results
+                    .extend(validate_mempool_transactions_in_parallel(consensus, &mut transactions[lower_bound..upper_bound]));
+                lower_bound = upper_bound;
+            }
+
+            // write lock on mempool
+            let mut mempool = self.mempool.write();
+            incoming_transactions = transactions
+                .into_iter()
+                .zip(priorities)
+                .zip(validation_results)
+                .flat_map(|((transaction, priority), validation_result)| {
+                    let orphan_id = transaction.id();
+                    match mempool.post_validate_and_insert_transaction(
+                        consensus,
+                        validation_result,
+                        transaction,
+                        priority,
+                        Orphan::Forbidden,
+                    ) {
+                        Ok(Some(accepted_transaction)) => {
+                            accepted_transactions.push(accepted_transaction.clone());
+                            mempool.get_unorphaned_transactions_after_accepted_transaction(&accepted_transaction)
+                        }
+                        Ok(None) => vec![],
+                        Err(err) => {
+                            info!("Failed to unorphan transaction {0} due to rule error: {1}", orphan_id, err.to_string());
+                            vec![]
+                        }
+                    }
+                })
+                .collect::<Vec<_>>();
+            drop(mempool);
+        }
+        accepted_transactions
+    }

     /// Validates a batch of transactions, handling iteratively only the independent ones, and
@@ -161,16 +233,18 @@ impl MiningManager {
         priority: Priority,
         orphan: Orphan,
     ) -> MiningManagerResult<Vec<Arc<Transaction>>> {
+        // The capacity used here may be exceeded since accepted transactions may unorphan other transactions.
+        let mut accepted_transactions: Vec<Arc<Transaction>> = Vec::with_capacity(transactions.len());
         let mut batch = TransactionsStagger::new(transactions);
-        let mut unorphaned_txs: Vec<Arc<Transaction>> = vec![];
         while let Some(transactions) = batch.stagger() {
             let mut transactions = transactions.into_iter().map(MutableTransaction::from_tx).collect::<Vec<_>>();

             // read lock on mempool
-            let mempool = self.mempool.read();
             // Here, we simply drop all erroneous transactions since the caller doesn't care about those anyway
-            transactions =
-                transactions.into_iter().filter_map(|tx| mempool.pre_validate_and_populate_transaction(consensus, tx).ok()).collect();
+            transactions = transactions
+                .into_iter()
+                .filter_map(|tx| self.mempool.read().pre_validate_and_populate_transaction(consensus, tx).ok())
+                .collect();

             // no lock on mempool
             // We process the transactions by chunks of max block mass to prevent locking the virtual processor for too long.
@@ -184,19 +258,27 @@ impl MiningManager {

             // write lock on mempool
             let mut mempool = self.mempool.write();
-            let txs = transactions
+            let unorphaned_transactions = transactions
                 .into_iter()
                 .zip(validation_results)
-                .flat_map(|(transaction, result)| {
-                    mempool.post_validate_and_insert_transaction(consensus, result, transaction, priority, orphan).unwrap_or_default()
+                .flat_map(|(transaction, validation_result)| {
+                    if let Ok(Some(accepted_transaction)) =
+                        mempool.post_validate_and_insert_transaction(consensus, validation_result, transaction, priority, orphan)
+                    {
+                        accepted_transactions.push(accepted_transaction.clone());
+                        mempool.get_unorphaned_transactions_after_accepted_transaction(&accepted_transaction)
+                    } else {
+                        vec![]
+                    }
                 })
                 .collect::<Vec<_>>();
+            drop(mempool);

             // TODO: handle RuleError::RejectInvalid errors when a banning process gets implemented
-            unorphaned_txs.extend(txs);
+            accepted_transactions.extend(self.validate_and_insert_unorphaned_transactions(consensus, unorphaned_transactions));
         }
-        Ok(unorphaned_txs)
+        Ok(accepted_transactions)
     }

     fn next_transaction_chunk_upper_bound(&self, transactions: &[MutableTransaction], lower_bound: usize) -> Option<usize> {
         let mut mass = 0;
         transactions[lower_bound..]
             .iter()
             .position(|tx| {
                 mass += tx.calculated_mass.unwrap();
                 mass >= self.block_template_builder.max_block_mass()
             })
             .or(Some(transactions.len()))
     }

@@ -261,7 +343,17 @@ impl MiningManager {
         block_transactions: &[Transaction],
     ) -> MiningManagerResult<Vec<Arc<Transaction>>> {
         // TODO: should use tx acceptance data to verify that new block txs are actually accepted into virtual state.
-        Ok(self.mempool.write().handle_new_block_transactions(consensus, block_transactions)?)
+ + // write lock on mempool + let unorphaned_transactions = self.mempool.write().handle_new_block_transactions(block_transactions)?; + + // alternate no & write lock on mempool + let accepted_transactions = self.validate_and_insert_unorphaned_transactions(consensus, unorphaned_transactions); + + // write lock on mempool + self.mempool.write().expire_low_priority_transactions(consensus)?; + + Ok(accepted_transactions) } pub fn revalidate_high_priority_transactions(&self, consensus: &dyn ConsensusApi) -> MiningManagerResult> { diff --git a/mining/src/mempool/handle_new_block_transactions.rs b/mining/src/mempool/handle_new_block_transactions.rs index 7835a097e8..612aa6820e 100644 --- a/mining/src/mempool/handle_new_block_transactions.rs +++ b/mining/src/mempool/handle_new_block_transactions.rs @@ -1,27 +1,26 @@ use crate::mempool::{errors::RuleResult, Mempool}; use kaspa_consensus_core::{api::ConsensusApi, tx::Transaction}; -use std::{collections::HashSet, sync::Arc}; +use std::collections::HashSet; -use super::model::pool::Pool; +use super::model::{pool::Pool, tx::MempoolTransaction}; impl Mempool { - pub(crate) fn handle_new_block_transactions( - &mut self, - consensus: &dyn ConsensusApi, - block_transactions: &[Transaction], - ) -> RuleResult>> { - let mut accepted_orphans = vec![]; + pub(crate) fn handle_new_block_transactions(&mut self, block_transactions: &[Transaction]) -> RuleResult> { + let mut unorphaned_transactions = vec![]; for transaction in block_transactions[1..].iter() { let transaction_id = transaction.id(); self.remove_transaction(&transaction_id, false)?; self.remove_double_spends(transaction)?; self.orphan_pool.remove_orphan(&transaction_id, false)?; - let mut unorphaned_transactions = self.process_orphans_after_accepted_transaction(consensus, transaction)?; - accepted_orphans.append(&mut unorphaned_transactions); + unorphaned_transactions.append(&mut self.get_unorphaned_transactions_after_accepted_transaction(transaction)); } + Ok(unorphaned_transactions) + } + + pub(crate) fn expire_low_priority_transactions(&mut self, consensus: &dyn ConsensusApi) -> RuleResult<()> { self.orphan_pool.expire_low_priority_transactions(consensus.get_virtual_daa_score())?; self.transaction_pool.expire_low_priority_transactions(consensus.get_virtual_daa_score())?; - Ok(accepted_orphans) + Ok(()) } fn remove_double_spends(&mut self, transaction: &Transaction) -> RuleResult<()> { diff --git a/mining/src/mempool/mod.rs b/mining/src/mempool/mod.rs index cfe8b44d1c..f438bc27ea 100644 --- a/mining/src/mempool/mod.rs +++ b/mining/src/mempool/mod.rs @@ -14,7 +14,7 @@ pub(crate) mod check_transaction_standard; pub mod config; pub mod errors; pub(crate) mod handle_new_block_transactions; -mod model; +pub(crate) mod model; pub(crate) mod populate_entries_and_try_validate; pub(crate) mod remove_transaction; pub(crate) mod revalidate_high_priority_transactions; diff --git a/mining/src/mempool/validate_and_insert_transaction.rs b/mining/src/mempool/validate_and_insert_transaction.rs index 9d2a729ed4..70bf16ffc9 100644 --- a/mining/src/mempool/validate_and_insert_transaction.rs +++ b/mining/src/mempool/validate_and_insert_transaction.rs @@ -11,7 +11,6 @@ use kaspa_consensus_core::{ tx::{MutableTransaction, Transaction, TransactionId, TransactionOutpoint, UtxoEntry}, }; use kaspa_core::info; -use kaspa_utils::vec::VecExtensions; use super::tx::{Orphan, Priority}; @@ -36,7 +35,7 @@ impl Mempool { transaction: MutableTransaction, priority: Priority, orphan: Orphan, - ) -> RuleResult>> { + ) -> RuleResult>> 
{ // Re-check double spends since validate_and_insert_transaction is no longer atomic self.transaction_pool.check_double_spends(&transaction)?; @@ -47,7 +46,7 @@ impl Mempool { return Err(RuleError::RejectDisallowedOrphan(transaction.id())); } self.orphan_pool.try_add_orphan(consensus, transaction, priority)?; - return Ok(vec![]); + return Ok(None); } Err(err) => { return Err(err); @@ -63,10 +62,7 @@ impl Mempool { // transaction reference and mutably for the call to process_orphans_after_accepted_transaction let accepted_transaction = self.transaction_pool.add_transaction(transaction, consensus.get_virtual_daa_score(), priority)?.mtx.tx.clone(); - let mut accepted_transactions = self.process_orphans_after_accepted_transaction(consensus, &accepted_transaction)?; - // We include the original accepted transaction as well - accepted_transactions.swap_insert(0, accepted_transaction); - Ok(accepted_transactions) + Ok(Some(accepted_transaction)) } fn validate_transaction_in_isolation(&self, transaction: &MutableTransaction) -> RuleResult<()> { @@ -87,36 +83,13 @@ impl Mempool { Ok(()) } - /// Finds all transactions that can be unorphaned after a some transaction - /// has been accepted. Unorphan and add those to the transaction pool. - /// - /// Returns the list of all successfully processed transactions. - pub(crate) fn process_orphans_after_accepted_transaction( - &mut self, - consensus: &dyn ConsensusApi, - accepted_transaction: &Transaction, - ) -> RuleResult>> { - // Rust rewrite: - // - The function is relocated from OrphanPool into Mempool - let unorphaned_transactions = self.get_unorphaned_transactions_after_accepted_transaction(consensus, accepted_transaction)?; - let mut added_transactions = Vec::with_capacity(unorphaned_transactions.len() + 1); // +1 since some callers add the accepted tx itself - for transaction in unorphaned_transactions { - // The returned transactions are leaving the mempool but must also be added to - // the transaction pool so we clone. - added_transactions.push(transaction.mtx.tx.clone()); - self.transaction_pool.add_mempool_transaction(transaction)?; - } - Ok(added_transactions) - } - /// Returns a list with all successfully unorphaned transactions after some /// transaction has been accepted. 
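     /// (For example, if two orphans each spend a different output of the accepted
     /// transaction, both are returned here, ready for consensus validation by the manager.)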
-    fn get_unorphaned_transactions_after_accepted_transaction(
+    pub(crate) fn get_unorphaned_transactions_after_accepted_transaction(
         &mut self,
-        consensus: &dyn ConsensusApi,
         transaction: &Transaction,
-    ) -> RuleResult<Vec<MempoolTransaction>> {
-        let mut accepted_orphans = Vec::new();
+    ) -> Vec<MempoolTransaction> {
+        let mut unorphaned_transactions = Vec::new();
         let transaction_id = transaction.id();
         let mut outpoint = TransactionOutpoint::new(transaction_id, 0);
         for (i, output) in transaction.outputs.iter().enumerate() {
@@ -139,9 +112,9 @@ impl Mempool {
                 continue;
             }
             if let Some(orphan_id) = orphan_id {
-                match self.unorphan_transaction(consensus, &orphan_id) {
-                    Ok(accepted_tx) => {
-                        accepted_orphans.push(accepted_tx);
+                match self.unorphan_transaction(&orphan_id) {
+                    Ok(unorphaned_tx) => {
+                        unorphaned_transactions.push(unorphaned_tx);
                     }
                     Err(err) => {
                         // In case of validation error, we log the problem and drop the
@@ -151,31 +124,27 @@ impl Mempool {
                 }
             }
         }
-        Ok(accepted_orphans)
+
+        unorphaned_transactions
     }

-    fn unorphan_transaction(
-        &mut self,
-        consensus: &dyn ConsensusApi,
-        transaction_id: &TransactionId,
-    ) -> RuleResult<MempoolTransaction> {
+    fn unorphan_transaction(&mut self, transaction_id: &TransactionId) -> RuleResult<MempoolTransaction> {
         // Rust rewrite:
         // - Instead of adding the validated transaction to mempool transaction pool,
         //   we return it.
-        // - The function is relocated from OrphanPool into Mempool
+        // - The function is relocated from OrphanPool into Mempool.
+        // - The function no longer validates the transaction in mempool (signatures) nor in context.
+        //   This job is delegated to a fn called later in the process (Manager::validate_and_insert_unorphaned_transactions).

         // Remove the transaction identified by transaction_id from the orphan pool.
         let mut transactions = self.orphan_pool.remove_orphan(transaction_id, false)?;

-        // At this point, `transactions` contain exactly one transaction.
+        // At this point, `transactions` contains exactly one transaction.
         // The one we just removed from the orphan pool.
assert_eq!(transactions.len(), 1, "the list returned by remove_orphan is expected to contain exactly one transaction"); - let mut transaction = transactions.pop().unwrap(); + let transaction = transactions.pop().unwrap(); self.transaction_pool.check_double_spends(&transaction.mtx)?; - consensus.validate_mempool_transaction_and_populate(&mut transaction.mtx)?; - self.validate_transaction_in_context(&transaction.mtx)?; - transaction.added_at_daa_score = consensus.get_virtual_daa_score(); Ok(transaction) } } From ff59ac9bdfc1b86729b84d872348b3d8e76bcc82 Mon Sep 17 00:00:00 2001 From: Tiram <18632023+tiram88@users.noreply.github.com> Date: Tue, 29 Aug 2023 23:00:29 +0300 Subject: [PATCH 08/86] Fix failing test --- mining/src/testutils/consensus_mock.rs | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/mining/src/testutils/consensus_mock.rs b/mining/src/testutils/consensus_mock.rs index d9af3bdc29..18b5fb49b5 100644 --- a/mining/src/testutils/consensus_mock.rs +++ b/mining/src/testutils/consensus_mock.rs @@ -129,6 +129,10 @@ impl ConsensusApi for ConsensusMock { Ok(()) } + fn validate_mempool_transactions_in_parallel(&self, transactions: &mut [MutableTransaction]) -> Vec> { + transactions.iter_mut().map(|x| self.validate_mempool_transaction_and_populate(x)).collect() + } + fn calculate_transaction_mass(&self, transaction: &Transaction) -> u64 { if transaction.is_coinbase() { 0 From 974a9809b4c9c85fe9310482abd20885cd662221 Mon Sep 17 00:00:00 2001 From: Tiram <18632023+tiram88@users.noreply.github.com> Date: Wed, 30 Aug 2023 15:31:58 +0300 Subject: [PATCH 09/86] Enhance performance & refine lock granularity on Mempool and Consensus while revalidating high priority transactions --- consensus/core/src/tx.rs | 6 + mining/src/manager.rs | 103 +++++++++++++++++- mining/src/mempool/mod.rs | 17 ++- mining/src/mempool/model/pool.rs | 1 + mining/src/mempool/model/transactions_pool.rs | 4 + .../populate_entries_and_try_validate.rs | 18 +-- .../revalidate_high_priority_transactions.rs | 61 ----------- mining/src/model/txs_stager.rs | 25 +++-- 8 files changed, 138 insertions(+), 97 deletions(-) delete mode 100644 mining/src/mempool/revalidate_high_priority_transactions.rs diff --git a/consensus/core/src/tx.rs b/consensus/core/src/tx.rs index f9e1fef7c2..4d3ae47518 100644 --- a/consensus/core/src/tx.rs +++ b/consensus/core/src/tx.rs @@ -340,6 +340,12 @@ impl> MutableTransaction { } } +impl> AsRef for MutableTransaction { + fn as_ref(&self) -> &Transaction { + self.tx.as_ref() + } +} + /// Private struct used to wrap a [`MutableTransaction`] as a [`VerifiableTransaction`] struct MutableTransactionVerifiableWrapper<'a, T: AsRef> { inner: &'a MutableTransaction, diff --git a/mining/src/manager.rs b/mining/src/manager.rs index 4468df56c2..5c42a46fc8 100644 --- a/mining/src/manager.rs +++ b/mining/src/manager.rs @@ -27,7 +27,8 @@ use kaspa_consensus_core::{ tx::{MutableTransaction, Transaction, TransactionId, TransactionOutput}, }; use kaspa_consensusmanager::{spawn_blocking, ConsensusProxy}; -use kaspa_core::{error, info}; +use kaspa_core::{debug, error, info, warn}; +use kaspa_mining_errors::mempool::RuleError; use parking_lot::{Mutex, RwLock}; pub struct MiningManager { @@ -188,6 +189,7 @@ impl MiningManager { .extend(validate_mempool_transactions_in_parallel(consensus, &mut transactions[lower_bound..upper_bound])); lower_bound = upper_bound; } + assert_eq!(transactions.len(), validation_results.len(), "every transaction should have a matching validation result"); // write lock on mempool let mut mempool = 
self.mempool.write();
@@ -237,14 +239,20 @@ impl MiningManager {
         let mut accepted_transactions: Vec<Arc<Transaction>> = Vec::with_capacity(transactions.len());
         let mut batch = TransactionsStagger::new(transactions);
         while let Some(transactions) = batch.stagger() {
+            if transactions.is_empty() {
+                panic!(
+                    "The mempool got a batch of transactions for validation with cyclic dependencies: {:?}",
+                    transactions.iter().map(|x| x.id()).collect::<Vec<_>>()
+                );
+            }
             let mut transactions = transactions.into_iter().map(MutableTransaction::from_tx).collect::<Vec<_>>();

             // read lock on mempool
             // Here, we simply drop all erroneous transactions since the caller doesn't care about those anyway
-            transactions = transactions
-                .into_iter()
-                .filter_map(|tx| self.mempool.read().pre_validate_and_populate_transaction(consensus, tx).ok())
-                .collect();
+            let mempool = self.mempool.read();
+            transactions =
+                transactions.into_iter().filter_map(|tx| mempool.pre_validate_and_populate_transaction(consensus, tx).ok()).collect();
+            drop(mempool);

             // no lock on mempool
             // We process the transactions by chunks of max block mass to prevent locking the virtual processor for too long.
@@ -255,6 +263,7 @@ impl MiningManager {
                     .extend(validate_mempool_transactions_in_parallel(consensus, &mut transactions[lower_bound..upper_bound]));
                 lower_bound = upper_bound;
             }
+            assert_eq!(transactions.len(), validation_results.len(), "every transaction should have a matching validation result");

             // write lock on mempool
             let mut mempool = self.mempool.write();
@@ -357,7 +366,89 @@ impl MiningManager {
     pub fn revalidate_high_priority_transactions(&self, consensus: &dyn ConsensusApi) -> MiningManagerResult<Vec<TransactionId>> {
-        Ok(self.mempool.write().revalidate_high_priority_transactions(consensus)?)
+        // read lock on mempool
+        let transactions = self.mempool.read().all_transactions_with_priority(Priority::High);
+
+        let mut valid_ids = Vec::with_capacity(transactions.len());
+
+        // We process the transactions by level of dependency inside the batch.
+        // Doing so allows removing all chained dependencies of rejected transactions before actually trying
+        // to revalidate those, saving potentially a lot of computing resources.
+        let mut batch = TransactionsStagger::new(transactions);
+        while let Some(transactions) = batch.stagger() {
+            if transactions.is_empty() {
+                panic!(
+                    "The mempool high priority transactions have cyclic dependencies: {:?}",
+                    transactions.iter().map(|x| x.id()).collect::<Vec<_>>()
+                );
+            }
+
+            // read lock on mempool
+            // As the revalidation process is no longer atomic, we filter the transactions ready for revalidation,
+            // keeping only the ones actually present in the mempool (see comment above).
+            let mempool = self.mempool.read();
+            let mut transactions = transactions
+                .into_iter()
+                .filter_map(|mut x| {
+                    if mempool.has_transaction(&x.id(), true, false) {
+                        mempool.populate_mempool_entries(&mut x);
+                        Some(x)
+                    } else {
+                        None
+                    }
+                })
+                .collect::<Vec<_>>();
+            drop(mempool);

+            // no lock on mempool
+            // We process the transactions by chunks of max block mass to prevent locking the virtual processor for too long.
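+            // (Illustration: assuming a roughly uniform transaction mass m, each chunk below holds
+            // about max_block_mass / m transactions, i.e. consensus is asked to validate at most
+            // one block's worth of transactions per parallel call.)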
+ let mut lower_bound: usize = 0; + let mut validation_results = Vec::with_capacity(transactions.len()); + while let Some(upper_bound) = self.next_transaction_chunk_upper_bound(&transactions, lower_bound) { + validation_results + .extend(validate_mempool_transactions_in_parallel(consensus, &mut transactions[lower_bound..upper_bound])); + lower_bound = upper_bound; + } + assert_eq!(transactions.len(), validation_results.len(), "every transaction should have a matching validation result"); + + // write lock on mempool + // According to the validation result, transactions are either accepted or removed + let mut mempool = self.mempool.write(); + for (transaction, validation_result) in transactions.into_iter().zip(validation_results) { + let transaction_id = transaction.id(); + // Only consider transactions still being in the mempool since during the validation, some might have been removed. + if mempool.update_revalidated_transaction(transaction) { + match validation_result { + Ok(()) => { + // A following transaction should not remove this one from the pool since we process in a topological order. + // TODO: consider the (very unlikely) scenario of two high priority txs sandwiching a low one, where + // in this case topology order is not guaranteed since we only considered chained dependencies of + // high-priority transactions. + valid_ids.push(transaction_id); + } + Err(RuleError::RejectMissingOutpoint) => { + debug!( + "Removing transaction {0} and its redeemers for missing outpoint during revalidation", + transaction_id + ); + // This call cleanly removes the invalid transaction and its redeemers. + mempool.remove_transaction(&transaction_id, true)?; + } + Err(err) => { + // Rust rewrite note: + // The behavior changes here compared to the golang version. + // The failed revalidation is simply logged and the process continues. + warn!("Removing transaction {0} and its redeemers, it failed revalidation with {1}", transaction_id, err); + // This call cleanly removes the invalid transaction and its redeemers. + mempool.remove_transaction(&transaction_id, true)?; + } + } + } + } + drop(mempool); + } + // Return the successfully processed high priority transaction ids + Ok(valid_ids) } /// is_transaction_output_dust returns whether or not the passed transaction output diff --git a/mining/src/mempool/mod.rs b/mining/src/mempool/mod.rs index f438bc27ea..ce8308e1ac 100644 --- a/mining/src/mempool/mod.rs +++ b/mining/src/mempool/mod.rs @@ -6,9 +6,10 @@ use crate::model::{ use self::{ config::Config, model::{orphan_pool::OrphanPool, pool::Pool, transactions_pool::TransactionsPool}, + tx::Priority, }; use kaspa_consensus_core::tx::{MutableTransaction, TransactionId}; -use std::sync::Arc; +use std::{collections::hash_map::Entry, sync::Arc}; pub(crate) mod check_transaction_standard; pub mod config; @@ -17,7 +18,6 @@ pub(crate) mod handle_new_block_transactions; pub(crate) mod model; pub(crate) mod populate_entries_and_try_validate; pub(crate) mod remove_transaction; -pub(crate) mod revalidate_high_priority_transactions; pub(crate) mod validate_and_insert_transaction; /// Mempool contains transactions intended to be inserted into a block and mined. 
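A note on the staggered processing relied upon above: `TransactionsStagger` repeatedly yields waves of transactions that do not depend on any other transaction of the batch. A minimal usage sketch (illustrative only; it assumes just the stagger API shown in this patch, over items implementing AsRef<Transaction>):

    let mut batch = TransactionsStagger::new(transactions);
    while let Some(wave) = batch.stagger() {
        if wave.is_empty() {
            // The remaining transactions depend on each other cyclically; give up on them.
            break;
        }
        // Every member of `wave` is independent of the rest of the batch, so the
        // whole wave can be consensus validated in parallel; transactions spending
        // outputs of `wave` members become ready in a later wave.
    }

The manager panics instead of breaking on an empty wave, since cyclic dependencies cannot occur among well-formed transactions.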
@@ -122,6 +122,19 @@ impl Mempool { pub(crate) fn block_candidate_transactions(&self) -> Vec { self.transaction_pool.all_ready_transactions() } + + pub(crate) fn all_transactions_with_priority(&self, priority: Priority) -> Vec { + self.transaction_pool.all_transactions_with_priority(priority) + } + + pub(crate) fn update_revalidated_transaction(&mut self, transaction: MutableTransaction) -> bool { + if let Entry::Occupied(mut entry) = self.transaction_pool.all_mut().entry(transaction.id()) { + entry.get_mut().mtx = transaction; + true + } else { + false + } + } } pub mod tx { diff --git a/mining/src/mempool/model/pool.rs b/mining/src/mempool/model/pool.rs index 79cdd308ae..f587ee5fa7 100644 --- a/mining/src/mempool/model/pool.rs +++ b/mining/src/mempool/model/pool.rs @@ -127,6 +127,7 @@ pub(crate) struct PoolIndex { } impl PoolIndex { + #[allow(dead_code)] pub(crate) fn new(transactions: TransactionIdSet, chained_transactions: TransactionsEdges) -> Self { Self { transactions, chained_transactions } } diff --git a/mining/src/mempool/model/transactions_pool.rs b/mining/src/mempool/model/transactions_pool.rs index 907a36d5f7..73164e9bc3 100644 --- a/mining/src/mempool/model/transactions_pool.rs +++ b/mining/src/mempool/model/transactions_pool.rs @@ -239,6 +239,10 @@ impl TransactionsPool { self.all().values().map(|x| x.mtx.clone()).collect() } + pub(crate) fn all_transactions_with_priority(&self, priority: Priority) -> Vec { + self.all().values().filter_map(|x| if x.priority == priority { Some(x.mtx.clone()) } else { None }).collect() + } + pub(crate) fn get_outpoint_owner_id(&self, outpoint: &TransactionOutpoint) -> Option<&TransactionId> { self.utxo_set.get_outpoint_owner_id(outpoint) } diff --git a/mining/src/mempool/populate_entries_and_try_validate.rs b/mining/src/mempool/populate_entries_and_try_validate.rs index 637638bdaa..1626bd3eb8 100644 --- a/mining/src/mempool/populate_entries_and_try_validate.rs +++ b/mining/src/mempool/populate_entries_and_try_validate.rs @@ -3,23 +3,7 @@ use kaspa_consensus_core::{api::ConsensusApi, constants::UNACCEPTED_DAA_SCORE, t use kaspa_mining_errors::mempool::RuleError; impl Mempool { - pub(crate) fn populate_entries_and_try_validate( - &self, - consensus: &dyn ConsensusApi, - transaction: &mut MutableTransaction, - ) -> RuleResult<()> { - // Rust rewrite note: - // Neither parentsInPool nor missingOutpoints are actually used or needed by the - // callers so we neither build nor return them. - // parentsInPool is now built by transactions_pool::add_mempool_transaction. - // missingOutpoints is reduced to a simple ConsensusError::TxMissingOutpoints. 
- - self.populate_mempool_entries(transaction); - validate_mempool_transaction_and_populate(consensus, transaction)?; - Ok(()) - } - - pub(super) fn populate_mempool_entries(&self, transaction: &mut MutableTransaction) { + pub(crate) fn populate_mempool_entries(&self, transaction: &mut MutableTransaction) { for (i, input) in transaction.tx.inputs.iter().enumerate() { if let Some(parent) = self.transaction_pool.get(&input.previous_outpoint.transaction_id) { let output = &parent.mtx.tx.outputs[input.previous_outpoint.index as usize]; diff --git a/mining/src/mempool/revalidate_high_priority_transactions.rs b/mining/src/mempool/revalidate_high_priority_transactions.rs deleted file mode 100644 index ac64245c03..0000000000 --- a/mining/src/mempool/revalidate_high_priority_transactions.rs +++ /dev/null @@ -1,61 +0,0 @@ -use crate::{ - mempool::{ - errors::{RuleError, RuleResult}, - model::pool::Pool, - Mempool, - }, - model::topological_index::TopologicalIndex, -}; -use kaspa_consensus_core::{ - api::ConsensusApi, - tx::{MutableTransaction, TransactionId}, -}; -use kaspa_core::debug; - -use super::tx::Priority; - -impl Mempool { - pub(crate) fn revalidate_high_priority_transactions(&mut self, consensus: &dyn ConsensusApi) -> RuleResult> { - // First establish a topologically ordered list of all high priority transaction ids - - // Processing the transactions in a parent to chained order guarantees that - // any transaction removal will propagate to all chained dependencies saving - // validations calls to consensus. - let ids = self.transaction_pool.index(Priority::High).topological_index()?; - let mut valid_ids = vec![]; - - for transaction_id in ids.iter() { - // Try to take the transaction out of the storage map so we can mutate it with some self functions. - // The redeemers of removed transactions are removed too so the following call may return a None. - if let Some(mut transaction) = self.transaction_pool.all_mut().remove(transaction_id) { - let is_valid = self.revalidate_transaction(consensus, &mut transaction.mtx)?; - // After mutating we can now put the transaction back into the storage map. - // The alternative would be to wrap transactions in the pools with a RefCell. - self.transaction_pool.all_mut().insert(*transaction_id, transaction); - if is_valid { - // A following transaction should not remove this one from the pool since we process - // in topological order - // TODO: consider the scenario of two high priority txs sandwiching a low one, where - // in this case topology order is not guaranteed since we topologically sorted only - // high-priority transactions - valid_ids.push(*transaction_id); - } else { - debug!("Removing transaction {0}, it failed revalidation", transaction_id); - // This call cleanly removes the invalid transaction and its redeemers. 
-                    self.remove_transaction(transaction_id, true)?;
-                }
-            }
-        }
-        // Return the successfully processed high priority transaction ids
-        Ok(valid_ids)
-    }
-
-    fn revalidate_transaction(&self, consensus: &dyn ConsensusApi, transaction: &mut MutableTransaction) -> RuleResult<bool> {
-        transaction.clear_entries();
-        match self.populate_entries_and_try_validate(consensus, transaction) {
-            Ok(_) => Ok(true),
-            Err(RuleError::RejectMissingOutpoint) => Ok(false),
-            Err(err) => Err(err),
-        }
-    }
-}
diff --git a/mining/src/model/txs_stager.rs b/mining/src/model/txs_stager.rs
index ca3a00d801..86377596c2 100644
--- a/mining/src/model/txs_stager.rs
+++ b/mining/src/model/txs_stager.rs
@@ -1,14 +1,14 @@
 use super::TransactionIdSet;
 use kaspa_consensus_core::tx::{Transaction, TransactionId};

-pub struct TransactionsStagger {
-    txs: Vec<Transaction>,
+pub struct TransactionsStagger<T: AsRef<Transaction>> {
+    txs: Vec<T>,
     ids: TransactionIdSet,
 }

-impl TransactionsStagger {
-    pub fn new(txs: Vec<Transaction>) -> Self {
-        let ids = txs.iter().map(|x| x.id()).collect();
+impl<T: AsRef<Transaction>> TransactionsStagger<T> {
+    pub fn new(txs: Vec<T>) -> Self {
+        let ids = txs.iter().map(|x| x.as_ref().id()).collect();
         Self { txs, ids }
     }

@@ -17,7 +17,10 @@
     }

     /// Extract and return all independent transactions
-    pub fn stagger(&mut self) -> Option<Vec<Transaction>> {
+    pub fn stagger(&mut self) -> Option<Vec<T>> {
+        if self.is_empty() {
+            return None;
+        }
         let mut ready = Vec::with_capacity(self.txs.len());
         let mut dependent = Vec::with_capacity(self.txs.len());
         while let Some(tx) = self.txs.pop() {
@@ -28,15 +31,15 @@
             }
         }
         self.txs = dependent;
-        self.ids = self.txs.iter().map(|x| x.id()).collect();
-        (!self.is_empty()).then_some(ready)
+        self.ids = self.txs.iter().map(|x| x.as_ref().id()).collect();
+        Some(ready)
     }

-    pub fn has(&self, transaction_id: &TransactionId) -> bool {
+    fn has(&self, transaction_id: &TransactionId) -> bool {
         self.ids.contains(transaction_id)
     }

-    pub fn is_dependent(&self, tx: &Transaction) -> bool {
-        tx.inputs.iter().any(|x| self.has(&x.previous_outpoint.transaction_id))
+    fn is_dependent(&self, tx: &T) -> bool {
+        tx.as_ref().inputs.iter().any(|x| self.has(&x.previous_outpoint.transaction_id))
     }
 }

From b81487609104ab4ba95f9ee78bc104534143d187 Mon Sep 17 00:00:00 2001
From: Tiram <18632023+tiram88@users.noreply.github.com>
Date: Thu, 31 Aug 2023 22:53:25 +0300
Subject: [PATCH 10/86] Comments

---
 mining/src/manager.rs | 16 ++++++++++------
 1 file changed, 10 insertions(+), 6 deletions(-)

diff --git a/mining/src/manager.rs b/mining/src/manager.rs
index 5c42a46fc8..d4e0c1d0f8 100644
--- a/mining/src/manager.rs
+++ b/mining/src/manager.rs
@@ -175,8 +175,10 @@ impl MiningManager {
         // The capacity used here may be exceeded (see next comment).
         let mut accepted_transactions = Vec::with_capacity(incoming_transactions.len());
         // We loop as long as incoming unorphaned transactions do unorphan other transactions when they
-        // get validated and inserted into the mempool
+        // get validated and inserted into the mempool.
         while !incoming_transactions.is_empty() {
+            // Since the consensus validation requires a slice of MutableTransaction, we destructure the vector of
+            // MempoolTransaction into 2 distinct vectors holding respectively the needed MutableTransaction and priority.
let (mut transactions, priorities): (Vec, Vec) = incoming_transactions.into_iter().map(|x| (x.mtx, x.priority)).unzip(); @@ -412,18 +414,20 @@ impl MiningManager { assert_eq!(transactions.len(), validation_results.len(), "every transaction should have a matching validation result"); // write lock on mempool - // According to the validation result, transactions are either accepted or removed + // Depending on the validation result, transactions are either accepted or removed let mut mempool = self.mempool.write(); for (transaction, validation_result) in transactions.into_iter().zip(validation_results) { let transaction_id = transaction.id(); - // Only consider transactions still being in the mempool since during the validation, some might have been removed. + // Only consider transactions still being in the mempool since during the validation some might have been removed. if mempool.update_revalidated_transaction(transaction) { match validation_result { Ok(()) => { // A following transaction should not remove this one from the pool since we process in a topological order. - // TODO: consider the (very unlikely) scenario of two high priority txs sandwiching a low one, where - // in this case topology order is not guaranteed since we only considered chained dependencies of - // high-priority transactions. + // Still, considering the (very unlikely) scenario of two high priority txs sandwiching a low one, where + // in this case topological order is not guaranteed since we only considered chained dependencies of + // high-priority transactions, we might wrongfully return as valid the id of a removed transaction. + // However, as only consequence, said transaction would then be advertised to registered peers and not be + // provided upon request. valid_ids.push(transaction_id); } Err(RuleError::RejectMissingOutpoint) => { From bd13f22b325c0a52416d681d5e07221968dbe92e Mon Sep 17 00:00:00 2001 From: Tiram <18632023+tiram88@users.noreply.github.com> Date: Fri, 1 Sep 2023 14:31:03 +0300 Subject: [PATCH 11/86] Fix upper bound of transactions chunk --- mining/src/manager.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/mining/src/manager.rs b/mining/src/manager.rs index d4e0c1d0f8..f0d38c0532 100644 --- a/mining/src/manager.rs +++ b/mining/src/manager.rs @@ -303,6 +303,7 @@ impl MiningManager { mass += tx.calculated_mass.unwrap(); mass >= self.block_template_builder.max_block_mass() }) + .map(|relative_index| relative_index + lower_bound) .or(Some(transactions.len())) } From eab8c8e3b64b1cb0ba60801e4e8c3bd0cdd6b9db Mon Sep 17 00:00:00 2001 From: Tiram <18632023+tiram88@users.noreply.github.com> Date: Fri, 1 Sep 2023 16:48:05 +0300 Subject: [PATCH 12/86] Ensure a chunk has at least 1 tx --- mining/src/manager.rs | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/mining/src/manager.rs b/mining/src/manager.rs index f0d38c0532..4baada45f9 100644 --- a/mining/src/manager.rs +++ b/mining/src/manager.rs @@ -303,7 +303,10 @@ impl MiningManager { mass += tx.calculated_mass.unwrap(); mass >= self.block_template_builder.max_block_mass() }) - .map(|relative_index| relative_index + lower_bound) + // Make sure the upper bound is greater than the lower bound, allowing to handle a very unlikely, + // (if not impossible) case where the mass of a single transaction is greater than the maximum + // chunk mass. 
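+        // (Example: with lower_bound == 10, if the transaction at index 10 alone reaches the cap,
+        // position() yields Some(0) and max(1) maps it to an upper bound of 11, so the chunk
+        // still contains that single transaction instead of being empty.)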
+        .map(|relative_index| relative_index.max(1) + lower_bound)
         .or(Some(transactions.len()))
     }

From f7b58d2988b6a44c9193dea8b79d8b7fd481016b Mon Sep 17 00:00:00 2001
From: Tiram <18632023+tiram88@users.noreply.github.com>
Date: Fri, 1 Sep 2023 16:48:53 +0300
Subject: [PATCH 13/86] Prevent adding the same tx to the mempool twice

---
 .../src/mempool/validate_and_insert_transaction.rs | 12 +++++++++++-
 1 file changed, 11 insertions(+), 1 deletion(-)

diff --git a/mining/src/mempool/validate_and_insert_transaction.rs b/mining/src/mempool/validate_and_insert_transaction.rs
index 70bf16ffc9..9590e70bc9 100644
--- a/mining/src/mempool/validate_and_insert_transaction.rs
+++ b/mining/src/mempool/validate_and_insert_transaction.rs
@@ -36,6 +36,16 @@ impl Mempool {
         priority: Priority,
         orphan: Orphan,
     ) -> RuleResult<Option<Arc<Transaction>>> {
+        let transaction_id = transaction.id();
+
+        // First check if the transaction was not already added to the mempool.
+        // The case may arise since the execution of the manager public functions is no
+        // longer atomic and different code paths may lead to inserting the same transaction
+        // concurrently.
+        if self.transaction_pool.has(&transaction_id) {
+            return Ok(None);
+        }
+
         // Re-check double spends since validate_and_insert_transaction is no longer atomic
         self.transaction_pool.check_double_spends(&transaction)?;

@@ -43,7 +53,7 @@ impl Mempool {
             Ok(_) => {}
             Err(RuleError::RejectMissingOutpoint) => {
                 if orphan == Orphan::Forbidden {
-                    return Err(RuleError::RejectDisallowedOrphan(transaction.id()));
+                    return Err(RuleError::RejectDisallowedOrphan(transaction_id));
                 }
                 self.orphan_pool.try_add_orphan(consensus, transaction, priority)?;
                 return Ok(None);

From 13b4e43553cbe70de4332377405a5a30e521e39b Mon Sep 17 00:00:00 2001
From: Tiram <18632023+tiram88@users.noreply.github.com>
Date: Fri, 1 Sep 2023 18:26:18 +0300
Subject: [PATCH 14/86] Clear transaction entries before revalidation

---
 mining/src/manager.rs | 1 +
 1 file changed, 1 insertion(+)

diff --git a/mining/src/manager.rs b/mining/src/manager.rs
index 4baada45f9..96affb6ef0 100644
--- a/mining/src/manager.rs
+++ b/mining/src/manager.rs
@@ -397,6 +397,7 @@ impl MiningManager {
                 .filter_map(|mut x| {
                     if mempool.has_transaction(&x.id(), true, false) {
+                        x.clear_entries();
                         mempool.populate_mempool_entries(&mut x);
                         Some(x)
                     } else {

From 9026043d63dfd6d12d4fc0cb32b72dc3a6638d48 Mon Sep 17 00:00:00 2001
From: Tiram <18632023+tiram88@users.noreply.github.com>
Date: Fri, 1 Sep 2023 18:45:29 +0300
Subject: [PATCH 15/86] Add some logs and comments

---
 mining/src/block_template/builder.rs          |  4 +-
 mining/src/manager.rs                         | 57 +++++++++++++------
 .../validate_and_insert_transaction.rs        |  8 ++-
 3 files changed, 48 insertions(+), 21 deletions(-)

diff --git a/mining/src/block_template/builder.rs b/mining/src/block_template/builder.rs
index be265d7b7e..4c46efed56 100644
--- a/mining/src/block_template/builder.rs
+++ b/mining/src/block_template/builder.rs
@@ -3,7 +3,7 @@ use crate::{block_template::selector::TransactionsSelector, model::candidate_tx:
 use kaspa_consensus_core::{
     api::ConsensusApi, block::BlockTemplate, coinbase::MinerData, merkle::calc_hash_merkle_root, tx::COINBASE_TRANSACTION_INDEX,
 };
-use kaspa_core::{debug, time::unix_now};
+use kaspa_core::{time::unix_now, trace};

 pub(crate) struct BlockTemplateBuilder {
     policy: Policy,
@@ -84,7 +84,7 @@ impl BlockTemplateBuilder {
         miner_data: &MinerData,
         transactions: Vec<CandidateTransaction>,
     ) -> BuilderResult<BlockTemplate> {
-        debug!("Considering {} transactions for inclusion to new block", transactions.len());
+        
trace!("Considering {} transactions for inclusion into a new block", transactions.len()); let mut selector = TransactionsSelector::new(self.policy.clone(), transactions); let block_txs = selector.select_transactions(); Ok(consensus.build_block_template(miner_data.clone(), block_txs)?) diff --git a/mining/src/manager.rs b/mining/src/manager.rs index 96affb6ef0..7d68999226 100644 --- a/mining/src/manager.rs +++ b/mining/src/manager.rs @@ -27,7 +27,7 @@ use kaspa_consensus_core::{ tx::{MutableTransaction, Transaction, TransactionId, TransactionOutput}, }; use kaspa_consensusmanager::{spawn_blocking, ConsensusProxy}; -use kaspa_core::{debug, error, info, warn}; +use kaspa_core::{debug, error, warn}; use kaspa_mining_errors::mempool::RuleError; use parking_lot::{Mutex, RwLock}; @@ -178,7 +178,7 @@ impl MiningManager { // get validated and inserted into the mempool. while !incoming_transactions.is_empty() { // Since the consensus validation requires a slice of MutableTransaction, we destructure the vector of - // MempoolTransaction into 2 distinct vectors holding respectively the needed MutableTransaction and priority. + // MempoolTransaction into 2 distinct vectors holding respectively the needed MutableTransaction and Priority. let (mut transactions, priorities): (Vec, Vec) = incoming_transactions.into_iter().map(|x| (x.mtx, x.priority)).unzip(); @@ -214,7 +214,7 @@ impl MiningManager { } Ok(None) => vec![], Err(err) => { - info!("Failed to unorphan transaction {0} due to rule error: {1}", orphan_id, err.to_string()); + debug!("Failed to unorphan transaction {0} due to rule error: {1}", orphan_id, err); vec![] } } @@ -250,10 +250,21 @@ impl MiningManager { let mut transactions = transactions.into_iter().map(MutableTransaction::from_tx).collect::>(); // read lock on mempool - // Here, we simply drop all erroneous transactions since the caller doesn't care about those anyway + // Here, we simply log and drop all erroneous transactions since the caller doesn't care about those anyway let mempool = self.mempool.read(); - transactions = - transactions.into_iter().filter_map(|tx| mempool.pre_validate_and_populate_transaction(consensus, tx).ok()).collect(); + transactions = transactions + .into_iter() + .filter_map(|tx| { + let transaction_id = tx.id(); + match mempool.pre_validate_and_populate_transaction(consensus, tx) { + Ok(tx) => Some(tx), + Err(err) => { + debug!("Failed to pre validate transaction {0} due to rule error: {1}", transaction_id, err); + None + } + } + }) + .collect(); drop(mempool); // no lock on mempool @@ -268,18 +279,26 @@ impl MiningManager { assert_eq!(transactions.len(), validation_results.len(), "every transaction should have a matching validation result"); // write lock on mempool + // Here again, transactions failing post validation are logged and dropped let mut mempool = self.mempool.write(); let unorphaned_transactions = transactions .into_iter() .zip(validation_results) .flat_map(|(transaction, validation_result)| { - if let Ok(Some(accepted_transaction)) = - mempool.post_validate_and_insert_transaction(consensus, validation_result, transaction, priority, orphan) - { - accepted_transactions.push(accepted_transaction.clone()); - mempool.get_unorphaned_transactions_after_accepted_transaction(&accepted_transaction) - } else { - vec![] + let transaction_id = transaction.id(); + match mempool.post_validate_and_insert_transaction(consensus, validation_result, transaction, priority, orphan) { + Ok(Some(accepted_transaction)) => { + 
accepted_transactions.push(accepted_transaction.clone()); + mempool.get_unorphaned_transactions_after_accepted_transaction(&accepted_transaction) + } + Ok(None) => { + // Either orphaned or already existing in the mempool + vec![] + } + Err(err) => { + debug!("Failed to post validate transaction {0} due to rule error: {1}", transaction_id, err); + vec![] + } } }) .collect::>(); @@ -289,6 +308,8 @@ impl MiningManager { accepted_transactions.extend(self.validate_and_insert_unorphaned_transactions(consensus, unorphaned_transactions)); } + // Please note: the only reason this function returns a Result is the future handling of misbehaving nodes + // and the related RuleError::RejectInvalid Ok(accepted_transactions) } @@ -373,6 +394,7 @@ impl MiningManager { pub fn revalidate_high_priority_transactions(&self, consensus: &dyn ConsensusApi) -> MiningManagerResult> { // read lock on mempool + // Prepare a vector with clones of high priority transactions found in the mempool let transactions = self.mempool.read().all_transactions_with_priority(Priority::High); let mut valid_ids = Vec::with_capacity(transactions.len()); @@ -436,8 +458,8 @@ impl MiningManager { valid_ids.push(transaction_id); } Err(RuleError::RejectMissingOutpoint) => { - debug!( - "Removing transaction {0} and its redeemers for missing outpoint during revalidation", + warn!( + "Removing high priority transaction {0} and its redeemers for missing outpoint during revalidation", transaction_id ); // This call cleanly removes the invalid transaction and its redeemers. @@ -447,7 +469,10 @@ impl MiningManager { // Rust rewrite note: // The behavior changes here compared to the golang version. // The failed revalidation is simply logged and the process continues. - warn!("Removing transaction {0} and its redeemers, it failed revalidation with {1}", transaction_id, err); + warn!( + "Removing high priority transaction {0} and its redeemers, it failed revalidation with {1}", + transaction_id, err + ); // This call cleanly removes the invalid transaction and its redeemers. mempool.remove_transaction(&transaction_id, true)?; } diff --git a/mining/src/mempool/validate_and_insert_transaction.rs b/mining/src/mempool/validate_and_insert_transaction.rs index 9590e70bc9..41e36e4997 100644 --- a/mining/src/mempool/validate_and_insert_transaction.rs +++ b/mining/src/mempool/validate_and_insert_transaction.rs @@ -10,7 +10,7 @@ use kaspa_consensus_core::{ constants::UNACCEPTED_DAA_SCORE, tx::{MutableTransaction, Transaction, TransactionId, TransactionOutpoint, UtxoEntry}, }; -use kaspa_core::info; +use kaspa_core::{debug, info}; use super::tx::{Orphan, Priority}; @@ -43,6 +43,7 @@ impl Mempool { // longer atomic and different code paths may lead to inserting the same transaction // concurrently. 
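         // (For instance, the same transaction submitted over RPC and relayed by a peer can reach
         // this point through two code paths; the duplicate is answered with Ok(None) below.)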
if self.transaction_pool.has(&transaction_id) { + debug!("Transaction {0} is not post validated since already in the mempool", transaction_id); return Ok(None); } @@ -56,6 +57,7 @@ impl Mempool { return Err(RuleError::RejectDisallowedOrphan(transaction_id)); } self.orphan_pool.try_add_orphan(consensus, transaction, priority)?; + debug!("Transaction {0} added to orphans", transaction_id); return Ok(None); } Err(err) => { @@ -68,8 +70,7 @@ impl Mempool { // Before adding the transaction, check if there is room in the pool self.transaction_pool.limit_transaction_count(1, &transaction)?.iter().try_for_each(|x| self.remove_transaction(x, true))?; - // Here the accepted transaction is cloned in order to prevent having self borrowed immutably for the - // transaction reference and mutably for the call to process_orphans_after_accepted_transaction + // Add the transaction to the mempool as a MempoolTransaction and return a clone of the embedded Arc let accepted_transaction = self.transaction_pool.add_transaction(transaction, consensus.get_virtual_daa_score(), priority)?.mtx.tx.clone(); Ok(Some(accepted_transaction)) @@ -125,6 +126,7 @@ impl Mempool { match self.unorphan_transaction(&orphan_id) { Ok(unorphaned_tx) => { unorphaned_transactions.push(unorphaned_tx); + debug!("Transaction {0} unorphaned", transaction_id); } Err(err) => { // In case of validation error, we log the problem and drop the From 82cb48a9bb3eb6a624201922e74bbd826cee4072 Mon Sep 17 00:00:00 2001 From: Tiram <18632023+tiram88@users.noreply.github.com> Date: Tue, 5 Sep 2023 17:11:06 +0300 Subject: [PATCH 16/86] Add logs to debug transactions removals --- mining/src/manager.rs | 48 +++++++++++++++---- .../mempool/handle_new_block_transactions.rs | 6 ++- mining/src/mempool/model/orphan_pool.rs | 27 +++++++++-- mining/src/mempool/model/transactions_pool.rs | 3 +- mining/src/mempool/remove_transaction.rs | 34 ++++++++++--- .../validate_and_insert_transaction.rs | 8 ++-- 6 files changed, 100 insertions(+), 26 deletions(-) diff --git a/mining/src/manager.rs b/mining/src/manager.rs index 7d68999226..8fa6db5e7a 100644 --- a/mining/src/manager.rs +++ b/mining/src/manager.rs @@ -87,8 +87,13 @@ impl MiningManager { } Err(BuilderError::ConsensusError(BlockRuleError::InvalidTransactionsInNewBlock(invalid_transactions))) => { let mut mempool_write = self.mempool.write(); - invalid_transactions.iter().for_each(|(x, _)| { - let removal_result = mempool_write.remove_transaction(x, true); + invalid_transactions.iter().for_each(|(x, err)| { + let removal_result = mempool_write.remove_transaction( + x, + true, + "invalid in block template", + format!(" error: {}", err).as_str(), + ); if let Err(err) = removal_result { // Original golang comment: // mempool.remove_transactions might return errors in situations that are perfectly fine in this context. @@ -458,23 +463,46 @@ impl MiningManager { valid_ids.push(transaction_id); } Err(RuleError::RejectMissingOutpoint) => { - warn!( - "Removing high priority transaction {0} and its redeemers for missing outpoint during revalidation", - transaction_id - ); - // This call cleanly removes the invalid transaction and its redeemers. 
-                        mempool.remove_transaction(&transaction_id, true)?;
+                        let transaction = mempool.get_transaction(&transaction_id, true, false).unwrap();
+                        let missing_txs = transaction
+                            .entries
+                            .iter()
+                            .zip(transaction.tx.inputs.iter())
+                            .flat_map(
+                                |(entry, input)| if entry.is_none() { Some(input.previous_outpoint.transaction_id) } else { None },
+                            )
+                            .collect::<Vec<_>>();
+
+                        // A transaction may have missing outpoints for legitimate reasons related to concurrency, like a race
+                        // condition between a block accepted by the consensus whose processing by handle_new_block_transactions()
+                        // has not yet started or finished, and this ongoing call to revalidate.
+                        //
+                        // So we only remove the transaction and keep its redeemers in the mempool because we cannot be sure they
+                        // are invalid; in fact, in the race condition case they are valid regarding outpoints.
+                        let extra_info = match missing_txs.len() {
+                            0 => " but no missing tx!".to_string(), // this is never supposed to happen
+                            1 => format!(" missing tx {}", missing_txs[0]),
+                            n => format!(" with {} missing txs {}..{}", n, missing_txs[0], missing_txs.last().unwrap()),
+                        };
+
+                        // This call cleanly removes the invalid transaction.
+                        mempool.remove_transaction(
+                            &transaction_id,
+                            false,
+                            "high priority revalidation, missing outpoints",
+                            extra_info.as_str(),
+                        )?;
                     }
                     Err(err) => {
                         // Rust rewrite note:
                         // The behavior changes here compared to the golang version.
                         // The failed revalidation is simply logged and the process continues.
                         warn!(
-                            "Removing high priority transaction {0} and its redeemers, it failed revalidation with {1}",
+                            "Removing high priority transaction {0} and its redeemers, it failed revalidation with {1}",
                             transaction_id, err
                         );
                         // This call cleanly removes the invalid transaction and its redeemers.
-                        mempool.remove_transaction(&transaction_id, true)?;
+                        mempool.remove_transaction(&transaction_id, true, "", "")?;
                     }
                 }
             }
diff --git a/mining/src/mempool/handle_new_block_transactions.rs b/mining/src/mempool/handle_new_block_transactions.rs
index 612aa6820e..c64621e73c 100644
--- a/mining/src/mempool/handle_new_block_transactions.rs
+++ b/mining/src/mempool/handle_new_block_transactions.rs
@@ -11,7 +11,7 @@ impl Mempool {
             let transaction_id = transaction.id();
             self.remove_transaction(&transaction_id, false)?;
             self.remove_double_spends(transaction)?;
-            self.orphan_pool.remove_orphan(&transaction_id, false)?;
+            self.orphan_pool.remove_orphan(&transaction_id, false, "accepted")?;
             unorphaned_transactions.append(&mut self.get_unorphaned_transactions_after_accepted_transaction(transaction));
         }
         Ok(unorphaned_transactions)
@@ -30,6 +30,8 @@ impl Mempool {
                 transactions_to_remove.insert(*redeemer_id);
             }
         }
-        transactions_to_remove.iter().try_for_each(|x| self.remove_transaction(x, true))
+        transactions_to_remove
+            .iter()
+            .try_for_each(|x| self.remove_transaction(x, true, "double spend", format!(" favouring {}", transaction.id()).as_str()))
     }
 }
diff --git a/mining/src/mempool/model/orphan_pool.rs b/mining/src/mempool/model/orphan_pool.rs
index 4b8935c0fb..5aceec4819 100644
--- a/mining/src/mempool/model/orphan_pool.rs
+++ b/mining/src/mempool/model/orphan_pool.rs
@@ -13,7 +13,8 @@ use kaspa_consensus_core::{
     tx::MutableTransaction,
     tx::{TransactionId, TransactionOutpoint},
 };
-use kaspa_core::warn;
+use kaspa_core::{debug, warn};
+use kaspa_utils::iter::IterExtensions;
 use std::sync::Arc;

 use super::pool::TransactionsEdges;
@@ -95,7 +96,7 @@ impl OrphanPool {
         }
         // Don't remove redeemers in the case of a random eviction since the evicted transaction is
        // not invalid, therefore its redeemers are as good as any orphan that just arrived.
- self.remove_orphan(&orphan_to_remove.unwrap().id(), false)?; + self.remove_orphan(&orphan_to_remove.unwrap().id(), false, "making room")?; } Ok(()) } @@ -154,6 +155,7 @@ impl OrphanPool { } self.all_orphans.insert(id, transaction); + debug!("Added transaction to orphan pool: {}", id); Ok(()) } @@ -161,6 +163,7 @@ impl OrphanPool { &mut self, transaction_id: &TransactionId, remove_redeemers: bool, + reason: &str, ) -> RuleResult> { // Rust rewrite: // - the call cycle removeOrphan -> removeRedeemersOf -> removeOrphan is replaced by @@ -175,7 +178,23 @@ impl OrphanPool { if remove_redeemers { transaction_ids_to_remove.extend(self.get_redeemer_ids_in_pool(transaction_id)); } - transaction_ids_to_remove.iter().map(|x| self.remove_single_orphan(x)).collect() + let removed_transactions = + transaction_ids_to_remove.iter().map(|x| self.remove_single_orphan(x)).collect::>>()?; + match removed_transactions.len() { + 0 => (), // This is not possible + 1 => { + debug!("Removed transaction from orphan pool ({}): {}", reason, removed_transactions[0].id()); + } + n => { + debug!( + "Removed {} transactions from orphan pool ({}): {}", + n, + reason, + removed_transactions.iter().map(|x| x.id()).reusable_format(", ") + ); + } + } + Ok(removed_transactions) } fn remove_single_orphan(&mut self, transaction_id: &TransactionId) -> RuleResult { @@ -283,7 +302,7 @@ impl Pool for OrphanPool { .collect(); for transaction_id in expired_low_priority_transactions.iter() { - self.remove_orphan(transaction_id, false)?; + self.remove_orphan(transaction_id, false, "expired")?; } self.last_expire_scan = virtual_daa_score; diff --git a/mining/src/mempool/model/transactions_pool.rs b/mining/src/mempool/model/transactions_pool.rs index 73164e9bc3..f20ef11f95 100644 --- a/mining/src/mempool/model/transactions_pool.rs +++ b/mining/src/mempool/model/transactions_pool.rs @@ -11,7 +11,7 @@ use kaspa_consensus_core::{ tx::TransactionId, tx::{MutableTransaction, TransactionOutpoint}, }; -use kaspa_core::{debug, time::unix_now, warn}; +use kaspa_core::{debug, time::unix_now, trace, warn}; use std::{ collections::{hash_map::Keys, hash_set::Iter}, sync::Arc, @@ -107,6 +107,7 @@ impl TransactionsPool { self.utxo_set.add_transaction(&transaction.mtx); self.all_transactions.insert(id, transaction); + trace!("Added transaction {}", id); Ok(()) } diff --git a/mining/src/mempool/remove_transaction.rs b/mining/src/mempool/remove_transaction.rs index fb71b3671e..1433621878 100644 --- a/mining/src/mempool/remove_transaction.rs +++ b/mining/src/mempool/remove_transaction.rs @@ -1,30 +1,52 @@ use crate::mempool::{errors::RuleResult, model::pool::Pool, Mempool}; use kaspa_consensus_core::tx::TransactionId; +use kaspa_core::debug; +use kaspa_utils::iter::IterExtensions; impl Mempool { - pub(crate) fn remove_transaction(&mut self, transaction_id: &TransactionId, remove_redeemers: bool) -> RuleResult<()> { + pub(crate) fn remove_transaction( + &mut self, + transaction_id: &TransactionId, + remove_redeemers: bool, + reason: &str, + extra_info: &str, + ) -> RuleResult<()> { if self.orphan_pool.has(transaction_id) { - return self.orphan_pool.remove_orphan(transaction_id, true).map(|_| ()); + return self.orphan_pool.remove_orphan(transaction_id, true, reason).map(|_| ()); } if !self.transaction_pool.has(transaction_id) { return Ok(()); } - let mut transactions_to_remove = vec![*transaction_id]; + let mut removed_transactions = vec![*transaction_id]; let redeemers = self.transaction_pool.get_redeemer_ids_in_pool(transaction_id); if remove_redeemers { 
-            transactions_to_remove.extend(redeemers);
+            removed_transactions.extend(redeemers);
         } else {
             redeemers.iter().for_each(|x| {
                 self.transaction_pool.remove_parent_chained_relation_in_pool(x, transaction_id);
             });
         }

-        transactions_to_remove.iter().try_for_each(|x| self.remove_transaction_from_sets(x, remove_redeemers))?;
+        removed_transactions.iter().try_for_each(|x| self.remove_transaction_from_sets(x, remove_redeemers))?;

         if remove_redeemers {
-            self.orphan_pool.remove_redeemers_of(transaction_id)?;
+            removed_transactions.extend(self.orphan_pool.remove_redeemers_of(transaction_id)?.iter().map(|x| x.id()));
+        }
+
+        if !reason.is_empty() {
+            match removed_transactions.len() {
+                0 => {}
+                1 => debug!("Removed transaction ({}) {}{}", reason, removed_transactions[0], extra_info),
+                n => debug!(
+                    "Removed {} transactions ({}): {}{}",
+                    n,
+                    reason,
+                    removed_transactions.iter().reusable_format(", "),
+                    extra_info
+                ),
+            }
         }

         Ok(())
diff --git a/mining/src/mempool/validate_and_insert_transaction.rs b/mining/src/mempool/validate_and_insert_transaction.rs
index 41e36e4997..990632a548 100644
--- a/mining/src/mempool/validate_and_insert_transaction.rs
+++ b/mining/src/mempool/validate_and_insert_transaction.rs
@@ -57,7 +57,6 @@ impl Mempool {
                     return Err(RuleError::RejectDisallowedOrphan(transaction_id));
                 }
                 self.orphan_pool.try_add_orphan(consensus, transaction, priority)?;
-                debug!("Transaction {0} added to orphans", transaction_id);
                 return Ok(None);
             }
             Err(err) => {
@@ -68,7 +67,10 @@ impl Mempool {
         self.validate_transaction_in_context(&transaction)?;

         // Before adding the transaction, check if there is room in the pool
-        self.transaction_pool.limit_transaction_count(1, &transaction)?.iter().try_for_each(|x| self.remove_transaction(x, true))?;
+        self.transaction_pool
+            .limit_transaction_count(1, &transaction)?
+            .iter()
+            .try_for_each(|x| self.remove_transaction(x, true, "making room", format!(" for {}", transaction_id).as_str()))?;

         // Add the transaction to the mempool as a MempoolTransaction and return a clone of the embedded Arc
         let accepted_transaction =
@@ -149,7 +151,7 @@ impl Mempool {
         // This job is delegated to a fn called later in the process (Manager::validate_and_insert_unorphaned_transactions).

         // Remove the transaction identified by transaction_id from the orphan pool.
-        let mut transactions = self.orphan_pool.remove_orphan(transaction_id, false)?;
+        let mut transactions = self.orphan_pool.remove_orphan(transaction_id, false, "unorphaned")?;

         // At this point, `transactions` contains exactly one transaction.
         // The one we just removed from the orphan pool.

From 77af21d095e127be7c0b8c36accee7f553ed9db8 Mon Sep 17 00:00:00 2001
From: Tiram <18632023+tiram88@users.noreply.github.com>
Date: Tue, 5 Sep 2023 17:13:17 +0300
Subject: [PATCH 17/86] On accepted block do not remove orphan tx redeemers

---
 mining/src/mempool/handle_new_block_transactions.rs | 8 +++++++-
 1 file changed, 7 insertions(+), 1 deletion(-)

diff --git a/mining/src/mempool/handle_new_block_transactions.rs b/mining/src/mempool/handle_new_block_transactions.rs
index c64621e73c..d3bbbf2a57 100644
--- a/mining/src/mempool/handle_new_block_transactions.rs
+++ b/mining/src/mempool/handle_new_block_transactions.rs
@@ -9,7 +9,13 @@ impl Mempool {
         let mut unorphaned_transactions = vec![];
         for transaction in block_transactions[1..].iter() {
             let transaction_id = transaction.id();
-            self.remove_transaction(&transaction_id, false)?;
+            // Rust rewrite: this behavior differs from the golang implementation.
+            // If the transaction got accepted via a peer but is still an orphan here, do not remove
+            // its redeemers in the orphan pool. We give those a chance to be unorphaned and included
+            // in the next block template.
+            if !self.orphan_pool.has(&transaction_id) {
+                self.remove_transaction(&transaction_id, false, "accepted", "")?;
+            }
             self.remove_double_spends(transaction)?;
             self.orphan_pool.remove_orphan(&transaction_id, false, "accepted")?;
             unorphaned_transactions.append(&mut self.get_unorphaned_transactions_after_accepted_transaction(transaction));

From 41397ba880e345a873842b623f9ed7005726227b Mon Sep 17 00:00:00 2001
From: Tiram <18632023+tiram88@users.noreply.github.com>
Date: Tue, 5 Sep 2023 17:14:01 +0300
Subject: [PATCH 18/86] Add 2 TODOs

---
 consensus/src/pipeline/virtual_processor/processor.rs | 1 +
 protocol/flows/src/v5/blockrelay/flow.rs              | 2 ++
 2 files changed, 3 insertions(+)

diff --git a/consensus/src/pipeline/virtual_processor/processor.rs b/consensus/src/pipeline/virtual_processor/processor.rs
index 5bf4fb24d5..ebceaf6320 100644
--- a/consensus/src/pipeline/virtual_processor/processor.rs
+++ b/consensus/src/pipeline/virtual_processor/processor.rs
@@ -793,6 +793,7 @@ impl VirtualStateProcessor {
         utxo_view: &impl UtxoView,
     ) -> Result<(), RuleError> {
         // Search for invalid transactions. This can happen since the mining manager calling this function is not atomically in sync with virtual state
+        // TODO: process transactions in parallel
         let mut invalid_transactions = Vec::new();
         for tx in txs.iter() {
             if let Err(e) = self.validate_block_template_transaction(tx, virtual_state, utxo_view) {

diff --git a/protocol/flows/src/v5/blockrelay/flow.rs b/protocol/flows/src/v5/blockrelay/flow.rs
index 236a34bf6d..ae92260a93 100644
--- a/protocol/flows/src/v5/blockrelay/flow.rs
+++ b/protocol/flows/src/v5/blockrelay/flow.rs
@@ -154,6 +154,8 @@ impl HandleRelayInvsFlow {
         }
         self.ctx.log_block_acceptance(inv.hash, BlockSource::Relay);
+        // TODO: investigate if reversing the following 2 calls may lower the tx missing outpoints error rate
+        // in Manager::get_block_template()
         self.ctx.on_new_block_template().await?;
         self.ctx.on_new_block(&session, block).await?;

From 54e8b39a65a280aabf187101c02be584d0e18291 Mon Sep 17 00:00:00 2001
From: Tiram <18632023+tiram88@users.noreply.github.com>
Date: Wed, 6 Sep 2023 16:26:39 +0300
Subject: [PATCH 19/86] Fix a bug where high priority transactions were unexpectedly orphaned or rejected

---
 mining/src/manager.rs | 14 ++++++++++++--
 1 file changed, 12 insertions(+), 2 deletions(-)

diff --git a/mining/src/manager.rs b/mining/src/manager.rs
index 8fa6db5e7a..1d48b2abbc 100644
--- a/mining/src/manager.rs
+++ b/mining/src/manager.rs
@@ -23,7 +23,7 @@ use kaspa_consensus_core::{
     api::ConsensusApi,
     block::BlockTemplate,
     coinbase::MinerData,
-    errors::block::RuleError as BlockRuleError,
+    errors::{block::RuleError as BlockRuleError, tx::TxRuleError},
     tx::{MutableTransaction, Transaction, TransactionId, TransactionOutput},
 };
 use kaspa_consensusmanager::{spawn_blocking, ConsensusProxy};
@@ -88,9 +88,19 @@ impl MiningManager {
             Err(BuilderError::ConsensusError(BlockRuleError::InvalidTransactionsInNewBlock(invalid_transactions))) => {
                 let mut mempool_write = self.mempool.write();
                 invalid_transactions.iter().for_each(|(x, err)| {
+                    // On missing outpoints, the most likely cause is that the tx was already in a block accepted by
+                    // the consensus but not yet processed by handle_new_block_transactions(). Another possibility
+                    // is a double spend.
+                    // In both cases, we simply remove the transaction but keep its redeemers.
+                    // Those will either be valid in the next block template or invalidated if it's a double spend.
+                    //
+                    // If the redeemers of a transaction accepted in consensus but not yet handled in the mempool were
+                    // removed, subsequently submitted child transactions of those redeemers would unexpectedly be
+                    // either orphaned or, in case orphans are disallowed, rejected.
+                    //
+                    // For all other errors, we do remove the redeemers.
                     let removal_result = mempool_write.remove_transaction(
                         x,
-                        true,
+                        *err != TxRuleError::MissingTxOutpoints,
                         "invalid in block template",
                         format!(" error: {}", err).as_str(),
                     );

From f6225aa0760f4d7ca39458c7b7107338a4b063e4 Mon Sep 17 00:00:00 2001
From: Tiram <18632023+tiram88@users.noreply.github.com>
Date: Wed, 6 Sep 2023 17:04:45 +0300
Subject: [PATCH 20/86] Refactor transaction removal reason into an enum

---
 mining/src/manager.rs                         |  8 ++--
 .../mempool/handle_new_block_transactions.rs  | 21 ++++++----
 mining/src/mempool/model/orphan_pool.rs       | 12 +++---
 mining/src/mempool/model/tx.rs                | 42 ++++++++++++++++++-
 mining/src/mempool/remove_transaction.rs      | 10 +++--
 .../validate_and_insert_transaction.rs        | 20 ++++-----
 6 files changed, 80 insertions(+), 33 deletions(-)

diff --git a/mining/src/manager.rs b/mining/src/manager.rs
index 1d48b2abbc..d50356f1ee 100644
--- a/mining/src/manager.rs
+++ b/mining/src/manager.rs
@@ -8,7 +8,7 @@ use crate::{
     errors::MiningManagerResult,
     mempool::{
         config::Config,
-        model::tx::MempoolTransaction,
+        model::tx::{MempoolTransaction, TxRemovalReason},
         populate_entries_and_try_validate::{validate_mempool_transaction_and_populate, validate_mempool_transactions_in_parallel},
         tx::{Orphan, Priority},
         Mempool,
@@ -101,7 +101,7 @@ impl MiningManager {
                     let removal_result = mempool_write.remove_transaction(
                         x,
                         *err != TxRuleError::MissingTxOutpoints,
-                        "invalid in block template",
+                        TxRemovalReason::InvalidInBlockTemplate,
                         format!(" error: {}", err).as_str(),
                     );
@@ -499,7 +499,7 @@ impl MiningManager {
                     mempool.remove_transaction(
                         &transaction_id,
                         false,
-                        "high priority revalidation, missing outpoints",
+                        TxRemovalReason::RevalidationWithMissingOutpoints,
                         extra_info.as_str(),
                     )?;
                 }
@@ -512,7 +512,7 @@ impl MiningManager {
                         transaction_id, err
                     );
                     // This call cleanly removes the invalid transaction and its redeemers.
-                    mempool.remove_transaction(&transaction_id, true, "", "")?;
+                    mempool.remove_transaction(&transaction_id, true, TxRemovalReason::Muted, "")?;
                 }
             }
         }

diff --git a/mining/src/mempool/handle_new_block_transactions.rs b/mining/src/mempool/handle_new_block_transactions.rs
index d3bbbf2a57..914b0af577 100644
--- a/mining/src/mempool/handle_new_block_transactions.rs
+++ b/mining/src/mempool/handle_new_block_transactions.rs
@@ -1,9 +1,14 @@
-use crate::mempool::{errors::RuleResult, Mempool};
+use crate::mempool::{
+    errors::RuleResult,
+    model::{
+        pool::Pool,
+        tx::{MempoolTransaction, TxRemovalReason},
+    },
+    Mempool,
+};
 use kaspa_consensus_core::{api::ConsensusApi, tx::Transaction};
 use std::collections::HashSet;

-use super::model::{pool::Pool, tx::MempoolTransaction};
-
 impl Mempool {
     pub(crate) fn handle_new_block_transactions(&mut self, block_transactions: &[Transaction]) -> RuleResult<Vec<MempoolTransaction>> {
         let mut unorphaned_transactions = vec![];
         for transaction in block_transactions[1..].iter() {
             let transaction_id = transaction.id();
             // Rust rewrite: this behavior differs from the golang implementation.
             // If the transaction got accepted via a peer but is still an orphan here, do not remove
             // its redeemers in the orphan pool. We give those a chance to be unorphaned and included
             // in the next block template.
             if !self.orphan_pool.has(&transaction_id) {
-                self.remove_transaction(&transaction_id, false, "accepted", "")?;
+                self.remove_transaction(&transaction_id, false, TxRemovalReason::Accepted, "")?;
             }
             self.remove_double_spends(transaction)?;
-            self.orphan_pool.remove_orphan(&transaction_id, false, "accepted")?;
+            self.orphan_pool.remove_orphan(&transaction_id, false, TxRemovalReason::Accepted)?;
             unorphaned_transactions.append(&mut self.get_unorphaned_transactions_after_accepted_transaction(transaction));
         }
         Ok(unorphaned_transactions)
@@ -36,8 +41,8 @@ impl Mempool {
                 transactions_to_remove.insert(*redeemer_id);
             }
         }
-        transactions_to_remove
-            .iter()
-            .try_for_each(|x| self.remove_transaction(x, true, "double spend", format!(" favouring {}", transaction.id()).as_str()))
+        transactions_to_remove.iter().try_for_each(|x| {
+            self.remove_transaction(x, true, TxRemovalReason::DoubleSpend, format!(" favouring {}", transaction.id()).as_str())
+        })
     }
 }
diff --git a/mining/src/mempool/model/orphan_pool.rs b/mining/src/mempool/model/orphan_pool.rs
index 5aceec4819..285668a659 100644
--- a/mining/src/mempool/model/orphan_pool.rs
+++ b/mining/src/mempool/model/orphan_pool.rs
@@ -3,8 +3,8 @@ use crate::mempool::{
     errors::{RuleError, RuleResult},
     model::{
         map::{MempoolTransactionCollection, OutpointIndex},
-        pool::Pool,
-        tx::MempoolTransaction,
+        pool::{Pool, TransactionsEdges},
+        tx::{MempoolTransaction, TxRemovalReason},
     },
     tx::Priority,
 };
@@ -17,8 +17,6 @@ use kaspa_core::{debug, warn};
 use kaspa_utils::iter::IterExtensions;
 use std::sync::Arc;

-use super::pool::TransactionsEdges;
-
 /// Pool of orphan transactions depending on some missing utxo entries
 ///
 /// ### Rust rewrite notes
@@ -96,7 +94,7 @@ impl OrphanPool {
         }
         // Don't remove redeemers in the case of a random eviction since the evicted transaction is
         // not invalid, therefore its redeemers are as good as any orphan that just arrived.
- self.remove_orphan(&orphan_to_remove.unwrap().id(), false, "making room")?; + self.remove_orphan(&orphan_to_remove.unwrap().id(), false, TxRemovalReason::MakingRoom)?; } Ok(()) } @@ -163,7 +161,7 @@ impl OrphanPool { &mut self, transaction_id: &TransactionId, remove_redeemers: bool, - reason: &str, + reason: TxRemovalReason, ) -> RuleResult> { // Rust rewrite: // - the call cycle removeOrphan -> removeRedeemersOf -> removeOrphan is replaced by @@ -302,7 +300,7 @@ impl Pool for OrphanPool { .collect(); for transaction_id in expired_low_priority_transactions.iter() { - self.remove_orphan(transaction_id, false, "expired")?; + self.remove_orphan(transaction_id, false, TxRemovalReason::Expired)?; } self.last_expire_scan = virtual_daa_score; diff --git a/mining/src/mempool/model/tx.rs b/mining/src/mempool/model/tx.rs index 66e85e3116..6235425358 100644 --- a/mining/src/mempool/model/tx.rs +++ b/mining/src/mempool/model/tx.rs @@ -1,6 +1,9 @@ use crate::mempool::tx::Priority; use kaspa_consensus_core::{tx::MutableTransaction, tx::TransactionId}; -use std::cmp::Ordering; +use std::{ + cmp::Ordering, + fmt::{Display, Formatter}, +}; pub(crate) struct MempoolTransaction { pub(crate) mtx: MutableTransaction, @@ -47,3 +50,40 @@ impl PartialEq for MempoolTransaction { self.fee_rate() == other.fee_rate() } } + +#[derive(PartialEq, Eq)] +pub(crate) enum TxRemovalReason { + Muted, + Accepted, + MakingRoom, + Unorphaned, + Expired, + DoubleSpend, + InvalidInBlockTemplate, + RevalidationWithMissingOutpoints, +} + +impl TxRemovalReason { + pub(crate) fn as_str(&self) -> &'static str { + match self { + TxRemovalReason::Muted => "", + TxRemovalReason::Accepted => "accepted", + TxRemovalReason::MakingRoom => "making room", + TxRemovalReason::Unorphaned => "unorphaned", + TxRemovalReason::Expired => "expired", + TxRemovalReason::DoubleSpend => "double spend", + TxRemovalReason::InvalidInBlockTemplate => "invalid in block template", + TxRemovalReason::RevalidationWithMissingOutpoints => "revalidation with missing outpoints", + } + } + + pub(crate) fn verbose(&self) -> bool { + matches!(self, TxRemovalReason::Muted) + } +} + +impl Display for TxRemovalReason { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + f.write_str(self.as_str()) + } +} diff --git a/mining/src/mempool/remove_transaction.rs b/mining/src/mempool/remove_transaction.rs index 1433621878..0e2862b3be 100644 --- a/mining/src/mempool/remove_transaction.rs +++ b/mining/src/mempool/remove_transaction.rs @@ -1,4 +1,8 @@ -use crate::mempool::{errors::RuleResult, model::pool::Pool, Mempool}; +use crate::mempool::{ + errors::RuleResult, + model::{pool::Pool, tx::TxRemovalReason}, + Mempool, +}; use kaspa_consensus_core::tx::TransactionId; use kaspa_core::debug; use kaspa_utils::iter::IterExtensions; @@ -8,7 +12,7 @@ impl Mempool { &mut self, transaction_id: &TransactionId, remove_redeemers: bool, - reason: &str, + reason: TxRemovalReason, extra_info: &str, ) -> RuleResult<()> { if self.orphan_pool.has(transaction_id) { @@ -35,7 +39,7 @@ impl Mempool { removed_transactions.extend(self.orphan_pool.remove_redeemers_of(transaction_id)?.iter().map(|x| x.id())); } - if !reason.is_empty() { + if reason.verbose() { match removed_transactions.len() { 0 => {} 1 => debug!("Removed transaction ({}) {}{}", reason, removed_transactions[0], extra_info), diff --git a/mining/src/mempool/validate_and_insert_transaction.rs b/mining/src/mempool/validate_and_insert_transaction.rs index 990632a548..8050fdd318 100644 --- 
a/mining/src/mempool/validate_and_insert_transaction.rs +++ b/mining/src/mempool/validate_and_insert_transaction.rs @@ -1,8 +1,10 @@ -use std::sync::Arc; - use crate::mempool::{ errors::{RuleError, RuleResult}, - model::{pool::Pool, tx::MempoolTransaction}, + model::{ + pool::Pool, + tx::{MempoolTransaction, TxRemovalReason}, + }, + tx::{Orphan, Priority}, Mempool, }; use kaspa_consensus_core::{ @@ -11,8 +13,7 @@ use kaspa_consensus_core::{ tx::{MutableTransaction, Transaction, TransactionId, TransactionOutpoint, UtxoEntry}, }; use kaspa_core::{debug, info}; - -use super::tx::{Orphan, Priority}; +use std::sync::Arc; impl Mempool { pub(crate) fn pre_validate_and_populate_transaction( @@ -67,10 +68,9 @@ impl Mempool { self.validate_transaction_in_context(&transaction)?; // Before adding the transaction, check if there is room in the pool - self.transaction_pool - .limit_transaction_count(1, &transaction)? - .iter() - .try_for_each(|x| self.remove_transaction(x, true, "making room", format!(" for {}", transaction_id).as_str()))?; + self.transaction_pool.limit_transaction_count(1, &transaction)?.iter().try_for_each(|x| { + self.remove_transaction(x, true, TxRemovalReason::MakingRoom, format!(" for {}", transaction_id).as_str()) + })?; // Add the transaction to the mempool as a MempoolTransaction and return a clone of the embedded Arc let accepted_transaction = @@ -151,7 +151,7 @@ impl Mempool { // This job is delegated to a fn called later in the process (Manager::validate_and_insert_unorphaned_transactions). // Remove the transaction identified by transaction_id from the orphan pool. - let mut transactions = self.orphan_pool.remove_orphan(transaction_id, false, "unorphaned")?; + let mut transactions = self.orphan_pool.remove_orphan(transaction_id, false, TxRemovalReason::Unorphaned)?; // At this point, `transactions` contains exactly one transaction. // The one we just removed from the orphan pool. 
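Note on the pattern above: the TxRemovalReason enum centralizes two things that the earlier patches passed around as bare &str values, namely the human-readable label and the decision of whether a removal is worth logging at all. A minimal, self-contained sketch of the resulting call-site pattern follows; it is simplified (u64 stands in for TransactionId, println! for kaspa_core::debug!) and verbose() is shown with the negation that a later patch in this series adds:

    use std::fmt::{Display, Formatter};

    #[derive(PartialEq, Eq)]
    enum TxRemovalReason {
        Muted,
        Expired,
        DoubleSpend,
    }

    impl TxRemovalReason {
        fn as_str(&self) -> &'static str {
            match self {
                TxRemovalReason::Muted => "",
                TxRemovalReason::Expired => "expired",
                TxRemovalReason::DoubleSpend => "double spend",
            }
        }

        // Muted removals are intentionally kept out of the logs.
        fn verbose(&self) -> bool {
            !matches!(self, TxRemovalReason::Muted)
        }
    }

    impl Display for TxRemovalReason {
        fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
            f.write_str(self.as_str())
        }
    }

    // One log line per removal batch: singular, plural or silent.
    fn log_removal(removed: &[u64], reason: TxRemovalReason, extra_info: &str) {
        if !reason.verbose() {
            return;
        }
        match removed.len() {
            0 => {}
            1 => println!("Removed transaction ({}) {}{}", reason, removed[0], extra_info),
            n => {
                let ids = removed.iter().map(|x| x.to_string()).collect::<Vec<_>>().join(", ");
                println!("Removed {} transactions ({}): {}{}", n, reason, ids, extra_info)
            }
        }
    }

    fn main() {
        log_removal(&[7], TxRemovalReason::Expired, "");                        // one transaction, logged
        log_removal(&[1, 2, 3], TxRemovalReason::DoubleSpend, " favouring 9");  // one aggregated line
        log_removal(&[5], TxRemovalReason::Muted, "");                          // silent
    }

With this shape, Muted keeps intentional cleanup removals (like purging the redeemers of an invalid transaction) out of the logs, while every other reason yields exactly one aggregated line per removal batch.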
From ece7e977cc3a3997e4974f67a54d087b7e2d6349 Mon Sep 17 00:00:00 2001 From: Tiram <18632023+tiram88@users.noreply.github.com> Date: Thu, 7 Sep 2023 18:31:11 +0300 Subject: [PATCH 21/86] Add an accepted transaction ids cache to the mempool and use it to prevent reentrance in mempool, broadcasting to and asking from peers --- mining/errors/src/mempool.rs | 3 + mining/src/manager.rs | 52 ++++++++++++-- mining/src/manager_tests.rs | 10 +-- mining/src/mempool/config.rs | 20 +++++- .../mempool/handle_new_block_transactions.rs | 10 ++- mining/src/mempool/mod.rs | 14 +++- .../mempool/model/accepted_transactions.rs | 67 +++++++++++++++++++ mining/src/mempool/model/mod.rs | 1 + .../validate_and_insert_transaction.rs | 17 ++++- protocol/flows/src/flow_context.rs | 2 +- protocol/flows/src/v5/txrelay/flow.rs | 1 + 11 files changed, 178 insertions(+), 19 deletions(-) create mode 100644 mining/src/mempool/model/accepted_transactions.rs diff --git a/mining/errors/src/mempool.rs b/mining/errors/src/mempool.rs index 0a4acbc79d..fade94c858 100644 --- a/mining/errors/src/mempool.rs +++ b/mining/errors/src/mempool.rs @@ -18,6 +18,9 @@ pub enum RuleError { #[error("at least one outpoint of transaction is lacking a matching UTXO entry")] RejectMissingOutpoint, + #[error("transaction {0} was already accepted by the consensus")] + RejectAlreadyAccepted(TransactionId), + #[error("transaction {0} is already in the mempool")] RejectDuplicate(TransactionId), diff --git a/mining/src/manager.rs b/mining/src/manager.rs index d50356f1ee..5634d242e7 100644 --- a/mining/src/manager.rs +++ b/mining/src/manager.rs @@ -273,6 +273,10 @@ impl MiningManager { let transaction_id = tx.id(); match mempool.pre_validate_and_populate_transaction(consensus, tx) { Ok(tx) => Some(tx), + Err(RuleError::RejectAlreadyAccepted(transaction_id)) => { + debug!("Ignoring already accepted transaction {}", transaction_id); + None + } Err(err) => { debug!("Failed to pre validate transaction {0} due to rule error: {1}", transaction_id, err); None @@ -391,12 +395,13 @@ impl MiningManager { pub fn handle_new_block_transactions( &self, consensus: &dyn ConsensusApi, + block_daa_score: u64, block_transactions: &[Transaction], ) -> MiningManagerResult>> { // TODO: should use tx acceptance data to verify that new block txs are actually accepted into virtual state. 
         // write lock on mempool
-        let unorphaned_transactions = self.mempool.write().handle_new_block_transactions(block_transactions)?;
+        let unorphaned_transactions = self.mempool.write().handle_new_block_transactions(block_daa_score, block_transactions)?;

         // alternate no & write lock on mempool
         let accepted_transactions = self.validate_and_insert_unorphaned_transactions(consensus, unorphaned_transactions);
@@ -433,10 +438,15 @@ impl MiningManager {
         let mut transactions = transactions
             .into_iter()
             .filter_map(|mut x| {
-                if mempool.has_transaction(&x.id(), true, false) {
-                    x.clear_entries();
-                    mempool.populate_mempool_entries(&mut x);
-                    Some(x)
+                let transaction_id = x.id();
+                if mempool.has_transaction(&transaction_id, true, false) {
+                    if mempool.has_accepted_transaction(&transaction_id) {
+                        None
+                    } else {
+                        x.clear_entries();
+                        mempool.populate_mempool_entries(&mut x);
+                        Some(x)
+                    }
                 } else {
                     None
                 }
@@ -533,6 +543,14 @@ impl MiningManager {
     pub fn is_transaction_output_dust(&self, transaction_output: &TransactionOutput) -> bool {
         self.mempool.read().is_transaction_output_dust(transaction_output)
     }
+
+    pub fn has_accepted_transaction(&self, transaction_id: &TransactionId) -> bool {
+        self.mempool.read().has_accepted_transaction(transaction_id)
+    }
+
+    pub fn unaccepted_transactions(&self, transactions: Vec<TransactionId>) -> Vec<TransactionId> {
+        self.mempool.read().unaccepted_transactions(transactions)
+    }
 }

 /// Async proxy for the mining manager
@@ -590,9 +608,13 @@ impl MiningManagerProxy {
     pub async fn handle_new_block_transactions(
         self,
         consensus: &ConsensusProxy,
+        block_daa_score: u64,
         block_transactions: Arc<Vec<Transaction>>,
     ) -> MiningManagerResult<Vec<Arc<Transaction>>> {
-        consensus.clone().spawn_blocking(move |c| self.inner.handle_new_block_transactions(c, &block_transactions)).await
+        consensus
+            .clone()
+            .spawn_blocking(move |c| self.inner.handle_new_block_transactions(c, block_daa_score, &block_transactions))
+            .await
     }

     pub async fn revalidate_high_priority_transactions(self, consensus: &ConsensusProxy) -> MiningManagerResult<Vec<TransactionId>> {
@@ -653,4 +675,22 @@ impl MiningManagerProxy {
             .await
             .unwrap()
     }
+
+    /// Returns whether a transaction id was registered as accepted in the mempool, meaning
+    /// that the consensus accepted a block containing it and said block was handled by the
+    /// mempool.
+    ///
+    /// Registered transaction ids expire after a delay and are unregistered from the mempool.
+    /// So a returned value of true means with certainty that the transaction was accepted, while
+    /// false means either that the transaction was never accepted or that it was accepted beyond
+    /// the expiration delay.
+    pub async fn has_accepted_transaction(self, transaction_id: TransactionId) -> bool {
+        spawn_blocking(move || self.inner.has_accepted_transaction(&transaction_id)).await.unwrap()
+    }
+
+    /// Returns a vector of unaccepted transactions.
+    /// For more details, see [`Self::has_accepted_transaction()`].
+ pub async fn unaccepted_transactions(self, transactions: Vec) -> Vec { + spawn_blocking(move || self.inner.unaccepted_transactions(transactions)).await.unwrap() + } } diff --git a/mining/src/manager_tests.rs b/mining/src/manager_tests.rs index fbefbcb15d..ac3d919959 100644 --- a/mining/src/manager_tests.rs +++ b/mining/src/manager_tests.rs @@ -252,7 +252,7 @@ mod tests { let block_with_first_part = build_block_transactions(first_part.iter().map(|mtx| mtx.tx.as_ref())); let block_with_rest = build_block_transactions(rest.iter().map(|mtx| mtx.tx.as_ref())); - let result = mining_manager.handle_new_block_transactions(consensus.as_ref(), &block_with_first_part); + let result = mining_manager.handle_new_block_transactions(consensus.as_ref(), 2, &block_with_first_part); assert!( result.is_ok(), "the handling by the mempool of the transactions of a block accepted by the consensus should succeed but returned {result:?}" @@ -273,7 +273,7 @@ mod tests { } // Handle all the other transactions. - let result = mining_manager.handle_new_block_transactions(consensus.as_ref(), &block_with_rest); + let result = mining_manager.handle_new_block_transactions(consensus.as_ref(), 3, &block_with_rest); assert!( result.is_ok(), "the handling by the mempool of the transactions of a block accepted by the consensus should succeed but returned {result:?}" @@ -307,7 +307,7 @@ mod tests { transaction_in_the_mempool.tx.inputs[0].previous_outpoint; let block_transactions = build_block_transactions(std::iter::once(double_spend_transaction_in_the_block.tx.as_ref())); - let result = mining_manager.handle_new_block_transactions(consensus.as_ref(), &block_transactions); + let result = mining_manager.handle_new_block_transactions(consensus.as_ref(), 2, &block_transactions); assert!(result.is_ok()); assert!( @@ -368,7 +368,7 @@ mod tests { let added_parent_txs = parent_txs.iter().skip(SKIPPED_TXS).cloned().collect::>(); added_parent_txs.iter().for_each(|x| consensus.add_transaction(x.clone(), 1)); let result = - mining_manager.handle_new_block_transactions(consensus.as_ref(), &build_block_transactions(added_parent_txs.iter())); + mining_manager.handle_new_block_transactions(consensus.as_ref(), 2, &build_block_transactions(added_parent_txs.iter())); assert!(result.is_ok(), "mining manager should handle new block transactions successfully but returns {result:?}"); let unorphaned_txs = result.unwrap(); let (populated_txs, orphans) = mining_manager.get_all_transactions(true, true); @@ -453,7 +453,7 @@ mod tests { let added_child_txs = child_txs.iter().skip(SKIPPED_TXS).cloned().collect::>(); added_child_txs.iter().for_each(|x| consensus.add_transaction(x.clone(), 2)); let result = - mining_manager.handle_new_block_transactions(consensus.as_ref(), &build_block_transactions(added_child_txs.iter())); + mining_manager.handle_new_block_transactions(consensus.as_ref(), 4, &build_block_transactions(added_child_txs.iter())); assert!(result.is_ok(), "mining manager should handle new block transactions successfully but returns {result:?}"); let unorphaned_txs = result.unwrap(); diff --git a/mining/src/mempool/config.rs b/mining/src/mempool/config.rs index f567e8088d..d8cffb1234 100644 --- a/mining/src/mempool/config.rs +++ b/mining/src/mempool/config.rs @@ -4,6 +4,8 @@ pub(crate) const DEFAULT_MAXIMUM_TRANSACTION_COUNT: u64 = 1_000_000; pub(crate) const DEFAULT_TRANSACTION_EXPIRE_INTERVAL_SECONDS: u64 = 60; pub(crate) const DEFAULT_TRANSACTION_EXPIRE_SCAN_INTERVAL_SECONDS: u64 = 10; +pub(crate) const 
DEFAULT_ACCEPTED_TRANSACTION_EXPIRE_INTERVAL_SECONDS: u64 = 120; +pub(crate) const DEFAULT_ACCEPTED_TRANSACTION_EXPIRE_SCAN_INTERVAL_SECONDS: u64 = 10; pub(crate) const DEFAULT_ORPHAN_EXPIRE_INTERVAL_SECONDS: u64 = 60; pub(crate) const DEFAULT_ORPHAN_EXPIRE_SCAN_INTERVAL_SECONDS: u64 = 10; @@ -29,6 +31,9 @@ pub struct Config { pub transaction_expire_interval_daa_score: u64, pub transaction_expire_scan_interval_daa_score: u64, pub transaction_expire_scan_interval_milliseconds: u64, + pub accepted_transaction_expire_interval_daa_score: u64, + pub accepted_transaction_expire_scan_interval_daa_score: u64, + pub accepted_transaction_expire_scan_interval_milliseconds: u64, pub orphan_expire_interval_daa_score: u64, pub orphan_expire_scan_interval_daa_score: u64, pub maximum_orphan_transaction_mass: u64, @@ -46,7 +51,10 @@ impl Config { maximum_transaction_count: u64, transaction_expire_interval_daa_score: u64, transaction_expire_scan_interval_daa_score: u64, - transaction_expire_scan_interval_seconds: u64, + transaction_expire_scan_interval_milliseconds: u64, + accepted_transaction_expire_interval_daa_score: u64, + accepted_transaction_expire_scan_interval_daa_score: u64, + accepted_transaction_expire_scan_interval_milliseconds: u64, orphan_expire_interval_daa_score: u64, orphan_expire_scan_interval_daa_score: u64, maximum_orphan_transaction_mass: u64, @@ -61,7 +69,10 @@ impl Config { maximum_transaction_count, transaction_expire_interval_daa_score, transaction_expire_scan_interval_daa_score, - transaction_expire_scan_interval_milliseconds: transaction_expire_scan_interval_seconds, + transaction_expire_scan_interval_milliseconds, + accepted_transaction_expire_interval_daa_score, + accepted_transaction_expire_scan_interval_daa_score, + accepted_transaction_expire_scan_interval_milliseconds, orphan_expire_interval_daa_score, orphan_expire_scan_interval_daa_score, maximum_orphan_transaction_mass, @@ -83,6 +94,11 @@ impl Config { transaction_expire_scan_interval_daa_score: DEFAULT_TRANSACTION_EXPIRE_SCAN_INTERVAL_SECONDS * 1000 / target_milliseconds_per_block, transaction_expire_scan_interval_milliseconds: DEFAULT_TRANSACTION_EXPIRE_SCAN_INTERVAL_SECONDS * 1000, + accepted_transaction_expire_interval_daa_score: DEFAULT_ACCEPTED_TRANSACTION_EXPIRE_INTERVAL_SECONDS * 1000 + / target_milliseconds_per_block, + accepted_transaction_expire_scan_interval_daa_score: DEFAULT_ACCEPTED_TRANSACTION_EXPIRE_SCAN_INTERVAL_SECONDS * 1000 + / target_milliseconds_per_block, + accepted_transaction_expire_scan_interval_milliseconds: DEFAULT_ACCEPTED_TRANSACTION_EXPIRE_SCAN_INTERVAL_SECONDS * 1000, orphan_expire_interval_daa_score: DEFAULT_ORPHAN_EXPIRE_INTERVAL_SECONDS * 1000 / target_milliseconds_per_block, orphan_expire_scan_interval_daa_score: DEFAULT_ORPHAN_EXPIRE_SCAN_INTERVAL_SECONDS * 1000 / target_milliseconds_per_block, maximum_orphan_transaction_mass: DEFAULT_MAXIMUM_ORPHAN_TRANSACTION_MASS, diff --git a/mining/src/mempool/handle_new_block_transactions.rs b/mining/src/mempool/handle_new_block_transactions.rs index 914b0af577..55f1fce77f 100644 --- a/mining/src/mempool/handle_new_block_transactions.rs +++ b/mining/src/mempool/handle_new_block_transactions.rs @@ -10,7 +10,11 @@ use kaspa_consensus_core::{api::ConsensusApi, tx::Transaction}; use std::collections::HashSet; impl Mempool { - pub(crate) fn handle_new_block_transactions(&mut self, block_transactions: &[Transaction]) -> RuleResult> { + pub(crate) fn handle_new_block_transactions( + &mut self, + block_daa_score: u64, + block_transactions: 
&[Transaction], + ) -> RuleResult> { let mut unorphaned_transactions = vec![]; for transaction in block_transactions[1..].iter() { let transaction_id = transaction.id(); @@ -22,7 +26,8 @@ impl Mempool { self.remove_transaction(&transaction_id, false, TxRemovalReason::Accepted, "")?; } self.remove_double_spends(transaction)?; - self.orphan_pool.remove_orphan(&transaction_id, false, TxRemovalReason::Accepted)?; + self.orphan_pool.remove_orphan(&transaction_id, false, TxRemovalReason::Accepted, "")?; + self.accepted_transactions.add(transaction_id, block_daa_score); unorphaned_transactions.append(&mut self.get_unorphaned_transactions_after_accepted_transaction(transaction)); } Ok(unorphaned_transactions) @@ -31,6 +36,7 @@ impl Mempool { pub(crate) fn expire_low_priority_transactions(&mut self, consensus: &dyn ConsensusApi) -> RuleResult<()> { self.orphan_pool.expire_low_priority_transactions(consensus.get_virtual_daa_score())?; self.transaction_pool.expire_low_priority_transactions(consensus.get_virtual_daa_score())?; + self.accepted_transactions.expire(consensus.get_virtual_daa_score()); Ok(()) } diff --git a/mining/src/mempool/mod.rs b/mining/src/mempool/mod.rs index ce8308e1ac..cd2fe18f3d 100644 --- a/mining/src/mempool/mod.rs +++ b/mining/src/mempool/mod.rs @@ -5,7 +5,7 @@ use crate::model::{ use self::{ config::Config, - model::{orphan_pool::OrphanPool, pool::Pool, transactions_pool::TransactionsPool}, + model::{accepted_transactions::AcceptedTransactions, orphan_pool::OrphanPool, pool::Pool, transactions_pool::TransactionsPool}, tx::Priority, }; use kaspa_consensus_core::tx::{MutableTransaction, TransactionId}; @@ -40,6 +40,7 @@ pub(crate) struct Mempool { config: Arc, transaction_pool: TransactionsPool, orphan_pool: OrphanPool, + accepted_transactions: AcceptedTransactions, } impl Mempool { @@ -47,7 +48,8 @@ impl Mempool { let config = Arc::new(config); let transaction_pool = TransactionsPool::new(config.clone()); let orphan_pool = OrphanPool::new(config.clone()); - Self { config, transaction_pool, orphan_pool } + let accepted_transactions = AcceptedTransactions::new(config.clone()); + Self { config, transaction_pool, orphan_pool, accepted_transactions } } pub(crate) fn get_transaction( @@ -135,6 +137,14 @@ impl Mempool { false } } + + pub(crate) fn has_accepted_transaction(&self, transaction_id: &TransactionId) -> bool { + self.accepted_transactions.has(transaction_id) + } + + pub(crate) fn unaccepted_transactions(&self, transactions: Vec) -> Vec { + self.accepted_transactions.unaccepted(transactions) + } } pub mod tx { diff --git a/mining/src/mempool/model/accepted_transactions.rs b/mining/src/mempool/model/accepted_transactions.rs new file mode 100644 index 0000000000..3705ad7f7c --- /dev/null +++ b/mining/src/mempool/model/accepted_transactions.rs @@ -0,0 +1,67 @@ +use crate::mempool::config::Config; +use kaspa_consensus_core::tx::TransactionId; +use kaspa_core::time::unix_now; +use std::{collections::HashMap, sync::Arc}; + +pub(crate) struct AcceptedTransactions { + /// Mempool config + config: Arc, + + /// A map of Transaction IDs to DAA scores + transactions: HashMap, + + /// Last expire scan DAA score + last_expire_scan_daa_score: u64, + /// last expire scan time in milliseconds + last_expire_scan_time: u64, +} + +impl AcceptedTransactions { + pub(crate) fn new(config: Arc) -> Self { + Self { config, transactions: Default::default(), last_expire_scan_daa_score: 0, last_expire_scan_time: unix_now() } + } + + pub(crate) fn add(&mut self, transaction_id: TransactionId, daa_score: 
u64) -> bool { + self.transactions.insert(transaction_id, daa_score).is_none() + } + + pub(crate) fn remove(&mut self, transaction_id: &TransactionId) -> bool { + self.transactions.remove(transaction_id).is_some() + } + + pub(crate) fn has(&self, transaction_id: &TransactionId) -> bool { + self.transactions.contains_key(transaction_id) + } + + pub(crate) fn unaccepted(&self, transactions: Vec) -> Vec { + transactions.into_iter().filter(|transaction_id| !self.has(transaction_id)).collect() + } + + pub(crate) fn expire(&mut self, virtual_daa_score: u64) { + let now = unix_now(); + if virtual_daa_score < self.last_expire_scan_daa_score + self.config.accepted_transaction_expire_scan_interval_daa_score + || now < self.last_expire_scan_time + self.config.accepted_transaction_expire_scan_interval_milliseconds + { + return; + } + + let expired_transactions: Vec = self + .transactions + .iter() + .filter_map(|(transaction_id, daa_score)| { + if virtual_daa_score > daa_score + self.config.accepted_transaction_expire_interval_daa_score { + Some(*transaction_id) + } else { + None + } + }) + .collect(); + + for transaction_id in expired_transactions.iter() { + self.remove(transaction_id); + } + + self.last_expire_scan_daa_score = virtual_daa_score; + self.last_expire_scan_time = now; + } +} diff --git a/mining/src/mempool/model/mod.rs b/mining/src/mempool/model/mod.rs index 4712336dcd..88997e46f1 100644 --- a/mining/src/mempool/model/mod.rs +++ b/mining/src/mempool/model/mod.rs @@ -1,3 +1,4 @@ +pub(crate) mod accepted_transactions; pub(crate) mod map; pub(crate) mod orphan_pool; pub(crate) mod pool; diff --git a/mining/src/mempool/validate_and_insert_transaction.rs b/mining/src/mempool/validate_and_insert_transaction.rs index 8050fdd318..ff80391c6f 100644 --- a/mining/src/mempool/validate_and_insert_transaction.rs +++ b/mining/src/mempool/validate_and_insert_transaction.rs @@ -21,6 +21,7 @@ impl Mempool { consensus: &dyn ConsensusApi, mut transaction: MutableTransaction, ) -> RuleResult { + self.validate_transaction_acceptance(&transaction)?; // Populate mass in the beginning, it will be used in multiple places throughout the validation and insertion. 
transaction.calculated_mass = Some(consensus.calculate_transaction_mass(&transaction.tx)); self.validate_transaction_in_isolation(&transaction)?; @@ -66,6 +67,7 @@ impl Mempool { } self.validate_transaction_in_context(&transaction)?; + self.validate_transaction_acceptance(&transaction)?; // Before adding the transaction, check if there is room in the pool self.transaction_pool.limit_transaction_count(1, &transaction)?.iter().try_for_each(|x| { @@ -78,6 +80,15 @@ impl Mempool { Ok(Some(accepted_transaction)) } + fn validate_transaction_acceptance(&self, transaction: &MutableTransaction) -> RuleResult<()> { + // Reject if the transaction is registered as an accepted transaction + let transaction_id = transaction.id(); + match self.accepted_transactions.has(&transaction_id) { + true => Err(RuleError::RejectAlreadyAccepted(transaction_id)), + false => Ok(()), + } + } + fn validate_transaction_in_isolation(&self, transaction: &MutableTransaction) -> RuleResult<()> { let transaction_id = transaction.id(); if self.transaction_pool.has(&transaction_id) { @@ -130,6 +141,9 @@ impl Mempool { unorphaned_transactions.push(unorphaned_tx); debug!("Transaction {0} unorphaned", transaction_id); } + Err(RuleError::RejectAlreadyAccepted(transaction_id)) => { + debug!("Ignoring already accepted transaction {}", transaction_id); + } Err(err) => { // In case of validation error, we log the problem and drop the // erroneous transaction. @@ -151,13 +165,14 @@ impl Mempool { // This job is delegated to a fn called later in the process (Manager::validate_and_insert_unorphaned_transactions). // Remove the transaction identified by transaction_id from the orphan pool. - let mut transactions = self.orphan_pool.remove_orphan(transaction_id, false, TxRemovalReason::Unorphaned)?; + let mut transactions = self.orphan_pool.remove_orphan(transaction_id, false, TxRemovalReason::Unorphaned, "")?; // At this point, `transactions` contains exactly one transaction. // The one we just removed from the orphan pool. assert_eq!(transactions.len(), 1, "the list returned by remove_orphan is expected to contain exactly one transaction"); let transaction = transactions.pop().unwrap(); + self.validate_transaction_acceptance(&transaction.mtx)?; self.transaction_pool.check_double_spends(&transaction.mtx)?; Ok(transaction) } diff --git a/protocol/flows/src/flow_context.rs b/protocol/flows/src/flow_context.rs index 5c77eb671c..b4563683a3 100644 --- a/protocol/flows/src/flow_context.rs +++ b/protocol/flows/src/flow_context.rs @@ -361,7 +361,7 @@ impl FlowContext { transactions_to_broadcast.enqueue_chunk( self.mining_manager() .clone() - .handle_new_block_transactions(consensus, block.transactions.clone()) + .handle_new_block_transactions(consensus, block.header.daa_score, block.transactions.clone()) .await? 
.iter() .map(|x| x.id()), diff --git a/protocol/flows/src/v5/txrelay/flow.rs b/protocol/flows/src/v5/txrelay/flow.rs index 32a99bb369..c46da3ed01 100644 --- a/protocol/flows/src/v5/txrelay/flow.rs +++ b/protocol/flows/src/v5/txrelay/flow.rs @@ -104,6 +104,7 @@ impl RelayTransactionsFlow { ) -> Result>, ProtocolError> { // Build a vector with the transaction ids unknown in the mempool and not already requested // by another peer + let transaction_ids = self.ctx.mining_manager().clone().unaccepted_transactions(transaction_ids).await; let mut requests = Vec::new(); for transaction_id in transaction_ids { if !self.is_known_transaction(transaction_id).await { From ea17aaa3b4b2fb3536af38ec22b87ceaf541f18d Mon Sep 17 00:00:00 2001 From: Tiram <18632023+tiram88@users.noreply.github.com> Date: Thu, 7 Sep 2023 19:22:01 +0300 Subject: [PATCH 22/86] Improve the filtering of unknown transactions in tx relay --- mining/src/manager.rs | 10 ++++++++++ mining/src/mempool/mod.rs | 9 ++++++++- mining/src/mempool/model/accepted_transactions.rs | 4 ++-- protocol/flows/src/v5/txrelay/flow.rs | 14 +++----------- 4 files changed, 23 insertions(+), 14 deletions(-) diff --git a/mining/src/manager.rs b/mining/src/manager.rs index 5634d242e7..c3f6c24ee6 100644 --- a/mining/src/manager.rs +++ b/mining/src/manager.rs @@ -551,6 +551,10 @@ impl MiningManager { pub fn unaccepted_transactions(&self, transactions: Vec) -> Vec { self.mempool.read().unaccepted_transactions(transactions) } + + pub fn unknown_transactions(&self, transactions: Vec) -> Vec { + self.mempool.read().unknown_transactions(transactions) + } } /// Async proxy for the mining manager @@ -693,4 +697,10 @@ impl MiningManagerProxy { pub async fn unaccepted_transactions(self, transactions: Vec) -> Vec { spawn_blocking(move || self.inner.unaccepted_transactions(transactions)).await.unwrap() } + + /// Returns a vector with all transaction ids that are neither in the mempool, nor in the orphan pool + /// nor accepted. 
+ pub async fn unknown_transactions(self, transactions: Vec) -> Vec { + spawn_blocking(move || self.inner.unknown_transactions(transactions)).await.unwrap() + } } diff --git a/mining/src/mempool/mod.rs b/mining/src/mempool/mod.rs index cd2fe18f3d..3d713a750c 100644 --- a/mining/src/mempool/mod.rs +++ b/mining/src/mempool/mod.rs @@ -143,7 +143,14 @@ impl Mempool { } pub(crate) fn unaccepted_transactions(&self, transactions: Vec) -> Vec { - self.accepted_transactions.unaccepted(transactions) + self.accepted_transactions.unaccepted(&mut transactions.into_iter()) + } + + pub(crate) fn unknown_transactions(&self, transactions: Vec) -> Vec { + let mut not_in_pools_txs = transactions + .into_iter() + .filter(|transaction_id| !(self.transaction_pool.has(transaction_id) || self.orphan_pool.has(transaction_id))); + self.accepted_transactions.unaccepted(&mut not_in_pools_txs) } } diff --git a/mining/src/mempool/model/accepted_transactions.rs b/mining/src/mempool/model/accepted_transactions.rs index 3705ad7f7c..cbdc5181fb 100644 --- a/mining/src/mempool/model/accepted_transactions.rs +++ b/mining/src/mempool/model/accepted_transactions.rs @@ -33,8 +33,8 @@ impl AcceptedTransactions { self.transactions.contains_key(transaction_id) } - pub(crate) fn unaccepted(&self, transactions: Vec) -> Vec { - transactions.into_iter().filter(|transaction_id| !self.has(transaction_id)).collect() + pub(crate) fn unaccepted(&self, transactions: &mut impl Iterator) -> Vec { + transactions.filter(|transaction_id| !self.has(transaction_id)).collect() } pub(crate) fn expire(&mut self, virtual_daa_score: u64) { diff --git a/protocol/flows/src/v5/txrelay/flow.rs b/protocol/flows/src/v5/txrelay/flow.rs index c46da3ed01..6190cb8435 100644 --- a/protocol/flows/src/v5/txrelay/flow.rs +++ b/protocol/flows/src/v5/txrelay/flow.rs @@ -104,13 +104,11 @@ impl RelayTransactionsFlow { ) -> Result>, ProtocolError> { // Build a vector with the transaction ids unknown in the mempool and not already requested // by another peer - let transaction_ids = self.ctx.mining_manager().clone().unaccepted_transactions(transaction_ids).await; + let transaction_ids = self.ctx.mining_manager().clone().unknown_transactions(transaction_ids).await; let mut requests = Vec::new(); for transaction_id in transaction_ids { - if !self.is_known_transaction(transaction_id).await { - if let Some(req) = self.ctx.try_adding_transaction_request(transaction_id) { - requests.push(req); - } + if let Some(req) = self.ctx.try_adding_transaction_request(transaction_id) { + requests.push(req); } } @@ -129,12 +127,6 @@ impl RelayTransactionsFlow { Ok(requests) } - async fn is_known_transaction(&self, transaction_id: TransactionId) -> bool { - // Ask the transaction memory pool if the transaction is known - // to it in any form (main pool or orphan). - self.ctx.mining_manager().clone().has_transaction(transaction_id, true, true).await - } - /// Returns the next Transaction or TransactionNotFound message in msg_route, /// returning only one of the message types at a time. 
    async fn read_response(&mut self) -> Result {

From 46fb8df2887ad90ed7b61ca3424ced6ef9ecd4ee Mon Sep 17 00:00:00 2001
From: Tiram <18632023+tiram88@users.noreply.github.com>
Date: Thu, 7 Sep 2023 19:22:38 +0300
Subject: [PATCH 23/86] Enhance tx removal logging

---
 mining/src/mempool/model/orphan_pool.rs       | 39 ++++++++++---------
 mining/src/mempool/model/transactions_pool.rs | 26 ++++++++-----
 mining/src/mempool/model/tx.rs                |  2 +-
 mining/src/mempool/remove_transaction.rs      | 21 ++++++++--
 4 files changed, 56 insertions(+), 32 deletions(-)

diff --git a/mining/src/mempool/model/orphan_pool.rs b/mining/src/mempool/model/orphan_pool.rs
index 285668a659..47726d65da 100644
--- a/mining/src/mempool/model/orphan_pool.rs
+++ b/mining/src/mempool/model/orphan_pool.rs
@@ -94,7 +94,7 @@ impl OrphanPool {
         }
         // Don't remove redeemers in the case of a random eviction since the evicted transaction is
         // not invalid, therefore its redeemers are as good as any orphan that just arrived.
-        self.remove_orphan(&orphan_to_remove.unwrap().id(), false, TxRemovalReason::MakingRoom)?;
+        self.remove_orphan(&orphan_to_remove.unwrap().id(), false, TxRemovalReason::MakingRoom, "")?;
     }
     Ok(())
 }
@@ -162,6 +162,7 @@ impl OrphanPool {
         &mut self,
         transaction_id: &TransactionId,
         remove_redeemers: bool,
         reason: TxRemovalReason,
+        extra_info: &str,
     ) -> RuleResult<Vec<MempoolTransaction>> {
         // Rust rewrite:
         // - the call cycle removeOrphan -> removeRedeemersOf -> removeOrphan is replaced by
@@ -178,18 +179,21 @@ impl OrphanPool {
         }
         let removed_transactions =
             transaction_ids_to_remove.iter().map(|x| self.remove_single_orphan(x)).collect::<RuleResult<Vec<_>>>()?;
-        match removed_transactions.len() {
-            0 => (), // This is not possible
-            1 => {
-                debug!("Removed transaction from orphan pool ({}): {}", reason, removed_transactions[0].id());
-            }
-            n => {
-                debug!(
-                    "Removed {} transactions from orphan pool ({}): {}",
-                    n,
-                    reason,
-                    removed_transactions.iter().map(|x| x.id()).reusable_format(", ")
-                );
+        if reason.verbose() {
+            match removed_transactions.len() {
+                0 => (), // This is not possible
+                1 => {
+                    debug!("Removed transaction from orphan pool ({}): {}{}", reason, removed_transactions[0].id(), extra_info);
+                }
+                n => {
+                    debug!(
+                        "Removed {} transactions from orphan pool ({}): {}{}",
+                        n,
+                        reason,
+                        removed_transactions.iter().map(|x| x.id()).reusable_format(", "),
+                        extra_info
+                    );
+                }
             }
         }
         Ok(removed_transactions)
@@ -235,11 +239,10 @@ impl OrphanPool {
         &mut self,
         removed_transaction: &MempoolTransaction,
         remove_redeemers: bool,
-    ) -> RuleResult<()> {
+    ) -> RuleResult<Vec<MempoolTransaction>> {
         let removed_transaction_id = removed_transaction.id();
         if remove_redeemers {
-            self.remove_redeemers_of(&removed_transaction_id)?;
-            return Ok(());
+            return self.remove_redeemers_of(&removed_transaction_id);
         }

         let mut outpoint = TransactionOutpoint::new(removed_transaction_id, 0);
@@ -253,7 +256,7 @@ impl OrphanPool {
             }
         }
-        Ok(())
+        Ok(vec![])
     }

     fn get_random_low_priority_orphan(&self) -> Option<&MempoolTransaction> {
@@ -300,7 +303,7 @@ impl Pool for OrphanPool {
         .collect();

         for transaction_id in expired_low_priority_transactions.iter() {
-            self.remove_orphan(transaction_id, false, TxRemovalReason::Expired)?;
+            self.remove_orphan(transaction_id, false, TxRemovalReason::Expired, "")?;
         }

         self.last_expire_scan = virtual_daa_score;
diff --git a/mining/src/mempool/model/transactions_pool.rs b/mining/src/mempool/model/transactions_pool.rs
index f20ef11f95..4aa67308b1 100644
--- a/mining/src/mempool/model/transactions_pool.rs
+++ b/mining/src/mempool/model/transactions_pool.rs
@@ -2,7 +2,12 @@ use crate::{
mempool::{ config::Config, errors::{RuleError, RuleResult}, - model::{map::MempoolTransactionCollection, pool::Pool, tx::MempoolTransaction, utxo_set::MempoolUtxoSet}, + model::{ + map::MempoolTransactionCollection, + pool::{Pool, TransactionsEdges}, + tx::{MempoolTransaction, TxRemovalReason}, + utxo_set::MempoolUtxoSet, + }, tx::Priority, }, model::{candidate_tx::CandidateTransaction, topological_index::TopologicalIndex}, @@ -12,13 +17,12 @@ use kaspa_consensus_core::{ tx::{MutableTransaction, TransactionOutpoint}, }; use kaspa_core::{debug, time::unix_now, trace, warn}; +use kaspa_utils::iter::IterExtensions; use std::{ collections::{hash_map::Keys, hash_set::Iter}, sync::Arc, }; -use super::pool::TransactionsEdges; - /// Pool of transactions to be included in a block template /// /// ### Rust rewrite notes @@ -309,12 +313,6 @@ impl Pool for TransactionsPool { if (x.priority == Priority::Low) && virtual_daa_score > x.added_at_daa_score + self.config.transaction_expire_interval_daa_score { - debug!( - "Removing transaction {}, because it expired, virtual DAA score is {} and expire limit is {}", - x.id(), - virtual_daa_score, - x.added_at_daa_score + self.config.transaction_expire_interval_daa_score - ); Some(x.id()) } else { None @@ -325,6 +323,16 @@ impl Pool for TransactionsPool { for transaction_id in expired_low_priority_transactions.iter() { self.remove_transaction(transaction_id)?; } + match expired_low_priority_transactions.len() { + 0 => {} + 1 => debug!("Removed transaction ({}) {}", TxRemovalReason::Expired, expired_low_priority_transactions[0]), + n => debug!( + "Removed {} transactions ({}): {}", + n, + TxRemovalReason::Expired, + expired_low_priority_transactions.iter().reusable_format(", ") + ), + } self.last_expire_scan_daa_score = virtual_daa_score; self.last_expire_scan_time = now; diff --git a/mining/src/mempool/model/tx.rs b/mining/src/mempool/model/tx.rs index 6235425358..6d07da67f0 100644 --- a/mining/src/mempool/model/tx.rs +++ b/mining/src/mempool/model/tx.rs @@ -78,7 +78,7 @@ impl TxRemovalReason { } pub(crate) fn verbose(&self) -> bool { - matches!(self, TxRemovalReason::Muted) + !matches!(self, TxRemovalReason::Muted) } } diff --git a/mining/src/mempool/remove_transaction.rs b/mining/src/mempool/remove_transaction.rs index 0e2862b3be..3e51107083 100644 --- a/mining/src/mempool/remove_transaction.rs +++ b/mining/src/mempool/remove_transaction.rs @@ -1,6 +1,9 @@ use crate::mempool::{ errors::RuleResult, - model::{pool::Pool, tx::TxRemovalReason}, + model::{ + pool::Pool, + tx::{MempoolTransaction, TxRemovalReason}, + }, Mempool, }; use kaspa_consensus_core::tx::TransactionId; @@ -16,7 +19,7 @@ impl Mempool { extra_info: &str, ) -> RuleResult<()> { if self.orphan_pool.has(transaction_id) { - return self.orphan_pool.remove_orphan(transaction_id, true, reason).map(|_| ()); + return self.orphan_pool.remove_orphan(transaction_id, true, reason, extra_info).map(|_| ()); } if !self.transaction_pool.has(transaction_id) { @@ -33,7 +36,13 @@ impl Mempool { }); } - removed_transactions.iter().try_for_each(|x| self.remove_transaction_from_sets(x, remove_redeemers))?; + let mut removed_orphans: Vec = vec![]; + removed_transactions.iter().try_for_each(|tx_id| { + self.remove_transaction_from_sets(tx_id, remove_redeemers).map(|txs| { + removed_orphans.extend(txs.iter().map(|x| x.id())); + }) + })?; + removed_transactions.extend(removed_orphans); if remove_redeemers { removed_transactions.extend(self.orphan_pool.remove_redeemers_of(transaction_id)?.iter().map(|x| x.id())); @@ 
-56,7 +65,11 @@ impl Mempool { Ok(()) } - fn remove_transaction_from_sets(&mut self, transaction_id: &TransactionId, remove_redeemers: bool) -> RuleResult<()> { + fn remove_transaction_from_sets( + &mut self, + transaction_id: &TransactionId, + remove_redeemers: bool, + ) -> RuleResult> { let removed_transaction = self.transaction_pool.remove_transaction(transaction_id)?; self.transaction_pool.remove_transaction_utxos(&removed_transaction.mtx); self.orphan_pool.update_orphans_after_transaction_removed(&removed_transaction, remove_redeemers) From 8522ced884f57c47fa3ff8b2cf10144fa5b83680 Mon Sep 17 00:00:00 2001 From: Tiram <18632023+tiram88@users.noreply.github.com> Date: Thu, 7 Sep 2023 21:31:11 +0300 Subject: [PATCH 24/86] Add mempool stats --- mining/src/block_template/builder.rs | 4 ++-- mining/src/manager.rs | 10 ++++++++++ mining/src/mempool/handle_new_block_transactions.rs | 1 + mining/src/mempool/mod.rs | 5 +++++ mining/src/mempool/model/accepted_transactions.rs | 12 +++++++++++- mining/src/mempool/model/orphan_pool.rs | 4 ++-- 6 files changed, 31 insertions(+), 5 deletions(-) diff --git a/mining/src/block_template/builder.rs b/mining/src/block_template/builder.rs index 4c46efed56..a2369fd41c 100644 --- a/mining/src/block_template/builder.rs +++ b/mining/src/block_template/builder.rs @@ -3,7 +3,7 @@ use crate::{block_template::selector::TransactionsSelector, model::candidate_tx: use kaspa_consensus_core::{ api::ConsensusApi, block::BlockTemplate, coinbase::MinerData, merkle::calc_hash_merkle_root, tx::COINBASE_TRANSACTION_INDEX, }; -use kaspa_core::{time::unix_now, trace}; +use kaspa_core::{time::unix_now, debug}; pub(crate) struct BlockTemplateBuilder { policy: Policy, @@ -84,7 +84,7 @@ impl BlockTemplateBuilder { miner_data: &MinerData, transactions: Vec, ) -> BuilderResult { - trace!("Considering {} transactions for inclusion into a new block", transactions.len()); + debug!("Considering {} transactions for a new block template", transactions.len()); let mut selector = TransactionsSelector::new(self.policy.clone(), transactions); let block_txs = selector.select_transactions(); Ok(consensus.build_block_template(miner_data.clone(), block_txs)?) 
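Before the manager hunks below: this patch threads a new Mempool::log_stats() call through the bulk entry points (insertion of unorphaned transactions, high priority revalidation and the expiration scan), so pool growth becomes visible at debug level. A standalone sketch of the helper's shape; the struct here is illustrative only, since the real method, shown in the next diff, reads the sizes of the transaction pool, the orphan pool and the accepted-ids cache directly:

    // Illustrative stand-in for Mempool::log_stats(); println! replaces kaspa_core::debug!.
    struct PoolSizes {
        transactions: usize,
        orphans: usize,
        accepted: usize,
    }

    fn log_stats(sizes: &PoolSizes) {
        // One cheap summary line per bulk operation, kept out of default log output.
        println!("Mempool stats: {} txs, {} orphans, {} accepted", sizes.transactions, sizes.orphans, sizes.accepted);
    }

    fn main() {
        log_stats(&PoolSizes { transactions: 1500, orphans: 12, accepted: 3400 });
    }

Keeping the line at debug level is a deliberate choice: it is emitted after every bulk mempool operation, which would be far too chatty for info-level logs on a busy node.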
diff --git a/mining/src/manager.rs b/mining/src/manager.rs index c3f6c24ee6..3c8ca47f4c 100644 --- a/mining/src/manager.rs +++ b/mining/src/manager.rs @@ -277,6 +277,14 @@ impl MiningManager { debug!("Ignoring already accepted transaction {}", transaction_id); None } + Err(RuleError::RejectDuplicate(transaction_id)) => { + debug!("Ignoring transaction already in the mempool {}", transaction_id); + None + } + Err(RuleError::RejectDuplicateOrphan(transaction_id)) => { + debug!("Ignoring transaction already in the orphan pool {}", transaction_id); + None + } Err(err) => { debug!("Failed to pre validate transaction {0} due to rule error: {1}", transaction_id, err); None @@ -321,6 +329,7 @@ impl MiningManager { } }) .collect::>(); + mempool.log_stats(); drop(mempool); // TODO: handle RuleError::RejectInvalid errors when a banning process gets implemented @@ -527,6 +536,7 @@ impl MiningManager { } } } + mempool.log_stats(); drop(mempool); } // Return the successfully processed high priority transaction ids diff --git a/mining/src/mempool/handle_new_block_transactions.rs b/mining/src/mempool/handle_new_block_transactions.rs index 55f1fce77f..bcda383e0a 100644 --- a/mining/src/mempool/handle_new_block_transactions.rs +++ b/mining/src/mempool/handle_new_block_transactions.rs @@ -37,6 +37,7 @@ impl Mempool { self.orphan_pool.expire_low_priority_transactions(consensus.get_virtual_daa_score())?; self.transaction_pool.expire_low_priority_transactions(consensus.get_virtual_daa_score())?; self.accepted_transactions.expire(consensus.get_virtual_daa_score()); + self.log_stats(); Ok(()) } diff --git a/mining/src/mempool/mod.rs b/mining/src/mempool/mod.rs index 3d713a750c..0ab2ecd260 100644 --- a/mining/src/mempool/mod.rs +++ b/mining/src/mempool/mod.rs @@ -9,6 +9,7 @@ use self::{ tx::Priority, }; use kaspa_consensus_core::tx::{MutableTransaction, TransactionId}; +use kaspa_core::debug; use std::{collections::hash_map::Entry, sync::Arc}; pub(crate) mod check_transaction_standard; @@ -152,6 +153,10 @@ impl Mempool { .filter(|transaction_id| !(self.transaction_pool.has(transaction_id) || self.orphan_pool.has(transaction_id))); self.accepted_transactions.unaccepted(&mut not_in_pools_txs) } + + pub(crate) fn log_stats(&self) { + debug!("Mempool stats: {} txs, {} orphans, {} accepted", self.transaction_pool.len(), self.orphan_pool.len(), self.accepted_transactions.len()); + } } pub mod tx { diff --git a/mining/src/mempool/model/accepted_transactions.rs b/mining/src/mempool/model/accepted_transactions.rs index cbdc5181fb..94ad0d0761 100644 --- a/mining/src/mempool/model/accepted_transactions.rs +++ b/mining/src/mempool/model/accepted_transactions.rs @@ -1,6 +1,6 @@ use crate::mempool::config::Config; use kaspa_consensus_core::tx::TransactionId; -use kaspa_core::time::unix_now; +use kaspa_core::{debug, time::unix_now}; use std::{collections::HashMap, sync::Arc}; pub(crate) struct AcceptedTransactions { @@ -33,6 +33,10 @@ impl AcceptedTransactions { self.transactions.contains_key(transaction_id) } + pub(crate) fn len(&self) -> usize { + self.transactions.len() + } + pub(crate) fn unaccepted(&self, transactions: &mut impl Iterator) -> Vec { transactions.filter(|transaction_id| !self.has(transaction_id)).collect() } @@ -61,6 +65,12 @@ impl AcceptedTransactions { self.remove(transaction_id); } + debug!( + "Removed {} accepted transactions from mempool cache. 
Currently containing {}", + expired_transactions.len(), + self.transactions.len() + ); + self.last_expire_scan_daa_score = virtual_daa_score; self.last_expire_scan_time = now; } diff --git a/mining/src/mempool/model/orphan_pool.rs b/mining/src/mempool/model/orphan_pool.rs index 47726d65da..fa8ca1955f 100644 --- a/mining/src/mempool/model/orphan_pool.rs +++ b/mining/src/mempool/model/orphan_pool.rs @@ -183,11 +183,11 @@ impl OrphanPool { match removed_transactions.len() { 0 => (), // This is not possible 1 => { - debug!("Removed transaction from orphan pool ({}): {}{}", reason, removed_transactions[0].id(), extra_info); + debug!("Removed orphan transaction ({}): {}{}", reason, removed_transactions[0].id(), extra_info); } n => { debug!( - "Removed {} transactions from orphan pool ({}): {}{}", + "Removed {} orphan transactions ({}): {}{}", n, reason, removed_transactions.iter().map(|x| x.id()).reusable_format(", "), From 61ca786a4100628efa1943f2cc7ef7e801f2297a Mon Sep 17 00:00:00 2001 From: Tiram <18632023+tiram88@users.noreply.github.com> Date: Sun, 10 Sep 2023 18:06:15 +0300 Subject: [PATCH 25/86] Process new and unorphaned blocks in topological order --- protocol/flows/src/flow_context.rs | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/protocol/flows/src/flow_context.rs b/protocol/flows/src/flow_context.rs index b4563683a3..fe4bbd2225 100644 --- a/protocol/flows/src/flow_context.rs +++ b/protocol/flows/src/flow_context.rs @@ -353,11 +353,14 @@ impl FlowContext { /// /// _GO-KASPAD: OnNewBlock + broadcastTransactionsAfterBlockAdded_ pub async fn on_new_block(&self, consensus: &ConsensusProxy, block: Block) -> Result<(), ProtocolError> { + let _sw = Stopwatch::<500>::with_threshold("on_new_block lock"); let hash = block.hash(); - let blocks = self.unorphan_blocks(consensus, hash).await; + let mut blocks = self.unorphan_blocks(consensus, hash).await; + // Process blocks in topological order + blocks.sort_by(|a, b| a.header.blue_work.partial_cmp(&b.header.blue_work).unwrap()); // Use a ProcessQueue so we get rid of duplicates let mut transactions_to_broadcast = ProcessQueue::new(); - for block in once(block).chain(blocks.into_iter()) { + for block in blocks.into_iter().chain(once(block)) { transactions_to_broadcast.enqueue_chunk( self.mining_manager() .clone() From 5ba09f53836eedd323d6bff84d09749162703cfe Mon Sep 17 00:00:00 2001 From: Tiram <18632023+tiram88@users.noreply.github.com> Date: Sun, 10 Sep 2023 19:20:34 +0300 Subject: [PATCH 26/86] Run revalidation of HP txs in a dedicated task --- Cargo.lock | 2 + core/src/time.rs | 2 +- mining/Cargo.toml | 7 + mining/src/manager.rs | 209 ++++++++++++------ mining/src/manager_tests.rs | 23 +- mining/src/mempool/mod.rs | 27 ++- mining/src/mempool/model/transactions_pool.rs | 4 + protocol/flows/src/flow_context.rs | 25 ++- .../flows/src/flowcontext/transactions.rs | 23 +- 9 files changed, 234 insertions(+), 88 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 8f81a1a50e..6a38fad7f8 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2461,6 +2461,7 @@ version = "0.1.6" dependencies = [ "criterion", "futures-util", + "itertools 0.10.5", "kaspa-addresses", "kaspa-consensus-core", "kaspa-consensusmanager", @@ -2477,6 +2478,7 @@ dependencies = [ "serde", "smallvec", "thiserror", + "tokio", ] [[package]] diff --git a/core/src/time.rs b/core/src/time.rs index 65ed6dcdea..7b377f2341 100644 --- a/core/src/time.rs +++ b/core/src/time.rs @@ -28,7 +28,7 @@ impl Drop for Stopwatch { fn drop(&mut self) { let elapsed = 
self.start.elapsed(); if elapsed > Duration::from_millis(TR) { - kaspa_core::warn!("\n[{}] Abnormal time: {:#?}", self.name, elapsed); + kaspa_core::warn!("[{}] Abnormal time: {:#?}", self.name, elapsed); } } } diff --git a/mining/Cargo.toml b/mining/Cargo.toml index 67b39917e1..f690e45c6e 100644 --- a/mining/Cargo.toml +++ b/mining/Cargo.toml @@ -17,6 +17,7 @@ kaspa-core.workspace = true kaspa-mining-errors.workspace = true kaspa-consensusmanager.workspace = true kaspa-utils.workspace = true + thiserror.workspace = true serde.workspace = true log.workspace = true @@ -24,6 +25,12 @@ futures-util.workspace = true smallvec.workspace = true rand.workspace = true parking_lot.workspace = true +itertools.workspace = true +tokio = { workspace = true, features = [ + "rt-multi-thread", + "macros", + "signal", +] } [dev-dependencies] kaspa-txscript.workspace = true diff --git a/mining/src/manager.rs b/mining/src/manager.rs index 3c8ca47f4c..86e56790fb 100644 --- a/mining/src/manager.rs +++ b/mining/src/manager.rs @@ -1,7 +1,3 @@ -// TODO: add integration tests - -use std::sync::Arc; - use crate::{ block_template::{builder::BlockTemplateBuilder, errors::BuilderError}, cache::BlockTemplateCache, @@ -19,6 +15,7 @@ use crate::{ txs_stager::TransactionsStagger, }, }; +use itertools::Itertools; use kaspa_consensus_core::{ api::ConsensusApi, block::BlockTemplate, @@ -27,9 +24,11 @@ use kaspa_consensus_core::{ tx::{MutableTransaction, Transaction, TransactionId, TransactionOutput}, }; use kaspa_consensusmanager::{spawn_blocking, ConsensusProxy}; -use kaspa_core::{debug, error, warn}; +use kaspa_core::{debug, error, info, time::Stopwatch, warn}; use kaspa_mining_errors::mempool::RuleError; use parking_lot::{Mutex, RwLock}; +use std::sync::Arc; +use tokio::sync::mpsc::UnboundedSender; pub struct MiningManager { block_template_builder: BlockTemplateBuilder, @@ -421,12 +420,32 @@ impl MiningManager { Ok(accepted_transactions) } - pub fn revalidate_high_priority_transactions(&self, consensus: &dyn ConsensusApi) -> MiningManagerResult> { + pub fn revalidate_high_priority_transactions( + &self, + consensus: &dyn ConsensusApi, + transaction_ids_sender: UnboundedSender>, + ) { + let _sw = Stopwatch::<1000>::with_threshold("revalidate_high_priority_transactions lock"); + // read lock on mempool // Prepare a vector with clones of high priority transactions found in the mempool - let transactions = self.mempool.read().all_transactions_with_priority(Priority::High); + let mempool = self.mempool.read(); + if mempool.has_transactions_with_priority(Priority::High) { + info!("Revalidating high priority transactions..."); + } else { + debug!("Revalidating high priority transactions found no transactions"); + return; + } + let _swo = Stopwatch::<50>::with_threshold("revalidate all_transactions_with_priority op"); + let transactions = mempool.all_transactions_with_priority(Priority::High); + drop(_swo); + drop(mempool); - let mut valid_ids = Vec::with_capacity(transactions.len()); + let mut valid: usize = 0; + let mut accepted: usize = 0; + let mut other: usize = 0; + let mut missing_outpoint: usize = 0; + let mut invalid: usize = 0; // We process the transactions by level of dependency inside the batch. // Doing so allows to remove all chained dependencies of rejected transactions before actually trying @@ -444,12 +463,14 @@ impl MiningManager { // As the revalidation process is no longer atomic, we filter the transactions ready for revalidation, // keeping only the ones actually present in the mempool (see comment above). 
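// [worked example, editorial sketch, not part of the diff] With high-priority
// txs A -> B -> C (B spends A, C spends B), the dependency stagger yields the
// levels [A], [B], [C]. If A fails revalidation, it is removed together with its
// redeemers B and C, so the membership filter below drops them from the next
// stagger level and no time is wasted validating transactions that are already
// doomed.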
let mempool = self.mempool.read(); + let _swo = Stopwatch::<100>::with_threshold("revalidate populate_mempool_entries op"); let mut transactions = transactions .into_iter() .filter_map(|mut x| { let transaction_id = x.id(); if mempool.has_transaction(&transaction_id, true, false) { if mempool.has_accepted_transaction(&transaction_id) { + accepted += 1; None } else { x.clear_entries(); @@ -457,10 +478,12 @@ impl MiningManager { Some(x) } } else { + other += 1; None } }) .collect::>(); + drop(_swo); drop(mempool); // no lock on mempool @@ -468,79 +491,121 @@ impl MiningManager { let mut lower_bound: usize = 0; let mut validation_results = Vec::with_capacity(transactions.len()); while let Some(upper_bound) = self.next_transaction_chunk_upper_bound(&transactions, lower_bound) { + assert!(lower_bound < upper_bound, "the chunk is never empty"); + let _swo = Stopwatch::<60>::with_threshold("revalidate validate_mempool_transactions_in_parallel op"); validation_results .extend(validate_mempool_transactions_in_parallel(consensus, &mut transactions[lower_bound..upper_bound])); + drop(_swo); lower_bound = upper_bound; } assert_eq!(transactions.len(), validation_results.len(), "every transaction should have a matching validation result"); // write lock on mempool // Depending on the validation result, transactions are either accepted or removed - let mut mempool = self.mempool.write(); - for (transaction, validation_result) in transactions.into_iter().zip(validation_results) { - let transaction_id = transaction.id(); - // Only consider transactions still being in the mempool since during the validation some might have been removed. - if mempool.update_revalidated_transaction(transaction) { - match validation_result { - Ok(()) => { - // A following transaction should not remove this one from the pool since we process in a topological order. - // Still, considering the (very unlikely) scenario of two high priority txs sandwiching a low one, where - // in this case topological order is not guaranteed since we only considered chained dependencies of - // high-priority transactions, we might wrongfully return as valid the id of a removed transaction. - // However, as only consequence, said transaction would then be advertised to registered peers and not be - // provided upon request. - valid_ids.push(transaction_id); - } - Err(RuleError::RejectMissingOutpoint) => { - let transaction = mempool.get_transaction(&transaction_id, true, false).unwrap(); - let missing_txs = transaction - .entries - .iter() - .zip(transaction.tx.inputs.iter()) - .flat_map( - |(entry, input)| if entry.is_none() { Some(input.previous_outpoint.transaction_id) } else { None }, - ) - .collect::>(); - - // A transaction may have missing outpoints for legitimate reasons related to concurrency, like a race condition between - // an accepted block having not started yet or unfinished call to handle_new_block_transactions but already processed by - // the consensus and this ongoing call to revalidate. - // - // So we only remove the transaction and keep its redeemers in the mempool because we cannot be sure they are invalid, in - // fact in the race condition case they are valid regarding outpoints. - let extra_info = match missing_txs.len() { - 0 => " but no missing tx!".to_string(), // this is never supposed to happen - 1 => format!(" missing tx {}", missing_txs[0]), - n => format!(" with {} missing txs {}..{}", n, missing_txs[0], missing_txs.last().unwrap()), - }; - - // This call cleanly removes the invalid transaction. 
- mempool.remove_transaction( - &transaction_id, - false, - TxRemovalReason::RevalidationWithMissingOutpoints, - extra_info.as_str(), - )?; - } - Err(err) => { - // Rust rewrite note: - // The behavior changes here compared to the golang version. - // The failed revalidation is simply logged and the process continues. - warn!( - "Removing high priority transaction {0} and its redeemers, it failed revalidation with {1}", - transaction_id, err - ); - // This call cleanly removes the invalid transaction and its redeemers. - mempool.remove_transaction(&transaction_id, true, TxRemovalReason::Muted, "")?; + const TRANSACTION_CHUNK_SIZE: usize = 246 * 4; + for chunk in &transactions.into_iter().zip(validation_results).chunks(TRANSACTION_CHUNK_SIZE) { + let mut valid_ids = Vec::with_capacity(TRANSACTION_CHUNK_SIZE); + let mut mempool = self.mempool.write(); + let _swo = Stopwatch::<60>::with_threshold("revalidate update_revalidated_transaction op"); + for (transaction, validation_result) in chunk { + let transaction_id = transaction.id(); + // Only consider transactions still being in the mempool since during the validation some might have been removed. + if mempool.update_revalidated_transaction(transaction) { + match validation_result { + Ok(()) => { + // A following transaction should not remove this one from the pool since we process in a topological order. + // Still, considering the (very unlikely) scenario of two high priority txs sandwiching a low one, where + // in this case topological order is not guaranteed since we only considered chained dependencies of + // high-priority transactions, we might wrongfully return as valid the id of a removed transaction. + // However, as only consequence, said transaction would then be advertised to registered peers and not be + // provided upon request. + valid_ids.push(transaction_id); + valid += 1; + } + Err(RuleError::RejectMissingOutpoint) => { + let transaction = mempool.get_transaction(&transaction_id, true, false).unwrap(); + let missing_txs = transaction + .entries + .iter() + .zip(transaction.tx.inputs.iter()) + .flat_map( + |(entry, input)| { + if entry.is_none() { + Some(input.previous_outpoint.transaction_id) + } else { + None + } + }, + ) + .collect::>(); + + // A transaction may have missing outpoints for legitimate reasons related to concurrency, like a race condition between + // an accepted block having not started yet or unfinished call to handle_new_block_transactions but already processed by + // the consensus and this ongoing call to revalidate. + // + // So we only remove the transaction and keep its redeemers in the mempool because we cannot be sure they are invalid, in + // fact in the race condition case they are valid regarding outpoints. + let extra_info = match missing_txs.len() { + 0 => " but no missing tx!".to_string(), // this is never supposed to happen + 1 => format!(" missing tx {}", missing_txs[0]), + n => format!(" with {} missing txs {}..{}", n, missing_txs[0], missing_txs.last().unwrap()), + }; + + // This call cleanly removes the invalid transaction. + let result = mempool.remove_transaction( + &transaction_id, + false, + TxRemovalReason::RevalidationWithMissingOutpoints, + extra_info.as_str(), + ); + if let Err(err) = result { + warn!("Failed to remove transaction {} from mempool: {}", transaction_id, err); + } + missing_outpoint += 1; + } + Err(err) => { + // Rust rewrite note: + // The behavior changes here compared to the golang version. 
+ // The failed revalidation is simply logged and the process continues. + warn!( + "Removing high priority transaction {0} and its redeemers, it failed revalidation with {1}", + transaction_id, err + ); + // This call cleanly removes the invalid transaction and its redeemers. + let result = mempool.remove_transaction(&transaction_id, true, TxRemovalReason::Muted, ""); + if let Err(err) = result { + warn!("Failed to remove transaction {} from mempool: {}", transaction_id, err); + } + invalid += 1; + } } + } else { + other += 1; } } + if !valid_ids.is_empty() { + assert!(transaction_ids_sender.send(valid_ids).is_ok(), "the channel expected to have a receiver and be opened"); + } + drop(_swo); + mempool.log_stats(); + drop(mempool); + } + } + match accepted + missing_outpoint + invalid { + 0 => { + info!("Revalidated {} high priority transactions", valid); + } + _ => { + info!( + "Revalidated {} and removed {} high priority transactions (removals: {} accepted, {} missing outpoint, {} invalid)", + valid, + accepted + missing_outpoint + invalid, + accepted, + missing_outpoint, + invalid, + ); } - mempool.log_stats(); - drop(mempool); } - // Return the successfully processed high priority transaction ids - Ok(valid_ids) } /// is_transaction_output_dust returns whether or not the passed transaction output @@ -631,8 +696,12 @@ impl MiningManagerProxy { .await } - pub async fn revalidate_high_priority_transactions(self, consensus: &ConsensusProxy) -> MiningManagerResult> { - consensus.clone().spawn_blocking(move |c| self.inner.revalidate_high_priority_transactions(c)).await + pub async fn revalidate_high_priority_transactions( + self, + consensus: &ConsensusProxy, + transaction_ids_sender: UnboundedSender>, + ) { + consensus.clone().spawn_blocking(move |c| self.inner.revalidate_high_priority_transactions(c, transaction_ids_sender)).await; } /// Try to return a mempool transaction by its id. 
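(Editorial sketch, not part of the patch: the channel-based signature above is meant to be consumed as below. `MiningManagerProxy::revalidate_high_priority_transactions` and `ConsensusProxy` are as declared in this hunk; the function name, the import path of `MiningManagerProxy`, and the task wiring are assumptions. The same pattern appears in flow_context.rs in a later hunk of this series.)

    use kaspa_consensus_core::tx::TransactionId;
    use kaspa_consensusmanager::ConsensusProxy;
    use kaspa_mining::manager::MiningManagerProxy; // assumed import path
    use tokio::sync::mpsc::unbounded_channel;

    // Hypothetical caller: stream batches of still-valid high priority tx ids
    // out of the revalidation task as soon as each chunk has been processed.
    async fn rebroadcast_revalidated(manager: MiningManagerProxy, consensus: ConsensusProxy) {
        let (tx, mut rx) = unbounded_channel::<Vec<TransactionId>>();
        tokio::spawn(async move {
            // Internally alternates short read/write mempool locks with
            // lock-free parallel validation, sending each valid batch on `tx`.
            manager.revalidate_high_priority_transactions(&consensus, tx).await;
        });
        while let Some(valid_ids) = rx.recv().await {
            // Forward `valid_ids` to the P2P layer for Inv broadcasting
            // (application-specific; elided in this sketch).
            let _ = valid_ids;
        }
    }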
diff --git a/mining/src/manager_tests.rs b/mining/src/manager_tests.rs index ac3d919959..31851a4b2f 100644 --- a/mining/src/manager_tests.rs +++ b/mining/src/manager_tests.rs @@ -31,6 +31,7 @@ mod tests { test_helpers::{create_transaction, op_true_script}, }; use std::sync::Arc; + use tokio::sync::mpsc::{error::TryRecvError, unbounded_channel}; const TARGET_TIME_PER_BLOCK: u64 = 1_000; const MAX_BLOCK_MASS: u64 = 500_000; @@ -675,8 +676,15 @@ mod tests { assert!(result.is_ok(), "the insertion in the mempool of the spending transaction failed"); // Revalidate, to make sure spending_tx is still valid - let result = mining_manager.revalidate_high_priority_transactions(consensus.as_ref()); - assert!(result.is_ok(), "the revalidation of high-priority transactions should succeed"); + let (tx, mut rx) = unbounded_channel(); + mining_manager.revalidate_high_priority_transactions(consensus.as_ref(), tx); + let result = rx.blocking_recv(); + assert!(result.is_some(), "the revalidation of high-priority transactions must yield one message"); + assert_eq!( + Err(TryRecvError::Disconnected), + rx.try_recv(), + "the revalidation of high-priority transactions must yield exactly one message" + ); let valid_txs = result.unwrap(); assert_eq!(1, valid_txs.len(), "the revalidated transaction count is wrong: expected: {}, got: {}", 1, valid_txs.len()); assert_eq!(spending_tx.id(), valid_txs[0], "the revalidated transaction is not the right one"); @@ -692,10 +700,13 @@ mod tests { ); // Revalidate again, this time valid_txs should be empty - let result = mining_manager.revalidate_high_priority_transactions(consensus.as_ref()); - assert!(result.is_ok(), "the revalidation of high-priority transactions should succeed"); - let valid_txs = result.unwrap(); - assert!(valid_txs.is_empty(), "the revalidated transaction count is wrong: expected: {}, got: {}", 0, valid_txs.len()); + let (tx, mut rx) = unbounded_channel(); + mining_manager.revalidate_high_priority_transactions(consensus.as_ref(), tx); + assert_eq!( + Err(TryRecvError::Disconnected), + rx.try_recv(), + "the revalidation of high-priority transactions must yield no message" + ); // And the mempool should be empty too let (populated_txs, orphan_txs) = mining_manager.get_all_transactions(true, true); diff --git a/mining/src/mempool/mod.rs b/mining/src/mempool/mod.rs index 0ab2ecd260..40581a9e14 100644 --- a/mining/src/mempool/mod.rs +++ b/mining/src/mempool/mod.rs @@ -9,7 +9,10 @@ use self::{ tx::Priority, }; use kaspa_consensus_core::tx::{MutableTransaction, TransactionId}; -use kaspa_core::debug; +use kaspa_core::{ + info, + time::{unix_now, Stopwatch}, +}; use std::{collections::hash_map::Entry, sync::Arc}; pub(crate) mod check_transaction_standard; @@ -42,6 +45,7 @@ pub(crate) struct Mempool { transaction_pool: TransactionsPool, orphan_pool: OrphanPool, accepted_transactions: AcceptedTransactions, + last_stat_report_time: u64, } impl Mempool { @@ -50,7 +54,7 @@ impl Mempool { let transaction_pool = TransactionsPool::new(config.clone()); let orphan_pool = OrphanPool::new(config.clone()); let accepted_transactions = AcceptedTransactions::new(config.clone()); - Self { config, transaction_pool, orphan_pool, accepted_transactions } + Self { config, transaction_pool, orphan_pool, accepted_transactions, last_stat_report_time: unix_now() } } pub(crate) fn get_transaction( @@ -123,6 +127,7 @@ impl Mempool { } pub(crate) fn block_candidate_transactions(&self) -> Vec { + let _sw = Stopwatch::<80>::with_threshold("block_candidate_transactions op"); 
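// [editorial sketch, not part of the diff] `Stopwatch::<N>` as used throughout
// this series logs only on slow paths: its `Drop` impl (core/src/time.rs above)
// warns "[<name>] Abnormal time: ..." when the guarded scope exceeded N ms.
//
//     {
//         let _sw = Stopwatch::<80>::with_threshold("some op"); // ctor per this series
//         do_expensive_work(); // hypothetical workload
//     } // `_sw` dropped here; a warning is emitted only if the scope took > 80ms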
self.transaction_pool.all_ready_transactions() } @@ -130,6 +135,10 @@ impl Mempool { self.transaction_pool.all_transactions_with_priority(priority) } + pub(crate) fn has_transactions_with_priority(&self, priority: Priority) -> bool { + self.transaction_pool.has_transactions_with_priority(priority) + } + pub(crate) fn update_revalidated_transaction(&mut self, transaction: MutableTransaction) -> bool { if let Entry::Occupied(mut entry) = self.transaction_pool.all_mut().entry(transaction.id()) { entry.get_mut().mtx = transaction; @@ -154,8 +163,18 @@ impl Mempool { self.accepted_transactions.unaccepted(&mut not_in_pools_txs) } - pub(crate) fn log_stats(&self) { - debug!("Mempool stats: {} txs, {} orphans, {} accepted", self.transaction_pool.len(), self.orphan_pool.len(), self.accepted_transactions.len()); + pub(crate) fn log_stats(&mut self) { + const LOG_STATS_REPORT_INTERVAL_MILLISECONDS: u64 = 2000; + let now = unix_now(); + if now >= self.last_stat_report_time + LOG_STATS_REPORT_INTERVAL_MILLISECONDS { + info!( + "Mempool stats: {} txs, {} orphans, {} accepted", + self.transaction_pool.len(), + self.orphan_pool.len(), + self.accepted_transactions.len() + ); + self.last_stat_report_time = now; + } } } diff --git a/mining/src/mempool/model/transactions_pool.rs b/mining/src/mempool/model/transactions_pool.rs index 4aa67308b1..6b380d3207 100644 --- a/mining/src/mempool/model/transactions_pool.rs +++ b/mining/src/mempool/model/transactions_pool.rs @@ -248,6 +248,10 @@ impl TransactionsPool { self.all().values().filter_map(|x| if x.priority == priority { Some(x.mtx.clone()) } else { None }).collect() } + pub(crate) fn has_transactions_with_priority(&self, priority: Priority) -> bool { + self.all().values().any(|x| x.priority == priority) + } + pub(crate) fn get_outpoint_owner_id(&self, outpoint: &TransactionOutpoint) -> Option<&TransactionId> { self.utxo_set.get_outpoint_owner_id(outpoint) } diff --git a/protocol/flows/src/flow_context.rs b/protocol/flows/src/flow_context.rs index fe4bbd2225..e703c2fa86 100644 --- a/protocol/flows/src/flow_context.rs +++ b/protocol/flows/src/flow_context.rs @@ -12,6 +12,7 @@ use kaspa_consensus_notify::{ root::ConsensusNotificationRoot, }; use kaspa_consensusmanager::{ConsensusInstance, ConsensusManager, ConsensusProxy}; +use kaspa_core::time::Stopwatch; use kaspa_core::{ debug, info, kaspad_env::{name, version}, @@ -376,9 +377,24 @@ impl FlowContext { return Ok(()); } + // TODO: refactor this adding a worker and a scheduler to FlowContext if self.should_rebroadcast_transactions().await { - transactions_to_broadcast - .enqueue_chunk(self.mining_manager().clone().revalidate_high_priority_transactions(consensus).await?.into_iter()); + // Spawn a task revalidating concurrently the high priority transactions. + // The TransactionSpread instance ensures at most one rebroadcast running at any + // given time. 
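// [editorial note, not part of the diff] Since the revalidation below now runs
// in a spawned task, stamping `last_rebroadcast_time` at the moment the guard
// fires would let the next interval tick start an overlapping run. The guard
// therefore flips an `executing_rebroadcast` flag here and only stamps the time
// when the task reports completion via `rebroadcast_done()` (see
// flowcontext/transactions.rs later in this patch).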
+ let mining_manager = self.mining_manager().clone(); + let consensus_clone = consensus.clone(); + let context = self.clone(); + tokio::spawn(async move { + let (tx, mut rx) = unbounded_channel(); + tokio::spawn(async move { + mining_manager.revalidate_high_priority_transactions(&consensus_clone, tx).await; + }); + while let Some(transactions) = rx.recv().await { + let _ = context.broadcast_transactions(transactions).await; + } + context.rebroadcast_done().await; + }); } self.broadcast_transactions(transactions_to_broadcast).await @@ -429,6 +445,10 @@ impl FlowContext { self.transactions_spread.write().await.should_rebroadcast_transactions() } + pub async fn rebroadcast_done(&self) { + self.transactions_spread.write().await.rebroadcast_done(); + } + /// Add the given transactions IDs to a set of IDs to broadcast. The IDs will be broadcasted to all peers /// within transaction Inv messages. /// @@ -438,6 +458,7 @@ impl FlowContext { &self, transaction_ids: I, ) -> Result<(), ProtocolError> { + let _sw = Stopwatch::<100>::with_threshold("broadcast_transactions lock"); self.transactions_spread.write().await.broadcast_transactions(transaction_ids).await } } diff --git a/protocol/flows/src/flowcontext/transactions.rs b/protocol/flows/src/flowcontext/transactions.rs index 4a43789893..f8886c4e89 100644 --- a/protocol/flows/src/flowcontext/transactions.rs +++ b/protocol/flows/src/flowcontext/transactions.rs @@ -17,27 +17,40 @@ pub(crate) const MAX_INV_PER_TX_INV_MSG: usize = 131_072; pub struct TransactionsSpread { hub: Hub, last_rebroadcast_time: Instant, + executing_rebroadcast: bool, transaction_ids: ProcessQueue, last_broadcast_time: Instant, } impl TransactionsSpread { pub fn new(hub: Hub) -> Self { - Self { hub, last_rebroadcast_time: Instant::now(), transaction_ids: ProcessQueue::new(), last_broadcast_time: Instant::now() } + Self { + hub, + last_rebroadcast_time: Instant::now(), + executing_rebroadcast: false, + transaction_ids: ProcessQueue::new(), + last_broadcast_time: Instant::now(), + } } /// Returns true if the time for a rebroadcast of the mempool high priority transactions has come. /// /// If true, the instant of the call is registered as the last rebroadcast time. pub fn should_rebroadcast_transactions(&mut self) -> bool { - let now = Instant::now(); - if now - self.last_rebroadcast_time < REBROADCAST_INTERVAL { + if self.executing_rebroadcast || Instant::now() < self.last_rebroadcast_time + REBROADCAST_INTERVAL { return false; } - self.last_rebroadcast_time = now; + self.executing_rebroadcast = true; true } + pub fn rebroadcast_done(&mut self) { + if self.executing_rebroadcast { + self.executing_rebroadcast = false; + self.last_rebroadcast_time = Instant::now(); + } + } + /// Add the given transactions IDs to a set of IDs to broadcast. The IDs will be broadcasted to all peers /// within transaction Inv messages. 
/// @@ -53,7 +66,7 @@ impl TransactionsSpread { self.transaction_ids.enqueue_chunk(transaction_ids); let now = Instant::now(); - if now - self.last_broadcast_time < BROADCAST_INTERVAL && self.transaction_ids.len() < MAX_INV_PER_TX_INV_MSG { + if now < self.last_broadcast_time + BROADCAST_INTERVAL && self.transaction_ids.len() < MAX_INV_PER_TX_INV_MSG { return Ok(()); } From 2df30290b998eb4468bb60a2310e95be98441092 Mon Sep 17 00:00:00 2001 From: Tiram <18632023+tiram88@users.noreply.github.com> Date: Sun, 10 Sep 2023 19:32:22 +0300 Subject: [PATCH 27/86] Some profiling and debug logs --- mining/src/block_template/builder.rs | 7 +++- mining/src/block_template/selector.rs | 4 ++- mining/src/manager.rs | 36 +++++++++++++++---- .../mempool/handle_new_block_transactions.rs | 2 ++ mining/src/model/txs_stager.rs | 2 ++ 5 files changed, 43 insertions(+), 8 deletions(-) diff --git a/mining/src/block_template/builder.rs b/mining/src/block_template/builder.rs index a2369fd41c..c301fdf0b1 100644 --- a/mining/src/block_template/builder.rs +++ b/mining/src/block_template/builder.rs @@ -3,7 +3,10 @@ use crate::{block_template::selector::TransactionsSelector, model::candidate_tx: use kaspa_consensus_core::{ api::ConsensusApi, block::BlockTemplate, coinbase::MinerData, merkle::calc_hash_merkle_root, tx::COINBASE_TRANSACTION_INDEX, }; -use kaspa_core::{time::unix_now, debug}; +use kaspa_core::{ + debug, + time::{unix_now, Stopwatch}, +}; pub(crate) struct BlockTemplateBuilder { policy: Policy, @@ -84,6 +87,7 @@ impl BlockTemplateBuilder { miner_data: &MinerData, transactions: Vec, ) -> BuilderResult { + let _sw = Stopwatch::<200>::with_threshold("build_block_template op"); debug!("Considering {} transactions for a new block template", transactions.len()); let mut selector = TransactionsSelector::new(self.policy.clone(), transactions); let block_txs = selector.select_transactions(); @@ -120,6 +124,7 @@ impl BlockTemplateBuilder { Ok(block_template) } + #[inline(always)] pub fn max_block_mass(&self) -> u64 { self.policy.max_block_mass } diff --git a/mining/src/block_template/selector.rs b/mining/src/block_template/selector.rs index 7123fdf852..ed421327e8 100644 --- a/mining/src/block_template/selector.rs +++ b/mining/src/block_template/selector.rs @@ -1,4 +1,4 @@ -use kaspa_core::trace; +use kaspa_core::{time::Stopwatch, trace}; use rand::Rng; use std::{collections::HashMap, vec}; @@ -39,6 +39,7 @@ pub(crate) struct TransactionsSelector { impl TransactionsSelector { pub(crate) fn new(policy: Policy, mut transactions: Vec) -> Self { + let _sw = Stopwatch::<100>::with_threshold("TransactionsSelector::new op"); // Sort the transactions by subnetwork_id. transactions.sort_by(|a, b| a.tx.subnetwork_id.cmp(&b.tx.subnetwork_id)); @@ -73,6 +74,7 @@ impl TransactionsSelector { /// and appends the ones that will be included in the next block into /// selected_txs. pub(crate) fn select_transactions(&mut self) -> Vec { + let _sw = Stopwatch::<100>::with_threshold("select_transaction op"); let mut rng = rand::thread_rng(); self.reset(); diff --git a/mining/src/manager.rs b/mining/src/manager.rs index 86e56790fb..b35ed9415b 100644 --- a/mining/src/manager.rs +++ b/mining/src/manager.rs @@ -77,15 +77,19 @@ impl MiningManager { // We avoid passing a mempool ref to blockTemplateBuilder by calling // mempool.BlockCandidateTransactions and mempool.RemoveTransactions here. // We remove recursion seen in blockTemplateBuilder.BuildBlockTemplate here. 
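// [editorial sketch, not part of the diff] Removal policy applied below when a
// candidate set fails consensus validation during template building:
//
//     TxRuleError::MissingTxOutpoints -> remove the tx, keep its redeemers
//                                        (likely a benign race with an accepted
//                                        block; the redeemers may still be valid)
//     any other rule error            -> remove the tx and all its redeemers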
+ debug!("Building a new block template..."); loop { let transactions = self.block_candidate_transactions(); match self.block_template_builder.build_block_template(consensus, miner_data, transactions) { Ok(block_template) => { let block_template = cache_lock.set_immutable_cached_template(block_template); + debug!("Built a new block template with {} transactions", block_template.block.transactions.len()); return Ok(block_template.as_ref().clone()); } Err(BuilderError::ConsensusError(BlockRuleError::InvalidTransactionsInNewBlock(invalid_transactions))) => { let mut mempool_write = self.mempool.write(); + let mut missing_outpoint: usize = 0; + let mut invalid: usize = 0; invalid_transactions.iter().for_each(|(x, err)| { // On missing outpoints, the most likely is that the tx was already in a block accepted by // the consensus but not yet processed by handle_new_block_transactions(). Another possibility @@ -97,12 +101,18 @@ impl MiningManager { // redeemers being unexpectedly either orphaned or rejected in case orphans are disallowed. // // For all other errors, we do remove the redeemers. - let removal_result = mempool_write.remove_transaction( - x, - *err != TxRuleError::MissingTxOutpoints, - TxRemovalReason::InvalidInBlockTemplate, - format!(" error: {}", err).as_str(), - ); + let removal_result = if *err == TxRuleError::MissingTxOutpoints { + missing_outpoint += 1; + mempool_write.remove_transaction(x, false, TxRemovalReason::Muted, "") + } else { + invalid += 1; + mempool_write.remove_transaction( + x, + true, + TxRemovalReason::InvalidInBlockTemplate, + format!(" error: {}", err).as_str(), + ) + }; if let Err(err) = removal_result { // Original golang comment: // mempool.remove_transactions might return errors in situations that are perfectly fine in this context. @@ -112,8 +122,13 @@ impl MiningManager { error!("Error from mempool.remove_transactions: {:?}", err); } }); + debug!( + "Building a new block template failed for {} txs missing outpoint and {} invalid txs", + missing_outpoint, invalid + ) } Err(err) => { + warn!("Building a new block template failed: {}", err); return Err(err)?; } } @@ -121,6 +136,7 @@ impl MiningManager { } pub(crate) fn block_candidate_transactions(&self) -> Vec { + let _sw = Stopwatch::<100>::with_threshold("block_candidate_transactions lock"); self.mempool.read().block_candidate_transactions() } @@ -157,6 +173,7 @@ impl MiningManager { priority: Priority, orphan: Orphan, ) -> MiningManagerResult>> { + let _sw = Stopwatch::<200>::with_threshold("validate_and_insert_mutable_transaction lock"); // read lock on mempool let mut transaction = self.mempool.read().pre_validate_and_populate_transaction(consensus, transaction)?; // no lock on mempool @@ -167,6 +184,7 @@ impl MiningManager { mempool.post_validate_and_insert_transaction(consensus, validation_result, transaction, priority, orphan)? { let unorphaned_transactions = mempool.get_unorphaned_transactions_after_accepted_transaction(&accepted_transaction); + mempool.log_stats(); drop(mempool); // The capacity used here may be exceeded since accepted unorphaned transaction may themselves unorphan other transactions. @@ -186,6 +204,7 @@ impl MiningManager { consensus: &dyn ConsensusApi, mut incoming_transactions: Vec, ) -> Vec> { + let _sw = Stopwatch::<200>::with_threshold("validate_and_insert_unorphaned_transactions lock"); // The capacity used here may be exceeded (see next comment). 
let mut accepted_transactions = Vec::with_capacity(incoming_transactions.len()); // We loop as long as incoming unorphaned transactions do unorphan other transactions when they @@ -201,6 +220,7 @@ impl MiningManager { let mut lower_bound: usize = 0; let mut validation_results = Vec::with_capacity(transactions.len()); while let Some(upper_bound) = self.next_transaction_chunk_upper_bound(&transactions, lower_bound) { + assert!(lower_bound < upper_bound, "the chunk is never empty"); validation_results .extend(validate_mempool_transactions_in_parallel(consensus, &mut transactions[lower_bound..upper_bound])); lower_bound = upper_bound; @@ -234,6 +254,7 @@ impl MiningManager { } }) .collect::>(); + mempool.log_stats(); drop(mempool); } accepted_transactions @@ -251,6 +272,7 @@ impl MiningManager { priority: Priority, orphan: Orphan, ) -> MiningManagerResult>> { + let _sw = Stopwatch::<500>::with_threshold("validate_and_insert_transaction_batch lock"); // The capacity used here may be exceeded since accepted transactions may unorphan other transactions. let mut accepted_transactions: Vec> = Vec::with_capacity(transactions.len()); let mut batch = TransactionsStagger::new(transactions); @@ -298,6 +320,7 @@ impl MiningManager { let mut lower_bound: usize = 0; let mut validation_results = Vec::with_capacity(transactions.len()); while let Some(upper_bound) = self.next_transaction_chunk_upper_bound(&transactions, lower_bound) { + assert!(lower_bound < upper_bound, "the chunk is never empty"); validation_results .extend(validate_mempool_transactions_in_parallel(consensus, &mut transactions[lower_bound..upper_bound])); lower_bound = upper_bound; @@ -406,6 +429,7 @@ impl MiningManager { block_daa_score: u64, block_transactions: &[Transaction], ) -> MiningManagerResult>> { + let _sw = Stopwatch::<500>::with_threshold("handle_new_block_transactions lock"); // TODO: should use tx acceptance data to verify that new block txs are actually accepted into virtual state. 
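// [editorial sketch, not part of the diff] The lock discipline recurring across
// this series, with hypothetical helper names (the mempool sits behind a
// parking_lot RwLock):
//
//     let staged = self.mempool.read().stage(...);   // short read lock
//     let results = validate(consensus, staged);     // no mempool lock held
//     self.mempool.write().commit(results);          // short write lock
//
// The function below still takes a single write lock for the block-tx removal
// phase, then revalidates unorphaned transactions via the same staged pattern.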
// write lock on mempool diff --git a/mining/src/mempool/handle_new_block_transactions.rs b/mining/src/mempool/handle_new_block_transactions.rs index bcda383e0a..427b84986a 100644 --- a/mining/src/mempool/handle_new_block_transactions.rs +++ b/mining/src/mempool/handle_new_block_transactions.rs @@ -7,6 +7,7 @@ use crate::mempool::{ Mempool, }; use kaspa_consensus_core::{api::ConsensusApi, tx::Transaction}; +use kaspa_core::time::Stopwatch; use std::collections::HashSet; impl Mempool { @@ -15,6 +16,7 @@ impl Mempool { block_daa_score: u64, block_transactions: &[Transaction], ) -> RuleResult> { + let _sw = Stopwatch::<400>::with_threshold("handle_new_block_transactions op"); let mut unorphaned_transactions = vec![]; for transaction in block_transactions[1..].iter() { let transaction_id = transaction.id(); diff --git a/mining/src/model/txs_stager.rs b/mining/src/model/txs_stager.rs index 86377596c2..719fe06147 100644 --- a/mining/src/model/txs_stager.rs +++ b/mining/src/model/txs_stager.rs @@ -1,5 +1,6 @@ use super::TransactionIdSet; use kaspa_consensus_core::tx::{Transaction, TransactionId}; +use kaspa_core::time::Stopwatch; pub struct TransactionsStagger> { txs: Vec, @@ -18,6 +19,7 @@ impl> TransactionsStagger { /// Extract and return all independent transactions pub fn stagger(&mut self) -> Option> { + let _sw = Stopwatch::<50>::with_threshold("stagger op"); if self.is_empty() { return None; } From 254a7e82bf5b8daf103cd365a9740a810d925119 Mon Sep 17 00:00:00 2001 From: Tiram <18632023+tiram88@users.noreply.github.com> Date: Mon, 11 Sep 2023 00:43:33 +0300 Subject: [PATCH 28/86] Run expiration of LP txs in a dedicated task --- mining/src/manager.rs | 38 ++++++++++- .../mempool/handle_new_block_transactions.rs | 21 ++++-- mining/src/mempool/mod.rs | 1 + mining/src/mempool/model/transactions_pool.rs | 68 ++++++++----------- protocol/flows/src/flow_context.rs | 25 +++++++ .../flows/src/flowcontext/transactions.rs | 23 ++++++- 6 files changed, 123 insertions(+), 53 deletions(-) diff --git a/mining/src/manager.rs b/mining/src/manager.rs index b35ed9415b..1a299b0b40 100644 --- a/mining/src/manager.rs +++ b/mining/src/manager.rs @@ -439,11 +439,41 @@ impl MiningManager { let accepted_transactions = self.validate_and_insert_unorphaned_transactions(consensus, unorphaned_transactions); // write lock on mempool - self.mempool.write().expire_low_priority_transactions(consensus)?; + self.mempool.write().log_stats(); Ok(accepted_transactions) } + pub fn expire_low_priority_transactions(&self, consensus: &dyn ConsensusApi) { + // very fine-grained write locks on mempool + + // orphan pool + if let Err(err) = self.mempool.write().expire_orphan_low_priority_transactions(consensus) { + warn!("Failed to expire transactions from orphan pool: {}", err); + } + + // accepted transaction cache + self.mempool.write().expire_accepted_transactions(consensus); + + // mempool + let expired_low_priority_transactions = self.mempool.write().collect_expired_low_priority_transactions(consensus); + for chunk in &expired_low_priority_transactions.iter().chunks(24) { + let mut mempool = self.mempool.write(); + chunk.into_iter().for_each(|tx| { + if let Err(err) = mempool.remove_transaction(tx, false, TxRemovalReason::Muted, "") { + warn!("Failed to remove transaction {} from mempool: {}", tx, err); + } + }); + } + match expired_low_priority_transactions.len() { + 0 => {} + 1 => debug!("Removed transaction ({}) {}", TxRemovalReason::Expired, expired_low_priority_transactions[0]), + n => debug!("Removed {} transactions ({}): 
{}...", n, TxRemovalReason::Expired, expired_low_priority_transactions[0]), + } + + self.mempool.write().log_stats(); + } + pub fn revalidate_high_priority_transactions( &self, consensus: &dyn ConsensusApi, @@ -460,9 +490,7 @@ impl MiningManager { debug!("Revalidating high priority transactions found no transactions"); return; } - let _swo = Stopwatch::<50>::with_threshold("revalidate all_transactions_with_priority op"); let transactions = mempool.all_transactions_with_priority(Priority::High); - drop(_swo); drop(mempool); let mut valid: usize = 0; @@ -720,6 +748,10 @@ impl MiningManagerProxy { .await } + pub async fn expire_low_priority_transactions(self, consensus: &ConsensusProxy) { + consensus.clone().spawn_blocking(move |c| self.inner.expire_low_priority_transactions(c)).await; + } + pub async fn revalidate_high_priority_transactions( self, consensus: &ConsensusProxy, diff --git a/mining/src/mempool/handle_new_block_transactions.rs b/mining/src/mempool/handle_new_block_transactions.rs index 427b84986a..42d4702e79 100644 --- a/mining/src/mempool/handle_new_block_transactions.rs +++ b/mining/src/mempool/handle_new_block_transactions.rs @@ -6,7 +6,10 @@ use crate::mempool::{ }, Mempool, }; -use kaspa_consensus_core::{api::ConsensusApi, tx::Transaction}; +use kaspa_consensus_core::{ + api::ConsensusApi, + tx::{Transaction, TransactionId}, +}; use kaspa_core::time::Stopwatch; use std::collections::HashSet; @@ -30,17 +33,21 @@ impl Mempool { self.remove_double_spends(transaction)?; self.orphan_pool.remove_orphan(&transaction_id, false, TxRemovalReason::Accepted, "")?; self.accepted_transactions.add(transaction_id, block_daa_score); - unorphaned_transactions.append(&mut self.get_unorphaned_transactions_after_accepted_transaction(transaction)); + unorphaned_transactions.extend(self.get_unorphaned_transactions_after_accepted_transaction(transaction)); } Ok(unorphaned_transactions) } - pub(crate) fn expire_low_priority_transactions(&mut self, consensus: &dyn ConsensusApi) -> RuleResult<()> { - self.orphan_pool.expire_low_priority_transactions(consensus.get_virtual_daa_score())?; - self.transaction_pool.expire_low_priority_transactions(consensus.get_virtual_daa_score())?; + pub(crate) fn expire_orphan_low_priority_transactions(&mut self, consensus: &dyn ConsensusApi) -> RuleResult<()> { + self.orphan_pool.expire_low_priority_transactions(consensus.get_virtual_daa_score()) + } + + pub(crate) fn expire_accepted_transactions(&mut self, consensus: &dyn ConsensusApi) { self.accepted_transactions.expire(consensus.get_virtual_daa_score()); - self.log_stats(); - Ok(()) + } + + pub(crate) fn collect_expired_low_priority_transactions(&mut self, consensus: &dyn ConsensusApi) -> Vec { + self.transaction_pool.collect_expired_low_priority_transactions(consensus.get_virtual_daa_score()) } fn remove_double_spends(&mut self, transaction: &Transaction) -> RuleResult<()> { diff --git a/mining/src/mempool/mod.rs b/mining/src/mempool/mod.rs index 40581a9e14..bd090a7954 100644 --- a/mining/src/mempool/mod.rs +++ b/mining/src/mempool/mod.rs @@ -132,6 +132,7 @@ impl Mempool { } pub(crate) fn all_transactions_with_priority(&self, priority: Priority) -> Vec { + let _sw = Stopwatch::<50>::with_threshold("all_transactions_with_priority op"); self.transaction_pool.all_transactions_with_priority(priority) } diff --git a/mining/src/mempool/model/transactions_pool.rs b/mining/src/mempool/model/transactions_pool.rs index 6b380d3207..c7cfc1f4c8 100644 --- a/mining/src/mempool/model/transactions_pool.rs +++ 
b/mining/src/mempool/model/transactions_pool.rs @@ -5,7 +5,7 @@ use crate::{ model::{ map::MempoolTransactionCollection, pool::{Pool, TransactionsEdges}, - tx::{MempoolTransaction, TxRemovalReason}, + tx::MempoolTransaction, utxo_set::MempoolUtxoSet, }, tx::Priority, @@ -16,8 +16,7 @@ use kaspa_consensus_core::{ tx::TransactionId, tx::{MutableTransaction, TransactionOutpoint}, }; -use kaspa_core::{debug, time::unix_now, trace, warn}; -use kaspa_utils::iter::IterExtensions; +use kaspa_core::{time::unix_now, trace, warn}; use std::{ collections::{hash_map::Keys, hash_set::Iter}, sync::Arc, @@ -264,6 +263,30 @@ impl TransactionsPool { let parent_ids = self.get_parent_transaction_ids_in_pool(transaction); self.utxo_set.remove_transaction(transaction, &parent_ids) } + + pub(crate) fn collect_expired_low_priority_transactions(&mut self, virtual_daa_score: u64) -> Vec { + let now = unix_now(); + if virtual_daa_score < self.last_expire_scan_daa_score + self.config.transaction_expire_scan_interval_daa_score + || now < self.last_expire_scan_time + self.config.transaction_expire_scan_interval_milliseconds + { + return vec![]; + } + + // Never expire high priority transactions + // Remove all transactions whose added_at_daa_score is older then transaction_expire_interval_daa_score + self.all_transactions + .values() + .filter_map(|x| { + if (x.priority == Priority::Low) + && virtual_daa_score > x.added_at_daa_score + self.config.transaction_expire_interval_daa_score + { + Some(x.id()) + } else { + None + } + }) + .collect() + } } type IterTxId<'a> = Iter<'a, TransactionId>; @@ -301,45 +324,8 @@ impl Pool for TransactionsPool { } fn expire_low_priority_transactions(&mut self, virtual_daa_score: u64) -> RuleResult<()> { - let now = unix_now(); - if virtual_daa_score < self.last_expire_scan_daa_score + self.config.transaction_expire_scan_interval_daa_score - || now < self.last_expire_scan_time + self.config.transaction_expire_scan_interval_milliseconds - { - return Ok(()); - } - - // Never expire high priority transactions - // Remove all transactions whose added_at_daa_score is older then transaction_expire_interval_daa_score - let expired_low_priority_transactions: Vec = self - .all_transactions - .values() - .filter_map(|x| { - if (x.priority == Priority::Low) - && virtual_daa_score > x.added_at_daa_score + self.config.transaction_expire_interval_daa_score - { - Some(x.id()) - } else { - None - } - }) - .collect(); - - for transaction_id in expired_low_priority_transactions.iter() { - self.remove_transaction(transaction_id)?; - } - match expired_low_priority_transactions.len() { - 0 => {} - 1 => debug!("Removed transaction ({}) {}", TxRemovalReason::Expired, expired_low_priority_transactions[0]), - n => debug!( - "Removed {} transactions ({}): {}", - n, - TxRemovalReason::Expired, - expired_low_priority_transactions.iter().reusable_format(", ") - ), - } - self.last_expire_scan_daa_score = virtual_daa_score; - self.last_expire_scan_time = now; + self.last_expire_scan_time = unix_now(); Ok(()) } } diff --git a/protocol/flows/src/flow_context.rs b/protocol/flows/src/flow_context.rs index e703c2fa86..69eff609b6 100644 --- a/protocol/flows/src/flow_context.rs +++ b/protocol/flows/src/flow_context.rs @@ -397,6 +397,22 @@ impl FlowContext { }); } + // TODO: refactor this adding a worker and a scheduler to FlowContext + if self.should_expire_transactions().await { + // Spawn a task expiring concurrently the low priority transactions. 
+ // The TransactionSpread instance ensures at most one expire running at any + // given time. + let mining_manager = self.mining_manager().clone(); + let consensus_clone = consensus.clone(); + let context = self.clone(); + tokio::spawn(async move { + tokio::spawn(async move { + mining_manager.expire_low_priority_transactions(&consensus_clone).await; + }); + context.expire_done().await; + }); + } + self.broadcast_transactions(transactions_to_broadcast).await } @@ -449,6 +465,15 @@ impl FlowContext { self.transactions_spread.write().await.rebroadcast_done(); } + /// Returns true if the time for expiring the mempool low priority transactions has come. + pub async fn should_expire_transactions(&self) -> bool { + self.transactions_spread.write().await.should_expire_transactions() + } + + pub async fn expire_done(&self) { + self.transactions_spread.write().await.expire_done(); + } + /// Add the given transactions IDs to a set of IDs to broadcast. The IDs will be broadcasted to all peers /// within transaction Inv messages. /// diff --git a/protocol/flows/src/flowcontext/transactions.rs b/protocol/flows/src/flowcontext/transactions.rs index f8886c4e89..47ce5ae9d6 100644 --- a/protocol/flows/src/flowcontext/transactions.rs +++ b/protocol/flows/src/flowcontext/transactions.rs @@ -11,6 +11,7 @@ use kaspa_p2p_lib::{ use std::time::{Duration, Instant}; const REBROADCAST_INTERVAL: Duration = Duration::from_secs(30); +const EXPIRE_INTERVAL: Duration = Duration::from_secs(10); const BROADCAST_INTERVAL: Duration = Duration::from_millis(500); pub(crate) const MAX_INV_PER_TX_INV_MSG: usize = 131_072; @@ -18,6 +19,8 @@ pub struct TransactionsSpread { hub: Hub, last_rebroadcast_time: Instant, executing_rebroadcast: bool, + last_expire_time: Instant, + executing_expire: bool, transaction_ids: ProcessQueue, last_broadcast_time: Instant, } @@ -28,14 +31,14 @@ impl TransactionsSpread { hub, last_rebroadcast_time: Instant::now(), executing_rebroadcast: false, + last_expire_time: Instant::now(), + executing_expire: false, transaction_ids: ProcessQueue::new(), last_broadcast_time: Instant::now(), } } /// Returns true if the time for a rebroadcast of the mempool high priority transactions has come. - /// - /// If true, the instant of the call is registered as the last rebroadcast time. pub fn should_rebroadcast_transactions(&mut self) -> bool { if self.executing_rebroadcast || Instant::now() < self.last_rebroadcast_time + REBROADCAST_INTERVAL { return false; @@ -51,6 +54,22 @@ impl TransactionsSpread { } } + /// Returns true if the time for expiring the mempool low priority transactions has come. + pub fn should_expire_transactions(&mut self) -> bool { + if self.executing_expire || Instant::now() < self.last_expire_time + EXPIRE_INTERVAL { + return false; + } + self.executing_expire = true; + true + } + + pub fn expire_done(&mut self) { + if self.executing_expire { + self.executing_expire = false; + self.last_expire_time = Instant::now(); + } + } + /// Add the given transactions IDs to a set of IDs to broadcast. The IDs will be broadcasted to all peers /// within transaction Inv messages. 
/// From 375f78df1766ab4b24123821a2f2e86333ce279d Mon Sep 17 00:00:00 2001 From: Michael Sutton Date: Mon, 11 Sep 2023 17:11:38 +0300 Subject: [PATCH 29/86] remove some stopwatch calls which were timing locks --- mining/src/manager.rs | 7 ------- 1 file changed, 7 deletions(-) diff --git a/mining/src/manager.rs b/mining/src/manager.rs index 1a299b0b40..8c3647b83c 100644 --- a/mining/src/manager.rs +++ b/mining/src/manager.rs @@ -136,7 +136,6 @@ impl MiningManager { } pub(crate) fn block_candidate_transactions(&self) -> Vec { - let _sw = Stopwatch::<100>::with_threshold("block_candidate_transactions lock"); self.mempool.read().block_candidate_transactions() } @@ -173,7 +172,6 @@ impl MiningManager { priority: Priority, orphan: Orphan, ) -> MiningManagerResult>> { - let _sw = Stopwatch::<200>::with_threshold("validate_and_insert_mutable_transaction lock"); // read lock on mempool let mut transaction = self.mempool.read().pre_validate_and_populate_transaction(consensus, transaction)?; // no lock on mempool @@ -204,7 +202,6 @@ impl MiningManager { consensus: &dyn ConsensusApi, mut incoming_transactions: Vec, ) -> Vec> { - let _sw = Stopwatch::<200>::with_threshold("validate_and_insert_unorphaned_transactions lock"); // The capacity used here may be exceeded (see next comment). let mut accepted_transactions = Vec::with_capacity(incoming_transactions.len()); // We loop as long as incoming unorphaned transactions do unorphan other transactions when they @@ -272,7 +269,6 @@ impl MiningManager { priority: Priority, orphan: Orphan, ) -> MiningManagerResult>> { - let _sw = Stopwatch::<500>::with_threshold("validate_and_insert_transaction_batch lock"); // The capacity used here may be exceeded since accepted transactions may unorphan other transactions. let mut accepted_transactions: Vec> = Vec::with_capacity(transactions.len()); let mut batch = TransactionsStagger::new(transactions); @@ -429,7 +425,6 @@ impl MiningManager { block_daa_score: u64, block_transactions: &[Transaction], ) -> MiningManagerResult>> { - let _sw = Stopwatch::<500>::with_threshold("handle_new_block_transactions lock"); // TODO: should use tx acceptance data to verify that new block txs are actually accepted into virtual state. // write lock on mempool @@ -479,8 +474,6 @@ impl MiningManager { consensus: &dyn ConsensusApi, transaction_ids_sender: UnboundedSender>, ) { - let _sw = Stopwatch::<1000>::with_threshold("revalidate_high_priority_transactions lock"); - // read lock on mempool // Prepare a vector with clones of high priority transactions found in the mempool let mempool = self.mempool.read(); From 004beb61b6ad128b58f2b6103ff974b739fa3783 Mon Sep 17 00:00:00 2001 From: Michael Sutton Date: Mon, 11 Sep 2023 17:33:10 +0300 Subject: [PATCH 30/86] crucial: fix exploding complexity of `handle_new_block_transactions`/`remove_transaction` --- mining/src/mempool/model/pool.rs | 16 +++++++++++++++- mining/src/mempool/remove_transaction.rs | 8 ++++++-- 2 files changed, 21 insertions(+), 3 deletions(-) diff --git a/mining/src/mempool/model/pool.rs b/mining/src/mempool/model/pool.rs index f587ee5fa7..441fb0fe09 100644 --- a/mining/src/mempool/model/pool.rs +++ b/mining/src/mempool/model/pool.rs @@ -64,6 +64,9 @@ pub(crate) trait Pool { /// Returns the ids of all transactions being directly and indirectly chained to `transaction_id` /// and existing in the pool. 
+ /// + /// NOTE: this operation's complexity might become linear in the size of the mempool if the mempool + /// contains deeply chained transactions fn get_redeemer_ids_in_pool(&self, transaction_id: &TransactionId) -> TransactionIdSet { let mut redeemers = TransactionIdSet::new(); if let Some(transaction) = self.get(transaction_id) { @@ -72,7 +75,7 @@ pub(crate) trait Pool { if let Some(chains) = self.chained().get(&transaction.id()) { for redeemer_id in chains { if let Some(redeemer) = self.get(redeemer_id) { - // Do no revisit transactions + // Do not revisit transactions if redeemers.insert(*redeemer_id) { stack.push(redeemer); } @@ -84,6 +87,17 @@ pub(crate) trait Pool { redeemers } + /// Returns the ids of all transactions which directly chained to `transaction_id` + /// and exist in the pool. + fn get_direct_redeemer_ids_in_pool(&self, transaction_id: &TransactionId) -> TransactionIdSet { + if let Some(transaction) = self.get(transaction_id) { + if let Some(chains) = self.chained().get(&transaction.id()) { + return chains.clone(); + } + } + Default::default() + } + /// Returns a vector with clones of all the transactions in the pool. fn get_all_transactions(&self) -> Vec { self.all().values().map(|x| x.mtx.clone()).collect() diff --git a/mining/src/mempool/remove_transaction.rs b/mining/src/mempool/remove_transaction.rs index 3e51107083..29d507e1d2 100644 --- a/mining/src/mempool/remove_transaction.rs +++ b/mining/src/mempool/remove_transaction.rs @@ -27,11 +27,15 @@ impl Mempool { } let mut removed_transactions = vec![*transaction_id]; - let redeemers = self.transaction_pool.get_redeemer_ids_in_pool(transaction_id); if remove_redeemers { + let redeemers = self.transaction_pool.get_redeemer_ids_in_pool(transaction_id); removed_transactions.extend(redeemers); } else { - redeemers.iter().for_each(|x| { + // Note: when `remove_redeemers=false` we avoid calling `get_redeemer_ids_in_pool` which might + // have linear complexity (in mempool size) in the worst-case. 
Instead, we only obtain the direct + // tx children since only for these txs we need to update the parent/chain relation to the removed tx + let direct_redeemers = self.transaction_pool.get_direct_redeemer_ids_in_pool(transaction_id); + direct_redeemers.iter().for_each(|x| { self.transaction_pool.remove_parent_chained_relation_in_pool(x, transaction_id); }); } From f8d440108a283b452b614b993c93cc62bfdc4308 Mon Sep 17 00:00:00 2001 From: Michael Sutton Date: Mon, 11 Sep 2023 17:36:23 +0300 Subject: [PATCH 31/86] fixes in `on_new_block` --- protocol/flows/src/flow_context.rs | 35 +++++++++++++----------------- 1 file changed, 15 insertions(+), 20 deletions(-) diff --git a/protocol/flows/src/flow_context.rs b/protocol/flows/src/flow_context.rs index 69eff609b6..ffff9bfa71 100644 --- a/protocol/flows/src/flow_context.rs +++ b/protocol/flows/src/flow_context.rs @@ -12,7 +12,6 @@ use kaspa_consensus_notify::{ root::ConsensusNotificationRoot, }; use kaspa_consensusmanager::{ConsensusInstance, ConsensusManager, ConsensusProxy}; -use kaspa_core::time::Stopwatch; use kaspa_core::{ debug, info, kaspad_env::{name, version}, @@ -354,14 +353,13 @@ impl FlowContext { /// /// _GO-KASPAD: OnNewBlock + broadcastTransactionsAfterBlockAdded_ pub async fn on_new_block(&self, consensus: &ConsensusProxy, block: Block) -> Result<(), ProtocolError> { - let _sw = Stopwatch::<500>::with_threshold("on_new_block lock"); let hash = block.hash(); let mut blocks = self.unorphan_blocks(consensus, hash).await; // Process blocks in topological order blocks.sort_by(|a, b| a.header.blue_work.partial_cmp(&b.header.blue_work).unwrap()); // Use a ProcessQueue so we get rid of duplicates let mut transactions_to_broadcast = ProcessQueue::new(); - for block in blocks.into_iter().chain(once(block)) { + for block in once(block).chain(blocks.into_iter()) { transactions_to_broadcast.enqueue_chunk( self.mining_manager() .clone() @@ -378,38 +376,36 @@ impl FlowContext { } // TODO: refactor this adding a worker and a scheduler to FlowContext - if self.should_rebroadcast_transactions().await { - // Spawn a task revalidating concurrently the high priority transactions. - // The TransactionSpread instance ensures at most one rebroadcast running at any + if self.should_expire_transactions().await { + // Spawn a task expiring concurrently the low priority transactions. + // The TransactionSpread instance ensures at most one expire running at any // given time. let mining_manager = self.mining_manager().clone(); let consensus_clone = consensus.clone(); let context = self.clone(); tokio::spawn(async move { - let (tx, mut rx) = unbounded_channel(); - tokio::spawn(async move { - mining_manager.revalidate_high_priority_transactions(&consensus_clone, tx).await; - }); - while let Some(transactions) = rx.recv().await { - let _ = context.broadcast_transactions(transactions).await; - } - context.rebroadcast_done().await; + mining_manager.expire_low_priority_transactions(&consensus_clone).await; + context.expire_done().await; }); } // TODO: refactor this adding a worker and a scheduler to FlowContext - if self.should_expire_transactions().await { - // Spawn a task expiring concurrently the low priority transactions. - // The TransactionSpread instance ensures at most one expire running at any + if self.should_rebroadcast_transactions().await { + // Spawn a task revalidating concurrently the high priority transactions. + // The TransactionSpread instance ensures at most one rebroadcast running at any // given time. 
let mining_manager = self.mining_manager().clone(); let consensus_clone = consensus.clone(); let context = self.clone(); tokio::spawn(async move { + let (tx, mut rx) = unbounded_channel(); tokio::spawn(async move { - mining_manager.expire_low_priority_transactions(&consensus_clone).await; + mining_manager.revalidate_high_priority_transactions(&consensus_clone, tx).await; }); - context.expire_done().await; + while let Some(transactions) = rx.recv().await { + let _ = context.broadcast_transactions(transactions).await; + } + context.rebroadcast_done().await; }); } @@ -483,7 +479,6 @@ impl FlowContext { &self, transaction_ids: I, ) -> Result<(), ProtocolError> { - let _sw = Stopwatch::<100>::with_threshold("broadcast_transactions lock"); self.transactions_spread.write().await.broadcast_transactions(transaction_ids).await } } From c32b660418a8974201656d93fcfe5236559957a6 Mon Sep 17 00:00:00 2001 From: Michael Sutton Date: Mon, 11 Sep 2023 20:00:32 +0300 Subject: [PATCH 32/86] refactor block template cache into `Inner` --- mining/src/cache.rs | 24 +++++++++++++++++++++--- mining/src/manager.rs | 6 +++--- 2 files changed, 24 insertions(+), 6 deletions(-) diff --git a/mining/src/cache.rs b/mining/src/cache.rs index f56c12659a..0ec93f7250 100644 --- a/mining/src/cache.rs +++ b/mining/src/cache.rs @@ -1,20 +1,23 @@ use kaspa_consensus_core::block::BlockTemplate; use kaspa_core::time::unix_now; +use parking_lot::{Mutex, MutexGuard}; use std::sync::Arc; /// CACHE_LIFETIME indicates the default duration in milliseconds after which the cached data expires. const DEFAULT_CACHE_LIFETIME: u64 = 1_000; -pub(crate) struct BlockTemplateCache { - /// Time, in milliseconds, when the cache was last updated +pub(crate) struct Inner { + /// Time, in milliseconds, at which the cache was last updated last_update_time: u64, + + /// The optional template block_template: Option>, /// Duration in milliseconds after which the cached data expires cache_lifetime: u64, } -impl BlockTemplateCache { +impl Inner { pub(crate) fn new(cache_lifetime: Option) -> Self { let cache_lifetime = cache_lifetime.unwrap_or(DEFAULT_CACHE_LIFETIME); Self { last_update_time: 0, block_template: None, cache_lifetime } @@ -22,6 +25,7 @@ impl BlockTemplateCache { pub(crate) fn clear(&mut self) { // The cache timer is reset to 0 so its lifetime is expired. 
+ // TODO self.last_update_time = 0; self.block_template = None; } @@ -42,3 +46,17 @@ impl BlockTemplateCache { self.block_template.as_ref().unwrap().clone() } } + +pub(crate) struct BlockTemplateCache { + inner: Mutex, +} + +impl BlockTemplateCache { + pub(crate) fn new(cache_lifetime: Option) -> Self { + Self { inner: Mutex::new(Inner::new(cache_lifetime)) } + } + + pub(crate) fn lock(&self) -> MutexGuard { + self.inner.lock() + } +} diff --git a/mining/src/manager.rs b/mining/src/manager.rs index 8c3647b83c..d5069cc49c 100644 --- a/mining/src/manager.rs +++ b/mining/src/manager.rs @@ -26,13 +26,13 @@ use kaspa_consensus_core::{ use kaspa_consensusmanager::{spawn_blocking, ConsensusProxy}; use kaspa_core::{debug, error, info, time::Stopwatch, warn}; use kaspa_mining_errors::mempool::RuleError; -use parking_lot::{Mutex, RwLock}; +use parking_lot::RwLock; use std::sync::Arc; use tokio::sync::mpsc::UnboundedSender; pub struct MiningManager { block_template_builder: BlockTemplateBuilder, - block_template_cache: Mutex, + block_template_cache: BlockTemplateCache, pub(crate) mempool: RwLock, } @@ -50,7 +50,7 @@ impl MiningManager { pub(crate) fn with_config(config: Config, cache_lifetime: Option) -> Self { let block_template_builder = BlockTemplateBuilder::new(config.maximum_mass_per_block); let mempool = RwLock::new(Mempool::new(config)); - let block_template_cache = Mutex::new(BlockTemplateCache::new(cache_lifetime)); + let block_template_cache = BlockTemplateCache::new(cache_lifetime); Self { block_template_builder, block_template_cache, mempool } } From 625cf1245ec7e58146f3cef47c0da02124191bc8 Mon Sep 17 00:00:00 2001 From: Michael Sutton Date: Mon, 11 Sep 2023 20:17:38 +0300 Subject: [PATCH 33/86] make `block_template_cache` a non-blocking call (never blocks) --- mining/src/cache.rs | 31 +++++++++++++++++++++++-------- mining/src/manager.rs | 2 +- 2 files changed, 24 insertions(+), 9 deletions(-) diff --git a/mining/src/cache.rs b/mining/src/cache.rs index 0ec93f7250..b6fae4f18e 100644 --- a/mining/src/cache.rs +++ b/mining/src/cache.rs @@ -1,7 +1,10 @@ use kaspa_consensus_core::block::BlockTemplate; use kaspa_core::time::unix_now; use parking_lot::{Mutex, MutexGuard}; -use std::sync::Arc; +use std::sync::{ + atomic::{AtomicBool, Ordering::SeqCst}, + Arc, +}; /// CACHE_LIFETIME indicates the default duration in milliseconds after which the cached data expires. const DEFAULT_CACHE_LIFETIME: u64 = 1_000; @@ -23,9 +26,8 @@ impl Inner { Self { last_update_time: 0, block_template: None, cache_lifetime } } - pub(crate) fn clear(&mut self) { + fn clear(&mut self) { // The cache timer is reset to 0 so its lifetime is expired. 
- // TODO self.last_update_time = 0; self.block_template = None; } @@ -36,27 +38,40 @@ impl Inner { if now < self.last_update_time || now - self.last_update_time > self.cache_lifetime { None } else { - Some(self.block_template.as_ref().unwrap().clone()) + Some(self.block_template.as_ref().expect("last_update_time would be 0").clone()) } } pub(crate) fn set_immutable_cached_template(&mut self, block_template: BlockTemplate) -> Arc { self.last_update_time = unix_now(); - self.block_template = Some(Arc::new(block_template)); - self.block_template.as_ref().unwrap().clone() + let block_template = Arc::new(block_template); + self.block_template = Some(block_template.clone()); + block_template } } pub(crate) struct BlockTemplateCache { inner: Mutex, + clear_flag: AtomicBool, } impl BlockTemplateCache { pub(crate) fn new(cache_lifetime: Option) -> Self { - Self { inner: Mutex::new(Inner::new(cache_lifetime)) } + Self { inner: Mutex::new(Inner::new(cache_lifetime)), clear_flag: AtomicBool::new(false) } + } + + pub(crate) fn clear(&self) { + // We avoid blocking on the mutex for clear but rather signal to the next + // thread acquiring the lock to clear the template + self.clear_flag.store(true, SeqCst) } pub(crate) fn lock(&self) -> MutexGuard { - self.inner.lock() + let mut guard = self.inner.lock(); + if self.clear_flag.swap(false, SeqCst) { + // If clear was signaled, perform the actual clear + guard.clear(); + } + guard } } diff --git a/mining/src/manager.rs b/mining/src/manager.rs index d5069cc49c..363be24fbf 100644 --- a/mining/src/manager.rs +++ b/mining/src/manager.rs @@ -141,7 +141,7 @@ impl MiningManager { /// Clears the block template cache, forcing the next call to get_block_template to build a new block template. pub fn clear_block_template(&self) { - self.block_template_cache.lock().clear(); + self.block_template_cache.clear(); } #[cfg(test)] From 205e934cfb8b7f5fa5aff7cb46b4ce64d7129670 Mon Sep 17 00:00:00 2001 From: Tiram <18632023+tiram88@users.noreply.github.com> Date: Tue, 12 Sep 2023 15:17:34 +0300 Subject: [PATCH 34/86] Log build_block_template retries --- mining/src/manager.rs | 21 ++++++++++++++++++++- 1 file changed, 20 insertions(+), 1 deletion(-) diff --git a/mining/src/manager.rs b/mining/src/manager.rs index 363be24fbf..a4892220a0 100644 --- a/mining/src/manager.rs +++ b/mining/src/manager.rs @@ -78,12 +78,30 @@ impl MiningManager { // mempool.BlockCandidateTransactions and mempool.RemoveTransactions here. // We remove recursion seen in blockTemplateBuilder.BuildBlockTemplate here. 
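The deferred-clear mechanism introduced in PATCH 33 is a reusable pattern: the clearing side only flips an atomic flag and never contends on the mutex, while the next thread that acquires the lock applies the clear. A minimal standalone sketch of the same idea, assuming the parking_lot crate; the type and field names here are illustrative, not part of the patch:

use parking_lot::{Mutex, MutexGuard};
use std::sync::atomic::{AtomicBool, Ordering::SeqCst};

pub struct DeferredClearCache<T> {
    inner: Mutex<Option<T>>,
    clear_flag: AtomicBool,
}

impl<T> DeferredClearCache<T> {
    pub fn new() -> Self {
        Self { inner: Mutex::new(None), clear_flag: AtomicBool::new(false) }
    }

    /// Never blocks: only marks the cached value as stale.
    pub fn clear(&self) {
        self.clear_flag.store(true, SeqCst);
    }

    /// The next caller taking the lock applies the deferred clear.
    pub fn lock(&self) -> MutexGuard<'_, Option<T>> {
        let mut guard = self.inner.lock();
        // Swapping (rather than loading) the flag ensures each signaled clear is applied exactly once
        if self.clear_flag.swap(false, SeqCst) {
            *guard = None;
        }
        guard
    }
}

Because the swap happens under the lock, concurrent clear() calls coalesce into a single reset performed by whichever lock holder comes next.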
debug!("Building a new block template..."); + let mut retries: usize = 0; loop { let transactions = self.block_candidate_transactions(); match self.block_template_builder.build_block_template(consensus, miner_data, transactions) { Ok(block_template) => { let block_template = cache_lock.set_immutable_cached_template(block_template); - debug!("Built a new block template with {} transactions", block_template.block.transactions.len()); + match retries { + 0 => { + debug!("Built a new block template with {} transactions", block_template.block.transactions.len()); + } + 1 => { + debug!( + "Built a new block template with {} transactions after one retry", + block_template.block.transactions.len() + ); + } + n => { + debug!( + "Built a new block template with {} transactions after {} retries", + block_template.block.transactions.len(), + n + ); + } + } return Ok(block_template.as_ref().clone()); } Err(BuilderError::ConsensusError(BlockRuleError::InvalidTransactionsInNewBlock(invalid_transactions))) => { @@ -132,6 +150,7 @@ impl MiningManager { return Err(err)?; } } + retries += 1; } } From 674481f3ea58585655c3a2a98bc2d4b7faddc5dc Mon Sep 17 00:00:00 2001 From: Tiram <18632023+tiram88@users.noreply.github.com> Date: Tue, 12 Sep 2023 19:00:39 +0300 Subject: [PATCH 35/86] While revalidating HP txs, only recheck transaction entries --- consensus/core/src/api/mod.rs | 12 ++++- consensus/src/consensus/mod.rs | 13 +++++- .../pipeline/virtual_processor/processor.rs | 44 +++++++++++++------ .../virtual_processor/utxo_validation.rs | 17 +++++-- mining/src/manager.rs | 8 ++-- .../populate_entries_and_try_validate.rs | 14 +++--- mining/src/testutils/consensus_mock.rs | 8 +++- 7 files changed, 86 insertions(+), 30 deletions(-) diff --git a/consensus/core/src/api/mod.rs b/consensus/core/src/api/mod.rs index 6ef00b8deb..316bfd3f5a 100644 --- a/consensus/core/src/api/mod.rs +++ b/consensus/core/src/api/mod.rs @@ -41,7 +41,7 @@ pub trait ConsensusApi: Send + Sync { /// Populates the mempool transaction with maximally found UTXO entry data and proceeds to full transaction /// validation if all are found. If validation is successful, also [`transaction.calculated_fee`] is expected to be populated. - fn validate_mempool_transaction_and_populate(&self, transaction: &mut MutableTransaction) -> TxResult<()> { + fn validate_mempool_transaction(&self, transaction: &mut MutableTransaction) -> TxResult<()> { unimplemented!() } @@ -51,6 +51,16 @@ pub trait ConsensusApi: Send + Sync { unimplemented!() } + /// Populates the mempool transaction with maximally found UTXO entry data. + fn populate_mempool_transaction(&self, transaction: &mut MutableTransaction) -> TxResult<()> { + unimplemented!() + } + + /// Populates the mempool transactions with maximally found UTXO entry data. 
+ fn populate_mempool_transactions_in_parallel(&self, transactions: &mut [MutableTransaction]) -> Vec> { + unimplemented!() + } + fn calculate_transaction_mass(&self, transaction: &Transaction) -> u64 { unimplemented!() } diff --git a/consensus/src/consensus/mod.rs b/consensus/src/consensus/mod.rs index 7c2a6eac3a..673a7823bb 100644 --- a/consensus/src/consensus/mod.rs +++ b/consensus/src/consensus/mod.rs @@ -367,8 +367,8 @@ impl ConsensusApi for Consensus { Box::pin(result) } - fn validate_mempool_transaction_and_populate(&self, transaction: &mut MutableTransaction) -> TxResult<()> { - self.virtual_processor.validate_mempool_transaction_and_populate(transaction)?; + fn validate_mempool_transaction(&self, transaction: &mut MutableTransaction) -> TxResult<()> { + self.virtual_processor.validate_mempool_transaction(transaction)?; Ok(()) } @@ -376,6 +376,15 @@ impl ConsensusApi for Consensus { self.virtual_processor.validate_mempool_transactions_in_parallel(transactions) } + fn populate_mempool_transaction(&self, transaction: &mut MutableTransaction) -> TxResult<()> { + self.virtual_processor.populate_mempool_transaction(transaction)?; + Ok(()) + } + + fn populate_mempool_transactions_in_parallel(&self, transactions: &mut [MutableTransaction]) -> Vec> { + self.virtual_processor.populate_mempool_transactions_in_parallel(transactions) + } + fn calculate_transaction_mass(&self, transaction: &Transaction) -> u64 { self.services.mass_calculator.calc_tx_mass(transaction) } diff --git a/consensus/src/pipeline/virtual_processor/processor.rs b/consensus/src/pipeline/virtual_processor/processor.rs index 7d182f91af..499103a059 100644 --- a/consensus/src/pipeline/virtual_processor/processor.rs +++ b/consensus/src/pipeline/virtual_processor/processor.rs @@ -708,7 +708,7 @@ impl VirtualStateProcessor { (virtual_parents, ghostdag_data) } - fn validate_mempool_transaction_and_populate_impl( + fn validate_mempool_transaction_impl( &self, mutable_tx: &mut MutableTransaction, virtual_utxo_view: &impl UtxoView, @@ -716,23 +716,18 @@ impl VirtualStateProcessor { virtual_past_median_time: u64, ) -> TxResult<()> { self.transaction_validator.validate_tx_in_isolation(&mutable_tx.tx)?; - self.transaction_validator.utxo_free_tx_validation(&mutable_tx.tx, virtual_daa_score, virtual_past_median_time)?; self.validate_mempool_transaction_in_utxo_context(mutable_tx, virtual_utxo_view, virtual_daa_score)?; - Ok(()) } - pub fn validate_mempool_transaction_and_populate(&self, mutable_tx: &mut MutableTransaction) -> TxResult<()> { - self.transaction_validator.validate_tx_in_isolation(&mutable_tx.tx)?; - + pub fn validate_mempool_transaction(&self, mutable_tx: &mut MutableTransaction) -> TxResult<()> { let virtual_read = self.virtual_stores.read(); let virtual_state = virtual_read.state.get().unwrap(); let virtual_utxo_view = &virtual_read.utxo_set; let virtual_daa_score = virtual_state.daa_score; let virtual_past_median_time = virtual_state.past_median_time; - - self.validate_mempool_transaction_and_populate_impl(mutable_tx, virtual_utxo_view, virtual_daa_score, virtual_past_median_time) + self.validate_mempool_transaction_impl(mutable_tx, virtual_utxo_view, virtual_daa_score, virtual_past_median_time) } pub fn validate_mempool_transactions_in_parallel(&self, mutable_txs: &mut [MutableTransaction]) -> Vec> { @@ -746,17 +741,38 @@ impl VirtualStateProcessor { mutable_txs .par_iter_mut() .map(|mtx| { - self.validate_mempool_transaction_and_populate_impl( - mtx, - &virtual_utxo_view, - virtual_daa_score, - 
virtual_past_median_time, - ) + self.validate_mempool_transaction_impl(mtx, &virtual_utxo_view, virtual_daa_score, virtual_past_median_time) }) .collect::>>() }) } + fn populate_mempool_transaction_impl( + &self, + mutable_tx: &mut MutableTransaction, + virtual_utxo_view: &impl UtxoView, + ) -> TxResult<()> { + self.populate_mempool_transaction_in_utxo_context(mutable_tx, virtual_utxo_view)?; + Ok(()) + } + + pub fn populate_mempool_transaction(&self, mutable_tx: &mut MutableTransaction) -> TxResult<()> { + let virtual_read = self.virtual_stores.read(); + let virtual_utxo_view = &virtual_read.utxo_set; + self.populate_mempool_transaction_impl(mutable_tx, virtual_utxo_view) + } + + pub fn populate_mempool_transactions_in_parallel(&self, mutable_txs: &mut [MutableTransaction]) -> Vec> { + let virtual_read = self.virtual_stores.read(); + let virtual_utxo_view = &virtual_read.utxo_set; + self.thread_pool.install(|| { + mutable_txs + .par_iter_mut() + .map(|mtx| self.populate_mempool_transaction_impl(mtx, &virtual_utxo_view)) + .collect::>>() + }) + } + fn validate_block_template_transaction( &self, tx: &Transaction, diff --git a/consensus/src/pipeline/virtual_processor/utxo_validation.rs b/consensus/src/pipeline/virtual_processor/utxo_validation.rs index 9956ab88e8..d1e0bfcf4f 100644 --- a/consensus/src/pipeline/virtual_processor/utxo_validation.rs +++ b/consensus/src/pipeline/virtual_processor/utxo_validation.rs @@ -229,12 +229,11 @@ impl VirtualStateProcessor { } } - /// Populates the mempool transaction with maximally found UTXO entry data and proceeds to validation if all found - pub(super) fn validate_mempool_transaction_in_utxo_context( + /// Populates the mempool transaction with maximally found UTXO entry data + pub(crate) fn populate_mempool_transaction_in_utxo_context( &self, mutable_tx: &mut MutableTransaction, utxo_view: &impl UtxoView, - pov_daa_score: u64, ) -> TxResult<()> { let mut has_missing_outpoints = false; for i in 0..mutable_tx.tx.inputs.len() { @@ -253,6 +252,18 @@ impl VirtualStateProcessor { if has_missing_outpoints { return Err(TxRuleError::MissingTxOutpoints); } + Ok(()) + } + + /// Populates the mempool transaction with maximally found UTXO entry data and proceeds to validation if all found + pub(super) fn validate_mempool_transaction_in_utxo_context( + &self, + mutable_tx: &mut MutableTransaction, + utxo_view: &impl UtxoView, + pov_daa_score: u64, + ) -> TxResult<()> { + self.populate_mempool_transaction_in_utxo_context(mutable_tx, utxo_view)?; + // At this point we know all UTXO entries are populated, so we can safely pass the tx as verifiable let calculated_fee = self.transaction_validator.validate_populated_transaction_and_get_fee(&mutable_tx.as_verifiable(), pov_daa_score)?; diff --git a/mining/src/manager.rs b/mining/src/manager.rs index a4892220a0..fdc49c5e7e 100644 --- a/mining/src/manager.rs +++ b/mining/src/manager.rs @@ -5,7 +5,9 @@ use crate::{ mempool::{ config::Config, model::tx::{MempoolTransaction, TxRemovalReason}, - populate_entries_and_try_validate::{validate_mempool_transaction_and_populate, validate_mempool_transactions_in_parallel}, + populate_entries_and_try_validate::{ + populate_mempool_transactions_in_parallel, validate_mempool_transaction, validate_mempool_transactions_in_parallel, + }, tx::{Orphan, Priority}, Mempool, }, @@ -194,7 +196,7 @@ impl MiningManager { // read lock on mempool let mut transaction = self.mempool.read().pre_validate_and_populate_transaction(consensus, transaction)?; // no lock on mempool - let validation_result = 
validate_mempool_transaction_and_populate(consensus, &mut transaction); + let validation_result = validate_mempool_transaction(consensus, &mut transaction); // write lock on mempool let mut mempool = self.mempool.write(); if let Some(accepted_transaction) = @@ -558,7 +560,7 @@ impl MiningManager { assert!(lower_bound < upper_bound, "the chunk is never empty"); let _swo = Stopwatch::<60>::with_threshold("revalidate validate_mempool_transactions_in_parallel op"); validation_results - .extend(validate_mempool_transactions_in_parallel(consensus, &mut transactions[lower_bound..upper_bound])); + .extend(populate_mempool_transactions_in_parallel(consensus, &mut transactions[lower_bound..upper_bound])); drop(_swo); lower_bound = upper_bound; } diff --git a/mining/src/mempool/populate_entries_and_try_validate.rs b/mining/src/mempool/populate_entries_and_try_validate.rs index 1626bd3eb8..0c0dcf9a1e 100644 --- a/mining/src/mempool/populate_entries_and_try_validate.rs +++ b/mining/src/mempool/populate_entries_and_try_validate.rs @@ -14,11 +14,8 @@ impl Mempool { } } -pub(crate) fn validate_mempool_transaction_and_populate( - consensus: &dyn ConsensusApi, - transaction: &mut MutableTransaction, -) -> RuleResult<()> { - Ok(consensus.validate_mempool_transaction_and_populate(transaction)?) +pub(crate) fn validate_mempool_transaction(consensus: &dyn ConsensusApi, transaction: &mut MutableTransaction) -> RuleResult<()> { + Ok(consensus.validate_mempool_transaction(transaction)?) } pub(crate) fn validate_mempool_transactions_in_parallel( @@ -27,3 +24,10 @@ pub(crate) fn validate_mempool_transactions_in_parallel( ) -> Vec> { consensus.validate_mempool_transactions_in_parallel(transactions).into_iter().map(|x| x.map_err(RuleError::from)).collect() } + +pub(crate) fn populate_mempool_transactions_in_parallel( + consensus: &dyn ConsensusApi, + transactions: &mut [MutableTransaction], +) -> Vec> { + consensus.populate_mempool_transactions_in_parallel(transactions).into_iter().map(|x| x.map_err(RuleError::from)).collect() +} diff --git a/mining/src/testutils/consensus_mock.rs b/mining/src/testutils/consensus_mock.rs index 18b5fb49b5..7976bd1f01 100644 --- a/mining/src/testutils/consensus_mock.rs +++ b/mining/src/testutils/consensus_mock.rs @@ -97,7 +97,7 @@ impl ConsensusApi for ConsensusMock { Ok(BlockTemplate::new(mutable_block, miner_data, coinbase.has_red_reward, now, 0)) } - fn validate_mempool_transaction_and_populate(&self, mutable_tx: &mut MutableTransaction) -> TxResult<()> { + fn validate_mempool_transaction(&self, mutable_tx: &mut MutableTransaction) -> TxResult<()> { // If a predefined status was registered to simulate an error, return it right away if let Some(status) = self.statuses.read().get(&mutable_tx.id()) { if status.is_err() { @@ -130,7 +130,11 @@ impl ConsensusApi for ConsensusMock { } fn validate_mempool_transactions_in_parallel(&self, transactions: &mut [MutableTransaction]) -> Vec> { - transactions.iter_mut().map(|x| self.validate_mempool_transaction_and_populate(x)).collect() + transactions.iter_mut().map(|x| self.validate_mempool_transaction(x)).collect() + } + + fn populate_mempool_transactions_in_parallel(&self, transactions: &mut [MutableTransaction]) -> Vec> { + transactions.iter_mut().map(|x| self.validate_mempool_transaction(x)).collect() } fn calculate_transaction_mass(&self, transaction: &Transaction) -> u64 { From 8420832ab2663ec4036b3c9588f18afec69354fb Mon Sep 17 00:00:00 2001 From: Tiram <18632023+tiram88@users.noreply.github.com> Date: Tue, 12 Sep 2023 20:10:02 +0300 
Subject: [PATCH 36/86] Fix accepted count during revalidation --- mining/src/manager.rs | 19 ++++++++----------- 1 file changed, 8 insertions(+), 11 deletions(-) diff --git a/mining/src/manager.rs b/mining/src/manager.rs index fdc49c5e7e..d41089c057 100644 --- a/mining/src/manager.rs +++ b/mining/src/manager.rs @@ -514,8 +514,7 @@ impl MiningManager { let mut invalid: usize = 0; // We process the transactions by level of dependency inside the batch. - // Doing so allows to remove all chained dependencies of rejected transactions before actually trying - // to revalidate those, saving potentially a lot of computing resources. + // Doing so allows to remove all chained dependencies of rejected transactions. let mut batch = TransactionsStagger::new(transactions); while let Some(transactions) = batch.stagger() { if transactions.is_empty() { @@ -534,15 +533,13 @@ impl MiningManager { .into_iter() .filter_map(|mut x| { let transaction_id = x.id(); - if mempool.has_transaction(&transaction_id, true, false) { - if mempool.has_accepted_transaction(&transaction_id) { - accepted += 1; - None - } else { - x.clear_entries(); - mempool.populate_mempool_entries(&mut x); - Some(x) - } + if mempool.has_accepted_transaction(&transaction_id) { + accepted += 1; + None + } else if mempool.has_transaction(&transaction_id, true, false) { + x.clear_entries(); + mempool.populate_mempool_entries(&mut x); + Some(x) } else { other += 1; None From 78fefb3c3d289f124e1b77c08fb465920973a812 Mon Sep 17 00:00:00 2001 From: Michael Sutton Date: Wed, 13 Sep 2023 12:31:01 +0300 Subject: [PATCH 37/86] mempool bmk: use client pools + various improvements --- core/src/time.rs | 2 +- testing/integration/src/common/client_pool.rs | 52 +++++++ testing/integration/src/common/daemon.rs | 20 +++ testing/integration/src/common/mod.rs | 1 + testing/integration/src/mempool_benchmarks.rs | 145 +++++++++++++----- utils/src/channel.rs | 7 +- 6 files changed, 185 insertions(+), 42 deletions(-) create mode 100644 testing/integration/src/common/client_pool.rs diff --git a/core/src/time.rs b/core/src/time.rs index 7b377f2341..8fa388c2e4 100644 --- a/core/src/time.rs +++ b/core/src/time.rs @@ -28,7 +28,7 @@ impl Drop for Stopwatch { fn drop(&mut self) { let elapsed = self.start.elapsed(); if elapsed > Duration::from_millis(TR) { - kaspa_core::warn!("[{}] Abnormal time: {:#?}", self.name, elapsed); + kaspa_core::trace!("[{}] Abnormal time: {:#?}", self.name, elapsed); } } } diff --git a/testing/integration/src/common/client_pool.rs b/testing/integration/src/common/client_pool.rs new file mode 100644 index 0000000000..21abeaf3ad --- /dev/null +++ b/testing/integration/src/common/client_pool.rs @@ -0,0 +1,52 @@ +use async_channel::{SendError, Sender}; +use futures_util::Future; +use kaspa_core::trace; +use kaspa_grpc_client::GrpcClient; +use kaspa_utils::{any::type_name_short, channel::Channel}; +use std::sync::Arc; +use tokio::task::JoinHandle; + +pub struct ClientPool { + distribution_channel: Channel, + pub join_handles: Vec>, +} + +impl ClientPool { + pub fn new(clients: Vec>, distribution_channel_capacity: usize, client_op: F) -> Self + where + F: Fn(Arc, T) -> R + Sync + Send + Copy + 'static, + R: Future + Send, + { + let distribution_channel = Channel::bounded(distribution_channel_capacity); + let join_handles = clients + .into_iter() + .enumerate() + .map(|(index, client)| { + let rx = distribution_channel.receiver(); + tokio::spawn(async move { + while let Ok(msg) = rx.recv().await { + if client_op(client.clone(), msg).await { + break; + 
} + } + client.disconnect().await.unwrap(); + trace!("Client pool {} task {} exited", type_name_short::(), index); + }) + }) + .collect(); + + Self { distribution_channel, join_handles } + } + + pub async fn send_via_available_client(&self, msg: T) -> Result<(), SendError> { + self.distribution_channel.send(msg).await + } + + pub fn sender(&self) -> Sender { + self.distribution_channel.sender() + } + + pub fn close(&self) { + self.distribution_channel.close() + } +} diff --git a/testing/integration/src/common/daemon.rs b/testing/integration/src/common/daemon.rs index bbb7b25383..e4f57dff0e 100644 --- a/testing/integration/src/common/daemon.rs +++ b/testing/integration/src/common/daemon.rs @@ -1,3 +1,4 @@ +use futures_util::Future; use kaspa_consensus_core::network::NetworkId; use kaspa_core::{core::Core, signals::Shutdown}; use kaspa_database::utils::get_kaspa_tempdir; @@ -7,6 +8,8 @@ use kaspad::{args::Args, daemon::create_core_with_runtime}; use std::{sync::Arc, time::Duration}; use tempfile::TempDir; +use super::client_pool::ClientPool; + pub struct Daemon { // Type and suffix of the daemon network pub network: NetworkId, @@ -77,6 +80,23 @@ impl Daemon { .await .unwrap() } + + pub async fn new_client_pool( + &self, + pool_size: usize, + distribution_channel_capacity: usize, + client_op: F, + ) -> ClientPool + where + F: Fn(Arc, T) -> R + Sync + Send + Copy + 'static, + R: Future + Send, + { + let mut clients = Vec::with_capacity(pool_size); + for _ in 0..pool_size { + clients.push(Arc::new(self.new_client().await)); + } + ClientPool::new(clients, distribution_channel_capacity, client_op) + } } impl Drop for Daemon { diff --git a/testing/integration/src/common/mod.rs b/testing/integration/src/common/mod.rs index 1ca870b397..095f189026 100644 --- a/testing/integration/src/common/mod.rs +++ b/testing/integration/src/common/mod.rs @@ -4,6 +4,7 @@ use std::{ path::Path, }; +pub mod client_pool; pub mod daemon; pub fn open_file(file_path: &Path) -> File { diff --git a/testing/integration/src/mempool_benchmarks.rs b/testing/integration/src/mempool_benchmarks.rs index 8409f23af2..1ecd79e2e6 100644 --- a/testing/integration/src/mempool_benchmarks.rs +++ b/testing/integration/src/mempool_benchmarks.rs @@ -1,5 +1,6 @@ use crate::common::daemon::Daemon; use async_channel::Sender; +use futures_util::future::join_all; use itertools::Itertools; use kaspa_addresses::Address; use kaspa_consensus::params::Params; @@ -14,7 +15,7 @@ use kaspa_consensus_core::{ utxo_diff::UtxoDiff, }, }; -use kaspa_core::{debug, info, time::Stopwatch}; +use kaspa_core::{debug, info}; use kaspa_notify::{ listener::ListenerId, notifier::Notify, @@ -32,7 +33,10 @@ use std::{ cmp::max, collections::{hash_map::Entry::Occupied, HashMap, HashSet}, fmt::Debug, - sync::Arc, + sync::{ + atomic::{AtomicBool, Ordering}, + Arc, + }, time::Duration, }; use tokio::join; @@ -49,9 +53,8 @@ impl Notify for ChannelNotify { } } -const FEE_PER_MASS: u64 = 10; - fn required_fee(num_inputs: usize, num_outputs: u64) -> u64 { + const FEE_PER_MASS: u64 = 10; FEE_PER_MASS * estimated_mass(num_inputs, num_outputs) } @@ -59,8 +62,17 @@ fn estimated_mass(num_inputs: usize, num_outputs: u64) -> u64 { 200 + 34 * num_outputs + 1000 * (num_inputs as u64) } +const EXPAND_FACTOR: u64 = 2; +const CONTRACT_FACTOR: u64 = 2; + /// Builds a TX DAG based on the initial UTXO set and on constant params -fn generate_tx_dag(mut utxoset: UtxoCollection, schnorr_key: KeyPair, spk: ScriptPublicKey) -> Vec> { +fn generate_tx_dag( + mut utxoset: UtxoCollection, + 
schnorr_key: KeyPair, + spk: ScriptPublicKey, + target_levels: usize, + target_width: usize, +) -> Vec> { /* Algo: perform level by level: @@ -72,10 +84,8 @@ fn generate_tx_dag(mut utxoset: UtxoCollection, schnorr_key: KeyPair, spk: Scrip apply level utxo diff to the utxo collection */ - let target_levels = 1_000; - let target_width = 500; - let num_inputs = 2; - let num_outputs = 2; + let num_inputs = CONTRACT_FACTOR as usize; + let num_outputs = EXPAND_FACTOR; let mut txs = Vec::with_capacity(target_levels * target_width); @@ -138,7 +148,14 @@ fn verify_tx_dag(initial_utxoset: &UtxoCollection, txs: &Vec>) #[ignore = "bmk"] async fn bench_bbt_latency() { kaspa_core::panic::configure_panic(); - kaspa_core::log::try_init_logger("info"); + kaspa_core::log::try_init_logger("info,kaspa_core::time=trace"); + + // Constants + const BLOCK_COUNT: usize = 20_000; + const TX_COUNT: usize = 600_000; + const TX_LEVEL_WIDTH: usize = 1000; + const SUBMIT_BLOCK_CLIENTS: usize = 20; + const SUBMIT_TX_CLIENTS: usize = 1; /* Logic: @@ -166,7 +183,7 @@ async fn bench_bbt_latency() { let args = Args { simnet: true, enable_unsynced_mining: true, - num_prealloc_utxos: Some(1_000), + num_prealloc_utxos: Some(TX_LEVEL_WIDTH as u64 * CONTRACT_FACTOR), prealloc_address: Some(prealloc_address.to_string()), prealloc_amount: 500 * SOMPI_PER_KASPA, ..Default::default() @@ -175,15 +192,13 @@ async fn bench_bbt_latency() { let params: Params = network.into(); let utxoset = args.generate_prealloc_utxos(args.num_prealloc_utxos.unwrap()); - let txs = generate_tx_dag(utxoset.clone(), schnorr_key, spk); + let txs = generate_tx_dag(utxoset.clone(), schnorr_key, spk, TX_COUNT / TX_LEVEL_WIDTH, TX_LEVEL_WIDTH); verify_tx_dag(&utxoset, &txs); info!("Generated overall {} txs", txs.len()); let mut daemon = Daemon::new_random_with_args(args); let client = daemon.start().await; - // TODO: use only a single client once grpc server-side supports concurrent requests - let block_template_client = daemon.new_client().await; - let submit_block_client = daemon.new_client().await; + let bbt_client = daemon.new_client().await; // The time interval between Poisson(lambda) events distributes ~Exp(lambda) let dist: Exp = Exp::new(params.bps() as f64).unwrap(); @@ -195,31 +210,64 @@ async fn bench_bbt_latency() { Address::new(network.network_type().into(), kaspa_addresses::Version::PubKey, &pk.x_only_public_key().0.serialize()); debug!("Generated private key {} and address {}", sk.display_secret(), pay_address); - let current_template = Arc::new(Mutex::new(block_template_client.get_block_template(pay_address.clone(), vec![]).await.unwrap())); + let current_template = Arc::new(Mutex::new(bbt_client.get_block_template(pay_address.clone(), vec![]).await.unwrap())); let current_template_consume = current_template.clone(); + let executing = Arc::new(AtomicBool::new(true)); let (sender, receiver) = async_channel::unbounded(); - block_template_client.start(Some(Arc::new(ChannelNotify { sender }))).await; - block_template_client.start_notify(ListenerId::default(), Scope::NewBlockTemplate(NewBlockTemplateScope {})).await.unwrap(); + bbt_client.start(Some(Arc::new(ChannelNotify { sender }))).await; + bbt_client.start_notify(ListenerId::default(), Scope::NewBlockTemplate(NewBlockTemplateScope {})).await.unwrap(); + + let submit_block_pool = daemon + .new_client_pool(SUBMIT_BLOCK_CLIENTS, 100, |c, block| async move { + let response = c.submit_block(block, false).await.unwrap(); + assert_eq!(response.report, kaspa_rpc_core::SubmitBlockReport::Success); + 
false + }) + .await; + + let submit_tx_pool = daemon + .new_client_pool::<(usize, Arc), _, _>(SUBMIT_TX_CLIENTS, 100, |c, (i, tx)| async move { + match c.submit_transaction(tx.as_ref().into(), false).await { + Ok(_) => {} + Err(RpcError::General(msg)) if msg.contains("orphan") => { + kaspa_core::warn!("\n\n\n{msg}\n\n"); + kaspa_core::warn!("Submitted {} transactions, exiting tx submit loop", i); + return true; + } + Err(e) => panic!("{e}"), + } + false + }) + .await; - let cc = block_template_client.clone(); + let cc = bbt_client.clone(); + let exec = executing.clone(); + let notification_rx = receiver.clone(); let miner_receiver_task = tokio::spawn(async move { - while let Ok(notification) = receiver.recv().await { + while let Ok(notification) = notification_rx.recv().await { match notification { Notification::NewBlockTemplate(_) => { - while receiver.try_recv().is_ok() { + while notification_rx.try_recv().is_ok() { // Drain the channel } - let _sw = Stopwatch::<500>::with_threshold("get_block_template"); + // let _sw = Stopwatch::<500>::with_threshold("get_block_template"); *current_template.lock() = cc.get_block_template(pay_address.clone(), vec![]).await.unwrap(); } _ => panic!(), } + if !exec.load(Ordering::Relaxed) { + kaspa_core::warn!("Test is over, stopping miner receiver loop"); + break; + } } + kaspa_core::warn!("Miner receiver loop task exited"); }); + let block_sender = submit_block_pool.sender(); + let exec = executing.clone(); let miner_loop_task = tokio::spawn(async move { - for i in 0..10000 { + for i in 0..BLOCK_COUNT { // Simulate mining time let timeout = max((dist.sample(&mut thread_rng()) * 1000.0) as u64, 1); tokio::time::sleep(Duration::from_millis(timeout)).await; @@ -227,46 +275,63 @@ async fn bench_bbt_latency() { // Read the most up-to-date block template let mut block = current_template_consume.lock().block.clone(); // Use index as nonce to avoid duplicate blocks - block.header.nonce = i; + block.header.nonce = i as u64; - let mcc = submit_block_client.clone(); + let bs = block_sender.clone(); tokio::spawn(async move { // Simulate communication delay. 
TODO: consider adding gaussian noise tokio::time::sleep(Duration::from_millis(comm_delay)).await; - // let _sw = Stopwatch::<500>::with_threshold("submit_block"); - let response = mcc.submit_block(block, false).await.unwrap(); - assert_eq!(response.report, kaspa_rpc_core::SubmitBlockReport::Success); + let _ = bs.send(block).await; }); + if !exec.load(Ordering::Relaxed) { + kaspa_core::warn!("Test is over, stopping miner loop"); + break; + } } - block_template_client.disconnect().await.unwrap(); - submit_block_client.disconnect().await.unwrap(); + exec.store(false, Ordering::Relaxed); + bbt_client.stop_notify(ListenerId::default(), Scope::NewBlockTemplate(NewBlockTemplateScope {})).await.unwrap(); + bbt_client.disconnect().await.unwrap(); + kaspa_core::warn!("Miner loop task exited"); }); + let tx_sender = submit_tx_pool.sender(); + let exec = executing.clone(); let cc = client.clone(); let tx_sender_task = tokio::spawn(async move { - let total_txs = txs.len(); for (i, tx) in txs.into_iter().enumerate() { - let _sw = Stopwatch::<500>::with_threshold("submit_transaction"); - let res = cc.submit_transaction(tx.as_ref().into(), false).await; - match res { + match tx_sender.send((i, tx)).await { Ok(_) => {} - Err(RpcError::General(msg)) if msg.contains("orphan") => { - kaspa_core::error!("\n\n\n{msg}\n\n"); - kaspa_core::warn!("Submitted {} out of {}, exiting tx submit loop", i, total_txs); + Err(_) => { break; } - Err(e) => panic!("{e}"), + } + if !exec.load(Ordering::Relaxed) { + break; } } - kaspa_core::warn!("Tx submit task exited"); + + kaspa_core::warn!("Tx sender task, waiting for mempool to drain.."); + while cc.get_info().await.unwrap().mempool_size > 0 { + if !exec.load(Ordering::Relaxed) { + break; + } + tokio::time::sleep(std::time::Duration::from_secs(1)).await; + } + exec.store(false, Ordering::Relaxed); + kaspa_core::warn!("Tx sender task exited"); }); let _ = join!(miner_receiver_task, miner_loop_task, tx_sender_task); + submit_block_pool.close(); + submit_tx_pool.close(); + + join_all(submit_block_pool.join_handles).await; + join_all(submit_tx_pool.join_handles).await; + // // Fold-up // - // tokio::time::sleep(std::time::Duration::from_secs(5)).await; client.disconnect().await.unwrap(); drop(client); daemon.shutdown(); diff --git a/utils/src/channel.rs b/utils/src/channel.rs index bf30a6f891..02c2089265 100644 --- a/utils/src/channel.rs +++ b/utils/src/channel.rs @@ -1,4 +1,4 @@ -use async_channel::{unbounded, Receiver, RecvError, SendError, Sender, TryRecvError, TrySendError}; +use async_channel::{bounded, unbounded, Receiver, RecvError, SendError, Sender, TryRecvError, TrySendError}; /// Multiple producers multiple consumers channel #[derive(Clone, Debug)] @@ -12,6 +12,11 @@ impl Channel { Self { sender: channel.0, receiver: channel.1 } } + pub fn bounded(capacity: usize) -> Channel { + let channel = bounded(capacity); + Self { sender: channel.0, receiver: channel.1 } + } + pub fn sender(&self) -> Sender { self.sender.clone() } From 118559edfbc4e4239626683361fc864a1ccc91e0 Mon Sep 17 00:00:00 2001 From: Tiram <18632023+tiram88@users.noreply.github.com> Date: Thu, 14 Sep 2023 01:11:55 +0300 Subject: [PATCH 38/86] Improve the topological sorting of transactions --- mining/src/manager.rs | 399 +++++++++++++-------------- mining/src/mempool/mod.rs | 4 +- mining/src/model/mod.rs | 2 +- mining/src/model/topological_sort.rs | 284 +++++++++++++++++++ mining/src/model/txs_stager.rs | 47 ---- 5 files changed, 484 insertions(+), 252 deletions(-) create mode 100644 
mining/src/model/topological_sort.rs delete mode 100644 mining/src/model/txs_stager.rs diff --git a/mining/src/manager.rs b/mining/src/manager.rs index d41089c057..6f7b7fd6a5 100644 --- a/mining/src/manager.rs +++ b/mining/src/manager.rs @@ -14,7 +14,7 @@ use crate::{ model::{ candidate_tx::CandidateTransaction, owner_txs::{GroupedOwnerTransactions, ScriptPublicKeySet}, - txs_stager::TransactionsStagger, + topological_sort::IntoIterTopologically, }, }; use itertools::Itertools; @@ -290,91 +290,84 @@ impl MiningManager { priority: Priority, orphan: Orphan, ) -> MiningManagerResult>> { + const TRANSACTION_CHUNK_SIZE: usize = 250; + // The capacity used here may be exceeded since accepted transactions may unorphan other transactions. let mut accepted_transactions: Vec> = Vec::with_capacity(transactions.len()); - let mut batch = TransactionsStagger::new(transactions); - while let Some(transactions) = batch.stagger() { - if transactions.is_empty() { - panic!( - "The mempool got a batch of transactions for validation with cyclic dependencies: {:?}", - transactions.iter().map(|x| x.id()).collect::>() - ); - } - let mut transactions = transactions.into_iter().map(MutableTransaction::from_tx).collect::>(); + let mut unorphaned_transactions = vec![]; + let _swo = Stopwatch::<80>::with_threshold("validate_and_insert_transaction_batch topological_sort op"); + let sorted_transactions = transactions.into_iter().map(MutableTransaction::from_tx).topological_into_iter(); + drop(_swo); - // read lock on mempool - // Here, we simply log and drop all erroneous transactions since the caller doesn't care about those anyway + // read lock on mempool + // Here, we simply log and drop all erroneous transactions since the caller doesn't care about those anyway + let mut transactions = Vec::with_capacity(sorted_transactions.len()); + for chunk in &sorted_transactions.chunks(TRANSACTION_CHUNK_SIZE) { let mempool = self.mempool.read(); - transactions = transactions - .into_iter() - .filter_map(|tx| { - let transaction_id = tx.id(); - match mempool.pre_validate_and_populate_transaction(consensus, tx) { - Ok(tx) => Some(tx), - Err(RuleError::RejectAlreadyAccepted(transaction_id)) => { - debug!("Ignoring already accepted transaction {}", transaction_id); - None - } - Err(RuleError::RejectDuplicate(transaction_id)) => { - debug!("Ignoring transaction already in the mempool {}", transaction_id); - None - } - Err(RuleError::RejectDuplicateOrphan(transaction_id)) => { - debug!("Ignoring transaction already in the orphan pool {}", transaction_id); - None - } - Err(err) => { - debug!("Failed to pre validate transaction {0} due to rule error: {1}", transaction_id, err); - None - } + let txs = chunk.filter_map(|tx| { + let transaction_id = tx.id(); + match mempool.pre_validate_and_populate_transaction(consensus, tx) { + Ok(tx) => Some(tx), + Err(RuleError::RejectAlreadyAccepted(transaction_id)) => { + debug!("Ignoring already accepted transaction {}", transaction_id); + None } - }) - .collect(); - drop(mempool); + Err(RuleError::RejectDuplicate(transaction_id)) => { + debug!("Ignoring transaction already in the mempool {}", transaction_id); + None + } + Err(RuleError::RejectDuplicateOrphan(transaction_id)) => { + debug!("Ignoring transaction already in the orphan pool {}", transaction_id); + None + } + Err(err) => { + debug!("Failed to pre validate transaction {0} due to rule error: {1}", transaction_id, err); + None + } + } + }); + transactions.extend(txs); + } - // no lock on mempool - // We process the transactions by chunks 
of max block mass to prevent locking the virtual processor for too long. - let mut lower_bound: usize = 0; - let mut validation_results = Vec::with_capacity(transactions.len()); - while let Some(upper_bound) = self.next_transaction_chunk_upper_bound(&transactions, lower_bound) { - assert!(lower_bound < upper_bound, "the chunk is never empty"); - validation_results - .extend(validate_mempool_transactions_in_parallel(consensus, &mut transactions[lower_bound..upper_bound])); - lower_bound = upper_bound; - } - assert_eq!(transactions.len(), validation_results.len(), "every transaction should have a matching validation result"); + // no lock on mempool + // We process the transactions by chunks of max block mass to prevent locking the virtual processor for too long. + let mut lower_bound: usize = 0; + let mut validation_results = Vec::with_capacity(transactions.len()); + while let Some(upper_bound) = self.next_transaction_chunk_upper_bound(&transactions, lower_bound) { + assert!(lower_bound < upper_bound, "the chunk is never empty"); + validation_results + .extend(validate_mempool_transactions_in_parallel(consensus, &mut transactions[lower_bound..upper_bound])); + lower_bound = upper_bound; + } + assert_eq!(transactions.len(), validation_results.len(), "every transaction should have a matching validation result"); - // write lock on mempool - // Here again, transactions failing post validation are logged and dropped + // write lock on mempool + // Here again, transactions failing post validation are logged and dropped + for chunk in &transactions.into_iter().zip(validation_results).chunks(TRANSACTION_CHUNK_SIZE) { let mut mempool = self.mempool.write(); - let unorphaned_transactions = transactions - .into_iter() - .zip(validation_results) - .flat_map(|(transaction, validation_result)| { - let transaction_id = transaction.id(); - match mempool.post_validate_and_insert_transaction(consensus, validation_result, transaction, priority, orphan) { - Ok(Some(accepted_transaction)) => { - accepted_transactions.push(accepted_transaction.clone()); - mempool.get_unorphaned_transactions_after_accepted_transaction(&accepted_transaction) - } - Ok(None) => { - // Either orphaned or already existing in the mempool - vec![] - } - Err(err) => { - debug!("Failed to post validate transaction {0} due to rule error: {1}", transaction_id, err); - vec![] - } + let txs = chunk.flat_map(|(transaction, validation_result)| { + let transaction_id = transaction.id(); + match mempool.post_validate_and_insert_transaction(consensus, validation_result, transaction, priority, orphan) { + Ok(Some(accepted_transaction)) => { + accepted_transactions.push(accepted_transaction.clone()); + mempool.get_unorphaned_transactions_after_accepted_transaction(&accepted_transaction) } - }) - .collect::>(); + Ok(None) => { + // Either orphaned or already existing in the mempool + vec![] + } + Err(err) => { + debug!("Failed to post validate transaction {0} due to rule error: {1}", transaction_id, err); + vec![] + } // TODO: handle RuleError::RejectInvalid errors when a banning process gets implemented + } + }); + unorphaned_transactions.extend(txs); mempool.log_stats(); - drop(mempool); - - // TODO: handle RuleError::RejectInvalid errors when a banning process gets implemented - accepted_transactions.extend(self.validate_and_insert_unorphaned_transactions(consensus, unorphaned_transactions)); } + accepted_transactions.extend(self.validate_and_insert_unorphaned_transactions(consensus, unorphaned_transactions)); + // Please note: the only reason 
this function returns a Result is the future handling of misbehaving nodes // and the related RuleError::RejectInvalid Ok(accepted_transactions) @@ -495,6 +488,8 @@ impl MiningManager { consensus: &dyn ConsensusApi, transaction_ids_sender: UnboundedSender>, ) { + const TRANSACTION_CHUNK_SIZE: usize = 1000; + // read lock on mempool // Prepare a vector with clones of high priority transactions found in the mempool let mempool = self.mempool.read(); @@ -515,144 +510,144 @@ impl MiningManager { // We process the transactions by level of dependency inside the batch. // Doing so allows to remove all chained dependencies of rejected transactions. - let mut batch = TransactionsStagger::new(transactions); - while let Some(transactions) = batch.stagger() { - if transactions.is_empty() { - panic!( - "The mempool high priorities transactions have cyclic dependencies: {:?}", - transactions.iter().map(|x| x.id()).collect::>() - ); - } - - // read lock on mempool - // As the revalidation process is no longer atomic, we filter the transactions ready for revalidation, - // keeping only the ones actually present in the mempool (see comment above). + let _swo = Stopwatch::<200>::with_threshold("revalidate topological_sort op"); + let sorted_transactions = transactions.topological_into_iter(); + drop(_swo); + + // read lock on mempool by transaction chunks + // As the revalidation process is no longer atomic, we filter the transactions ready for revalidation, + // keeping only the ones actually present in the mempool (see comment above). + let _swo = Stopwatch::<50>::with_threshold("revalidate populate_mempool_entries op"); + let mut transactions = Vec::with_capacity(sorted_transactions.len()); + for chunk in &sorted_transactions.chunks(TRANSACTION_CHUNK_SIZE) { let mempool = self.mempool.read(); - let _swo = Stopwatch::<100>::with_threshold("revalidate populate_mempool_entries op"); - let mut transactions = transactions - .into_iter() - .filter_map(|mut x| { - let transaction_id = x.id(); - if mempool.has_accepted_transaction(&transaction_id) { - accepted += 1; - None - } else if mempool.has_transaction(&transaction_id, true, false) { - x.clear_entries(); - mempool.populate_mempool_entries(&mut x); - Some(x) - } else { - other += 1; - None + let txs = chunk.filter_map(|mut x| { + let transaction_id = x.id(); + if mempool.has_accepted_transaction(&transaction_id) { + accepted += 1; + None + } else if mempool.has_transaction(&transaction_id, true, false) { + x.clear_entries(); + mempool.populate_mempool_entries(&mut x); + match x.is_fully_populated() { + false => Some(x), + true => { + // If all entries are populated with mempool UTXOs, we already know the transaction is valid + valid += 1; + None + } } - }) - .collect::>(); - drop(_swo); - drop(mempool); + } else { + other += 1; + None + } + }); + transactions.extend(txs); + } + drop(_swo); - // no lock on mempool - // We process the transactions by chunks of max block mass to prevent locking the virtual processor for too long. 
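The mass-bounded chunking mentioned in this comment is, in essence, a cursor loop: extend the current slice while the accumulated transaction mass stays within the budget, hand the slice to the parallel validator, then advance. A generic sketch of that loop; the function and parameter names are illustrative, and the actual bound computation lives in next_transaction_chunk_upper_bound:

fn for_each_mass_bounded_chunk<T>(
    items: &mut [T],
    max_chunk_mass: u64,
    mass: impl Fn(&T) -> u64,
    mut handle: impl FnMut(&mut [T]),
) {
    let mut lower = 0;
    while lower < items.len() {
        // Always take at least one item so the chunk is never empty,
        // then extend while the accumulated mass stays within the budget.
        let mut acc = mass(&items[lower]);
        let mut upper = lower + 1;
        while upper < items.len() && acc + mass(&items[upper]) <= max_chunk_mass {
            acc += mass(&items[upper]);
            upper += 1;
        }
        handle(&mut items[lower..upper]);
        lower = upper;
    }
}

An oversized transaction thus gets a chunk of its own rather than stalling the batch, which is consistent with the assert below that a chunk is never empty.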
- let mut lower_bound: usize = 0; - let mut validation_results = Vec::with_capacity(transactions.len()); - while let Some(upper_bound) = self.next_transaction_chunk_upper_bound(&transactions, lower_bound) { - assert!(lower_bound < upper_bound, "the chunk is never empty"); - let _swo = Stopwatch::<60>::with_threshold("revalidate validate_mempool_transactions_in_parallel op"); - validation_results - .extend(populate_mempool_transactions_in_parallel(consensus, &mut transactions[lower_bound..upper_bound])); - drop(_swo); - lower_bound = upper_bound; - } - assert_eq!(transactions.len(), validation_results.len(), "every transaction should have a matching validation result"); + // no lock on mempool + // We process the transactions by chunks of max block mass to prevent locking the virtual processor for too long. + let mut lower_bound: usize = 0; + let mut validation_results = Vec::with_capacity(transactions.len()); + while let Some(upper_bound) = self.next_transaction_chunk_upper_bound(&transactions, lower_bound) { + assert!(lower_bound < upper_bound, "the chunk is never empty"); + let _swo = Stopwatch::<60>::with_threshold("revalidate validate_mempool_transactions_in_parallel op"); + validation_results + .extend(populate_mempool_transactions_in_parallel(consensus, &mut transactions[lower_bound..upper_bound])); + drop(_swo); + lower_bound = upper_bound; + } + assert_eq!(transactions.len(), validation_results.len(), "every transaction should have a matching validation result"); - // write lock on mempool - // Depending on the validation result, transactions are either accepted or removed - const TRANSACTION_CHUNK_SIZE: usize = 246 * 4; - for chunk in &transactions.into_iter().zip(validation_results).chunks(TRANSACTION_CHUNK_SIZE) { - let mut valid_ids = Vec::with_capacity(TRANSACTION_CHUNK_SIZE); - let mut mempool = self.mempool.write(); - let _swo = Stopwatch::<60>::with_threshold("revalidate update_revalidated_transaction op"); - for (transaction, validation_result) in chunk { - let transaction_id = transaction.id(); - // Only consider transactions still being in the mempool since during the validation some might have been removed. - if mempool.update_revalidated_transaction(transaction) { - match validation_result { - Ok(()) => { - // A following transaction should not remove this one from the pool since we process in a topological order. - // Still, considering the (very unlikely) scenario of two high priority txs sandwiching a low one, where - // in this case topological order is not guaranteed since we only considered chained dependencies of - // high-priority transactions, we might wrongfully return as valid the id of a removed transaction. - // However, as only consequence, said transaction would then be advertised to registered peers and not be - // provided upon request. - valid_ids.push(transaction_id); - valid += 1; - } - Err(RuleError::RejectMissingOutpoint) => { - let transaction = mempool.get_transaction(&transaction_id, true, false).unwrap(); - let missing_txs = transaction - .entries - .iter() - .zip(transaction.tx.inputs.iter()) - .flat_map( - |(entry, input)| { - if entry.is_none() { - Some(input.previous_outpoint.transaction_id) - } else { - None - } - }, - ) - .collect::>(); - - // A transaction may have missing outpoints for legitimate reasons related to concurrency, like a race condition between - // an accepted block having not started yet or unfinished call to handle_new_block_transactions but already processed by - // the consensus and this ongoing call to revalidate. 
- // - // So we only remove the transaction and keep its redeemers in the mempool because we cannot be sure they are invalid, in - // fact in the race condition case they are valid regarding outpoints. - let extra_info = match missing_txs.len() { - 0 => " but no missing tx!".to_string(), // this is never supposed to happen - 1 => format!(" missing tx {}", missing_txs[0]), - n => format!(" with {} missing txs {}..{}", n, missing_txs[0], missing_txs.last().unwrap()), - }; - - // This call cleanly removes the invalid transaction. - let result = mempool.remove_transaction( - &transaction_id, - false, - TxRemovalReason::RevalidationWithMissingOutpoints, - extra_info.as_str(), - ); - if let Err(err) = result { - warn!("Failed to remove transaction {} from mempool: {}", transaction_id, err); - } - missing_outpoint += 1; + // write lock on mempool + // Depending on the validation result, transactions are either accepted or removed + for chunk in &transactions.into_iter().zip(validation_results).chunks(TRANSACTION_CHUNK_SIZE) { + let mut valid_ids = Vec::with_capacity(TRANSACTION_CHUNK_SIZE); + let mut mempool = self.mempool.write(); + let _swo = Stopwatch::<60>::with_threshold("revalidate update_revalidated_transaction op"); + for (transaction, validation_result) in chunk { + let transaction_id = transaction.id(); + // Only consider transactions still being in the mempool since during the validation some might have been removed. + if mempool.update_revalidated_transaction(transaction) { + match validation_result { + Ok(()) => { + // A following transaction should not remove this one from the pool since we process in a topological order. + // Still, considering the (very unlikely) scenario of two high priority txs sandwiching a low one, where + // in this case topological order is not guaranteed since we only considered chained dependencies of + // high-priority transactions, we might wrongfully return as valid the id of a removed transaction. + // However, as only consequence, said transaction would then be advertised to registered peers and not be + // provided upon request. + valid_ids.push(transaction_id); + valid += 1; + } + Err(RuleError::RejectMissingOutpoint) => { + let transaction = mempool.get_transaction(&transaction_id, true, false).unwrap(); + let missing_txs = transaction + .entries + .iter() + .zip(transaction.tx.inputs.iter()) + .flat_map( + |(entry, input)| { + if entry.is_none() { + Some(input.previous_outpoint.transaction_id) + } else { + None + } + }, + ) + .collect::>(); + + // A transaction may have missing outpoints for legitimate reasons related to concurrency, like a race condition between + // an accepted block having not started yet or unfinished call to handle_new_block_transactions but already processed by + // the consensus and this ongoing call to revalidate. + // + // So we only remove the transaction and keep its redeemers in the mempool because we cannot be sure they are invalid, in + // fact in the race condition case they are valid regarding outpoints. + let extra_info = match missing_txs.len() { + 0 => " but no missing tx!".to_string(), // this is never supposed to happen + 1 => format!(" missing tx {}", missing_txs[0]), + n => format!(" with {} missing txs {}..{}", n, missing_txs[0], missing_txs.last().unwrap()), + }; + + // This call cleanly removes the invalid transaction. 
+ let result = mempool.remove_transaction( + &transaction_id, + false, + TxRemovalReason::RevalidationWithMissingOutpoints, + extra_info.as_str(), + ); + if let Err(err) = result { + warn!("Failed to remove transaction {} from mempool: {}", transaction_id, err); } - Err(err) => { - // Rust rewrite note: - // The behavior changes here compared to the golang version. - // The failed revalidation is simply logged and the process continues. - warn!( - "Removing high priority transaction {0} and its redeemers, it failed revalidation with {1}", - transaction_id, err - ); - // This call cleanly removes the invalid transaction and its redeemers. - let result = mempool.remove_transaction(&transaction_id, true, TxRemovalReason::Muted, ""); - if let Err(err) = result { - warn!("Failed to remove transaction {} from mempool: {}", transaction_id, err); - } - invalid += 1; + missing_outpoint += 1; + } + Err(err) => { + // Rust rewrite note: + // The behavior changes here compared to the golang version. + // The failed revalidation is simply logged and the process continues. + warn!( + "Removing high priority transaction {0} and its redeemers, it failed revalidation with {1}", + transaction_id, err + ); + // This call cleanly removes the invalid transaction and its redeemers. + let result = mempool.remove_transaction(&transaction_id, true, TxRemovalReason::Muted, ""); + if let Err(err) = result { + warn!("Failed to remove transaction {} from mempool: {}", transaction_id, err); } + invalid += 1; } - } else { - other += 1; } + } else { + other += 1; } - if !valid_ids.is_empty() { - assert!(transaction_ids_sender.send(valid_ids).is_ok(), "the channel expected to have a receiver and be opened"); - } - drop(_swo); - mempool.log_stats(); - drop(mempool); } + if !valid_ids.is_empty() { + assert!(transaction_ids_sender.send(valid_ids).is_ok(), "the channel is expected to have a receiver and be opened"); + } + drop(_swo); + mempool.log_stats(); + drop(mempool); } match accepted + missing_outpoint + invalid { 0 => { diff --git a/mining/src/mempool/mod.rs b/mining/src/mempool/mod.rs index bd090a7954..95dc66d980 100644 --- a/mining/src/mempool/mod.rs +++ b/mining/src/mempool/mod.rs @@ -127,12 +127,12 @@ impl Mempool { } pub(crate) fn block_candidate_transactions(&self) -> Vec { - let _sw = Stopwatch::<80>::with_threshold("block_candidate_transactions op"); + let _sw = Stopwatch::<120>::with_threshold("block_candidate_transactions op"); self.transaction_pool.all_ready_transactions() } pub(crate) fn all_transactions_with_priority(&self, priority: Priority) -> Vec { - let _sw = Stopwatch::<50>::with_threshold("all_transactions_with_priority op"); + let _sw = Stopwatch::<100>::with_threshold("all_transactions_with_priority op"); self.transaction_pool.all_transactions_with_priority(priority) } diff --git a/mining/src/model/mod.rs b/mining/src/model/mod.rs index 8f9128e881..482cc82f11 100644 --- a/mining/src/model/mod.rs +++ b/mining/src/model/mod.rs @@ -4,7 +4,7 @@ use std::collections::HashSet; pub(crate) mod candidate_tx; pub mod owner_txs; pub mod topological_index; -pub mod txs_stager; +pub mod topological_sort; /// A set of unique transaction ids pub type TransactionIdSet = HashSet; diff --git a/mining/src/model/topological_sort.rs b/mining/src/model/topological_sort.rs new file mode 100644 index 0000000000..aa88cce023 --- /dev/null +++ b/mining/src/model/topological_sort.rs @@ -0,0 +1,284 @@ +use itertools::Itertools; +use kaspa_consensus_core::tx::Transaction; +use std::{ + collections::{HashMap, HashSet, 
VecDeque},
+    iter::{FusedIterator, Map},
+};
+
+type IndexSet = HashSet<usize>;
+
+pub trait TopologicalSort {
+    fn topological_sort(self) -> Self
+    where
+        Self: Sized;
+}
+
+impl<T: AsRef<Transaction> + Clone> TopologicalSort for Vec<T> {
+    fn topological_sort(self) -> Self {
+        let mut sorted = Vec::with_capacity(self.len());
+        let mut in_degree: Vec<u32> = vec![0; self.len()];
+
+        // Index on transaction ids
+        let mut index = HashMap::with_capacity(self.len());
+        self.iter().enumerate().for_each(|(idx, tx)| {
+            let _ = index.insert(tx.as_ref().id(), idx);
+        });
+
+        // Transaction edges
+        let mut all_edges: Vec<Option<IndexSet>> = vec![None; self.len()];
+        self.iter().enumerate().for_each(|(destination_idx, tx)| {
+            tx.as_ref().inputs.iter().for_each(|input| {
+                if let Some(origin_idx) = index.get(&input.previous_outpoint.transaction_id) {
+                    all_edges[*origin_idx].get_or_insert_with(IndexSet::new).insert(destination_idx);
+                }
+            })
+        });
+
+        // Degrees
+        (0..self.len()).for_each(|origin_idx| {
+            if let Some(ref edges) = all_edges[origin_idx] {
+                edges.iter().for_each(|destination_idx| {
+                    in_degree[*destination_idx] += 1;
+                });
+            }
+        });
+
+        // Degree 0
+        let mut queue = VecDeque::with_capacity(self.len());
+        (0..self.len()).for_each(|destination_idx| {
+            if in_degree[destination_idx] == 0 {
+                queue.push_back(destination_idx);
+            }
+        });
+
+        // Sorted transactions
+        while !queue.is_empty() {
+            let current = queue.pop_front().unwrap();
+            if let Some(ref edges) = all_edges[current] {
+                edges.iter().for_each(|destination_idx| {
+                    let degree = in_degree.get_mut(*destination_idx).unwrap();
+                    *degree -= 1;
+                    if *degree == 0 {
+                        queue.push_back(*destination_idx);
+                    }
+                });
+            }
+            sorted.push(self[current].clone());
+        }
+        assert_eq!(sorted.len(), self.len(), "by definition, cryptographically no cycle can exist in a DAG of transactions");
+
+        sorted
+    }
+}
+
+pub trait IterTopologically<T>
+where
+    T: AsRef<Transaction>,
+{
+    fn topological_iter(&self) -> TopologicalIter<'_, T>;
+}
+
+impl<T: AsRef<Transaction>> IterTopologically<T> for &[T] {
+    fn topological_iter(&self) -> TopologicalIter<'_, T> {
+        TopologicalIter::new(self)
+    }
+}
+
+impl<T: AsRef<Transaction>> IterTopologically<T> for Vec<T> {
+    fn topological_iter(&self) -> TopologicalIter<'_, T> {
+        TopologicalIter::new(self)
+    }
+}
+
+pub struct TopologicalIter<'a, T: AsRef<Transaction>> {
+    transactions: &'a [T],
+    in_degree: Vec<u32>,
+    edges: Vec<Option<IndexSet>>,
+    queue: VecDeque<usize>,
+    yields_count: usize,
+}
+
+impl<'a, T: AsRef<Transaction>> TopologicalIter<'a, T> {
+    pub fn new(transactions: &'a [T]) -> Self {
+        let mut in_degree: Vec<u32> = vec![0; transactions.len()];
+
+        // Index on transaction ids
+        let mut index = HashMap::with_capacity(transactions.len());
+        transactions.iter().enumerate().for_each(|(idx, tx)| {
+            let _ = index.insert(tx.as_ref().id(), idx);
+        });
+
+        // Transaction edges
+        let mut edges: Vec<Option<IndexSet>> = vec![None; transactions.len()];
+        transactions.iter().enumerate().for_each(|(destination_idx, tx)| {
+            tx.as_ref().inputs.iter().for_each(|input| {
+                if let Some(origin_idx) = index.get(&input.previous_outpoint.transaction_id) {
+                    edges[*origin_idx].get_or_insert_with(IndexSet::new).insert(destination_idx);
+                }
+            })
+        });
+
+        // Degrees
+        (0..transactions.len()).for_each(|origin_idx| {
+            if let Some(ref edges) = edges[origin_idx] {
+                edges.iter().for_each(|destination_idx| {
+                    in_degree[*destination_idx] += 1;
+                });
+            }
+        });
+
+        // Degree 0
+        let mut queue = VecDeque::with_capacity(transactions.len());
+        (0..transactions.len()).for_each(|destination_idx| {
+            if in_degree[destination_idx] == 0 {
+                queue.push_back(destination_idx);
+            }
+        });
+        Self { transactions, in_degree, edges, queue, yields_count: 0 }
+    }
+}
+
+impl<'a, T: AsRef<Transaction>> Iterator for TopologicalIter<'a, T> {
+    type Item = &'a T;
+
+    fn next(&mut self) -> Option<Self::Item> {
+        match self.queue.pop_front() {
+            Some(current) => {
+                if let Some(ref edges) = self.edges[current] {
+                    edges.iter().for_each(|destination_idx| {
+                        let degree = self.in_degree.get_mut(*destination_idx).unwrap();
+                        *degree -= 1;
+                        if *degree == 0 {
+                            self.queue.push_back(*destination_idx);
+                        }
+                    });
+                }
+                self.yields_count += 1;
+                Some(&self.transactions[current])
+            }
+            None => None,
+        }
+    }
+
+    fn size_hint(&self) -> (usize, Option<usize>) {
+        let items_remaining = self.transactions.len() - self.yields_count.min(self.transactions.len());
+        (items_remaining, Some(items_remaining))
+    }
+}
+
+impl<'a, T: AsRef<Transaction>> FusedIterator for TopologicalIter<'a, T> {}
+impl<'a, T: AsRef<Transaction>> ExactSizeIterator for TopologicalIter<'a, T> {
+    fn len(&self) -> usize {
+        self.transactions.len() - self.yields_count.min(self.transactions.len())
+    }
+}
+
+pub trait IntoIterTopologically<T>
+where
+    T: AsRef<Transaction>,
+{
+    fn topological_into_iter(self) -> TopologicalIntoIter<T>;
+}
+
+impl<T: AsRef<Transaction>> IntoIterTopologically<T> for Vec<T> {
+    fn topological_into_iter(self) -> TopologicalIntoIter<T> {
+        TopologicalIntoIter::new(self)
+    }
+}
+
+impl<T, I, F> IntoIterTopologically<T> for Map<I, F>
+where
+    T: AsRef<Transaction>,
+    I: Iterator,
+    F: FnMut(<I as Iterator>::Item) -> T,
+{
+    fn topological_into_iter(self) -> TopologicalIntoIter<T> {
+        TopologicalIntoIter::new(self)
+    }
+}
+
+pub struct TopologicalIntoIter<T: AsRef<Transaction>> {
+    transactions: Vec<Option<T>>,
+    in_degree: Vec<u32>,
+    edges: Vec<Option<IndexSet>>,
+    queue: VecDeque<usize>,
+    yields_count: usize,
+}
+
+impl<T: AsRef<Transaction>> TopologicalIntoIter<T> {
+    pub fn new(transactions: impl IntoIterator<Item = T>) -> Self {
+        // Collect all transactions
+        let transactions = transactions.into_iter().map(|tx| Some(tx)).collect_vec();
+
+        let mut in_degree: Vec<u32> = vec![0; transactions.len()];
+
+        // Index on transaction ids
+        let mut index = HashMap::with_capacity(transactions.len());
+        transactions.iter().enumerate().for_each(|(idx, tx)| {
+            let _ = index.insert(tx.as_ref().unwrap().as_ref().id(), idx);
+        });
+
+        // Transaction edges
+        let mut edges: Vec<Option<IndexSet>> = vec![None; transactions.len()];
+        transactions.iter().enumerate().for_each(|(destination_idx, tx)| {
+            tx.as_ref().unwrap().as_ref().inputs.iter().for_each(|input| {
+                if let Some(origin_idx) = index.get(&input.previous_outpoint.transaction_id) {
+                    edges[*origin_idx].get_or_insert_with(IndexSet::new).insert(destination_idx);
+                }
+            })
+        });
+
+        // Degrees
+        (0..transactions.len()).for_each(|origin_idx| {
+            if let Some(ref edges) = edges[origin_idx] {
+                edges.iter().for_each(|destination_idx| {
+                    in_degree[*destination_idx] += 1;
+                });
+            }
+        });
+
+        // Degree 0
+        let mut queue = VecDeque::with_capacity(transactions.len());
+        (0..transactions.len()).for_each(|destination_idx| {
+            if in_degree[destination_idx] == 0 {
+                queue.push_back(destination_idx);
+            }
+        });
+        Self { transactions, in_degree, edges, queue, yields_count: 0 }
+    }
+}
+
+impl<T: AsRef<Transaction>> Iterator for TopologicalIntoIter<T> {
+    type Item = T;
+
+    fn next(&mut self) -> Option<Self::Item> {
+        match self.queue.pop_front() {
+            Some(current) => {
+                if let Some(ref edges) = self.edges[current] {
+                    edges.iter().for_each(|destination_idx| {
+                        let degree = self.in_degree.get_mut(*destination_idx).unwrap();
+                        *degree -= 1;
+                        if *degree == 0 {
+                            self.queue.push_back(*destination_idx);
+                        }
+                    });
+                }
+                self.yields_count += 1;
+                self.transactions[current].take()
+            }
+            None => None,
+        }
+    }
+
+    fn size_hint(&self) -> (usize, Option<usize>) {
+        let items_remaining = self.transactions.len() - self.yields_count.min(self.transactions.len());
+        (items_remaining, Some(items_remaining))
+    }
+}
+
+impl<T: AsRef<Transaction>> FusedIterator for TopologicalIntoIter<T> {}
+impl<T: AsRef<Transaction>> ExactSizeIterator for TopologicalIntoIter<T> {
+    fn len(&self) -> usize {
+        self.transactions.len() - self.yields_count.min(self.transactions.len())
+    }
+}
diff --git a/mining/src/model/txs_stager.rs b/mining/src/model/txs_stager.rs
deleted file mode 100644
index 719fe06147..0000000000
--- a/mining/src/model/txs_stager.rs
+++ /dev/null
@@ -1,47 +0,0 @@
-use super::TransactionIdSet;
-use kaspa_consensus_core::tx::{Transaction, TransactionId};
-use kaspa_core::time::Stopwatch;
-
-pub struct TransactionsStagger<T: AsRef<Transaction>> {
-    txs: Vec<T>,
-    ids: TransactionIdSet,
-}
-
-impl<T: AsRef<Transaction>> TransactionsStagger<T> {
-    pub fn new(txs: Vec<T>) -> Self {
-        let ids = txs.iter().map(|x| x.as_ref().id()).collect();
-        Self { txs, ids }
-    }
-
-    pub fn is_empty(&self) -> bool {
-        self.txs.is_empty()
-    }
-
-    /// Extract and return all independent transactions
-    pub fn stagger(&mut self) -> Option<Vec<T>> {
-        let _sw = Stopwatch::<50>::with_threshold("stagger op");
-        if self.is_empty() {
-            return None;
-        }
-        let mut ready = Vec::with_capacity(self.txs.len());
-        let mut dependent = Vec::with_capacity(self.txs.len());
-        while let Some(tx) = self.txs.pop() {
-            if self.is_dependent(&tx) {
-                dependent.push(tx);
-            } else {
-                ready.push(tx);
-            }
-        }
-        self.txs = dependent;
-        self.ids = self.txs.iter().map(|x| x.as_ref().id()).collect();
-        Some(ready)
-    }
-
-    fn has(&self, transaction_id: &TransactionId) -> bool {
-        self.ids.contains(transaction_id)
-    }
-
-    fn is_dependent(&self, tx: &T) -> bool {
-        tx.as_ref().inputs.iter().any(|x| self.has(&x.previous_outpoint.transaction_id))
-    }
-}

From 93025742a393756936e538713ac30d7224391901 Mon Sep 17 00:00:00 2001
From: Tiram <18632023+tiram88@users.noreply.github.com>
Date: Thu, 14 Sep 2023 14:05:07 +0300
Subject: [PATCH 39/86] Return transaction descendants BFS ordered + some
 optimizations

---
 mining/src/mempool/model/orphan_pool.rs       |  4 +-
 mining/src/mempool/model/pool.rs              | 37 ++++++++++---------
 mining/src/mempool/model/transactions_pool.rs |  4 +-
 3 files changed, 22 insertions(+), 23 deletions(-)

diff --git a/mining/src/mempool/model/orphan_pool.rs b/mining/src/mempool/model/orphan_pool.rs
index fa8ca1955f..6b2330de30 100644
--- a/mining/src/mempool/model/orphan_pool.rs
+++ b/mining/src/mempool/model/orphan_pool.rs
@@ -139,9 +139,7 @@ impl OrphanPool {
         // ... incoming
         for parent_id in self.get_parent_transaction_ids_in_pool(&transaction.mtx) {
             let entry = self.chained_mut().entry(parent_id).or_default();
-            if !entry.contains(&id) {
-                entry.insert(id);
-            }
+            entry.insert(id);
         }
         // ... outgoing
         let mut outpoint = TransactionOutpoint::new(id, 0);
diff --git a/mining/src/mempool/model/pool.rs b/mining/src/mempool/model/pool.rs
index 441fb0fe09..1eb6c87a32 100644
--- a/mining/src/mempool/model/pool.rs
+++ b/mining/src/mempool/model/pool.rs
@@ -11,7 +11,7 @@ use crate::{
 };
 use kaspa_consensus_core::tx::{MutableTransaction, TransactionId};
 use kaspa_mining_errors::mempool::RuleResult;
-use std::collections::{hash_set::Iter, HashMap, HashSet};
+use std::collections::{hash_set::Iter, HashMap, HashSet, VecDeque};
 
 pub(crate) type TransactionsEdges = HashMap<TransactionId, TransactionIdSet>;
 
@@ -65,26 +65,33 @@ pub(crate) trait Pool {
     /// Returns the ids of all transactions being directly and indirectly chained to `transaction_id`
     /// and existing in the pool.
     ///
+    /// The transactions are traversed in BFS mode. The returned order is not guaranteed to be
+    /// topological.
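
// A minimal, self-contained sketch of the Kahn-style in-degree technique used
// three times above (sort, borrowing iterator, consuming iterator), with plain
// integer ids and explicit parent lists standing in for kaspa transactions and
// outpoints (illustrative only, not part of the patch):

use std::collections::{HashMap, HashSet, VecDeque};

/// Sorts `txs` (pairs of `(id, parent_ids)`) so every parent present in the set
/// precedes all of its children.
fn topological_sort_sketch(txs: Vec<(u64, Vec<u64>)>) -> Vec<u64> {
    let index: HashMap<u64, usize> = txs.iter().enumerate().map(|(i, (id, _))| (*id, i)).collect();
    let mut edges: Vec<HashSet<usize>> = vec![HashSet::new(); txs.len()];
    let mut in_degree = vec![0u32; txs.len()];
    for (child_idx, (_, parents)) in txs.iter().enumerate() {
        for parent in parents {
            // Only parents present in the set create an edge, exactly like mempool-internal edges
            if let Some(&parent_idx) = index.get(parent) {
                if edges[parent_idx].insert(child_idx) {
                    in_degree[child_idx] += 1;
                }
            }
        }
    }
    // Seed the queue with all roots (in-degree 0), then peel the DAG layer by layer
    let mut queue: VecDeque<usize> = (0..txs.len()).filter(|&i| in_degree[i] == 0).collect();
    let mut sorted = Vec::with_capacity(txs.len());
    while let Some(current) = queue.pop_front() {
        sorted.push(txs[current].0);
        for &child in &edges[current] {
            in_degree[child] -= 1;
            if in_degree[child] == 0 {
                queue.push_back(child);
            }
        }
    }
    sorted
}
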
+ /// /// NOTE: this operation's complexity might become linear in the size of the mempool if the mempool /// contains deeply chained transactions - fn get_redeemer_ids_in_pool(&self, transaction_id: &TransactionId) -> TransactionIdSet { - let mut redeemers = TransactionIdSet::new(); + fn get_redeemer_ids_in_pool(&self, transaction_id: &TransactionId) -> Vec { + // TODO: study if removals based on the results of this function should occur in reversed + // topological order to prevent missing outpoints in concurrent processes. + let mut visited = TransactionIdSet::new(); + let mut descendants = vec![]; if let Some(transaction) = self.get(transaction_id) { - let mut stack = vec![transaction]; - while let Some(transaction) = stack.pop() { + let mut queue = VecDeque::new(); + queue.push_back(transaction); + while let Some(transaction) = queue.pop_front() { if let Some(chains) = self.chained().get(&transaction.id()) { - for redeemer_id in chains { + chains.iter().for_each(|redeemer_id| { if let Some(redeemer) = self.get(redeemer_id) { - // Do not revisit transactions - if redeemers.insert(*redeemer_id) { - stack.push(redeemer); + if visited.insert(*redeemer_id) { + descendants.push(*redeemer_id); + queue.push_back(redeemer); } } - } + }) } } } - redeemers + descendants } /// Returns the ids of all transactions which directly chained to `transaction_id` @@ -114,9 +121,7 @@ pub(crate) trait Pool { // Insert the mutable transaction in the owners object if not already present. // Clone since the transaction leaves the mempool. owner_set.transactions.entry(*id).or_insert_with(|| transaction.mtx.clone()); - if !owner.sending_txs.contains(id) { - owner.sending_txs.insert(*id); - } + owner.sending_txs.insert(*id); } // Receiving transactions @@ -124,9 +129,7 @@ pub(crate) trait Pool { // Insert the mutable transaction in the owners object if not already present. // Clone since the transaction leaves the mempool. 
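
// A compact model of the BFS redeemer walk above: descendants come out in
// breadth-first (level) order, which is not necessarily topological, since a
// grandchild that also spends the root directly surfaces at level one, possibly
// before one of its parents (simplified ids, illustrative only):

use std::collections::{HashMap, HashSet, VecDeque};

fn descendants_bfs(children: &HashMap<u64, Vec<u64>>, root: u64) -> Vec<u64> {
    let mut visited: HashSet<u64> = HashSet::new();
    let mut order = Vec::new();
    let mut queue = VecDeque::from([root]);
    while let Some(current) = queue.pop_front() {
        for &child in children.get(&current).into_iter().flatten() {
            // Each node is enqueued at most once, bounding the walk to O(V + E)
            if visited.insert(child) {
                order.push(child);
                queue.push_back(child);
            }
        }
    }
    order
}
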
owner_set.transactions.entry(*id).or_insert_with(|| transaction.mtx.clone()); - if !owner.receiving_txs.contains(id) { - owner.receiving_txs.insert(*id); - } + owner.receiving_txs.insert(*id); } }); }); diff --git a/mining/src/mempool/model/transactions_pool.rs b/mining/src/mempool/model/transactions_pool.rs index c7cfc1f4c8..80c5c40345 100644 --- a/mining/src/mempool/model/transactions_pool.rs +++ b/mining/src/mempool/model/transactions_pool.rs @@ -103,9 +103,7 @@ impl TransactionsPool { self.parent_transactions.insert(id, parents.clone()); for parent_id in parents { let entry = self.chained_mut().entry(parent_id).or_default(); - if !entry.contains(&id) { - entry.insert(id); - } + entry.insert(id); } self.utxo_set.add_transaction(&transaction.mtx); From b63b5867bbb90454788d6bf313ff01b99c31684f Mon Sep 17 00:00:00 2001 From: Tiram <18632023+tiram88@users.noreply.github.com> Date: Thu, 14 Sep 2023 16:24:31 +0300 Subject: [PATCH 40/86] Group expiration and revalidation of mempool txs in one task --- protocol/flows/src/flow_context.rs | 62 +++++++------------ .../flows/src/flowcontext/transactions.rs | 55 +++++++--------- 2 files changed, 46 insertions(+), 71 deletions(-) diff --git a/protocol/flows/src/flow_context.rs b/protocol/flows/src/flow_context.rs index ffff9bfa71..e61138b298 100644 --- a/protocol/flows/src/flow_context.rs +++ b/protocol/flows/src/flow_context.rs @@ -375,37 +375,27 @@ impl FlowContext { return Ok(()); } - // TODO: refactor this adding a worker and a scheduler to FlowContext - if self.should_expire_transactions().await { - // Spawn a task expiring concurrently the low priority transactions. - // The TransactionSpread instance ensures at most one expire running at any + if self.should_run_cleaning_task().await { + // Spawn a task executing the removal of expired low priority transactions and, if time has come too, + // the revalidation of high priority transactions. + // + // The TransactionSpread member ensures at most one instance of this task is running at any // given time. let mining_manager = self.mining_manager().clone(); let consensus_clone = consensus.clone(); let context = self.clone(); tokio::spawn(async move { - mining_manager.expire_low_priority_transactions(&consensus_clone).await; - context.expire_done().await; - }); - } - - // TODO: refactor this adding a worker and a scheduler to FlowContext - if self.should_rebroadcast_transactions().await { - // Spawn a task revalidating concurrently the high priority transactions. - // The TransactionSpread instance ensures at most one rebroadcast running at any - // given time. 
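
// The "at most one cleaning task at any given time" guarantee reduces to a
// compare-and-set guard around the spawn; the patch keeps the equivalent flag
// inside TransactionsSpread behind an async RwLock. A dependency-free sketch of
// the same single-flight pattern (illustrative only):

use std::sync::atomic::{AtomicBool, Ordering};

struct SingleFlight {
    running: AtomicBool,
}

impl SingleFlight {
    /// Returns true for exactly one caller until `done` is called.
    fn try_start(&self) -> bool {
        self.running.compare_exchange(false, true, Ordering::AcqRel, Ordering::Acquire).is_ok()
    }

    fn done(&self) {
        self.running.store(false, Ordering::Release);
    }
}
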
- let mining_manager = self.mining_manager().clone(); - let consensus_clone = consensus.clone(); - let context = self.clone(); - tokio::spawn(async move { - let (tx, mut rx) = unbounded_channel(); - tokio::spawn(async move { - mining_manager.revalidate_high_priority_transactions(&consensus_clone, tx).await; - }); - while let Some(transactions) = rx.recv().await { - let _ = context.broadcast_transactions(transactions).await; + mining_manager.clone().expire_low_priority_transactions(&consensus_clone).await; + if context.should_rebroadcast().await { + let (tx, mut rx) = unbounded_channel(); + tokio::spawn(async move { + mining_manager.revalidate_high_priority_transactions(&consensus_clone, tx).await; + }); + while let Some(transactions) = rx.recv().await { + let _ = context.broadcast_transactions(transactions).await; + } } - context.rebroadcast_done().await; + context.cleaning_is_done().await; }); } @@ -450,24 +440,18 @@ impl FlowContext { self.broadcast_transactions(accepted_transactions.iter().map(|x| x.id())).await } - /// Returns true if the time for a rebroadcast of the mempool high priority transactions has come. - /// - /// If true, the instant of the call is registered as the last rebroadcast time. - pub async fn should_rebroadcast_transactions(&self) -> bool { - self.transactions_spread.write().await.should_rebroadcast_transactions() - } - - pub async fn rebroadcast_done(&self) { - self.transactions_spread.write().await.rebroadcast_done(); + /// Returns true if the time has come for running the task cleaning mempool transactions. + async fn should_run_cleaning_task(&self) -> bool { + self.transactions_spread.write().await.should_run_cleaning_task() } - /// Returns true if the time for expiring the mempool low priority transactions has come. - pub async fn should_expire_transactions(&self) -> bool { - self.transactions_spread.write().await.should_expire_transactions() + /// Returns true if the time has come for a rebroadcast of the mempool high priority transactions. + async fn should_rebroadcast(&self) -> bool { + self.transactions_spread.write().await.should_rebroadcast() } - pub async fn expire_done(&self) { - self.transactions_spread.write().await.expire_done(); + async fn cleaning_is_done(&self) { + self.transactions_spread.write().await.cleaning_is_done() } /// Add the given transactions IDs to a set of IDs to broadcast. 
The IDs will be broadcasted to all peers diff --git a/protocol/flows/src/flowcontext/transactions.rs b/protocol/flows/src/flowcontext/transactions.rs index 47ce5ae9d6..c22e3533b0 100644 --- a/protocol/flows/src/flowcontext/transactions.rs +++ b/protocol/flows/src/flowcontext/transactions.rs @@ -10,17 +10,16 @@ use kaspa_p2p_lib::{ }; use std::time::{Duration, Instant}; -const REBROADCAST_INTERVAL: Duration = Duration::from_secs(30); -const EXPIRE_INTERVAL: Duration = Duration::from_secs(10); +const CLEANING_TASK_INTERVAL: Duration = Duration::from_secs(10); +const REBROADCAST_FREQUENCY: u64 = 3; const BROADCAST_INTERVAL: Duration = Duration::from_millis(500); pub(crate) const MAX_INV_PER_TX_INV_MSG: usize = 131_072; pub struct TransactionsSpread { hub: Hub, - last_rebroadcast_time: Instant, - executing_rebroadcast: bool, - last_expire_time: Instant, - executing_expire: bool, + last_cleaning_time: Instant, + cleaning_task_running: bool, + cleaning_count: u64, transaction_ids: ProcessQueue, last_broadcast_time: Instant, } @@ -29,45 +28,37 @@ impl TransactionsSpread { pub fn new(hub: Hub) -> Self { Self { hub, - last_rebroadcast_time: Instant::now(), - executing_rebroadcast: false, - last_expire_time: Instant::now(), - executing_expire: false, + last_cleaning_time: Instant::now(), + cleaning_task_running: false, + cleaning_count: 0, transaction_ids: ProcessQueue::new(), last_broadcast_time: Instant::now(), } } - /// Returns true if the time for a rebroadcast of the mempool high priority transactions has come. - pub fn should_rebroadcast_transactions(&mut self) -> bool { - if self.executing_rebroadcast || Instant::now() < self.last_rebroadcast_time + REBROADCAST_INTERVAL { + /// Returns true if the time has come for running the task cleaning mempool transactions + /// and if so, mark the task as running. + pub fn should_run_cleaning_task(&mut self) -> bool { + if self.cleaning_task_running || Instant::now() < self.last_cleaning_time + CLEANING_TASK_INTERVAL { return false; } - self.executing_rebroadcast = true; + self.cleaning_task_running = true; true } - pub fn rebroadcast_done(&mut self) { - if self.executing_rebroadcast { - self.executing_rebroadcast = false; - self.last_rebroadcast_time = Instant::now(); - } - } - - /// Returns true if the time for expiring the mempool low priority transactions has come. - pub fn should_expire_transactions(&mut self) -> bool { - if self.executing_expire || Instant::now() < self.last_expire_time + EXPIRE_INTERVAL { - return false; - } - self.executing_expire = true; - true + /// Returns true if the time for a rebroadcast of the mempool high priority transactions has come. + pub fn should_rebroadcast(&self) -> bool { + Instant::now() >= self.last_cleaning_time + CLEANING_TASK_INTERVAL && self.cleaning_count % REBROADCAST_FREQUENCY == 0 } - pub fn expire_done(&mut self) { - if self.executing_expire { - self.executing_expire = false; - self.last_expire_time = Instant::now(); + pub fn cleaning_is_done(&mut self) { + assert!(self.cleaning_task_running, "no stop without a matching start"); + // Keep launching the cleaning task respecting the exact intervals + while self.last_cleaning_time <= Instant::now() { + self.last_cleaning_time += CLEANING_TASK_INTERVAL; } + self.cleaning_count += 1; + self.cleaning_task_running = false; } /// Add the given transactions IDs to a set of IDs to broadcast. 
The IDs will be broadcasted to all peers From 55273ab2c690b025dc2f33276c5b761697731049 Mon Sep 17 00:00:00 2001 From: Tiram <18632023+tiram88@users.noreply.github.com> Date: Thu, 14 Sep 2023 18:41:15 +0300 Subject: [PATCH 41/86] Refine the schedule of the cleaning task --- mining/src/manager.rs | 5 +++-- protocol/flows/src/flow_context.rs | 8 +++++++- protocol/flows/src/flowcontext/transactions.rs | 17 +++++++++++------ 3 files changed, 21 insertions(+), 9 deletions(-) diff --git a/mining/src/manager.rs b/mining/src/manager.rs index 6f7b7fd6a5..47497c37a2 100644 --- a/mining/src/manager.rs +++ b/mining/src/manager.rs @@ -455,6 +455,7 @@ impl MiningManager { pub fn expire_low_priority_transactions(&self, consensus: &dyn ConsensusApi) { // very fine-grained write locks on mempool + debug!("<> Expiring low priority transactions..."); // orphan pool if let Err(err) = self.mempool.write().expire_orphan_low_priority_transactions(consensus) { @@ -494,9 +495,9 @@ impl MiningManager { // Prepare a vector with clones of high priority transactions found in the mempool let mempool = self.mempool.read(); if mempool.has_transactions_with_priority(Priority::High) { - info!("Revalidating high priority transactions..."); + debug!("<> Revalidating high priority transactions..."); } else { - debug!("Revalidating high priority transactions found no transactions"); + debug!("<> Revalidating high priority transactions found no transactions"); return; } let transactions = mempool.all_transactions_with_priority(Priority::High); diff --git a/protocol/flows/src/flow_context.rs b/protocol/flows/src/flow_context.rs index e61138b298..d08d0b7f8e 100644 --- a/protocol/flows/src/flow_context.rs +++ b/protocol/flows/src/flow_context.rs @@ -384,6 +384,7 @@ impl FlowContext { let mining_manager = self.mining_manager().clone(); let consensus_clone = consensus.clone(); let context = self.clone(); + debug!("<> Starting cleaning task #{}...", self.cleaning_count().await); tokio::spawn(async move { mining_manager.clone().expire_low_priority_transactions(&consensus_clone).await; if context.should_rebroadcast().await { @@ -396,6 +397,7 @@ impl FlowContext { } } context.cleaning_is_done().await; + debug!("<> Cleaning task is done"); }); } @@ -447,7 +449,11 @@ impl FlowContext { /// Returns true if the time has come for a rebroadcast of the mempool high priority transactions. async fn should_rebroadcast(&self) -> bool { - self.transactions_spread.write().await.should_rebroadcast() + self.transactions_spread.read().await.should_rebroadcast() + } + + async fn cleaning_count(&self) -> u64 { + self.transactions_spread.read().await.cleaning_count() } async fn cleaning_is_done(&self) { diff --git a/protocol/flows/src/flowcontext/transactions.rs b/protocol/flows/src/flowcontext/transactions.rs index c22e3533b0..2927152fa0 100644 --- a/protocol/flows/src/flowcontext/transactions.rs +++ b/protocol/flows/src/flowcontext/transactions.rs @@ -42,22 +42,27 @@ impl TransactionsSpread { if self.cleaning_task_running || Instant::now() < self.last_cleaning_time + CLEANING_TASK_INTERVAL { return false; } + // Keep the launching times aligned to exact intervals + let call_time = Instant::now(); + while self.last_cleaning_time + CLEANING_TASK_INTERVAL < call_time { + self.last_cleaning_time += CLEANING_TASK_INTERVAL; + } + self.cleaning_count += 1; self.cleaning_task_running = true; true } /// Returns true if the time for a rebroadcast of the mempool high priority transactions has come. 
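
// The schedule implied by CLEANING_TASK_INTERVAL and REBROADCAST_FREQUENCY:
// clean every interval, catching up in whole intervals when a run is late, and
// fold a rebroadcast into every third cleaning. A sketch of that arithmetic,
// mirroring the patch's 10s / 3 constants (the single-flight flag is omitted):

use std::time::{Duration, Instant};

const INTERVAL: Duration = Duration::from_secs(10);
const REBROADCAST_EVERY: u64 = 3;

struct CleaningSchedule {
    last_run: Instant,
    run_count: u64,
}

impl CleaningSchedule {
    /// Returns (run_cleaning, also_rebroadcast) for the current tick.
    fn tick(&mut self) -> (bool, bool) {
        let now = Instant::now();
        if now < self.last_run + INTERVAL {
            return (false, false);
        }
        // Advance in whole intervals so a late run does not drift the schedule
        while self.last_run + INTERVAL < now {
            self.last_run += INTERVAL;
        }
        self.run_count += 1;
        (true, self.run_count % REBROADCAST_EVERY == 0)
    }
}
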
    pub fn should_rebroadcast(&self) -> bool {
-        Instant::now() >= self.last_cleaning_time + CLEANING_TASK_INTERVAL && self.cleaning_count % REBROADCAST_FREQUENCY == 0
+        self.cleaning_count % REBROADCAST_FREQUENCY == 0
+    }
+
+    pub fn cleaning_count(&self) -> u64 {
+        self.cleaning_count
     }
 
     pub fn cleaning_is_done(&mut self) {
         assert!(self.cleaning_task_running, "no stop without a matching start");
-        // Keep launching the cleaning task respecting the exact intervals
-        while self.last_cleaning_time <= Instant::now() {
-            self.last_cleaning_time += CLEANING_TASK_INTERVAL;
-        }
-        self.cleaning_count += 1;
         self.cleaning_task_running = false;
     }

From a9a93a06275a9776754c5566e262b42fd420f9bf Mon Sep 17 00:00:00 2001
From: Michael Sutton
Date: Fri, 15 Sep 2023 12:30:27 +0300
Subject: [PATCH 42/86] ignore perf logs

---
 .gitignore | 1 +
 1 file changed, 1 insertion(+)

diff --git a/.gitignore b/.gitignore
index c823c21b88..8ee71e433b 100644
--- a/.gitignore
+++ b/.gitignore
@@ -10,3 +10,4 @@ analyzer-target
 .DS_Store
 *.code-workspace
 /setup
+testing/integration/perflogs*

From 782315f4ee931d137b16c6daba486bab506845e8 Mon Sep 17 00:00:00 2001
From: Michael Sutton
Date: Fri, 15 Sep 2023 12:31:41 +0300
Subject: [PATCH 43/86] maintain mempool ready transactions in a dedicated set

---
 mining/src/mempool/model/transactions_pool.rs | 43 +++++++++++++------
 1 file changed, 29 insertions(+), 14 deletions(-)

diff --git a/mining/src/mempool/model/transactions_pool.rs b/mining/src/mempool/model/transactions_pool.rs
index 80c5c40345..c99acd32fb 100644
--- a/mining/src/mempool/model/transactions_pool.rs
+++ b/mining/src/mempool/model/transactions_pool.rs
@@ -18,7 +18,7 @@ use kaspa_consensus_core::{
 };
 use kaspa_core::{time::unix_now, trace, warn};
 use std::{
-    collections::{hash_map::Keys, hash_set::Iter},
+    collections::{hash_map::Keys, hash_set::Iter, HashSet},
     sync::Arc,
 };
 
@@ -53,6 +53,8 @@ pub(crate) struct TransactionsPool {
     parent_transactions: TransactionsEdges,
     /// Transactions dependencies formed by outputs present in pool - successor relations.
     chained_transactions: TransactionsEdges,
+    /// Transactions with no parents in the mempool -- ready to be inserted into a block template
+    ready_transactions: HashSet<TransactionId>,
 
     last_expire_scan_daa_score: u64,
     /// last expire scan time in milliseconds
@@ -69,6 +71,7 @@ impl TransactionsPool {
             all_transactions: MempoolTransactionCollection::default(),
             parent_transactions: TransactionsEdges::default(),
             chained_transactions: TransactionsEdges::default(),
+            ready_transactions: Default::default(),
             last_expire_scan_daa_score: 0,
             last_expire_scan_time: unix_now(),
             utxo_set: MempoolUtxoSet::new(),
@@ -101,6 +104,9 @@ impl TransactionsPool {
         // here yet since, by definition, they would have been orphans.
let parents = self.get_parent_transaction_ids_in_pool(&transaction.mtx); self.parent_transactions.insert(id, parents.clone()); + if parents.is_empty() { + self.ready_transactions.insert(id); + } for parent_id in parents { let entry = self.chained_mut().entry(parent_id).or_default(); entry.insert(id); @@ -121,6 +127,9 @@ impl TransactionsPool { // Remove the bijective parent/chained relation if let Some(parents) = self.parent_transactions.get_mut(transaction_id) { found = parents.remove(parent_id); + if parents.is_empty() { + self.ready_transactions.insert(*transaction_id); + } } if let Some(chains) = self.chained_transactions.get_mut(parent_id) { found = chains.remove(transaction_id) || found; @@ -141,34 +150,40 @@ impl TransactionsPool { for chain in chains.iter() { if let Some(parents) = self.parent_transactions.get_mut(chain) { parents.remove(transaction_id); + if parents.is_empty() { + self.ready_transactions.insert(*chain); + } } } } self.parent_transactions.remove(transaction_id); self.chained_transactions.remove(transaction_id); + self.ready_transactions.remove(transaction_id); // Remove the transaction itself self.all_transactions.remove(transaction_id).ok_or(RuleError::RejectMissingTransaction(*transaction_id)) } /// Is the mempool transaction identified by `transaction_id` ready for being inserted into a block template? - pub(crate) fn is_transaction_ready(&self, transaction_id: &TransactionId) -> bool { - if self.all_transactions.contains_key(transaction_id) { - if let Some(parents) = self.parent_transactions.get(transaction_id) { - return parents.is_empty(); - } - return true; - } - false - } + // fn is_transaction_ready(&self, transaction_id: &TransactionId) -> bool { + // if let Some(parents) = self.parent_transactions.get(transaction_id) { + // return parents.is_empty(); + // } + // true + // } /// all_ready_transactions returns all fully populated mempool transactions having no parents in the mempool. /// These transactions are ready for being inserted in a block template. 
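
// The dedicated set maintains a single invariant: an id is in ready_transactions
// iff its parent_transactions entry is empty. A sketch of the two update paths
// that preserve it (simplified ids, illustrative only):

use std::collections::{HashMap, HashSet};

struct ReadyIndex {
    parents: HashMap<u64, HashSet<u64>>,
    ready: HashSet<u64>,
}

impl ReadyIndex {
    fn insert(&mut self, id: u64, parent_ids: HashSet<u64>) {
        if parent_ids.is_empty() {
            self.ready.insert(id);
        }
        self.parents.insert(id, parent_ids);
    }

    /// Called when `parent_id` leaves the pool: `child` may become ready.
    fn remove_parent(&mut self, child: u64, parent_id: u64) {
        if let Some(remaining) = self.parents.get_mut(&child) {
            remaining.remove(&parent_id);
            if remaining.is_empty() {
                self.ready.insert(child);
            }
        }
    }
}
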
pub(crate) fn all_ready_transactions(&self) -> Vec { // The returned transactions are leaving the mempool so they are cloned - self.all_transactions - .values() - .filter_map(|x| if self.is_transaction_ready(&x.id()) { Some(CandidateTransaction::from_mutable(&x.mtx)) } else { None }) + // self.all_transactions + // .values() + // .filter_map(|x| if self.is_transaction_ready(&x.id()) { Some(CandidateTransaction::from_mutable(&x.mtx)) } else { None }) + // .collect() + + self.ready_transactions + .iter() + .map(|id| CandidateTransaction::from_mutable(&self.all_transactions.get(id).unwrap().mtx)) .collect() } @@ -262,7 +277,7 @@ impl TransactionsPool { self.utxo_set.remove_transaction(transaction, &parent_ids) } - pub(crate) fn collect_expired_low_priority_transactions(&mut self, virtual_daa_score: u64) -> Vec { + pub(crate) fn collect_expired_low_priority_transactions(&self, virtual_daa_score: u64) -> Vec { let now = unix_now(); if virtual_daa_score < self.last_expire_scan_daa_score + self.config.transaction_expire_scan_interval_daa_score || now < self.last_expire_scan_time + self.config.transaction_expire_scan_interval_milliseconds From 0614dcb5688042c70d01b9bd79cf3e36a41430b3 Mon Sep 17 00:00:00 2001 From: Tiram <18632023+tiram88@users.noreply.github.com> Date: Fri, 15 Sep 2023 13:59:54 +0300 Subject: [PATCH 44/86] Bound the returned candidate transactions to a maximum --- mining/src/mempool/config.rs | 5 +++++ mining/src/mempool/model/transactions_pool.rs | 14 +------------- 2 files changed, 6 insertions(+), 13 deletions(-) diff --git a/mining/src/mempool/config.rs b/mining/src/mempool/config.rs index d8cffb1234..58b9a18b0a 100644 --- a/mining/src/mempool/config.rs +++ b/mining/src/mempool/config.rs @@ -1,6 +1,7 @@ use kaspa_consensus_core::constants::TX_VERSION; pub(crate) const DEFAULT_MAXIMUM_TRANSACTION_COUNT: u64 = 1_000_000; +pub(crate) const DEFAULT_MAXIMUM_READY_TRANSACTION_COUNT: u64 = 100_000; pub(crate) const DEFAULT_TRANSACTION_EXPIRE_INTERVAL_SECONDS: u64 = 60; pub(crate) const DEFAULT_TRANSACTION_EXPIRE_SCAN_INTERVAL_SECONDS: u64 = 10; @@ -28,6 +29,7 @@ pub(crate) const DEFAULT_MAXIMUM_STANDARD_TRANSACTION_VERSION: u16 = TX_VERSION; #[derive(Clone, Debug)] pub struct Config { pub maximum_transaction_count: u64, + pub maximum_ready_transaction_count: u64, pub transaction_expire_interval_daa_score: u64, pub transaction_expire_scan_interval_daa_score: u64, pub transaction_expire_scan_interval_milliseconds: u64, @@ -49,6 +51,7 @@ impl Config { #[allow(clippy::too_many_arguments)] pub fn new( maximum_transaction_count: u64, + maximum_candidate_transaction_count: u64, transaction_expire_interval_daa_score: u64, transaction_expire_scan_interval_daa_score: u64, transaction_expire_scan_interval_milliseconds: u64, @@ -67,6 +70,7 @@ impl Config { ) -> Self { Self { maximum_transaction_count, + maximum_ready_transaction_count: maximum_candidate_transaction_count, transaction_expire_interval_daa_score, transaction_expire_scan_interval_daa_score, transaction_expire_scan_interval_milliseconds, @@ -90,6 +94,7 @@ impl Config { pub fn build_default(target_milliseconds_per_block: u64, relay_non_std_transactions: bool, max_block_mass: u64) -> Self { Self { maximum_transaction_count: DEFAULT_MAXIMUM_TRANSACTION_COUNT, + maximum_ready_transaction_count: DEFAULT_MAXIMUM_READY_TRANSACTION_COUNT, transaction_expire_interval_daa_score: DEFAULT_TRANSACTION_EXPIRE_INTERVAL_SECONDS * 1000 / target_milliseconds_per_block, transaction_expire_scan_interval_daa_score: 
DEFAULT_TRANSACTION_EXPIRE_SCAN_INTERVAL_SECONDS * 1000 / target_milliseconds_per_block, diff --git a/mining/src/mempool/model/transactions_pool.rs b/mining/src/mempool/model/transactions_pool.rs index c99acd32fb..9ef694d9bc 100644 --- a/mining/src/mempool/model/transactions_pool.rs +++ b/mining/src/mempool/model/transactions_pool.rs @@ -164,25 +164,13 @@ impl TransactionsPool { self.all_transactions.remove(transaction_id).ok_or(RuleError::RejectMissingTransaction(*transaction_id)) } - /// Is the mempool transaction identified by `transaction_id` ready for being inserted into a block template? - // fn is_transaction_ready(&self, transaction_id: &TransactionId) -> bool { - // if let Some(parents) = self.parent_transactions.get(transaction_id) { - // return parents.is_empty(); - // } - // true - // } - /// all_ready_transactions returns all fully populated mempool transactions having no parents in the mempool. /// These transactions are ready for being inserted in a block template. pub(crate) fn all_ready_transactions(&self) -> Vec { // The returned transactions are leaving the mempool so they are cloned - // self.all_transactions - // .values() - // .filter_map(|x| if self.is_transaction_ready(&x.id()) { Some(CandidateTransaction::from_mutable(&x.mtx)) } else { None }) - // .collect() - self.ready_transactions .iter() + .take(self.config.maximum_ready_transaction_count as usize) .map(|id| CandidateTransaction::from_mutable(&self.all_transactions.get(id).unwrap().mtx)) .collect() } From 21e3d1c9518650e67a25e23f6291afb50f2d89dd Mon Sep 17 00:00:00 2001 From: Tiram <18632023+tiram88@users.noreply.github.com> Date: Sun, 17 Sep 2023 23:21:38 +0300 Subject: [PATCH 45/86] Reduces the max execution time of build block template --- mining/src/block_template/builder.rs | 42 +++++---- mining/src/block_template/model/tx.rs | 15 ++-- mining/src/block_template/selector.rs | 49 +++++++++- mining/src/cache.rs | 3 +- mining/src/manager.rs | 89 +++++++++++++------ mining/src/manager_tests.rs | 83 +++++++++++++---- .../src/mempool/check_transaction_standard.rs | 7 +- mining/src/mempool/config.rs | 15 +++- mining/src/mempool/mod.rs | 15 ++-- mining/src/mempool/model/transactions_pool.rs | 8 +- 10 files changed, 235 insertions(+), 91 deletions(-) diff --git a/mining/src/block_template/builder.rs b/mining/src/block_template/builder.rs index c301fdf0b1..5348c653a8 100644 --- a/mining/src/block_template/builder.rs +++ b/mining/src/block_template/builder.rs @@ -1,7 +1,11 @@ use super::{errors::BuilderResult, policy::Policy}; use crate::{block_template::selector::TransactionsSelector, model::candidate_tx::CandidateTransaction}; use kaspa_consensus_core::{ - api::ConsensusApi, block::BlockTemplate, coinbase::MinerData, merkle::calc_hash_merkle_root, tx::COINBASE_TRANSACTION_INDEX, + api::ConsensusApi, + block::BlockTemplate, + coinbase::MinerData, + merkle::calc_hash_merkle_root, + tx::{TransactionId, COINBASE_TRANSACTION_INDEX}, }; use kaspa_core::{ debug, @@ -10,12 +14,15 @@ use kaspa_core::{ pub(crate) struct BlockTemplateBuilder { policy: Policy, + selector: TransactionsSelector, } impl BlockTemplateBuilder { - pub(crate) fn new(max_block_mass: u64) -> Self { + pub(crate) fn new(max_block_mass: u64, transactions: Vec) -> Self { + let _sw = Stopwatch::<50>::with_threshold("BlockTemplateBuilder::new"); let policy = Policy::new(max_block_mass); - Self { policy } + let selector = TransactionsSelector::new(policy.clone(), transactions); + Self { policy, selector } } /// BuildBlockTemplate creates a block template for 
a miner to consume @@ -82,21 +89,31 @@ impl BlockTemplateBuilder { /// | <= policy.BlockMinSize) | | /// ----------------------------------- -- pub(crate) fn build_block_template( - &self, + &mut self, consensus: &dyn ConsensusApi, miner_data: &MinerData, - transactions: Vec, ) -> BuilderResult { - let _sw = Stopwatch::<200>::with_threshold("build_block_template op"); - debug!("Considering {} transactions for a new block template", transactions.len()); - let mut selector = TransactionsSelector::new(self.policy.clone(), transactions); - let block_txs = selector.select_transactions(); + let _sw = Stopwatch::<20>::with_threshold("build_block_template op"); + debug!("Considering {} transactions for a new block template", self.selector.len()); + let block_txs = self.selector.select_transactions(); Ok(consensus.build_block_template(miner_data.clone(), block_txs)?) } + pub(crate) fn update_transactions(&mut self, transactions: Vec) { + let selector = TransactionsSelector::new(self.policy.clone(), transactions); + self.selector = selector; + } + + pub(crate) fn reject_transaction(&mut self, transaction_id: TransactionId) { + self.selector.reject(transaction_id); + } + + pub(crate) fn candidates_len(&self) -> usize { + self.selector.len() + } + /// modify_block_template clones an existing block template, modifies it to the requested coinbase data and updates the timestamp pub(crate) fn modify_block_template( - &self, consensus: &dyn ConsensusApi, new_miner_data: &MinerData, block_template_to_modify: &BlockTemplate, @@ -123,9 +140,4 @@ impl BlockTemplateBuilder { block_template.miner_data = new_miner_data.clone(); Ok(block_template) } - - #[inline(always)] - pub fn max_block_mass(&self) -> u64 { - self.policy.max_block_mass - } } diff --git a/mining/src/block_template/model/tx.rs b/mining/src/block_template/model/tx.rs index 6dc95dc440..dee461f633 100644 --- a/mining/src/block_template/model/tx.rs +++ b/mining/src/block_template/model/tx.rs @@ -1,11 +1,14 @@ pub(crate) struct SelectableTransaction { pub(crate) gas_limit: u64, pub(crate) p: f64, + + /// Has this candidate been rejected by the consensus? 
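
// Rejection support boils down to skipping rejected entries while the cumulative
// probability ranges are rebuilt, so a rejected candidate can no longer be drawn.
// A sketch with plain weights (illustrative only):

struct WeightedEntry {
    weight: f64,
    is_rejected: bool,
}

/// Returns one (index, range_start, range_end) triple per live entry, plus the total mass.
fn build_ranges(entries: &[WeightedEntry]) -> (Vec<(usize, f64, f64)>, f64) {
    let mut total = 0.0;
    let mut ranges = Vec::with_capacity(entries.len());
    for (i, e) in entries.iter().enumerate() {
        if !e.is_rejected {
            ranges.push((i, total, total + e.weight));
            total += e.weight;
        }
    }
    (ranges, total)
}
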
+ pub(crate) is_rejected: bool, } impl SelectableTransaction { pub(crate) fn new(tx_value: f64, gas_limit: u64, alpha: i32) -> Self { - Self { gas_limit, p: tx_value.powi(alpha) } + Self { gas_limit, p: tx_value.powi(alpha), is_rejected: false } } } @@ -42,10 +45,12 @@ impl CandidateList { let mut candidates = Vec::with_capacity(selectable_txs.len()); let mut total_p = 0.0; selectable_txs.iter().enumerate().for_each(|(i, tx)| { - let current_p = tx.p; - let candidate = Candidate::new(i, total_p, total_p + current_p); - candidates.push(candidate); - total_p += current_p; + if !tx.is_rejected { + let current_p = tx.p; + let candidate = Candidate::new(i, total_p, total_p + current_p); + candidates.push(candidate); + total_p += current_p; + } }); Self { candidates, total_p } } diff --git a/mining/src/block_template/selector.rs b/mining/src/block_template/selector.rs index ed421327e8..5472e5499c 100644 --- a/mining/src/block_template/selector.rs +++ b/mining/src/block_template/selector.rs @@ -1,6 +1,9 @@ use kaspa_core::{time::Stopwatch, trace}; use rand::Rng; -use std::{collections::HashMap, vec}; +use std::{ + collections::{HashMap, HashSet}, + vec, +}; use crate::model::candidate_tx::CandidateTransaction; @@ -8,7 +11,10 @@ use super::{ model::tx::{CandidateList, SelectableTransaction, SelectableTransactions, TransactionIndex}, policy::Policy, }; -use kaspa_consensus_core::{subnets::SubnetworkId, tx::Transaction}; +use kaspa_consensus_core::{ + subnets::SubnetworkId, + tx::{Transaction, TransactionId}, +}; /// ALPHA is a coefficient that defines how uniform the distribution of /// candidate transactions should be. A smaller alpha makes the distribution @@ -31,6 +37,9 @@ pub(crate) struct TransactionsSelector { /// Selectable transactions store selectable_txs: SelectableTransactions, + /// Indexes of transactions keys in stores + rejected_txs: HashSet, + /// Indexes of selected transactions in stores selected_txs: Vec, total_mass: u64, @@ -44,7 +53,15 @@ impl TransactionsSelector { transactions.sort_by(|a, b| a.tx.subnetwork_id.cmp(&b.tx.subnetwork_id)); // Create the object without selectable transactions - let mut selector = Self { policy, transactions, selectable_txs: vec![], selected_txs: vec![], total_mass: 0, total_fees: 0 }; + let mut selector = Self { + policy, + transactions, + selectable_txs: vec![], + rejected_txs: Default::default(), + selected_txs: vec![], + total_mass: 0, + total_fees: 0, + }; // Create the selectable transactions selector.selectable_txs = @@ -53,6 +70,10 @@ impl TransactionsSelector { selector } + pub(crate) fn len(&self) -> usize { + self.transactions.len() - self.rejected_txs.len() + } + /// select_transactions implements a probabilistic transaction selection algorithm. /// The algorithm, roughly, is as follows: /// 1. We assign a probability to each transaction equal to: @@ -74,7 +95,7 @@ impl TransactionsSelector { /// and appends the ones that will be included in the next block into /// selected_txs. 
pub(crate) fn select_transactions(&mut self) -> Vec { - let _sw = Stopwatch::<100>::with_threshold("select_transaction op"); + let _sw = Stopwatch::<15>::with_threshold("select_transaction op"); let mut rng = rand::thread_rng(); self.reset(); @@ -178,9 +199,29 @@ impl TransactionsSelector { self.selected_txs.iter().map(|x| self.transactions[*x].tx.as_ref().clone()).collect() } + pub(crate) fn reject(&mut self, transaction_id: TransactionId) { + self.rejected_txs.insert(transaction_id); + } + + fn commit_rejections(&mut self) { + let _sw = Stopwatch::<5>::with_threshold("commit_rejections op"); + if self.rejected_txs.is_empty() { + return; + } + for (index, tx) in self.transactions.iter().enumerate() { + if !self.selectable_txs[index].is_rejected && self.rejected_txs.remove(&tx.tx.id()) { + self.selectable_txs[index].is_rejected = true; + if self.rejected_txs.is_empty() { + break; + } + } + } + } + fn reset(&mut self) { assert_eq!(self.transactions.len(), self.selectable_txs.len()); self.selected_txs = Vec::with_capacity(self.transactions.len()); + self.commit_rejections(); } /// calc_tx_value calculates a value to be used in transaction selection. diff --git a/mining/src/cache.rs b/mining/src/cache.rs index b6fae4f18e..ce8a1ffde5 100644 --- a/mining/src/cache.rs +++ b/mining/src/cache.rs @@ -6,8 +6,9 @@ use std::sync::{ Arc, }; +// TODO: revisit this value /// CACHE_LIFETIME indicates the default duration in milliseconds after which the cached data expires. -const DEFAULT_CACHE_LIFETIME: u64 = 1_000; +const DEFAULT_CACHE_LIFETIME: u64 = 15; pub(crate) struct Inner { /// Time, in milliseconds, at which the cache was last updated diff --git a/mining/src/manager.rs b/mining/src/manager.rs index 47497c37a2..ef5a6da677 100644 --- a/mining/src/manager.rs +++ b/mining/src/manager.rs @@ -33,9 +33,9 @@ use std::sync::Arc; use tokio::sync::mpsc::UnboundedSender; pub struct MiningManager { - block_template_builder: BlockTemplateBuilder, + config: Arc, block_template_cache: BlockTemplateCache, - pub(crate) mempool: RwLock, + mempool: RwLock, } impl MiningManager { @@ -50,10 +50,10 @@ impl MiningManager { } pub(crate) fn with_config(config: Config, cache_lifetime: Option) -> Self { - let block_template_builder = BlockTemplateBuilder::new(config.maximum_mass_per_block); - let mempool = RwLock::new(Mempool::new(config)); + let config = Arc::new(config); + let mempool = RwLock::new(Mempool::new(config.clone())); let block_template_cache = BlockTemplateCache::new(cache_lifetime); - Self { block_template_builder, block_template_cache, mempool } + Self { config, block_template_cache, mempool } } pub fn get_block_template(&self, consensus: &dyn ConsensusApi, miner_data: &MinerData) -> MiningManagerResult { @@ -68,7 +68,7 @@ impl MiningManager { } // Miner data is new -- make the minimum changes required // Note the call returns a modified clone of the cached block template - let block_template = self.block_template_builder.modify_block_template(consensus, miner_data, &immutable_template)?; + let block_template = BlockTemplateBuilder::modify_block_template(consensus, miner_data, &immutable_template)?; // No point in updating cache since we have no reason to believe this coinbase will be used more // than the previous one, and we want to maintain the original template caching time @@ -80,25 +80,34 @@ impl MiningManager { // mempool.BlockCandidateTransactions and mempool.RemoveTransactions here. // We remove recursion seen in blockTemplateBuilder.BuildBlockTemplate here. 
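
// Drawing a candidate proportionally to its probability mass then amounts to
// sampling a point in [0, total) and binary-searching the cumulative ranges;
// in practice the point would come from a rand uniform sample, a parameter is
// used here to stay dependency-free (sketch only):

/// `ranges` holds (index, range_start, range_end) triples sorted by range_start.
fn pick(ranges: &[(usize, f64, f64)], point: f64) -> Option<usize> {
    // partition_point returns the first position whose range starts beyond `point`;
    // the range just before it is the one containing `point`.
    let pos = ranges.partition_point(|&(_, start, _)| start <= point);
    ranges.get(pos.wrapping_sub(1)).map(|&(idx, _, _)| idx)
}
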
debug!("Building a new block template..."); - let mut retries: usize = 0; + let _swo = Stopwatch::<22>::with_threshold("build_block_template full loop"); + let mut attempts: u64 = 0; + let transactions = self.block_candidate_transactions(); + let mut block_template_builder = BlockTemplateBuilder::new(self.config.maximum_mass_per_block, transactions); loop { - let transactions = self.block_candidate_transactions(); - match self.block_template_builder.build_block_template(consensus, miner_data, transactions) { + attempts += 1; + + // TODO: consider a parameter forcing the consensus to build a template with the remaining successfully validated transactions + // + // let force_build = attempts == self.config.maximum_build_block_template_attempts; + // match block_template_builder.build_block_template(consensus, miner_data, force_build) { + + match block_template_builder.build_block_template(consensus, miner_data) { Ok(block_template) => { let block_template = cache_lock.set_immutable_cached_template(block_template); - match retries { - 0 => { + match attempts { + 1 => { debug!("Built a new block template with {} transactions", block_template.block.transactions.len()); } - 1 => { + 2 => { debug!( - "Built a new block template with {} transactions after one retry", + "Built a new block template with {} transactions at second attempt", block_template.block.transactions.len() ); } n => { debug!( - "Built a new block template with {} transactions after {} retries", + "Built a new block template with {} transactions in {} attempts", block_template.block.transactions.len(), n ); @@ -107,10 +116,21 @@ impl MiningManager { return Ok(block_template.as_ref().clone()); } Err(BuilderError::ConsensusError(BlockRuleError::InvalidTransactionsInNewBlock(invalid_transactions))) => { - let mut mempool_write = self.mempool.write(); + // Do not refetch candidates if not absolutely necessary so we do not lock the mempool + // and optimize for the quickest possible resolution + let keep_candidates = block_template_builder.candidates_len() + > self.config.ready_transactions_refetch_limit + invalid_transactions.len() + || attempts + 1 >= self.config.maximum_build_block_template_attempts; + let mut missing_outpoint: usize = 0; let mut invalid: usize = 0; + + let mut mempool_write = self.mempool.write(); invalid_transactions.iter().for_each(|(x, err)| { + if keep_candidates { + block_template_builder.reject_transaction(*x); + } + // On missing outpoints, the most likely is that the tx was already in a block accepted by // the consensus but not yet processed by handle_new_block_transactions(). Another possibility // is a double spend. In both cases, we simply remove the transaction but keep its redeemers. @@ -121,11 +141,13 @@ impl MiningManager { // redeemers being unexpectedly either orphaned or rejected in case orphans are disallowed. // // For all other errors, we do remove the redeemers. 
+ let removal_result = if *err == TxRuleError::MissingTxOutpoints { missing_outpoint += 1; mempool_write.remove_transaction(x, false, TxRemovalReason::Muted, "") } else { invalid += 1; + warn!("Remove per BBT invalid transaction and descendants"); mempool_write.remove_transaction( x, true, @@ -142,17 +164,24 @@ impl MiningManager { error!("Error from mempool.remove_transactions: {:?}", err); } }); + drop(mempool_write); + debug!( "Building a new block template failed for {} txs missing outpoint and {} invalid txs", missing_outpoint, invalid - ) + ); + + // Refetch candidates if asked to + if !keep_candidates { + let transactions = self.block_candidate_transactions(); + block_template_builder.update_transactions(transactions); + } } Err(err) => { warn!("Building a new block template failed: {}", err); return Err(err)?; } } - retries += 1; } } @@ -166,8 +195,8 @@ impl MiningManager { } #[cfg(test)] - pub(crate) fn block_template_builder(&self) -> &BlockTemplateBuilder { - &self.block_template_builder + pub(crate) fn block_template_builder(&self, transactions: Vec) -> BlockTemplateBuilder { + BlockTemplateBuilder::new(self.config.maximum_mass_per_block, transactions) } /// validate_and_insert_transaction validates the given transaction, and @@ -382,7 +411,7 @@ impl MiningManager { .iter() .position(|tx| { mass += tx.calculated_mass.unwrap(); - mass >= self.block_template_builder.max_block_mass() + mass >= self.config.maximum_mass_per_block }) // Make sure the upper bound is greater than the lower bound, allowing to handle a very unlikely, // (if not impossible) case where the mass of a single transaction is greater than the maximum @@ -413,6 +442,7 @@ impl MiningManager { include_transaction_pool: bool, include_orphan_pool: bool, ) -> (Vec, Vec) { + // TODO: break the monolithic lock self.mempool.read().get_all_transactions(include_transaction_pool, include_orphan_pool) } @@ -426,6 +456,7 @@ impl MiningManager { include_transaction_pool: bool, include_orphan_pool: bool, ) -> GroupedOwnerTransactions { + // TODO: break the monolithic lock self.mempool.read().get_transactions_by_addresses(script_public_keys, include_transaction_pool, include_orphan_pool) } @@ -494,14 +525,20 @@ impl MiningManager { // read lock on mempool // Prepare a vector with clones of high priority transactions found in the mempool let mempool = self.mempool.read(); - if mempool.has_transactions_with_priority(Priority::High) { - debug!("<> Revalidating high priority transactions..."); - } else { + let transaction_ids = mempool.all_transaction_ids_with_priority(Priority::High); + if transaction_ids.is_empty() { debug!("<> Revalidating high priority transactions found no transactions"); return; + } else { + debug!("<> Revalidating high priority transactions..."); } - let transactions = mempool.all_transactions_with_priority(Priority::High); drop(mempool); + // read lock on mempool by transaction chunks + let mut transactions = Vec::with_capacity(transaction_ids.len()); + for chunk in &transaction_ids.iter().chunks(TRANSACTION_CHUNK_SIZE) { + let mempool = self.mempool.read(); + transactions.extend(chunk.filter_map(|x| mempool.get_transaction(x, true, false))); + } let mut valid: usize = 0; let mut accepted: usize = 0; @@ -511,14 +548,14 @@ impl MiningManager { // We process the transactions by level of dependency inside the batch. // Doing so allows to remove all chained dependencies of rejected transactions. 
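
// The chunked locking idiom used for revalidation below: snapshot the ids once,
// then take a short read lock per chunk so concurrent mempool users can
// interleave; entries removed in the meantime are simply skipped. A sketch
// assuming parking_lot::RwLock and a plain map standing in for the pool:

use parking_lot::RwLock;
use std::collections::HashMap;

const CHUNK_SIZE: usize = 256;

fn snapshot_in_chunks(pool: &RwLock<HashMap<u64, String>>, ids: &[u64]) -> Vec<String> {
    let mut out = Vec::with_capacity(ids.len());
    for chunk in ids.chunks(CHUNK_SIZE) {
        let guard = pool.read(); // short-lived lock per chunk
        out.extend(chunk.iter().filter_map(|id| guard.get(id).cloned()));
    }
    out
}
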
- let _swo = Stopwatch::<200>::with_threshold("revalidate topological_sort op"); + let _swo = Stopwatch::<800>::with_threshold("revalidate topological_sort op"); let sorted_transactions = transactions.topological_into_iter(); drop(_swo); // read lock on mempool by transaction chunks // As the revalidation process is no longer atomic, we filter the transactions ready for revalidation, // keeping only the ones actually present in the mempool (see comment above). - let _swo = Stopwatch::<50>::with_threshold("revalidate populate_mempool_entries op"); + let _swo = Stopwatch::<900>::with_threshold("revalidate populate_mempool_entries op"); let mut transactions = Vec::with_capacity(sorted_transactions.len()); for chunk in &sorted_transactions.chunks(TRANSACTION_CHUNK_SIZE) { let mempool = self.mempool.read(); diff --git a/mining/src/manager_tests.rs b/mining/src/manager_tests.rs index 31851a4b2f..10780f08a8 100644 --- a/mining/src/manager_tests.rs +++ b/mining/src/manager_tests.rs @@ -750,12 +750,7 @@ mod tests { }); // Test modify block template - sweep_compare_modified_template_to_built( - consensus.as_ref(), - Prefix::Testnet, - mining_manager.block_template_builder(), - transactions, - ); + sweep_compare_modified_template_to_built(consensus.as_ref(), Prefix::Testnet, &mining_manager, transactions); // TODO: extend the test according to the golang scenario } @@ -763,26 +758,75 @@ mod tests { fn sweep_compare_modified_template_to_built( consensus: &dyn ConsensusApi, address_prefix: Prefix, - builder: &BlockTemplateBuilder, + mining_manager: &MiningManager, transactions: Vec, ) { for _ in 0..4 { // Run a few times to get more randomness - compare_modified_template_to_built(consensus, address_prefix, builder, transactions.clone(), OpType::Usual, OpType::Usual); - compare_modified_template_to_built(consensus, address_prefix, builder, transactions.clone(), OpType::Edcsa, OpType::Edcsa); + compare_modified_template_to_built( + consensus, + address_prefix, + mining_manager, + transactions.clone(), + OpType::Usual, + OpType::Usual, + ); + compare_modified_template_to_built( + consensus, + address_prefix, + mining_manager, + transactions.clone(), + OpType::Edcsa, + OpType::Edcsa, + ); } - compare_modified_template_to_built(consensus, address_prefix, builder, transactions.clone(), OpType::True, OpType::Usual); - compare_modified_template_to_built(consensus, address_prefix, builder, transactions.clone(), OpType::Usual, OpType::True); - compare_modified_template_to_built(consensus, address_prefix, builder, transactions.clone(), OpType::Edcsa, OpType::Usual); - compare_modified_template_to_built(consensus, address_prefix, builder, transactions.clone(), OpType::Usual, OpType::Edcsa); - compare_modified_template_to_built(consensus, address_prefix, builder, transactions.clone(), OpType::Empty, OpType::Usual); - compare_modified_template_to_built(consensus, address_prefix, builder, transactions, OpType::Usual, OpType::Empty); + compare_modified_template_to_built( + consensus, + address_prefix, + mining_manager, + transactions.clone(), + OpType::True, + OpType::Usual, + ); + compare_modified_template_to_built( + consensus, + address_prefix, + mining_manager, + transactions.clone(), + OpType::Usual, + OpType::True, + ); + compare_modified_template_to_built( + consensus, + address_prefix, + mining_manager, + transactions.clone(), + OpType::Edcsa, + OpType::Usual, + ); + compare_modified_template_to_built( + consensus, + address_prefix, + mining_manager, + transactions.clone(), + OpType::Usual, + OpType::Edcsa, + 
); + compare_modified_template_to_built( + consensus, + address_prefix, + mining_manager, + transactions.clone(), + OpType::Empty, + OpType::Usual, + ); + compare_modified_template_to_built(consensus, address_prefix, mining_manager, transactions, OpType::Usual, OpType::Empty); } fn compare_modified_template_to_built( consensus: &dyn ConsensusApi, address_prefix: Prefix, - builder: &BlockTemplateBuilder, + mining_manager: &MiningManager, transactions: Vec, first_op: OpType, second_op: OpType, @@ -791,12 +835,13 @@ mod tests { let miner_data_2 = generate_new_coinbase(address_prefix, second_op); // Build a fresh template for coinbase2 as a reference - let result = builder.build_block_template(consensus, &miner_data_2, transactions); + let mut builder = mining_manager.block_template_builder(transactions); + let result = builder.build_block_template(consensus, &miner_data_2); assert!(result.is_ok(), "build block template failed for miner data 2"); let expected_template = result.unwrap(); // Modify to miner_data_1 - let result = builder.modify_block_template(consensus, &miner_data_1, &expected_template); + let result = BlockTemplateBuilder::modify_block_template(consensus, &miner_data_1, &expected_template); assert!(result.is_ok(), "modify block template failed for miner data 1"); let mut modified_template = result.unwrap(); // Make sure timestamps are equal before comparing the hash @@ -815,7 +860,7 @@ mod tests { assert_ne!(expected_block.hash(), modified_block.hash(), "built and modified blocks should have different hashes"); // And modify back to miner_data_2 - let result = builder.modify_block_template(consensus, &miner_data_2, &modified_template); + let result = BlockTemplateBuilder::modify_block_template(consensus, &miner_data_2, &modified_template); assert!(result.is_ok(), "modify block template failed for miner data 2"); let mut modified_template_2 = result.unwrap(); // Make sure timestamps are equal before comparing the hash diff --git a/mining/src/mempool/check_transaction_standard.rs b/mining/src/mempool/check_transaction_standard.rs index 8550c34689..929cbfd41a 100644 --- a/mining/src/mempool/check_transaction_standard.rs +++ b/mining/src/mempool/check_transaction_standard.rs @@ -238,6 +238,7 @@ mod tests { script_builder::ScriptBuilder, }; use smallvec::smallvec; + use std::sync::Arc; #[test] fn test_calc_min_required_tx_relay_fee() { @@ -281,7 +282,7 @@ mod tests { let params: Params = net.into(); let mut config = Config::build_default(params.target_time_per_block, false, params.max_block_mass); config.minimum_relay_transaction_fee = test.minimum_relay_transaction_fee; - let mempool = Mempool::new(config); + let mempool = Mempool::new(Arc::new(config)); let got = mempool.minimum_required_transaction_relay_fee(test.size); if got != test.want { @@ -365,7 +366,7 @@ mod tests { let params: Params = net.into(); let mut config = Config::build_default(params.target_time_per_block, false, params.max_block_mass); config.minimum_relay_transaction_fee = test.minimum_relay_transaction_fee; - let mempool = Mempool::new(config); + let mempool = Mempool::new(Arc::new(config)); println!("test_is_transaction_output_dust test '{}' ", test.name); let res = mempool.is_transaction_output_dust(&test.tx_out); @@ -543,7 +544,7 @@ mod tests { for net in NetworkType::iter() { let params: Params = net.into(); let config = Config::build_default(params.target_time_per_block, false, params.max_block_mass); - let mempool = Mempool::new(config); + let mempool = Mempool::new(Arc::new(config)); // Ensure 
standard-ness is as expected. println!("test_check_transaction_standard_in_isolation test '{}' ", test.name); diff --git a/mining/src/mempool/config.rs b/mining/src/mempool/config.rs index 58b9a18b0a..3399cfebef 100644 --- a/mining/src/mempool/config.rs +++ b/mining/src/mempool/config.rs @@ -2,6 +2,9 @@ use kaspa_consensus_core::constants::TX_VERSION; pub(crate) const DEFAULT_MAXIMUM_TRANSACTION_COUNT: u64 = 1_000_000; pub(crate) const DEFAULT_MAXIMUM_READY_TRANSACTION_COUNT: u64 = 100_000; +pub(crate) const DEFAULT_MAXIMUM_BUILD_BLOCK_TEMPLATE_ATTEMPTS: u64 = 3; +// TODO: revisit this value +pub(crate) const DEFAULT_READY_TRANSACTIONS_REFETCH_LIMIT: usize = 2_500; pub(crate) const DEFAULT_TRANSACTION_EXPIRE_INTERVAL_SECONDS: u64 = 60; pub(crate) const DEFAULT_TRANSACTION_EXPIRE_SCAN_INTERVAL_SECONDS: u64 = 10; @@ -30,6 +33,8 @@ pub(crate) const DEFAULT_MAXIMUM_STANDARD_TRANSACTION_VERSION: u16 = TX_VERSION; pub struct Config { pub maximum_transaction_count: u64, pub maximum_ready_transaction_count: u64, + pub maximum_build_block_template_attempts: u64, + pub ready_transactions_refetch_limit: usize, pub transaction_expire_interval_daa_score: u64, pub transaction_expire_scan_interval_daa_score: u64, pub transaction_expire_scan_interval_milliseconds: u64, @@ -51,7 +56,9 @@ impl Config { #[allow(clippy::too_many_arguments)] pub fn new( maximum_transaction_count: u64, - maximum_candidate_transaction_count: u64, + maximum_ready_transaction_count: u64, + maximum_build_block_template_attempts: u64, + ready_transactions_refetch_limit: usize, transaction_expire_interval_daa_score: u64, transaction_expire_scan_interval_daa_score: u64, transaction_expire_scan_interval_milliseconds: u64, @@ -70,7 +77,9 @@ impl Config { ) -> Self { Self { maximum_transaction_count, - maximum_ready_transaction_count: maximum_candidate_transaction_count, + maximum_ready_transaction_count, + maximum_build_block_template_attempts, + ready_transactions_refetch_limit, transaction_expire_interval_daa_score, transaction_expire_scan_interval_daa_score, transaction_expire_scan_interval_milliseconds, @@ -95,6 +104,8 @@ impl Config { Self { maximum_transaction_count: DEFAULT_MAXIMUM_TRANSACTION_COUNT, maximum_ready_transaction_count: DEFAULT_MAXIMUM_READY_TRANSACTION_COUNT, + maximum_build_block_template_attempts: DEFAULT_MAXIMUM_BUILD_BLOCK_TEMPLATE_ATTEMPTS, + ready_transactions_refetch_limit: DEFAULT_READY_TRANSACTIONS_REFETCH_LIMIT, transaction_expire_interval_daa_score: DEFAULT_TRANSACTION_EXPIRE_INTERVAL_SECONDS * 1000 / target_milliseconds_per_block, transaction_expire_scan_interval_daa_score: DEFAULT_TRANSACTION_EXPIRE_SCAN_INTERVAL_SECONDS * 1000 / target_milliseconds_per_block, diff --git a/mining/src/mempool/mod.rs b/mining/src/mempool/mod.rs index 95dc66d980..bf15ff2872 100644 --- a/mining/src/mempool/mod.rs +++ b/mining/src/mempool/mod.rs @@ -49,8 +49,7 @@ pub(crate) struct Mempool { } impl Mempool { - pub(crate) fn new(config: Config) -> Self { - let config = Arc::new(config); + pub(crate) fn new(config: Arc) -> Self { let transaction_pool = TransactionsPool::new(config.clone()); let orphan_pool = OrphanPool::new(config.clone()); let accepted_transactions = AcceptedTransactions::new(config.clone()); @@ -127,17 +126,13 @@ impl Mempool { } pub(crate) fn block_candidate_transactions(&self) -> Vec { - let _sw = Stopwatch::<120>::with_threshold("block_candidate_transactions op"); + let _sw = Stopwatch::<10>::with_threshold("block_candidate_transactions op"); self.transaction_pool.all_ready_transactions() } - pub(crate) fn 
all_transactions_with_priority(&self, priority: Priority) -> Vec<MutableTransaction> {
-        let _sw = Stopwatch::<100>::with_threshold("all_transactions_with_priority op");
-        self.transaction_pool.all_transactions_with_priority(priority)
-    }
-
-    pub(crate) fn has_transactions_with_priority(&self, priority: Priority) -> bool {
-        self.transaction_pool.has_transactions_with_priority(priority)
+    pub(crate) fn all_transaction_ids_with_priority(&self, priority: Priority) -> Vec<TransactionId> {
+        let _sw = Stopwatch::<15>::with_threshold("all_transaction_ids_with_priority op");
+        self.transaction_pool.all_transaction_ids_with_priority(priority)
     }
 
     pub(crate) fn update_revalidated_transaction(&mut self, transaction: MutableTransaction) -> bool {
diff --git a/mining/src/mempool/model/transactions_pool.rs b/mining/src/mempool/model/transactions_pool.rs
index 9ef694d9bc..a425339a4b 100644
--- a/mining/src/mempool/model/transactions_pool.rs
+++ b/mining/src/mempool/model/transactions_pool.rs
@@ -244,12 +244,8 @@ impl TransactionsPool {
         self.all().values().map(|x| x.mtx.clone()).collect()
     }
 
-    pub(crate) fn all_transactions_with_priority(&self, priority: Priority) -> Vec<MutableTransaction> {
-        self.all().values().filter_map(|x| if x.priority == priority { Some(x.mtx.clone()) } else { None }).collect()
-    }
-
-    pub(crate) fn has_transactions_with_priority(&self, priority: Priority) -> bool {
-        self.all().values().any(|x| x.priority == priority)
+    pub(crate) fn all_transaction_ids_with_priority(&self, priority: Priority) -> Vec<TransactionId> {
+        self.all().values().filter_map(|x| if x.priority == priority { Some(x.id()) } else { None }).collect()
     }
 
     pub(crate) fn get_outpoint_owner_id(&self, outpoint: &TransactionOutpoint) -> Option<&TransactionId> {

From 9c0acba32a2fd1619a02c105b704be34520f9280 Mon Sep 17 00:00:00 2001
From: Tiram <18632023+tiram88@users.noreply.github.com>
Date: Sun, 17 Sep 2023 23:26:49 +0300
Subject: [PATCH 46/86] lint

---
 mining/src/manager.rs | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/mining/src/manager.rs b/mining/src/manager.rs
index ef5a6da677..067632e648 100644
--- a/mining/src/manager.rs
+++ b/mining/src/manager.rs
@@ -88,7 +88,7 @@ impl MiningManager {
             attempts += 1;
 
             // TODO: consider a parameter forcing the consensus to build a template with the remaining successfully validated transactions
-            // 
+            //
             // let force_build = attempts == self.config.maximum_build_block_template_attempts;
             // match block_template_builder.build_block_template(consensus, miner_data, force_build) {
 
From e320045adb2041fc3a07d6af63a09008c07bd9b9 Mon Sep 17 00:00:00 2001
From: Tiram <18632023+tiram88@users.noreply.github.com>
Date: Mon, 18 Sep 2023 00:41:24 +0300
Subject: [PATCH 47/86] Add mempool lock granularity to get_all_transactions

---
 mining/src/manager.rs                         | 23 +++++++++++++++++--
 mining/src/mempool/mod.rs                     | 20 +++++++++-------
 mining/src/mempool/model/pool.rs              |  5 ++++
 mining/src/mempool/model/transactions_pool.rs |  4 ----
 4 files changed, 38 insertions(+), 14 deletions(-)

diff --git a/mining/src/manager.rs b/mining/src/manager.rs
index 067632e648..cf6e1d03e0 100644
--- a/mining/src/manager.rs
+++ b/mining/src/manager.rs
@@ -429,11 +429,13 @@ impl MiningManager {
         include_transaction_pool: bool,
         include_orphan_pool: bool,
     ) -> Option<MutableTransaction> {
+        assert!(include_transaction_pool || include_orphan_pool, "at least one of either transactions or orphans must be included");
         self.mempool.read().get_transaction(transaction_id, include_transaction_pool, include_orphan_pool)
     }
 
     /// Returns whether the mempool holds this transaction in any form.
pub fn has_transaction(&self, transaction_id: &TransactionId, include_transaction_pool: bool, include_orphan_pool: bool) -> bool { + assert!(include_transaction_pool || include_orphan_pool, "at least one of either transactions or orphans must be included"); self.mempool.read().has_transaction(transaction_id, include_transaction_pool, include_orphan_pool) } @@ -442,8 +444,23 @@ impl MiningManager { include_transaction_pool: bool, include_orphan_pool: bool, ) -> (Vec, Vec) { - // TODO: break the monolithic lock - self.mempool.read().get_all_transactions(include_transaction_pool, include_orphan_pool) + const TRANSACTION_CHUNK_SIZE: usize = 1000; + assert!(include_transaction_pool || include_orphan_pool, "at least one of either transactions or orphans must be included"); + // read lock on mempool by transaction chunks + let transactions = if include_transaction_pool { + let transaction_ids = self.mempool.read().get_all_transaction_ids(true, false).0; + let mut transactions = Vec::with_capacity(self.mempool.read().transaction_count(true, false)); + for chunks in transaction_ids.chunks(TRANSACTION_CHUNK_SIZE) { + let mempool = self.mempool.read(); + transactions.extend(chunks.iter().filter_map(|x| mempool.get_transaction(x, true, false))); + } + transactions + } else { + vec![] + }; + // read lock on mempool + let orphans = if include_orphan_pool { self.mempool.read().get_all_transactions(false, true).1 } else { vec![] }; + (transactions, orphans) } /// get_transactions_by_addresses returns the sending and receiving transactions for @@ -456,11 +473,13 @@ impl MiningManager { include_transaction_pool: bool, include_orphan_pool: bool, ) -> GroupedOwnerTransactions { + assert!(include_transaction_pool || include_orphan_pool, "at least one of either transactions or orphans must be included"); // TODO: break the monolithic lock self.mempool.read().get_transactions_by_addresses(script_public_keys, include_transaction_pool, include_orphan_pool) } pub fn transaction_count(&self, include_transaction_pool: bool, include_orphan_pool: bool) -> usize { + assert!(include_transaction_pool || include_orphan_pool, "at least one of either transactions or orphans must be included"); self.mempool.read().transaction_count(include_transaction_pool, include_orphan_pool) } diff --git a/mining/src/mempool/mod.rs b/mining/src/mempool/mod.rs index bf15ff2872..9cf055a28a 100644 --- a/mining/src/mempool/mod.rs +++ b/mining/src/mempool/mod.rs @@ -87,14 +87,18 @@ impl Mempool { include_transaction_pool: bool, include_orphan_pool: bool, ) -> (Vec, Vec) { - let mut transactions = vec![]; - let mut orphans = vec![]; - if include_transaction_pool { - transactions = self.transaction_pool.get_all_transactions() - } - if include_orphan_pool { - orphans = self.orphan_pool.get_all_transactions() - } + let transactions = if include_transaction_pool { self.transaction_pool.get_all_transactions() } else { vec![] }; + let orphans = if include_orphan_pool { self.orphan_pool.get_all_transactions() } else { vec![] }; + (transactions, orphans) + } + + pub(crate) fn get_all_transaction_ids( + &self, + include_transaction_pool: bool, + include_orphan_pool: bool, + ) -> (Vec, Vec) { + let transactions = if include_transaction_pool { self.transaction_pool.get_all_transaction_ids() } else { vec![] }; + let orphans = if include_orphan_pool { self.orphan_pool.get_all_transaction_ids() } else { vec![] }; (transactions, orphans) } diff --git a/mining/src/mempool/model/pool.rs b/mining/src/mempool/model/pool.rs index 1eb6c87a32..6bdfcbdcef 100644 --- 
a/mining/src/mempool/model/pool.rs +++ b/mining/src/mempool/model/pool.rs @@ -110,6 +110,11 @@ pub(crate) trait Pool { self.all().values().map(|x| x.mtx.clone()).collect() } + /// Returns a vector with ids of all the transactions in the pool. + fn get_all_transaction_ids(&self) -> Vec { + self.all().keys().cloned().collect() + } + /// Fills owner transactions for a set of script public keys. fn fill_owner_set_transactions(&self, script_public_keys: &ScriptPublicKeySet, owner_set: &mut GroupedOwnerTransactions) { script_public_keys.iter().for_each(|script_public_key| { diff --git a/mining/src/mempool/model/transactions_pool.rs b/mining/src/mempool/model/transactions_pool.rs index a425339a4b..d5b4a081af 100644 --- a/mining/src/mempool/model/transactions_pool.rs +++ b/mining/src/mempool/model/transactions_pool.rs @@ -240,10 +240,6 @@ impl TransactionsPool { Ok(transactions_to_remove.iter().map(|x| x.id()).collect()) } - pub(crate) fn get_all_transactions(&self) -> Vec { - self.all().values().map(|x| x.mtx.clone()).collect() - } - pub(crate) fn all_transaction_ids_with_priority(&self, priority: Priority) -> Vec { self.all().values().filter_map(|x| if x.priority == priority { Some(x.id()) } else { None }).collect() } From 84b0476ecfa37f927c35698ab40ec0f37ddc373b Mon Sep 17 00:00:00 2001 From: Tiram <18632023+tiram88@users.noreply.github.com> Date: Mon, 18 Sep 2023 01:33:59 +0300 Subject: [PATCH 48/86] Restore block template cache lifetime & make it customizable in devnet-prealloc feature --- consensus/core/src/config/mod.rs | 5 +++++ kaspad/src/args.rs | 10 ++++++++++ kaspad/src/daemon.rs | 14 ++++++++++++-- mining/src/cache.rs | 3 +-- testing/integration/src/mempool_benchmarks.rs | 1 + 5 files changed, 29 insertions(+), 4 deletions(-) diff --git a/consensus/core/src/config/mod.rs b/consensus/core/src/config/mod.rs index f453a6128f..0dabd9626b 100644 --- a/consensus/core/src/config/mod.rs +++ b/consensus/core/src/config/mod.rs @@ -61,6 +61,9 @@ pub struct Config { #[cfg(feature = "devnet-prealloc")] pub initial_utxo_set: Arc, + + #[cfg(feature = "devnet-prealloc")] + pub block_template_cache_lifetime: Option, } impl Config { @@ -85,6 +88,8 @@ impl Config { #[cfg(feature = "devnet-prealloc")] initial_utxo_set: Default::default(), + #[cfg(feature = "devnet-prealloc")] + block_template_cache_lifetime: None, } } diff --git a/kaspad/src/args.rs b/kaspad/src/args.rs index 44a0477064..83920557c4 100644 --- a/kaspad/src/args.rs +++ b/kaspad/src/args.rs @@ -62,6 +62,8 @@ pub struct Args { pub prealloc_address: Option, #[cfg(feature = "devnet-prealloc")] pub prealloc_amount: u64, + #[cfg(feature = "devnet-prealloc")] + pub block_template_cache_lifetime: Option, } impl Default for Args { @@ -105,6 +107,8 @@ impl Default for Args { prealloc_address: None, #[cfg(feature = "devnet-prealloc")] prealloc_amount: 1_000_000, + #[cfg(feature = "devnet-prealloc")] + block_template_cache_lifetime: None, } } } @@ -123,6 +127,10 @@ impl Args { if let Some(num_prealloc_utxos) = self.num_prealloc_utxos { config.initial_utxo_set = Arc::new(self.generate_prealloc_utxos(num_prealloc_utxos)); } + #[cfg(feature = "devnet-prealloc")] + if self.block_template_cache_lifetime.is_some() { + config.block_template_cache_lifetime = self.block_template_cache_lifetime; + } } #[cfg(feature = "devnet-prealloc")] @@ -363,6 +371,8 @@ pub fn parse_args() -> Args { prealloc_address: m.get_one::("prealloc-address").cloned(), #[cfg(feature = "devnet-prealloc")] prealloc_amount: 
m.get_one::("prealloc-amount").cloned().unwrap_or(defaults.prealloc_amount), + #[cfg(feature = "devnet-prealloc")] + block_template_cache_lifetime: m.get_one::("block-template-cache-lifetime").cloned(), } } diff --git a/kaspad/src/daemon.rs b/kaspad/src/daemon.rs index bfb0118ea6..4d955326e7 100644 --- a/kaspad/src/daemon.rs +++ b/kaspad/src/daemon.rs @@ -275,8 +275,18 @@ do you confirm? (answer y/n or pass --yes to the Kaspad command line to confirm }; let address_manager = AddressManager::new(config.clone(), meta_db); - let mining_manager = - MiningManagerProxy::new(Arc::new(MiningManager::new(config.target_time_per_block, false, config.max_block_mass, None))); + + let mut block_template_cache_lifetime: Option = None; + #[cfg(feature = "devnet-prealloc")] + if config.block_template_cache_lifetime.is_some() { + block_template_cache_lifetime = config.block_template_cache_lifetime; + } + let mining_manager = MiningManagerProxy::new(Arc::new(MiningManager::new( + config.target_time_per_block, + false, + config.max_block_mass, + block_template_cache_lifetime, + ))); let flow_context = Arc::new(FlowContext::new( consensus_manager.clone(), diff --git a/mining/src/cache.rs b/mining/src/cache.rs index ce8a1ffde5..b6fae4f18e 100644 --- a/mining/src/cache.rs +++ b/mining/src/cache.rs @@ -6,9 +6,8 @@ use std::sync::{ Arc, }; -// TODO: revisit this value /// CACHE_LIFETIME indicates the default duration in milliseconds after which the cached data expires. -const DEFAULT_CACHE_LIFETIME: u64 = 15; +const DEFAULT_CACHE_LIFETIME: u64 = 1_000; pub(crate) struct Inner { /// Time, in milliseconds, at which the cache was last updated diff --git a/testing/integration/src/mempool_benchmarks.rs b/testing/integration/src/mempool_benchmarks.rs index 1ecd79e2e6..7b5cd4a071 100644 --- a/testing/integration/src/mempool_benchmarks.rs +++ b/testing/integration/src/mempool_benchmarks.rs @@ -186,6 +186,7 @@ async fn bench_bbt_latency() { num_prealloc_utxos: Some(TX_LEVEL_WIDTH as u64 * CONTRACT_FACTOR), prealloc_address: Some(prealloc_address.to_string()), prealloc_amount: 500 * SOMPI_PER_KASPA, + block_template_cache_lifetime: Some(5), ..Default::default() }; let network = args.network(); From 8212a05cc31453514b71ef5723fe91479880ca8e Mon Sep 17 00:00:00 2001 From: Tiram <18632023+tiram88@users.noreply.github.com> Date: Mon, 18 Sep 2023 01:44:02 +0300 Subject: [PATCH 49/86] Restore block template cache lifetime & make it customizable in devnet-prealloc feature --- consensus/core/src/config/mod.rs | 5 +++++ kaspad/src/args.rs | 10 ++++++++++ kaspad/src/daemon.rs | 13 +++++++++++-- mining/src/cache.rs | 3 +-- testing/integration/src/mempool_benchmarks.rs | 1 + 5 files changed, 28 insertions(+), 4 deletions(-) diff --git a/consensus/core/src/config/mod.rs b/consensus/core/src/config/mod.rs index f453a6128f..0dabd9626b 100644 --- a/consensus/core/src/config/mod.rs +++ b/consensus/core/src/config/mod.rs @@ -61,6 +61,9 @@ pub struct Config { #[cfg(feature = "devnet-prealloc")] pub initial_utxo_set: Arc, + + #[cfg(feature = "devnet-prealloc")] + pub block_template_cache_lifetime: Option, } impl Config { @@ -85,6 +88,8 @@ impl Config { #[cfg(feature = "devnet-prealloc")] initial_utxo_set: Default::default(), + #[cfg(feature = "devnet-prealloc")] + block_template_cache_lifetime: None, } } diff --git a/kaspad/src/args.rs b/kaspad/src/args.rs index 44a0477064..83920557c4 100644 --- a/kaspad/src/args.rs +++ b/kaspad/src/args.rs @@ -62,6 +62,8 @@ pub struct Args { pub prealloc_address: Option, #[cfg(feature = "devnet-prealloc")] pub 
prealloc_amount: u64, + #[cfg(feature = "devnet-prealloc")] + pub block_template_cache_lifetime: Option, } impl Default for Args { @@ -105,6 +107,8 @@ impl Default for Args { prealloc_address: None, #[cfg(feature = "devnet-prealloc")] prealloc_amount: 1_000_000, + #[cfg(feature = "devnet-prealloc")] + block_template_cache_lifetime: None, } } } @@ -123,6 +127,10 @@ impl Args { if let Some(num_prealloc_utxos) = self.num_prealloc_utxos { config.initial_utxo_set = Arc::new(self.generate_prealloc_utxos(num_prealloc_utxos)); } + #[cfg(feature = "devnet-prealloc")] + if self.block_template_cache_lifetime.is_some() { + config.block_template_cache_lifetime = self.block_template_cache_lifetime; + } } #[cfg(feature = "devnet-prealloc")] @@ -363,6 +371,8 @@ pub fn parse_args() -> Args { prealloc_address: m.get_one::("prealloc-address").cloned(), #[cfg(feature = "devnet-prealloc")] prealloc_amount: m.get_one::("prealloc-amount").cloned().unwrap_or(defaults.prealloc_amount), + #[cfg(feature = "devnet-prealloc")] + block_template_cache_lifetime: m.get_one::("block-template-cache-lifetime").cloned(), } } diff --git a/kaspad/src/daemon.rs b/kaspad/src/daemon.rs index bfb0118ea6..61a1257c66 100644 --- a/kaspad/src/daemon.rs +++ b/kaspad/src/daemon.rs @@ -275,8 +275,17 @@ do you confirm? (answer y/n or pass --yes to the Kaspad command line to confirm }; let address_manager = AddressManager::new(config.clone(), meta_db); - let mining_manager = - MiningManagerProxy::new(Arc::new(MiningManager::new(config.target_time_per_block, false, config.max_block_mass, None))); + + #[cfg(not(feature = "devnet-prealloc"))] + let cache_lifetime: Option = None; + #[cfg(feature = "devnet-prealloc")] + let cache_lifetime = config.block_template_cache_lifetime; + let mining_manager = MiningManagerProxy::new(Arc::new(MiningManager::new( + config.target_time_per_block, + false, + config.max_block_mass, + cache_lifetime, + ))); let flow_context = Arc::new(FlowContext::new( consensus_manager.clone(), diff --git a/mining/src/cache.rs b/mining/src/cache.rs index ce8a1ffde5..b6fae4f18e 100644 --- a/mining/src/cache.rs +++ b/mining/src/cache.rs @@ -6,9 +6,8 @@ use std::sync::{ Arc, }; -// TODO: revisit this value /// CACHE_LIFETIME indicates the default duration in milliseconds after which the cached data expires. 
-const DEFAULT_CACHE_LIFETIME: u64 = 15; +const DEFAULT_CACHE_LIFETIME: u64 = 1_000; pub(crate) struct Inner { /// Time, in milliseconds, at which the cache was last updated diff --git a/testing/integration/src/mempool_benchmarks.rs b/testing/integration/src/mempool_benchmarks.rs index 1ecd79e2e6..7b5cd4a071 100644 --- a/testing/integration/src/mempool_benchmarks.rs +++ b/testing/integration/src/mempool_benchmarks.rs @@ -186,6 +186,7 @@ async fn bench_bbt_latency() { num_prealloc_utxos: Some(TX_LEVEL_WIDTH as u64 * CONTRACT_FACTOR), prealloc_address: Some(prealloc_address.to_string()), prealloc_amount: 500 * SOMPI_PER_KASPA, + block_template_cache_lifetime: Some(5), ..Default::default() }; let network = args.network(); From 01c8f9cdc079b476a9c00e8c700fbe45757183cd Mon Sep 17 00:00:00 2001 From: Tiram <18632023+tiram88@users.noreply.github.com> Date: Mon, 18 Sep 2023 21:50:27 +0300 Subject: [PATCH 50/86] Relax a bit the BBT maximum attempts constraint --- core/src/time.rs | 4 ++++ mining/src/manager.rs | 16 +++++++++++----- mining/src/mempool/config.rs | 2 +- 3 files changed, 16 insertions(+), 6 deletions(-) diff --git a/core/src/time.rs b/core/src/time.rs index 8fa388c2e4..03555ec760 100644 --- a/core/src/time.rs +++ b/core/src/time.rs @@ -22,6 +22,10 @@ impl Stopwatch { pub fn with_threshold(name: &'static str) -> Self { Self { name, start: Instant::now() } } + + pub fn elapsed(&self) -> Duration { + self.start.elapsed() + } } impl Drop for Stopwatch { diff --git a/mining/src/manager.rs b/mining/src/manager.rs index cf6e1d03e0..d37fc12bcf 100644 --- a/mining/src/manager.rs +++ b/mining/src/manager.rs @@ -97,19 +97,25 @@ impl MiningManager { let block_template = cache_lock.set_immutable_cached_template(block_template); match attempts { 1 => { - debug!("Built a new block template with {} transactions", block_template.block.transactions.len()); + debug!( + "Built a new block template with {} transactions in {:#?}", + block_template.block.transactions.len(), + _swo.elapsed() + ); } 2 => { debug!( - "Built a new block template with {} transactions at second attempt", - block_template.block.transactions.len() + "Built a new block template with {} transactions at second attempt in {:#?}", + block_template.block.transactions.len(), + _swo.elapsed() ); } n => { debug!( - "Built a new block template with {} transactions in {} attempts", + "Built a new block template with {} transactions in {} attempts totaling {:#?}", block_template.block.transactions.len(), - n + n, + _swo.elapsed() ); } } diff --git a/mining/src/mempool/config.rs b/mining/src/mempool/config.rs index 3399cfebef..2e3f68a2ab 100644 --- a/mining/src/mempool/config.rs +++ b/mining/src/mempool/config.rs @@ -2,7 +2,7 @@ use kaspa_consensus_core::constants::TX_VERSION; pub(crate) const DEFAULT_MAXIMUM_TRANSACTION_COUNT: u64 = 1_000_000; pub(crate) const DEFAULT_MAXIMUM_READY_TRANSACTION_COUNT: u64 = 100_000; -pub(crate) const DEFAULT_MAXIMUM_BUILD_BLOCK_TEMPLATE_ATTEMPTS: u64 = 3; +pub(crate) const DEFAULT_MAXIMUM_BUILD_BLOCK_TEMPLATE_ATTEMPTS: u64 = 5; // TODO: revisit this value pub(crate) const DEFAULT_READY_TRANSACTIONS_REFETCH_LIMIT: usize = 2_500; From d02490461dd4f626e7431e290068e6f84962a4bb Mon Sep 17 00:00:00 2001 From: Tiram <18632023+tiram88@users.noreply.github.com> Date: Tue, 19 Sep 2023 13:29:11 +0300 Subject: [PATCH 51/86] Refactor multiple `contained_by_txs` fns into one generic --- mining/src/manager_tests.rs | 40 +++++++++++++++---------------------- 1 file changed, 16 insertions(+), 24 deletions(-) diff --git 
a/mining/src/manager_tests.rs b/mining/src/manager_tests.rs index 10780f08a8..dc0ba43360 100644 --- a/mining/src/manager_tests.rs +++ b/mining/src/manager_tests.rs @@ -101,7 +101,7 @@ mod tests { assert!(result.is_ok(), "inserting the child transaction {} into the mempool failed", transaction_not_an_orphan.id()); let (transactions_from_pool, _) = mining_manager.get_all_transactions(true, false); assert!( - contained_by_mtxs(transaction_not_an_orphan.id(), &transactions_from_pool), + contained_by(transaction_not_an_orphan.id(), &transactions_from_pool), "missing transaction {} in the mempool", transaction_not_an_orphan.id() ); @@ -339,13 +339,13 @@ mod tests { assert!(populated_txs.is_empty(), "the mempool should have no populated transaction since only orphans were submitted"); for orphan in orphans.iter() { assert!( - contained_by_txs(orphan.id(), &child_txs), + contained_by(orphan.id(), &child_txs), "orphan transaction {} should exist in the child transactions", orphan.id() ); } for child in child_txs.iter() { - assert!(contained_by_mtxs(child.id(), &orphans), "child transaction {} should exist in the orphan pool", child.id()); + assert!(contained_by(child.id(), &orphans), "child transaction {} should exist in the orphan pool", child.id()); } // Try to build a block template. @@ -357,7 +357,7 @@ mod tests { let template = result.unwrap(); for block_tx in template.block.transactions.iter().skip(1) { assert!( - !contained_by_txs(block_tx.id(), &child_txs), + !contained_by(block_tx.id(), &child_txs), "transaction {} is an orphan and is found in a built block template", block_tx.id() ); @@ -385,23 +385,23 @@ mod tests { ); for populated in populated_txs.iter() { assert!( - contained_by_tx_arcs(populated.id(), &unorphaned_txs), + contained_by(populated.id(), &unorphaned_txs), "mempool transaction {} should exist in the unorphaned transactions", populated.id() ); assert!( - contained_by_txs(populated.id(), &child_txs), + contained_by(populated.id(), &child_txs), "mempool transaction {} should exist in the child transactions", populated.id() ); } for child in child_txs.iter().skip(SKIPPED_TXS) { assert!( - contained_by_tx_arcs(child.id(), &unorphaned_txs), + contained_by(child.id(), &unorphaned_txs), "child transaction {} should exist in the unorphaned transactions", child.id() ); - assert!(contained_by_mtxs(child.id(), &populated_txs), "child transaction {} should exist in the mempool", child.id()); + assert!(contained_by(child.id(), &populated_txs), "child transaction {} should exist in the mempool", child.id()); } assert_eq!( SKIPPED_TXS, orphans.len(), @@ -410,13 +410,13 @@ mod tests { ); for orphan in orphans.iter() { assert!( - contained_by_txs(orphan.id(), &child_txs), + contained_by(orphan.id(), &child_txs), "orphan transaction {} should exist in the child transactions", orphan.id() ); } for child in child_txs.iter().take(SKIPPED_TXS) { - assert!(contained_by_mtxs(child.id(), &orphans), "child transaction {} should exist in the orphan pool", child.id()); + assert!(contained_by(child.id(), &orphans), "child transaction {} should exist in the orphan pool", child.id()); } // Build a new block template with all ready transactions, meaning all child transactions but one. 
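The refactor in the hunks below collapses three monomorphic helpers into one generic `contained_by`. A minimal sketch of why a single bound suffices (this assumes `AsRef<Transaction>` impls exist for `Transaction` and `MutableTransaction` in consensus-core; `Arc<Transaction>` gets one for free from the standard library's blanket `impl<T: ?Sized> AsRef<T> for Arc<T>`):

fn contained_by<T: AsRef<Transaction>>(transaction_id: TransactionId, transactions: &[T]) -> bool {
    // One body serves &[Transaction], &[MutableTransaction], and &[Arc<Transaction>] alike.
    transactions.iter().any(|x| x.as_ref().id() == transaction_id)
}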
@@ -436,14 +436,14 @@ mod tests { ); for block_tx in template.block.transactions.iter().skip(1) { assert!( - contained_by_txs(block_tx.id(), &child_txs), + contained_by(block_tx.id(), &child_txs), "transaction {} in the built block template does not exist in ready child transactions", block_tx.id() ); } for child in child_txs.iter().skip(SKIPPED_TXS) { assert!( - contained_by_txs(child.id(), &template.block.transactions), + contained_by(child.id(), &template.block.transactions), "child transaction {} in the mempool was ready but is not found in the built block template", child.id() ) @@ -495,14 +495,14 @@ mod tests { ); for parent in parent_txs.iter().take(SKIPPED_TXS) { assert!( - contained_by_mtxs(parent.id(), &populated_txs), + contained_by(parent.id(), &populated_txs), "mempool transaction {} should exist in the remaining parent transactions", parent.id() ); } for child in child_txs.iter().take(SKIPPED_TXS) { assert!( - contained_by_mtxs(child.id(), &populated_txs), + contained_by(child.id(), &populated_txs), "mempool transaction {} should exist in the remaining child transactions", child.id() ); @@ -958,16 +958,8 @@ mod tests { Transaction::new(TX_VERSION, vec![], outputs, 0, SUBNETWORK_ID_NATIVE, 0, vec![]) } - fn contained_by_mtxs(transaction_id: TransactionId, transactions: &[MutableTransaction]) -> bool { - transactions.iter().any(|x| x.id() == transaction_id) - } - - fn contained_by_txs(transaction_id: TransactionId, transactions: &[Transaction]) -> bool { - transactions.iter().any(|x| x.id() == transaction_id) - } - - fn contained_by_tx_arcs(transaction_id: TransactionId, transactions: &[Arc<Transaction>]) -> bool { - transactions.iter().any(|x| x.id() == transaction_id) + fn contained_by<T: AsRef<Transaction>>(transaction_id: TransactionId, transactions: &[T]) -> bool { + transactions.iter().any(|x| x.as_ref().id() == transaction_id) } fn into_status<T>(result: MiningManagerResult<T>) -> TxResult<()> { From 1f0c44e09b4671eabbc4233bf86a9311de53ec14 Mon Sep 17 00:00:00 2001 From: Tiram <18632023+tiram88@users.noreply.github.com> Date: Tue, 19 Sep 2023 13:33:02 +0300 Subject: [PATCH 52/86] Test selector transaction rejects & fix empty template returned by `select_transactions` upon selector reuse --- mining/src/block_template/selector.rs | 68 +++++++++++++++++++++++-- 1 file changed, 63 insertions(+), 5 deletions(-) diff --git a/mining/src/block_template/selector.rs index 5472e5499c..3348d7f2f9 100644 --- a/mining/src/block_template/selector.rs +++ b/mining/src/block_template/selector.rs @@ -40,6 +40,9 @@ pub(crate) struct TransactionsSelector { /// Indexes of transactions keys in stores rejected_txs: HashSet<TransactionId>, + /// Number of transactions marked as rejected + committed_rejects: usize, + /// Indexes of selected transactions in stores selected_txs: Vec<usize>, total_mass: u64, @@ -58,6 +61,7 @@ impl TransactionsSelector { transactions, selectable_txs: vec![], rejected_txs: Default::default(), + committed_rejects: 0, selected_txs: vec![], total_mass: 0, total_fees: 0, @@ -71,7 +75,7 @@ impl TransactionsSelector { } pub(crate) fn len(&self) -> usize { - self.transactions.len() - self.rejected_txs.len() + self.transactions.len() - self.rejected_txs.len() - self.committed_rejects } /// select_transactions implements a probabilistic transaction selection algorithm.
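For context, a minimal standalone sketch of the weighted draw at the core of that algorithm (illustrative only: the real selector keeps candidates as cumulative ranges over `total_p` and locates the hit with `CandidateList::find`):

use rand::Rng;

/// Picks an index with probability proportional to its weight; the weights
/// play the role of the selector's `p = value^ALPHA` masses.
fn pick_weighted(weights: &[f64]) -> Option<usize> {
    let total: f64 = weights.iter().sum();
    if total <= 0.0 {
        return None;
    }
    let r = rand::thread_rng().gen::<f64>() * total;
    let mut acc = 0.0;
    for (i, w) in weights.iter().enumerate() {
        acc += w;
        if r < acc {
            return Some(i);
        }
    }
    Some(weights.len() - 1) // guard against floating-point rounding at the upper edge
}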
@@ -203,25 +207,29 @@ impl TransactionsSelector { self.rejected_txs.insert(transaction_id); } - fn commit_rejections(&mut self) { - let _sw = Stopwatch::<5>::with_threshold("commit_rejections op"); + fn commit_rejects(&mut self) { + let _sw = Stopwatch::<5>::with_threshold("commit_rejects op"); if self.rejected_txs.is_empty() { return; } for (index, tx) in self.transactions.iter().enumerate() { if !self.selectable_txs[index].is_rejected && self.rejected_txs.remove(&tx.tx.id()) { self.selectable_txs[index].is_rejected = true; + self.committed_rejects += 1; if self.rejected_txs.is_empty() { break; } } } + assert!(self.rejected_txs.is_empty()); } fn reset(&mut self) { assert_eq!(self.transactions.len(), self.selectable_txs.len()); self.selected_txs = Vec::with_capacity(self.transactions.len()); - self.commit_rejections(); + self.total_fees = 0; + self.total_mass = 0; + self.commit_rejects(); } /// calc_tx_value calculates a value to be used in transaction selection. @@ -243,5 +251,55 @@ impl TransactionsSelector { #[cfg(test)] mod tests { - // TODO: add unit-tests for select_transactions + use super::*; + use itertools::Itertools; + use kaspa_consensus_core::{ + constants::{MAX_TX_IN_SEQUENCE_NUM, SOMPI_PER_KASPA, TX_VERSION}, + mass::transaction_estimated_serialized_size, + subnets::SUBNETWORK_ID_NATIVE, + tx::{Transaction, TransactionId, TransactionInput, TransactionOutpoint, TransactionOutput}, + }; + use kaspa_txscript::{pay_to_script_hash_signature_script, test_helpers::op_true_script}; + use std::sync::Arc; + + use crate::{mempool::config::DEFAULT_MINIMUM_RELAY_TRANSACTION_FEE, model::candidate_tx::CandidateTransaction}; + + #[test] + fn test_reject_transaction() { + const TX_INITIAL_COUNT: usize = 1_000; + const REJECT_COUNT: usize = 10; + + // Create a vector of transactions differing by output value so they have unique ids + let transactions = (0..TX_INITIAL_COUNT).map(|i| create_transaction(SOMPI_PER_KASPA * (i + 1) as u64)).collect_vec(); + let policy = Policy::new(100_000); + let mut selector = TransactionsSelector::new(policy, transactions); + assert_eq!(selector.len(), TX_INITIAL_COUNT, "selector length matches initial transaction vector length"); + + let mut remaining_count = TX_INITIAL_COUNT; + for i in 0..3 { + let selected_txs = selector.select_transactions(); + selected_txs.iter().skip((i + 1) * 100).take(REJECT_COUNT).for_each(|x| selector.reject(x.id())); + remaining_count -= REJECT_COUNT; + assert_eq!(selector.len(), remaining_count, "selector length matches remaining transaction count"); + selector.commit_rejects(); + assert_eq!(selector.len(), remaining_count, "selector length matches remaining transaction count"); + let selected_txs_2 = selector.select_transactions(); + assert_eq!(selector.len(), remaining_count, "selector length matches remaining transaction count"); + assert_eq!(selected_txs.len(), selected_txs_2.len()); + } + } + + fn create_transaction(value: u64) -> CandidateTransaction { + let previous_outpoint = TransactionOutpoint::new(TransactionId::default(), 0); + let (script_public_key, redeem_script) = op_true_script(); + let signature_script = pay_to_script_hash_signature_script(redeem_script, vec![]).expect("the redeem script is canonical"); + + let input = TransactionInput::new(previous_outpoint, signature_script, MAX_TX_IN_SEQUENCE_NUM, 1); + let output = TransactionOutput::new(value - DEFAULT_MINIMUM_RELAY_TRANSACTION_FEE, script_public_key); + let tx = Arc::new(Transaction::new(TX_VERSION, vec![input], vec![output], 0, SUBNETWORK_ID_NATIVE, 0, 
vec![])); + let calculated_mass = transaction_estimated_serialized_size(&tx); + let calculated_fee = DEFAULT_MINIMUM_RELAY_TRANSACTION_FEE; + + CandidateTransaction { tx, calculated_fee, calculated_mass } + } } From bb56c70ff39f43593f42ca3d10f091cbcd917524 Mon Sep 17 00:00:00 2001 From: Tiram <18632023+tiram88@users.noreply.github.com> Date: Wed, 20 Sep 2023 01:00:48 +0300 Subject: [PATCH 53/86] Log some mempool metrics --- kaspad/src/daemon.rs | 10 +- mining/src/lib.rs | 78 +++++++++++++++ mining/src/manager.rs | 14 ++- mining/src/manager_tests.rs | 31 ++++-- .../src/mempool/check_transaction_standard.rs | 14 ++- .../mempool/handle_new_block_transactions.rs | 9 +- mining/src/mempool/mod.rs | 14 ++- mining/src/monitor.rs | 94 +++++++++++++++++++ 8 files changed, 238 insertions(+), 26 deletions(-) create mode 100644 mining/src/monitor.rs diff --git a/kaspad/src/daemon.rs b/kaspad/src/daemon.rs index 61a1257c66..b279d6f154 100644 --- a/kaspad/src/daemon.rs +++ b/kaspad/src/daemon.rs @@ -18,7 +18,11 @@ use kaspa_consensus::{consensus::factory::Factory as ConsensusFactory, pipeline: use kaspa_consensusmanager::ConsensusManager; use kaspa_core::task::runtime::AsyncRuntime; use kaspa_index_processor::service::IndexService; -use kaspa_mining::manager::{MiningManager, MiningManagerProxy}; +use kaspa_mining::{ + manager::{MiningManager, MiningManagerProxy}, + monitor::MiningMonitor, + MiningCounters, +}; use kaspa_p2p_flows::{flow_context::FlowContext, service::P2pService}; use kaspa_perf_monitor::builder::Builder as PerfMonitorBuilder; @@ -233,6 +237,7 @@ do you confirm? (answer y/n or pass --yes to the Kaspad command line to confirm let (notification_send, notification_recv) = unbounded(); let notification_root = Arc::new(ConsensusNotificationRoot::new(notification_send)); let processing_counters = Arc::new(ProcessingCounters::default()); + let mining_counters = Arc::new(MiningCounters::default()); let wrpc_borsh_counters = Arc::new(WrpcServerCounters::default()); let wrpc_json_counters = Arc::new(WrpcServerCounters::default()); @@ -280,11 +285,13 @@ do you confirm? (answer y/n or pass --yes to the Kaspad command line to confirm let cache_lifetime: Option = None; #[cfg(feature = "devnet-prealloc")] let cache_lifetime = config.block_template_cache_lifetime; + let mining_monitor = Arc::new(MiningMonitor::new(mining_counters.clone(), tick_service.clone())); let mining_manager = MiningManagerProxy::new(Arc::new(MiningManager::new( config.target_time_per_block, false, config.max_block_mass, cache_lifetime, + mining_counters, ))); let flow_context = Arc::new(FlowContext::new( @@ -333,6 +340,7 @@ do you confirm? 
(answer y/n or pass --yes to the Kaspad command line to confirm async_runtime.register(grpc_service); async_runtime.register(p2p_service); async_runtime.register(consensus_monitor); + async_runtime.register(mining_monitor); async_runtime.register(perf_monitor); let wrpc_service_tasks: usize = 2; // num_cpus::get() / 2; // Register wRPC servers based on command line arguments diff --git a/mining/src/lib.rs b/mining/src/lib.rs index f0f4028642..47b11f4d9d 100644 --- a/mining/src/lib.rs +++ b/mining/src/lib.rs @@ -1,3 +1,7 @@ +use std::sync::atomic::{AtomicU64, Ordering}; + +use mempool::tx::Priority; + mod block_template; pub(crate) mod cache; pub mod errors; @@ -5,6 +9,80 @@ pub mod manager; mod manager_tests; pub mod mempool; pub mod model; +pub mod monitor; #[cfg(test)] pub mod testutils; + +#[derive(Default)] +pub struct MiningCounters { + pub high_priority_tx_counts: AtomicU64, + pub low_priority_tx_counts: AtomicU64, + pub block_tx_counts: AtomicU64, + pub tx_accepted_counts: AtomicU64, + pub input_counts: AtomicU64, + pub output_counts: AtomicU64, +} + +impl MiningCounters { + pub fn snapshot(&self) -> MempoolCountersSnapshot { + MempoolCountersSnapshot { + high_priority_tx_counts: self.high_priority_tx_counts.load(Ordering::Relaxed), + low_priority_tx_counts: self.low_priority_tx_counts.load(Ordering::Relaxed), + block_tx_counts: self.block_tx_counts.load(Ordering::Relaxed), + tx_accepted_counts: self.tx_accepted_counts.load(Ordering::Relaxed), + input_counts: self.input_counts.load(Ordering::Relaxed), + output_counts: self.output_counts.load(Ordering::Relaxed), + } + } + + pub fn increase_tx_counts(&self, value: u64, priority: Priority) { + match priority { + Priority::Low => { + self.low_priority_tx_counts.fetch_add(value, Ordering::SeqCst); + } + Priority::High => { + self.high_priority_tx_counts.fetch_add(value, Ordering::SeqCst); + } + } + } +} + +#[derive(Debug, PartialEq, Eq)] +pub struct MempoolCountersSnapshot { + pub high_priority_tx_counts: u64, + pub low_priority_tx_counts: u64, + pub block_tx_counts: u64, + pub tx_accepted_counts: u64, + pub input_counts: u64, + pub output_counts: u64, +} + +impl MempoolCountersSnapshot { + pub fn in_tx_counts(&self) -> u64 { + self.high_priority_tx_counts + self.low_priority_tx_counts + } + + pub fn collision_ratio(&self) -> f64 { + if self.block_tx_counts > 0 { + (self.block_tx_counts - self.tx_accepted_counts) as f64 / self.block_tx_counts as f64 + } else { + 0f64 + } + } +} + +impl core::ops::Sub for &MempoolCountersSnapshot { + type Output = MempoolCountersSnapshot; + + fn sub(self, rhs: Self) -> Self::Output { + Self::Output { + high_priority_tx_counts: self.high_priority_tx_counts.checked_sub(rhs.high_priority_tx_counts).unwrap_or_default(), + low_priority_tx_counts: self.low_priority_tx_counts.checked_sub(rhs.low_priority_tx_counts).unwrap_or_default(), + block_tx_counts: self.block_tx_counts.checked_sub(rhs.block_tx_counts).unwrap_or_default(), + tx_accepted_counts: self.tx_accepted_counts.checked_sub(rhs.tx_accepted_counts).unwrap_or_default(), + input_counts: self.input_counts.checked_sub(rhs.input_counts).unwrap_or_default(), + output_counts: self.output_counts.checked_sub(rhs.output_counts).unwrap_or_default(), + } + } +} diff --git a/mining/src/manager.rs b/mining/src/manager.rs index d37fc12bcf..00c1f2872c 100644 --- a/mining/src/manager.rs +++ b/mining/src/manager.rs @@ -16,6 +16,7 @@ use crate::{ owner_txs::{GroupedOwnerTransactions, ScriptPublicKeySet}, topological_sort::IntoIterTopologically, }, + MiningCounters, }; 
use itertools::Itertools; use kaspa_consensus_core::{ @@ -36,6 +37,7 @@ pub struct MiningManager { config: Arc<Config>, block_template_cache: BlockTemplateCache, mempool: RwLock<Mempool>, + counters: Arc<MiningCounters>, } impl MiningManager { @@ -44,16 +46,17 @@ impl MiningManager { relay_non_std_transactions: bool, max_block_mass: u64, cache_lifetime: Option<u64>, + counters: Arc<MiningCounters>, ) -> Self { let config = Config::build_default(target_time_per_block, relay_non_std_transactions, max_block_mass); - Self::with_config(config, cache_lifetime) + Self::with_config(config, cache_lifetime, counters) } - pub(crate) fn with_config(config: Config, cache_lifetime: Option<u64>) -> Self { + pub(crate) fn with_config(config: Config, cache_lifetime: Option<u64>, counters: Arc<MiningCounters>) -> Self { let config = Arc::new(config); - let mempool = RwLock::new(Mempool::new(config.clone())); + let mempool = RwLock::new(Mempool::new(config.clone(), counters.clone())); let block_template_cache = BlockTemplateCache::new(cache_lifetime); - Self { config, block_template_cache, mempool } + Self { config, block_template_cache, mempool, counters } } pub fn get_block_template(&self, consensus: &dyn ConsensusApi, miner_data: &MinerData) -> MiningManagerResult<BlockTemplate> { @@ -246,6 +249,7 @@ impl MiningManager { // We include the original accepted transaction as well accepted_transactions.push(accepted_transaction); accepted_transactions.extend(self.validate_and_insert_unorphaned_transactions(consensus, unorphaned_transactions)); + self.counters.increase_tx_counts(1, priority); Ok(accepted_transactions) } else { @@ -297,6 +301,7 @@ impl MiningManager { ) { Ok(Some(accepted_transaction)) => { accepted_transactions.push(accepted_transaction.clone()); + self.counters.increase_tx_counts(1, priority); mempool.get_unorphaned_transactions_after_accepted_transaction(&accepted_transaction) } Ok(None) => vec![], @@ -385,6 +390,7 @@ impl MiningManager { match mempool.post_validate_and_insert_transaction(consensus, validation_result, transaction, priority, orphan) { Ok(Some(accepted_transaction)) => { accepted_transactions.push(accepted_transaction.clone()); + self.counters.increase_tx_counts(1, priority); mempool.get_unorphaned_transactions_after_accepted_transaction(&accepted_transaction) } Ok(None) => { diff --git a/mining/src/manager_tests.rs b/mining/src/manager_tests.rs index dc0ba43360..0ef9e53b47 100644 --- a/mining/src/manager_tests.rs +++ b/mining/src/manager_tests.rs @@ -11,6 +11,7 @@ mod tests { }, model::candidate_tx::CandidateTransaction, testutils::consensus_mock::ConsensusMock, + MiningCounters, }; use kaspa_addresses::{Address, Prefix, Version}; use kaspa_consensus_core::{ @@ -41,7 +42,8 @@ mod tests { fn test_validate_and_insert_transaction() { const TX_COUNT: u32 = 10; let consensus = Arc::new(ConsensusMock::new()); - let mining_manager = MiningManager::new(TARGET_TIME_PER_BLOCK, false, MAX_BLOCK_MASS, None); + let counters = Arc::new(MiningCounters::default()); + let mining_manager = MiningManager::new(TARGET_TIME_PER_BLOCK, false, MAX_BLOCK_MASS, None, counters); let transactions_to_insert = (0..TX_COUNT).map(|i| create_transaction_with_utxo_entry(i, 0)).collect::<Vec<_>>(); for transaction in transactions_to_insert.iter() { let result = mining_manager.validate_and_insert_mutable_transaction( consensus.as_ref(), transaction.clone(), Priority::Low, @@ -113,7 +115,8 @@ #[test] fn test_simulated_error_in_consensus() { let consensus = Arc::new(ConsensusMock::new()); - let mining_manager =
MiningManager::new(TARGET_TIME_PER_BLOCK, false, MAX_BLOCK_MASS, None, counters); // Build an invalid transaction with some gas and inform the consensus mock about the result it should return // when the mempool will submit this transaction for validation. @@ -143,7 +146,8 @@ mod tests { #[test] fn test_insert_double_transactions_to_mempool() { let consensus = Arc::new(ConsensusMock::new()); - let mining_manager = MiningManager::new(TARGET_TIME_PER_BLOCK, false, MAX_BLOCK_MASS, None); + let counters = Arc::new(MiningCounters::default()); + let mining_manager = MiningManager::new(TARGET_TIME_PER_BLOCK, false, MAX_BLOCK_MASS, None, counters); let transaction = create_transaction_with_utxo_entry(0, 0); @@ -185,7 +189,8 @@ mod tests { #[test] fn test_double_spend_in_mempool() { let consensus = Arc::new(ConsensusMock::new()); - let mining_manager = MiningManager::new(TARGET_TIME_PER_BLOCK, false, MAX_BLOCK_MASS, None); + let counters = Arc::new(MiningCounters::default()); + let mining_manager = MiningManager::new(TARGET_TIME_PER_BLOCK, false, MAX_BLOCK_MASS, None, counters); let transaction = create_child_and_parent_txs_and_add_parent_to_consensus(&consensus); assert!( @@ -233,7 +238,8 @@ mod tests { #[test] fn test_handle_new_block_transactions() { let consensus = Arc::new(ConsensusMock::new()); - let mining_manager = MiningManager::new(TARGET_TIME_PER_BLOCK, false, MAX_BLOCK_MASS, None); + let counters = Arc::new(MiningCounters::default()); + let mining_manager = MiningManager::new(TARGET_TIME_PER_BLOCK, false, MAX_BLOCK_MASS, None, counters); const TX_COUNT: u32 = 10; let transactions_to_insert = (0..TX_COUNT).map(|i| create_transaction_with_utxo_entry(i, 0)).collect::>(); @@ -292,7 +298,8 @@ mod tests { // will be removed from the mempool. fn test_double_spend_with_block() { let consensus = Arc::new(ConsensusMock::new()); - let mining_manager = MiningManager::new(TARGET_TIME_PER_BLOCK, false, MAX_BLOCK_MASS, None); + let counters = Arc::new(MiningCounters::default()); + let mining_manager = MiningManager::new(TARGET_TIME_PER_BLOCK, false, MAX_BLOCK_MASS, None, counters); let transaction_in_the_mempool = create_transaction_with_utxo_entry(0, 0); let result = mining_manager.validate_and_insert_transaction( @@ -322,7 +329,8 @@ mod tests { #[test] fn test_orphan_transactions() { let consensus = Arc::new(ConsensusMock::new()); - let mining_manager = MiningManager::new(TARGET_TIME_PER_BLOCK, false, MAX_BLOCK_MASS, None); + let counters = Arc::new(MiningCounters::default()); + let mining_manager = MiningManager::new(TARGET_TIME_PER_BLOCK, false, MAX_BLOCK_MASS, None, counters); // Before each parent transaction we add a transaction that funds it and insert the funding transaction in the consensus. 
const TX_PAIRS_COUNT: usize = 5; @@ -575,7 +583,8 @@ mod tests { let mut config = Config::build_default(TARGET_TIME_PER_BLOCK, false, MAX_BLOCK_MASS); // Limit the orphan pool to 2 transactions config.maximum_orphan_transaction_count = 2; - let mining_manager = MiningManager::with_config(config.clone(), None); + let counters = Arc::new(MiningCounters::default()); + let mining_manager = MiningManager::with_config(config.clone(), None, counters); // Create pairs of transaction parent-and-child pairs according to the test vector let (parent_txs, child_txs) = create_arrays_of_parent_and_children_transactions(&consensus, tests.len()); @@ -655,7 +664,8 @@ mod tests { #[test] fn test_revalidate_high_priority_transactions() { let consensus = Arc::new(ConsensusMock::new()); - let mining_manager = MiningManager::new(TARGET_TIME_PER_BLOCK, false, MAX_BLOCK_MASS, None); + let counters = Arc::new(MiningCounters::default()); + let mining_manager = MiningManager::new(TARGET_TIME_PER_BLOCK, false, MAX_BLOCK_MASS, None, counters); // Create two valid transactions that double-spend each other (child_tx_1, child_tx_2) let (parent_tx, child_tx_1) = create_parent_and_children_transactions(&consensus, vec![3000 * SOMPI_PER_KASPA]); @@ -718,7 +728,8 @@ mod tests { #[test] fn test_modify_block_template() { let consensus = Arc::new(ConsensusMock::new()); - let mining_manager = MiningManager::new(TARGET_TIME_PER_BLOCK, false, MAX_BLOCK_MASS, None); + let counters = Arc::new(MiningCounters::default()); + let mining_manager = MiningManager::new(TARGET_TIME_PER_BLOCK, false, MAX_BLOCK_MASS, None, counters); // Before each parent transaction we add a transaction that funds it and insert the funding transaction in the consensus. const TX_PAIRS_COUNT: usize = 12; diff --git a/mining/src/mempool/check_transaction_standard.rs b/mining/src/mempool/check_transaction_standard.rs index 929cbfd41a..6fdee4d316 100644 --- a/mining/src/mempool/check_transaction_standard.rs +++ b/mining/src/mempool/check_transaction_standard.rs @@ -224,7 +224,10 @@ impl Mempool { #[cfg(test)] mod tests { use super::*; - use crate::mempool::config::{Config, DEFAULT_MINIMUM_RELAY_TRANSACTION_FEE}; + use crate::{ + mempool::config::{Config, DEFAULT_MINIMUM_RELAY_TRANSACTION_FEE}, + MiningCounters, + }; use kaspa_addresses::{Address, Prefix, Version}; use kaspa_consensus_core::{ config::params::Params, @@ -282,7 +285,8 @@ mod tests { let params: Params = net.into(); let mut config = Config::build_default(params.target_time_per_block, false, params.max_block_mass); config.minimum_relay_transaction_fee = test.minimum_relay_transaction_fee; - let mempool = Mempool::new(Arc::new(config)); + let counters = Arc::new(MiningCounters::default()); + let mempool = Mempool::new(Arc::new(config), counters); let got = mempool.minimum_required_transaction_relay_fee(test.size); if got != test.want { @@ -366,7 +370,8 @@ mod tests { let params: Params = net.into(); let mut config = Config::build_default(params.target_time_per_block, false, params.max_block_mass); config.minimum_relay_transaction_fee = test.minimum_relay_transaction_fee; - let mempool = Mempool::new(Arc::new(config)); + let counters = Arc::new(MiningCounters::default()); + let mempool = Mempool::new(Arc::new(config), counters); println!("test_is_transaction_output_dust test '{}' ", test.name); let res = mempool.is_transaction_output_dust(&test.tx_out); @@ -544,7 +549,8 @@ mod tests { for net in NetworkType::iter() { let params: Params = net.into(); let config = 
Config::build_default(params.target_time_per_block, false, params.max_block_mass); - let mempool = Mempool::new(Arc::new(config)); + let counters = Arc::new(MiningCounters::default()); + let mempool = Mempool::new(Arc::new(config), counters); // Ensure standard-ness is as expected. println!("test_check_transaction_standard_in_isolation test '{}' ", test.name); diff --git a/mining/src/mempool/handle_new_block_transactions.rs b/mining/src/mempool/handle_new_block_transactions.rs index 42d4702e79..b6eb233275 100644 --- a/mining/src/mempool/handle_new_block_transactions.rs +++ b/mining/src/mempool/handle_new_block_transactions.rs @@ -11,7 +11,7 @@ use kaspa_consensus_core::{ tx::{Transaction, TransactionId}, }; use kaspa_core::time::Stopwatch; -use std::collections::HashSet; +use std::{collections::HashSet, sync::atomic::Ordering}; impl Mempool { pub(crate) fn handle_new_block_transactions( @@ -32,7 +32,12 @@ } self.remove_double_spends(transaction)?; self.orphan_pool.remove_orphan(&transaction_id, false, TxRemovalReason::Accepted, "")?; - self.accepted_transactions.add(transaction_id, block_daa_score); + self.counters.block_tx_counts.fetch_add(1, Ordering::SeqCst); + if self.accepted_transactions.add(transaction_id, block_daa_score) { + self.counters.tx_accepted_counts.fetch_add(1, Ordering::SeqCst); + self.counters.input_counts.fetch_add(transaction.inputs.len() as u64, Ordering::SeqCst); + self.counters.output_counts.fetch_add(transaction.outputs.len() as u64, Ordering::SeqCst); + } unorphaned_transactions.extend(self.get_unorphaned_transactions_after_accepted_transaction(transaction)); } Ok(unorphaned_transactions) diff --git a/mining/src/mempool/mod.rs b/mining/src/mempool/mod.rs index 9cf055a28a..87bc0c01ba 100644 --- a/mining/src/mempool/mod.rs +++ b/mining/src/mempool/mod.rs @@ -1,6 +1,9 @@ -use crate::model::{ - candidate_tx::CandidateTransaction, - owner_txs::{GroupedOwnerTransactions, ScriptPublicKeySet}, +use crate::{ + model::{ + candidate_tx::CandidateTransaction, + owner_txs::{GroupedOwnerTransactions, ScriptPublicKeySet}, + }, + MiningCounters, }; use self::{ @@ -46,14 +49,15 @@ pub(crate) struct Mempool { orphan_pool: OrphanPool, accepted_transactions: AcceptedTransactions, last_stat_report_time: u64, + counters: Arc<MiningCounters>, } impl Mempool { - pub(crate) fn new(config: Arc<Config>) -> Self { + pub(crate) fn new(config: Arc<Config>, counters: Arc<MiningCounters>) -> Self { let transaction_pool = TransactionsPool::new(config.clone()); let orphan_pool = OrphanPool::new(config.clone()); let accepted_transactions = AcceptedTransactions::new(config.clone()); - Self { config, transaction_pool, orphan_pool, accepted_transactions, last_stat_report_time: unix_now() } + Self { config, transaction_pool, orphan_pool, accepted_transactions, last_stat_report_time: unix_now(), counters } } pub(crate) fn get_transaction( diff --git a/mining/src/monitor.rs new file mode 100644 index 0000000000..1c90a21956 --- /dev/null +++ b/mining/src/monitor.rs @@ -0,0 +1,94 @@ +use super::MiningCounters; +use kaspa_core::{ + info, + task::{ + service::{AsyncService, AsyncServiceFuture}, + tick::{TickReason, TickService}, + }, + trace, +}; +use std::{ + sync::Arc, + time::{Duration, Instant}, +}; + +const MONITOR: &str = "mempool-monitor"; + +pub struct MiningMonitor { + // Counters + counters: Arc<MiningCounters>, + + // Tick service + tick_service: Arc<TickService>, +} + +impl MiningMonitor { + pub fn new(counters: Arc<MiningCounters>, tick_service: Arc<TickService>) -> MiningMonitor { + MiningMonitor { counters, tick_service } + } + + pub async fn worker(self:
&Arc<Self>) { + let mut last_snapshot = self.counters.snapshot(); + let mut last_log_time = Instant::now(); + let snapshot_interval = 10; + loop { + if let TickReason::Shutdown = self.tick_service.tick(Duration::from_secs(snapshot_interval)).await { + // Let the system print final logs before exiting + tokio::time::sleep(Duration::from_millis(500)).await; + break; + } + + let snapshot = self.counters.snapshot(); + if snapshot == last_snapshot { + // No update, avoid printing useless info + last_log_time = Instant::now(); + continue; + } + + // Subtract the snapshots + let delta = &snapshot - &last_snapshot; + let now = Instant::now(); + let elapsed = (now - last_log_time).as_secs_f64(); + + info!("Processed {} unique transactions in the last {:.2}s ({:.2} avg txs/s, in: {} via RPC, {} via P2P, out: {} via accepted blocks, {:.2}% collisions)", + delta.tx_accepted_counts, + elapsed, + delta.tx_accepted_counts as f64 / elapsed, + delta.high_priority_tx_counts, + delta.low_priority_tx_counts, + delta.block_tx_counts, + delta.collision_ratio() * 100.0, + ); + + last_snapshot = snapshot; + last_log_time = now; + } + + trace!("mempool monitor thread exiting"); + } +} + +// service trait implementation for Monitor +impl AsyncService for MiningMonitor { + fn ident(self: Arc<Self>) -> &'static str { + MONITOR + } + + fn start(self: Arc<Self>) -> AsyncServiceFuture { + Box::pin(async move { + self.worker().await; + Ok(()) + }) + } + + fn signal_exit(self: Arc<Self>) { + trace!("sending an exit signal to {}", MONITOR); + } + + fn stop(self: Arc<Self>) -> AsyncServiceFuture { + Box::pin(async move { + trace!("{} stopped", MONITOR); + Ok(()) + }) + } +} From 14da8a286dddfbde5447d524e315cc38ce626181 Mon Sep 17 00:00:00 2001 From: Tiram <18632023+tiram88@users.noreply.github.com> Date: Thu, 21 Sep 2023 00:17:54 +0300 Subject: [PATCH 54/86] Handle new block and then new block template --- protocol/flows/src/v5/blockrelay/flow.rs | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/protocol/flows/src/v5/blockrelay/flow.rs b/protocol/flows/src/v5/blockrelay/flow.rs index ae92260a93..02551c2242 100644 --- a/protocol/flows/src/v5/blockrelay/flow.rs +++ b/protocol/flows/src/v5/blockrelay/flow.rs @@ -154,10 +154,8 @@ impl HandleRelayInvsFlow { } self.ctx.log_block_acceptance(inv.hash, BlockSource::Relay); - // TODO: investigate if reversing the following 2 calls may lower the tx missing outpoints error rate - // in Manager::get_block_template() - self.ctx.on_new_block_template().await?; self.ctx.on_new_block(&session, block).await?; + self.ctx.on_new_block_template().await?; // Broadcast all *new* virtual parents. As a policy, we avoid directly relaying the new block since // we wish to relay only blocks who entered past(virtual).
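An aside on the metrics introduced above: the snapshot/delta pattern is easy to exercise in isolation. A minimal sketch (types and methods as defined in mining/src/lib.rs above; the elapsed-time handling is simplified relative to the monitor's worker loop):

// Returns the fresh snapshot together with the incoming-tx rate over the interval.
fn incoming_tx_rate(
    counters: &MiningCounters,
    last: &MempoolCountersSnapshot,
    elapsed_secs: f64,
) -> (MempoolCountersSnapshot, f64) {
    let current = counters.snapshot();
    let delta = &current - last; // per-field checked subtraction, saturating at zero
    // High priority counts RPC submissions, low priority counts P2P relay
    let rate = delta.in_tx_counts() as f64 / elapsed_secs;
    (current, rate)
}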
From 92bb8dd039b058906a69b6c89dc393eb82a678b4 Mon Sep 17 00:00:00 2001 From: Michael Sutton Date: Thu, 21 Sep 2023 10:22:10 +0300 Subject: [PATCH 55/86] turn tx selector into an ongoing process with persistent state (wip: some tests are broken; selector is not used correctly by builder) --- mining/src/block_template/builder.rs | 4 +- mining/src/block_template/model/tx.rs | 17 ++-- mining/src/block_template/selector.rs | 137 +++++++++++++------------- 3 files changed, 79 insertions(+), 79 deletions(-) diff --git a/mining/src/block_template/builder.rs b/mining/src/block_template/builder.rs index 5348c653a8..3f83919ed6 100644 --- a/mining/src/block_template/builder.rs +++ b/mining/src/block_template/builder.rs @@ -1,4 +1,4 @@ -use super::{errors::BuilderResult, policy::Policy}; +use super::{errors::BuilderResult, policy::Policy, selector::TxSelector}; use crate::{block_template::selector::TransactionsSelector, model::candidate_tx::CandidateTransaction}; use kaspa_consensus_core::{ api::ConsensusApi, @@ -105,7 +105,7 @@ impl BlockTemplateBuilder { } pub(crate) fn reject_transaction(&mut self, transaction_id: TransactionId) { - self.selector.reject(transaction_id); + self.selector.reject_selection(transaction_id); } pub(crate) fn candidates_len(&self) -> usize { diff --git a/mining/src/block_template/model/tx.rs b/mining/src/block_template/model/tx.rs index dee461f633..b0c7e3f56e 100644 --- a/mining/src/block_template/model/tx.rs +++ b/mining/src/block_template/model/tx.rs @@ -1,14 +1,11 @@ pub(crate) struct SelectableTransaction { pub(crate) gas_limit: u64, pub(crate) p: f64, - - /// Has this candidate been rejected by the consensus? - pub(crate) is_rejected: bool, } impl SelectableTransaction { pub(crate) fn new(tx_value: f64, gas_limit: u64, alpha: i32) -> Self { - Self { gas_limit, p: tx_value.powi(alpha), is_rejected: false } + Self { gas_limit, p: tx_value.powi(alpha) } } } @@ -22,6 +19,7 @@ pub(crate) struct Candidate { /// Range start in the candidate list total_p space pub(crate) start: f64, + /// Range end in the candidate list total_p space pub(crate) end: f64, @@ -35,6 +33,7 @@ impl Candidate { } } +#[derive(Default)] pub(crate) struct CandidateList { pub(crate) candidates: Vec<Candidate>, pub(crate) total_p: f64, } @@ -45,12 +44,10 @@ impl CandidateList { let mut candidates = Vec::with_capacity(selectable_txs.len()); let mut total_p = 0.0; selectable_txs.iter().enumerate().for_each(|(i, tx)| { - if !tx.is_rejected { - let current_p = tx.p; - let candidate = Candidate::new(i, total_p, total_p + current_p); - candidates.push(candidate); - total_p += current_p; - } + let current_p = tx.p; + let candidate = Candidate::new(i, total_p, total_p + current_p); + candidates.push(candidate); + total_p += current_p; }); Self { candidates, total_p } } diff --git a/mining/src/block_template/selector.rs b/mining/src/block_template/selector.rs index 3348d7f2f9..e55642a6a9 100644 --- a/mining/src/block_template/selector.rs +++ b/mining/src/block_template/selector.rs @@ -1,9 +1,6 @@ use kaspa_core::{time::Stopwatch, trace}; use rand::Rng; -use std::{ - collections::{HashMap, HashSet}, - vec, -}; +use std::collections::HashMap; use crate::model::candidate_tx::CandidateTransaction; @@ -30,6 +27,11 @@ const ALPHA: i32 = 3; /// if REBALANCE_THRESHOLD is 0.95, there's a 1-in-20 chance of collision.
const REBALANCE_THRESHOLD: f64 = 0.95; +pub trait TxSelector { + fn select_transactions(&mut self) -> Vec<Transaction>; + fn reject_selection(&mut self, invalid_tx_id: TransactionId); +} + pub(crate) struct TransactionsSelector { policy: Policy, /// Transaction store @@ -37,16 +39,20 @@ /// Selectable transactions store selectable_txs: SelectableTransactions, - /// Indexes of transactions keys in stores - rejected_txs: HashSet<TransactionId>, - - /// Number of transactions marked as rejected - committed_rejects: usize, - /// Indexes of selected transactions in stores selected_txs: Vec<usize>, + + /// Optional state for handling selection rejections. Maps from a selected tx id + /// to the index of the tx in the `transactions` vec + selected_txs_map: Option<HashMap<TransactionId, usize>>, + + // Inner state of the selection process + candidate_list: CandidateList, + used_count: usize, + used_p: f64, total_mass: u64, total_fees: u64, + gas_usage_map: HashMap<SubnetworkId, u64>, } impl TransactionsSelector { @@ -59,23 +65,28 @@ let mut selector = Self { policy, transactions, - selectable_txs: vec![], - rejected_txs: Default::default(), - committed_rejects: 0, - selected_txs: vec![], + selectable_txs: Default::default(), + selected_txs: Default::default(), + selected_txs_map: None, + candidate_list: Default::default(), + used_count: 0, + used_p: 0.0, total_mass: 0, total_fees: 0, + gas_usage_map: Default::default(), }; // Create the selectable transactions selector.selectable_txs = selector.transactions.iter().map(|x| SelectableTransaction::new(selector.calc_tx_value(x), 0, ALPHA)).collect(); + // Prepare the initial candidate list + selector.candidate_list = CandidateList::new(&selector.selectable_txs); selector } pub(crate) fn len(&self) -> usize { self.transactions.len() } /// select_transactions implements a probabilistic transaction selection algorithm. @@ -102,29 +113,25 @@ let _sw = Stopwatch::<15>::with_threshold("select_transaction op"); let mut rng = rand::thread_rng(); - self.reset(); - let mut candidate_list = CandidateList::new(&self.selectable_txs); - let mut used_count = 0; - let mut used_p = 0.0; - let mut gas_usage_map: HashMap<SubnetworkId, u64> = HashMap::new(); + self.reset_selection(); - while candidate_list.candidates.len() - used_count > 0 { + while self.candidate_list.candidates.len() - self.used_count > 0 { // Rebalance the candidates if it's required - if used_p >= REBALANCE_THRESHOLD * candidate_list.total_p { - candidate_list = candidate_list.rebalanced(&self.selectable_txs); - used_count = 0; - used_p = 0.0; + if self.used_p >= REBALANCE_THRESHOLD * self.candidate_list.total_p { + self.candidate_list = self.candidate_list.rebalanced(&self.selectable_txs); + self.used_count = 0; + self.used_p = 0.0; // Break if we now ran out of transactions - if candidate_list.is_empty() { + if self.candidate_list.is_empty() { break; } } // Select a candidate tx at random - let r = rng.gen::<f64>() * candidate_list.total_p; - let selected_candidate_idx = candidate_list.find(r); - let selected_candidate = candidate_list.candidates.get_mut(selected_candidate_idx).unwrap(); + let r = rng.gen::<f64>() * self.candidate_list.total_p; + let selected_candidate_idx = self.candidate_list.find(r); + let selected_candidate = self.candidate_list.candidates.get_mut(selected_candidate_idx).unwrap(); // If is_marked_for_deletion is set, it means we got a collision. // Ignore and select another Tx.
@@ -145,7 +152,7 @@ impl TransactionsSelector { // Also check for overflow. if !selected_tx.tx.subnetwork_id.is_builtin_or_native() { let subnetwork_id = selected_tx.tx.subnetwork_id.clone(); - let gas_usage = gas_usage_map.entry(subnetwork_id.clone()).or_insert(0); + let gas_usage = self.gas_usage_map.entry(subnetwork_id.clone()).or_insert(0); let tx_gas = selected_tx.tx.gas; let next_gas_usage = (*gas_usage).checked_add(tx_gas); if next_gas_usage.is_none() || next_gas_usage.unwrap() > self.selectable_txs[selected_candidate.index].gas_limit { @@ -154,19 +161,19 @@ impl TransactionsSelector { selected_tx.tx.id(), subnetwork_id ); - for i in selected_candidate_idx..candidate_list.candidates.len() { - let transaction_index = candidate_list.candidates[i].index; - // candidateTxs are ordered by subnetwork, so we can safely assume - // that transactions after subnetworkID will not be relevant. + for i in selected_candidate_idx..self.candidate_list.candidates.len() { + let transaction_index = self.candidate_list.candidates[i].index; + // Candidate txs are ordered by subnetwork, so we can safely assume + // that transactions after subnetwork_id will not be relevant. if subnetwork_id < self.transactions[transaction_index].tx.subnetwork_id { break; } - let current = candidate_list.candidates.get_mut(i).unwrap(); + let current = self.candidate_list.candidates.get_mut(i).unwrap(); // Mark for deletion current.is_marked_for_deletion = true; - used_count += 1; - used_p += self.selectable_txs[transaction_index].p; + self.used_count += 1; + self.used_p += self.selectable_txs[transaction_index].p; } continue; } @@ -182,15 +189,15 @@ impl TransactionsSelector { self.total_fees += selected_tx.calculated_fee; trace!( - "Adding tx {0} (feePerMegaGram {1})", + "Adding tx {0} (fee per megagram: {1})", selected_tx.tx.id(), selected_tx.calculated_fee * 1_000_000 / selected_tx.calculated_mass ); // Mark for deletion selected_candidate.is_marked_for_deletion = true; - used_count += 1; - used_p += self.selectable_txs[selected_candidate.index].p; + self.used_count += 1; + self.used_p += self.selectable_txs[selected_candidate.index].p; } self.selected_txs.sort(); @@ -203,33 +210,11 @@ impl TransactionsSelector { self.selected_txs.iter().map(|x| self.transactions[*x].tx.as_ref().clone()).collect() } - pub(crate) fn reject(&mut self, transaction_id: TransactionId) { - self.rejected_txs.insert(transaction_id); - } - - fn commit_rejects(&mut self) { - let _sw = Stopwatch::<5>::with_threshold("commit_rejects op"); - if self.rejected_txs.is_empty() { - return; - } - for (index, tx) in self.transactions.iter().enumerate() { - if !self.selectable_txs[index].is_rejected && self.rejected_txs.remove(&tx.tx.id()) { - self.selectable_txs[index].is_rejected = true; - self.committed_rejects += 1; - if self.rejected_txs.is_empty() { - break; - } - } - } - assert!(self.rejected_txs.is_empty()); - } - - fn reset(&mut self) { + fn reset_selection(&mut self) { assert_eq!(self.transactions.len(), self.selectable_txs.len()); + // TODO: consider to min with the approximated amount of txs which fit into max block mass self.selected_txs = Vec::with_capacity(self.transactions.len()); - self.total_fees = 0; - self.total_mass = 0; - self.commit_rejects(); + self.selected_txs_map = None; } /// calc_tx_value calculates a value to be used in transaction selection. 
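A worked note on that value: it only enters selection after being raised to ALPHA (p = tx_value.powi(alpha) in SelectableTransaction::new, with ALPHA = 3 above), so, assuming the value behaves like a fee-rate measure, a transaction worth twice another carries 2^3 = 8 times the probability mass per draw:

// Illustrative arithmetic only: cubing amplifies relative differences.
let (v1, v2) = (1.0_f64, 2.0_f64);
assert_eq!(v2.powi(3) / v1.powi(3), 8.0);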
@@ -249,6 +234,25 @@ impl TransactionsSelector { } } +impl TxSelector for TransactionsSelector { + fn select_transactions(&mut self) -> Vec { + self.select_transactions() + } + + fn reject_selection(&mut self, invalid_tx_id: TransactionId) { + let selected_txs_map = self + .selected_txs_map + .get_or_insert_with(|| self.selected_txs.iter().map(|&x| (self.transactions[x].tx.id(), x)).collect()); + let tx_index = *selected_txs_map.get(&invalid_tx_id).expect("only previously selected txs can be rejected"); + let tx = &self.transactions[tx_index]; + self.total_mass -= tx.calculated_mass; + self.total_fees -= tx.calculated_fee; + if !tx.tx.subnetwork_id.is_builtin_or_native() { + *self.gas_usage_map.get_mut(&tx.tx.subnetwork_id).expect("previously selected txs have an entry") -= tx.tx.gas; + } + } +} + #[cfg(test)] mod tests { use super::*; @@ -278,10 +282,9 @@ mod tests { let mut remaining_count = TX_INITIAL_COUNT; for i in 0..3 { let selected_txs = selector.select_transactions(); - selected_txs.iter().skip((i + 1) * 100).take(REJECT_COUNT).for_each(|x| selector.reject(x.id())); + selected_txs.iter().skip((i + 1) * 100).take(REJECT_COUNT).for_each(|x| selector.reject_selection(x.id())); remaining_count -= REJECT_COUNT; assert_eq!(selector.len(), remaining_count, "selector length matches remaining transaction count"); - selector.commit_rejects(); assert_eq!(selector.len(), remaining_count, "selector length matches remaining transaction count"); let selected_txs_2 = selector.select_transactions(); assert_eq!(selector.len(), remaining_count, "selector length matches remaining transaction count"); From 0e54b664e1ad66ff85ef46af213e4252be567ad0 Mon Sep 17 00:00:00 2001 From: Michael Sutton Date: Thu, 21 Sep 2023 11:51:28 +0300 Subject: [PATCH 56/86] use tx selector for BBT (wip: virtual processor retry logic) --- consensus/core/src/api/mod.rs | 8 ++- consensus/core/src/block.rs | 11 +++- consensus/src/consensus/mod.rs | 10 ++-- .../pipeline/virtual_processor/processor.rs | 11 +++- .../src/pipeline/virtual_processor/tests.rs | 29 +++++++++-- mining/src/block_template/builder.rs | 37 ++++---------- mining/src/block_template/selector.rs | 50 +++++++++---------- mining/src/manager.rs | 31 ++---------- mining/src/manager_tests.rs | 4 +- mining/src/testutils/consensus_mock.rs | 9 +++- simpa/src/simulator/miner.rs | 24 ++++++++- 11 files changed, 128 insertions(+), 96 deletions(-) diff --git a/consensus/core/src/api/mod.rs b/consensus/core/src/api/mod.rs index 316bfd3f5a..c11ea8317d 100644 --- a/consensus/core/src/api/mod.rs +++ b/consensus/core/src/api/mod.rs @@ -4,7 +4,7 @@ use std::sync::Arc; use crate::{ acceptance_data::AcceptanceData, - block::{Block, BlockTemplate}, + block::{Block, BlockTemplate, TemplateTransactionSelector}, block_count::BlockCount, blockstatus::BlockStatus, coinbase::MinerData, @@ -27,7 +27,11 @@ pub type BlockValidationFuture = BoxFuture<'static, BlockProcessResult) -> Result { + fn build_block_template( + &self, + miner_data: MinerData, + tx_selector: Box, + ) -> Result { unimplemented!() } diff --git a/consensus/core/src/block.rs b/consensus/core/src/block.rs index acbdfcb883..21c0b531c7 100644 --- a/consensus/core/src/block.rs +++ b/consensus/core/src/block.rs @@ -1,6 +1,10 @@ use std::sync::Arc; -use crate::{coinbase::MinerData, header::Header, tx::Transaction}; +use crate::{ + coinbase::MinerData, + header::Header, + tx::{Transaction, TransactionId}, +}; use kaspa_hashes::Hash; /// A mutable block structure where header and transactions within can still be mutated. 
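
Aside: the trait introduced in the next hunk implies a calling convention on the consensus side — alternate select / validate / reject until a batch passes whole or the selector drains. A sketch of that driver loop (not part of the patch; assumes the kaspa_consensus_core paths below):

    use kaspa_consensus_core::{block::TemplateTransactionSelector, tx::Transaction};

    fn drive(
        selector: &mut dyn TemplateTransactionSelector,
        valid: impl Fn(&Transaction) -> bool,
    ) -> Vec<Transaction> {
        let mut accepted = Vec::new();
        loop {
            let batch = selector.select_transactions();
            if batch.is_empty() {
                break; // selector drained
            }
            let mut rejections = false;
            for tx in batch {
                if valid(&tx) {
                    accepted.push(tx);
                } else {
                    // Only txs from the most recent batch may be reported back
                    selector.reject_selection(tx.id());
                    rejections = true;
                }
            }
            if !rejections {
                break; // a whole batch validated — the template is complete
            }
        }
        accepted
    }
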
@@ -64,6 +68,11 @@ impl Block { } } +pub trait TemplateTransactionSelector { + fn select_transactions(&mut self) -> Vec; + fn reject_selection(&mut self, tx_id: TransactionId); +} + /// A block template for miners. #[derive(Debug, Clone)] pub struct BlockTemplate { diff --git a/consensus/src/consensus/mod.rs b/consensus/src/consensus/mod.rs index 673a7823bb..0071ea07f6 100644 --- a/consensus/src/consensus/mod.rs +++ b/consensus/src/consensus/mod.rs @@ -41,7 +41,7 @@ use crate::{ use kaspa_consensus_core::{ acceptance_data::AcceptanceData, api::{BlockValidationFuture, ConsensusApi}, - block::{Block, BlockTemplate}, + block::{Block, BlockTemplate, TemplateTransactionSelector}, block_count::BlockCount, blockhash::BlockHashExtensions, blockstatus::BlockStatus, @@ -353,8 +353,12 @@ impl Consensus { } impl ConsensusApi for Consensus { - fn build_block_template(&self, miner_data: MinerData, txs: Vec) -> Result { - self.virtual_processor.build_block_template(miner_data, txs) + fn build_block_template( + &self, + miner_data: MinerData, + tx_selector: Box, + ) -> Result { + self.virtual_processor.build_block_template(miner_data, tx_selector) } fn validate_and_insert_block(&self, block: Block) -> BlockValidationFuture { diff --git a/consensus/src/pipeline/virtual_processor/processor.rs b/consensus/src/pipeline/virtual_processor/processor.rs index 499103a059..90720f5ac2 100644 --- a/consensus/src/pipeline/virtual_processor/processor.rs +++ b/consensus/src/pipeline/virtual_processor/processor.rs @@ -48,7 +48,7 @@ use crate::{ }; use kaspa_consensus_core::{ acceptance_data::AcceptanceData, - block::{BlockTemplate, MutableBlock}, + block::{BlockTemplate, MutableBlock, TemplateTransactionSelector}, blockstatus::BlockStatus::{StatusDisqualifiedFromChain, StatusUTXOValid}, coinbase::MinerData, config::genesis::GenesisBlock, @@ -787,8 +787,15 @@ impl VirtualStateProcessor { Ok(()) } - pub fn build_block_template(&self, miner_data: MinerData, txs: Vec) -> Result { + pub fn build_block_template( + &self, + miner_data: MinerData, + mut tx_selector: Box, + ) -> Result { // TODO: tests + // + + let txs = tx_selector.select_transactions(); let virtual_read = self.virtual_stores.read(); let virtual_state = virtual_read.state.get().unwrap(); let virtual_utxo_view = &virtual_read.utxo_set; diff --git a/consensus/src/pipeline/virtual_processor/tests.rs b/consensus/src/pipeline/virtual_processor/tests.rs index 9c6f309270..119a57087b 100644 --- a/consensus/src/pipeline/virtual_processor/tests.rs +++ b/consensus/src/pipeline/virtual_processor/tests.rs @@ -1,17 +1,37 @@ use crate::{consensus::test_consensus::TestConsensus, model::services::reachability::ReachabilityService}; use kaspa_consensus_core::{ api::ConsensusApi, - block::{Block, BlockTemplate, MutableBlock}, + block::{Block, BlockTemplate, MutableBlock, TemplateTransactionSelector}, blockhash, blockstatus::BlockStatus, coinbase::MinerData, config::{params::MAINNET_PARAMS, ConfigBuilder}, - tx::{ScriptPublicKey, ScriptVec}, + tx::{ScriptPublicKey, ScriptVec, Transaction}, BlockHashSet, }; use kaspa_hashes::Hash; use std::{collections::VecDeque, thread::JoinHandle}; +struct OnetimeTxSelector { + txs: Option>, +} + +impl OnetimeTxSelector { + fn new(txs: Vec) -> Self { + Self { txs: Some(txs) } + } +} + +impl TemplateTransactionSelector for OnetimeTxSelector { + fn select_transactions(&mut self) -> Vec { + self.txs.take().unwrap() + } + + fn reject_selection(&mut self, _tx_id: kaspa_consensus_core::tx::TransactionId) { + unimplemented!() + } +} + struct TestContext 
{ consensus: TestConsensus, join_handles: Vec>, @@ -78,7 +98,10 @@ impl TestContext { } pub fn build_block_template(&self, nonce: u64, timestamp: u64) -> BlockTemplate { - let mut t = self.consensus.build_block_template(self.miner_data.clone(), Default::default()).unwrap(); + let mut t = self + .consensus + .build_block_template(self.miner_data.clone(), Box::new(OnetimeTxSelector::new(Default::default()))) + .unwrap(); t.block.header.timestamp = timestamp; t.block.header.nonce = nonce; t.block.header.finalize(); diff --git a/mining/src/block_template/builder.rs b/mining/src/block_template/builder.rs index 3f83919ed6..a3468e8e27 100644 --- a/mining/src/block_template/builder.rs +++ b/mining/src/block_template/builder.rs @@ -1,11 +1,7 @@ -use super::{errors::BuilderResult, policy::Policy, selector::TxSelector}; +use super::{errors::BuilderResult, policy::Policy}; use crate::{block_template::selector::TransactionsSelector, model::candidate_tx::CandidateTransaction}; use kaspa_consensus_core::{ - api::ConsensusApi, - block::BlockTemplate, - coinbase::MinerData, - merkle::calc_hash_merkle_root, - tx::{TransactionId, COINBASE_TRANSACTION_INDEX}, + api::ConsensusApi, block::BlockTemplate, coinbase::MinerData, merkle::calc_hash_merkle_root, tx::COINBASE_TRANSACTION_INDEX, }; use kaspa_core::{ debug, @@ -14,15 +10,12 @@ use kaspa_core::{ pub(crate) struct BlockTemplateBuilder { policy: Policy, - selector: TransactionsSelector, } impl BlockTemplateBuilder { - pub(crate) fn new(max_block_mass: u64, transactions: Vec) -> Self { - let _sw = Stopwatch::<50>::with_threshold("BlockTemplateBuilder::new"); + pub(crate) fn new(max_block_mass: u64) -> Self { let policy = Policy::new(max_block_mass); - let selector = TransactionsSelector::new(policy.clone(), transactions); - Self { policy, selector } + Self { policy } } /// BuildBlockTemplate creates a block template for a miner to consume @@ -89,27 +82,15 @@ impl BlockTemplateBuilder { /// | <= policy.BlockMinSize) | | /// ----------------------------------- -- pub(crate) fn build_block_template( - &mut self, + &self, consensus: &dyn ConsensusApi, miner_data: &MinerData, + transactions: Vec, ) -> BuilderResult { let _sw = Stopwatch::<20>::with_threshold("build_block_template op"); - debug!("Considering {} transactions for a new block template", self.selector.len()); - let block_txs = self.selector.select_transactions(); - Ok(consensus.build_block_template(miner_data.clone(), block_txs)?) - } - - pub(crate) fn update_transactions(&mut self, transactions: Vec) { - let selector = TransactionsSelector::new(self.policy.clone(), transactions); - self.selector = selector; - } - - pub(crate) fn reject_transaction(&mut self, transaction_id: TransactionId) { - self.selector.reject_selection(transaction_id); - } - - pub(crate) fn candidates_len(&self) -> usize { - self.selector.len() + debug!("Considering {} transactions for a new block template", transactions.len()); + let selector = Box::new(TransactionsSelector::new(self.policy.clone(), transactions)); + Ok(consensus.build_block_template(miner_data.clone(), selector)?) 
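+        // Design note: the builder is now stateless — a fresh selector is
+        // constructed per call and handed to consensus, which owns the
+        // select/reject loop and runs it against its own virtual state.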
} /// modify_block_template clones an existing block template, modifies it to the requested coinbase data and updates the timestamp diff --git a/mining/src/block_template/selector.rs b/mining/src/block_template/selector.rs index e55642a6a9..53ec0a3320 100644 --- a/mining/src/block_template/selector.rs +++ b/mining/src/block_template/selector.rs @@ -9,6 +9,7 @@ use super::{ policy::Policy, }; use kaspa_consensus_core::{ + block::TemplateTransactionSelector, subnets::SubnetworkId, tx::{Transaction, TransactionId}, }; @@ -27,11 +28,6 @@ const ALPHA: i32 = 3; /// if REBALANCE_THRESHOLD is 0.95, there's a 1-in-20 chance of collision. const REBALANCE_THRESHOLD: f64 = 0.95; -pub trait TxSelector { - fn select_transactions(&mut self) -> Vec; - fn reject_selection(&mut self, invalid_tx_id: TransactionId); -} - pub(crate) struct TransactionsSelector { policy: Policy, /// Transaction store @@ -85,10 +81,6 @@ impl TransactionsSelector { selector } - pub(crate) fn len(&self) -> usize { - self.transactions.len() - } - /// select_transactions implements a probabilistic transaction selection algorithm. /// The algorithm, roughly, is as follows: /// 1. We assign a probability to each transaction equal to: @@ -234,16 +226,16 @@ impl TransactionsSelector { } } -impl TxSelector for TransactionsSelector { +impl TemplateTransactionSelector for TransactionsSelector { fn select_transactions(&mut self) -> Vec { self.select_transactions() } - fn reject_selection(&mut self, invalid_tx_id: TransactionId) { + fn reject_selection(&mut self, tx_id: TransactionId) { let selected_txs_map = self .selected_txs_map .get_or_insert_with(|| self.selected_txs.iter().map(|&x| (self.transactions[x].tx.id(), x)).collect()); - let tx_index = *selected_txs_map.get(&invalid_tx_id).expect("only previously selected txs can be rejected"); + let tx_index = *selected_txs_map.get(&tx_id).expect("only previously selected txs can be rejected"); let tx = &self.transactions[tx_index]; self.total_mass -= tx.calculated_mass; self.total_fees -= tx.calculated_fee; @@ -264,31 +256,39 @@ mod tests { tx::{Transaction, TransactionId, TransactionInput, TransactionOutpoint, TransactionOutput}, }; use kaspa_txscript::{pay_to_script_hash_signature_script, test_helpers::op_true_script}; - use std::sync::Arc; + use std::{collections::HashSet, sync::Arc}; use crate::{mempool::config::DEFAULT_MINIMUM_RELAY_TRANSACTION_FEE, model::candidate_tx::CandidateTransaction}; #[test] fn test_reject_transaction() { const TX_INITIAL_COUNT: usize = 1_000; - const REJECT_COUNT: usize = 10; // Create a vector of transactions differing by output value so they have unique ids let transactions = (0..TX_INITIAL_COUNT).map(|i| create_transaction(SOMPI_PER_KASPA * (i + 1) as u64)).collect_vec(); let policy = Policy::new(100_000); let mut selector = TransactionsSelector::new(policy, transactions); - assert_eq!(selector.len(), TX_INITIAL_COUNT, "selector length matches initial transaction vector length"); - - let mut remaining_count = TX_INITIAL_COUNT; - for i in 0..3 { + let (mut kept, mut rejected) = (HashSet::new(), HashSet::new()); + let mut reject_count = 32; + for i in 0..10 { let selected_txs = selector.select_transactions(); - selected_txs.iter().skip((i + 1) * 100).take(REJECT_COUNT).for_each(|x| selector.reject_selection(x.id())); - remaining_count -= REJECT_COUNT; - assert_eq!(selector.len(), remaining_count, "selector length matches remaining transaction count"); - assert_eq!(selector.len(), remaining_count, "selector length matches remaining transaction count"); - let 
selected_txs_2 = selector.select_transactions(); - assert_eq!(selector.len(), remaining_count, "selector length matches remaining transaction count"); - assert_eq!(selected_txs.len(), selected_txs_2.len()); + if i > 0 { + assert_eq!( + selected_txs.len(), + reject_count, + "subsequent select calls are expected to only refill the previous rejections" + ); + reject_count /= 2; + } + for tx in selected_txs.iter() { + kept.insert(tx.id()).then_some(()).expect("selected txs should never repeat themselves"); + assert!(!rejected.contains(&tx.id()), "selected txs should never repeat themselves"); + } + selected_txs.iter().take(reject_count).for_each(|x| { + selector.reject_selection(x.id()); + kept.remove(&x.id()).then_some(()).expect("was just inserted"); + rejected.insert(x.id()).then_some(()).expect("was just verified"); + }); } } diff --git a/mining/src/manager.rs b/mining/src/manager.rs index 00c1f2872c..f7fe03a77d 100644 --- a/mining/src/manager.rs +++ b/mining/src/manager.rs @@ -85,17 +85,12 @@ impl MiningManager { debug!("Building a new block template..."); let _swo = Stopwatch::<22>::with_threshold("build_block_template full loop"); let mut attempts: u64 = 0; - let transactions = self.block_candidate_transactions(); - let mut block_template_builder = BlockTemplateBuilder::new(self.config.maximum_mass_per_block, transactions); loop { attempts += 1; - // TODO: consider a parameter forcing the consensus to build a template with the remaining successfully validated transactions - // - // let force_build = attempts == self.config.maximum_build_block_template_attempts; - // match block_template_builder.build_block_template(consensus, miner_data, force_build) { - - match block_template_builder.build_block_template(consensus, miner_data) { + let transactions = self.block_candidate_transactions(); + let block_template_builder = BlockTemplateBuilder::new(self.config.maximum_mass_per_block); + match block_template_builder.build_block_template(consensus, miner_data, transactions) { Ok(block_template) => { let block_template = cache_lock.set_immutable_cached_template(block_template); match attempts { @@ -125,21 +120,11 @@ impl MiningManager { return Ok(block_template.as_ref().clone()); } Err(BuilderError::ConsensusError(BlockRuleError::InvalidTransactionsInNewBlock(invalid_transactions))) => { - // Do not refetch candidates if not absolutely necessary so we do not lock the mempool - // and optimize for the quickest possible resolution - let keep_candidates = block_template_builder.candidates_len() - > self.config.ready_transactions_refetch_limit + invalid_transactions.len() - || attempts + 1 >= self.config.maximum_build_block_template_attempts; - let mut missing_outpoint: usize = 0; let mut invalid: usize = 0; let mut mempool_write = self.mempool.write(); invalid_transactions.iter().for_each(|(x, err)| { - if keep_candidates { - block_template_builder.reject_transaction(*x); - } - // On missing outpoints, the most likely is that the tx was already in a block accepted by // the consensus but not yet processed by handle_new_block_transactions(). Another possibility // is a double spend. In both cases, we simply remove the transaction but keep its redeemers. 
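
The manager-side counterpart of this change turns build_block_template into a retry loop: every attempt re-fetches candidates from the mempool, and txs the consensus rejected are evicted before the next try. A self-contained toy of that retry-with-eviction shape (plain integers stand in for transactions; all names hypothetical):

    use std::collections::HashSet;

    // Stand-in builder: rejects odd values, succeeds once none remain.
    fn build(candidates: &[u32]) -> Result<Vec<u32>, Vec<u32>> {
        let bad: Vec<u32> = candidates.iter().copied().filter(|c| c % 2 == 1).collect();
        if bad.is_empty() {
            Ok(candidates.to_vec())
        } else {
            Err(bad)
        }
    }

    fn main() {
        let mut pool: Vec<u32> = vec![1, 2, 3, 4, 5, 6];
        let template = loop {
            match build(&pool) {
                Ok(t) => break t,
                Err(invalid) => {
                    let invalid: HashSet<u32> = invalid.into_iter().collect();
                    pool.retain(|c| !invalid.contains(c)); // evict, then retry
                }
            }
        };
        assert_eq!(template, vec![2, 4, 6]);
    }
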
@@ -179,12 +164,6 @@ impl MiningManager { "Building a new block template failed for {} txs missing outpoint and {} invalid txs", missing_outpoint, invalid ); - - // Refetch candidates if asked to - if !keep_candidates { - let transactions = self.block_candidate_transactions(); - block_template_builder.update_transactions(transactions); - } } Err(err) => { warn!("Building a new block template failed: {}", err); @@ -204,8 +183,8 @@ impl MiningManager { } #[cfg(test)] - pub(crate) fn block_template_builder(&self, transactions: Vec) -> BlockTemplateBuilder { - BlockTemplateBuilder::new(self.config.maximum_mass_per_block, transactions) + pub(crate) fn block_template_builder(&self) -> BlockTemplateBuilder { + BlockTemplateBuilder::new(self.config.maximum_mass_per_block) } /// validate_and_insert_transaction validates the given transaction, and diff --git a/mining/src/manager_tests.rs b/mining/src/manager_tests.rs index 0ef9e53b47..ddcc05be1d 100644 --- a/mining/src/manager_tests.rs +++ b/mining/src/manager_tests.rs @@ -846,8 +846,8 @@ mod tests { let miner_data_2 = generate_new_coinbase(address_prefix, second_op); // Build a fresh template for coinbase2 as a reference - let mut builder = mining_manager.block_template_builder(transactions); - let result = builder.build_block_template(consensus, &miner_data_2); + let builder = mining_manager.block_template_builder(); + let result = builder.build_block_template(consensus, &miner_data_2, transactions); assert!(result.is_ok(), "build block template failed for miner data 2"); let expected_template = result.unwrap(); diff --git a/mining/src/testutils/consensus_mock.rs b/mining/src/testutils/consensus_mock.rs index 7976bd1f01..c1f5cda4c6 100644 --- a/mining/src/testutils/consensus_mock.rs +++ b/mining/src/testutils/consensus_mock.rs @@ -1,7 +1,7 @@ use super::coinbase_mock::CoinbaseManagerMock; use kaspa_consensus_core::{ api::ConsensusApi, - block::{BlockTemplate, MutableBlock}, + block::{BlockTemplate, MutableBlock, TemplateTransactionSelector}, coinbase::MinerData, constants::BLOCK_VERSION, errors::{ @@ -72,7 +72,12 @@ impl ConsensusMock { } impl ConsensusApi for ConsensusMock { - fn build_block_template(&self, miner_data: MinerData, mut txs: Vec) -> Result { + fn build_block_template( + &self, + miner_data: MinerData, + mut tx_selector: Box, + ) -> Result { + let mut txs = tx_selector.select_transactions(); let coinbase_manager = CoinbaseManagerMock::new(); let coinbase = coinbase_manager.expected_coinbase_transaction(miner_data.clone()); txs.insert(0, coinbase.tx); diff --git a/simpa/src/simulator/miner.rs b/simpa/src/simulator/miner.rs index 9bc3aae644..c69a0ca23f 100644 --- a/simpa/src/simulator/miner.rs +++ b/simpa/src/simulator/miner.rs @@ -4,7 +4,7 @@ use kaspa_consensus::consensus::Consensus; use kaspa_consensus::model::stores::virtual_state::VirtualStateStoreReader; use kaspa_consensus::params::Params; use kaspa_consensus_core::api::ConsensusApi; -use kaspa_consensus_core::block::Block; +use kaspa_consensus_core::block::{Block, TemplateTransactionSelector}; use kaspa_consensus_core::coinbase::MinerData; use kaspa_consensus_core::sign::sign; use kaspa_consensus_core::subnets::SUBNETWORK_ID_NATIVE; @@ -22,6 +22,26 @@ use std::cmp::max; use std::iter::once; use std::sync::Arc; +struct OnetimeTxSelector { + txs: Option>, +} + +impl OnetimeTxSelector { + fn new(txs: Vec) -> Self { + Self { txs: Some(txs) } + } +} + +impl TemplateTransactionSelector for OnetimeTxSelector { + fn select_transactions(&mut self) -> Vec { + self.txs.take().unwrap() + } 
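+    // Note: `Option::take` moves the batch out, so a second call would panic
+    // on the `unwrap` — acceptable for this one-shot selector whose whole
+    // batch is prepared up front by the simulator.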
+ + fn reject_selection(&mut self, _tx_id: kaspa_consensus_core::tx::TransactionId) { + unimplemented!() + } +} + pub struct Miner { // ID pub(super) id: u64, @@ -89,7 +109,7 @@ impl Miner { let session = self.consensus.acquire_session(); let mut block_template = self .consensus - .build_block_template(self.miner_data.clone(), txs) + .build_block_template(self.miner_data.clone(), Box::new(OnetimeTxSelector::new(txs))) .expect("simulation txs are selected in sync with virtual state and are expected to be valid"); drop(session); block_template.block.header.timestamp = timestamp; // Use simulation time rather than real time From d23113471ffb37872cb33d6f48358a3451b2de9b Mon Sep 17 00:00:00 2001 From: Michael Sutton Date: Thu, 21 Sep 2023 12:19:37 +0300 Subject: [PATCH 57/86] virtual processor selector retry logic --- .../pipeline/virtual_processor/processor.rs | 32 ++++++++++++++++--- mining/src/mempool/config.rs | 4 +-- 2 files changed, 30 insertions(+), 6 deletions(-) diff --git a/consensus/src/pipeline/virtual_processor/processor.rs b/consensus/src/pipeline/virtual_processor/processor.rs index 90720f5ac2..2979ab60db 100644 --- a/consensus/src/pipeline/virtual_processor/processor.rs +++ b/consensus/src/pipeline/virtual_processor/processor.rs @@ -88,7 +88,7 @@ use rayon::{ use rocksdb::WriteBatch; use std::{ cmp::min, - collections::{BinaryHeap, VecDeque}, + collections::{BinaryHeap, HashMap, VecDeque}, ops::Deref, sync::{atomic::Ordering, Arc}, }; @@ -795,13 +795,37 @@ impl VirtualStateProcessor { // TODO: tests // - let txs = tx_selector.select_transactions(); + // We call for the initial tx batch out of the virtual read lock + let mut txs = tx_selector.select_transactions(); + let virtual_read = self.virtual_stores.read(); let virtual_state = virtual_read.state.get().unwrap(); let virtual_utxo_view = &virtual_read.utxo_set; - // Validate the transactions in virtual's utxo context - self.validate_block_template_transactions(&txs, &virtual_state, virtual_utxo_view)?; + let mut invalid_transactions = HashMap::new(); + for tx in txs.iter() { + if let Err(e) = self.validate_block_template_transaction(tx, &virtual_state, virtual_utxo_view) { + invalid_transactions.insert(tx.id(), e); + tx_selector.reject_selection(tx.id()); + } + } + + if !invalid_transactions.is_empty() { + txs.retain(|tx| !invalid_transactions.contains_key(&tx.id())); + } + + while !invalid_transactions.is_empty() { + invalid_transactions.clear(); + let next_batch = tx_selector.select_transactions(); + for tx in next_batch { + if let Err(e) = self.validate_block_template_transaction(&tx, &virtual_state, virtual_utxo_view) { + invalid_transactions.insert(tx.id(), e); + tx_selector.reject_selection(tx.id()); + } else { + txs.push(tx); + } + } + } // At this point we can safely drop the read lock drop(virtual_read); diff --git a/mining/src/mempool/config.rs b/mining/src/mempool/config.rs index 2e3f68a2ab..15b42bbfa6 100644 --- a/mining/src/mempool/config.rs +++ b/mining/src/mempool/config.rs @@ -104,8 +104,8 @@ impl Config { Self { maximum_transaction_count: DEFAULT_MAXIMUM_TRANSACTION_COUNT, maximum_ready_transaction_count: DEFAULT_MAXIMUM_READY_TRANSACTION_COUNT, - maximum_build_block_template_attempts: DEFAULT_MAXIMUM_BUILD_BLOCK_TEMPLATE_ATTEMPTS, - ready_transactions_refetch_limit: DEFAULT_READY_TRANSACTIONS_REFETCH_LIMIT, + maximum_build_block_template_attempts: DEFAULT_MAXIMUM_BUILD_BLOCK_TEMPLATE_ATTEMPTS, // TODO + ready_transactions_refetch_limit: DEFAULT_READY_TRANSACTIONS_REFETCH_LIMIT, // TODO 
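            // e.g. with the 60 s default expiry and a 1000 ms block target, the
            // next field works out to 60 DAA score — about one unit per block.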
transaction_expire_interval_daa_score: DEFAULT_TRANSACTION_EXPIRE_INTERVAL_SECONDS * 1000 / target_milliseconds_per_block, transaction_expire_scan_interval_daa_score: DEFAULT_TRANSACTION_EXPIRE_SCAN_INTERVAL_SECONDS * 1000 / target_milliseconds_per_block, From 9a015d1320d1fa671748c217ae57dc8e211639b3 Mon Sep 17 00:00:00 2001 From: Michael Sutton Date: Thu, 21 Sep 2023 21:07:18 +0300 Subject: [PATCH 58/86] make BBT fallible by some selector criteria + comments and some docs --- consensus/core/src/block.rs | 11 +++++++++ consensus/core/src/errors/block.rs | 4 ++-- .../pipeline/virtual_processor/processor.rs | 24 ++++++++++++++----- .../src/pipeline/virtual_processor/tests.rs | 4 ++++ mining/src/block_template/selector.rs | 13 +++++++++- simpa/src/simulator/miner.rs | 4 ++++ 6 files changed, 51 insertions(+), 9 deletions(-) diff --git a/consensus/core/src/block.rs b/consensus/core/src/block.rs index 21c0b531c7..74aa42186e 100644 --- a/consensus/core/src/block.rs +++ b/consensus/core/src/block.rs @@ -68,9 +68,20 @@ impl Block { } } +/// An abstraction for a recallable transaction selector with persistent state pub trait TemplateTransactionSelector { + /// Expected to return a batch of transactions which were not previously selected. + /// The batch will typically contain sufficient transactions to fill the block + /// mass (along with the previously unrejected txs), or will drain the selector fn select_transactions(&mut self) -> Vec; + + /// Should be used to report invalid transactions obtained from the *most recent* + /// `select_transactions` call. Implementors should use this call to internally + /// track the selection state and discard the rejected tx from internal occupation calculations fn reject_selection(&mut self, tx_id: TransactionId); + + /// Determine whether this was an overall successful selection episode + fn is_successful(&self) -> bool; } /// A block template for miners. diff --git a/consensus/core/src/errors/block.rs b/consensus/core/src/errors/block.rs index e78d0f0f88..bc72136fe6 100644 --- a/consensus/core/src/errors/block.rs +++ b/consensus/core/src/errors/block.rs @@ -1,4 +1,4 @@ -use std::fmt::Display; +use std::{collections::HashMap, fmt::Display}; use crate::{ constants, @@ -140,7 +140,7 @@ pub enum RuleError { InvalidTransactionsInUtxoContext(usize, usize), #[error("invalid transactions in new block template")] - InvalidTransactionsInNewBlock(Vec<(TransactionId, TxRuleError)>), + InvalidTransactionsInNewBlock(HashMap), #[error("DAA window data has only {0} entries")] InsufficientDaaWindowSize(usize), diff --git a/consensus/src/pipeline/virtual_processor/processor.rs b/consensus/src/pipeline/virtual_processor/processor.rs index 2979ab60db..2bde3b4c48 100644 --- a/consensus/src/pipeline/virtual_processor/processor.rs +++ b/consensus/src/pipeline/virtual_processor/processor.rs @@ -792,10 +792,13 @@ impl VirtualStateProcessor { miner_data: MinerData, mut tx_selector: Box, ) -> Result { + // // TODO: tests // - // We call for the initial tx batch out of the virtual read lock + // We call for the initial tx batch before acquiring the virtual read lock, + // optimizing for the common case where all txs are valid. 
Following selection calls + // are called within the lock in order to preserve validness of already validated txs let mut txs = tx_selector.select_transactions(); let virtual_read = self.virtual_stores.read(); @@ -814,19 +817,28 @@ impl VirtualStateProcessor { txs.retain(|tx| !invalid_transactions.contains_key(&tx.id())); } - while !invalid_transactions.is_empty() { - invalid_transactions.clear(); - let next_batch = tx_selector.select_transactions(); + let mut has_rejections = !invalid_transactions.is_empty(); + while has_rejections { + has_rejections = false; + let next_batch = tx_selector.select_transactions(); // Note that once next_batch is empty the loop will exit for tx in next_batch { if let Err(e) = self.validate_block_template_transaction(&tx, &virtual_state, virtual_utxo_view) { invalid_transactions.insert(tx.id(), e); tx_selector.reject_selection(tx.id()); + has_rejections = true; } else { txs.push(tx); } } } + // Check whether this was an overall successful selection episode. We pass this decision + // to the selector implementation which has the broadest picture and can use mempool config + // and context + if !tx_selector.is_successful() { + return Err(RuleError::InvalidTransactionsInNewBlock(invalid_transactions)); + } + // At this point we can safely drop the read lock drop(virtual_read); @@ -842,10 +854,10 @@ impl VirtualStateProcessor { ) -> Result<(), RuleError> { // Search for invalid transactions. This can happen since the mining manager calling this function is not atomically in sync with virtual state // TODO: process transactions in parallel - let mut invalid_transactions = Vec::new(); + let mut invalid_transactions = HashMap::new(); for tx in txs.iter() { if let Err(e) = self.validate_block_template_transaction(tx, virtual_state, utxo_view) { - invalid_transactions.push((tx.id(), e)) + invalid_transactions.insert(tx.id(), e); } } if !invalid_transactions.is_empty() { diff --git a/consensus/src/pipeline/virtual_processor/tests.rs b/consensus/src/pipeline/virtual_processor/tests.rs index 119a57087b..e0b43b1b86 100644 --- a/consensus/src/pipeline/virtual_processor/tests.rs +++ b/consensus/src/pipeline/virtual_processor/tests.rs @@ -30,6 +30,10 @@ impl TemplateTransactionSelector for OnetimeTxSelector { fn reject_selection(&mut self, _tx_id: kaspa_consensus_core::tx::TransactionId) { unimplemented!() } + + fn is_successful(&self) -> bool { + true + } } struct TestContext { diff --git a/mining/src/block_template/selector.rs b/mining/src/block_template/selector.rs index 53ec0a3320..369d3450d9 100644 --- a/mining/src/block_template/selector.rs +++ b/mining/src/block_template/selector.rs @@ -44,6 +44,7 @@ pub(crate) struct TransactionsSelector { // Inner state of the selection process candidate_list: CandidateList, + overall_rejections: usize, used_count: usize, used_p: f64, total_mass: u64, @@ -65,6 +66,7 @@ impl TransactionsSelector { selected_txs: Default::default(), selected_txs_map: None, candidate_list: Default::default(), + overall_rejections: 0, used_count: 0, used_p: 0.0, total_mass: 0, @@ -234,14 +236,23 @@ impl TemplateTransactionSelector for TransactionsSelector { fn reject_selection(&mut self, tx_id: TransactionId) { let selected_txs_map = self .selected_txs_map + // We lazy-create the map only when there are actual rejections .get_or_insert_with(|| self.selected_txs.iter().map(|&x| (self.transactions[x].tx.id(), x)).collect()); - let tx_index = *selected_txs_map.get(&tx_id).expect("only previously selected txs can be rejected"); + let tx_index = 
selected_txs_map.remove(&tx_id).expect("only previously selected txs can be rejected (and only once)"); let tx = &self.transactions[tx_index]; self.total_mass -= tx.calculated_mass; self.total_fees -= tx.calculated_fee; if !tx.tx.subnetwork_id.is_builtin_or_native() { *self.gas_usage_map.get_mut(&tx.tx.subnetwork_id).expect("previously selected txs have an entry") -= tx.tx.gas; } + self.overall_rejections += 1; + } + + fn is_successful(&self) -> bool { + // TODO: comment + constants + self.transactions.is_empty() + || (self.total_mass as f64) > self.policy.max_block_mass as f64 * 0.8 + || (self.overall_rejections as f64) < self.transactions.len() as f64 * 0.2 } } diff --git a/simpa/src/simulator/miner.rs b/simpa/src/simulator/miner.rs index c69a0ca23f..2cdf1af4b5 100644 --- a/simpa/src/simulator/miner.rs +++ b/simpa/src/simulator/miner.rs @@ -40,6 +40,10 @@ impl TemplateTransactionSelector for OnetimeTxSelector { fn reject_selection(&mut self, _tx_id: kaspa_consensus_core::tx::TransactionId) { unimplemented!() } + + fn is_successful(&self) -> bool { + true + } } pub struct Miner { From 58e0ed5af4d705eff4bf2e78056dcec24f3b7024 Mon Sep 17 00:00:00 2001 From: Michael Sutton Date: Thu, 21 Sep 2023 21:30:13 +0300 Subject: [PATCH 59/86] add an infallible mode to virtual processor `build_block_template()` --- consensus/core/src/api/mod.rs | 3 ++- consensus/core/src/block.rs | 12 ++++++++++++ consensus/src/consensus/mod.rs | 5 +++-- .../src/pipeline/virtual_processor/processor.rs | 8 +++++--- consensus/src/pipeline/virtual_processor/tests.rs | 8 ++++++-- mining/src/block_template/builder.rs | 9 +++++++-- mining/src/block_template/selector.rs | 2 +- mining/src/manager.rs | 9 +++++++-- mining/src/manager_tests.rs | 3 ++- mining/src/testutils/consensus_mock.rs | 3 ++- simpa/src/simulator/miner.rs | 4 ++-- 11 files changed, 49 insertions(+), 17 deletions(-) diff --git a/consensus/core/src/api/mod.rs b/consensus/core/src/api/mod.rs index c11ea8317d..880a271723 100644 --- a/consensus/core/src/api/mod.rs +++ b/consensus/core/src/api/mod.rs @@ -4,7 +4,7 @@ use std::sync::Arc; use crate::{ acceptance_data::AcceptanceData, - block::{Block, BlockTemplate, TemplateTransactionSelector}, + block::{Block, BlockTemplate, TemplateBuildMode, TemplateTransactionSelector}, block_count::BlockCount, blockstatus::BlockStatus, coinbase::MinerData, @@ -31,6 +31,7 @@ pub trait ConsensusApi: Send + Sync { &self, miner_data: MinerData, tx_selector: Box, + build_mode: TemplateBuildMode, ) -> Result { unimplemented!() } diff --git a/consensus/core/src/block.rs b/consensus/core/src/block.rs index 74aa42186e..77a3631969 100644 --- a/consensus/core/src/block.rs +++ b/consensus/core/src/block.rs @@ -84,6 +84,18 @@ pub trait TemplateTransactionSelector { fn is_successful(&self) -> bool; } +/// Block template build mode +#[derive(Clone, Copy, Debug)] +pub enum TemplateBuildMode { + /// Block template build can possibly fail if `TemplateTransactionSelector::is_successful` deems the operation unsuccessful. + /// + /// In such a case, the build fails with `BlockRuleError::InvalidTransactionsInNewBlock`. + Standard, + + /// Block template build always succeeds. The built block contains only the validated transactions. + Infallible, +} + /// A block template for miners. 
#[derive(Debug, Clone)] pub struct BlockTemplate { diff --git a/consensus/src/consensus/mod.rs b/consensus/src/consensus/mod.rs index 0071ea07f6..dd82c09d6b 100644 --- a/consensus/src/consensus/mod.rs +++ b/consensus/src/consensus/mod.rs @@ -41,7 +41,7 @@ use crate::{ use kaspa_consensus_core::{ acceptance_data::AcceptanceData, api::{BlockValidationFuture, ConsensusApi}, - block::{Block, BlockTemplate, TemplateTransactionSelector}, + block::{Block, BlockTemplate, TemplateBuildMode, TemplateTransactionSelector}, block_count::BlockCount, blockhash::BlockHashExtensions, blockstatus::BlockStatus, @@ -357,8 +357,9 @@ impl ConsensusApi for Consensus { &self, miner_data: MinerData, tx_selector: Box, + build_mode: TemplateBuildMode, ) -> Result { - self.virtual_processor.build_block_template(miner_data, tx_selector) + self.virtual_processor.build_block_template(miner_data, tx_selector, build_mode) } fn validate_and_insert_block(&self, block: Block) -> BlockValidationFuture { diff --git a/consensus/src/pipeline/virtual_processor/processor.rs b/consensus/src/pipeline/virtual_processor/processor.rs index 2bde3b4c48..47d0cf6793 100644 --- a/consensus/src/pipeline/virtual_processor/processor.rs +++ b/consensus/src/pipeline/virtual_processor/processor.rs @@ -48,7 +48,7 @@ use crate::{ }; use kaspa_consensus_core::{ acceptance_data::AcceptanceData, - block::{BlockTemplate, MutableBlock, TemplateTransactionSelector}, + block::{BlockTemplate, MutableBlock, TemplateBuildMode, TemplateTransactionSelector}, blockstatus::BlockStatus::{StatusDisqualifiedFromChain, StatusUTXOValid}, coinbase::MinerData, config::genesis::GenesisBlock, @@ -791,6 +791,7 @@ impl VirtualStateProcessor { &self, miner_data: MinerData, mut tx_selector: Box, + build_mode: TemplateBuildMode, ) -> Result { // // TODO: tests @@ -835,8 +836,9 @@ impl VirtualStateProcessor { // Check whether this was an overall successful selection episode. 
We pass this decision // to the selector implementation which has the broadest picture and can use mempool config // and context - if !tx_selector.is_successful() { - return Err(RuleError::InvalidTransactionsInNewBlock(invalid_transactions)); + match (build_mode, tx_selector.is_successful()) { + (TemplateBuildMode::Standard, false) => return Err(RuleError::InvalidTransactionsInNewBlock(invalid_transactions)), + (TemplateBuildMode::Standard, true) | (TemplateBuildMode::Infallible, _) => {} } // At this point we can safely drop the read lock diff --git a/consensus/src/pipeline/virtual_processor/tests.rs b/consensus/src/pipeline/virtual_processor/tests.rs index e0b43b1b86..fc6cb73dae 100644 --- a/consensus/src/pipeline/virtual_processor/tests.rs +++ b/consensus/src/pipeline/virtual_processor/tests.rs @@ -1,7 +1,7 @@ use crate::{consensus::test_consensus::TestConsensus, model::services::reachability::ReachabilityService}; use kaspa_consensus_core::{ api::ConsensusApi, - block::{Block, BlockTemplate, MutableBlock, TemplateTransactionSelector}, + block::{Block, BlockTemplate, MutableBlock, TemplateBuildMode, TemplateTransactionSelector}, blockhash, blockstatus::BlockStatus, coinbase::MinerData, @@ -104,7 +104,11 @@ impl TestContext { pub fn build_block_template(&self, nonce: u64, timestamp: u64) -> BlockTemplate { let mut t = self .consensus - .build_block_template(self.miner_data.clone(), Box::new(OnetimeTxSelector::new(Default::default()))) + .build_block_template( + self.miner_data.clone(), + Box::new(OnetimeTxSelector::new(Default::default())), + TemplateBuildMode::Standard, + ) .unwrap(); t.block.header.timestamp = timestamp; t.block.header.nonce = nonce; diff --git a/mining/src/block_template/builder.rs b/mining/src/block_template/builder.rs index a3468e8e27..de3428a745 100644 --- a/mining/src/block_template/builder.rs +++ b/mining/src/block_template/builder.rs @@ -1,7 +1,11 @@ use super::{errors::BuilderResult, policy::Policy}; use crate::{block_template::selector::TransactionsSelector, model::candidate_tx::CandidateTransaction}; use kaspa_consensus_core::{ - api::ConsensusApi, block::BlockTemplate, coinbase::MinerData, merkle::calc_hash_merkle_root, tx::COINBASE_TRANSACTION_INDEX, + api::ConsensusApi, + block::{BlockTemplate, TemplateBuildMode}, + coinbase::MinerData, + merkle::calc_hash_merkle_root, + tx::COINBASE_TRANSACTION_INDEX, }; use kaspa_core::{ debug, @@ -86,11 +90,12 @@ impl BlockTemplateBuilder { consensus: &dyn ConsensusApi, miner_data: &MinerData, transactions: Vec, + build_mode: TemplateBuildMode, ) -> BuilderResult { let _sw = Stopwatch::<20>::with_threshold("build_block_template op"); debug!("Considering {} transactions for a new block template", transactions.len()); let selector = Box::new(TransactionsSelector::new(self.policy.clone(), transactions)); - Ok(consensus.build_block_template(miner_data.clone(), selector)?) + Ok(consensus.build_block_template(miner_data.clone(), selector, build_mode)?) 
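+        // The caller picks the mode: the mining manager (manager.rs hunk below)
+        // passes Standard while retry attempts remain and switches to Infallible
+        // on the last attempt, so template building always terminates with a
+        // valid (possibly smaller) template.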
} /// modify_block_template clones an existing block template, modifies it to the requested coinbase data and updates the timestamp diff --git a/mining/src/block_template/selector.rs b/mining/src/block_template/selector.rs index 369d3450d9..9b34d964c5 100644 --- a/mining/src/block_template/selector.rs +++ b/mining/src/block_template/selector.rs @@ -250,7 +250,7 @@ impl TemplateTransactionSelector for TransactionsSelector { fn is_successful(&self) -> bool { // TODO: comment + constants - self.transactions.is_empty() + self.overall_rejections == 0 || (self.total_mass as f64) > self.policy.max_block_mass as f64 * 0.8 || (self.overall_rejections as f64) < self.transactions.len() as f64 * 0.2 } diff --git a/mining/src/manager.rs b/mining/src/manager.rs index f7fe03a77d..9a081bff05 100644 --- a/mining/src/manager.rs +++ b/mining/src/manager.rs @@ -21,7 +21,7 @@ use crate::{ use itertools::Itertools; use kaspa_consensus_core::{ api::ConsensusApi, - block::BlockTemplate, + block::{BlockTemplate, TemplateBuildMode}, coinbase::MinerData, errors::{block::RuleError as BlockRuleError, tx::TxRuleError}, tx::{MutableTransaction, Transaction, TransactionId, TransactionOutput}, @@ -90,7 +90,12 @@ impl MiningManager { let transactions = self.block_candidate_transactions(); let block_template_builder = BlockTemplateBuilder::new(self.config.maximum_mass_per_block); - match block_template_builder.build_block_template(consensus, miner_data, transactions) { + let build_mode = if attempts < self.config.maximum_build_block_template_attempts { + TemplateBuildMode::Standard + } else { + TemplateBuildMode::Infallible + }; + match block_template_builder.build_block_template(consensus, miner_data, transactions, build_mode) { Ok(block_template) => { let block_template = cache_lock.set_immutable_cached_template(block_template); match attempts { diff --git a/mining/src/manager_tests.rs b/mining/src/manager_tests.rs index ddcc05be1d..776ef017fa 100644 --- a/mining/src/manager_tests.rs +++ b/mining/src/manager_tests.rs @@ -16,6 +16,7 @@ mod tests { use kaspa_addresses::{Address, Prefix, Version}; use kaspa_consensus_core::{ api::ConsensusApi, + block::TemplateBuildMode, coinbase::MinerData, constants::{MAX_TX_IN_SEQUENCE_NUM, SOMPI_PER_KASPA, TX_VERSION}, errors::tx::{TxResult, TxRuleError}, @@ -847,7 +848,7 @@ mod tests { // Build a fresh template for coinbase2 as a reference let builder = mining_manager.block_template_builder(); - let result = builder.build_block_template(consensus, &miner_data_2, transactions); + let result = builder.build_block_template(consensus, &miner_data_2, transactions, TemplateBuildMode::Standard); assert!(result.is_ok(), "build block template failed for miner data 2"); let expected_template = result.unwrap(); diff --git a/mining/src/testutils/consensus_mock.rs b/mining/src/testutils/consensus_mock.rs index c1f5cda4c6..ecf5319e0a 100644 --- a/mining/src/testutils/consensus_mock.rs +++ b/mining/src/testutils/consensus_mock.rs @@ -1,7 +1,7 @@ use super::coinbase_mock::CoinbaseManagerMock; use kaspa_consensus_core::{ api::ConsensusApi, - block::{BlockTemplate, MutableBlock, TemplateTransactionSelector}, + block::{BlockTemplate, MutableBlock, TemplateBuildMode, TemplateTransactionSelector}, coinbase::MinerData, constants::BLOCK_VERSION, errors::{ @@ -76,6 +76,7 @@ impl ConsensusApi for ConsensusMock { &self, miner_data: MinerData, mut tx_selector: Box, + _build_mode: TemplateBuildMode, ) -> Result { let mut txs = tx_selector.select_transactions(); let coinbase_manager = CoinbaseManagerMock::new(); 
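        // The mock drains the selector in a single call and ignores the build
        // mode — sufficient for tests, where no template tx is ever rejected.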
diff --git a/simpa/src/simulator/miner.rs b/simpa/src/simulator/miner.rs index 2cdf1af4b5..2f144fc668 100644 --- a/simpa/src/simulator/miner.rs +++ b/simpa/src/simulator/miner.rs @@ -4,7 +4,7 @@ use kaspa_consensus::consensus::Consensus; use kaspa_consensus::model::stores::virtual_state::VirtualStateStoreReader; use kaspa_consensus::params::Params; use kaspa_consensus_core::api::ConsensusApi; -use kaspa_consensus_core::block::{Block, TemplateTransactionSelector}; +use kaspa_consensus_core::block::{Block, TemplateBuildMode, TemplateTransactionSelector}; use kaspa_consensus_core::coinbase::MinerData; use kaspa_consensus_core::sign::sign; use kaspa_consensus_core::subnets::SUBNETWORK_ID_NATIVE; @@ -113,7 +113,7 @@ impl Miner { let session = self.consensus.acquire_session(); let mut block_template = self .consensus - .build_block_template(self.miner_data.clone(), Box::new(OnetimeTxSelector::new(txs))) + .build_block_template(self.miner_data.clone(), Box::new(OnetimeTxSelector::new(txs)), TemplateBuildMode::Standard) .expect("simulation txs are selected in sync with virtual state and are expected to be valid"); drop(session); block_template.block.header.timestamp = timestamp; // Use simulation time rather than real time From 827b9fa7d1d8b2f865d946ce99843a1a7442eecd Mon Sep 17 00:00:00 2001 From: Michael Sutton Date: Thu, 21 Sep 2023 21:45:30 +0300 Subject: [PATCH 60/86] constants for tx selector successful decision --- mining/src/block_template/selector.rs | 9 ++++++--- mining/src/mempool/config.rs | 8 +------- 2 files changed, 7 insertions(+), 10 deletions(-) diff --git a/mining/src/block_template/selector.rs b/mining/src/block_template/selector.rs index 9b34d964c5..a2ab7bad7b 100644 --- a/mining/src/block_template/selector.rs +++ b/mining/src/block_template/selector.rs @@ -249,10 +249,13 @@ impl TemplateTransactionSelector for TransactionsSelector { } fn is_successful(&self) -> bool { - // TODO: comment + constants + const SUFFICIENT_MASS_THRESHOLD: f64 = 0.8; + const LOW_REJECTION_FRACTION: f64 = 0.2; + + // We consider the operation successful if either mass occupation is above 80% or rejection rate is below 20% self.overall_rejections == 0 - || (self.total_mass as f64) > self.policy.max_block_mass as f64 * 0.8 - || (self.overall_rejections as f64) < self.transactions.len() as f64 * 0.2 + || (self.total_mass as f64) > self.policy.max_block_mass as f64 * SUFFICIENT_MASS_THRESHOLD + || (self.overall_rejections as f64) < self.transactions.len() as f64 * LOW_REJECTION_FRACTION } } diff --git a/mining/src/mempool/config.rs b/mining/src/mempool/config.rs index 15b42bbfa6..8c082d689d 100644 --- a/mining/src/mempool/config.rs +++ b/mining/src/mempool/config.rs @@ -3,8 +3,6 @@ use kaspa_consensus_core::constants::TX_VERSION; pub(crate) const DEFAULT_MAXIMUM_TRANSACTION_COUNT: u64 = 1_000_000; pub(crate) const DEFAULT_MAXIMUM_READY_TRANSACTION_COUNT: u64 = 100_000; pub(crate) const DEFAULT_MAXIMUM_BUILD_BLOCK_TEMPLATE_ATTEMPTS: u64 = 5; -// TODO: revisit this value -pub(crate) const DEFAULT_READY_TRANSACTIONS_REFETCH_LIMIT: usize = 2_500; pub(crate) const DEFAULT_TRANSACTION_EXPIRE_INTERVAL_SECONDS: u64 = 60; pub(crate) const DEFAULT_TRANSACTION_EXPIRE_SCAN_INTERVAL_SECONDS: u64 = 10; @@ -34,7 +32,6 @@ pub struct Config { pub maximum_transaction_count: u64, pub maximum_ready_transaction_count: u64, pub maximum_build_block_template_attempts: u64, - pub ready_transactions_refetch_limit: usize, pub transaction_expire_interval_daa_score: u64, pub transaction_expire_scan_interval_daa_score: u64, pub 
transaction_expire_scan_interval_milliseconds: u64, @@ -58,7 +55,6 @@ impl Config { maximum_transaction_count: u64, maximum_ready_transaction_count: u64, maximum_build_block_template_attempts: u64, - ready_transactions_refetch_limit: usize, transaction_expire_interval_daa_score: u64, transaction_expire_scan_interval_daa_score: u64, transaction_expire_scan_interval_milliseconds: u64, @@ -79,7 +75,6 @@ impl Config { maximum_transaction_count, maximum_ready_transaction_count, maximum_build_block_template_attempts, - ready_transactions_refetch_limit, transaction_expire_interval_daa_score, transaction_expire_scan_interval_daa_score, transaction_expire_scan_interval_milliseconds, @@ -104,8 +99,7 @@ impl Config { Self { maximum_transaction_count: DEFAULT_MAXIMUM_TRANSACTION_COUNT, maximum_ready_transaction_count: DEFAULT_MAXIMUM_READY_TRANSACTION_COUNT, - maximum_build_block_template_attempts: DEFAULT_MAXIMUM_BUILD_BLOCK_TEMPLATE_ATTEMPTS, // TODO - ready_transactions_refetch_limit: DEFAULT_READY_TRANSACTIONS_REFETCH_LIMIT, // TODO + maximum_build_block_template_attempts: DEFAULT_MAXIMUM_BUILD_BLOCK_TEMPLATE_ATTEMPTS, transaction_expire_interval_daa_score: DEFAULT_TRANSACTION_EXPIRE_INTERVAL_SECONDS * 1000 / target_milliseconds_per_block, transaction_expire_scan_interval_daa_score: DEFAULT_TRANSACTION_EXPIRE_SCAN_INTERVAL_SECONDS * 1000 / target_milliseconds_per_block, From db0cddf9ed5c18fdcfc141e2828c15494f9eb519 Mon Sep 17 00:00:00 2001 From: Tiram <18632023+tiram88@users.noreply.github.com> Date: Fri, 22 Sep 2023 00:12:04 +0300 Subject: [PATCH 61/86] Add e-tps to logged mempool metrics --- mining/src/lib.rs | 12 +++++++++--- mining/src/manager.rs | 5 ++++- mining/src/monitor.rs | 4 ++-- 3 files changed, 15 insertions(+), 6 deletions(-) diff --git a/mining/src/lib.rs b/mining/src/lib.rs index 47b11f4d9d..28fe583914 100644 --- a/mining/src/lib.rs +++ b/mining/src/lib.rs @@ -22,6 +22,7 @@ pub struct MiningCounters { pub tx_accepted_counts: AtomicU64, pub input_counts: AtomicU64, pub output_counts: AtomicU64, + pub mempool_sample: AtomicU64, } impl MiningCounters { @@ -33,6 +34,7 @@ impl MiningCounters { tx_accepted_counts: self.tx_accepted_counts.load(Ordering::Relaxed), input_counts: self.input_counts.load(Ordering::Relaxed), output_counts: self.output_counts.load(Ordering::Relaxed), + mempool_sample: self.mempool_sample.load(Ordering::Relaxed), } } @@ -56,6 +58,7 @@ pub struct MempoolCountersSnapshot { pub tx_accepted_counts: u64, pub input_counts: u64, pub output_counts: u64, + pub mempool_sample: u64, } impl MempoolCountersSnapshot { @@ -63,9 +66,11 @@ impl MempoolCountersSnapshot { self.high_priority_tx_counts + self.low_priority_tx_counts } - pub fn collision_ratio(&self) -> f64 { - if self.block_tx_counts > 0 { - (self.block_tx_counts - self.tx_accepted_counts) as f64 / self.block_tx_counts as f64 + pub fn e_tps(&self) -> f64 { + let accepted_txs = u64::min(self.mempool_sample, self.tx_accepted_counts); + let total_txs = u64::min(self.mempool_sample, self.block_tx_counts); + if total_txs > 0 { + accepted_txs as f64 / total_txs as f64 } else { 0f64 } @@ -83,6 +88,7 @@ impl core::ops::Sub for &MempoolCountersSnapshot { tx_accepted_counts: self.tx_accepted_counts.checked_sub(rhs.tx_accepted_counts).unwrap_or_default(), input_counts: self.input_counts.checked_sub(rhs.input_counts).unwrap_or_default(), output_counts: self.output_counts.checked_sub(rhs.output_counts).unwrap_or_default(), + mempool_sample: (self.mempool_sample + rhs.mempool_sample) / 2, } } } diff --git a/mining/src/manager.rs 
b/mining/src/manager.rs index 00c1f2872c..f917fcf754 100644 --- a/mining/src/manager.rs +++ b/mining/src/manager.rs @@ -30,7 +30,7 @@ use kaspa_consensusmanager::{spawn_blocking, ConsensusProxy}; use kaspa_core::{debug, error, info, time::Stopwatch, warn}; use kaspa_mining_errors::mempool::RuleError; use parking_lot::RwLock; -use std::sync::Arc; +use std::sync::{atomic::Ordering, Arc}; use tokio::sync::mpsc::UnboundedSender; pub struct MiningManager { @@ -509,6 +509,9 @@ impl MiningManager { // alternate no & write lock on mempool let accepted_transactions = self.validate_and_insert_unorphaned_transactions(consensus, unorphaned_transactions); + // read lock on mempool + self.counters.mempool_sample.store(self.mempool.read().transaction_count(true, false) as u64, Ordering::SeqCst); + // write lock on mempool self.mempool.write().log_stats(); diff --git a/mining/src/monitor.rs b/mining/src/monitor.rs index 1c90a21956..a79df99d1b 100644 --- a/mining/src/monitor.rs +++ b/mining/src/monitor.rs @@ -50,14 +50,14 @@ impl MiningMonitor { let now = Instant::now(); let elapsed = (now - last_log_time).as_secs_f64(); - info!("Processed {} unique transactions in the last {:.2}s ({:.2} avg txs/s, in: {} via RPC, {} via P2P, out: {} via accepted blocks, {:.2}% collisions)", + info!("Processed {} unique transactions in the last {:.2}s ({:.2} avg txs/s, in: {} via RPC, {} via P2P, out: {} via accepted blocks, {:.2}% e-tps)", delta.tx_accepted_counts, elapsed, delta.tx_accepted_counts as f64 / elapsed, delta.high_priority_tx_counts, delta.low_priority_tx_counts, delta.block_tx_counts, - delta.collision_ratio() * 100.0, + delta.e_tps() * 100.0, ); last_snapshot = snapshot; From 08456c6719fc4834dc9dbca5577ee9bf26dc84fc Mon Sep 17 00:00:00 2001 From: Michael Sutton Date: Fri, 22 Sep 2023 01:14:19 +0300 Subject: [PATCH 62/86] avoid realloc --- mining/src/block_template/selector.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/mining/src/block_template/selector.rs b/mining/src/block_template/selector.rs index a2ab7bad7b..b65126caf6 100644 --- a/mining/src/block_template/selector.rs +++ b/mining/src/block_template/selector.rs @@ -206,8 +206,9 @@ impl TransactionsSelector { fn reset_selection(&mut self) { assert_eq!(self.transactions.len(), self.selectable_txs.len()); + self.selected_txs.clear(); // TODO: consider to min with the approximated amount of txs which fit into max block mass - self.selected_txs = Vec::with_capacity(self.transactions.len()); + self.selected_txs.reserve_exact(self.transactions.len()); self.selected_txs_map = None; } From 099497b67793648a96e92d48926adc29755a7247 Mon Sep 17 00:00:00 2001 From: Tiram <18632023+tiram88@users.noreply.github.com> Date: Fri, 22 Sep 2023 14:46:14 +0300 Subject: [PATCH 63/86] Address review comments --- consensus/src/pipeline/virtual_processor/processor.rs | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/consensus/src/pipeline/virtual_processor/processor.rs b/consensus/src/pipeline/virtual_processor/processor.rs index 47d0cf6793..2718ebd91c 100644 --- a/consensus/src/pipeline/virtual_processor/processor.rs +++ b/consensus/src/pipeline/virtual_processor/processor.rs @@ -814,11 +814,11 @@ impl VirtualStateProcessor { } } - if !invalid_transactions.is_empty() { + let mut has_rejections = !invalid_transactions.is_empty(); + if has_rejections { txs.retain(|tx| !invalid_transactions.contains_key(&tx.id())); } - let mut has_rejections = !invalid_transactions.is_empty(); while has_rejections { has_rejections = false; let 
next_batch = tx_selector.select_transactions(); // Note that once next_batch is empty the loop will exit @@ -848,14 +848,13 @@ impl VirtualStateProcessor { self.build_block_template_from_virtual_state(virtual_state, miner_data, txs) } - pub fn validate_block_template_transactions( + pub(crate) fn validate_block_template_transactions( &self, txs: &[Transaction], virtual_state: &VirtualState, utxo_view: &impl UtxoView, ) -> Result<(), RuleError> { - // Search for invalid transactions. This can happen since the mining manager calling this function is not atomically in sync with virtual state - // TODO: process transactions in parallel + // Search for invalid transactions let mut invalid_transactions = HashMap::new(); for tx in txs.iter() { if let Err(e) = self.validate_block_template_transaction(tx, virtual_state, utxo_view) { From bb724e6b977a181bf2d1281be4b341a972ff794d Mon Sep 17 00:00:00 2001 From: Tiram <18632023+tiram88@users.noreply.github.com> Date: Fri, 22 Sep 2023 15:50:26 +0300 Subject: [PATCH 64/86] Use number of ready txs in e-tps & enhance mempool lock --- mining/src/lib.rs | 12 ++++++------ mining/src/manager.rs | 8 +------- mining/src/mempool/handle_new_block_transactions.rs | 5 +++++ mining/src/mempool/model/transactions_pool.rs | 4 ++++ 4 files changed, 16 insertions(+), 13 deletions(-) diff --git a/mining/src/lib.rs b/mining/src/lib.rs index 28fe583914..d1469e96b2 100644 --- a/mining/src/lib.rs +++ b/mining/src/lib.rs @@ -22,7 +22,7 @@ pub struct MiningCounters { pub tx_accepted_counts: AtomicU64, pub input_counts: AtomicU64, pub output_counts: AtomicU64, - pub mempool_sample: AtomicU64, + pub ready_txs_sample: AtomicU64, } impl MiningCounters { @@ -34,7 +34,7 @@ impl MiningCounters { tx_accepted_counts: self.tx_accepted_counts.load(Ordering::Relaxed), input_counts: self.input_counts.load(Ordering::Relaxed), output_counts: self.output_counts.load(Ordering::Relaxed), - mempool_sample: self.mempool_sample.load(Ordering::Relaxed), + ready_txs_sample: self.ready_txs_sample.load(Ordering::Relaxed), } } @@ -58,7 +58,7 @@ pub struct MempoolCountersSnapshot { pub tx_accepted_counts: u64, pub input_counts: u64, pub output_counts: u64, - pub mempool_sample: u64, + pub ready_txs_sample: u64, } impl MempoolCountersSnapshot { @@ -67,8 +67,8 @@ impl MempoolCountersSnapshot { } pub fn e_tps(&self) -> f64 { - let accepted_txs = u64::min(self.mempool_sample, self.tx_accepted_counts); - let total_txs = u64::min(self.mempool_sample, self.block_tx_counts); + let accepted_txs = u64::min(self.ready_txs_sample, self.tx_accepted_counts); + let total_txs = u64::min(self.ready_txs_sample, self.block_tx_counts); if total_txs > 0 { accepted_txs as f64 / total_txs as f64 } else { @@ -88,7 +88,7 @@ impl core::ops::Sub for &MempoolCountersSnapshot { tx_accepted_counts: self.tx_accepted_counts.checked_sub(rhs.tx_accepted_counts).unwrap_or_default(), input_counts: self.input_counts.checked_sub(rhs.input_counts).unwrap_or_default(), output_counts: self.output_counts.checked_sub(rhs.output_counts).unwrap_or_default(), - mempool_sample: (self.mempool_sample + rhs.mempool_sample) / 2, + ready_txs_sample: (self.ready_txs_sample + rhs.ready_txs_sample) / 2, } } } diff --git a/mining/src/manager.rs b/mining/src/manager.rs index a379d2a2a0..8ace013914 100644 --- a/mining/src/manager.rs +++ b/mining/src/manager.rs @@ -30,7 +30,7 @@ use kaspa_consensusmanager::{spawn_blocking, ConsensusProxy}; use kaspa_core::{debug, error, info, time::Stopwatch, warn}; use kaspa_mining_errors::mempool::RuleError; use 
parking_lot::RwLock; -use std::sync::{atomic::Ordering, Arc}; +use std::sync::Arc; use tokio::sync::mpsc::UnboundedSender; pub struct MiningManager { @@ -493,12 +493,6 @@ impl MiningManager { // alternate no & write lock on mempool let accepted_transactions = self.validate_and_insert_unorphaned_transactions(consensus, unorphaned_transactions); - // read lock on mempool - self.counters.mempool_sample.store(self.mempool.read().transaction_count(true, false) as u64, Ordering::SeqCst); - - // write lock on mempool - self.mempool.write().log_stats(); - Ok(accepted_transactions) } diff --git a/mining/src/mempool/handle_new_block_transactions.rs b/mining/src/mempool/handle_new_block_transactions.rs index b6eb233275..0d5c818644 100644 --- a/mining/src/mempool/handle_new_block_transactions.rs +++ b/mining/src/mempool/handle_new_block_transactions.rs @@ -40,6 +40,11 @@ impl Mempool { } unorphaned_transactions.extend(self.get_unorphaned_transactions_after_accepted_transaction(transaction)); } + + // Update the sample of number of ready transactions in the mempool and log the stats + self.counters.ready_txs_sample.store(self.transaction_pool.ready_transaction_count() as u64, Ordering::SeqCst); + self.log_stats(); + Ok(unorphaned_transactions) } diff --git a/mining/src/mempool/model/transactions_pool.rs b/mining/src/mempool/model/transactions_pool.rs index d5b4a081af..82676cc29f 100644 --- a/mining/src/mempool/model/transactions_pool.rs +++ b/mining/src/mempool/model/transactions_pool.rs @@ -164,6 +164,10 @@ impl TransactionsPool { self.all_transactions.remove(transaction_id).ok_or(RuleError::RejectMissingTransaction(*transaction_id)) } + pub(crate) fn ready_transaction_count(&self) -> usize { + self.ready_transactions.len() + } + /// all_ready_transactions returns all fully populated mempool transactions having no parents in the mempool. /// These transactions are ready for being inserted in a block template. 
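    // The ready-transaction count sampled into `ready_txs_sample` feeds the
    // e-tps ratio in the lib.rs hunk above. Worked example: with a sample of
    // 500 ready txs, 400 accepted and 800 block txs in the window,
    // e_tps = min(500, 400) / min(500, 800) = 400 / 500 = 0.80.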
pub(crate) fn all_ready_transactions(&self) -> Vec { From 66dbca87d788eebf99102341dc6e5b263cd9995a Mon Sep 17 00:00:00 2001 From: Tiram <18632023+tiram88@users.noreply.github.com> Date: Fri, 22 Sep 2023 15:50:45 +0300 Subject: [PATCH 65/86] Ignore failing send for clean tokio shutdown --- mining/src/manager.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mining/src/manager.rs b/mining/src/manager.rs index 8ace013914..bc140e933d 100644 --- a/mining/src/manager.rs +++ b/mining/src/manager.rs @@ -693,7 +693,7 @@ impl MiningManager { } } if !valid_ids.is_empty() { - assert!(transaction_ids_sender.send(valid_ids).is_ok(), "the channel is expected to have a receiver and be opened"); + let _ = transaction_ids_sender.send(valid_ids); } drop(_swo); mempool.log_stats(); From abd3143a5bcc4c8411e237ffb0e2c76af9732b4e Mon Sep 17 00:00:00 2001 From: Tiram <18632023+tiram88@users.noreply.github.com> Date: Sat, 23 Sep 2023 17:26:12 +0300 Subject: [PATCH 66/86] Log double spends --- mining/src/mempool/remove_transaction.rs | 38 ++++++++++++++++-------- 1 file changed, 26 insertions(+), 12 deletions(-) diff --git a/mining/src/mempool/remove_transaction.rs b/mining/src/mempool/remove_transaction.rs index 29d507e1d2..d4fa633d93 100644 --- a/mining/src/mempool/remove_transaction.rs +++ b/mining/src/mempool/remove_transaction.rs @@ -7,7 +7,7 @@ use crate::mempool::{ Mempool, }; use kaspa_consensus_core::tx::TransactionId; -use kaspa_core::debug; +use kaspa_core::{debug, warn}; use kaspa_utils::iter::IterExtensions; impl Mempool { @@ -52,17 +52,31 @@ impl Mempool { removed_transactions.extend(self.orphan_pool.remove_redeemers_of(transaction_id)?.iter().map(|x| x.id())); } - if reason.verbose() { - match removed_transactions.len() { - 0 => {} - 1 => debug!("Removed transaction ({}) {}{}", reason, removed_transactions[0], extra_info), - n => debug!( - "Removed {} transactions ({}): {}{}", - n, - reason, - removed_transactions.iter().reusable_format(", "), - extra_info - ), + if !removed_transactions.is_empty() { + match reason { + TxRemovalReason::Muted => {} + TxRemovalReason::DoubleSpend => match removed_transactions.len() { + 0 => {} + 1 => warn!("Removed transaction ({}) {}{}", reason, removed_transactions[0], extra_info), + n => warn!( + "Removed {} transactions ({}): {}{}", + n, + reason, + removed_transactions.iter().reusable_format(", "), + extra_info + ), + }, + _ => match removed_transactions.len() { + 0 => {} + 1 => debug!("Removed transaction ({}) {}{}", reason, removed_transactions[0], extra_info), + n => debug!( + "Removed {} transactions ({}): {}{}", + n, + reason, + removed_transactions.iter().reusable_format(", "), + extra_info + ), + }, } } From c29680fe229d8bd461146b92768153cf8b037075 Mon Sep 17 00:00:00 2001 From: Tiram <18632023+tiram88@users.noreply.github.com> Date: Sun, 24 Sep 2023 15:01:56 +0300 Subject: [PATCH 67/86] Log tx script cache stats (wip) --- consensus/src/consensus/factory.rs | 6 ++ consensus/src/consensus/mod.rs | 4 +- consensus/src/consensus/services.rs | 9 ++- consensus/src/consensus/test_consensus.rs | 39 +++++++++-- .../processes/transaction_validator/mod.rs | 10 ++- .../transaction_validator_populated.rs | 8 +++ .../tx_validation_in_isolation.rs | 1 + crypto/txscript/src/caches.rs | 66 +++++++++++++++++-- crypto/txscript/src/lib.rs | 2 +- crypto/txscript/src/opcodes/mod.rs | 8 +-- crypto/txscript/src/standard/multisig.rs | 2 +- kaspad/src/daemon.rs | 5 +- mining/src/monitor.rs | 52 +++++++++++---- simpa/src/main.rs | 22 +++++-- 
simpa/src/simulator/network.rs | 1 + .../src/consensus_integration_tests.rs | 5 +- 16 files changed, 201 insertions(+), 39 deletions(-) diff --git a/consensus/src/consensus/factory.rs b/consensus/src/consensus/factory.rs index 0d66b61f2f..a0b42ad654 100644 --- a/consensus/src/consensus/factory.rs +++ b/consensus/src/consensus/factory.rs @@ -11,6 +11,7 @@ use kaspa_database::{ registry::DatabaseStorePrefixes, }; +use kaspa_txscript::caches::TxScriptCacheCounters; use parking_lot::RwLock; use rocksdb::WriteBatch; use serde::{Deserialize, Serialize}; @@ -153,6 +154,7 @@ pub struct Factory { db_parallelism: usize, notification_root: Arc, counters: Arc, + tx_script_cache_counters: Arc, } impl Factory { @@ -163,6 +165,7 @@ impl Factory { db_parallelism: usize, notification_root: Arc, counters: Arc, + tx_script_cache_counters: Arc, ) -> Self { let mut config = config.clone(); #[cfg(feature = "devnet-prealloc")] @@ -175,6 +178,7 @@ impl Factory { db_parallelism, notification_root, counters, + tx_script_cache_counters, } } } @@ -208,6 +212,7 @@ impl ConsensusFactory for Factory { session_lock.clone(), self.notification_root.clone(), self.counters.clone(), + self.tx_script_cache_counters.clone(), entry.creation_timestamp, )); @@ -236,6 +241,7 @@ impl ConsensusFactory for Factory { session_lock.clone(), self.notification_root.clone(), self.counters.clone(), + self.tx_script_cache_counters.clone(), entry.creation_timestamp, )); diff --git a/consensus/src/consensus/mod.rs b/consensus/src/consensus/mod.rs index dd82c09d6b..7485f5547d 100644 --- a/consensus/src/consensus/mod.rs +++ b/consensus/src/consensus/mod.rs @@ -70,6 +70,7 @@ use kaspa_consensusmanager::{SessionLock, SessionReadGuard}; use kaspa_database::prelude::StoreResultExtensions; use kaspa_hashes::Hash; use kaspa_muhash::MuHash; +use kaspa_txscript::caches::TxScriptCacheCounters; use std::thread::{self, JoinHandle}; use std::{ @@ -132,6 +133,7 @@ impl Consensus { pruning_lock: SessionLock, notification_root: Arc, counters: Arc, + tx_script_cache_counters: Arc, creation_timestamp: u64, ) -> Self { let params = &config.params; @@ -147,7 +149,7 @@ impl Consensus { // Services and managers // - let services = ConsensusServices::new(db.clone(), storage.clone(), config.clone()); + let services = ConsensusServices::new(db.clone(), storage.clone(), config.clone(), tx_script_cache_counters); // // Processor channels diff --git a/consensus/src/consensus/services.rs b/consensus/src/consensus/services.rs index f0b791ba26..d76a28d5a9 100644 --- a/consensus/src/consensus/services.rs +++ b/consensus/src/consensus/services.rs @@ -18,6 +18,7 @@ use crate::{ }; use itertools::Itertools; +use kaspa_txscript::caches::TxScriptCacheCounters; use std::sync::Arc; pub type DbGhostdagManager = @@ -65,7 +66,12 @@ pub struct ConsensusServices { } impl ConsensusServices { - pub fn new(db: Arc, storage: Arc, config: Arc) -> Arc { + pub fn new( + db: Arc, + storage: Arc, + config: Arc, + tx_script_cache_counters: Arc, + ) -> Arc { let params = &config.params; let statuses_service = MTStatusesService::new(storage.statuses_store.clone()); @@ -144,6 +150,7 @@ impl ConsensusServices { params.ghostdag_k, params.coinbase_payload_script_public_key_max_len, params.coinbase_maturity, + tx_script_cache_counters, ); let pruning_point_manager = PruningPointManager::new( diff --git a/consensus/src/consensus/test_consensus.rs b/consensus/src/consensus/test_consensus.rs index 620722022a..10d6d5dd23 100644 --- a/consensus/src/consensus/test_consensus.rs +++ 
b/consensus/src/consensus/test_consensus.rs @@ -49,8 +49,17 @@ impl TestConsensus { /// Creates a test consensus instance based on `config` with the provided `db` and `notification_sender` pub fn with_db(db: Arc, config: &Config, notification_sender: Sender) -> Self { let notification_root = Arc::new(ConsensusNotificationRoot::new(notification_sender)); - let counters = Arc::new(ProcessingCounters::default()); - let consensus = Arc::new(Consensus::new(db, Arc::new(config.clone()), Default::default(), notification_root, counters, 0)); + let counters = Default::default(); + let tx_script_cache_counters = Default::default(); + let consensus = Arc::new(Consensus::new( + db, + Arc::new(config.clone()), + Default::default(), + notification_root, + counters, + tx_script_cache_counters, + 0, + )); let block_builder = TestBlockBuilder::new(consensus.virtual_processor.clone()); Self { params: config.params.clone(), consensus, block_builder, db_lifetime: Default::default() } @@ -60,8 +69,17 @@ impl TestConsensus { pub fn with_notifier(config: &Config, notification_sender: Sender) -> Self { let (db_lifetime, db) = create_temp_db!(ConnBuilder::default()); let notification_root = Arc::new(ConsensusNotificationRoot::new(notification_sender)); - let counters = Arc::new(ProcessingCounters::default()); - let consensus = Arc::new(Consensus::new(db, Arc::new(config.clone()), Default::default(), notification_root, counters, 0)); + let counters = Default::default(); + let tx_script_cache_counters = Default::default(); + let consensus = Arc::new(Consensus::new( + db, + Arc::new(config.clone()), + Default::default(), + notification_root, + counters, + tx_script_cache_counters, + 0, + )); let block_builder = TestBlockBuilder::new(consensus.virtual_processor.clone()); Self { consensus, block_builder, params: config.params.clone(), db_lifetime } @@ -72,8 +90,17 @@ impl TestConsensus { let (db_lifetime, db) = create_temp_db!(ConnBuilder::default()); let (dummy_notification_sender, _) = async_channel::unbounded(); let notification_root = Arc::new(ConsensusNotificationRoot::new(dummy_notification_sender)); - let counters = Arc::new(ProcessingCounters::default()); - let consensus = Arc::new(Consensus::new(db, Arc::new(config.clone()), Default::default(), notification_root, counters, 0)); + let counters = Default::default(); + let tx_script_cache_counters = Default::default(); + let consensus = Arc::new(Consensus::new( + db, + Arc::new(config.clone()), + Default::default(), + notification_root, + counters, + tx_script_cache_counters, + 0, + )); let block_builder = TestBlockBuilder::new(consensus.virtual_processor.clone()); Self { consensus, block_builder, params: config.params.clone(), db_lifetime } diff --git a/consensus/src/processes/transaction_validator/mod.rs b/consensus/src/processes/transaction_validator/mod.rs index 792ecb9b1f..0565a89a98 100644 --- a/consensus/src/processes/transaction_validator/mod.rs +++ b/consensus/src/processes/transaction_validator/mod.rs @@ -2,9 +2,14 @@ pub mod errors; pub mod transaction_validator_populated; mod tx_validation_in_isolation; pub mod tx_validation_not_utxo_related; +use std::sync::Arc; + use crate::model::stores::ghostdag; -use kaspa_txscript::{caches::Cache, SigCacheKey}; +use kaspa_txscript::{ + caches::{Cache, TxScriptCacheCounters}, + SigCacheKey, +}; pub use tx_validation_in_isolation::*; #[derive(Clone)] @@ -28,6 +33,7 @@ impl TransactionValidator { ghostdag_k: ghostdag::KType, coinbase_payload_script_public_key_max_len: u8, coinbase_maturity: u64, + counters: Arc, ) 
-> Self { Self { max_tx_inputs, @@ -37,7 +43,7 @@ impl TransactionValidator { ghostdag_k, coinbase_payload_script_public_key_max_len, coinbase_maturity, - sig_cache: Cache::new(10_000), + sig_cache: Cache::new(10_000, counters), } } } diff --git a/consensus/src/processes/transaction_validator/transaction_validator_populated.rs b/consensus/src/processes/transaction_validator/transaction_validator_populated.rs index 5e04431642..b46527aca0 100644 --- a/consensus/src/processes/transaction_validator/transaction_validator_populated.rs +++ b/consensus/src/processes/transaction_validator/transaction_validator_populated.rs @@ -142,6 +142,7 @@ mod tests { params.ghostdag_k, params.coinbase_payload_script_public_key_max_len, params.coinbase_maturity, + Default::default(), ); let prev_tx_id = TransactionId::from_str("746915c8dfc5e1550eacbe1d87625a105750cf1a65aaddd1baa60f8bcf7e953c").unwrap(); @@ -202,6 +203,7 @@ mod tests { params.ghostdag_k, params.coinbase_payload_script_public_key_max_len, params.coinbase_maturity, + Default::default(), ); // Taken from: 3f582463d73c77d93f278b7bf649bd890e75fe9bb8a1edd7a6854df1a2a2bfc1 @@ -263,6 +265,7 @@ mod tests { params.ghostdag_k, params.coinbase_payload_script_public_key_max_len, params.coinbase_maturity, + Default::default(), ); // Taken from: d839d29b549469d0f9a23e51febe68d4084967a6a477868b511a5a8d88c5ae06 @@ -324,6 +327,7 @@ mod tests { params.ghostdag_k, params.coinbase_payload_script_public_key_max_len, params.coinbase_maturity, + Default::default(), ); // Taken from: d839d29b549469d0f9a23e51febe68d4084967a6a477868b511a5a8d88c5ae06 @@ -386,6 +390,7 @@ mod tests { params.ghostdag_k, params.coinbase_payload_script_public_key_max_len, params.coinbase_maturity, + Default::default(), ); // Taken from: d839d29b549469d0f9a23e51febe68d4084967a6a477868b511a5a8d88c5ae06 @@ -448,6 +453,7 @@ mod tests { params.ghostdag_k, params.coinbase_payload_script_public_key_max_len, params.coinbase_maturity, + Default::default(), ); // Taken from: d839d29b549469d0f9a23e51febe68d4084967a6a477868b511a5a8d88c5ae06 @@ -510,6 +516,7 @@ mod tests { params.ghostdag_k, params.coinbase_payload_script_public_key_max_len, params.coinbase_maturity, + Default::default(), ); let prev_tx_id = TransactionId::from_str("1111111111111111111111111111111111111111111111111111111111111111").unwrap(); @@ -563,6 +570,7 @@ mod tests { params.ghostdag_k, params.coinbase_payload_script_public_key_max_len, params.coinbase_maturity, + Default::default(), ); let secp = Secp256k1::new(); diff --git a/consensus/src/processes/transaction_validator/tx_validation_in_isolation.rs b/consensus/src/processes/transaction_validator/tx_validation_in_isolation.rs index a30d0c8262..88a2e63a23 100644 --- a/consensus/src/processes/transaction_validator/tx_validation_in_isolation.rs +++ b/consensus/src/processes/transaction_validator/tx_validation_in_isolation.rs @@ -173,6 +173,7 @@ mod tests { params.ghostdag_k, params.coinbase_payload_script_public_key_max_len, params.coinbase_maturity, + Default::default(), ); let valid_cb = Transaction::new( diff --git a/crypto/txscript/src/caches.rs b/crypto/txscript/src/caches.rs index 854bfd1b87..209bf6854b 100644 --- a/crypto/txscript/src/caches.rs +++ b/crypto/txscript/src/caches.rs @@ -1,22 +1,37 @@ use indexmap::IndexMap; use parking_lot::RwLock; use rand::Rng; -use std::{collections::hash_map::RandomState, hash::BuildHasher, sync::Arc}; +use std::{ + collections::hash_map::RandomState, + hash::BuildHasher, + sync::{ + atomic::{AtomicU64, Ordering}, + Arc, + }, +}; 
#[derive(Clone)] pub struct Cache { // We use IndexMap and not HashMap, because it makes it cheaper to remove a random element when the cache is full. map: Arc>>, size: usize, + counters: Arc, } impl Cache { - pub fn new(size: u64) -> Self { - Self { map: Arc::new(RwLock::new(IndexMap::with_capacity_and_hasher(size as usize, S::default()))), size: size as usize } + pub fn new(size: u64, counters: Arc) -> Self { + Self { + map: Arc::new(RwLock::new(IndexMap::with_capacity_and_hasher(size as usize, S::default()))), + size: size as usize, + counters, + } } pub(crate) fn get(&self, key: &TKey) -> Option { - self.map.read().get(key).cloned() + self.map.read().get(key).cloned().map(|data| { + self.counters.get_counts.fetch_add(1, Ordering::Relaxed); + data + }) } pub(crate) fn insert(&self, key: TKey, data: TData) { @@ -28,5 +43,48 @@ impl TxScriptCacheCountersSnapshot { + TxScriptCacheCountersSnapshot { + insert_counts: self.insert_counts.load(Ordering::Relaxed), + get_counts: self.get_counts.load(Ordering::Relaxed), + } + } +} + +#[derive(Debug, PartialEq, Eq)] +pub struct TxScriptCacheCountersSnapshot { + pub insert_counts: u64, + pub get_counts: u64, +} + +impl TxScriptCacheCountersSnapshot { + pub fn hit_ratio(&self) -> f64 { + if self.insert_counts > 0 { + self.get_counts as f64 / self.insert_counts as f64 + } else { + 0f64 + } + } +} + +impl core::ops::Sub for &TxScriptCacheCountersSnapshot { + type Output = TxScriptCacheCountersSnapshot; + + fn sub(self, rhs: Self) -> Self::Output { + Self::Output { + insert_counts: self.insert_counts.checked_sub(rhs.insert_counts).unwrap_or_default(), + get_counts: self.get_counts.checked_sub(rhs.get_counts).unwrap_or_default(), + } } } diff --git a/crypto/txscript/src/lib.rs b/crypto/txscript/src/lib.rs index 31e5dcbd44..0c941222c8 100644 --- a/crypto/txscript/src/lib.rs +++ b/crypto/txscript/src/lib.rs @@ -529,7 +529,7 @@ mod tests { } fn run_test_script_cases(test_cases: Vec) { - let sig_cache = Cache::new(10_000); + let sig_cache = Cache::new(10_000, Default::default()); let mut reused_values = SigHashReusedValues::new(); for test in test_cases { diff --git a/crypto/txscript/src/opcodes/mod.rs b/crypto/txscript/src/opcodes/mod.rs index 30c8932110..2030456947 100644 --- a/crypto/txscript/src/opcodes/mod.rs +++ b/crypto/txscript/src/opcodes/mod.rs @@ -961,7 +961,7 @@ mod test { } fn run_test_cases(tests: Vec) { - let cache = Cache::new(10_000); + let cache = Cache::new(10_000, Default::default()); let mut reused_values = SigHashReusedValues::new(); for TestCase { init, code, dstack } in tests { let mut vm = TxScriptEngine::new(&mut reused_values, &cache); @@ -990,7 +990,7 @@ mod test { opcodes::OpRShift::empty().expect("Should accept empty"), ]; - let cache = Cache::new(10_000); + let cache = Cache::new(10_000, Default::default()); let mut reused_values = SigHashReusedValues::new(); let mut vm = TxScriptEngine::new(&mut reused_values, &cache); @@ -1013,7 +1013,7 @@ mod test { opcodes::OpReserved2::empty().expect("Should accept empty"), ]; - let cache = Cache::new(10_000); + let cache = Cache::new(10_000, Default::default()); let mut reused_values = SigHashReusedValues::new(); let mut vm = TxScriptEngine::new(&mut reused_values, &cache); @@ -1104,7 +1104,7 @@ mod test { opcodes::OpUnknown249::empty().expect("Should accept empty"), ]; - let cache = Cache::new(10_000); + let cache = Cache::new(10_000, Default::default()); let mut reused_values = SigHashReusedValues::new(); let mut vm = TxScriptEngine::new(&mut reused_values, &cache); diff --git 
a/crypto/txscript/src/standard/multisig.rs b/crypto/txscript/src/standard/multisig.rs index 28d9173d5f..afe6ef0d9a 100644 --- a/crypto/txscript/src/standard/multisig.rs +++ b/crypto/txscript/src/standard/multisig.rs @@ -183,7 +183,7 @@ mod tests { let tx = tx.as_verifiable(); let (input, entry) = tx.populated_inputs().next().unwrap(); - let cache = Cache::new(10_000); + let cache = Cache::new(10_000, Default::default()); let mut engine = TxScriptEngine::from_transaction_input(&tx, input, 0, entry, &mut reused_values, &cache).unwrap(); assert_eq!(engine.execute().is_ok(), is_ok); } diff --git a/kaspad/src/daemon.rs b/kaspad/src/daemon.rs index b279d6f154..8d37405bc4 100644 --- a/kaspad/src/daemon.rs +++ b/kaspad/src/daemon.rs @@ -10,6 +10,7 @@ use kaspa_core::{core::Core, info, trace}; use kaspa_core::{kaspad_env::version, task::tick::TickService}; use kaspa_grpc_server::service::GrpcService; use kaspa_rpc_service::service::RpcCoreService; +use kaspa_txscript::caches::TxScriptCacheCounters; use kaspa_utils::networking::ContextualNetAddress; use kaspa_addressmanager::AddressManager; @@ -240,6 +241,7 @@ do you confirm? (answer y/n or pass --yes to the Kaspad command line to confirm let mining_counters = Arc::new(MiningCounters::default()); let wrpc_borsh_counters = Arc::new(WrpcServerCounters::default()); let wrpc_json_counters = Arc::new(WrpcServerCounters::default()); + let tx_script_cache_counters = Arc::new(TxScriptCacheCounters::default()); // Use `num_cpus` background threads for the consensus database as recommended by rocksdb let consensus_db_parallelism = num_cpus::get(); @@ -250,6 +252,7 @@ do you confirm? (answer y/n or pass --yes to the Kaspad command line to confirm consensus_db_parallelism, notification_root.clone(), processing_counters.clone(), + tx_script_cache_counters.clone(), )); let consensus_manager = Arc::new(ConsensusManager::new(consensus_factory)); let consensus_monitor = Arc::new(ConsensusMonitor::new(processing_counters.clone(), tick_service.clone())); @@ -285,7 +288,7 @@ do you confirm? 
(answer y/n or pass --yes to the Kaspad command line to confirm let cache_lifetime: Option = None; #[cfg(feature = "devnet-prealloc")] let cache_lifetime = config.block_template_cache_lifetime; - let mining_monitor = Arc::new(MiningMonitor::new(mining_counters.clone(), tick_service.clone())); + let mining_monitor = Arc::new(MiningMonitor::new(mining_counters.clone(), tx_script_cache_counters.clone(), tick_service.clone())); let mining_manager = MiningManagerProxy::new(Arc::new(MiningManager::new( config.target_time_per_block, false, diff --git a/mining/src/monitor.rs b/mining/src/monitor.rs index a79df99d1b..1359286a51 100644 --- a/mining/src/monitor.rs +++ b/mining/src/monitor.rs @@ -7,6 +7,7 @@ use kaspa_core::{ }, trace, }; +use kaspa_txscript::caches::TxScriptCacheCounters; use std::{ sync::Arc, time::{Duration, Instant}, @@ -18,17 +19,24 @@ pub struct MiningMonitor { // Counters counters: Arc, + tx_script_cache_counters: Arc, + // Tick service tick_service: Arc, } impl MiningMonitor { - pub fn new(counters: Arc, tick_service: Arc) -> MiningMonitor { - MiningMonitor { counters, tick_service } + pub fn new( + counters: Arc, + tx_script_cache_counters: Arc, + tick_service: Arc, + ) -> MiningMonitor { + MiningMonitor { counters, tx_script_cache_counters, tick_service } } pub async fn worker(self: &Arc) { let mut last_snapshot = self.counters.snapshot(); + let mut last_tx_script_cache_snapshot = self.tx_script_cache_counters.snapshot(); let mut last_log_time = Instant::now(); let snapshot_interval = 10; loop { @@ -39,6 +47,9 @@ impl MiningMonitor { } let snapshot = self.counters.snapshot(); + let tx_script_cache_snapshot = self.tx_script_cache_counters.snapshot(); + let now = Instant::now(); + let elapsed = (now - last_log_time).as_secs_f64(); if snapshot == last_snapshot { // No update, avoid printing useless info last_log_time = Instant::now(); @@ -47,20 +58,35 @@ impl MiningMonitor { // Subtract the snapshots let delta = &snapshot - &last_snapshot; - let now = Instant::now(); - let elapsed = (now - last_log_time).as_secs_f64(); + let tx_script_cache_delta = &tx_script_cache_snapshot - &last_tx_script_cache_snapshot; - info!("Processed {} unique transactions in the last {:.2}s ({:.2} avg txs/s, in: {} via RPC, {} via P2P, out: {} via accepted blocks, {:.2}% e-tps)", - delta.tx_accepted_counts, - elapsed, - delta.tx_accepted_counts as f64 / elapsed, - delta.high_priority_tx_counts, - delta.low_priority_tx_counts, - delta.block_tx_counts, - delta.e_tps() * 100.0, - ); + // Avoid printing useless info if no update + if snapshot != last_snapshot { + info!("Processed {} unique transactions in the last {:.2}s ({:.2} avg txs/s, in: {} via RPC, {} via P2P, out: {} via accepted blocks, {:.2}% e-tps)", + delta.tx_accepted_counts, + elapsed, + delta.tx_accepted_counts as f64 / elapsed, + delta.high_priority_tx_counts, + delta.low_priority_tx_counts, + delta.block_tx_counts, + delta.e_tps() * 100.0, + ); + // FIXME: (wip) decide if the log level should be debug and what info should be kept or formulated differently + if tx_script_cache_snapshot != last_tx_script_cache_snapshot { + info!( + "Created {} UTXOs, spent {} in the last {:.2}s ({} signatures validated, {} cache hits, {:.2} hit ratio)", + delta.output_counts, + delta.input_counts, + elapsed, + tx_script_cache_delta.insert_counts, + tx_script_cache_delta.get_counts, + tx_script_cache_delta.hit_ratio() + ); + } + } last_snapshot = snapshot; + last_tx_script_cache_snapshot = tx_script_cache_snapshot; last_log_time = now; } diff --git 
a/simpa/src/main.rs b/simpa/src/main.rs index 123f21f724..0458a34ef5 100644 --- a/simpa/src/main.rs +++ b/simpa/src/main.rs @@ -187,8 +187,15 @@ fn main() { }; let (dummy_notification_sender, _) = unbounded(); let notification_root = Arc::new(ConsensusNotificationRoot::new(dummy_notification_sender)); - let consensus = - Arc::new(Consensus::new(db, config.clone(), Default::default(), notification_root, Default::default(), unix_now())); + let consensus = Arc::new(Consensus::new( + db, + config.clone(), + Default::default(), + notification_root, + Default::default(), + Default::default(), + unix_now(), + )); (consensus, lifetime) } else { let until = if args.target_blocks.is_none() { config.genesis.timestamp + args.sim_time * 1000 } else { u64::MAX }; // milliseconds @@ -216,8 +223,15 @@ fn main() { let (_lifetime2, db2) = create_temp_db!(ConnBuilder::default().with_parallelism(num_cpus::get())); let (dummy_notification_sender, _) = unbounded(); let notification_root = Arc::new(ConsensusNotificationRoot::new(dummy_notification_sender)); - let consensus2 = - Arc::new(Consensus::new(db2, config.clone(), Default::default(), notification_root, Default::default(), unix_now())); + let consensus2 = Arc::new(Consensus::new( + db2, + config.clone(), + Default::default(), + notification_root, + Default::default(), + Default::default(), + unix_now(), + )); let handles2 = consensus2.run_processors(); rt.block_on(validate(&consensus, &consensus2, &config, args.delay, args.bps)); consensus2.shutdown(handles2); diff --git a/simpa/src/simulator/network.rs b/simpa/src/simulator/network.rs index 82e75178e8..74fdabf696 100644 --- a/simpa/src/simulator/network.rs +++ b/simpa/src/simulator/network.rs @@ -82,6 +82,7 @@ impl KaspaNetworkSimulator { Default::default(), notification_root, Default::default(), + Default::default(), unix_now(), )); let handles = consensus.run_processors(); diff --git a/testing/integration/src/consensus_integration_tests.rs b/testing/integration/src/consensus_integration_tests.rs index 7dcf3fe84b..09776015e6 100644 --- a/testing/integration/src/consensus_integration_tests.rs +++ b/testing/integration/src/consensus_integration_tests.rs @@ -52,6 +52,7 @@ use kaspa_database::prelude::ConnBuilder; use kaspa_index_processor::service::IndexService; use kaspa_math::Uint256; use kaspa_muhash::MuHash; +use kaspa_txscript::caches::TxScriptCacheCounters; use kaspa_utxoindex::api::{UtxoIndexApi, UtxoIndexProxy}; use kaspa_utxoindex::UtxoIndex; use serde::{Deserialize, Serialize}; @@ -1689,8 +1690,10 @@ async fn staging_consensus_test() { let (notification_send, _notification_recv) = unbounded(); let notification_root = Arc::new(ConsensusNotificationRoot::new(notification_send)); let counters = Arc::new(ProcessingCounters::default()); + let tx_script_cache_counters = Arc::new(TxScriptCacheCounters::default()); - let consensus_factory = Arc::new(ConsensusFactory::new(meta_db, &config, consensus_db_dir, 4, notification_root, counters)); + let consensus_factory = + Arc::new(ConsensusFactory::new(meta_db, &config, consensus_db_dir, 4, notification_root, counters, tx_script_cache_counters)); let consensus_manager = Arc::new(ConsensusManager::new(consensus_factory)); let core = Arc::new(Core::new()); From 6f8b024fa5b0e7dc6999c5988a8fc513d45c16da Mon Sep 17 00:00:00 2001 From: Tiram <18632023+tiram88@users.noreply.github.com> Date: Sun, 24 Sep 2023 16:05:11 +0300 Subject: [PATCH 68/86] Ease atomic lock ordering & enhance counter updates --- mining/src/lib.rs | 4 ++-- 
.../mempool/handle_new_block_transactions.rs | 18 +++++++++++------- 2 files changed, 13 insertions(+), 9 deletions(-) diff --git a/mining/src/lib.rs b/mining/src/lib.rs index d1469e96b2..45b49ab934 100644 --- a/mining/src/lib.rs +++ b/mining/src/lib.rs @@ -41,10 +41,10 @@ impl MiningCounters { pub fn increase_tx_counts(&self, value: u64, priority: Priority) { match priority { Priority::Low => { - self.low_priority_tx_counts.fetch_add(value, Ordering::SeqCst); + self.low_priority_tx_counts.fetch_add(value, Ordering::Relaxed); } Priority::High => { - self.high_priority_tx_counts.fetch_add(value, Ordering::SeqCst); + self.high_priority_tx_counts.fetch_add(value, Ordering::Relaxed); } } } diff --git a/mining/src/mempool/handle_new_block_transactions.rs b/mining/src/mempool/handle_new_block_transactions.rs index 0d5c818644..c234fcc7e9 100644 --- a/mining/src/mempool/handle_new_block_transactions.rs +++ b/mining/src/mempool/handle_new_block_transactions.rs @@ -21,6 +21,9 @@ impl Mempool { ) -> RuleResult> { let _sw = Stopwatch::<400>::with_threshold("handle_new_block_transactions op"); let mut unorphaned_transactions = vec![]; + let mut tx_accepted_counts = 0; + let mut input_counts = 0; + let mut output_counts = 0; for transaction in block_transactions[1..].iter() { let transaction_id = transaction.id(); // Rust rewrite: This behavior does differ from golang implementation. @@ -32,17 +35,18 @@ impl Mempool { } self.remove_double_spends(transaction)?; self.orphan_pool.remove_orphan(&transaction_id, false, TxRemovalReason::Accepted, "")?; - self.counters.block_tx_counts.fetch_add(1, Ordering::SeqCst); if self.accepted_transactions.add(transaction_id, block_daa_score) { - self.counters.tx_accepted_counts.fetch_add(1, Ordering::SeqCst); - self.counters.input_counts.fetch_add(transaction.inputs.len() as u64, Ordering::SeqCst); - self.counters.output_counts.fetch_add(transaction.outputs.len() as u64, Ordering::SeqCst); + tx_accepted_counts += 1; + input_counts += transaction.inputs.len(); + output_counts += transaction.outputs.len(); } unorphaned_transactions.extend(self.get_unorphaned_transactions_after_accepted_transaction(transaction)); } - - // Update the sample of number of ready transactions in the mempool and log the stats - self.counters.ready_txs_sample.store(self.transaction_pool.ready_transaction_count() as u64, Ordering::SeqCst); + self.counters.block_tx_counts.fetch_add(block_transactions.len() as u64 - 1, Ordering::Relaxed); + self.counters.tx_accepted_counts.fetch_add(tx_accepted_counts, Ordering::Relaxed); + self.counters.input_counts.fetch_add(input_counts as u64, Ordering::Relaxed); + self.counters.output_counts.fetch_add(output_counts as u64, Ordering::Relaxed); + self.counters.ready_txs_sample.store(self.transaction_pool.ready_transaction_count() as u64, Ordering::Relaxed); self.log_stats(); Ok(unorphaned_transactions) From 87f20f0c52b78f805ceabe9aed4225272044a085 Mon Sep 17 00:00:00 2001 From: Tiram <18632023+tiram88@users.noreply.github.com> Date: Tue, 26 Sep 2023 10:43:29 +0300 Subject: [PATCH 69/86] Enhance tx throughput stats log line --- mining/src/lib.rs | 34 ++++++++++++++++++++++++++++-- mining/src/monitor.rs | 49 ++++++++++++++++--------------------------- 2 files changed, 50 insertions(+), 33 deletions(-) diff --git a/mining/src/lib.rs b/mining/src/lib.rs index 45b49ab934..d5eb767dfb 100644 --- a/mining/src/lib.rs +++ b/mining/src/lib.rs @@ -1,4 +1,7 @@ -use std::sync::atomic::{AtomicU64, Ordering}; +use std::{ + sync::atomic::{AtomicU64, Ordering}, + time::{Duration, 
Instant}, +}; use mempool::tx::Priority; @@ -14,8 +17,8 @@ pub mod monitor; #[cfg(test)] pub mod testutils; -#[derive(Default)] pub struct MiningCounters { + pub creation_time: Instant, pub high_priority_tx_counts: AtomicU64, pub low_priority_tx_counts: AtomicU64, pub block_tx_counts: AtomicU64, @@ -25,9 +28,25 @@ pub struct MiningCounters { pub ready_txs_sample: AtomicU64, } +impl Default for MiningCounters { + fn default() -> Self { + Self { + creation_time: Instant::now(), + high_priority_tx_counts: Default::default(), + low_priority_tx_counts: Default::default(), + block_tx_counts: Default::default(), + tx_accepted_counts: Default::default(), + input_counts: Default::default(), + output_counts: Default::default(), + ready_txs_sample: Default::default(), + } + } +} + impl MiningCounters { pub fn snapshot(&self) -> MempoolCountersSnapshot { MempoolCountersSnapshot { + elapsed_time: (Instant::now() - self.creation_time), high_priority_tx_counts: self.high_priority_tx_counts.load(Ordering::Relaxed), low_priority_tx_counts: self.low_priority_tx_counts.load(Ordering::Relaxed), block_tx_counts: self.block_tx_counts.load(Ordering::Relaxed), @@ -52,6 +71,7 @@ impl MiningCounters { #[derive(Debug, PartialEq, Eq)] pub struct MempoolCountersSnapshot { + pub elapsed_time: Duration, pub high_priority_tx_counts: u64, pub low_priority_tx_counts: u64, pub block_tx_counts: u64, @@ -66,6 +86,15 @@ impl MempoolCountersSnapshot { self.high_priority_tx_counts + self.low_priority_tx_counts } + pub fn u_tps(&self) -> f64 { + let elapsed = self.elapsed_time.as_secs_f64(); + if elapsed != 0f64 { + self.tx_accepted_counts as f64 / elapsed + } else { + 0f64 + } + } + pub fn e_tps(&self) -> f64 { let accepted_txs = u64::min(self.ready_txs_sample, self.tx_accepted_counts); let total_txs = u64::min(self.ready_txs_sample, self.block_tx_counts); @@ -82,6 +111,7 @@ impl core::ops::Sub for &MempoolCountersSnapshot { fn sub(self, rhs: Self) -> Self::Output { Self::Output { + elapsed_time: self.elapsed_time.checked_sub(rhs.elapsed_time).unwrap_or_default(), high_priority_tx_counts: self.high_priority_tx_counts.checked_sub(rhs.high_priority_tx_counts).unwrap_or_default(), low_priority_tx_counts: self.low_priority_tx_counts.checked_sub(rhs.low_priority_tx_counts).unwrap_or_default(), block_tx_counts: self.block_tx_counts.checked_sub(rhs.block_tx_counts).unwrap_or_default(), diff --git a/mining/src/monitor.rs b/mining/src/monitor.rs index 1359286a51..b71bb345c3 100644 --- a/mining/src/monitor.rs +++ b/mining/src/monitor.rs @@ -8,10 +8,7 @@ use kaspa_core::{ trace, }; use kaspa_txscript::caches::TxScriptCacheCounters; -use std::{ - sync::Arc, - time::{Duration, Instant}, -}; +use std::{sync::Arc, time::Duration}; const MONITOR: &str = "mempool-monitor"; @@ -37,7 +34,6 @@ impl MiningMonitor { pub async fn worker(self: &Arc) { let mut last_snapshot = self.counters.snapshot(); let mut last_tx_script_cache_snapshot = self.tx_script_cache_counters.snapshot(); - let mut last_log_time = Instant::now(); let snapshot_interval = 10; loop { if let TickReason::Shutdown = self.tick_service.tick(Duration::from_secs(snapshot_interval)).await { @@ -48,11 +44,8 @@ impl MiningMonitor { let snapshot = self.counters.snapshot(); let tx_script_cache_snapshot = self.tx_script_cache_counters.snapshot(); - let now = Instant::now(); - let elapsed = (now - last_log_time).as_secs_f64(); if snapshot == last_snapshot { // No update, avoid printing useless info - last_log_time = Instant::now(); continue; } @@ -60,34 +53,28 @@ impl MiningMonitor { let delta 
= &snapshot - &last_snapshot; let tx_script_cache_delta = &tx_script_cache_snapshot - &last_tx_script_cache_snapshot; - // Avoid printing useless info if no update - if snapshot != last_snapshot { - info!("Processed {} unique transactions in the last {:.2}s ({:.2} avg txs/s, in: {} via RPC, {} via P2P, out: {} via accepted blocks, {:.2}% e-tps)", - delta.tx_accepted_counts, - elapsed, - delta.tx_accepted_counts as f64 / elapsed, - delta.high_priority_tx_counts, - delta.low_priority_tx_counts, - delta.block_tx_counts, - delta.e_tps() * 100.0, + info!( + "Tx throughput stats: {:.2} u-tps, {:.2}% e-tps (in: {} via RPC, {} via P2P, out: {} via accepted blocks)", + delta.u_tps(), + delta.e_tps() * 100.0, + delta.high_priority_tx_counts, + delta.low_priority_tx_counts, + delta.tx_accepted_counts, + ); + // FIXME: (wip) decide if the log level should be debug and what info should be kept or formulated differently + if tx_script_cache_snapshot != last_tx_script_cache_snapshot { + info!( + "UTXOs stats: {} spent, {} created ({} signatures validated, {} cache hits, {:.2} hit ratio)", + delta.input_counts, + delta.output_counts, + tx_script_cache_delta.insert_counts, + tx_script_cache_delta.get_counts, + tx_script_cache_delta.hit_ratio() ); - // FIXME: (wip) decide if the log level should be debug and what info should be kept or formulated differently - if tx_script_cache_snapshot != last_tx_script_cache_snapshot { - info!( - "Created {} UTXOs, spent {} in the last {:.2}s ({} signatures validated, {} cache hits, {:.2} hit ratio)", - delta.output_counts, - delta.input_counts, - elapsed, - tx_script_cache_delta.insert_counts, - tx_script_cache_delta.get_counts, - tx_script_cache_delta.hit_ratio() - ); - } } last_snapshot = snapshot; last_tx_script_cache_snapshot = tx_script_cache_snapshot; - last_log_time = now; } trace!("mempool monitor thread exiting"); From bc6bdc6e599c2c2c32cf222f565b2fcb82fc6974 Mon Sep 17 00:00:00 2001 From: Tiram <18632023+tiram88@users.noreply.github.com> Date: Tue, 26 Sep 2023 14:42:29 +0300 Subject: [PATCH 70/86] More robust management of cached data life cycle --- mining/src/cache.rs | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/mining/src/cache.rs b/mining/src/cache.rs index b6fae4f18e..7db6e4f65d 100644 --- a/mining/src/cache.rs +++ b/mining/src/cache.rs @@ -27,18 +27,16 @@ impl Inner { } fn clear(&mut self) { - // The cache timer is reset to 0 so its lifetime is expired. 
- self.last_update_time = 0; self.block_template = None; } pub(crate) fn get_immutable_cached_template(&self) -> Option> { let now = unix_now(); // We verify that `now > last update` in order to avoid theoretic clock change bugs - if now < self.last_update_time || now - self.last_update_time > self.cache_lifetime { + if now > self.last_update_time + self.cache_lifetime || now < self.last_update_time { None } else { - Some(self.block_template.as_ref().expect("last_update_time would be 0").clone()) + self.block_template.clone() } } From 1c7357dcaade741c439ecd60c11f7e24fcdeeed5 Mon Sep 17 00:00:00 2001 From: Tiram <18632023+tiram88@users.noreply.github.com> Date: Tue, 26 Sep 2023 15:20:21 +0300 Subject: [PATCH 71/86] Log mempool sampled instead of exact lengths --- mining/src/lib.rs | 19 ++++++++++++++++ mining/src/manager.rs | 6 ----- .../mempool/handle_new_block_transactions.rs | 4 +++- mining/src/mempool/mod.rs | 22 ++----------------- mining/src/monitor.rs | 11 ++++++++-- 5 files changed, 33 insertions(+), 29 deletions(-) diff --git a/mining/src/lib.rs b/mining/src/lib.rs index d5eb767dfb..90affba842 100644 --- a/mining/src/lib.rs +++ b/mining/src/lib.rs @@ -19,13 +19,20 @@ pub mod testutils; pub struct MiningCounters { pub creation_time: Instant, + + // Counters pub high_priority_tx_counts: AtomicU64, pub low_priority_tx_counts: AtomicU64, pub block_tx_counts: AtomicU64, pub tx_accepted_counts: AtomicU64, pub input_counts: AtomicU64, pub output_counts: AtomicU64, + + // Samples pub ready_txs_sample: AtomicU64, + pub txs_sample: AtomicU64, + pub orphans_sample: AtomicU64, + pub accepted_sample: AtomicU64, } impl Default for MiningCounters { @@ -39,6 +46,9 @@ impl Default for MiningCounters { input_counts: Default::default(), output_counts: Default::default(), ready_txs_sample: Default::default(), + txs_sample: Default::default(), + orphans_sample: Default::default(), + accepted_sample: Default::default(), } } } @@ -54,6 +64,9 @@ impl MiningCounters { input_counts: self.input_counts.load(Ordering::Relaxed), output_counts: self.output_counts.load(Ordering::Relaxed), ready_txs_sample: self.ready_txs_sample.load(Ordering::Relaxed), + txs_sample: self.txs_sample.load(Ordering::Relaxed), + orphans_sample: self.orphans_sample.load(Ordering::Relaxed), + accepted_sample: self.accepted_sample.load(Ordering::Relaxed), } } @@ -79,6 +92,9 @@ pub struct MempoolCountersSnapshot { pub input_counts: u64, pub output_counts: u64, pub ready_txs_sample: u64, + pub txs_sample: u64, + pub orphans_sample: u64, + pub accepted_sample: u64, } impl MempoolCountersSnapshot { @@ -119,6 +135,9 @@ impl core::ops::Sub for &MempoolCountersSnapshot { input_counts: self.input_counts.checked_sub(rhs.input_counts).unwrap_or_default(), output_counts: self.output_counts.checked_sub(rhs.output_counts).unwrap_or_default(), ready_txs_sample: (self.ready_txs_sample + rhs.ready_txs_sample) / 2, + txs_sample: (self.txs_sample + rhs.txs_sample) / 2, + orphans_sample: (self.orphans_sample + rhs.orphans_sample) / 2, + accepted_sample: (self.accepted_sample + rhs.accepted_sample) / 2, } } } diff --git a/mining/src/manager.rs b/mining/src/manager.rs index bc140e933d..65e089de21 100644 --- a/mining/src/manager.rs +++ b/mining/src/manager.rs @@ -225,7 +225,6 @@ impl MiningManager { mempool.post_validate_and_insert_transaction(consensus, validation_result, transaction, priority, orphan)? 
{ let unorphaned_transactions = mempool.get_unorphaned_transactions_after_accepted_transaction(&accepted_transaction); - mempool.log_stats(); drop(mempool); // The capacity used here may be exceeded since accepted unorphaned transaction may themselves unorphan other transactions. @@ -296,7 +295,6 @@ impl MiningManager { } }) .collect::>(); - mempool.log_stats(); drop(mempool); } accepted_transactions @@ -388,7 +386,6 @@ impl MiningManager { } }); unorphaned_transactions.extend(txs); - mempool.log_stats(); } accepted_transactions.extend(self.validate_and_insert_unorphaned_transactions(consensus, unorphaned_transactions)); @@ -523,8 +520,6 @@ impl MiningManager { 1 => debug!("Removed transaction ({}) {}", TxRemovalReason::Expired, expired_low_priority_transactions[0]), n => debug!("Removed {} transactions ({}): {}...", n, TxRemovalReason::Expired, expired_low_priority_transactions[0]), } - - self.mempool.write().log_stats(); } pub fn revalidate_high_priority_transactions( @@ -696,7 +691,6 @@ impl MiningManager { let _ = transaction_ids_sender.send(valid_ids); } drop(_swo); - mempool.log_stats(); drop(mempool); } match accepted + missing_outpoint + invalid { diff --git a/mining/src/mempool/handle_new_block_transactions.rs b/mining/src/mempool/handle_new_block_transactions.rs index c234fcc7e9..21295ae1e3 100644 --- a/mining/src/mempool/handle_new_block_transactions.rs +++ b/mining/src/mempool/handle_new_block_transactions.rs @@ -47,7 +47,9 @@ impl Mempool { self.counters.input_counts.fetch_add(input_counts as u64, Ordering::Relaxed); self.counters.output_counts.fetch_add(output_counts as u64, Ordering::Relaxed); self.counters.ready_txs_sample.store(self.transaction_pool.ready_transaction_count() as u64, Ordering::Relaxed); - self.log_stats(); + self.counters.txs_sample.store(self.transaction_pool.len() as u64, Ordering::Relaxed); + self.counters.orphans_sample.store(self.orphan_pool.len() as u64, Ordering::Relaxed); + self.counters.accepted_sample.store(self.accepted_transactions.len() as u64, Ordering::Relaxed); Ok(unorphaned_transactions) } diff --git a/mining/src/mempool/mod.rs b/mining/src/mempool/mod.rs index 87bc0c01ba..4849c3c83a 100644 --- a/mining/src/mempool/mod.rs +++ b/mining/src/mempool/mod.rs @@ -12,10 +12,7 @@ use self::{ tx::Priority, }; use kaspa_consensus_core::tx::{MutableTransaction, TransactionId}; -use kaspa_core::{ - info, - time::{unix_now, Stopwatch}, -}; +use kaspa_core::time::Stopwatch; use std::{collections::hash_map::Entry, sync::Arc}; pub(crate) mod check_transaction_standard; @@ -48,7 +45,6 @@ pub(crate) struct Mempool { transaction_pool: TransactionsPool, orphan_pool: OrphanPool, accepted_transactions: AcceptedTransactions, - last_stat_report_time: u64, counters: Arc, } @@ -57,7 +53,7 @@ impl Mempool { let transaction_pool = TransactionsPool::new(config.clone()); let orphan_pool = OrphanPool::new(config.clone()); let accepted_transactions = AcceptedTransactions::new(config.clone()); - Self { config, transaction_pool, orphan_pool, accepted_transactions, last_stat_report_time: unix_now(), counters } + Self { config, transaction_pool, orphan_pool, accepted_transactions, counters } } pub(crate) fn get_transaction( @@ -166,20 +162,6 @@ impl Mempool { .filter(|transaction_id| !(self.transaction_pool.has(transaction_id) || self.orphan_pool.has(transaction_id))); self.accepted_transactions.unaccepted(&mut not_in_pools_txs) } - - pub(crate) fn log_stats(&mut self) { - const LOG_STATS_REPORT_INTERVAL_MILLISECONDS: u64 = 2000; - let now = unix_now(); - if now >= 
self.last_stat_report_time + LOG_STATS_REPORT_INTERVAL_MILLISECONDS { - info!( - "Mempool stats: {} txs, {} orphans, {} accepted", - self.transaction_pool.len(), - self.orphan_pool.len(), - self.accepted_transactions.len() - ); - self.last_stat_report_time = now; - } - } } pub mod tx { diff --git a/mining/src/monitor.rs b/mining/src/monitor.rs index b71bb345c3..b748ff09e8 100644 --- a/mining/src/monitor.rs +++ b/mining/src/monitor.rs @@ -61,10 +61,10 @@ impl MiningMonitor { delta.low_priority_tx_counts, delta.tx_accepted_counts, ); - // FIXME: (wip) decide if the log level should be debug and what info should be kept or formulated differently + // FIXME: (wip) change the log level to debug and decide what info should be kept or formulated differently if tx_script_cache_snapshot != last_tx_script_cache_snapshot { info!( - "UTXOs stats: {} spent, {} created ({} signatures validated, {} cache hits, {:.2} hit ratio)", + "UTXO set stats: {} spent, {} created ({} signatures validated, {} cache hits, {:.2} hit ratio)", delta.input_counts, delta.output_counts, tx_script_cache_delta.insert_counts, @@ -72,6 +72,13 @@ impl MiningMonitor { tx_script_cache_delta.hit_ratio() ); } + // FIXME: change to debug + if delta.txs_sample + delta.orphans_sample > 0 { + info!( + "Mempool sample: {} ready out of {} txs, {} orphans, {} cached as accepted", + delta.ready_txs_sample, delta.txs_sample, delta.orphans_sample, delta.accepted_sample + ); + } last_snapshot = snapshot; last_tx_script_cache_snapshot = tx_script_cache_snapshot; From fd4731eb78900860aee42c3d13a7666309d5c440 Mon Sep 17 00:00:00 2001 From: Michael Sutton Date: Thu, 28 Sep 2023 12:44:39 +0300 Subject: [PATCH 72/86] avoid passing consensus to orphan pool --- mining/src/mempool/model/orphan_pool.rs | 9 ++++----- mining/src/mempool/model/utxo_set.rs | 2 +- mining/src/mempool/validate_and_insert_transaction.rs | 2 +- 3 files changed, 6 insertions(+), 7 deletions(-) diff --git a/mining/src/mempool/model/orphan_pool.rs b/mining/src/mempool/model/orphan_pool.rs index 6b2330de30..31e3c902c7 100644 --- a/mining/src/mempool/model/orphan_pool.rs +++ b/mining/src/mempool/model/orphan_pool.rs @@ -9,7 +9,6 @@ use crate::mempool::{ tx::Priority, }; use kaspa_consensus_core::{ - api::ConsensusApi, tx::MutableTransaction, tx::{TransactionId, TransactionOutpoint}, }; @@ -62,7 +61,7 @@ impl OrphanPool { pub(crate) fn try_add_orphan( &mut self, - consensus: &dyn ConsensusApi, + virtual_daa_score: u64, transaction: MutableTransaction, priority: Priority, ) -> RuleResult<()> { @@ -76,7 +75,7 @@ impl OrphanPool { self.check_orphan_double_spend(&transaction)?; // Make sure there is room in the pool for the new transaction self.limit_orphan_pool_size(1)?; - self.add_orphan(consensus, transaction, priority)?; + self.add_orphan(virtual_daa_score, transaction, priority)?; Ok(()) } @@ -127,9 +126,9 @@ impl OrphanPool { Ok(()) } - fn add_orphan(&mut self, consensus: &dyn ConsensusApi, transaction: MutableTransaction, priority: Priority) -> RuleResult<()> { + fn add_orphan(&mut self, virtual_daa_score: u64, transaction: MutableTransaction, priority: Priority) -> RuleResult<()> { let id = transaction.id(); - let transaction = MempoolTransaction::new(transaction, priority, consensus.get_virtual_daa_score()); + let transaction = MempoolTransaction::new(transaction, priority, virtual_daa_score); // Add all entries in outpoint_owner_id for input in transaction.mtx.tx.inputs.iter() { self.outpoint_owner_id.insert(input.previous_outpoint, id); diff --git 
a/mining/src/mempool/model/utxo_set.rs b/mining/src/mempool/model/utxo_set.rs index d622fe44fb..38c2bcb4ee 100644 --- a/mining/src/mempool/model/utxo_set.rs +++ b/mining/src/mempool/model/utxo_set.rs @@ -68,7 +68,7 @@ impl MempoolUtxoSet { self.outpoint_owner_id.get(outpoint) } - /// Make sure no other transaction in the mempool is already spending an output one of this transaction inputs spends + /// Make sure no other transaction in the mempool is already spending an output which one of this transaction's inputs spends pub(crate) fn check_double_spends(&self, transaction: &MutableTransaction) -> RuleResult<()> { let transaction_id = transaction.id(); for input in transaction.tx.inputs.iter() { diff --git a/mining/src/mempool/validate_and_insert_transaction.rs b/mining/src/mempool/validate_and_insert_transaction.rs index ff80391c6f..bf5b266c30 100644 --- a/mining/src/mempool/validate_and_insert_transaction.rs +++ b/mining/src/mempool/validate_and_insert_transaction.rs @@ -58,7 +58,7 @@ impl Mempool { if orphan == Orphan::Forbidden { return Err(RuleError::RejectDisallowedOrphan(transaction_id)); } - self.orphan_pool.try_add_orphan(consensus, transaction, priority)?; + self.orphan_pool.try_add_orphan(consensus.get_virtual_daa_score(), transaction, priority)?; return Ok(None); } Err(err) => { From 8bec91824a401a73dffcb9c7144453defad621bb Mon Sep 17 00:00:00 2001 From: Michael Sutton Date: Wed, 4 Oct 2023 01:06:46 +0300 Subject: [PATCH 73/86] rename to `validate_transaction_unacceptance` and move to before the orphan case (accepted txs will usually be orphans) --- mining/src/mempool/validate_and_insert_transaction.rs | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/mining/src/mempool/validate_and_insert_transaction.rs b/mining/src/mempool/validate_and_insert_transaction.rs index bf5b266c30..08b53c63b1 100644 --- a/mining/src/mempool/validate_and_insert_transaction.rs +++ b/mining/src/mempool/validate_and_insert_transaction.rs @@ -21,7 +21,7 @@ impl Mempool { consensus: &dyn ConsensusApi, mut transaction: MutableTransaction, ) -> RuleResult<MutableTransaction> { - self.validate_transaction_acceptance(&transaction)?; + self.validate_transaction_unacceptance(&transaction)?; // Populate mass in the beginning, it will be used in multiple places throughout the validation and insertion.
transaction.calculated_mass = Some(consensus.calculate_transaction_mass(&transaction.tx)); self.validate_transaction_in_isolation(&transaction)?; @@ -49,6 +49,8 @@ impl Mempool { return Ok(None); } + self.validate_transaction_unacceptance(&transaction)?; + // Re-check double spends since validate_and_insert_transaction is no longer atomic self.transaction_pool.check_double_spends(&transaction)?; @@ -67,7 +69,6 @@ impl Mempool { } self.validate_transaction_in_context(&transaction)?; - self.validate_transaction_acceptance(&transaction)?; // Before adding the transaction, check if there is room in the pool self.transaction_pool.limit_transaction_count(1, &transaction)?.iter().try_for_each(|x| { @@ -80,7 +81,8 @@ impl Mempool { Ok(Some(accepted_transaction)) } - fn validate_transaction_acceptance(&self, transaction: &MutableTransaction) -> RuleResult<()> { + /// Validates that the transaction wasn't already accepted into the DAG + fn validate_transaction_unacceptance(&self, transaction: &MutableTransaction) -> RuleResult<()> { // Reject if the transaction is registered as an accepted transaction let transaction_id = transaction.id(); match self.accepted_transactions.has(&transaction_id) { @@ -172,7 +174,7 @@ impl Mempool { assert_eq!(transactions.len(), 1, "the list returned by remove_orphan is expected to contain exactly one transaction"); let transaction = transactions.pop().unwrap(); - self.validate_transaction_acceptance(&transaction.mtx)?; + self.validate_transaction_unacceptance(&transaction.mtx)?; self.transaction_pool.check_double_spends(&transaction.mtx)?; Ok(transaction) } From a99d3d89d11f6dd3d682ad6f7f1bc4bf494811f0 Mon Sep 17 00:00:00 2001 From: Michael Sutton Date: Thu, 5 Oct 2023 15:59:48 +0300 Subject: [PATCH 74/86] rename `cleaning` -> `mempool_scanning` --- protocol/flows/src/flow_context.rs | 20 +++++----- .../flows/src/flowcontext/transactions.rs | 40 +++++++++---------- 2 files changed, 30 insertions(+), 30 deletions(-) diff --git a/protocol/flows/src/flow_context.rs b/protocol/flows/src/flow_context.rs index d08d0b7f8e..642977d027 100644 --- a/protocol/flows/src/flow_context.rs +++ b/protocol/flows/src/flow_context.rs @@ -375,7 +375,7 @@ impl FlowContext { return Ok(()); } - if self.should_run_cleaning_task().await { + if self.should_run_mempool_scanning_task().await { // Spawn a task executing the removal of expired low priority transactions and, if time has come too, // the revalidation of high priority transactions. // @@ -384,7 +384,7 @@ impl FlowContext { let mining_manager = self.mining_manager().clone(); let consensus_clone = consensus.clone(); let context = self.clone(); - debug!("<> Starting cleaning task #{}...", self.cleaning_count().await); + debug!("<> Starting mempool scanning task #{}...", self.mempool_scanning_job_count().await); tokio::spawn(async move { mining_manager.clone().expire_low_priority_transactions(&consensus_clone).await; if context.should_rebroadcast().await { @@ -396,8 +396,8 @@ impl FlowContext { let _ = context.broadcast_transactions(transactions).await; } } - context.cleaning_is_done().await; - debug!("<> Cleaning task is done"); + context.mempool_scanning_is_done().await; + debug!("<> Mempool scanning task is done"); }); } @@ -443,8 +443,8 @@ impl FlowContext { } /// Returns true if the time has come for running the task cleaning mempool transactions. 
- async fn should_run_cleaning_task(&self) -> bool { - self.transactions_spread.write().await.should_run_cleaning_task() + async fn should_run_mempool_scanning_task(&self) -> bool { + self.transactions_spread.write().await.should_run_mempool_scanning_task() } /// Returns true if the time has come for a rebroadcast of the mempool high priority transactions. @@ -452,12 +452,12 @@ impl FlowContext { self.transactions_spread.read().await.should_rebroadcast() } - async fn cleaning_count(&self) -> u64 { - self.transactions_spread.read().await.cleaning_count() + async fn mempool_scanning_job_count(&self) -> u64 { + self.transactions_spread.read().await.mempool_scanning_job_count() } - async fn cleaning_is_done(&self) { - self.transactions_spread.write().await.cleaning_is_done() + async fn mempool_scanning_is_done(&self) { + self.transactions_spread.write().await.mempool_scanning_is_done() } /// Add the given transactions IDs to a set of IDs to broadcast. The IDs will be broadcasted to all peers diff --git a/protocol/flows/src/flowcontext/transactions.rs b/protocol/flows/src/flowcontext/transactions.rs index 2927152fa0..7fb53db0d3 100644 --- a/protocol/flows/src/flowcontext/transactions.rs +++ b/protocol/flows/src/flowcontext/transactions.rs @@ -10,16 +10,16 @@ use kaspa_p2p_lib::{ }; use std::time::{Duration, Instant}; -const CLEANING_TASK_INTERVAL: Duration = Duration::from_secs(10); +const SCANNING_TASK_INTERVAL: Duration = Duration::from_secs(10); const REBROADCAST_FREQUENCY: u64 = 3; const BROADCAST_INTERVAL: Duration = Duration::from_millis(500); pub(crate) const MAX_INV_PER_TX_INV_MSG: usize = 131_072; pub struct TransactionsSpread { hub: Hub, - last_cleaning_time: Instant, - cleaning_task_running: bool, - cleaning_count: u64, + last_scanning_time: Instant, + scanning_task_running: bool, + scanning_job_count: u64, transaction_ids: ProcessQueue, last_broadcast_time: Instant, } @@ -28,42 +28,42 @@ impl TransactionsSpread { pub fn new(hub: Hub) -> Self { Self { hub, - last_cleaning_time: Instant::now(), - cleaning_task_running: false, - cleaning_count: 0, + last_scanning_time: Instant::now(), + scanning_task_running: false, + scanning_job_count: 0, transaction_ids: ProcessQueue::new(), last_broadcast_time: Instant::now(), } } - /// Returns true if the time has come for running the task cleaning mempool transactions + /// Returns true if the time has come for running the task of scanning mempool transactions /// and if so, mark the task as running. - pub fn should_run_cleaning_task(&mut self) -> bool { - if self.cleaning_task_running || Instant::now() < self.last_cleaning_time + CLEANING_TASK_INTERVAL { + pub fn should_run_mempool_scanning_task(&mut self) -> bool { + if self.scanning_task_running || Instant::now() < self.last_scanning_time + SCANNING_TASK_INTERVAL { return false; } // Keep the launching times aligned to exact intervals let call_time = Instant::now(); - while self.last_cleaning_time + CLEANING_TASK_INTERVAL < call_time { - self.last_cleaning_time += CLEANING_TASK_INTERVAL; + while self.last_scanning_time + SCANNING_TASK_INTERVAL < call_time { + self.last_scanning_time += SCANNING_TASK_INTERVAL; } - self.cleaning_count += 1; - self.cleaning_task_running = true; + self.scanning_job_count += 1; + self.scanning_task_running = true; true } /// Returns true if the time for a rebroadcast of the mempool high priority transactions has come. 
pub fn should_rebroadcast(&self) -> bool { - self.cleaning_count % REBROADCAST_FREQUENCY == 0 + self.scanning_job_count % REBROADCAST_FREQUENCY == 0 } - pub fn cleaning_count(&self) -> u64 { - self.cleaning_count + pub fn mempool_scanning_job_count(&self) -> u64 { + self.scanning_job_count } - pub fn cleaning_is_done(&mut self) { - assert!(self.cleaning_task_running, "no stop without a matching start"); - self.cleaning_task_running = false; + pub fn mempool_scanning_is_done(&mut self) { + assert!(self.scanning_task_running, "no stop without a matching start"); + self.scanning_task_running = false; } /// Add the given transactions IDs to a set of IDs to broadcast. The IDs will be broadcasted to all peers From f161b1cff01ee3ced9dca5359f287de191f51456 Mon Sep 17 00:00:00 2001 From: Michael Sutton Date: Thu, 5 Oct 2023 16:44:08 +0300 Subject: [PATCH 75/86] keep intervals aligned using a round-up formula (rather than a loop) --- protocol/flows/src/flowcontext/transactions.rs | 18 +++++++++++------- 1 file changed, 11 insertions(+), 7 deletions(-) diff --git a/protocol/flows/src/flowcontext/transactions.rs b/protocol/flows/src/flowcontext/transactions.rs index 7fb53db0d3..20851c8d6e 100644 --- a/protocol/flows/src/flowcontext/transactions.rs +++ b/protocol/flows/src/flowcontext/transactions.rs @@ -10,7 +10,8 @@ use kaspa_p2p_lib::{ }; use std::time::{Duration, Instant}; -const SCANNING_TASK_INTERVAL: Duration = Duration::from_secs(10); +/// Interval between mempool scanning tasks (in seconds) +const SCANNING_TASK_INTERVAL: u64 = 10; const REBROADCAST_FREQUENCY: u64 = 3; const BROADCAST_INTERVAL: Duration = Duration::from_millis(500); pub(crate) const MAX_INV_PER_TX_INV_MSG: usize = 131_072; @@ -39,14 +40,17 @@ impl TransactionsSpread { /// Returns true if the time has come for running the task of scanning mempool transactions /// and if so, mark the task as running. pub fn should_run_mempool_scanning_task(&mut self) -> bool { - if self.scanning_task_running || Instant::now() < self.last_scanning_time + SCANNING_TASK_INTERVAL { + let now = Instant::now(); + if self.scanning_task_running || now < self.last_scanning_time + Duration::from_secs(SCANNING_TASK_INTERVAL) { return false; } - // Keep the launching times aligned to exact intervals - let call_time = Instant::now(); - while self.last_scanning_time + SCANNING_TASK_INTERVAL < call_time { - self.last_scanning_time += SCANNING_TASK_INTERVAL; - } + let delta = now.checked_duration_since(self.last_scanning_time).expect("verified above"); + // Keep the launching times aligned to exact intervals. Note that `delta=10.1` seconds will result in + // adding 10 seconds to last scan time, while `delta=11` will result in adding 20 (assuming scanning + // interval is 10 seconds). + self.last_scanning_time += + Duration::from_secs(((delta.as_secs() + SCANNING_TASK_INTERVAL - 1) / SCANNING_TASK_INTERVAL) * SCANNING_TASK_INTERVAL); + self.scanning_job_count += 1; self.scanning_task_running = true; true From 41e4a94afda4246990a00c4b3b6457fc3d1db446 Mon Sep 17 00:00:00 2001 From: Michael Sutton Date: Thu, 5 Oct 2023 17:04:40 +0300 Subject: [PATCH 76/86] design fix: avoid exposing full collections as mut. 
This violates encapsulation, since the collections can then be modified arbitrarily
from the outside, while in tx pools it is important to make sure the various internal
collections are kept mutually consistent (for instance, the `ready_transactions` field
on `TransactionsPool` needs careful maintenance)
---
 mining/src/mempool/mod.rs                     |  6 +++---
 mining/src/mempool/model/orphan_pool.rs       | 12 ++++++------
 mining/src/mempool/model/pool.rs              |  4 ++--
 mining/src/mempool/model/transactions_pool.rs | 12 +++---------
 4 files changed, 14 insertions(+), 20 deletions(-)

diff --git a/mining/src/mempool/mod.rs b/mining/src/mempool/mod.rs
index 4849c3c83a..50fe239b64 100644
--- a/mining/src/mempool/mod.rs
+++ b/mining/src/mempool/mod.rs
@@ -13,7 +13,7 @@ use self::{
 };
 use kaspa_consensus_core::tx::{MutableTransaction, TransactionId};
 use kaspa_core::time::Stopwatch;
-use std::{collections::hash_map::Entry, sync::Arc};
+use std::sync::Arc;
 
 pub(crate) mod check_transaction_standard;
 pub mod config;
@@ -140,8 +140,8 @@ impl Mempool {
     }
 
     pub(crate) fn update_revalidated_transaction(&mut self, transaction: MutableTransaction) -> bool {
-        if let Entry::Occupied(mut entry) = self.transaction_pool.all_mut().entry(transaction.id()) {
-            entry.get_mut().mtx = transaction;
+        if let Some(tx) = self.transaction_pool.entry_mut(&transaction.id()) {
+            tx.mtx = transaction;
             true
         } else {
             false
diff --git a/mining/src/mempool/model/orphan_pool.rs b/mining/src/mempool/model/orphan_pool.rs
index 31e3c902c7..9df86e1789 100644
--- a/mining/src/mempool/model/orphan_pool.rs
+++ b/mining/src/mempool/model/orphan_pool.rs
@@ -259,6 +259,10 @@ impl OrphanPool {
     fn get_random_low_priority_orphan(&self) -> Option<&MempoolTransaction> {
         self.all_orphans.values().find(|x| x.priority == Priority::Low)
     }
+
+    fn chained_mut(&mut self) -> &mut TransactionsEdges {
+        &mut self.chained_orphans
+    }
 }
 
 impl Pool for OrphanPool {
@@ -266,16 +270,12 @@ impl Pool for OrphanPool {
         &self.all_orphans
     }
 
-    fn all_mut(&mut self) -> &mut MempoolTransactionCollection {
-        &mut self.all_orphans
-    }
-
     fn chained(&self) -> &TransactionsEdges {
         &self.chained_orphans
     }
 
-    fn chained_mut(&mut self) -> &mut TransactionsEdges {
-        &mut self.chained_orphans
+    fn entry_mut(&mut self, transaction_id: &TransactionId) -> Option<&mut MempoolTransaction> {
+        self.all_orphans.get_mut(transaction_id)
     }
 
     fn expire_low_priority_transactions(&mut self, virtual_daa_score: u64) -> RuleResult<()> {
diff --git a/mining/src/mempool/model/pool.rs b/mining/src/mempool/model/pool.rs
index 6bdfcbdcef..0d4b3d44c8 100644
--- a/mining/src/mempool/model/pool.rs
+++ b/mining/src/mempool/model/pool.rs
@@ -17,10 +17,8 @@ pub(crate) type TransactionsEdges = HashMap<TransactionId, TransactionIdSet>;
 pub(crate) trait Pool {
     fn all(&self) -> &MempoolTransactionCollection;
 
-    fn all_mut(&mut self) -> &mut MempoolTransactionCollection;
-
     fn chained(&self) -> &TransactionsEdges;
 
-    fn chained_mut(&mut self) -> &mut TransactionsEdges;
 
     fn has(&self, transaction_id: &TransactionId) -> bool {
         self.all().contains_key(transaction_id)
@@ -30,6 +28,8 @@
         self.all().get(transaction_id)
     }
 
+    fn entry_mut(&mut self, transaction_id: &TransactionId) -> Option<&mut MempoolTransaction>;
+
     /// Returns the number of transactions in the pool
     fn len(&self) -> usize {
         self.all().len()
diff --git a/mining/src/mempool/model/transactions_pool.rs b/mining/src/mempool/model/transactions_pool.rs
index 82676cc29f..4e6eb2ecf6 100644
--- a/mining/src/mempool/model/transactions_pool.rs
+++ b/mining/src/mempool/model/transactions_pool.rs
@@ -108,7 +108,7 @@ impl
TransactionsPool { self.ready_transactions.insert(id); } for parent_id in parents { - let entry = self.chained_mut().entry(parent_id).or_default(); + let entry = self.chained_transactions.entry(parent_id).or_default(); entry.insert(id); } @@ -305,19 +305,13 @@ impl Pool for TransactionsPool { &self.all_transactions } - #[inline] - fn all_mut(&mut self) -> &mut MempoolTransactionCollection { - &mut self.all_transactions - } - #[inline] fn chained(&self) -> &TransactionsEdges { &self.chained_transactions } - #[inline] - fn chained_mut(&mut self) -> &mut TransactionsEdges { - &mut self.chained_transactions + fn entry_mut(&mut self, transaction_id: &TransactionId) -> Option<&mut MempoolTransaction> { + self.all_transactions.get_mut(transaction_id) } fn expire_low_priority_transactions(&mut self, virtual_daa_score: u64) -> RuleResult<()> { From 316f9d1a3a687d8af5afea455365dbdcf9943323 Mon Sep 17 00:00:00 2001 From: Michael Sutton Date: Thu, 5 Oct 2023 18:25:59 +0300 Subject: [PATCH 77/86] minor: close all pool receivers on op error --- testing/integration/src/common/client_pool.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/testing/integration/src/common/client_pool.rs b/testing/integration/src/common/client_pool.rs index 21abeaf3ad..b01f6e8700 100644 --- a/testing/integration/src/common/client_pool.rs +++ b/testing/integration/src/common/client_pool.rs @@ -26,6 +26,7 @@ impl ClientPool { tokio::spawn(async move { while let Ok(msg) = rx.recv().await { if client_op(client.clone(), msg).await { + rx.close(); break; } } From 8ce99e74dd0e3cb0bddb337e14450a760e3463a4 Mon Sep 17 00:00:00 2001 From: Michael Sutton Date: Fri, 6 Oct 2023 00:54:45 +0300 Subject: [PATCH 78/86] `remove_transaction`: no need to manually update parent-child relations in the case `remove_redeemers=false`. This is already done via `remove_transaction_from_sets` -> `transaction_pool.remove_transaction`. 
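[Editor's aside — a hedged sketch of the call chain named above, for orientation; the
names are taken from the surrounding diffs and the flow shown is illustrative only:

    // remove_transaction(id, remove_redeemers = false)
    //   -> remove_transaction_from_sets(id, false)
    //        -> transaction_pool.remove_transaction(id)
    //             // already detaches `id` from its parents' chained sets and from its
    //             // children's parent sets, so the manual relation update was redundant
]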
+ a few minor changes
---
 mining/src/mempool/mod.rs                     |  2 +-
 mining/src/mempool/model/orphan_pool.rs       | 32 +++++-----
 mining/src/mempool/model/pool.rs              | 16 +----
 mining/src/mempool/model/transactions_pool.rs | 27 +-------
 mining/src/mempool/remove_transaction.rs      | 63 ++++++++-----------
 5 files changed, 45 insertions(+), 95 deletions(-)

diff --git a/mining/src/mempool/mod.rs b/mining/src/mempool/mod.rs
index 50fe239b64..c3a5e677e7 100644
--- a/mining/src/mempool/mod.rs
+++ b/mining/src/mempool/mod.rs
@@ -140,7 +140,7 @@ impl Mempool {
     }
 
     pub(crate) fn update_revalidated_transaction(&mut self, transaction: MutableTransaction) -> bool {
-        if let Some(tx) = self.transaction_pool.entry_mut(&transaction.id()) {
+        if let Some(tx) = self.transaction_pool.get_mut(&transaction.id()) {
             tx.mtx = transaction;
             true
         } else {
diff --git a/mining/src/mempool/model/orphan_pool.rs b/mining/src/mempool/model/orphan_pool.rs
index 9df86e1789..8adef6fa27 100644
--- a/mining/src/mempool/model/orphan_pool.rs
+++ b/mining/src/mempool/model/orphan_pool.rs
@@ -263,28 +263,14 @@ impl OrphanPool {
     fn chained_mut(&mut self) -> &mut TransactionsEdges {
         &mut self.chained_orphans
     }
-}
-
-impl Pool for OrphanPool {
-    fn all(&self) -> &MempoolTransactionCollection {
-        &self.all_orphans
-    }
-
-    fn chained(&self) -> &TransactionsEdges {
-        &self.chained_orphans
-    }
-
-    fn entry_mut(&mut self, transaction_id: &TransactionId) -> Option<&mut MempoolTransaction> {
-        self.all_orphans.get_mut(transaction_id)
-    }
 
-    fn expire_low_priority_transactions(&mut self, virtual_daa_score: u64) -> RuleResult<()> {
+    pub(crate) fn expire_low_priority_transactions(&mut self, virtual_daa_score: u64) -> RuleResult<()> {
         if virtual_daa_score < self.last_expire_scan + self.config.orphan_expire_scan_interval_daa_score {
             return Ok(());
         }
 
         // Never expire high priority transactions
-        // Remove all transactions whose addedAtDAAScore is older then TransactionExpireIntervalDAAScore
+        // Remove all transactions whose `added_at_daa_score` is older than TransactionExpireIntervalDAAScore
         let expired_low_priority_transactions: Vec<TransactionId> = self
             .all_orphans
             .values()
@@ -307,3 +293,17 @@
         Ok(())
     }
 }
+
+impl Pool for OrphanPool {
+    fn all(&self) -> &MempoolTransactionCollection {
+        &self.all_orphans
+    }
+
+    fn chained(&self) -> &TransactionsEdges {
+        &self.chained_orphans
+    }
+
+    fn get_mut(&mut self, transaction_id: &TransactionId) -> Option<&mut MempoolTransaction> {
+        self.all_orphans.get_mut(transaction_id)
+    }
+}
diff --git a/mining/src/mempool/model/pool.rs b/mining/src/mempool/model/pool.rs
index 0d4b3d44c8..1f9ff15529 100644
--- a/mining/src/mempool/model/pool.rs
+++ b/mining/src/mempool/model/pool.rs
@@ -10,7 +10,6 @@ use crate::{
     },
 };
 use kaspa_consensus_core::tx::{MutableTransaction, TransactionId};
-use kaspa_mining_errors::mempool::RuleResult;
 use std::collections::{hash_set::Iter, HashMap, HashSet, VecDeque};
 
 pub(crate) type TransactionsEdges = HashMap<TransactionId, TransactionIdSet>;
@@ -28,7 +27,7 @@
         self.all().get(transaction_id)
     }
 
-    fn entry_mut(&mut self, transaction_id: &TransactionId) -> Option<&mut MempoolTransaction>;
+    fn get_mut(&mut self, transaction_id: &TransactionId) -> Option<&mut MempoolTransaction>;
 
     /// Returns the number of transactions in the pool
     fn len(&self) -> usize {
@@ -94,17 +93,6 @@
         descendants
     }
 
-    /// Returns the ids of all transactions which directly chained to `transaction_id`
-    /// and exist in the pool.
- fn get_direct_redeemer_ids_in_pool(&self, transaction_id: &TransactionId) -> TransactionIdSet { - if let Some(transaction) = self.get(transaction_id) { - if let Some(chains) = self.chained().get(&transaction.id()) { - return chains.clone(); - } - } - Default::default() - } - /// Returns a vector with clones of all the transactions in the pool. fn get_all_transactions(&self) -> Vec { self.all().values().map(|x| x.mtx.clone()).collect() @@ -139,8 +127,6 @@ pub(crate) trait Pool { }); }); } - - fn expire_low_priority_transactions(&mut self, virtual_daa_score: u64) -> RuleResult<()>; } pub(crate) struct PoolIndex { diff --git a/mining/src/mempool/model/transactions_pool.rs b/mining/src/mempool/model/transactions_pool.rs index 4e6eb2ecf6..5b69b4a4e5 100644 --- a/mining/src/mempool/model/transactions_pool.rs +++ b/mining/src/mempool/model/transactions_pool.rs @@ -118,25 +118,6 @@ impl TransactionsPool { Ok(()) } - pub(crate) fn remove_parent_chained_relation_in_pool( - &mut self, - transaction_id: &TransactionId, - parent_id: &TransactionId, - ) -> bool { - let mut found = false; - // Remove the bijective parent/chained relation - if let Some(parents) = self.parent_transactions.get_mut(transaction_id) { - found = parents.remove(parent_id); - if parents.is_empty() { - self.ready_transactions.insert(*transaction_id); - } - } - if let Some(chains) = self.chained_transactions.get_mut(parent_id) { - found = chains.remove(transaction_id) || found; - } - found - } - pub(crate) fn remove_transaction(&mut self, transaction_id: &TransactionId) -> RuleResult { // Remove all bijective parent/chained relations if let Some(parents) = self.parent_transactions.get(transaction_id) { @@ -310,13 +291,7 @@ impl Pool for TransactionsPool { &self.chained_transactions } - fn entry_mut(&mut self, transaction_id: &TransactionId) -> Option<&mut MempoolTransaction> { + fn get_mut(&mut self, transaction_id: &TransactionId) -> Option<&mut MempoolTransaction> { self.all_transactions.get_mut(transaction_id) } - - fn expire_low_priority_transactions(&mut self, virtual_daa_score: u64) -> RuleResult<()> { - self.last_expire_scan_daa_score = virtual_daa_score; - self.last_expire_scan_time = unix_now(); - Ok(()) - } } diff --git a/mining/src/mempool/remove_transaction.rs b/mining/src/mempool/remove_transaction.rs index d4fa633d93..e05e519587 100644 --- a/mining/src/mempool/remove_transaction.rs +++ b/mining/src/mempool/remove_transaction.rs @@ -28,22 +28,13 @@ impl Mempool { let mut removed_transactions = vec![*transaction_id]; if remove_redeemers { - let redeemers = self.transaction_pool.get_redeemer_ids_in_pool(transaction_id); - removed_transactions.extend(redeemers); - } else { - // Note: when `remove_redeemers=false` we avoid calling `get_redeemer_ids_in_pool` which might - // have linear complexity (in mempool size) in the worst-case. 
Instead, we only obtain the direct - // tx children since only for these txs we need to update the parent/chain relation to the removed tx - let direct_redeemers = self.transaction_pool.get_direct_redeemer_ids_in_pool(transaction_id); - direct_redeemers.iter().for_each(|x| { - self.transaction_pool.remove_parent_chained_relation_in_pool(x, transaction_id); - }); + removed_transactions.extend(self.transaction_pool.get_redeemer_ids_in_pool(transaction_id)); } let mut removed_orphans: Vec = vec![]; removed_transactions.iter().try_for_each(|tx_id| { self.remove_transaction_from_sets(tx_id, remove_redeemers).map(|txs| { - removed_orphans.extend(txs.iter().map(|x| x.id())); + removed_orphans.extend(txs.into_iter().map(|x| x.id())); }) })?; removed_transactions.extend(removed_orphans); @@ -52,32 +43,30 @@ impl Mempool { removed_transactions.extend(self.orphan_pool.remove_redeemers_of(transaction_id)?.iter().map(|x| x.id())); } - if !removed_transactions.is_empty() { - match reason { - TxRemovalReason::Muted => {} - TxRemovalReason::DoubleSpend => match removed_transactions.len() { - 0 => {} - 1 => warn!("Removed transaction ({}) {}{}", reason, removed_transactions[0], extra_info), - n => warn!( - "Removed {} transactions ({}): {}{}", - n, - reason, - removed_transactions.iter().reusable_format(", "), - extra_info - ), - }, - _ => match removed_transactions.len() { - 0 => {} - 1 => debug!("Removed transaction ({}) {}{}", reason, removed_transactions[0], extra_info), - n => debug!( - "Removed {} transactions ({}): {}{}", - n, - reason, - removed_transactions.iter().reusable_format(", "), - extra_info - ), - }, - } + match reason { + TxRemovalReason::Muted => {} + TxRemovalReason::DoubleSpend => match removed_transactions.len() { + 0 => {} + 1 => warn!("Removed transaction ({}) {}{}", reason, removed_transactions[0], extra_info), + n => warn!( + "Removed {} transactions ({}): {}{}", + n, + reason, + removed_transactions.iter().reusable_format(", "), + extra_info + ), + }, + _ => match removed_transactions.len() { + 0 => {} + 1 => debug!("Removed transaction ({}) {}{}", reason, removed_transactions[0], extra_info), + n => debug!( + "Removed {} transactions ({}): {}{}", + n, + reason, + removed_transactions.iter().reusable_format(", "), + extra_info + ), + }, } Ok(()) From 4faf2fa37c5fbf39ccd48362106248015f301b9f Mon Sep 17 00:00:00 2001 From: Michael Sutton Date: Fri, 6 Oct 2023 01:15:44 +0300 Subject: [PATCH 79/86] encapsulate `remove_transaction_utxos` into `transaction_pool` --- mining/src/mempool/model/transactions_pool.rs | 19 +++++++++++++------ mining/src/mempool/remove_transaction.rs | 5 ++--- 2 files changed, 15 insertions(+), 9 deletions(-) diff --git a/mining/src/mempool/model/transactions_pool.rs b/mining/src/mempool/model/transactions_pool.rs index 5b69b4a4e5..ba1ba65a31 100644 --- a/mining/src/mempool/model/transactions_pool.rs +++ b/mining/src/mempool/model/transactions_pool.rs @@ -118,6 +118,7 @@ impl TransactionsPool { Ok(()) } + /// Fully removes the transaction from all relational sets, as well as updates the UTXO set pub(crate) fn remove_transaction(&mut self, transaction_id: &TransactionId) -> RuleResult { // Remove all bijective parent/chained relations if let Some(parents) = self.parent_transactions.get(transaction_id) { @@ -142,7 +143,18 @@ impl TransactionsPool { self.ready_transactions.remove(transaction_id); // Remove the transaction itself - self.all_transactions.remove(transaction_id).ok_or(RuleError::RejectMissingTransaction(*transaction_id)) + let removed_tx = 
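+            // (Editor's note: the removal below now also prunes the tx from the mempool
+            // UTXO set, per this patch's goal of encapsulating `remove_transaction_utxos`
+            // within the pool, so callers no longer perform that step separately.)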
self.all_transactions.remove(transaction_id).ok_or(RuleError::RejectMissingTransaction(*transaction_id))?; + + // TODO: consider using `self.parent_transactions.get(transaction_id)` + // The tradeoff to consider is whether it might be possible that a parent tx exists in the pool + // however its relation as parent is not registered. This can supposedly happen in rare cases where + // the parent was removed w/o redeemers and then re-added + let parent_ids = self.get_parent_transaction_ids_in_pool(&removed_tx.mtx); + + // Remove the transaction from the mempool UTXO set + self.utxo_set.remove_transaction(&removed_tx.mtx, &parent_ids); + + Ok(removed_tx) } pub(crate) fn ready_transaction_count(&self) -> usize { @@ -237,11 +249,6 @@ impl TransactionsPool { self.utxo_set.check_double_spends(transaction) } - pub(crate) fn remove_transaction_utxos(&mut self, transaction: &MutableTransaction) { - let parent_ids = self.get_parent_transaction_ids_in_pool(transaction); - self.utxo_set.remove_transaction(transaction, &parent_ids) - } - pub(crate) fn collect_expired_low_priority_transactions(&self, virtual_daa_score: u64) -> Vec { let now = unix_now(); if virtual_daa_score < self.last_expire_scan_daa_score + self.config.transaction_expire_scan_interval_daa_score diff --git a/mining/src/mempool/remove_transaction.rs b/mining/src/mempool/remove_transaction.rs index e05e519587..f2b94421ca 100644 --- a/mining/src/mempool/remove_transaction.rs +++ b/mining/src/mempool/remove_transaction.rs @@ -33,7 +33,7 @@ impl Mempool { let mut removed_orphans: Vec = vec![]; removed_transactions.iter().try_for_each(|tx_id| { - self.remove_transaction_from_sets(tx_id, remove_redeemers).map(|txs| { + self.remove_from_transaction_pool_and_update_orphans(tx_id, remove_redeemers).map(|txs| { removed_orphans.extend(txs.into_iter().map(|x| x.id())); }) })?; @@ -72,13 +72,12 @@ impl Mempool { Ok(()) } - fn remove_transaction_from_sets( + fn remove_from_transaction_pool_and_update_orphans( &mut self, transaction_id: &TransactionId, remove_redeemers: bool, ) -> RuleResult> { let removed_transaction = self.transaction_pool.remove_transaction(transaction_id)?; - self.transaction_pool.remove_transaction_utxos(&removed_transaction.mtx); self.orphan_pool.update_orphans_after_transaction_removed(&removed_transaction, remove_redeemers) } } From 7be8b61949e8db03a7c78128b24d7e54e8f8ccac Mon Sep 17 00:00:00 2001 From: Michael Sutton Date: Fri, 6 Oct 2023 01:18:31 +0300 Subject: [PATCH 80/86] no need to `remove_redeemers_of` for the initial removed tx since this happens as part of: `remove_from_transaction_pool_and_update_orphans` -> `orphan_pool.update_orphans_after_transaction_removed` -> `orphan_pool.remove_redeemers_of` --- mining/src/mempool/remove_transaction.rs | 4 ---- 1 file changed, 4 deletions(-) diff --git a/mining/src/mempool/remove_transaction.rs b/mining/src/mempool/remove_transaction.rs index f2b94421ca..c3212447d3 100644 --- a/mining/src/mempool/remove_transaction.rs +++ b/mining/src/mempool/remove_transaction.rs @@ -39,10 +39,6 @@ impl Mempool { })?; removed_transactions.extend(removed_orphans); - if remove_redeemers { - removed_transactions.extend(self.orphan_pool.remove_redeemers_of(transaction_id)?.iter().map(|x| x.id())); - } - match reason { TxRemovalReason::Muted => {} TxRemovalReason::DoubleSpend => match removed_transactions.len() { From e6142e77ee8eccbe17f65e538008d03d97a4989b Mon Sep 17 00:00:00 2001 From: Michael Sutton Date: Fri, 6 Oct 2023 02:13:18 +0300 Subject: [PATCH 81/86] inline 
`remove_from_transaction_pool_and_update_orphans`
---
 mining/src/mempool/model/transactions_pool.rs |  2 +-
 mining/src/mempool/remove_transaction.rs      | 27 +++++++------------
 2 files changed, 10 insertions(+), 19 deletions(-)

diff --git a/mining/src/mempool/model/transactions_pool.rs b/mining/src/mempool/model/transactions_pool.rs
index ba1ba65a31..605647be04 100644
--- a/mining/src/mempool/model/transactions_pool.rs
+++ b/mining/src/mempool/model/transactions_pool.rs
@@ -118,7 +118,7 @@ impl TransactionsPool {
         Ok(())
     }
 
-    /// Fully removes the transaction from all relational sets, as well as updates the UTXO set
+    /// Fully removes the transaction from all relational sets, as well as from the UTXO set
     pub(crate) fn remove_transaction(&mut self, transaction_id: &TransactionId) -> RuleResult<MempoolTransaction> {
diff --git a/mining/src/mempool/remove_transaction.rs b/mining/src/mempool/remove_transaction.rs
index c3212447d3..960ebc264b 100644
--- a/mining/src/mempool/remove_transaction.rs
+++ b/mining/src/mempool/remove_transaction.rs
@@ -1,9 +1,6 @@
 use crate::mempool::{
     errors::RuleResult,
-    model::{
-        pool::Pool,
-        tx::{MempoolTransaction, TxRemovalReason},
-    },
+    model::{pool::Pool, tx::TxRemovalReason},
     Mempool,
 };
 use kaspa_consensus_core::tx::TransactionId;
@@ -28,15 +25,18 @@ impl Mempool {
         let mut removed_transactions = vec![*transaction_id];
         if remove_redeemers {
+            // Add all descendant txs as pending removals
             removed_transactions.extend(self.transaction_pool.get_redeemer_ids_in_pool(transaction_id));
         }
 
         let mut removed_orphans: Vec<TransactionId> = vec![];
-        removed_transactions.iter().try_for_each(|tx_id| {
-            self.remove_from_transaction_pool_and_update_orphans(tx_id, remove_redeemers).map(|txs| {
-                removed_orphans.extend(txs.into_iter().map(|x| x.id()));
-            })
-        })?;
+        for tx_id in removed_transactions.iter() {
+            // Remove the tx from the transaction pool and the UTXO set (handled within the pool)
+            let tx = self.transaction_pool.remove_transaction(tx_id)?;
+            // Update/remove descendant orphan txs (depending on `remove_redeemers`)
+            let txs = self.orphan_pool.update_orphans_after_transaction_removed(&tx, remove_redeemers)?;
+            removed_orphans.extend(txs.into_iter().map(|x| x.id()));
+        }
         removed_transactions.extend(removed_orphans);
 
         match reason {
@@ -67,13 +67,4 @@ impl Mempool {
 
         Ok(())
     }
-
-    fn remove_from_transaction_pool_and_update_orphans(
-        &mut self,
-        transaction_id: &TransactionId,
-        remove_redeemers: bool,
-    ) -> RuleResult<Vec<MempoolTransaction>> {
-        let removed_transaction = self.transaction_pool.remove_transaction(transaction_id)?;
-        self.orphan_pool.update_orphans_after_transaction_removed(&removed_transaction, remove_redeemers)
-    }
 }
From 044fa3d3a07b6a9b6918fb2f39a5274bffdb8512 Mon Sep 17 00:00:00 2001
From: Michael Sutton
Date: Fri, 6 Oct 2023 02:28:07 +0300
Subject: [PATCH 82/86] remove redeemers of expired low-prio txs + register
 scan time and daa score after collection (bug fix)

---
 mining/src/manager.rs                         | 2 +-
 mining/src/mempool/model/transactions_pool.rs | 5 ++++-
 2 files changed, 5 insertions(+), 2 deletions(-)

diff --git a/mining/src/manager.rs b/mining/src/manager.rs
index 65e089de21..9eae41c15a 100644
--- a/mining/src/manager.rs
+++ b/mining/src/manager.rs
@@ -510,7 +510,7 @@ impl MiningManager {
         for chunk in &expired_low_priority_transactions.iter().chunks(24) {
             let mut mempool = self.mempool.write();
             chunk.into_iter().for_each(|tx| {
-                if let Err(err) = mempool.remove_transaction(tx, false, TxRemovalReason::Muted, "") {
+                if let Err(err) = mempool.remove_transaction(tx, true, TxRemovalReason::Muted, "") {
                     warn!("Failed to remove transaction {} from mempool: {}", tx, err);
                 }
             });
diff --git a/mining/src/mempool/model/transactions_pool.rs b/mining/src/mempool/model/transactions_pool.rs
index 605647be04..cf70150df7 100644
--- a/mining/src/mempool/model/transactions_pool.rs
+++ b/mining/src/mempool/model/transactions_pool.rs
@@ -249,7 +249,7 @@ impl TransactionsPool {
         self.utxo_set.check_double_spends(transaction)
     }
 
-    pub(crate) fn collect_expired_low_priority_transactions(&self, virtual_daa_score: u64) -> Vec<TransactionId> {
+    pub(crate) fn collect_expired_low_priority_transactions(&mut self, virtual_daa_score: u64) -> Vec<TransactionId> {
         let now = unix_now();
         if virtual_daa_score < self.last_expire_scan_daa_score + self.config.transaction_expire_scan_interval_daa_score
             || now < self.last_expire_scan_time + self.config.transaction_expire_scan_interval_milliseconds
         {
             return vec![];
         }
 
+        self.last_expire_scan_daa_score = virtual_daa_score;
+        self.last_expire_scan_time = now;
+
         // Never expire high priority transactions
         // Remove all transactions whose added_at_daa_score is older than transaction_expire_interval_daa_score
         self.all_transactions
From 969fae4d1f763f7667b0d880765413d35c74c635 Mon Sep 17 00:00:00 2001
From: Michael Sutton
Date: Fri, 6 Oct 2023 02:48:19 +0300
Subject: [PATCH 83/86] change mempool monitor logs to debug

---
 mining/src/manager.rs | 2 +-
 mining/src/monitor.rs | 8 +++-----
 2 files changed, 4 insertions(+), 6 deletions(-)

diff --git a/mining/src/manager.rs b/mining/src/manager.rs
index 9eae41c15a..1271ca106d 100644
--- a/mining/src/manager.rs
+++ b/mining/src/manager.rs
@@ -537,7 +537,7 @@ impl MiningManager {
             debug!("<> Revalidating high priority transactions found no transactions");
             return;
         } else {
-            debug!("<> Revalidating high priority transactions...");
+            debug!("<> Revalidating {} high priority transactions...", transaction_ids.len());
         }
         drop(mempool);
         // read lock on mempool by transaction chunks
diff --git a/mining/src/monitor.rs b/mining/src/monitor.rs
index b748ff09e8..74cbd6d728 100644
--- a/mining/src/monitor.rs
+++ b/mining/src/monitor.rs
@@ -1,6 +1,6 @@
 use super::MiningCounters;
 use kaspa_core::{
-    info,
+    debug, info,
     task::{
         service::{AsyncService, AsyncServiceFuture},
         tick::{TickReason, TickService},
@@ -61,9 +61,8 @@ impl MiningMonitor {
                     delta.low_priority_tx_counts,
                     delta.tx_accepted_counts,
                 );
-                // FIXME: (wip) change the log level to debug and decide what info should be kept or formulated differently
                 if tx_script_cache_snapshot != last_tx_script_cache_snapshot {
-                    info!(
+                    debug!(
                         "UTXO set stats: {} spent, {} created ({} signatures validated, {} cache hits, {:.2} hit ratio)",
                         delta.input_counts,
                         delta.output_counts,
                         tx_script_cache_delta.insert_counts,
                         tx_script_cache_delta.get_counts,
                         tx_script_cache_delta.hit_ratio()
                     );
                 }
-                // FIXME: change to debug
                 if delta.txs_sample + delta.orphans_sample > 0 {
-                    info!(
+                    debug!(
                         "Mempool sample: {} ready out of {} txs, {} orphans, {} cached as accepted",
                         delta.ready_txs_sample, delta.txs_sample, delta.orphans_sample, delta.accepted_sample
                     );
From 2e11cf6748edfea4be6568a8f0e30078f0552c21 Mon Sep 17 00:00:00 2001
From: Michael Sutton
Date: Fri, 6 Oct 2023 11:30:43 +0300
Subject: [PATCH 84/86] make tps logging more accurate

---
 mining/src/lib.rs     | 18 +++++++++++++++---
 mining/src/monitor.rs | 18 ++++++++++--------
 2 files changed, 25 insertions(+), 11 deletions(-)

diff --git a/mining/src/lib.rs b/mining/src/lib.rs
index 90affba842..d4589a65fe 100644
--- a/mining/src/lib.rs
+++ b/mining/src/lib.rs
@@ -102,6 +102,13 @@ impl MempoolCountersSnapshot {
         self.high_priority_tx_counts + self.low_priority_tx_counts
     }
 
+    /// Indicates whether this snapshot has any TPS activity which is worth logging
+    pub fn has_tps_activity(&self) -> bool {
+        self.tx_accepted_counts > 0 || self.block_tx_counts > 0 || self.low_priority_tx_counts > 0 || self.high_priority_tx_counts > 0
+    }
+
+    /// Returns an estimate of _Unique-TPS_, i.e. the number of unique transactions per second on average
+    /// (excluding coinbase transactions)
     pub fn u_tps(&self) -> f64 {
         let elapsed = self.elapsed_time.as_secs_f64();
         if elapsed != 0f64 {
@@ -111,13 +118,18 @@ impl MempoolCountersSnapshot {
         }
     }
 
+    /// Returns an estimate of the _Effective-TPS_ fraction, which is a measure of how much of DAG capacity
+    /// is utilized compared to the number of available mempool transactions. For instance, a max
+    /// value of `1.0` indicates that we cannot do any better in terms of throughput vs. current
+    /// demand. A value close to `0.0` means that DAG capacity is mostly filled with duplicate
+    /// transactions even though the mempool (demand) offers a much larger amount of unique transactions.
     pub fn e_tps(&self) -> f64 {
-        let accepted_txs = u64::min(self.ready_txs_sample, self.tx_accepted_counts);
-        let total_txs = u64::min(self.ready_txs_sample, self.block_tx_counts);
+        let accepted_txs = u64::min(self.ready_txs_sample, self.tx_accepted_counts); // The throughput
+        let total_txs = u64::min(self.ready_txs_sample, self.block_tx_counts); // The min of demand and capacity
         if total_txs > 0 {
             accepted_txs as f64 / total_txs as f64
         } else {
-            0f64
+            1f64 // No demand means we are 100% efficient
         }
     }
 }
diff --git a/mining/src/monitor.rs b/mining/src/monitor.rs
index 74cbd6d728..517bd82763 100644
--- a/mining/src/monitor.rs
+++ b/mining/src/monitor.rs
@@ -53,14 +53,16 @@ impl MiningMonitor {
                 let delta = &snapshot - &last_snapshot;
                 let tx_script_cache_delta = &tx_script_cache_snapshot - &last_tx_script_cache_snapshot;
 
-                info!(
-                    "Tx throughput stats: {:.2} u-tps, {:.2}% e-tps (in: {} via RPC, {} via P2P, out: {} via accepted blocks)",
-                    delta.u_tps(),
-                    delta.e_tps() * 100.0,
-                    delta.high_priority_tx_counts,
-                    delta.low_priority_tx_counts,
-                    delta.tx_accepted_counts,
-                );
+                if delta.has_tps_activity() {
+                    info!(
+                        "Tx throughput stats: {:.2} u-tps, {:.2}% e-tps (in: {} via RPC, {} via P2P, out: {} via accepted blocks)",
+                        delta.u_tps(),
+                        delta.e_tps() * 100.0,
+                        delta.high_priority_tx_counts,
+                        delta.low_priority_tx_counts,
+                        delta.tx_accepted_counts,
+                    );
+                }
                 if tx_script_cache_snapshot != last_tx_script_cache_snapshot {
                     debug!(
                         "UTXO set stats: {} spent, {} created ({} signatures validated, {} cache hits, {:.2} hit ratio)",
From 5aff48098ed3feba15ebe6da1d13f4993f3093b4 Mon Sep 17 00:00:00 2001
From: Michael Sutton
Date: Fri, 6 Oct 2023 14:59:34 +0300
Subject: [PATCH 85/86] import bmk improvements from mempool-perf-stats branch

---
 testing/integration/src/mempool_benchmarks.rs | 77 +++++++++++++++----
 1 file changed, 64 insertions(+), 13 deletions(-)

diff --git a/testing/integration/src/mempool_benchmarks.rs b/testing/integration/src/mempool_benchmarks.rs
index 419c143895..8f9a9cd8dd 100644
--- a/testing/integration/src/mempool_benchmarks.rs
+++ b/testing/integration/src/mempool_benchmarks.rs
@@ -37,7 +37,7 @@ use std::{
         atomic::{AtomicBool, Ordering},
         Arc,
     },
-    time::Duration,
+    time::{Duration, Instant},
 };
 use tokio::join;
 
@@ -62,8 +62,8 @@ fn
estimated_mass(num_inputs: usize, num_outputs: u64) -> u64 { 200 + 34 * num_outputs + 1000 * (num_inputs as u64) } -const EXPAND_FACTOR: u64 = 2; -const CONTRACT_FACTOR: u64 = 2; +const EXPAND_FACTOR: u64 = 1; +const CONTRACT_FACTOR: u64 = 1; /// Builds a TX DAG based on the initial UTXO set and on constant params fn generate_tx_dag( @@ -117,7 +117,7 @@ fn generate_tx_dag( utxoset.remove_collection(&utxo_diff.remove); utxoset.add_collection(&utxo_diff.add); - if i % 100 == 0 { + if i % (target_levels / 10).max(1) == 0 { info!("Generated {} txs", txs.len()); } } @@ -148,14 +148,22 @@ fn verify_tx_dag(initial_utxoset: &UtxoCollection, txs: &Vec>) #[ignore = "bmk"] async fn bench_bbt_latency() { kaspa_core::panic::configure_panic(); - kaspa_core::log::try_init_logger("info,kaspa_core::time=trace"); + kaspa_core::log::try_init_logger("info,kaspa_core::time=debug,kaspa_mining::monitor=debug"); // Constants - const BLOCK_COUNT: usize = 20_000; - const TX_COUNT: usize = 600_000; - const TX_LEVEL_WIDTH: usize = 1000; + const BLOCK_COUNT: usize = usize::MAX; + + const MEMPOOL_TARGET: u64 = 600_000; + const TX_COUNT: usize = 1_400_000; + const TX_LEVEL_WIDTH: usize = 20_000; + const TPS_PRESSURE: u64 = u64::MAX; + const SUBMIT_BLOCK_CLIENTS: usize = 20; - const SUBMIT_TX_CLIENTS: usize = 1; + const SUBMIT_TX_CLIENTS: usize = 2; + + if TX_COUNT < TX_LEVEL_WIDTH { + panic!() + } /* Logic: @@ -186,7 +194,7 @@ async fn bench_bbt_latency() { num_prealloc_utxos: Some(TX_LEVEL_WIDTH as u64 * CONTRACT_FACTOR), prealloc_address: Some(prealloc_address.to_string()), prealloc_amount: 500 * SOMPI_PER_KASPA, - block_template_cache_lifetime: Some(5), + block_template_cache_lifetime: Some(0), ..Default::default() }; let network = args.network(); @@ -221,6 +229,7 @@ async fn bench_bbt_latency() { let submit_block_pool = daemon .new_client_pool(SUBMIT_BLOCK_CLIENTS, 100, |c, block| async move { + let _sw = kaspa_core::time::Stopwatch::<500>::with_threshold("sb"); let response = c.submit_block(block, false).await.unwrap(); assert_eq!(response.report, kaspa_rpc_core::SubmitBlockReport::Success); false @@ -245,6 +254,7 @@ async fn bench_bbt_latency() { let cc = bbt_client.clone(); let exec = executing.clone(); let notification_rx = receiver.clone(); + let pac = pay_address.clone(); let miner_receiver_task = tokio::spawn(async move { while let Ok(notification) = notification_rx.recv().await { match notification { @@ -252,8 +262,8 @@ async fn bench_bbt_latency() { while notification_rx.try_recv().is_ok() { // Drain the channel } - // let _sw = Stopwatch::<500>::with_threshold("get_block_template"); - *current_template.lock() = cc.get_block_template(pay_address.clone(), vec![]).await.unwrap(); + // let _sw = kaspa_core::time::Stopwatch::<500>::with_threshold("bbt"); + *current_template.lock() = cc.get_block_template(pac.clone(), vec![]).await.unwrap(); } _ => panic!(), } @@ -267,6 +277,7 @@ async fn bench_bbt_latency() { let block_sender = submit_block_pool.sender(); let exec = executing.clone(); + let cc = Arc::new(bbt_client.clone()); let miner_loop_task = tokio::spawn(async move { for i in 0..BLOCK_COUNT { // Simulate mining time @@ -278,6 +289,15 @@ async fn bench_bbt_latency() { // Use index as nonce to avoid duplicate blocks block.header.nonce = i as u64; + let ctc = current_template_consume.clone(); + let ccc = cc.clone(); + let pac = pay_address.clone(); + tokio::spawn(async move { + // let _sw = kaspa_core::time::Stopwatch::<500>::with_threshold("bbt"); + // We used the current template so let's refetch a new 
template with new txs + *ctc.lock() = ccc.get_block_template(pac, vec![]).await.unwrap(); + }); + let bs = block_sender.clone(); tokio::spawn(async move { // Simulate communication delay. TODO: consider adding gaussian noise @@ -298,8 +318,34 @@ async fn bench_bbt_latency() { let tx_sender = submit_tx_pool.sender(); let exec = executing.clone(); let cc = client.clone(); + let mut tps_pressure = if MEMPOOL_TARGET < u64::MAX { u64::MAX } else { TPS_PRESSURE }; + let mut last_log_time = Instant::now() - Duration::from_secs(5); + let mut log_index = 0; let tx_sender_task = tokio::spawn(async move { for (i, tx) in txs.into_iter().enumerate() { + if tps_pressure != u64::MAX { + tokio::time::sleep(std::time::Duration::from_secs_f64(1.0 / tps_pressure as f64)).await; + } + if last_log_time.elapsed() > Duration::from_millis(200) { + let mut mempool_size = cc.get_info().await.unwrap().mempool_size; + if log_index % 10 == 0 { + info!("Mempool size: {:#?}, txs submitted: {}", mempool_size, i); + } + log_index += 1; + last_log_time = Instant::now(); + + if mempool_size > (MEMPOOL_TARGET as f32 * 1.05) as u64 { + tps_pressure = TPS_PRESSURE; + while mempool_size > MEMPOOL_TARGET { + tokio::time::sleep(std::time::Duration::from_millis(200)).await; + mempool_size = cc.get_info().await.unwrap().mempool_size; + if log_index % 10 == 0 { + info!("Mempool size: {:#?}, txs submitted: {}", mempool_size, i); + } + log_index += 1; + } + } + } match tx_sender.send((i, tx)).await { Ok(_) => {} Err(_) => { @@ -312,10 +358,15 @@ async fn bench_bbt_latency() { } kaspa_core::warn!("Tx sender task, waiting for mempool to drain.."); - while cc.get_info().await.unwrap().mempool_size > 0 { + loop { if !exec.load(Ordering::Relaxed) { break; } + let mempool_size = cc.get_info().await.unwrap().mempool_size; + info!("Mempool size: {:#?}", mempool_size); + if mempool_size == 0 || (TX_COUNT as u64 > MEMPOOL_TARGET && mempool_size < MEMPOOL_TARGET) { + break; + } tokio::time::sleep(std::time::Duration::from_secs(1)).await; } exec.store(false, Ordering::Relaxed); From 099aa40ce1190eb7f427d2da6cfbc47c2bbcd55f Mon Sep 17 00:00:00 2001 From: Michael Sutton Date: Fri, 6 Oct 2023 15:11:56 +0300 Subject: [PATCH 86/86] make `config.block_template_cache_lifetime` non-feature dependent --- consensus/core/src/config/mod.rs | 8 +++----- kaspad/src/args.rs | 15 +++++---------- kaspad/src/daemon.rs | 6 +----- 3 files changed, 9 insertions(+), 20 deletions(-) diff --git a/consensus/core/src/config/mod.rs b/consensus/core/src/config/mod.rs index 0dabd9626b..079ccb092e 100644 --- a/consensus/core/src/config/mod.rs +++ b/consensus/core/src/config/mod.rs @@ -59,11 +59,10 @@ pub struct Config { pub externalip: Option, - #[cfg(feature = "devnet-prealloc")] - pub initial_utxo_set: Arc, + pub block_template_cache_lifetime: Option, #[cfg(feature = "devnet-prealloc")] - pub block_template_cache_lifetime: Option, + pub initial_utxo_set: Arc, } impl Config { @@ -85,11 +84,10 @@ impl Config { user_agent_comments: Default::default(), externalip: None, p2p_listen_address: ContextualNetAddress::unspecified(), + block_template_cache_lifetime: None, #[cfg(feature = "devnet-prealloc")] initial_utxo_set: Default::default(), - #[cfg(feature = "devnet-prealloc")] - block_template_cache_lifetime: None, } } diff --git a/kaspad/src/args.rs b/kaspad/src/args.rs index 2efa50e085..5c455bfc89 100644 --- a/kaspad/src/args.rs +++ b/kaspad/src/args.rs @@ -55,6 +55,7 @@ pub struct Args { pub externalip: Option, pub perf_metrics: bool, pub perf_metrics_interval_sec: u64, + pub 
block_template_cache_lifetime: Option, #[cfg(feature = "devnet-prealloc")] pub num_prealloc_utxos: Option, @@ -62,8 +63,6 @@ pub struct Args { pub prealloc_address: Option, #[cfg(feature = "devnet-prealloc")] pub prealloc_amount: u64, - #[cfg(feature = "devnet-prealloc")] - pub block_template_cache_lifetime: Option, } impl Default for Args { @@ -100,6 +99,7 @@ impl Default for Args { perf_metrics: false, perf_metrics_interval_sec: 1, externalip: None, + block_template_cache_lifetime: None, #[cfg(feature = "devnet-prealloc")] num_prealloc_utxos: None, @@ -107,8 +107,6 @@ impl Default for Args { prealloc_address: None, #[cfg(feature = "devnet-prealloc")] prealloc_amount: 1_000_000, - #[cfg(feature = "devnet-prealloc")] - block_template_cache_lifetime: None, } } } @@ -122,15 +120,12 @@ impl Args { // TODO: change to `config.enable_sanity_checks = self.sanity` when we reach stable versions config.enable_sanity_checks = true; config.user_agent_comments = self.user_agent_comments.clone(); + config.block_template_cache_lifetime = self.block_template_cache_lifetime; #[cfg(feature = "devnet-prealloc")] if let Some(num_prealloc_utxos) = self.num_prealloc_utxos { config.initial_utxo_set = Arc::new(self.generate_prealloc_utxos(num_prealloc_utxos)); } - #[cfg(feature = "devnet-prealloc")] - if self.block_template_cache_lifetime.is_some() { - config.block_template_cache_lifetime = self.block_template_cache_lifetime; - } } #[cfg(feature = "devnet-prealloc")] @@ -364,6 +359,8 @@ pub fn parse_args() -> Args { .get_one::("perf-metrics-interval-sec") .cloned() .unwrap_or(defaults.perf_metrics_interval_sec), + // Note: currently used programmatically by benchmarks and not exposed to CLI users + block_template_cache_lifetime: defaults.block_template_cache_lifetime, #[cfg(feature = "devnet-prealloc")] num_prealloc_utxos: m.get_one::("num-prealloc-utxos").cloned(), @@ -371,8 +368,6 @@ pub fn parse_args() -> Args { prealloc_address: m.get_one::("prealloc-address").cloned(), #[cfg(feature = "devnet-prealloc")] prealloc_amount: m.get_one::("prealloc-amount").cloned().unwrap_or(defaults.prealloc_amount), - #[cfg(feature = "devnet-prealloc")] - block_template_cache_lifetime: m.get_one::("block-template-cache-lifetime").cloned(), } } diff --git a/kaspad/src/daemon.rs b/kaspad/src/daemon.rs index 6bfabfe0eb..a2290210fb 100644 --- a/kaspad/src/daemon.rs +++ b/kaspad/src/daemon.rs @@ -323,17 +323,13 @@ do you confirm? (answer y/n or pass --yes to the Kaspad command line to confirm let address_manager = AddressManager::new(config.clone(), meta_db); - #[cfg(not(feature = "devnet-prealloc"))] - let cache_lifetime: Option = None; - #[cfg(feature = "devnet-prealloc")] - let cache_lifetime = config.block_template_cache_lifetime; let mining_monitor = Arc::new(MiningMonitor::new(mining_counters.clone(), tx_script_cache_counters.clone(), tick_service.clone())); let mining_manager = MiningManagerProxy::new(Arc::new(MiningManager::new_with_spam_blocking_option( network.is_mainnet(), config.target_time_per_block, false, config.max_block_mass, - cache_lifetime, + config.block_template_cache_lifetime, mining_counters, )));
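[Editor's note — a hedged illustration of using the now feature-independent field
programmatically; it mirrors the `Args` literal already used in mempool_benchmarks.rs
above, and per the comment in args.rs the field is intentionally not exposed on the CLI:

    // Hypothetical benchmark setup (sketch, not part of the patch):
    let args = Args {
        block_template_cache_lifetime: Some(0), // a zero lifetime effectively disables template caching
        ..Default::default()
    };
]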