diff --git a/Cargo.lock b/Cargo.lock index 470fbc4709..2197b486af 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2462,6 +2462,7 @@ version = "0.1.7" dependencies = [ "criterion", "futures-util", + "itertools 0.10.5", "kaspa-addresses", "kaspa-consensus-core", "kaspa-consensusmanager", @@ -2478,6 +2479,7 @@ dependencies = [ "serde", "smallvec", "thiserror", + "tokio", ] [[package]] diff --git a/consensus/core/src/api/mod.rs b/consensus/core/src/api/mod.rs index b9662ed770..880a271723 100644 --- a/consensus/core/src/api/mod.rs +++ b/consensus/core/src/api/mod.rs @@ -4,7 +4,7 @@ use std::sync::Arc; use crate::{ acceptance_data::AcceptanceData, - block::{Block, BlockTemplate}, + block::{Block, BlockTemplate, TemplateBuildMode, TemplateTransactionSelector}, block_count::BlockCount, blockstatus::BlockStatus, coinbase::MinerData, @@ -27,7 +27,12 @@ pub type BlockValidationFuture = BoxFuture<'static, BlockProcessResult) -> Result { + fn build_block_template( + &self, + miner_data: MinerData, + tx_selector: Box, + build_mode: TemplateBuildMode, + ) -> Result { unimplemented!() } @@ -40,8 +45,24 @@ pub trait ConsensusApi: Send + Sync { } /// Populates the mempool transaction with maximally found UTXO entry data and proceeds to full transaction - /// validation if all are found. If validation is successful, also [`transaction.calculated_fee`] is expected to be populated - fn validate_mempool_transaction_and_populate(&self, transaction: &mut MutableTransaction) -> TxResult<()> { + /// validation if all are found. If validation is successful, also [`transaction.calculated_fee`] is expected to be populated. + fn validate_mempool_transaction(&self, transaction: &mut MutableTransaction) -> TxResult<()> { + unimplemented!() + } + + /// Populates the mempool transactions with maximally found UTXO entry data and proceeds to full transactions + /// validation if all are found. If validation is successful, also [`transaction.calculated_fee`] is expected to be populated. 
+ fn validate_mempool_transactions_in_parallel(&self, transactions: &mut [MutableTransaction]) -> Vec> { + unimplemented!() + } + + /// Populates the mempool transaction with maximally found UTXO entry data. + fn populate_mempool_transaction(&self, transaction: &mut MutableTransaction) -> TxResult<()> { + unimplemented!() + } + + /// Populates the mempool transactions with maximally found UTXO entry data. + fn populate_mempool_transactions_in_parallel(&self, transactions: &mut [MutableTransaction]) -> Vec> { unimplemented!() } diff --git a/consensus/core/src/block.rs b/consensus/core/src/block.rs index acbdfcb883..77a3631969 100644 --- a/consensus/core/src/block.rs +++ b/consensus/core/src/block.rs @@ -1,6 +1,10 @@ use std::sync::Arc; -use crate::{coinbase::MinerData, header::Header, tx::Transaction}; +use crate::{ + coinbase::MinerData, + header::Header, + tx::{Transaction, TransactionId}, +}; use kaspa_hashes::Hash; /// A mutable block structure where header and transactions within can still be mutated. @@ -64,6 +68,34 @@ impl Block { } } +/// An abstraction for a recallable transaction selector with persistent state +pub trait TemplateTransactionSelector { + /// Expected to return a batch of transactions which were not previously selected. + /// The batch will typically contain sufficient transactions to fill the block + /// mass (along with the previously unrejected txs), or will drain the selector + fn select_transactions(&mut self) -> Vec; + + /// Should be used to report invalid transactions obtained from the *most recent* + /// `select_transactions` call. 
Implementors should use this call to internally + /// track the selection state and discard the rejected tx from internal occupation calculations + fn reject_selection(&mut self, tx_id: TransactionId); + + /// Determine whether this was an overall successful selection episode + fn is_successful(&self) -> bool; +} + +/// Block template build mode +#[derive(Clone, Copy, Debug)] +pub enum TemplateBuildMode { + /// Block template build can possibly fail if `TemplateTransactionSelector::is_successful` deems the operation unsuccessful. + /// + /// In such a case, the build fails with `BlockRuleError::InvalidTransactionsInNewBlock`. + Standard, + + /// Block template build always succeeds. The built block contains only the validated transactions. + Infallible, +} + /// A block template for miners. #[derive(Debug, Clone)] pub struct BlockTemplate { diff --git a/consensus/core/src/config/mod.rs b/consensus/core/src/config/mod.rs index f453a6128f..079ccb092e 100644 --- a/consensus/core/src/config/mod.rs +++ b/consensus/core/src/config/mod.rs @@ -59,6 +59,8 @@ pub struct Config { pub externalip: Option, + pub block_template_cache_lifetime: Option, + #[cfg(feature = "devnet-prealloc")] pub initial_utxo_set: Arc, } @@ -82,6 +84,7 @@ impl Config { user_agent_comments: Default::default(), externalip: None, p2p_listen_address: ContextualNetAddress::unspecified(), + block_template_cache_lifetime: None, #[cfg(feature = "devnet-prealloc")] initial_utxo_set: Default::default(), diff --git a/consensus/core/src/errors/block.rs b/consensus/core/src/errors/block.rs index e78d0f0f88..bc72136fe6 100644 --- a/consensus/core/src/errors/block.rs +++ b/consensus/core/src/errors/block.rs @@ -1,4 +1,4 @@ -use std::fmt::Display; +use std::{collections::HashMap, fmt::Display}; use crate::{ constants, @@ -140,7 +140,7 @@ pub enum RuleError { InvalidTransactionsInUtxoContext(usize, usize), #[error("invalid transactions in new block template")] - InvalidTransactionsInNewBlock(Vec<(TransactionId, 
TxRuleError)>), + InvalidTransactionsInNewBlock(HashMap), #[error("DAA window data has only {0} entries")] InsufficientDaaWindowSize(usize), diff --git a/consensus/core/src/tx.rs b/consensus/core/src/tx.rs index f9e1fef7c2..4d3ae47518 100644 --- a/consensus/core/src/tx.rs +++ b/consensus/core/src/tx.rs @@ -340,6 +340,12 @@ impl> MutableTransaction { } } +impl> AsRef for MutableTransaction { + fn as_ref(&self) -> &Transaction { + self.tx.as_ref() + } +} + /// Private struct used to wrap a [`MutableTransaction`] as a [`VerifiableTransaction`] struct MutableTransactionVerifiableWrapper<'a, T: AsRef> { inner: &'a MutableTransaction, diff --git a/consensus/src/consensus/factory.rs b/consensus/src/consensus/factory.rs index 0d66b61f2f..a0b42ad654 100644 --- a/consensus/src/consensus/factory.rs +++ b/consensus/src/consensus/factory.rs @@ -11,6 +11,7 @@ use kaspa_database::{ registry::DatabaseStorePrefixes, }; +use kaspa_txscript::caches::TxScriptCacheCounters; use parking_lot::RwLock; use rocksdb::WriteBatch; use serde::{Deserialize, Serialize}; @@ -153,6 +154,7 @@ pub struct Factory { db_parallelism: usize, notification_root: Arc, counters: Arc, + tx_script_cache_counters: Arc, } impl Factory { @@ -163,6 +165,7 @@ impl Factory { db_parallelism: usize, notification_root: Arc, counters: Arc, + tx_script_cache_counters: Arc, ) -> Self { let mut config = config.clone(); #[cfg(feature = "devnet-prealloc")] @@ -175,6 +178,7 @@ impl Factory { db_parallelism, notification_root, counters, + tx_script_cache_counters, } } } @@ -208,6 +212,7 @@ impl ConsensusFactory for Factory { session_lock.clone(), self.notification_root.clone(), self.counters.clone(), + self.tx_script_cache_counters.clone(), entry.creation_timestamp, )); @@ -236,6 +241,7 @@ impl ConsensusFactory for Factory { session_lock.clone(), self.notification_root.clone(), self.counters.clone(), + self.tx_script_cache_counters.clone(), entry.creation_timestamp, )); diff --git a/consensus/src/consensus/mod.rs 
b/consensus/src/consensus/mod.rs index 1795236753..7485f5547d 100644 --- a/consensus/src/consensus/mod.rs +++ b/consensus/src/consensus/mod.rs @@ -41,7 +41,7 @@ use crate::{ use kaspa_consensus_core::{ acceptance_data::AcceptanceData, api::{BlockValidationFuture, ConsensusApi}, - block::{Block, BlockTemplate}, + block::{Block, BlockTemplate, TemplateBuildMode, TemplateTransactionSelector}, block_count::BlockCount, blockhash::BlockHashExtensions, blockstatus::BlockStatus, @@ -70,6 +70,7 @@ use kaspa_consensusmanager::{SessionLock, SessionReadGuard}; use kaspa_database::prelude::StoreResultExtensions; use kaspa_hashes::Hash; use kaspa_muhash::MuHash; +use kaspa_txscript::caches::TxScriptCacheCounters; use std::thread::{self, JoinHandle}; use std::{ @@ -132,6 +133,7 @@ impl Consensus { pruning_lock: SessionLock, notification_root: Arc, counters: Arc, + tx_script_cache_counters: Arc, creation_timestamp: u64, ) -> Self { let params = &config.params; @@ -147,7 +149,7 @@ impl Consensus { // Services and managers // - let services = ConsensusServices::new(db.clone(), storage.clone(), config.clone()); + let services = ConsensusServices::new(db.clone(), storage.clone(), config.clone(), tx_script_cache_counters); // // Processor channels @@ -353,8 +355,13 @@ impl Consensus { } impl ConsensusApi for Consensus { - fn build_block_template(&self, miner_data: MinerData, txs: Vec) -> Result { - self.virtual_processor.build_block_template(miner_data, txs) + fn build_block_template( + &self, + miner_data: MinerData, + tx_selector: Box, + build_mode: TemplateBuildMode, + ) -> Result { + self.virtual_processor.build_block_template(miner_data, tx_selector, build_mode) } fn validate_and_insert_block(&self, block: Block) -> BlockValidationFuture { @@ -367,11 +374,24 @@ impl ConsensusApi for Consensus { Box::pin(result) } - fn validate_mempool_transaction_and_populate(&self, transaction: &mut MutableTransaction) -> TxResult<()> { - 
self.virtual_processor.validate_mempool_transaction_and_populate(transaction)?; + fn validate_mempool_transaction(&self, transaction: &mut MutableTransaction) -> TxResult<()> { + self.virtual_processor.validate_mempool_transaction(transaction)?; + Ok(()) + } + + fn validate_mempool_transactions_in_parallel(&self, transactions: &mut [MutableTransaction]) -> Vec> { + self.virtual_processor.validate_mempool_transactions_in_parallel(transactions) + } + + fn populate_mempool_transaction(&self, transaction: &mut MutableTransaction) -> TxResult<()> { + self.virtual_processor.populate_mempool_transaction(transaction)?; Ok(()) } + fn populate_mempool_transactions_in_parallel(&self, transactions: &mut [MutableTransaction]) -> Vec> { + self.virtual_processor.populate_mempool_transactions_in_parallel(transactions) + } + fn calculate_transaction_mass(&self, transaction: &Transaction) -> u64 { self.services.mass_calculator.calc_tx_mass(transaction) } diff --git a/consensus/src/consensus/services.rs b/consensus/src/consensus/services.rs index f0b791ba26..d76a28d5a9 100644 --- a/consensus/src/consensus/services.rs +++ b/consensus/src/consensus/services.rs @@ -18,6 +18,7 @@ use crate::{ }; use itertools::Itertools; +use kaspa_txscript::caches::TxScriptCacheCounters; use std::sync::Arc; pub type DbGhostdagManager = @@ -65,7 +66,12 @@ pub struct ConsensusServices { } impl ConsensusServices { - pub fn new(db: Arc, storage: Arc, config: Arc) -> Arc { + pub fn new( + db: Arc, + storage: Arc, + config: Arc, + tx_script_cache_counters: Arc, + ) -> Arc { let params = &config.params; let statuses_service = MTStatusesService::new(storage.statuses_store.clone()); @@ -144,6 +150,7 @@ impl ConsensusServices { params.ghostdag_k, params.coinbase_payload_script_public_key_max_len, params.coinbase_maturity, + tx_script_cache_counters, ); let pruning_point_manager = PruningPointManager::new( diff --git a/consensus/src/consensus/test_consensus.rs b/consensus/src/consensus/test_consensus.rs index 
620722022a..10d6d5dd23 100644 --- a/consensus/src/consensus/test_consensus.rs +++ b/consensus/src/consensus/test_consensus.rs @@ -49,8 +49,17 @@ impl TestConsensus { /// Creates a test consensus instance based on `config` with the provided `db` and `notification_sender` pub fn with_db(db: Arc, config: &Config, notification_sender: Sender) -> Self { let notification_root = Arc::new(ConsensusNotificationRoot::new(notification_sender)); - let counters = Arc::new(ProcessingCounters::default()); - let consensus = Arc::new(Consensus::new(db, Arc::new(config.clone()), Default::default(), notification_root, counters, 0)); + let counters = Default::default(); + let tx_script_cache_counters = Default::default(); + let consensus = Arc::new(Consensus::new( + db, + Arc::new(config.clone()), + Default::default(), + notification_root, + counters, + tx_script_cache_counters, + 0, + )); let block_builder = TestBlockBuilder::new(consensus.virtual_processor.clone()); Self { params: config.params.clone(), consensus, block_builder, db_lifetime: Default::default() } @@ -60,8 +69,17 @@ impl TestConsensus { pub fn with_notifier(config: &Config, notification_sender: Sender) -> Self { let (db_lifetime, db) = create_temp_db!(ConnBuilder::default()); let notification_root = Arc::new(ConsensusNotificationRoot::new(notification_sender)); - let counters = Arc::new(ProcessingCounters::default()); - let consensus = Arc::new(Consensus::new(db, Arc::new(config.clone()), Default::default(), notification_root, counters, 0)); + let counters = Default::default(); + let tx_script_cache_counters = Default::default(); + let consensus = Arc::new(Consensus::new( + db, + Arc::new(config.clone()), + Default::default(), + notification_root, + counters, + tx_script_cache_counters, + 0, + )); let block_builder = TestBlockBuilder::new(consensus.virtual_processor.clone()); Self { consensus, block_builder, params: config.params.clone(), db_lifetime } @@ -72,8 +90,17 @@ impl TestConsensus { let (db_lifetime, db) = 
create_temp_db!(ConnBuilder::default()); let (dummy_notification_sender, _) = async_channel::unbounded(); let notification_root = Arc::new(ConsensusNotificationRoot::new(dummy_notification_sender)); - let counters = Arc::new(ProcessingCounters::default()); - let consensus = Arc::new(Consensus::new(db, Arc::new(config.clone()), Default::default(), notification_root, counters, 0)); + let counters = Default::default(); + let tx_script_cache_counters = Default::default(); + let consensus = Arc::new(Consensus::new( + db, + Arc::new(config.clone()), + Default::default(), + notification_root, + counters, + tx_script_cache_counters, + 0, + )); let block_builder = TestBlockBuilder::new(consensus.virtual_processor.clone()); Self { consensus, block_builder, params: config.params.clone(), db_lifetime } diff --git a/consensus/src/pipeline/virtual_processor/processor.rs b/consensus/src/pipeline/virtual_processor/processor.rs index 5a0c182de0..2718ebd91c 100644 --- a/consensus/src/pipeline/virtual_processor/processor.rs +++ b/consensus/src/pipeline/virtual_processor/processor.rs @@ -48,7 +48,7 @@ use crate::{ }; use kaspa_consensus_core::{ acceptance_data::AcceptanceData, - block::{BlockTemplate, MutableBlock}, + block::{BlockTemplate, MutableBlock, TemplateBuildMode, TemplateTransactionSelector}, blockstatus::BlockStatus::{StatusDisqualifiedFromChain, StatusUTXOValid}, coinbase::MinerData, config::genesis::GenesisBlock, @@ -81,11 +81,14 @@ use itertools::Itertools; use kaspa_utils::binary_heap::BinaryHeapExtensions; use parking_lot::{RwLock, RwLockUpgradableReadGuard}; use rand::seq::SliceRandom; -use rayon::ThreadPool; +use rayon::{ + prelude::{IntoParallelRefMutIterator, ParallelIterator}, + ThreadPool, +}; use rocksdb::WriteBatch; use std::{ cmp::min, - collections::{BinaryHeap, VecDeque}, + collections::{BinaryHeap, HashMap, VecDeque}, ops::Deref, sync::{atomic::Ordering, Arc}, }; @@ -705,21 +708,71 @@ impl VirtualStateProcessor { (virtual_parents, ghostdag_data) } - pub fn 
validate_mempool_transaction_and_populate(&self, mutable_tx: &mut MutableTransaction) -> TxResult<()> { + fn validate_mempool_transaction_impl( + &self, + mutable_tx: &mut MutableTransaction, + virtual_utxo_view: &impl UtxoView, + virtual_daa_score: u64, + virtual_past_median_time: u64, + ) -> TxResult<()> { self.transaction_validator.validate_tx_in_isolation(&mutable_tx.tx)?; + self.transaction_validator.utxo_free_tx_validation(&mutable_tx.tx, virtual_daa_score, virtual_past_median_time)?; + self.validate_mempool_transaction_in_utxo_context(mutable_tx, virtual_utxo_view, virtual_daa_score)?; + Ok(()) + } + pub fn validate_mempool_transaction(&self, mutable_tx: &mut MutableTransaction) -> TxResult<()> { let virtual_read = self.virtual_stores.read(); let virtual_state = virtual_read.state.get().unwrap(); let virtual_utxo_view = &virtual_read.utxo_set; let virtual_daa_score = virtual_state.daa_score; let virtual_past_median_time = virtual_state.past_median_time; + self.validate_mempool_transaction_impl(mutable_tx, virtual_utxo_view, virtual_daa_score, virtual_past_median_time) + } - self.transaction_validator.utxo_free_tx_validation(&mutable_tx.tx, virtual_daa_score, virtual_past_median_time)?; - self.validate_mempool_transaction_in_utxo_context(mutable_tx, virtual_utxo_view, virtual_daa_score)?; + pub fn validate_mempool_transactions_in_parallel(&self, mutable_txs: &mut [MutableTransaction]) -> Vec> { + let virtual_read = self.virtual_stores.read(); + let virtual_state = virtual_read.state.get().unwrap(); + let virtual_utxo_view = &virtual_read.utxo_set; + let virtual_daa_score = virtual_state.daa_score; + let virtual_past_median_time = virtual_state.past_median_time; + self.thread_pool.install(|| { + mutable_txs + .par_iter_mut() + .map(|mtx| { + self.validate_mempool_transaction_impl(mtx, &virtual_utxo_view, virtual_daa_score, virtual_past_median_time) + }) + .collect::>>() + }) + } + + fn populate_mempool_transaction_impl( + &self, + mutable_tx: &mut 
MutableTransaction, + virtual_utxo_view: &impl UtxoView, + ) -> TxResult<()> { + self.populate_mempool_transaction_in_utxo_context(mutable_tx, virtual_utxo_view)?; Ok(()) } + pub fn populate_mempool_transaction(&self, mutable_tx: &mut MutableTransaction) -> TxResult<()> { + let virtual_read = self.virtual_stores.read(); + let virtual_utxo_view = &virtual_read.utxo_set; + self.populate_mempool_transaction_impl(mutable_tx, virtual_utxo_view) + } + + pub fn populate_mempool_transactions_in_parallel(&self, mutable_txs: &mut [MutableTransaction]) -> Vec> { + let virtual_read = self.virtual_stores.read(); + let virtual_utxo_view = &virtual_read.utxo_set; + self.thread_pool.install(|| { + mutable_txs + .par_iter_mut() + .map(|mtx| self.populate_mempool_transaction_impl(mtx, &virtual_utxo_view)) + .collect::>>() + }) + } + fn validate_block_template_transaction( &self, tx: &Transaction, @@ -734,14 +787,59 @@ impl VirtualStateProcessor { Ok(()) } - pub fn build_block_template(&self, miner_data: MinerData, txs: Vec) -> Result { + pub fn build_block_template( + &self, + miner_data: MinerData, + mut tx_selector: Box, + build_mode: TemplateBuildMode, + ) -> Result { + // // TODO: tests + // + + // We call for the initial tx batch before acquiring the virtual read lock, + // optimizing for the common case where all txs are valid. 
Following selection calls + // are called within the lock in order to preserve validness of already validated txs + let mut txs = tx_selector.select_transactions(); + let virtual_read = self.virtual_stores.read(); let virtual_state = virtual_read.state.get().unwrap(); let virtual_utxo_view = &virtual_read.utxo_set; - // Validate the transactions in virtual's utxo context - self.validate_block_template_transactions(&txs, &virtual_state, virtual_utxo_view)?; + let mut invalid_transactions = HashMap::new(); + for tx in txs.iter() { + if let Err(e) = self.validate_block_template_transaction(tx, &virtual_state, virtual_utxo_view) { + invalid_transactions.insert(tx.id(), e); + tx_selector.reject_selection(tx.id()); + } + } + + let mut has_rejections = !invalid_transactions.is_empty(); + if has_rejections { + txs.retain(|tx| !invalid_transactions.contains_key(&tx.id())); + } + + while has_rejections { + has_rejections = false; + let next_batch = tx_selector.select_transactions(); // Note that once next_batch is empty the loop will exit + for tx in next_batch { + if let Err(e) = self.validate_block_template_transaction(&tx, &virtual_state, virtual_utxo_view) { + invalid_transactions.insert(tx.id(), e); + tx_selector.reject_selection(tx.id()); + has_rejections = true; + } else { + txs.push(tx); + } + } + } + + // Check whether this was an overall successful selection episode. 
We pass this decision + // to the selector implementation which has the broadest picture and can use mempool config + // and context + match (build_mode, tx_selector.is_successful()) { + (TemplateBuildMode::Standard, false) => return Err(RuleError::InvalidTransactionsInNewBlock(invalid_transactions)), + (TemplateBuildMode::Standard, true) | (TemplateBuildMode::Infallible, _) => {} + } // At this point we can safely drop the read lock drop(virtual_read); @@ -750,17 +848,17 @@ impl VirtualStateProcessor { self.build_block_template_from_virtual_state(virtual_state, miner_data, txs) } - pub fn validate_block_template_transactions( + pub(crate) fn validate_block_template_transactions( &self, txs: &[Transaction], virtual_state: &VirtualState, utxo_view: &impl UtxoView, ) -> Result<(), RuleError> { - // Search for invalid transactions. This can happen since the mining manager calling this function is not atomically in sync with virtual state - let mut invalid_transactions = Vec::new(); + // Search for invalid transactions + let mut invalid_transactions = HashMap::new(); for tx in txs.iter() { if let Err(e) = self.validate_block_template_transaction(tx, virtual_state, utxo_view) { - invalid_transactions.push((tx.id(), e)) + invalid_transactions.insert(tx.id(), e); } } if !invalid_transactions.is_empty() { diff --git a/consensus/src/pipeline/virtual_processor/tests.rs b/consensus/src/pipeline/virtual_processor/tests.rs index 9c6f309270..fc6cb73dae 100644 --- a/consensus/src/pipeline/virtual_processor/tests.rs +++ b/consensus/src/pipeline/virtual_processor/tests.rs @@ -1,17 +1,41 @@ use crate::{consensus::test_consensus::TestConsensus, model::services::reachability::ReachabilityService}; use kaspa_consensus_core::{ api::ConsensusApi, - block::{Block, BlockTemplate, MutableBlock}, + block::{Block, BlockTemplate, MutableBlock, TemplateBuildMode, TemplateTransactionSelector}, blockhash, blockstatus::BlockStatus, coinbase::MinerData, config::{params::MAINNET_PARAMS, 
ConfigBuilder}, - tx::{ScriptPublicKey, ScriptVec}, + tx::{ScriptPublicKey, ScriptVec, Transaction}, BlockHashSet, }; use kaspa_hashes::Hash; use std::{collections::VecDeque, thread::JoinHandle}; +struct OnetimeTxSelector { + txs: Option>, +} + +impl OnetimeTxSelector { + fn new(txs: Vec) -> Self { + Self { txs: Some(txs) } + } +} + +impl TemplateTransactionSelector for OnetimeTxSelector { + fn select_transactions(&mut self) -> Vec { + self.txs.take().unwrap() + } + + fn reject_selection(&mut self, _tx_id: kaspa_consensus_core::tx::TransactionId) { + unimplemented!() + } + + fn is_successful(&self) -> bool { + true + } +} + struct TestContext { consensus: TestConsensus, join_handles: Vec>, @@ -78,7 +102,14 @@ impl TestContext { } pub fn build_block_template(&self, nonce: u64, timestamp: u64) -> BlockTemplate { - let mut t = self.consensus.build_block_template(self.miner_data.clone(), Default::default()).unwrap(); + let mut t = self + .consensus + .build_block_template( + self.miner_data.clone(), + Box::new(OnetimeTxSelector::new(Default::default())), + TemplateBuildMode::Standard, + ) + .unwrap(); t.block.header.timestamp = timestamp; t.block.header.nonce = nonce; t.block.header.finalize(); diff --git a/consensus/src/pipeline/virtual_processor/utxo_validation.rs b/consensus/src/pipeline/virtual_processor/utxo_validation.rs index 3c117d46e1..2d7d19eb7c 100644 --- a/consensus/src/pipeline/virtual_processor/utxo_validation.rs +++ b/consensus/src/pipeline/virtual_processor/utxo_validation.rs @@ -247,12 +247,11 @@ impl VirtualStateProcessor { } } - /// Populates the mempool transaction with maximally found UTXO entry data and proceeds to validation if all found - pub(super) fn validate_mempool_transaction_in_utxo_context( + /// Populates the mempool transaction with maximally found UTXO entry data + pub(crate) fn populate_mempool_transaction_in_utxo_context( &self, mutable_tx: &mut MutableTransaction, utxo_view: &impl UtxoView, - pov_daa_score: u64, ) -> TxResult<()> { 
let mut has_missing_outpoints = false; for i in 0..mutable_tx.tx.inputs.len() { @@ -271,6 +270,18 @@ impl VirtualStateProcessor { if has_missing_outpoints { return Err(TxRuleError::MissingTxOutpoints); } + Ok(()) + } + + /// Populates the mempool transaction with maximally found UTXO entry data and proceeds to validation if all found + pub(super) fn validate_mempool_transaction_in_utxo_context( + &self, + mutable_tx: &mut MutableTransaction, + utxo_view: &impl UtxoView, + pov_daa_score: u64, + ) -> TxResult<()> { + self.populate_mempool_transaction_in_utxo_context(mutable_tx, utxo_view)?; + // At this point we know all UTXO entries are populated, so we can safely pass the tx as verifiable let calculated_fee = self.transaction_validator.validate_populated_transaction_and_get_fee(&mutable_tx.as_verifiable(), pov_daa_score)?; diff --git a/consensus/src/processes/transaction_validator/mod.rs b/consensus/src/processes/transaction_validator/mod.rs index 792ecb9b1f..8839af2047 100644 --- a/consensus/src/processes/transaction_validator/mod.rs +++ b/consensus/src/processes/transaction_validator/mod.rs @@ -2,9 +2,14 @@ pub mod errors; pub mod transaction_validator_populated; mod tx_validation_in_isolation; pub mod tx_validation_not_utxo_related; +use std::sync::Arc; + use crate::model::stores::ghostdag; -use kaspa_txscript::{caches::Cache, SigCacheKey}; +use kaspa_txscript::{ + caches::{Cache, TxScriptCacheCounters}, + SigCacheKey, +}; pub use tx_validation_in_isolation::*; #[derive(Clone)] @@ -28,6 +33,7 @@ impl TransactionValidator { ghostdag_k: ghostdag::KType, coinbase_payload_script_public_key_max_len: u8, coinbase_maturity: u64, + counters: Arc, ) -> Self { Self { max_tx_inputs, @@ -37,7 +43,7 @@ impl TransactionValidator { ghostdag_k, coinbase_payload_script_public_key_max_len, coinbase_maturity, - sig_cache: Cache::new(10_000), + sig_cache: Cache::with_counters(10_000, counters), } } } diff --git 
a/consensus/src/processes/transaction_validator/transaction_validator_populated.rs b/consensus/src/processes/transaction_validator/transaction_validator_populated.rs index 5e04431642..b46527aca0 100644 --- a/consensus/src/processes/transaction_validator/transaction_validator_populated.rs +++ b/consensus/src/processes/transaction_validator/transaction_validator_populated.rs @@ -142,6 +142,7 @@ mod tests { params.ghostdag_k, params.coinbase_payload_script_public_key_max_len, params.coinbase_maturity, + Default::default(), ); let prev_tx_id = TransactionId::from_str("746915c8dfc5e1550eacbe1d87625a105750cf1a65aaddd1baa60f8bcf7e953c").unwrap(); @@ -202,6 +203,7 @@ mod tests { params.ghostdag_k, params.coinbase_payload_script_public_key_max_len, params.coinbase_maturity, + Default::default(), ); // Taken from: 3f582463d73c77d93f278b7bf649bd890e75fe9bb8a1edd7a6854df1a2a2bfc1 @@ -263,6 +265,7 @@ mod tests { params.ghostdag_k, params.coinbase_payload_script_public_key_max_len, params.coinbase_maturity, + Default::default(), ); // Taken from: d839d29b549469d0f9a23e51febe68d4084967a6a477868b511a5a8d88c5ae06 @@ -324,6 +327,7 @@ mod tests { params.ghostdag_k, params.coinbase_payload_script_public_key_max_len, params.coinbase_maturity, + Default::default(), ); // Taken from: d839d29b549469d0f9a23e51febe68d4084967a6a477868b511a5a8d88c5ae06 @@ -386,6 +390,7 @@ mod tests { params.ghostdag_k, params.coinbase_payload_script_public_key_max_len, params.coinbase_maturity, + Default::default(), ); // Taken from: d839d29b549469d0f9a23e51febe68d4084967a6a477868b511a5a8d88c5ae06 @@ -448,6 +453,7 @@ mod tests { params.ghostdag_k, params.coinbase_payload_script_public_key_max_len, params.coinbase_maturity, + Default::default(), ); // Taken from: d839d29b549469d0f9a23e51febe68d4084967a6a477868b511a5a8d88c5ae06 @@ -510,6 +516,7 @@ mod tests { params.ghostdag_k, params.coinbase_payload_script_public_key_max_len, params.coinbase_maturity, + Default::default(), ); let prev_tx_id = 
TransactionId::from_str("1111111111111111111111111111111111111111111111111111111111111111").unwrap(); @@ -563,6 +570,7 @@ mod tests { params.ghostdag_k, params.coinbase_payload_script_public_key_max_len, params.coinbase_maturity, + Default::default(), ); let secp = Secp256k1::new(); diff --git a/consensus/src/processes/transaction_validator/tx_validation_in_isolation.rs b/consensus/src/processes/transaction_validator/tx_validation_in_isolation.rs index a30d0c8262..88a2e63a23 100644 --- a/consensus/src/processes/transaction_validator/tx_validation_in_isolation.rs +++ b/consensus/src/processes/transaction_validator/tx_validation_in_isolation.rs @@ -173,6 +173,7 @@ mod tests { params.ghostdag_k, params.coinbase_payload_script_public_key_max_len, params.coinbase_maturity, + Default::default(), ); let valid_cb = Transaction::new( diff --git a/core/src/time.rs b/core/src/time.rs index 65ed6dcdea..03555ec760 100644 --- a/core/src/time.rs +++ b/core/src/time.rs @@ -22,13 +22,17 @@ impl Stopwatch { pub fn with_threshold(name: &'static str) -> Self { Self { name, start: Instant::now() } } + + pub fn elapsed(&self) -> Duration { + self.start.elapsed() + } } impl Drop for Stopwatch { fn drop(&mut self) { let elapsed = self.start.elapsed(); if elapsed > Duration::from_millis(TR) { - kaspa_core::warn!("\n[{}] Abnormal time: {:#?}", self.name, elapsed); + kaspa_core::trace!("[{}] Abnormal time: {:#?}", self.name, elapsed); } } } diff --git a/crypto/txscript/src/caches.rs b/crypto/txscript/src/caches.rs index 854bfd1b87..3f76b25750 100644 --- a/crypto/txscript/src/caches.rs +++ b/crypto/txscript/src/caches.rs @@ -1,22 +1,41 @@ use indexmap::IndexMap; use parking_lot::RwLock; use rand::Rng; -use std::{collections::hash_map::RandomState, hash::BuildHasher, sync::Arc}; +use std::{ + collections::hash_map::RandomState, + hash::BuildHasher, + sync::{ + atomic::{AtomicU64, Ordering}, + Arc, + }, +}; #[derive(Clone)] pub struct Cache { // We use IndexMap and not HashMap, because it makes 
it cheaper to remove a random element when the cache is full. map: Arc>>, size: usize, + counters: Arc, } impl Cache { pub fn new(size: u64) -> Self { - Self { map: Arc::new(RwLock::new(IndexMap::with_capacity_and_hasher(size as usize, S::default()))), size: size as usize } + Self::with_counters(size, Default::default()) + } + + pub fn with_counters(size: u64, counters: Arc) -> Self { + Self { + map: Arc::new(RwLock::new(IndexMap::with_capacity_and_hasher(size as usize, S::default()))), + size: size as usize, + counters, + } } pub(crate) fn get(&self, key: &TKey) -> Option { - self.map.read().get(key).cloned() + self.map.read().get(key).cloned().map(|data| { + self.counters.get_counts.fetch_add(1, Ordering::Relaxed); + data + }) } pub(crate) fn insert(&self, key: TKey, data: TData) { @@ -28,5 +47,48 @@ impl TxScriptCacheCountersSnapshot { + TxScriptCacheCountersSnapshot { + insert_counts: self.insert_counts.load(Ordering::Relaxed), + get_counts: self.get_counts.load(Ordering::Relaxed), + } + } +} + +#[derive(Debug, PartialEq, Eq)] +pub struct TxScriptCacheCountersSnapshot { + pub insert_counts: u64, + pub get_counts: u64, +} + +impl TxScriptCacheCountersSnapshot { + pub fn hit_ratio(&self) -> f64 { + if self.insert_counts > 0 { + self.get_counts as f64 / self.insert_counts as f64 + } else { + 0f64 + } + } +} + +impl core::ops::Sub for &TxScriptCacheCountersSnapshot { + type Output = TxScriptCacheCountersSnapshot; + + fn sub(self, rhs: Self) -> Self::Output { + Self::Output { + insert_counts: self.insert_counts.checked_sub(rhs.insert_counts).unwrap_or_default(), + get_counts: self.get_counts.checked_sub(rhs.get_counts).unwrap_or_default(), + } } } diff --git a/kaspad/src/args.rs b/kaspad/src/args.rs index f75548b583..5c455bfc89 100644 --- a/kaspad/src/args.rs +++ b/kaspad/src/args.rs @@ -55,6 +55,7 @@ pub struct Args { pub externalip: Option, pub perf_metrics: bool, pub perf_metrics_interval_sec: u64, + pub block_template_cache_lifetime: Option, #[cfg(feature = 
"devnet-prealloc")] pub num_prealloc_utxos: Option, @@ -98,6 +99,7 @@ impl Default for Args { perf_metrics: false, perf_metrics_interval_sec: 1, externalip: None, + block_template_cache_lifetime: None, #[cfg(feature = "devnet-prealloc")] num_prealloc_utxos: None, @@ -118,6 +120,7 @@ impl Args { // TODO: change to `config.enable_sanity_checks = self.sanity` when we reach stable versions config.enable_sanity_checks = true; config.user_agent_comments = self.user_agent_comments.clone(); + config.block_template_cache_lifetime = self.block_template_cache_lifetime; #[cfg(feature = "devnet-prealloc")] if let Some(num_prealloc_utxos) = self.num_prealloc_utxos { @@ -356,6 +359,8 @@ pub fn parse_args() -> Args { .get_one::("perf-metrics-interval-sec") .cloned() .unwrap_or(defaults.perf_metrics_interval_sec), + // Note: currently used programmatically by benchmarks and not exposed to CLI users + block_template_cache_lifetime: defaults.block_template_cache_lifetime, #[cfg(feature = "devnet-prealloc")] num_prealloc_utxos: m.get_one::("num-prealloc-utxos").cloned(), diff --git a/kaspad/src/daemon.rs b/kaspad/src/daemon.rs index 6be15991be..a2290210fb 100644 --- a/kaspad/src/daemon.rs +++ b/kaspad/src/daemon.rs @@ -10,6 +10,7 @@ use kaspa_core::{core::Core, info, trace}; use kaspa_core::{kaspad_env::version, task::tick::TickService}; use kaspa_grpc_server::service::GrpcService; use kaspa_rpc_service::service::RpcCoreService; +use kaspa_txscript::caches::TxScriptCacheCounters; use kaspa_utils::networking::ContextualNetAddress; use kaspa_addressmanager::AddressManager; @@ -18,7 +19,11 @@ use kaspa_consensus::{consensus::factory::Factory as ConsensusFactory, pipeline: use kaspa_consensusmanager::ConsensusManager; use kaspa_core::task::runtime::AsyncRuntime; use kaspa_index_processor::service::IndexService; -use kaspa_mining::manager::{MiningManager, MiningManagerProxy}; +use kaspa_mining::{ + manager::{MiningManager, MiningManagerProxy}, + monitor::MiningMonitor, + MiningCounters, 
+}; use kaspa_p2p_flows::{flow_context::FlowContext, service::P2pService}; use kaspa_perf_monitor::builder::Builder as PerfMonitorBuilder; @@ -269,8 +274,10 @@ do you confirm? (answer y/n or pass --yes to the Kaspad command line to confirm let (notification_send, notification_recv) = unbounded(); let notification_root = Arc::new(ConsensusNotificationRoot::new(notification_send)); let processing_counters = Arc::new(ProcessingCounters::default()); + let mining_counters = Arc::new(MiningCounters::default()); let wrpc_borsh_counters = Arc::new(WrpcServerCounters::default()); let wrpc_json_counters = Arc::new(WrpcServerCounters::default()); + let tx_script_cache_counters = Arc::new(TxScriptCacheCounters::default()); // Use `num_cpus` background threads for the consensus database as recommended by rocksdb let consensus_db_parallelism = num_cpus::get(); @@ -281,6 +288,7 @@ do you confirm? (answer y/n or pass --yes to the Kaspad command line to confirm consensus_db_parallelism, notification_root.clone(), processing_counters.clone(), + tx_script_cache_counters.clone(), )); let consensus_manager = Arc::new(ConsensusManager::new(consensus_factory)); let consensus_monitor = Arc::new(ConsensusMonitor::new(processing_counters.clone(), tick_service.clone())); @@ -314,12 +322,15 @@ do you confirm? (answer y/n or pass --yes to the Kaspad command line to confirm }; let address_manager = AddressManager::new(config.clone(), meta_db); + + let mining_monitor = Arc::new(MiningMonitor::new(mining_counters.clone(), tx_script_cache_counters.clone(), tick_service.clone())); let mining_manager = MiningManagerProxy::new(Arc::new(MiningManager::new_with_spam_blocking_option( network.is_mainnet(), config.target_time_per_block, false, config.max_block_mass, - None, + config.block_template_cache_lifetime, + mining_counters, ))); let flow_context = Arc::new(FlowContext::new( @@ -368,6 +379,7 @@ do you confirm? 
(answer y/n or pass --yes to the Kaspad command line to confirm async_runtime.register(grpc_service); async_runtime.register(p2p_service); async_runtime.register(consensus_monitor); + async_runtime.register(mining_monitor); async_runtime.register(perf_monitor); let wrpc_service_tasks: usize = 2; // num_cpus::get() / 2; // Register wRPC servers based on command line arguments diff --git a/mining/Cargo.toml b/mining/Cargo.toml index 67b39917e1..f690e45c6e 100644 --- a/mining/Cargo.toml +++ b/mining/Cargo.toml @@ -17,6 +17,7 @@ kaspa-core.workspace = true kaspa-mining-errors.workspace = true kaspa-consensusmanager.workspace = true kaspa-utils.workspace = true + thiserror.workspace = true serde.workspace = true log.workspace = true @@ -24,6 +25,12 @@ futures-util.workspace = true smallvec.workspace = true rand.workspace = true parking_lot.workspace = true +itertools.workspace = true +tokio = { workspace = true, features = [ + "rt-multi-thread", + "macros", + "signal", +] } [dev-dependencies] kaspa-txscript.workspace = true diff --git a/mining/errors/src/mempool.rs b/mining/errors/src/mempool.rs index 6071caaf7e..4e59fc2899 100644 --- a/mining/errors/src/mempool.rs +++ b/mining/errors/src/mempool.rs @@ -18,6 +18,9 @@ pub enum RuleError { #[error("at least one outpoint of transaction is lacking a matching UTXO entry")] RejectMissingOutpoint, + #[error("transaction {0} was already accepted by the consensus")] + RejectAlreadyAccepted(TransactionId), + #[error("transaction {0} is already in the mempool")] RejectDuplicate(TransactionId), diff --git a/mining/src/block_template/builder.rs b/mining/src/block_template/builder.rs index feae39d85d..de3428a745 100644 --- a/mining/src/block_template/builder.rs +++ b/mining/src/block_template/builder.rs @@ -1,9 +1,16 @@ use super::{errors::BuilderResult, policy::Policy}; use crate::{block_template::selector::TransactionsSelector, model::candidate_tx::CandidateTransaction}; use kaspa_consensus_core::{ - api::ConsensusApi, 
block::BlockTemplate, coinbase::MinerData, merkle::calc_hash_merkle_root, tx::COINBASE_TRANSACTION_INDEX, + api::ConsensusApi, + block::{BlockTemplate, TemplateBuildMode}, + coinbase::MinerData, + merkle::calc_hash_merkle_root, + tx::COINBASE_TRANSACTION_INDEX, +}; +use kaspa_core::{ + debug, + time::{unix_now, Stopwatch}, }; -use kaspa_core::{debug, time::unix_now}; pub(crate) struct BlockTemplateBuilder { policy: Policy, @@ -83,16 +90,16 @@ impl BlockTemplateBuilder { consensus: &dyn ConsensusApi, miner_data: &MinerData, transactions: Vec, + build_mode: TemplateBuildMode, ) -> BuilderResult { - debug!("Considering {} transactions for inclusion to new block", transactions.len()); - let mut selector = TransactionsSelector::new(self.policy.clone(), transactions); - let block_txs = selector.select_transactions(); - Ok(consensus.build_block_template(miner_data.clone(), block_txs)?) + let _sw = Stopwatch::<20>::with_threshold("build_block_template op"); + debug!("Considering {} transactions for a new block template", transactions.len()); + let selector = Box::new(TransactionsSelector::new(self.policy.clone(), transactions)); + Ok(consensus.build_block_template(miner_data.clone(), selector, build_mode)?) 
} /// modify_block_template clones an existing block template, modifies it to the requested coinbase data and updates the timestamp pub(crate) fn modify_block_template( - &self, consensus: &dyn ConsensusApi, new_miner_data: &MinerData, block_template_to_modify: &BlockTemplate, diff --git a/mining/src/block_template/model/tx.rs b/mining/src/block_template/model/tx.rs index 6dc95dc440..b0c7e3f56e 100644 --- a/mining/src/block_template/model/tx.rs +++ b/mining/src/block_template/model/tx.rs @@ -19,6 +19,7 @@ pub(crate) struct Candidate { /// Range start in the candidate list total_p space pub(crate) start: f64, + /// Range end in the candidate list total_p space pub(crate) end: f64, @@ -32,6 +33,7 @@ impl Candidate { } } +#[derive(Default)] pub(crate) struct CandidateList { pub(crate) candidates: Vec, pub(crate) total_p: f64, diff --git a/mining/src/block_template/selector.rs b/mining/src/block_template/selector.rs index 7123fdf852..b65126caf6 100644 --- a/mining/src/block_template/selector.rs +++ b/mining/src/block_template/selector.rs @@ -1,6 +1,6 @@ -use kaspa_core::trace; +use kaspa_core::{time::Stopwatch, trace}; use rand::Rng; -use std::{collections::HashMap, vec}; +use std::collections::HashMap; use crate::model::candidate_tx::CandidateTransaction; @@ -8,7 +8,11 @@ use super::{ model::tx::{CandidateList, SelectableTransaction, SelectableTransactions, TransactionIndex}, policy::Policy, }; -use kaspa_consensus_core::{subnets::SubnetworkId, tx::Transaction}; +use kaspa_consensus_core::{ + block::TemplateTransactionSelector, + subnets::SubnetworkId, + tx::{Transaction, TransactionId}, +}; /// ALPHA is a coefficient that defines how uniform the distribution of /// candidate transactions should be. A smaller alpha makes the distribution @@ -33,21 +37,48 @@ pub(crate) struct TransactionsSelector { /// Indexes of selected transactions in stores selected_txs: Vec, + + /// Optional state for handling selection rejections. 
Maps from a selected tx id + /// to the index of the tx in the `transactions` vec + selected_txs_map: Option>, + + // Inner state of the selection process + candidate_list: CandidateList, + overall_rejections: usize, + used_count: usize, + used_p: f64, total_mass: u64, total_fees: u64, + gas_usage_map: HashMap, } impl TransactionsSelector { pub(crate) fn new(policy: Policy, mut transactions: Vec) -> Self { + let _sw = Stopwatch::<100>::with_threshold("TransactionsSelector::new op"); // Sort the transactions by subnetwork_id. transactions.sort_by(|a, b| a.tx.subnetwork_id.cmp(&b.tx.subnetwork_id)); // Create the object without selectable transactions - let mut selector = Self { policy, transactions, selectable_txs: vec![], selected_txs: vec![], total_mass: 0, total_fees: 0 }; + let mut selector = Self { + policy, + transactions, + selectable_txs: Default::default(), + selected_txs: Default::default(), + selected_txs_map: None, + candidate_list: Default::default(), + overall_rejections: 0, + used_count: 0, + used_p: 0.0, + total_mass: 0, + total_fees: 0, + gas_usage_map: Default::default(), + }; // Create the selectable transactions selector.selectable_txs = selector.transactions.iter().map(|x| SelectableTransaction::new(selector.calc_tx_value(x), 0, ALPHA)).collect(); + // Prepare the initial candidate list + selector.candidate_list = CandidateList::new(&selector.selectable_txs); selector } @@ -73,31 +104,28 @@ impl TransactionsSelector { /// and appends the ones that will be included in the next block into /// selected_txs. 
pub(crate) fn select_transactions(&mut self) -> Vec { + let _sw = Stopwatch::<15>::with_threshold("select_transaction op"); let mut rng = rand::thread_rng(); - self.reset(); - let mut candidate_list = CandidateList::new(&self.selectable_txs); - let mut used_count = 0; - let mut used_p = 0.0; - let mut gas_usage_map: HashMap = HashMap::new(); + self.reset_selection(); - while candidate_list.candidates.len() - used_count > 0 { + while self.candidate_list.candidates.len() - self.used_count > 0 { // Rebalance the candidates if it's required - if used_p >= REBALANCE_THRESHOLD * candidate_list.total_p { - candidate_list = candidate_list.rebalanced(&self.selectable_txs); - used_count = 0; - used_p = 0.0; + if self.used_p >= REBALANCE_THRESHOLD * self.candidate_list.total_p { + self.candidate_list = self.candidate_list.rebalanced(&self.selectable_txs); + self.used_count = 0; + self.used_p = 0.0; // Break if we now ran out of transactions - if candidate_list.is_empty() { + if self.candidate_list.is_empty() { break; } } // Select a candidate tx at random - let r = rng.gen::() * candidate_list.total_p; - let selected_candidate_idx = candidate_list.find(r); - let selected_candidate = candidate_list.candidates.get_mut(selected_candidate_idx).unwrap(); + let r = rng.gen::() * self.candidate_list.total_p; + let selected_candidate_idx = self.candidate_list.find(r); + let selected_candidate = self.candidate_list.candidates.get_mut(selected_candidate_idx).unwrap(); // If is_marked_for_deletion is set, it means we got a collision. // Ignore and select another Tx. @@ -118,7 +146,7 @@ impl TransactionsSelector { // Also check for overflow. 
if !selected_tx.tx.subnetwork_id.is_builtin_or_native() { let subnetwork_id = selected_tx.tx.subnetwork_id.clone(); - let gas_usage = gas_usage_map.entry(subnetwork_id.clone()).or_insert(0); + let gas_usage = self.gas_usage_map.entry(subnetwork_id.clone()).or_insert(0); let tx_gas = selected_tx.tx.gas; let next_gas_usage = (*gas_usage).checked_add(tx_gas); if next_gas_usage.is_none() || next_gas_usage.unwrap() > self.selectable_txs[selected_candidate.index].gas_limit { @@ -127,19 +155,19 @@ impl TransactionsSelector { selected_tx.tx.id(), subnetwork_id ); - for i in selected_candidate_idx..candidate_list.candidates.len() { - let transaction_index = candidate_list.candidates[i].index; - // candidateTxs are ordered by subnetwork, so we can safely assume - // that transactions after subnetworkID will not be relevant. + for i in selected_candidate_idx..self.candidate_list.candidates.len() { + let transaction_index = self.candidate_list.candidates[i].index; + // Candidate txs are ordered by subnetwork, so we can safely assume + // that transactions after subnetwork_id will not be relevant. 
if subnetwork_id < self.transactions[transaction_index].tx.subnetwork_id { break; } - let current = candidate_list.candidates.get_mut(i).unwrap(); + let current = self.candidate_list.candidates.get_mut(i).unwrap(); // Mark for deletion current.is_marked_for_deletion = true; - used_count += 1; - used_p += self.selectable_txs[transaction_index].p; + self.used_count += 1; + self.used_p += self.selectable_txs[transaction_index].p; } continue; } @@ -155,15 +183,15 @@ impl TransactionsSelector { self.total_fees += selected_tx.calculated_fee; trace!( - "Adding tx {0} (feePerMegaGram {1})", + "Adding tx {0} (fee per megagram: {1})", selected_tx.tx.id(), selected_tx.calculated_fee * 1_000_000 / selected_tx.calculated_mass ); // Mark for deletion selected_candidate.is_marked_for_deletion = true; - used_count += 1; - used_p += self.selectable_txs[selected_candidate.index].p; + self.used_count += 1; + self.used_p += self.selectable_txs[selected_candidate.index].p; } self.selected_txs.sort(); @@ -176,9 +204,12 @@ impl TransactionsSelector { self.selected_txs.iter().map(|x| self.transactions[*x].tx.as_ref().clone()).collect() } - fn reset(&mut self) { + fn reset_selection(&mut self) { assert_eq!(self.transactions.len(), self.selectable_txs.len()); - self.selected_txs = Vec::with_capacity(self.transactions.len()); + self.selected_txs.clear(); + // TODO: consider to min with the approximated amount of txs which fit into max block mass + self.selected_txs.reserve_exact(self.transactions.len()); + self.selected_txs_map = None; } /// calc_tx_value calculates a value to be used in transaction selection. 
@@ -198,7 +229,95 @@ impl TransactionsSelector { } } +impl TemplateTransactionSelector for TransactionsSelector { + fn select_transactions(&mut self) -> Vec { + self.select_transactions() + } + + fn reject_selection(&mut self, tx_id: TransactionId) { + let selected_txs_map = self + .selected_txs_map + // We lazy-create the map only when there are actual rejections + .get_or_insert_with(|| self.selected_txs.iter().map(|&x| (self.transactions[x].tx.id(), x)).collect()); + let tx_index = selected_txs_map.remove(&tx_id).expect("only previously selected txs can be rejected (and only once)"); + let tx = &self.transactions[tx_index]; + self.total_mass -= tx.calculated_mass; + self.total_fees -= tx.calculated_fee; + if !tx.tx.subnetwork_id.is_builtin_or_native() { + *self.gas_usage_map.get_mut(&tx.tx.subnetwork_id).expect("previously selected txs have an entry") -= tx.tx.gas; + } + self.overall_rejections += 1; + } + + fn is_successful(&self) -> bool { + const SUFFICIENT_MASS_THRESHOLD: f64 = 0.8; + const LOW_REJECTION_FRACTION: f64 = 0.2; + + // We consider the operation successful if either mass occupation is above 80% or rejection rate is below 20% + self.overall_rejections == 0 + || (self.total_mass as f64) > self.policy.max_block_mass as f64 * SUFFICIENT_MASS_THRESHOLD + || (self.overall_rejections as f64) < self.transactions.len() as f64 * LOW_REJECTION_FRACTION + } +} + #[cfg(test)] mod tests { - // TODO: add unit-tests for select_transactions + use super::*; + use itertools::Itertools; + use kaspa_consensus_core::{ + constants::{MAX_TX_IN_SEQUENCE_NUM, SOMPI_PER_KASPA, TX_VERSION}, + mass::transaction_estimated_serialized_size, + subnets::SUBNETWORK_ID_NATIVE, + tx::{Transaction, TransactionId, TransactionInput, TransactionOutpoint, TransactionOutput}, + }; + use kaspa_txscript::{pay_to_script_hash_signature_script, test_helpers::op_true_script}; + use std::{collections::HashSet, sync::Arc}; + + use crate::{mempool::config::DEFAULT_MINIMUM_RELAY_TRANSACTION_FEE, 
model::candidate_tx::CandidateTransaction}; + + #[test] + fn test_reject_transaction() { + const TX_INITIAL_COUNT: usize = 1_000; + + // Create a vector of transactions differing by output value so they have unique ids + let transactions = (0..TX_INITIAL_COUNT).map(|i| create_transaction(SOMPI_PER_KASPA * (i + 1) as u64)).collect_vec(); + let policy = Policy::new(100_000); + let mut selector = TransactionsSelector::new(policy, transactions); + let (mut kept, mut rejected) = (HashSet::new(), HashSet::new()); + let mut reject_count = 32; + for i in 0..10 { + let selected_txs = selector.select_transactions(); + if i > 0 { + assert_eq!( + selected_txs.len(), + reject_count, + "subsequent select calls are expected to only refill the previous rejections" + ); + reject_count /= 2; + } + for tx in selected_txs.iter() { + kept.insert(tx.id()).then_some(()).expect("selected txs should never repeat themselves"); + assert!(!rejected.contains(&tx.id()), "selected txs should never repeat themselves"); + } + selected_txs.iter().take(reject_count).for_each(|x| { + selector.reject_selection(x.id()); + kept.remove(&x.id()).then_some(()).expect("was just inserted"); + rejected.insert(x.id()).then_some(()).expect("was just verified"); + }); + } + } + + fn create_transaction(value: u64) -> CandidateTransaction { + let previous_outpoint = TransactionOutpoint::new(TransactionId::default(), 0); + let (script_public_key, redeem_script) = op_true_script(); + let signature_script = pay_to_script_hash_signature_script(redeem_script, vec![]).expect("the redeem script is canonical"); + + let input = TransactionInput::new(previous_outpoint, signature_script, MAX_TX_IN_SEQUENCE_NUM, 1); + let output = TransactionOutput::new(value - DEFAULT_MINIMUM_RELAY_TRANSACTION_FEE, script_public_key); + let tx = Arc::new(Transaction::new(TX_VERSION, vec![input], vec![output], 0, SUBNETWORK_ID_NATIVE, 0, vec![])); + let calculated_mass = transaction_estimated_serialized_size(&tx); + let calculated_fee = 
DEFAULT_MINIMUM_RELAY_TRANSACTION_FEE; + + CandidateTransaction { tx, calculated_fee, calculated_mass } + } } diff --git a/mining/src/cache.rs b/mining/src/cache.rs index f56c12659a..7db6e4f65d 100644 --- a/mining/src/cache.rs +++ b/mining/src/cache.rs @@ -1,44 +1,75 @@ use kaspa_consensus_core::block::BlockTemplate; use kaspa_core::time::unix_now; -use std::sync::Arc; +use parking_lot::{Mutex, MutexGuard}; +use std::sync::{ + atomic::{AtomicBool, Ordering::SeqCst}, + Arc, +}; /// CACHE_LIFETIME indicates the default duration in milliseconds after which the cached data expires. const DEFAULT_CACHE_LIFETIME: u64 = 1_000; -pub(crate) struct BlockTemplateCache { - /// Time, in milliseconds, when the cache was last updated +pub(crate) struct Inner { + /// Time, in milliseconds, at which the cache was last updated last_update_time: u64, + + /// The optional template block_template: Option>, /// Duration in milliseconds after which the cached data expires cache_lifetime: u64, } -impl BlockTemplateCache { +impl Inner { pub(crate) fn new(cache_lifetime: Option) -> Self { let cache_lifetime = cache_lifetime.unwrap_or(DEFAULT_CACHE_LIFETIME); Self { last_update_time: 0, block_template: None, cache_lifetime } } - pub(crate) fn clear(&mut self) { - // The cache timer is reset to 0 so its lifetime is expired. 
- self.last_update_time = 0; + fn clear(&mut self) { self.block_template = None; } pub(crate) fn get_immutable_cached_template(&self) -> Option> { let now = unix_now(); // We verify that `now > last update` in order to avoid theoretic clock change bugs - if now < self.last_update_time || now - self.last_update_time > self.cache_lifetime { + if now > self.last_update_time + self.cache_lifetime || now < self.last_update_time { None } else { - Some(self.block_template.as_ref().unwrap().clone()) + self.block_template.clone() } } pub(crate) fn set_immutable_cached_template(&mut self, block_template: BlockTemplate) -> Arc { self.last_update_time = unix_now(); - self.block_template = Some(Arc::new(block_template)); - self.block_template.as_ref().unwrap().clone() + let block_template = Arc::new(block_template); + self.block_template = Some(block_template.clone()); + block_template + } +} + +pub(crate) struct BlockTemplateCache { + inner: Mutex, + clear_flag: AtomicBool, +} + +impl BlockTemplateCache { + pub(crate) fn new(cache_lifetime: Option) -> Self { + Self { inner: Mutex::new(Inner::new(cache_lifetime)), clear_flag: AtomicBool::new(false) } + } + + pub(crate) fn clear(&self) { + // We avoid blocking on the mutex for clear but rather signal to the next + // thread acquiring the lock to clear the template + self.clear_flag.store(true, SeqCst) + } + + pub(crate) fn lock(&self) -> MutexGuard { + let mut guard = self.inner.lock(); + if self.clear_flag.swap(false, SeqCst) { + // If clear was signaled, perform the actual clear + guard.clear(); + } + guard } } diff --git a/mining/src/lib.rs b/mining/src/lib.rs index f0f4028642..d4589a65fe 100644 --- a/mining/src/lib.rs +++ b/mining/src/lib.rs @@ -1,3 +1,10 @@ +use std::{ + sync::atomic::{AtomicU64, Ordering}, + time::{Duration, Instant}, +}; + +use mempool::tx::Priority; + mod block_template; pub(crate) mod cache; pub mod errors; @@ -5,6 +12,144 @@ pub mod manager; mod manager_tests; pub mod mempool; pub mod model; +pub mod 
monitor; #[cfg(test)] pub mod testutils; + +pub struct MiningCounters { + pub creation_time: Instant, + + // Counters + pub high_priority_tx_counts: AtomicU64, + pub low_priority_tx_counts: AtomicU64, + pub block_tx_counts: AtomicU64, + pub tx_accepted_counts: AtomicU64, + pub input_counts: AtomicU64, + pub output_counts: AtomicU64, + + // Samples + pub ready_txs_sample: AtomicU64, + pub txs_sample: AtomicU64, + pub orphans_sample: AtomicU64, + pub accepted_sample: AtomicU64, +} + +impl Default for MiningCounters { + fn default() -> Self { + Self { + creation_time: Instant::now(), + high_priority_tx_counts: Default::default(), + low_priority_tx_counts: Default::default(), + block_tx_counts: Default::default(), + tx_accepted_counts: Default::default(), + input_counts: Default::default(), + output_counts: Default::default(), + ready_txs_sample: Default::default(), + txs_sample: Default::default(), + orphans_sample: Default::default(), + accepted_sample: Default::default(), + } + } +} + +impl MiningCounters { + pub fn snapshot(&self) -> MempoolCountersSnapshot { + MempoolCountersSnapshot { + elapsed_time: (Instant::now() - self.creation_time), + high_priority_tx_counts: self.high_priority_tx_counts.load(Ordering::Relaxed), + low_priority_tx_counts: self.low_priority_tx_counts.load(Ordering::Relaxed), + block_tx_counts: self.block_tx_counts.load(Ordering::Relaxed), + tx_accepted_counts: self.tx_accepted_counts.load(Ordering::Relaxed), + input_counts: self.input_counts.load(Ordering::Relaxed), + output_counts: self.output_counts.load(Ordering::Relaxed), + ready_txs_sample: self.ready_txs_sample.load(Ordering::Relaxed), + txs_sample: self.txs_sample.load(Ordering::Relaxed), + orphans_sample: self.orphans_sample.load(Ordering::Relaxed), + accepted_sample: self.accepted_sample.load(Ordering::Relaxed), + } + } + + pub fn increase_tx_counts(&self, value: u64, priority: Priority) { + match priority { + Priority::Low => { + self.low_priority_tx_counts.fetch_add(value, 
Ordering::Relaxed); + } + Priority::High => { + self.high_priority_tx_counts.fetch_add(value, Ordering::Relaxed); + } + } + } +} + +#[derive(Debug, PartialEq, Eq)] +pub struct MempoolCountersSnapshot { + pub elapsed_time: Duration, + pub high_priority_tx_counts: u64, + pub low_priority_tx_counts: u64, + pub block_tx_counts: u64, + pub tx_accepted_counts: u64, + pub input_counts: u64, + pub output_counts: u64, + pub ready_txs_sample: u64, + pub txs_sample: u64, + pub orphans_sample: u64, + pub accepted_sample: u64, +} + +impl MempoolCountersSnapshot { + pub fn in_tx_counts(&self) -> u64 { + self.high_priority_tx_counts + self.low_priority_tx_counts + } + + /// Indicates whether this snapshot has any TPS activity which is worth logging + pub fn has_tps_activity(&self) -> bool { + self.tx_accepted_counts > 0 || self.block_tx_counts > 0 || self.low_priority_tx_counts > 0 || self.high_priority_tx_counts > 0 + } + + /// Returns an estimate of _Unique-TPS_, i.e. the number of unique transactions per second on average + /// (excluding coinbase transactions) + pub fn u_tps(&self) -> f64 { + let elapsed = self.elapsed_time.as_secs_f64(); + if elapsed != 0f64 { + self.tx_accepted_counts as f64 / elapsed + } else { + 0f64 + } + } + + /// Returns an estimate to the _Effective-TPS_ fraction which is a measure of how much of DAG capacity + /// is utilized compared to the number of available mempool transactions. For instance a max + /// value of `1.0` indicates that we cannot do any better in terms of throughput vs. current + /// demand. A value close to `0.0` means that DAG capacity is mostly filled with duplicate + /// transactions even though the mempool (demand) offers a much larger amount of unique transactions. 
+ pub fn e_tps(&self) -> f64 { + let accepted_txs = u64::min(self.ready_txs_sample, self.tx_accepted_counts); // The throughput + let total_txs = u64::min(self.ready_txs_sample, self.block_tx_counts); // The min of demand and capacity + if total_txs > 0 { + accepted_txs as f64 / total_txs as f64 + } else { + 1f64 // No demand means we are 100% efficient + } + } +} + +impl core::ops::Sub for &MempoolCountersSnapshot { + type Output = MempoolCountersSnapshot; + + fn sub(self, rhs: Self) -> Self::Output { + Self::Output { + elapsed_time: self.elapsed_time.checked_sub(rhs.elapsed_time).unwrap_or_default(), + high_priority_tx_counts: self.high_priority_tx_counts.checked_sub(rhs.high_priority_tx_counts).unwrap_or_default(), + low_priority_tx_counts: self.low_priority_tx_counts.checked_sub(rhs.low_priority_tx_counts).unwrap_or_default(), + block_tx_counts: self.block_tx_counts.checked_sub(rhs.block_tx_counts).unwrap_or_default(), + tx_accepted_counts: self.tx_accepted_counts.checked_sub(rhs.tx_accepted_counts).unwrap_or_default(), + input_counts: self.input_counts.checked_sub(rhs.input_counts).unwrap_or_default(), + output_counts: self.output_counts.checked_sub(rhs.output_counts).unwrap_or_default(), + ready_txs_sample: (self.ready_txs_sample + rhs.ready_txs_sample) / 2, + txs_sample: (self.txs_sample + rhs.txs_sample) / 2, + orphans_sample: (self.orphans_sample + rhs.orphans_sample) / 2, + accepted_sample: (self.accepted_sample + rhs.accepted_sample) / 2, + } + } +} diff --git a/mining/src/manager.rs b/mining/src/manager.rs index b8e448f55b..e322865de7 100644 --- a/mining/src/manager.rs +++ b/mining/src/manager.rs @@ -1,36 +1,43 @@ -// TODO: add integration tests - -use std::sync::Arc; - use crate::{ block_template::{builder::BlockTemplateBuilder, errors::BuilderError}, cache::BlockTemplateCache, errors::MiningManagerResult, mempool::{ config::Config, + model::tx::{MempoolTransaction, TxRemovalReason}, + populate_entries_and_try_validate::{ + 
populate_mempool_transactions_in_parallel, validate_mempool_transaction, validate_mempool_transactions_in_parallel, + }, tx::{Orphan, Priority}, Mempool, }, model::{ candidate_tx::CandidateTransaction, owner_txs::{GroupedOwnerTransactions, ScriptPublicKeySet}, + topological_sort::IntoIterTopologically, }, + MiningCounters, }; +use itertools::Itertools; use kaspa_consensus_core::{ api::ConsensusApi, - block::BlockTemplate, + block::{BlockTemplate, TemplateBuildMode}, coinbase::MinerData, - errors::block::RuleError, + errors::{block::RuleError as BlockRuleError, tx::TxRuleError}, tx::{MutableTransaction, Transaction, TransactionId, TransactionOutput}, }; use kaspa_consensusmanager::{spawn_blocking, ConsensusProxy}; -use kaspa_core::error; -use parking_lot::{Mutex, RwLock}; +use kaspa_core::{debug, error, info, time::Stopwatch, warn}; +use kaspa_mining_errors::{manager::MiningManagerError, mempool::RuleError}; +use parking_lot::RwLock; +use std::sync::Arc; +use tokio::sync::mpsc::UnboundedSender; pub struct MiningManager { - block_template_builder: BlockTemplateBuilder, - block_template_cache: Mutex, - pub(crate) mempool: RwLock, + config: Arc, + block_template_cache: BlockTemplateCache, + mempool: RwLock, + counters: Arc, } impl MiningManager { @@ -39,9 +46,10 @@ impl MiningManager { relay_non_std_transactions: bool, max_block_mass: u64, cache_lifetime: Option, + counters: Arc, ) -> Self { let config = Config::build_default(target_time_per_block, relay_non_std_transactions, max_block_mass); - Self::with_config(config, cache_lifetime) + Self::with_config(config, cache_lifetime, counters) } pub fn new_with_spam_blocking_option( @@ -50,6 +58,7 @@ impl MiningManager { relay_non_std_transactions: bool, max_block_mass: u64, cache_lifetime: Option, + counters: Arc, ) -> Self { let config = Config::build_default_with_spam_blocking_option( block_spam_txs, @@ -57,14 +66,14 @@ impl MiningManager { relay_non_std_transactions, max_block_mass, ); - Self::with_config(config, 
cache_lifetime) + Self::with_config(config, cache_lifetime, counters) } - pub(crate) fn with_config(config: Config, cache_lifetime: Option) -> Self { - let block_template_builder = BlockTemplateBuilder::new(config.maximum_mass_per_block); - let mempool = RwLock::new(Mempool::new(config)); - let block_template_cache = Mutex::new(BlockTemplateCache::new(cache_lifetime)); - Self { block_template_builder, block_template_cache, mempool } + pub(crate) fn with_config(config: Config, cache_lifetime: Option, counters: Arc) -> Self { + let config = Arc::new(config); + let mempool = RwLock::new(Mempool::new(config.clone(), counters.clone())); + let block_template_cache = BlockTemplateCache::new(cache_lifetime); + Self { config, block_template_cache, mempool, counters } } pub fn get_block_template(&self, consensus: &dyn ConsensusApi, miner_data: &MinerData) -> MiningManagerResult { @@ -79,7 +88,7 @@ impl MiningManager { } // Miner data is new -- make the minimum changes required // Note the call returns a modified clone of the cached block template - let block_template = self.block_template_builder.modify_block_template(consensus, miner_data, &immutable_template)?; + let block_template = BlockTemplateBuilder::modify_block_template(consensus, miner_data, &immutable_template)?; // No point in updating cache since we have no reason to believe this coinbase will be used more // than the previous one, and we want to maintain the original template caching time @@ -90,17 +99,78 @@ impl MiningManager { // We avoid passing a mempool ref to blockTemplateBuilder by calling // mempool.BlockCandidateTransactions and mempool.RemoveTransactions here. // We remove recursion seen in blockTemplateBuilder.BuildBlockTemplate here. 
+ debug!("Building a new block template..."); + let _swo = Stopwatch::<22>::with_threshold("build_block_template full loop"); + let mut attempts: u64 = 0; loop { + attempts += 1; + let transactions = self.block_candidate_transactions(); - match self.block_template_builder.build_block_template(consensus, miner_data, transactions) { + let block_template_builder = BlockTemplateBuilder::new(self.config.maximum_mass_per_block); + let build_mode = if attempts < self.config.maximum_build_block_template_attempts { + TemplateBuildMode::Standard + } else { + TemplateBuildMode::Infallible + }; + match block_template_builder.build_block_template(consensus, miner_data, transactions, build_mode) { Ok(block_template) => { let block_template = cache_lock.set_immutable_cached_template(block_template); + match attempts { + 1 => { + debug!( + "Built a new block template with {} transactions in {:#?}", + block_template.block.transactions.len(), + _swo.elapsed() + ); + } + 2 => { + debug!( + "Built a new block template with {} transactions at second attempt in {:#?}", + block_template.block.transactions.len(), + _swo.elapsed() + ); + } + n => { + debug!( + "Built a new block template with {} transactions in {} attempts totaling {:#?}", + block_template.block.transactions.len(), + n, + _swo.elapsed() + ); + } + } return Ok(block_template.as_ref().clone()); } - Err(BuilderError::ConsensusError(RuleError::InvalidTransactionsInNewBlock(invalid_transactions))) => { + Err(BuilderError::ConsensusError(BlockRuleError::InvalidTransactionsInNewBlock(invalid_transactions))) => { + let mut missing_outpoint: usize = 0; + let mut invalid: usize = 0; + let mut mempool_write = self.mempool.write(); - invalid_transactions.iter().for_each(|(x, _)| { - let removal_result = mempool_write.remove_transaction(x, true); + invalid_transactions.iter().for_each(|(x, err)| { + // On missing outpoints, the most likely is that the tx was already in a block accepted by + // the consensus but not yet processed by 
handle_new_block_transactions(). Another possibility + // is a double spend. In both cases, we simply remove the transaction but keep its redeemers. + // Those will either be valid in a next block template or invalidated if it's a double spend. + // + // If the redeemers of a transaction accepted in consensus but not yet handled in mempool were + // removed, it would lead to having subsequently submitted children transactions of the removed + // redeemers being unexpectedly either orphaned or rejected in case orphans are disallowed. + // + // For all other errors, we do remove the redeemers. + + let removal_result = if *err == TxRuleError::MissingTxOutpoints { + missing_outpoint += 1; + mempool_write.remove_transaction(x, false, TxRemovalReason::Muted, "") + } else { + invalid += 1; + warn!("Remove per BBT invalid transaction and descendants"); + mempool_write.remove_transaction( + x, + true, + TxRemovalReason::InvalidInBlockTemplate, + format!(" error: {}", err).as_str(), + ) + }; if let Err(err) = removal_result { // Original golang comment: // mempool.remove_transactions might return errors in situations that are perfectly fine in this context. @@ -110,8 +180,15 @@ impl MiningManager { error!("Error from mempool.remove_transactions: {:?}", err); } }); + drop(mempool_write); + + debug!( + "Building a new block template failed for {} txs missing outpoint and {} invalid txs", + missing_outpoint, invalid + ); } Err(err) => { + warn!("Building a new block template failed: {}", err); return Err(err)?; } } @@ -124,12 +201,12 @@ impl MiningManager { /// Clears the block template cache, forcing the next call to get_block_template to build a new block template. 
pub fn clear_block_template(&self) { - self.block_template_cache.lock().clear(); + self.block_template_cache.clear(); } #[cfg(test)] - pub(crate) fn block_template_builder(&self) -> &BlockTemplateBuilder { - &self.block_template_builder + pub(crate) fn block_template_builder(&self) -> BlockTemplateBuilder { + BlockTemplateBuilder::new(self.config.maximum_mass_per_block) } /// validate_and_insert_transaction validates the given transaction, and @@ -144,11 +221,10 @@ impl MiningManager { priority: Priority, orphan: Orphan, ) -> MiningManagerResult>> { - Ok(self.mempool.write().validate_and_insert_transaction(consensus, transaction, priority, orphan)?) + self.validate_and_insert_mutable_transaction(consensus, MutableTransaction::from_tx(transaction), priority, orphan) } - /// Exposed only for tests. Ordinary users should let the mempool create the mutable tx internally - #[cfg(test)] + /// Exposed only for tests. Ordinary users should call `validate_and_insert_transaction` instead pub fn validate_and_insert_mutable_transaction( &self, consensus: &dyn ConsensusApi, @@ -156,7 +232,202 @@ impl MiningManager { priority: Priority, orphan: Orphan, ) -> MiningManagerResult>> { - Ok(self.mempool.write().validate_and_insert_mutable_transaction(consensus, transaction, priority, orphan)?) + // read lock on mempool + let mut transaction = self.mempool.read().pre_validate_and_populate_transaction(consensus, transaction)?; + // no lock on mempool + let validation_result = validate_mempool_transaction(consensus, &mut transaction); + // write lock on mempool + let mut mempool = self.mempool.write(); + if let Some(accepted_transaction) = + mempool.post_validate_and_insert_transaction(consensus, validation_result, transaction, priority, orphan)? 
+ { + let unorphaned_transactions = mempool.get_unorphaned_transactions_after_accepted_transaction(&accepted_transaction); + drop(mempool); + + // The capacity used here may be exceeded since accepted unorphaned transaction may themselves unorphan other transactions. + let mut accepted_transactions = Vec::with_capacity(unorphaned_transactions.len() + 1); + // We include the original accepted transaction as well + accepted_transactions.push(accepted_transaction); + accepted_transactions.extend(self.validate_and_insert_unorphaned_transactions(consensus, unorphaned_transactions)); + self.counters.increase_tx_counts(1, priority); + + Ok(accepted_transactions) + } else { + Ok(vec![]) + } + } + + fn validate_and_insert_unorphaned_transactions( + &self, + consensus: &dyn ConsensusApi, + mut incoming_transactions: Vec, + ) -> Vec> { + // The capacity used here may be exceeded (see next comment). + let mut accepted_transactions = Vec::with_capacity(incoming_transactions.len()); + // We loop as long as incoming unorphaned transactions do unorphan other transactions when they + // get validated and inserted into the mempool. + while !incoming_transactions.is_empty() { + // Since the consensus validation requires a slice of MutableTransaction, we destructure the vector of + // MempoolTransaction into 2 distinct vectors holding respectively the needed MutableTransaction and Priority. + let (mut transactions, priorities): (Vec, Vec) = + incoming_transactions.into_iter().map(|x| (x.mtx, x.priority)).unzip(); + + // no lock on mempool + // We process the transactions by chunks of max block mass to prevent locking the virtual processor for too long. 
+ let mut lower_bound: usize = 0; + let mut validation_results = Vec::with_capacity(transactions.len()); + while let Some(upper_bound) = self.next_transaction_chunk_upper_bound(&transactions, lower_bound) { + assert!(lower_bound < upper_bound, "the chunk is never empty"); + validation_results + .extend(validate_mempool_transactions_in_parallel(consensus, &mut transactions[lower_bound..upper_bound])); + lower_bound = upper_bound; + } + assert_eq!(transactions.len(), validation_results.len(), "every transaction should have a matching validation result"); + + // write lock on mempool + let mut mempool = self.mempool.write(); + incoming_transactions = transactions + .into_iter() + .zip(priorities) + .zip(validation_results) + .flat_map(|((transaction, priority), validation_result)| { + let orphan_id = transaction.id(); + match mempool.post_validate_and_insert_transaction( + consensus, + validation_result, + transaction, + priority, + Orphan::Forbidden, + ) { + Ok(Some(accepted_transaction)) => { + accepted_transactions.push(accepted_transaction.clone()); + self.counters.increase_tx_counts(1, priority); + mempool.get_unorphaned_transactions_after_accepted_transaction(&accepted_transaction) + } + Ok(None) => vec![], + Err(err) => { + debug!("Failed to unorphan transaction {0} due to rule error: {1}", orphan_id, err); + vec![] + } + } + }) + .collect::>(); + drop(mempool); + } + accepted_transactions + } + + /// Validates a batch of transactions, handling iteratively only the independent ones, and + /// adds those to the set of known transactions that have not yet been added to any block. + /// + /// Returns transactions that where unorphaned following the insertion of the provided + /// transactions. The returned transactions are clones of objects owned by the mempool. 
+ pub fn validate_and_insert_transaction_batch( + &self, + consensus: &dyn ConsensusApi, + transactions: Vec, + priority: Priority, + orphan: Orphan, + ) -> Vec>> { + const TRANSACTION_CHUNK_SIZE: usize = 250; + + // The capacity used here may be exceeded since accepted transactions may unorphan other transactions. + let mut insert_results: Vec>> = Vec::with_capacity(transactions.len()); + let mut unorphaned_transactions = vec![]; + let _swo = Stopwatch::<80>::with_threshold("validate_and_insert_transaction_batch topological_sort op"); + let sorted_transactions = transactions.into_iter().map(MutableTransaction::from_tx).topological_into_iter(); + drop(_swo); + + // read lock on mempool + // Here, we simply log and drop all erroneous transactions since the caller doesn't care about those anyway + let mut transactions = Vec::with_capacity(sorted_transactions.len()); + for chunk in &sorted_transactions.chunks(TRANSACTION_CHUNK_SIZE) { + let mempool = self.mempool.read(); + let txs = chunk.filter_map(|tx| { + let transaction_id = tx.id(); + match mempool.pre_validate_and_populate_transaction(consensus, tx) { + Ok(tx) => Some(tx), + Err(RuleError::RejectAlreadyAccepted(transaction_id)) => { + debug!("Ignoring already accepted transaction {}", transaction_id); + None + } + Err(RuleError::RejectDuplicate(transaction_id)) => { + debug!("Ignoring transaction already in the mempool {}", transaction_id); + None + } + Err(RuleError::RejectDuplicateOrphan(transaction_id)) => { + debug!("Ignoring transaction already in the orphan pool {}", transaction_id); + None + } + Err(err) => { + debug!("Failed to pre validate transaction {0} due to rule error: {1}", transaction_id, err); + insert_results.push(Err(MiningManagerError::MempoolError(err))); + None + } + } + }); + transactions.extend(txs); + } + + // no lock on mempool + // We process the transactions by chunks of max block mass to prevent locking the virtual processor for too long. 
+ let mut lower_bound: usize = 0; + let mut validation_results = Vec::with_capacity(transactions.len()); + while let Some(upper_bound) = self.next_transaction_chunk_upper_bound(&transactions, lower_bound) { + assert!(lower_bound < upper_bound, "the chunk is never empty"); + validation_results + .extend(validate_mempool_transactions_in_parallel(consensus, &mut transactions[lower_bound..upper_bound])); + lower_bound = upper_bound; + } + assert_eq!(transactions.len(), validation_results.len(), "every transaction should have a matching validation result"); + + // write lock on mempool + // Here again, transactions failing post validation are logged and dropped + for chunk in &transactions.into_iter().zip(validation_results).chunks(TRANSACTION_CHUNK_SIZE) { + let mut mempool = self.mempool.write(); + let txs = chunk.flat_map(|(transaction, validation_result)| { + let transaction_id = transaction.id(); + match mempool.post_validate_and_insert_transaction(consensus, validation_result, transaction, priority, orphan) { + Ok(Some(accepted_transaction)) => { + insert_results.push(Ok(accepted_transaction.clone())); + self.counters.increase_tx_counts(1, priority); + mempool.get_unorphaned_transactions_after_accepted_transaction(&accepted_transaction) + } + Ok(None) => { + // Either orphaned or already existing in the mempool + vec![] + } + Err(err) => { + debug!("Failed to post validate transaction {0} due to rule error: {1}", transaction_id, err); + insert_results.push(Err(MiningManagerError::MempoolError(err))); + vec![] + } + } + }); + unorphaned_transactions.extend(txs); + } + + insert_results + .extend(self.validate_and_insert_unorphaned_transactions(consensus, unorphaned_transactions).into_iter().map(Ok)); + insert_results + } + + fn next_transaction_chunk_upper_bound(&self, transactions: &[MutableTransaction], lower_bound: usize) -> Option { + if lower_bound >= transactions.len() { + return None; + } + let mut mass = 0; + transactions[lower_bound..] 
+ .iter() + .position(|tx| { + mass += tx.calculated_mass.unwrap(); + mass >= self.config.maximum_mass_per_block + }) + // Make sure the upper bound is greater than the lower bound, allowing to handle a very unlikely, + // (if not impossible) case where the mass of a single transaction is greater than the maximum + // chunk mass. + .map(|relative_index| relative_index.max(1) + lower_bound) + .or(Some(transactions.len())) } /// Try to return a mempool transaction by its id. @@ -168,11 +439,13 @@ impl MiningManager { include_transaction_pool: bool, include_orphan_pool: bool, ) -> Option { + assert!(include_transaction_pool || include_orphan_pool, "at least one of either transactions or orphans must be included"); self.mempool.read().get_transaction(transaction_id, include_transaction_pool, include_orphan_pool) } /// Returns whether the mempool holds this transaction in any form. pub fn has_transaction(&self, transaction_id: &TransactionId, include_transaction_pool: bool, include_orphan_pool: bool) -> bool { + assert!(include_transaction_pool || include_orphan_pool, "at least one of either transactions or orphans must be included"); self.mempool.read().has_transaction(transaction_id, include_transaction_pool, include_orphan_pool) } @@ -181,7 +454,23 @@ impl MiningManager { include_transaction_pool: bool, include_orphan_pool: bool, ) -> (Vec, Vec) { - self.mempool.read().get_all_transactions(include_transaction_pool, include_orphan_pool) + const TRANSACTION_CHUNK_SIZE: usize = 1000; + assert!(include_transaction_pool || include_orphan_pool, "at least one of either transactions or orphans must be included"); + // read lock on mempool by transaction chunks + let transactions = if include_transaction_pool { + let transaction_ids = self.mempool.read().get_all_transaction_ids(true, false).0; + let mut transactions = Vec::with_capacity(self.mempool.read().transaction_count(true, false)); + for chunks in transaction_ids.chunks(TRANSACTION_CHUNK_SIZE) { + let mempool = 
self.mempool.read(); + transactions.extend(chunks.iter().filter_map(|x| mempool.get_transaction(x, true, false))); + } + transactions + } else { + vec![] + }; + // read lock on mempool + let orphans = if include_orphan_pool { self.mempool.read().get_all_transactions(false, true).1 } else { vec![] }; + (transactions, orphans) } /// get_transactions_by_addresses returns the sending and receiving transactions for @@ -194,24 +483,248 @@ impl MiningManager { include_transaction_pool: bool, include_orphan_pool: bool, ) -> GroupedOwnerTransactions { + assert!(include_transaction_pool || include_orphan_pool, "at least one of either transactions or orphans must be included"); + // TODO: break the monolithic lock self.mempool.read().get_transactions_by_addresses(script_public_keys, include_transaction_pool, include_orphan_pool) } pub fn transaction_count(&self, include_transaction_pool: bool, include_orphan_pool: bool) -> usize { + assert!(include_transaction_pool || include_orphan_pool, "at least one of either transactions or orphans must be included"); self.mempool.read().transaction_count(include_transaction_pool, include_orphan_pool) } pub fn handle_new_block_transactions( &self, consensus: &dyn ConsensusApi, + block_daa_score: u64, block_transactions: &[Transaction], ) -> MiningManagerResult>> { // TODO: should use tx acceptance data to verify that new block txs are actually accepted into virtual state. - Ok(self.mempool.write().handle_new_block_transactions(consensus, block_transactions)?) 
+ + // write lock on mempool + let unorphaned_transactions = self.mempool.write().handle_new_block_transactions(block_daa_score, block_transactions)?; + + // alternate no & write lock on mempool + let accepted_transactions = self.validate_and_insert_unorphaned_transactions(consensus, unorphaned_transactions); + + Ok(accepted_transactions) } - pub fn revalidate_high_priority_transactions(&self, consensus: &dyn ConsensusApi) -> MiningManagerResult> { - Ok(self.mempool.write().revalidate_high_priority_transactions(consensus)?) + pub fn expire_low_priority_transactions(&self, consensus: &dyn ConsensusApi) { + // very fine-grained write locks on mempool + debug!("<> Expiring low priority transactions..."); + + // orphan pool + if let Err(err) = self.mempool.write().expire_orphan_low_priority_transactions(consensus) { + warn!("Failed to expire transactions from orphan pool: {}", err); + } + + // accepted transaction cache + self.mempool.write().expire_accepted_transactions(consensus); + + // mempool + let expired_low_priority_transactions = self.mempool.write().collect_expired_low_priority_transactions(consensus); + for chunk in &expired_low_priority_transactions.iter().chunks(24) { + let mut mempool = self.mempool.write(); + chunk.into_iter().for_each(|tx| { + if let Err(err) = mempool.remove_transaction(tx, true, TxRemovalReason::Muted, "") { + warn!("Failed to remove transaction {} from mempool: {}", tx, err); + } + }); + } + match expired_low_priority_transactions.len() { + 0 => {} + 1 => debug!("Removed transaction ({}) {}", TxRemovalReason::Expired, expired_low_priority_transactions[0]), + n => debug!("Removed {} transactions ({}): {}...", n, TxRemovalReason::Expired, expired_low_priority_transactions[0]), + } + } + + pub fn revalidate_high_priority_transactions( + &self, + consensus: &dyn ConsensusApi, + transaction_ids_sender: UnboundedSender>, + ) { + const TRANSACTION_CHUNK_SIZE: usize = 1000; + + // read lock on mempool + // Prepare a vector with clones of 
high priority transactions found in the mempool + let mempool = self.mempool.read(); + let transaction_ids = mempool.all_transaction_ids_with_priority(Priority::High); + if transaction_ids.is_empty() { + debug!("<> Revalidating high priority transactions found no transactions"); + return; + } else { + debug!("<> Revalidating {} high priority transactions...", transaction_ids.len()); + } + drop(mempool); + // read lock on mempool by transaction chunks + let mut transactions = Vec::with_capacity(transaction_ids.len()); + for chunk in &transaction_ids.iter().chunks(TRANSACTION_CHUNK_SIZE) { + let mempool = self.mempool.read(); + transactions.extend(chunk.filter_map(|x| mempool.get_transaction(x, true, false))); + } + + let mut valid: usize = 0; + let mut accepted: usize = 0; + let mut other: usize = 0; + let mut missing_outpoint: usize = 0; + let mut invalid: usize = 0; + + // We process the transactions by level of dependency inside the batch. + // Doing so allows to remove all chained dependencies of rejected transactions. + let _swo = Stopwatch::<800>::with_threshold("revalidate topological_sort op"); + let sorted_transactions = transactions.topological_into_iter(); + drop(_swo); + + // read lock on mempool by transaction chunks + // As the revalidation process is no longer atomic, we filter the transactions ready for revalidation, + // keeping only the ones actually present in the mempool (see comment above). 
+ let _swo = Stopwatch::<900>::with_threshold("revalidate populate_mempool_entries op"); + let mut transactions = Vec::with_capacity(sorted_transactions.len()); + for chunk in &sorted_transactions.chunks(TRANSACTION_CHUNK_SIZE) { + let mempool = self.mempool.read(); + let txs = chunk.filter_map(|mut x| { + let transaction_id = x.id(); + if mempool.has_accepted_transaction(&transaction_id) { + accepted += 1; + None + } else if mempool.has_transaction(&transaction_id, true, false) { + x.clear_entries(); + mempool.populate_mempool_entries(&mut x); + match x.is_fully_populated() { + false => Some(x), + true => { + // If all entries are populated with mempool UTXOs, we already know the transaction is valid + valid += 1; + None + } + } + } else { + other += 1; + None + } + }); + transactions.extend(txs); + } + drop(_swo); + + // no lock on mempool + // We process the transactions by chunks of max block mass to prevent locking the virtual processor for too long. + let mut lower_bound: usize = 0; + let mut validation_results = Vec::with_capacity(transactions.len()); + while let Some(upper_bound) = self.next_transaction_chunk_upper_bound(&transactions, lower_bound) { + assert!(lower_bound < upper_bound, "the chunk is never empty"); + let _swo = Stopwatch::<60>::with_threshold("revalidate validate_mempool_transactions_in_parallel op"); + validation_results + .extend(populate_mempool_transactions_in_parallel(consensus, &mut transactions[lower_bound..upper_bound])); + drop(_swo); + lower_bound = upper_bound; + } + assert_eq!(transactions.len(), validation_results.len(), "every transaction should have a matching validation result"); + + // write lock on mempool + // Depending on the validation result, transactions are either accepted or removed + for chunk in &transactions.into_iter().zip(validation_results).chunks(TRANSACTION_CHUNK_SIZE) { + let mut valid_ids = Vec::with_capacity(TRANSACTION_CHUNK_SIZE); + let mut mempool = self.mempool.write(); + let _swo = 
Stopwatch::<60>::with_threshold("revalidate update_revalidated_transaction op"); + for (transaction, validation_result) in chunk { + let transaction_id = transaction.id(); + // Only consider transactions still being in the mempool since during the validation some might have been removed. + if mempool.update_revalidated_transaction(transaction) { + match validation_result { + Ok(()) => { + // A following transaction should not remove this one from the pool since we process in a topological order. + // Still, considering the (very unlikely) scenario of two high priority txs sandwiching a low one, where + // in this case topological order is not guaranteed since we only considered chained dependencies of + // high-priority transactions, we might wrongfully return as valid the id of a removed transaction. + // However, as only consequence, said transaction would then be advertised to registered peers and not be + // provided upon request. + valid_ids.push(transaction_id); + valid += 1; + } + Err(RuleError::RejectMissingOutpoint) => { + let transaction = mempool.get_transaction(&transaction_id, true, false).unwrap(); + let missing_txs = transaction + .entries + .iter() + .zip(transaction.tx.inputs.iter()) + .flat_map( + |(entry, input)| { + if entry.is_none() { + Some(input.previous_outpoint.transaction_id) + } else { + None + } + }, + ) + .collect::>(); + + // A transaction may have missing outpoints for legitimate reasons related to concurrency, like a race condition between + // an accepted block having not started yet or unfinished call to handle_new_block_transactions but already processed by + // the consensus and this ongoing call to revalidate. + // + // So we only remove the transaction and keep its redeemers in the mempool because we cannot be sure they are invalid, in + // fact in the race condition case they are valid regarding outpoints. 
+ let extra_info = match missing_txs.len() { + 0 => " but no missing tx!".to_string(), // this is never supposed to happen + 1 => format!(" missing tx {}", missing_txs[0]), + n => format!(" with {} missing txs {}..{}", n, missing_txs[0], missing_txs.last().unwrap()), + }; + + // This call cleanly removes the invalid transaction. + let result = mempool.remove_transaction( + &transaction_id, + false, + TxRemovalReason::RevalidationWithMissingOutpoints, + extra_info.as_str(), + ); + if let Err(err) = result { + warn!("Failed to remove transaction {} from mempool: {}", transaction_id, err); + } + missing_outpoint += 1; + } + Err(err) => { + // Rust rewrite note: + // The behavior changes here compared to the golang version. + // The failed revalidation is simply logged and the process continues. + warn!( + "Removing high priority transaction {0} and its redeemers, it failed revalidation with {1}", + transaction_id, err + ); + // This call cleanly removes the invalid transaction and its redeemers. 
+ let result = mempool.remove_transaction(&transaction_id, true, TxRemovalReason::Muted, ""); + if let Err(err) = result { + warn!("Failed to remove transaction {} from mempool: {}", transaction_id, err); + } + invalid += 1; + } + } + } else { + other += 1; + } + } + if !valid_ids.is_empty() { + let _ = transaction_ids_sender.send(valid_ids); + } + drop(_swo); + drop(mempool); + } + match accepted + missing_outpoint + invalid { + 0 => { + info!("Revalidated {} high priority transactions", valid); + } + _ => { + info!( + "Revalidated {} and removed {} high priority transactions (removals: {} accepted, {} missing outpoint, {} invalid)", + valid, + accepted + missing_outpoint + invalid, + accepted, + missing_outpoint, + invalid, + ); + } + } } /// is_transaction_output_dust returns whether or not the passed transaction output @@ -224,6 +737,18 @@ impl MiningManager { pub fn is_transaction_output_dust(&self, transaction_output: &TransactionOutput) -> bool { self.mempool.read().is_transaction_output_dust(transaction_output) } + + pub fn has_accepted_transaction(&self, transaction_id: &TransactionId) -> bool { + self.mempool.read().has_accepted_transaction(transaction_id) + } + + pub fn unaccepted_transactions(&self, transactions: Vec) -> Vec { + self.mempool.read().unaccepted_transactions(transactions) + } + + pub fn unknown_transactions(&self, transactions: Vec) -> Vec { + self.mempool.read().unknown_transactions(transactions) + } } /// Async proxy for the mining manager @@ -246,8 +771,7 @@ impl MiningManagerProxy { self.inner.clear_block_template() } - /// validate_and_insert_transaction validates the given transaction, and - /// adds it to the set of known transactions that have not yet been + /// Validates a transaction and adds it to the set of known transactions that have not yet been /// added to any block. /// /// The returned transactions are clones of objects owned by the mempool. 
@@ -261,16 +785,46 @@ impl MiningManagerProxy { consensus.clone().spawn_blocking(move |c| self.inner.validate_and_insert_transaction(c, transaction, priority, orphan)).await } + /// Validates a batch of transactions, handling iteratively only the independent ones, and + /// adds those to the set of known transactions that have not yet been added to any block. + /// + /// Returns transactions that where unorphaned following the insertion of the provided + /// transactions. The returned transactions are clones of objects owned by the mempool. + pub async fn validate_and_insert_transaction_batch( + self, + consensus: &ConsensusProxy, + transactions: Vec, + priority: Priority, + orphan: Orphan, + ) -> Vec>> { + consensus + .clone() + .spawn_blocking(move |c| self.inner.validate_and_insert_transaction_batch(c, transactions, priority, orphan)) + .await + } + pub async fn handle_new_block_transactions( self, consensus: &ConsensusProxy, + block_daa_score: u64, block_transactions: Arc>, ) -> MiningManagerResult>> { - consensus.clone().spawn_blocking(move |c| self.inner.handle_new_block_transactions(c, &block_transactions)).await + consensus + .clone() + .spawn_blocking(move |c| self.inner.handle_new_block_transactions(c, block_daa_score, &block_transactions)) + .await + } + + pub async fn expire_low_priority_transactions(self, consensus: &ConsensusProxy) { + consensus.clone().spawn_blocking(move |c| self.inner.expire_low_priority_transactions(c)).await; } - pub async fn revalidate_high_priority_transactions(self, consensus: &ConsensusProxy) -> MiningManagerResult> { - consensus.clone().spawn_blocking(move |c| self.inner.revalidate_high_priority_transactions(c)).await + pub async fn revalidate_high_priority_transactions( + self, + consensus: &ConsensusProxy, + transaction_ids_sender: UnboundedSender>, + ) { + consensus.clone().spawn_blocking(move |c| self.inner.revalidate_high_priority_transactions(c, transaction_ids_sender)).await; } /// Try to return a mempool transaction 
by its id. @@ -327,4 +881,28 @@ impl MiningManagerProxy { .await .unwrap() } + + /// Returns whether a transaction id was registered as accepted in the mempool, meaning + /// that the consensus accepted a block containing it and said block was handled by the + /// mempool. + /// + /// Registered transaction ids expire after a delay and are unregistered from the mempool. + /// So a returned value of true means with certitude that the transaction was accepted and + /// a false means either the transaction was never accepted or it was but beyond the expiration + /// delay. + pub async fn has_accepted_transaction(self, transaction_id: TransactionId) -> bool { + spawn_blocking(move || self.inner.has_accepted_transaction(&transaction_id)).await.unwrap() + } + + /// Returns a vector of unaccepted transactions. + /// For more details, see [`Self::has_accepted_transaction()`]. + pub async fn unaccepted_transactions(self, transactions: Vec) -> Vec { + spawn_blocking(move || self.inner.unaccepted_transactions(transactions)).await.unwrap() + } + + /// Returns a vector with all transaction ids that are neither in the mempool, nor in the orphan pool + /// nor accepted. 
+ pub async fn unknown_transactions(self, transactions: Vec) -> Vec { + spawn_blocking(move || self.inner.unknown_transactions(transactions)).await.unwrap() + } } diff --git a/mining/src/manager_tests.rs b/mining/src/manager_tests.rs index 6a1637e2ce..776ef017fa 100644 --- a/mining/src/manager_tests.rs +++ b/mining/src/manager_tests.rs @@ -8,14 +8,15 @@ mod tests { config::{Config, DEFAULT_MINIMUM_RELAY_TRANSACTION_FEE}, errors::RuleError, tx::{Orphan, Priority}, - Mempool, }, model::candidate_tx::CandidateTransaction, testutils::consensus_mock::ConsensusMock, + MiningCounters, }; use kaspa_addresses::{Address, Prefix, Version}; use kaspa_consensus_core::{ api::ConsensusApi, + block::TemplateBuildMode, coinbase::MinerData, constants::{MAX_TX_IN_SEQUENCE_NUM, SOMPI_PER_KASPA, TX_VERSION}, errors::tx::{TxResult, TxRuleError}, @@ -32,6 +33,7 @@ mod tests { test_helpers::{create_transaction, op_true_script}, }; use std::sync::Arc; + use tokio::sync::mpsc::{error::TryRecvError, unbounded_channel}; const TARGET_TIME_PER_BLOCK: u64 = 1_000; const MAX_BLOCK_MASS: u64 = 500_000; @@ -41,10 +43,11 @@ mod tests { fn test_validate_and_insert_transaction() { const TX_COUNT: u32 = 10; let consensus = Arc::new(ConsensusMock::new()); - let mut mempool = Mempool::new(Config::build_default(TARGET_TIME_PER_BLOCK, false, MAX_BLOCK_MASS)); + let counters = Arc::new(MiningCounters::default()); + let mining_manager = MiningManager::new(TARGET_TIME_PER_BLOCK, false, MAX_BLOCK_MASS, None, counters); let transactions_to_insert = (0..TX_COUNT).map(|i| create_transaction_with_utxo_entry(i, 0)).collect::>(); for transaction in transactions_to_insert.iter() { - let result = mempool.validate_and_insert_mutable_transaction( + let result = mining_manager.validate_and_insert_mutable_transaction( consensus.as_ref(), transaction.clone(), Priority::Low, @@ -55,7 +58,7 @@ mod tests { // The UtxoEntry was filled manually for those transactions, so the transactions won't be considered orphans. 
// Therefore, all the transactions expected to be contained in the mempool. - let (transactions_from_pool, _) = mempool.get_all_transactions(true, false); + let (transactions_from_pool, _) = mining_manager.get_all_transactions(true, false); assert_eq!( transactions_to_insert.len(), transactions_from_pool.len(), @@ -92,16 +95,16 @@ mod tests { // The parent's transaction was inserted into the consensus, so we want to verify that // the child transaction is not considered an orphan and inserted into the mempool. let transaction_not_an_orphan = create_child_and_parent_txs_and_add_parent_to_consensus(&consensus); - let result = mempool.validate_and_insert_transaction( + let result = mining_manager.validate_and_insert_transaction( consensus.as_ref(), transaction_not_an_orphan.clone(), Priority::Low, Orphan::Allowed, ); assert!(result.is_ok(), "inserting the child transaction {} into the mempool failed", transaction_not_an_orphan.id()); - let (transactions_from_pool, _) = mempool.get_all_transactions(true, false); + let (transactions_from_pool, _) = mining_manager.get_all_transactions(true, false); assert!( - contained_by_mtxs(transaction_not_an_orphan.id(), &transactions_from_pool), + contained_by(transaction_not_an_orphan.id(), &transactions_from_pool), "missing transaction {} in the mempool", transaction_not_an_orphan.id() ); @@ -113,7 +116,8 @@ mod tests { #[test] fn test_simulated_error_in_consensus() { let consensus = Arc::new(ConsensusMock::new()); - let mut mempool = Mempool::new(Config::build_default(TARGET_TIME_PER_BLOCK, false, MAX_BLOCK_MASS)); + let counters = Arc::new(MiningCounters::default()); + let mining_manager = MiningManager::new(TARGET_TIME_PER_BLOCK, false, MAX_BLOCK_MASS, None, counters); // Build an invalid transaction with some gas and inform the consensus mock about the result it should return // when the mempool will submit this transaction for validation. 
@@ -123,17 +127,18 @@ mod tests { consensus.set_status(transaction.id(), status.clone()); // Try validate and insert the transaction into the mempool - let result = into_status( - mempool - .validate_and_insert_transaction(consensus.as_ref(), transaction.tx.as_ref().clone(), Priority::Low, Orphan::Allowed) - .map_err(MiningManagerError::from), - ); + let result = into_status(mining_manager.validate_and_insert_transaction( + consensus.as_ref(), + transaction.tx.as_ref().clone(), + Priority::Low, + Orphan::Allowed, + )); assert_eq!( status, result, "Unexpected result when trying to insert an invalid transaction: expected: {status:?}, got: {result:?}", ); - let pool_tx = mempool.get_transaction(&transaction.id(), true, true); + let pool_tx = mining_manager.get_transaction(&transaction.id(), true, true); assert!(pool_tx.is_none(), "Mempool contains a transaction that should have been rejected"); } @@ -142,24 +147,29 @@ mod tests { #[test] fn test_insert_double_transactions_to_mempool() { let consensus = Arc::new(ConsensusMock::new()); - let mut mempool = Mempool::new(Config::build_default(TARGET_TIME_PER_BLOCK, false, MAX_BLOCK_MASS)); + let counters = Arc::new(MiningCounters::default()); + let mining_manager = MiningManager::new(TARGET_TIME_PER_BLOCK, false, MAX_BLOCK_MASS, None, counters); let transaction = create_transaction_with_utxo_entry(0, 0); // submit the transaction to the mempool - let result = - mempool.validate_and_insert_mutable_transaction(consensus.as_ref(), transaction.clone(), Priority::Low, Orphan::Allowed); + let result = mining_manager.validate_and_insert_mutable_transaction( + consensus.as_ref(), + transaction.clone(), + Priority::Low, + Orphan::Allowed, + ); assert!(result.is_ok(), "mempool should have accepted a valid transaction but did not"); // submit the same transaction again to the mempool - let result = mempool.validate_and_insert_transaction( + let result = mining_manager.validate_and_insert_transaction( consensus.as_ref(), 
transaction.tx.as_ref().clone(), Priority::Low, Orphan::Allowed, ); assert!(result.is_err(), "mempool should refuse a double submit of the same transaction but accepts it"); - if let Err(RuleError::RejectDuplicate(transaction_id)) = result { + if let Err(MiningManagerError::MempoolError(RuleError::RejectDuplicate(transaction_id))) = result { assert_eq!( transaction.id(), transaction_id, @@ -180,7 +190,8 @@ mod tests { #[test] fn test_double_spend_in_mempool() { let consensus = Arc::new(ConsensusMock::new()); - let mut mempool = Mempool::new(Config::build_default(TARGET_TIME_PER_BLOCK, false, MAX_BLOCK_MASS)); + let counters = Arc::new(MiningCounters::default()); + let mining_manager = MiningManager::new(TARGET_TIME_PER_BLOCK, false, MAX_BLOCK_MASS, None, counters); let transaction = create_child_and_parent_txs_and_add_parent_to_consensus(&consensus); assert!( @@ -189,7 +200,8 @@ mod tests { transaction.id() ); - let result = mempool.validate_and_insert_transaction(consensus.as_ref(), transaction.clone(), Priority::Low, Orphan::Allowed); + let result = + mining_manager.validate_and_insert_transaction(consensus.as_ref(), transaction.clone(), Priority::Low, Orphan::Allowed); assert!(result.is_ok(), "the mempool should accept a valid transaction when it is able to populate its UTXO entries"); let mut double_spending_transaction = transaction.clone(); @@ -200,14 +212,14 @@ mod tests { double_spending_transaction.id(), "two transactions differing by only one output value should have different ids" ); - let result = mempool.validate_and_insert_transaction( + let result = mining_manager.validate_and_insert_transaction( consensus.as_ref(), double_spending_transaction.clone(), Priority::Low, Orphan::Allowed, ); assert!(result.is_err(), "mempool should refuse a double spend transaction but accepts it"); - if let Err(RuleError::RejectDoubleSpendInMempool(_, transaction_id)) = result { + if let Err(MiningManagerError::MempoolError(RuleError::RejectDoubleSpendInMempool(_, 
transaction_id))) = result { assert_eq!( transaction.id(), transaction_id, @@ -227,12 +239,13 @@ mod tests { #[test] fn test_handle_new_block_transactions() { let consensus = Arc::new(ConsensusMock::new()); - let mut mempool = Mempool::new(Config::build_default(TARGET_TIME_PER_BLOCK, false, MAX_BLOCK_MASS)); + let counters = Arc::new(MiningCounters::default()); + let mining_manager = MiningManager::new(TARGET_TIME_PER_BLOCK, false, MAX_BLOCK_MASS, None, counters); const TX_COUNT: u32 = 10; let transactions_to_insert = (0..TX_COUNT).map(|i| create_transaction_with_utxo_entry(i, 0)).collect::>(); for transaction in transactions_to_insert.iter() { - let result = mempool.validate_and_insert_transaction( + let result = mining_manager.validate_and_insert_transaction( consensus.as_ref(), transaction.tx.as_ref().clone(), Priority::Low, @@ -247,14 +260,14 @@ mod tests { let block_with_first_part = build_block_transactions(first_part.iter().map(|mtx| mtx.tx.as_ref())); let block_with_rest = build_block_transactions(rest.iter().map(|mtx| mtx.tx.as_ref())); - let result = mempool.handle_new_block_transactions(consensus.as_ref(), &block_with_first_part); + let result = mining_manager.handle_new_block_transactions(consensus.as_ref(), 2, &block_with_first_part); assert!( result.is_ok(), "the handling by the mempool of the transactions of a block accepted by the consensus should succeed but returned {result:?}" ); for handled_tx_id in first_part.iter().map(|x| x.id()) { assert!( - mempool.get_transaction(&handled_tx_id, true, true).is_none(), + mining_manager.get_transaction(&handled_tx_id, true, true).is_none(), "the transaction {handled_tx_id} should not be in the mempool" ); } @@ -262,20 +275,20 @@ mod tests { // transactions, will still be included in the mempool. 
for handled_tx_id in rest.iter().map(|x| x.id()) { assert!( - mempool.get_transaction(&handled_tx_id, true, true).is_some(), + mining_manager.get_transaction(&handled_tx_id, true, true).is_some(), "the transaction {handled_tx_id} is lacking from the mempool" ); } // Handle all the other transactions. - let result = mempool.handle_new_block_transactions(consensus.as_ref(), &block_with_rest); + let result = mining_manager.handle_new_block_transactions(consensus.as_ref(), 3, &block_with_rest); assert!( result.is_ok(), "the handling by the mempool of the transactions of a block accepted by the consensus should succeed but returned {result:?}" ); for handled_tx_id in rest.iter().map(|x| x.id()) { assert!( - mempool.get_transaction(&handled_tx_id, true, true).is_none(), + mining_manager.get_transaction(&handled_tx_id, true, true).is_none(), "the transaction {handled_tx_id} should no longer be in the mempool" ); } @@ -286,10 +299,11 @@ mod tests { // will be removed from the mempool. fn test_double_spend_with_block() { let consensus = Arc::new(ConsensusMock::new()); - let mut mempool = Mempool::new(Config::build_default(TARGET_TIME_PER_BLOCK, false, MAX_BLOCK_MASS)); + let counters = Arc::new(MiningCounters::default()); + let mining_manager = MiningManager::new(TARGET_TIME_PER_BLOCK, false, MAX_BLOCK_MASS, None, counters); let transaction_in_the_mempool = create_transaction_with_utxo_entry(0, 0); - let result = mempool.validate_and_insert_transaction( + let result = mining_manager.validate_and_insert_transaction( consensus.as_ref(), transaction_in_the_mempool.tx.as_ref().clone(), Priority::Low, @@ -302,11 +316,11 @@ mod tests { transaction_in_the_mempool.tx.inputs[0].previous_outpoint; let block_transactions = build_block_transactions(std::iter::once(double_spend_transaction_in_the_block.tx.as_ref())); - let result = mempool.handle_new_block_transactions(consensus.as_ref(), &block_transactions); + let result = 
mining_manager.handle_new_block_transactions(consensus.as_ref(), 2, &block_transactions); assert!(result.is_ok()); assert!( - mempool.get_transaction(&transaction_in_the_mempool.id(), true, true).is_none(), + mining_manager.get_transaction(&transaction_in_the_mempool.id(), true, true).is_none(), "the transaction {} shouldn't be in the mempool since at least one output was already spent", transaction_in_the_mempool.id() ); @@ -316,7 +330,8 @@ mod tests { #[test] fn test_orphan_transactions() { let consensus = Arc::new(ConsensusMock::new()); - let mining_manager = MiningManager::new(TARGET_TIME_PER_BLOCK, false, MAX_BLOCK_MASS, None); + let counters = Arc::new(MiningCounters::default()); + let mining_manager = MiningManager::new(TARGET_TIME_PER_BLOCK, false, MAX_BLOCK_MASS, None, counters); // Before each parent transaction we add a transaction that funds it and insert the funding transaction in the consensus. const TX_PAIRS_COUNT: usize = 5; @@ -333,13 +348,13 @@ mod tests { assert!(populated_txs.is_empty(), "the mempool should have no populated transaction since only orphans were submitted"); for orphan in orphans.iter() { assert!( - contained_by_txs(orphan.id(), &child_txs), + contained_by(orphan.id(), &child_txs), "orphan transaction {} should exist in the child transactions", orphan.id() ); } for child in child_txs.iter() { - assert!(contained_by_mtxs(child.id(), &orphans), "child transaction {} should exist in the orphan pool", child.id()); + assert!(contained_by(child.id(), &orphans), "child transaction {} should exist in the orphan pool", child.id()); } // Try to build a block template. 
@@ -351,7 +366,7 @@ mod tests { let template = result.unwrap(); for block_tx in template.block.transactions.iter().skip(1) { assert!( - !contained_by_txs(block_tx.id(), &child_txs), + !contained_by(block_tx.id(), &child_txs), "transaction {} is an orphan and is found in a built block template", block_tx.id() ); @@ -363,7 +378,7 @@ mod tests { let added_parent_txs = parent_txs.iter().skip(SKIPPED_TXS).cloned().collect::>(); added_parent_txs.iter().for_each(|x| consensus.add_transaction(x.clone(), 1)); let result = - mining_manager.handle_new_block_transactions(consensus.as_ref(), &build_block_transactions(added_parent_txs.iter())); + mining_manager.handle_new_block_transactions(consensus.as_ref(), 2, &build_block_transactions(added_parent_txs.iter())); assert!(result.is_ok(), "mining manager should handle new block transactions successfully but returns {result:?}"); let unorphaned_txs = result.unwrap(); let (populated_txs, orphans) = mining_manager.get_all_transactions(true, true); @@ -379,23 +394,23 @@ mod tests { ); for populated in populated_txs.iter() { assert!( - contained_by_tx_arcs(populated.id(), &unorphaned_txs), + contained_by(populated.id(), &unorphaned_txs), "mempool transaction {} should exist in the unorphaned transactions", populated.id() ); assert!( - contained_by_txs(populated.id(), &child_txs), + contained_by(populated.id(), &child_txs), "mempool transaction {} should exist in the child transactions", populated.id() ); } for child in child_txs.iter().skip(SKIPPED_TXS) { assert!( - contained_by_tx_arcs(child.id(), &unorphaned_txs), + contained_by(child.id(), &unorphaned_txs), "child transaction {} should exist in the unorphaned transactions", child.id() ); - assert!(contained_by_mtxs(child.id(), &populated_txs), "child transaction {} should exist in the mempool", child.id()); + assert!(contained_by(child.id(), &populated_txs), "child transaction {} should exist in the mempool", child.id()); } assert_eq!( SKIPPED_TXS, orphans.len(), @@ -404,13 
+419,13 @@ mod tests { ); for orphan in orphans.iter() { assert!( - contained_by_txs(orphan.id(), &child_txs), + contained_by(orphan.id(), &child_txs), "orphan transaction {} should exist in the child transactions", orphan.id() ); } for child in child_txs.iter().take(SKIPPED_TXS) { - assert!(contained_by_mtxs(child.id(), &orphans), "child transaction {} should exist in the orphan pool", child.id()); + assert!(contained_by(child.id(), &orphans), "child transaction {} should exist in the orphan pool", child.id()); } // Build a new block template with all ready transactions, meaning all child transactions but one. @@ -430,14 +445,14 @@ mod tests { ); for block_tx in template.block.transactions.iter().skip(1) { assert!( - contained_by_txs(block_tx.id(), &child_txs), + contained_by(block_tx.id(), &child_txs), "transaction {} in the built block template does not exist in ready child transactions", block_tx.id() ); } for child in child_txs.iter().skip(SKIPPED_TXS) { assert!( - contained_by_txs(child.id(), &template.block.transactions), + contained_by(child.id(), &template.block.transactions), "child transaction {} in the mempool was ready but is not found in the built block template", child.id() ) @@ -448,7 +463,7 @@ mod tests { let added_child_txs = child_txs.iter().skip(SKIPPED_TXS).cloned().collect::>(); added_child_txs.iter().for_each(|x| consensus.add_transaction(x.clone(), 2)); let result = - mining_manager.handle_new_block_transactions(consensus.as_ref(), &build_block_transactions(added_child_txs.iter())); + mining_manager.handle_new_block_transactions(consensus.as_ref(), 4, &build_block_transactions(added_child_txs.iter())); assert!(result.is_ok(), "mining manager should handle new block transactions successfully but returns {result:?}"); let unorphaned_txs = result.unwrap(); @@ -489,14 +504,14 @@ mod tests { ); for parent in parent_txs.iter().take(SKIPPED_TXS) { assert!( - contained_by_mtxs(parent.id(), &populated_txs), + contained_by(parent.id(), 
&populated_txs), "mempool transaction {} should exist in the remaining parent transactions", parent.id() ); } for child in child_txs.iter().take(SKIPPED_TXS) { assert!( - contained_by_mtxs(child.id(), &populated_txs), + contained_by(child.id(), &populated_txs), "mempool transaction {} should exist in the remaining child transactions", child.id() ); @@ -569,7 +584,8 @@ mod tests { let mut config = Config::build_default(TARGET_TIME_PER_BLOCK, false, MAX_BLOCK_MASS); // Limit the orphan pool to 2 transactions config.maximum_orphan_transaction_count = 2; - let mining_manager = MiningManager::with_config(config.clone(), None); + let counters = Arc::new(MiningCounters::default()); + let mining_manager = MiningManager::with_config(config.clone(), None, counters); // Create pairs of transaction parent-and-child pairs according to the test vector let (parent_txs, child_txs) = create_arrays_of_parent_and_children_transactions(&consensus, tests.len()); @@ -649,7 +665,8 @@ mod tests { #[test] fn test_revalidate_high_priority_transactions() { let consensus = Arc::new(ConsensusMock::new()); - let mining_manager = MiningManager::new(TARGET_TIME_PER_BLOCK, false, MAX_BLOCK_MASS, None); + let counters = Arc::new(MiningCounters::default()); + let mining_manager = MiningManager::new(TARGET_TIME_PER_BLOCK, false, MAX_BLOCK_MASS, None, counters); // Create two valid transactions that double-spend each other (child_tx_1, child_tx_2) let (parent_tx, child_tx_1) = create_parent_and_children_transactions(&consensus, vec![3000 * SOMPI_PER_KASPA]); @@ -670,8 +687,15 @@ mod tests { assert!(result.is_ok(), "the insertion in the mempool of the spending transaction failed"); // Revalidate, to make sure spending_tx is still valid - let result = mining_manager.revalidate_high_priority_transactions(consensus.as_ref()); - assert!(result.is_ok(), "the revalidation of high-priority transactions should succeed"); + let (tx, mut rx) = unbounded_channel(); + 
mining_manager.revalidate_high_priority_transactions(consensus.as_ref(), tx); + let result = rx.blocking_recv(); + assert!(result.is_some(), "the revalidation of high-priority transactions must yield one message"); + assert_eq!( + Err(TryRecvError::Disconnected), + rx.try_recv(), + "the revalidation of high-priority transactions must yield exactly one message" + ); let valid_txs = result.unwrap(); assert_eq!(1, valid_txs.len(), "the revalidated transaction count is wrong: expected: {}, got: {}", 1, valid_txs.len()); assert_eq!(spending_tx.id(), valid_txs[0], "the revalidated transaction is not the right one"); @@ -687,10 +711,13 @@ mod tests { ); // Revalidate again, this time valid_txs should be empty - let result = mining_manager.revalidate_high_priority_transactions(consensus.as_ref()); - assert!(result.is_ok(), "the revalidation of high-priority transactions should succeed"); - let valid_txs = result.unwrap(); - assert!(valid_txs.is_empty(), "the revalidated transaction count is wrong: expected: {}, got: {}", 0, valid_txs.len()); + let (tx, mut rx) = unbounded_channel(); + mining_manager.revalidate_high_priority_transactions(consensus.as_ref(), tx); + assert_eq!( + Err(TryRecvError::Disconnected), + rx.try_recv(), + "the revalidation of high-priority transactions must yield no message" + ); // And the mempool should be empty too let (populated_txs, orphan_txs) = mining_manager.get_all_transactions(true, true); @@ -702,7 +729,8 @@ mod tests { #[test] fn test_modify_block_template() { let consensus = Arc::new(ConsensusMock::new()); - let mining_manager = MiningManager::new(TARGET_TIME_PER_BLOCK, false, MAX_BLOCK_MASS, None); + let counters = Arc::new(MiningCounters::default()); + let mining_manager = MiningManager::new(TARGET_TIME_PER_BLOCK, false, MAX_BLOCK_MASS, None, counters); // Before each parent transaction we add a transaction that funds it and insert the funding transaction in the consensus. 
const TX_PAIRS_COUNT: usize = 12; @@ -734,12 +762,7 @@ mod tests { }); // Test modify block template - sweep_compare_modified_template_to_built( - consensus.as_ref(), - Prefix::Testnet, - mining_manager.block_template_builder(), - transactions, - ); + sweep_compare_modified_template_to_built(consensus.as_ref(), Prefix::Testnet, &mining_manager, transactions); // TODO: extend the test according to the golang scenario } @@ -747,26 +770,75 @@ mod tests { fn sweep_compare_modified_template_to_built( consensus: &dyn ConsensusApi, address_prefix: Prefix, - builder: &BlockTemplateBuilder, + mining_manager: &MiningManager, transactions: Vec, ) { for _ in 0..4 { // Run a few times to get more randomness - compare_modified_template_to_built(consensus, address_prefix, builder, transactions.clone(), OpType::Usual, OpType::Usual); - compare_modified_template_to_built(consensus, address_prefix, builder, transactions.clone(), OpType::Edcsa, OpType::Edcsa); + compare_modified_template_to_built( + consensus, + address_prefix, + mining_manager, + transactions.clone(), + OpType::Usual, + OpType::Usual, + ); + compare_modified_template_to_built( + consensus, + address_prefix, + mining_manager, + transactions.clone(), + OpType::Edcsa, + OpType::Edcsa, + ); } - compare_modified_template_to_built(consensus, address_prefix, builder, transactions.clone(), OpType::True, OpType::Usual); - compare_modified_template_to_built(consensus, address_prefix, builder, transactions.clone(), OpType::Usual, OpType::True); - compare_modified_template_to_built(consensus, address_prefix, builder, transactions.clone(), OpType::Edcsa, OpType::Usual); - compare_modified_template_to_built(consensus, address_prefix, builder, transactions.clone(), OpType::Usual, OpType::Edcsa); - compare_modified_template_to_built(consensus, address_prefix, builder, transactions.clone(), OpType::Empty, OpType::Usual); - compare_modified_template_to_built(consensus, address_prefix, builder, transactions, OpType::Usual, 
OpType::Empty); + compare_modified_template_to_built( + consensus, + address_prefix, + mining_manager, + transactions.clone(), + OpType::True, + OpType::Usual, + ); + compare_modified_template_to_built( + consensus, + address_prefix, + mining_manager, + transactions.clone(), + OpType::Usual, + OpType::True, + ); + compare_modified_template_to_built( + consensus, + address_prefix, + mining_manager, + transactions.clone(), + OpType::Edcsa, + OpType::Usual, + ); + compare_modified_template_to_built( + consensus, + address_prefix, + mining_manager, + transactions.clone(), + OpType::Usual, + OpType::Edcsa, + ); + compare_modified_template_to_built( + consensus, + address_prefix, + mining_manager, + transactions.clone(), + OpType::Empty, + OpType::Usual, + ); + compare_modified_template_to_built(consensus, address_prefix, mining_manager, transactions, OpType::Usual, OpType::Empty); } fn compare_modified_template_to_built( consensus: &dyn ConsensusApi, address_prefix: Prefix, - builder: &BlockTemplateBuilder, + mining_manager: &MiningManager, transactions: Vec, first_op: OpType, second_op: OpType, @@ -775,12 +847,13 @@ mod tests { let miner_data_2 = generate_new_coinbase(address_prefix, second_op); // Build a fresh template for coinbase2 as a reference - let result = builder.build_block_template(consensus, &miner_data_2, transactions); + let builder = mining_manager.block_template_builder(); + let result = builder.build_block_template(consensus, &miner_data_2, transactions, TemplateBuildMode::Standard); assert!(result.is_ok(), "build block template failed for miner data 2"); let expected_template = result.unwrap(); // Modify to miner_data_1 - let result = builder.modify_block_template(consensus, &miner_data_1, &expected_template); + let result = BlockTemplateBuilder::modify_block_template(consensus, &miner_data_1, &expected_template); assert!(result.is_ok(), "modify block template failed for miner data 1"); let mut modified_template = result.unwrap(); // Make sure 
timestamps are equal before comparing the hash @@ -799,7 +872,7 @@ mod tests { assert_ne!(expected_block.hash(), modified_block.hash(), "built and modified blocks should have different hashes"); // And modify back to miner_data_2 - let result = builder.modify_block_template(consensus, &miner_data_2, &modified_template); + let result = BlockTemplateBuilder::modify_block_template(consensus, &miner_data_2, &modified_template); assert!(result.is_ok(), "modify block template failed for miner data 2"); let mut modified_template_2 = result.unwrap(); // Make sure timestamps are equal before comparing the hash @@ -897,16 +970,8 @@ mod tests { Transaction::new(TX_VERSION, vec![], outputs, 0, SUBNETWORK_ID_NATIVE, 0, vec![]) } - fn contained_by_mtxs(transaction_id: TransactionId, transactions: &[MutableTransaction]) -> bool { - transactions.iter().any(|x| x.id() == transaction_id) - } - - fn contained_by_txs(transaction_id: TransactionId, transactions: &[Transaction]) -> bool { - transactions.iter().any(|x| x.id() == transaction_id) - } - - fn contained_by_tx_arcs(transaction_id: TransactionId, transactions: &[Arc]) -> bool { - transactions.iter().any(|x| x.id() == transaction_id) + fn contained_by>(transaction_id: TransactionId, transactions: &[T]) -> bool { + transactions.iter().any(|x| x.as_ref().id() == transaction_id) } fn into_status(result: MiningManagerResult) -> TxResult<()> { diff --git a/mining/src/mempool/check_transaction_standard.rs b/mining/src/mempool/check_transaction_standard.rs index 8550c34689..6fdee4d316 100644 --- a/mining/src/mempool/check_transaction_standard.rs +++ b/mining/src/mempool/check_transaction_standard.rs @@ -224,7 +224,10 @@ impl Mempool { #[cfg(test)] mod tests { use super::*; - use crate::mempool::config::{Config, DEFAULT_MINIMUM_RELAY_TRANSACTION_FEE}; + use crate::{ + mempool::config::{Config, DEFAULT_MINIMUM_RELAY_TRANSACTION_FEE}, + MiningCounters, + }; use kaspa_addresses::{Address, Prefix, Version}; use kaspa_consensus_core::{ 
config::params::Params, @@ -238,6 +241,7 @@ mod tests { script_builder::ScriptBuilder, }; use smallvec::smallvec; + use std::sync::Arc; #[test] fn test_calc_min_required_tx_relay_fee() { @@ -281,7 +285,8 @@ mod tests { let params: Params = net.into(); let mut config = Config::build_default(params.target_time_per_block, false, params.max_block_mass); config.minimum_relay_transaction_fee = test.minimum_relay_transaction_fee; - let mempool = Mempool::new(config); + let counters = Arc::new(MiningCounters::default()); + let mempool = Mempool::new(Arc::new(config), counters); let got = mempool.minimum_required_transaction_relay_fee(test.size); if got != test.want { @@ -365,7 +370,8 @@ mod tests { let params: Params = net.into(); let mut config = Config::build_default(params.target_time_per_block, false, params.max_block_mass); config.minimum_relay_transaction_fee = test.minimum_relay_transaction_fee; - let mempool = Mempool::new(config); + let counters = Arc::new(MiningCounters::default()); + let mempool = Mempool::new(Arc::new(config), counters); println!("test_is_transaction_output_dust test '{}' ", test.name); let res = mempool.is_transaction_output_dust(&test.tx_out); @@ -543,7 +549,8 @@ mod tests { for net in NetworkType::iter() { let params: Params = net.into(); let config = Config::build_default(params.target_time_per_block, false, params.max_block_mass); - let mempool = Mempool::new(config); + let counters = Arc::new(MiningCounters::default()); + let mempool = Mempool::new(Arc::new(config), counters); // Ensure standard-ness is as expected. 
println!("test_check_transaction_standard_in_isolation test '{}' ", test.name); diff --git a/mining/src/mempool/config.rs b/mining/src/mempool/config.rs index c238e4a673..49d0205a0b 100644 --- a/mining/src/mempool/config.rs +++ b/mining/src/mempool/config.rs @@ -1,9 +1,13 @@ use kaspa_consensus_core::constants::TX_VERSION; pub(crate) const DEFAULT_MAXIMUM_TRANSACTION_COUNT: u64 = 1_000_000; +pub(crate) const DEFAULT_MAXIMUM_READY_TRANSACTION_COUNT: u64 = 100_000; +pub(crate) const DEFAULT_MAXIMUM_BUILD_BLOCK_TEMPLATE_ATTEMPTS: u64 = 5; pub(crate) const DEFAULT_TRANSACTION_EXPIRE_INTERVAL_SECONDS: u64 = 60; pub(crate) const DEFAULT_TRANSACTION_EXPIRE_SCAN_INTERVAL_SECONDS: u64 = 10; +pub(crate) const DEFAULT_ACCEPTED_TRANSACTION_EXPIRE_INTERVAL_SECONDS: u64 = 120; +pub(crate) const DEFAULT_ACCEPTED_TRANSACTION_EXPIRE_SCAN_INTERVAL_SECONDS: u64 = 10; pub(crate) const DEFAULT_ORPHAN_EXPIRE_INTERVAL_SECONDS: u64 = 60; pub(crate) const DEFAULT_ORPHAN_EXPIRE_SCAN_INTERVAL_SECONDS: u64 = 10; @@ -26,9 +30,14 @@ pub(crate) const DEFAULT_MAXIMUM_STANDARD_TRANSACTION_VERSION: u16 = TX_VERSION; #[derive(Clone, Debug)] pub struct Config { pub maximum_transaction_count: u64, + pub maximum_ready_transaction_count: u64, + pub maximum_build_block_template_attempts: u64, pub transaction_expire_interval_daa_score: u64, pub transaction_expire_scan_interval_daa_score: u64, pub transaction_expire_scan_interval_milliseconds: u64, + pub accepted_transaction_expire_interval_daa_score: u64, + pub accepted_transaction_expire_scan_interval_daa_score: u64, + pub accepted_transaction_expire_scan_interval_milliseconds: u64, pub orphan_expire_interval_daa_score: u64, pub orphan_expire_scan_interval_daa_score: u64, pub maximum_orphan_transaction_mass: u64, @@ -45,9 +54,14 @@ impl Config { #[allow(clippy::too_many_arguments)] pub fn new( maximum_transaction_count: u64, + maximum_ready_transaction_count: u64, + maximum_build_block_template_attempts: u64, transaction_expire_interval_daa_score: u64, 
transaction_expire_scan_interval_daa_score: u64, - transaction_expire_scan_interval_seconds: u64, + transaction_expire_scan_interval_milliseconds: u64, + accepted_transaction_expire_interval_daa_score: u64, + accepted_transaction_expire_scan_interval_daa_score: u64, + accepted_transaction_expire_scan_interval_milliseconds: u64, orphan_expire_interval_daa_score: u64, orphan_expire_scan_interval_daa_score: u64, maximum_orphan_transaction_mass: u64, @@ -61,9 +75,14 @@ impl Config { ) -> Self { Self { maximum_transaction_count, + maximum_ready_transaction_count, + maximum_build_block_template_attempts, transaction_expire_interval_daa_score, transaction_expire_scan_interval_daa_score, - transaction_expire_scan_interval_milliseconds: transaction_expire_scan_interval_seconds, + transaction_expire_scan_interval_milliseconds, + accepted_transaction_expire_interval_daa_score, + accepted_transaction_expire_scan_interval_daa_score, + accepted_transaction_expire_scan_interval_milliseconds, orphan_expire_interval_daa_score, orphan_expire_scan_interval_daa_score, maximum_orphan_transaction_mass, @@ -82,10 +101,17 @@ impl Config { pub const fn build_default(target_milliseconds_per_block: u64, relay_non_std_transactions: bool, max_block_mass: u64) -> Self { Self { maximum_transaction_count: DEFAULT_MAXIMUM_TRANSACTION_COUNT, + maximum_ready_transaction_count: DEFAULT_MAXIMUM_READY_TRANSACTION_COUNT, + maximum_build_block_template_attempts: DEFAULT_MAXIMUM_BUILD_BLOCK_TEMPLATE_ATTEMPTS, transaction_expire_interval_daa_score: DEFAULT_TRANSACTION_EXPIRE_INTERVAL_SECONDS * 1000 / target_milliseconds_per_block, transaction_expire_scan_interval_daa_score: DEFAULT_TRANSACTION_EXPIRE_SCAN_INTERVAL_SECONDS * 1000 / target_milliseconds_per_block, transaction_expire_scan_interval_milliseconds: DEFAULT_TRANSACTION_EXPIRE_SCAN_INTERVAL_SECONDS * 1000, + accepted_transaction_expire_interval_daa_score: DEFAULT_ACCEPTED_TRANSACTION_EXPIRE_INTERVAL_SECONDS * 1000 + / target_milliseconds_per_block, 
+ accepted_transaction_expire_scan_interval_daa_score: DEFAULT_ACCEPTED_TRANSACTION_EXPIRE_SCAN_INTERVAL_SECONDS * 1000 + / target_milliseconds_per_block, + accepted_transaction_expire_scan_interval_milliseconds: DEFAULT_ACCEPTED_TRANSACTION_EXPIRE_SCAN_INTERVAL_SECONDS * 1000, orphan_expire_interval_daa_score: DEFAULT_ORPHAN_EXPIRE_INTERVAL_SECONDS * 1000 / target_milliseconds_per_block, orphan_expire_scan_interval_daa_score: DEFAULT_ORPHAN_EXPIRE_SCAN_INTERVAL_SECONDS * 1000 / target_milliseconds_per_block, maximum_orphan_transaction_mass: DEFAULT_MAXIMUM_ORPHAN_TRANSACTION_MASS, diff --git a/mining/src/mempool/handle_new_block_transactions.rs b/mining/src/mempool/handle_new_block_transactions.rs index 9fbd7e2d92..21295ae1e3 100644 --- a/mining/src/mempool/handle_new_block_transactions.rs +++ b/mining/src/mempool/handle_new_block_transactions.rs @@ -1,25 +1,69 @@ -use crate::mempool::{errors::RuleResult, Mempool}; -use kaspa_consensus_core::{api::ConsensusApi, tx::Transaction}; -use std::{collections::HashSet, sync::Arc}; +use crate::mempool::{ + errors::RuleResult, + model::{ + pool::Pool, + tx::{MempoolTransaction, TxRemovalReason}, + }, + Mempool, +}; +use kaspa_consensus_core::{ + api::ConsensusApi, + tx::{Transaction, TransactionId}, +}; +use kaspa_core::time::Stopwatch; +use std::{collections::HashSet, sync::atomic::Ordering}; impl Mempool { pub(crate) fn handle_new_block_transactions( &mut self, - consensus: &dyn ConsensusApi, + block_daa_score: u64, block_transactions: &[Transaction], - ) -> RuleResult>> { - let mut accepted_orphans = vec![]; + ) -> RuleResult> { + let _sw = Stopwatch::<400>::with_threshold("handle_new_block_transactions op"); + let mut unorphaned_transactions = vec![]; + let mut tx_accepted_counts = 0; + let mut input_counts = 0; + let mut output_counts = 0; for transaction in block_transactions[1..].iter() { let transaction_id = transaction.id(); - self.remove_transaction(&transaction_id, false)?; + // Rust rewrite: This behavior does 
differ from golang implementation. + // If the transaction got accepted via a peer but is still an orphan here, do not remove + // its redeemers in the orphan pool. We give those a chance to be unorphaned and included + // in the next block template. + if !self.orphan_pool.has(&transaction_id) { + self.remove_transaction(&transaction_id, false, TxRemovalReason::Accepted, "")?; + } self.remove_double_spends(transaction)?; - self.orphan_pool.remove_orphan(&transaction_id, false)?; - let mut unorphaned_transactions = self.process_orphans_after_accepted_transaction(consensus, transaction)?; - accepted_orphans.append(&mut unorphaned_transactions); + self.orphan_pool.remove_orphan(&transaction_id, false, TxRemovalReason::Accepted, "")?; + if self.accepted_transactions.add(transaction_id, block_daa_score) { + tx_accepted_counts += 1; + input_counts += transaction.inputs.len(); + output_counts += transaction.outputs.len(); + } + unorphaned_transactions.extend(self.get_unorphaned_transactions_after_accepted_transaction(transaction)); } - self.orphan_pool.expire_low_priority_transactions(consensus)?; - self.transaction_pool.expire_low_priority_transactions(consensus.get_virtual_daa_score())?; - Ok(accepted_orphans) + self.counters.block_tx_counts.fetch_add(block_transactions.len() as u64 - 1, Ordering::Relaxed); + self.counters.tx_accepted_counts.fetch_add(tx_accepted_counts, Ordering::Relaxed); + self.counters.input_counts.fetch_add(input_counts as u64, Ordering::Relaxed); + self.counters.output_counts.fetch_add(output_counts as u64, Ordering::Relaxed); + self.counters.ready_txs_sample.store(self.transaction_pool.ready_transaction_count() as u64, Ordering::Relaxed); + self.counters.txs_sample.store(self.transaction_pool.len() as u64, Ordering::Relaxed); + self.counters.orphans_sample.store(self.orphan_pool.len() as u64, Ordering::Relaxed); + self.counters.accepted_sample.store(self.accepted_transactions.len() as u64, Ordering::Relaxed); + + Ok(unorphaned_transactions) + } + 
+ pub(crate) fn expire_orphan_low_priority_transactions(&mut self, consensus: &dyn ConsensusApi) -> RuleResult<()> { + self.orphan_pool.expire_low_priority_transactions(consensus.get_virtual_daa_score()) + } + + pub(crate) fn expire_accepted_transactions(&mut self, consensus: &dyn ConsensusApi) { + self.accepted_transactions.expire(consensus.get_virtual_daa_score()); + } + + pub(crate) fn collect_expired_low_priority_transactions(&mut self, consensus: &dyn ConsensusApi) -> Vec { + self.transaction_pool.collect_expired_low_priority_transactions(consensus.get_virtual_daa_score()) } fn remove_double_spends(&mut self, transaction: &Transaction) -> RuleResult<()> { @@ -29,6 +73,8 @@ impl Mempool { transactions_to_remove.insert(*redeemer_id); } } - transactions_to_remove.iter().try_for_each(|x| self.remove_transaction(x, true)) + transactions_to_remove.iter().try_for_each(|x| { + self.remove_transaction(x, true, TxRemovalReason::DoubleSpend, format!(" favouring {}", transaction.id()).as_str()) + }) } } diff --git a/mining/src/mempool/mod.rs b/mining/src/mempool/mod.rs index cfe8b44d1c..c3a5e677e7 100644 --- a/mining/src/mempool/mod.rs +++ b/mining/src/mempool/mod.rs @@ -1,23 +1,27 @@ -use crate::model::{ - candidate_tx::CandidateTransaction, - owner_txs::{GroupedOwnerTransactions, ScriptPublicKeySet}, +use crate::{ + model::{ + candidate_tx::CandidateTransaction, + owner_txs::{GroupedOwnerTransactions, ScriptPublicKeySet}, + }, + MiningCounters, }; use self::{ config::Config, - model::{orphan_pool::OrphanPool, pool::Pool, transactions_pool::TransactionsPool}, + model::{accepted_transactions::AcceptedTransactions, orphan_pool::OrphanPool, pool::Pool, transactions_pool::TransactionsPool}, + tx::Priority, }; use kaspa_consensus_core::tx::{MutableTransaction, TransactionId}; +use kaspa_core::time::Stopwatch; use std::sync::Arc; pub(crate) mod check_transaction_standard; pub mod config; pub mod errors; pub(crate) mod handle_new_block_transactions; -mod model; +pub(crate) mod 
model; pub(crate) mod populate_entries_and_try_validate; pub(crate) mod remove_transaction; -pub(crate) mod revalidate_high_priority_transactions; pub(crate) mod validate_and_insert_transaction; /// Mempool contains transactions intended to be inserted into a block and mined. @@ -40,14 +44,16 @@ pub(crate) struct Mempool { config: Arc, transaction_pool: TransactionsPool, orphan_pool: OrphanPool, + accepted_transactions: AcceptedTransactions, + counters: Arc, } impl Mempool { - pub(crate) fn new(config: Config) -> Self { - let config = Arc::new(config); + pub(crate) fn new(config: Arc, counters: Arc) -> Self { let transaction_pool = TransactionsPool::new(config.clone()); let orphan_pool = OrphanPool::new(config.clone()); - Self { config, transaction_pool, orphan_pool } + let accepted_transactions = AcceptedTransactions::new(config.clone()); + Self { config, transaction_pool, orphan_pool, accepted_transactions, counters } } pub(crate) fn get_transaction( @@ -81,14 +87,18 @@ impl Mempool { include_transaction_pool: bool, include_orphan_pool: bool, ) -> (Vec, Vec) { - let mut transactions = vec![]; - let mut orphans = vec![]; - if include_transaction_pool { - transactions = self.transaction_pool.get_all_transactions() - } - if include_orphan_pool { - orphans = self.orphan_pool.get_all_transactions() - } + let transactions = if include_transaction_pool { self.transaction_pool.get_all_transactions() } else { vec![] }; + let orphans = if include_orphan_pool { self.orphan_pool.get_all_transactions() } else { vec![] }; + (transactions, orphans) + } + + pub(crate) fn get_all_transaction_ids( + &self, + include_transaction_pool: bool, + include_orphan_pool: bool, + ) -> (Vec, Vec) { + let transactions = if include_transaction_pool { self.transaction_pool.get_all_transaction_ids() } else { vec![] }; + let orphans = if include_orphan_pool { self.orphan_pool.get_all_transaction_ids() } else { vec![] }; (transactions, orphans) } @@ -120,8 +130,38 @@ impl Mempool { } pub(crate) fn 
block_candidate_transactions(&self) -> Vec { + let _sw = Stopwatch::<10>::with_threshold("block_candidate_transactions op"); self.transaction_pool.all_ready_transactions() } + + pub(crate) fn all_transaction_ids_with_priority(&self, priority: Priority) -> Vec { + let _sw = Stopwatch::<15>::with_threshold("all_transaction_ids_with_priority op"); + self.transaction_pool.all_transaction_ids_with_priority(priority) + } + + pub(crate) fn update_revalidated_transaction(&mut self, transaction: MutableTransaction) -> bool { + if let Some(tx) = self.transaction_pool.get_mut(&transaction.id()) { + tx.mtx = transaction; + true + } else { + false + } + } + + pub(crate) fn has_accepted_transaction(&self, transaction_id: &TransactionId) -> bool { + self.accepted_transactions.has(transaction_id) + } + + pub(crate) fn unaccepted_transactions(&self, transactions: Vec) -> Vec { + self.accepted_transactions.unaccepted(&mut transactions.into_iter()) + } + + pub(crate) fn unknown_transactions(&self, transactions: Vec) -> Vec { + let mut not_in_pools_txs = transactions + .into_iter() + .filter(|transaction_id| !(self.transaction_pool.has(transaction_id) || self.orphan_pool.has(transaction_id))); + self.accepted_transactions.unaccepted(&mut not_in_pools_txs) + } } pub mod tx { diff --git a/mining/src/mempool/model/accepted_transactions.rs b/mining/src/mempool/model/accepted_transactions.rs new file mode 100644 index 0000000000..94ad0d0761 --- /dev/null +++ b/mining/src/mempool/model/accepted_transactions.rs @@ -0,0 +1,77 @@ +use crate::mempool::config::Config; +use kaspa_consensus_core::tx::TransactionId; +use kaspa_core::{debug, time::unix_now}; +use std::{collections::HashMap, sync::Arc}; + +pub(crate) struct AcceptedTransactions { + /// Mempool config + config: Arc, + + /// A map of Transaction IDs to DAA scores + transactions: HashMap, + + /// Last expire scan DAA score + last_expire_scan_daa_score: u64, + /// last expire scan time in milliseconds + last_expire_scan_time: u64, +} + 
+impl AcceptedTransactions { + pub(crate) fn new(config: Arc) -> Self { + Self { config, transactions: Default::default(), last_expire_scan_daa_score: 0, last_expire_scan_time: unix_now() } + } + + pub(crate) fn add(&mut self, transaction_id: TransactionId, daa_score: u64) -> bool { + self.transactions.insert(transaction_id, daa_score).is_none() + } + + pub(crate) fn remove(&mut self, transaction_id: &TransactionId) -> bool { + self.transactions.remove(transaction_id).is_some() + } + + pub(crate) fn has(&self, transaction_id: &TransactionId) -> bool { + self.transactions.contains_key(transaction_id) + } + + pub(crate) fn len(&self) -> usize { + self.transactions.len() + } + + pub(crate) fn unaccepted(&self, transactions: &mut impl Iterator) -> Vec { + transactions.filter(|transaction_id| !self.has(transaction_id)).collect() + } + + pub(crate) fn expire(&mut self, virtual_daa_score: u64) { + let now = unix_now(); + if virtual_daa_score < self.last_expire_scan_daa_score + self.config.accepted_transaction_expire_scan_interval_daa_score + || now < self.last_expire_scan_time + self.config.accepted_transaction_expire_scan_interval_milliseconds + { + return; + } + + let expired_transactions: Vec = self + .transactions + .iter() + .filter_map(|(transaction_id, daa_score)| { + if virtual_daa_score > daa_score + self.config.accepted_transaction_expire_interval_daa_score { + Some(*transaction_id) + } else { + None + } + }) + .collect(); + + for transaction_id in expired_transactions.iter() { + self.remove(transaction_id); + } + + debug!( + "Removed {} accepted transactions from mempool cache. 
Currently containing {}", + expired_transactions.len(), + self.transactions.len() + ); + + self.last_expire_scan_daa_score = virtual_daa_score; + self.last_expire_scan_time = now; + } +} diff --git a/mining/src/mempool/model/mod.rs b/mining/src/mempool/model/mod.rs index 4712336dcd..88997e46f1 100644 --- a/mining/src/mempool/model/mod.rs +++ b/mining/src/mempool/model/mod.rs @@ -1,3 +1,4 @@ +pub(crate) mod accepted_transactions; pub(crate) mod map; pub(crate) mod orphan_pool; pub(crate) mod pool; diff --git a/mining/src/mempool/model/orphan_pool.rs b/mining/src/mempool/model/orphan_pool.rs index 327f831064..8adef6fa27 100644 --- a/mining/src/mempool/model/orphan_pool.rs +++ b/mining/src/mempool/model/orphan_pool.rs @@ -3,21 +3,19 @@ use crate::mempool::{ errors::{RuleError, RuleResult}, model::{ map::{MempoolTransactionCollection, OutpointIndex}, - pool::Pool, - tx::MempoolTransaction, + pool::{Pool, TransactionsEdges}, + tx::{MempoolTransaction, TxRemovalReason}, }, tx::Priority, }; use kaspa_consensus_core::{ - api::ConsensusApi, tx::MutableTransaction, tx::{TransactionId, TransactionOutpoint}, }; -use kaspa_core::warn; +use kaspa_core::{debug, warn}; +use kaspa_utils::iter::IterExtensions; use std::sync::Arc; -use super::pool::TransactionsEdges; - /// Pool of orphan transactions depending on some missing utxo entries /// /// ### Rust rewrite notes @@ -63,7 +61,7 @@ impl OrphanPool { pub(crate) fn try_add_orphan( &mut self, - consensus: &dyn ConsensusApi, + virtual_daa_score: u64, transaction: MutableTransaction, priority: Priority, ) -> RuleResult<()> { @@ -77,7 +75,7 @@ impl OrphanPool { self.check_orphan_double_spend(&transaction)?; // Make sure there is room in the pool for the new transaction self.limit_orphan_pool_size(1)?; - self.add_orphan(consensus, transaction, priority)?; + self.add_orphan(virtual_daa_score, transaction, priority)?; Ok(()) } @@ -95,7 +93,7 @@ impl OrphanPool { } // Don't remove redeemers in the case of a random eviction since the 
evicted transaction is // not invalid, therefore it's redeemers are as good as any orphan that just arrived. - self.remove_orphan(&orphan_to_remove.unwrap().id(), false)?; + self.remove_orphan(&orphan_to_remove.unwrap().id(), false, TxRemovalReason::MakingRoom, "")?; } Ok(()) } @@ -128,9 +126,9 @@ impl OrphanPool { Ok(()) } - fn add_orphan(&mut self, consensus: &dyn ConsensusApi, transaction: MutableTransaction, priority: Priority) -> RuleResult<()> { + fn add_orphan(&mut self, virtual_daa_score: u64, transaction: MutableTransaction, priority: Priority) -> RuleResult<()> { let id = transaction.id(); - let transaction = MempoolTransaction::new(transaction, priority, consensus.get_virtual_daa_score()); + let transaction = MempoolTransaction::new(transaction, priority, virtual_daa_score); // Add all entries in outpoint_owner_id for input in transaction.mtx.tx.inputs.iter() { self.outpoint_owner_id.insert(input.previous_outpoint, id); @@ -140,9 +138,7 @@ impl OrphanPool { // ... incoming for parent_id in self.get_parent_transaction_ids_in_pool(&transaction.mtx) { let entry = self.chained_mut().entry(parent_id).or_default(); - if !entry.contains(&id) { - entry.insert(id); - } + entry.insert(id); } // ... 
outgoing let mut outpoint = TransactionOutpoint::new(id, 0); @@ -154,6 +150,7 @@ impl OrphanPool { } self.all_orphans.insert(id, transaction); + debug!("Added transaction to orphan pool: {}", id); Ok(()) } @@ -161,6 +158,8 @@ impl OrphanPool { &mut self, transaction_id: &TransactionId, remove_redeemers: bool, + reason: TxRemovalReason, + extra_info: &str, ) -> RuleResult> { // Rust rewrite: // - the call cycle removeOrphan -> removeRedeemersOf -> removeOrphan is replaced by @@ -175,7 +174,26 @@ impl OrphanPool { if remove_redeemers { transaction_ids_to_remove.extend(self.get_redeemer_ids_in_pool(transaction_id)); } - transaction_ids_to_remove.iter().map(|x| self.remove_single_orphan(x)).collect() + let removed_transactions = + transaction_ids_to_remove.iter().map(|x| self.remove_single_orphan(x)).collect::>>()?; + if reason.verbose() { + match removed_transactions.len() { + 0 => (), // This is not possible + 1 => { + debug!("Removed orphan transaction ({}): {}{}", reason, removed_transactions[0].id(), extra_info); + } + n => { + debug!( + "Removed {} orphan transactions ({}): {}{}", + n, + reason, + removed_transactions.iter().map(|x| x.id()).reusable_format(", "), + extra_info + ); + } + } + } + Ok(removed_transactions) } fn remove_single_orphan(&mut self, transaction_id: &TransactionId) -> RuleResult { @@ -214,14 +232,45 @@ impl OrphanPool { self.get_redeemer_ids_in_pool(transaction_id).iter().map(|x| self.remove_single_orphan(x)).collect() } - pub(crate) fn expire_low_priority_transactions(&mut self, consensus: &dyn ConsensusApi) -> RuleResult<()> { - let virtual_daa_score = consensus.get_virtual_daa_score(); + pub(crate) fn update_orphans_after_transaction_removed( + &mut self, + removed_transaction: &MempoolTransaction, + remove_redeemers: bool, + ) -> RuleResult> { + let removed_transaction_id = removed_transaction.id(); + if remove_redeemers { + return self.remove_redeemers_of(&removed_transaction_id); + } + + let mut outpoint = 
TransactionOutpoint::new(removed_transaction_id, 0); + for i in 0..removed_transaction.mtx.tx.outputs.len() { + outpoint.index = i as u32; + if let Some(orphan) = self.outpoint_orphan_mut(&outpoint) { + for (i, input) in orphan.mtx.tx.inputs.iter().enumerate() { + if input.previous_outpoint.transaction_id == removed_transaction_id { + orphan.mtx.entries[i] = None; + } + } + } + } + Ok(vec![]) + } + + fn get_random_low_priority_orphan(&self) -> Option<&MempoolTransaction> { + self.all_orphans.values().find(|x| x.priority == Priority::Low) + } + + fn chained_mut(&mut self) -> &mut TransactionsEdges { + &mut self.chained_orphans + } + + pub(crate) fn expire_low_priority_transactions(&mut self, virtual_daa_score: u64) -> RuleResult<()> { if virtual_daa_score < self.last_expire_scan + self.config.orphan_expire_scan_interval_daa_score { return Ok(()); } // Never expire high priority transactions - // Remove all transactions whose addedAtDAAScore is older then TransactionExpireIntervalDAAScore + // Remove all transactions whose `added_at_daa_score` is older than TransactionExpireIntervalDAAScore let expired_low_priority_transactions: Vec = self .all_orphans .values() @@ -237,41 +286,12 @@ .collect(); for transaction_id in expired_low_priority_transactions.iter() { - self.remove_orphan(transaction_id, false)?; + self.remove_orphan(transaction_id, false, TxRemovalReason::Expired, "")?; } self.last_expire_scan = virtual_daa_score; Ok(()) } - - pub(crate) fn update_orphans_after_transaction_removed( - &mut self, - removed_transaction: &MempoolTransaction, - remove_redeemers: bool, - ) -> RuleResult<()> { - let removed_transaction_id = removed_transaction.id(); - if remove_redeemers { - self.remove_redeemers_of(&removed_transaction_id)?; - return Ok(()); - } - - let mut outpoint = TransactionOutpoint::new(removed_transaction_id, 0); - for i in 0..removed_transaction.mtx.tx.outputs.len() { - outpoint.index = i as u32; - if let Some(orphan) =
self.outpoint_orphan_mut(&outpoint) { - for (i, input) in orphan.mtx.tx.inputs.iter().enumerate() { - if input.previous_outpoint.transaction_id == removed_transaction_id { - orphan.mtx.entries[i] = None; - } - } - } - } - Ok(()) - } - - fn get_random_low_priority_orphan(&self) -> Option<&MempoolTransaction> { - self.all_orphans.values().find(|x| x.priority == Priority::Low) - } } impl Pool for OrphanPool { @@ -279,15 +299,11 @@ impl Pool for OrphanPool { &self.all_orphans } - fn all_mut(&mut self) -> &mut MempoolTransactionCollection { - &mut self.all_orphans - } - fn chained(&self) -> &TransactionsEdges { &self.chained_orphans } - fn chained_mut(&mut self) -> &mut TransactionsEdges { - &mut self.chained_orphans + fn get_mut(&mut self, transaction_id: &TransactionId) -> Option<&mut MempoolTransaction> { + self.all_orphans.get_mut(transaction_id) } } diff --git a/mining/src/mempool/model/pool.rs b/mining/src/mempool/model/pool.rs index a974a2c38d..1f9ff15529 100644 --- a/mining/src/mempool/model/pool.rs +++ b/mining/src/mempool/model/pool.rs @@ -1,8 +1,8 @@ -use std::collections::{hash_set::Iter, HashMap, HashSet}; - -use super::{map::MempoolTransactionCollection, tx::MempoolTransaction}; use crate::{ - mempool::tx::Priority, + mempool::{ + model::{map::MempoolTransactionCollection, tx::MempoolTransaction}, + tx::Priority, + }, model::{ owner_txs::{GroupedOwnerTransactions, ScriptPublicKeySet}, topological_index::TopologicalIndex, @@ -10,15 +10,14 @@ use crate::{ }, }; use kaspa_consensus_core::tx::{MutableTransaction, TransactionId}; +use std::collections::{hash_set::Iter, HashMap, HashSet, VecDeque}; pub(crate) type TransactionsEdges = HashMap; pub(crate) trait Pool { fn all(&self) -> &MempoolTransactionCollection; - fn all_mut(&mut self) -> &mut MempoolTransactionCollection; fn chained(&self) -> &TransactionsEdges; - fn chained_mut(&mut self) -> &mut TransactionsEdges; fn has(&self, transaction_id: &TransactionId) -> bool { self.all().contains_key(transaction_id) 
@@ -28,6 +27,8 @@ pub(crate) trait Pool { self.all().get(transaction_id) } + fn get_mut(&mut self, transaction_id: &TransactionId) -> Option<&mut MempoolTransaction>; + /// Returns the number of transactions in the pool fn len(&self) -> usize { self.all().len() @@ -62,24 +63,34 @@ pub(crate) trait Pool { /// Returns the ids of all transactions being directly and indirectly chained to `transaction_id` /// and existing in the pool. - fn get_redeemer_ids_in_pool(&self, transaction_id: &TransactionId) -> TransactionIdSet { - let mut redeemers = TransactionIdSet::new(); + /// + /// The transactions are traversed in BFS mode. The returned order is not guaranteed to be + /// topological. + /// + /// NOTE: this operation's complexity might become linear in the size of the mempool if the mempool + /// contains deeply chained transactions + fn get_redeemer_ids_in_pool(&self, transaction_id: &TransactionId) -> Vec { + // TODO: study if removals based on the results of this function should occur in reversed + // topological order to prevent missing outpoints in concurrent processes. + let mut visited = TransactionIdSet::new(); + let mut descendants = vec![]; if let Some(transaction) = self.get(transaction_id) { - let mut stack = vec![transaction]; - while let Some(transaction) = stack.pop() { + let mut queue = VecDeque::new(); + queue.push_back(transaction); + while let Some(transaction) = queue.pop_front() { if let Some(chains) = self.chained().get(&transaction.id()) { - for redeemer_id in chains { + chains.iter().for_each(|redeemer_id| { if let Some(redeemer) = self.get(redeemer_id) { - // Do no revisit transactions - if redeemers.insert(*redeemer_id) { - stack.push(redeemer); + if visited.insert(*redeemer_id) { + descendants.push(*redeemer_id); + queue.push_back(redeemer); } } - } + }) } } } - redeemers + descendants } /// Returns a vector with clones of all the transactions in the pool. 
@@ -87,6 +98,11 @@ pub(crate) trait Pool { self.all().values().map(|x| x.mtx.clone()).collect() } + /// Returns a vector with ids of all the transactions in the pool. + fn get_all_transaction_ids(&self) -> Vec { + self.all().keys().cloned().collect() + } + /// Fills owner transactions for a set of script public keys. fn fill_owner_set_transactions(&self, script_public_keys: &ScriptPublicKeySet, owner_set: &mut GroupedOwnerTransactions) { script_public_keys.iter().for_each(|script_public_key| { @@ -98,9 +114,7 @@ pub(crate) trait Pool { // Insert the mutable transaction in the owners object if not already present. // Clone since the transaction leaves the mempool. owner_set.transactions.entry(*id).or_insert_with(|| transaction.mtx.clone()); - if !owner.sending_txs.contains(id) { - owner.sending_txs.insert(*id); - } + owner.sending_txs.insert(*id); } // Receiving transactions @@ -108,9 +122,7 @@ pub(crate) trait Pool { // Insert the mutable transaction in the owners object if not already present. // Clone since the transaction leaves the mempool. 
owner_set.transactions.entry(*id).or_insert_with(|| transaction.mtx.clone()); - if !owner.receiving_txs.contains(id) { - owner.receiving_txs.insert(*id); - } + owner.receiving_txs.insert(*id); } }); }); @@ -123,6 +135,7 @@ pub(crate) struct PoolIndex { } impl PoolIndex { + #[allow(dead_code)] pub(crate) fn new(transactions: TransactionIdSet, chained_transactions: TransactionsEdges) -> Self { Self { transactions, chained_transactions } } diff --git a/mining/src/mempool/model/transactions_pool.rs b/mining/src/mempool/model/transactions_pool.rs index 98de0b2d6b..cf70150df7 100644 --- a/mining/src/mempool/model/transactions_pool.rs +++ b/mining/src/mempool/model/transactions_pool.rs @@ -2,7 +2,12 @@ use crate::{ mempool::{ config::Config, errors::{RuleError, RuleResult}, - model::{map::MempoolTransactionCollection, pool::Pool, tx::MempoolTransaction, utxo_set::MempoolUtxoSet}, + model::{ + map::MempoolTransactionCollection, + pool::{Pool, TransactionsEdges}, + tx::MempoolTransaction, + utxo_set::MempoolUtxoSet, + }, tx::Priority, }, model::{candidate_tx::CandidateTransaction, topological_index::TopologicalIndex}, @@ -11,14 +16,12 @@ use kaspa_consensus_core::{ tx::TransactionId, tx::{MutableTransaction, TransactionOutpoint}, }; -use kaspa_core::{debug, time::unix_now, warn}; +use kaspa_core::{time::unix_now, trace, warn}; use std::{ - collections::{hash_map::Keys, hash_set::Iter}, + collections::{hash_map::Keys, hash_set::Iter, HashSet}, sync::Arc, }; -use super::pool::TransactionsEdges; - /// Pool of transactions to be included in a block template /// /// ### Rust rewrite notes @@ -50,6 +53,8 @@ pub(crate) struct TransactionsPool { parent_transactions: TransactionsEdges, /// Transactions dependencies formed by outputs present in pool - successor relations. 
chained_transactions: TransactionsEdges, + /// Transactions with no parents in the mempool -- ready to be inserted into a block template + ready_transactions: HashSet, last_expire_scan_daa_score: u64, /// last expire scan time in milliseconds @@ -66,6 +71,7 @@ impl TransactionsPool { all_transactions: MempoolTransactionCollection::default(), parent_transactions: TransactionsEdges::default(), chained_transactions: TransactionsEdges::default(), + ready_transactions: Default::default(), last_expire_scan_daa_score: 0, last_expire_scan_time: unix_now(), utxo_set: MempoolUtxoSet::new(), @@ -98,34 +104,21 @@ impl TransactionsPool { // here yet since, by definition, they would have been orphans. let parents = self.get_parent_transaction_ids_in_pool(&transaction.mtx); self.parent_transactions.insert(id, parents.clone()); + if parents.is_empty() { + self.ready_transactions.insert(id); + } for parent_id in parents { - let entry = self.chained_mut().entry(parent_id).or_default(); - if !entry.contains(&id) { - entry.insert(id); - } + let entry = self.chained_transactions.entry(parent_id).or_default(); + entry.insert(id); } self.utxo_set.add_transaction(&transaction.mtx); self.all_transactions.insert(id, transaction); + trace!("Added transaction {}", id); Ok(()) } - pub(crate) fn remove_parent_chained_relation_in_pool( - &mut self, - transaction_id: &TransactionId, - parent_id: &TransactionId, - ) -> bool { - let mut found = false; - // Remove the bijective parent/chained relation - if let Some(parents) = self.parent_transactions.get_mut(transaction_id) { - found = parents.remove(parent_id); - } - if let Some(chains) = self.chained_transactions.get_mut(parent_id) { - found = chains.remove(transaction_id) || found; - } - found - } - + /// Fully removes the transaction from all relational sets, as well as from the UTXO set pub(crate) fn remove_transaction(&mut self, transaction_id: &TransactionId) -> RuleResult { // Remove all bijective parent/chained relations if let 
Some(parents) = self.parent_transactions.get(transaction_id) { @@ -139,105 +132,101 @@ impl TransactionsPool { for chain in chains.iter() { if let Some(parents) = self.parent_transactions.get_mut(chain) { parents.remove(transaction_id); + if parents.is_empty() { + self.ready_transactions.insert(*chain); + } } } } self.parent_transactions.remove(transaction_id); self.chained_transactions.remove(transaction_id); + self.ready_transactions.remove(transaction_id); // Remove the transaction itself - self.all_transactions.remove(transaction_id).ok_or(RuleError::RejectMissingTransaction(*transaction_id)) - } + let removed_tx = self.all_transactions.remove(transaction_id).ok_or(RuleError::RejectMissingTransaction(*transaction_id))?; - pub(crate) fn expire_low_priority_transactions(&mut self, virtual_daa_score: u64) -> RuleResult<()> { - let now = unix_now(); - if virtual_daa_score < self.last_expire_scan_daa_score + self.config.transaction_expire_scan_interval_daa_score - || now < self.last_expire_scan_time + self.config.transaction_expire_scan_interval_milliseconds - { - return Ok(()); - } + // TODO: consider using `self.parent_transactions.get(transaction_id)` + // The tradeoff to consider is whether it might be possible that a parent tx exists in the pool + // however its relation as parent is not registered. 
This can supposedly happen in rare cases where + // the parent was removed w/o redeemers and then re-added + let parent_ids = self.get_parent_transaction_ids_in_pool(&removed_tx.mtx); - // Never expire high priority transactions - // Remove all transactions whose added_at_daa_score is older then transaction_expire_interval_daa_score - let expired_low_priority_transactions: Vec = self - .all_transactions - .values() - .filter_map(|x| { - if (x.priority == Priority::Low) - && virtual_daa_score > x.added_at_daa_score + self.config.transaction_expire_interval_daa_score - { - debug!( - "Removing transaction {}, because it expired, virtual DAA score is {} and expire limit is {}", - x.id(), - virtual_daa_score, - x.added_at_daa_score + self.config.transaction_expire_interval_daa_score - ); - Some(x.id()) - } else { - None - } - }) - .collect(); - - for transaction_id in expired_low_priority_transactions.iter() { - self.remove_transaction(transaction_id)?; - } + // Remove the transaction from the mempool UTXO set + self.utxo_set.remove_transaction(&removed_tx.mtx, &parent_ids); - self.last_expire_scan_daa_score = virtual_daa_score; - self.last_expire_scan_time = now; - Ok(()) + Ok(removed_tx) } - /// Is the mempool transaction identified by `transaction_id` ready for being inserted into a block template? - pub(crate) fn is_transaction_ready(&self, transaction_id: &TransactionId) -> bool { - if self.all_transactions.contains_key(transaction_id) { - if let Some(parents) = self.parent_transactions.get(transaction_id) { - return parents.is_empty(); - } - return true; - } - false + pub(crate) fn ready_transaction_count(&self) -> usize { + self.ready_transactions.len() } /// all_ready_transactions returns all fully populated mempool transactions having no parents in the mempool. /// These transactions are ready for being inserted in a block template. 
pub(crate) fn all_ready_transactions(&self) -> Vec { // The returned transactions are leaving the mempool so they are cloned - self.all_transactions - .values() - .filter_map(|x| if self.is_transaction_ready(&x.id()) { Some(CandidateTransaction::from_mutable(&x.mtx)) } else { None }) + self.ready_transactions + .iter() + .take(self.config.maximum_ready_transaction_count as usize) + .map(|id| CandidateTransaction::from_mutable(&self.all_transactions.get(id).unwrap().mtx)) .collect() } + /// Is the mempool transaction identified by `transaction_id` unchained, thus having no successor? + pub(crate) fn transaction_is_unchained(&self, transaction_id: &TransactionId) -> bool { + if self.all_transactions.contains_key(transaction_id) { + if let Some(chains) = self.chained_transactions.get(transaction_id) { + return chains.is_empty(); + } + return true; + } + false + } /// Returns the exceeding low-priority transactions having the lowest fee rates in order - /// to have room for at least `free_slots` new transactions. + /// to have room for at least `free_slots` new transactions. The returned transactions + /// are guaranteed to be unchained (no successor in mempool) and to not be parent of + /// `transaction`. /// /// An error is returned if the mempool is filled with high priority transactions. - pub(crate) fn limit_transaction_count(&self, free_slots: usize) -> RuleResult> { + pub(crate) fn limit_transaction_count( + &self, + free_slots: usize, + transaction: &MutableTransaction, + ) -> RuleResult> { + assert!(free_slots > 0); // Returns a vector of transactions to be removed that the caller has to remove actually. // The caller is golang validateAndInsertTransaction equivalent. // This behavior differs from golang impl. 
- let mut transactions_to_remove = Vec::new(); - if self.len() + free_slots > self.config.maximum_transaction_count as usize { + let trim_size = self.len() + free_slots - usize::min(self.len() + free_slots, self.config.maximum_transaction_count as usize); + let mut transactions_to_remove = Vec::with_capacity(trim_size); + if trim_size > 0 { // TODO: consider introducing an index on all_transactions low-priority items instead. // // Sorting this vector here may be sub-optimal compared with maintaining a sorted // index of all_transactions low-priority items if the proportion of low-priority txs // in all_transactions is important. - let mut low_priority_txs = self.all_transactions.values().filter(|x| x.priority == Priority::Low).collect::>(); - - if !low_priority_txs.is_empty() { - low_priority_txs.sort_by(|a, b| a.fee_rate().partial_cmp(&b.fee_rate()).unwrap()); - transactions_to_remove.extend_from_slice( - &low_priority_txs[0..usize::min( - self.len() + free_slots - self.config.maximum_transaction_count as usize, - low_priority_txs.len(), - )], - ); + let low_priority_txs = self + .all_transactions + .values() + .filter(|x| x.priority == Priority::Low && self.transaction_is_unchained(&x.id()) && !x.is_parent_of(transaction)); + + if trim_size == 1 { + // This is the most likely case. Here we just search the minimum, thus avoiding the need to sort altogether. + if let Some(tx) = low_priority_txs.min_by(|a, b| a.fee_rate().partial_cmp(&b.fee_rate()).unwrap()) { + transactions_to_remove.push(tx); + } + } else { + let mut low_priority_txs = low_priority_txs.collect::>(); + if low_priority_txs.len() > trim_size { + low_priority_txs.sort_by(|a, b| a.fee_rate().partial_cmp(&b.fee_rate()).unwrap()); + transactions_to_remove.extend_from_slice(&low_priority_txs[0..usize::min(trim_size, low_priority_txs.len())]); + } else { + transactions_to_remove = low_priority_txs; + } } } - // An error is returned if the mempool is filled with high priority transactions. 
+ // An error is returned if the mempool is filled with high priority and other unremovable transactions. let tx_count = self.len() + free_slots - transactions_to_remove.len(); if tx_count as u64 > self.config.maximum_transaction_count { let err = RuleError::RejectMempoolIsFull(tx_count - free_slots, self.config.maximum_transaction_count); @@ -248,8 +237,8 @@ Ok(transactions_to_remove.iter().map(|x| x.id()).collect()) } - pub(crate) fn get_all_transactions(&self) -> Vec { - self.all().values().map(|x| x.mtx.clone()).collect() + pub(crate) fn all_transaction_ids_with_priority(&self, priority: Priority) -> Vec { + self.all().values().filter_map(|x| if x.priority == priority { Some(x.id()) } else { None }).collect() } pub(crate) fn get_outpoint_owner_id(&self, outpoint: &TransactionOutpoint) -> Option<&TransactionId> { @@ -260,9 +249,31 @@ self.utxo_set.check_double_spends(transaction) } - pub(crate) fn remove_transaction_utxos(&mut self, transaction: &MutableTransaction) { - let parent_ids = self.get_parent_transaction_ids_in_pool(transaction); - self.utxo_set.remove_transaction(transaction, &parent_ids) + pub(crate) fn collect_expired_low_priority_transactions(&mut self, virtual_daa_score: u64) -> Vec { + let now = unix_now(); + if virtual_daa_score < self.last_expire_scan_daa_score + self.config.transaction_expire_scan_interval_daa_score + || now < self.last_expire_scan_time + self.config.transaction_expire_scan_interval_milliseconds + { + return vec![]; + } + + self.last_expire_scan_daa_score = virtual_daa_score; + self.last_expire_scan_time = now; + + // Never expire high priority transactions + // Remove all transactions whose added_at_daa_score is older than transaction_expire_interval_daa_score + self.all_transactions + .values() + .filter_map(|x| { + if (x.priority == Priority::Low) + && virtual_daa_score > x.added_at_daa_score + self.config.transaction_expire_interval_daa_score + { + Some(x.id()) + } else { + 
None + } + }) + .collect() } } @@ -285,18 +296,12 @@ impl Pool for TransactionsPool { &self.all_transactions } - #[inline] - fn all_mut(&mut self) -> &mut MempoolTransactionCollection { - &mut self.all_transactions - } - #[inline] fn chained(&self) -> &TransactionsEdges { &self.chained_transactions } - #[inline] - fn chained_mut(&mut self) -> &mut TransactionsEdges { - &mut self.chained_transactions + fn get_mut(&mut self, transaction_id: &TransactionId) -> Option<&mut MempoolTransaction> { + self.all_transactions.get_mut(transaction_id) } } diff --git a/mining/src/mempool/model/tx.rs b/mining/src/mempool/model/tx.rs index 1cc0611745..6d07da67f0 100644 --- a/mining/src/mempool/model/tx.rs +++ b/mining/src/mempool/model/tx.rs @@ -1,6 +1,9 @@ use crate::mempool::tx::Priority; use kaspa_consensus_core::{tx::MutableTransaction, tx::TransactionId}; -use std::cmp::Ordering; +use std::{ + cmp::Ordering, + fmt::{Display, Formatter}, +}; pub(crate) struct MempoolTransaction { pub(crate) mtx: MutableTransaction, @@ -21,6 +24,11 @@ impl MempoolTransaction { pub(crate) fn fee_rate(&self) -> f64 { self.mtx.calculated_fee.unwrap() as f64 / self.mtx.calculated_mass.unwrap() as f64 } + + pub(crate) fn is_parent_of(&self, transaction: &MutableTransaction) -> bool { + let parent_id = self.id(); + transaction.tx.inputs.iter().any(|x| x.previous_outpoint.transaction_id == parent_id) + } } impl Ord for MempoolTransaction { @@ -42,3 +50,40 @@ impl PartialEq for MempoolTransaction { self.fee_rate() == other.fee_rate() } } + +#[derive(PartialEq, Eq)] +pub(crate) enum TxRemovalReason { + Muted, + Accepted, + MakingRoom, + Unorphaned, + Expired, + DoubleSpend, + InvalidInBlockTemplate, + RevalidationWithMissingOutpoints, +} + +impl TxRemovalReason { + pub(crate) fn as_str(&self) -> &'static str { + match self { + TxRemovalReason::Muted => "", + TxRemovalReason::Accepted => "accepted", + TxRemovalReason::MakingRoom => "making room", + TxRemovalReason::Unorphaned => "unorphaned", + 
TxRemovalReason::Expired => "expired", + TxRemovalReason::DoubleSpend => "double spend", + TxRemovalReason::InvalidInBlockTemplate => "invalid in block template", + TxRemovalReason::RevalidationWithMissingOutpoints => "revalidation with missing outpoints", + } + } + + pub(crate) fn verbose(&self) -> bool { + !matches!(self, TxRemovalReason::Muted) + } +} + +impl Display for TxRemovalReason { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + f.write_str(self.as_str()) + } +} diff --git a/mining/src/mempool/model/utxo_set.rs b/mining/src/mempool/model/utxo_set.rs index d622fe44fb..38c2bcb4ee 100644 --- a/mining/src/mempool/model/utxo_set.rs +++ b/mining/src/mempool/model/utxo_set.rs @@ -68,7 +68,7 @@ impl MempoolUtxoSet { self.outpoint_owner_id.get(outpoint) } - /// Make sure no other transaction in the mempool is already spending an output one of this transaction inputs spends + /// Make sure no other transaction in the mempool is already spending an output which one of this transaction inputs spends pub(crate) fn check_double_spends(&self, transaction: &MutableTransaction) -> RuleResult<()> { let transaction_id = transaction.id(); for input in transaction.tx.inputs.iter() { diff --git a/mining/src/mempool/populate_entries_and_try_validate.rs b/mining/src/mempool/populate_entries_and_try_validate.rs index 615bbf491f..0c0dcf9a1e 100644 --- a/mining/src/mempool/populate_entries_and_try_validate.rs +++ b/mining/src/mempool/populate_entries_and_try_validate.rs @@ -1,24 +1,9 @@ use crate::mempool::{errors::RuleResult, model::pool::Pool, Mempool}; use kaspa_consensus_core::{api::ConsensusApi, constants::UNACCEPTED_DAA_SCORE, tx::MutableTransaction, tx::UtxoEntry}; +use kaspa_mining_errors::mempool::RuleError; impl Mempool { - pub(crate) fn populate_entries_and_try_validate( - &self, - consensus: &dyn ConsensusApi, - transaction: &mut MutableTransaction, - ) -> RuleResult<()> { - // Rust rewrite note: - // Neither parentsInPool nor missingOutpoints are 
actually used or needed by the - // callers so we neither build nor return them. - // parentsInPool is now built by transactions_pool::add_mempool_transaction. - // missingOutpoints is reduced to a simple ConsensusError::TxMissingOutpoints. - - self.populate_mempool_entries(transaction); - consensus.validate_mempool_transaction_and_populate(transaction)?; - Ok(()) - } - - fn populate_mempool_entries(&self, transaction: &mut MutableTransaction) { + pub(crate) fn populate_mempool_entries(&self, transaction: &mut MutableTransaction) { for (i, input) in transaction.tx.inputs.iter().enumerate() { if let Some(parent) = self.transaction_pool.get(&input.previous_outpoint.transaction_id) { let output = &parent.mtx.tx.outputs[input.previous_outpoint.index as usize]; @@ -28,3 +13,21 @@ impl Mempool { } } } + +pub(crate) fn validate_mempool_transaction(consensus: &dyn ConsensusApi, transaction: &mut MutableTransaction) -> RuleResult<()> { + Ok(consensus.validate_mempool_transaction(transaction)?) +} + +pub(crate) fn validate_mempool_transactions_in_parallel( + consensus: &dyn ConsensusApi, + transactions: &mut [MutableTransaction], +) -> Vec> { + consensus.validate_mempool_transactions_in_parallel(transactions).into_iter().map(|x| x.map_err(RuleError::from)).collect() +} + +pub(crate) fn populate_mempool_transactions_in_parallel( + consensus: &dyn ConsensusApi, + transactions: &mut [MutableTransaction], +) -> Vec> { + consensus.populate_mempool_transactions_in_parallel(transactions).into_iter().map(|x| x.map_err(RuleError::from)).collect() +} diff --git a/mining/src/mempool/remove_transaction.rs b/mining/src/mempool/remove_transaction.rs index fb71b3671e..960ebc264b 100644 --- a/mining/src/mempool/remove_transaction.rs +++ b/mining/src/mempool/remove_transaction.rs @@ -1,38 +1,70 @@ -use crate::mempool::{errors::RuleResult, model::pool::Pool, Mempool}; +use crate::mempool::{ + errors::RuleResult, + model::{pool::Pool, tx::TxRemovalReason}, + Mempool, +}; use 
kaspa_consensus_core::tx::TransactionId; +use kaspa_core::{debug, warn}; +use kaspa_utils::iter::IterExtensions; impl Mempool { - pub(crate) fn remove_transaction(&mut self, transaction_id: &TransactionId, remove_redeemers: bool) -> RuleResult<()> { + pub(crate) fn remove_transaction( + &mut self, + transaction_id: &TransactionId, + remove_redeemers: bool, + reason: TxRemovalReason, + extra_info: &str, + ) -> RuleResult<()> { if self.orphan_pool.has(transaction_id) { - return self.orphan_pool.remove_orphan(transaction_id, true).map(|_| ()); + return self.orphan_pool.remove_orphan(transaction_id, true, reason, extra_info).map(|_| ()); } if !self.transaction_pool.has(transaction_id) { return Ok(()); } - let mut transactions_to_remove = vec![*transaction_id]; - let redeemers = self.transaction_pool.get_redeemer_ids_in_pool(transaction_id); + let mut removed_transactions = vec![*transaction_id]; if remove_redeemers { - transactions_to_remove.extend(redeemers); - } else { - redeemers.iter().for_each(|x| { - self.transaction_pool.remove_parent_chained_relation_in_pool(x, transaction_id); - }); + // Add all descendent txs as pending removals + removed_transactions.extend(self.transaction_pool.get_redeemer_ids_in_pool(transaction_id)); } - transactions_to_remove.iter().try_for_each(|x| self.remove_transaction_from_sets(x, remove_redeemers))?; + let mut removed_orphans: Vec = vec![]; + for tx_id in removed_transactions.iter() { + // Remove the tx from the transaction pool and the UTXO set (handled within the pool) + let tx = self.transaction_pool.remove_transaction(tx_id)?; + // Update/remove descendent orphan txs (depending on `remove_redeemers`) + let txs = self.orphan_pool.update_orphans_after_transaction_removed(&tx, remove_redeemers)?; + removed_orphans.extend(txs.into_iter().map(|x| x.id())); + } + removed_transactions.extend(removed_orphans); - if remove_redeemers { - self.orphan_pool.remove_redeemers_of(transaction_id)?; + match reason { + TxRemovalReason::Muted => 
{} + TxRemovalReason::DoubleSpend => match removed_transactions.len() { + 0 => {} + 1 => warn!("Removed transaction ({}) {}{}", reason, removed_transactions[0], extra_info), + n => warn!( + "Removed {} transactions ({}): {}{}", + n, + reason, + removed_transactions.iter().reusable_format(", "), + extra_info + ), + }, + _ => match removed_transactions.len() { + 0 => {} + 1 => debug!("Removed transaction ({}) {}{}", reason, removed_transactions[0], extra_info), + n => debug!( + "Removed {} transactions ({}): {}{}", + n, + reason, + removed_transactions.iter().reusable_format(", "), + extra_info + ), + }, } Ok(()) } - - fn remove_transaction_from_sets(&mut self, transaction_id: &TransactionId, remove_redeemers: bool) -> RuleResult<()> { - let removed_transaction = self.transaction_pool.remove_transaction(transaction_id)?; - self.transaction_pool.remove_transaction_utxos(&removed_transaction.mtx); - self.orphan_pool.update_orphans_after_transaction_removed(&removed_transaction, remove_redeemers) - } } diff --git a/mining/src/mempool/revalidate_high_priority_transactions.rs b/mining/src/mempool/revalidate_high_priority_transactions.rs deleted file mode 100644 index ac64245c03..0000000000 --- a/mining/src/mempool/revalidate_high_priority_transactions.rs +++ /dev/null @@ -1,61 +0,0 @@ -use crate::{ - mempool::{ - errors::{RuleError, RuleResult}, - model::pool::Pool, - Mempool, - }, - model::topological_index::TopologicalIndex, -}; -use kaspa_consensus_core::{ - api::ConsensusApi, - tx::{MutableTransaction, TransactionId}, -}; -use kaspa_core::debug; - -use super::tx::Priority; - -impl Mempool { - pub(crate) fn revalidate_high_priority_transactions(&mut self, consensus: &dyn ConsensusApi) -> RuleResult> { - // First establish a topologically ordered list of all high priority transaction ids - - // Processing the transactions in a parent to chained order guarantees that - // any transaction removal will propagate to all chained dependencies saving - // validations calls to 
consensus. - let ids = self.transaction_pool.index(Priority::High).topological_index()?; - let mut valid_ids = vec![]; - - for transaction_id in ids.iter() { - // Try to take the transaction out of the storage map so we can mutate it with some self functions. - // The redeemers of removed transactions are removed too so the following call may return a None. - if let Some(mut transaction) = self.transaction_pool.all_mut().remove(transaction_id) { - let is_valid = self.revalidate_transaction(consensus, &mut transaction.mtx)?; - // After mutating we can now put the transaction back into the storage map. - // The alternative would be to wrap transactions in the pools with a RefCell. - self.transaction_pool.all_mut().insert(*transaction_id, transaction); - if is_valid { - // A following transaction should not remove this one from the pool since we process - // in topological order - // TODO: consider the scenario of two high priority txs sandwiching a low one, where - // in this case topology order is not guaranteed since we topologically sorted only - // high-priority transactions - valid_ids.push(*transaction_id); - } else { - debug!("Removing transaction {0}, it failed revalidation", transaction_id); - // This call cleanly removes the invalid transaction and its redeemers. 
- self.remove_transaction(transaction_id, true)?; - } - } - } - // Return the successfully processed high priority transaction ids - Ok(valid_ids) - } - - fn revalidate_transaction(&self, consensus: &dyn ConsensusApi, transaction: &mut MutableTransaction) -> RuleResult { - transaction.clear_entries(); - match self.populate_entries_and_try_validate(consensus, transaction) { - Ok(_) => Ok(true), - Err(RuleError::RejectMissingOutpoint) => Ok(false), - Err(err) => Err(err), - } - } -} diff --git a/mining/src/mempool/validate_and_insert_transaction.rs b/mining/src/mempool/validate_and_insert_transaction.rs index 27f4d17f99..d402f9028e 100644 --- a/mining/src/mempool/validate_and_insert_transaction.rs +++ b/mining/src/mempool/validate_and_insert_transaction.rs @@ -1,8 +1,10 @@ -use std::sync::Arc; - use crate::mempool::{ errors::{RuleError, RuleResult}, - model::{pool::Pool, tx::MempoolTransaction}, + model::{ + pool::Pool, + tx::{MempoolTransaction, TxRemovalReason}, + }, + tx::{Orphan, Priority}, Mempool, }; use kaspa_consensus_core::{ @@ -10,42 +12,56 @@ use kaspa_consensus_core::{ constants::{SOMPI_PER_KASPA, UNACCEPTED_DAA_SCORE}, tx::{MutableTransaction, Transaction, TransactionId, TransactionOutpoint, UtxoEntry}, }; -use kaspa_core::info; -use kaspa_utils::vec::VecExtensions; - -use super::tx::{Orphan, Priority}; +use kaspa_core::{debug, info}; +use std::sync::Arc; impl Mempool { - pub(crate) fn validate_and_insert_transaction( - &mut self, + pub(crate) fn pre_validate_and_populate_transaction( + &self, consensus: &dyn ConsensusApi, - transaction: Transaction, - priority: Priority, - orphan: Orphan, - ) -> RuleResult>> { - self.validate_and_insert_mutable_transaction(consensus, MutableTransaction::from_tx(transaction), priority, orphan) + mut transaction: MutableTransaction, + ) -> RuleResult { + self.validate_transaction_unacceptance(&transaction)?; + // Populate mass in the beginning, it will be used in multiple places throughout the validation and insertion. 
+ transaction.calculated_mass = Some(consensus.calculate_transaction_mass(&transaction.tx)); + self.validate_transaction_in_isolation(&transaction)?; + self.transaction_pool.check_double_spends(&transaction)?; + self.populate_mempool_entries(&mut transaction); + Ok(transaction) } - pub(crate) fn validate_and_insert_mutable_transaction( + pub(crate) fn post_validate_and_insert_transaction( &mut self, consensus: &dyn ConsensusApi, - mut transaction: MutableTransaction, + validation_result: RuleResult<()>, + transaction: MutableTransaction, priority: Priority, orphan: Orphan, - ) -> RuleResult>> { - // Populate mass in the beginning, it will be used in multiple places throughout the validation and insertion. - transaction.calculated_mass = Some(consensus.calculate_transaction_mass(&transaction.tx)); + ) -> RuleResult>> { + let transaction_id = transaction.id(); + + // First check if the transaction was not already added to the mempool. + // The case may arise since the execution of the manager public functions is no + // longer atomic and different code paths may lead to inserting the same transaction + // concurrently. 
+ if self.transaction_pool.has(&transaction_id) { + debug!("Transaction {0} is not post validated since already in the mempool", transaction_id); + return Ok(None); + } - self.validate_transaction_pre_utxo_entry(&transaction)?; + self.validate_transaction_unacceptance(&transaction)?; - match self.populate_entries_and_try_validate(consensus, &mut transaction) { + // Re-check double spends since validate_and_insert_transaction is no longer atomic + self.transaction_pool.check_double_spends(&transaction)?; + + match validation_result { Ok(_) => {} Err(RuleError::RejectMissingOutpoint) => { if orphan == Orphan::Forbidden { - return Err(RuleError::RejectDisallowedOrphan(transaction.id())); + return Err(RuleError::RejectDisallowedOrphan(transaction_id)); } - self.orphan_pool.try_add_orphan(consensus, transaction, priority)?; - return Ok(vec![]); + self.orphan_pool.try_add_orphan(consensus.get_virtual_daa_score(), transaction, priority)?; + return Ok(None); } Err(err) => { return Err(err); @@ -55,21 +71,24 @@ impl Mempool { self.validate_transaction_in_context(&transaction)?; // Before adding the transaction, check if there is room in the pool - self.transaction_pool.limit_transaction_count(1)?.iter().try_for_each(|x| self.remove_transaction(x, true))?; + self.transaction_pool.limit_transaction_count(1, &transaction)?.iter().try_for_each(|x| { + self.remove_transaction(x, true, TxRemovalReason::MakingRoom, format!(" for {}", transaction_id).as_str()) + })?; - // Here the accepted transaction is cloned in order to prevent having self borrowed immutably for the - // transaction reference and mutably for the call to process_orphans_after_accepted_transaction + // Add the transaction to the mempool as a MempoolTransaction and return a clone of the embedded Arc let accepted_transaction = self.transaction_pool.add_transaction(transaction, consensus.get_virtual_daa_score(), priority)?.mtx.tx.clone(); - let mut accepted_transactions = 
self.process_orphans_after_accepted_transaction(consensus, &accepted_transaction)?; - // We include the original accepted transaction as well - accepted_transactions.swap_insert(0, accepted_transaction); - Ok(accepted_transactions) + Ok(Some(accepted_transaction)) } - fn validate_transaction_pre_utxo_entry(&self, transaction: &MutableTransaction) -> RuleResult<()> { - self.validate_transaction_in_isolation(transaction)?; - self.transaction_pool.check_double_spends(transaction) + /// Validates that the transaction wasn't already accepted into the DAG + fn validate_transaction_unacceptance(&self, transaction: &MutableTransaction) -> RuleResult<()> { + // Reject if the transaction is registered as an accepted transaction + let transaction_id = transaction.id(); + match self.accepted_transactions.has(&transaction_id) { + true => Err(RuleError::RejectAlreadyAccepted(transaction_id)), + false => Ok(()), + } } fn validate_transaction_in_isolation(&self, transaction: &MutableTransaction) -> RuleResult<()> { @@ -105,36 +124,13 @@ impl Mempool { Ok(()) } - /// Finds all transactions that can be unorphaned after a some transaction - /// has been accepted. Unorphan and add those to the transaction pool. - /// - /// Returns the list of all successfully processed transactions. - pub(crate) fn process_orphans_after_accepted_transaction( - &mut self, - consensus: &dyn ConsensusApi, - accepted_transaction: &Transaction, - ) -> RuleResult>> { - // Rust rewrite: - // - The function is relocated from OrphanPool into Mempool - let unorphaned_transactions = self.get_unorphaned_transactions_after_accepted_transaction(consensus, accepted_transaction)?; - let mut added_transactions = Vec::with_capacity(unorphaned_transactions.len() + 1); // +1 since some callers add the accepted tx itself - for transaction in unorphaned_transactions { - // The returned transactions are leaving the mempool but must also be added to - // the transaction pool so we clone. 
- added_transactions.push(transaction.mtx.tx.clone()); - self.transaction_pool.add_mempool_transaction(transaction)?; - } - Ok(added_transactions) - } - /// Returns a list with all successfully unorphaned transactions after some /// transaction has been accepted. - fn get_unorphaned_transactions_after_accepted_transaction( + pub(crate) fn get_unorphaned_transactions_after_accepted_transaction( &mut self, - consensus: &dyn ConsensusApi, transaction: &Transaction, - ) -> RuleResult> { - let mut accepted_orphans = Vec::new(); + ) -> Vec { + let mut unorphaned_transactions = Vec::new(); let transaction_id = transaction.id(); let mut outpoint = TransactionOutpoint::new(transaction_id, 0); for (i, output) in transaction.outputs.iter().enumerate() { @@ -157,9 +153,13 @@ impl Mempool { continue; } if let Some(orphan_id) = orphan_id { - match self.unorphan_transaction(consensus, &orphan_id) { - Ok(accepted_tx) => { - accepted_orphans.push(accepted_tx); + match self.unorphan_transaction(&orphan_id) { + Ok(unorphaned_tx) => { + unorphaned_transactions.push(unorphaned_tx); + debug!("Transaction {0} unorphaned", transaction_id); + } + Err(RuleError::RejectAlreadyAccepted(transaction_id)) => { + debug!("Ignoring already accepted transaction {}", transaction_id); } Err(err) => { // In case of validation error, we log the problem and drop the @@ -169,30 +169,28 @@ impl Mempool { } } } - Ok(accepted_orphans) + + unorphaned_transactions } - fn unorphan_transaction( - &mut self, - consensus: &dyn ConsensusApi, - transaction_id: &TransactionId, - ) -> RuleResult { + fn unorphan_transaction(&mut self, transaction_id: &TransactionId) -> RuleResult { // Rust rewrite: // - Instead of adding the validated transaction to mempool transaction pool, // we return it. - // - The function is relocated from OrphanPool into Mempool + // - The function is relocated from OrphanPool into Mempool. + // - The function no longer validates the transaction in mempool (signatures) nor in context. 
+ // This job is delegated to a fn called later in the process (Manager::validate_and_insert_unorphaned_transactions). // Remove the transaction identified by transaction_id from the orphan pool. - let mut transactions = self.orphan_pool.remove_orphan(transaction_id, false)?; + let mut transactions = self.orphan_pool.remove_orphan(transaction_id, false, TxRemovalReason::Unorphaned, "")?; - // At this point, `transactions` contain exactly one transaction. + // At this point, `transactions` contains exactly one transaction. // The one we just removed from the orphan pool. assert_eq!(transactions.len(), 1, "the list returned by remove_orphan is expected to contain exactly one transaction"); - let mut transaction = transactions.pop().unwrap(); + let transaction = transactions.pop().unwrap(); - consensus.validate_mempool_transaction_and_populate(&mut transaction.mtx)?; - self.validate_transaction_in_context(&transaction.mtx)?; - transaction.added_at_daa_score = consensus.get_virtual_daa_score(); + self.validate_transaction_unacceptance(&transaction.mtx)?; + self.transaction_pool.check_double_spends(&transaction.mtx)?; Ok(transaction) } } diff --git a/mining/src/model/mod.rs b/mining/src/model/mod.rs index c53ad12e12..482cc82f11 100644 --- a/mining/src/model/mod.rs +++ b/mining/src/model/mod.rs @@ -4,6 +4,7 @@ use std::collections::HashSet; pub(crate) mod candidate_tx; pub mod owner_txs; pub mod topological_index; +pub mod topological_sort; /// A set of unique transaction ids pub type TransactionIdSet = HashSet; diff --git a/mining/src/model/topological_sort.rs b/mining/src/model/topological_sort.rs new file mode 100644 index 0000000000..aa88cce023 --- /dev/null +++ b/mining/src/model/topological_sort.rs @@ -0,0 +1,284 @@ +use itertools::Itertools; +use kaspa_consensus_core::tx::Transaction; +use std::{ + collections::{HashMap, HashSet, VecDeque}, + iter::{FusedIterator, Map}, +}; + +type IndexSet = HashSet; + +pub trait TopologicalSort { + fn topological_sort(self) -> 
Self + where + Self: Sized; +} + +impl + Clone> TopologicalSort for Vec { + fn topological_sort(self) -> Self { + let mut sorted = Vec::with_capacity(self.len()); + let mut in_degree: Vec = vec![0; self.len()]; + + // Index on transaction ids + let mut index = HashMap::with_capacity(self.len()); + self.iter().enumerate().for_each(|(idx, tx)| { + let _ = index.insert(tx.as_ref().id(), idx); + }); + + // Transaction edges + let mut all_edges: Vec> = vec![None; self.len()]; + self.iter().enumerate().for_each(|(destination_idx, tx)| { + tx.as_ref().inputs.iter().for_each(|input| { + if let Some(origin_idx) = index.get(&input.previous_outpoint.transaction_id) { + all_edges[*origin_idx].get_or_insert_with(IndexSet::new).insert(destination_idx); + } + }) + }); + + // Degrees + (0..self.len()).for_each(|origin_idx| { + if let Some(ref edges) = all_edges[origin_idx] { + edges.iter().for_each(|destination_idx| { + in_degree[*destination_idx] += 1; + }); + } + }); + + // Degree 0 + let mut queue = VecDeque::with_capacity(self.len()); + (0..self.len()).for_each(|destination_idx| { + if in_degree[destination_idx] == 0 { + queue.push_back(destination_idx); + } + }); + + // Sorted transactions + while !queue.is_empty() { + let current = queue.pop_front().unwrap(); + if let Some(ref edges) = all_edges[current] { + edges.iter().for_each(|destination_idx| { + let degree = in_degree.get_mut(*destination_idx).unwrap(); + *degree -= 1; + if *degree == 0 { + queue.push_back(*destination_idx); + } + }); + } + sorted.push(self[current].clone()); + } + assert_eq!(sorted.len(), self.len(), "by definition, cryptographically no cycle can exist in a DAG of transactions"); + + sorted + } +} + +pub trait IterTopologically +where + T: AsRef, +{ + fn topological_iter(&self) -> TopologicalIter<'_, T>; +} + +impl> IterTopologically for &[T] { + fn topological_iter(&self) -> TopologicalIter<'_, T> { + TopologicalIter::new(self) + } +} + +impl> IterTopologically for Vec { + fn topological_iter(&self) 
-> TopologicalIter<'_, T> { + TopologicalIter::new(self) + } +} + +pub struct TopologicalIter<'a, T: AsRef> { + transactions: &'a [T], + in_degree: Vec, + edges: Vec>, + queue: VecDeque, + yields_count: usize, +} + +impl<'a, T: AsRef> TopologicalIter<'a, T> { + pub fn new(transactions: &'a [T]) -> Self { + let mut in_degree: Vec = vec![0; transactions.len()]; + + // Index on transaction ids + let mut index = HashMap::with_capacity(transactions.len()); + transactions.iter().enumerate().for_each(|(idx, tx)| { + let _ = index.insert(tx.as_ref().id(), idx); + }); + + // Transaction edges + let mut edges: Vec> = vec![None; transactions.len()]; + transactions.iter().enumerate().for_each(|(destination_idx, tx)| { + tx.as_ref().inputs.iter().for_each(|input| { + if let Some(origin_idx) = index.get(&input.previous_outpoint.transaction_id) { + edges[*origin_idx].get_or_insert_with(IndexSet::new).insert(destination_idx); + } + }) + }); + + // Degrees + (0..transactions.len()).for_each(|origin_idx| { + if let Some(ref edges) = edges[origin_idx] { + edges.iter().for_each(|destination_idx| { + in_degree[*destination_idx] += 1; + }); + } + }); + + // Degree 0 + let mut queue = VecDeque::with_capacity(transactions.len()); + (0..transactions.len()).for_each(|destination_idx| { + if in_degree[destination_idx] == 0 { + queue.push_back(destination_idx); + } + }); + Self { transactions, in_degree, edges, queue, yields_count: 0 } + } +} + +impl<'a, T: AsRef> Iterator for TopologicalIter<'a, T> { + type Item = &'a T; + + fn next(&mut self) -> Option { + match self.queue.pop_front() { + Some(current) => { + if let Some(ref edges) = self.edges[current] { + edges.iter().for_each(|destination_idx| { + let degree = self.in_degree.get_mut(*destination_idx).unwrap(); + *degree -= 1; + if *degree == 0 { + self.queue.push_back(*destination_idx); + } + }); + } + self.yields_count += 1; + Some(&self.transactions[current]) + } + None => None, + } + } + + fn size_hint(&self) -> (usize, Option) { + 
let items_remaining = self.transactions.len() - self.yields_count.min(self.transactions.len()); + (self.yields_count, Some(items_remaining)) + } +} + +impl<'a, T: AsRef> FusedIterator for TopologicalIter<'a, T> {} +impl<'a, T: AsRef> ExactSizeIterator for TopologicalIter<'a, T> { + fn len(&self) -> usize { + self.transactions.len() + } +} + +pub trait IntoIterTopologically +where + T: AsRef, +{ + fn topological_into_iter(self) -> TopologicalIntoIter; +} + +impl> IntoIterTopologically for Vec { + fn topological_into_iter(self) -> TopologicalIntoIter { + TopologicalIntoIter::new(self) + } +} + +impl IntoIterTopologically for Map +where + T: AsRef, + I: Iterator, + F: FnMut(::Item) -> T, +{ + fn topological_into_iter(self) -> TopologicalIntoIter { + TopologicalIntoIter::new(self) + } +} + +pub struct TopologicalIntoIter> { + transactions: Vec>, + in_degree: Vec, + edges: Vec>, + queue: VecDeque, + yields_count: usize, +} + +impl> TopologicalIntoIter { + pub fn new(transactions: impl IntoIterator) -> Self { + // Collect all transactions + let transactions = transactions.into_iter().map(|tx| Some(tx)).collect_vec(); + + let mut in_degree: Vec = vec![0; transactions.len()]; + + // Index on transaction ids + let mut index = HashMap::with_capacity(transactions.len()); + transactions.iter().enumerate().for_each(|(idx, tx)| { + let _ = index.insert(tx.as_ref().unwrap().as_ref().id(), idx); + }); + + // Transaction edges + let mut edges: Vec> = vec![None; transactions.len()]; + transactions.iter().enumerate().for_each(|(destination_idx, tx)| { + tx.as_ref().unwrap().as_ref().inputs.iter().for_each(|input| { + if let Some(origin_idx) = index.get(&input.previous_outpoint.transaction_id) { + edges[*origin_idx].get_or_insert_with(IndexSet::new).insert(destination_idx); + } + }) + }); + + // Degrees + (0..transactions.len()).for_each(|origin_idx| { + if let Some(ref edges) = edges[origin_idx] { + edges.iter().for_each(|destination_idx| { + in_degree[*destination_idx] += 1; + }); + 
} + }); + + // Degree 0 + let mut queue = VecDeque::with_capacity(transactions.len()); + (0..transactions.len()).for_each(|destination_idx| { + if in_degree[destination_idx] == 0 { + queue.push_back(destination_idx); + } + }); + Self { transactions, in_degree, edges, queue, yields_count: 0 } + } +} + +impl> Iterator for TopologicalIntoIter { + type Item = T; + + fn next(&mut self) -> Option { + match self.queue.pop_front() { + Some(current) => { + if let Some(ref edges) = self.edges[current] { + edges.iter().for_each(|destination_idx| { + let degree = self.in_degree.get_mut(*destination_idx).unwrap(); + *degree -= 1; + if *degree == 0 { + self.queue.push_back(*destination_idx); + } + }); + } + self.yields_count += 1; + self.transactions[current].take() + } + None => None, + } + } + + fn size_hint(&self) -> (usize, Option) { + let items_remaining = self.transactions.len() - self.yields_count.min(self.transactions.len()); + (self.yields_count, Some(items_remaining)) + } +} + +impl> FusedIterator for TopologicalIntoIter {} +impl> ExactSizeIterator for TopologicalIntoIter { + fn len(&self) -> usize { + self.transactions.len() + } +} diff --git a/mining/src/monitor.rs b/mining/src/monitor.rs new file mode 100644 index 0000000000..517bd82763 --- /dev/null +++ b/mining/src/monitor.rs @@ -0,0 +1,114 @@ +use super::MiningCounters; +use kaspa_core::{ + debug, info, + task::{ + service::{AsyncService, AsyncServiceFuture}, + tick::{TickReason, TickService}, + }, + trace, +}; +use kaspa_txscript::caches::TxScriptCacheCounters; +use std::{sync::Arc, time::Duration}; + +const MONITOR: &str = "mempool-monitor"; + +pub struct MiningMonitor { + // Counters + counters: Arc, + + tx_script_cache_counters: Arc, + + // Tick service + tick_service: Arc, +} + +impl MiningMonitor { + pub fn new( + counters: Arc, + tx_script_cache_counters: Arc, + tick_service: Arc, + ) -> MiningMonitor { + MiningMonitor { counters, tx_script_cache_counters, tick_service } + } + + pub async fn worker(self: 
&Arc) { + let mut last_snapshot = self.counters.snapshot(); + let mut last_tx_script_cache_snapshot = self.tx_script_cache_counters.snapshot(); + let snapshot_interval = 10; + loop { + if let TickReason::Shutdown = self.tick_service.tick(Duration::from_secs(snapshot_interval)).await { + // Let the system print final logs before exiting + tokio::time::sleep(Duration::from_millis(500)).await; + break; + } + + let snapshot = self.counters.snapshot(); + let tx_script_cache_snapshot = self.tx_script_cache_counters.snapshot(); + if snapshot == last_snapshot { + // No update, avoid printing useless info + continue; + } + + // Subtract the snapshots + let delta = &snapshot - &last_snapshot; + let tx_script_cache_delta = &tx_script_cache_snapshot - &last_tx_script_cache_snapshot; + + if delta.has_tps_activity() { + info!( + "Tx throughput stats: {:.2} u-tps, {:.2}% e-tps (in: {} via RPC, {} via P2P, out: {} via accepted blocks)", + delta.u_tps(), + delta.e_tps() * 100.0, + delta.high_priority_tx_counts, + delta.low_priority_tx_counts, + delta.tx_accepted_counts, + ); + } + if tx_script_cache_snapshot != last_tx_script_cache_snapshot { + debug!( + "UTXO set stats: {} spent, {} created ({} signatures validated, {} cache hits, {:.2} hit ratio)", + delta.input_counts, + delta.output_counts, + tx_script_cache_delta.insert_counts, + tx_script_cache_delta.get_counts, + tx_script_cache_delta.hit_ratio() + ); + } + if delta.txs_sample + delta.orphans_sample > 0 { + debug!( + "Mempool sample: {} ready out of {} txs, {} orphans, {} cached as accepted", + delta.ready_txs_sample, delta.txs_sample, delta.orphans_sample, delta.accepted_sample + ); + } + + last_snapshot = snapshot; + last_tx_script_cache_snapshot = tx_script_cache_snapshot; + } + + trace!("mempool monitor thread exiting"); + } +} + +// service trait implementation for Monitor +impl AsyncService for MiningMonitor { + fn ident(self: Arc) -> &'static str { + MONITOR + } + + fn start(self: Arc) -> AsyncServiceFuture { + 
Box::pin(async move { + self.worker().await; + Ok(()) + }) + } + + fn signal_exit(self: Arc) { + trace!("sending an exit signal to {}", MONITOR); + } + + fn stop(self: Arc) -> AsyncServiceFuture { + Box::pin(async move { + trace!("{} stopped", MONITOR); + Ok(()) + }) + } +} diff --git a/mining/src/testutils/consensus_mock.rs b/mining/src/testutils/consensus_mock.rs index d9af3bdc29..ecf5319e0a 100644 --- a/mining/src/testutils/consensus_mock.rs +++ b/mining/src/testutils/consensus_mock.rs @@ -1,7 +1,7 @@ use super::coinbase_mock::CoinbaseManagerMock; use kaspa_consensus_core::{ api::ConsensusApi, - block::{BlockTemplate, MutableBlock}, + block::{BlockTemplate, MutableBlock, TemplateBuildMode, TemplateTransactionSelector}, coinbase::MinerData, constants::BLOCK_VERSION, errors::{ @@ -72,7 +72,13 @@ impl ConsensusMock { } impl ConsensusApi for ConsensusMock { - fn build_block_template(&self, miner_data: MinerData, mut txs: Vec) -> Result { + fn build_block_template( + &self, + miner_data: MinerData, + mut tx_selector: Box, + _build_mode: TemplateBuildMode, + ) -> Result { + let mut txs = tx_selector.select_transactions(); let coinbase_manager = CoinbaseManagerMock::new(); let coinbase = coinbase_manager.expected_coinbase_transaction(miner_data.clone()); txs.insert(0, coinbase.tx); @@ -97,7 +103,7 @@ impl ConsensusApi for ConsensusMock { Ok(BlockTemplate::new(mutable_block, miner_data, coinbase.has_red_reward, now, 0)) } - fn validate_mempool_transaction_and_populate(&self, mutable_tx: &mut MutableTransaction) -> TxResult<()> { + fn validate_mempool_transaction(&self, mutable_tx: &mut MutableTransaction) -> TxResult<()> { // If a predefined status was registered to simulate an error, return it right away if let Some(status) = self.statuses.read().get(&mutable_tx.id()) { if status.is_err() { @@ -129,6 +135,14 @@ impl ConsensusApi for ConsensusMock { Ok(()) } + fn validate_mempool_transactions_in_parallel(&self, transactions: &mut [MutableTransaction]) -> Vec> { + 
transactions.iter_mut().map(|x| self.validate_mempool_transaction(x)).collect() + } + + fn populate_mempool_transactions_in_parallel(&self, transactions: &mut [MutableTransaction]) -> Vec> { + transactions.iter_mut().map(|x| self.validate_mempool_transaction(x)).collect() + } + fn calculate_transaction_mass(&self, transaction: &Transaction) -> u64 { if transaction.is_coinbase() { 0 diff --git a/protocol/flows/src/flow_context.rs b/protocol/flows/src/flow_context.rs index 5c77eb671c..642977d027 100644 --- a/protocol/flows/src/flow_context.rs +++ b/protocol/flows/src/flow_context.rs @@ -354,14 +354,16 @@ impl FlowContext { /// _GO-KASPAD: OnNewBlock + broadcastTransactionsAfterBlockAdded_ pub async fn on_new_block(&self, consensus: &ConsensusProxy, block: Block) -> Result<(), ProtocolError> { let hash = block.hash(); - let blocks = self.unorphan_blocks(consensus, hash).await; + let mut blocks = self.unorphan_blocks(consensus, hash).await; + // Process blocks in topological order + blocks.sort_by(|a, b| a.header.blue_work.partial_cmp(&b.header.blue_work).unwrap()); // Use a ProcessQueue so we get rid of duplicates let mut transactions_to_broadcast = ProcessQueue::new(); for block in once(block).chain(blocks.into_iter()) { transactions_to_broadcast.enqueue_chunk( self.mining_manager() .clone() - .handle_new_block_transactions(consensus, block.transactions.clone()) + .handle_new_block_transactions(consensus, block.header.daa_score, block.transactions.clone()) .await? .iter() .map(|x| x.id()), @@ -373,9 +375,30 @@ impl FlowContext { return Ok(()); } - if self.should_rebroadcast_transactions().await { - transactions_to_broadcast - .enqueue_chunk(self.mining_manager().clone().revalidate_high_priority_transactions(consensus).await?.into_iter()); + if self.should_run_mempool_scanning_task().await { + // Spawn a task executing the removal of expired low priority transactions and, if time has come too, + // the revalidation of high priority transactions. 
+ // + // The TransactionSpread member ensures at most one instance of this task is running at any + // given time. + let mining_manager = self.mining_manager().clone(); + let consensus_clone = consensus.clone(); + let context = self.clone(); + debug!("<> Starting mempool scanning task #{}...", self.mempool_scanning_job_count().await); + tokio::spawn(async move { + mining_manager.clone().expire_low_priority_transactions(&consensus_clone).await; + if context.should_rebroadcast().await { + let (tx, mut rx) = unbounded_channel(); + tokio::spawn(async move { + mining_manager.revalidate_high_priority_transactions(&consensus_clone, tx).await; + }); + while let Some(transactions) = rx.recv().await { + let _ = context.broadcast_transactions(transactions).await; + } + } + context.mempool_scanning_is_done().await; + debug!("<> Mempool scanning task is done"); + }); } self.broadcast_transactions(transactions_to_broadcast).await @@ -419,11 +442,22 @@ impl FlowContext { self.broadcast_transactions(accepted_transactions.iter().map(|x| x.id())).await } - /// Returns true if the time for a rebroadcast of the mempool high priority transactions has come. - /// - /// If true, the instant of the call is registered as the last rebroadcast time. - pub async fn should_rebroadcast_transactions(&self) -> bool { - self.transactions_spread.write().await.should_rebroadcast_transactions() + /// Returns true if the time has come for running the task cleaning mempool transactions. + async fn should_run_mempool_scanning_task(&self) -> bool { + self.transactions_spread.write().await.should_run_mempool_scanning_task() + } + + /// Returns true if the time has come for a rebroadcast of the mempool high priority transactions. 
+ async fn should_rebroadcast(&self) -> bool { + self.transactions_spread.read().await.should_rebroadcast() + } + + async fn mempool_scanning_job_count(&self) -> u64 { + self.transactions_spread.read().await.mempool_scanning_job_count() + } + + async fn mempool_scanning_is_done(&self) { + self.transactions_spread.write().await.mempool_scanning_is_done() } /// Add the given transactions IDs to a set of IDs to broadcast. The IDs will be broadcasted to all peers diff --git a/protocol/flows/src/flowcontext/transactions.rs b/protocol/flows/src/flowcontext/transactions.rs index 4a43789893..20851c8d6e 100644 --- a/protocol/flows/src/flowcontext/transactions.rs +++ b/protocol/flows/src/flowcontext/transactions.rs @@ -10,34 +10,66 @@ use kaspa_p2p_lib::{ }; use std::time::{Duration, Instant}; -const REBROADCAST_INTERVAL: Duration = Duration::from_secs(30); +/// Interval between mempool scanning tasks (in seconds) +const SCANNING_TASK_INTERVAL: u64 = 10; +const REBROADCAST_FREQUENCY: u64 = 3; const BROADCAST_INTERVAL: Duration = Duration::from_millis(500); pub(crate) const MAX_INV_PER_TX_INV_MSG: usize = 131_072; pub struct TransactionsSpread { hub: Hub, - last_rebroadcast_time: Instant, + last_scanning_time: Instant, + scanning_task_running: bool, + scanning_job_count: u64, transaction_ids: ProcessQueue, last_broadcast_time: Instant, } impl TransactionsSpread { pub fn new(hub: Hub) -> Self { - Self { hub, last_rebroadcast_time: Instant::now(), transaction_ids: ProcessQueue::new(), last_broadcast_time: Instant::now() } + Self { + hub, + last_scanning_time: Instant::now(), + scanning_task_running: false, + scanning_job_count: 0, + transaction_ids: ProcessQueue::new(), + last_broadcast_time: Instant::now(), + } } - /// Returns true if the time for a rebroadcast of the mempool high priority transactions has come. - /// - /// If true, the instant of the call is registered as the last rebroadcast time. 
- pub fn should_rebroadcast_transactions(&mut self) -> bool { + /// Returns true if the time has come for running the task of scanning mempool transactions + /// and if so, mark the task as running. + pub fn should_run_mempool_scanning_task(&mut self) -> bool { let now = Instant::now(); - if now - self.last_rebroadcast_time < REBROADCAST_INTERVAL { + if self.scanning_task_running || now < self.last_scanning_time + Duration::from_secs(SCANNING_TASK_INTERVAL) { return false; } - self.last_rebroadcast_time = now; + let delta = now.checked_duration_since(self.last_scanning_time).expect("verified above"); + // Keep the launching times aligned to exact intervals. Note that `delta=10.1` seconds will result in + // adding 10 seconds to last scan time, while `delta=11` will result in adding 20 (assuming scanning + // interval is 10 seconds). + self.last_scanning_time += + Duration::from_secs(((delta.as_secs() + SCANNING_TASK_INTERVAL - 1) / SCANNING_TASK_INTERVAL) * SCANNING_TASK_INTERVAL); + + self.scanning_job_count += 1; + self.scanning_task_running = true; true } + /// Returns true if the time for a rebroadcast of the mempool high priority transactions has come. + pub fn should_rebroadcast(&self) -> bool { + self.scanning_job_count % REBROADCAST_FREQUENCY == 0 + } + + pub fn mempool_scanning_job_count(&self) -> u64 { + self.scanning_job_count + } + + pub fn mempool_scanning_is_done(&mut self) { + assert!(self.scanning_task_running, "no stop without a matching start"); + self.scanning_task_running = false; + } + /// Add the given transactions IDs to a set of IDs to broadcast. The IDs will be broadcasted to all peers /// within transaction Inv messages. 
/// @@ -53,7 +85,7 @@ impl TransactionsSpread { self.transaction_ids.enqueue_chunk(transaction_ids); let now = Instant::now(); - if now - self.last_broadcast_time < BROADCAST_INTERVAL && self.transaction_ids.len() < MAX_INV_PER_TX_INV_MSG { + if now < self.last_broadcast_time + BROADCAST_INTERVAL && self.transaction_ids.len() < MAX_INV_PER_TX_INV_MSG { return Ok(()); } diff --git a/protocol/flows/src/v5/blockrelay/flow.rs b/protocol/flows/src/v5/blockrelay/flow.rs index 236a34bf6d..02551c2242 100644 --- a/protocol/flows/src/v5/blockrelay/flow.rs +++ b/protocol/flows/src/v5/blockrelay/flow.rs @@ -154,8 +154,8 @@ impl HandleRelayInvsFlow { } self.ctx.log_block_acceptance(inv.hash, BlockSource::Relay); - self.ctx.on_new_block_template().await?; self.ctx.on_new_block(&session, block).await?; + self.ctx.on_new_block_template().await?; // Broadcast all *new* virtual parents. As a policy, we avoid directly relaying the new block since // we wish to relay only blocks who entered past(virtual). diff --git a/protocol/flows/src/v5/txrelay/flow.rs b/protocol/flows/src/v5/txrelay/flow.rs index 253cb1b2cb..b1a8b741f3 100644 --- a/protocol/flows/src/v5/txrelay/flow.rs +++ b/protocol/flows/src/v5/txrelay/flow.rs @@ -107,12 +107,11 @@ impl RelayTransactionsFlow { ) -> Result>, ProtocolError> { // Build a vector with the transaction ids unknown in the mempool and not already requested // by another peer + let transaction_ids = self.ctx.mining_manager().clone().unknown_transactions(transaction_ids).await; let mut requests = Vec::new(); for transaction_id in transaction_ids { - if !self.is_known_transaction(transaction_id).await { - if let Some(req) = self.ctx.try_adding_transaction_request(transaction_id) { - requests.push(req); - } + if let Some(req) = self.ctx.try_adding_transaction_request(transaction_id) { + requests.push(req); } } @@ -131,12 +130,6 @@ impl RelayTransactionsFlow { Ok(requests) } - async fn is_known_transaction(&self, transaction_id: TransactionId) -> bool { - // 
Ask the transaction memory pool if the transaction is known - // to it in any form (main pool or orphan). - self.ctx.mining_manager().clone().has_transaction(transaction_id, true, true).await - } - /// Returns the next Transaction or TransactionNotFound message in msg_route, /// returning only one of the message types at a time. async fn read_response(&mut self) -> Result { @@ -168,7 +161,7 @@ impl RelayTransactionsFlow { consensus: ConsensusProxy, requests: Vec>, ) -> Result<(), ProtocolError> { - // trace!("Receive {} transaction ids from {}", requests.len(), self.router.identity()); + let mut transactions: Vec = Vec::with_capacity(requests.len()); for request in requests { let response = self.read_response().await?; let transaction_id = response.transaction_id(); @@ -178,40 +171,41 @@ impl RelayTransactionsFlow { request.req, transaction_id ))); } - let Response::Transaction(transaction) = response else { - continue; - }; - match self - .ctx - .mining_manager() - .clone() - .validate_and_insert_transaction(&consensus, transaction, Priority::Low, Orphan::Allowed) - .await - { - Ok(accepted_transactions) => { - // trace!("Broadcast {} accepted transaction ids", accepted_transactions.len()); - self.ctx.broadcast_transactions(accepted_transactions.iter().map(|x| x.id())).await?; + if let Response::Transaction(transaction) = response { + transactions.push(transaction); + } + } + let insert_results = self + .ctx + .mining_manager() + .clone() + .validate_and_insert_transaction_batch(&consensus, transactions, Priority::Low, Orphan::Allowed) + .await; + + for res in insert_results.iter() { + match res { + Ok(_) => {} + Err(MiningManagerError::MempoolError(RuleError::RejectInvalid(transaction_id))) => { + // TODO: discuss a banning process + return Err(ProtocolError::MisbehavingPeer(format!("rejected invalid transaction {}", transaction_id))); } - Err(MiningManagerError::MempoolError(err)) => { - match err { - RuleError::RejectInvalid(_) => { - // TODO: discuss a banning 
process - return Err(ProtocolError::MisbehavingPeer(format!("rejected invalid transaction {}", transaction_id))); - } - RuleError::RejectSpamTransaction(_) => { - self.spam_counter += 1; - if self.spam_counter % 100 == 0 { - kaspa_core::warn!("Peer {} has shared {} spam txs", self.router, self.spam_counter); - } - } - _ => (), + Err(MiningManagerError::MempoolError(RuleError::RejectSpamTransaction(_))) => { + self.spam_counter += 1; + if self.spam_counter % 100 == 0 { + kaspa_core::warn!("Peer {} has shared {} spam txs", self.router, self.spam_counter); } - continue; } Err(_) => {} } } - // trace!("Processed {} transactions from {}", requests.len(), self.router.identity()); + + self.ctx + .broadcast_transactions(insert_results.into_iter().filter_map(|res| match res { + Ok(x) => Some(x.id()), + Err(_) => None, + })) + .await?; + Ok(()) } } diff --git a/simpa/src/main.rs b/simpa/src/main.rs index 123f21f724..0458a34ef5 100644 --- a/simpa/src/main.rs +++ b/simpa/src/main.rs @@ -187,8 +187,15 @@ fn main() { }; let (dummy_notification_sender, _) = unbounded(); let notification_root = Arc::new(ConsensusNotificationRoot::new(dummy_notification_sender)); - let consensus = - Arc::new(Consensus::new(db, config.clone(), Default::default(), notification_root, Default::default(), unix_now())); + let consensus = Arc::new(Consensus::new( + db, + config.clone(), + Default::default(), + notification_root, + Default::default(), + Default::default(), + unix_now(), + )); (consensus, lifetime) } else { let until = if args.target_blocks.is_none() { config.genesis.timestamp + args.sim_time * 1000 } else { u64::MAX }; // milliseconds @@ -216,8 +223,15 @@ fn main() { let (_lifetime2, db2) = create_temp_db!(ConnBuilder::default().with_parallelism(num_cpus::get())); let (dummy_notification_sender, _) = unbounded(); let notification_root = Arc::new(ConsensusNotificationRoot::new(dummy_notification_sender)); - let consensus2 = - Arc::new(Consensus::new(db2, config.clone(), Default::default(), 
notification_root, Default::default(), unix_now())); + let consensus2 = Arc::new(Consensus::new( + db2, + config.clone(), + Default::default(), + notification_root, + Default::default(), + Default::default(), + unix_now(), + )); let handles2 = consensus2.run_processors(); rt.block_on(validate(&consensus, &consensus2, &config, args.delay, args.bps)); consensus2.shutdown(handles2); diff --git a/simpa/src/simulator/miner.rs b/simpa/src/simulator/miner.rs index 9bc3aae644..2f144fc668 100644 --- a/simpa/src/simulator/miner.rs +++ b/simpa/src/simulator/miner.rs @@ -4,7 +4,7 @@ use kaspa_consensus::consensus::Consensus; use kaspa_consensus::model::stores::virtual_state::VirtualStateStoreReader; use kaspa_consensus::params::Params; use kaspa_consensus_core::api::ConsensusApi; -use kaspa_consensus_core::block::Block; +use kaspa_consensus_core::block::{Block, TemplateBuildMode, TemplateTransactionSelector}; use kaspa_consensus_core::coinbase::MinerData; use kaspa_consensus_core::sign::sign; use kaspa_consensus_core::subnets::SUBNETWORK_ID_NATIVE; @@ -22,6 +22,30 @@ use std::cmp::max; use std::iter::once; use std::sync::Arc; +struct OnetimeTxSelector { + txs: Option>, +} + +impl OnetimeTxSelector { + fn new(txs: Vec) -> Self { + Self { txs: Some(txs) } + } +} + +impl TemplateTransactionSelector for OnetimeTxSelector { + fn select_transactions(&mut self) -> Vec { + self.txs.take().unwrap() + } + + fn reject_selection(&mut self, _tx_id: kaspa_consensus_core::tx::TransactionId) { + unimplemented!() + } + + fn is_successful(&self) -> bool { + true + } +} + pub struct Miner { // ID pub(super) id: u64, @@ -89,7 +113,7 @@ impl Miner { let session = self.consensus.acquire_session(); let mut block_template = self .consensus - .build_block_template(self.miner_data.clone(), txs) + .build_block_template(self.miner_data.clone(), Box::new(OnetimeTxSelector::new(txs)), TemplateBuildMode::Standard) .expect("simulation txs are selected in sync with virtual state and are expected to be 
valid"); drop(session); block_template.block.header.timestamp = timestamp; // Use simulation time rather than real time diff --git a/simpa/src/simulator/network.rs b/simpa/src/simulator/network.rs index 82e75178e8..74fdabf696 100644 --- a/simpa/src/simulator/network.rs +++ b/simpa/src/simulator/network.rs @@ -82,6 +82,7 @@ impl KaspaNetworkSimulator { Default::default(), notification_root, Default::default(), + Default::default(), unix_now(), )); let handles = consensus.run_processors(); diff --git a/testing/integration/src/common/client_pool.rs b/testing/integration/src/common/client_pool.rs new file mode 100644 index 0000000000..b01f6e8700 --- /dev/null +++ b/testing/integration/src/common/client_pool.rs @@ -0,0 +1,53 @@ +use async_channel::{SendError, Sender}; +use futures_util::Future; +use kaspa_core::trace; +use kaspa_grpc_client::GrpcClient; +use kaspa_utils::{any::type_name_short, channel::Channel}; +use std::sync::Arc; +use tokio::task::JoinHandle; + +pub struct ClientPool { + distribution_channel: Channel, + pub join_handles: Vec>, +} + +impl ClientPool { + pub fn new(clients: Vec>, distribution_channel_capacity: usize, client_op: F) -> Self + where + F: Fn(Arc, T) -> R + Sync + Send + Copy + 'static, + R: Future + Send, + { + let distribution_channel = Channel::bounded(distribution_channel_capacity); + let join_handles = clients + .into_iter() + .enumerate() + .map(|(index, client)| { + let rx = distribution_channel.receiver(); + tokio::spawn(async move { + while let Ok(msg) = rx.recv().await { + if client_op(client.clone(), msg).await { + rx.close(); + break; + } + } + client.disconnect().await.unwrap(); + trace!("Client pool {} task {} exited", type_name_short::(), index); + }) + }) + .collect(); + + Self { distribution_channel, join_handles } + } + + pub async fn send_via_available_client(&self, msg: T) -> Result<(), SendError> { + self.distribution_channel.send(msg).await + } + + pub fn sender(&self) -> Sender { + self.distribution_channel.sender() + 
} + + pub fn close(&self) { + self.distribution_channel.close() + } +} diff --git a/testing/integration/src/common/daemon.rs b/testing/integration/src/common/daemon.rs index 4a6d871b82..67b4b42dfc 100644 --- a/testing/integration/src/common/daemon.rs +++ b/testing/integration/src/common/daemon.rs @@ -1,3 +1,4 @@ +use futures_util::Future; use kaspa_consensus_core::network::NetworkId; use kaspa_core::{core::Core, signals::Shutdown}; use kaspa_database::utils::get_kaspa_tempdir; @@ -7,6 +8,8 @@ use kaspad_lib::{args::Args, daemon::create_core_with_runtime}; use std::{sync::Arc, time::Duration}; use tempfile::TempDir; +use super::client_pool::ClientPool; + pub struct Daemon { // Type and suffix of the daemon network pub network: NetworkId, @@ -77,6 +80,23 @@ impl Daemon { .await .unwrap() } + + pub async fn new_client_pool( + &self, + pool_size: usize, + distribution_channel_capacity: usize, + client_op: F, + ) -> ClientPool + where + F: Fn(Arc, T) -> R + Sync + Send + Copy + 'static, + R: Future + Send, + { + let mut clients = Vec::with_capacity(pool_size); + for _ in 0..pool_size { + clients.push(Arc::new(self.new_client().await)); + } + ClientPool::new(clients, distribution_channel_capacity, client_op) + } } impl Drop for Daemon { diff --git a/testing/integration/src/common/mod.rs b/testing/integration/src/common/mod.rs index 1ca870b397..095f189026 100644 --- a/testing/integration/src/common/mod.rs +++ b/testing/integration/src/common/mod.rs @@ -4,6 +4,7 @@ use std::{ path::Path, }; +pub mod client_pool; pub mod daemon; pub fn open_file(file_path: &Path) -> File { diff --git a/testing/integration/src/consensus_integration_tests.rs b/testing/integration/src/consensus_integration_tests.rs index 7dcf3fe84b..09776015e6 100644 --- a/testing/integration/src/consensus_integration_tests.rs +++ b/testing/integration/src/consensus_integration_tests.rs @@ -52,6 +52,7 @@ use kaspa_database::prelude::ConnBuilder; use kaspa_index_processor::service::IndexService; use 
kaspa_math::Uint256; use kaspa_muhash::MuHash; +use kaspa_txscript::caches::TxScriptCacheCounters; use kaspa_utxoindex::api::{UtxoIndexApi, UtxoIndexProxy}; use kaspa_utxoindex::UtxoIndex; use serde::{Deserialize, Serialize}; @@ -1689,8 +1690,10 @@ async fn staging_consensus_test() { let (notification_send, _notification_recv) = unbounded(); let notification_root = Arc::new(ConsensusNotificationRoot::new(notification_send)); let counters = Arc::new(ProcessingCounters::default()); + let tx_script_cache_counters = Arc::new(TxScriptCacheCounters::default()); - let consensus_factory = Arc::new(ConsensusFactory::new(meta_db, &config, consensus_db_dir, 4, notification_root, counters)); + let consensus_factory = + Arc::new(ConsensusFactory::new(meta_db, &config, consensus_db_dir, 4, notification_root, counters, tx_script_cache_counters)); let consensus_manager = Arc::new(ConsensusManager::new(consensus_factory)); let core = Arc::new(Core::new()); diff --git a/testing/integration/src/mempool_benchmarks.rs b/testing/integration/src/mempool_benchmarks.rs index a08e8b551d..8f9a9cd8dd 100644 --- a/testing/integration/src/mempool_benchmarks.rs +++ b/testing/integration/src/mempool_benchmarks.rs @@ -1,5 +1,6 @@ use crate::common::daemon::Daemon; use async_channel::Sender; +use futures_util::future::join_all; use itertools::Itertools; use kaspa_addresses::Address; use kaspa_consensus::params::Params; @@ -14,7 +15,7 @@ use kaspa_consensus_core::{ utxo_diff::UtxoDiff, }, }; -use kaspa_core::{debug, info, time::Stopwatch}; +use kaspa_core::{debug, info}; use kaspa_notify::{ listener::ListenerId, notifier::Notify, @@ -32,8 +33,11 @@ use std::{ cmp::max, collections::{hash_map::Entry::Occupied, HashMap, HashSet}, fmt::Debug, - sync::Arc, - time::Duration, + sync::{ + atomic::{AtomicBool, Ordering}, + Arc, + }, + time::{Duration, Instant}, }; use tokio::join; @@ -49,9 +53,8 @@ impl Notify for ChannelNotify { } } -const FEE_PER_MASS: u64 = 10; - fn required_fee(num_inputs: usize, 
num_outputs: u64) -> u64 { + const FEE_PER_MASS: u64 = 10; FEE_PER_MASS * estimated_mass(num_inputs, num_outputs) } @@ -59,8 +62,17 @@ fn estimated_mass(num_inputs: usize, num_outputs: u64) -> u64 { 200 + 34 * num_outputs + 1000 * (num_inputs as u64) } +const EXPAND_FACTOR: u64 = 1; +const CONTRACT_FACTOR: u64 = 1; + /// Builds a TX DAG based on the initial UTXO set and on constant params -fn generate_tx_dag(mut utxoset: UtxoCollection, schnorr_key: KeyPair, spk: ScriptPublicKey) -> Vec> { +fn generate_tx_dag( + mut utxoset: UtxoCollection, + schnorr_key: KeyPair, + spk: ScriptPublicKey, + target_levels: usize, + target_width: usize, +) -> Vec> { /* Algo: perform level by level: @@ -72,10 +84,8 @@ fn generate_tx_dag(mut utxoset: UtxoCollection, schnorr_key: KeyPair, spk: Scrip apply level utxo diff to the utxo collection */ - let target_levels = 1_000; - let target_width = 500; - let num_inputs = 2; - let num_outputs = 2; + let num_inputs = CONTRACT_FACTOR as usize; + let num_outputs = EXPAND_FACTOR; let mut txs = Vec::with_capacity(target_levels * target_width); @@ -107,7 +117,7 @@ fn generate_tx_dag(mut utxoset: UtxoCollection, schnorr_key: KeyPair, spk: Scrip utxoset.remove_collection(&utxo_diff.remove); utxoset.add_collection(&utxo_diff.add); - if i % 100 == 0 { + if i % (target_levels / 10).max(1) == 0 { info!("Generated {} txs", txs.len()); } } @@ -138,7 +148,22 @@ fn verify_tx_dag(initial_utxoset: &UtxoCollection, txs: &Vec>) #[ignore = "bmk"] async fn bench_bbt_latency() { kaspa_core::panic::configure_panic(); - kaspa_core::log::try_init_logger("info"); + kaspa_core::log::try_init_logger("info,kaspa_core::time=debug,kaspa_mining::monitor=debug"); + + // Constants + const BLOCK_COUNT: usize = usize::MAX; + + const MEMPOOL_TARGET: u64 = 600_000; + const TX_COUNT: usize = 1_400_000; + const TX_LEVEL_WIDTH: usize = 20_000; + const TPS_PRESSURE: u64 = u64::MAX; + + const SUBMIT_BLOCK_CLIENTS: usize = 20; + const SUBMIT_TX_CLIENTS: usize = 2; + + if TX_COUNT < 
TX_LEVEL_WIDTH { + panic!() + } /* Logic: @@ -166,24 +191,23 @@ async fn bench_bbt_latency() { let args = Args { simnet: true, enable_unsynced_mining: true, - num_prealloc_utxos: Some(1_000), + num_prealloc_utxos: Some(TX_LEVEL_WIDTH as u64 * CONTRACT_FACTOR), prealloc_address: Some(prealloc_address.to_string()), prealloc_amount: 500 * SOMPI_PER_KASPA, + block_template_cache_lifetime: Some(0), ..Default::default() }; let network = args.network(); let params: Params = network.into(); let utxoset = args.generate_prealloc_utxos(args.num_prealloc_utxos.unwrap()); - let txs = generate_tx_dag(utxoset.clone(), schnorr_key, spk); + let txs = generate_tx_dag(utxoset.clone(), schnorr_key, spk, TX_COUNT / TX_LEVEL_WIDTH, TX_LEVEL_WIDTH); verify_tx_dag(&utxoset, &txs); info!("Generated overall {} txs", txs.len()); let mut daemon = Daemon::new_random_with_args(args); let client = daemon.start().await; - // TODO: use only a single client once grpc server-side supports concurrent requests - let block_template_client = daemon.new_client().await; - let submit_block_client = daemon.new_client().await; + let bbt_client = daemon.new_client().await; // The time interval between Poisson(lambda) events distributes ~Exp(lambda) let dist: Exp = Exp::new(params.bps() as f64).unwrap(); @@ -195,31 +219,67 @@ async fn bench_bbt_latency() { Address::new(network.network_type().into(), kaspa_addresses::Version::PubKey, &pk.x_only_public_key().0.serialize()); debug!("Generated private key {} and address {}", sk.display_secret(), pay_address); - let current_template = Arc::new(Mutex::new(block_template_client.get_block_template(pay_address.clone(), vec![]).await.unwrap())); + let current_template = Arc::new(Mutex::new(bbt_client.get_block_template(pay_address.clone(), vec![]).await.unwrap())); let current_template_consume = current_template.clone(); + let executing = Arc::new(AtomicBool::new(true)); let (sender, receiver) = async_channel::unbounded(); - 
block_template_client.start(Some(Arc::new(ChannelNotify { sender }))).await; - block_template_client.start_notify(ListenerId::default(), Scope::NewBlockTemplate(NewBlockTemplateScope {})).await.unwrap(); + bbt_client.start(Some(Arc::new(ChannelNotify { sender }))).await; + bbt_client.start_notify(ListenerId::default(), Scope::NewBlockTemplate(NewBlockTemplateScope {})).await.unwrap(); + + let submit_block_pool = daemon + .new_client_pool(SUBMIT_BLOCK_CLIENTS, 100, |c, block| async move { + let _sw = kaspa_core::time::Stopwatch::<500>::with_threshold("sb"); + let response = c.submit_block(block, false).await.unwrap(); + assert_eq!(response.report, kaspa_rpc_core::SubmitBlockReport::Success); + false + }) + .await; - let cc = block_template_client.clone(); + let submit_tx_pool = daemon + .new_client_pool::<(usize, Arc), _, _>(SUBMIT_TX_CLIENTS, 100, |c, (i, tx)| async move { + match c.submit_transaction(tx.as_ref().into(), false).await { + Ok(_) => {} + Err(RpcError::General(msg)) if msg.contains("orphan") => { + kaspa_core::warn!("\n\n\n{msg}\n\n"); + kaspa_core::warn!("Submitted {} transactions, exiting tx submit loop", i); + return true; + } + Err(e) => panic!("{e}"), + } + false + }) + .await; + + let cc = bbt_client.clone(); + let exec = executing.clone(); + let notification_rx = receiver.clone(); + let pac = pay_address.clone(); let miner_receiver_task = tokio::spawn(async move { - while let Ok(notification) = receiver.recv().await { + while let Ok(notification) = notification_rx.recv().await { match notification { Notification::NewBlockTemplate(_) => { - while receiver.try_recv().is_ok() { + while notification_rx.try_recv().is_ok() { // Drain the channel } - let _sw = Stopwatch::<500>::with_threshold("get_block_template"); - *current_template.lock() = cc.get_block_template(pay_address.clone(), vec![]).await.unwrap(); + // let _sw = kaspa_core::time::Stopwatch::<500>::with_threshold("bbt"); + *current_template.lock() = cc.get_block_template(pac.clone(), 
vec![]).await.unwrap(); } _ => panic!(), } + if !exec.load(Ordering::Relaxed) { + kaspa_core::warn!("Test is over, stopping miner receiver loop"); + break; + } } + kaspa_core::warn!("Miner receiver loop task exited"); }); + let block_sender = submit_block_pool.sender(); + let exec = executing.clone(); + let cc = Arc::new(bbt_client.clone()); let miner_loop_task = tokio::spawn(async move { - for i in 0..10000 { + for i in 0..BLOCK_COUNT { // Simulate mining time let timeout = max((dist.sample(&mut thread_rng()) * 1000.0) as u64, 1); tokio::time::sleep(Duration::from_millis(timeout)).await; @@ -227,46 +287,103 @@ async fn bench_bbt_latency() { // Read the most up-to-date block template let mut block = current_template_consume.lock().block.clone(); // Use index as nonce to avoid duplicate blocks - block.header.nonce = i; + block.header.nonce = i as u64; - let mcc = submit_block_client.clone(); + let ctc = current_template_consume.clone(); + let ccc = cc.clone(); + let pac = pay_address.clone(); + tokio::spawn(async move { + // let _sw = kaspa_core::time::Stopwatch::<500>::with_threshold("bbt"); + // We used the current template so let's refetch a new template with new txs + *ctc.lock() = ccc.get_block_template(pac, vec![]).await.unwrap(); + }); + + let bs = block_sender.clone(); tokio::spawn(async move { // Simulate communication delay. 
TODO: consider adding gaussian noise tokio::time::sleep(Duration::from_millis(comm_delay)).await; - // let _sw = Stopwatch::<500>::with_threshold("submit_block"); - let response = mcc.submit_block(block, false).await.unwrap(); - assert_eq!(response.report, kaspa_rpc_core::SubmitBlockReport::Success); + let _ = bs.send(block).await; }); + if !exec.load(Ordering::Relaxed) { + kaspa_core::warn!("Test is over, stopping miner loop"); + break; + } } - block_template_client.disconnect().await.unwrap(); - submit_block_client.disconnect().await.unwrap(); + exec.store(false, Ordering::Relaxed); + bbt_client.stop_notify(ListenerId::default(), Scope::NewBlockTemplate(NewBlockTemplateScope {})).await.unwrap(); + bbt_client.disconnect().await.unwrap(); + kaspa_core::warn!("Miner loop task exited"); }); + let tx_sender = submit_tx_pool.sender(); + let exec = executing.clone(); let cc = client.clone(); + let mut tps_pressure = if MEMPOOL_TARGET < u64::MAX { u64::MAX } else { TPS_PRESSURE }; + let mut last_log_time = Instant::now() - Duration::from_secs(5); + let mut log_index = 0; let tx_sender_task = tokio::spawn(async move { - let total_txs = txs.len(); for (i, tx) in txs.into_iter().enumerate() { - let _sw = Stopwatch::<500>::with_threshold("submit_transaction"); - let res = cc.submit_transaction(tx.as_ref().into(), false).await; - match res { + if tps_pressure != u64::MAX { + tokio::time::sleep(std::time::Duration::from_secs_f64(1.0 / tps_pressure as f64)).await; + } + if last_log_time.elapsed() > Duration::from_millis(200) { + let mut mempool_size = cc.get_info().await.unwrap().mempool_size; + if log_index % 10 == 0 { + info!("Mempool size: {:#?}, txs submitted: {}", mempool_size, i); + } + log_index += 1; + last_log_time = Instant::now(); + + if mempool_size > (MEMPOOL_TARGET as f32 * 1.05) as u64 { + tps_pressure = TPS_PRESSURE; + while mempool_size > MEMPOOL_TARGET { + tokio::time::sleep(std::time::Duration::from_millis(200)).await; + mempool_size = 
cc.get_info().await.unwrap().mempool_size; + if log_index % 10 == 0 { + info!("Mempool size: {:#?}, txs submitted: {}", mempool_size, i); + } + log_index += 1; + } + } + } + match tx_sender.send((i, tx)).await { Ok(_) => {} - Err(RpcError::General(msg)) if msg.contains("orphan") => { - kaspa_core::error!("\n\n\n{msg}\n\n"); - kaspa_core::warn!("Submitted {} out of {}, exiting tx submit loop", i, total_txs); + Err(_) => { break; } - Err(e) => panic!("{e}"), } + if !exec.load(Ordering::Relaxed) { + break; + } + } + + kaspa_core::warn!("Tx sender task, waiting for mempool to drain.."); + loop { + if !exec.load(Ordering::Relaxed) { + break; + } + let mempool_size = cc.get_info().await.unwrap().mempool_size; + info!("Mempool size: {:#?}", mempool_size); + if mempool_size == 0 || (TX_COUNT as u64 > MEMPOOL_TARGET && mempool_size < MEMPOOL_TARGET) { + break; + } + tokio::time::sleep(std::time::Duration::from_secs(1)).await; } - kaspa_core::warn!("Tx submit task exited"); + exec.store(false, Ordering::Relaxed); + kaspa_core::warn!("Tx sender task exited"); }); let _ = join!(miner_receiver_task, miner_loop_task, tx_sender_task); + submit_block_pool.close(); + submit_tx_pool.close(); + + join_all(submit_block_pool.join_handles).await; + join_all(submit_tx_pool.join_handles).await; + // // Fold-up // - // tokio::time::sleep(std::time::Duration::from_secs(5)).await; client.disconnect().await.unwrap(); drop(client); daemon.shutdown(); diff --git a/utils/src/channel.rs b/utils/src/channel.rs index bf30a6f891..02c2089265 100644 --- a/utils/src/channel.rs +++ b/utils/src/channel.rs @@ -1,4 +1,4 @@ -use async_channel::{unbounded, Receiver, RecvError, SendError, Sender, TryRecvError, TrySendError}; +use async_channel::{bounded, unbounded, Receiver, RecvError, SendError, Sender, TryRecvError, TrySendError}; /// Multiple producers multiple consumers channel #[derive(Clone, Debug)] @@ -12,6 +12,11 @@ impl Channel { Self { sender: channel.0, receiver: channel.1 } } + pub fn 
bounded(capacity: usize) -> Channel { + let channel = bounded(capacity); + Self { sender: channel.0, receiver: channel.1 } + } + pub fn sender(&self) -> Sender { self.sender.clone() }