From 9e7ebfa15b3b2e3dd2aa3b552a6b83669fcd8800 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Tue, 23 Sep 2025 12:11:53 -0700 Subject: [PATCH 1/9] Implement TestChainstate Signed-off-by: Jacinta Ferrant --- stackslib/src/chainstate/mod.rs | 2 + .../src/chainstate/nakamoto/tests/node.rs | 149 +-- stackslib/src/chainstate/tests/mod.rs | 1044 +++++++++++++++++ 3 files changed, 1124 insertions(+), 71 deletions(-) create mode 100644 stackslib/src/chainstate/tests/mod.rs diff --git a/stackslib/src/chainstate/mod.rs b/stackslib/src/chainstate/mod.rs index 3887650d8e8..0d848acf634 100644 --- a/stackslib/src/chainstate/mod.rs +++ b/stackslib/src/chainstate/mod.rs @@ -24,3 +24,5 @@ pub mod burn; pub mod coordinator; pub mod nakamoto; pub mod stacks; +#[cfg(test)] +pub mod tests; diff --git a/stackslib/src/chainstate/nakamoto/tests/node.rs b/stackslib/src/chainstate/nakamoto/tests/node.rs index 2b657ccf27c..1c9a64b9596 100644 --- a/stackslib/src/chainstate/nakamoto/tests/node.rs +++ b/stackslib/src/chainstate/nakamoto/tests/node.rs @@ -696,6 +696,7 @@ impl TestStacksNode { mut after_block: G, malleablize: bool, mined_canonical: bool, + timestamp: Option, ) -> Result)>, ChainstateError> where S: FnMut(&mut NakamotoBlockBuilder), @@ -804,6 +805,10 @@ impl TestStacksNode { &coinbase.clone().unwrap(), ) }; + // Optionally overwrite the timestamp to enable predictable blocks. + if let Some(timestamp) = timestamp { + builder.header.timestamp = timestamp; + } miner_setup(&mut builder); tenure_change = None; @@ -1060,82 +1065,82 @@ impl TestStacksNode { } } -impl TestPeer<'_> { - /// Get the Nakamoto parent linkage data for building atop the last-produced tenure or - /// Stacks 2.x block. - /// Returns (last-tenure-id, epoch2-parent, nakamoto-parent-tenure, parent-sortition) - fn get_nakamoto_parent( - miner: &TestMiner, - stacks_node: &TestStacksNode, - sortdb: &SortitionDB, - ) -> ( - StacksBlockId, - Option, - Option>, - ) { - let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); - if let Some(parent_blocks) = stacks_node.get_last_nakamoto_tenure(miner) { - debug!("Parent will be a Nakamoto block"); - - // parent is an epoch 3 nakamoto block - let first_parent = parent_blocks.first().unwrap(); - debug!("First parent is {:?}", first_parent); +/// Get the Nakamoto parent linkage data for building atop the last-produced tenure or +/// Stacks 2.x block. 
+/// Returns (last-tenure-id, epoch2-parent, nakamoto-parent-tenure, parent-sortition) +pub fn get_nakamoto_parent( + miner: &TestMiner, + stacks_node: &TestStacksNode, + sortdb: &SortitionDB, +) -> ( + StacksBlockId, + Option, + Option>, +) { + let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); + if let Some(parent_blocks) = stacks_node.get_last_nakamoto_tenure(miner) { + debug!("Parent will be a Nakamoto block"); + + // parent is an epoch 3 nakamoto block + let first_parent = parent_blocks.first().unwrap(); + debug!("First parent is {:?}", first_parent); + + // sanity check -- this parent must correspond to a sortition + assert!( + SortitionDB::get_block_snapshot_consensus( + sortdb.conn(), + &first_parent.header.consensus_hash, + ) + .unwrap() + .unwrap() + .sortition + ); - // sanity check -- this parent must correspond to a sortition - assert!( - SortitionDB::get_block_snapshot_consensus( - sortdb.conn(), - &first_parent.header.consensus_hash, - ) - .unwrap() - .unwrap() - .sortition + let last_tenure_id = StacksBlockId::new( + &first_parent.header.consensus_hash, + &first_parent.header.block_hash(), + ); + (last_tenure_id, None, Some(parent_blocks)) + } else { + // parent may be an epoch 2.x block + let (parent_opt, parent_sortition_opt) = if let Some(parent_block) = + stacks_node.get_last_anchored_block(miner) + { + debug!("Parent will be a Stacks 2.x block"); + let ic = sortdb.index_conn(); + let sort_opt = SortitionDB::get_block_snapshot_for_winning_stacks_block( + &ic, + &tip.sortition_id, + &parent_block.block_hash(), + ) + .unwrap(); + if sort_opt.is_none() { + warn!("No parent sortition in epoch2: tip.sortition_id = {}, parent_block.block_hash() = {}", &tip.sortition_id, &parent_block.block_hash()); + } + (Some(parent_block), sort_opt) + } else { + warn!( + "No parent sortition in epoch2: tip.sortition_id = {}", + &tip.sortition_id ); + (None, None) + }; - let last_tenure_id = StacksBlockId::new( - &first_parent.header.consensus_hash, - &first_parent.header.block_hash(), - ); - (last_tenure_id, None, Some(parent_blocks)) + let last_tenure_id = if let Some(last_epoch2_block) = parent_opt.as_ref() { + let parent_sort = parent_sortition_opt.as_ref().unwrap(); + StacksBlockId::new( + &parent_sort.consensus_hash, + &last_epoch2_block.header.block_hash(), + ) } else { - // parent may be an epoch 2.x block - let (parent_opt, parent_sortition_opt) = if let Some(parent_block) = - stacks_node.get_last_anchored_block(miner) - { - debug!("Parent will be a Stacks 2.x block"); - let ic = sortdb.index_conn(); - let sort_opt = SortitionDB::get_block_snapshot_for_winning_stacks_block( - &ic, - &tip.sortition_id, - &parent_block.block_hash(), - ) - .unwrap(); - if sort_opt.is_none() { - warn!("No parent sortition in epoch2: tip.sortition_id = {}, parent_block.block_hash() = {}", &tip.sortition_id, &parent_block.block_hash()); - } - (Some(parent_block), sort_opt) - } else { - warn!( - "No parent sortition in epoch2: tip.sortition_id = {}", - &tip.sortition_id - ); - (None, None) - }; - - let last_tenure_id = if let Some(last_epoch2_block) = parent_opt.as_ref() { - let parent_sort = parent_sortition_opt.as_ref().unwrap(); - StacksBlockId::new( - &parent_sort.consensus_hash, - &last_epoch2_block.header.block_hash(), - ) - } else { - // must be a genesis block (testing only!) - StacksBlockId(BOOT_BLOCK_HASH.0) - }; - (last_tenure_id, parent_opt, None) - } + // must be a genesis block (testing only!) 
+ StacksBlockId(BOOT_BLOCK_HASH.0) + }; + (last_tenure_id, parent_opt, None) } +} +impl TestPeer<'_> { /// Start the next Nakamoto tenure. /// This generates the VRF key and block-commit txs, as well as the TenureChange and /// leader key this commit references @@ -1161,7 +1166,7 @@ impl TestPeer<'_> { Some(nakamoto_parent_tenure.clone()), ) } else { - Self::get_nakamoto_parent(&self.miner, &stacks_node, &sortdb) + get_nakamoto_parent(&self.miner, &stacks_node, &sortdb) }; // find the VRF leader key register tx to use. @@ -1464,6 +1469,7 @@ impl TestPeer<'_> { after_block, peer.mine_malleablized_blocks, peer.nakamoto_parent_tenure_opt.is_none(), + None, )?; let just_blocks = blocks @@ -1552,6 +1558,7 @@ impl TestPeer<'_> { |_| true, self.mine_malleablized_blocks, self.nakamoto_parent_tenure_opt.is_none(), + None, ) .unwrap(); diff --git a/stackslib/src/chainstate/tests/mod.rs b/stackslib/src/chainstate/tests/mod.rs new file mode 100644 index 00000000000..ce38b60bad7 --- /dev/null +++ b/stackslib/src/chainstate/tests/mod.rs @@ -0,0 +1,1044 @@ +// Copyright (C) 2025 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . +use std::fs; + +use clarity::types::chainstate::{BlockHeaderHash, BurnchainHeaderHash, StacksBlockId}; +use clarity::vm::ast::parser::v1::CONTRACT_MAX_NAME_LENGTH; +use clarity::vm::costs::ExecutionCost; +use clarity::vm::database::STXBalance; +use clarity::vm::types::*; +use clarity::vm::ContractName; +use rand; +use rand::{thread_rng, Rng}; +use stacks_common::address::*; +use stacks_common::deps_common::bitcoin::network::serialize::BitcoinHash; +use stacks_common::types::StacksEpochId; +use stacks_common::util::hash::*; +use stacks_common::util::vrf::*; + +use self::nakamoto::test_signers::TestSigners; +use super::*; +use crate::burnchains::bitcoin::indexer::BitcoinIndexer; +use crate::burnchains::db::{BurnchainDB, BurnchainHeaderReader}; +use crate::burnchains::tests::*; +use crate::burnchains::*; +use crate::chainstate::burn::db::sortdb::*; +use crate::chainstate::burn::operations::*; +use crate::chainstate::burn::*; +use crate::chainstate::coordinator::tests::*; +use crate::chainstate::coordinator::*; +use crate::chainstate::nakamoto::coordinator::get_nakamoto_next_recipients; +use crate::chainstate::nakamoto::tests::get_account; +use crate::chainstate::nakamoto::tests::node::{get_nakamoto_parent, TestStacker}; +use crate::chainstate::nakamoto::{NakamotoBlock, NakamotoChainState, StacksDBIndexed}; +use crate::chainstate::stacks::address::PoxAddress; +use crate::chainstate::stacks::boot::test::get_parent_tip; +use crate::chainstate::stacks::db::{StacksChainState, *}; +use crate::chainstate::stacks::tests::*; +use crate::chainstate::stacks::{Error as ChainstateError, StacksMicroblockHeader, *}; +use crate::core::{EpochList, StacksEpoch, StacksEpochExtension, BOOT_BLOCK_HASH}; +use crate::net::test::{TestEventObserver, TestPeerConfig}; +use 
crate::util_lib::boot::{boot_code_test_addr, boot_code_tx_auth}; +use crate::util_lib::strings::*; + +// describes a chainstate's initial configuration +#[derive(Debug, Clone)] +pub struct TestChainstateConfig { + pub network_id: u32, + pub current_block: u64, + pub burnchain: Burnchain, + pub test_name: String, + pub initial_balances: Vec<(PrincipalData, u64)>, + pub initial_lockups: Vec, + pub spending_account: TestMiner, + pub setup_code: String, + pub epochs: Option, + pub test_stackers: Option>, + pub test_signers: Option, + /// aggregate public key to use + /// (NOTE: will be used post-Nakamoto) + pub aggregate_public_key: Option>, + pub txindex: bool, +} + +impl Default for TestChainstateConfig { + fn default() -> Self { + let chain_config = TestPeerConfig::default(); + Self::from(chain_config) + } +} +impl TestChainstateConfig { + pub fn new(test_name: &str) -> Self { + Self { + test_name: test_name.into(), + ..Self::default() + } + } +} +pub struct TestChainstate<'a> { + pub config: TestChainstateConfig, + pub sortdb: Option, + pub miner: TestMiner, + pub stacks_node: Option, + pub chainstate_path: String, + pub indexer: Option, + pub coord: ChainsCoordinator< + 'a, + TestEventObserver, + (), + OnChainRewardSetProvider<'a, TestEventObserver>, + (), + (), + BitcoinIndexer, + >, + pub nakamoto_parent_tenure_opt: Option>, + /// list of malleablized blocks produced when mining. + pub malleablized_blocks: Vec, + pub mine_malleablized_blocks: bool, +} + +impl From for TestChainstateConfig { + fn from(chain_config: TestPeerConfig) -> Self { + Self { + network_id: chain_config.network_id, + current_block: chain_config.current_block, + burnchain: chain_config.burnchain, + test_name: chain_config.test_name, + initial_balances: chain_config.initial_balances, + initial_lockups: chain_config.initial_lockups, + spending_account: chain_config.spending_account, + setup_code: chain_config.setup_code, + epochs: chain_config.epochs, + test_stackers: chain_config.test_stackers, + test_signers: chain_config.test_signers, + aggregate_public_key: chain_config.aggregate_public_key, + txindex: chain_config.txindex, + } + } +} +impl<'a> TestChainstate<'a> { + pub fn new(config: TestChainstateConfig) -> TestChainstate<'a> { + Self::new_with_observer(config, None) + } + + pub fn test_path(config: &TestChainstateConfig) -> String { + let random = thread_rng().gen::(); + let random_bytes = to_hex(&random.to_be_bytes()); + let cleaned_config_test_name = config.test_name.replace("::", "_"); + format!( + "/tmp/stacks-node-tests/units-test-consensus/{cleaned_config_test_name}-{random_bytes}" + ) + } + + pub fn make_test_path(config: &TestChainstateConfig) -> String { + let test_path = Self::test_path(config); + if fs::metadata(&test_path).is_ok() { + fs::remove_dir_all(&test_path).unwrap(); + }; + + fs::create_dir_all(&test_path).unwrap(); + test_path + } + + pub fn new_with_observer( + mut config: TestChainstateConfig, + observer: Option<&'a TestEventObserver>, + ) -> TestChainstate<'a> { + let test_path = Self::test_path(&config); + let chainstate_path = get_chainstate_path_str(&test_path); + let mut miner_factory = TestMinerFactory::new(); + miner_factory.chain_id = config.network_id; + let mut miner = miner_factory.next_miner( + config.burnchain.clone(), + 1, + 1, + AddressHashMode::SerializeP2PKH, + ); + // manually set fees + miner.test_with_tx_fees = false; + + config.burnchain.working_dir = get_burnchain(&test_path, None).working_dir; + + let epochs = config.epochs.clone().unwrap_or_else(|| { + 
StacksEpoch::unit_test_pre_2_05(config.burnchain.first_block_height) + }); + + let mut sortdb = SortitionDB::connect( + &config.burnchain.get_db_path(), + config.burnchain.first_block_height, + &config.burnchain.first_block_hash, + 0, + &epochs, + config.burnchain.pox_constants.clone(), + None, + true, + ) + .unwrap(); + + let first_burnchain_block_height = config.burnchain.first_block_height; + let first_burnchain_block_hash = config.burnchain.first_block_hash.clone(); + + let _burnchain_blocks_db = BurnchainDB::connect( + &config.burnchain.get_burnchaindb_path(), + &config.burnchain, + true, + ) + .unwrap(); + + let agg_pub_key_opt = config.aggregate_public_key.clone(); + + let conf = config.clone(); + let post_flight_callback = move |clarity_tx: &mut ClarityTx| { + let mut receipts = vec![]; + + if let Some(agg_pub_key) = agg_pub_key_opt { + debug!("Setting aggregate public key to {}", &to_hex(&agg_pub_key)); + NakamotoChainState::aggregate_public_key_bootcode(clarity_tx, agg_pub_key); + } else { + debug!("Not setting aggregate public key"); + } + // add test-specific boot code + if !conf.setup_code.is_empty() { + let receipt = clarity_tx.connection().as_transaction(|clarity| { + let boot_code_addr = boot_code_test_addr(); + let boot_code_account = StacksAccount { + principal: boot_code_addr.to_account_principal(), + nonce: 0, + stx_balance: STXBalance::zero(), + }; + + let boot_code_auth = boot_code_tx_auth(boot_code_addr.clone()); + + debug!( + "Instantiate test-specific boot code contract '{}.{}' ({} bytes)...", + &boot_code_addr.to_string(), + &conf.test_name, + conf.setup_code.len() + ); + + let smart_contract = TransactionPayload::SmartContract( + TransactionSmartContract { + name: ContractName::try_from( + conf.test_name + .replace("::", "-") + .chars() + .skip( + conf.test_name + .len() + .saturating_sub(CONTRACT_MAX_NAME_LENGTH), + ) + .collect::() + .trim_start_matches(|c: char| !c.is_alphabetic()) + .to_string(), + ) + .expect("FATAL: invalid boot-code contract name"), + code_body: StacksString::from_str(&conf.setup_code) + .expect("FATAL: invalid boot code body"), + }, + None, + ); + + let boot_code_smart_contract = StacksTransaction::new( + TransactionVersion::Testnet, + boot_code_auth, + smart_contract, + ); + StacksChainState::process_transaction_payload( + clarity, + &boot_code_smart_contract, + &boot_code_account, + None, + ) + .unwrap() + }); + receipts.push(receipt); + } + debug!("Bootup receipts: {receipts:?}"); + }; + + let mut boot_data = ChainStateBootData::new( + &config.burnchain, + config.initial_balances.clone(), + Some(Box::new(post_flight_callback)), + ); + + if !config.initial_lockups.is_empty() { + let lockups = config.initial_lockups.clone(); + boot_data.get_bulk_initial_lockups = + Some(Box::new(move || Box::new(lockups.into_iter()))); + } + + let (chainstate, _) = StacksChainState::open_and_exec( + false, + config.network_id, + &chainstate_path, + Some(&mut boot_data), + None, + ) + .unwrap(); + + let indexer = BitcoinIndexer::new_unit_test(&config.burnchain.working_dir); + let mut coord = ChainsCoordinator::test_new_full( + &config.burnchain, + config.network_id, + &test_path, + OnChainRewardSetProvider(observer), + observer, + indexer, + None, + config.txindex, + ); + coord.handle_new_burnchain_block().unwrap(); + + let mut stacks_node = TestStacksNode::from_chainstate(chainstate); + + { + // pre-populate burnchain, if running on bitcoin + let prev_snapshot = SortitionDB::get_first_block_snapshot(sortdb.conn()).unwrap(); + let mut fork = 
TestBurnchainFork::new( + prev_snapshot.block_height, + &prev_snapshot.burn_header_hash, + &prev_snapshot.index_root, + 0, + ); + for i in prev_snapshot.block_height..config.current_block { + let burn_block = { + let ic = sortdb.index_conn(); + let mut burn_block = fork.next_block(&ic); + stacks_node.add_key_register(&mut burn_block, &mut miner); + burn_block + }; + fork.append_block(burn_block); + + fork.mine_pending_blocks_pox(&mut sortdb, &config.burnchain, &mut coord); + } + } + + let indexer = BitcoinIndexer::new_unit_test(&config.burnchain.working_dir); + + TestChainstate { + config, + sortdb: Some(sortdb), + miner, + stacks_node: Some(stacks_node), + chainstate_path, + coord, + indexer: Some(indexer), + nakamoto_parent_tenure_opt: None, + malleablized_blocks: vec![], + mine_malleablized_blocks: true, + } + } + + pub fn next_burnchain_block( + &mut self, + blockstack_ops: Vec, + ) -> (u64, BurnchainHeaderHash, ConsensusHash) { + let x = self.inner_next_burnchain_block(blockstack_ops, true, true, true, false); + (x.0, x.1, x.2) + } + + pub fn set_ops_consensus_hash( + blockstack_ops: &mut [BlockstackOperationType], + ch: &ConsensusHash, + ) { + for op in blockstack_ops.iter_mut() { + if let BlockstackOperationType::LeaderKeyRegister(ref mut data) = op { + data.consensus_hash = (*ch).clone(); + } + } + } + + pub fn set_ops_burn_header_hash( + blockstack_ops: &mut [BlockstackOperationType], + bhh: &BurnchainHeaderHash, + ) { + for op in blockstack_ops.iter_mut() { + op.set_burn_header_hash(bhh.clone()); + } + } + + pub fn make_next_burnchain_block( + burnchain: &Burnchain, + tip_block_height: u64, + tip_block_hash: &BurnchainHeaderHash, + num_ops: u64, + ops_determine_block_header: bool, + ) -> BurnchainBlockHeader { + test_debug!( + "make_next_burnchain_block: tip_block_height={tip_block_height} tip_block_hash={tip_block_hash} num_ops={num_ops}" + ); + let indexer = BitcoinIndexer::new_unit_test(&burnchain.working_dir); + let parent_hdr = indexer + .read_burnchain_header(tip_block_height) + .unwrap() + .unwrap(); + + test_debug!("parent hdr ({tip_block_height}): {parent_hdr:?}"); + assert_eq!(&parent_hdr.block_hash, tip_block_hash); + + let now = BURNCHAIN_TEST_BLOCK_TIME; + let block_header_hash = BurnchainHeaderHash::from_bitcoin_hash( + &BitcoinIndexer::mock_bitcoin_header( + &parent_hdr.block_hash, + (now as u32) + + if ops_determine_block_header { + num_ops as u32 + } else { + 0 + }, + ) + .bitcoin_hash(), + ); + test_debug!( + "Block header hash at {} is {block_header_hash}", + tip_block_height + 1 + ); + + BurnchainBlockHeader { + block_height: tip_block_height + 1, + block_hash: block_header_hash.clone(), + parent_block_hash: parent_hdr.block_hash.clone(), + num_txs: num_ops, + timestamp: now, + } + } + + pub fn add_burnchain_block( + burnchain: &Burnchain, + block_header: &BurnchainBlockHeader, + blockstack_ops: Vec, + ) { + let mut burnchain_db = BurnchainDB::open(&burnchain.get_burnchaindb_path(), true).unwrap(); + + let mut indexer = BitcoinIndexer::new_unit_test(&burnchain.working_dir); + + test_debug!( + "Store header and block ops for {}-{} ({})", + &block_header.block_hash, + &block_header.parent_block_hash, + block_header.block_height + ); + indexer.raw_store_header(block_header.clone()).unwrap(); + burnchain_db + .raw_store_burnchain_block(burnchain, &indexer, block_header.clone(), blockstack_ops) + .unwrap(); + } + + /// Generate and commit the next burnchain block with the given block operations. 
+ /// * if `set_consensus_hash` is true, then each op's consensus_hash field will be set to + /// that of the resulting block snapshot. + /// * if `set_burn_hash` is true, then each op's burnchain header hash field will be set to + /// that of the resulting block snapshot. + /// + /// Returns ( + /// burnchain tip block height, + /// burnchain tip block hash, + /// burnchain tip consensus hash, + /// Option + /// ) + fn inner_next_burnchain_block( + &mut self, + mut blockstack_ops: Vec, + set_consensus_hash: bool, + set_burn_hash: bool, + update_burnchain: bool, + ops_determine_block_header: bool, + ) -> ( + u64, + BurnchainHeaderHash, + ConsensusHash, + Option, + ) { + let sortdb = self.sortdb.take().unwrap(); + let (block_height, block_hash, epoch_id) = { + let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); + let epoch_id = SortitionDB::get_stacks_epoch(sortdb.conn(), tip.block_height + 1) + .unwrap() + .unwrap() + .epoch_id; + + if set_consensus_hash { + Self::set_ops_consensus_hash(&mut blockstack_ops, &tip.consensus_hash); + } + + let block_header = Self::make_next_burnchain_block( + &self.config.burnchain, + tip.block_height, + &tip.burn_header_hash, + blockstack_ops.len() as u64, + ops_determine_block_header, + ); + + if set_burn_hash { + Self::set_ops_burn_header_hash(&mut blockstack_ops, &block_header.block_hash); + } + + if update_burnchain { + Self::add_burnchain_block( + &self.config.burnchain, + &block_header, + blockstack_ops.clone(), + ); + } + (block_header.block_height, block_header.block_hash, epoch_id) + }; + + let missing_pox_anchor_block_hash_opt = if epoch_id < StacksEpochId::Epoch30 { + self.coord + .handle_new_burnchain_block() + .unwrap() + .into_missing_block_hash() + } else if self.coord.handle_new_nakamoto_burnchain_block().unwrap() { + None + } else { + Some(BlockHeaderHash([0x00; 32])) + }; + + let pox_id = { + let ic = sortdb.index_conn(); + let tip_sort_id = SortitionDB::get_canonical_sortition_tip(sortdb.conn()).unwrap(); + let sortdb_reader = SortitionHandleConn::open_reader(&ic, &tip_sort_id).unwrap(); + sortdb_reader.get_pox_id().unwrap() + }; + + test_debug!("\n\nafter burn block {block_hash:?}, tip PoX ID is {pox_id:?}\n\n"); + + let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); + self.sortdb = Some(sortdb); + ( + block_height, + block_hash, + tip.consensus_hash, + missing_pox_anchor_block_hash_opt, + ) + } + + /// Store the given epoch 2.x Stacks block and microblock to staging, and then try and + /// process them. 
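+    ///
+    /// A rough sketch of the usual call sequence (as used by `tenure_with_txs` below;
+    /// the closure body is elided and the names are illustrative only):
+    /// ```ignore
+    /// let (burn_ops, block, microblocks) = chain.make_tenure(|miner, sortdb, chainstate, proof, parent, parent_mblock_hdr| {
+    ///     /* build and return (StacksBlock, Vec<StacksMicroblock>) */
+    /// });
+    /// let (_, _, consensus_hash) = chain.next_burnchain_block(burn_ops);
+    /// chain.process_stacks_epoch_at_tip(&block, &microblocks);
+    /// ```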
+ pub fn process_stacks_epoch_at_tip( + &mut self, + block: &StacksBlock, + microblocks: &[StacksMicroblock], + ) { + let sortdb = self.sortdb.take().unwrap(); + let mut node = self.stacks_node.take().unwrap(); + { + let ic = sortdb.index_conn(); + let tip = SortitionDB::get_canonical_burn_chain_tip(&ic).unwrap(); + node.chainstate + .preprocess_stacks_epoch(&ic, &tip, block, microblocks) + .unwrap(); + } + self.coord.handle_new_stacks_block().unwrap(); + + let pox_id = { + let ic = sortdb.index_conn(); + let tip_sort_id = SortitionDB::get_canonical_sortition_tip(sortdb.conn()).unwrap(); + let sortdb_reader = SortitionHandleConn::open_reader(&ic, &tip_sort_id).unwrap(); + sortdb_reader.get_pox_id().unwrap() + }; + test_debug!( + "\n\nafter stacks block {:?}, tip PoX ID is {pox_id:?}\n\n", + block.block_hash() + ); + + self.sortdb = Some(sortdb); + self.stacks_node = Some(node); + } + + pub fn sortdb(&mut self) -> &mut SortitionDB { + self.sortdb.as_mut().unwrap() + } + + pub fn sortdb_ref(&mut self) -> &SortitionDB { + self.sortdb.as_ref().unwrap() + } + + /// Make a tenure with the given transactions. Creates a coinbase tx with the given nonce, and then increments + /// the provided reference. + pub fn tenure_with_txs( + &mut self, + txs: &[StacksTransaction], + coinbase_nonce: &mut usize, + ) -> StacksBlockId { + let microblock_privkey = self.miner.next_microblock_privkey(); + let microblock_pubkeyhash = + Hash160::from_node_public_key(&StacksPublicKey::from_private(µblock_privkey)); + let tip = SortitionDB::get_canonical_burn_chain_tip(self.sortdb.as_ref().unwrap().conn()) + .unwrap(); + let burnchain = self.config.burnchain.clone(); + + let (burn_ops, stacks_block, microblocks) = self.make_tenure( + |ref mut miner, + ref mut sortdb, + ref mut chainstate, + vrf_proof, + ref parent_opt, + ref parent_microblock_header_opt| { + let parent_tip = get_parent_tip(parent_opt, chainstate, sortdb); + let coinbase_tx = make_coinbase(miner, *coinbase_nonce); + + let mut block_txs = vec![coinbase_tx]; + block_txs.extend_from_slice(txs); + + let block_builder = StacksBlockBuilder::make_regtest_block_builder( + &burnchain, + &parent_tip, + vrf_proof, + tip.total_burn, + µblock_pubkeyhash, + ) + .unwrap(); + let (anchored_block, _size, _cost) = + StacksBlockBuilder::make_anchored_block_from_txs( + block_builder, + chainstate, + &sortdb.index_handle(&tip.sortition_id), + block_txs, + ) + .unwrap(); + (anchored_block, vec![]) + }, + ); + + let (_, _, consensus_hash) = self.next_burnchain_block(burn_ops); + self.process_stacks_epoch_at_tip(&stacks_block, µblocks); + + *coinbase_nonce += 1; + + StacksBlockId::new(&consensus_hash, &stacks_block.block_hash()) + } + + /// Make a tenure, using `tenure_builder` to generate a Stacks block and a list of + /// microblocks. 
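+    ///
+    /// The `tenure_builder` callback is handed the miner, the sortition DB, the
+    /// chainstate, the tenure's VRF proof, the parent anchored block (if any), and
+    /// the parent microblock header (if any), and must return the anchored
+    /// `StacksBlock` together with its microblock stream.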
+ pub fn make_tenure( + &mut self, + mut tenure_builder: F, + ) -> ( + Vec, + StacksBlock, + Vec, + ) + where + F: FnMut( + &mut TestMiner, + &mut SortitionDB, + &mut StacksChainState, + &VRFProof, + Option<&StacksBlock>, + Option<&StacksMicroblockHeader>, + ) -> (StacksBlock, Vec), + { + let mut sortdb = self.sortdb.take().unwrap(); + let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); + + let mut burn_block = TestBurnchainBlock::new(&tip, 0); + let mut stacks_node = self.stacks_node.take().unwrap(); + + let parent_block_opt = stacks_node.get_last_anchored_block(&self.miner); + let parent_sortition_opt = parent_block_opt.as_ref().and_then(|parent_block| { + let ic = sortdb.index_conn(); + SortitionDB::get_block_snapshot_for_winning_stacks_block( + &ic, + &tip.sortition_id, + &parent_block.block_hash(), + ) + .unwrap() + }); + + let parent_microblock_header_opt = + get_last_microblock_header(&stacks_node, &self.miner, parent_block_opt.as_ref()); + let last_key = stacks_node.get_last_key(&self.miner); + + let network_id = self.config.network_id; + let chainstate_path = self.chainstate_path.clone(); + let burn_block_height = burn_block.block_height; + + let proof = self + .miner + .make_proof( + &last_key.public_key, + &burn_block.parent_snapshot.sortition_hash, + ) + .unwrap_or_else(|| panic!("FATAL: no private key for {:?}", last_key.public_key)); + + let (stacks_block, microblocks) = tenure_builder( + &mut self.miner, + &mut sortdb, + &mut stacks_node.chainstate, + &proof, + parent_block_opt.as_ref(), + parent_microblock_header_opt.as_ref(), + ); + + let mut block_commit_op = stacks_node.make_tenure_commitment( + &sortdb, + &mut burn_block, + &mut self.miner, + &stacks_block, + microblocks.clone(), + 1000, + &last_key, + parent_sortition_opt.as_ref(), + ); + + // patch up block-commit -- these blocks all mine off of genesis + if stacks_block.header.parent_block == BlockHeaderHash([0u8; 32]) { + block_commit_op.parent_block_ptr = 0; + block_commit_op.parent_vtxindex = 0; + } + + let leader_key_op = stacks_node.add_key_register(&mut burn_block, &mut self.miner); + + // patch in reward set info + let recipients = get_next_recipients( + &tip, + &mut stacks_node.chainstate, + &mut sortdb, + &self.config.burnchain, + &OnChainRewardSetProvider::new(), + ) + .unwrap_or_else(|e| panic!("Failure fetching recipient set: {e:?}")); + block_commit_op.commit_outs = match recipients { + Some(info) => { + let mut recipients = info + .recipients + .into_iter() + .map(|x| x.0) + .collect::>(); + if recipients.len() == 1 { + recipients.push(PoxAddress::standard_burn_address(false)); + } + recipients + } + None => { + if self + .config + .burnchain + .is_in_prepare_phase(burn_block.block_height) + { + vec![PoxAddress::standard_burn_address(false)] + } else { + vec![ + PoxAddress::standard_burn_address(false), + PoxAddress::standard_burn_address(false), + ] + } + } + }; + test_debug!( + "Block commit at height {} has {} recipients: {:?}", + block_commit_op.block_height, + block_commit_op.commit_outs.len(), + &block_commit_op.commit_outs + ); + + self.stacks_node = Some(stacks_node); + self.sortdb = Some(sortdb); + ( + vec![ + BlockstackOperationType::LeaderKeyRegister(leader_key_op), + BlockstackOperationType::LeaderBlockCommit(block_commit_op), + ], + stacks_block, + microblocks, + ) + } + + pub fn get_burn_block_height(&self) -> u64 { + SortitionDB::get_canonical_burn_chain_tip( + self.sortdb.as_ref().expect("Failed to get sortdb").conn(), + ) + .expect("Failed to get canonical burn 
chain tip") + .block_height + } + + pub fn get_reward_cycle(&self) -> u64 { + let block_height = self.get_burn_block_height(); + self.config + .burnchain + .block_height_to_reward_cycle(block_height) + .unwrap_or_else(|| panic!("Failed to get reward cycle for block height {block_height}")) + } + + /// Start the next Nakamoto tenure. + /// This generates the VRF key and block-commit txs, as well as the TenureChange and + /// leader key this commit references + pub fn begin_nakamoto_tenure( + &mut self, + tenure_change_cause: TenureChangeCause, + ) -> ( + Vec, + TenureChangePayload, + LeaderKeyRegisterOp, + ) { + let mut sortdb = self.sortdb.take().unwrap(); + let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); + + let mut burn_block = TestBurnchainBlock::new(&tip, 0); + let mut stacks_node = self.stacks_node.take().unwrap(); + + let (last_tenure_id, parent_block_opt, parent_tenure_opt) = + if let Some(nakamoto_parent_tenure) = self.nakamoto_parent_tenure_opt.as_ref() { + ( + nakamoto_parent_tenure.first().as_ref().unwrap().block_id(), + None, + Some(nakamoto_parent_tenure.clone()), + ) + } else { + get_nakamoto_parent(&self.miner, &stacks_node, &sortdb) + }; + + // find the VRF leader key register tx to use. + // it's the one pointed to by the parent tenure + let parent_consensus_hash_and_tenure_start_id_opt = + if let Some(parent_tenure) = parent_tenure_opt.as_ref() { + let tenure_start_block = parent_tenure.first().unwrap(); + Some(( + tenure_start_block.header.consensus_hash.clone(), + tenure_start_block.block_id(), + )) + } else if let Some(parent_block) = parent_block_opt.as_ref() { + let parent_header_info = + StacksChainState::get_stacks_block_header_info_by_index_block_hash( + stacks_node.chainstate.db(), + &last_tenure_id, + ) + .unwrap() + .unwrap(); + Some(( + parent_header_info.consensus_hash.clone(), + parent_header_info.index_block_hash(), + )) + } else { + None + }; + + let (ch, parent_tenure_start_block_id) = parent_consensus_hash_and_tenure_start_id_opt + .clone() + .expect("No leader key"); + // it's possible that the parent was a shadow block. 
+ // if so, find the highest non-shadow ancestor's block-commit, so we can + let mut cursor = ch; + let (tenure_sn, tenure_block_commit) = loop { + let tenure_sn = SortitionDB::get_block_snapshot_consensus(sortdb.conn(), &cursor) + .unwrap() + .unwrap(); + + let Some(tenure_block_commit) = get_block_commit_by_txid( + sortdb.conn(), + &tenure_sn.sortition_id, + &tenure_sn.winning_block_txid, + ) + .unwrap() else { + // parent must be a shadow block + let header = NakamotoChainState::get_block_header_nakamoto( + stacks_node.chainstate.db(), + &parent_tenure_start_block_id, + ) + .unwrap() + .unwrap() + .anchored_header + .as_stacks_nakamoto() + .cloned() + .unwrap(); + + assert!(header.is_shadow_block(), "Parent tenure start block ID {parent_tenure_start_block_id} has no block-commit and is not a shadow block"); + + cursor = stacks_node + .chainstate + .index_conn() + .get_parent_tenure_consensus_hash(&parent_tenure_start_block_id, &cursor) + .unwrap() + .unwrap(); + + continue; + }; + break (tenure_sn, tenure_block_commit); + }; + + let last_key = SortitionDB::get_leader_key_at( + &sortdb.index_conn(), + tenure_block_commit.key_block_ptr.into(), + tenure_block_commit.key_vtxindex.into(), + &tenure_sn.sortition_id, + ) + .unwrap() + .unwrap(); + + let network_id = self.config.network_id; + let chainstate_path = self.chainstate_path.clone(); + let burn_block_height = burn_block.block_height; + + let (mut block_commit_op, tenure_change_payload) = stacks_node.begin_nakamoto_tenure( + &sortdb, + &mut self.miner, + &mut burn_block, + &last_key, + parent_block_opt.as_ref(), + parent_tenure_opt.as_deref(), + 1000, + tenure_change_cause, + ); + + // patch up block-commit -- these blocks all mine off of genesis + if last_tenure_id == StacksBlockId(BOOT_BLOCK_HASH.0) { + block_commit_op.parent_block_ptr = 0; + block_commit_op.parent_vtxindex = 0; + } + + let mut burn_ops = vec![]; + if self.miner.last_VRF_public_key().is_none() { + let leader_key_op = stacks_node.add_key_register(&mut burn_block, &mut self.miner); + burn_ops.push(BlockstackOperationType::LeaderKeyRegister(leader_key_op)); + } + + // patch in reward set info + let recipients = get_nakamoto_next_recipients( + &tip, + &mut sortdb, + &mut stacks_node.chainstate, + &tenure_change_payload.previous_tenure_end, + &self.config.burnchain, + ) + .unwrap_or_else(|e| panic!("Failure fetching recipient set: {e:?}")); + block_commit_op.commit_outs = match recipients { + Some(info) => { + let mut recipients = info + .recipients + .into_iter() + .map(|x| x.0) + .collect::>(); + if recipients.len() == 1 { + recipients.push(PoxAddress::standard_burn_address(false)); + } + recipients + } + None => { + if self + .config + .burnchain + .is_in_prepare_phase(burn_block.block_height) + { + vec![PoxAddress::standard_burn_address(false)] + } else { + vec![ + PoxAddress::standard_burn_address(false), + PoxAddress::standard_burn_address(false), + ] + } + } + }; + test_debug!( + "Block commit at height {} has {} recipients: {:?}", + block_commit_op.block_height, + block_commit_op.commit_outs.len(), + &block_commit_op.commit_outs + ); + + burn_ops.push(BlockstackOperationType::LeaderBlockCommit(block_commit_op)); + + // prepare to mine + let miner_addr = self.miner.origin_address().unwrap(); + let miner_account = get_account(&mut stacks_node.chainstate, &sortdb, &miner_addr); + self.miner.set_nonce(miner_account.nonce); + + self.stacks_node = Some(stacks_node); + self.sortdb = Some(sortdb); + (burn_ops, tenure_change_payload, last_key) + } + + /// Make the VRF 
proof for this tenure. + /// Call after processing the block-commit + pub fn make_nakamoto_vrf_proof(&mut self, miner_key: LeaderKeyRegisterOp) -> VRFProof { + let sortdb = self.sortdb.take().unwrap(); + let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); + let proof = self + .miner + .make_proof(&miner_key.public_key, &tip.sortition_hash) + .unwrap_or_else(|| panic!("FATAL: no private key for {:?}", miner_key.public_key)); + self.sortdb = Some(sortdb); + debug!( + "VRF proof made from {:?} over {}: {proof:?}", + miner_key.public_key, &tip.sortition_hash + ); + proof + } + + /// Produce and process a Nakamoto tenure, after processing the block-commit from + /// begin_nakamoto_tenure(). You'd process the burnchain ops from begin_nakamoto_tenure(), + /// take the consensus hash, and feed it in here. + /// + /// Returns the blocks, their sizes, and runtime costs + pub fn make_nakamoto_tenure( + &mut self, + tenure_change: StacksTransaction, + coinbase: StacksTransaction, + timestamp: Option, + ) -> Result, ChainstateError> { + let cycle = self.get_reward_cycle(); + let mut signers = self.config.test_signers.clone().unwrap_or_default(); + signers.generate_aggregate_key(cycle); + + let mut sortdb = self.sortdb.take().unwrap(); + let mut stacks_node = self.stacks_node.take().unwrap(); + let blocks = TestStacksNode::make_nakamoto_tenure_blocks( + &mut stacks_node.chainstate, + &mut sortdb, + &mut self.miner, + &mut signers, + &tenure_change + .try_as_tenure_change() + .unwrap() + .tenure_consensus_hash + .clone(), + Some(tenure_change), + Some(coinbase), + &mut self.coord, + |_| {}, + |_, _, _, _| vec![], + |_| true, + self.mine_malleablized_blocks, + self.nakamoto_parent_tenure_opt.is_none(), + timestamp, + )?; + + let just_blocks = blocks + .clone() + .into_iter() + .map(|(block, _, _, _)| block) + .collect(); + + stacks_node.add_nakamoto_tenure_blocks(just_blocks); + + let mut malleablized_blocks: Vec = blocks + .clone() + .into_iter() + .flat_map(|(_, _, _, malleablized)| malleablized) + .collect(); + + self.malleablized_blocks.append(&mut malleablized_blocks); + + let block_data = blocks + .into_iter() + .map(|(blk, sz, cost, _)| (blk, sz, cost)) + .collect(); + + self.sortdb = Some(sortdb); + self.stacks_node = Some(stacks_node); + Ok(block_data) + } +} From 2376c98684e53a4ab0f487a83a88c0c2332e24cd Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Tue, 23 Sep 2025 12:45:59 -0700 Subject: [PATCH 2/9] Add ConsensusTest boiler plate for appending a block Signed-off-by: Jacinta Ferrant --- stackslib/src/chainstate/tests/consensus.rs | 335 ++++++++++++++++++++ stackslib/src/chainstate/tests/mod.rs | 2 + stackslib/src/net/tests/mod.rs | 218 +++++++++++++ 3 files changed, 555 insertions(+) create mode 100644 stackslib/src/chainstate/tests/consensus.rs diff --git a/stackslib/src/chainstate/tests/consensus.rs b/stackslib/src/chainstate/tests/consensus.rs new file mode 100644 index 00000000000..996078e9a62 --- /dev/null +++ b/stackslib/src/chainstate/tests/consensus.rs @@ -0,0 +1,335 @@ +// Copyright (C) 2025 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . +use std::collections::HashMap; + +use clarity::codec::StacksMessageCodec; +use clarity::types::chainstate::{StacksAddress, StacksPrivateKey, TrieHash}; +use clarity::types::{Address, StacksEpochId}; +use clarity::util::hash::{MerkleTree, Sha512Trunc256Sum}; +use clarity::util::secp256k1::MessageSignature; +use clarity::vm::costs::ExecutionCost; +use serde::{Deserialize, Serialize}; +use stacks_common::bitvec::BitVec; + +use crate::burnchains::PoxConstants; +use crate::chainstate::nakamoto::{NakamotoBlock, NakamotoBlockHeader, NakamotoChainState}; +use crate::chainstate::stacks::boot::RewardSet; +use crate::chainstate::stacks::{ + StacksTransaction, TenureChangeCause, TransactionAuth, TransactionPayload, TransactionVersion, +}; +use crate::chainstate::tests::TestChainstate; +use crate::net::tests::NakamotoBootPlan; + +pub struct ConsensusTest<'a> { + pub chain: TestChainstate<'a>, + pub test_vector: ConsensusTestVector, +} + +impl ConsensusTest<'_> { + pub fn new(test_name: &str, test_vector: ConsensusTestVector) -> Self { + let privk = StacksPrivateKey::from_hex( + "510f96a8efd0b11e211733c1ac5e3fa6f3d3fcdd62869e376c47decb3e14fea101", + ) + .unwrap(); + + let initial_balances = test_vector + .initial_balances + .iter() + .map(|(addr, amount)| (StacksAddress::from_string(addr).unwrap().into(), *amount)) + .collect(); + let epoch_id = StacksEpochId::try_from(test_vector.epoch_id).unwrap(); + let chain = match epoch_id { + StacksEpochId::Epoch30 + | StacksEpochId::Epoch31 + | StacksEpochId::Epoch32 + | StacksEpochId::Epoch33 => { + let mut chain = NakamotoBootPlan::new(test_name) + .with_pox_constants(10, 3) + .with_initial_balances(initial_balances) + .with_private_key(privk) + .boot_nakamoto_chainstate(None); + let (burn_ops, mut tenure_change, miner_key) = + chain.begin_nakamoto_tenure(TenureChangeCause::BlockFound); + let (_, header_hash, consensus_hash) = chain.next_burnchain_block(burn_ops); + let vrf_proof = chain.make_nakamoto_vrf_proof(miner_key); + + tenure_change.tenure_consensus_hash = consensus_hash.clone(); + tenure_change.burn_view_consensus_hash = consensus_hash.clone(); + let tenure_change_tx = chain.miner.make_nakamoto_tenure_change(tenure_change); + let coinbase_tx = chain.miner.make_nakamoto_coinbase(None, vrf_proof); + + let blocks_and_sizes = + chain.make_nakamoto_tenure(tenure_change_tx, coinbase_tx, Some(0)); + chain + } + StacksEpochId::Epoch10 + | StacksEpochId::Epoch20 + | StacksEpochId::Epoch2_05 + | StacksEpochId::Epoch21 + | StacksEpochId::Epoch22 + | StacksEpochId::Epoch23 + | StacksEpochId::Epoch24 + | StacksEpochId::Epoch25 => { + unimplemented!("Not bothering with pre nakamoto tests."); + } + }; + Self { chain, test_vector } + } + + /// Run a single test vector, validating consensus. 
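+    ///
+    /// Typical usage, as in the tests at the bottom of this file:
+    /// ```ignore
+    /// ConsensusTest::new(function_name!(), default_test_vector()).run();
+    /// ```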
+ pub fn run(mut self) { + debug!("--------- Running test vector ---------"); + let txs: Vec<_> = self + .test_vector + .payloads + .iter() + .map(|payload_str| { + let payload: TransactionPayload = serde_json::from_str(payload_str).unwrap(); + StacksTransaction::new( + TransactionVersion::Testnet, + TransactionAuth::from_p2pkh(&StacksPrivateKey::random()).unwrap(), + payload, + ) + }) + .collect(); + + let expected_state_index_root = + TrieHash::from_hex(&self.test_vector.expected_state_index_root).unwrap(); + + let (block, block_size) = self.construct_nakamoto_block(txs, expected_state_index_root); + let test_vector = self.test_vector.clone(); + + let mut stacks_node = self.chain.stacks_node.take().unwrap(); + let sortdb = self.chain.sortdb.take().unwrap(); + let chain_tip = + NakamotoChainState::get_canonical_block_header(stacks_node.chainstate.db(), &sortdb) + .unwrap() + .unwrap(); + let pox_constants = PoxConstants::test_default(); + + let (mut chainstate_tx, clarity_instance) = + stacks_node.chainstate.chainstate_tx_begin().unwrap(); + + let mut burndb_conn = sortdb.index_handle_at_tip(); + + debug!("--------- Appending block {} ---------", block.header.signer_signature_hash(); "block" => ?block); + let result = NakamotoChainState::append_block( + &mut chainstate_tx, + clarity_instance, + &mut burndb_conn, + &chain_tip.consensus_hash, + &pox_constants, + &chain_tip, + &chain_tip.burn_header_hash, + chain_tip.burn_header_height, + chain_tip.burn_header_timestamp, + &block, + block_size.try_into().unwrap(), + block.header.burn_spent, + 1500, + &RewardSet::empty(), + false, + ); + + let mut mismatches = Vec::new(); + + match (&result, &test_vector.expected_result) { + (Ok((epoch_receipt, _, _, tx_events)), ExpectedResult::Success(expected_outputs)) => { + debug!("--------- Appended Block ---------"; + "epoch_receipt" => ?epoch_receipt, + "tx_events" => ?tx_events + ); + + let actual_results = ExpectedOutputs { + transaction_return_types: epoch_receipt + .tx_receipts + .iter() + .map(|r| serde_json::to_string(&r.result).unwrap()) + .collect(), + transaction_costs: epoch_receipt + .tx_receipts + .iter() + .map(|r| r.execution_cost.clone()) + .collect(), + total_block_cost: epoch_receipt.anchored_block_cost.clone(), + marf_hash: epoch_receipt.header.index_root.to_hex(), + }; + + if actual_results != *expected_outputs { + if actual_results.transaction_return_types + != expected_outputs.transaction_return_types + { + mismatches.push(format!( + "Tx return types mismatch: actual {:?}, expected {:?}", + actual_results.transaction_return_types, + expected_outputs.transaction_return_types + )); + } + if actual_results.transaction_costs != expected_outputs.transaction_costs { + mismatches.push(format!( + "Tx costs mismatch: actual {:?}, expected {:?}", + actual_results.transaction_costs, expected_outputs.transaction_costs + )); + } + if actual_results.total_block_cost != expected_outputs.total_block_cost { + mismatches.push(format!( + "Total block cost mismatch: actual {:?}, expected {:?}", + actual_results.total_block_cost, expected_outputs.total_block_cost + )); + } + if actual_results.marf_hash != expected_outputs.marf_hash { + mismatches.push(format!( + "MARF hash mismatch: actual {}, expected {}", + actual_results.marf_hash, expected_outputs.marf_hash + )); + } + } + } + (Ok(_), ExpectedResult::Failure(_)) => { + mismatches.push("Expected failure but got success".to_string()); + } + (Err(e), ExpectedResult::Failure(expected_err)) => { + debug!("--------- Block Errored: {e} ---------"); + let 
actual_err = e.to_string(); + if !actual_err.contains(expected_err) { + mismatches.push(format!( + "Error mismatch: actual '{actual_err}', expected contains '{expected_err}'" + )); + } + } + (Err(_), ExpectedResult::Success(_)) => { + mismatches.push("Expected success but got failure".to_string()); + } + } + assert!(mismatches.is_empty(), "Mismatches: {mismatches:?}"); + } + + /// Construct a NakamotoBlock from the test vector. + fn construct_nakamoto_block( + &self, + txs: Vec, + state_index_root: TrieHash, + ) -> (NakamotoBlock, usize) { + let chain_tip = NakamotoChainState::get_canonical_block_header( + self.chain.stacks_node.as_ref().unwrap().chainstate.db(), + self.chain.sortdb.as_ref().unwrap(), + ) + .unwrap() + .unwrap(); + let mut block = NakamotoBlock { + header: NakamotoBlockHeader { + version: 1, + chain_length: chain_tip.stacks_block_height + 1, + burn_spent: 17000, + consensus_hash: chain_tip.consensus_hash.clone(), + parent_block_id: chain_tip.index_block_hash(), + tx_merkle_root: Sha512Trunc256Sum::from_data(&[]), + state_index_root, + timestamp: 1, + miner_signature: MessageSignature::empty(), + signer_signature: vec![], + pox_treatment: BitVec::ones(1).unwrap(), + }, + txs, + }; + + let tx_merkle_root = { + let txid_vecs: Vec<_> = block + .txs + .iter() + .map(|tx| tx.txid().as_bytes().to_vec()) + .collect(); + + MerkleTree::::new(&txid_vecs).root() + }; + block.header.tx_merkle_root = tx_merkle_root; + self.chain.miner.sign_nakamoto_block(&mut block); + let mut signers = self.chain.config.test_signers.clone().unwrap_or_default(); + signers.sign_nakamoto_block(&mut block, self.chain.get_reward_cycle()); + let block_len = block.serialize_to_vec().len(); + + (block, block_len) + } +} + +/// Test vector struct for `append_block` consensus testing. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct ConsensusTestVector { + /// A hex stacks address and amount pair for populating initial balances + pub initial_balances: HashMap, + /// Desired epoch of chainstate + pub epoch_id: u32, + /// Transaction payloads to stuff into the block + pub payloads: Vec, + /// Expected state root trie hash + pub expected_state_index_root: String, + /// Expected result: success with outputs or failure with error + pub expected_result: ExpectedResult, +} + +/// Enum representing expected result: success with outputs or failure with error +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub enum ExpectedResult { + Success(ExpectedOutputs), + // TODO: should match maybe on actual Error type? 
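+    /// Failure whose error string is expected to contain the given substring.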
+ Failure(String), +} + +/// Expected outputs for a successful block append +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct ExpectedOutputs { + pub transaction_return_types: Vec, + pub transaction_costs: Vec, + pub total_block_cost: ExecutionCost, + pub marf_hash: String, +} + +fn default_test_vector() -> ConsensusTestVector { + let outputs = ExpectedOutputs { + transaction_return_types: vec![], + transaction_costs: vec![], + total_block_cost: ExecutionCost::ZERO, + marf_hash: "f86c9ceaf2a17a4d9e502af73b6f00f89c18e5b58be501b3840f707f7b372dea".into(), + }; + ConsensusTestVector { + initial_balances: HashMap::new(), + expected_state_index_root: + "6fe3e70b95f5f56c9c7c2c59ba8fc9c19cdfede25d2dcd4d120438bc27dfa88b".into(), + epoch_id: StacksEpochId::Epoch30 as u32, + payloads: vec![], + expected_result: ExpectedResult::Success(outputs), + } +} + +fn failing_test_vector() -> ConsensusTestVector { + ConsensusTestVector { + initial_balances: HashMap::new(), + expected_state_index_root: + "0000000000000000000000000000000000000000000000000000000000000000".into(), + epoch_id: StacksEpochId::Epoch30 as u32, + payloads: vec![], + expected_result: ExpectedResult::Failure("state root mismatch".to_string()), + } +} + +#[test] +fn test_append_empty_block() { + ConsensusTest::new(function_name!(), default_test_vector()).run() +} + +#[test] +fn test_append_state_index_root_mismatch() { + ConsensusTest::new(function_name!(), failing_test_vector()).run() +} diff --git a/stackslib/src/chainstate/tests/mod.rs b/stackslib/src/chainstate/tests/mod.rs index ce38b60bad7..c16c1201cfe 100644 --- a/stackslib/src/chainstate/tests/mod.rs +++ b/stackslib/src/chainstate/tests/mod.rs @@ -12,6 +12,8 @@ // // You should have received a copy of the GNU General Public License // along with this program. If not, see . +pub mod consensus; + use std::fs; use clarity::types::chainstate::{BlockHeaderHash, BurnchainHeaderHash, StacksBlockId}; diff --git a/stackslib/src/net/tests/mod.rs b/stackslib/src/net/tests/mod.rs index 7762c4b68b6..2857fba8f7f 100644 --- a/stackslib/src/net/tests/mod.rs +++ b/stackslib/src/net/tests/mod.rs @@ -61,6 +61,7 @@ use crate::chainstate::stacks::{ TokenTransferMemo, TransactionAnchorMode, TransactionAuth, TransactionContractCall, TransactionPayload, TransactionVersion, }; +use crate::chainstate::tests::{TestChainstate, TestChainstateConfig}; use crate::clarity::vm::types::StacksAddressExtensions; use crate::core::{StacksEpoch, StacksEpochExtension}; use crate::net::relay::Relayer; @@ -348,6 +349,223 @@ impl NakamotoBootPlan { } } + /// Make a chsintate and transition it into the Nakamoto epoch. + /// The node needs to be stacking; otherwise, Nakamoto won't activate. 
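+    ///
+    /// A rough usage sketch (mirroring how `ConsensusTest::new` boots a chainstate;
+    /// the PoX constants shown are illustrative values only):
+    /// ```ignore
+    /// let chain = NakamotoBootPlan::new(test_name)
+    ///     .with_pox_constants(10, 3)
+    ///     .with_private_key(privk)
+    ///     .boot_nakamoto_chainstate(None);
+    /// ```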
+ pub fn boot_nakamoto_chainstate( + mut self, + observer: Option<&TestEventObserver>, + ) -> TestChainstate<'_> { + let mut chainstate_config = TestChainstateConfig::new(&self.test_name); + chainstate_config.txindex = self.txindex; + chainstate_config.network_id = self.network_id; + + let addr = StacksAddress::from_public_keys( + C32_ADDRESS_VERSION_TESTNET_SINGLESIG, + &AddressHashMode::SerializeP2PKH, + 1, + &vec![StacksPublicKey::from_private(&self.private_key)], + ) + .unwrap(); + + // reward cycles are 5 blocks long + // first 25 blocks are boot-up + // reward cycle 6 instantiates pox-3 + // we stack in reward cycle 7 so pox-3 is evaluated to find reward set participation + chainstate_config.epochs = Some(StacksEpoch::unit_test_3_0_only( + (self.pox_constants.pox_4_activation_height + + self.pox_constants.reward_cycle_length + + 1) + .into(), + )); + chainstate_config.initial_balances = vec![]; + if self.add_default_balance { + chainstate_config + .initial_balances + .push((addr.to_account_principal(), 1_000_000_000_000_000_000)); + } + chainstate_config + .initial_balances + .append(&mut self.initial_balances.clone()); + + // Create some balances for test Stackers + // They need their stacking amount + enough to pay fees + let fee_payment_balance = 10_000; + let stacker_balances = self.test_stackers.iter().map(|test_stacker| { + ( + PrincipalData::from(key_to_stacks_addr(&test_stacker.stacker_private_key)), + u64::try_from(test_stacker.amount).expect("Stacking amount too large"), + ) + }); + let signer_balances = self.test_stackers.iter().map(|test_stacker| { + ( + PrincipalData::from(key_to_stacks_addr(&test_stacker.signer_private_key)), + fee_payment_balance, + ) + }); + + chainstate_config.initial_balances.extend(stacker_balances); + chainstate_config.initial_balances.extend(signer_balances); + chainstate_config.test_signers = Some(self.test_signers.clone()); + chainstate_config.test_stackers = Some(self.test_stackers.clone()); + chainstate_config.burnchain.pox_constants = self.pox_constants.clone(); + let mut chain = TestChainstate::new_with_observer(chainstate_config.clone(), observer); + + chain.mine_malleablized_blocks = self.malleablized_blocks; + + self.advance_to_nakamoto_chainstate(&mut chain); + chain + } + + /// Bring a TestPeer into the Nakamoto Epoch + fn advance_to_nakamoto_chainstate(&mut self, chain: &mut TestChainstate) { + let mut chain_nonce = 0; + let addr = StacksAddress::p2pkh(false, &StacksPublicKey::from_private(&self.private_key)); + let default_pox_addr = + PoxAddress::from_legacy(AddressHashMode::SerializeP2PKH, addr.bytes().clone()); + + let mut sortition_height = chain.get_burn_block_height(); + debug!("\n\n======================"); + debug!( + "PoxConstants = {:#?}", + &chain.config.burnchain.pox_constants + ); + debug!("tip = {sortition_height}"); + debug!("========================\n\n"); + + let epoch_25_height = chain + .config + .epochs + .as_ref() + .unwrap() + .iter() + .find(|e| e.epoch_id == StacksEpochId::Epoch25) + .unwrap() + .start_height; + + let epoch_30_height = chain + .config + .epochs + .as_ref() + .unwrap() + .iter() + .find(|e| e.epoch_id == StacksEpochId::Epoch30) + .unwrap() + .start_height; + + // advance to just past pox-4 instantiation + let mut blocks_produced = false; + while sortition_height <= epoch_25_height { + chain.tenure_with_txs(&[], &mut chain_nonce); + sortition_height = chain.get_burn_block_height(); + blocks_produced = true; + } + + // need to produce at least 1 block before making pox-4 lockups: + // the way 
`burn-block-height` constant works in Epoch 2.5 is such + // that if its the first block produced, this will be 0 which will + // prevent the lockups from being valid. + if !blocks_produced { + chain.tenure_with_txs(&[], &mut chain_nonce); + sortition_height = chain.get_burn_block_height(); + } + + debug!("\n\n======================"); + debug!("Make PoX-4 lockups"); + debug!("========================\n\n"); + + let reward_cycle = chain + .config + .burnchain + .block_height_to_reward_cycle(sortition_height) + .unwrap(); + + // Make all the test Stackers stack + let stack_txs: Vec<_> = chain + .config + .test_stackers + .clone() + .unwrap_or_default() + .iter() + .map(|test_stacker| { + let pox_addr = test_stacker + .pox_addr + .clone() + .unwrap_or(default_pox_addr.clone()); + let max_amount = test_stacker.max_amount.unwrap_or(u128::MAX); + let signature = make_pox_4_signer_key_signature( + &pox_addr, + &test_stacker.signer_private_key, + reward_cycle.into(), + &crate::util_lib::signed_structured_data::pox4::Pox4SignatureTopic::StackStx, + chain.config.network_id, + 12, + max_amount, + 1, + ) + .unwrap() + .to_rsv(); + make_pox_4_lockup_chain_id( + &test_stacker.stacker_private_key, + 0, + test_stacker.amount, + &pox_addr, + 12, + &StacksPublicKey::from_private(&test_stacker.signer_private_key), + sortition_height + 1, + Some(signature), + max_amount, + 1, + chain.config.network_id, + ) + }) + .collect(); + + let mut stacks_block = chain.tenure_with_txs(&stack_txs, &mut chain_nonce); + + let (stacks_tip_ch, stacks_tip_bh) = + SortitionDB::get_canonical_stacks_chain_tip_hash(chain.sortdb().conn()).unwrap(); + let stacks_tip = StacksBlockId::new(&stacks_tip_ch, &stacks_tip_bh); + assert_eq!(stacks_block, stacks_tip); + + debug!("\n\n======================"); + debug!("Advance to the Prepare Phase"); + debug!("========================\n\n"); + while !chain.config.burnchain.is_in_prepare_phase(sortition_height) { + let (stacks_tip_ch, stacks_tip_bh) = + SortitionDB::get_canonical_stacks_chain_tip_hash(chain.sortdb().conn()).unwrap(); + let old_tip = StacksBlockId::new(&stacks_tip_ch, &stacks_tip_bh); + stacks_block = chain.tenure_with_txs(&[], &mut chain_nonce); + + let (stacks_tip_ch, stacks_tip_bh) = + SortitionDB::get_canonical_stacks_chain_tip_hash(chain.sortdb().conn()).unwrap(); + let stacks_tip = StacksBlockId::new(&stacks_tip_ch, &stacks_tip_bh); + assert_ne!(old_tip, stacks_tip); + sortition_height = chain.get_burn_block_height(); + } + + debug!("\n\n======================"); + debug!("Advance to Epoch 3.0"); + debug!("========================\n\n"); + + // advance to the start of epoch 3.0 + while sortition_height < epoch_30_height - 1 { + let (stacks_tip_ch, stacks_tip_bh) = + SortitionDB::get_canonical_stacks_chain_tip_hash(chain.sortdb().conn()).unwrap(); + let old_tip = StacksBlockId::new(&stacks_tip_ch, &stacks_tip_bh); + chain.tenure_with_txs(&[], &mut chain_nonce); + + let (stacks_tip_ch, stacks_tip_bh) = + SortitionDB::get_canonical_stacks_chain_tip_hash(chain.sortdb().conn()).unwrap(); + let stacks_tip = StacksBlockId::new(&stacks_tip_ch, &stacks_tip_bh); + assert_ne!(old_tip, stacks_tip); + sortition_height = chain.get_burn_block_height(); + } + + debug!("\n\n======================"); + debug!("Welcome to Nakamoto!"); + debug!("========================\n\n"); + } + /// Make a peer and transition it into the Nakamoto epoch. /// The node needs to be stacking; otherwise, Nakamoto won't activate. 
fn boot_nakamoto_peers( From 8b455aada10659a6038d10924f2a6f6248d46cd2 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Fri, 26 Sep 2025 12:08:41 -0700 Subject: [PATCH 3/9] Add a test mismatch structures Signed-off-by: Jacinta Ferrant --- stackslib/src/chainstate/tests/consensus.rs | 350 +++++++++++++------- 1 file changed, 228 insertions(+), 122 deletions(-) diff --git a/stackslib/src/chainstate/tests/consensus.rs b/stackslib/src/chainstate/tests/consensus.rs index 996078e9a62..8d115cf8c90 100644 --- a/stackslib/src/chainstate/tests/consensus.rs +++ b/stackslib/src/chainstate/tests/consensus.rs @@ -20,24 +20,226 @@ use clarity::types::{Address, StacksEpochId}; use clarity::util::hash::{MerkleTree, Sha512Trunc256Sum}; use clarity::util::secp256k1::MessageSignature; use clarity::vm::costs::ExecutionCost; +use clarity::vm::events::StacksTransactionEvent; +use clarity::vm::Value as ClarityValue; use serde::{Deserialize, Serialize}; use stacks_common::bitvec::BitVec; use crate::burnchains::PoxConstants; use crate::chainstate::nakamoto::{NakamotoBlock, NakamotoBlockHeader, NakamotoChainState}; -use crate::chainstate::stacks::boot::RewardSet; +use crate::chainstate::stacks::boot::{RewardSet, RewardSetData}; +use crate::chainstate::stacks::db::StacksEpochReceipt; use crate::chainstate::stacks::{ - StacksTransaction, TenureChangeCause, TransactionAuth, TransactionPayload, TransactionVersion, + Error as ChainstateError, StacksTransaction, TenureChangeCause, TransactionAuth, + TransactionPayload, TransactionVersion, }; use crate::chainstate::tests::TestChainstate; +use crate::clarity_vm::clarity::PreCommitClarityBlock; use crate::net::tests::NakamotoBootPlan; +/// Represents the expected output of a transaction in a test. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct ExpectedTransactionOutput { + /// The expected return value of the transaction. + pub return_type: ClarityValue, + /// The expected execution cost of the transaction. + pub cost: ExecutionCost, +} + +/// Represents the expected outputs for a block's transactions. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct ExpectedOutputs { + /// The expected outputs for each transaction, in input order. + pub transactions: Vec, + /// The total execution cost of the block. + pub total_block_cost: ExecutionCost, +} + +/// Represents the expected result of a consensus test. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub enum ExpectedResult { + /// The test should succeed with the specified outputs. + Success(ExpectedOutputs), + /// The test should fail with an error containing the specified string. + Failure(String), +} + +/// Defines a test vector for a consensus test, including chainstate setup and expected outcomes. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct ConsensusTestVector { + /// Initial balances for Stacks addresses during chainstate instantiation. + pub initial_balances: HashMap, + /// Hex representation of the MARF hash for block construction. + pub marf_hash: String, + /// The epoch ID for the test environment. + pub epoch_id: u32, + /// Transaction payloads to include in the block, as serialized strings. + pub payloads: Vec, + /// The expected result after appending the constructed block. + pub expected_result: ExpectedResult, +} + +/// Tracks mismatches between actual and expected transaction results. 
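A sketch of how the per-transaction record defined below is assembled by the comparison code; the concrete values here are illustrative only.

    let mismatch = TransactionMismatch::new(0)
        .with_return_type_mismatch(ClarityValue::Bool(false), ClarityValue::Bool(true))
        .with_cost_mismatch(ExecutionCost::ZERO, ExecutionCost::max_value());
    // Both fields are populated, so this entry would be reported.
    assert!(!mismatch.is_empty());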
+#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +pub struct TransactionMismatch { + /// The index of the transaction with mismatches. + pub index: u32, + /// Mismatch between actual and expected return types, if any. + pub return_type: Option<(ClarityValue, ClarityValue)>, + /// Mismatch between actual and expected execution costs, if any. + pub cost: Option<(ExecutionCost, ExecutionCost)>, +} + +impl TransactionMismatch { + /// Creates a new `TransactionMismatch` for the given transaction index. + fn new(index: u32) -> Self { + Self { + index, + return_type: None, + cost: None, + } + } + + /// Adds a return type mismatch to the transaction. + fn with_return_type_mismatch(mut self, actual: ClarityValue, expected: ClarityValue) -> Self { + self.return_type = Some((actual, expected)); + self + } + + /// Adds an execution cost mismatch to the transaction. + fn with_cost_mismatch(mut self, actual: ExecutionCost, expected: ExecutionCost) -> Self { + self.cost = Some((actual, expected)); + self + } + + /// Returns true if no mismatches are recorded. + fn is_empty(&self) -> bool { + self.return_type.is_none() && self.cost.is_none() + } +} + +/// Aggregates all mismatches between actual and expected test results. +#[derive(Debug, Clone, PartialEq, Default, Serialize, Deserialize)] +pub struct ConsensusMismatch { + /// Mismatches for individual transactions. + pub transactions: Vec, + /// Mismatch between actual and expected total block costs, if any. + pub total_block_cost: Option<(ExecutionCost, ExecutionCost)>, + /// Mismatch between actual and expected error messages, if any. + pub error: Option<(String, String)>, +} + +impl ConsensusMismatch { + /// Creates a `ConsensusMismatch` from test results, if mismatches exist. + pub fn from_test_result<'a>( + append_result: Result< + ( + StacksEpochReceipt, + PreCommitClarityBlock<'a>, + Option, + Vec, + ), + ChainstateError, + >, + expected_result: ExpectedResult, + ) -> Option { + let mut mismatches = ConsensusMismatch::default(); + match (append_result, expected_result) { + (Ok((epoch_receipt, _, _, _)), ExpectedResult::Success(expected)) => { + // Convert transaction receipts to `ExpectedTransactionOutput` for comparison. + let actual_transactions: Vec<_> = epoch_receipt + .tx_receipts + .iter() + .map(|r| { + ( + r.tx_index, + ExpectedTransactionOutput { + return_type: r.result.clone(), + cost: r.execution_cost.clone(), + }, + ) + }) + .collect(); + + // Compare each transaction's actual vs expected outputs. + for ((tx_index, actual_tx), expected_tx) in + actual_transactions.iter().zip(expected.transactions.iter()) + { + let mut tx_mismatch = TransactionMismatch::new(*tx_index); + let mut has_mismatch = false; + + if actual_tx.return_type != expected_tx.return_type { + tx_mismatch = tx_mismatch.with_return_type_mismatch( + actual_tx.return_type.clone(), + expected_tx.return_type.clone(), + ); + has_mismatch = true; + } + + if actual_tx.cost != expected_tx.cost { + tx_mismatch = tx_mismatch + .with_cost_mismatch(actual_tx.cost.clone(), expected_tx.cost.clone()); + has_mismatch = true; + } + + if has_mismatch { + mismatches.add_transaction_mismatch(tx_mismatch); + } + } + + // Compare total block execution cost. + if epoch_receipt.anchored_block_cost != expected.total_block_cost { + mismatches.add_total_block_cost_mismatch( + &epoch_receipt.anchored_block_cost, + &expected.total_block_cost, + ); + } + // TODO: add any additional mismatches we might care about? 
+ } + (Ok(_), ExpectedResult::Failure(expected_err)) => { + mismatches.error = Some(("Ok".to_string(), expected_err)); + } + (Err(actual_err), ExpectedResult::Failure(expected_err)) => { + if !actual_err.to_string().contains(&expected_err) { + mismatches.error = Some((actual_err.to_string(), expected_err)); + } + } + (Err(actual_err), ExpectedResult::Success(_)) => { + mismatches.error = Some((actual_err.to_string(), "Success".into())); + } + } + + if mismatches.is_empty() { + None + } else { + Some(mismatches) + } + } + + /// Adds a transaction mismatch to the collection. + fn add_transaction_mismatch(&mut self, mismatch: TransactionMismatch) { + self.transactions.push(mismatch); + } + + /// Records a total block cost mismatch. + fn add_total_block_cost_mismatch(&mut self, actual: &ExecutionCost, expected: &ExecutionCost) { + self.total_block_cost = Some((actual.clone(), expected.clone())); + } + + /// Returns true if no mismatches are recorded. + pub fn is_empty(&self) -> bool { + self.transactions.is_empty() && self.total_block_cost.is_none() && self.error.is_none() + } +} + +/// Represents a consensus test with chainstate and test vector. pub struct ConsensusTest<'a> { pub chain: TestChainstate<'a>, pub test_vector: ConsensusTestVector, } impl ConsensusTest<'_> { + /// Creates a new `ConsensusTest` with the given test name and vector. pub fn new(test_name: &str, test_vector: ConsensusTestVector) -> Self { let privk = StacksPrivateKey::from_hex( "510f96a8efd0b11e211733c1ac5e3fa6f3d3fcdd62869e376c47decb3e14fea101", @@ -82,13 +284,13 @@ impl ConsensusTest<'_> { | StacksEpochId::Epoch23 | StacksEpochId::Epoch24 | StacksEpochId::Epoch25 => { - unimplemented!("Not bothering with pre nakamoto tests."); + unimplemented!("Pre-Nakamoto epochs are not supported."); } }; Self { chain, test_vector } } - /// Run a single test vector, validating consensus. + /// Runs the consensus test, validating the results against the expected outcome. 
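The runner below boils down to: construct the block, call append_block, and diff the outcome against the vector. A hypothetical helper (not part of this patch) showing just that comparison step, with the same Result shape `from_test_result` accepts:

    fn assert_consensus_matches(
        result: Result<
            (
                StacksEpochReceipt,
                PreCommitClarityBlock<'_>,
                Option<RewardSetData>,
                Vec<StacksTransactionEvent>,
            ),
            ChainstateError,
        >,
        expected: ExpectedResult,
    ) {
        let mismatches = ConsensusMismatch::from_test_result(result, expected);
        assert!(mismatches.is_none(), "Mismatches found: {mismatches:?}");
    }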
pub fn run(mut self) { debug!("--------- Running test vector ---------"); let txs: Vec<_> = self @@ -105,11 +307,9 @@ impl ConsensusTest<'_> { }) .collect(); - let expected_state_index_root = - TrieHash::from_hex(&self.test_vector.expected_state_index_root).unwrap(); + let marf_hash = TrieHash::from_hex(&self.test_vector.marf_hash).unwrap(); - let (block, block_size) = self.construct_nakamoto_block(txs, expected_state_index_root); - let test_vector = self.test_vector.clone(); + let (block, block_size) = self.construct_nakamoto_block(txs, marf_hash); let mut stacks_node = self.chain.stacks_node.take().unwrap(); let sortdb = self.chain.sortdb.take().unwrap(); @@ -143,80 +343,18 @@ impl ConsensusTest<'_> { false, ); - let mut mismatches = Vec::new(); - - match (&result, &test_vector.expected_result) { - (Ok((epoch_receipt, _, _, tx_events)), ExpectedResult::Success(expected_outputs)) => { - debug!("--------- Appended Block ---------"; - "epoch_receipt" => ?epoch_receipt, - "tx_events" => ?tx_events - ); - - let actual_results = ExpectedOutputs { - transaction_return_types: epoch_receipt - .tx_receipts - .iter() - .map(|r| serde_json::to_string(&r.result).unwrap()) - .collect(), - transaction_costs: epoch_receipt - .tx_receipts - .iter() - .map(|r| r.execution_cost.clone()) - .collect(), - total_block_cost: epoch_receipt.anchored_block_cost.clone(), - marf_hash: epoch_receipt.header.index_root.to_hex(), - }; - - if actual_results != *expected_outputs { - if actual_results.transaction_return_types - != expected_outputs.transaction_return_types - { - mismatches.push(format!( - "Tx return types mismatch: actual {:?}, expected {:?}", - actual_results.transaction_return_types, - expected_outputs.transaction_return_types - )); - } - if actual_results.transaction_costs != expected_outputs.transaction_costs { - mismatches.push(format!( - "Tx costs mismatch: actual {:?}, expected {:?}", - actual_results.transaction_costs, expected_outputs.transaction_costs - )); - } - if actual_results.total_block_cost != expected_outputs.total_block_cost { - mismatches.push(format!( - "Total block cost mismatch: actual {:?}, expected {:?}", - actual_results.total_block_cost, expected_outputs.total_block_cost - )); - } - if actual_results.marf_hash != expected_outputs.marf_hash { - mismatches.push(format!( - "MARF hash mismatch: actual {}, expected {}", - actual_results.marf_hash, expected_outputs.marf_hash - )); - } - } - } - (Ok(_), ExpectedResult::Failure(_)) => { - mismatches.push("Expected failure but got success".to_string()); - } - (Err(e), ExpectedResult::Failure(expected_err)) => { - debug!("--------- Block Errored: {e} ---------"); - let actual_err = e.to_string(); - if !actual_err.contains(expected_err) { - mismatches.push(format!( - "Error mismatch: actual '{actual_err}', expected contains '{expected_err}'" - )); - } - } - (Err(_), ExpectedResult::Success(_)) => { - mismatches.push("Expected success but got failure".to_string()); - } - } - assert!(mismatches.is_empty(), "Mismatches: {mismatches:?}"); + debug!("--------- Appended block: {} ---------", result.is_ok()); + // Compare actual vs expected results. + let mismatches = + ConsensusMismatch::from_test_result(result, self.test_vector.expected_result); + let mismatch_str = mismatches + .as_ref() + .map(|m| serde_json::to_string_pretty(m).unwrap()) + .unwrap_or("".into()); + assert!(mismatches.is_none(), "Mismatches found: {mismatch_str}"); } - /// Construct a NakamotoBlock from the test vector. 
+ /// Constructs a Nakamoto block with the given transactions and state index root. fn construct_nakamoto_block( &self, txs: Vec, @@ -228,6 +366,9 @@ impl ConsensusTest<'_> { ) .unwrap() .unwrap(); + + let cycle = self.chain.get_reward_cycle(); + let mut block = NakamotoBlock { header: NakamotoBlockHeader { version: 1, @@ -251,73 +392,38 @@ impl ConsensusTest<'_> { .iter() .map(|tx| tx.txid().as_bytes().to_vec()) .collect(); - MerkleTree::::new(&txid_vecs).root() }; + block.header.tx_merkle_root = tx_merkle_root; self.chain.miner.sign_nakamoto_block(&mut block); let mut signers = self.chain.config.test_signers.clone().unwrap_or_default(); - signers.sign_nakamoto_block(&mut block, self.chain.get_reward_cycle()); + signers.sign_nakamoto_block(&mut block, cycle); let block_len = block.serialize_to_vec().len(); - (block, block_len) } } -/// Test vector struct for `append_block` consensus testing. -#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] -pub struct ConsensusTestVector { - /// A hex stacks address and amount pair for populating initial balances - pub initial_balances: HashMap, - /// Desired epoch of chainstate - pub epoch_id: u32, - /// Transaction payloads to stuff into the block - pub payloads: Vec, - /// Expected state root trie hash - pub expected_state_index_root: String, - /// Expected result: success with outputs or failure with error - pub expected_result: ExpectedResult, -} - -/// Enum representing expected result: success with outputs or failure with error -#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] -pub enum ExpectedResult { - Success(ExpectedOutputs), - // TODO: should match maybe on actual Error type? - Failure(String), -} - -/// Expected outputs for a successful block append -#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] -pub struct ExpectedOutputs { - pub transaction_return_types: Vec, - pub transaction_costs: Vec, - pub total_block_cost: ExecutionCost, - pub marf_hash: String, -} - +/// Creates a default test vector with empty transactions and zero cost. fn default_test_vector() -> ConsensusTestVector { let outputs = ExpectedOutputs { - transaction_return_types: vec![], - transaction_costs: vec![], + transactions: vec![], total_block_cost: ExecutionCost::ZERO, - marf_hash: "f86c9ceaf2a17a4d9e502af73b6f00f89c18e5b58be501b3840f707f7b372dea".into(), }; ConsensusTestVector { initial_balances: HashMap::new(), - expected_state_index_root: - "6fe3e70b95f5f56c9c7c2c59ba8fc9c19cdfede25d2dcd4d120438bc27dfa88b".into(), + marf_hash: "6fe3e70b95f5f56c9c7c2c59ba8fc9c19cdfede25d2dcd4d120438bc27dfa88b".into(), epoch_id: StacksEpochId::Epoch30 as u32, payloads: vec![], expected_result: ExpectedResult::Success(outputs), } } +/// Creates a test vector expecting a failure due to a state root mismatch. 
fn failing_test_vector() -> ConsensusTestVector { ConsensusTestVector { initial_balances: HashMap::new(), - expected_state_index_root: - "0000000000000000000000000000000000000000000000000000000000000000".into(), + marf_hash: "0000000000000000000000000000000000000000000000000000000000000000".into(), epoch_id: StacksEpochId::Epoch30 as u32, payloads: vec![], expected_result: ExpectedResult::Failure("state root mismatch".to_string()), From 39a1b4ac169af40e4a4d42488a3de7c7bf96af78 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Fri, 26 Sep 2025 13:11:47 -0700 Subject: [PATCH 4/9] CRC: directly compare Error as string, comment cleanup, and rename some struct Signed-off-by: Jacinta Ferrant --- stackslib/src/chainstate/tests/consensus.rs | 19 +++++++++++-------- stackslib/src/net/tests/mod.rs | 4 ++-- 2 files changed, 13 insertions(+), 10 deletions(-) diff --git a/stackslib/src/chainstate/tests/consensus.rs b/stackslib/src/chainstate/tests/consensus.rs index 8d115cf8c90..6aa6f8412c8 100644 --- a/stackslib/src/chainstate/tests/consensus.rs +++ b/stackslib/src/chainstate/tests/consensus.rs @@ -46,9 +46,9 @@ pub struct ExpectedTransactionOutput { pub cost: ExecutionCost, } -/// Represents the expected outputs for a block's transactions. +/// Represents the expected outputs for a block's execution. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] -pub struct ExpectedOutputs { +pub struct ExpectedBlockOutput { /// The expected outputs for each transaction, in input order. pub transactions: Vec, /// The total execution cost of the block. @@ -59,8 +59,10 @@ pub struct ExpectedOutputs { #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub enum ExpectedResult { /// The test should succeed with the specified outputs. - Success(ExpectedOutputs), - /// The test should fail with an error containing the specified string. + Success(ExpectedBlockOutput), + /// The test should fail with an error matching the specified string + /// Cannot match on the exact Error directly as they do not implement + /// Serialize/Deserialize or PartialEq Failure(String), } @@ -200,8 +202,9 @@ impl ConsensusMismatch { mismatches.error = Some(("Ok".to_string(), expected_err)); } (Err(actual_err), ExpectedResult::Failure(expected_err)) => { - if !actual_err.to_string().contains(&expected_err) { - mismatches.error = Some((actual_err.to_string(), expected_err)); + let actual_err_str = actual_err.to_string(); + if actual_err_str != expected_err { + mismatches.error = Some((actual_err_str, expected_err)); } } (Err(actual_err), ExpectedResult::Success(_)) => { @@ -406,7 +409,7 @@ impl ConsensusTest<'_> { /// Creates a default test vector with empty transactions and zero cost. 
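With the failure check now comparing error strings exactly rather than by substring, it is easiest to derive the expected text from the same error type the chainstate emits. A minimal sketch with a hypothetical message:

    let err = ChainstateError::InvalidStacksBlock("example failure".into());
    let expected = ExpectedResult::Failure(err.to_string());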
fn default_test_vector() -> ConsensusTestVector { - let outputs = ExpectedOutputs { + let outputs = ExpectedBlockOutput { transactions: vec![], total_block_cost: ExecutionCost::ZERO, }; @@ -426,7 +429,7 @@ fn failing_test_vector() -> ConsensusTestVector { marf_hash: "0000000000000000000000000000000000000000000000000000000000000000".into(), epoch_id: StacksEpochId::Epoch30 as u32, payloads: vec![], - expected_result: ExpectedResult::Failure("state root mismatch".to_string()), + expected_result: ExpectedResult::Failure(ChainstateError::InvalidStacksBlock("Block c8eeff18a0b03dec385bfe8268bc87ccf93fc00ff73af600c4e1aaef6e0dfaf5 state root mismatch: expected 0000000000000000000000000000000000000000000000000000000000000000, got 6fe3e70b95f5f56c9c7c2c59ba8fc9c19cdfede25d2dcd4d120438bc27dfa88b".into()).to_string()), } } diff --git a/stackslib/src/net/tests/mod.rs b/stackslib/src/net/tests/mod.rs index 2857fba8f7f..9bbb455f35a 100644 --- a/stackslib/src/net/tests/mod.rs +++ b/stackslib/src/net/tests/mod.rs @@ -349,7 +349,7 @@ impl NakamotoBootPlan { } } - /// Make a chsintate and transition it into the Nakamoto epoch. + /// Make a chainstate and transition it into the Nakamoto epoch. /// The node needs to be stacking; otherwise, Nakamoto won't activate. pub fn boot_nakamoto_chainstate( mut self, @@ -416,7 +416,7 @@ impl NakamotoBootPlan { chain } - /// Bring a TestPeer into the Nakamoto Epoch + /// Bring a TestChainstate into the Nakamoto Epoch fn advance_to_nakamoto_chainstate(&mut self, chain: &mut TestChainstate) { let mut chain_nonce = 0; let addr = StacksAddress::p2pkh(false, &StacksPublicKey::from_private(&self.private_key)); From 28701faadefa58c16f527ec511dd352dd53a1129 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Mon, 29 Sep 2025 11:51:38 -0700 Subject: [PATCH 5/9] CRC: add a helper function to pretty print ConsensusMismatch as a JSON string Signed-off-by: Jacinta Ferrant --- stackslib/src/chainstate/tests/consensus.rs | 19 ++++++++++++++----- 1 file changed, 14 insertions(+), 5 deletions(-) diff --git a/stackslib/src/chainstate/tests/consensus.rs b/stackslib/src/chainstate/tests/consensus.rs index 6aa6f8412c8..1b5a60ef100 100644 --- a/stackslib/src/chainstate/tests/consensus.rs +++ b/stackslib/src/chainstate/tests/consensus.rs @@ -233,6 +233,15 @@ impl ConsensusMismatch { pub fn is_empty(&self) -> bool { self.transactions.is_empty() && self.total_block_cost.is_none() && self.error.is_none() } + + /// Serializes the given `ConsensusMismatch` as pretty-printed JSON, + /// or returns an empty string if `None`. + pub fn to_json_string_pretty(mismatch: &Option) -> String { + mismatch + .as_ref() + .map(|m| serde_json::to_string_pretty(m).unwrap()) + .unwrap_or("".into()) + } } /// Represents a consensus test with chainstate and test vector. @@ -350,11 +359,11 @@ impl ConsensusTest<'_> { // Compare actual vs expected results. let mismatches = ConsensusMismatch::from_test_result(result, self.test_vector.expected_result); - let mismatch_str = mismatches - .as_ref() - .map(|m| serde_json::to_string_pretty(m).unwrap()) - .unwrap_or("".into()); - assert!(mismatches.is_none(), "Mismatches found: {mismatch_str}"); + assert!( + mismatches.is_none(), + "Mismatches found: {}", + ConsensusMismatch::to_json_string_pretty(&mismatches) + ); } /// Constructs a Nakamoto block with the given transactions and state index root. 
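Taken together, patches 3 through 5 leave the harness usable end to end: fill in a ConsensusTestVector, wrap it in a ConsensusTest, and run it. A minimal sketch as of this point in the series; the test name is a placeholder and the MARF hash is the one the empty-block test already uses (a new vector needs the hash from its own known-good run):

    let vector = ConsensusTestVector {
        initial_balances: HashMap::new(),
        marf_hash: "6fe3e70b95f5f56c9c7c2c59ba8fc9c19cdfede25d2dcd4d120438bc27dfa88b".into(),
        epoch_id: StacksEpochId::Epoch30 as u32,
        payloads: vec![],
        expected_result: ExpectedResult::Success(ExpectedBlockOutput {
            transactions: vec![],
            total_block_cost: ExecutionCost::ZERO,
        }),
    };
    ConsensusTest::new("example_case", vector).run();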
From c245ce486838e0c79bb65f92fba1fbf9068e12b3 Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Mon, 29 Sep 2025 13:11:38 -0700 Subject: [PATCH 6/9] Add a stx transfer test Signed-off-by: Jacinta Ferrant --- stackslib/src/chainstate/tests/consensus.rs | 158 ++++++++++++-------- 1 file changed, 97 insertions(+), 61 deletions(-) diff --git a/stackslib/src/chainstate/tests/consensus.rs b/stackslib/src/chainstate/tests/consensus.rs index 1b5a60ef100..fde17f8a66c 100644 --- a/stackslib/src/chainstate/tests/consensus.rs +++ b/stackslib/src/chainstate/tests/consensus.rs @@ -12,15 +12,16 @@ // // You should have received a copy of the GNU General Public License // along with this program. If not, see . -use std::collections::HashMap; - +use clarity::boot_util::boot_code_addr; use clarity::codec::StacksMessageCodec; -use clarity::types::chainstate::{StacksAddress, StacksPrivateKey, TrieHash}; -use clarity::types::{Address, StacksEpochId}; +use clarity::consts::CHAIN_ID_TESTNET; +use clarity::types::chainstate::{StacksAddress, StacksPrivateKey, StacksPublicKey, TrieHash}; +use clarity::types::StacksEpochId; use clarity::util::hash::{MerkleTree, Sha512Trunc256Sum}; use clarity::util::secp256k1::MessageSignature; use clarity::vm::costs::ExecutionCost; use clarity::vm::events::StacksTransactionEvent; +use clarity::vm::types::{PrincipalData, ResponseData}; use clarity::vm::Value as ClarityValue; use serde::{Deserialize, Serialize}; use stacks_common::bitvec::BitVec; @@ -29,14 +30,16 @@ use crate::burnchains::PoxConstants; use crate::chainstate::nakamoto::{NakamotoBlock, NakamotoBlockHeader, NakamotoChainState}; use crate::chainstate::stacks::boot::{RewardSet, RewardSetData}; use crate::chainstate::stacks::db::StacksEpochReceipt; -use crate::chainstate::stacks::{ - Error as ChainstateError, StacksTransaction, TenureChangeCause, TransactionAuth, - TransactionPayload, TransactionVersion, -}; +use crate::chainstate::stacks::{Error as ChainstateError, StacksTransaction, TenureChangeCause}; use crate::chainstate::tests::TestChainstate; use crate::clarity_vm::clarity::PreCommitClarityBlock; +use crate::core::test_util::make_stacks_transfer_tx; use crate::net::tests::NakamotoBootPlan; +pub const SK_1: &str = "a1289f6438855da7decf9b61b852c882c398cff1446b2a0f823538aa2ebef92e01"; +pub const SK_2: &str = "4ce9a8f7539ea93753a36405b16e8b57e15a552430410709c2b6d65dca5c02e201"; +pub const SK_3: &str = "cb95ddd0fe18ec57f4f3533b95ae564b3f1ae063dbf75b46334bd86245aef78501"; + /// Represents the expected output of a transaction in a test. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct ExpectedTransactionOutput { @@ -69,14 +72,14 @@ pub enum ExpectedResult { /// Defines a test vector for a consensus test, including chainstate setup and expected outcomes. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct ConsensusTestVector { - /// Initial balances for Stacks addresses during chainstate instantiation. - pub initial_balances: HashMap, + /// Initial balances for the provided PrincipalData during chainstate instantiation. + pub initial_balances: Vec<(PrincipalData, u64)>, /// Hex representation of the MARF hash for block construction. pub marf_hash: String, /// The epoch ID for the test environment. pub epoch_id: u32, - /// Transaction payloads to include in the block, as serialized strings. - pub payloads: Vec, + /// Transactions to include in the block + pub transactions: Vec, /// The expected result after appending the constructed block. 
pub expected_result: ExpectedResult, } @@ -253,16 +256,18 @@ pub struct ConsensusTest<'a> { impl ConsensusTest<'_> { /// Creates a new `ConsensusTest` with the given test name and vector. pub fn new(test_name: &str, test_vector: ConsensusTestVector) -> Self { + if let ExpectedResult::Success(output) = &test_vector.expected_result { + assert_eq!( + output.transactions.len(), + test_vector.transactions.len(), + "Test vector is invalid. Must specify an expected output per input transaction" + ); + } let privk = StacksPrivateKey::from_hex( "510f96a8efd0b11e211733c1ac5e3fa6f3d3fcdd62869e376c47decb3e14fea101", ) .unwrap(); - let initial_balances = test_vector - .initial_balances - .iter() - .map(|(addr, amount)| (StacksAddress::from_string(addr).unwrap().into(), *amount)) - .collect(); let epoch_id = StacksEpochId::try_from(test_vector.epoch_id).unwrap(); let chain = match epoch_id { StacksEpochId::Epoch30 @@ -271,7 +276,7 @@ impl ConsensusTest<'_> { | StacksEpochId::Epoch33 => { let mut chain = NakamotoBootPlan::new(test_name) .with_pox_constants(10, 3) - .with_initial_balances(initial_balances) + .with_initial_balances(test_vector.initial_balances.clone()) .with_private_key(privk) .boot_nakamoto_chainstate(None); let (burn_ops, mut tenure_change, miner_key) = @@ -305,24 +310,7 @@ impl ConsensusTest<'_> { /// Runs the consensus test, validating the results against the expected outcome. pub fn run(mut self) { debug!("--------- Running test vector ---------"); - let txs: Vec<_> = self - .test_vector - .payloads - .iter() - .map(|payload_str| { - let payload: TransactionPayload = serde_json::from_str(payload_str).unwrap(); - StacksTransaction::new( - TransactionVersion::Testnet, - TransactionAuth::from_p2pkh(&StacksPrivateKey::random()).unwrap(), - payload, - ) - }) - .collect(); - - let marf_hash = TrieHash::from_hex(&self.test_vector.marf_hash).unwrap(); - - let (block, block_size) = self.construct_nakamoto_block(txs, marf_hash); - + let (block, block_size) = self.construct_nakamoto_block(); let mut stacks_node = self.chain.stacks_node.take().unwrap(); let sortdb = self.chain.sortdb.take().unwrap(); let chain_tip = @@ -367,11 +355,8 @@ impl ConsensusTest<'_> { } /// Constructs a Nakamoto block with the given transactions and state index root. - fn construct_nakamoto_block( - &self, - txs: Vec, - state_index_root: TrieHash, - ) -> (NakamotoBlock, usize) { + fn construct_nakamoto_block(&self) -> (NakamotoBlock, usize) { + let state_index_root = TrieHash::from_hex(&self.test_vector.marf_hash).unwrap(); let chain_tip = NakamotoChainState::get_canonical_block_header( self.chain.stacks_node.as_ref().unwrap().chainstate.db(), self.chain.sortdb.as_ref().unwrap(), @@ -395,7 +380,7 @@ impl ConsensusTest<'_> { signer_signature: vec![], pox_treatment: BitVec::ones(1).unwrap(), }, - txs, + txs: self.test_vector.transactions.clone(), }; let tx_merkle_root = { @@ -416,38 +401,89 @@ impl ConsensusTest<'_> { } } -/// Creates a default test vector with empty transactions and zero cost. 
-fn default_test_vector() -> ConsensusTestVector { +#[test] +fn test_append_empty_block() { let outputs = ExpectedBlockOutput { transactions: vec![], total_block_cost: ExecutionCost::ZERO, }; - ConsensusTestVector { - initial_balances: HashMap::new(), + let test_vector = ConsensusTestVector { + initial_balances: Vec::new(), marf_hash: "6fe3e70b95f5f56c9c7c2c59ba8fc9c19cdfede25d2dcd4d120438bc27dfa88b".into(), epoch_id: StacksEpochId::Epoch30 as u32, - payloads: vec![], + transactions: vec![], expected_result: ExpectedResult::Success(outputs), - } + }; + ConsensusTest::new(function_name!(), test_vector).run() } -/// Creates a test vector expecting a failure due to a state root mismatch. -fn failing_test_vector() -> ConsensusTestVector { - ConsensusTestVector { - initial_balances: HashMap::new(), +#[test] +fn test_append_state_index_root_mismatch() { + let test_vector = ConsensusTestVector { + initial_balances: Vec::new(), + // An invalid MARF. Will result in state root mismatch marf_hash: "0000000000000000000000000000000000000000000000000000000000000000".into(), epoch_id: StacksEpochId::Epoch30 as u32, - payloads: vec![], + transactions: vec![], expected_result: ExpectedResult::Failure(ChainstateError::InvalidStacksBlock("Block c8eeff18a0b03dec385bfe8268bc87ccf93fc00ff73af600c4e1aaef6e0dfaf5 state root mismatch: expected 0000000000000000000000000000000000000000000000000000000000000000, got 6fe3e70b95f5f56c9c7c2c59ba8fc9c19cdfede25d2dcd4d120438bc27dfa88b".into()).to_string()), - } -} - -#[test] -fn test_append_empty_block() { - ConsensusTest::new(function_name!(), default_test_vector()).run() + }; + ConsensusTest::new(function_name!(), test_vector).run() } #[test] -fn test_append_state_index_root_mismatch() { - ConsensusTest::new(function_name!(), failing_test_vector()).run() +fn test_append_stx_transfers() { + let sender_privks = [ + StacksPrivateKey::from_hex(SK_1).unwrap(), + StacksPrivateKey::from_hex(SK_2).unwrap(), + StacksPrivateKey::from_hex(SK_3).unwrap(), + ]; + let send_amount = 1_000; + let tx_fee = 180; + let mut initial_balances = Vec::new(); + let transactions = sender_privks + .iter() + .map(|sender_privk| { + initial_balances.push(( + StacksAddress::p2pkh(false, &StacksPublicKey::from_private(sender_privk)).into(), + send_amount + tx_fee, + )); + make_stacks_transfer_tx( + sender_privk, + 0, + tx_fee, + CHAIN_ID_TESTNET, + &boot_code_addr(false).into(), + send_amount, + ) + }) + .collect(); + let transfer_result = ExpectedTransactionOutput { + return_type: ClarityValue::Response(ResponseData { + committed: true, + data: Box::new(ClarityValue::Bool(true)), + }), + cost: ExecutionCost { + write_length: 0, + write_count: 0, + read_length: 0, + read_count: 0, + runtime: 0, + }, + }; + let outputs = ExpectedBlockOutput { + transactions: vec![ + transfer_result.clone(), + transfer_result.clone(), + transfer_result, + ], + total_block_cost: ExecutionCost::ZERO, + }; + let test_vector = ConsensusTestVector { + initial_balances, + marf_hash: "3838b1ae67f108b10ec7a7afb6c2b18e6468be2423d7183ffa2f7824b619b8be".into(), + epoch_id: StacksEpochId::Epoch30 as u32, + transactions, + expected_result: ExpectedResult::Success(outputs), + }; + ConsensusTest::new(function_name!(), test_vector).run() } From b6fc3ff4d7bcf0dd83b7607191d34bfb5f4a382d Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Mon, 29 Sep 2025 16:03:19 -0700 Subject: [PATCH 7/9] Add a ExpressionStackDepthTooDeep test Signed-off-by: Jacinta Ferrant --- stackslib/src/chainstate/tests/consensus.rs | 67 +++++++++++++++++++-- 1 
file changed, 63 insertions(+), 4 deletions(-) diff --git a/stackslib/src/chainstate/tests/consensus.rs b/stackslib/src/chainstate/tests/consensus.rs index fde17f8a66c..f0828e903e0 100644 --- a/stackslib/src/chainstate/tests/consensus.rs +++ b/stackslib/src/chainstate/tests/consensus.rs @@ -19,10 +19,12 @@ use clarity::types::chainstate::{StacksAddress, StacksPrivateKey, StacksPublicKe use clarity::types::StacksEpochId; use clarity::util::hash::{MerkleTree, Sha512Trunc256Sum}; use clarity::util::secp256k1::MessageSignature; +use clarity::vm::ast::errors::{ParseError, ParseErrors}; +use clarity::vm::ast::stack_depth_checker::AST_CALL_STACK_DEPTH_BUFFER; use clarity::vm::costs::ExecutionCost; use clarity::vm::events::StacksTransactionEvent; use clarity::vm::types::{PrincipalData, ResponseData}; -use clarity::vm::Value as ClarityValue; +use clarity::vm::{Value as ClarityValue, MAX_CALL_STACK_DEPTH}; use serde::{Deserialize, Serialize}; use stacks_common::bitvec::BitVec; @@ -32,10 +34,9 @@ use crate::chainstate::stacks::boot::{RewardSet, RewardSetData}; use crate::chainstate::stacks::db::StacksEpochReceipt; use crate::chainstate::stacks::{Error as ChainstateError, StacksTransaction, TenureChangeCause}; use crate::chainstate::tests::TestChainstate; -use crate::clarity_vm::clarity::PreCommitClarityBlock; -use crate::core::test_util::make_stacks_transfer_tx; +use crate::clarity_vm::clarity::{Error as ClarityError, PreCommitClarityBlock}; +use crate::core::test_util::{make_contract_publish, make_stacks_transfer_tx}; use crate::net::tests::NakamotoBootPlan; - pub const SK_1: &str = "a1289f6438855da7decf9b61b852c882c398cff1446b2a0f823538aa2ebef92e01"; pub const SK_2: &str = "4ce9a8f7539ea93753a36405b16e8b57e15a552430410709c2b6d65dca5c02e201"; pub const SK_3: &str = "cb95ddd0fe18ec57f4f3533b95ae564b3f1ae063dbf75b46334bd86245aef78501"; @@ -487,3 +488,61 @@ fn test_append_stx_transfers() { }; ConsensusTest::new(function_name!(), test_vector).run() } + +#[test] +fn test_append_chainstate_error_expression_stack_depth_too_deep() { + // something just over the limit of the expression depth + let exceeds_repeat_factor = AST_CALL_STACK_DEPTH_BUFFER + (MAX_CALL_STACK_DEPTH as u64); + let tx_exceeds_body_start = "{ a : ".repeat(exceeds_repeat_factor as usize); + let tx_exceeds_body_end = "} ".repeat(exceeds_repeat_factor as usize); + let tx_exceeds_body = format!("{tx_exceeds_body_start}u1 {tx_exceeds_body_end}"); + + let sender_privk = StacksPrivateKey::from_hex(SK_1).unwrap(); + let tx_fee = (tx_exceeds_body.len() * 100) as u64; + let initial_balances = vec![( + StacksAddress::p2pkh(false, &StacksPublicKey::from_private(&sender_privk)).into(), + tx_fee, + )]; + let tx_bytes = make_contract_publish( + &sender_privk, + 0, + tx_fee, + CHAIN_ID_TESTNET, + "test-exceeds", + &tx_exceeds_body, + ); + let tx = StacksTransaction::consensus_deserialize(&mut &tx_bytes[..]).unwrap(); + let transfer_result = ExpectedTransactionOutput { + return_type: ClarityValue::Response(ResponseData { + committed: true, + data: Box::new(ClarityValue::Bool(true)), + }), + cost: ExecutionCost { + write_length: 0, + write_count: 0, + read_length: 0, + read_count: 0, + runtime: 0, + }, + }; + let outputs = ExpectedBlockOutput { + transactions: vec![transfer_result], + total_block_cost: ExecutionCost::ZERO, + }; + // TODO: should look into append_block. It does weird wrapping of ChainstateError variants inside ChainstateError::StacksInvalidBlock. 
+ let e = ChainstateError::ClarityError(ClarityError::Parse(ParseError::new( + ParseErrors::ExpressionStackDepthTooDeep, + ))); + let msg = format!("Invalid Stacks block 518dfea674b5c4874e025a31e01a522c8269005c0685d12658f0359757de6692: {e:?}"); + let test_vector = ConsensusTestVector { + initial_balances, + // Marf hash doesn't matter. It will fail with ExpressionStackDepthTooDeep + marf_hash: "0000000000000000000000000000000000000000000000000000000000000000".into(), + epoch_id: StacksEpochId::Epoch30 as u32, + transactions: vec![tx], + expected_result: ExpectedResult::Failure( + ChainstateError::InvalidStacksBlock(msg).to_string(), + ), + }; + ConsensusTest::new(function_name!(), test_vector).run() +} From 03ed436839e39ef2e1a415565f609e309a3b85ed Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Tue, 30 Sep 2025 13:06:26 -0700 Subject: [PATCH 8/9] Add support for epoch specific tests Signed-off-by: Jacinta Ferrant --- stackslib/src/chainstate/tests/consensus.rs | 463 ++++++++++++++++---- stackslib/src/chainstate/tests/mod.rs | 2 +- stackslib/src/net/tests/mod.rs | 19 +- 3 files changed, 401 insertions(+), 83 deletions(-) diff --git a/stackslib/src/chainstate/tests/consensus.rs b/stackslib/src/chainstate/tests/consensus.rs index f0828e903e0..eda5810c461 100644 --- a/stackslib/src/chainstate/tests/consensus.rs +++ b/stackslib/src/chainstate/tests/consensus.rs @@ -14,9 +14,14 @@ // along with this program. If not, see . use clarity::boot_util::boot_code_addr; use clarity::codec::StacksMessageCodec; -use clarity::consts::CHAIN_ID_TESTNET; +use clarity::consts::{ + CHAIN_ID_TESTNET, PEER_VERSION_EPOCH_1_0, PEER_VERSION_EPOCH_2_0, PEER_VERSION_EPOCH_2_05, + PEER_VERSION_EPOCH_2_1, PEER_VERSION_EPOCH_2_2, PEER_VERSION_EPOCH_2_3, PEER_VERSION_EPOCH_2_4, + PEER_VERSION_EPOCH_2_5, PEER_VERSION_EPOCH_3_0, PEER_VERSION_EPOCH_3_1, PEER_VERSION_EPOCH_3_2, + PEER_VERSION_EPOCH_3_3, STACKS_EPOCH_MAX, +}; use clarity::types::chainstate::{StacksAddress, StacksPrivateKey, StacksPublicKey, TrieHash}; -use clarity::types::StacksEpochId; +use clarity::types::{StacksEpoch, StacksEpochId}; use clarity::util::hash::{MerkleTree, Sha512Trunc256Sum}; use clarity::util::secp256k1::MessageSignature; use clarity::vm::ast::errors::{ParseError, ParseErrors}; @@ -29,6 +34,7 @@ use serde::{Deserialize, Serialize}; use stacks_common::bitvec::BitVec; use crate::burnchains::PoxConstants; +use crate::chainstate::burn::db::sortdb::SortitionDB; use crate::chainstate::nakamoto::{NakamotoBlock, NakamotoBlockHeader, NakamotoChainState}; use crate::chainstate::stacks::boot::{RewardSet, RewardSetData}; use crate::chainstate::stacks::db::StacksEpochReceipt; @@ -36,11 +42,103 @@ use crate::chainstate::stacks::{Error as ChainstateError, StacksTransaction, Ten use crate::chainstate::tests::TestChainstate; use crate::clarity_vm::clarity::{Error as ClarityError, PreCommitClarityBlock}; use crate::core::test_util::{make_contract_publish, make_stacks_transfer_tx}; +use crate::core::{EpochList, BLOCK_LIMIT_MAINNET_21}; use crate::net::tests::NakamotoBootPlan; pub const SK_1: &str = "a1289f6438855da7decf9b61b852c882c398cff1446b2a0f823538aa2ebef92e01"; pub const SK_2: &str = "4ce9a8f7539ea93753a36405b16e8b57e15a552430410709c2b6d65dca5c02e201"; pub const SK_3: &str = "cb95ddd0fe18ec57f4f3533b95ae564b3f1ae063dbf75b46334bd86245aef78501"; +fn epoch_3_0_onwards(first_burnchain_height: u64) -> EpochList { + info!("StacksEpoch unit_test first_burn_height = {first_burnchain_height}"); + + EpochList::new(&[ + StacksEpoch { + epoch_id: 
StacksEpochId::Epoch10, + start_height: 0, + end_height: 0, + block_limit: ExecutionCost::max_value(), + network_epoch: PEER_VERSION_EPOCH_1_0, + }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch20, + start_height: 0, + end_height: 0, + block_limit: ExecutionCost::max_value(), + network_epoch: PEER_VERSION_EPOCH_2_0, + }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch2_05, + start_height: 0, + end_height: 0, + block_limit: ExecutionCost::max_value(), + network_epoch: PEER_VERSION_EPOCH_2_05, + }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch21, + start_height: 0, + end_height: 0, + block_limit: ExecutionCost::max_value(), + network_epoch: PEER_VERSION_EPOCH_2_1, + }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch22, + start_height: 0, + end_height: 0, + block_limit: ExecutionCost::max_value(), + network_epoch: PEER_VERSION_EPOCH_2_2, + }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch23, + start_height: 0, + end_height: 0, + block_limit: ExecutionCost::max_value(), + network_epoch: PEER_VERSION_EPOCH_2_3, + }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch24, + start_height: 0, + end_height: 0, + block_limit: ExecutionCost::max_value(), + network_epoch: PEER_VERSION_EPOCH_2_4, + }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch25, + start_height: 0, + end_height: first_burnchain_height, + block_limit: BLOCK_LIMIT_MAINNET_21.clone(), + network_epoch: PEER_VERSION_EPOCH_2_5, + }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch30, + start_height: first_burnchain_height, + end_height: first_burnchain_height + 1, + block_limit: BLOCK_LIMIT_MAINNET_21.clone(), + network_epoch: PEER_VERSION_EPOCH_3_0, + }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch31, + start_height: first_burnchain_height + 1, + end_height: first_burnchain_height + 2, + block_limit: BLOCK_LIMIT_MAINNET_21.clone(), + network_epoch: PEER_VERSION_EPOCH_3_1, + }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch32, + start_height: first_burnchain_height + 2, + end_height: first_burnchain_height + 3, + block_limit: BLOCK_LIMIT_MAINNET_21.clone(), + network_epoch: PEER_VERSION_EPOCH_3_2, + }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch33, + start_height: first_burnchain_height + 3, + end_height: STACKS_EPOCH_MAX, + block_limit: BLOCK_LIMIT_MAINNET_21.clone(), + network_epoch: PEER_VERSION_EPOCH_3_3, + }, + ]) +} + /// Represents the expected output of a transaction in a test. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] pub struct ExpectedTransactionOutput { @@ -78,7 +176,7 @@ pub struct ConsensusTestVector { /// Hex representation of the MARF hash for block construction. pub marf_hash: String, /// The epoch ID for the test environment. - pub epoch_id: u32, + pub epoch_id: StacksEpochId, /// Transactions to include in the block pub transactions: Vec, /// The expected result after appending the constructed block. @@ -264,47 +362,61 @@ impl ConsensusTest<'_> { "Test vector is invalid. 
Must specify an expected output per input transaction" ); } + assert!( + !matches!( + test_vector.epoch_id, + StacksEpochId::Epoch10 + | StacksEpochId::Epoch20 + | StacksEpochId::Epoch2_05 + | StacksEpochId::Epoch21 + | StacksEpochId::Epoch22 + | StacksEpochId::Epoch23 + | StacksEpochId::Epoch24 + | StacksEpochId::Epoch25 + ), + "Pre-Nakamoto Tenures are not Supported" + ); let privk = StacksPrivateKey::from_hex( "510f96a8efd0b11e211733c1ac5e3fa6f3d3fcdd62869e376c47decb3e14fea101", ) .unwrap(); - let epoch_id = StacksEpochId::try_from(test_vector.epoch_id).unwrap(); - let chain = match epoch_id { - StacksEpochId::Epoch30 - | StacksEpochId::Epoch31 - | StacksEpochId::Epoch32 - | StacksEpochId::Epoch33 => { - let mut chain = NakamotoBootPlan::new(test_name) - .with_pox_constants(10, 3) - .with_initial_balances(test_vector.initial_balances.clone()) - .with_private_key(privk) - .boot_nakamoto_chainstate(None); - let (burn_ops, mut tenure_change, miner_key) = - chain.begin_nakamoto_tenure(TenureChangeCause::BlockFound); - let (_, header_hash, consensus_hash) = chain.next_burnchain_block(burn_ops); - let vrf_proof = chain.make_nakamoto_vrf_proof(miner_key); - - tenure_change.tenure_consensus_hash = consensus_hash.clone(); - tenure_change.burn_view_consensus_hash = consensus_hash.clone(); - let tenure_change_tx = chain.miner.make_nakamoto_tenure_change(tenure_change); - let coinbase_tx = chain.miner.make_nakamoto_coinbase(None, vrf_proof); - - let blocks_and_sizes = - chain.make_nakamoto_tenure(tenure_change_tx, coinbase_tx, Some(0)); - chain - } - StacksEpochId::Epoch10 - | StacksEpochId::Epoch20 - | StacksEpochId::Epoch2_05 - | StacksEpochId::Epoch21 - | StacksEpochId::Epoch22 - | StacksEpochId::Epoch23 - | StacksEpochId::Epoch24 - | StacksEpochId::Epoch25 => { - unimplemented!("Pre-Nakamoto epochs are not supported."); - } - }; + // We don't really ever want the reward cycle to force a new signer set...so for now + // Just set the cycle length to a high value + let mut boot_plan = NakamotoBootPlan::new(test_name) + .with_pox_constants(100, 3) + .with_initial_balances(test_vector.initial_balances.clone()) + .with_private_key(privk); + let epochs = epoch_3_0_onwards( + (boot_plan.pox_constants.pox_4_activation_height + + boot_plan.pox_constants.reward_cycle_length + + 1) as u64, + ); + boot_plan = boot_plan.with_epochs(epochs); + let mut chain = boot_plan.boot_nakamoto_chainstate(None); + let mut burn_block_height = chain.get_burn_block_height(); + let mut i = 0; + while SortitionDB::get_stacks_epoch(chain.sortdb().conn(), burn_block_height) + .unwrap() + .unwrap() + .epoch_id + < test_vector.epoch_id + { + let (burn_ops, mut tenure_change, miner_key) = + chain.begin_nakamoto_tenure(TenureChangeCause::BlockFound); + let (_, header_hash, consensus_hash) = chain.next_burnchain_block(burn_ops); + let vrf_proof = chain.make_nakamoto_vrf_proof(miner_key); + + tenure_change.tenure_consensus_hash = consensus_hash.clone(); + tenure_change.burn_view_consensus_hash = consensus_hash.clone(); + let tenure_change_tx = chain.miner.make_nakamoto_tenure_change(tenure_change); + let coinbase_tx = chain.miner.make_nakamoto_coinbase(None, vrf_proof); + + let _blocks_and_sizes = + chain.make_nakamoto_tenure(tenure_change_tx, coinbase_tx, Some(0)); + i += 1; + burn_block_height = chain.get_burn_block_height(); + } Self { chain, test_vector } } @@ -364,14 +476,18 @@ impl ConsensusTest<'_> { ) .unwrap() .unwrap(); - let cycle = self.chain.get_reward_cycle(); - + let burn_spent = 
SortitionDB::get_block_snapshot_consensus( + self.chain.sortdb_ref().conn(), + &chain_tip.consensus_hash, + ) + .unwrap() + .map(|sn| sn.total_burn).unwrap(); let mut block = NakamotoBlock { header: NakamotoBlockHeader { version: 1, chain_length: chain_tip.stacks_block_height + 1, - burn_spent: 17000, + burn_spent, consensus_hash: chain_tip.consensus_hash.clone(), parent_block_id: chain_tip.index_block_hash(), tx_merkle_root: Sha512Trunc256Sum::from_data(&[]), @@ -403,15 +519,63 @@ impl ConsensusTest<'_> { } #[test] -fn test_append_empty_block() { +fn test_append_empty_block_epoch_30() { + let outputs = ExpectedBlockOutput { + transactions: vec![], + total_block_cost: ExecutionCost::ZERO, + }; + let test_vector = ConsensusTestVector { + initial_balances: Vec::new(), + marf_hash: "f1934080b22ef0192cfb39710690e7cb0efa9cff950832b33544bde3aa1484a5".into(), + epoch_id: StacksEpochId::Epoch30, + transactions: vec![], + expected_result: ExpectedResult::Success(outputs), + }; + ConsensusTest::new(function_name!(), test_vector).run() +} + +#[test] +fn test_append_empty_block_epoch_31() { + let outputs = ExpectedBlockOutput { + transactions: vec![], + total_block_cost: ExecutionCost::ZERO, + }; + let test_vector = ConsensusTestVector { + initial_balances: Vec::new(), + marf_hash: "a05f1383613215f5789eb977e4c62dfbb789d90964e14865d109375f7f6dc3cf".into(), + epoch_id: StacksEpochId::Epoch31, + transactions: vec![], + expected_result: ExpectedResult::Success(outputs), + }; + ConsensusTest::new(function_name!(), test_vector).run() +} + +#[test] +fn test_append_empty_block_epoch_32() { + let outputs = ExpectedBlockOutput { + transactions: vec![], + total_block_cost: ExecutionCost::ZERO, + }; + let test_vector = ConsensusTestVector { + initial_balances: Vec::new(), + marf_hash: "f1934080b22ef0192cfb39710690e7cb0efa9cff950832b33544bde3aa1484a5".into(), + epoch_id: StacksEpochId::Epoch30, + transactions: vec![], + expected_result: ExpectedResult::Success(outputs), + }; + ConsensusTest::new(function_name!(), test_vector).run() +} + +#[test] +fn test_append_empty_block_epoch_33() { let outputs = ExpectedBlockOutput { transactions: vec![], total_block_cost: ExecutionCost::ZERO, }; let test_vector = ConsensusTestVector { initial_balances: Vec::new(), - marf_hash: "6fe3e70b95f5f56c9c7c2c59ba8fc9c19cdfede25d2dcd4d120438bc27dfa88b".into(), - epoch_id: StacksEpochId::Epoch30 as u32, + marf_hash: "f1934080b22ef0192cfb39710690e7cb0efa9cff950832b33544bde3aa1484a5".into(), + epoch_id: StacksEpochId::Epoch30, transactions: vec![], expected_result: ExpectedResult::Success(outputs), }; @@ -419,20 +583,62 @@ fn test_append_empty_block() { } #[test] -fn test_append_state_index_root_mismatch() { +fn test_append_state_index_root_mismatch_epoch_30() { + let test_vector = ConsensusTestVector { + initial_balances: Vec::new(), + // An invalid MARF. 
Will result in state root mismatch + marf_hash: "0000000000000000000000000000000000000000000000000000000000000000".into(), + epoch_id: StacksEpochId::Epoch30, + transactions: vec![], + expected_result: ExpectedResult::Failure(ChainstateError::InvalidStacksBlock("Block ef45bfa44231d9e7aff094b53cfd48df0456067312f169a499354c4273a66fe3 state root mismatch: expected 0000000000000000000000000000000000000000000000000000000000000000, got f1934080b22ef0192cfb39710690e7cb0efa9cff950832b33544bde3aa1484a5".into()).to_string()), + }; + ConsensusTest::new(function_name!(), test_vector).run() +} + +#[test] +fn test_append_state_index_root_mismatch_epoch_31() { + let test_vector = ConsensusTestVector { + initial_balances: Vec::new(), + // An invalid MARF. Will result in state root mismatch + marf_hash: "0000000000000000000000000000000000000000000000000000000000000000".into(), + epoch_id: StacksEpochId::Epoch31, + transactions: vec![], + expected_result: ExpectedResult::Failure(ChainstateError::InvalidStacksBlock("Block a14d0b5c8d3c49554aeb462a8fe019718195789fa1dcd642059b75e41f0ce9cc state root mismatch: expected 0000000000000000000000000000000000000000000000000000000000000000, got a05f1383613215f5789eb977e4c62dfbb789d90964e14865d109375f7f6dc3cf".into()).to_string()), + }; + ConsensusTest::new(function_name!(), test_vector).run() +} + +#[test] +fn test_append_state_index_root_mismatch_epoch_32() { let test_vector = ConsensusTestVector { initial_balances: Vec::new(), // An invalid MARF. Will result in state root mismatch marf_hash: "0000000000000000000000000000000000000000000000000000000000000000".into(), - epoch_id: StacksEpochId::Epoch30 as u32, + epoch_id: StacksEpochId::Epoch32, transactions: vec![], - expected_result: ExpectedResult::Failure(ChainstateError::InvalidStacksBlock("Block c8eeff18a0b03dec385bfe8268bc87ccf93fc00ff73af600c4e1aaef6e0dfaf5 state root mismatch: expected 0000000000000000000000000000000000000000000000000000000000000000, got 6fe3e70b95f5f56c9c7c2c59ba8fc9c19cdfede25d2dcd4d120438bc27dfa88b".into()).to_string()), + expected_result: ExpectedResult::Failure(ChainstateError::InvalidStacksBlock("Block f8120b4a632ee1d49fbbde3e01289588389cd205cab459a4493a7d58d2dc18ed state root mismatch: expected 0000000000000000000000000000000000000000000000000000000000000000, got c17829daff8746329c65ae658f4087519c6a8bd8c7f21e51644ddbc9c010390f".into()).to_string()), }; ConsensusTest::new(function_name!(), test_vector).run() } #[test] -fn test_append_stx_transfers() { +fn test_append_state_index_root_mismatch_epoch_33() { + let test_vector = ConsensusTestVector { + initial_balances: Vec::new(), + // An invalid MARF. 
Will result in state root mismatch + marf_hash: "0000000000000000000000000000000000000000000000000000000000000000".into(), + epoch_id: StacksEpochId::Epoch33, + transactions: vec![], + expected_result: ExpectedResult::Failure(ChainstateError::InvalidStacksBlock("Block 4dcb48b684d105ff0e0ab8becddd4a2d5623cc8b168aacf9c455e20b3e610e63 state root mismatch: expected 0000000000000000000000000000000000000000000000000000000000000000, got 23ecbcb91cac914ba3994a15f3ea7189bcab4e9762530cd0e6c7d237fcd6dc78".into()).to_string()), + }; + ConsensusTest::new(function_name!(), test_vector).run() +} + +fn create_stx_transfers_tx_and_outputs() -> ( + Vec<(PrincipalData, u64)>, + Vec, + ExpectedBlockOutput, +) { let sender_privks = [ StacksPrivateKey::from_hex(SK_1).unwrap(), StacksPrivateKey::from_hex(SK_2).unwrap(), @@ -479,10 +685,42 @@ fn test_append_stx_transfers() { ], total_block_cost: ExecutionCost::ZERO, }; + (initial_balances, transactions, outputs) +} + +#[test] +fn test_append_stx_transfers_epoch_30() { + let (initial_balances, transactions, outputs) = create_stx_transfers_tx_and_outputs(); + let test_vector = ConsensusTestVector { + initial_balances, + marf_hash: "63ea49669d2216ebc7e4f8b5e1cd2c99b8aff9806794adf87dcf709c0a244798".into(), + epoch_id: StacksEpochId::Epoch30, + transactions, + expected_result: ExpectedResult::Success(outputs), + }; + ConsensusTest::new(function_name!(), test_vector).run() +} + +#[test] +fn test_append_stx_transfers_epoch_31() { + let (initial_balances, transactions, outputs) = create_stx_transfers_tx_and_outputs(); + let test_vector = ConsensusTestVector { + initial_balances, + marf_hash: "7fc538e605a4a353871c4a655ae850fe9a70c3875b65f2bb42ea3bef5effed2c".into(), + epoch_id: StacksEpochId::Epoch31, + transactions, + expected_result: ExpectedResult::Success(outputs), + }; + ConsensusTest::new(function_name!(), test_vector).run() +} + +#[test] +fn test_append_stx_transfers_epoch_32() { + let (initial_balances, transactions, outputs) = create_stx_transfers_tx_and_outputs(); let test_vector = ConsensusTestVector { initial_balances, - marf_hash: "3838b1ae67f108b10ec7a7afb6c2b18e6468be2423d7183ffa2f7824b619b8be".into(), - epoch_id: StacksEpochId::Epoch30 as u32, + marf_hash: "4d5c9a6d07806ac5006137de22b083de66fff7119143dd5cd92e4a457d66e028".into(), + epoch_id: StacksEpochId::Epoch32, transactions, expected_result: ExpectedResult::Success(outputs), }; @@ -490,8 +728,19 @@ fn test_append_stx_transfers() { } #[test] -fn test_append_chainstate_error_expression_stack_depth_too_deep() { - // something just over the limit of the expression depth +fn test_append_stx_transfers_epoch_33() { + let (initial_balances, transactions, outputs) = create_stx_transfers_tx_and_outputs(); + let test_vector = ConsensusTestVector { + initial_balances, + marf_hash: "66eed8c0ab31db111a5adcc83d38a7004c6e464e3b9fb9f52ec589bc6d5f2d32".into(), + epoch_id: StacksEpochId::Epoch33, + transactions, + expected_result: ExpectedResult::Success(outputs), + }; + ConsensusTest::new(function_name!(), test_vector).run() +} + +fn create_exceeds_stacks_depth_contract_tx(sender_privk: &StacksPrivateKey) -> StacksTransaction { let exceeds_repeat_factor = AST_CALL_STACK_DEPTH_BUFFER + (MAX_CALL_STACK_DEPTH as u64); let tx_exceeds_body_start = "{ a : ".repeat(exceeds_repeat_factor as usize); let tx_exceeds_body_end = "} ".repeat(exceeds_repeat_factor as usize); @@ -499,10 +748,6 @@ fn test_append_chainstate_error_expression_stack_depth_too_deep() { let sender_privk = StacksPrivateKey::from_hex(SK_1).unwrap(); let tx_fee 
= (tx_exceeds_body.len() * 100) as u64; - let initial_balances = vec![( - StacksAddress::p2pkh(false, &StacksPublicKey::from_private(&sender_privk)).into(), - tx_fee, - )]; let tx_bytes = make_contract_publish( &sender_privk, 0, @@ -511,34 +756,100 @@ fn test_append_chainstate_error_expression_stack_depth_too_deep() { "test-exceeds", &tx_exceeds_body, ); - let tx = StacksTransaction::consensus_deserialize(&mut &tx_bytes[..]).unwrap(); - let transfer_result = ExpectedTransactionOutput { - return_type: ClarityValue::Response(ResponseData { - committed: true, - data: Box::new(ClarityValue::Bool(true)), - }), - cost: ExecutionCost { - write_length: 0, - write_count: 0, - read_length: 0, - read_count: 0, - runtime: 0, - }, + + StacksTransaction::consensus_deserialize(&mut &tx_bytes[..]).unwrap() +} + +#[test] +fn test_append_chainstate_error_expression_stack_depth_too_deep_epoch_30() { + let sender_privk = StacksPrivateKey::from_hex(SK_1).unwrap(); + let tx = create_exceeds_stacks_depth_contract_tx(&sender_privk); + let initial_balances = vec![( + StacksAddress::p2pkh(false, &StacksPublicKey::from_private(&sender_privk)).into(), + tx.get_tx_fee(), + )]; + // TODO: should look into append_block. It does weird wrapping of ChainstateError variants inside ChainstateError::StacksInvalidBlock. + let e = ChainstateError::ClarityError(ClarityError::Parse(ParseError::new( + ParseErrors::ExpressionStackDepthTooDeep, + ))); + let test_vector = ConsensusTestVector { + initial_balances, + // Marf hash doesn't matter. It will fail with ExpressionStackDepthTooDeep + marf_hash: "0000000000000000000000000000000000000000000000000000000000000000".into(), + epoch_id: StacksEpochId::Epoch30, + transactions: vec![tx], + expected_result: ExpectedResult::Failure( + ChainstateError::InvalidStacksBlock(format!("Invalid Stacks block ff0796f9934d45aad71871f317061acb99dd5ef1237a8747a78624a2824f7d32: {e:?}")).to_string(), + ), }; - let outputs = ExpectedBlockOutput { - transactions: vec![transfer_result], - total_block_cost: ExecutionCost::ZERO, + ConsensusTest::new(function_name!(), test_vector).run() +} + +#[test] +fn test_append_chainstate_error_expression_stack_depth_too_deep_epoch_31() { + let sender_privk = StacksPrivateKey::from_hex(SK_1).unwrap(); + let tx = create_exceeds_stacks_depth_contract_tx(&sender_privk); + let initial_balances = vec![( + StacksAddress::p2pkh(false, &StacksPublicKey::from_private(&sender_privk)).into(), + tx.get_tx_fee(), + )]; + let e = ChainstateError::ClarityError(ClarityError::Parse(ParseError::new( + ParseErrors::ExpressionStackDepthTooDeep, + ))); + let test_vector = ConsensusTestVector { + initial_balances, + // Marf hash doesn't matter. It will fail with ExpressionStackDepthTooDeep + marf_hash: "0000000000000000000000000000000000000000000000000000000000000000".into(), + epoch_id: StacksEpochId::Epoch31, + transactions: vec![tx], + expected_result: ExpectedResult::Failure( + ChainstateError::InvalidStacksBlock(format!("Invalid Stacks block 9da03cdc774989cea30445f1453073b070430867edcecb180d1cc9a6e9738b46: {e:?}")).to_string(), + ), }; - // TODO: should look into append_block. It does weird wrapping of ChainstateError variants inside ChainstateError::StacksInvalidBlock. 
+ ConsensusTest::new(function_name!(), test_vector).run() +} + +#[test] +fn test_append_chainstate_error_expression_stack_depth_too_deep_epoch_32() { + let sender_privk = StacksPrivateKey::from_hex(SK_1).unwrap(); + let tx = create_exceeds_stacks_depth_contract_tx(&sender_privk); + let initial_balances = vec![( + StacksAddress::p2pkh(false, &StacksPublicKey::from_private(&sender_privk)).into(), + tx.get_tx_fee(), + )]; + let e = ChainstateError::ClarityError(ClarityError::Parse(ParseError::new( + ParseErrors::ExpressionStackDepthTooDeep, + ))); + let test_vector = ConsensusTestVector { + initial_balances, + // Marf hash doesn't matter. It will fail with ExpressionStackDepthTooDeep + marf_hash: "0000000000000000000000000000000000000000000000000000000000000000".into(), + epoch_id: StacksEpochId::Epoch32, + transactions: vec![tx], + expected_result: ExpectedResult::Failure( + ChainstateError::InvalidStacksBlock(format!("Invalid Stacks block 76a6d95b3ec273a13f10080b3b18e225cc838044c5e3a3000b7ccdd8b50a5ae1: {e:?}")).to_string(), + ), + }; + ConsensusTest::new(function_name!(), test_vector).run() +} + +#[test] +fn test_append_chainstate_error_expression_stack_depth_too_deep_epoch_33() { + let sender_privk = StacksPrivateKey::from_hex(SK_1).unwrap(); + let tx = create_exceeds_stacks_depth_contract_tx(&sender_privk); + let initial_balances = vec![( + StacksAddress::p2pkh(false, &StacksPublicKey::from_private(&sender_privk)).into(), + tx.get_tx_fee(), + )]; let e = ChainstateError::ClarityError(ClarityError::Parse(ParseError::new( ParseErrors::ExpressionStackDepthTooDeep, ))); - let msg = format!("Invalid Stacks block 518dfea674b5c4874e025a31e01a522c8269005c0685d12658f0359757de6692: {e:?}"); + let msg = format!("Invalid Stacks block de3c507ab60e717275f97f267ec2608c96aaab42a7e32fc2d8129585dff9e74a: {e:?}"); let test_vector = ConsensusTestVector { initial_balances, // Marf hash doesn't matter. 
It will fail with ExpressionStackDepthTooDeep marf_hash: "0000000000000000000000000000000000000000000000000000000000000000".into(), - epoch_id: StacksEpochId::Epoch30 as u32, + epoch_id: StacksEpochId::Epoch33, transactions: vec![tx], expected_result: ExpectedResult::Failure( ChainstateError::InvalidStacksBlock(msg).to_string(), diff --git a/stackslib/src/chainstate/tests/mod.rs b/stackslib/src/chainstate/tests/mod.rs index c16c1201cfe..9d5e14abb5e 100644 --- a/stackslib/src/chainstate/tests/mod.rs +++ b/stackslib/src/chainstate/tests/mod.rs @@ -565,7 +565,7 @@ impl<'a> TestChainstate<'a> { self.sortdb.as_mut().unwrap() } - pub fn sortdb_ref(&mut self) -> &SortitionDB { + pub fn sortdb_ref(&self) -> &SortitionDB { self.sortdb.as_ref().unwrap() } diff --git a/stackslib/src/net/tests/mod.rs b/stackslib/src/net/tests/mod.rs index 9bbb455f35a..57859fed313 100644 --- a/stackslib/src/net/tests/mod.rs +++ b/stackslib/src/net/tests/mod.rs @@ -25,6 +25,8 @@ pub mod relay; use std::collections::{HashMap, HashSet}; use std::net::{IpAddr, Ipv4Addr, SocketAddr}; +use clarity::types::EpochList; +use clarity::vm::costs::ExecutionCost; use clarity::vm::types::{PrincipalData, QualifiedContractIdentifier}; use libstackerdb::StackerDBChunkData; use rand::Rng; @@ -101,11 +103,13 @@ pub struct NakamotoBootPlan { pub malleablized_blocks: bool, pub network_id: u32, pub txindex: bool, + pub epochs: Option>, } impl NakamotoBootPlan { pub fn new(test_name: &str) -> Self { let (test_signers, test_stackers) = TestStacker::common_signing_set(); + let pox_constants = TestPeerConfig::default().burnchain.pox_constants; Self { test_name: test_name.to_string(), pox_constants: TestPeerConfig::default().burnchain.pox_constants, @@ -119,6 +123,7 @@ impl NakamotoBootPlan { malleablized_blocks: true, network_id: TestPeerConfig::default().network_id, txindex: false, + epochs: None, } } @@ -154,6 +159,11 @@ impl NakamotoBootPlan { self } + pub fn with_epochs(mut self, epochs: EpochList) -> Self { + self.epochs = Some(epochs); + self + } + pub fn with_initial_balances(mut self, initial_balances: Vec<(PrincipalData, u64)>) -> Self { self.initial_balances = initial_balances; self @@ -367,16 +377,13 @@ impl NakamotoBootPlan { ) .unwrap(); - // reward cycles are 5 blocks long - // first 25 blocks are boot-up - // reward cycle 6 instantiates pox-3 - // we stack in reward cycle 7 so pox-3 is evaluated to find reward set participation - chainstate_config.epochs = Some(StacksEpoch::unit_test_3_0_only( + let default_epoch = StacksEpoch::unit_test_3_0_only( (self.pox_constants.pox_4_activation_height + self.pox_constants.reward_cycle_length + 1) .into(), - )); + ); + chainstate_config.epochs = Some(self.epochs.clone().unwrap_or(default_epoch)); chainstate_config.initial_balances = vec![]; if self.add_default_balance { chainstate_config From 857c0f9efb16255b96a07f0006a456887633dbfd Mon Sep 17 00:00:00 2001 From: Jacinta Ferrant Date: Tue, 30 Sep 2025 14:46:47 -0700 Subject: [PATCH 9/9] Add support for multiple blocks per epoch and chainstate reuse Signed-off-by: Jacinta Ferrant --- stackslib/src/chainstate/tests/consensus.rs | 675 ++++++++++---------- stackslib/src/chainstate/tests/mod.rs | 8 + 2 files changed, 350 insertions(+), 333 deletions(-) diff --git a/stackslib/src/chainstate/tests/consensus.rs b/stackslib/src/chainstate/tests/consensus.rs index eda5810c461..2c5731e8985 100644 --- a/stackslib/src/chainstate/tests/consensus.rs +++ b/stackslib/src/chainstate/tests/consensus.rs @@ -12,6 +12,8 @@ // // You should have received a copy of 
the GNU General Public License // along with this program. If not, see . +use std::collections::HashMap; + use clarity::boot_util::boot_code_addr; use clarity::codec::StacksMessageCodec; use clarity::consts::{ @@ -168,21 +170,26 @@ pub enum ExpectedResult { Failure(String), } -/// Defines a test vector for a consensus test, including chainstate setup and expected outcomes. +/// Represents a block to be appended in a test and its expected result. #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] -pub struct ConsensusTestVector { - /// Initial balances for the provided PrincipalData during chainstate instantiation. - pub initial_balances: Vec<(PrincipalData, u64)>, +pub struct TestBlock { /// Hex representation of the MARF hash for block construction. pub marf_hash: String, - /// The epoch ID for the test environment. - pub epoch_id: StacksEpochId, /// Transactions to include in the block pub transactions: Vec, /// The expected result after appending the constructed block. pub expected_result: ExpectedResult, } +/// Defines a test vector for a consensus test, including chainstate setup and expected outcomes. +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] +pub struct ConsensusTestVector { + /// Initial balances for the provided PrincipalData during chainstate instantiation. + pub initial_balances: Vec<(PrincipalData, u64)>, + /// A mapping of epoch to Blocks that should be applied in that epoch + pub epoch_blocks: HashMap>, +} + /// Tracks mismatches between actual and expected transaction results. #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] pub struct TransactionMismatch { @@ -249,7 +256,7 @@ impl ConsensusMismatch { ) -> Option { let mut mismatches = ConsensusMismatch::default(); match (append_result, expected_result) { - (Ok((epoch_receipt, _, _, _)), ExpectedResult::Success(expected)) => { + (Ok((epoch_receipt, clarity_commit, _, _)), ExpectedResult::Success(expected)) => { // Convert transaction receipts to `ExpectedTransactionOutput` for comparison. let actual_transactions: Vec<_> = epoch_receipt .tx_receipts @@ -299,6 +306,7 @@ impl ConsensusMismatch { ); } // TODO: add any additional mismatches we might care about? + clarity_commit.commit(); } (Ok(_), ExpectedResult::Failure(expected_err)) => { mismatches.error = Some(("Ok".to_string(), expected_err)); @@ -355,34 +363,41 @@ pub struct ConsensusTest<'a> { impl ConsensusTest<'_> { /// Creates a new `ConsensusTest` with the given test name and vector. pub fn new(test_name: &str, test_vector: ConsensusTestVector) -> Self { - if let ExpectedResult::Success(output) = &test_vector.expected_result { - assert_eq!( - output.transactions.len(), - test_vector.transactions.len(), - "Test vector is invalid. Must specify an expected output per input transaction" + // Validate blocks + for (epoch_id, blocks) in &test_vector.epoch_blocks { + assert!( + !matches!( + *epoch_id, + StacksEpochId::Epoch10 + | StacksEpochId::Epoch20 + | StacksEpochId::Epoch2_05 + | StacksEpochId::Epoch21 + | StacksEpochId::Epoch22 + | StacksEpochId::Epoch23 + | StacksEpochId::Epoch24 + | StacksEpochId::Epoch25 + ), + "Pre-Nakamoto Tenures are not Supported" ); + for block in blocks { + if let ExpectedResult::Success(output) = &block.expected_result { + assert_eq!( + output.transactions.len(), + block.transactions.len(), + "Test block is invalid. 
Must specify an expected output per input transaction" + ); + } + } } - assert!( - !matches!( - test_vector.epoch_id, - StacksEpochId::Epoch10 - | StacksEpochId::Epoch20 - | StacksEpochId::Epoch2_05 - | StacksEpochId::Epoch21 - | StacksEpochId::Epoch22 - | StacksEpochId::Epoch23 - | StacksEpochId::Epoch24 - | StacksEpochId::Epoch25 - ), - "Pre-Nakamoto Tenures are not Supported" - ); + let privk = StacksPrivateKey::from_hex( "510f96a8efd0b11e211733c1ac5e3fa6f3d3fcdd62869e376c47decb3e14fea101", ) .unwrap(); - // We don't really ever want the reward cycle to force a new signer set...so for now - // Just set the cycle length to a high value + // Set up chainstate to start at Epoch 3.0 + // We don't really ever want the reward cycle to force a new signer set... + // so for now just set the cycle length to a high value (100) let mut boot_plan = NakamotoBootPlan::new(test_name) .with_pox_constants(100, 3) .with_initial_balances(test_vector.initial_balances.clone()) @@ -393,83 +408,129 @@ impl ConsensusTest<'_> { + 1) as u64, ); boot_plan = boot_plan.with_epochs(epochs); - let mut chain = boot_plan.boot_nakamoto_chainstate(None); - let mut burn_block_height = chain.get_burn_block_height(); - let mut i = 0; - while SortitionDB::get_stacks_epoch(chain.sortdb().conn(), burn_block_height) - .unwrap() - .unwrap() - .epoch_id - < test_vector.epoch_id - { - let (burn_ops, mut tenure_change, miner_key) = - chain.begin_nakamoto_tenure(TenureChangeCause::BlockFound); - let (_, header_hash, consensus_hash) = chain.next_burnchain_block(burn_ops); - let vrf_proof = chain.make_nakamoto_vrf_proof(miner_key); + let chain = boot_plan.boot_nakamoto_chainstate(None); + + Self { chain, test_vector } + } + + /// Advances the chainstate to the specified epoch. Creating a tenure change block per burn block height + fn advance_to_epoch(&mut self, target_epoch: StacksEpochId) { + let burn_block_height = self.chain.get_burn_block_height(); + let mut current_epoch = + SortitionDB::get_stacks_epoch(self.chain.sortdb().conn(), burn_block_height) + .unwrap() + .unwrap() + .epoch_id; + assert!(current_epoch <= target_epoch, "Chainstate is already at a higher epoch than the target. Current epoch: {current_epoch}. Target epoch: {target_epoch}"); + while current_epoch < target_epoch { + let (burn_ops, mut tenure_change, miner_key) = self + .chain + .begin_nakamoto_tenure(TenureChangeCause::BlockFound); + let (_, header_hash, consensus_hash) = self.chain.next_burnchain_block(burn_ops); + let vrf_proof = self.chain.make_nakamoto_vrf_proof(miner_key); tenure_change.tenure_consensus_hash = consensus_hash.clone(); tenure_change.burn_view_consensus_hash = consensus_hash.clone(); - let tenure_change_tx = chain.miner.make_nakamoto_tenure_change(tenure_change); - let coinbase_tx = chain.miner.make_nakamoto_coinbase(None, vrf_proof); + let tenure_change_tx = self.chain.miner.make_nakamoto_tenure_change(tenure_change); + let coinbase_tx = self.chain.miner.make_nakamoto_coinbase(None, vrf_proof); let _blocks_and_sizes = - chain.make_nakamoto_tenure(tenure_change_tx, coinbase_tx, Some(0)); - i += 1; - burn_block_height = chain.get_burn_block_height(); + self.chain + .make_nakamoto_tenure(tenure_change_tx, coinbase_tx, Some(0)); + let burn_block_height = self.chain.get_burn_block_height(); + current_epoch = + SortitionDB::get_stacks_epoch(self.chain.sortdb().conn(), burn_block_height) + .unwrap() + .unwrap() + .epoch_id; } - Self { chain, test_vector } } - /// Runs the consensus test, validating the results against the expected outcome. 
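For orientation, a minimal sketch of how a vector is assembled under the new epoch_blocks layout, using only the types this patch defines (TestBlock, ConsensusTestVector, ExpectedResult, ExpectedBlockOutput). The all-zero MARF hash is just a placeholder here; a real test supplies the expected post-block state root, as the tests further down do:

    // Minimal sketch: append one empty block in Epoch 3.0 and expect success.
    let mut epoch_blocks = HashMap::new();
    epoch_blocks.insert(
        StacksEpochId::Epoch30,
        vec![TestBlock {
            // Placeholder only -- a real vector pins the expected post-block state root.
            marf_hash: "0000000000000000000000000000000000000000000000000000000000000000".into(),
            transactions: vec![],
            expected_result: ExpectedResult::Success(ExpectedBlockOutput {
                transactions: vec![],
                total_block_cost: ExecutionCost::ZERO,
            }),
        }],
    );
    let test_vector = ConsensusTestVector {
        initial_balances: Vec::new(),
        epoch_blocks,
    };
    ConsensusTest::new(function_name!(), test_vector).run();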
+ /// Runs the consensus test for the test vector, advancing epochs as needed. pub fn run(mut self) { - debug!("--------- Running test vector ---------"); - let (block, block_size) = self.construct_nakamoto_block(); - let mut stacks_node = self.chain.stacks_node.take().unwrap(); - let sortdb = self.chain.sortdb.take().unwrap(); - let chain_tip = - NakamotoChainState::get_canonical_block_header(stacks_node.chainstate.db(), &sortdb) + // Get sorted epochs + let mut epochs: Vec = + self.test_vector.epoch_blocks.keys().cloned().collect(); + epochs.sort(); + + for epoch in epochs { + debug!( + "--------- Processing epoch {epoch:?} with {} blocks ---------", + self.test_vector.epoch_blocks[&epoch].len() + ); + self.advance_to_epoch(epoch); + for (i, block) in self.test_vector.epoch_blocks[&epoch].iter().enumerate() { + debug!("--------- Running block {i} for epoch {epoch:?} ---------"); + let (nakamoto_block, block_size) = + self.construct_nakamoto_block(&block.marf_hash, &block.transactions); + let sortdb = self.chain.sortdb.take().unwrap(); + let chain_tip = NakamotoChainState::get_canonical_block_header( + self.chain.stacks_node().chainstate.db(), + &sortdb, + ) .unwrap() .unwrap(); - let pox_constants = PoxConstants::test_default(); + let pox_constants = PoxConstants::test_default(); - let (mut chainstate_tx, clarity_instance) = - stacks_node.chainstate.chainstate_tx_begin().unwrap(); + debug!( + "--------- Appending block {} ---------", + nakamoto_block.header.signer_signature_hash(); + "block" => ?nakamoto_block + ); + { + let (mut chainstate_tx, clarity_instance) = self + .chain + .stacks_node() + .chainstate + .chainstate_tx_begin() + .unwrap(); + + let mut burndb_conn = sortdb.index_handle_at_tip(); + + let result = NakamotoChainState::append_block( + &mut chainstate_tx, + clarity_instance, + &mut burndb_conn, + &chain_tip.consensus_hash, + &pox_constants, + &chain_tip, + &chain_tip.burn_header_hash, + chain_tip.burn_header_height, + chain_tip.burn_header_timestamp, + &nakamoto_block, + block_size.try_into().unwrap(), + nakamoto_block.header.burn_spent, + 1500, + &RewardSet::empty(), + false, + ); - let mut burndb_conn = sortdb.index_handle_at_tip(); + debug!("--------- Appended block: {} ---------", result.is_ok()); - debug!("--------- Appending block {} ---------", block.header.signer_signature_hash(); "block" => ?block); - let result = NakamotoChainState::append_block( - &mut chainstate_tx, - clarity_instance, - &mut burndb_conn, - &chain_tip.consensus_hash, - &pox_constants, - &chain_tip, - &chain_tip.burn_header_hash, - chain_tip.burn_header_height, - chain_tip.burn_header_timestamp, - &block, - block_size.try_into().unwrap(), - block.header.burn_spent, - 1500, - &RewardSet::empty(), - false, - ); + // Compare actual vs expected results. + let mismatches = + ConsensusMismatch::from_test_result(result, block.expected_result.clone()); + assert!( + mismatches.is_none(), + "Mismatches found in block {i} for epoch {epoch:?}: {}", + ConsensusMismatch::to_json_string_pretty(&mismatches) + ); + chainstate_tx.commit().unwrap(); + } - debug!("--------- Appended block: {} ---------", result.is_ok()); - // Compare actual vs expected results. 
- let mismatches = - ConsensusMismatch::from_test_result(result, self.test_vector.expected_result); - assert!( - mismatches.is_none(), - "Mismatches found: {}", - ConsensusMismatch::to_json_string_pretty(&mismatches) - ); + // Restore chainstate for the next block + self.chain.sortdb = Some(sortdb); + } + } } /// Constructs a Nakamoto block with the given transactions and state index root. - fn construct_nakamoto_block(&self) -> (NakamotoBlock, usize) { - let state_index_root = TrieHash::from_hex(&self.test_vector.marf_hash).unwrap(); + fn construct_nakamoto_block( + &self, + marf_hash: &str, + transactions: &[StacksTransaction], + ) -> (NakamotoBlock, usize) { + let state_index_root = TrieHash::from_hex(marf_hash).unwrap(); let chain_tip = NakamotoChainState::get_canonical_block_header( self.chain.stacks_node.as_ref().unwrap().chainstate.db(), self.chain.sortdb.as_ref().unwrap(), @@ -482,7 +543,8 @@ impl ConsensusTest<'_> { &chain_tip.consensus_hash, ) .unwrap() - .map(|sn| sn.total_burn).unwrap(); + .map(|sn| sn.total_burn) + .unwrap(); let mut block = NakamotoBlock { header: NakamotoBlockHeader { version: 1, @@ -497,7 +559,7 @@ impl ConsensusTest<'_> { signer_signature: vec![], pox_treatment: BitVec::ones(1).unwrap(), }, - txs: self.test_vector.transactions.clone(), + txs: transactions.to_vec(), }; let tx_merkle_root = { @@ -519,126 +581,117 @@ impl ConsensusTest<'_> { } #[test] -fn test_append_empty_block_epoch_30() { - let outputs = ExpectedBlockOutput { - transactions: vec![], - total_block_cost: ExecutionCost::ZERO, - }; - let test_vector = ConsensusTestVector { - initial_balances: Vec::new(), - marf_hash: "f1934080b22ef0192cfb39710690e7cb0efa9cff950832b33544bde3aa1484a5".into(), - epoch_id: StacksEpochId::Epoch30, - transactions: vec![], - expected_result: ExpectedResult::Success(outputs), - }; - ConsensusTest::new(function_name!(), test_vector).run() -} - -#[test] -fn test_append_empty_block_epoch_31() { - let outputs = ExpectedBlockOutput { +fn test_append_empty_blocks() { + let mut epoch_blocks = HashMap::new(); + let expected_result = ExpectedResult::Success(ExpectedBlockOutput { transactions: vec![], total_block_cost: ExecutionCost::ZERO, - }; - let test_vector = ConsensusTestVector { - initial_balances: Vec::new(), - marf_hash: "a05f1383613215f5789eb977e4c62dfbb789d90964e14865d109375f7f6dc3cf".into(), - epoch_id: StacksEpochId::Epoch31, - transactions: vec![], - expected_result: ExpectedResult::Success(outputs), - }; - ConsensusTest::new(function_name!(), test_vector).run() -} - -#[test] -fn test_append_empty_block_epoch_32() { - let outputs = ExpectedBlockOutput { - transactions: vec![], - total_block_cost: ExecutionCost::ZERO, - }; - let test_vector = ConsensusTestVector { - initial_balances: Vec::new(), - marf_hash: "f1934080b22ef0192cfb39710690e7cb0efa9cff950832b33544bde3aa1484a5".into(), - epoch_id: StacksEpochId::Epoch30, - transactions: vec![], - expected_result: ExpectedResult::Success(outputs), - }; - ConsensusTest::new(function_name!(), test_vector).run() -} - -#[test] -fn test_append_empty_block_epoch_33() { - let outputs = ExpectedBlockOutput { - transactions: vec![], - total_block_cost: ExecutionCost::ZERO, - }; - let test_vector = ConsensusTestVector { - initial_balances: Vec::new(), - marf_hash: "f1934080b22ef0192cfb39710690e7cb0efa9cff950832b33544bde3aa1484a5".into(), - epoch_id: StacksEpochId::Epoch30, - transactions: vec![], - expected_result: ExpectedResult::Success(outputs), - }; - ConsensusTest::new(function_name!(), test_vector).run() -} + }); + 
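+        // Note: each epoch below pins its own expected MARF root. The roots differ across
+        // epochs, presumably because the chainstate has advanced to a different burn height
+        // (and epoch) by the time each block is appended, so the committed state differs.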
epoch_blocks.insert( + StacksEpochId::Epoch30, + vec![TestBlock { + marf_hash: "f1934080b22ef0192cfb39710690e7cb0efa9cff950832b33544bde3aa1484a5".into(), + transactions: vec![], + expected_result: expected_result.clone(), + }], + ); + epoch_blocks.insert( + StacksEpochId::Epoch31, + vec![TestBlock { + marf_hash: "a05f1383613215f5789eb977e4c62dfbb789d90964e14865d109375f7f6dc3cf".into(), + transactions: vec![], + expected_result: expected_result.clone(), + }], + ); + epoch_blocks.insert( + StacksEpochId::Epoch32, + vec![TestBlock { + marf_hash: "c17829daff8746329c65ae658f4087519c6a8bd8c7f21e51644ddbc9c010390f".into(), + transactions: vec![], + expected_result: expected_result.clone(), + }], + ); + epoch_blocks.insert( + StacksEpochId::Epoch33, + vec![TestBlock { + marf_hash: "23ecbcb91cac914ba3994a15f3ea7189bcab4e9762530cd0e6c7d237fcd6dc78".into(), + transactions: vec![], + expected_result: expected_result.clone(), + }], + ); -#[test] -fn test_append_state_index_root_mismatch_epoch_30() { let test_vector = ConsensusTestVector { initial_balances: Vec::new(), - // An invalid MARF. Will result in state root mismatch - marf_hash: "0000000000000000000000000000000000000000000000000000000000000000".into(), - epoch_id: StacksEpochId::Epoch30, - transactions: vec![], - expected_result: ExpectedResult::Failure(ChainstateError::InvalidStacksBlock("Block ef45bfa44231d9e7aff094b53cfd48df0456067312f169a499354c4273a66fe3 state root mismatch: expected 0000000000000000000000000000000000000000000000000000000000000000, got f1934080b22ef0192cfb39710690e7cb0efa9cff950832b33544bde3aa1484a5".into()).to_string()), + epoch_blocks, }; - ConsensusTest::new(function_name!(), test_vector).run() + ConsensusTest::new(function_name!(), test_vector).run(); } #[test] -fn test_append_state_index_root_mismatch_epoch_31() { - let test_vector = ConsensusTestVector { - initial_balances: Vec::new(), - // An invalid MARF. 
Will result in state root mismatch - marf_hash: "0000000000000000000000000000000000000000000000000000000000000000".into(), - epoch_id: StacksEpochId::Epoch31, - transactions: vec![], - expected_result: ExpectedResult::Failure(ChainstateError::InvalidStacksBlock("Block a14d0b5c8d3c49554aeb462a8fe019718195789fa1dcd642059b75e41f0ce9cc state root mismatch: expected 0000000000000000000000000000000000000000000000000000000000000000, got a05f1383613215f5789eb977e4c62dfbb789d90964e14865d109375f7f6dc3cf".into()).to_string()), - }; - ConsensusTest::new(function_name!(), test_vector).run() -} +fn test_append_state_index_root_mismatches() { + let mut epoch_blocks = HashMap::new(); + epoch_blocks.insert( + StacksEpochId::Epoch30, + vec![TestBlock { + marf_hash: "0000000000000000000000000000000000000000000000000000000000000000".into(), + transactions: vec![], + expected_result: ExpectedResult::Failure( + ChainstateError::InvalidStacksBlock( + "Block ef45bfa44231d9e7aff094b53cfd48df0456067312f169a499354c4273a66fe3 state root mismatch: expected 0000000000000000000000000000000000000000000000000000000000000000, got f1934080b22ef0192cfb39710690e7cb0efa9cff950832b33544bde3aa1484a5".into(), + ) + .to_string(), + ), + }], + ); + epoch_blocks.insert( + StacksEpochId::Epoch31, + vec![TestBlock { + marf_hash: "0000000000000000000000000000000000000000000000000000000000000000".into(), + transactions: vec![], + expected_result: ExpectedResult::Failure( + ChainstateError::InvalidStacksBlock( + "Block a14d0b5c8d3c49554aeb462a8fe019718195789fa1dcd642059b75e41f0ce9cc state root mismatch: expected 0000000000000000000000000000000000000000000000000000000000000000, got a05f1383613215f5789eb977e4c62dfbb789d90964e14865d109375f7f6dc3cf".into(), + ) + .to_string(), + ), + }], + ); + epoch_blocks.insert( + StacksEpochId::Epoch32, + vec![TestBlock { + marf_hash: "0000000000000000000000000000000000000000000000000000000000000000".into(), + transactions: vec![], + expected_result: ExpectedResult::Failure( + ChainstateError::InvalidStacksBlock( + "Block f8120b4a632ee1d49fbbde3e01289588389cd205cab459a4493a7d58d2dc18ed state root mismatch: expected 0000000000000000000000000000000000000000000000000000000000000000, got c17829daff8746329c65ae658f4087519c6a8bd8c7f21e51644ddbc9c010390f".into(), + ) + .to_string(), + ), + }], + ); + epoch_blocks.insert( + StacksEpochId::Epoch33, + vec![TestBlock { + marf_hash: "0000000000000000000000000000000000000000000000000000000000000000".into(), + transactions: vec![], + expected_result: ExpectedResult::Failure( + ChainstateError::InvalidStacksBlock( + "Block 4dcb48b684d105ff0e0ab8becddd4a2d5623cc8b168aacf9c455e20b3e610e63 state root mismatch: expected 0000000000000000000000000000000000000000000000000000000000000000, got 23ecbcb91cac914ba3994a15f3ea7189bcab4e9762530cd0e6c7d237fcd6dc78".into(), + ) + .to_string(), + ), + }], + ); -#[test] -fn test_append_state_index_root_mismatch_epoch_32() { let test_vector = ConsensusTestVector { initial_balances: Vec::new(), - // An invalid MARF. 
Will result in state root mismatch - marf_hash: "0000000000000000000000000000000000000000000000000000000000000000".into(), - epoch_id: StacksEpochId::Epoch32, - transactions: vec![], - expected_result: ExpectedResult::Failure(ChainstateError::InvalidStacksBlock("Block f8120b4a632ee1d49fbbde3e01289588389cd205cab459a4493a7d58d2dc18ed state root mismatch: expected 0000000000000000000000000000000000000000000000000000000000000000, got c17829daff8746329c65ae658f4087519c6a8bd8c7f21e51644ddbc9c010390f".into()).to_string()), + epoch_blocks, }; - ConsensusTest::new(function_name!(), test_vector).run() + ConsensusTest::new(function_name!(), test_vector).run(); } #[test] -fn test_append_state_index_root_mismatch_epoch_33() { - let test_vector = ConsensusTestVector { - initial_balances: Vec::new(), - // An invalid MARF. Will result in state root mismatch - marf_hash: "0000000000000000000000000000000000000000000000000000000000000000".into(), - epoch_id: StacksEpochId::Epoch33, - transactions: vec![], - expected_result: ExpectedResult::Failure(ChainstateError::InvalidStacksBlock("Block 4dcb48b684d105ff0e0ab8becddd4a2d5623cc8b168aacf9c455e20b3e610e63 state root mismatch: expected 0000000000000000000000000000000000000000000000000000000000000000, got 23ecbcb91cac914ba3994a15f3ea7189bcab4e9762530cd0e6c7d237fcd6dc78".into()).to_string()), - }; - ConsensusTest::new(function_name!(), test_vector).run() -} - -fn create_stx_transfers_tx_and_outputs() -> ( - Vec<(PrincipalData, u64)>, - Vec, - ExpectedBlockOutput, -) { +fn test_append_stx_transfers_success() { let sender_privks = [ StacksPrivateKey::from_hex(SK_1).unwrap(), StacksPrivateKey::from_hex(SK_2).unwrap(), @@ -647,13 +700,14 @@ fn create_stx_transfers_tx_and_outputs() -> ( let send_amount = 1_000; let tx_fee = 180; let mut initial_balances = Vec::new(); - let transactions = sender_privks + let transactions: Vec<_> = sender_privks .iter() .map(|sender_privk| { initial_balances.push(( StacksAddress::p2pkh(false, &StacksPublicKey::from_private(sender_privk)).into(), send_amount + tx_fee, )); + // Interestingly, it doesn't seem to care about nonce... 
make_stacks_transfer_tx( sender_privk, 0, @@ -685,68 +739,55 @@ fn create_stx_transfers_tx_and_outputs() -> ( ], total_block_cost: ExecutionCost::ZERO, }; - (initial_balances, transactions, outputs) -} - -#[test] -fn test_append_stx_transfers_epoch_30() { - let (initial_balances, transactions, outputs) = create_stx_transfers_tx_and_outputs(); - let test_vector = ConsensusTestVector { - initial_balances, - marf_hash: "63ea49669d2216ebc7e4f8b5e1cd2c99b8aff9806794adf87dcf709c0a244798".into(), - epoch_id: StacksEpochId::Epoch30, - transactions, - expected_result: ExpectedResult::Success(outputs), - }; - ConsensusTest::new(function_name!(), test_vector).run() -} - -#[test] -fn test_append_stx_transfers_epoch_31() { - let (initial_balances, transactions, outputs) = create_stx_transfers_tx_and_outputs(); - let test_vector = ConsensusTestVector { - initial_balances, - marf_hash: "7fc538e605a4a353871c4a655ae850fe9a70c3875b65f2bb42ea3bef5effed2c".into(), - epoch_id: StacksEpochId::Epoch31, - transactions, - expected_result: ExpectedResult::Success(outputs), - }; - ConsensusTest::new(function_name!(), test_vector).run() -} + let mut epoch_blocks = HashMap::new(); + epoch_blocks.insert( + StacksEpochId::Epoch30, + vec![TestBlock { + marf_hash: "63ea49669d2216ebc7e4f8b5e1cd2c99b8aff9806794adf87dcf709c0a244798".into(), + transactions: transactions.clone(), + expected_result: ExpectedResult::Success(outputs.clone()), + }], + ); + epoch_blocks.insert( + StacksEpochId::Epoch31, + vec![TestBlock { + marf_hash: "7fc538e605a4a353871c4a655ae850fe9a70c3875b65f2bb42ea3bef5effed2c".into(), + transactions: transactions.clone(), + expected_result: ExpectedResult::Success(outputs.clone()), + }], + ); + epoch_blocks.insert( + StacksEpochId::Epoch32, + vec![TestBlock { + marf_hash: "4d5c9a6d07806ac5006137de22b083de66fff7119143dd5cd92e4a457d66e028".into(), + transactions: transactions.clone(), + expected_result: ExpectedResult::Success(outputs.clone()), + }], + ); + epoch_blocks.insert( + StacksEpochId::Epoch33, + vec![TestBlock { + marf_hash: "66eed8c0ab31db111a5adcc83d38a7004c6e464e3b9fb9f52ec589bc6d5f2d32".into(), + transactions: transactions.clone(), + expected_result: ExpectedResult::Success(outputs.clone()), + }], + ); -#[test] -fn test_append_stx_transfers_epoch_32() { - let (initial_balances, transactions, outputs) = create_stx_transfers_tx_and_outputs(); let test_vector = ConsensusTestVector { initial_balances, - marf_hash: "4d5c9a6d07806ac5006137de22b083de66fff7119143dd5cd92e4a457d66e028".into(), - epoch_id: StacksEpochId::Epoch32, - transactions, - expected_result: ExpectedResult::Success(outputs), + epoch_blocks, }; - ConsensusTest::new(function_name!(), test_vector).run() + ConsensusTest::new(function_name!(), test_vector).run(); } #[test] -fn test_append_stx_transfers_epoch_33() { - let (initial_balances, transactions, outputs) = create_stx_transfers_tx_and_outputs(); - let test_vector = ConsensusTestVector { - initial_balances, - marf_hash: "66eed8c0ab31db111a5adcc83d38a7004c6e464e3b9fb9f52ec589bc6d5f2d32".into(), - epoch_id: StacksEpochId::Epoch33, - transactions, - expected_result: ExpectedResult::Success(outputs), - }; - ConsensusTest::new(function_name!(), test_vector).run() -} - -fn create_exceeds_stacks_depth_contract_tx(sender_privk: &StacksPrivateKey) -> StacksTransaction { +fn test_append_chainstate_error_expression_stack_depth_too_deep() { + let sender_privk = StacksPrivateKey::from_hex(SK_1).unwrap(); let exceeds_repeat_factor = AST_CALL_STACK_DEPTH_BUFFER + (MAX_CALL_STACK_DEPTH as u64); let 
tx_exceeds_body_start = "{ a : ".repeat(exceeds_repeat_factor as usize); let tx_exceeds_body_end = "} ".repeat(exceeds_repeat_factor as usize); let tx_exceeds_body = format!("{tx_exceeds_body_start}u1 {tx_exceeds_body_end}"); - let sender_privk = StacksPrivateKey::from_hex(SK_1).unwrap(); let tx_fee = (tx_exceeds_body.len() * 100) as u64; let tx_bytes = make_contract_publish( &sender_privk, @@ -757,103 +798,71 @@ fn create_exceeds_stacks_depth_contract_tx(sender_privk: &StacksPrivateKey) -> S &tx_exceeds_body, ); - StacksTransaction::consensus_deserialize(&mut &tx_bytes[..]).unwrap() -} - -#[test] -fn test_append_chainstate_error_expression_stack_depth_too_deep_epoch_30() { - let sender_privk = StacksPrivateKey::from_hex(SK_1).unwrap(); - let tx = create_exceeds_stacks_depth_contract_tx(&sender_privk); + let tx = StacksTransaction::consensus_deserialize(&mut &tx_bytes[..]).unwrap(); let initial_balances = vec![( StacksAddress::p2pkh(false, &StacksPublicKey::from_private(&sender_privk)).into(), - tx.get_tx_fee(), - )]; - // TODO: should look into append_block. It does weird wrapping of ChainstateError variants inside ChainstateError::StacksInvalidBlock. - let e = ChainstateError::ClarityError(ClarityError::Parse(ParseError::new( - ParseErrors::ExpressionStackDepthTooDeep, - ))); - let test_vector = ConsensusTestVector { - initial_balances, - // Marf hash doesn't matter. It will fail with ExpressionStackDepthTooDeep - marf_hash: "0000000000000000000000000000000000000000000000000000000000000000".into(), - epoch_id: StacksEpochId::Epoch30, - transactions: vec![tx], - expected_result: ExpectedResult::Failure( - ChainstateError::InvalidStacksBlock(format!("Invalid Stacks block ff0796f9934d45aad71871f317061acb99dd5ef1237a8747a78624a2824f7d32: {e:?}")).to_string(), - ), - }; - ConsensusTest::new(function_name!(), test_vector).run() -} - -#[test] -fn test_append_chainstate_error_expression_stack_depth_too_deep_epoch_31() { - let sender_privk = StacksPrivateKey::from_hex(SK_1).unwrap(); - let tx = create_exceeds_stacks_depth_contract_tx(&sender_privk); - let initial_balances = vec![( - StacksAddress::p2pkh(false, &StacksPublicKey::from_private(&sender_privk)).into(), - tx.get_tx_fee(), - )]; - let e = ChainstateError::ClarityError(ClarityError::Parse(ParseError::new( - ParseErrors::ExpressionStackDepthTooDeep, - ))); - let test_vector = ConsensusTestVector { - initial_balances, - // Marf hash doesn't matter. It will fail with ExpressionStackDepthTooDeep - marf_hash: "0000000000000000000000000000000000000000000000000000000000000000".into(), - epoch_id: StacksEpochId::Epoch31, - transactions: vec![tx], - expected_result: ExpectedResult::Failure( - ChainstateError::InvalidStacksBlock(format!("Invalid Stacks block 9da03cdc774989cea30445f1453073b070430867edcecb180d1cc9a6e9738b46: {e:?}")).to_string(), - ), - }; - ConsensusTest::new(function_name!(), test_vector).run() -} - -#[test] -fn test_append_chainstate_error_expression_stack_depth_too_deep_epoch_32() { - let sender_privk = StacksPrivateKey::from_hex(SK_1).unwrap(); - let tx = create_exceeds_stacks_depth_contract_tx(&sender_privk); - let initial_balances = vec![( - StacksAddress::p2pkh(false, &StacksPublicKey::from_private(&sender_privk)).into(), - tx.get_tx_fee(), + tx_fee, )]; let e = ChainstateError::ClarityError(ClarityError::Parse(ParseError::new( ParseErrors::ExpressionStackDepthTooDeep, ))); - let test_vector = ConsensusTestVector { - initial_balances, - // Marf hash doesn't matter. 
It will fail with ExpressionStackDepthTooDeep - marf_hash: "0000000000000000000000000000000000000000000000000000000000000000".into(), - epoch_id: StacksEpochId::Epoch32, - transactions: vec![tx], - expected_result: ExpectedResult::Failure( - ChainstateError::InvalidStacksBlock(format!("Invalid Stacks block 76a6d95b3ec273a13f10080b3b18e225cc838044c5e3a3000b7ccdd8b50a5ae1: {e:?}")).to_string(), - ), - }; - ConsensusTest::new(function_name!(), test_vector).run() -} + let mut epoch_blocks = HashMap::new(); + epoch_blocks.insert( + StacksEpochId::Epoch30, + vec![TestBlock { + marf_hash: "0000000000000000000000000000000000000000000000000000000000000000".into(), + transactions: vec![tx.clone()], + expected_result: ExpectedResult::Failure( + ChainstateError::InvalidStacksBlock(format!( + "Invalid Stacks block ff0796f9934d45aad71871f317061acb99dd5ef1237a8747a78624a2824f7d32: {e:?}" + )) + .to_string(), + ), + }], + ); + epoch_blocks.insert( + StacksEpochId::Epoch31, + vec![TestBlock { + marf_hash: "0000000000000000000000000000000000000000000000000000000000000000".into(), + transactions: vec![tx.clone()], + expected_result: ExpectedResult::Failure( + ChainstateError::InvalidStacksBlock(format!( + "Invalid Stacks block 9da03cdc774989cea30445f1453073b070430867edcecb180d1cc9a6e9738b46: {e:?}" + )) + .to_string(), + ), + }], + ); + epoch_blocks.insert( + StacksEpochId::Epoch32, + vec![TestBlock { + marf_hash: "0000000000000000000000000000000000000000000000000000000000000000".into(), + transactions: vec![tx.clone()], + expected_result: ExpectedResult::Failure( + ChainstateError::InvalidStacksBlock(format!( + "Invalid Stacks block 76a6d95b3ec273a13f10080b3b18e225cc838044c5e3a3000b7ccdd8b50a5ae1: {e:?}" + )) + .to_string(), + ), + }], + ); + epoch_blocks.insert( + StacksEpochId::Epoch33, + vec![TestBlock { + marf_hash: "0000000000000000000000000000000000000000000000000000000000000000".into(), + transactions: vec![tx.clone()], + expected_result: ExpectedResult::Failure( + ChainstateError::InvalidStacksBlock(format!( + "Invalid Stacks block de3c507ab60e717275f97f267ec2608c96aaab42a7e32fc2d8129585dff9e74a: {e:?}" + )) + .to_string(), + ), + }], + ); -#[test] -fn test_append_chainstate_error_expression_stack_depth_too_deep_epoch_33() { - let sender_privk = StacksPrivateKey::from_hex(SK_1).unwrap(); - let tx = create_exceeds_stacks_depth_contract_tx(&sender_privk); - let initial_balances = vec![( - StacksAddress::p2pkh(false, &StacksPublicKey::from_private(&sender_privk)).into(), - tx.get_tx_fee(), - )]; - let e = ChainstateError::ClarityError(ClarityError::Parse(ParseError::new( - ParseErrors::ExpressionStackDepthTooDeep, - ))); - let msg = format!("Invalid Stacks block de3c507ab60e717275f97f267ec2608c96aaab42a7e32fc2d8129585dff9e74a: {e:?}"); let test_vector = ConsensusTestVector { initial_balances, - // Marf hash doesn't matter. 
It will fail with ExpressionStackDepthTooDeep - marf_hash: "0000000000000000000000000000000000000000000000000000000000000000".into(), - epoch_id: StacksEpochId::Epoch33, - transactions: vec![tx], - expected_result: ExpectedResult::Failure( - ChainstateError::InvalidStacksBlock(msg).to_string(), - ), + epoch_blocks, }; - ConsensusTest::new(function_name!(), test_vector).run() + ConsensusTest::new(function_name!(), test_vector).run(); } diff --git a/stackslib/src/chainstate/tests/mod.rs b/stackslib/src/chainstate/tests/mod.rs index 9d5e14abb5e..3d4037aeaf9 100644 --- a/stackslib/src/chainstate/tests/mod.rs +++ b/stackslib/src/chainstate/tests/mod.rs @@ -569,6 +569,14 @@ impl<'a> TestChainstate<'a> { self.sortdb.as_ref().unwrap() } + pub fn stacks_node(&mut self) -> &mut TestStacksNode { + self.stacks_node.as_mut().unwrap() + } + + pub fn stacks_node_ref(&self) -> &TestStacksNode { + self.stacks_node.as_ref().unwrap() + } + /// Make a tenure with the given transactions. Creates a coinbase tx with the given nonce, and then increments /// the provided reference. pub fn tenure_with_txs(