diff --git a/.github/workflows/bitcoin-tests.yml b/.github/workflows/bitcoin-tests.yml index 901b9fc040..27e76a646d 100644 --- a/.github/workflows/bitcoin-tests.yml +++ b/.github/workflows/bitcoin-tests.yml @@ -98,6 +98,7 @@ jobs: - tests::signer::v0::miner_forking - tests::signer::v0::reloads_signer_set_in - tests::signer::v0::signers_broadcast_signed_blocks + - tests::signer::v0::min_gap_between_blocks - tests::nakamoto_integrations::stack_stx_burn_op_integration_test - tests::nakamoto_integrations::check_block_heights - tests::nakamoto_integrations::clarity_burn_state diff --git a/.github/workflows/pr-differences-mutants.yml b/.github/workflows/pr-differences-mutants.yml index fc4a725687..d53e2ca661 100644 --- a/.github/workflows/pr-differences-mutants.yml +++ b/.github/workflows/pr-differences-mutants.yml @@ -9,6 +9,7 @@ on: - ready_for_review paths: - '**.rs' + workflow_dispatch: concurrency: group: pr-differences-${{ github.head_ref || github.ref || github.run_id }} @@ -16,9 +17,26 @@ concurrency: cancel-in-progress: true jobs: + check-access-permissions: + name: Check Access Permissions + runs-on: ubuntu-latest + + steps: + - name: Check Access Permissions To Trigger This + id: check_access_permissions + uses: stacks-network/actions/team-membership@main + with: + username: ${{ github.actor }} + team: 'blockchain-team' + GITHUB_TOKEN: ${{ secrets.GH_TOKEN }} + + outputs: + ignore_timeout: ${{ steps.check_access_permissions.outputs.is_team_member == 'true' && github.event_name == 'workflow_dispatch' }} + # Check and output whether to run big (`stacks-node`/`stackslib`) or small (others) packages with or without shards check-big-packages-and-shards: name: Check Packages and Shards + needs: check-access-permissions runs-on: ubuntu-latest @@ -30,10 +48,13 @@ jobs: run_small_packages: ${{ steps.check_packages_and_shards.outputs.run_small_packages }} small_packages_with_shards: ${{ steps.check_packages_and_shards.outputs.small_packages_with_shards }} run_stacks_signer: ${{ steps.check_packages_and_shards.outputs.run_stacks_signer }} + too_many_mutants: ${{ steps.check_packages_and_shards.outputs.too_many_mutants }} steps: - id: check_packages_and_shards uses: stacks-network/actions/stacks-core/mutation-testing/check-packages-and-shards@main + with: + ignore_timeout: ${{ needs.check-access-permissions.outputs.ignore_timeout }} # Mutation testing - Execute on PR on small packages that have functions modified (normal run, no shards) pr-differences-mutants-small-normal: @@ -220,3 +241,4 @@ jobs: small_packages: ${{ needs.check-big-packages-and-shards.outputs.run_small_packages }} shards_for_small_packages: ${{ needs.check-big-packages-and-shards.outputs.small_packages_with_shards }} stacks_signer: ${{ needs.check-big-packages-and-shards.outputs.run_stacks_signer }} + too_many_mutants: ${{ needs.check-big-packages-and-shards.outputs.too_many_mutants }} diff --git a/Cargo.lock b/Cargo.lock index 617b5615a6..bfc196fdd0 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3990,10 +3990,12 @@ dependencies = [ "stackslib", "stx-genesis", "tikv-jemallocator", + "tiny_http", "tokio", "toml 0.5.11", "tracing", "tracing-subscriber", + "url", "warp", "wsts", ] diff --git a/stacks-common/src/types/chainstate.rs b/stacks-common/src/types/chainstate.rs index 939156b189..4afbb2a5fc 100644 --- a/stacks-common/src/types/chainstate.rs +++ b/stacks-common/src/types/chainstate.rs @@ -28,11 +28,11 @@ impl_byte_array_serde!(TrieHash); pub const TRIEHASH_ENCODED_SIZE: usize = 32; +#[derive(Serialize, Deserialize)] pub 
struct BurnchainHeaderHash(pub [u8; 32]); impl_array_newtype!(BurnchainHeaderHash, u8, 32); impl_array_hexstring_fmt!(BurnchainHeaderHash); impl_byte_array_newtype!(BurnchainHeaderHash, u8, 32); -impl_byte_array_serde!(BurnchainHeaderHash); pub struct BlockHeaderHash(pub [u8; 32]); impl_array_newtype!(BlockHeaderHash, u8, 32); diff --git a/stacks-common/src/types/mod.rs b/stacks-common/src/types/mod.rs index c995359459..23f2b006db 100644 --- a/stacks-common/src/types/mod.rs +++ b/stacks-common/src/types/mod.rs @@ -82,6 +82,7 @@ pub enum StacksEpochId { Epoch30 = 0x03000, } +#[derive(Debug)] pub enum MempoolCollectionBehavior { ByStacksHeight, ByReceiveTime, diff --git a/stackslib/src/burnchains/burnchain.rs b/stackslib/src/burnchains/burnchain.rs index 4002c253ae..a5ecaa0458 100644 --- a/stackslib/src/burnchains/burnchain.rs +++ b/stackslib/src/burnchains/burnchain.rs @@ -549,47 +549,43 @@ impl Burnchain { .expect("Overflowed u64 in calculating expected sunset_burn") } + /// Is this the first block to receive rewards in its cycle? + /// This is the mod 1 block. Note: in nakamoto, the signer set for cycle N signs + /// the mod 0 block. pub fn is_reward_cycle_start(&self, burn_height: u64) -> bool { self.pox_constants .is_reward_cycle_start(self.first_block_height, burn_height) } + /// Is this the first block to be signed by the signer set in cycle N? + /// This is the mod 0 block. + pub fn is_naka_signing_cycle_start(&self, burn_height: u64) -> bool { + self.pox_constants + .is_naka_signing_cycle_start(self.first_block_height, burn_height) + } + + /// return the first burn block which receives reward in `reward_cycle`. + /// this is the modulo 1 block pub fn reward_cycle_to_block_height(&self, reward_cycle: u64) -> u64 { self.pox_constants .reward_cycle_to_block_height(self.first_block_height, reward_cycle) } - /// Compute the reward cycle ID of the PoX reward set which is active as of this burn_height. - /// The reward set is calculated at reward cycle index 1, so if this block height is at or after - /// reward cycle index 1, then this behaves like `block_height_to_reward_cycle()`. However, - /// if it's reward cycle index is 0, then it belongs to the previous reward cycle. - pub fn pox_reward_cycle(&self, block_height: u64) -> Option { - let cycle = self.block_height_to_reward_cycle(block_height)?; - let effective_height = block_height.checked_sub(self.first_block_height)?; - if effective_height % u64::from(self.pox_constants.reward_cycle_length) == 0 { - Some(cycle.saturating_sub(1)) - } else { - Some(cycle) - } + /// the first burn block that must be *signed* by the signer set of `reward_cycle`. + /// this is the modulo 0 block + pub fn nakamoto_first_block_of_cycle(&self, reward_cycle: u64) -> u64 { + self.pox_constants + .nakamoto_first_block_of_cycle(self.first_block_height, reward_cycle) } + /// What is the reward cycle for this block height? + /// This considers the modulo 0 block to be in reward cycle `n`, even though + /// rewards for cycle `n` do not begin until modulo 1. 
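Reviewer note (illustrative, not part of the patch): the new doc comments above distinguish the mod 1 block (first block to receive rewards) from the mod 0 block (first block signed by cycle N's signer set). A minimal sketch of the two predicates, mirroring the PoxConstants arithmetic later in this patch; the parameters in main() are made up, and burn heights are assumed to be at or after the first burn block.

/// Illustrative re-implementation of the two cycle-boundary predicates.
fn is_reward_cycle_start(first_block_height: u64, burn_height: u64, cycle_len: u64) -> bool {
    // first block to *receive rewards* in its cycle: the mod 1 block
    (burn_height - first_block_height) % cycle_len == 1
}

fn is_naka_signing_cycle_start(first_block_height: u64, burn_height: u64, cycle_len: u64) -> bool {
    // first block *signed* by the cycle's signer set: the mod 0 block
    (burn_height - first_block_height) % cycle_len == 0
}

fn main() {
    // Hypothetical burnchain: first block at height 100, 10-block reward cycles.
    let (first, len) = (100u64, 10u64);
    // Height 120 is a mod 0 block: signed by the incoming signer set,
    // but rewards for that cycle only begin at height 121 (the mod 1 block).
    assert!(is_naka_signing_cycle_start(first, 120, len));
    assert!(!is_reward_cycle_start(first, 120, len));
    assert!(is_reward_cycle_start(first, 121, len));
}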
pub fn block_height_to_reward_cycle(&self, block_height: u64) -> Option { self.pox_constants .block_height_to_reward_cycle(self.first_block_height, block_height) } - pub fn static_block_height_to_reward_cycle( - block_height: u64, - first_block_height: u64, - reward_cycle_length: u64, - ) -> Option { - PoxConstants::static_block_height_to_reward_cycle( - block_height, - first_block_height, - reward_cycle_length, - ) - } - /// Is this block either the first block in a reward cycle or /// right before the reward phase starts? This is the mod 0 or mod 1 /// block. Reward cycle start events (like auto-unlocks) process *after* @@ -607,27 +603,19 @@ impl Burnchain { (effective_height % reward_cycle_length) <= 1 } - pub fn static_is_in_prepare_phase( - first_block_height: u64, - reward_cycle_length: u64, - prepare_length: u64, - block_height: u64, - ) -> bool { - PoxConstants::static_is_in_prepare_phase( - first_block_height, - reward_cycle_length, - prepare_length, - block_height, - ) + /// Does this block include reward slots? + /// This is either in the last prepare_phase_length blocks of the cycle + /// or the modulo 0 block + pub fn is_in_prepare_phase(&self, block_height: u64) -> bool { + self.pox_constants + .is_in_prepare_phase(self.first_block_height, block_height) } - pub fn is_in_prepare_phase(&self, block_height: u64) -> bool { - Self::static_is_in_prepare_phase( - self.first_block_height, - self.pox_constants.reward_cycle_length as u64, - self.pox_constants.prepare_length.into(), - block_height, - ) + /// The prepare phase is the last prepare_phase_length blocks of the cycle + /// This cannot include the 0 block for nakamoto + pub fn is_in_naka_prepare_phase(&self, block_height: u64) -> bool { + self.pox_constants + .is_in_naka_prepare_phase(self.first_block_height, block_height) } pub fn regtest(working_dir: &str) -> Burnchain { diff --git a/stackslib/src/burnchains/mod.rs b/stackslib/src/burnchains/mod.rs index 30cd9f81ee..0bc68897cb 100644 --- a/stackslib/src/burnchains/mod.rs +++ b/stackslib/src/burnchains/mod.rs @@ -517,7 +517,7 @@ impl PoxConstants { } } - /// What's the first block in the prepare phase + /// The first block of the prepare phase during `reward_cycle`. This is the prepare phase _for the next cycle_. pub fn prepare_phase_start(&self, first_block_height: u64, reward_cycle: u64) -> u64 { let reward_cycle_start = self.reward_cycle_to_block_height(first_block_height, reward_cycle); @@ -526,18 +526,37 @@ impl PoxConstants { prepare_phase_start } + /// Is this the first block to receive rewards in its cycle? + /// This is the mod 1 block. Note: in nakamoto, the signer set for cycle N signs + /// the mod 0 block. pub fn is_reward_cycle_start(&self, first_block_height: u64, burn_height: u64) -> bool { let effective_height = burn_height - first_block_height; // first block of the new reward cycle (effective_height % u64::from(self.reward_cycle_length)) == 1 } + /// Is this the first block to be signed by the signer set in cycle N? + /// This is the mod 0 block. + pub fn is_naka_signing_cycle_start(&self, first_block_height: u64, burn_height: u64) -> bool { + let effective_height = burn_height - first_block_height; + // first block of the new reward cycle + (effective_height % u64::from(self.reward_cycle_length)) == 0 + } + + /// return the first burn block which receives reward in `reward_cycle`. 
+ /// this is the modulo 1 block pub fn reward_cycle_to_block_height(&self, first_block_height: u64, reward_cycle: u64) -> u64 { // NOTE: the `+ 1` is because the height of the first block of a reward cycle is mod 1, not // mod 0. first_block_height + reward_cycle * u64::from(self.reward_cycle_length) + 1 } + /// the first burn block that must be *signed* by the signer set of `reward_cycle`. + /// this is the modulo 0 block + pub fn nakamoto_first_block_of_cycle(&self, first_block_height: u64, reward_cycle: u64) -> u64 { + first_block_height + reward_cycle * u64::from(self.reward_cycle_length) + } + pub fn reward_cycle_index(&self, first_block_height: u64, burn_height: u64) -> Option { let effective_height = burn_height.checked_sub(first_block_height)?; Some(effective_height % u64::from(self.reward_cycle_length)) @@ -609,6 +628,35 @@ impl PoxConstants { } } + /// The prepare phase is the last prepare_phase_length blocks of the cycle + /// This cannot include the 0 block for nakamoto + pub fn is_in_naka_prepare_phase(&self, first_block_height: u64, block_height: u64) -> bool { + Self::static_is_in_naka_prepare_phase( + first_block_height, + u64::from(self.reward_cycle_length), + u64::from(self.prepare_length), + block_height, + ) + } + + /// The prepare phase is the last prepare_phase_length blocks of the cycle + /// This cannot include the 0 block for nakamoto + pub fn static_is_in_naka_prepare_phase( + first_block_height: u64, + reward_cycle_length: u64, + prepare_length: u64, + block_height: u64, + ) -> bool { + if block_height <= first_block_height { + // not a reward cycle start if we're the first block after genesis. + false + } else { + let effective_height = block_height - first_block_height; + let reward_index = effective_height % reward_cycle_length; + reward_index > u64::from(reward_cycle_length - prepare_length) + } + } + /// Returns the active reward cycle at the given burn block height /// * `first_block_ht` - the first burn block height that the Stacks network monitored /// * `reward_cycle_len` - the length of each reward cycle in the network. diff --git a/stackslib/src/chainstate/burn/db/sortdb.rs b/stackslib/src/chainstate/burn/db/sortdb.rs index 14d71cd646..ebb6e87be2 100644 --- a/stackslib/src/chainstate/burn/db/sortdb.rs +++ b/stackslib/src/chainstate/burn/db/sortdb.rs @@ -3583,42 +3583,6 @@ impl SortitionDB { Ok(()) } - /// Get the prepare phase end sortition ID of a reward cycle. This is the last prepare - /// phase sortition for the prepare phase that began this reward cycle (i.e. the returned - /// sortition will be in the preceding reward cycle) - /// Wrapper around SortitionDBConn::get_prepare_phase_end_sortition_id_for_reward_ccyle() - pub fn get_prepare_phase_end_sortition_id_for_reward_cycle( - &self, - tip: &SortitionId, - reward_cycle_id: u64, - ) -> Result { - self.index_conn() - .get_prepare_phase_end_sortition_id_for_reward_cycle( - &self.pox_constants, - self.first_block_height, - tip, - reward_cycle_id, - ) - } - - /// Get the prepare phase start sortition ID of a reward cycle. This is the first prepare - /// phase sortition for the prepare phase that began this reward cycle (i.e. the returned - /// sortition will be in the preceding reward cycle) - /// Wrapper around SortitionDBConn::get_prepare_phase_start_sortition_id_for_reward_cycle(). 
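Reviewer note (illustrative sketch, not part of the patch): the helpers added above reduce to three small formulas. Re-implemented standalone with made-up parameters to make the +1 offset and the exclusion of the mod 0 block from the Nakamoto prepare phase concrete.

fn nakamoto_first_block_of_cycle(first: u64, cycle: u64, cycle_len: u64) -> u64 {
    // the mod 0 block: first block *signed* by `cycle`'s signer set
    first + cycle * cycle_len
}

fn reward_cycle_to_block_height(first: u64, cycle: u64, cycle_len: u64) -> u64 {
    // the mod 1 block: first block to *receive rewards* in `cycle`
    first + cycle * cycle_len + 1
}

fn is_in_naka_prepare_phase(first: u64, cycle_len: u64, prepare_len: u64, height: u64) -> bool {
    if height <= first {
        return false;
    }
    let reward_index = (height - first) % cycle_len;
    // last blocks of the cycle, never the mod 0 block
    reward_index > cycle_len - prepare_len
}

fn main() {
    let (first, len, prepare) = (100u64, 10u64, 3u64);
    // Cycle 2 is signed starting at height 120 (mod 0) and rewarded starting at 121 (mod 1).
    assert_eq!(nakamoto_first_block_of_cycle(first, 2, len), 120);
    assert_eq!(reward_cycle_to_block_height(first, 2, len), 121);
    // Heights whose reward index exceeds cycle_len - prepare_len are in the Nakamoto prepare phase...
    assert!(is_in_naka_prepare_phase(first, len, prepare, 118));
    assert!(is_in_naka_prepare_phase(first, len, prepare, 119));
    // ...while the mod 0 block is excluded, unlike the pre-Nakamoto is_in_prepare_phase.
    assert!(!is_in_naka_prepare_phase(first, len, prepare, 120));
}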
- pub fn get_prepare_phase_start_sortition_id_for_reward_cycle( - &self, - tip: &SortitionId, - reward_cycle_id: u64, - ) -> Result { - self.index_conn() - .get_prepare_phase_start_sortition_id_for_reward_cycle( - &self.pox_constants, - self.first_block_height, - tip, - reward_cycle_id, - ) - } - /// Figure out the reward cycle for `tip` and lookup the preprocessed /// reward set (if it exists) for the active reward cycle during `tip`. /// Returns the reward cycle info on success. @@ -3933,33 +3897,6 @@ impl<'a> SortitionDBConn<'a> { .and_then(|(reward_cycle_info, _anchor_sortition_id)| Ok(reward_cycle_info)) } - /// Get the prepare phase end sortition ID of a reward cycle. This is the last prepare - /// phase sortition for the prepare phase that began this reward cycle (i.e. the returned - /// sortition will be in the preceding reward cycle) - pub fn get_prepare_phase_end_sortition_id_for_reward_cycle( - &self, - pox_constants: &PoxConstants, - first_block_height: u64, - tip: &SortitionId, - reward_cycle_id: u64, - ) -> Result { - let prepare_phase_end = pox_constants - .reward_cycle_to_block_height(first_block_height, reward_cycle_id) - .saturating_sub(1); - - let last_sortition = - get_ancestor_sort_id(self, prepare_phase_end, tip)?.ok_or_else(|| { - error!( - "Could not find prepare phase end ancestor while fetching reward set"; - "tip_sortition_id" => %tip, - "reward_cycle_id" => reward_cycle_id, - "prepare_phase_end_height" => prepare_phase_end - ); - db_error::NotFoundError - })?; - Ok(last_sortition) - } - /// Get the prepare phase start sortition ID of a reward cycle. This is the first prepare /// phase sortition for the prepare phase that began this reward cycle (i.e. the returned /// sortition will be in the preceding reward cycle) @@ -3970,9 +3907,11 @@ impl<'a> SortitionDBConn<'a> { tip: &SortitionId, reward_cycle_id: u64, ) -> Result { - let prepare_phase_start = pox_constants - .reward_cycle_to_block_height(first_block_height, reward_cycle_id) - .saturating_sub(pox_constants.prepare_length.into()); + let reward_cycle_of_prepare_phase = reward_cycle_id + .checked_sub(1) + .ok_or_else(|| db_error::Other("No prepare phase exists for cycle 0".into()))?; + let prepare_phase_start = + pox_constants.prepare_phase_start(first_block_height, reward_cycle_of_prepare_phase); let first_sortition = get_ancestor_sort_id(self, prepare_phase_start, tip)?.ok_or_else(|| { @@ -5944,10 +5883,10 @@ impl<'a> SortitionHandleTx<'a> { /// Get the expected number of PoX payouts per output fn get_num_pox_payouts(&self, burn_block_height: u64) -> usize { - let op_num_outputs = if Burnchain::static_is_in_prepare_phase( + let op_num_outputs = if PoxConstants::static_is_in_prepare_phase( self.context.first_block_height, - self.context.pox_constants.reward_cycle_length as u64, - self.context.pox_constants.prepare_length.into(), + u64::from(self.context.pox_constants.reward_cycle_length), + u64::from(self.context.pox_constants.prepare_length), burn_block_height, ) { 1 @@ -6172,7 +6111,7 @@ impl<'a> SortitionHandleTx<'a> { } // if there are qualifying auto-unlocks, record them if !reward_set.start_cycle_state.is_empty() { - let cycle_number = Burnchain::static_block_height_to_reward_cycle( + let cycle_number = PoxConstants::static_block_height_to_reward_cycle( snapshot.block_height, self.context.first_block_height, self.context.pox_constants.reward_cycle_length.into(), diff --git a/stackslib/src/chainstate/coordinator/mod.rs b/stackslib/src/chainstate/coordinator/mod.rs index d3b6fd5f3e..72e44f981c 100644 --- 
a/stackslib/src/chainstate/coordinator/mod.rs +++ b/stackslib/src/chainstate/coordinator/mod.rs @@ -297,9 +297,8 @@ pub trait RewardSetProvider { fn get_reward_set_nakamoto( &self, - cycle_start_burn_height: u64, chainstate: &mut StacksChainState, - burnchain: &Burnchain, + cycle: u64, sortdb: &SortitionDB, block_id: &StacksBlockId, ) -> Result; @@ -372,20 +371,12 @@ impl<'a, T: BlockEventDispatcher> RewardSetProvider for OnChainRewardSetProvider fn get_reward_set_nakamoto( &self, - cycle_start_burn_height: u64, chainstate: &mut StacksChainState, - burnchain: &Burnchain, + reward_cycle: u64, sortdb: &SortitionDB, block_id: &StacksBlockId, ) -> Result { - self.read_reward_set_nakamoto( - cycle_start_burn_height, - chainstate, - burnchain, - sortdb, - block_id, - false, - ) + self.read_reward_set_nakamoto(chainstate, reward_cycle, sortdb, block_id, false) } } diff --git a/stackslib/src/chainstate/coordinator/tests.rs b/stackslib/src/chainstate/coordinator/tests.rs index 7bd06aaaea..50127af176 100644 --- a/stackslib/src/chainstate/coordinator/tests.rs +++ b/stackslib/src/chainstate/coordinator/tests.rs @@ -520,9 +520,8 @@ impl RewardSetProvider for StubbedRewardSetProvider { fn get_reward_set_nakamoto( &self, - cycle_start_burn_height: u64, chainstate: &mut StacksChainState, - burnchain: &Burnchain, + cycle: u64, sortdb: &SortitionDB, block_id: &StacksBlockId, ) -> Result { diff --git a/stackslib/src/chainstate/nakamoto/coordinator/mod.rs b/stackslib/src/chainstate/nakamoto/coordinator/mod.rs index 15cc7f0852..058025ee1c 100644 --- a/stackslib/src/chainstate/nakamoto/coordinator/mod.rs +++ b/stackslib/src/chainstate/nakamoto/coordinator/mod.rs @@ -29,7 +29,7 @@ use stacks_common::types::chainstate::{ use stacks_common::types::{StacksEpoch, StacksEpochId}; use crate::burnchains::db::{BurnchainBlockData, BurnchainDB, BurnchainHeaderReader}; -use crate::burnchains::{Burnchain, BurnchainBlockHeader}; +use crate::burnchains::{self, burnchain, Burnchain, BurnchainBlockHeader}; use crate::chainstate::burn::db::sortdb::{ get_ancestor_sort_id, SortitionDB, SortitionHandle, SortitionHandleConn, }; @@ -88,16 +88,12 @@ impl<'a, T: BlockEventDispatcher> OnChainRewardSetProvider<'a, T> { /// RPC endpoints to expose this without flooding loggers. pub fn read_reward_set_nakamoto( &self, - cycle_start_burn_height: u64, chainstate: &mut StacksChainState, - burnchain: &Burnchain, + cycle: u64, sortdb: &SortitionDB, block_id: &StacksBlockId, debug_log: bool, ) -> Result { - let cycle = burnchain - .block_height_to_reward_cycle(cycle_start_burn_height) - .expect("FATAL: no reward cycle for burn height"); self.read_reward_set_nakamoto_of_cycle(cycle, chainstate, sortdb, block_id, debug_log) } @@ -192,7 +188,7 @@ impl<'a, T: BlockEventDispatcher> OnChainRewardSetProvider<'a, T> { debug_log: bool, ) -> Result { let Some(reward_set_block) = NakamotoChainState::get_header_by_coinbase_height( - &mut chainstate.index_tx_begin(), + &mut chainstate.index_conn(), block_id, coinbase_height_of_calculation, )? @@ -280,9 +276,10 @@ fn find_prepare_phase_sortitions( } /// Try to get the reward cycle information for a Nakamoto reward cycle, identified by the -/// burn_height. The reward cycle info returned will be from the reward cycle that is active as of -/// `burn_height`. `sortition_tip` can be any sortition ID that's at a higher height than -/// `burn_height`. +/// `reward_cycle` number. +/// +/// `sortition_tip` can be any sortition ID that's at a higher height than +/// `reward_cycle`'s start height (the 0 block). 
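Reviewer note (sketch, assumptions flagged): the reward-set APIs above are now keyed by reward cycle number rather than a burn height. This works because block_height_to_reward_cycle (per its new doc comment in burnchain.rs) counts the mod 0 block as part of cycle N, whereas the removed pox_reward_cycle pushed it into the previous cycle. Assuming the usual effective_height / cycle_length implementation (its body is not shown in this hunk), the round trip holds:

/// Assumed implementation, consistent with the block_height_to_reward_cycle doc comment.
fn block_height_to_reward_cycle(first: u64, cycle_len: u64, height: u64) -> Option<u64> {
    height.checked_sub(first).map(|eff| eff / cycle_len)
}

fn nakamoto_first_block_of_cycle(first: u64, cycle_len: u64, cycle: u64) -> u64 {
    first + cycle * cycle_len
}

fn main() {
    let (first, len) = (100u64, 10u64);
    for cycle in 0u64..5 {
        let mod0 = nakamoto_first_block_of_cycle(first, len, cycle);
        // The mod 0 block maps back to the cycle it is signed for...
        assert_eq!(block_height_to_reward_cycle(first, len, mod0), Some(cycle));
        // ...whereas the removed pox_reward_cycle mapped it to the previous cycle
        // (saturating at 0), which is why call sites switch helpers in this patch.
    }
}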
/// /// In Nakamoto, the PoX anchor block for reward cycle _R_ is the _first_ Stacks block mined in the /// _last_ tenure of _R - 1_'s reward phase (i.e. which takes place toward the end of reward cycle). @@ -297,14 +294,16 @@ fn find_prepare_phase_sortitions( /// Returns Ok(None) if we're still waiting for the PoX anchor block sortition /// Returns Err(Error::NotInPreparePhase) if `burn_height` is not in the prepare phase pub fn get_nakamoto_reward_cycle_info( - burn_height: u64, sortition_tip: &SortitionId, + reward_cycle: u64, burnchain: &Burnchain, chain_state: &mut StacksChainState, stacks_tip: &StacksBlockId, sort_db: &mut SortitionDB, provider: &U, ) -> Result, Error> { + let burn_height = burnchain.nakamoto_first_block_of_cycle(reward_cycle); + let epoch_at_height = SortitionDB::get_stacks_epoch(sort_db.conn(), burn_height)? .unwrap_or_else(|| panic!("FATAL: no epoch defined for burn height {}", burn_height)) .epoch_id; @@ -314,14 +313,8 @@ pub fn get_nakamoto_reward_cycle_info( "FATAL: called a nakamoto function outside of epoch 3" ); - // calculating the reward set for the current reward cycle - let reward_cycle = burnchain - .pox_reward_cycle(burn_height) - .expect("FATAL: no reward cycle for burn height"); - debug!("Processing reward set for Nakamoto reward cycle"; "stacks_tip" => %stacks_tip, - "burn_height" => burn_height, "reward_cycle" => reward_cycle, "reward_cycle_length" => burnchain.pox_constants.reward_cycle_length, "prepare_phase_length" => burnchain.pox_constants.prepare_length); @@ -376,33 +369,23 @@ pub fn load_nakamoto_reward_set( sort_db: &SortitionDB, provider: &U, ) -> Result, Error> { - let prepare_end_height = burnchain - .reward_cycle_to_block_height(reward_cycle) - .saturating_sub(1); + let cycle_start_height = burnchain.nakamoto_first_block_of_cycle(reward_cycle); - let epoch_at_height = SortitionDB::get_stacks_epoch(sort_db.conn(), prepare_end_height)? + let epoch_at_height = SortitionDB::get_stacks_epoch(sort_db.conn(), cycle_start_height)? .unwrap_or_else(|| { panic!( "FATAL: no epoch defined for burn height {}", - prepare_end_height + cycle_start_height ) }); - let Some(prepare_end_sortition_id) = - get_ancestor_sort_id(&sort_db.index_conn(), prepare_end_height, sortition_tip)? - else { - // reward cycle is too far in the future - warn!("Requested reward cycle start ancestor sortition ID for cycle {} prepare-end height {}, but tip is {}", reward_cycle, prepare_end_height, sortition_tip); - return Ok(None); - }; - // Find the first Stacks block in this reward cycle's preceding prepare phase. // This block will have invoked `.signers.stackerdb-set-signer-slots()` with the reward set. // Note that we may not have processed it yet. But, if we do find it, then it's // unique (and since Nakamoto Stacks blocks are processed in order, the anchor block // cannot change later). let first_epoch30_reward_cycle = burnchain - .pox_reward_cycle(epoch_at_height.start_height) + .block_height_to_reward_cycle(epoch_at_height.start_height) .expect("FATAL: no reward cycle for epoch 3.0 start height"); if !epoch_at_height @@ -412,6 +395,14 @@ pub fn load_nakamoto_reward_set( // in epoch 2.5, and in the first reward cycle of epoch 3.0, the reward set can *only* be found in the sortition DB. // The nakamoto chain-processing rules aren't active yet, so we can't look for the reward // cycle info in the nakamoto chain state. + let Some(prepare_end_sortition_id) = + get_ancestor_sort_id(&sort_db.index_conn(), cycle_start_height, sortition_tip)? 
+ else { + // reward cycle is too far in the future + warn!("Requested reward cycle start ancestor sortition ID for cycle {} prepare-end height {}, but tip is {}", reward_cycle, cycle_start_height, sortition_tip); + return Ok(None); + }; + if let Ok(persisted_reward_cycle_info) = sort_db.get_preprocessed_reward_set_of(&prepare_end_sortition_id) { @@ -475,8 +466,18 @@ pub fn load_nakamoto_reward_set( } // find the reward cycle's prepare-phase sortitions (in the preceding reward cycle) + let Some(prior_cycle_end) = get_ancestor_sort_id( + &sort_db.index_conn(), + cycle_start_height.saturating_sub(1), + sortition_tip, + )? + else { + // reward cycle is too far in the future + warn!("Requested reward cycle start ancestor sortition ID for cycle {} prepare-end height {}, but tip is {}", reward_cycle, cycle_start_height.saturating_sub(1), sortition_tip); + return Ok(None); + }; let prepare_phase_sortitions = - find_prepare_phase_sortitions(sort_db, burnchain, &prepare_end_sortition_id)?; + find_prepare_phase_sortitions(sort_db, burnchain, &prior_cycle_end)?; // iterate over the prepare_phase_sortitions, finding the first such sortition // with a processed stacks block @@ -505,7 +506,7 @@ pub fn load_nakamoto_reward_set( Err(e) => return Some(Err(e)), Ok(None) => { // no header for this snapshot (possibly invalid) - debug!("Failed to find block by consensus hash"; "consensus_hash" => %sn.consensus_hash); + info!("Failed to find block by consensus hash"; "consensus_hash" => %sn.consensus_hash); return None } } @@ -542,16 +543,11 @@ pub fn load_nakamoto_reward_set( "block_hash" => %stacks_block_hash, "consensus_hash" => %anchor_block_sn.consensus_hash, "txid" => %txid, - "prepare_end_height" => %prepare_end_height, + "cycle_start_height" => %cycle_start_height, "burnchain_height" => %anchor_block_sn.block_height); - let reward_set = provider.get_reward_set_nakamoto( - prepare_end_height, - chain_state, - burnchain, - sort_db, - &block_id, - )?; + let reward_set = + provider.get_reward_set_nakamoto(chain_state, reward_cycle, sort_db, &block_id)?; debug!( "Stacks anchor block (ch {}) {} cycle {} is processed", &anchor_block_header.consensus_hash, &block_id, reward_cycle; @@ -581,26 +577,28 @@ pub fn get_nakamoto_next_recipients( stacks_tip: &StacksBlockId, burnchain: &Burnchain, ) -> Result, Error> { - let reward_cycle_info = - if burnchain.is_reward_cycle_start(sortition_tip.block_height.saturating_add(1)) { - let Some((reward_set, _)) = load_nakamoto_reward_set( - burnchain - .pox_reward_cycle(sortition_tip.block_height.saturating_add(1)) - .expect("Sortition block height has no reward cycle"), - &sortition_tip.sortition_id, - burnchain, - chain_state, - stacks_tip, - sort_db, - &OnChainRewardSetProvider::new(), - )? - else { - return Ok(None); - }; - Some(reward_set) - } else { - None + let next_burn_height = sortition_tip.block_height.saturating_add(1); + let Some(reward_cycle) = burnchain.block_height_to_reward_cycle(next_burn_height) else { + error!("CORRUPTION: evaluating burn block height before starting burn height"); + return Err(Error::BurnchainError(burnchains::Error::NoStacksEpoch)); + }; + let reward_cycle_info = if burnchain.is_reward_cycle_start(next_burn_height) { + let Some((reward_set, _)) = load_nakamoto_reward_set( + reward_cycle, + &sortition_tip.sortition_id, + burnchain, + chain_state, + stacks_tip, + sort_db, + &OnChainRewardSetProvider::new(), + )? 
+ else { + return Ok(None); }; + Some(reward_set) + } else { + None + }; sort_db .get_next_block_recipients(burnchain, sortition_tip, reward_cycle_info.as_ref()) .map_err(Error::from) @@ -670,7 +668,7 @@ impl< // only proceed if we have processed the _anchor block_ for this reward cycle. let Some((rc_info, _)) = load_nakamoto_reward_set( self.burnchain - .pox_reward_cycle(canonical_sn.block_height) + .block_height_to_reward_cycle(canonical_sn.block_height) .expect("FATAL: snapshot has no reward cycle"), &canonical_sn.sortition_id, &self.burnchain, @@ -906,7 +904,11 @@ impl< }); // are we in the prepare phase? - if !self.burnchain.is_in_prepare_phase(stacks_sn.block_height) { + // TODO: this should *not* include the 0 block! + if !self + .burnchain + .is_in_naka_prepare_phase(stacks_sn.block_height) + { // next ready stacks block continue; } @@ -930,7 +932,7 @@ impl< // cycle data let Some((rc_info, _)) = load_nakamoto_reward_set( self.burnchain - .pox_reward_cycle(canonical_sn.block_height) + .block_height_to_reward_cycle(canonical_sn.block_height) .expect("FATAL: snapshot has no reward cycle"), &canonical_sn.sortition_id, &self.burnchain, @@ -966,8 +968,8 @@ impl< /// Given a burnchain header, find the PoX reward cycle info fn get_nakamoto_reward_cycle_info( &mut self, - block_height: u64, stacks_tip: &StacksBlockId, + reward_cycle: u64, ) -> Result, Error> { let sortition_tip_id = self .canonical_sortition_tip @@ -975,8 +977,8 @@ impl< .expect("FATAL: Processing anchor block, but no known sortition tip"); get_nakamoto_reward_cycle_info( - block_height, sortition_tip_id, + reward_cycle, &self.burnchain, &mut self.chain_state_db, stacks_tip, @@ -1117,10 +1119,15 @@ impl< return Ok(false); }; - let reward_cycle_info = self.get_nakamoto_reward_cycle_info( - header.block_height, - &local_best_nakamoto_tip, - )?; + let Some(reward_cycle) = self + .burnchain + .block_height_to_reward_cycle(header.block_height) + else { + error!("CORRUPTION: Evaluating burn block before start burn height"; "burn_height" => header.block_height); + return Ok(false); + }; + let reward_cycle_info = + self.get_nakamoto_reward_cycle_info(&local_best_nakamoto_tip, reward_cycle)?; if let Some(rc_info) = reward_cycle_info.as_ref() { // in nakamoto, if we have any reward cycle info at all, it will be known. // otherwise, we may have to process some more Stacks blocks diff --git a/stackslib/src/chainstate/nakamoto/coordinator/tests.rs b/stackslib/src/chainstate/nakamoto/coordinator/tests.rs index 569114aa12..188e2e5a3e 100644 --- a/stackslib/src/chainstate/nakamoto/coordinator/tests.rs +++ b/stackslib/src/chainstate/nakamoto/coordinator/tests.rs @@ -598,7 +598,7 @@ impl<'a> TestPeer<'a> { info!( "Burnchain block produced: {burn_height}, in_prepare_phase?: {}, first_reward_block?: {}", pox_constants.is_in_prepare_phase(first_burn_height, burn_height), - pox_constants.is_reward_cycle_start(first_burn_height, burn_height) + pox_constants.is_naka_signing_cycle_start(first_burn_height, burn_height) ); let vrf_proof = self.make_nakamoto_vrf_proof(miner_key); @@ -761,6 +761,9 @@ fn pox_treatment() { peer.single_block_tenure(&private_key, |_| {}, |_| {}, |_| true); blocks.push(block); + // note: we use `is_reward_cycle_start` here rather than naka_reward_cycle_start + // because in this test, we're interested in getting to the reward blocks, + // not validating the signer set. 
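Reviewer note (illustrative arithmetic, not part of the patch): load_nakamoto_reward_set now anchors on the cycle's mod 0 start height and steps back one block to reach the preceding cycle, whose prepare phase seated the reward set. With made-up parameters:

fn nakamoto_first_block_of_cycle(first: u64, cycle_len: u64, cycle: u64) -> u64 {
    first + cycle * cycle_len
}

/// Assumed effective_height / cycle_len, consistent with the doc comment earlier in the patch.
fn block_height_to_reward_cycle(first: u64, cycle_len: u64, height: u64) -> Option<u64> {
    height.checked_sub(first).map(|eff| eff / cycle_len)
}

fn main() {
    let (first, len, cycle) = (100u64, 10u64, 7u64);
    // Cycle 7's reward set is chosen during cycle 6's prepare phase.
    let cycle_start_height = nakamoto_first_block_of_cycle(first, len, cycle); // 170, the mod 0 block
    let prior_cycle_end = cycle_start_height.saturating_sub(1); // 169, last block of cycle 6
    assert_eq!(block_height_to_reward_cycle(first, len, prior_cycle_end), Some(cycle - 1));
    // The sortition at height 169 is the tip handed to find_prepare_phase_sortitions above.
}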
the reward blocks only begin at modulo 1 if pox_constants.is_reward_cycle_start(first_burn_height, burn_height + 1) { break; } @@ -1083,8 +1086,6 @@ fn test_nakamoto_chainstate_getters() { let chainstate = &mut peer.stacks_node.as_mut().unwrap().chainstate; let sort_db = peer.sortdb.as_ref().unwrap(); - let (mut stacks_db_tx, _) = chainstate.chainstate_tx_begin().unwrap(); - for coinbase_height in 0..=((tip .anchored_header .as_stacks_nakamoto() @@ -1094,7 +1095,7 @@ fn test_nakamoto_chainstate_getters() { + 1) { let header_opt = NakamotoChainState::get_header_by_coinbase_height( - &mut stacks_db_tx, + &mut chainstate.index_conn(), &tip.index_block_hash(), coinbase_height, ) @@ -1571,7 +1572,7 @@ pub fn simple_nakamoto_coordinator_10_tenures_10_sortitions<'a>() -> TestPeer<'a if peer .config .burnchain - .is_reward_cycle_start(tip.block_height) + .is_naka_signing_cycle_start(tip.block_height) { rc_blocks.push(all_blocks.clone()); rc_burn_ops.push(all_burn_ops.clone()); @@ -2316,7 +2317,7 @@ pub fn simple_nakamoto_coordinator_10_extended_tenures_10_sortitions() -> TestPe if peer .config .burnchain - .is_reward_cycle_start(tip.block_height) + .is_naka_signing_cycle_start(tip.block_height) { rc_blocks.push(all_blocks.clone()); rc_burn_ops.push(all_burn_ops.clone()); diff --git a/stackslib/src/chainstate/nakamoto/mod.rs b/stackslib/src/chainstate/nakamoto/mod.rs index 4bddab3071..dab9f2eba8 100644 --- a/stackslib/src/chainstate/nakamoto/mod.rs +++ b/stackslib/src/chainstate/nakamoto/mod.rs @@ -300,6 +300,13 @@ pub trait StacksDBIndexed { fn get(&mut self, tip: &StacksBlockId, key: &str) -> Result, DBError>; fn sqlite(&self) -> &Connection; + /// Get the ancestor block hash given a height + fn get_ancestor_block_id( + &mut self, + coinbase_height: u64, + tip_index_hash: &StacksBlockId, + ) -> Result, DBError>; + /// Get the block ID for a specific coinbase height in the fork identified by `tip` fn get_nakamoto_block_id_at_coinbase_height( &mut self, @@ -452,6 +459,14 @@ impl StacksDBIndexed for StacksDBConn<'_> { fn sqlite(&self) -> &Connection { self.conn() } + + fn get_ancestor_block_id( + &mut self, + coinbase_height: u64, + tip_index_hash: &StacksBlockId, + ) -> Result, DBError> { + self.get_ancestor_block_hash(coinbase_height, tip_index_hash) + } } impl StacksDBIndexed for StacksDBTx<'_> { @@ -462,6 +477,14 @@ impl StacksDBIndexed for StacksDBTx<'_> { fn sqlite(&self) -> &Connection { self.tx().deref() } + + fn get_ancestor_block_id( + &mut self, + coinbase_height: u64, + tip_index_hash: &StacksBlockId, + ) -> Result, DBError> { + self.get_ancestor_block_hash(coinbase_height, tip_index_hash) + } } impl<'a> ChainstateTx<'a> { @@ -816,6 +839,12 @@ impl NakamotoBlockHeader { public_key_bytes.copy_from_slice(&public_key.to_bytes_compressed()[..]); let (signer, signer_index) = signers_by_pk.get(&public_key_bytes).ok_or_else(|| { + warn!( + "Found an invalid public key. Reward set has {} signers. Chain length {}. Signatures length {}", + signers.len(), + self.chain_length, + self.signer_signature.len(), + ); ChainstateError::InvalidStacksBlock(format!( "Public key {} not found in the reward set", public_key.to_hex() @@ -2400,22 +2429,22 @@ impl NakamotoChainState { /// Return a Nakamoto StacksHeaderInfo at a given coinbase height in the fork identified by `tip_index_hash`. /// * For Stacks 2.x, this is the Stacks block's header /// * For Stacks 3.x (Nakamoto), this is the first block in the miner's tenure. 
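Reviewer note (toy model; names other than the quoted ones are invented): get_header_by_coinbase_height is generalized in the hunk that follows from taking a StacksDBTx to taking any StacksDBIndexed, with the new get_ancestor_block_id trait method implemented by both StacksDBConn and StacksDBTx, so read-only call sites can pass chainstate.index_conn() instead of opening a transaction. The shape of the refactor, in miniature:

/// Stand-in for StacksDBIndexed; the lookup bodies are placeholders.
trait DbIndexed {
    fn get_ancestor_block_id(&mut self, coinbase_height: u64) -> Option<u64>;
}

struct ReadOnlyConn; // stand-in for StacksDBConn / chainstate.index_conn()
struct WriteTx;      // stand-in for StacksDBTx

impl DbIndexed for ReadOnlyConn {
    fn get_ancestor_block_id(&mut self, coinbase_height: u64) -> Option<u64> {
        Some(coinbase_height)
    }
}

impl DbIndexed for WriteTx {
    fn get_ancestor_block_id(&mut self, coinbase_height: u64) -> Option<u64> {
        Some(coinbase_height)
    }
}

/// Analogue of the generic get_header_by_coinbase_height in the hunk below.
fn get_header_by_coinbase_height<D: DbIndexed>(conn: &mut D, coinbase_height: u64) -> Option<u64> {
    conn.get_ancestor_block_id(coinbase_height)
}

fn main() {
    // Both the read-only connection and the transaction wrapper are accepted.
    assert_eq!(get_header_by_coinbase_height(&mut ReadOnlyConn, 5), Some(5));
    assert_eq!(get_header_by_coinbase_height(&mut WriteTx, 5), Some(5));
}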
- pub fn get_header_by_coinbase_height( - tx: &mut StacksDBTx, + pub fn get_header_by_coinbase_height( + conn: &mut SDBI, tip_index_hash: &StacksBlockId, coinbase_height: u64, ) -> Result, ChainstateError> { // nakamoto block? if let Some(block_id) = - tx.get_nakamoto_block_id_at_coinbase_height(tip_index_hash, coinbase_height)? + conn.get_nakamoto_block_id_at_coinbase_height(tip_index_hash, coinbase_height)? { - return Self::get_block_header_nakamoto(tx.sqlite(), &block_id); + return Self::get_block_header_nakamoto(conn.sqlite(), &block_id); } // epcoh2 block? - let Some(ancestor_at_height) = tx - .get_ancestor_block_hash(coinbase_height, tip_index_hash)? - .map(|ancestor| Self::get_block_header(tx.tx(), &ancestor)) + let Some(ancestor_at_height) = conn + .get_ancestor_block_id(coinbase_height, tip_index_hash)? + .map(|ancestor| Self::get_block_header(conn.sqlite(), &ancestor)) .transpose()? .flatten() else { diff --git a/stackslib/src/chainstate/nakamoto/tenure.rs b/stackslib/src/chainstate/nakamoto/tenure.rs index bff030be8f..81380cc93d 100644 --- a/stackslib/src/chainstate/nakamoto/tenure.rs +++ b/stackslib/src/chainstate/nakamoto/tenure.rs @@ -372,7 +372,7 @@ impl NakamotoChainState { let matured_coinbase_height = coinbase_height - MINER_REWARD_MATURITY; let matured_tenure_block_header = Self::get_header_by_coinbase_height( - chainstate_tx, + chainstate_tx.deref_mut(), &tip_index_hash, matured_coinbase_height, )? @@ -964,7 +964,7 @@ impl NakamotoChainState { let total_coinbase = coinbase_at_block.saturating_add(accumulated_rewards); let parent_tenure_start_header: StacksHeaderInfo = Self::get_header_by_coinbase_height( - chainstate_tx, + chainstate_tx.deref_mut(), &block.header.parent_block_id, parent_coinbase_height, )? diff --git a/stackslib/src/chainstate/nakamoto/test_signers.rs b/stackslib/src/chainstate/nakamoto/test_signers.rs index 13d7f2ff1e..4ab7613751 100644 --- a/stackslib/src/chainstate/nakamoto/test_signers.rs +++ b/stackslib/src/chainstate/nakamoto/test_signers.rs @@ -324,6 +324,12 @@ impl TestSigners { .map(|s| s.signing_key.to_vec()) .collect::>(); + info!( + "TestSigners: Signing Nakamoto block. TestSigners has {} signers. 
Reward set has {} signers.", + test_signers_by_pk.len(), + reward_set_keys.len(), + ); + let mut signatures = Vec::with_capacity(reward_set_keys.len()); let mut missing_keys = 0; diff --git a/stackslib/src/chainstate/nakamoto/tests/node.rs b/stackslib/src/chainstate/nakamoto/tests/node.rs index d3f190de1f..bd12072a01 100644 --- a/stackslib/src/chainstate/nakamoto/tests/node.rs +++ b/stackslib/src/chainstate/nakamoto/tests/node.rs @@ -737,7 +737,7 @@ impl TestStacksNode { let reward_set = load_nakamoto_reward_set( miner .burnchain - .pox_reward_cycle(sort_tip_sn.block_height) + .block_height_to_reward_cycle(sort_tip_sn.block_height) .expect("FATAL: no reward cycle for sortition"), &sort_tip_sn.sortition_id, &miner.burnchain, diff --git a/stackslib/src/chainstate/stacks/boot/contract_tests.rs b/stackslib/src/chainstate/stacks/boot/contract_tests.rs index 44b7be9264..20abf84856 100644 --- a/stackslib/src/chainstate/stacks/boot/contract_tests.rs +++ b/stackslib/src/chainstate/stacks/boot/contract_tests.rs @@ -490,7 +490,7 @@ impl BurnStateDB for TestSimBurnStateDB { let first_block = self.get_burn_start_height(); let prepare_len = self.get_pox_prepare_length(); let rc_len = self.get_pox_reward_cycle_length(); - if Burnchain::static_is_in_prepare_phase( + if PoxConstants::static_is_in_prepare_phase( first_block.into(), rc_len.into(), prepare_len.into(), diff --git a/stackslib/src/chainstate/stacks/boot/mod.rs b/stackslib/src/chainstate/stacks/boot/mod.rs index 0f45d7a6d0..88ecc8887e 100644 --- a/stackslib/src/chainstate/stacks/boot/mod.rs +++ b/stackslib/src/chainstate/stacks/boot/mod.rs @@ -1773,6 +1773,7 @@ pub mod test { let data = if let Some(d) = value_opt.expect_optional().unwrap() { d } else { + warn!("get_stacker_info: No PoX info for {}", addr); return None; }; diff --git a/stackslib/src/chainstate/stacks/boot/pox_4_tests.rs b/stackslib/src/chainstate/stacks/boot/pox_4_tests.rs index ac59772f32..0968cc4de3 100644 --- a/stackslib/src/chainstate/stacks/boot/pox_4_tests.rs +++ b/stackslib/src/chainstate/stacks/boot/pox_4_tests.rs @@ -49,7 +49,7 @@ use wsts::curve::point::{Compressed, Point}; use super::test::*; use super::RawRewardSetEntry; use crate::burnchains::{Burnchain, PoxConstants}; -use crate::chainstate::burn::db::sortdb::SortitionDB; +use crate::chainstate::burn::db::sortdb::{SortitionDB, SortitionHandle}; use crate::chainstate::burn::operations::*; use crate::chainstate::burn::{BlockSnapshot, ConsensusHash}; use crate::chainstate::coordinator::tests::pox_addr_from; @@ -81,6 +81,7 @@ use crate::clarity_vm::database::marf::{MarfedKV, WritableMarfStore}; use crate::clarity_vm::database::HeadersDBConn; use crate::core::*; use crate::net::test::{TestEventObserver, TestEventObserverBlock, TestPeer, TestPeerConfig}; +use crate::net::tests::NakamotoBootPlan; use crate::util_lib::boot::boot_code_id; use crate::util_lib::db::{DBConn, FromRow}; use crate::util_lib::signed_structured_data::pox4::Pox4SignatureTopic; @@ -96,6 +97,14 @@ pub fn get_tip(sortdb: Option<&SortitionDB>) -> BlockSnapshot { SortitionDB::get_canonical_burn_chain_tip(&sortdb.unwrap().conn()).unwrap() } +/// Helper rstest template for running tests in both 2.5 +/// and 3.0 epochs. 
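Reviewer note (sketch; the use statements and dev-dependencies are assumptions, since the test module's imports sit outside this hunk): the nakamoto_cases template defined just below relies on rstest_reuse, so every test annotated with #[apply(nakamoto_cases)] expands into two cases, one with use_nakamoto = true (epoch 3.0) and one with use_nakamoto = false (epoch 2.5). Roughly:

// Assumed dev-dependencies: rstest and rstest_reuse.
use rstest::rstest;
use rstest_reuse::{apply, template};

#[template]
#[rstest]
#[case::epoch_30(true)]
#[case::epoch_25(false)]
fn nakamoto_cases(#[case] use_nakamoto: bool) {}

// Expands into two test cases, one per #[case] on the template above.
#[apply(nakamoto_cases)]
fn demo_case(use_nakamoto: bool) {
    // a real test would branch on use_nakamoto, as the pox_4 tests below do
    assert!(use_nakamoto || !use_nakamoto);
}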
+#[template] +#[rstest] +#[case::epoch_30(true)] +#[case::epoch_25(false)] +fn nakamoto_cases(#[case] use_nakamoto: bool) {} + fn make_simple_pox_4_lock( key: &StacksPrivateKey, peer: &mut TestPeer, @@ -138,7 +147,7 @@ fn make_simple_pox_4_lock( ) } -pub fn make_test_epochs_pox() -> (Vec, PoxConstants) { +pub fn make_test_epochs_pox(use_nakamoto: bool) -> (Vec, PoxConstants) { let EMPTY_SORTITIONS = 25; let EPOCH_2_1_HEIGHT = EMPTY_SORTITIONS + 11; // 36 let EPOCH_2_2_HEIGHT = EPOCH_2_1_HEIGHT + 14; // 50 @@ -147,8 +156,9 @@ pub fn make_test_epochs_pox() -> (Vec, PoxConstants) { // this means that cycle 11 should also be treated like a "burn" let EPOCH_2_4_HEIGHT = EPOCH_2_3_HEIGHT + 4; // 56 let EPOCH_2_5_HEIGHT = EPOCH_2_4_HEIGHT + 44; // 100 + let EPOCH_3_0_HEIGHT = EPOCH_2_5_HEIGHT + 23; // 123 - let epochs = vec![ + let mut epochs = vec![ StacksEpoch { epoch_id: StacksEpochId::Epoch10, start_height: 0, @@ -201,12 +211,28 @@ pub fn make_test_epochs_pox() -> (Vec, PoxConstants) { StacksEpoch { epoch_id: StacksEpochId::Epoch25, start_height: EPOCH_2_5_HEIGHT, - end_height: STACKS_EPOCH_MAX, + end_height: { + if use_nakamoto { + EPOCH_3_0_HEIGHT + } else { + STACKS_EPOCH_MAX + } + }, block_limit: ExecutionCost::max_value(), network_epoch: PEER_VERSION_EPOCH_2_5, }, ]; + if use_nakamoto { + epochs.push(StacksEpoch { + epoch_id: StacksEpochId::Epoch30, + start_height: EPOCH_3_0_HEIGHT, + end_height: STACKS_EPOCH_MAX, + block_limit: ExecutionCost::max_value(), + network_epoch: PEER_VERSION_EPOCH_3_0, + }); + } + let mut pox_constants = PoxConstants::mainnet_default(); pox_constants.reward_cycle_length = 5; pox_constants.prepare_length = 2; @@ -230,7 +256,7 @@ fn pox_extend_transition() { // tenures start being tracked. let EMPTY_SORTITIONS = 25; - let (epochs, pox_constants) = make_test_epochs_pox(); + let (epochs, pox_constants) = make_test_epochs_pox(false); let mut burnchain = Burnchain::default_unittest( 0, @@ -890,7 +916,7 @@ fn pox_lock_unlock() { // Config for this test // We are going to try locking for 2 reward cycles (10 blocks) let lock_period = 2; - let (epochs, pox_constants) = make_test_epochs_pox(); + let (epochs, pox_constants) = make_test_epochs_pox(false); let mut burnchain = Burnchain::default_unittest( 0, @@ -1064,7 +1090,7 @@ fn pox_3_defunct() { // Config for this test // We are going to try locking for 2 reward cycles (10 blocks) let lock_period = 2; - let (epochs, pox_constants) = make_test_epochs_pox(); + let (epochs, pox_constants) = make_test_epochs_pox(false); let mut burnchain = Burnchain::default_unittest( 0, @@ -1199,7 +1225,7 @@ fn pox_3_unlocks() { // Config for this test // We are going to try locking for 4 reward cycles (20 blocks) let lock_period = 4; - let (epochs, pox_constants) = make_test_epochs_pox(); + let (epochs, pox_constants) = make_test_epochs_pox(false); let mut burnchain = Burnchain::default_unittest( 0, @@ -1350,7 +1376,7 @@ fn pox_3_unlocks() { #[test] fn pox_4_check_cycle_id_range_in_print_events_pool() { // Config for this test - let (epochs, pox_constants) = make_test_epochs_pox(); + let (epochs, pox_constants) = make_test_epochs_pox(false); let mut burnchain = Burnchain::default_unittest( 0, @@ -1739,7 +1765,7 @@ fn pox_4_check_cycle_id_range_in_print_events_pool() { #[test] fn pox_4_check_cycle_id_range_in_print_events_pool_in_prepare_phase() { // Config for this test - let (epochs, pox_constants) = make_test_epochs_pox(); + let (epochs, pox_constants) = make_test_epochs_pox(false); let mut burnchain = Burnchain::default_unittest( 0, @@ 
-2166,7 +2192,7 @@ fn pox_4_check_cycle_id_range_in_print_events_pool_in_prepare_phase() { #[test] fn pox_4_check_cycle_id_range_in_print_events_pool_in_prepare_phase_skip_cycle() { // Config for this test - let (epochs, pox_constants) = make_test_epochs_pox(); + let (epochs, pox_constants) = make_test_epochs_pox(false); let mut burnchain = Burnchain::default_unittest( 0, @@ -2398,7 +2424,7 @@ fn pox_4_check_cycle_id_range_in_print_events_pool_in_prepare_phase_skip_cycle() #[test] fn pox_4_check_cycle_id_range_in_print_events_before_prepare_phase() { // Config for this test - let (epochs, pox_constants) = make_test_epochs_pox(); + let (epochs, pox_constants) = make_test_epochs_pox(false); let mut burnchain = Burnchain::default_unittest( 0, @@ -2519,7 +2545,7 @@ fn pox_4_check_cycle_id_range_in_print_events_before_prepare_phase() { #[test] fn pox_4_check_cycle_id_range_in_print_events_in_prepare_phase() { // Config for this test - let (epochs, pox_constants) = make_test_epochs_pox(); + let (epochs, pox_constants) = make_test_epochs_pox(false); let mut burnchain = Burnchain::default_unittest( 0, @@ -2638,7 +2664,7 @@ fn pox_4_check_cycle_id_range_in_print_events_in_prepare_phase() { #[test] fn pox_4_delegate_stack_increase_events() { // Config for this test - let (epochs, pox_constants) = make_test_epochs_pox(); + let (epochs, pox_constants) = make_test_epochs_pox(false); let mut burnchain = Burnchain::default_unittest( 0, @@ -2744,7 +2770,7 @@ fn pox_4_delegate_stack_increase_events() { #[test] fn pox_4_revoke_delegate_stx_events() { // Config for this test - let (epochs, pox_constants) = make_test_epochs_pox(); + let (epochs, pox_constants) = make_test_epochs_pox(false); let mut burnchain = Burnchain::default_unittest( 0, @@ -2982,7 +3008,7 @@ fn verify_signer_key_sig( #[test] fn verify_signer_key_signatures() { - let (epochs, pox_constants) = make_test_epochs_pox(); + let (epochs, pox_constants) = make_test_epochs_pox(false); let mut burnchain = Burnchain::default_unittest( 0, @@ -3274,12 +3300,12 @@ fn verify_signer_key_signatures() { assert_eq!(result, Value::okay_true()); } -#[test] -fn stack_stx_verify_signer_sig() { +#[apply(nakamoto_cases)] +fn stack_stx_verify_signer_sig(use_nakamoto: bool) { let lock_period = 2; let observer = TestEventObserver::new(); - let (burnchain, mut peer, keys, latest_block, block_height, coinbase_nonce) = - prepare_pox4_test(function_name!(), Some(&observer)); + let (burnchain, mut peer, keys, latest_block, block_height, coinbase_nonce, mut test_signers) = + prepare_pox4_test(function_name!(), Some(&observer), use_nakamoto); let mut coinbase_nonce = coinbase_nonce; @@ -3538,7 +3564,7 @@ fn stack_stx_verify_signer_sig() { valid_tx, ]; - let latest_block = peer.tenure_with_txs(&txs, &mut coinbase_nonce); + let latest_block = tenure_with_txs(&mut peer, &txs, &mut coinbase_nonce, &mut test_signers); let stacker_txs = get_last_block_sender_transactions(&observer, stacker_addr); let expected_error = Value::error(Value::Int(35)).unwrap(); @@ -3598,8 +3624,8 @@ fn stack_stx_verify_signer_sig() { fn stack_extend_verify_sig() { let lock_period = 2; let observer = TestEventObserver::new(); - let (burnchain, mut peer, keys, latest_block, block_height, coinbase_nonce) = - prepare_pox4_test(function_name!(), Some(&observer)); + let (burnchain, mut peer, keys, latest_block, block_height, coinbase_nonce, test_signers) = + prepare_pox4_test(function_name!(), Some(&observer), false); let mut coinbase_nonce = coinbase_nonce; @@ -3851,8 +3877,8 @@ fn 
stack_extend_verify_sig() { fn stack_agg_commit_verify_sig() { let lock_period = 2; let observer = TestEventObserver::new(); - let (burnchain, mut peer, keys, latest_block, block_height, coinbase_nonce) = - prepare_pox4_test(function_name!(), Some(&observer)); + let (burnchain, mut peer, keys, latest_block, block_height, coinbase_nonce, test_signers) = + prepare_pox4_test(function_name!(), Some(&observer), false); let mut coinbase_nonce = coinbase_nonce; @@ -4218,12 +4244,23 @@ fn advance_to_block_height( txs: &[StacksTransaction], peer_nonce: &mut usize, target_height: u64, -) -> (StacksBlockId, TestEventObserverBlock) { + test_signers: &mut Option, +) -> ( + StacksBlockId, + TestEventObserverBlock, + Vec, +) { let mut tx_block = None; let mut latest_block = None; let mut passed_txs = txs; while peer.get_burn_block_height() < target_height { - latest_block = Some(peer.tenure_with_txs(&passed_txs, peer_nonce)); + info!( + "Advancing to block height: {} from {} with {} txs", + target_height, + peer.get_burn_block_height(), + passed_txs.len() + ); + latest_block = Some(tenure_with_txs(peer, &passed_txs, peer_nonce, test_signers)); passed_txs = &[]; if tx_block.is_none() { tx_block = Some(observer.get_blocks().last().unwrap().clone()); @@ -4231,7 +4268,12 @@ fn advance_to_block_height( } let latest_block = latest_block.expect("Failed to get tip"); let tx_block = tx_block.expect("Failed to get tx block"); - (latest_block, tx_block) + let tx_block_receipts = if test_signers.is_some() { + tx_block.receipts[1..].to_vec() // remove TenureChange + } else { + tx_block.receipts.clone() + }; + (latest_block, tx_block, tx_block_receipts) } #[test] @@ -4430,12 +4472,13 @@ fn stack_agg_increase() { // Advance to next block in order to collect aggregate commit reward index target_height += 1; - let (latest_block, tx_block) = advance_to_block_height( + let (latest_block, tx_block, _receipts) = advance_to_block_height( &mut peer, &observer, &txs, &mut peer_nonce, target_height.into(), + &mut None, ); // Get Bob's aggregate commit reward index @@ -4576,12 +4619,13 @@ fn stack_agg_increase() { // Advance to next block in order to attempt aggregate increase target_height += 1; - let (latest_block, tx_block) = advance_to_block_height( + let (latest_block, tx_block, _receipts) = advance_to_block_height( &mut peer, &observer, &txs, &mut peer_nonce, target_height.into(), + &mut None, ); // Fetch the error aggregate increase result & check that the err is ERR_INVALID_SIGNER_KEY @@ -4658,12 +4702,12 @@ fn stack_agg_increase() { assert_eq!(bob_aggregate_commit_reward_index, &Value::UInt(1)); } -#[test] -fn stack_increase_verify_signer_key() { +#[apply(nakamoto_cases)] +fn stack_increase_verify_signer_key(use_nakamoto: bool) { let lock_period = 1; let observer = TestEventObserver::new(); - let (burnchain, mut peer, keys, latest_block, block_height, coinbase_nonce) = - prepare_pox4_test(function_name!(), Some(&observer)); + let (burnchain, mut peer, keys, latest_block, block_height, coinbase_nonce, mut test_signers) = + prepare_pox4_test(function_name!(), Some(&observer), use_nakamoto); let mut coinbase_nonce = coinbase_nonce; @@ -4901,7 +4945,8 @@ fn stack_increase_verify_signer_key() { 1, ); - let latest_block = peer.tenure_with_txs( + let latest_block = tenure_with_txs( + &mut peer, &[ stack_tx, invalid_cycle_tx, @@ -4915,6 +4960,7 @@ fn stack_increase_verify_signer_key() { stack_increase, ], &mut coinbase_nonce, + &mut test_signers, ); let txs = get_last_block_sender_transactions(&observer, stacker_addr); @@ 
-4943,15 +4989,15 @@ fn stack_increase_verify_signer_key() { .expect("Expected ok result from tx"); } -#[test] +#[apply(nakamoto_cases)] /// Verify that when calling `stack-increase`, the function /// fails if the signer key for each cycle being updated is not the same /// as the provided `signer-key` argument -fn stack_increase_different_signer_keys() { +fn stack_increase_different_signer_keys(use_nakamoto: bool) { let lock_period = 1; let observer = TestEventObserver::new(); - let (burnchain, mut peer, keys, latest_block, block_height, coinbase_nonce) = - prepare_pox4_test(function_name!(), Some(&observer)); + let (burnchain, mut peer, keys, latest_block, block_height, coinbase_nonce, mut test_signers) = + prepare_pox4_test(function_name!(), Some(&observer), use_nakamoto); let mut coinbase_nonce = coinbase_nonce; @@ -5036,8 +5082,12 @@ fn stack_increase_different_signer_keys() { 1, ); - let latest_block = - peer.tenure_with_txs(&[stack_tx, extend_tx, stack_increase], &mut coinbase_nonce); + let latest_block = tenure_with_txs( + &mut peer, + &[stack_tx, extend_tx, stack_increase], + &mut coinbase_nonce, + &mut test_signers, + ); let txs = get_last_block_sender_transactions(&observer, stacker_addr.clone()); @@ -5133,11 +5183,18 @@ fn balances_from_keys( .collect() } -#[test] -fn stack_stx_signer_key() { +#[apply(nakamoto_cases)] +fn stack_stx_signer_key(use_nakamoto: bool) { let observer = TestEventObserver::new(); - let (burnchain, mut peer, keys, latest_block, block_height, mut coinbase_nonce) = - prepare_pox4_test(function_name!(), Some(&observer)); + let ( + burnchain, + mut peer, + keys, + latest_block, + block_height, + mut coinbase_nonce, + mut test_signers, + ) = prepare_pox4_test(function_name!(), Some(&observer), use_nakamoto); let stacker_nonce = 0; let stacker_key = &keys[0]; @@ -5181,7 +5238,7 @@ fn stack_stx_signer_key() { ], )]; - let latest_block = peer.tenure_with_txs(&txs, &mut coinbase_nonce); + let latest_block = tenure_with_txs(&mut peer, &txs, &mut coinbase_nonce, &mut test_signers); let stacking_state = get_stacking_state_pox_4( &mut peer, &latest_block, @@ -5209,25 +5266,39 @@ fn stack_stx_signer_key() { .block_height_to_reward_cycle(block_height) .unwrap(); let reward_cycle_ht = burnchain.reward_cycle_to_block_height(next_reward_cycle); - let mut reward_set = get_reward_set_entries_at(&mut peer, &latest_block, reward_cycle_ht); - assert_eq!(reward_set.len(), 1); - let reward_entry = reward_set.pop().unwrap(); - assert_eq!( - PoxAddress::try_from_pox_tuple(false, &pox_addr_val).unwrap(), - reward_entry.reward_address - ); + let reward_set = get_reward_set_entries_at(&mut peer, &latest_block, reward_cycle_ht); + assert_eq!(reward_set.len(), { + if use_nakamoto { + 2 + } else { + 1 + } + }); + let reward_entry = reward_set + .iter() + .find(|entry| { + entry.reward_address == PoxAddress::try_from_pox_tuple(false, &pox_addr_val).unwrap() + }) + .expect("No reward entry found"); assert_eq!( &reward_entry.signer.unwrap(), &signer_public_key.to_bytes_compressed().as_slice(), ); } -#[test] +#[apply(nakamoto_cases)] /// Test `stack-stx` using signer key authorization -fn stack_stx_signer_auth() { +fn stack_stx_signer_auth(use_nakamoto: bool) { let observer = TestEventObserver::new(); - let (burnchain, mut peer, keys, latest_block, block_height, mut coinbase_nonce) = - prepare_pox4_test(function_name!(), Some(&observer)); + let ( + burnchain, + mut peer, + keys, + latest_block, + block_height, + mut coinbase_nonce, + mut test_signers, + ) = 
prepare_pox4_test(function_name!(), Some(&observer), use_nakamoto); let mut stacker_nonce = 0; let stacker_key = &keys[0]; @@ -5291,7 +5362,7 @@ fn stack_stx_signer_auth() { let txs = vec![failed_stack_tx, enable_auth_tx, valid_stack_tx]; - let latest_block = peer.tenure_with_txs(&txs, &mut coinbase_nonce); + let latest_block = tenure_with_txs(&mut peer, &txs, &mut coinbase_nonce, &mut test_signers); let stacking_state = get_stacking_state_pox_4( &mut peer, &latest_block, @@ -5329,13 +5400,13 @@ fn stack_stx_signer_auth() { assert_eq!(enable_tx_result, Value::okay_true()); } -#[test] +#[apply(nakamoto_cases)] /// Test `stack-aggregation-commit` using signer key authorization -fn stack_agg_commit_signer_auth() { +fn stack_agg_commit_signer_auth(use_nakamoto: bool) { let lock_period = 2; let observer = TestEventObserver::new(); - let (burnchain, mut peer, keys, latest_block, block_height, coinbase_nonce) = - prepare_pox4_test(function_name!(), Some(&observer)); + let (burnchain, mut peer, keys, latest_block, block_height, coinbase_nonce, mut test_signers) = + prepare_pox4_test(function_name!(), Some(&observer), use_nakamoto); let mut coinbase_nonce = coinbase_nonce; @@ -5434,7 +5505,7 @@ fn stack_agg_commit_signer_auth() { valid_agg_tx, ]; - let latest_block = peer.tenure_with_txs(&txs, &mut coinbase_nonce); + let latest_block = tenure_with_txs(&mut peer, &txs, &mut coinbase_nonce, &mut test_signers); let delegate_txs = get_last_block_sender_transactions(&observer, delegate_addr); @@ -5449,14 +5520,14 @@ fn stack_agg_commit_signer_auth() { .expect("Expected ok result from stack-agg-commit tx"); } -#[test] +#[apply(nakamoto_cases)] /// Test `stack-extend` using signer key authorization /// instead of signatures -fn stack_extend_signer_auth() { +fn stack_extend_signer_auth(use_nakamoto: bool) { let lock_period = 2; let observer = TestEventObserver::new(); - let (burnchain, mut peer, keys, latest_block, block_height, coinbase_nonce) = - prepare_pox4_test(function_name!(), Some(&observer)); + let (burnchain, mut peer, keys, latest_block, block_height, coinbase_nonce, mut test_signers) = + prepare_pox4_test(function_name!(), Some(&observer), use_nakamoto); let mut coinbase_nonce = coinbase_nonce; @@ -5540,7 +5611,7 @@ fn stack_extend_signer_auth() { let txs = vec![stack_tx, invalid_cycle_tx, enable_auth_tx, valid_tx]; - let latest_block = peer.tenure_with_txs(&txs, &mut coinbase_nonce); + let latest_block = tenure_with_txs(&mut peer, &txs, &mut coinbase_nonce, &mut test_signers); let stacker_txs = get_last_block_sender_transactions(&observer, stacker_addr); @@ -5556,13 +5627,13 @@ fn stack_extend_signer_auth() { .expect("Expected ok result from stack-extend tx"); } -#[test] +#[apply(nakamoto_cases)] /// Test `set-signer-key-authorization` function -fn test_set_signer_key_auth() { +fn test_set_signer_key_auth(use_nakamoto: bool) { let lock_period = 2; let observer = TestEventObserver::new(); - let (burnchain, mut peer, keys, latest_block, block_height, coinbase_nonce) = - prepare_pox4_test(function_name!(), Some(&observer)); + let (burnchain, mut peer, keys, latest_block, block_height, coinbase_nonce, mut test_signers) = + prepare_pox4_test(function_name!(), Some(&observer), use_nakamoto); let mut coinbase_nonce = coinbase_nonce; @@ -5639,7 +5710,8 @@ fn test_set_signer_key_auth() { 1, ); - let latest_block = peer.tenure_with_txs( + let latest_block = tenure_with_txs( + &mut peer, &[ invalid_enable_tx, invalid_tx_period, @@ -5647,6 +5719,7 @@ fn test_set_signer_key_auth() { disable_auth_tx, ], 
&mut coinbase_nonce, + &mut test_signers, ); let alice_txs = get_last_block_sender_transactions(&observer, alice_addr); @@ -5716,7 +5789,12 @@ fn test_set_signer_key_auth() { 1, ); - let latest_block = peer.tenure_with_txs(&[enable_auth_tx], &mut coinbase_nonce); + let latest_block = tenure_with_txs( + &mut peer, + &[enable_auth_tx], + &mut coinbase_nonce, + &mut test_signers, + ); let signer_key_enabled = get_signer_key_authorization_pox_4( &mut peer, @@ -5748,7 +5826,12 @@ fn test_set_signer_key_auth() { 1, ); - let latest_block = peer.tenure_with_txs(&[disable_auth_tx], &mut coinbase_nonce); + let latest_block = tenure_with_txs( + &mut peer, + &[disable_auth_tx], + &mut coinbase_nonce, + &mut test_signers, + ); let signer_key_enabled = get_signer_key_authorization_pox_4( &mut peer, @@ -5765,11 +5848,18 @@ fn test_set_signer_key_auth() { assert_eq!(signer_key_enabled.unwrap(), false); } -#[test] -fn stack_extend_signer_key() { +#[apply(nakamoto_cases)] +fn stack_extend_signer_key(use_nakamoto: bool) { let lock_period = 2; - let (burnchain, mut peer, keys, latest_block, block_height, mut coinbase_nonce) = - prepare_pox4_test(function_name!(), None); + let ( + burnchain, + mut peer, + keys, + latest_block, + block_height, + mut coinbase_nonce, + mut test_signers, + ) = prepare_pox4_test(function_name!(), None, use_nakamoto); let mut stacker_nonce = 0; let stacker_key = &keys[0]; @@ -5819,7 +5909,7 @@ fn stack_extend_signer_key() { stacker_nonce += 1; - let mut latest_block = peer.tenure_with_txs(&txs, &mut coinbase_nonce); + let mut latest_block = tenure_with_txs(&mut peer, &txs, &mut coinbase_nonce, &mut test_signers); let signature = make_signer_key_signature( &pox_addr, @@ -5842,7 +5932,12 @@ fn stack_extend_signer_key() { 1, )]; - latest_block = peer.tenure_with_txs(&update_txs, &mut coinbase_nonce); + latest_block = tenure_with_txs( + &mut peer, + &update_txs, + &mut coinbase_nonce, + &mut test_signers, + ); let new_stacking_state = get_stacking_state_pox_4( &mut peer, &latest_block, @@ -5855,21 +5950,35 @@ fn stack_extend_signer_key() { let reward_cycle_ht = burnchain.reward_cycle_to_block_height(next_reward_cycle); let extend_cycle_ht = burnchain.reward_cycle_to_block_height(extend_reward_cycle); - let mut reward_set = get_reward_set_entries_at(&mut peer, &latest_block, reward_cycle_ht); - assert_eq!(reward_set.len(), 1); - let reward_entry = reward_set.pop().unwrap(); - assert_eq!( - PoxAddress::try_from_pox_tuple(false, &pox_addr_val).unwrap(), - reward_entry.reward_address - ); + let reward_set = get_reward_set_entries_at(&mut peer, &latest_block, reward_cycle_ht); + assert_eq!(reward_set.len(), { + if use_nakamoto { + 2 + } else { + 1 + } + }); + let reward_entry = reward_set + .iter() + .find(|entry| entry.reward_address == pox_addr) + .expect("No reward entry found"); assert_eq!(&reward_entry.signer.unwrap(), signer_bytes.as_slice(),); - let mut reward_set = get_reward_set_entries_at(&mut peer, &latest_block, extend_cycle_ht); - assert_eq!(reward_set.len(), 1); - let reward_entry = reward_set.pop().unwrap(); + let reward_set = get_reward_set_entries_at(&mut peer, &latest_block, extend_cycle_ht); + assert_eq!(reward_set.len(), { + if use_nakamoto { + 2 + } else { + 1 + } + }); + let reward_entry = reward_set + .iter() + .find(|entry| entry.reward_address == pox_addr) + .expect("No reward entry found"); assert_eq!( - PoxAddress::try_from_pox_tuple(false, &pox_addr_val).unwrap(), - reward_entry.reward_address + &reward_entry.signer.unwrap(), + signer_extend_bytes.as_slice(), ); 
assert_eq!( &reward_entry.signer.unwrap(), @@ -5877,11 +5986,18 @@ fn stack_extend_signer_key() { ); } -#[test] -fn delegate_stack_stx_signer_key() { +#[apply(nakamoto_cases)] +fn delegate_stack_stx_signer_key(use_nakamoto: bool) { let lock_period = 2; - let (burnchain, mut peer, keys, latest_block, block_height, mut coinbase_nonce) = - prepare_pox4_test(function_name!(), None); + let ( + burnchain, + mut peer, + keys, + latest_block, + block_height, + mut coinbase_nonce, + mut test_signers, + ) = prepare_pox4_test(function_name!(), None, use_nakamoto); let stacker_nonce = 0; let stacker_key = &keys[0]; @@ -5955,7 +6071,7 @@ fn delegate_stack_stx_signer_key() { ), ]; - let latest_block = peer.tenure_with_txs(&txs, &mut coinbase_nonce); + let latest_block = tenure_with_txs(&mut peer, &txs, &mut coinbase_nonce, &mut test_signers); let delegation_state = get_delegation_state_pox_4( &mut peer, @@ -5974,13 +6090,18 @@ fn delegate_stack_stx_signer_key() { .expect_tuple(); let reward_cycle_ht = burnchain.reward_cycle_to_block_height(next_reward_cycle); - let mut reward_set = get_reward_set_entries_at(&mut peer, &latest_block, reward_cycle_ht); - assert_eq!(reward_set.len(), 1); - let reward_entry = reward_set.pop().unwrap(); - assert_eq!( - PoxAddress::try_from_pox_tuple(false, &pox_addr_val).unwrap(), - reward_entry.reward_address - ); + let reward_set = get_reward_set_entries_at(&mut peer, &latest_block, reward_cycle_ht); + assert_eq!(reward_set.len(), { + if use_nakamoto { + 2 + } else { + 1 + } + }); + let reward_entry = reward_set + .iter() + .find(|entry| entry.reward_address == pox_addr) + .expect("No reward entry found"); assert_eq!( &reward_entry.signer.unwrap(), signer_key.to_bytes_compressed().as_slice() @@ -5994,11 +6115,18 @@ fn delegate_stack_stx_signer_key() { // // This test asserts that the signing key in Alice's stacking state // is equal to Bob's 'new' signer key. 
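Aside: the reward-set checks in the hunks above (stack_extend_signer_key, delegate_stack_stx_signer_key, and the others) stop popping the lone entry and instead assert a `use_nakamoto`-dependent length and search by PoX address, since the Nakamoto bootstrap stacks an extra default test signer. A minimal sketch of that lookup pattern, using simplified stand-in types rather than the crate's real reward-set entry and `PoxAddress`:

// Simplified stand-ins, assumed for illustration only.
#[derive(Debug, PartialEq)]
struct RewardSetEntry {
    reward_address: String,
    signer: Option<Vec<u8>>,
}

/// The Nakamoto boot plan stacks one default test signer, so each cycle's
/// reward set carries one more entry than in the epoch-2.5 bootstrap.
fn expected_reward_set_len(use_nakamoto: bool) -> usize {
    if use_nakamoto {
        2
    } else {
        1
    }
}

/// Look the entry up by its PoX address instead of assuming it is the only one.
fn find_entry<'a>(set: &'a [RewardSetEntry], addr: &str) -> &'a RewardSetEntry {
    set.iter()
        .find(|entry| entry.reward_address == addr)
        .expect("No reward entry found")
}

#[test]
fn finds_the_stacker_next_to_the_default_signer() {
    let set = vec![
        RewardSetEntry { reward_address: "default-test-signer".into(), signer: Some(vec![0x01]) },
        RewardSetEntry { reward_address: "alice".into(), signer: Some(vec![0x02]) },
    ];
    assert_eq!(set.len(), expected_reward_set_len(true));
    assert_eq!(find_entry(&set, "alice").signer.as_deref(), Some(&[0x02u8][..]));
}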
-#[test] -fn delegate_stack_stx_extend_signer_key() { +#[apply(nakamoto_cases)] +fn delegate_stack_stx_extend_signer_key(use_nakamoto: bool) { let lock_period: u128 = 2; - let (burnchain, mut peer, keys, latest_block, block_height, mut coinbase_nonce) = - prepare_pox4_test(function_name!(), None); + let ( + burnchain, + mut peer, + keys, + latest_block, + block_height, + mut coinbase_nonce, + mut test_signers, + ) = prepare_pox4_test(function_name!(), None, use_nakamoto); let alice_nonce = 0; let alice_stacker_key = &keys[0]; @@ -6049,7 +6177,7 @@ fn delegate_stack_stx_extend_signer_key() { // Both are pox_4 helpers found in mod.rs let txs = vec![delegate_stx, delegate_stack_stx]; - let latest_block = peer.tenure_with_txs(&txs, &mut coinbase_nonce); + let latest_block = tenure_with_txs(&mut peer, &txs, &mut coinbase_nonce, &mut test_signers); let delegation_state = get_delegation_state_pox_4( &mut peer, @@ -6154,7 +6282,7 @@ fn delegate_stack_stx_extend_signer_key() { // Next tx arr calls a delegate_stack_extend pox_4 helper found in mod.rs let txs = vec![delegate_stack_extend, agg_tx_0, agg_tx_1]; - let latest_block = peer.tenure_with_txs(&txs, &mut coinbase_nonce); + let latest_block = tenure_with_txs(&mut peer, &txs, &mut coinbase_nonce, &mut test_signers); let new_stacking_state = get_stacking_state_pox_4(&mut peer, &latest_block, &alice_principal) .unwrap() .expect_tuple(); @@ -6162,16 +6290,32 @@ fn delegate_stack_stx_extend_signer_key() { let reward_cycle_ht = burnchain.reward_cycle_to_block_height(next_reward_cycle); let extend_cycle_ht = burnchain.reward_cycle_to_block_height(extend_cycle); - let mut reward_set = get_reward_set_entries_at(&mut peer, &latest_block, reward_cycle_ht); - assert_eq!(reward_set.len(), 1); - let reward_entry = reward_set.pop().unwrap(); - assert_eq!(pox_addr, reward_entry.reward_address); + let reward_set = get_reward_set_entries_at(&mut peer, &latest_block, reward_cycle_ht); + assert_eq!(reward_set.len(), { + if use_nakamoto { + 2 + } else { + 1 + } + }); + let reward_entry = reward_set + .iter() + .find(|entry| entry.reward_address == pox_addr) + .expect("No reward entry found"); assert_eq!(&reward_entry.signer.unwrap(), signer_bytes.as_slice(),); - let mut reward_set = get_reward_set_entries_at(&mut peer, &latest_block, extend_cycle_ht); - assert_eq!(reward_set.len(), 1); - let reward_entry = reward_set.pop().unwrap(); - assert_eq!(pox_addr, reward_entry.reward_address); + let reward_set = get_reward_set_entries_at(&mut peer, &latest_block, extend_cycle_ht); + assert_eq!(reward_set.len(), { + if use_nakamoto { + 2 + } else { + 1 + } + }); + let reward_entry = reward_set + .iter() + .find(|entry| entry.reward_address == pox_addr) + .expect("No reward entry found"); assert_eq!( &reward_entry.signer.unwrap(), signer_extend_bytes.as_slice(), @@ -6185,12 +6329,19 @@ fn delegate_stack_stx_extend_signer_key() { // // This test asserts that Alice's total-locked is equal to // twice the stacking minimum after calling stack-increase. 
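Aside: the stack_increase hunk below stops hard-coding the expected balance, locked amount, and unlock height, and instead derives them from the PoX constants, because the 2.5 and Nakamoto runs use different cycle lengths. A standalone sketch of that unlock-height arithmetic follows; the helpers here are assumed stand-ins for the `pox_constants` methods, with the first burn height fixed at 0 as in the test's call.

/// Cycle that contains `height`, counting from `first_block_height`.
fn block_height_to_reward_cycle(first_block_height: u64, cycle_length: u64, height: u64) -> Option<u64> {
    if cycle_length == 0 || height < first_block_height {
        return None;
    }
    Some((height - first_block_height) / cycle_length)
}

/// Mirrors the expression in the hunk below: locking for `lock_period` cycles
/// from `block_height` is expected to unlock at the modulo-0 block of the cycle
/// `lock_period + 1` cycles after the one containing `block_height`.
fn expected_unlock_height(cycle_length: u64, block_height: u64, lock_period: u64) -> u64 {
    let unlock_cycle = block_height_to_reward_cycle(
        0,
        cycle_length,
        block_height + (lock_period + 1) * cycle_length,
    )
    .expect("cycle length is nonzero");
    unlock_cycle * cycle_length
}

#[test]
fn unlock_height_tracks_the_cycle_length() {
    // 10-block cycles (the Nakamoto test constants) vs. shorter 5-block cycles.
    assert_eq!(expected_unlock_height(10, 43, 2), 70);
    assert_eq!(expected_unlock_height(5, 43, 2), 55);
}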
-#[test] -fn stack_increase() { +#[apply(nakamoto_cases)] +fn stack_increase(use_nakamoto: bool) { let lock_period = 2; let observer = TestEventObserver::new(); - let (burnchain, mut peer, keys, latest_block, block_height, mut coinbase_nonce) = - prepare_pox4_test(function_name!(), Some(&observer)); + let ( + burnchain, + mut peer, + keys, + latest_block, + block_height, + mut coinbase_nonce, + mut test_signers, + ) = prepare_pox4_test(function_name!(), Some(&observer), use_nakamoto); let mut alice_nonce = 0; let alice_stacking_private_key = &keys[0]; @@ -6198,6 +6349,7 @@ fn stack_increase() { let signing_sk = StacksPrivateKey::from_seed(&[1]); let signing_pk = StacksPublicKey::from_private(&signing_sk); let signing_bytes = signing_pk.to_bytes_compressed(); + let alice_balance = get_balance(&mut peer, &alice_address.into()); let min_ustx = get_stacking_minimum(&mut peer, &latest_block); let pox_addr = PoxAddress::from_legacy( @@ -6233,7 +6385,7 @@ fn stack_increase() { // Initial tx arr includes a stack_stx pox_4 helper found in mod.rs let txs = vec![stack_stx]; - let latest_block = peer.tenure_with_txs(&txs, &mut coinbase_nonce); + let latest_block = tenure_with_txs(&mut peer, &txs, &mut coinbase_nonce, &mut test_signers); let stacking_state = get_stacking_state_pox_4( &mut peer, &latest_block, @@ -6265,7 +6417,7 @@ fn stack_increase() { ); // Next tx arr includes a stack_increase pox_4 helper found in mod.rs let txs = vec![stack_increase]; - let latest_block = peer.tenure_with_txs(&txs, &mut coinbase_nonce); + let latest_block = tenure_with_txs(&mut peer, &txs, &mut coinbase_nonce, &mut test_signers); let stacker_transactions = get_last_block_sender_transactions(&observer, alice_address); let actual_result = stacker_transactions.first().cloned().unwrap().result; @@ -6297,12 +6449,29 @@ fn stack_increase() { ("auth-id", Value::UInt(1)), ]); + let alice_expected_balance = alice_balance - min_ustx; + + // Compute the expected unlock height because the 3.0 and 2.5 cases + // have different PoX constants + let cycle_len = burnchain.pox_constants.reward_cycle_length as u128; + let unlock_cycle = burnchain + .pox_constants + .block_height_to_reward_cycle( + 0, + ((block_height as u128) + ((lock_period + 1) * cycle_len)) + .try_into() + .unwrap(), + ) + .unwrap(); + let expected_unlock_height = + unlock_cycle * (burnchain.pox_constants.reward_cycle_length as u64); + let common_data = PoxPrintFields { op_name: "stack-increase".to_string(), stacker: Value::Principal(PrincipalData::from(alice_address.clone())), - balance: Value::UInt(10234866375000), - locked: Value::UInt(5133625000), - burnchain_unlock_height: Value::UInt(125), + balance: Value::UInt(alice_expected_balance), + locked: Value::UInt(min_ustx), + burnchain_unlock_height: Value::UInt(expected_unlock_height as u128), }; check_pox_print_event(&increase_event, common_data, increase_op_data); @@ -6315,10 +6484,18 @@ fn stack_increase() { .block_height_to_reward_cycle(block_height) .unwrap(); let reward_cycle_ht = burnchain.reward_cycle_to_block_height(next_reward_cycle); - let mut reward_set = get_reward_set_entries_at(&mut peer, &latest_block, reward_cycle_ht); - assert_eq!(reward_set.len(), 1); - let reward_entry = reward_set.pop().unwrap(); - assert_eq!(pox_addr, reward_entry.reward_address); + let reward_set = get_reward_set_entries_at(&mut peer, &latest_block, reward_cycle_ht); + assert_eq!(reward_set.len(), { + if use_nakamoto { + 2 + } else { + 1 + } + }); + let reward_entry = reward_set + .iter() + .find(|entry| 
entry.reward_address == pox_addr) + .expect("No reward entry found"); assert_eq!(&reward_entry.signer.unwrap(), &signing_bytes.as_slice()); } @@ -6328,12 +6505,19 @@ fn stack_increase() { // // This test asserts that Alice's total-locked is equal to // twice the stacking minimum after calling delegate-stack-increase. -#[test] -fn delegate_stack_increase() { +#[apply(nakamoto_cases)] +fn delegate_stack_increase(use_nakamoto: bool) { let lock_period: u128 = 2; let observer = TestEventObserver::new(); - let (burnchain, mut peer, keys, latest_block, block_height, mut coinbase_nonce) = - prepare_pox4_test(function_name!(), Some(&observer)); + let ( + burnchain, + mut peer, + keys, + latest_block, + block_height, + mut coinbase_nonce, + mut test_signers, + ) = prepare_pox4_test(function_name!(), Some(&observer), use_nakamoto); let alice_nonce = 0; let alice_key = &keys[0]; @@ -6380,7 +6564,7 @@ fn delegate_stack_increase() { // Initial tx arr includes a delegate_stx & delegate_stack_stx pox_4 helper found in mod.rs let txs = vec![delegate_stx, delegate_stack_stx]; - let latest_block = peer.tenure_with_txs(&txs, &mut coinbase_nonce); + let latest_block = tenure_with_txs(&mut peer, &txs, &mut coinbase_nonce, &mut test_signers); bob_nonce += 1; @@ -6419,7 +6603,7 @@ fn delegate_stack_increase() { // Next tx arr includes a delegate_increase pox_4 helper found in mod.rs let txs = vec![delegate_increase, agg_tx]; - let latest_block = peer.tenure_with_txs(&txs, &mut coinbase_nonce); + let latest_block = tenure_with_txs(&mut peer, &txs, &mut coinbase_nonce, &mut test_signers); let delegate_transactions = get_last_block_sender_transactions(&observer, key_to_stacks_addr(bob_delegate_key)); @@ -6444,9 +6628,18 @@ fn delegate_stack_increase() { // test that the reward set contains the increased amount and the expected key let reward_cycle_ht = burnchain.reward_cycle_to_block_height(next_reward_cycle); - let mut reward_set = get_reward_set_entries_at(&mut peer, &latest_block, reward_cycle_ht); - assert_eq!(reward_set.len(), 1); - let reward_entry = reward_set.pop().unwrap(); + let reward_set = get_reward_set_entries_at(&mut peer, &latest_block, reward_cycle_ht); + assert_eq!(reward_set.len(), { + if use_nakamoto { + 2 + } else { + 1 + } + }); + let reward_entry = reward_set + .iter() + .find(|entry| entry.reward_address == pox_addr) + .expect("No reward entry found"); assert_eq!(pox_addr, reward_entry.reward_address); assert_eq!(min_ustx * 2, reward_entry.amount_stacked); assert_eq!(&reward_entry.signer.unwrap(), signer_pk_bytes.as_slice()); @@ -6456,6 +6649,7 @@ pub fn pox_4_scenario_test_setup<'a>( test_name: &str, observer: &'a TestEventObserver, initial_balances: Vec<(PrincipalData, u64)>, + use_nakamoto: bool, ) -> ( TestPeer<'a>, usize, @@ -6463,9 +6657,12 @@ pub fn pox_4_scenario_test_setup<'a>( u128, u128, u128, - u128, TestPeerConfig, + Option, ) { + if use_nakamoto { + return pox_4_scenario_test_setup_nakamoto(test_name, observer, initial_balances); + } // Setup code extracted from your original test let test_signers = TestSigners::new(vec![]); let aggregate_public_key = test_signers.aggregate_public_key.clone(); @@ -6522,20 +6719,118 @@ pub fn pox_4_scenario_test_setup<'a>( peer, peer_nonce, burn_block_height, - target_height as u128, reward_cycle as u128, next_reward_cycle as u128, min_ustx as u128, peer_config.clone(), + None, + ) +} + +pub fn pox_4_scenario_test_setup_nakamoto<'a>( + test_name: &str, + observer: &'a TestEventObserver, + initial_balances: Vec<(PrincipalData, u64)>, +) -> ( + 
TestPeer<'a>, + usize, + u64, + u128, + u128, + u128, + TestPeerConfig, + Option, +) { + let (epochs, pox_constants) = make_test_epochs_pox(true); + + let mut burnchain = Burnchain::default_unittest( + 0, + &BurnchainHeaderHash::from_hex(BITCOIN_REGTEST_FIRST_BLOCK_HASH).unwrap(), + ); + burnchain.pox_constants = pox_constants.clone(); + + let (peer, keys) = instantiate_pox_peer_with_epoch( + &burnchain, + test_name, + Some(epochs.clone()), + Some(observer), + ); + + let test_key = keys[3].clone(); + let test_keys = vec![test_key.clone()]; + let test_addr = key_to_stacks_addr(&test_key); + let test_signers = TestSigners::new(vec![test_key.clone()]); + let aggregate_public_key = test_signers.aggregate_public_key.clone(); + + let private_key = StacksPrivateKey::from_seed(&[2]); + let test_signers = TestSigners::new(test_keys.clone()); + let addrs: Vec = test_keys.iter().map(|pk| key_to_stacks_addr(pk)).collect(); + let initial_stacker_balance = initial_balances + .get(0) + .expect("Expected at least 1 initial balance") + .1; + let test_stackers = vec![TestStacker { + signer_private_key: test_key.clone(), + stacker_private_key: test_key.clone(), + amount: initial_stacker_balance as u128, + pox_addr: Some(pox_addr_from(&test_key)), + max_amount: None, + }]; + let mut peer_config = TestPeerConfig::default(); + peer_config.aggregate_public_key = Some(aggregate_public_key.clone()); + let mut pox_constants = peer_config.clone().burnchain.pox_constants; + pox_constants.reward_cycle_length = 10; + pox_constants.v2_unlock_height = 21; + pox_constants.pox_3_activation_height = 26; + pox_constants.v3_unlock_height = 27; + pox_constants.pox_4_activation_height = 41; + pox_constants.prepare_length = 5; + let mut boot_plan = NakamotoBootPlan::new(test_name) + .with_test_stackers(test_stackers) + .with_test_signers(test_signers.clone()) + .with_private_key(private_key); + boot_plan.add_default_balance = false; + + boot_plan.initial_balances = initial_balances; + boot_plan.pox_constants = pox_constants.clone(); + burnchain.pox_constants = pox_constants.clone(); + peer_config.burnchain = burnchain.clone(); + peer_config.test_signers = Some(test_signers.clone()); + + info!("---- Booting into Nakamoto Peer ----"); + let mut peer = boot_plan.boot_into_nakamoto_peer(vec![], Some(observer)); + let sort_db = peer.sortdb.as_ref().unwrap(); + let latest_block = sort_db + .index_handle_at_tip() + .get_nakamoto_tip_block_id() + .unwrap() + .unwrap(); + let coinbase_nonce = 0; + + let burn_block_height = get_tip(peer.sortdb.as_ref()).block_height; + let reward_cycle = burnchain + .block_height_to_reward_cycle(burn_block_height) + .unwrap() as u128; + let min_ustx = get_stacking_minimum(&mut peer, &latest_block); + + ( + peer, + coinbase_nonce, + burn_block_height, + reward_cycle as u128, + reward_cycle.wrapping_add(1), + min_ustx as u128, + peer_config.clone(), + Some(test_signers), ) } +#[apply(nakamoto_cases)] // In this test two solo stacker-signers Alice & Bob sign & stack // for two reward cycles. Alice provides a signature, Bob uses // 'set-signer-key-authorizations' to authorize. Two cycles later, // when no longer stacked, they both try replaying their auths. 
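Aside: every scenario below follows the same pattern as the per-function tests above: the `#[test]` attribute becomes `#[apply(nakamoto_cases)]`, the body takes a `use_nakamoto: bool`, and the setup helper hands back an optional `TestSigners` that is threaded into each advance/tenure call. Assuming the `nakamoto_cases` template simply fans a boolean out over the test body (the real attribute comes from the rstest-style test machinery), the plain-Rust shape is roughly:

// Assumed plain-Rust approximation of the `nakamoto_cases` fan-out; the real
// attribute macro is the authoritative version.
fn for_each_nakamoto_case(mut body: impl FnMut(bool)) {
    for use_nakamoto in [false, true] {
        body(use_nakamoto);
    }
}

#[test]
fn example_case_runs_under_both_bootstraps() {
    for_each_nakamoto_case(|use_nakamoto| {
        // Everything that differs between the epoch-2.5 and Nakamoto runs hangs
        // off this flag: which peer gets booted, whether a signer set is
        // returned, and how many entries the reward set starts with.
        let baseline_reward_set_entries = if use_nakamoto { 2 } else { 1 };
        assert!(baseline_reward_set_entries >= 1);
    });
}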
-#[test] -fn test_scenario_one() { +fn test_scenario_one(use_nakamoto: bool) { // Alice solo stacker-signer setup let mut alice = StackerSignerInfo::new(); // Bob solo stacker-signer setup @@ -6551,12 +6846,24 @@ fn test_scenario_one() { mut peer, mut peer_nonce, burn_block_height, - target_height, reward_cycle, next_reward_cycle, min_ustx, peer_config, - ) = pox_4_scenario_test_setup("test_scenario_one", &observer, initial_balances); + mut test_signers, + ) = pox_4_scenario_test_setup( + "test_scenario_one", + &observer, + initial_balances, + use_nakamoto, + ); + + // Add alice and bob to test_signers + if let Some(ref mut test_signers) = test_signers.as_mut() { + test_signers + .signer_keys + .extend(vec![alice.private_key.clone(), bob.private_key.clone()]); + } // Alice Signatures let amount = (default_initial_balances / 2).wrapping_sub(1000) as u128; @@ -6683,8 +6990,14 @@ fn test_scenario_one() { .reward_cycle_to_block_height(next_reward_cycle as u64) .saturating_sub(peer.config.burnchain.pox_constants.prepare_length as u64) .wrapping_add(2); - let (latest_block, tx_block) = - advance_to_block_height(&mut peer, &observer, &txs, &mut peer_nonce, target_height); + let (latest_block, tx_block, receipts) = advance_to_block_height( + &mut peer, + &observer, + &txs, + &mut peer_nonce, + target_height, + &mut test_signers, + ); // Verify Alice stacked let (pox_address, first_reward_cycle, lock_period, _indices) = @@ -6701,8 +7014,7 @@ fn test_scenario_one() { assert_eq!(pox_address, bob.pox_address); // 1. Check bob's low authorization transaction - let bob_tx_result_low = tx_block - .receipts + let bob_tx_result_low = receipts .get(1) .unwrap() .result @@ -6712,8 +7024,7 @@ fn test_scenario_one() { assert_eq!(bob_tx_result_low, Value::Bool(true)); // 2. Check bob's expected authorization transaction - let bob_tx_result_ok = tx_block - .receipts + let bob_tx_result_ok = receipts .get(2) .unwrap() .result @@ -6723,8 +7034,7 @@ fn test_scenario_one() { assert_eq!(bob_tx_result_ok, Value::Bool(true)); // 3. Check alice's low stack transaction - let alice_tx_result_err = tx_block - .receipts + let alice_tx_result_err = receipts .get(3) .unwrap() .result @@ -6734,8 +7044,7 @@ fn test_scenario_one() { assert_eq!(alice_tx_result_err, Value::Int(38)); // Get alice's expected stack transaction - let alice_tx_result_ok = tx_block - .receipts + let alice_tx_result_ok = receipts .get(4) .unwrap() .result @@ -6778,8 +7087,7 @@ fn test_scenario_one() { assert_eq!(unlock_height_expected, unlock_height_actual); // 5. 
Check bob's error stack transaction - let bob_tx_result_err = tx_block - .receipts + let bob_tx_result_err = receipts .get(5) .unwrap() .result @@ -6789,8 +7097,7 @@ fn test_scenario_one() { assert_eq!(bob_tx_result_err, Value::Int(38)); // Get bob's expected stack transaction - let bob_tx_result_ok = tx_block - .receipts + let bob_tx_result_ok = receipts .get(6) .unwrap() .result @@ -6862,16 +7169,47 @@ fn test_scenario_one() { next_reward_cycle, ); bob.nonce += 1; - let txs = vec![alice_vote, bob_vote]; + let mut txs = vec![alice_vote, bob_vote]; + + // Also vote for aggregate key with default test signer if in Nakamoto: + if let Some(test_signers) = test_signers.clone() { + let tester_key = test_signers.signer_keys[0]; + let tester_addr = key_to_stacks_addr(&tester_key); + let tester_index = get_signer_index( + &mut peer, + latest_block, + tester_addr.clone(), + next_reward_cycle, + ); + let tester_vote = make_signers_vote_for_aggregate_public_key( + &tester_key, + 1, // only tx is a stack-stx + tester_index, + &peer_config.aggregate_public_key.unwrap(), + 1, + next_reward_cycle, + ); + txs.push(tester_vote); + } - let target_reward_cycle = 8; + let target_reward_cycle = next_reward_cycle + 1; // Commit vote txs & advance to the first burn block of reward cycle 8 (block 161) let mut target_height = peer .config .burnchain .reward_cycle_to_block_height(target_reward_cycle as u64); - let (latest_block, tx_block) = - advance_to_block_height(&mut peer, &observer, &txs, &mut peer_nonce, target_height); + info!( + "Submitting block with vote transactions and advancing to reward cycle {} at block {}", + target_reward_cycle, target_height + ); + let (latest_block, tx_block, _receipts) = advance_to_block_height( + &mut peer, + &observer, + &txs, + &mut peer_nonce, + target_height, + &mut test_signers, + ); let approved_key = get_approved_aggregate_key(&mut peer, latest_block, next_reward_cycle) .expect("No approved key found"); @@ -6886,7 +7224,7 @@ fn test_scenario_one() { &alice.pox_address, lock_period, &alice.public_key, - 161, + target_height, Some(alice_signature.clone()), u128::MAX, 1, @@ -6900,7 +7238,7 @@ fn test_scenario_one() { &bob.pox_address, lock_period, &bob.public_key, - 161, + target_height, None, u128::MAX, 3, @@ -6909,12 +7247,17 @@ fn test_scenario_one() { // Commit replay txs & advance to the second burn block of reward cycle 8 (block 162) target_height += 1; - let (latest_block, tx_block) = - advance_to_block_height(&mut peer, &observer, &txs, &mut peer_nonce, target_height); + let (latest_block, tx_block, receipts) = advance_to_block_height( + &mut peer, + &observer, + &txs, + &mut peer_nonce, + target_height, + &mut test_signers, + ); // Check Alice replay, expect (err 35) - ERR_INVALID_SIGNATURE_PUBKEY - let alice_replay_result = tx_block - .receipts + let alice_replay_result = receipts .get(1) .unwrap() .result @@ -6924,8 +7267,7 @@ fn test_scenario_one() { assert_eq!(alice_replay_result, Value::Int(35)); // Check Bob replay, expect (err 19) - ERR_SIGNER_AUTH_USED - let bob_tx_result = tx_block - .receipts + let bob_tx_result = receipts .get(2) .unwrap() .result @@ -6938,8 +7280,8 @@ fn test_scenario_one() { // In this test two solo service signers, Alice & Bob, provide auth // for Carl & Dave, solo stackers. Alice provides a signature for Carl, // Bob uses 'set-signer-key...' for Dave. 
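Aside: scenario one above, and the remaining scenarios below, derive their block targets from cycle geometry instead of hard-coded burn heights: stacking transactions are mined two blocks past the point where the next cycle's prepare phase opens, and the vote/replay transactions are mined at the cycle boundary itself. A sketch of the first computation, assuming the caller already has the next cycle's start height from `reward_cycle_to_block_height`:

/// The height the scenarios target for their stacking transactions: two blocks
/// after `next_cycle_start - prepare_length`, i.e. just inside the prepare
/// phase of the cycle that starts at `next_cycle_start`.
fn stacking_target_height(next_cycle_start: u64, prepare_length: u64) -> u64 {
    next_cycle_start
        .saturating_sub(prepare_length)
        .wrapping_add(2)
}

#[test]
fn target_sits_inside_the_prepare_phase() {
    // With 10-block cycles and a 5-block prepare phase (the Nakamoto test
    // constants), a cycle starting at burn height 60 is targeted at height 57.
    assert_eq!(stacking_target_height(60, 5), 57);
}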
-#[test] -fn test_scenario_two() { +#[apply(nakamoto_cases)] +fn test_scenario_two(use_nakamoto: bool) { // Alice service signer setup let mut alice = StackerSignerInfo::new(); // Bob service signer setup @@ -6961,12 +7303,24 @@ fn test_scenario_two() { mut peer, mut peer_nonce, burn_block_height, - target_height, reward_cycle, next_reward_cycle, min_ustx, peer_config, - ) = pox_4_scenario_test_setup("test_scenario_two", &observer, initial_balances); + mut test_signers, + ) = pox_4_scenario_test_setup( + "test_scenario_two", + &observer, + initial_balances, + use_nakamoto, + ); + + // Add to test signers + if let Some(ref mut test_signers) = test_signers.as_mut() { + test_signers + .signer_keys + .extend(vec![alice.private_key.clone(), bob.private_key.clone()]); + } // Alice Signature For Carl let amount = (default_initial_balances / 2).wrapping_sub(1000) as u128; @@ -7070,8 +7424,14 @@ fn test_scenario_two() { .reward_cycle_to_block_height(next_reward_cycle as u64) .saturating_sub(peer_config.burnchain.pox_constants.prepare_length as u64) .wrapping_add(2); - let (latest_block, tx_block) = - advance_to_block_height(&mut peer, &observer, &txs, &mut peer_nonce, target_height); + let (latest_block, tx_block, receipts) = advance_to_block_height( + &mut peer, + &observer, + &txs, + &mut peer_nonce, + target_height, + &mut test_signers, + ); // Verify Carl Stacked let (pox_address, first_reward_cycle, lock_period, _indices) = @@ -7086,8 +7446,7 @@ fn test_scenario_two() { assert_eq!(pox_address, dave.pox_address); // Check Carl's malformed signature stack transaction (err 35 - INVALID_SIGNATURE_PUBKEY) - let carl_tx_result_err = tx_block - .receipts + let carl_tx_result_err = receipts .get(2) .unwrap() .result @@ -7097,8 +7456,7 @@ fn test_scenario_two() { assert_eq!(carl_tx_result_err, Value::Int(35)); // Check Carl's expected stack transaction - let carl_tx_result_ok = tx_block - .receipts + let carl_tx_result_ok = receipts .get(3) .unwrap() .result @@ -7127,8 +7485,7 @@ fn test_scenario_two() { assert_eq!(signer_key_expected, signer_key_actual); // Check Dave's malformed pox stack transaction (err 19 - INVALID_SIGNER_AUTH) - let dave_tx_result_err = tx_block - .receipts + let dave_tx_result_err = receipts .get(4) .unwrap() .result @@ -7138,8 +7495,7 @@ fn test_scenario_two() { assert_eq!(dave_tx_result_err, Value::Int(19)); // Check Dave's expected stack transaction - let dave_tx_result_ok = tx_block - .receipts + let dave_tx_result_ok = receipts .get(5) .unwrap() .result @@ -7228,18 +7584,23 @@ fn test_scenario_two() { bob_vote_expected, ]; - let target_reward_cycle = 8; + let target_reward_cycle = next_reward_cycle; // Commit vote txs & advance to the first burn block of reward cycle 8 (block 161) let target_height = peer .config .burnchain .reward_cycle_to_block_height(target_reward_cycle as u64); - let (latest_block, tx_block) = - advance_to_block_height(&mut peer, &observer, &txs, &mut peer_nonce, target_height); + let (latest_block, tx_block, receipts) = advance_to_block_height( + &mut peer, + &observer, + &txs, + &mut peer_nonce, + target_height, + &mut test_signers, + ); // Check Alice's expected vote - let alice_expected_vote = tx_block - .receipts + let alice_expected_vote = receipts .get(1) .unwrap() .result @@ -7249,8 +7610,7 @@ fn test_scenario_two() { assert_eq!(alice_expected_vote, Value::Bool(true)); // Check Alice's duplicate vote (err 15 - DUPLICATE_ROUND) - let alice_duplicate_vote = tx_block - .receipts + let alice_duplicate_vote = receipts .get(2) .unwrap() .result @@ 
-7260,8 +7620,7 @@ fn test_scenario_two() { assert_eq!(alice_duplicate_vote, Value::UInt(15)); // Check Bob's round err vote (err 17 - INVALID_ROUND) - let bob_round_err_vote = tx_block - .receipts + let bob_round_err_vote = receipts .get(3) .unwrap() .result @@ -7271,8 +7630,7 @@ fn test_scenario_two() { assert_eq!(bob_round_err_vote, Value::UInt(17)); // Check Bob's expected vote - let bob_expected_vote = tx_block - .receipts + let bob_expected_vote = receipts .get(4) .unwrap() .result @@ -7282,10 +7640,10 @@ fn test_scenario_two() { assert_eq!(bob_expected_vote, Value::Bool(true)); } +#[apply(nakamoto_cases)] // In this scenario, two solo stacker-signers (Alice, Bob), one service signer (Carl), // one stacking pool operator (Dave), & three pool stackers (Eve, Frank, Grace). -#[test] -fn test_scenario_three() { +fn test_scenario_three(use_nakamoto: bool) { // Alice stacker signer setup let mut alice = StackerSignerInfo::new(); // Bob stacker signer setup @@ -7316,12 +7674,26 @@ fn test_scenario_three() { mut peer, mut peer_nonce, burn_block_height, - target_height, reward_cycle, next_reward_cycle, min_ustx, peer_config, - ) = pox_4_scenario_test_setup("test_scenario_three", &observer, initial_balances); + mut test_signers, + ) = pox_4_scenario_test_setup( + "test_scenario_three", + &observer, + initial_balances, + use_nakamoto, + ); + + // Add to test signers + if let Some(ref mut test_signers) = test_signers.as_mut() { + test_signers.signer_keys.extend(vec![ + alice.private_key.clone(), + bob.private_key.clone(), + carl.private_key.clone(), + ]); + } let lock_period = 2; let amount = (default_initial_balances / 2).wrapping_sub(1000) as u128; @@ -7563,13 +7935,18 @@ fn test_scenario_three() { .reward_cycle_to_block_height(next_reward_cycle as u64) .saturating_sub(peer_config.burnchain.pox_constants.prepare_length as u64) .wrapping_add(2); - let (latest_block, tx_block) = - advance_to_block_height(&mut peer, &observer, &txs, &mut peer_nonce, target_height); + let (latest_block, tx_block, receipts) = advance_to_block_height( + &mut peer, + &observer, + &txs, + &mut peer_nonce, + target_height, + &mut test_signers, + ); // Start of test checks // 1. Check that Alice can't stack with an lock_period different than signature - let alice_stack_tx_err = tx_block - .receipts + let alice_stack_tx_err = receipts .get(1) .unwrap() .result @@ -7579,8 +7956,7 @@ fn test_scenario_three() { assert_eq!(alice_stack_tx_err, Value::Int(35)); // 2. Check that Alice can solo stack-sign - let alice_stack_tx_ok = tx_block - .receipts + let alice_stack_tx_ok = receipts .get(2) .unwrap() .result @@ -7609,8 +7985,7 @@ fn test_scenario_three() { assert_eq!(signer_key_expected, signer_key_actual); // 3. Check that Bob can't stack with a signature that points to a reward cycle in the past - let bob_stack_tx_err = tx_block - .receipts + let bob_stack_tx_err = receipts .get(3) .unwrap() .result @@ -7620,8 +7995,7 @@ fn test_scenario_three() { assert_eq!(bob_stack_tx_err, Value::Int(35)); // 4. Check that Bob can solo stack-sign - let bob_stack_tx_ok = tx_block - .receipts + let bob_stack_tx_ok = receipts .get(4) .unwrap() .result @@ -7642,8 +8016,7 @@ fn test_scenario_three() { assert_eq!(signer_key_actual, signer_key_actual); // 5. 
Check that David can't delegate-stack-stx Eve if delegation expires during lock period - let eve_delegate_stx_to_david_err = tx_block - .receipts + let eve_delegate_stx_to_david_err = receipts .get(9) .unwrap() .result @@ -7653,8 +8026,7 @@ fn test_scenario_three() { assert_eq!(eve_delegate_stx_to_david_err, Value::Int(21)); // 6. Check that Frank is correctly delegated to David - let frank_delegate_stx_to_david_tx = tx_block - .receipts + let frank_delegate_stx_to_david_tx = receipts .get(10) .unwrap() .result @@ -7683,8 +8055,7 @@ fn test_scenario_three() { assert_eq!(stacker_expected, stacker_actual); // 7. Check that Grace is correctly delegated to David - let grace_delegate_stx_to_david_tx = tx_block - .receipts + let grace_delegate_stx_to_david_tx = receipts .get(11) .unwrap() .result @@ -7713,8 +8084,7 @@ fn test_scenario_three() { assert_eq!(stacker_expected, stacker_actual); // 8. Check that Alice can't delegate-stack if already stacking - let alice_delegate_stx_to_david_err = tx_block - .receipts + let alice_delegate_stx_to_david_err = receipts .get(12) .unwrap() .result @@ -7724,8 +8094,7 @@ fn test_scenario_three() { assert_eq!(alice_delegate_stx_to_david_err, Value::Int(3)); // 9. Check that David can't aggregate-commit-indexed if pointing to a reward cycle in the future - let david_aggregate_commit_indexed_err = tx_block - .receipts + let david_aggregate_commit_indexed_err = receipts .get(13) .unwrap() .result @@ -7735,8 +8104,7 @@ fn test_scenario_three() { assert_eq!(david_aggregate_commit_indexed_err, Value::Int(35)); // 10. Check that David can aggregate-commit-indexed if using the incorrect signature topic - let david_aggregate_commit_indexed_err = tx_block - .receipts + let david_aggregate_commit_indexed_err = receipts .get(14) .unwrap() .result @@ -7745,23 +8113,24 @@ fn test_scenario_three() { .unwrap(); assert_eq!(david_aggregate_commit_indexed_err, Value::Int(35)); + let david_index = if use_nakamoto { 3 } else { 2 }; + // 11. Check that David can aggregate-commit-indexed successfully, checking stacking index = 2 - let david_aggregate_commit_indexed_ok = tx_block - .receipts + let david_aggregate_commit_indexed_ok = receipts .get(15) .unwrap() .result .clone() .expect_result_ok() .unwrap(); - assert_eq!(david_aggregate_commit_indexed_ok, Value::UInt(2)); + assert_eq!(david_aggregate_commit_indexed_ok, Value::UInt(david_index)); } +#[apply(nakamoto_cases)] // In this test scenario two solo stacker-signers (Alice & Bob), // test out the updated stack-extend & stack-increase functions // across multiple cycles. 
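Aside: `advance_to_block_height` now returns the transaction receipts as a third tuple element, and the assertions in the scenarios above and below read from `receipts` rather than `tx_block.receipts`, presumably because the Nakamoto path mines the transactions inside a Nakamoto tenure rather than a single epoch-2.x anchored block. The repeated `.get(n) ... expect_result_ok().unwrap()` chains boil down to a helper like this, written with simplified stand-in types (the real code works on `clarity::vm::Value` receipts):

#[derive(Clone, Debug)]
enum TxResult {
    Ok(i128),
    Err(i128),
}

struct Receipt {
    result: TxResult,
}

/// Panic unless the nth receipt exists and is an `(ok ...)`, mirroring the
/// `.get(n).unwrap().result.clone().expect_result_ok().unwrap()` chains above.
fn nth_ok(receipts: &[Receipt], n: usize) -> i128 {
    match receipts.get(n).expect("missing receipt").result.clone() {
        TxResult::Ok(value) => value,
        TxResult::Err(code) => panic!("expected ok result, got (err {code})"),
    }
}

#[test]
fn nth_ok_unwraps_successful_results() {
    let receipts = vec![
        Receipt { result: TxResult::Ok(0) },   // index 0 is skipped by the scenario checks
        Receipt { result: TxResult::Ok(2) },   // first user transaction
        Receipt { result: TxResult::Err(35) }, // e.g. ERR_INVALID_SIGNATURE_PUBKEY
    ];
    assert_eq!(nth_ok(&receipts, 1), 2);
}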
-#[test] -fn test_scenario_four() { +fn test_scenario_four(use_nakamoto: bool) { // Alice service signer setup let mut alice = StackerSignerInfo::new(); // Bob service signer setup @@ -7777,12 +8146,24 @@ fn test_scenario_four() { mut peer, mut peer_nonce, burn_block_height, - target_height, reward_cycle, next_reward_cycle, min_ustx, peer_config, - ) = pox_4_scenario_test_setup("test_scenario_four", &observer, initial_balances); + mut test_signers, + ) = pox_4_scenario_test_setup( + "test_scenario_four", + &observer, + initial_balances, + use_nakamoto, + ); + + // Add to test signers + if let Some(ref mut test_signers) = test_signers.as_mut() { + test_signers + .signer_keys + .extend(vec![alice.private_key.clone(), bob.private_key.clone()]); + } // Initial Alice Signature let amount = (default_initial_balances / 2).wrapping_sub(1000) as u128; @@ -7800,17 +8181,21 @@ fn test_scenario_four() { let alice_signature_extend_err = make_signer_key_signature( &bob.pox_address, &bob.private_key, - next_reward_cycle.wrapping_add(1), + next_reward_cycle, &Pox4SignatureTopic::StackExtend, lock_period, u128::MAX, 1, ); + info!( + "Generating stack-extend signature for cycle {}", + next_reward_cycle + ); // Extend Alice Signature Expected let alice_signature_extend = make_signer_key_signature( &alice.pox_address, &alice.private_key, - next_reward_cycle.wrapping_add(1), + next_reward_cycle, &Pox4SignatureTopic::StackExtend, lock_period, u128::MAX, @@ -7864,8 +8249,14 @@ fn test_scenario_four() { .reward_cycle_to_block_height(next_reward_cycle as u64) .saturating_sub(peer_config.burnchain.pox_constants.prepare_length as u64) .wrapping_add(2); - let (latest_block, tx_block) = - advance_to_block_height(&mut peer, &observer, &txs, &mut peer_nonce, target_height); + let (latest_block, tx_block, _receipts) = advance_to_block_height( + &mut peer, + &observer, + &txs, + &mut peer_nonce, + target_height, + &mut test_signers, + ); // Verify Alice Stacked let (pox_address, first_reward_cycle, lock_period, _indices) = @@ -7923,24 +8314,50 @@ fn test_scenario_four() { next_reward_cycle, ); bob.nonce += 1; - let txs = vec![ + let mut txs = vec![ alice_vote_err.clone(), alice_vote_expected.clone(), bob_vote_expected.clone(), ]; + // Also vote for aggregate key with default test signer if in Nakamoto: + if let Some(test_signers) = test_signers.clone() { + let tester_key = test_signers.signer_keys[0]; + let tester_addr = key_to_stacks_addr(&tester_key); + let tester_index = get_signer_index( + &mut peer, + latest_block, + tester_addr.clone(), + next_reward_cycle, + ); + let tester_vote = make_signers_vote_for_aggregate_public_key( + &tester_key, + 1, // only tx is a stack-stx + tester_index, + &peer_config.aggregate_public_key.unwrap(), + 1, + next_reward_cycle, + ); + txs.push(tester_vote); + } + // Commit vote txs & move to the prepare phase of reward cycle 7 (block 155) let target_height = peer .config .burnchain - .reward_cycle_to_block_height(7 as u64) - .wrapping_add(15); - let (latest_block, tx_block) = - advance_to_block_height(&mut peer, &observer, &txs, &mut peer_nonce, target_height); + .reward_cycle_to_block_height(next_reward_cycle as u64 + 1) + .saturating_sub(peer_config.burnchain.pox_constants.prepare_length as u64); + let (latest_block, tx_block, receipts) = advance_to_block_height( + &mut peer, + &observer, + &txs, + &mut peer_nonce, + target_height, + &mut test_signers, + ); // Check Alice's err vote (err 10 - INVALID_SIGNER_INDEX) - let alice_err_vote = tx_block - .receipts + let alice_err_vote = 
receipts .get(1) .unwrap() .result @@ -7950,8 +8367,7 @@ fn test_scenario_four() { assert_eq!(alice_err_vote, Value::UInt(10)); // Check Alice's expected vote - let alice_expected_vote = tx_block - .receipts + let alice_expected_vote = receipts .get(2) .unwrap() .result @@ -7961,8 +8377,7 @@ fn test_scenario_four() { assert_eq!(alice_expected_vote, Value::Bool(true)); // Check Bob's expected vote - let bob_expected_vote = tx_block - .receipts + let bob_expected_vote = receipts .get(3) .unwrap() .result @@ -8019,12 +8434,17 @@ fn test_scenario_four() { alice_vote_expected_err.clone(), ]; let target_height = target_height.wrapping_add(1); - let (latest_block, tx_block) = - advance_to_block_height(&mut peer, &observer, &txs, &mut peer_nonce, target_height); + let (latest_block, tx_block, receipts) = advance_to_block_height( + &mut peer, + &observer, + &txs, + &mut peer_nonce, + target_height, + &mut test_signers, + ); // Check Alice's err stack-extend tx (err 35 - INVALID_SIGNATURE_PUBKEY) - let alice_err_extend = tx_block - .receipts + let alice_err_extend = receipts .get(1) .unwrap() .result @@ -8034,8 +8454,7 @@ fn test_scenario_four() { assert_eq!(alice_err_extend, Value::Int(35)); // Check Alice's stack-extend tx - let alice_extend_receipt = tx_block - .receipts + let alice_extend_receipt = receipts .get(2) .unwrap() .result @@ -8044,8 +8463,7 @@ fn test_scenario_four() { .unwrap(); // Check Alice's expected err vote (err 14 - DUPLICATE_AGGREGATE_PUBLIC_KEY) - let alice_expected_vote_err = tx_block - .receipts + let alice_expected_vote_err = receipts .get(3) .unwrap() .result @@ -8062,12 +8480,19 @@ fn test_scenario_four() { // In this test case, Alice delegates twice the stacking minimum to Bob. // Bob stacks Alice's funds, and then immediately tries to stacks-aggregation-increase. // This should return a clarity user error. 
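Aside: scenario three above now expects `stack-aggregation-commit-indexed` to return index 3 rather than 2 under Nakamoto, and `delegate_stack_increase_err` below passes slot 1 rather than 0, for what appears to be the same reason the length checks expect an extra reward-set entry: the bootstrap's default test signer already holds a slot, shifting every expected index by one. A trivial sketch of that adjustment (assumed helper, illustration only):

fn expected_slot_index(index_without_boot_signer: u128, use_nakamoto: bool) -> u128 {
    if use_nakamoto {
        index_without_boot_signer + 1
    } else {
        index_without_boot_signer
    }
}

#[test]
fn slot_indices_shift_by_one_under_nakamoto() {
    assert_eq!(expected_slot_index(2, true), 3);
    assert_eq!(expected_slot_index(0, true), 1);
    assert_eq!(expected_slot_index(2, false), 2);
}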
-#[test] -fn delegate_stack_increase_err() { +#[apply(nakamoto_cases)] +fn delegate_stack_increase_err(use_nakamoto: bool) { let lock_period: u128 = 2; let observer = TestEventObserver::new(); - let (burnchain, mut peer, keys, latest_block, block_height, mut coinbase_nonce) = - prepare_pox4_test(function_name!(), Some(&observer)); + let ( + burnchain, + mut peer, + keys, + latest_block, + block_height, + mut coinbase_nonce, + mut test_signers, + ) = prepare_pox4_test(function_name!(), Some(&observer), use_nakamoto); let alice_nonce = 0; let alice_key = &keys[0]; @@ -8113,7 +8538,7 @@ fn delegate_stack_increase_err() { let txs = vec![delegate_stx, delegate_stack_stx]; - let latest_block = peer.tenure_with_txs(&txs, &mut coinbase_nonce); + let latest_block = tenure_with_txs(&mut peer, &txs, &mut coinbase_nonce, &mut test_signers); bob_nonce += 1; @@ -8127,13 +8552,15 @@ fn delegate_stack_increase_err() { 1, ); + let slot_idx = if use_nakamoto { 1 } else { 0 }; + // Bob's Aggregate Increase let bobs_aggregate_increase = make_pox_4_aggregation_increase( &bob_delegate_key, bob_nonce, &pox_addr, next_reward_cycle.into(), - 0, + slot_idx, Some(signature), &signer_pk, u128::MAX, @@ -8142,7 +8569,7 @@ fn delegate_stack_increase_err() { let txs = vec![bobs_aggregate_increase]; - let latest_block = peer.tenure_with_txs(&txs, &mut coinbase_nonce); + let latest_block = tenure_with_txs(&mut peer, &txs, &mut coinbase_nonce, &mut test_signers); let delegate_transactions = get_last_block_sender_transactions(&observer, key_to_stacks_addr(bob_delegate_key)); @@ -8157,7 +8584,11 @@ fn delegate_stack_increase_err() { // test that the reward set is empty let reward_cycle_ht = burnchain.reward_cycle_to_block_height(next_reward_cycle); let reward_set = get_reward_set_entries_at(&mut peer, &latest_block, reward_cycle_ht); - assert!(reward_set.is_empty()); + if use_nakamoto { + assert_eq!(reward_set.len(), 1); + } else { + assert!(reward_set.is_empty()); + } } pub fn get_stacking_state_pox_4( @@ -8358,6 +8789,7 @@ pub fn get_stacking_minimum(peer: &mut TestPeer, latest_block: &StacksBlockId) - pub fn prepare_pox4_test<'a>( test_name: &str, observer: Option<&'a TestEventObserver>, + use_nakamoto: bool, ) -> ( Burnchain, TestPeer<'a>, @@ -8365,8 +8797,9 @@ pub fn prepare_pox4_test<'a>( StacksBlockId, u64, usize, + Option, ) { - let (epochs, pox_constants) = make_test_epochs_pox(); + let (epochs, pox_constants) = make_test_epochs_pox(use_nakamoto); let mut burnchain = Burnchain::default_unittest( 0, @@ -8377,33 +8810,144 @@ pub fn prepare_pox4_test<'a>( let (mut peer, keys) = instantiate_pox_peer_with_epoch(&burnchain, test_name, Some(epochs.clone()), observer); - assert_eq!(burnchain.pox_constants.reward_slots(), 6); - let mut coinbase_nonce = 0; + if use_nakamoto { + let test_key = keys[3].clone(); + let test_keys = vec![test_key.clone()]; + + let private_key = StacksPrivateKey::from_seed(&[2]); + let test_signers = TestSigners::new(test_keys.clone()); + let test_stackers = test_keys + .iter() + .map(|key| TestStacker { + signer_private_key: key.clone(), + stacker_private_key: key.clone(), + amount: 1024 * POX_THRESHOLD_STEPS_USTX, + pox_addr: Some(pox_addr_from(&key)), + max_amount: None, + }) + .collect::>(); + let mut pox_constants = TestPeerConfig::default().burnchain.pox_constants; + pox_constants.reward_cycle_length = 10; + pox_constants.v2_unlock_height = 21; + pox_constants.pox_3_activation_height = 26; + pox_constants.v3_unlock_height = 27; + pox_constants.pox_4_activation_height = 41; + 
pox_constants.prepare_length = 5; + let mut boot_plan = NakamotoBootPlan::new(test_name) + .with_test_stackers(test_stackers) + .with_test_signers(test_signers.clone()) + .with_private_key(private_key); + boot_plan.add_default_balance = false; + let addrs: Vec = keys.iter().map(|pk| key_to_stacks_addr(pk)).collect(); + + let balances: Vec<(PrincipalData, u64)> = addrs + .clone() + .into_iter() + .map(|addr| (addr.into(), (1024 * POX_THRESHOLD_STEPS_USTX) as u64)) + .collect(); + boot_plan.initial_balances = balances; + boot_plan.pox_constants = pox_constants.clone(); + burnchain.pox_constants = pox_constants.clone(); + + info!("---- Booting into Nakamoto Peer ----"); + let peer = boot_plan.boot_into_nakamoto_peer(vec![], observer); + let sort_db = peer.sortdb.as_ref().unwrap(); + let latest_block = sort_db + .index_handle_at_tip() + .get_nakamoto_tip_block_id() + .unwrap() + .unwrap(); + let coinbase_nonce = 0; - // Advance into pox4 - let target_height = burnchain.pox_constants.pox_4_activation_height; - let mut latest_block = peer.tenure_with_txs(&[], &mut coinbase_nonce); - while get_tip(peer.sortdb.as_ref()).block_height < u64::from(target_height) { - latest_block = peer.tenure_with_txs(&[], &mut coinbase_nonce); - // if we reach epoch 2.1, perform the check - if get_tip(peer.sortdb.as_ref()).block_height > epochs[3].start_height { - assert_latest_was_burn(&mut peer); - } - } + let block_height = get_tip(peer.sortdb.as_ref()).block_height; - let block_height = get_tip(peer.sortdb.as_ref()).block_height; + info!("Block height: {}", block_height); - info!("Block height: {}", block_height); + ( + burnchain, + peer, + keys, + latest_block, + block_height, + coinbase_nonce, + Some(test_signers), + ) + } else { + // Advance into pox4 + let target_height = burnchain.pox_constants.pox_4_activation_height; + let mut coinbase_nonce = 0; + let mut latest_block = peer.tenure_with_txs(&[], &mut coinbase_nonce); + while get_tip(peer.sortdb.as_ref()).block_height < u64::from(target_height) { + latest_block = peer.tenure_with_txs(&[], &mut coinbase_nonce); + // if we reach epoch 2.1, perform the check + if get_tip(peer.sortdb.as_ref()).block_height > epochs[3].start_height { + assert_latest_was_burn(&mut peer); + } + } + let block_height = get_tip(peer.sortdb.as_ref()).block_height; + ( + burnchain, + peer, + keys, + latest_block, + block_height, + coinbase_nonce, + None, + ) + } +} - ( - burnchain, - peer, - keys, - latest_block, - block_height, - coinbase_nonce, - ) +pub fn tenure_with_txs( + peer: &mut TestPeer, + txs: &[StacksTransaction], + coinbase_nonce: &mut usize, + test_signers: &mut Option, +) -> StacksBlockId { + if let Some(test_signers) = test_signers { + let (burn_ops, mut tenure_change, miner_key) = + peer.begin_nakamoto_tenure(TenureChangeCause::BlockFound); + let (_, _, consensus_hash) = peer.next_burnchain_block(burn_ops.clone()); + let vrf_proof = peer.make_nakamoto_vrf_proof(miner_key); + + tenure_change.tenure_consensus_hash = consensus_hash.clone(); + tenure_change.burn_view_consensus_hash = consensus_hash.clone(); + + let tenure_change_tx = peer + .miner + .make_nakamoto_tenure_change(tenure_change.clone()); + let coinbase_tx = peer.miner.make_nakamoto_coinbase(None, vrf_proof); + + let blocks_and_sizes = peer.make_nakamoto_tenure( + tenure_change_tx, + coinbase_tx, + test_signers, + |_miner, _chainstate, _sort_dbconn, _blocks| { + info!("Building nakamoto block. 
Blocks len {}", _blocks.len()); + if _blocks.len() == 0 { + txs.to_vec() + } else { + vec![] + } + }, + ); + let blocks: Vec<_> = blocks_and_sizes + .into_iter() + .map(|(block, _, _)| block) + .collect(); + + let chainstate = &mut peer.stacks_node.as_mut().unwrap().chainstate; + let sort_db = peer.sortdb.as_mut().unwrap(); + let latest_block = sort_db + .index_handle_at_tip() + .get_nakamoto_tip_block_id() + .unwrap() + .unwrap(); + latest_block + } else { + peer.tenure_with_txs(txs, coinbase_nonce) + } } + pub fn get_last_block_sender_transactions( observer: &TestEventObserver, address: StacksAddress, @@ -8434,7 +8978,7 @@ fn missed_slots_no_unlock() { // tenures start being tracked. let EMPTY_SORTITIONS = 25; - let (epochs, mut pox_constants) = make_test_epochs_pox(); + let (epochs, mut pox_constants) = make_test_epochs_pox(false); pox_constants.pox_4_activation_height = u32::try_from(epochs[7].start_height).unwrap() + 1; let mut burnchain = Burnchain::default_unittest( @@ -8685,7 +9229,7 @@ fn no_lockups_2_5() { // tenures start being tracked. let EMPTY_SORTITIONS = 25; - let (epochs, mut pox_constants) = make_test_epochs_pox(); + let (epochs, mut pox_constants) = make_test_epochs_pox(false); pox_constants.pox_4_activation_height = u32::try_from(epochs[7].start_height).unwrap() + 1; let mut burnchain = Burnchain::default_unittest( @@ -8783,8 +9327,8 @@ fn no_lockups_2_5() { // 5. Carl stx-stacks & self-signs for 3 reward cycle // 6. In Carl's second reward cycle, he calls stx-extend for 3 more reward cycles // 7. In Carl's third reward cycle, he calls stx-increase and should fail as he is straddling 2 keys -#[test] -fn test_scenario_five() { +#[apply(nakamoto_cases)] +fn test_scenario_five(use_nakamoto: bool) { // Alice service signer setup let mut alice = StackerSignerInfo::new(); // Bob service signer setup @@ -8827,12 +9371,28 @@ fn test_scenario_five() { mut peer, mut peer_nonce, burn_block_height, - target_height, reward_cycle, next_reward_cycle, min_ustx, mut peer_config, - ) = pox_4_scenario_test_setup("test_scenario_five", &observer, initial_balances); + mut test_signers, + ) = pox_4_scenario_test_setup( + "test_scenario_five", + &observer, + initial_balances, + use_nakamoto, + ); + + // Add to test signers + if let Some(ref mut test_signers) = test_signers.as_mut() { + test_signers.signer_keys.extend(vec![ + alice.private_key.clone(), + bob.private_key.clone(), + carl.private_key.clone(), + david.private_key.clone(), + eve.private_key.clone(), + ]); + } // Lock periods for each stacker let carl_lock_period = 3; @@ -9091,8 +9651,20 @@ fn test_scenario_five() { .reward_cycle_to_block_height(next_reward_cycle as u64) .saturating_sub(peer_config.burnchain.pox_constants.prepare_length as u64) .wrapping_add(2); - let (latest_block, tx_block) = - advance_to_block_height(&mut peer, &observer, &txs, &mut peer_nonce, target_height); + info!( + "Scenario five: submitting stacking txs."; + "target_height" => target_height, + "next_reward_cycle" => next_reward_cycle, + "prepare_length" => peer_config.burnchain.pox_constants.prepare_length, + ); + let (latest_block, tx_block, _receipts) = advance_to_block_height( + &mut peer, + &observer, + &txs, + &mut peer_nonce, + target_height, + &mut test_signers, + ); // Check that all of David's stackers have been added to the reward set for (stacker, stacker_lock_period) in davids_stackers { @@ -9166,18 +9738,22 @@ fn test_scenario_five() { alice.nonce += 1; bob.nonce += 1; carl.nonce += 1; - // Mine vote txs & advance to the reward set calculation 
of the next reward cycle let target_height = peer .config .burnchain .reward_cycle_to_block_height(next_reward_cycle as u64); - let (latest_block, tx_block) = advance_to_block_height( + info!( + "Scenario five: submitting votes. Target height: {}", + target_height + ); + let (latest_block, tx_block, _receipts) = advance_to_block_height( &mut peer, &observer, &vote_txs, &mut peer_nonce, target_height, + &mut test_signers, ); let mut observed_txs = HashSet::new(); @@ -9278,21 +9854,33 @@ fn test_scenario_five() { .reward_cycle_to_block_height(next_reward_cycle as u64) .saturating_sub(peer_config.burnchain.pox_constants.prepare_length as u64) .wrapping_add(2); - let (latest_block, tx_block) = - advance_to_block_height(&mut peer, &observer, &txs, &mut peer_nonce, target_height); + info!( + "Scenario five: submitting extend and aggregate commit txs. Target height: {}", + target_height + ); + let (latest_block, tx_block, receipts) = advance_to_block_height( + &mut peer, + &observer, + &txs, + &mut peer_nonce, + target_height, + &mut test_signers, + ); // Check that all of David's stackers are stacked - for (stacker, stacker_lock_period) in davids_stackers { + for (idx, (stacker, stacker_lock_period)) in davids_stackers.iter().enumerate() { let (pox_address, first_reward_cycle, lock_period, _indices) = - get_stacker_info_pox_4(&mut peer, &stacker.principal).expect("Failed to find stacker"); + get_stacker_info_pox_4(&mut peer, &stacker.principal) + .expect(format!("Failed to find stacker {}", idx).as_str()); assert_eq!(first_reward_cycle, reward_cycle); assert_eq!(pox_address, david.pox_address); assert_eq!(lock_period, *stacker_lock_period); } // Check that all of Eve's stackers are stacked - for (stacker, stacker_lock_period) in eves_stackers { + for (idx, (stacker, stacker_lock_period)) in eves_stackers.iter().enumerate() { let (pox_address, first_reward_cycle, lock_period, _indices) = - get_stacker_info_pox_4(&mut peer, &stacker.principal).expect("Failed to find stacker"); + get_stacker_info_pox_4(&mut peer, &stacker.principal) + .expect(format!("Failed to find stacker {}", idx).as_str()); assert_eq!(first_reward_cycle, reward_cycle); assert_eq!(pox_address, eve.pox_address); assert_eq!(lock_period, *stacker_lock_period); @@ -9364,12 +9952,17 @@ fn test_scenario_five() { .burnchain .reward_cycle_to_block_height(next_reward_cycle as u64); // Submit vote transactions - let (latest_block, tx_block) = advance_to_block_height( + info!( + "Scenario five: submitting votes. 
Target height: {}", + target_height + ); + let (latest_block, tx_block, _receipts) = advance_to_block_height( &mut peer, &observer, &vote_txs, &mut peer_nonce, target_height, + &mut test_signers, ); let mut observed_txs = HashSet::new(); @@ -9478,8 +10071,15 @@ fn test_scenario_five() { (heidi.clone(), heidi_lock_period), ]; - let (latest_block, tx_block) = - advance_to_block_height(&mut peer, &observer, &txs, &mut peer_nonce, target_height); + info!("Scenario five: submitting increase and aggregate-commit txs"); + let (latest_block, tx_block, receipts) = advance_to_block_height( + &mut peer, + &observer, + &txs, + &mut peer_nonce, + target_height, + &mut test_signers, + ); for (stacker, _) in davids_stackers { let (pox_address, first_reward_cycle, _lock_period, _indices) = @@ -9503,6 +10103,6 @@ fn test_scenario_five() { assert_eq!(pox_address, carl.pox_address); // Assert that carl's error is err(40) - let carl_increase_err = tx_block.receipts[1].clone().result; + let carl_increase_err = receipts[1].clone().result; assert_eq!(carl_increase_err, Value::error(Value::Int(40)).unwrap()); } diff --git a/stackslib/src/chainstate/stacks/boot/signers_tests.rs b/stackslib/src/chainstate/stacks/boot/signers_tests.rs index 37b2e016b7..bf3b5f312c 100644 --- a/stackslib/src/chainstate/stacks/boot/signers_tests.rs +++ b/stackslib/src/chainstate/stacks/boot/signers_tests.rs @@ -171,7 +171,8 @@ fn make_signer_sanity_panic_1() { #[test] fn signers_get_config() { - let (burnchain, mut peer, keys, latest_block, ..) = prepare_pox4_test(function_name!(), None); + let (burnchain, mut peer, keys, latest_block, ..) = + prepare_pox4_test(function_name!(), None, false); assert_eq!( readonly_call( diff --git a/stackslib/src/chainstate/stacks/db/blocks.rs b/stackslib/src/chainstate/stacks/db/blocks.rs index be05151c12..47cace8c4b 100644 --- a/stackslib/src/chainstate/stacks/db/blocks.rs +++ b/stackslib/src/chainstate/stacks/db/blocks.rs @@ -4966,7 +4966,7 @@ impl StacksChainState { chain_tip_burn_header_height: u32, parent_sortition_id: &SortitionId, ) -> Result, Error> { - let pox_reward_cycle = Burnchain::static_block_height_to_reward_cycle( + let pox_reward_cycle = PoxConstants::static_block_height_to_reward_cycle( burn_tip_height, burn_dbconn.get_burn_start_height().into(), burn_dbconn.get_pox_reward_cycle_length().into(), diff --git a/stackslib/src/chainstate/stacks/db/mod.rs b/stackslib/src/chainstate/stacks/db/mod.rs index 60a950d543..e5bf968c66 100644 --- a/stackslib/src/chainstate/stacks/db/mod.rs +++ b/stackslib/src/chainstate/stacks/db/mod.rs @@ -2753,7 +2753,7 @@ pub mod test { } pub fn chainstate_path(test_name: &str) -> String { - format!("/tmp/blockstack-test-chainstate-{}", test_name) + format!("/tmp/stacks-node-tests/cs-{}", test_name) } #[test] diff --git a/stackslib/src/core/tests/mod.rs b/stackslib/src/core/tests/mod.rs index 158feeeba5..72b29cc097 100644 --- a/stackslib/src/core/tests/mod.rs +++ b/stackslib/src/core/tests/mod.rs @@ -1392,8 +1392,9 @@ fn mempool_do_not_replace_tx() { #[case(MempoolCollectionBehavior::ByStacksHeight)] #[case(MempoolCollectionBehavior::ByReceiveTime)] fn mempool_db_load_store_replace_tx(#[case] behavior: MempoolCollectionBehavior) { - let mut chainstate = instantiate_chainstate(false, 0x80000000, function_name!()); - let chainstate_path = chainstate_path(function_name!()); + let path_name = format!("{}::{:?}", function_name!(), behavior); + let mut chainstate = instantiate_chainstate(false, 0x80000000, &path_name); + let chainstate_path = 
chainstate_path(&path_name); let mut mempool = MemPoolDB::open_test(false, 0x80000000, &chainstate_path).unwrap(); let mut txs = codec_all_transactions( diff --git a/stackslib/src/net/api/getstackers.rs b/stackslib/src/net/api/getstackers.rs index 69961dbe14..3b253aeb21 100644 --- a/stackslib/src/net/api/getstackers.rs +++ b/stackslib/src/net/api/getstackers.rs @@ -92,7 +92,6 @@ impl GetStackersResponse { cycle_number: u64, ) -> Result { let cycle_start_height = burnchain.reward_cycle_to_block_height(cycle_number); - let pox_contract_name = burnchain .pox_constants .active_pox_contract(cycle_start_height); @@ -107,7 +106,7 @@ impl GetStackersResponse { let provider = OnChainRewardSetProvider::new(); let stacker_set = provider - .read_reward_set_nakamoto(cycle_start_height, chainstate, burnchain, sortdb, tip, true) + .read_reward_set_nakamoto(chainstate, cycle_number, sortdb, tip, true) .map_err(GetStackersErrors::NotAvailableYet)?; Ok(Self { stacker_set }) @@ -121,7 +120,7 @@ impl HttpRequest for GetStackersRequestHandler { } fn path_regex(&self) -> Regex { - Regex::new(r#"^/v3/stacker_set/(?P[0-9]{1,20})$"#).unwrap() + Regex::new(r#"^/v3/stacker_set/(?P[0-9]{1,10})$"#).unwrap() } fn metrics_identifier(&self) -> &str { diff --git a/stackslib/src/net/atlas/db.rs b/stackslib/src/net/atlas/db.rs index f971344a28..d6bdbb301e 100644 --- a/stackslib/src/net/atlas/db.rs +++ b/stackslib/src/net/atlas/db.rs @@ -494,8 +494,12 @@ impl AtlasDB { page_index: u32, block_id: &StacksBlockId, ) -> Result, db_error> { - let min = page_index * AttachmentInstance::ATTACHMENTS_INV_PAGE_SIZE; - let max = min + AttachmentInstance::ATTACHMENTS_INV_PAGE_SIZE; + let min = page_index + .checked_mul(AttachmentInstance::ATTACHMENTS_INV_PAGE_SIZE) + .ok_or(db_error::Overflow)?; + let max = min + .checked_add(AttachmentInstance::ATTACHMENTS_INV_PAGE_SIZE) + .ok_or(db_error::Overflow)?; let qry = "SELECT attachment_index, is_available FROM attachment_instances WHERE attachment_index >= ?1 AND attachment_index < ?2 AND index_block_hash = ?3 ORDER BY attachment_index ASC"; let args = params![min, max, block_id,]; let rows = query_rows::<(u32, u32), _>(&self.conn, &qry, args)?; diff --git a/stackslib/src/net/connection.rs b/stackslib/src/net/connection.rs index 36b1fc18ff..78c15e0833 100644 --- a/stackslib/src/net/connection.rs +++ b/stackslib/src/net/connection.rs @@ -971,7 +971,7 @@ impl ConnectionInbox

{ // NOTE: it's important that buf not be too big, since up to buf.len()-1 bytes may need // to be copied if a message boundary isn't aligned with buf (which is usually the // case). - let mut buf = [0u8; 4096]; + let mut buf = [0u8; 65536]; let num_read = match fd.read(&mut buf) { Ok(0) => { // remote fd is closed, but do try to consume all remaining bytes in the buffer diff --git a/stackslib/src/net/http/mod.rs b/stackslib/src/net/http/mod.rs index cc6355ca31..ca7a97c5be 100644 --- a/stackslib/src/net/http/mod.rs +++ b/stackslib/src/net/http/mod.rs @@ -178,14 +178,14 @@ impl FromStr for HttpContentType { let s = header.to_string().to_lowercase(); if s == "application/octet-stream" { Ok(HttpContentType::Bytes) - } else if s == "text/plain" { + } else if s == "text/plain" || s.starts_with("text/plain;") { Ok(HttpContentType::Text) - } else if s == "application/json" { + } else if s == "application/json" || s.starts_with("application/json;") { Ok(HttpContentType::JSON) } else { - Err(CodecError::DeserializeError( - "Unsupported HTTP content type".to_string(), - )) + Err(CodecError::DeserializeError(format!( + "Unsupported HTTP content type: {header}" + ))) } } } diff --git a/stackslib/src/net/http/response.rs b/stackslib/src/net/http/response.rs index b6deee218f..3a47fd0d77 100644 --- a/stackslib/src/net/http/response.rs +++ b/stackslib/src/net/http/response.rs @@ -584,7 +584,7 @@ impl StacksMessageCodec for HttpResponsePreamble { )); } - if content_type.is_none() || (content_length.is_none() && !chunked_encoding) { + if content_length.is_none() && !chunked_encoding { return Err(CodecError::DeserializeError( "Invalid HTTP response: missing Content-Type, Content-Length".to_string(), )); @@ -595,7 +595,7 @@ impl StacksMessageCodec for HttpResponsePreamble { status_code: status_code, reason: reason, keep_alive: keep_alive, - content_type: content_type.unwrap(), + content_type: content_type.unwrap_or(HttpContentType::Bytes), // per the RFC content_length: content_length, headers: headers, }) diff --git a/stackslib/src/net/http/tests.rs b/stackslib/src/net/http/tests.rs index 508ca55c6e..a17635bc59 100644 --- a/stackslib/src/net/http/tests.rs +++ b/stackslib/src/net/http/tests.rs @@ -368,8 +368,6 @@ fn test_parse_http_response_preamble_err() { "Unsupported HTTP content type"), ("HTTP/1.1 200 OK\r\nContent-Length: foo\r\n\r\n", "Invalid Content-Length"), - ("HTTP/1.1 200 OK\r\nContent-Length: 123\r\n\r\n", - "missing Content-Type, Content-Length"), ("HTTP/1.1 200 OK\r\nContent-Type: text/plain\r\n\r\n", "missing Content-Type, Content-Length"), ("HTTP/1.1 200 OK\r\nContent-Type: text/plain\r\nContent-Length: 123\r\nTransfer-Encoding: chunked\r\n\r\n", diff --git a/stackslib/src/net/httpcore.rs b/stackslib/src/net/httpcore.rs index 88ee0365b2..804add6f33 100644 --- a/stackslib/src/net/httpcore.rs +++ b/stackslib/src/net/httpcore.rs @@ -17,7 +17,8 @@ /// This module binds the http library to Stacks as a `ProtocolFamily` implementation use std::collections::{BTreeMap, HashMap}; use std::io::{Read, Write}; -use std::net::SocketAddr; +use std::net::{SocketAddr, TcpStream, ToSocketAddrs}; +use std::time::{Duration, Instant}; use std::{fmt, io, mem}; use clarity::vm::costs::ExecutionCost; @@ -32,8 +33,8 @@ use stacks_common::types::chainstate::{ use stacks_common::types::net::PeerHost; use stacks_common::types::Address; use stacks_common::util::chunked_encoding::*; -use stacks_common::util::get_epoch_time_ms; use stacks_common::util::retry::{BoundReader, RetryReader}; +use 
stacks_common::util::{get_epoch_time_ms, get_epoch_time_secs}; use url::Url; use super::rpc::ConversationHttp; @@ -43,12 +44,13 @@ use crate::chainstate::burn::BlockSnapshot; use crate::chainstate::nakamoto::NakamotoChainState; use crate::chainstate::stacks::db::{StacksChainState, StacksHeaderInfo}; use crate::core::{MemPoolDB, StacksEpoch}; -use crate::net::connection::ConnectionOptions; -use crate::net::http::common::HTTP_PREAMBLE_MAX_ENCODED_SIZE; +use crate::net::connection::{ConnectionOptions, NetworkConnection}; +use crate::net::http::common::{parse_raw_bytes, HTTP_PREAMBLE_MAX_ENCODED_SIZE}; use crate::net::http::{ - http_reason, Error as HttpError, HttpBadRequest, HttpContentType, HttpErrorResponse, - HttpNotFound, HttpRequest, HttpRequestContents, HttpRequestPreamble, HttpResponse, - HttpResponseContents, HttpResponsePayload, HttpResponsePreamble, HttpServerError, HttpVersion, + http_reason, parse_bytes, parse_json, Error as HttpError, HttpBadRequest, HttpContentType, + HttpErrorResponse, HttpNotFound, HttpRequest, HttpRequestContents, HttpRequestPreamble, + HttpResponse, HttpResponseContents, HttpResponsePayload, HttpResponsePreamble, HttpServerError, + HttpVersion, }; use crate::net::p2p::PeerNetwork; use crate::net::server::HttpPeer; @@ -762,7 +764,7 @@ impl MessageSequence for StacksHttpMessage { } fn get_message_name(&self) -> &'static str { - "StachsHttpMessage" + "StacksHttpMessage" } } @@ -855,6 +857,44 @@ struct StacksHttpReplyData { stream: StacksHttpRecvStream, } +/// Default response handler, for when using StacksHttp to issue arbitrary requests +#[derive(Clone)] +struct RPCArbitraryResponseHandler {} +impl HttpResponse for RPCArbitraryResponseHandler { + fn try_parse_response( + &self, + preamble: &HttpResponsePreamble, + body: &[u8], + ) -> Result { + match preamble.content_type { + HttpContentType::Bytes => { + let bytes = parse_bytes(preamble, body, MAX_MESSAGE_LEN.into())?; + Ok(HttpResponsePayload::Bytes(bytes)) + } + HttpContentType::JSON => { + if body.len() > MAX_MESSAGE_LEN as usize { + return Err(HttpError::DecodeError( + "Message is too long to decode".into(), + )); + } + + let json = parse_json(preamble, body)?; + Ok(HttpResponsePayload::JSON(json)) + } + HttpContentType::Text => { + let text_bytes = parse_raw_bytes( + preamble, + body, + MAX_MESSAGE_LEN.into(), + HttpContentType::Text, + )?; + let text = String::from_utf8_lossy(&text_bytes).to_string(); + Ok(HttpResponsePayload::Text(text)) + } + } + } +} + /// Stacks HTTP state machine implementation, for bufferring up data. /// One of these exists per Connection. /// There can be at most one HTTP request in-flight (i.e. we don't do pipelining). @@ -890,9 +930,13 @@ pub struct StacksHttp { pub read_only_call_limit: ExecutionCost, /// The authorization token to enable access to privileged features, such as the block proposal RPC endpoint pub auth_token: Option, + /// Allow arbitrary responses to be handled in addition to request handlers + allow_arbitrary_response: bool, } impl StacksHttp { + /// Create an HTTP protocol state machine that handles the built-in RPC API. 
+ /// Used for building the RPC server pub fn new(peer_addr: SocketAddr, conn_opts: &ConnectionOptions) -> StacksHttp { let mut http = StacksHttp { peer_addr, @@ -906,11 +950,31 @@ impl StacksHttp { maximum_call_argument_size: conn_opts.maximum_call_argument_size, read_only_call_limit: conn_opts.read_only_call_limit.clone(), auth_token: conn_opts.auth_token.clone(), + allow_arbitrary_response: false, }; http.register_rpc_methods(); http } + /// Create an HTTP protocol state machine that can handle arbitrary responses. + /// Used for building clients. + pub fn new_client(peer_addr: SocketAddr, conn_opts: &ConnectionOptions) -> StacksHttp { + StacksHttp { + peer_addr, + body_start: None, + num_preamble_bytes: 0, + last_four_preamble_bytes: [0u8; 4], + reply: None, + chunk_size: 8192, + request_handler_index: None, + request_handlers: vec![], + maximum_call_argument_size: conn_opts.maximum_call_argument_size, + read_only_call_limit: conn_opts.read_only_call_limit.clone(), + auth_token: conn_opts.auth_token.clone(), + allow_arbitrary_response: true, + } + } + /// Register an API RPC endpoint pub fn register_rpc_endpoint( &mut self, @@ -1164,7 +1228,7 @@ impl StacksHttp { match preamble { StacksHttpPreamble::Response(ref http_response_preamble) => { // we can only receive a response if we're expecting it - if self.request_handler_index.is_none() { + if self.request_handler_index.is_none() && !self.allow_arbitrary_response { return Err(NetError::DeserializeError( "Unexpected HTTP response: no active request handler".to_string(), )); @@ -1292,14 +1356,7 @@ impl StacksHttp { "127.0.0.1:20443".parse().unwrap(), &ConnectionOptions::default(), ); - - let response_handler_index = - http.find_response_handler(verb, request_path) - .ok_or(NetError::SendError(format!( - "No such handler for '{} {}'", - verb, request_path - )))?; - http.request_handler_index = Some(response_handler_index); + http.allow_arbitrary_response = true; let (preamble, message_offset) = http.read_preamble(response_buf)?; let is_chunked = match preamble { @@ -1417,9 +1474,9 @@ impl ProtocolFamily for StacksHttp { } // sanity check -- if we're receiving a response, then we must have earlier issued - // a request. Thus, we must already know which response handler to use. - // Otherwise, someone sent us malforemd data. - if self.request_handler_index.is_none() { + // a request, or we must be in client mode. Thus, we must already know which + // response handler to use. Otherwise, someone sent us malforemd data. + if self.request_handler_index.is_none() && !self.allow_arbitrary_response { self.reset(); return Err(NetError::DeserializeError( "Unsolicited HTTP response".to_string(), @@ -1442,18 +1499,30 @@ impl ProtocolFamily for StacksHttp { num_read, ); - // we now know the content-length, so pass it into the parser. - let handler_index = - self.request_handler_index - .ok_or(NetError::DeserializeError( - "Unknown HTTP response handler".to_string(), - ))?; - - let parse_res = self.try_parse_response( - handler_index, - http_response_preamble, - &message_bytes[..], - ); + let parse_res = if self.request_handler_index.is_none() + && self.allow_arbitrary_response + { + let arbitrary_parser = RPCArbitraryResponseHandler {}; + let response_payload = arbitrary_parser + .try_parse_response(http_response_preamble, &message_bytes[..])?; + Ok(StacksHttpResponse::new( + http_response_preamble.clone(), + response_payload, + )) + } else { + // we now know the content-length, so pass it into the parser. 
+ let handler_index = + self.request_handler_index + .ok_or(NetError::DeserializeError( + "Unknown HTTP response handler".to_string(), + ))?; + + self.try_parse_response( + handler_index, + http_response_preamble, + &message_bytes[..], + ) + }; // done parsing self.reset(); @@ -1538,6 +1607,32 @@ impl ProtocolFamily for StacksHttp { // message of known length test_debug!("read http response payload of {} bytes", buf.len(),); + if self.request_handler_index.is_none() && self.allow_arbitrary_response { + let arbitrary_parser = RPCArbitraryResponseHandler {}; + let response_payload = + arbitrary_parser.try_parse_response(http_response_preamble, buf)?; + if http_response_preamble.status_code >= 400 { + return Ok(( + StacksHttpMessage::Error( + "(client-given)".into(), + StacksHttpResponse::new( + http_response_preamble.clone(), + response_payload, + ), + ), + buf.len(), + )); + } else { + return Ok(( + StacksHttpMessage::Response(StacksHttpResponse::new( + http_response_preamble.clone(), + response_payload, + )), + buf.len(), + )); + } + } + // sanity check -- if we're receiving a response, then we must have earlier issued // a request. Thus, we must already know which response handler to use. // Otherwise, someone sent us malformed data. @@ -1576,27 +1671,36 @@ impl ProtocolFamily for StacksHttp { ) -> Result<(), NetError> { match *message { StacksHttpMessage::Request(ref req) => { - // client cannot send more than one request in parallel - if self.request_handler_index.is_some() { - test_debug!("Have pending request already"); - return Err(NetError::InProgress); - } + // the node cannot send more than one request in parallel, unless the client is + // directing it + let handler_index = if !self.allow_arbitrary_response { + if self.request_handler_index.is_some() { + test_debug!("Have pending request already"); + return Err(NetError::InProgress); + } - // find the response handler we'll use - let (decoded_path, _) = decode_request_path(&req.preamble().path_and_query_str)?; - let handler_index = self - .find_response_handler(&req.preamble().verb, &decoded_path) - .ok_or(NetError::SendError(format!( - "No response handler found for `{} {}`", - &req.preamble().verb, - &decoded_path - )))?; + // find the response handler we'll use + let (decoded_path, _) = + decode_request_path(&req.preamble().path_and_query_str)?; + let handler_index = self + .find_response_handler(&req.preamble().verb, &decoded_path) + .ok_or(NetError::SendError(format!( + "No response handler found for `{} {}`", + &req.preamble().verb, + &decoded_path + )))?; + Some(handler_index) + } else { + None + }; req.send(fd)?; // remember this so we'll know how to decode the response. // The next preamble and message we'll read _must be_ a response! - self.request_handler_index = Some(handler_index); + if handler_index.is_some() { + self.request_handler_index = handler_index; + } Ok(()) } StacksHttpMessage::Response(ref resp) => resp.send(fd), @@ -1664,3 +1768,209 @@ pub fn decode_request_path(path: &str) -> Result<(String, String), NetError> { query_str.unwrap_or("").to_string(), )) } + +/// Convert a NetError into an io::Error if appropriate. +fn handle_net_error(e: NetError, msg: &str) -> io::Error { + match e { + NetError::ReadError(ioe) | NetError::WriteError(ioe) => ioe, + NetError::RecvTimeout => io::Error::new(io::ErrorKind::WouldBlock, "recv timeout"), + _ => io::Error::new(io::ErrorKind::Other, format!("{}: {:?}", &e, msg).as_str()), + } +} + +/// Send an HTTP request to the given host:port. Returns the decoded response. 
+/// Internally, this creates a socket, connects it, sends the HTTP request, and decodes the HTTP +/// response. It is a blocking operation. +/// +/// If the request encounters a network error, then return an error. Don't retry. +/// If the request times out after `timeout`, then return an error. +pub fn send_http_request( + host: &str, + port: u16, + request: StacksHttpRequest, + timeout: Duration, +) -> Result { + // Find the host:port that works. + // This is sometimes necessary because `localhost` can resolve to both its ipv4 and ipv6 + // addresses, but usually, Stacks services like event observers are only bound to ipv4 + // addresses. So, be sure to use an address that will lead to a socket connection! + let mut stream_and_addr = None; + let mut last_err = None; + for addr in format!("{host}:{port}").to_socket_addrs()? { + debug!("send_request: connect to {}", &addr); + match TcpStream::connect_timeout(&addr, timeout) { + Ok(sock) => { + stream_and_addr = Some((sock, addr)); + break; + } + Err(e) => { + last_err = Some(e); + } + } + } + + let Some((mut stream, addr)) = stream_and_addr else { + return Err(last_err.unwrap_or(io::Error::new( + io::ErrorKind::Other, + "Unable to connect to {host}:{port}", + ))); + }; + + stream.set_read_timeout(Some(timeout))?; + stream.set_write_timeout(Some(timeout))?; + stream.set_nodelay(true)?; + + let start = Instant::now(); + + debug!("send_request: Sending request"; "request" => %request.request_path()); + + // Some explanation of what's going on here is in order. + // + // The networking stack in Stacks is designed to operate on non-blocking sockets, and + // furthermore, it operates in a way that the call site in which a network request is issued can + // be in a wholly separate stack (or thread) from the connection. While this is absolutely necessary + // within the Stacks node, using it to issue a single blocking request imposes a lot of + // overhead. + // + // First, we will create the network connection and give it a ProtocolFamily implementation + // (StacksHttp), which gets used by the connection to encode and deocde messages. + // + // Second, we'll create a _handle_ to the network connection into which we will write requests + // and read responses. The connection itself is an opaque black box that, internally, + // implements a state machine around the ProtocolFamily implementation to incrementally read + // ProtocolFamily messages from a Read, and write them to a Write. The Read + Write is + // (usually) a non-blocking socket; the network connection deals with EWOULDBLOCK internally, + // as well as underfull socket buffers. + // + // Third, we need to _drive_ data to the socket. We have to repeatedly (1) flush the network + // handle (which contains the buffered bytes from the message to be fed into the socket), and + // (2) drive bytes from the handle into the socket iself via the network connection. This is a + // two-step process mainly because the handle is expected to live in a separate stack (or even + // a separate thread). + // + // Fourth, we need to _drive_ data from the socket. We have to repeatedly (1) pull data from + // the socket into the network connection, and (2) drive parsed messages from the connection to + // the handle. Then, the call site that owns the handle simply polls the handle for new + // messages. Once we have received a message, we can proceed to handle it. + // + // Finally, we deal with the kind of HTTP message we got. If it's an error response, we convert + // it into an error. 
If it's a request (i.e. not a response), we also return an error. We + // only return the message if it was a well-formed non-error HTTP response. + + // Step 1-2: set up the connection and request handle + // NOTE: we don't need anything special for connection options, so just use the default + let conn_opts = ConnectionOptions::default(); + let http = StacksHttp::new_client(addr, &conn_opts); + let mut connection = NetworkConnection::new(http, &conn_opts, None); + let mut request_handle = connection + .make_request_handle(0, get_epoch_time_secs() + timeout.as_secs(), 0) + .map_err(|e| { + io::Error::new( + io::ErrorKind::Other, + format!("Failed to create request handle: {:?}", &e).as_str(), + ) + })?; + + // Step 3: load up the request with the message we're gonna send, and iteratively dump its + // bytes from the handle into the socket (the connection does internal buffering and + // bookkeeping to deal with the cases where we fail to fill the socket buffer, or we can't send + // anymore because the socket buffer is currently full). + request + .send(&mut request_handle) + .map_err(|e| handle_net_error(e, "Failed to serialize request body"))?; + + debug!("send_request(sending data)"); + loop { + let flushed = request_handle + .try_flush() + .map_err(|e| handle_net_error(e, "Failed to flush request body"))?; + + // send it out + let num_sent = connection + .send_data(&mut stream) + .map_err(|e| handle_net_error(e, "Failed to send socket data"))?; + + debug!( + "send_request(sending data): flushed = {}, num_sent = {}", + flushed, num_sent + ); + if flushed && num_sent == 0 { + break; + } + + if Instant::now().saturating_duration_since(start) > timeout { + return Err(io::Error::new( + io::ErrorKind::WouldBlock, + "Timed out while receiving request", + )); + } + } + + // Step 4: pull bytes from the socket back into the handle, and see if the connection decoded + // and dispatched any new messages to the request handle. If so, then extract the message and + // check that it's a well-formed HTTP response. + debug!("send_request(receiving data)"); + let response = loop { + // get back the reply + debug!("send_request(receiving data): try to receive data"); + match connection.recv_data(&mut stream) { + Ok(nr) => { + debug!("send_request(receiving data): received {} bytes", nr); + } + Err(e) => { + return Err(handle_net_error(e, "Failed to receive socket data")); + } + } + + // fullfill the request -- send it to its corresponding handle + debug!("send_request(receiving data): drain inbox"); + connection.drain_inbox(); + + // see if we got a message that was fulfilled in our handle + debug!("send_request(receiving data): try receive response"); + let rh = match request_handle.try_recv() { + Ok(resp) => { + break resp; + } + Err(Ok(handle)) => handle, + Err(Err(e)) => { + return Err(handle_net_error( + e, + "Failed to receive message after socket has been drained", + )); + } + }; + request_handle = rh; + + if Instant::now().saturating_duration_since(start) > timeout { + return Err(io::Error::new( + io::ErrorKind::WouldBlock, + "Timed out while receiving request", + )); + } + }; + + // Step 5: decode the HTTP message and return it if it's not an error. + let response_data = match response { + StacksHttpMessage::Response(response_data) => response_data, + StacksHttpMessage::Error(path, response) => { + return Err(io::Error::new( + io::ErrorKind::Other, + format!( + "Request did not succeed ({} != 200). 
Path: '{}'", + response.preamble().status_code, + &path + ) + .as_str(), + )); + } + _ => { + return Err(io::Error::new( + io::ErrorKind::Other, + "Did not receive an HTTP response", + )); + } + }; + + Ok(response_data) +} diff --git a/stackslib/src/net/p2p.rs b/stackslib/src/net/p2p.rs index 4a52945521..0bbda1ef48 100644 --- a/stackslib/src/net/p2p.rs +++ b/stackslib/src/net/p2p.rs @@ -4289,7 +4289,7 @@ impl PeerNetwork { let ih = sortdb.index_handle(&tip_sn.sortition_id); for rc in [cur_rc, prev_rc, prev_prev_rc] { - let rc_start_height = self.burnchain.reward_cycle_to_block_height(rc); + let rc_start_height = self.burnchain.nakamoto_first_block_of_cycle(rc); let Some(ancestor_sort_id) = get_ancestor_sort_id(&ih, rc_start_height, &tip_sn.sortition_id)? else { diff --git a/stackslib/src/net/relay.rs b/stackslib/src/net/relay.rs index 0496909973..d022148b3a 100644 --- a/stackslib/src/net/relay.rs +++ b/stackslib/src/net/relay.rs @@ -599,7 +599,7 @@ impl Relayer { // is the block signed by the active reward set? let sn_rc = burnchain - .pox_reward_cycle(sn.block_height) + .block_height_to_reward_cycle(sn.block_height) .expect("FATAL: sortition has no reward cycle"); let reward_cycle_info = if let Some(rc_info) = loaded_reward_sets.get(&sn_rc) { rc_info @@ -885,6 +885,7 @@ impl Relayer { // NOTE: it's `+ 1` because the first Nakamoto block is built atop the last epoch 2.x // tenure, right after the last 2.x sortition + // TODO: is this true? let epoch_id = SortitionDB::get_stacks_epoch(sort_handle, block_sn.block_height + 1)? .expect("FATAL: no epoch defined") .epoch_id; @@ -930,7 +931,7 @@ impl Relayer { let reward_info = match load_nakamoto_reward_set( burnchain - .pox_reward_cycle(block_sn.block_height) + .block_height_to_reward_cycle(block_sn.block_height) .expect("FATAL: block snapshot has no reward cycle"), &tip, burnchain, diff --git a/stackslib/src/net/tests/download/nakamoto.rs b/stackslib/src/net/tests/download/nakamoto.rs index 57bd557186..741f6b21ff 100644 --- a/stackslib/src/net/tests/download/nakamoto.rs +++ b/stackslib/src/net/tests/download/nakamoto.rs @@ -2104,7 +2104,10 @@ fn test_nakamoto_download_run_2_peers() { .get_nakamoto_tip_block_id() .unwrap() .unwrap(); - assert_eq!(tip.block_height, 81); + assert_eq!( + tip.block_height, + 41 + bitvecs.iter().map(|x| x.len() as u64).sum::() + ); // make a neighbor from this peer let boot_observer = TestEventObserver::new(); diff --git a/stackslib/src/net/tests/httpcore.rs b/stackslib/src/net/tests/httpcore.rs index 1837d8e1c4..d9c62eedf6 100644 --- a/stackslib/src/net/tests/httpcore.rs +++ b/stackslib/src/net/tests/httpcore.rs @@ -14,9 +14,11 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . 
-use std::io::Write; -use std::net::{SocketAddr, ToSocketAddrs}; -use std::str; +use std::io::{Read, Write}; +use std::net::{SocketAddr, TcpListener, TcpStream, ToSocketAddrs}; +use std::sync::mpsc::{channel, Receiver}; +use std::time::{Duration, Instant}; +use std::{str, thread}; use stacks_common::codec::StacksMessageCodec; use stacks_common::types::chainstate::{StacksAddress, StacksBlockId, StacksPrivateKey}; @@ -38,12 +40,12 @@ use crate::net::api::getneighbors::{RPCNeighbor, RPCNeighborsInfo}; use crate::net::connection::ConnectionOptions; use crate::net::http::{ http_error_from_code_and_text, http_reason, HttpContentType, HttpErrorResponse, - HttpRequestContents, HttpRequestPreamble, HttpReservedHeader, HttpResponsePreamble, - HttpVersion, HTTP_PREAMBLE_MAX_NUM_HEADERS, + HttpRequestContents, HttpRequestPreamble, HttpReservedHeader, HttpResponsePayload, + HttpResponsePreamble, HttpVersion, HTTP_PREAMBLE_MAX_NUM_HEADERS, }; use crate::net::httpcore::{ - HttpPreambleExtensions, HttpRequestContentsExtensions, StacksHttp, StacksHttpMessage, - StacksHttpPreamble, StacksHttpRequest, StacksHttpResponse, + send_http_request, HttpPreambleExtensions, HttpRequestContentsExtensions, StacksHttp, + StacksHttpMessage, StacksHttpPreamble, StacksHttpRequest, StacksHttpResponse, }; use crate::net::rpc::ConversationHttp; use crate::net::{ProtocolFamily, TipRequest}; @@ -118,8 +120,6 @@ fn test_parse_stacks_http_preamble_response_err() { "Failed to decode HTTP request or HTTP response"), ("HTTP/1.1 200 OK\r\nContent-Length: foo\r\n\r\n", "Failed to decode HTTP request or HTTP response"), - ("HTTP/1.1 200 OK\r\nContent-Length: 123\r\n\r\n", - "Failed to decode HTTP request or HTTP response"), ("HTTP/1.1 200 OK\r\nContent-Type: text/plain\r\n\r\n", "Failed to decode HTTP request or HTTP response"), ("HTTP/1.1 200 OK\r\nContent-Type: text/plain\r\nContent-Length: 123\r\nTransfer-Encoding: chunked\r\n\r\n", @@ -1118,3 +1118,157 @@ fn test_metrics_identifiers() { assert_eq!(response_handler_index.is_some(), should_have_handler); } } + +fn json_body(host: &str, port: u16, path: &str, json_bytes: &[u8]) -> StacksHttpRequest { + let peerhost: PeerHost = format!("{host}:{port}") + .parse() + .unwrap_or(PeerHost::DNS(host.to_string(), port)); + let mut request = StacksHttpRequest::new_for_peer( + peerhost, + "POST".into(), + path.into(), + HttpRequestContents::new().payload_json(serde_json::from_slice(json_bytes).unwrap()), + ) + .unwrap_or_else(|_| panic!("FATAL: failed to encode infallible data as HTTP request")); + request.add_header("Connection".into(), "close".into()); + + request +} + +#[test] +fn test_send_request_timeout() { + // Set up a TcpListener that accepts a connection but delays response + let listener = TcpListener::bind("127.0.0.1:0").expect("Failed to bind test listener"); + let addr = listener.local_addr().unwrap(); + + // Spawn a thread that will accept the connection and do nothing, simulating a long delay + thread::spawn(move || { + let (stream, _addr) = listener.accept().unwrap(); + // Hold the connection open to simulate a delay + thread::sleep(Duration::from_secs(10)); + drop(stream); // Close the stream + }); + + // Set a timeout shorter than the sleep duration to force a timeout + let connection_timeout = Duration::from_secs(2); + + // Attempt to connect, expecting a timeout error + let result = send_http_request( + "127.0.0.1", + addr.port(), + json_body("127.0.0.1", 80, "/", b"{}"), + connection_timeout, + ); + + // Assert that the result is an error, specifically a timeout + 
assert!( + result.is_err(), + "Expected a timeout error, got: {:?}", + result + ); + + if let Err(err) = result { + assert_eq!( + err.kind(), + std::io::ErrorKind::WouldBlock, + "Expected TimedOut error, got: {:?}", + err + ); + } +} + +fn start_mock_server(response: String, client_done_signal: Receiver<()>) -> String { + // Bind to an available port on localhost + let listener = TcpListener::bind("127.0.0.1:0").expect("Failed to bind server"); + let addr = listener.local_addr().unwrap(); + + debug!("Mock server listening on {}", addr); + + // Start the server in a new thread + thread::spawn(move || { + for stream in listener.incoming() { + debug!("Mock server accepted connection"); + let mut stream = stream.expect("Failed to accept connection"); + + // Read the client's request (even if we don't do anything with it) + let mut buffer = [0; 512]; + let _ = stream.read(&mut buffer); + debug!("Mock server received request"); + + // Simulate a basic HTTP response + stream + .write_all(response.as_bytes()) + .expect("Failed to write response"); + stream.flush().expect("Failed to flush stream"); + debug!("Mock server sent response"); + + // Wait for the client to signal that it's done reading + client_done_signal + .recv() + .expect("Failed to receive client done signal"); + + // Explicitly drop the stream after signaling to ensure the client finishes + // NOTE: this will cause the test to slow down, since `send_http_request` expects + // `Connection: close` + drop(stream); + + debug!("Mock server closing connection"); + + break; // Close after the first request + } + }); + + // Return the address of the mock server + format!("{}:{}", addr.ip(), addr.port()) +} + +fn parse_http_response(response: StacksHttpResponse) -> String { + let response_txt = match response.destruct().1 { + HttpResponsePayload::Text(s) => s, + HttpResponsePayload::Empty => "".to_string(), + HttpResponsePayload::JSON(js) => serde_json::to_string(&js).unwrap(), + HttpResponsePayload::Bytes(bytes) => String::from_utf8_lossy(bytes.as_slice()).to_string(), + }; + response_txt +} + +#[test] +fn test_send_request_success() { + // Prepare the mock server to return a successful HTTP response + let mock_response = "HTTP/1.1 200 OK\r\nContent-Length: 13\r\n\r\nHello, world!"; + + // Create a channel to signal when the client is done reading + let (tx_client_done, rx_client_done) = channel(); + let server_addr = start_mock_server(mock_response.to_string(), rx_client_done); + let timeout_duration = Duration::from_secs(5); + + let parts = server_addr.split(':').collect::>(); + let host = parts[0]; + let port = parts[1].parse().unwrap(); + + // Attempt to send a request to the mock server + let result = send_http_request( + host, + port, + json_body(host, port, "/", b"{}"), + timeout_duration, + ); + debug!("Got result: {:?}", result); + + // Ensure the server only closes after the client has finished processing + if let Ok(response) = &result { + let body = parse_http_response(response.clone()); + assert_eq!(body, "Hello, world!", "Unexpected response body: {}", body); + } + + tx_client_done + .send(()) + .expect("Failed to send close signal"); + + // Assert that the connection was successful + assert!( + result.is_ok(), + "Expected a successful request, but got {:?}", + result + ); +} diff --git a/stackslib/src/net/tests/mod.rs b/stackslib/src/net/tests/mod.rs index 8372116ced..4d7cdac375 100644 --- a/stackslib/src/net/tests/mod.rs +++ b/stackslib/src/net/tests/mod.rs @@ -89,6 +89,8 @@ pub struct NakamotoBootPlan { pub test_signers: 
TestSigners, pub observer: Option, pub num_peers: usize, + /// Whether to add an initial balance for `private_key`'s account + pub add_default_balance: bool, } impl NakamotoBootPlan { @@ -103,6 +105,7 @@ impl NakamotoBootPlan { test_signers, observer: Some(TestEventObserver::new()), num_peers: 0, + add_default_balance: true, } } @@ -347,8 +350,12 @@ impl NakamotoBootPlan { + 1) .into(), )); - peer_config.initial_balances = - vec![(addr.to_account_principal(), 1_000_000_000_000_000_000)]; + peer_config.initial_balances = vec![]; + if self.add_default_balance { + peer_config + .initial_balances + .push((addr.to_account_principal(), 1_000_000_000_000_000_000)); + } peer_config .initial_balances .append(&mut self.initial_balances.clone()); diff --git a/stackslib/src/net/tests/relay/nakamoto.rs b/stackslib/src/net/tests/relay/nakamoto.rs index 4df3171474..a0aae1c035 100644 --- a/stackslib/src/net/tests/relay/nakamoto.rs +++ b/stackslib/src/net/tests/relay/nakamoto.rs @@ -618,7 +618,7 @@ fn test_no_buffer_ready_nakamoto_blocks() { follower .network .burnchain - .pox_reward_cycle(block_sn.block_height) + .block_height_to_reward_cycle(block_sn.block_height) .unwrap() ), true @@ -642,7 +642,7 @@ fn test_no_buffer_ready_nakamoto_blocks() { follower .network .burnchain - .pox_reward_cycle( + .block_height_to_reward_cycle( follower.network.burnchain_tip.block_height ) .unwrap() @@ -670,7 +670,7 @@ fn test_no_buffer_ready_nakamoto_blocks() { follower .network .burnchain - .pox_reward_cycle(ancestor_sn.block_height) + .block_height_to_reward_cycle(ancestor_sn.block_height) .unwrap() ), true @@ -816,9 +816,12 @@ fn test_buffer_nonready_nakamoto_blocks() { let mut all_blocks = vec![]; thread::scope(|s| { - s.spawn(|| { - SeedNode::main(peer, rc_len, seed_comms); - }); + thread::Builder::new() + .name("seed".into()) + .spawn_scoped(s, || { + SeedNode::main(peer, rc_len, seed_comms); + }) + .unwrap(); let mut seed_exited = false; let mut exited_peer = None; diff --git a/stackslib/src/net/unsolicited.rs b/stackslib/src/net/unsolicited.rs index 5aeadc3dfd..f9ab5de87e 100644 --- a/stackslib/src/net/unsolicited.rs +++ b/stackslib/src/net/unsolicited.rs @@ -715,10 +715,11 @@ impl PeerNetwork { ) -> bool { let Some(rc_data) = self.current_reward_sets.get(&reward_cycle) else { info!( - "{:?}: Failed to validate Nakamoto block {}/{}: no reward set", + "{:?}: Failed to validate Nakamoto block {}/{}: no reward set for cycle {}", self.get_local_peer(), &nakamoto_block.header.consensus_hash, - &nakamoto_block.header.block_hash() + &nakamoto_block.header.block_hash(), + reward_cycle, ); return false; }; @@ -733,7 +734,7 @@ impl PeerNetwork { if let Err(e) = nakamoto_block.header.verify_signer_signatures(reward_set) { info!( - "{:?}: signature verification failrue for Nakamoto block {}/{} in reward cycle {}: {:?}", self.get_local_peer(), &nakamoto_block.header.consensus_hash, &nakamoto_block.header.block_hash(), reward_cycle, &e + "{:?}: signature verification failure for Nakamoto block {}/{} in reward cycle {}: {:?}", self.get_local_peer(), &nakamoto_block.header.consensus_hash, &nakamoto_block.header.block_hash(), reward_cycle, &e ); return false; } @@ -788,7 +789,7 @@ impl PeerNetwork { let reward_set_sn_rc = self .burnchain - .pox_reward_cycle(reward_set_sn.block_height) + .block_height_to_reward_cycle(reward_set_sn.block_height) .expect("FATAL: sortition has no reward cycle"); return (Some(reward_set_sn_rc), can_process); diff --git a/testnet/stacks-node/Cargo.toml b/testnet/stacks-node/Cargo.toml index 
a5b935d80e..6e6c5918a2 100644 --- a/testnet/stacks-node/Cargo.toml +++ b/testnet/stacks-node/Cargo.toml @@ -15,9 +15,6 @@ serde_json = { version = "1.0", features = ["arbitrary_precision", "raw_value"] stacks = { path = "../../stackslib", package = "stackslib" } stx-genesis = { path = "../../stx-genesis" } toml = "0.5.6" -async-h1 = "2.3.2" -async-std = { version = "1.6", features = ["attributes"] } -http-types = "2.12" base64 = "0.12.0" backtrace = "0.3.50" libc = "0.2.151" @@ -28,9 +25,13 @@ chrono = "0.4.19" regex = "1" libsigner = { path = "../../libsigner" } wsts = { workspace = true } +url = "2.1.0" rand = { workspace = true } rand_core = { workspace = true } hashbrown = { workspace = true } +async-h1 = { version = "2.3.2", optional = true } +async-std = { version = "1.6", optional = true, features = ["attributes"] } +http-types = { version = "2.12", optional = true } [target.'cfg(not(any(target_os = "macos", target_os="windows", target_arch = "arm")))'.dependencies] tikv-jemallocator = {workspace = true} @@ -39,8 +40,8 @@ tikv-jemallocator = {workspace = true} ring = "0.16.19" warp = "0.3.5" tokio = "1.15" -reqwest = { version = "0.11", default_features = false, features = ["blocking", "json", "rustls", "rustls-tls"] } -clarity = { path = "../../clarity", features = ["default", "testing"] } +reqwest = { version = "0.11", default-features = false, features = ["blocking", "json", "rustls", "rustls-tls"] } +clarity = { path = "../../clarity", features = ["default", "testing"]} stacks-common = { path = "../../stacks-common", features = ["default", "testing"] } stacks = { path = "../../stackslib", package = "stackslib", features = ["default", "testing"] } stacks-signer = { path = "../../stacks-signer" } @@ -48,10 +49,13 @@ tracing = "0.1.37" tracing-subscriber = { version = "0.3.17", features = ["env-filter"] } wsts = {workspace = true} mutants = "0.0.3" +tiny_http = "0.12.0" +http-types = "2.12" [dependencies.rusqlite] workspace = true features = ["blob", "serde_json", "i128_blob", "bundled", "trace"] +optional = true [[bin]] name = "stacks-node" @@ -62,7 +66,7 @@ name = "stacks-events" path = "src/stacks_events.rs" [features] -monitoring_prom = ["stacks/monitoring_prom", "libsigner/monitoring_prom", "stacks-signer/monitoring_prom"] +monitoring_prom = ["stacks/monitoring_prom", "libsigner/monitoring_prom", "stacks-signer/monitoring_prom", "async-h1", "async-std", "http-types"] slog_json = ["stacks/slog_json", "stacks-common/slog_json", "clarity/slog_json"] prod-genesis-chainstate = [] default = [] diff --git a/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs b/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs index ce303086bb..749b88f583 100644 --- a/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs +++ b/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs @@ -1,14 +1,26 @@ -use std::cmp; +// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation +// Copyright (C) 2020-2024 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. 
+// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + use std::io::Cursor; use std::sync::atomic::{AtomicBool, Ordering}; use std::sync::Arc; -use std::time::Instant; +use std::time::{Duration, Instant}; +use std::{cmp, io}; -use async_h1::client; -use async_std::io::ReadExt; -use async_std::net::TcpStream; use base64::encode; -use http_types::{Method, Request, Url}; use serde::Serialize; use serde_json::json; use serde_json::value::RawValue; @@ -38,6 +50,9 @@ use stacks::chainstate::coordinator::comm::CoordinatorChannels; use stacks::chainstate::stacks::address::PoxAddress; use stacks::core::{StacksEpoch, StacksEpochId}; use stacks::monitoring::{increment_btc_blocks_received_counter, increment_btc_ops_sent_counter}; +use stacks::net::http::{HttpRequestContents, HttpResponsePayload}; +use stacks::net::httpcore::{send_http_request, StacksHttpRequest}; +use stacks::net::Error as NetError; use stacks_common::codec::StacksMessageCodec; use stacks_common::deps_common::bitcoin::blockdata::opcodes; use stacks_common::deps_common::bitcoin::blockdata::script::{Builder, Script}; @@ -50,9 +65,11 @@ use stacks_common::deps_common::bitcoin::network::serialize::deserialize as btc_ use stacks_common::deps_common::bitcoin::network::serialize::RawEncoder; use stacks_common::deps_common::bitcoin::util::hash::Sha256dHash; use stacks_common::types::chainstate::BurnchainHeaderHash; +use stacks_common::types::net::PeerHost; use stacks_common::util::hash::{hex_bytes, Hash160}; use stacks_common::util::secp256k1::Secp256k1PublicKey; use stacks_common::util::sleep_ms; +use url::Url; use super::super::operations::BurnchainOpSigner; use super::super::Config; @@ -1373,14 +1390,36 @@ impl BitcoinRegtestController { previous_fees: Option, previous_txids: &Vec, ) -> Option { - let mut estimated_fees = match previous_fees { + let _ = self.sortdb_mut(); + let burn_chain_tip = self.burnchain_db.as_ref()?.get_canonical_chain_tip().ok()?; + let estimated_fees = match previous_fees { Some(fees) => fees.fees_from_previous_tx(&payload, &self.config), None => LeaderBlockCommitFees::estimated_fees_from_payload(&payload, &self.config), }; - let _ = self.sortdb_mut(); - let burn_chain_tip = self.burnchain_db.as_ref()?.get_canonical_chain_tip().ok()?; + self.send_block_commit_operation_at_burnchain_height( + epoch_id, + payload, + signer, + utxos_to_include, + utxos_to_exclude, + estimated_fees, + previous_txids, + burn_chain_tip.block_height, + ) + } + fn send_block_commit_operation_at_burnchain_height( + &mut self, + epoch_id: StacksEpochId, + payload: LeaderBlockCommitOp, + signer: &mut BurnchainOpSigner, + utxos_to_include: Option, + utxos_to_exclude: Option, + mut estimated_fees: LeaderBlockCommitFees, + previous_txids: &Vec, + burnchain_block_height: u64, + ) -> Option { let public_key = signer.get_public_key(); let (mut tx, mut utxos) = self.prepare_tx( epoch_id, @@ -1388,7 +1427,7 @@ impl BitcoinRegtestController { estimated_fees.estimated_amount_required(), utxos_to_include, utxos_to_exclude, - burn_chain_tip.block_height, + burnchain_block_height, )?; // Serialize the payload @@ -1817,7 +1856,7 @@ impl BitcoinRegtestController { debug!("Not enough change to clear dust limit. 
Not adding change address."); } - for (i, utxo) in utxos_set.utxos.iter().enumerate() { + for (_i, utxo) in utxos_set.utxos.iter().enumerate() { let input = TxIn { previous_output: OutPoint { txid: utxo.txid, @@ -1828,7 +1867,8 @@ impl BitcoinRegtestController { witness: vec![], }; tx.input.push(input); - + } + for (i, utxo) in utxos_set.utxos.iter().enumerate() { let script_pub_key = utxo.script_pub_key.clone(); let sig_hash_all = 0x01; @@ -2408,8 +2448,20 @@ pub enum RPCError { type RPCResult = Result; +impl From for RPCError { + fn from(ioe: io::Error) -> Self { + Self::Network(format!("IO Error: {:?}", &ioe)) + } +} + +impl From for RPCError { + fn from(ne: NetError) -> Self { + Self::Network(format!("Net Error: {:?}", &ne)) + } +} + impl BitcoinRPCRequest { - fn build_rpc_request(config: &Config, payload: &BitcoinRPCRequest) -> Request { + fn build_rpc_request(config: &Config, payload: &BitcoinRPCRequest) -> StacksHttpRequest { let url = { // some methods require a wallet ID let wallet_id = match payload.method.as_str() { @@ -2424,16 +2476,35 @@ impl BitcoinRPCRequest { &payload.method, &config.burnchain.username, &config.burnchain.password, &url ); - let mut req = Request::new(Method::Post, url); + let host = url + .host_str() + .expect("Invalid bitcoin RPC URL: missing host"); + let port = url.port_or_known_default().unwrap_or(8333); + let peerhost: PeerHost = format!("{host}:{port}") + .parse() + .unwrap_or_else(|_| panic!("FATAL: could not parse URL into PeerHost")); + + let mut request = StacksHttpRequest::new_for_peer( + peerhost, + "POST".into(), + url.path().into(), + HttpRequestContents::new().payload_json( + serde_json::to_value(payload).unwrap_or_else(|_| { + panic!("FATAL: failed to encode Bitcoin RPC request as JSON") + }), + ), + ) + .unwrap_or_else(|_| panic!("FATAL: failed to encode infallible data as HTTP request")); + request.add_header("Connection".into(), "close".into()); match (&config.burnchain.username, &config.burnchain.password) { (Some(username), Some(password)) => { let auth_token = format!("Basic {}", encode(format!("{}:{}", username, password))); - req.append_header("Authorization", auth_token); + request.add_header("Authorization".into(), auth_token); } (_, _) => {} }; - req + request } #[cfg(test)] @@ -2518,10 +2589,10 @@ impl BitcoinRPCRequest { .map_err(|_| RPCError::Parsing("Failed to get bestblockhash".to_string()))?; let bhh = BurnchainHeaderHash::from_hex(&bhh) .map_err(|_| RPCError::Parsing("Failed to get bestblockhash".to_string()))?; - Ok(bhh) + bhh } _ => return Err(RPCError::Parsing("Failed to get UTXOs".to_string())), - }?; + }; let min_conf = 0i64; let max_conf = 9999999i64; @@ -2743,71 +2814,18 @@ impl BitcoinRPCRequest { } fn send(config: &Config, payload: BitcoinRPCRequest) -> RPCResult { - let mut request = BitcoinRPCRequest::build_rpc_request(&config, &payload); + let request = BitcoinRPCRequest::build_rpc_request(&config, &payload); + let timeout = Duration::from_secs(60); - let body = match serde_json::to_vec(&json!(payload)) { - Ok(body) => body, - Err(err) => { - return Err(RPCError::Network(format!("RPC Error: {}", err))); - } - }; - - request.append_header("Content-Type", "application/json"); - request.set_body(body); - - let mut response = async_std::task::block_on(async move { - let stream = match TcpStream::connect(config.burnchain.get_rpc_socket_addr()).await { - Ok(stream) => stream, - Err(err) => { - return Err(RPCError::Network(format!( - "Bitcoin RPC: connection failed - {:?}", - err - ))) - } - }; - - match 
client::connect(stream, request).await { - Ok(response) => Ok(response), - Err(err) => { - return Err(RPCError::Network(format!( - "Bitcoin RPC: invoking procedure failed - {:?}", - err - ))) - } - } - })?; - - let status = response.status(); - - let (res, buffer) = async_std::task::block_on(async move { - let mut buffer = Vec::new(); - let mut body = response.take_body(); - let res = body.read_to_end(&mut buffer).await; - (res, buffer) - }); - - if !status.is_success() { - return Err(RPCError::Network(format!( - "Bitcoin RPC: status({}) != success, body is '{:?}'", - status, - match serde_json::from_slice::(&buffer[..]) { - Ok(v) => v, - Err(_e) => serde_json::from_str("\"(unparseable)\"") - .expect("Failed to parse JSON literal"), - } - ))); - } + let host = request.preamble().host.hostname(); + let port = request.preamble().host.port(); - if res.is_err() { - return Err(RPCError::Network(format!( - "Bitcoin RPC: unable to read body - {:?}", - res - ))); + let response = send_http_request(&host, port, request, timeout)?; + if let HttpResponsePayload::JSON(js) = response.destruct().1 { + return Ok(js); + } else { + return Err(RPCError::Parsing("Did not get a JSON response".into())); } - - let payload = serde_json::from_slice::(&buffer[..]) - .map_err(|e| RPCError::Parsing(format!("Bitcoin RPC: {}", e)))?; - Ok(payload) } } @@ -2817,6 +2835,12 @@ mod tests { use std::fs::File; use std::io::Write; + use stacks::burnchains::BurnchainSigner; + use stacks_common::deps_common::bitcoin::blockdata::script::Builder; + use stacks_common::types::chainstate::{BlockHeaderHash, StacksAddress, VRFSeed}; + use stacks_common::util::hash::to_hex; + use stacks_common::util::secp256k1::Secp256k1PrivateKey; + use super::*; use crate::config::DEFAULT_SATS_PER_VB; @@ -2837,4 +2861,160 @@ mod tests { assert_eq!(get_satoshis_per_byte(&config), 51); } + + /// Verify that we can build a valid Bitcoin transaction with multiple UTXOs. + /// Taken from production data. 
+ /// Tests `serialize_tx()` and `send_block_commit_operation_at_burnchain_height()` + #[test] + fn test_multiple_inputs() { + let spend_utxos = vec![ + UTXO { + txid: Sha256dHash::from_hex( + "d3eafb3aba3cec925473550ed2e4d00bcb0d00744bb3212e4a8e72878909daee", + ) + .unwrap(), + vout: 3, + script_pub_key: Builder::from( + hex_bytes("76a9141dc27eba0247f8cc9575e7d45e50a0bc7e72427d88ac").unwrap(), + ) + .into_script(), + amount: 42051, + confirmations: 1421, + }, + UTXO { + txid: Sha256dHash::from_hex( + "01132f2d4a98cc715624e033214c8d841098a1ee15b30188ab89589a320b3b24", + ) + .unwrap(), + vout: 0, + script_pub_key: Builder::from( + hex_bytes("76a9141dc27eba0247f8cc9575e7d45e50a0bc7e72427d88ac").unwrap(), + ) + .into_script(), + amount: 326456, + confirmations: 1421, + }, + ]; + + // test serialize_tx() + let mut config = Config::default(); + config.burnchain.magic_bytes = "T3".as_bytes().into(); + + let mut btc_controller = BitcoinRegtestController::new(config, None); + let mut utxo_set = UTXOSet { + bhh: BurnchainHeaderHash([0x01; 32]), + utxos: spend_utxos.clone(), + }; + let mut transaction = Transaction { + input: vec![], + output: vec![ + TxOut { + value: 0, + script_pubkey: Builder::from(hex_bytes("6a4c5054335be88c3d30cb59a142f83de3b27f897a43bbb0f13316911bb98a3229973dae32afd5b9f21bc1f40f24e2c101ecd13c55b8619e5e03dad81de2c62a1cc1d8c1b375000008a300010000059800015a").unwrap()).into_script(), + }, + TxOut { + value: 10000, + script_pubkey: Builder::from(hex_bytes("76a914000000000000000000000000000000000000000088ac").unwrap()).into_script(), + }, + TxOut { + value: 10000, + script_pubkey: Builder::from(hex_bytes("76a914000000000000000000000000000000000000000088ac").unwrap()).into_script(), + }, + ], + version: 1, + lock_time: 0, + }; + + let mut signer = BurnchainOpSigner::new( + Secp256k1PrivateKey::from_hex( + "9e446f6b0c6a96cf2190e54bcd5a8569c3e386f091605499464389b8d4e0bfc201", + ) + .unwrap(), + false, + ); + assert!(btc_controller.serialize_tx( + StacksEpochId::Epoch25, + &mut transaction, + 44950, + &mut utxo_set, + &mut signer, + true + )); + assert_eq!(transaction.output[3].value, 323557); + + // test send_block_commit_operation_at_burn_height() + let utxo_set = UTXOSet { + bhh: BurnchainHeaderHash([0x01; 32]), + utxos: spend_utxos.clone(), + }; + + let commit_op = LeaderBlockCommitOp { + block_header_hash: BlockHeaderHash::from_hex( + "e88c3d30cb59a142f83de3b27f897a43bbb0f13316911bb98a3229973dae32af", + ) + .unwrap(), + new_seed: VRFSeed::from_hex( + "d5b9f21bc1f40f24e2c101ecd13c55b8619e5e03dad81de2c62a1cc1d8c1b375", + ) + .unwrap(), + parent_block_ptr: 2211, // 0x000008a3 + parent_vtxindex: 1, // 0x0001 + key_block_ptr: 1432, // 0x00000598 + key_vtxindex: 1, // 0x0001 + memo: vec![11], // 0x5a >> 3 + + burn_fee: 0, + input: (Txid([0x00; 32]), 0), + burn_parent_modulus: 2, // 0x5a & 0b111 + + apparent_sender: BurnchainSigner("mgbpit8FvkVJ9kuXY8QSM5P7eibnhcEMBk".to_string()), + commit_outs: vec![ + PoxAddress::Standard(StacksAddress::burn_address(false), None), + PoxAddress::Standard(StacksAddress::burn_address(false), None), + ], + + treatment: vec![], + sunset_burn: 0, + + txid: Txid([0x00; 32]), + vtxindex: 0, + block_height: 2212, + burn_header_hash: BurnchainHeaderHash([0x01; 32]), + }; + + assert_eq!(to_hex(&commit_op.serialize_to_vec()), "5be88c3d30cb59a142f83de3b27f897a43bbb0f13316911bb98a3229973dae32afd5b9f21bc1f40f24e2c101ecd13c55b8619e5e03dad81de2c62a1cc1d8c1b375000008a300010000059800015a".to_string()); + + let leader_fees = LeaderBlockCommitFees { + sunset_fee: 0, + 
fee_rate: 50, + sortition_fee: 20000, + outputs_len: 2, + default_tx_size: 380, + spent_in_attempts: 0, + is_rbf_enabled: false, + final_size: 498, + }; + + assert_eq!(leader_fees.amount_per_output(), 10000); + assert_eq!(leader_fees.total_spent(), 44900); + + let block_commit = btc_controller + .send_block_commit_operation_at_burnchain_height( + StacksEpochId::Epoch30, + commit_op, + &mut signer, + Some(utxo_set), + None, + leader_fees, + &vec![], + 2212, + ) + .unwrap(); + + debug!("send_block_commit_operation:\n{:#?}", &block_commit); + debug!("{}", &SerializedTx::new(block_commit.clone()).to_hex()); + assert_eq!(block_commit.output[3].value, 323507); + + assert_eq!(&SerializedTx::new(block_commit.clone()).to_hex(), "0100000002eeda098987728e4a2e21b34b74000dcb0bd0e4d20e55735492ec3cba3afbead3030000006a4730440220558286e20e10ce31537f0625dae5cc62fac7961b9d2cf272c990de96323d7e2502202255adbea3d2e0509b80c5d8a3a4fe6397a87bcf18da1852740d5267d89a0cb20121035379aa40c02890d253cfa577964116eb5295570ae9f7287cbae5f2585f5b2c7cfdffffff243b0b329a5889ab8801b315eea19810848d4c2133e0245671cc984a2d2f1301000000006a47304402206d9f8de107f9e1eb15aafac66c2bb34331a7523260b30e18779257e367048d34022013c7dabb32a5c281aa00d405e2ccbd00f34f03a65b2336553a4acd6c52c251ef0121035379aa40c02890d253cfa577964116eb5295570ae9f7287cbae5f2585f5b2c7cfdffffff040000000000000000536a4c5054335be88c3d30cb59a142f83de3b27f897a43bbb0f13316911bb98a3229973dae32afd5b9f21bc1f40f24e2c101ecd13c55b8619e5e03dad81de2c62a1cc1d8c1b375000008a300010000059800015a10270000000000001976a914000000000000000000000000000000000000000088ac10270000000000001976a914000000000000000000000000000000000000000088acb3ef0400000000001976a9141dc27eba0247f8cc9575e7d45e50a0bc7e72427d88ac00000000"); + } } diff --git a/testnet/stacks-node/src/config.rs b/testnet/stacks-node/src/config.rs index 95043c625b..e43220c745 100644 --- a/testnet/stacks-node/src/config.rs +++ b/testnet/stacks-node/src/config.rs @@ -86,6 +86,7 @@ pub const OP_TX_ANY_ESTIM_SIZE: u64 = fmax!( const DEFAULT_MAX_RBF_RATE: u64 = 150; // 1.5x const DEFAULT_RBF_FEE_RATE_INCREMENT: u64 = 5; const INV_REWARD_CYCLES_TESTNET: u64 = 6; +const DEFAULT_MIN_TIME_BETWEEN_BLOCKS_MS: u64 = 1000; #[derive(Clone, Deserialize, Default, Debug)] pub struct ConfigFile { @@ -2359,6 +2360,9 @@ pub struct MinerConfig { pub wait_on_signers: Duration, /// Whether to mock sign in Epoch 2.5 through the .miners and .signers contracts. This is used for testing purposes in Epoch 2.5 only. pub pre_nakamoto_mock_signing: bool, + /// The minimum time to wait between mining blocks in milliseconds. The value must be greater than or equal to 1000 ms because if a block is mined + /// within the same second as its parent, it will be rejected by the signers. 
+ pub min_time_between_blocks_ms: u64, } impl Default for MinerConfig { @@ -2390,6 +2394,7 @@ impl Default for MinerConfig { // TODO: update to a sane value based on stackerdb benchmarking wait_on_signers: Duration::from_secs(200), pre_nakamoto_mock_signing: false, // Should only default true if mining key is set + min_time_between_blocks_ms: DEFAULT_MIN_TIME_BETWEEN_BLOCKS_MS, } } } @@ -2740,6 +2745,7 @@ pub struct MinerConfigFile { pub max_reorg_depth: Option, pub wait_on_signers_ms: Option, pub pre_nakamoto_mock_signing: Option, + pub min_time_between_blocks_ms: Option, } impl MinerConfigFile { @@ -2851,6 +2857,12 @@ impl MinerConfigFile { pre_nakamoto_mock_signing: self .pre_nakamoto_mock_signing .unwrap_or(pre_nakamoto_mock_signing), // Should only default true if mining key is set + min_time_between_blocks_ms: self.min_time_between_blocks_ms.map(|ms| if ms < DEFAULT_MIN_TIME_BETWEEN_BLOCKS_MS { + warn!("miner.min_time_between_blocks_ms is less than the minimum allowed value of {DEFAULT_MIN_TIME_BETWEEN_BLOCKS_MS} ms. Using the default value instead."); + DEFAULT_MIN_TIME_BETWEEN_BLOCKS_MS + } else { + ms + }).unwrap_or(miner_default_config.min_time_between_blocks_ms), }) } } diff --git a/testnet/stacks-node/src/event_dispatcher.rs b/testnet/stacks-node/src/event_dispatcher.rs index ad0b70a2f1..34e42501ac 100644 --- a/testnet/stacks-node/src/event_dispatcher.rs +++ b/testnet/stacks-node/src/event_dispatcher.rs @@ -21,15 +21,10 @@ use std::sync::Mutex; use std::thread::sleep; use std::time::Duration; -use async_h1::client; -use async_std::future::timeout; -use async_std::net::TcpStream; -use async_std::task; use clarity::vm::analysis::contract_interface_builder::build_contract_interface; use clarity::vm::costs::ExecutionCost; use clarity::vm::events::{FTEventType, NFTEventType, STXEventType}; use clarity::vm::types::{AssetIdentifier, QualifiedContractIdentifier, Value}; -use http_types::{Method, Request, Url}; use serde_json::json; use stacks::burnchains::{PoxConstants, Txid}; use stacks::chainstate::burn::operations::BlockstackOperationType; @@ -57,13 +52,17 @@ use stacks::net::api::postblock_proposal::{ BlockValidateOk, BlockValidateReject, BlockValidateResponse, }; use stacks::net::atlas::{Attachment, AttachmentInstance}; +use stacks::net::http::HttpRequestContents; +use stacks::net::httpcore::{send_http_request, StacksHttpRequest}; use stacks::net::stackerdb::StackerDBEventDispatcher; use stacks::util::hash::to_hex; use stacks_common::bitvec::BitVec; use stacks_common::codec::StacksMessageCodec; use stacks_common::types::chainstate::{BlockHeaderHash, BurnchainHeaderHash, StacksBlockId}; +use stacks_common::types::net::PeerHost; use stacks_common::util::hash::{bytes_to_hex, Sha512Trunc256Sum}; use stacks_common::util::secp256k1::MessageSignature; +use url::Url; use super::config::{EventKeyType, EventObserverConfig}; @@ -318,65 +317,53 @@ impl EventObserver { debug!( "Event dispatcher: Sending payload"; "url" => %path, "payload" => ?payload ); - let body = match serde_json::to_vec(&payload) { - Ok(body) => body, - Err(err) => { - error!("Event dispatcher: serialization failed - {:?}", err); - return; - } - }; let url = { - let joined_components = match path.starts_with('/') { - true => format!("{}{}", &self.endpoint, path), - false => format!("{}/{}", &self.endpoint, path), + let joined_components = if path.starts_with('/') { + format!("{}{}", &self.endpoint, path) + } else { + format!("{}/{}", &self.endpoint, path) }; let url = format!("http://{}", joined_components); 
Url::parse(&url) .unwrap_or_else(|_| panic!("Event dispatcher: unable to parse {} as a URL", url)) }; - let backoff = Duration::from_millis((1.0 * 1_000.0) as u64); - let connection_timeout = Duration::from_secs(5); + let host = url.host_str().expect("Invalid URL: missing host"); + let port = url.port_or_known_default().unwrap_or(80); + let peerhost: PeerHost = format!("{host}:{port}") + .parse() + .unwrap_or(PeerHost::DNS(host.to_string(), port)); - loop { - let body = body.clone(); - let mut req = Request::new(Method::Post, url.clone()); - req.append_header("Content-Type", "application/json"); - req.set_body(body); - - let response = task::block_on(async { - let stream = - match timeout(connection_timeout, TcpStream::connect(&self.endpoint)).await { - Ok(Ok(stream)) => stream, - Ok(Err(err)) => { - warn!("Event dispatcher: connection failed - {:?}", err); - return None; - } - Err(_) => { - error!("Event dispatcher: connection attempt timed out"); - return None; - } - }; + let backoff = Duration::from_millis(1000); // 1 second - match client::connect(stream, req).await { - Ok(response) => Some(response), - Err(err) => { - warn!("Event dispatcher: rpc invocation failed - {:?}", err); - None + loop { + let mut request = StacksHttpRequest::new_for_peer( + peerhost.clone(), + "POST".into(), + url.path().into(), + HttpRequestContents::new().payload_json(payload.clone()), + ) + .unwrap_or_else(|_| panic!("FATAL: failed to encode infallible data as HTTP request")); + request.add_header("Connection".into(), "close".into()); + + match send_http_request(host, port, request, backoff) { + Ok(response) => { + if response.preamble().status_code == 200 { + debug!( + "Event dispatcher: Successful POST"; "url" => %url + ); + break; + } else { + error!( + "Event dispatcher: Failed POST"; "url" => %url, "response" => ?response.preamble() + ); } } - }); - - if let Some(response) = response { - if response.status().is_success() { - debug!( - "Event dispatcher: Successful POST"; "url" => %url - ); - break; - } else { - error!( - "Event dispatcher: Failed POST"; "url" => %url, "err" => ?response + Err(err) => { + warn!( + "Event dispatcher: connection or request failed to {}:{} - {:?}", + &host, &port, err ); } } @@ -1483,6 +1470,10 @@ impl EventDispatcher { #[cfg(test)] mod test { + use std::net::TcpListener; + use std::thread; + use std::time::Instant; + use clarity::vm::costs::ExecutionCost; use stacks::burnchains::{PoxConstants, Txid}; use stacks::chainstate::nakamoto::{NakamotoBlock, NakamotoBlockHeader}; @@ -1493,8 +1484,9 @@ mod test { use stacks::util::secp256k1::MessageSignature; use stacks_common::bitvec::BitVec; use stacks_common::types::chainstate::{BurnchainHeaderHash, StacksBlockId}; + use tiny_http::{Method, Response, Server, StatusCode}; - use crate::event_dispatcher::EventObserver; + use super::*; #[test] fn build_block_processed_event() { @@ -1615,4 +1607,146 @@ mod test { .expect("Unable to deserialize array of MessageSignature"); assert_eq!(event_signer_signature, signer_signature); } + + #[test] + fn test_send_request_connect_timeout() { + let timeout_duration = Duration::from_secs(3); + + // Start measuring time + let start_time = Instant::now(); + + let host = "10.255.255.1"; // non-routable IP for timeout + let port = 80; + + let peerhost: PeerHost = format!("{host}:{port}") + .parse() + .unwrap_or(PeerHost::DNS(host.to_string(), port)); + let mut request = StacksHttpRequest::new_for_peer( + peerhost, + "POST".into(), + "/".into(), + 
HttpRequestContents::new().payload_json(serde_json::from_slice(b"{}").unwrap()), + ) + .unwrap_or_else(|_| panic!("FATAL: failed to encode infallible data as HTTP request")); + request.add_header("Connection".into(), "close".into()); + + // Attempt to send a request with a timeout + let result = send_http_request(host, port, request, timeout_duration); + + // Measure the elapsed time + let elapsed_time = start_time.elapsed(); + + // Assert that the connection attempt timed out + assert!( + result.is_err(), + "Expected a timeout error, but got {:?}", + result + ); + assert_eq!( + result.unwrap_err().kind(), + std::io::ErrorKind::TimedOut, + "Expected a TimedOut error" + ); + + // Assert that the elapsed time is within an acceptable range + assert!( + elapsed_time >= timeout_duration, + "Timeout occurred too quickly" + ); + assert!( + elapsed_time < timeout_duration + Duration::from_secs(1), + "Timeout took too long" + ); + } + + fn get_random_port() -> u16 { + // Bind to a random port by specifying port 0, then retrieve the port assigned by the OS + let listener = TcpListener::bind("127.0.0.1:0").expect("Failed to bind to a random port"); + listener.local_addr().unwrap().port() + } + + #[test] + fn test_send_payload_success() { + let port = get_random_port(); + + // Set up a channel to notify when the server has processed the request + let (tx, rx) = channel(); + + // Start a mock server in a separate thread + let server = Server::http(format!("127.0.0.1:{}", port)).unwrap(); + thread::spawn(move || { + let request = server.recv().unwrap(); + assert_eq!(request.url(), "/test"); + assert_eq!(request.method(), &Method::Post); + + // Simulate a successful response + let response = Response::from_string("HTTP/1.1 200 OK"); + request.respond(response).unwrap(); + + // Notify the test that the request was processed + tx.send(()).unwrap(); + }); + + let observer = EventObserver { + endpoint: format!("127.0.0.1:{}", port), + }; + + let payload = json!({"key": "value"}); + + observer.send_payload(&payload, "/test"); + + // Wait for the server to process the request + rx.recv_timeout(Duration::from_secs(5)) + .expect("Server did not receive request in time"); + } + + #[test] + fn test_send_payload_retry() { + let port = get_random_port(); + + // Set up a channel to notify when the server has processed the request + let (tx, rx) = channel(); + + // Start a mock server in a separate thread + let server = Server::http(format!("127.0.0.1:{}", port)).unwrap(); + thread::spawn(move || { + let mut attempt = 0; + while let Ok(request) = server.recv() { + attempt += 1; + if attempt == 1 { + debug!("Mock server received request attempt 1"); + // Simulate a failure on the first attempt + let response = Response::new( + StatusCode(500), + vec![], + "Internal Server Error".as_bytes(), + Some(21), + None, + ); + request.respond(response).unwrap(); + } else { + debug!("Mock server received request attempt 2"); + // Simulate a successful response on the second attempt + let response = Response::from_string("HTTP/1.1 200 OK"); + request.respond(response).unwrap(); + + // Notify the test that the request was processed successfully + tx.send(()).unwrap(); + break; + } + } + }); + + let observer = EventObserver { + endpoint: format!("127.0.0.1:{}", port), + }; + + let payload = json!({"key": "value"}); + + observer.send_payload(&payload, "/test"); + + // Wait for the server to process the request + rx.recv_timeout(Duration::from_secs(5)) + .expect("Server did not receive request in time"); + } } diff --git 
a/testnet/stacks-node/src/keychain.rs b/testnet/stacks-node/src/keychain.rs index c9ed722a9e..b6df8549c4 100644 --- a/testnet/stacks-node/src/keychain.rs +++ b/testnet/stacks-node/src/keychain.rs @@ -206,7 +206,6 @@ impl Keychain { } /// Create a BurnchainOpSigner representation of this keychain - /// (this is going to be removed in 2.1) pub fn generate_op_signer(&self) -> BurnchainOpSigner { BurnchainOpSigner::new(self.get_secret_key(), false) } diff --git a/testnet/stacks-node/src/nakamoto_node/miner.rs b/testnet/stacks-node/src/nakamoto_node/miner.rs index 8036389d53..cd811a9346 100644 --- a/testnet/stacks-node/src/nakamoto_node/miner.rs +++ b/testnet/stacks-node/src/nakamoto_node/miner.rs @@ -45,6 +45,7 @@ use stacks::chainstate::stacks::{ use stacks::net::p2p::NetworkHandle; use stacks::net::stackerdb::StackerDBs; use stacks::net::{NakamotoBlocksData, StacksMessageType}; +use stacks::util::get_epoch_time_secs; use stacks::util::secp256k1::MessageSignature; use stacks_common::codec::read_next; use stacks_common::types::chainstate::{StacksAddress, StacksBlockId}; @@ -317,8 +318,17 @@ impl BlockMinerThread { } } } + match self.mine_block(&stackerdbs) { - Ok(x) => break Some(x), + Ok(x) => { + if !self.validate_timestamp(&x)? { + info!("Block mined too quickly. Will try again."; + "block_timestamp" => x.header.timestamp, + ); + continue; + } + break Some(x); + } Err(NakamotoNodeError::MiningFailure(ChainstateError::MinerAborted)) => { info!("Miner interrupted while mining, will try again"); // sleep, and try again. if the miner was interrupted because the burnchain @@ -1037,6 +1047,42 @@ impl BlockMinerThread { Some(vrf_proof) } + /// Check that the provided block is not mined too quickly after the parent block. + /// This is to ensure that the signers do not reject the block due to the block being mined within the same second as the parent block. + fn validate_timestamp(&self, x: &NakamotoBlock) -> Result<bool, NakamotoNodeError> { + let chain_state = neon_node::open_chainstate_with_faults(&self.config) + .expect("FATAL: could not open chainstate DB"); + let stacks_parent_header = + NakamotoChainState::get_block_header(chain_state.db(), &x.header.parent_block_id) + .map_err(|e| { + error!( + "Could not query header info for parent block ID {}: {:?}", + &x.header.parent_block_id, &e + ); + NakamotoNodeError::ParentNotFound + })? + .ok_or_else(|| { + error!( + "No header info for parent block ID {}", + &x.header.parent_block_id + ); + NakamotoNodeError::ParentNotFound + })?; + let current_timestamp = get_epoch_time_secs(); + let time_since_parent_ms = + current_timestamp.saturating_sub(stacks_parent_header.burn_header_timestamp) * 1000; + if time_since_parent_ms < self.config.miner.min_time_between_blocks_ms { + debug!("Parent block mined {time_since_parent_ms} ms ago. 
Required minimum gap between blocks is {} ms", self.config.miner.min_time_between_blocks_ms; + "current_timestamp" => current_timestamp, + "parent_block_id" => %stacks_parent_header.index_block_hash(), + "parent_block_height" => stacks_parent_header.stacks_block_height, + "parent_block_timestamp" => stacks_parent_header.burn_header_timestamp, + ); + return Ok(false); + } + Ok(true) + } + // TODO: add tests from mutation testing results #4869 #[cfg_attr(test, mutants::skip)] /// Try to mine a Stacks block by assembling one from mempool transactions and sending a diff --git a/testnet/stacks-node/src/nakamoto_node/peer.rs b/testnet/stacks-node/src/nakamoto_node/peer.rs index facb1dd835..004023ea26 100644 --- a/testnet/stacks-node/src/nakamoto_node/peer.rs +++ b/testnet/stacks-node/src/nakamoto_node/peer.rs @@ -16,8 +16,8 @@ use std::collections::VecDeque; use std::net::SocketAddr; use std::sync::mpsc::TrySendError; +use std::thread; use std::time::Duration; -use std::{cmp, thread}; use stacks::burnchains::db::BurnchainHeaderReader; use stacks::burnchains::PoxConstants; diff --git a/testnet/stacks-node/src/neon_node.rs b/testnet/stacks-node/src/neon_node.rs index 21ba451e6c..d627e081b2 100644 --- a/testnet/stacks-node/src/neon_node.rs +++ b/testnet/stacks-node/src/neon_node.rs @@ -805,7 +805,7 @@ impl MicroblockMinerThread { &mined_microblock.block_hash() ); - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] { use std::path::Path; if let Ok(path) = std::env::var("STACKS_BAD_BLOCKS_DIR") { @@ -1760,7 +1760,7 @@ impl BlockMinerThread { /// /// In testing, we ignore the parent stacks block hash because we don't have an easy way to /// reproduce it in integration tests. - #[cfg(not(any(test, feature = "testing")))] + #[cfg(not(test))] fn make_microblock_private_key( &mut self, parent_stacks_hash: &StacksBlockId, @@ -1773,7 +1773,7 @@ impl BlockMinerThread { /// Get the microblock private key we'll be using for this tenure, should we win. 
/// Return the private key on success - #[cfg(any(test, feature = "testing"))] + #[cfg(test)] fn make_microblock_private_key( &mut self, _parent_stacks_hash: &StacksBlockId, diff --git a/testnet/stacks-node/src/tests/bitcoin_regtest.rs b/testnet/stacks-node/src/tests/bitcoin_regtest.rs index 3fbfa51986..6619152f9f 100644 --- a/testnet/stacks-node/src/tests/bitcoin_regtest.rs +++ b/testnet/stacks-node/src/tests/bitcoin_regtest.rs @@ -44,6 +44,22 @@ impl BitcoinCoreController { } } + fn add_rpc_cli_args(&self, command: &mut Command) { + command.arg(format!("-rpcport={}", self.config.burnchain.rpc_port)); + + match ( + &self.config.burnchain.username, + &self.config.burnchain.password, + ) { + (Some(username), Some(password)) => { + command + .arg(format!("-rpcuser={username}")) + .arg(format!("-rpcpassword={password}")); + } + _ => {} + } + } + pub fn start_bitcoind(&mut self) -> BitcoinResult<()> { std::fs::create_dir_all(&self.config.get_burnchain_path_str()).unwrap(); @@ -58,30 +74,16 @@ impl BitcoinCoreController { .arg("-server=1") .arg("-listenonion=0") .arg("-rpcbind=127.0.0.1") - .arg(&format!("-port={}", self.config.burnchain.peer_port)) - .arg(&format!( - "-datadir={}", - self.config.get_burnchain_path_str() - )) - .arg(&format!("-rpcport={}", self.config.burnchain.rpc_port)); + .arg(format!("-port={}", self.config.burnchain.peer_port)) + .arg(format!("-datadir={}", self.config.get_burnchain_path_str())); - match ( - &self.config.burnchain.username, - &self.config.burnchain.password, - ) { - (Some(username), Some(password)) => { - command - .arg(&format!("-rpcuser={}", username)) - .arg(&format!("-rpcpassword={}", password)); - } - _ => {} - } + self.add_rpc_cli_args(&mut command); - eprintln!("bitcoind spawn: {:?}", command); + eprintln!("bitcoind spawn: {command:?}"); let mut process = match command.spawn() { Ok(child) => child, - Err(e) => return Err(BitcoinCoreError::SpawnFailed(format!("{:?}", e))), + Err(e) => return Err(BitcoinCoreError::SpawnFailed(format!("{e:?}"))), }; let mut out_reader = BufReader::new(process.stdout.take().unwrap()); @@ -108,17 +110,15 @@ impl BitcoinCoreController { pub fn stop_bitcoind(&mut self) -> Result<(), BitcoinCoreError> { if let Some(_) = self.bitcoind_process.take() { let mut command = Command::new("bitcoin-cli"); - command - .stdout(Stdio::piped()) - .arg("-rpcconnect=127.0.0.1") - .arg("-rpcport=8332") - .arg("-rpcuser=neon-tester") - .arg("-rpcpassword=neon-tester-pass") - .arg("stop"); + command.stdout(Stdio::piped()).arg("-rpcconnect=127.0.0.1"); + + self.add_rpc_cli_args(&mut command); + + command.arg("stop"); let mut process = match command.spawn() { Ok(child) => child, - Err(e) => return Err(BitcoinCoreError::SpawnFailed(format!("{:?}", e))), + Err(e) => return Err(BitcoinCoreError::SpawnFailed(format!("{e:?}"))), }; let mut out_reader = BufReader::new(process.stdout.take().unwrap()); @@ -127,7 +127,7 @@ impl BitcoinCoreController { if bytes_read == 0 { break; } - eprintln!("{}", &line); + eprintln!("{line}"); } } Ok(()) diff --git a/testnet/stacks-node/src/tests/epoch_21.rs b/testnet/stacks-node/src/tests/epoch_21.rs index 6696467930..bb168b28b9 100644 --- a/testnet/stacks-node/src/tests/epoch_21.rs +++ b/testnet/stacks-node/src/tests/epoch_21.rs @@ -1296,12 +1296,7 @@ fn transition_adds_get_pox_addr_recipients() { // NOTE: there's an even number of payouts here, so this works eprintln!("payout at {} = {}", burn_block_height, &payout); - if Burnchain::static_is_in_prepare_phase( - 0, - pox_constants.reward_cycle_length as u64, - 
pox_constants.prepare_length.into(), - burn_block_height, - ) { + if pox_constants.is_in_prepare_phase(0, burn_block_height) { // in prepare phase eprintln!("{} in prepare phase", burn_block_height); assert_eq!(payout, conf.burnchain.burn_fee_cap as u128); diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index 24b7745419..a658bfbcf6 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -511,7 +511,7 @@ pub fn read_and_sign_block_proposal( let reward_set = load_nakamoto_reward_set( burnchain - .pox_reward_cycle(tip.block_height.saturating_add(1)) + .block_height_to_reward_cycle(tip.block_height) .unwrap(), &tip.sortition_id, &burnchain, diff --git a/testnet/stacks-node/src/tests/neon_integrations.rs b/testnet/stacks-node/src/tests/neon_integrations.rs index b876257bf2..0905fb1f60 100644 --- a/testnet/stacks-node/src/tests/neon_integrations.rs +++ b/testnet/stacks-node/src/tests/neon_integrations.rs @@ -12784,7 +12784,6 @@ fn mock_miner_replay() { // ---------- Test finished, clean up ---------- - btcd_controller.stop_bitcoind().unwrap(); miner_channel.stop_chains_coordinator(); follower_channel.stop_chains_coordinator(); } diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index cb232edd60..0e38f13ea8 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -278,12 +278,12 @@ impl SignerTest { self.run_until_epoch_3_boundary(); let commits_submitted = self.running_nodes.commits_submitted.clone(); - + let commits_before = commits_submitted.load(Ordering::SeqCst); info!("Waiting 1 burnchain block for miner VRF key confirmation"); // Wait one block to confirm the VRF register, wait until a block commit is submitted next_block_and(&mut self.running_nodes.btc_regtest_controller, 60, || { let commits_count = commits_submitted.load(Ordering::SeqCst); - Ok(commits_count >= 1) + Ok(commits_count > commits_before) }) .unwrap(); info!("Ready to mine Nakamoto blocks!"); @@ -293,6 +293,8 @@ impl SignerTest { fn mine_and_verify_confirmed_naka_block(&mut self, timeout: Duration, num_signers: usize) { info!("------------------------- Try mining one block -------------------------"); + let reward_cycle = self.get_current_reward_cycle(); + self.mine_nakamoto_block(timeout); // Verify that the signers accepted the proposed block, sending back a validate ok response @@ -310,8 +312,10 @@ impl SignerTest { // NOTE: signature.len() does not need to equal signers.len(); the stacks miner can finish the block // whenever it has crossed the threshold. 
assert!(signature.len() >= num_signers * 7 / 10); - - let reward_cycle = self.get_current_reward_cycle(); + info!( + "Verifying signatures against signers for reward cycle {:?}", + reward_cycle + ); let signers = self.get_reward_set_signers(reward_cycle); // Verify that the signers signed the proposed block @@ -1911,7 +1915,10 @@ fn end_of_tenure() { let http_origin = format!("http://{}", &signer_test.running_nodes.conf.node.rpc_bind); let long_timeout = Duration::from_secs(200); let short_timeout = Duration::from_secs(20); - + let blocks_before = signer_test + .running_nodes + .nakamoto_blocks_mined + .load(Ordering::SeqCst); signer_test.boot_to_epoch_3(); let curr_reward_cycle = signer_test.get_current_reward_cycle(); // Advance to one before the next reward cycle to ensure we are on the reward cycle boundary @@ -1924,7 +1931,18 @@ - 2; // give the system a chance to mine a Nakamoto block - sleep_ms(30_000); + // But it doesn't have to mine one for this test to succeed? + let start = Instant::now(); + while start.elapsed() <= short_timeout { + let mined_blocks = signer_test + .running_nodes + .nakamoto_blocks_mined + .load(Ordering::SeqCst); + if mined_blocks > blocks_before { + break; + } + sleep_ms(100); + } info!("------------------------- Test Mine to Next Reward Cycle Boundary -------------------------"); signer_test.run_until_burnchain_height_nakamoto( @@ -1932,7 +1950,7 @@ final_reward_cycle_height_boundary, num_signers, ); - println!("Advanced to nexct reward cycle boundary: {final_reward_cycle_height_boundary}"); + println!("Advanced to next reward cycle boundary: {final_reward_cycle_height_boundary}"); assert_eq!( signer_test.get_current_reward_cycle(), final_reward_cycle - 1 @@ -1973,39 +1991,20 @@ fn end_of_tenure() { std::thread::sleep(Duration::from_millis(100)); } - info!("Triggering a new block to be mined"); - - // Mine a block into the next reward cycle - let commits_before = signer_test - .running_nodes - .commits_submitted - .load(Ordering::SeqCst); - next_block_and( - &mut signer_test.running_nodes.btc_regtest_controller, - 10, - || { - let commits_count = signer_test - .running_nodes - .commits_submitted - .load(Ordering::SeqCst); - Ok(commits_count > commits_before) - }, - ) - .unwrap(); - - // Mine a few blocks so we are well into the next reward cycle - for _ in 0..2 { + while signer_test.get_current_reward_cycle() != final_reward_cycle { next_block_and( &mut signer_test.running_nodes.btc_regtest_controller, 10, || Ok(true), ) .unwrap(); + assert!( + start_time.elapsed() <= short_timeout, + "Timed out waiting to enter the next reward cycle" + ); + std::thread::sleep(Duration::from_millis(100)); } - sleep_ms(10_000); - assert_eq!(signer_test.get_current_reward_cycle(), final_reward_cycle); - while test_observer::get_burn_blocks() .last() .unwrap() @@ -2844,3 +2843,105 @@ fn signer_set_rollover() { assert!(signer.stop().is_none()); } } + +#[test] +#[ignore] +/// This test checks that the miner will not mine a new Nakamoto block until at least `min_time_between_blocks_ms` milliseconds have elapsed since the parent block was mined.
+fn min_gap_between_blocks() { + if env::var("BITCOIND_TEST") != Ok("1".into()) { + return; + } + + tracing_subscriber::registry() + .with(fmt::layer()) + .with(EnvFilter::from_default_env()) + .init(); + + info!("------------------------- Test Setup -------------------------"); + let num_signers = 5; + let sender_sk = Secp256k1PrivateKey::new(); + let sender_addr = tests::to_addr(&sender_sk); + let send_amt = 100; + let send_fee = 180; + let recipient = PrincipalData::from(StacksAddress::burn_address(false)); + let time_between_blocks_ms = 10_000; + let mut signer_test: SignerTest = SignerTest::new_with_config_modifications( + num_signers, + vec![(sender_addr.clone(), send_amt + send_fee)], + Some(Duration::from_secs(15)), + |_config| {}, + |config| { + config.miner.min_time_between_blocks_ms = time_between_blocks_ms; + }, + &[], + ); + + let http_origin = format!("http://{}", &signer_test.running_nodes.conf.node.rpc_bind); + + signer_test.boot_to_epoch_3(); + + let proposals_before = signer_test + .running_nodes + .nakamoto_blocks_proposed + .load(Ordering::SeqCst); + + let info_before = get_chain_info(&signer_test.running_nodes.conf); + + // submit a tx so that the miner will mine a block + let sender_nonce = 0; + let transfer_tx = + make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); + submit_tx(&http_origin, &transfer_tx); + + info!("Submitted transfer tx and waiting for block proposal. Ensure it does not arrive before the gap is exceeded"); + let start_time = Instant::now(); + loop { + let blocks_proposed = signer_test + .running_nodes + .nakamoto_blocks_proposed + .load(Ordering::SeqCst); + if blocks_proposed > proposals_before { + assert!( + start_time.elapsed().as_millis() >= time_between_blocks_ms.into(), + "Block proposed before gap was exceeded" + ); + break; + } + std::thread::sleep(Duration::from_millis(100)); + } + + debug!("Ensure that the block is mined after the gap is exceeded"); + + let start = Instant::now(); + let duration = 30; + let blocks_before = signer_test + .running_nodes + .nakamoto_blocks_mined + .load(Ordering::SeqCst); + loop { + let blocks_mined = signer_test + .running_nodes + .nakamoto_blocks_mined + .load(Ordering::SeqCst); + + let info = get_chain_info(&signer_test.running_nodes.conf); + if blocks_mined > blocks_before + && info.stacks_tip_height == info_before.stacks_tip_height + 1 + { + break; + } + + debug!( + "blocks_mined: {},{}, stacks_tip_height: {},{}", + blocks_mined, blocks_before, info_before.stacks_tip_height, info.stacks_tip_height + ); + + std::thread::sleep(Duration::from_millis(100)); + assert!( + start.elapsed() < Duration::from_secs(duration), + "Block not mined within timeout" + ); + } + + signer_test.shutdown(); +}
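The changes above combine into a simple pacing rule: MinerConfigFile clamps `miner.min_time_between_blocks_ms` to at least `DEFAULT_MIN_TIME_BETWEEN_BLOCKS_MS`, and `BlockMinerThread::validate_timestamp` makes the miner retry until that many milliseconds have elapsed since the parent block's timestamp. The minimal, self-contained Rust sketch below restates that logic for illustration only; the helper names (`effective_min_gap_ms`, `gap_satisfied`) and the 1_000 ms floor used in `main` are assumptions for this example, not part of the stacks-node code.

// Illustrative sketch only: it mirrors the config clamping and the timestamp gap
// check introduced in this diff. Helper names and the floor value are assumptions.
fn effective_min_gap_ms(configured_ms: Option<u64>, floor_ms: u64) -> u64 {
    // As in the MinerConfigFile handling above, a configured value below the
    // floor falls back to the default minimum.
    match configured_ms {
        Some(ms) if ms >= floor_ms => ms,
        _ => floor_ms,
    }
}

fn gap_satisfied(now_secs: u64, parent_timestamp_secs: u64, min_gap_ms: u64) -> bool {
    // As in validate_timestamp above, the time elapsed since the parent block's
    // timestamp (converted to ms) must reach the configured minimum.
    now_secs.saturating_sub(parent_timestamp_secs) * 1_000 >= min_gap_ms
}

fn main() {
    let floor_ms = 1_000; // assumed floor, for illustration only
    let min_gap_ms = effective_min_gap_ms(Some(10_000), floor_ms);
    assert_eq!(min_gap_ms, 10_000);
    // Parent block mined 3 seconds ago: the miner logs and retries.
    assert!(!gap_satisfied(1_003, 1_000, min_gap_ms));
    // Ten seconds after the parent block, the gap is satisfied.
    assert!(gap_satisfied(1_010, 1_000, min_gap_ms));
}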