diff --git a/CHANGELOG.md b/CHANGELOG.md index ccb6505c4..2a2352883 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -6,6 +6,7 @@ - Added `GetAccountProofs` endpoint (#506). - Support Https in endpoint configuration (#556). +- Upgrade `block-producer` from FIFO queue to mempool dependency graph (#562). ### Changes diff --git a/Cargo.lock b/Cargo.lock index 0fd48b068..d18f90e80 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -683,6 +683,12 @@ dependencies = [ "syn", ] +[[package]] +name = "diff" +version = "0.1.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "56254986775e3233ffa9c4d7d3faaf6d36a2c09d30b20687e9f88bc8bafc16c8" + [[package]] name = "digest" version = "0.10.7" @@ -1585,6 +1591,8 @@ dependencies = [ "miden-processor", "miden-stdlib", "miden-tx", + "pretty_assertions", + "rand", "rand_chacha", "serde", "thiserror 2.0.3", @@ -2166,6 +2174,16 @@ version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "925383efa346730478fb4838dbe9137d2a47675ad789c546d150a6e1dd4ab31c" +[[package]] +name = "pretty_assertions" +version = "1.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3ae130e2f271fbc2ac3a40fb1d07180839cdbbe443c7a27e1e3c13c5cac0116d" +dependencies = [ + "diff", + "yansi", +] + [[package]] name = "prettyplease" version = "0.2.25" diff --git a/Makefile b/Makefile index b10ad72a9..afa1a2da9 100644 --- a/Makefile +++ b/Makefile @@ -13,7 +13,7 @@ BUILD_PROTO=BUILD_PROTO=1 .PHONY: clippy clippy: ## Runs Clippy with configs - cargo clippy --locked --workspace --all-targets --all-features -- -D warnings --allow clippy::arc_with_non_send_sync + cargo clippy --locked --workspace --all-targets --all-features -- -D warnings .PHONY: fix diff --git a/bin/node/src/commands/start.rs b/bin/node/src/commands/start.rs index acbd356ac..f1e46fbf9 100644 --- a/bin/node/src/commands/start.rs +++ b/bin/node/src/commands/start.rs @@ -1,3 +1,5 @@ +use std::collections::HashMap; + use anyhow::{Context, Result}; use miden_node_block_producer::server::BlockProducer; use miden_node_rpc::server::Rpc; @@ -16,22 +18,41 @@ pub async fn start_node(config: NodeConfig) -> Result<()> { // Start store. The store endpoint is available after loading completes. let store = Store::init(store).await.context("Loading store")?; - join_set.spawn(async move { store.serve().await.context("Serving store") }); + let store_id = join_set.spawn(async move { store.serve().await.context("Serving store") }).id(); // Start block-producer. The block-producer's endpoint is available after loading completes. let block_producer = BlockProducer::init(block_producer).await.context("Loading block-producer")?; - join_set.spawn(async move { block_producer.serve().await.context("Serving block-producer") }); + let block_producer_id = join_set + .spawn(async move { block_producer.serve().await.context("Serving block-producer") }) + .id(); // Start RPC component. let rpc = Rpc::init(rpc).await.context("Loading RPC")?; - join_set.spawn(async move { rpc.serve().await.context("Serving RPC") }); - - // block on all tasks - while let Some(res) = join_set.join_next().await { - // For now, if one of the components fails, crash the node - res??; - } - - Ok(()) + let rpc_id = join_set.spawn(async move { rpc.serve().await.context("Serving RPC") }).id(); + + // Lookup table so we can identify the failed component. 
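+    // Note: `JoinSet::spawn` returns an `AbortHandle` whose `id()` matches the task ID that
+    // `join_next_with_id` reports on completion, letting us attribute a failure to a component.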
+ let component_ids = HashMap::from([ + (store_id, "store"), + (block_producer_id, "block-producer"), + (rpc_id, "rpc"), + ]); + + // SAFETY: The joinset is definitely not empty. + let component_result = join_set.join_next_with_id().await.unwrap(); + + // We expect components to run indefinitely, so we treat any return as fatal. + // + // Map all outcomes to an error, and provide component context. + let (id, err) = match component_result { + Ok((id, Ok(_))) => (id, Err(anyhow::anyhow!("Component completed unexpectedly"))), + Ok((id, Err(err))) => (id, Err(err)), + Err(join_err) => (join_err.id(), Err(join_err).context("Joining component task")), + }; + let component = component_ids.get(&id).unwrap_or(&"unknown"); + + // We could abort and gracefully shutdown the other components, but since we're crashing the + // node there is no point. + + err.context(format!("Component {component} failed")) } diff --git a/crates/block-producer/Cargo.toml b/crates/block-producer/Cargo.toml index 9c9aeefc7..52c898f8c 100644 --- a/crates/block-producer/Cargo.toml +++ b/crates/block-producer/Cargo.toml @@ -24,6 +24,7 @@ miden-objects = { workspace = true } miden-processor = { workspace = true } miden-stdlib = { workspace = true } miden-tx = { workspace = true } +rand = { version = "0.8" } serde = { version = "1.0", features = ["derive"] } thiserror = { workspace = true } tokio = { workspace = true, features = ["rt-multi-thread", "net", "macros", "sync", "time"] } @@ -38,6 +39,7 @@ miden-lib = { workspace = true, features = ["testing"] } miden-node-test-macro = { path = "../test-macro" } miden-objects = { workspace = true, features = ["testing"] } miden-tx = { workspace = true, features = ["testing"] } +pretty_assertions = "1.4" rand_chacha = { version = "0.3", default-features = false } tokio = { workspace = true, features = ["test-util"] } winterfell = { version = "0.10" } diff --git a/crates/block-producer/src/batch_builder/batch.rs b/crates/block-producer/src/batch_builder/batch.rs index e8a13fb36..225112d00 100644 --- a/crates/block-producer/src/batch_builder/batch.rs +++ b/crates/block-producer/src/batch_builder/batch.rs @@ -9,21 +9,19 @@ use miden_objects::{ batches::BatchNoteTree, crypto::hash::blake::{Blake3Digest, Blake3_256}, notes::{NoteHeader, NoteId, Nullifier}, - transaction::{InputNoteCommitment, OutputNote, TransactionId}, - AccountDeltaError, Digest, MAX_ACCOUNTS_PER_BATCH, MAX_INPUT_NOTES_PER_BATCH, - MAX_OUTPUT_NOTES_PER_BATCH, + transaction::{InputNoteCommitment, OutputNote, ProvenTransaction, TransactionId}, + AccountDeltaError, Digest, }; use tracing::instrument; -use crate::{errors::BuildBatchError, ProvenTransaction}; +use crate::errors::BuildBatchError; pub type BatchId = Blake3Digest<32>; // TRANSACTION BATCH // ================================================================================================ -/// A batch of transactions that share a common proof. For any given account, at most 1 transaction -/// in the batch must be addressing that account (issue: #186). +/// A batch of transactions that share a common proof. /// /// Note: Until recursive proofs are available in the Miden VM, we don't include the common proof. #[derive(Debug, Clone, PartialEq, Eq)] @@ -77,13 +75,11 @@ impl TransactionBatch { /// for transforming unauthenticated notes into authenticated notes. /// /// # Errors + /// /// Returns an error if: - /// - The number of output notes across all transactions exceeds 4096. 
/// - There are duplicated output notes or unauthenticated notes found across all transactions /// in the batch. /// - Hashes for corresponding input notes and output notes don't match. - /// - /// TODO: enforce limit on the number of created nullifiers. #[instrument(target = "miden-block-producer", name = "new_batch", skip_all, err)] pub fn new( txs: Vec, @@ -102,11 +98,7 @@ impl TransactionBatch { vacant.insert(AccountUpdate::new(tx)); }, Entry::Occupied(occupied) => occupied.into_mut().merge_tx(tx).map_err(|error| { - BuildBatchError::AccountUpdateError { - account_id: tx.account_id(), - error, - txs: txs.clone(), - } + BuildBatchError::AccountUpdateError { account_id: tx.account_id(), error } })?, }; @@ -114,15 +106,11 @@ impl TransactionBatch { for note in tx.get_unauthenticated_notes() { let id = note.id(); if !unauthenticated_input_notes.insert(id) { - return Err(BuildBatchError::DuplicateUnauthenticatedNote(id, txs.clone())); + return Err(BuildBatchError::DuplicateUnauthenticatedNote(id)); } } } - if updated_accounts.len() > MAX_ACCOUNTS_PER_BATCH { - return Err(BuildBatchError::TooManyAccountsInBatch(txs)); - } - // Populate batch produced nullifiers and match output notes with corresponding // unauthenticated input notes in the same batch, which are removed from the unauthenticated // input notes set. @@ -137,7 +125,7 @@ impl TransactionBatch { // Header is presented only for unauthenticated input notes. let input_note = match input_note.header() { Some(input_note_header) => { - if output_notes.remove_note(input_note_header, &txs)? { + if output_notes.remove_note(input_note_header)? { continue; } @@ -155,16 +143,8 @@ impl TransactionBatch { input_notes.push(input_note) } - if input_notes.len() > MAX_INPUT_NOTES_PER_BATCH { - return Err(BuildBatchError::TooManyInputNotes(input_notes.len(), txs)); - } - let output_notes = output_notes.into_notes(); - if output_notes.len() > MAX_OUTPUT_NOTES_PER_BATCH { - return Err(BuildBatchError::TooManyNotesCreated(output_notes.len(), txs)); - } - // Build the output notes SMT. 
let output_notes_smt = BatchNoteTree::with_contiguous_leaves( output_notes.iter().map(|note| (note.id(), note.metadata())), @@ -250,7 +230,7 @@ impl OutputNoteTracker { for tx in txs { for note in tx.output_notes().iter() { if output_note_index.insert(note.id(), output_notes.len()).is_some() { - return Err(BuildBatchError::DuplicateOutputNote(note.id(), txs.to_vec())); + return Err(BuildBatchError::DuplicateOutputNote(note.id())); } output_notes.push(Some(note.clone())); } @@ -259,11 +239,7 @@ impl OutputNoteTracker { Ok(Self { output_notes, output_note_index }) } - pub fn remove_note( - &mut self, - input_note_header: &NoteHeader, - txs: &[ProvenTransaction], - ) -> Result { + pub fn remove_note(&mut self, input_note_header: &NoteHeader) -> Result { let id = input_note_header.id(); if let Some(note_index) = self.output_note_index.remove(&id) { if let Some(output_note) = mem::take(&mut self.output_notes[note_index]) { @@ -274,7 +250,6 @@ impl OutputNoteTracker { id, input_hash, output_hash, - txs: txs.to_vec(), }); } @@ -323,7 +298,7 @@ mod tests { )); match OutputNoteTracker::new(&txs) { - Err(BuildBatchError::DuplicateOutputNote(note_id, _)) => { + Err(BuildBatchError::DuplicateOutputNote(note_id)) => { assert_eq!(note_id, duplicate_output_note.id()) }, res => panic!("Unexpected result: {res:?}"), @@ -337,8 +312,8 @@ mod tests { let note_to_remove = mock_note(4); - assert!(tracker.remove_note(note_to_remove.header(), &txs).unwrap()); - assert!(!tracker.remove_note(note_to_remove.header(), &txs).unwrap()); + assert!(tracker.remove_note(note_to_remove.header()).unwrap()); + assert!(!tracker.remove_note(note_to_remove.header()).unwrap()); // Check that output notes are in the expected order and consumed note was removed assert_eq!( @@ -359,7 +334,7 @@ mod tests { let duplicate_note = mock_note(5); txs.push(mock_proven_tx(4, vec![duplicate_note.clone()], vec![mock_output_note(9)])); match TransactionBatch::new(txs, Default::default()) { - Err(BuildBatchError::DuplicateUnauthenticatedNote(note_id, _)) => { + Err(BuildBatchError::DuplicateUnauthenticatedNote(note_id)) => { assert_eq!(note_id, duplicate_note.id()) }, res => panic!("Unexpected result: {res:?}"), diff --git a/crates/block-producer/src/batch_builder/mod.rs b/crates/block-producer/src/batch_builder/mod.rs index 8151b1442..e6399ae9c 100644 --- a/crates/block-producer/src/batch_builder/mod.rs +++ b/crates/block-producer/src/batch_builder/mod.rs @@ -1,202 +1,261 @@ -use std::{cmp::min, collections::BTreeSet, sync::Arc, time::Duration}; +use std::{num::NonZeroUsize, ops::Range, time::Duration}; -use async_trait::async_trait; -use miden_objects::{notes::NoteId, transaction::OutputNote}; -use tokio::time; +use rand::Rng; +use tokio::{task::JoinSet, time}; use tracing::{debug, info, instrument, Span}; -use crate::{block_builder::BlockBuilder, ProvenTransaction, SharedRwVec, COMPONENT}; - -#[cfg(test)] -mod tests; +use crate::{ + domain::transaction::AuthenticatedTransaction, + mempool::{BatchJobId, SharedMempool}, + COMPONENT, SERVER_BUILD_BATCH_FREQUENCY, +}; pub mod batch; pub use batch::TransactionBatch; use miden_node_utils::formatting::{format_array, format_blake3_digest}; -use crate::{errors::BuildBatchError, store::Store}; +use crate::errors::BuildBatchError; // BATCH BUILDER // ================================================================================================ -/// Abstraction over batch proving of transactions. -/// -/// Transactions are aggregated into batches prior to being added to blocks. 
This trait abstracts -/// over this responsibility. The trait's goal is to be implementation agnostic, allowing for -/// multiple implementations, e.g.: +/// Builds [TransactionBatch] from sets of transactions. /// -/// - in-process cpu based prover -/// - out-of-process gpu based prover -/// - distributed prover on another machine -#[async_trait] -pub trait BatchBuilder: Send + Sync + 'static { - /// Start proving of a new batch. - async fn build_batch(&self, txs: Vec) -> Result<(), BuildBatchError>; +/// Transaction sets are pulled from the [Mempool] at a configurable interval, and passed to a pool +/// of provers for proof generation. Proving is currently unimplemented and is instead simulated via +/// the given proof time and failure rate. +pub struct BatchBuilder { + pub batch_interval: Duration, + pub workers: NonZeroUsize, + /// Used to simulate batch proving by sleeping for a random duration selected from this range. + pub simulated_proof_time: Range, + /// Simulated block failure rate as a percentage. + /// + /// Note: this _must_ be sign positive and less than 1.0. + pub failure_rate: f32, } -// DEFAULT BATCH BUILDER -// ================================================================================================ +impl Default for BatchBuilder { + fn default() -> Self { + Self { + batch_interval: SERVER_BUILD_BATCH_FREQUENCY, + // SAFETY: 2 is non-zero so this always succeeds. + workers: 2.try_into().unwrap(), + // Note: The range cannot be empty. + simulated_proof_time: Duration::ZERO..Duration::from_millis(1), + failure_rate: 0.0, + } + } +} -#[derive(Debug, Clone, PartialEq, Eq)] -pub struct DefaultBatchBuilderOptions { - /// The frequency at which blocks are created - pub block_frequency: Duration, +impl BatchBuilder { + /// Starts the [BatchBuilder], creating and proving batches at the configured interval. + /// + /// A pool of batch-proving workers is spawned, which are fed new batch jobs periodically. + /// A batch is skipped if there are no available workers, or if there are no transactions + /// available to batch. + pub async fn run(self, mempool: SharedMempool) { + assert!( + self.failure_rate < 1.0 && self.failure_rate.is_sign_positive(), + "Failure rate must be a percentage" + ); - /// Maximum number of batches in any given block - pub max_batches_per_block: usize, -} + let mut interval = tokio::time::interval(self.batch_interval); + interval.set_missed_tick_behavior(time::MissedTickBehavior::Delay); + + let mut worker_pool = + WorkerPool::new(self.workers, self.simulated_proof_time, self.failure_rate); + + loop { + tokio::select! { + _ = interval.tick() => { + if !worker_pool.has_capacity() { + tracing::info!("All batch workers occupied."); + continue; + } + + // Transactions available? 
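+                    // `select_batch` returns `None` when the mempool has no transactions ready.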
+ let Some((batch_id, transactions)) = + mempool.lock().await.select_batch() + else { + tracing::info!("No transactions available for batch."); + continue; + }; -pub struct DefaultBatchBuilder { - store: Arc, + worker_pool.spawn(batch_id, transactions).expect("Worker capacity was checked"); + }, + result = worker_pool.join_next() => { + let mut mempool = mempool.lock().await; + match result { + Err((batch_id, err)) => { + tracing::warn!(%batch_id, %err, "Batch job failed."); + mempool.batch_failed(batch_id); + }, + Ok((batch_id, batch)) => { + mempool.batch_proved(batch_id, batch); + } + } + } + } + } + } +} - block_builder: Arc, +// BATCH WORKER +// ================================================================================================ - options: DefaultBatchBuilderOptions, +type BatchResult = Result<(BatchJobId, TransactionBatch), (BatchJobId, BuildBatchError)>; - /// Batches ready to be included in a block - ready_batches: SharedRwVec, +/// Represents a pool of batch provers. +/// +/// Effectively a wrapper around tokio's JoinSet that remains pending if the set is empty, +/// instead of returning None. +struct WorkerPool { + in_progress: JoinSet, + simulated_proof_time: Range, + failure_rate: f32, + /// Maximum number of workers allowed. + capacity: NonZeroUsize, + /// Maps spawned tasks to their job ID. + /// + /// This allows us to map panic'd tasks to the job ID. Uses [Vec] because the task ID does not + /// implement [Ord]. Given that the expected capacity is relatively low, this has no real + /// impact beyond ergonomics. + task_map: Vec<(tokio::task::Id, BatchJobId)>, } -impl DefaultBatchBuilder -where - S: Store, - BB: BlockBuilder, -{ - // CONSTRUCTOR - // -------------------------------------------------------------------------------------------- - /// Returns an new [BatchBuilder] instantiated with the provided [BlockBuilder] and the - /// specified options. - pub fn new(store: Arc, block_builder: Arc, options: DefaultBatchBuilderOptions) -> Self { +impl WorkerPool { + fn new( + capacity: NonZeroUsize, + simulated_proof_time: Range, + failure_rate: f32, + ) -> Self { Self { - store, - block_builder, - options, - ready_batches: Default::default(), + simulated_proof_time, + failure_rate, + capacity, + in_progress: JoinSet::default(), + task_map: Default::default(), } } - // BATCH BUILDER STARTER - // -------------------------------------------------------------------------------------------- - pub async fn run(self: Arc) { - let mut interval = time::interval(self.options.block_frequency); + /// Returns the next batch proof result. + /// + /// Will return pending if there are no jobs in progress (unlike tokio's [JoinSet::join_next] + /// which returns an option). + async fn join_next(&mut self) -> BatchResult { + if self.in_progress.is_empty() { + return std::future::pending().await; + } - info!(target: COMPONENT, period_ms = interval.period().as_millis(), "Batch builder started"); + let result = self + .in_progress + .join_next() + .await + .expect("JoinSet::join_next must be Some as the set is not empty") + .map_err(|join_err| { + // Map task ID to job ID as otherwise the caller can't tell which batch failed. + // + // Note that the mapping cleanup happens lower down. + let batch_id = self + .task_map + .iter() + .find(|(task_id, _)| &join_err.id() == task_id) + .expect("Task ID should be in the task map") + .1; + + (batch_id, join_err.into()) + }) + .and_then(|x| x); + + // Cleanup task mapping by removing the result's task. 
This is inefficient but does not + // matter as the capacity is expected to be low. + let job_id = match &result { + Ok((id, _)) => id, + Err((id, _)) => id, + }; + self.task_map.retain(|(_, elem_job_id)| elem_job_id != job_id); - loop { - interval.tick().await; - self.try_build_block().await; - } + result } - // HELPER METHODS - // -------------------------------------------------------------------------------------------- + /// Returns `true` if there is a worker available. + fn has_capacity(&self) -> bool { + self.in_progress.len() < self.capacity.get() + } - /// Note that we call `build_block()` regardless of whether the `ready_batches` queue is empty. - /// A call to an empty `build_block()` indicates that an empty block should be created. - #[instrument(target = "miden-block-producer", skip_all)] - async fn try_build_block(&self) { - let mut batches_in_block: Vec = { - let mut locked_ready_batches = self.ready_batches.write().await; + /// Spawns a new batch proving task on the worker pool. + /// + /// # Errors + /// + /// Returns an error if no workers are available which can be checked using + /// [has_capacity](Self::has_capacity). + fn spawn( + &mut self, + id: BatchJobId, + transactions: Vec, + ) -> Result<(), ()> { + if !self.has_capacity() { + return Err(()); + } - let num_batches_in_block = - min(self.options.max_batches_per_block, locked_ready_batches.len()); + let task_id = self + .in_progress + .spawn({ + // Select a random work duration from the given proof range. + let simulated_proof_time = + rand::thread_rng().gen_range(self.simulated_proof_time.clone()); - locked_ready_batches.drain(..num_batches_in_block).collect() - }; + // Randomly fail batches at the configured rate. + // + // Note: Rng::gen rolls between [0, 1.0) for f32, so this works as expected. + let failed = rand::thread_rng().gen::() < self.failure_rate; - match self.block_builder.build_block(&batches_in_block).await { - Ok(_) => { - // block successfully built, do nothing - }, - Err(_) => { - // Block building failed; add back the batches at the end of the queue - self.ready_batches.write().await.append(&mut batches_in_block); - }, - } - } + async move { + tracing::debug!("Begin proving batch."); - /// Returns a list of IDs for unauthenticated notes which are not output notes of any ready - /// transaction batch or the candidate batch itself. 
- async fn find_dangling_notes(&self, txs: &[ProvenTransaction]) -> Vec { - // TODO: We can optimize this by looking at the notes created in the previous batches + let batch = Self::build_batch(transactions).map_err(|err| (id, err))?; - // build a set of output notes from all ready batches and the candidate batch - let mut all_output_notes: BTreeSet = txs - .iter() - .flat_map(|tx| tx.output_notes().iter().map(OutputNote::id)) - .chain( - self.ready_batches - .read() - .await - .iter() - .flat_map(|batch| batch.output_notes().iter().map(OutputNote::id)), - ) - .collect(); + tokio::time::sleep(simulated_proof_time).await; + if failed { + tracing::debug!("Batch proof failure injected."); + return Err((id, BuildBatchError::InjectedFailure)); + } + + tracing::debug!("Batch proof completed."); - // from the list of unauthenticated notes in the candidate batch, filter out any note - // which is also an output note either in any of the ready batches or in the candidate - // batch itself - txs.iter() - .flat_map(|tx| tx.get_unauthenticated_notes().map(|note| note.id())) - .filter(|note_id| !all_output_notes.remove(note_id)) - .collect() + Ok((id, batch)) + } + }) + .id(); + + self.task_map.push((task_id, id)); + + Ok(()) } -} -#[async_trait] -impl BatchBuilder for DefaultBatchBuilder -where - S: Store, - BB: BlockBuilder, -{ #[instrument(target = "miden-block-producer", skip_all, err, fields(batch_id))] - async fn build_batch(&self, txs: Vec) -> Result<(), BuildBatchError> { + fn build_batch( + txs: Vec, + ) -> Result { let num_txs = txs.len(); info!(target: COMPONENT, num_txs, "Building a transaction batch"); debug!(target: COMPONENT, txs = %format_array(txs.iter().map(|tx| tx.id().to_hex()))); - // make sure that all unauthenticated notes in the transactions of the proposed batch - // have been either created in any of the ready batches (or the batch itself) or are - // already in the store - // - // TODO: this can be optimized by first computing dangling notes of the batch itself, - // and only then checking against the other ready batches - let dangling_notes = self.find_dangling_notes(&txs).await; - let found_unauthenticated_notes = match dangling_notes.is_empty() { - true => Default::default(), - false => { - let stored_notes = - match self.store.get_note_authentication_info(dangling_notes.iter()).await { - Ok(stored_notes) => stored_notes, - Err(err) => return Err(BuildBatchError::NotePathsError(err, txs)), - }; - let missing_notes: Vec<_> = dangling_notes - .into_iter() - .filter(|note_id| !stored_notes.contains_note(note_id)) - .collect(); - - if !missing_notes.is_empty() { - return Err(BuildBatchError::UnauthenticatedNotesNotFound(missing_notes, txs)); - } - - stored_notes - }, - }; - - let batch = TransactionBatch::new(txs, found_unauthenticated_notes)?; + // TODO: This is a deep clone which can be avoided by change batch building to using + // refs or arcs. + let txs = txs + .iter() + .map(AuthenticatedTransaction::raw_proven_transaction) + .cloned() + .collect(); + // TODO: Found unauthenticated notes are no longer required.. potentially? 
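+        // Passing an empty authentication set: per the TODO above, the mempool is expected to
+        // have resolved unauthenticated notes before releasing these transactions for batching.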
+ let batch = TransactionBatch::new(txs, Default::default())?; - info!(target: COMPONENT, "Transaction batch built"); Span::current().record("batch_id", format_blake3_digest(batch.id())); + info!(target: COMPONENT, "Transaction batch built"); - let num_batches = { - let mut write_guard = self.ready_batches.write().await; - write_guard.push(batch); - write_guard.len() - }; - - info!(target: COMPONENT, num_batches, "Transaction batch added to the batch queue"); - - Ok(()) + Ok(batch) } } diff --git a/crates/block-producer/src/batch_builder/tests/mod.rs b/crates/block-producer/src/batch_builder/tests/mod.rs deleted file mode 100644 index 46b63f703..000000000 --- a/crates/block-producer/src/batch_builder/tests/mod.rs +++ /dev/null @@ -1,329 +0,0 @@ -use std::iter; - -use assert_matches::assert_matches; -use miden_objects::{crypto::merkle::Mmr, Digest}; -use tokio::sync::RwLock; - -use super::*; -use crate::{ - block_builder::DefaultBlockBuilder, - errors::BuildBlockError, - test_utils::{ - note::mock_note, MockPrivateAccount, MockProvenTxBuilder, MockStoreSuccessBuilder, - }, -}; -// STRUCTS -// ================================================================================================ - -#[derive(Default)] -struct BlockBuilderSuccess { - batch_groups: SharedRwVec>, - num_empty_batches_received: Arc>, -} - -#[async_trait] -impl BlockBuilder for BlockBuilderSuccess { - async fn build_block(&self, batches: &[TransactionBatch]) -> Result<(), BuildBlockError> { - if batches.is_empty() { - *self.num_empty_batches_received.write().await += 1; - } else { - self.batch_groups.write().await.push(batches.to_vec()); - } - - Ok(()) - } -} - -#[derive(Default)] -struct BlockBuilderFailure; - -#[async_trait] -impl BlockBuilder for BlockBuilderFailure { - async fn build_block(&self, _batches: &[TransactionBatch]) -> Result<(), BuildBlockError> { - Err(BuildBlockError::TooManyBatchesInBlock(0)) - } -} - -// TESTS -// ================================================================================================ - -/// Tests that the number of batches in a block doesn't exceed `max_batches_per_block` -#[tokio::test] -#[miden_node_test_macro::enable_logging] -async fn test_block_size_doesnt_exceed_limit() { - let block_frequency = Duration::from_millis(20); - let max_batches_per_block = 2; - - let store = Arc::new(MockStoreSuccessBuilder::from_accounts(iter::empty()).build()); - let block_builder = Arc::new(BlockBuilderSuccess::default()); - - let batch_builder = Arc::new(DefaultBatchBuilder::new( - store, - block_builder.clone(), - DefaultBatchBuilderOptions { block_frequency, max_batches_per_block }, - )); - - // Add 3 batches in internal queue (remember: 2 batches/block) - { - let mut batch_group = - vec![dummy_tx_batch(0, 2), dummy_tx_batch(10, 2), dummy_tx_batch(20, 2)]; - - batch_builder.ready_batches.write().await.append(&mut batch_group); - } - - // start batch builder - tokio::spawn(batch_builder.run()); - - // Wait for 2 blocks to be produced - time::sleep(block_frequency * 3).await; - - // Ensure the block builder received 2 batches of the expected size - { - let batch_groups = block_builder.batch_groups.read().await; - - assert_eq!(batch_groups.len(), 2); - assert_eq!(batch_groups[0].len(), max_batches_per_block); - assert_eq!(batch_groups[1].len(), 1); - } -} - -/// Tests that `BlockBuilder::build_block()` is still called when there are no transactions -#[tokio::test] -#[miden_node_test_macro::enable_logging] -async fn test_build_block_called_when_no_batches() { - let block_frequency = 
Duration::from_millis(20); - let max_batches_per_block = 2; - - let store = Arc::new(MockStoreSuccessBuilder::from_accounts(iter::empty()).build()); - let block_builder = Arc::new(BlockBuilderSuccess::default()); - - let batch_builder = Arc::new(DefaultBatchBuilder::new( - store, - block_builder.clone(), - DefaultBatchBuilderOptions { block_frequency, max_batches_per_block }, - )); - - // start batch builder - tokio::spawn(batch_builder.run()); - - // Wait for at least 1 block to be produced - time::sleep(block_frequency * 2).await; - - // Ensure the block builder received at least 1 empty batch Note: we check `> 0` instead of an - // exact number to make the test flaky in case timings change in the implementation - assert!(*block_builder.num_empty_batches_received.read().await > 0); -} - -/// Tests that if `BlockBuilder::build_block()` fails, then batches are added back on the queue -#[tokio::test] -#[miden_node_test_macro::enable_logging] -async fn test_batches_added_back_to_queue_on_block_build_failure() { - let block_frequency = Duration::from_millis(20); - let max_batches_per_block = 2; - - let store = Arc::new(MockStoreSuccessBuilder::from_accounts(iter::empty()).build()); - let block_builder = Arc::new(BlockBuilderFailure); - - let batch_builder = Arc::new(DefaultBatchBuilder::new( - store, - block_builder.clone(), - DefaultBatchBuilderOptions { block_frequency, max_batches_per_block }, - )); - - let internal_ready_batches = batch_builder.ready_batches.clone(); - - // Add 3 batches in internal queue - { - let mut batch_group = - vec![dummy_tx_batch(0, 2), dummy_tx_batch(10, 2), dummy_tx_batch(20, 2)]; - - batch_builder.ready_batches.write().await.append(&mut batch_group); - } - - // start batch builder - tokio::spawn(batch_builder.run()); - - // Wait for 2 blocks to failed to be produced - time::sleep(block_frequency * 2 + (block_frequency / 2)).await; - - // Ensure the transaction batches are all still on the queue - assert_eq!(internal_ready_batches.read().await.len(), 3); -} - -#[tokio::test] -async fn test_batch_builder_find_dangling_notes() { - let store = Arc::new(MockStoreSuccessBuilder::from_accounts(iter::empty()).build()); - let block_builder = Arc::new(BlockBuilderSuccess::default()); - - let batch_builder = Arc::new(DefaultBatchBuilder::new( - store, - block_builder, - DefaultBatchBuilderOptions { - block_frequency: Duration::from_millis(20), - max_batches_per_block: 2, - }, - )); - - // An account with 5 states so that we can simulate running 2 transactions against it. 
- let account = MockPrivateAccount::<3>::from(1); - - let note_1 = mock_note(1); - let note_2 = mock_note(2); - let tx1 = MockProvenTxBuilder::with_account(account.id, account.states[0], account.states[1]) - .output_notes(vec![OutputNote::Full(note_1.clone())]) - .build(); - let tx2 = MockProvenTxBuilder::with_account(account.id, account.states[1], account.states[2]) - .unauthenticated_notes(vec![note_1.clone()]) - .output_notes(vec![OutputNote::Full(note_2.clone())]) - .build(); - - let txs = vec![tx1, tx2]; - - let dangling_notes = batch_builder.find_dangling_notes(&txs).await; - assert_eq!(dangling_notes, vec![], "Note must be presented in the same batch"); - - batch_builder.build_batch(txs.clone()).await.unwrap(); - - let dangling_notes = batch_builder.find_dangling_notes(&txs).await; - assert_eq!(dangling_notes, vec![], "Note must be presented in the same batch"); - - let note_3 = mock_note(3); - - let tx1 = MockProvenTxBuilder::with_account(account.id, account.states[0], account.states[1]) - .unauthenticated_notes(vec![note_2.clone()]) - .build(); - let tx2 = MockProvenTxBuilder::with_account(account.id, account.states[1], account.states[2]) - .unauthenticated_notes(vec![note_3.clone()]) - .build(); - - let txs = vec![tx1, tx2]; - - let dangling_notes = batch_builder.find_dangling_notes(&txs).await; - assert_eq!( - dangling_notes, - vec![note_3.id()], - "Only one dangling node must be found before block is built" - ); - - batch_builder.try_build_block().await; - - let dangling_notes = batch_builder.find_dangling_notes(&txs).await; - assert_eq!( - dangling_notes, - vec![note_2.id(), note_3.id()], - "Two dangling notes must be found after block is built" - ); -} - -#[tokio::test] -async fn test_block_builder_no_missing_notes() { - let account_1: MockPrivateAccount<3> = MockPrivateAccount::from(1); - let account_2: MockPrivateAccount<3> = MockPrivateAccount::from(2); - let store = Arc::new( - MockStoreSuccessBuilder::from_accounts( - [account_1, account_2].iter().map(|account| (account.id, account.states[0])), - ) - .build(), - ); - let block_builder = Arc::new(DefaultBlockBuilder::new(Arc::clone(&store), Arc::clone(&store))); - let batch_builder = Arc::new(DefaultBatchBuilder::new( - store, - Arc::clone(&block_builder), - DefaultBatchBuilderOptions { - block_frequency: Duration::from_millis(20), - max_batches_per_block: 2, - }, - )); - - let note_1 = mock_note(1); - let note_2 = mock_note(2); - - let tx1 = MockProvenTxBuilder::with_account_index(1) - .output_notes(vec![OutputNote::Full(note_1.clone())]) - .build(); - - let tx2 = MockProvenTxBuilder::with_account_index(2) - .unauthenticated_notes(vec![note_1.clone()]) - .output_notes(vec![OutputNote::Full(note_2.clone())]) - .build(); - - let txs = vec![tx1, tx2]; - - batch_builder.build_batch(txs.clone()).await.unwrap(); - - let build_block_result = batch_builder - .block_builder - .build_block(&batch_builder.ready_batches.read().await) - .await; - assert_matches!(build_block_result, Ok(())); -} - -#[tokio::test] -async fn test_block_builder_fails_if_notes_are_missing() { - let accounts: Vec<_> = (1..=4).map(MockPrivateAccount::<3>::from).collect(); - let notes: Vec<_> = (1..=6).map(mock_note).collect(); - // We require mmr for the note authentication to succeed. - // - // We also need two blocks worth of mmr because the mock store skips genesis. 
- let mut mmr = Mmr::new(); - mmr.add(Digest::new([1u32.into(), 2u32.into(), 3u32.into(), 4u32.into()])); - mmr.add(Digest::new([1u32.into(), 2u32.into(), 3u32.into(), 4u32.into()])); - - let store = Arc::new( - MockStoreSuccessBuilder::from_accounts( - accounts.iter().map(|account| (account.id, account.states[0])), - ) - .initial_notes([vec![OutputNote::Full(notes[0].clone())]].iter()) - .initial_chain_mmr(mmr) - .build(), - ); - let block_builder = Arc::new(DefaultBlockBuilder::new(Arc::clone(&store), Arc::clone(&store))); - let batch_builder = Arc::new(DefaultBatchBuilder::new( - store, - Arc::clone(&block_builder), - DefaultBatchBuilderOptions { - block_frequency: Duration::from_millis(20), - max_batches_per_block: 2, - }, - )); - - let tx1 = MockProvenTxBuilder::with_account_index(1) - .output_notes(vec![OutputNote::Full(notes[1].clone())]) - .build(); - - let tx2 = MockProvenTxBuilder::with_account_index(2) - .unauthenticated_notes(vec![notes[0].clone()]) - .output_notes(vec![OutputNote::Full(notes[2].clone()), OutputNote::Full(notes[3].clone())]) - .build(); - - let tx3 = MockProvenTxBuilder::with_account_index(3) - .unauthenticated_notes(notes.iter().skip(1).cloned().collect()) - .build(); - - let txs = vec![tx1, tx2, tx3]; - - let batch = TransactionBatch::new(txs.clone(), Default::default()).unwrap(); - let build_block_result = batch_builder.block_builder.build_block(&[batch]).await; - - let mut expected_missing_notes = vec![notes[4].id(), notes[5].id()]; - expected_missing_notes.sort(); - - assert_matches!( - build_block_result, - Err(BuildBlockError::UnauthenticatedNotesNotFound(actual_missing_notes)) => { - assert_eq!(actual_missing_notes, expected_missing_notes); - } - ); -} - -// HELPERS -// ================================================================================================ - -fn dummy_tx_batch(starting_account_index: u32, num_txs_in_batch: usize) -> TransactionBatch { - let txs = (0..num_txs_in_batch) - .map(|index| { - MockProvenTxBuilder::with_account_index(starting_account_index + index as u32).build() - }) - .collect(); - TransactionBatch::new(txs, Default::default()).unwrap() -} diff --git a/crates/block-producer/src/block_builder/mod.rs b/crates/block-producer/src/block_builder/mod.rs index 0d120801f..bb739131d 100644 --- a/crates/block-producer/src/block_builder/mod.rs +++ b/crates/block-producer/src/block_builder/mod.rs @@ -1,6 +1,5 @@ -use std::{collections::BTreeSet, sync::Arc}; +use std::{collections::BTreeSet, ops::Range}; -use async_trait::async_trait; use miden_node_utils::formatting::{format_array, format_blake3_digest}; use miden_objects::{ accounts::AccountId, @@ -8,62 +7,93 @@ use miden_objects::{ notes::{NoteHeader, Nullifier}, transaction::InputNoteCommitment, }; +use rand::Rng; +use tokio::time::Duration; use tracing::{debug, info, instrument}; use crate::{ batch_builder::batch::TransactionBatch, errors::BuildBlockError, - store::{ApplyBlock, Store}, - COMPONENT, + mempool::SharedMempool, + store::{ApplyBlock, DefaultStore, Store}, + COMPONENT, SERVER_BLOCK_FREQUENCY, }; pub(crate) mod prover; use self::prover::{block_witness::BlockWitness, BlockProver}; -#[cfg(test)] -mod tests; - // BLOCK BUILDER // ================================================================================================= -#[async_trait] -pub trait BlockBuilder: Send + Sync + 'static { - /// Receive batches to be included in a block. An empty vector indicates that no batches were - /// ready, and that an empty block should be created. 
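+/// Builds blocks from mempool batches at a fixed interval, simulating block proving via random
+/// sleeps and an injected failure rate (see [Self::run]).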
+pub struct BlockBuilder { + pub block_interval: Duration, + /// Used to simulate block proving by sleeping for a random duration selected from this range. + pub simulated_proof_time: Range, + + /// Simulated block failure rate as a percentage. /// - /// The `BlockBuilder` relies on `build_block()` to be called as a precondition to creating a - /// block. In other words, if `build_block()` is never called, then no blocks are produced. - async fn build_block(&self, batches: &[TransactionBatch]) -> Result<(), BuildBlockError>; -} + /// Note: this _must_ be sign positive and less than 1.0. + pub failure_rate: f32, -#[derive(Debug)] -pub struct DefaultBlockBuilder { - store: Arc, - state_view: Arc, - block_kernel: BlockProver, + pub store: DefaultStore, + pub block_kernel: BlockProver, } -impl DefaultBlockBuilder -where - S: Store, - A: ApplyBlock, -{ - pub fn new(store: Arc, state_view: Arc) -> Self { +impl BlockBuilder { + pub fn new(store: DefaultStore) -> Self { Self { - store, - state_view, + block_interval: SERVER_BLOCK_FREQUENCY, + // Note: The range cannot be empty. + simulated_proof_time: Duration::ZERO..Duration::from_millis(1), + failure_rate: 0.0, block_kernel: BlockProver::new(), + store, + } + } + /// Starts the [BlockBuilder], infinitely producing blocks at the configured interval. + /// + /// Block production is sequential and consists of + /// + /// 1. Pulling the next set of batches from the mempool + /// 2. Compiling these batches into the next block + /// 3. Proving the block (this is simulated using random sleeps) + /// 4. Committing the block to the store + pub async fn run(self, mempool: SharedMempool) { + assert!( + self.failure_rate < 1.0 && self.failure_rate.is_sign_positive(), + "Failure rate must be a percentage" + ); + + let mut interval = tokio::time::interval(self.block_interval); + interval.set_missed_tick_behavior(tokio::time::MissedTickBehavior::Delay); + + loop { + interval.tick().await; + + let (block_number, batches) = mempool.lock().await.select_block(); + let batches = batches.into_values().collect::>(); + + let mut result = self.build_block(&batches).await; + let proving_duration = rand::thread_rng().gen_range(self.simulated_proof_time.clone()); + + tokio::time::sleep(proving_duration).await; + + // Randomly inject failures at the given rate. + // + // Note: Rng::gen rolls between [0, 1.0) for f32, so this works as expected. 
+ if rand::thread_rng().gen::() < self.failure_rate { + result = Err(BuildBlockError::InjectedFailure); + } + + let mut mempool = mempool.lock().await; + match result { + Ok(_) => mempool.block_committed(block_number), + Err(_) => mempool.block_failed(block_number), + } } } -} -#[async_trait] -impl BlockBuilder for DefaultBlockBuilder -where - S: Store, - A: ApplyBlock, -{ #[instrument(target = "miden-block-producer", skip_all, err)] async fn build_block(&self, batches: &[TransactionBatch]) -> Result<(), BuildBlockError> { info!( @@ -133,7 +163,7 @@ where info!(target: COMPONENT, block_num, %block_hash, "block built"); debug!(target: COMPONENT, ?block); - self.state_view.apply_block(&block).await?; + self.store.apply_block(&block).await?; info!(target: COMPONENT, block_num, %block_hash, "block committed"); diff --git a/crates/block-producer/src/block_builder/prover/block_witness.rs b/crates/block-producer/src/block_builder/prover/block_witness.rs index 0b4f06567..10e37c00a 100644 --- a/crates/block-producer/src/block_builder/prover/block_witness.rs +++ b/crates/block-producer/src/block_builder/prover/block_witness.rs @@ -11,10 +11,9 @@ use miden_objects::{ }; use crate::{ - batch_builder::batch::AccountUpdate, + batch_builder::{batch::AccountUpdate, TransactionBatch}, block::BlockInputs, errors::{BlockProverError, BuildBlockError}, - TransactionBatch, }; // BLOCK WITNESS @@ -36,9 +35,9 @@ impl BlockWitness { mut block_inputs: BlockInputs, batches: &[TransactionBatch], ) -> Result<(Self, Vec), BuildBlockError> { - if batches.len() > MAX_BATCHES_PER_BLOCK { - return Err(BuildBlockError::TooManyBatchesInBlock(batches.len())); - } + // This limit should be enforced by the mempool. + assert!(batches.len() <= MAX_BATCHES_PER_BLOCK); + Self::validate_nullifiers(&block_inputs, batches)?; let batch_created_notes_roots = batches diff --git a/crates/block-producer/src/block_builder/prover/tests.rs b/crates/block-producer/src/block_builder/prover/tests.rs index 56124065d..28d011cff 100644 --- a/crates/block-producer/src/block_builder/prover/tests.rs +++ b/crates/block-producer/src/block_builder/prover/tests.rs @@ -21,13 +21,13 @@ use miden_objects::{ use self::block_witness::AccountUpdateWitness; use super::*; use crate::{ + batch_builder::TransactionBatch, block::{AccountWitness, BlockInputs}, store::Store, test_utils::{ block::{build_actual_block_header, build_expected_block_header, MockBlockBuilder}, MockProvenTxBuilder, MockStoreSuccessBuilder, }, - TransactionBatch, }; // BLOCK WITNESS TESTS diff --git a/crates/block-producer/src/block_builder/tests.rs b/crates/block-producer/src/block_builder/tests.rs deleted file mode 100644 index d8fbc3565..000000000 --- a/crates/block-producer/src/block_builder/tests.rs +++ /dev/null @@ -1,83 +0,0 @@ -// block builder tests (higher level) -// `apply_block()` is called - -use std::sync::Arc; - -use assert_matches::assert_matches; -use miden_objects::{ - accounts::{account_id::testing::ACCOUNT_ID_OFF_CHAIN_SENDER, AccountId}, - Digest, Felt, -}; - -use crate::{ - batch_builder::TransactionBatch, - block_builder::{BlockBuilder, BuildBlockError, DefaultBlockBuilder}, - test_utils::{MockProvenTxBuilder, MockStoreFailure, MockStoreSuccessBuilder}, -}; - -/// Tests that `build_block()` succeeds when the transaction batches are not empty -#[tokio::test] -#[miden_node_test_macro::enable_logging] -async fn test_apply_block_called_nonempty_batches() { - let account_id = AccountId::new_unchecked(Felt::new(ACCOUNT_ID_OFF_CHAIN_SENDER)); - let account_initial_hash: Digest 
= - [Felt::new(1u64), Felt::new(1u64), Felt::new(1u64), Felt::new(1u64)].into(); - let store = Arc::new( - MockStoreSuccessBuilder::from_accounts(std::iter::once((account_id, account_initial_hash))) - .build(), - ); - - let block_builder = DefaultBlockBuilder::new(store.clone(), store.clone()); - - let batches: Vec = { - let batch_1 = { - let tx = MockProvenTxBuilder::with_account( - account_id, - account_initial_hash, - [Felt::new(2u64), Felt::new(2u64), Felt::new(2u64), Felt::new(2u64)].into(), - ) - .build(); - - TransactionBatch::new(vec![tx], Default::default()).unwrap() - }; - - vec![batch_1] - }; - block_builder.build_block(&batches).await.unwrap(); - - // Ensure that the store's `apply_block()` was called - assert_eq!(*store.num_apply_block_called.read().await, 1); -} - -/// Tests that `build_block()` succeeds when the transaction batches are empty -#[tokio::test] -#[miden_node_test_macro::enable_logging] -async fn test_apply_block_called_empty_batches() { - let account_id = AccountId::new_unchecked(Felt::new(ACCOUNT_ID_OFF_CHAIN_SENDER)); - let account_hash: Digest = - [Felt::new(1u64), Felt::new(1u64), Felt::new(1u64), Felt::new(1u64)].into(); - let store = Arc::new( - MockStoreSuccessBuilder::from_accounts(std::iter::once((account_id, account_hash))).build(), - ); - - let block_builder = DefaultBlockBuilder::new(store.clone(), store.clone()); - - block_builder.build_block(&Vec::new()).await.unwrap(); - - // Ensure that the store's `apply_block()` was called - assert_eq!(*store.num_apply_block_called.read().await, 1); -} - -/// Tests that `build_block()` fails when `get_block_inputs()` fails -#[tokio::test] -#[miden_node_test_macro::enable_logging] -async fn test_build_block_failure() { - let store = Arc::new(MockStoreFailure); - - let block_builder = DefaultBlockBuilder::new(store.clone(), store.clone()); - - let result = block_builder.build_block(&Vec::new()).await; - - // Ensure that the store's `apply_block()` was called - assert_matches!(result, Err(BuildBlockError::GetBlockInputsFailed(_))); -} diff --git a/crates/block-producer/src/domain/mod.rs b/crates/block-producer/src/domain/mod.rs new file mode 100644 index 000000000..37f08066e --- /dev/null +++ b/crates/block-producer/src/domain/mod.rs @@ -0,0 +1 @@ +pub mod transaction; diff --git a/crates/block-producer/src/domain/transaction.rs b/crates/block-producer/src/domain/transaction.rs new file mode 100644 index 000000000..738fccf36 --- /dev/null +++ b/crates/block-producer/src/domain/transaction.rs @@ -0,0 +1,167 @@ +use std::{collections::BTreeSet, sync::Arc}; + +use miden_objects::{ + accounts::AccountId, + notes::{NoteId, Nullifier}, + transaction::{ProvenTransaction, TransactionId, TxAccountUpdate}, + Digest, +}; + +use crate::{errors::VerifyTxError, mempool::BlockNumber, store::TransactionInputs}; + +/// A transaction who's proof has been verified, and which has been authenticated against the store. +/// +/// Authentication ensures that all nullifiers are unspent, and additionally authenticates some +/// previously unauthenticated input notes. +/// +/// This struct is cheap to clone as it uses an Arc for the heavy data. +/// +/// Note that this is of course only valid for the chain height of the authentication. +#[derive(Clone, Debug, PartialEq)] +pub struct AuthenticatedTransaction { + inner: Arc, + /// The account state provided by the store [inputs](TransactionInputs). + /// + /// This does not necessarily have to match the transaction's initial state + /// as this may still be modified by inflight transactions. 
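+    /// `None` indicates the store has no committed state for this account, i.e. it is new.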
+    store_account_state: Option<Digest>,
+    /// Unauthenticated notes that have now been authenticated by the store
+    /// [inputs](TransactionInputs).
+    ///
+    /// In other words, notes which were unauthenticated at the time the transaction was proven,
+    /// but which have since been committed to, and authenticated by the store.
+    notes_authenticated_by_store: BTreeSet<NoteId>,
+    /// Chain height that the authentication took place at.
+    authentication_height: BlockNumber,
+}
+
+impl AuthenticatedTransaction {
+    /// Verifies the transaction against the inputs, enforcing that all nullifiers are unspent.
+    ///
+    /// __No__ proof verification is performed. The caller takes responsibility for ensuring
+    /// that the proof is valid.
+    ///
+    /// # Errors
+    ///
+    /// Returns an error if any of the transaction's nullifiers are marked as spent by the inputs.
+    pub fn new(
+        tx: ProvenTransaction,
+        inputs: TransactionInputs,
+    ) -> Result<AuthenticatedTransaction, VerifyTxError> {
+        let nullifiers_already_spent = tx
+            .get_nullifiers()
+            .filter(|nullifier| inputs.nullifiers.get(nullifier).cloned().flatten().is_some())
+            .collect::<Vec<_>>();
+        if !nullifiers_already_spent.is_empty() {
+            return Err(VerifyTxError::InputNotesAlreadyConsumed(nullifiers_already_spent));
+        }
+
+        // Invert the missing notes; i.e. we now know the rest were actually found.
+        let authenticated_notes = tx
+            .get_unauthenticated_notes()
+            .map(|header| header.id())
+            .filter(|note_id| !inputs.missing_unauthenticated_notes.contains(note_id))
+            .collect();
+
+        Ok(AuthenticatedTransaction {
+            inner: Arc::new(tx),
+            notes_authenticated_by_store: authenticated_notes,
+            authentication_height: BlockNumber::new(inputs.current_block_height),
+            store_account_state: inputs.account_hash,
+        })
+    }
+
+    pub fn id(&self) -> TransactionId {
+        self.inner.id()
+    }
+
+    pub fn account_id(&self) -> AccountId {
+        self.inner.account_id()
+    }
+
+    pub fn account_update(&self) -> &TxAccountUpdate {
+        self.inner.account_update()
+    }
+
+    pub fn store_account_state(&self) -> Option<Digest> {
+        self.store_account_state
+    }
+
+    pub fn authentication_height(&self) -> BlockNumber {
+        self.authentication_height
+    }
+
+    pub fn nullifiers(&self) -> impl Iterator<Item = Nullifier> + '_ {
+        self.inner.get_nullifiers()
+    }
+
+    pub fn output_notes(&self) -> impl Iterator<Item = NoteId> + '_ {
+        self.inner.output_notes().iter().map(|note| note.id())
+    }
+
+    pub fn output_note_count(&self) -> usize {
+        self.inner.output_notes().num_notes()
+    }
+
+    pub fn input_note_count(&self) -> usize {
+        self.inner.input_notes().num_notes()
+    }
+
+    /// Notes which were unauthenticated in the transaction __and__ which were
+    /// not authenticated by the store inputs.
+    pub fn unauthenticated_notes(&self) -> impl Iterator<Item = NoteId> + '_ {
+        self.inner
+            .get_unauthenticated_notes()
+            .cloned()
+            .map(|header| header.id())
+            .filter(|note_id| !self.notes_authenticated_by_store.contains(note_id))
+    }
+
+    pub fn raw_proven_transaction(&self) -> &ProvenTransaction {
+        &self.inner
+    }
+}
+
+#[cfg(test)]
+impl AuthenticatedTransaction {
+    //! Builder methods intended for easier test setup.
+
+    /// Short-hand for `Self::new` where the inputs are set up to match the transaction's initial
+    /// account state. This covers the account's initial state and nullifiers being set to unspent.
+    pub fn from_inner(inner: ProvenTransaction) -> Self {
+        let store_account_state = match inner.account_update().init_state_hash() {
+            zero if zero == Digest::default() => None,
+            non_zero => Some(non_zero),
+        };
+        let inputs = TransactionInputs {
+            account_id: inner.account_id(),
+            account_hash: store_account_state,
+            nullifiers: inner.get_nullifiers().map(|nullifier| (nullifier, None)).collect(),
+            missing_unauthenticated_notes: inner
+                .get_unauthenticated_notes()
+                .map(|header| header.id())
+                .collect(),
+            current_block_height: Default::default(),
+        };
+        // SAFETY: nullifiers were set to None, i.e. they are definitely unspent.
+        Self::new(inner, inputs).unwrap()
+    }
+
+    /// Overrides the authentication height with the given value.
+    pub fn with_authentication_height(mut self, height: u32) -> Self {
+        self.authentication_height = BlockNumber::new(height);
+        self
+    }
+
+    /// Overrides the store state with the given value.
+    pub fn with_store_state(mut self, state: Digest) -> Self {
+        self.store_account_state = Some(state);
+        self
+    }
+
+    /// Unsets the store state.
+    pub fn with_empty_store_state(mut self) -> Self {
+        self.store_account_state = None;
+        self
+    }
+}
diff --git a/crates/block-producer/src/errors.rs b/crates/block-producer/src/errors.rs
index e31027a41..b2e9b13c7 100644
--- a/crates/block-producer/src/errors.rs
+++ b/crates/block-producer/src/errors.rs
@@ -4,12 +4,35 @@
 use miden_objects::{
     accounts::AccountId,
     crypto::merkle::{MerkleError, MmrError},
     notes::{NoteId, Nullifier},
-    transaction::{ProvenTransaction, TransactionId},
-    AccountDeltaError, Digest, TransactionInputError, MAX_ACCOUNTS_PER_BATCH,
-    MAX_BATCHES_PER_BLOCK, MAX_INPUT_NOTES_PER_BATCH, MAX_OUTPUT_NOTES_PER_BATCH,
+    transaction::TransactionId,
+    AccountDeltaError, Digest, TransactionInputError,
 };
 use miden_processor::ExecutionError;
 use thiserror::Error;
+use tokio::task::JoinError;
+
+use crate::mempool::BlockNumber;
+
+// Block-producer errors
+// =================================================================================================
+
+#[derive(Debug, Error)]
+pub enum BlockProducerError {
+    /// A block-producer task completed although it should have run indefinitely.
+    #[error("task {task} completed unexpectedly")]
+    TaskFailedSuccesfully { task: &'static str },
+
+    /// A block-producer task panicked.
+    #[error("error joining {task} task")]
+    JoinError { task: &'static str, source: JoinError },
+
+    /// A block-producer task reported a transport error.
+    #[error("task {task} had a transport error")]
+    TonicTransportError {
+        task: &'static str,
+        source: tonic::transport::Error,
+    },
+}
 
 // Transaction verification errors
 // =================================================================================================
@@ -27,6 +50,9 @@ pub enum VerifyTxError {
     )]
     UnauthenticatedNotesNotFound(Vec<NoteId>),
 
+    #[error("Output note IDs already used: {0:?}")]
+    OutputNotesAlreadyExist(Vec<NoteId>),
+
     /// The account's initial hash did not match the current account's hash
     #[error("Incorrect account's initial hash ({tx_initial_account_hash}, current: {})", format_opt(.current_account_hash.as_ref()))]
     IncorrectAccountInitialHash {
@@ -56,83 +82,72 @@
 pub enum AddTransactionError {
     #[error("Transaction verification failed: {0}")]
     VerificationFailed(#[from] VerifyTxError),
+
+    #[error("Transaction input data is stale. Required data from {stale_limit} or newer, but inputs are from {input_block}.")]
+    StaleInputs {
+        input_block: BlockNumber,
+        stale_limit: BlockNumber,
+    },
+
+    #[error("Deserialization failed: {0}")]
+    DeserializationError(String),
+}
+
+impl From<AddTransactionError> for tonic::Status {
+    fn from(value: AddTransactionError) -> Self {
+        use AddTransactionError::*;
+        match value {
+            VerificationFailed(VerifyTxError::InputNotesAlreadyConsumed(_))
+            | VerificationFailed(VerifyTxError::UnauthenticatedNotesNotFound(_))
+            | VerificationFailed(VerifyTxError::OutputNotesAlreadyExist(_))
+            | VerificationFailed(VerifyTxError::IncorrectAccountInitialHash { .. })
+            | VerificationFailed(VerifyTxError::InvalidTransactionProof(_))
+            | DeserializationError(_) => Self::invalid_argument(value.to_string()),
+
+            // Internal errors which should not be communicated to the user.
+            VerificationFailed(VerifyTxError::TransactionInputError(_))
+            | VerificationFailed(VerifyTxError::StoreConnectionFailed(_))
+            | StaleInputs { .. } => Self::internal("Internal error"),
+        }
+    }
+}
 
 // Batch building errors
 // =================================================================================================
 
-/// Error that may happen while building a transaction batch.
-///
-/// These errors are returned from the batch builder to the transaction queue, instead of
-/// dropping the transactions, they are included into the error values, so that the transaction
-/// queue can re-queue them.
+/// Error encountered while building a batch.
 #[derive(Debug, Error)]
 pub enum BuildBatchError {
-    #[error("Too many input notes in the batch. Got: {0}, limit: {MAX_INPUT_NOTES_PER_BATCH}")]
-    TooManyInputNotes(usize, Vec<ProvenTransaction>),
-
-    #[error("Too many notes created in the batch. Got: {0}, limit: {MAX_OUTPUT_NOTES_PER_BATCH}")]
-    TooManyNotesCreated(usize, Vec<ProvenTransaction>),
-
-    #[error(
-        "Too many account updates in the batch. Got: {}, limit: {}",
-        .0.len(),
-        MAX_ACCOUNTS_PER_BATCH
-    )]
-    TooManyAccountsInBatch(Vec<ProvenTransaction>),
-
-    #[error("Failed to create notes SMT: {0}")]
-    NotesSmtError(MerkleError, Vec<ProvenTransaction>),
-
-    #[error("Failed to get note paths: {0}")]
-    NotePathsError(NotePathsError, Vec<ProvenTransaction>),
-
     #[error("Duplicated unauthenticated transaction input note ID in the batch: {0}")]
-    DuplicateUnauthenticatedNote(NoteId, Vec<ProvenTransaction>),
+    DuplicateUnauthenticatedNote(NoteId),
 
     #[error("Duplicated transaction output note ID in the batch: {0}")]
-    DuplicateOutputNote(NoteId, Vec<ProvenTransaction>),
-
-    #[error("Unauthenticated transaction notes not found in the store: {0:?}")]
-    UnauthenticatedNotesNotFound(Vec<NoteId>, Vec<ProvenTransaction>),
+    DuplicateOutputNote(NoteId),
 
     #[error("Note hashes mismatch for note {id}: (input: {input_hash}, output: {output_hash})")]
     NoteHashesMismatch {
         id: NoteId,
         input_hash: Digest,
         output_hash: Digest,
-        txs: Vec<ProvenTransaction>,
     },
 
     #[error("Failed to merge transaction delta into account {account_id}: {error}")]
     AccountUpdateError {
         account_id: AccountId,
        error: AccountDeltaError,
-        txs: Vec<ProvenTransaction>,
     },
-}
 
-impl BuildBatchError {
-    pub fn into_transactions(self) -> Vec<ProvenTransaction> {
-        match self {
-            BuildBatchError::TooManyInputNotes(_, txs) => txs,
-            BuildBatchError::TooManyNotesCreated(_, txs) => txs,
-            BuildBatchError::TooManyAccountsInBatch(txs) => txs,
-            BuildBatchError::NotesSmtError(_, txs) => txs,
-            BuildBatchError::NotePathsError(_, txs) => txs,
-            BuildBatchError::DuplicateUnauthenticatedNote(_, txs) => txs,
-            BuildBatchError::DuplicateOutputNote(_, txs) => txs,
-            BuildBatchError::UnauthenticatedNotesNotFound(_, txs) => txs,
-            BuildBatchError::NoteHashesMismatch { txs, .. } => txs,
-            BuildBatchError::AccountUpdateError { txs, .. } => txs,
-        }
-    }
+    #[error("Nothing actually went wrong, failure was injected on purpose")]
+    InjectedFailure,
+
+    #[error("Batch proving task panicked")]
+    JoinError(#[from] tokio::task::JoinError),
 }
 
 // Block prover errors
 // =================================================================================================
 
-#[derive(Debug, PartialEq, Eq, Error)]
+#[derive(Debug, Error)]
 pub enum BlockProverError {
     #[error("Received invalid merkle path")]
     InvalidMerklePaths(MerkleError),
@@ -156,18 +171,6 @@ pub enum BlockInputsError {
     GrpcClientError(String),
 }
 
-// Note paths errors
-// =================================================================================================
-
-#[allow(clippy::enum_variant_names)]
-#[derive(Debug, Error)]
-pub enum NotePathsError {
-    #[error("failed to parse protobuf message: {0}")]
-    ConversionError(#[from] ConversionError),
-    #[error("gRPC client failed with error: {0}")]
-    GrpcClientError(String),
-}
-
 // Block applying errors
 // =================================================================================================
 
@@ -198,13 +201,13 @@ pub enum BuildBlockError {
     InconsistentNullifiers(Vec<Nullifier>),
     #[error("unauthenticated transaction notes not found in the store or in outputs of other transactions in the block: {0:?}")]
     UnauthenticatedNotesNotFound(Vec<NoteId>),
-    #[error("too many batches in block. Got: {0}, max: {MAX_BATCHES_PER_BLOCK}")]
-    TooManyBatchesInBlock(usize),
-    #[error("Failed to merge transaction delta into account {account_id}: {error}")]
+    #[error("failed to merge transaction delta into account {account_id}: {error}")]
     AccountUpdateError {
         account_id: AccountId,
         error: AccountDeltaError,
     },
+    #[error("nothing actually went wrong, failure was injected on purpose")]
+    InjectedFailure,
 }
 
 // Transaction inputs errors
diff --git a/crates/block-producer/src/lib.rs b/crates/block-producer/src/lib.rs
index dbbdd9803..cf82a3570 100644
--- a/crates/block-producer/src/lib.rs
+++ b/crates/block-producer/src/lib.rs
@@ -1,29 +1,19 @@
-use std::{sync::Arc, time::Duration};
-
-use batch_builder::batch::TransactionBatch;
-use miden_objects::transaction::ProvenTransaction;
-use tokio::sync::RwLock;
+use std::time::Duration;
 
 #[cfg(test)]
 pub mod test_utils;
 
 mod batch_builder;
 mod block_builder;
+mod domain;
 mod errors;
-mod state_view;
+mod mempool;
 mod store;
-mod txqueue;
 
 pub mod block;
 pub mod config;
 pub mod server;
 
-// TYPE ALIASES
-// =================================================================================================
-
-/// A vector that can be shared across threads
-pub(crate) type SharedRwVec<T> = Arc<RwLock<Vec<T>>>;
-
 // CONSTANTS
 // =================================================================================================
 
@@ -31,7 +21,7 @@ pub(crate) type SharedRwVec<T> = Arc<RwLock<Vec<T>>>;
 pub const COMPONENT: &str = "miden-block-producer";
 
 /// The number of transactions per batch
-const SERVER_BATCH_SIZE: usize = 2;
+const SERVER_MAX_TXS_PER_BATCH: usize = 2;
 
 /// The frequency at which blocks are produced
 const SERVER_BLOCK_FREQUENCY: Duration = Duration::from_secs(10);
@@ -41,3 +31,19 @@ const SERVER_BUILD_BATCH_FREQUENCY: Duration = Duration::from_secs(2);
 
 /// Maximum number of batches per block
 const SERVER_MAX_BATCHES_PER_BLOCK: usize = 4;
+
+/// The number of blocks of committed state that the mempool retains.
+///
+/// This determines the grace period incoming transactions have between fetching their input from
+/// the store and verification in the mempool.
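+///
+/// E.g. with a retention of 5 blocks, a transaction whose inputs were fetched at block `N`
+/// remains verifiable until the mempool commits block `N + 5`.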
+const SERVER_MEMPOOL_STATE_RETENTION: usize = 5;
+
+const _: () = assert!(
+    SERVER_MAX_BATCHES_PER_BLOCK <= miden_objects::MAX_BATCHES_PER_BLOCK,
+    "Server constraint cannot exceed the protocol's constraint"
+);
+
+const _: () = assert!(
+    SERVER_MAX_TXS_PER_BATCH <= miden_objects::MAX_ACCOUNTS_PER_BATCH,
+    "Server constraint cannot exceed the protocol's constraint"
+);
diff --git a/crates/block-producer/src/mempool/batch_graph.rs b/crates/block-producer/src/mempool/batch_graph.rs
new file mode 100644
index 000000000..62b4c568c
--- /dev/null
+++ b/crates/block-producer/src/mempool/batch_graph.rs
@@ -0,0 +1,346 @@
+use std::collections::{BTreeMap, BTreeSet};
+
+use miden_objects::transaction::TransactionId;
+
+use super::{
+    graph::{DependencyGraph, GraphError},
+    BatchJobId, BlockBudget, BudgetStatus,
+};
+use crate::batch_builder::batch::TransactionBatch;
+
+// BATCH GRAPH
+// ================================================================================================
+
+/// Tracks the dependencies between batches, transactions and their parents.
+///
+/// Batches are inserted with their transaction and parent transaction sets, which define the
+/// edges of the dependency graph. Batches are initially inserted in a pending state while we wait
+/// on their proofs to be generated. The dependencies are still tracked in this state.
+///
+/// Batches can then be promoted to ready by [submitting their proofs](Self::submit_proof) once
+/// available. Proven batches are considered for inclusion in blocks once _all_ parent batches have
+/// been selected.
+///
+/// Committed batches (i.e. included in blocks) may be [pruned](Self::prune_committed) from the
+/// graph to bound the graph's size.
+///
+/// Batches may also be outright [purged](Self::remove_batches) from the graph. This is useful for
+/// batches which may have become invalid due to external considerations, e.g. expired
+/// transactions.
+///
+/// # Batch lifecycle
+/// ```text
+///                   │
+///             insert│
+///             ┌─────▼─────┐
+///             │  pending  ┼────┐
+///             └─────┬─────┘    │
+///                   │          │
+///       submit_proof│          │
+///             ┌─────▼─────┐    │
+///             │  proved   ┼────┤
+///             └─────┬─────┘    │
+///                   │          │
+///       select_block│          │
+///             ┌─────▼─────┐    │
+///             │ committed ┼────┤
+///             └─────┬─────┘    │
+///                   │          │
+///    prune_committed│          │remove_batches
+///             ┌─────▼─────┐    │
+///             │           ◄────┘
+///             └───────────┘
+/// ```
+#[derive(Default, Debug, Clone, PartialEq)]
+pub struct BatchGraph {
+    /// Tracks the interdependencies between batches.
+    inner: DependencyGraph<BatchJobId, TransactionBatch>,
+
+    /// Maps each transaction to its batch, allowing for reverse lookups.
+    ///
+    /// Incoming batches are defined entirely in terms of transactions, including parent edges.
+    /// This lets us transform these parent transactions into the relevant parent batches.
+    transactions: BTreeMap<TransactionId, BatchJobId>,
+
+    /// Maps each batch to its transaction set.
+    ///
+    /// Required because the dependency graph is defined in terms of batches. This lets us
+    /// translate between batches and their transactions when required.
+    batches: BTreeMap<BatchJobId, Vec<TransactionId>>,
+}
+
+#[derive(thiserror::Error, Debug, Clone, PartialEq, Eq)]
+pub enum BatchInsertError {
+    #[error("Transactions are already in the graph: {0:?}")]
+    DuplicateTransactions(BTreeSet<TransactionId>),
+    #[error("Unknown parent transaction {0}")]
+    UnknownParentTransaction(TransactionId),
+    #[error(transparent)]
+    GraphError(#[from] GraphError<BatchJobId>),
+}
+
+impl BatchGraph {
+    /// Inserts a new batch into the graph.
+    ///
+    /// Parents are the transactions on which the given transactions have a direct dependency.
This + /// includes transactions within the same batch i.e. a transaction and parent transaction may + /// both be in this batch. + /// + /// # Errors + /// + /// Returns an error if: + /// - the batch ID is already in use + /// - any transactions are already in the graph + /// - any parent transactions are _not_ in the graph + pub fn insert( + &mut self, + id: BatchJobId, + transactions: Vec, + mut parents: BTreeSet, + ) -> Result<(), BatchInsertError> { + let duplicates = transactions + .iter() + .filter(|tx| self.transactions.contains_key(tx)) + .copied() + .collect::>(); + if !duplicates.is_empty() { + return Err(BatchInsertError::DuplicateTransactions(duplicates)); + } + + // Reverse lookup parent batch IDs. Take care to allow for parent transactions within this + // batch i.e. internal dependencies. + transactions.iter().for_each(|tx| { + parents.remove(tx); + }); + let parent_batches = parents + .into_iter() + .map(|tx| { + self.transactions + .get(&tx) + .copied() + .ok_or(BatchInsertError::UnknownParentTransaction(tx)) + }) + .collect::>()?; + + self.inner.insert_pending(id, parent_batches)?; + + for tx in transactions.iter().copied() { + self.transactions.insert(tx, id); + } + self.batches.insert(id, transactions); + + Ok(()) + } + + /// Removes the batches and their descendants from the graph. + /// + /// # Returns + /// + /// Returns all removed batches and their transactions. + /// + /// # Errors + /// + /// Returns an error if any of the batches are not currently in the graph. + pub fn remove_batches( + &mut self, + batch_ids: BTreeSet, + ) -> Result>, GraphError> { + // This returns all descendent batches as well. + let batch_ids = self.inner.purge_subgraphs(batch_ids)?; + + // SAFETY: These batches must all have been inserted since they are emitted from the inner + // dependency graph, and therefore must all be in the batches mapping. + let batches = batch_ids + .into_iter() + .map(|batch_id| { + (batch_id, self.batches.remove(&batch_id).expect("batch should be removed")) + }) + .collect::>(); + + for tx in batches.values().flatten() { + self.transactions.remove(tx); + } + + Ok(batches) + } + + /// Removes the set of committed batches from the graph. + /// + /// The batches _must_ have been previously selected for inclusion in a block using + /// [`select_block`](Self::select_block). This is intended for limiting the size of the graph by + /// culling committed data. + /// + /// # Returns + /// + /// Returns the transactions of the pruned batches. + /// + /// # Errors + /// + /// Returns an error if + /// - any batch was not previously selected for inclusion in a block + /// - any batch is unknown + /// - any parent batch would be left dangling in the graph + /// + /// The last point implies that batches should be removed in block order. + pub fn prune_committed( + &mut self, + batch_ids: BTreeSet, + ) -> Result, GraphError> { + // This clone could be elided by moving this call to the end. This would lose the atomic + // property of this method though its unclear what value (if any) that has. + self.inner.prune_processed(batch_ids.clone())?; + let mut transactions = Vec::new(); + + for batch_id in &batch_ids { + transactions.extend(self.batches.remove(batch_id).into_iter().flatten()); + } + + for tx in &transactions { + self.transactions.remove(tx); + } + + Ok(transactions) + } + + /// Submits a proof for the given batch, promoting it from pending to ready for inclusion in a + /// block once all its parents have themselves been included. 
+ /// + /// # Errors + /// + /// Returns an error if the batch is not in the graph or if it was already previously proven. + pub fn submit_proof( + &mut self, + id: BatchJobId, + batch: TransactionBatch, + ) -> Result<(), GraphError> { + self.inner.promote_pending(id, batch) + } + + /// Selects the next set of batches ready for inclusion in a block while adhering to the given + /// budget. + pub fn select_block( + &mut self, + mut budget: BlockBudget, + ) -> BTreeMap { + let mut batches = BTreeMap::new(); + + while let Some(batch_id) = self.inner.roots().first().copied() { + // SAFETY: Since it was a root batch, it must definitely have a processed batch + // associated with it. + let batch = self.inner.get(&batch_id).expect("root should be in graph"); + + // Adhere to block's budget. + if budget.check_then_subtract(batch) == BudgetStatus::Exceeded { + break; + } + + // Clone is required to avoid multiple borrows of self. We delay this clone until after + // the budget check, which is why this looks so out of place. + let batch = batch.clone(); + + // SAFETY: This is definitely a root since we just selected it from the set of roots. + self.inner.process_root(batch_id).expect("root should be processed"); + + batches.insert(batch_id, batch); + } + + batches + } + + /// Returns `true` if the graph contains the given batch. + pub fn contains(&self, id: &BatchJobId) -> bool { + self.batches.contains_key(id) + } +} + +#[cfg(any(test, doctest))] +mod tests { + use super::*; + use crate::test_utils::Random; + + // INSERT TESTS + // ================================================================================================ + + #[test] + fn insert_rejects_duplicate_batch_ids() { + let id = BatchJobId::new(1); + let mut uut = BatchGraph::default(); + + uut.insert(id, Default::default(), Default::default()).unwrap(); + let err = uut.insert(id, Default::default(), Default::default()).unwrap_err(); + let expected = BatchInsertError::GraphError(GraphError::DuplicateKey(id)); + + assert_eq!(err, expected); + } + + #[test] + fn insert_rejects_duplicate_transactions() { + let mut rng = Random::with_random_seed(); + let tx_dup = rng.draw_tx_id(); + let tx_non_dup = rng.draw_tx_id(); + + let mut uut = BatchGraph::default(); + + uut.insert(BatchJobId::new(1), vec![tx_dup], Default::default()).unwrap(); + let err = uut + .insert(BatchJobId::new(2), vec![tx_dup, tx_non_dup], Default::default()) + .unwrap_err(); + let expected = BatchInsertError::DuplicateTransactions([tx_dup].into()); + + assert_eq!(err, expected); + } + + #[test] + fn insert_rejects_missing_parents() { + let mut rng = Random::with_random_seed(); + let tx = rng.draw_tx_id(); + let missing = rng.draw_tx_id(); + + let mut uut = BatchGraph::default(); + + let err = uut.insert(BatchJobId::new(2), vec![tx], [missing].into()).unwrap_err(); + let expected = BatchInsertError::UnknownParentTransaction(missing); + + assert_eq!(err, expected); + } + + #[test] + fn insert_with_internal_parent_succeeds() { + // Ensure that a batch with internal dependencies can be inserted. 
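+        // E.g. a batch [parent, child] where `child` consumes a note created by `parent`,
+        // so the parent edge points back into the batch itself.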
+        let mut rng = Random::with_random_seed();
+        let parent = rng.draw_tx_id();
+        let child = rng.draw_tx_id();
+
+        let mut uut = BatchGraph::default();
+        uut.insert(BatchJobId::new(2), vec![parent, child], [parent].into()).unwrap();
+    }
+
+    // PURGE_SUBGRAPHS TESTS
+    // ================================================================================================
+
+    #[test]
+    fn purge_subgraphs_returns_all_purged_transaction_sets() {
+        // Ensure that purge_subgraphs returns both parent and child batches when the parent is
+        // purged. Further ensure that a disjoint batch is not purged.
+        let mut rng = Random::with_random_seed();
+        let parent_batch_txs = (0..5).map(|_| rng.draw_tx_id()).collect::<Vec<_>>();
+        let child_batch_txs = (0..5).map(|_| rng.draw_tx_id()).collect::<Vec<_>>();
+        let disjoint_batch_txs = (0..5).map(|_| rng.draw_tx_id()).collect();
+
+        let parent_batch_id = BatchJobId::new(0);
+        let child_batch_id = BatchJobId::new(1);
+        let disjoint_batch_id = BatchJobId::new(2);
+
+        let mut uut = BatchGraph::default();
+        uut.insert(parent_batch_id, parent_batch_txs.clone(), Default::default())
+            .unwrap();
+        uut.insert(child_batch_id, child_batch_txs.clone(), [parent_batch_txs[0]].into())
+            .unwrap();
+        uut.insert(disjoint_batch_id, disjoint_batch_txs, Default::default()).unwrap();
+
+        let result = uut.remove_batches([parent_batch_id].into()).unwrap();
+        let expected =
+            [(parent_batch_id, parent_batch_txs), (child_batch_id, child_batch_txs)].into();
+
+        assert_eq!(result, expected);
+    }
+}
diff --git a/crates/block-producer/src/mempool/graph/mod.rs b/crates/block-producer/src/mempool/graph/mod.rs
new file mode 100644
index 000000000..d9199909d
--- /dev/null
+++ b/crates/block-producer/src/mempool/graph/mod.rs
@@ -0,0 +1,419 @@
+use std::{
+    collections::{BTreeMap, BTreeSet},
+    fmt::{Debug, Display},
+};
+
+#[cfg(test)]
+mod tests;
+
+// DEPENDENCY GRAPH
+// ================================================================================================
+
+/// A dependency graph structure where nodes are inserted, and then made available for processing
+/// once all parent nodes have been processed.
+///
+/// Forms the basis of our transaction and batch dependency graphs.
+///
+/// # Node lifecycle
+/// ```text
+///                                   │
+///                                   │
+///                     insert_pending│
+///                             ┌─────▼─────┐
+///                             │  pending  │────┐
+///                             └─────┬─────┘    │
+///                                   │          │
+///                    promote_pending│          │
+///                             ┌─────▼─────┐    │
+///                  ┌──────────►  in queue │────│
+///                  │          └─────┬─────┘    │
+///  revert_subgraphs│                │          │
+///                  │    process_root│          │
+///                  │          ┌─────▼─────┐    │
+///                  └──────────┼ processed │────│
+///                             └─────┬─────┘    │
+///                                   │          │
+///                    prune_processed│          │purge_subgraphs
+///                             ┌─────▼─────┐    │
+///                             │           ◄────┘
+///                             └───────────┘
+/// ```
+#[derive(Clone, PartialEq, Eq)]
+pub struct DependencyGraph<K, V> {
+    /// Nodes whose data is still pending.
+    pending: BTreeSet<K>,
+
+    /// Each node's data.
+    vertices: BTreeMap<K, V>,
+
+    /// Each node's parents. This is redundant with `children`,
+    /// but we require both for efficient lookups.
+    parents: BTreeMap<K, BTreeSet<K>>,
+
+    /// Each node's children. This is redundant with `parents`,
+    /// but we require both for efficient lookups.
+    children: BTreeMap<K, BTreeSet<K>>,
+
+    /// Nodes that are available to process next.
+    ///
+    /// Effectively this is the set of nodes which are
+    /// unprocessed and whose parents _are_ all processed.
+    roots: BTreeSet<K>,
+
+    /// Set of nodes that are already processed.
+    processed: BTreeSet<K>,
+}
+
+impl<K, V> Debug for DependencyGraph<K, V>
+where
+    K: Debug,
+{
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        f.debug_struct("DependencyGraph")
+            .field("pending", &self.pending)
+            .field("vertices", &self.vertices.keys())
+            .field("processed", &self.processed)
+            .field("roots", &self.roots)
+            .field("parents", &self.parents)
+            .field("children", &self.children)
+            .finish()
+    }
+}
+
+#[derive(thiserror::Error, Debug, Clone, PartialEq, Eq)]
+pub enum GraphError<K> {
+    #[error("Node {0} already exists")]
+    DuplicateKey(K),
+
+    #[error("Parents not found: {0:?}")]
+    MissingParents(BTreeSet<K>),
+
+    #[error("Nodes not found: {0:?}")]
+    UnknownNodes(BTreeSet<K>),
+
+    #[error("Nodes were not yet processed: {0:?}")]
+    UnprocessedNodes(BTreeSet<K>),
+
+    #[error("Nodes would be left dangling: {0:?}")]
+    DanglingNodes(BTreeSet<K>),
+
+    #[error("Node {0} is not a root node")]
+    InvalidRootNode(K),
+
+    #[error("Node {0} is not a pending node")]
+    InvalidPendingNode(K),
+}
+
+/// This cannot be derived without enforcing `Default` bounds on K and V.
+impl<K, V> Default for DependencyGraph<K, V> {
+    fn default() -> Self {
+        Self {
+            vertices: Default::default(),
+            pending: Default::default(),
+            parents: Default::default(),
+            children: Default::default(),
+            roots: Default::default(),
+            processed: Default::default(),
+        }
+    }
+}
+
+impl<K: Ord + Copy + Display + Debug, V> DependencyGraph<K, V> {
+    /// Inserts a new pending node into the graph.
+    ///
+    /// # Errors
+    ///
+    /// Errors if the node already exists, or if any of the parents are not part of the graph.
+    ///
+    /// This method is atomic.
+    pub fn insert_pending(&mut self, key: K, parents: BTreeSet<K>) -> Result<(), GraphError<K>> {
+        if self.contains(&key) {
+            return Err(GraphError::DuplicateKey(key));
+        }
+
+        let missing_parents = parents
+            .iter()
+            .filter(|parent| !self.contains(parent))
+            .copied()
+            .collect::<BTreeSet<_>>();
+        if !missing_parents.is_empty() {
+            return Err(GraphError::MissingParents(missing_parents));
+        }
+
+        // Inform parents of their new child.
+        for parent in &parents {
+            self.children.entry(*parent).or_default().insert(key);
+        }
+        self.pending.insert(key);
+        self.parents.insert(key, parents);
+        self.children.insert(key, Default::default());
+
+        Ok(())
+    }
+
+    /// Promotes a pending node, associating it with the provided value and allowing it to be
+    /// considered for processing.
+    ///
+    /// # Errors
+    ///
+    /// Errors if the given node is not pending.
+    ///
+    /// This method is atomic.
+    pub fn promote_pending(&mut self, key: K, value: V) -> Result<(), GraphError<K>> {
+        if !self.pending.remove(&key) {
+            return Err(GraphError::InvalidPendingNode(key));
+        }
+
+        self.vertices.insert(key, value);
+        self.try_make_root(key);
+
+        Ok(())
+    }
+
+    /// Reverts the nodes __and their descendants__, requeueing them for processing.
+    ///
+    /// Descendants which are pending remain unchanged.
+    ///
+    /// # Errors
+    ///
+    /// Returns an error if any of the given nodes:
+    ///
+    ///   - are not part of the graph, or
+    ///   - were not previously processed
+    ///
+    /// This method is atomic.
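+    ///
+    /// # Example (illustrative)
+    ///
+    /// ```text
+    /// Given the processed chain a -> b -> c, revert_subgraphs({b}) requeues b and c while
+    /// a stays processed; b rejoins the roots set since its only parent a is processed.
+    /// ```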
+ pub fn revert_subgraphs(&mut self, keys: BTreeSet) -> Result<(), GraphError> { + let missing_nodes = keys + .iter() + .filter(|key| !self.vertices.contains_key(key)) + .copied() + .collect::>(); + if !missing_nodes.is_empty() { + return Err(GraphError::UnknownNodes(missing_nodes)); + } + let unprocessed = keys.difference(&self.processed).copied().collect::>(); + if !unprocessed.is_empty() { + return Err(GraphError::UnprocessedNodes(unprocessed)); + } + + let mut reverted = BTreeSet::new(); + let mut to_revert = keys.clone(); + + while let Some(key) = to_revert.pop_first() { + self.processed.remove(&key); + + let unprocessed_children = self + .children + .get(&key) + .map(|children| children.difference(&reverted)) + .into_iter() + .flatten() + // We should not revert children which are pending. + .filter(|child| self.vertices.contains_key(child)) + .copied(); + + to_revert.extend(unprocessed_children); + + reverted.insert(key); + } + + // Only the original keys and the current roots need to be considered as roots. + // + // The children of the input keys are disqualified by definition (they're descendents), + // and current roots must be re-evaluated since their parents may have been requeued. + std::mem::take(&mut self.roots) + .into_iter() + .chain(keys) + .for_each(|key| self.try_make_root(key)); + + Ok(()) + } + + /// Removes a set of previously processed nodes from the graph. + /// + /// This is used to bound the size of the graph by removing nodes once they are no longer + /// required. + /// + /// # Errors + /// + /// Errors if + /// - any node is unknown + /// - any node is __not__ processed + /// - any parent node would be left unpruned + /// + /// The last point implies that all parents of the given nodes must either be part of the set, + /// or already been pruned. + /// + /// This method is atomic. + pub fn prune_processed(&mut self, keys: BTreeSet) -> Result, GraphError> { + let missing_nodes = + keys.iter().filter(|key| !self.contains(key)).copied().collect::>(); + if !missing_nodes.is_empty() { + return Err(GraphError::UnknownNodes(missing_nodes)); + } + + let unprocessed = keys.difference(&self.processed).copied().collect::>(); + if !unprocessed.is_empty() { + return Err(GraphError::UnprocessedNodes(unprocessed)); + } + + // No parent may be left dangling i.e. all parents must be part of this prune set. + let dangling = keys + .iter() + .flat_map(|key| self.parents.get(key)) + .flatten() + .filter(|parent| !keys.contains(parent)) + .copied() + .collect::>(); + if !dangling.is_empty() { + return Err(GraphError::DanglingNodes(dangling)); + } + + let mut pruned = Vec::with_capacity(keys.len()); + + for key in keys { + let value = self.vertices.remove(&key).expect("Checked in precondition"); + pruned.push(value); + self.processed.remove(&key); + self.parents.remove(&key); + + let children = self.children.remove(&key).unwrap_or_default(); + + // Remove edges from children to this node. + for child in children { + if let Some(child) = self.parents.get_mut(&child) { + child.remove(&key); + } + } + } + + Ok(pruned) + } + + /// Removes the set of nodes __and all descendents__ from the graph, returning all removed + /// nodes. This __includes__ pending nodes. + /// + /// # Returns + /// + /// All nodes removed. + /// + /// # Errors + /// + /// Returns an error if any of the given nodes does not exist. + /// + /// This method is atomic. 
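+    ///
+    /// # Example (illustrative)
+    ///
+    /// ```text
+    /// Given a -> b -> c plus an unrelated node d, purge_subgraphs({b}) removes {b, c}
+    /// (pending or not) and leaves a and d untouched.
+    /// ```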
+ pub fn purge_subgraphs(&mut self, keys: BTreeSet) -> Result, GraphError> { + let missing_nodes = + keys.iter().filter(|key| !self.contains(key)).copied().collect::>(); + if !missing_nodes.is_empty() { + return Err(GraphError::UnknownNodes(missing_nodes)); + } + + let visited = keys.clone(); + let mut to_remove = keys; + let mut removed = BTreeSet::new(); + + while let Some(key) = to_remove.pop_first() { + self.vertices.remove(&key); + self.pending.remove(&key); + removed.insert(key); + + self.processed.remove(&key); + self.roots.remove(&key); + + // Children must also be purged. Take care not to visit them twice which is + // possible since children can have multiple purged parents. + let unvisited_children = self.children.remove(&key).unwrap_or_default(); + let unvisited_children = unvisited_children.difference(&visited); + to_remove.extend(unvisited_children); + + // Inform parents that this child no longer exists. + let parents = self.parents.remove(&key).unwrap_or_default(); + for parent in parents { + if let Some(parent) = self.children.get_mut(&parent) { + parent.remove(&key); + } + } + } + + Ok(removed) + } + + /// Adds the node to the `roots` list _IFF_ all of its parents are processed. + /// + /// # SAFETY + /// + /// This method assumes the node exists. Caller is responsible for ensuring this is true. + fn try_make_root(&mut self, key: K) { + if self.pending.contains(&key) { + return; + } + debug_assert!( + self.vertices.contains_key(&key), + "Potential root {key} must exist in the graph" + ); + debug_assert!( + !self.processed.contains(&key), + "Potential root {key} cannot already be processed" + ); + + let all_parents_processed = self + .parents + .get(&key) + .into_iter() + .flatten() + .all(|parent| self.processed.contains(parent)); + + if all_parents_processed { + self.roots.insert(key); + } + } + + /// Returns the set of nodes that are ready for processing. + /// + /// Nodes can be selected from here and marked as processed using [`Self::process_root`]. + pub fn roots(&self) -> &BTreeSet { + &self.roots + } + + /// Marks a root node as processed, removing it from the roots list. + /// + /// The node's children are [evaluated](Self::try_make_root) as possible roots. + /// + /// # Error + /// + /// Errors if the node is not in the roots list. + /// + /// This method is atomic. + pub fn process_root(&mut self, key: K) -> Result<(), GraphError> { + if !self.roots.remove(&key) { + return Err(GraphError::InvalidRootNode(key)); + } + + self.processed.insert(key); + + self.children + .get(&key) + .cloned() + .unwrap_or_default() + .into_iter() + .for_each(|child| self.try_make_root(child)); + + Ok(()) + } + + /// Returns the value of a node. + pub fn get(&self, key: &K) -> Option<&V> { + self.vertices.get(key) + } + + /// Returns the parents of the node, or [None] if the node does not exist. + pub fn parents(&self, key: &K) -> Option<&BTreeSet> { + self.parents.get(key) + } + + /// Returns true if the node exists, in either the pending or non-pending sets. 
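+    ///
+    /// Note that processed nodes remain in the non-pending set until pruned or purged, so they
+    /// still count as contained here.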
+ fn contains(&self, key: &K) -> bool { + self.pending.contains(key) || self.vertices.contains_key(key) + } +} diff --git a/crates/block-producer/src/mempool/graph/tests.rs b/crates/block-producer/src/mempool/graph/tests.rs new file mode 100644 index 000000000..acee736f2 --- /dev/null +++ b/crates/block-producer/src/mempool/graph/tests.rs @@ -0,0 +1,577 @@ +use super::*; + +// TEST UTILITIES +// ================================================================================================ + +/// Simplified graph variant where a node's key always equals its value. This is done to make +/// generating test values simpler. +type TestGraph = DependencyGraph; + +impl TestGraph { + /// Alias for inserting a node with no parents. + fn insert_with_no_parents(&mut self, node: u32) -> Result<(), GraphError> { + self.insert_with_parents(node, Default::default()) + } + + /// Alias for inserting a node with a single parent. + fn insert_with_parent(&mut self, node: u32, parent: u32) -> Result<(), GraphError> { + self.insert_with_parents(node, [parent].into()) + } + + /// Alias for inserting a node with multiple parents. + fn insert_with_parents( + &mut self, + node: u32, + parents: BTreeSet, + ) -> Result<(), GraphError> { + self.insert_pending(node, parents) + } + + /// Alias for promoting nodes with the same value as the key. + fn promote(&mut self, nodes: impl IntoIterator) -> Result<(), GraphError> { + for node in nodes { + self.promote_pending(node, node)?; + } + Ok(()) + } + + /// Promotes all nodes in the pending list with value=key. + fn promote_all(&mut self) { + // SAFETY: these are definitely pending nodes. + self.promote(self.pending.clone()).unwrap(); + } + + /// Calls process_root until all nodes have been processed. + fn process_all(&mut self) { + while let Some(root) = self.roots().first().copied() { + // SAFETY: this is definitely a root since we just took it from there :) + self.process_root(root).unwrap(); + } + } +} + +// PROMOTE TESTS +// ================================================================================================ + +#[test] +fn promoted_nodes_are_considered_for_root() { + //! Ensure that a promoted node is added to the root list if all parents are already + //! processed. + let parent_a = 1; + let parent_b = 2; + let child_a = 3; + let child_b = 4; + let child_c = 5; + + let mut uut = TestGraph::default(); + uut.insert_with_no_parents(parent_a).unwrap(); + uut.insert_with_no_parents(parent_b).unwrap(); + uut.promote_all(); + + // Only process one parent so that some children remain unrootable. + uut.process_root(parent_a).unwrap(); + + uut.insert_with_parent(child_a, parent_a).unwrap(); + uut.insert_with_parent(child_b, parent_b).unwrap(); + uut.insert_with_parents(child_c, [parent_a, parent_b].into()).unwrap(); + + uut.promote_all(); + + // Only child_a should be added (in addition to the parents), since the other children + // are dependent on parent_b which is incomplete. + let expected_roots = [parent_b, child_a].into(); + + assert_eq!(uut.roots, expected_roots); +} + +#[test] +fn pending_nodes_are_not_considered_for_root() { + //! Ensure that an unpromoted node is _not_ added to the root list even if all parents are + //! already processed. 
+ let parent_a = 1; + let parent_b = 2; + let child_a = 3; + let child_b = 4; + let child_c = 5; + + let mut uut = TestGraph::default(); + uut.insert_with_no_parents(parent_a).unwrap(); + uut.insert_with_no_parents(parent_b).unwrap(); + uut.promote_all(); + uut.process_all(); + + uut.insert_with_parent(child_a, parent_a).unwrap(); + uut.insert_with_parent(child_b, parent_b).unwrap(); + uut.insert_with_parents(child_c, [parent_a, parent_b].into()).unwrap(); + + uut.promote([child_b]).unwrap(); + + // Only child b is valid as it was promoted. + let expected = [child_b].into(); + + assert_eq!(uut.roots, expected); +} + +#[test] +fn promoted_nodes_are_moved() { + let mut uut = TestGraph::default(); + uut.insert_with_no_parents(123).unwrap(); + + assert!(uut.pending.contains(&123)); + assert!(!uut.vertices.contains_key(&123)); + + uut.promote_pending(123, 123).unwrap(); + + assert!(!uut.pending.contains(&123)); + assert!(uut.vertices.contains_key(&123)); +} + +#[test] +fn promote_rejects_already_promoted_nodes() { + let mut uut = TestGraph::default(); + uut.insert_with_no_parents(123).unwrap(); + uut.promote_all(); + + let err = uut.promote_pending(123, 123).unwrap_err(); + let expected = GraphError::InvalidPendingNode(123); + assert_eq!(err, expected); +} + +#[test] +fn promote_rejects_unknown_nodes() { + let err = TestGraph::default().promote_pending(123, 123).unwrap_err(); + let expected = GraphError::InvalidPendingNode(123); + assert_eq!(err, expected); +} + +// INSERT TESTS +// ================================================================================================ + +#[test] +fn insert_with_known_parents_succeeds() { + let parent_a = 10; + let parent_b = 20; + let grandfather = 123; + let uncle = 222; + + let mut uut = TestGraph::default(); + uut.insert_with_no_parents(grandfather).unwrap(); + uut.insert_with_no_parents(parent_a).unwrap(); + uut.insert_with_parent(parent_b, grandfather).unwrap(); + uut.insert_with_parent(uncle, grandfather).unwrap(); + uut.insert_with_parents(1, [parent_a, parent_b].into()).unwrap(); +} + +#[test] +fn insert_duplicate_is_rejected() { + //! Ensure that inserting a duplicate node + //! - results in an error, and + //! - does not mutate the state (atomicity) + const KEY: u32 = 123; + let mut uut = TestGraph::default(); + uut.insert_with_no_parents(KEY).unwrap(); + + let err = uut.insert_with_no_parents(KEY).unwrap_err(); + let expected = GraphError::DuplicateKey(KEY); + assert_eq!(err, expected); + + let mut atomic_reference = TestGraph::default(); + atomic_reference.insert_with_no_parents(KEY).unwrap(); + assert_eq!(uut, atomic_reference); +} + +#[test] +fn insert_with_all_parents_missing_is_rejected() { + //! Ensure that inserting a node with unknown parents + //! - results in an error, and + //! - does not mutate the state (atomicity) + const MISSING: [u32; 4] = [1, 2, 3, 4]; + let mut uut = TestGraph::default(); + + let err = uut.insert_with_parents(0xABC, MISSING.into()).unwrap_err(); + let expected = GraphError::MissingParents(MISSING.into()); + assert_eq!(err, expected); + + let atomic_reference = TestGraph::default(); + assert_eq!(uut, atomic_reference); +} + +#[test] +fn insert_with_some_parents_missing_is_rejected() { + //! Ensure that inserting a node with unknown parents + //! - results in an error, and + //! 
- does not mutate the state (atomicity) + const MISSING: u32 = 123; + let mut uut = TestGraph::default(); + + uut.insert_with_no_parents(1).unwrap(); + uut.insert_with_no_parents(2).unwrap(); + uut.insert_with_no_parents(3).unwrap(); + + let atomic_reference = uut.clone(); + + let err = uut.insert_with_parents(0xABC, [1, 2, 3, MISSING].into()).unwrap_err(); + let expected = GraphError::MissingParents([MISSING].into()); + assert_eq!(err, expected); + assert_eq!(uut, atomic_reference); +} + +// REVERT TESTS +// ================================================================================================ + +#[test] +fn reverting_unprocessed_nodes_is_rejected() { + let mut uut = TestGraph::default(); + uut.insert_with_no_parents(1).unwrap(); + uut.insert_with_no_parents(2).unwrap(); + uut.insert_with_no_parents(3).unwrap(); + uut.promote_all(); + uut.process_root(1).unwrap(); + + let err = uut.revert_subgraphs([1, 2, 3].into()).unwrap_err(); + let expected = GraphError::UnprocessedNodes([2, 3].into()); + + assert_eq!(err, expected); +} + +#[test] +fn reverting_unknown_nodes_is_rejected() { + let err = TestGraph::default().revert_subgraphs([1].into()).unwrap_err(); + let expected = GraphError::UnknownNodes([1].into()); + assert_eq!(err, expected); +} + +#[test] +fn reverting_resets_the_entire_subgraph() { + //! Reverting should reset the state to before any of the nodes where processed. + let grandparent = 1; + let parent_a = 2; + let parent_b = 3; + let child_a = 4; + let child_b = 5; + let child_c = 6; + + let disjoint = 7; + + let mut uut = TestGraph::default(); + uut.insert_with_no_parents(grandparent).unwrap(); + uut.insert_with_no_parents(disjoint).unwrap(); + uut.insert_with_parent(parent_a, grandparent).unwrap(); + uut.insert_with_parent(parent_b, grandparent).unwrap(); + uut.insert_with_parent(child_a, parent_a).unwrap(); + uut.insert_with_parent(child_b, parent_b).unwrap(); + uut.insert_with_parents(child_c, [parent_a, parent_b].into()).unwrap(); + + uut.promote([disjoint, grandparent, parent_a, parent_b, child_a, child_c]) + .unwrap(); + uut.process_root(disjoint).unwrap(); + + let reference = uut.clone(); + + uut.process_all(); + uut.revert_subgraphs([grandparent].into()).unwrap(); + + assert_eq!(uut, reference); +} + +#[test] +fn reverting_reevaluates_roots() { + //! Node reverting from processed to unprocessed should cause the root nodes to be + //! re-evaluated. Only nodes with all parents processed should remain in the set. + let disjoint_parent = 1; + let disjoint_child = 2; + + let parent_a = 3; + let parent_b = 4; + let child_a = 5; + let child_b = 6; + + let partially_disjoin_child = 7; + + let mut uut = TestGraph::default(); + // This pair of nodes should not be impacted by the reverted subgraph. + uut.insert_with_no_parents(disjoint_parent).unwrap(); + uut.insert_with_parent(disjoint_child, disjoint_parent).unwrap(); + + uut.insert_with_no_parents(parent_a).unwrap(); + uut.insert_with_no_parents(parent_b).unwrap(); + uut.insert_with_parent(child_a, parent_a).unwrap(); + uut.insert_with_parent(child_b, parent_b).unwrap(); + uut.insert_with_parents(partially_disjoin_child, [disjoint_parent, parent_a].into()) + .unwrap(); + + // Since we are reverting the other parents, we expect the roots to match the current state. 
+ uut.promote_all(); + uut.process_root(disjoint_parent).unwrap(); + let reference = uut.roots().clone(); + + uut.process_root(parent_a).unwrap(); + uut.process_root(parent_b).unwrap(); + uut.revert_subgraphs([parent_a, parent_b].into()).unwrap(); + + assert_eq!(uut.roots(), &reference); +} + +// PRUNING TESTS +// ================================================================================================ + +#[test] +fn pruned_nodes_are_nonextant() { + //! Checks that processed and then pruned nodes behave as if they never existed in the + //! graph. We test this by comparing it to a reference graph created without these ancestor + //! nodes. + let ancestor_a = 1; + let ancestor_b = 2; + + let child_a = 3; + let child_b = 4; + let child_both = 5; + + let mut uut = TestGraph::default(); + uut.insert_with_no_parents(ancestor_a).unwrap(); + uut.insert_with_no_parents(ancestor_b).unwrap(); + uut.insert_with_parent(child_a, ancestor_a).unwrap(); + uut.insert_with_parent(child_b, ancestor_b).unwrap(); + uut.insert_with_parents(child_both, [ancestor_a, ancestor_b].into()).unwrap(); + uut.promote_all(); + + uut.process_root(ancestor_a).unwrap(); + uut.process_root(ancestor_b).unwrap(); + uut.prune_processed([ancestor_a, ancestor_b].into()).unwrap(); + + let mut reference = TestGraph::default(); + reference.insert_with_no_parents(child_a).unwrap(); + reference.insert_with_no_parents(child_b).unwrap(); + reference.insert_with_no_parents(child_both).unwrap(); + reference.promote_all(); + + assert_eq!(uut, reference); +} + +#[test] +fn pruning_unknown_nodes_is_rejected() { + let err = TestGraph::default().prune_processed([1].into()).unwrap_err(); + let expected = GraphError::UnknownNodes([1].into()); + assert_eq!(err, expected); +} + +#[test] +fn pruning_unprocessed_nodes_is_rejected() { + let mut uut = TestGraph::default(); + uut.insert_with_no_parents(1).unwrap(); + uut.promote_all(); + + let err = uut.prune_processed([1].into()).unwrap_err(); + let expected = GraphError::UnprocessedNodes([1].into()); + assert_eq!(err, expected); +} + +#[test] +fn pruning_cannot_leave_parents_dangling() { + //! Pruning processed nodes must always prune all parent nodes as well. No parent node may + //! be left behind. + let dangling = 1; + let pruned = 2; + let mut uut = TestGraph::default(); + uut.insert_with_no_parents(dangling).unwrap(); + uut.insert_with_parent(pruned, dangling).unwrap(); + uut.promote_all(); + uut.process_all(); + + let err = uut.prune_processed([pruned].into()).unwrap_err(); + let expected = GraphError::DanglingNodes([dangling].into()); + assert_eq!(err, expected); +} + +// PURGING TESTS +// ================================================================================================ + +#[test] +fn purging_subgraph_handles_internal_nodes() { + //! Purging a subgraph should correctly handle nodes already deleted within that subgraph. + //! + //! This is a concern for errors as we are deleting parts of the subgraph while we are + //! iterating through the nodes to purge. This means its likely a node will already have + //! been deleted before processing it as an input. + //! + //! We can force this to occur by re-ordering the inputs relative to the actual dependency + //! order. This means this test is a bit weaker because it relies on implementation details. + + let ancestor_a = 1; + let ancestor_b = 2; + let parent_a = 3; + let parent_b = 4; + let child_a = 5; + // This should be purged prior to parent_a. 
Relies on the fact that we are iterating over a + // btree which is ordered by value. + let child_b = 0; + let child_c = 6; + + let mut uut = TestGraph::default(); + uut.insert_with_no_parents(ancestor_a).unwrap(); + uut.insert_with_no_parents(ancestor_b).unwrap(); + uut.insert_with_parent(parent_a, ancestor_a).unwrap(); + uut.insert_with_parent(parent_b, ancestor_b).unwrap(); + uut.insert_with_parents(child_a, [ancestor_a, parent_a].into()).unwrap(); + uut.insert_with_parents(child_b, [parent_a, parent_b].into()).unwrap(); + uut.insert_with_parent(child_c, parent_b).unwrap(); + + uut.purge_subgraphs([child_b, parent_a].into()).unwrap(); + + let mut reference = TestGraph::default(); + reference.insert_with_no_parents(ancestor_a).unwrap(); + reference.insert_with_no_parents(ancestor_b).unwrap(); + reference.insert_with_parent(parent_b, ancestor_b).unwrap(); + reference.insert_with_parent(child_c, parent_b).unwrap(); + + assert_eq!(uut, reference); +} + +#[test] +fn purging_removes_all_descendents() { + let ancestor_a = 1; + let ancestor_b = 2; + let parent_a = 3; + let parent_b = 4; + let child_a = 5; + let child_b = 6; + let child_c = 7; + + let mut uut = TestGraph::default(); + uut.insert_with_no_parents(ancestor_a).unwrap(); + uut.insert_with_no_parents(ancestor_b).unwrap(); + uut.insert_with_parent(parent_a, ancestor_a).unwrap(); + uut.insert_with_parent(parent_b, ancestor_b).unwrap(); + uut.insert_with_parents(child_a, [ancestor_a, parent_a].into()).unwrap(); + uut.insert_with_parents(child_b, [parent_a, parent_b].into()).unwrap(); + uut.insert_with_parent(child_c, parent_b).unwrap(); + + uut.purge_subgraphs([parent_a].into()).unwrap(); + + let mut reference = TestGraph::default(); + reference.insert_with_no_parents(ancestor_a).unwrap(); + reference.insert_with_no_parents(ancestor_b).unwrap(); + reference.insert_with_parent(parent_b, ancestor_b).unwrap(); + reference.insert_with_parent(child_c, parent_b).unwrap(); + + assert_eq!(uut, reference); +} + +// PROCESSING TESTS +// ================================================================================================ + +#[test] +fn process_root_evaluates_children_as_roots() { + let parent_a = 1; + let parent_b = 2; + let child_a = 3; + let child_b = 4; + let child_c = 5; + + let mut uut = TestGraph::default(); + uut.insert_with_no_parents(parent_a).unwrap(); + uut.insert_with_no_parents(parent_b).unwrap(); + uut.insert_with_parent(child_a, parent_a).unwrap(); + uut.insert_with_parent(child_b, parent_b).unwrap(); + uut.insert_with_parents(child_c, [parent_a, parent_b].into()).unwrap(); + uut.promote_all(); + + uut.process_root(parent_a).unwrap(); + assert_eq!(uut.roots(), &[parent_b, child_a].into()); +} + +#[test] +fn process_root_rejects_non_root_node() { + let mut uut = TestGraph::default(); + uut.insert_with_no_parents(1).unwrap(); + uut.insert_with_parent(2, 1).unwrap(); + uut.promote_all(); + + let err = uut.process_root(2).unwrap_err(); + let expected = GraphError::InvalidRootNode(2); + assert_eq!(err, expected); +} + +#[test] +fn process_root_cannot_reprocess_same_node() { + let mut uut = TestGraph::default(); + uut.insert_with_no_parents(1).unwrap(); + uut.promote_all(); + uut.process_root(1).unwrap(); + + let err = uut.process_root(1).unwrap_err(); + let expected = GraphError::InvalidRootNode(1); + assert_eq!(err, expected); +} + +#[test] +fn processing_a_queue_graph() { + //! Creates a queue graph and ensures that nodes processed in FIFO order. 
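+    //! The graph is a simple chain 0 -> 1 -> ... -> 9, so exactly one root is available at
+    //! a time and the processing order must match the insertion order.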
+    let nodes = (0..10).collect::<Vec<u32>>();
+
+    let mut uut = TestGraph::default();
+    uut.insert_with_no_parents(nodes[0]).unwrap();
+    for pairs in nodes.windows(2) {
+        let (parent, id) = (pairs[0], pairs[1]);
+        uut.insert_with_parent(id, parent).unwrap();
+    }
+    uut.promote_all();
+
+    let mut ordered_roots = Vec::<u32>::new();
+    for _ in &nodes {
+        let current_roots = uut.roots().clone();
+        ordered_roots.extend(&current_roots);
+
+        for root in current_roots {
+            uut.process_root(root).unwrap();
+        }
+    }
+
+    assert_eq!(ordered_roots, nodes);
+}
+
+#[test]
+fn processing_and_root_tracking() {
+    //! Creates a somewhat arbitrarily connected graph and ensures that roots are tracked as
+    //! expected as they are processed.
+    let ancestor_a = 1;
+    let ancestor_b = 2;
+    let parent_a = 3;
+    let parent_b = 4;
+    let child_a = 5;
+    let child_b = 6;
+    let child_c = 7;
+
+    let mut uut = TestGraph::default();
+    uut.insert_with_no_parents(ancestor_a).unwrap();
+    uut.insert_with_no_parents(ancestor_b).unwrap();
+    uut.insert_with_parent(parent_a, ancestor_a).unwrap();
+    uut.insert_with_parent(parent_b, ancestor_b).unwrap();
+    uut.insert_with_parents(child_a, [ancestor_a, parent_a].into()).unwrap();
+    uut.insert_with_parents(child_b, [parent_a, parent_b].into()).unwrap();
+    uut.insert_with_parent(child_c, parent_b).unwrap();
+    uut.promote_all();
+
+    assert_eq!(uut.roots(), &[ancestor_a, ancestor_b].into());
+
+    uut.process_root(ancestor_a).unwrap();
+    assert_eq!(uut.roots(), &[ancestor_b, parent_a].into());
+
+    uut.process_root(ancestor_b).unwrap();
+    assert_eq!(uut.roots(), &[parent_a, parent_b].into());
+
+    uut.process_root(parent_a).unwrap();
+    assert_eq!(uut.roots(), &[parent_b, child_a].into());
+
+    uut.process_root(parent_b).unwrap();
+    assert_eq!(uut.roots(), &[child_a, child_b, child_c].into());
+
+    uut.process_root(child_a).unwrap();
+    assert_eq!(uut.roots(), &[child_b, child_c].into());
+
+    uut.process_root(child_b).unwrap();
+    assert_eq!(uut.roots(), &[child_c].into());
+
+    uut.process_root(child_c).unwrap();
+    assert!(uut.roots().is_empty());
+}
diff --git a/crates/block-producer/src/mempool/inflight_state/account_state.rs b/crates/block-producer/src/mempool/inflight_state/account_state.rs
new file mode 100644
index 000000000..dc79a339a
--- /dev/null
+++ b/crates/block-producer/src/mempool/inflight_state/account_state.rs
@@ -0,0 +1,322 @@
+use std::collections::VecDeque;
+
+use miden_objects::{transaction::TransactionId, Digest};
+
+// IN-FLIGHT ACCOUNT STATE
+// ================================================================================================
+
+/// Tracks the inflight state of an account.
+#[derive(Clone, Default, Debug, PartialEq)]
+pub struct InflightAccountState {
+    /// This account's state transitions due to inflight transactions.
+    ///
+    /// Ordering is chronological from front (oldest) to back (newest).
+    states: VecDeque<(Digest, TransactionId)>,
+
+    /// The number of inflight states that have been committed.
+    ///
+    /// This acts as a pivot index for `self.states`, splitting it into two segments. The first
+    /// contains committed states and the second those that are uncommitted.
+    committed: usize,
+}
+
+impl InflightAccountState {
+    /// Appends the new state, returning the previous state's transaction ID __IFF__ it is
+    /// uncommitted.
+    pub fn insert(&mut self, state: Digest, tx: TransactionId) -> Option<TransactionId> {
+        let mut parent = self.states.back().map(|(_, tx)| tx).copied();
+
+        // Only return uncommitted parent ID.
+ // + // Since this is the latest state's ID, we need at least one uncommitted state. + if self.uncommitted_count() == 0 { + parent.take(); + } + + self.states.push_back((state, tx)); + + parent + } + + /// The latest state of this account. + pub fn current_state(&self) -> Option<&Digest> { + self.states.back().map(|(state, _)| state) + } + + /// Reverts the most recent `n` uncommitted inflight transactions. + /// + /// # Returns + /// + /// Returns the emptiness state of the account. + /// + /// # Panics + /// + /// Panics if there are less than `n` uncommitted inflight transactions. + pub fn revert(&mut self, n: usize) -> AccountStatus { + let uncommitted = self.uncommitted_count(); + assert!( + uncommitted >= n, "Attempted to revert {n} transactions which is more than the {uncommitted} which are uncommitted.", + ); + + self.states.drain(self.states.len() - n..); + + self.emptiness() + } + + /// Commits the next `n` uncommitted inflight transactions. + /// + /// # Panics + /// + /// Panics if there are less than `n` uncommitted inflight transactions. + pub fn commit(&mut self, n: usize) { + let uncommitted = self.uncommitted_count(); + assert!( + uncommitted >= n, "Attempted to revert {n} transactions which is more than the {uncommitted} which are uncommitted." + ); + + self.committed += n; + } + + /// Removes `n` committed transactions. + /// + /// # Returns + /// + /// Returns the emptiness state of the account. + /// + /// # Panics + /// + /// Panics if there are less than `n` committed transactions. + pub fn prune_committed(&mut self, n: usize) -> AccountStatus { + assert!( + self.committed >= n, + "Attempted to prune {n} transactions, which is more than the {} which are committed", + self.committed + ); + + self.committed -= n; + self.states.drain(..n); + + self.emptiness() + } + + /// This is essentially `is_empty` with the additional benefit that [AccountStatus] is marked + /// as `#[must_use]`, forcing callers to handle empty accounts (which should be pruned). + fn emptiness(&self) -> AccountStatus { + if self.states.is_empty() { + AccountStatus::Empty + } else { + AccountStatus::NonEmpty + } + } + + /// The number of uncommitted inflight transactions. + fn uncommitted_count(&self) -> usize { + self.states.len() - self.committed + } +} + +/// Describes the emptiness of an [InflightAccountState]. +/// +/// Is marked as `#[must_use]` so that callers handle prune empty accounts. +#[must_use] +#[derive(Clone, Copy, PartialEq, Eq)] +pub enum AccountStatus { + /// [InflightAccountState] contains no state and should be pruned. + Empty, + /// [InflightAccountState] contains state and should be kept. 
+ NonEmpty, +} + +impl AccountStatus { + pub fn is_empty(&self) -> bool { + *self == Self::Empty + } +} + +// TESTS +// ================================================================================================ + +#[cfg(test)] +mod tests { + use super::*; + use crate::test_utils::Random; + + #[test] + fn current_state_is_the_most_recently_inserted() { + let mut rng = Random::with_random_seed(); + let mut uut = InflightAccountState::default(); + uut.insert(rng.draw_digest(), rng.draw_tx_id()); + uut.insert(rng.draw_digest(), rng.draw_tx_id()); + uut.insert(rng.draw_digest(), rng.draw_tx_id()); + + let expected = rng.draw_digest(); + uut.insert(expected, rng.draw_tx_id()); + + assert_eq!(uut.current_state(), Some(&expected)); + } + + #[test] + fn parent_is_the_most_recently_inserted() { + let mut rng = Random::with_random_seed(); + let mut uut = InflightAccountState::default(); + uut.insert(rng.draw_digest(), rng.draw_tx_id()); + uut.insert(rng.draw_digest(), rng.draw_tx_id()); + uut.insert(rng.draw_digest(), rng.draw_tx_id()); + + let expected = rng.draw_tx_id(); + uut.insert(rng.draw_digest(), expected); + + let parent = uut.insert(rng.draw_digest(), rng.draw_tx_id()); + + assert_eq!(parent, Some(expected)); + } + + #[test] + fn empty_account_has_no_parent() { + let mut rng = Random::with_random_seed(); + let mut uut = InflightAccountState::default(); + let parent = uut.insert(rng.draw_digest(), rng.draw_tx_id()); + + assert!(parent.is_none()); + } + + #[test] + fn fully_committed_account_has_no_parent() { + let mut rng = Random::with_random_seed(); + let mut uut = InflightAccountState::default(); + uut.insert(rng.draw_digest(), rng.draw_tx_id()); + uut.commit(1); + let parent = uut.insert(rng.draw_digest(), rng.draw_tx_id()); + + assert!(parent.is_none()); + } + + #[test] + fn uncommitted_account_has_a_parent() { + let mut rng = Random::with_random_seed(); + let expected_parent = rng.draw_tx_id(); + + let mut uut = InflightAccountState::default(); + uut.insert(rng.draw_digest(), expected_parent); + + let parent = uut.insert(rng.draw_digest(), rng.draw_tx_id()); + + assert_eq!(parent, Some(expected_parent)); + } + + #[test] + fn partially_committed_account_has_a_parent() { + let mut rng = Random::with_random_seed(); + let expected_parent = rng.draw_tx_id(); + + let mut uut = InflightAccountState::default(); + uut.insert(rng.draw_digest(), rng.draw_tx_id()); + uut.insert(rng.draw_digest(), expected_parent); + uut.commit(1); + + let parent = uut.insert(rng.draw_digest(), rng.draw_tx_id()); + + assert_eq!(parent, Some(expected_parent)); + } + + #[test] + fn reverted_txs_are_nonextant() { + let mut rng = Random::with_random_seed(); + const N: usize = 5; + const REVERT: usize = 2; + + let states = (0..N).map(|_| (rng.draw_digest(), rng.draw_tx_id())).collect::>(); + + let mut uut = InflightAccountState::default(); + for (state, tx) in &states { + uut.insert(*state, *tx); + } + let _ = uut.revert(REVERT); + + let mut expected = InflightAccountState::default(); + for (state, tx) in states.iter().rev().skip(REVERT).rev() { + expected.insert(*state, *tx); + } + + assert_eq!(uut, expected); + } + + #[test] + fn pruned_txs_are_nonextant() { + let mut rng = Random::with_random_seed(); + const N: usize = 5; + const PRUNE: usize = 2; + + let states = (0..N).map(|_| (rng.draw_digest(), rng.draw_tx_id())).collect::>(); + + let mut uut = InflightAccountState::default(); + for (state, tx) in &states { + uut.insert(*state, *tx); + } + uut.commit(PRUNE); + let _ = uut.prune_committed(PRUNE); + + let mut 
expected = InflightAccountState::default(); + for (state, tx) in states.iter().skip(PRUNE) { + expected.insert(*state, *tx); + } + + assert_eq!(uut, expected); + } + + #[test] + fn is_empty_after_full_commit_and_prune() { + let mut rng = Random::with_random_seed(); + const N: usize = 5; + let mut uut = InflightAccountState::default(); + for _ in 0..N { + uut.insert(rng.draw_digest(), rng.draw_tx_id()); + } + + uut.commit(N); + let _ = uut.prune_committed(N); + + assert_eq!(uut, Default::default()); + } + + #[test] + fn is_empty_after_full_revert() { + const N: usize = 5; + let mut uut = InflightAccountState::default(); + let mut rng = Random::with_random_seed(); + for _ in 0..N { + uut.insert(rng.draw_digest(), rng.draw_tx_id()); + } + + let _ = uut.revert(N); + + assert_eq!(uut, Default::default()); + } + + #[test] + #[should_panic] + fn revert_panics_on_out_of_bounds() { + let mut rng = Random::with_random_seed(); + const N: usize = 5; + let mut uut = InflightAccountState::default(); + for _ in 0..N { + uut.insert(rng.draw_digest(), rng.draw_tx_id()); + } + + uut.commit(1); + let _ = uut.revert(N); + } + + #[test] + #[should_panic] + fn commit_panics_on_out_of_bounds() { + let mut rng = Random::with_random_seed(); + const N: usize = 5; + let mut uut = InflightAccountState::default(); + for _ in 0..N { + uut.insert(rng.draw_digest(), rng.draw_tx_id()); + } + + uut.commit(N + 1); + } +} diff --git a/crates/block-producer/src/mempool/inflight_state/mod.rs b/crates/block-producer/src/mempool/inflight_state/mod.rs new file mode 100644 index 000000000..2f4836621 --- /dev/null +++ b/crates/block-producer/src/mempool/inflight_state/mod.rs @@ -0,0 +1,654 @@ +use std::collections::{BTreeMap, BTreeSet, VecDeque}; + +use miden_objects::{ + accounts::AccountId, + notes::{NoteId, Nullifier}, + transaction::TransactionId, +}; + +use crate::{ + domain::transaction::AuthenticatedTransaction, + errors::{AddTransactionError, VerifyTxError}, +}; + +mod account_state; + +use account_state::InflightAccountState; + +use super::BlockNumber; + +// IN-FLIGHT STATE +// ================================================================================================ + +/// Tracks the inflight state of the mempool. This includes recently committed blocks. +/// +/// Allows appending and reverting transactions as well as marking them as part of a committed +/// block. Committed state can also be pruned once the state is considered past the stale +/// threshold. +#[derive(Clone, Debug, PartialEq)] +pub struct InflightState { + /// Account states from inflight transactions. + /// + /// Accounts which are empty are immediately pruned. + accounts: BTreeMap, + + /// Nullifiers produced by the input notes of inflight transactions. + nullifiers: BTreeSet, + + /// Notes created by inflight transactions. + /// + /// Some of these may already be consumed - check the nullifiers. + output_notes: BTreeMap, + + /// Inflight transaction deltas. + /// + /// This _excludes_ deltas in committed blocks. + transaction_deltas: BTreeMap, + + /// Committed block deltas. + committed_blocks: VecDeque>, + + /// Amount of recently committed blocks we retain in addition to the inflight state. + /// + /// This provides an overlap between committed and inflight state, giving a grace + /// period for incoming transactions to be verified against both without requiring it + /// to be an atomic action. + num_retained_blocks: usize, + + /// The latest committed block height. 
+    chain_tip: BlockNumber,
+}
+
+/// A summary of a transaction's impact on the state.
+#[derive(Clone, Debug, PartialEq)]
+struct Delta {
+    /// The account this transaction updated.
+    account: AccountId,
+    /// The nullifiers produced by this transaction.
+    nullifiers: BTreeSet<Nullifier>,
+    /// The output notes created by this transaction.
+    output_notes: BTreeSet<NoteId>,
+}
+
+impl Delta {
+    fn new(tx: &AuthenticatedTransaction) -> Self {
+        Self {
+            account: tx.account_id(),
+            nullifiers: tx.nullifiers().collect(),
+            output_notes: tx.output_notes().collect(),
+        }
+    }
+}
+
+impl InflightState {
+    /// Creates an [InflightState] which will retain committed state for the given
+    /// number of blocks before pruning it.
+    pub fn new(chain_tip: BlockNumber, num_retained_blocks: usize) -> Self {
+        Self {
+            num_retained_blocks,
+            chain_tip,
+            accounts: Default::default(),
+            nullifiers: Default::default(),
+            output_notes: Default::default(),
+            transaction_deltas: Default::default(),
+            committed_blocks: Default::default(),
+        }
+    }
+
+    /// Appends the transaction to the inflight state.
+    ///
+    /// This operation is atomic, i.e. a rejected transaction has no impact on the state.
+    pub fn add_transaction(
+        &mut self,
+        tx: &AuthenticatedTransaction,
+    ) -> Result<BTreeSet<TransactionId>, AddTransactionError> {
+        // Separate verification and state mutation so that a rejected transaction
+        // does not impact the state (atomicity).
+        self.verify_transaction(tx)?;
+
+        let parents = self.insert_transaction(tx);
+
+        Ok(parents)
+    }
+
+    fn oldest_committed_state(&self) -> BlockNumber {
+        let committed_len: u32 = self
+            .committed_blocks
+            .len()
+            .try_into()
+            .expect("We should not be storing many blocks");
+        self.chain_tip
+            .checked_sub(BlockNumber::new(committed_len))
+            .expect("Chain height cannot be less than number of committed blocks")
+    }
+
+    fn verify_transaction(&self, tx: &AuthenticatedTransaction) -> Result<(), AddTransactionError> {
+        // The mempool retains recently committed blocks, in addition to the state that is
+        // currently inflight. This overlap with the committed state allows us to verify incoming
+        // transactions against the current state (committed + inflight). Transactions are
+        // first authenticated against the committed state prior to being submitted to the
+        // mempool. The overlap provides a grace period between transaction authentication
+        // against committed state and verification against inflight state.
+        //
+        // Here we just ensure that this authentication point is still within this overlap zone.
+        // This should only fail if the grace period is too restrictive for the current
+        // combination of block rate, transaction throughput and database IO.
+        let stale_limit = self.oldest_committed_state();
+        if tx.authentication_height() < stale_limit {
+            return Err(AddTransactionError::StaleInputs {
+                input_block: tx.authentication_height(),
+                stale_limit,
+            });
+        }
+
+        // Ensure the current account state is correct.
+        let current = self
+            .accounts
+            .get(&tx.account_id())
+            .and_then(|account_state| account_state.current_state())
+            .copied()
+            .or(tx.store_account_state());
+        let expected = tx.account_update().init_state_hash();
+
+        // SAFETY: a new account's state is set to zero, i.e. default.
+        if expected != current.unwrap_or_default() {
+            return Err(VerifyTxError::IncorrectAccountInitialHash {
+                tx_initial_account_hash: expected,
+                current_account_hash: current,
+            }
+            .into());
+        }
+
+        // Ensure nullifiers aren't already present.
+        //
+        // We don't need to check the store state here because that was
+        // already performed as part of authenticating the transaction.
+        let double_spend = tx
+            .nullifiers()
+            .filter(|nullifier| self.nullifiers.contains(nullifier))
+            .collect::<Vec<_>>();
+        if !double_spend.is_empty() {
+            return Err(VerifyTxError::InputNotesAlreadyConsumed(double_spend).into());
+        }
+
+        // Ensure output notes aren't already present.
+        let duplicates = tx
+            .output_notes()
+            .filter(|note| self.output_notes.contains_key(note))
+            .collect::<Vec<_>>();
+        if !duplicates.is_empty() {
+            return Err(VerifyTxError::OutputNotesAlreadyExist(duplicates).into());
+        }
+
+        // Ensure that all unauthenticated notes have an inflight output note to consume.
+        //
+        // We don't need to worry about double spending them since we already checked for
+        // that using the nullifiers.
+        //
+        // Note that the authenticated transaction already filters out notes that were
+        // previously unauthenticated, but were authenticated by the store.
+        let missing = tx
+            .unauthenticated_notes()
+            .filter(|note_id| !self.output_notes.contains_key(note_id))
+            .collect::<Vec<_>>();
+        if !missing.is_empty() {
+            return Err(VerifyTxError::UnauthenticatedNotesNotFound(missing).into());
+        }
+
+        Ok(())
+    }
+
+    /// Aggregates the transaction into the state, returning its parent transactions.
+    fn insert_transaction(&mut self, tx: &AuthenticatedTransaction) -> BTreeSet<TransactionId> {
+        self.transaction_deltas.insert(tx.id(), Delta::new(tx));
+        let account_parent = self
+            .accounts
+            .entry(tx.account_id())
+            .or_default()
+            .insert(tx.account_update().final_state_hash(), tx.id());
+
+        self.nullifiers.extend(tx.nullifiers());
+        self.output_notes
+            .extend(tx.output_notes().map(|note_id| (note_id, OutputNoteState::new(tx.id()))));
+
+        // Authenticated input notes (provably) consume notes that are already committed
+        // on chain. They therefore cannot form part of the inflight dependency chain.
+        //
+        // Additionally, we only care about parents which have not been committed yet.
+        let note_parents = tx
+            .unauthenticated_notes()
+            .filter_map(|note_id| self.output_notes.get(&note_id))
+            .filter_map(|note| note.transaction())
+            .copied();
+
+        account_parent.into_iter().chain(note_parents).collect()
+    }
+
+    /// Reverts the given set of _uncommitted_ transactions.
+    ///
+    /// # Panics
+    ///
+    /// Panics if any transaction is not part of the uncommitted state. Callers should take care
+    /// to only revert transaction sets whose ancestors are all either committed or reverted.
+    pub fn revert_transactions(&mut self, txs: BTreeSet<TransactionId>) {
+        for tx in txs {
+            let delta = self.transaction_deltas.remove(&tx).expect("Transaction delta must exist");
+
+            // SAFETY: Since the delta exists, so must the account.
+            let account_status = self.accounts.get_mut(&delta.account).unwrap().revert(1);
+            // Prune empty accounts.
+            if account_status.is_empty() {
+                self.accounts.remove(&delta.account);
+            }
+
+            for nullifier in delta.nullifiers {
+                assert!(self.nullifiers.remove(&nullifier), "Nullifier must exist");
+            }
+
+            for note in delta.output_notes {
+                assert!(self.output_notes.remove(&note).is_some(), "Output note must exist");
+            }
+        }
+    }
+
+    /// Marks the given state diff as committed.
+    ///
+    /// These transactions are no longer considered inflight. Callers should take care to only
+    /// commit transactions whose ancestors are all committed.
+    ///
+    /// Note that this state is still retained for the configured number of blocks. The oldest
+    /// retained block is also pruned from the state.
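+    ///
+    /// For example, with a retention of five blocks, committing a sixth block causes the
+    /// oldest retained block's deltas to be pruned (an illustrative reading of `prune_block`).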
+    ///
+    /// # Panics
+    ///
+    /// Panics if any transaction is not part of the uncommitted state.
+    pub fn commit_block(&mut self, txs: impl IntoIterator<Item = TransactionId>) {
+        let mut block_deltas = BTreeMap::new();
+        for tx in txs.into_iter() {
+            let delta = self.transaction_deltas.remove(&tx).expect("Transaction delta must exist");
+
+            // SAFETY: Since the delta exists, so must the account.
+            self.accounts.get_mut(&delta.account).unwrap().commit(1);
+
+            for note in &delta.output_notes {
+                self.output_notes.get_mut(note).expect("Output note must exist").commit();
+            }
+
+            block_deltas.insert(tx, delta);
+        }
+
+        self.committed_blocks.push_back(block_deltas);
+        self.prune_block();
+        self.chain_tip.increment();
+    }
+
+    /// Prunes the state from the oldest committed block _IFF_ there are more than the number we
+    /// wish to retain.
+    ///
+    /// This is used to bound the size of the inflight state.
+    fn prune_block(&mut self) {
+        // Keep the required number of committed blocks.
+        //
+        // This would occur on startup until we have accumulated enough blocks.
+        if self.committed_blocks.len() <= self.num_retained_blocks {
+            return;
+        }
+        // SAFETY: The length check above guarantees that we have at least one committed block.
+        let block = self.committed_blocks.pop_front().unwrap();
+
+        for (_, delta) in block {
+            // SAFETY: Since the delta exists, so must the account.
+            let status = self.accounts.get_mut(&delta.account).unwrap().prune_committed(1);
+
+            // Prune empty accounts.
+            if status.is_empty() {
+                self.accounts.remove(&delta.account);
+            }
+
+            for nullifier in delta.nullifiers {
+                self.nullifiers.remove(&nullifier);
+            }
+
+            for output_note in delta.output_notes {
+                self.output_notes.remove(&output_note);
+            }
+        }
+    }
+}
+
+/// Describes the state of an inflight output note.
+#[derive(Debug, Clone, Copy, PartialEq, Eq)]
+enum OutputNoteState {
+    /// Output note is part of a committed block, and its source transaction should no longer be
+    /// considered for dependency tracking.
+    Committed,
+    /// Output note is still inflight and should be considered for dependency tracking.
+    Inflight(TransactionId),
+}
+
+impl OutputNoteState {
+    /// Creates a new inflight output note state.
+    fn new(tx: TransactionId) -> Self {
+        Self::Inflight(tx)
+    }
+
+    /// Commits the output note, removing the source transaction.
+    fn commit(&mut self) {
+        *self = Self::Committed;
+    }
+
+    /// Returns the source transaction ID if the output note is not yet committed.
+    fn transaction(&self) -> Option<&TransactionId> {
+        if let Self::Inflight(tx) = self {
+            Some(tx)
+        } else {
+            None
+        }
+    }
+}
+
+// TESTS
+// ================================================================================================
+
+#[cfg(test)]
+mod tests {
+    use assert_matches::assert_matches;
+    use miden_objects::Digest;
+
+    use super::*;
+    use crate::test_utils::{
+        mock_account_id,
+        note::{mock_note, mock_output_note},
+        MockProvenTxBuilder,
+    };
+
+    #[test]
+    fn rejects_duplicate_nullifiers() {
+        let account = mock_account_id(1);
+        let states = (1u8..=4).map(|x| Digest::from([x, 0, 0, 0])).collect::<Vec<_>>();
+
+        let note_seed = 123;
+        // We need to make the note available first, in order for it to be consumed at all.
+        let tx0 = MockProvenTxBuilder::with_account(account, states[0], states[1])
+            .output_notes(vec![mock_output_note(note_seed)])
+            .build();
+        let tx1 = MockProvenTxBuilder::with_account(account, states[1], states[2])
+            .unauthenticated_notes(vec![mock_note(note_seed)])
+            .build();
+        let tx2 = MockProvenTxBuilder::with_account(account, states[2], states[3])
+            .unauthenticated_notes(vec![mock_note(note_seed)])
+            .build();
+
+        let mut uut = InflightState::new(BlockNumber::default(), 1);
+        uut.add_transaction(&AuthenticatedTransaction::from_inner(tx0)).unwrap();
+        uut.add_transaction(&AuthenticatedTransaction::from_inner(tx1)).unwrap();
+
+        let err = uut.add_transaction(&AuthenticatedTransaction::from_inner(tx2)).unwrap_err();
+
+        assert_matches!(
+            err,
+            AddTransactionError::VerificationFailed(VerifyTxError::InputNotesAlreadyConsumed(
+                notes
+            )) if notes == vec![mock_note(note_seed).nullifier()]
+        );
+    }
+
+    #[test]
+    fn rejects_duplicate_output_notes() {
+        let account = mock_account_id(1);
+        let states = (1u8..=3).map(|x| Digest::from([x, 0, 0, 0])).collect::<Vec<_>>();
+
+        let note = mock_output_note(123);
+        let tx0 = MockProvenTxBuilder::with_account(account, states[0], states[1])
+            .output_notes(vec![note.clone()])
+            .build();
+        let tx1 = MockProvenTxBuilder::with_account(account, states[1], states[2])
+            .output_notes(vec![note.clone()])
+            .build();
+
+        let mut uut = InflightState::new(BlockNumber::default(), 1);
+        uut.add_transaction(&AuthenticatedTransaction::from_inner(tx0)).unwrap();
+
+        let err = uut.add_transaction(&AuthenticatedTransaction::from_inner(tx1)).unwrap_err();
+
+        assert_matches!(
+            err,
+            AddTransactionError::VerificationFailed(VerifyTxError::OutputNotesAlreadyExist(
+                notes
+            )) if notes == vec![note.id()]
+        );
+    }
+
+    #[test]
+    fn rejects_account_state_mismatch() {
+        let account = mock_account_id(1);
+        let states = (1u8..=3).map(|x| Digest::from([x, 0, 0, 0])).collect::<Vec<_>>();
+
+        let tx = MockProvenTxBuilder::with_account(account, states[0], states[1]).build();
+
+        let mut uut = InflightState::new(BlockNumber::default(), 1);
+        let err = uut
+            .add_transaction(&AuthenticatedTransaction::from_inner(tx).with_store_state(states[2]))
+            .unwrap_err();
+
+        assert_matches!(
+            err,
+            AddTransactionError::VerificationFailed(VerifyTxError::IncorrectAccountInitialHash {
+                tx_initial_account_hash: init_state,
+                current_account_hash: current_state,
+            }) if init_state == states[0] && current_state == states[2].into()
+        );
+    }
+
+    #[test]
+    fn account_state_transitions() {
+        let account = mock_account_id(1);
+        let states = (1u8..=3).map(|x| Digest::from([x, 0, 0, 0])).collect::<Vec<_>>();
+
+        let tx0 = MockProvenTxBuilder::with_account(account, states[0], states[1]).build();
+        let tx1 = MockProvenTxBuilder::with_account(account, states[1], states[2]).build();
+
+        let mut uut = InflightState::new(BlockNumber::default(), 1);
+        uut.add_transaction(&AuthenticatedTransaction::from_inner(tx0)).unwrap();
+        uut.add_transaction(&AuthenticatedTransaction::from_inner(tx1).with_empty_store_state())
+            .unwrap();
+    }
+
+    #[test]
+    fn new_account_state_defaults_to_zero() {
+        let account = mock_account_id(1);
+
+        let tx = MockProvenTxBuilder::with_account(
+            account,
+            [0u8, 0, 0, 0].into(),
+            [1u8, 0, 0, 0].into(),
+        )
+        .build();
+
+        let mut uut = InflightState::new(BlockNumber::default(), 1);
+        uut.add_transaction(&AuthenticatedTransaction::from_inner(tx).with_empty_store_state())
+            .unwrap();
+    }
+
+    #[test]
+    fn inflight_account_state_overrides_input_state() {
+        let account = mock_account_id(1);
+        let states = (1u8..=3).map(|x| Digest::from([x, 0, 0, 0])).collect::<Vec<_>>();
+
+        let tx0 = MockProvenTxBuilder::with_account(account, states[0], states[1]).build();
+        let tx1 = MockProvenTxBuilder::with_account(account, states[1], states[2]).build();
+
+        let mut uut = InflightState::new(BlockNumber::default(), 1);
+        uut.add_transaction(&AuthenticatedTransaction::from_inner(tx0)).unwrap();
+
+        // Feed in an old state via input. This should be ignored, and the previous tx's final
+        // state should be used.
+        uut.add_transaction(&AuthenticatedTransaction::from_inner(tx1).with_store_state(states[0]))
+            .unwrap();
+    }
+
+    #[test]
+    fn dependency_tracking() {
+        let account = mock_account_id(1);
+        let states = (1u8..=3).map(|x| Digest::from([x, 0, 0, 0])).collect::<Vec<_>>();
+        let note_seed = 123;
+
+        // Parent via account state.
+        let tx0 = MockProvenTxBuilder::with_account(account, states[0], states[1]).build();
+        // Parent via output note.
+        let tx1 = MockProvenTxBuilder::with_account(mock_account_id(2), states[0], states[1])
+            .output_notes(vec![mock_output_note(note_seed)])
+            .build();
+
+        let tx = MockProvenTxBuilder::with_account(account, states[1], states[2])
+            .unauthenticated_notes(vec![mock_note(note_seed)])
+            .build();
+
+        let mut uut = InflightState::new(BlockNumber::default(), 1);
+        uut.add_transaction(&AuthenticatedTransaction::from_inner(tx0.clone())).unwrap();
+        uut.add_transaction(&AuthenticatedTransaction::from_inner(tx1.clone())).unwrap();
+
+        let parents = uut
+            .add_transaction(&AuthenticatedTransaction::from_inner(tx).with_empty_store_state())
+            .unwrap();
+        let expected = BTreeSet::from([tx0.id(), tx1.id()]);
+
+        assert_eq!(parents, expected);
+    }
+
+    #[test]
+    fn committed_parents_are_not_tracked() {
+        let account = mock_account_id(1);
+        let states = (1u8..=3).map(|x| Digest::from([x, 0, 0, 0])).collect::<Vec<_>>();
+        let note_seed = 123;
+
+        // Parent via account state.
+        let tx0 = MockProvenTxBuilder::with_account(account, states[0], states[1]).build();
+        let tx0 = AuthenticatedTransaction::from_inner(tx0);
+        // Parent via output note.
+        let tx1 = MockProvenTxBuilder::with_account(mock_account_id(2), states[0], states[1])
+            .output_notes(vec![mock_output_note(note_seed)])
+            .build();
+        let tx1 = AuthenticatedTransaction::from_inner(tx1);
+
+        let tx = MockProvenTxBuilder::with_account(account, states[1], states[2])
+            .unauthenticated_notes(vec![mock_note(note_seed)])
+            .build();
+
+        let mut uut = InflightState::new(BlockNumber::default(), 1);
+        uut.add_transaction(&tx0.clone()).unwrap();
+        uut.add_transaction(&tx1.clone()).unwrap();
+
+        // Commit the parents, which should remove them from dependency tracking.
+        uut.commit_block([tx0.id(), tx1.id()]);
+
+        let parents = uut
+            .add_transaction(&AuthenticatedTransaction::from_inner(tx).with_empty_store_state())
+            .unwrap();
+
+        assert!(parents.is_empty());
+    }
+
+    #[test]
+    fn tx_insertions_and_reversions_cancel_out() {
+        // Reverting txs should be equivalent to them never being inserted.
+        //
+        // We test this by reverting some txs and equating it to the remaining set.
+        // This is a form of property test.
+        let states = (1u8..=5).map(|x| Digest::from([x, 0, 0, 0])).collect::<Vec<_>>();
+        let txs = vec![
+            MockProvenTxBuilder::with_account(mock_account_id(1), states[0], states[1]),
+            MockProvenTxBuilder::with_account(mock_account_id(1), states[1], states[2])
+                .output_notes(vec![mock_output_note(111), mock_output_note(222)]),
+            MockProvenTxBuilder::with_account(mock_account_id(2), states[0], states[1])
+                .unauthenticated_notes(vec![mock_note(222)]),
+            MockProvenTxBuilder::with_account(mock_account_id(1), states[2], states[3]),
+            MockProvenTxBuilder::with_account(mock_account_id(2), states[1], states[2])
+                .unauthenticated_notes(vec![mock_note(111)])
+                .output_notes(vec![mock_output_note(45)]),
+        ];
+
+        let txs = txs
+            .into_iter()
+            .map(MockProvenTxBuilder::build)
+            .map(AuthenticatedTransaction::from_inner)
+            .collect::<Vec<_>>();
+
+        for i in 0..states.len() {
+            // Insert all txs and then revert the last `i` of them.
+            // This should match only inserting the first `N-i` of them.
+            let mut reverted = InflightState::new(BlockNumber::default(), 1);
+            for (idx, tx) in txs.iter().enumerate() {
+                reverted.add_transaction(tx).unwrap_or_else(|err| {
+                    panic!("Inserting tx #{idx} in iteration {i} should succeed: {err}")
+                });
+            }
+            reverted.revert_transactions(
+                txs.iter().rev().take(i).rev().map(AuthenticatedTransaction::id).collect(),
+            );
+
+            let mut inserted = InflightState::new(BlockNumber::default(), 1);
+            for (idx, tx) in txs.iter().rev().skip(i).rev().enumerate() {
+                inserted.add_transaction(tx).unwrap_or_else(|err| {
+                    panic!("Inserting tx #{idx} in iteration {i} should succeed: {err}")
+                });
+            }
+
+            assert_eq!(reverted, inserted, "Iteration {i}");
+        }
+    }
+
+    #[test]
+    fn pruning_committed_state() {
+        //! This is a form of property test, where we assert that pruning the first `i` of `N`
+        //! transactions is equivalent to only inserting the last `N-i` transactions.
+        let states = (1u8..=5).map(|x| Digest::from([x, 0, 0, 0])).collect::<Vec<_>>();
+
+        // Skipping initial txs means that output notes required for subsequent unauthenticated
+        // input notes won't always be present. To work around this, we instead only use
+        // authenticated input notes.
+        let txs = vec![
+            MockProvenTxBuilder::with_account(mock_account_id(1), states[0], states[1]),
+            MockProvenTxBuilder::with_account(mock_account_id(1), states[1], states[2])
+                .output_notes(vec![mock_output_note(111), mock_output_note(222)]),
+            MockProvenTxBuilder::with_account(mock_account_id(2), states[0], states[1])
+                .nullifiers(vec![mock_note(222).nullifier()]),
+            MockProvenTxBuilder::with_account(mock_account_id(1), states[2], states[3]),
+            MockProvenTxBuilder::with_account(mock_account_id(2), states[1], states[2])
+                .nullifiers(vec![mock_note(111).nullifier()])
+                .output_notes(vec![mock_output_note(45)]),
+        ];
+
+        let txs = txs
+            .into_iter()
+            .map(MockProvenTxBuilder::build)
+            .map(AuthenticatedTransaction::from_inner)
+            .collect::<Vec<_>>();
+
+        for i in 0..states.len() {
+            // Insert all txs and then commit and prune the first `i` of them.
+            //
+            // This should match only inserting the final `N-i` transactions.
+            // Note: we force all committed state to immediately be pruned by setting
+            // it to zero.
+            let mut committed = InflightState::new(BlockNumber::default(), 0);
+            for (idx, tx) in txs.iter().enumerate() {
+                committed.add_transaction(tx).unwrap_or_else(|err| {
+                    panic!("Inserting tx #{idx} in iteration {i} should succeed: {err}")
+                });
+            }
+            committed.commit_block(txs.iter().take(i).map(AuthenticatedTransaction::id));
+
+            let mut inserted = InflightState::new(BlockNumber::new(1), 0);
+            for (idx, tx) in txs.iter().skip(i).enumerate() {
+                // We need to adjust the height since we are effectively at block "1" now.
+                let tx = tx.clone().with_authentication_height(1);
+                inserted.add_transaction(&tx).unwrap_or_else(|err| {
+                    panic!("Inserting tx #{idx} in iteration {i} should succeed: {err}")
+                });
+            }
+
+            assert_eq!(committed, inserted, "Iteration {i}");
+        }
+    }
+}
diff --git a/crates/block-producer/src/mempool/mod.rs b/crates/block-producer/src/mempool/mod.rs
new file mode 100644
index 000000000..00e96e2ad
--- /dev/null
+++ b/crates/block-producer/src/mempool/mod.rs
@@ -0,0 +1,457 @@
+use std::{
+    collections::{BTreeMap, BTreeSet},
+    fmt::Display,
+    sync::Arc,
+};
+
+use batch_graph::BatchGraph;
+use inflight_state::InflightState;
+use miden_objects::{
+    MAX_ACCOUNTS_PER_BATCH, MAX_INPUT_NOTES_PER_BATCH, MAX_OUTPUT_NOTES_PER_BATCH,
+};
+use tokio::sync::Mutex;
+use transaction_graph::TransactionGraph;
+
+use crate::{
+    batch_builder::batch::TransactionBatch, domain::transaction::AuthenticatedTransaction,
+    errors::AddTransactionError, SERVER_MAX_BATCHES_PER_BLOCK, SERVER_MAX_TXS_PER_BATCH,
+};
+
+mod batch_graph;
+mod graph;
+mod inflight_state;
+mod transaction_graph;
+
+#[derive(Debug, Default, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
+pub struct BatchJobId(u64);
+
+impl Display for BatchJobId {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        self.0.fmt(f)
+    }
+}
+
+impl BatchJobId {
+    pub fn increment(&mut self) {
+        self.0 += 1;
+    }
+
+    #[cfg(test)]
+    pub fn new(value: u64) -> Self {
+        Self(value)
+    }
+}
+
+#[derive(Debug, Default, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
+pub struct BlockNumber(u32);
+
+impl Display for BlockNumber {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        self.0.fmt(f)
+    }
+}
+
+impl BlockNumber {
+    pub fn new(x: u32) -> Self {
+        Self(x)
+    }
+
+    pub fn next(&self) -> Self {
+        let mut ret = *self;
+        ret.increment();
+
+        ret
+    }
+
+    pub fn prev(&self) -> Option<Self> {
+        self.checked_sub(Self(1))
+    }
+
+    pub fn increment(&mut self) {
+        self.0 += 1;
+    }
+
+    pub fn checked_sub(&self, rhs: Self) -> Option<Self> {
+        self.0.checked_sub(rhs.0).map(Self)
+    }
+}
+
+// MEMPOOL BUDGET
+// ================================================================================================
+
+/// Limits placed on a batch's contents.
+#[derive(Copy, Clone, Debug, PartialEq, Eq)]
+pub struct BatchBudget {
+    /// Maximum number of transactions allowed in a batch.
+    transactions: usize,
+    /// Maximum number of input notes allowed.
+    input_notes: usize,
+    /// Maximum number of output notes allowed.
+    output_notes: usize,
+    /// Maximum number of updated accounts.
+    accounts: usize,
+}
+
+/// Limits placed on a block's contents.
+#[derive(Copy, Clone, Debug, PartialEq, Eq)]
+pub struct BlockBudget {
+    /// Maximum number of batches allowed in a block.
+    batches: usize,
+}
+
+#[derive(Debug, Clone, Copy, PartialEq, Eq)]
+enum BudgetStatus {
+    /// The operation remained within the budget.
+    WithinScope,
+    /// The operation exceeded the budget.
+    Exceeded,
+}
+
+impl Default for BatchBudget {
+    fn default() -> Self {
+        Self {
+            transactions: SERVER_MAX_TXS_PER_BATCH,
+            input_notes: MAX_INPUT_NOTES_PER_BATCH,
+            output_notes: MAX_OUTPUT_NOTES_PER_BATCH,
+            accounts: MAX_ACCOUNTS_PER_BATCH,
+        }
+    }
+}
+
+impl Default for BlockBudget {
+    fn default() -> Self {
+        Self { batches: SERVER_MAX_BATCHES_PER_BLOCK }
+    }
+}
+
+impl BatchBudget {
+    /// Attempts to consume the transaction's resources from the budget.
+    ///
+    /// Returns [BudgetStatus::Exceeded] if the transaction would exceed the remaining budget,
+    /// otherwise returns [BudgetStatus::WithinScope] and subtracts the resources from the budget.
+    #[must_use]
+    fn check_then_subtract(&mut self, tx: &AuthenticatedTransaction) -> BudgetStatus {
+        // This type assertion reminds us to update the account check if we ever support multiple
+        // account updates per tx.
+        let _: miden_objects::accounts::AccountId = tx.account_update().account_id();
+        const ACCOUNT_UPDATES_PER_TX: usize = 1;
+
+        let output_notes = tx.output_note_count();
+        let input_notes = tx.input_note_count();
+
+        if self.transactions == 0
+            || self.accounts < ACCOUNT_UPDATES_PER_TX
+            || self.input_notes < input_notes
+            || self.output_notes < output_notes
+        {
+            return BudgetStatus::Exceeded;
+        }
+
+        self.transactions -= 1;
+        self.accounts -= ACCOUNT_UPDATES_PER_TX;
+        self.input_notes -= input_notes;
+        self.output_notes -= output_notes;
+
+        BudgetStatus::WithinScope
+    }
+}
+
+impl BlockBudget {
+    /// Attempts to consume the batch's resources from the budget.
+    ///
+    /// Returns [BudgetStatus::Exceeded] if the batch would exceed the remaining budget,
+    /// otherwise returns [BudgetStatus::WithinScope].
+    #[must_use]
+    fn check_then_subtract(&mut self, _batch: &TransactionBatch) -> BudgetStatus {
+        if self.batches == 0 {
+            BudgetStatus::Exceeded
+        } else {
+            self.batches -= 1;
+            BudgetStatus::WithinScope
+        }
+    }
+}
+
+// MEMPOOL
+// ================================================================================================
+
+pub type SharedMempool = Arc<Mutex<Mempool>>;
+
+#[derive(Clone, Debug, PartialEq)]
+pub struct Mempool {
+    /// The latest inflight state of each account.
+    ///
+    /// Accounts without inflight transactions are not stored.
+    state: InflightState,
+
+    /// Inflight transactions.
+    transactions: TransactionGraph,
+
+    /// Inflight batches.
+    batches: BatchGraph,
+
+    /// The next batch ID.
+    next_batch_id: BatchJobId,
+
+    /// The current block height of the chain.
+    chain_tip: BlockNumber,
+
+    /// The current inflight block, if any.
+    block_in_progress: Option<BTreeSet<BatchJobId>>,
+
+    block_budget: BlockBudget,
+    batch_budget: BatchBudget,
+}
+
+impl Mempool {
+    /// Creates a new [SharedMempool] with the provided configuration.
+    pub fn shared(
+        chain_tip: BlockNumber,
+        batch_budget: BatchBudget,
+        block_budget: BlockBudget,
+        state_retention: usize,
+    ) -> SharedMempool {
+        Arc::new(Mutex::new(Self::new(chain_tip, batch_budget, block_budget, state_retention)))
+    }
+
+    fn new(
+        chain_tip: BlockNumber,
+        batch_budget: BatchBudget,
+        block_budget: BlockBudget,
+        state_retention: usize,
+    ) -> Mempool {
+        Self {
+            chain_tip,
+            batch_budget,
+            block_budget,
+            state: InflightState::new(chain_tip, state_retention),
+            block_in_progress: Default::default(),
+            transactions: Default::default(),
+            batches: Default::default(),
+            next_batch_id: Default::default(),
+        }
+    }
+
+    /// Adds a transaction to the mempool.
+    ///
+    /// # Returns
+    ///
+    /// Returns the current block height.
+    ///
+    /// # Errors
+    ///
+    /// Returns an error if the transaction's initial conditions don't match the current state.
+    pub fn add_transaction(
+        &mut self,
+        transaction: AuthenticatedTransaction,
+    ) -> Result<u32, AddTransactionError> {
+        // Add transaction to inflight state.
+        let parents = self.state.add_transaction(&transaction)?;
+
+        self.transactions
+            .insert(transaction, parents)
+            .expect("Transaction should insert after passing inflight state");
+
+        Ok(self.chain_tip.0)
+    }
+
+    /// Returns a set of transactions for the next batch.
+    ///
+    /// Transactions are returned in a valid execution ordering.
+    ///
+    /// Returns `None` if no transactions are available.
+    pub fn select_batch(&mut self) -> Option<(BatchJobId, Vec<AuthenticatedTransaction>)> {
+        let (batch, parents) = self.transactions.select_batch(self.batch_budget);
+        if batch.is_empty() {
+            return None;
+        }
+        let tx_ids = batch.iter().map(AuthenticatedTransaction::id).collect();
+
+        let batch_id = self.next_batch_id;
+        self.next_batch_id.increment();
+
+        self.batches
+            .insert(batch_id, tx_ids, parents)
+            .expect("Selected batch should insert");
+
+        Some((batch_id, batch))
+    }
+
+    /// Drops the failed batch and all of its descendants.
+    ///
+    /// Transactions are placed back in the queue.
+    pub fn batch_failed(&mut self, batch: BatchJobId) {
+        // Batch may already have been removed as part of a parent batch's failure.
+        if !self.batches.contains(&batch) {
+            return;
+        }
+
+        let removed_batches =
+            self.batches.remove_batches([batch].into()).expect("Batch was not present");
+
+        let transactions = removed_batches.values().flatten().copied().collect();
+
+        self.transactions
+            .requeue_transactions(transactions)
+            .expect("Transaction should requeue");
+
+        tracing::warn!(
+            %batch,
+            descendants=?removed_batches.keys(),
+            "Batch failed, dropping all inflight descendant batches, impacted transactions are back in queue."
+        );
+    }
+
+    /// Marks a batch as proven if it exists.
+    pub fn batch_proved(&mut self, batch_id: BatchJobId, batch: TransactionBatch) {
+        // Batch may have been removed as part of a parent batch's failure.
+        if !self.batches.contains(&batch_id) {
+            return;
+        }
+
+        self.batches.submit_proof(batch_id, batch).expect("Batch proof should submit");
+    }
+
+    /// Select batches for the next block.
+    ///
+    /// May return an empty set if no batches are ready.
+    ///
+    /// # Panics
+    ///
+    /// Panics if there is already a block in flight.
+    pub fn select_block(&mut self) -> (BlockNumber, BTreeMap<BatchJobId, TransactionBatch>) {
+        assert!(self.block_in_progress.is_none(), "Cannot have two blocks inflight.");
+
+        let batches = self.batches.select_block(self.block_budget);
+        self.block_in_progress = Some(batches.keys().cloned().collect());
+
+        (self.chain_tip.next(), batches)
+    }
+
+    /// Notify the pool that the block was successfully completed.
+    ///
+    /// # Panics
+    ///
+    /// Panics if blocks are completed out-of-order or if there is no block in flight.
+    pub fn block_committed(&mut self, block_number: BlockNumber) {
+        assert_eq!(block_number, self.chain_tip.next(), "Blocks must be submitted sequentially");
+
+        // Remove committed batches and transactions from graphs.
+        let batches = self.block_in_progress.take().expect("No block in progress to commit");
+        let transactions = self.batches.prune_committed(batches).expect("Batches failed to commit");
+        self.transactions
+            .commit_transactions(&transactions)
+            .expect("Transaction graph malformed");
+
+        // Inform inflight state about committed data.
+        self.state.commit_block(transactions);
+
+        self.chain_tip.increment();
+    }
+
+    /// Purges the block and all of its contents and dependents from the mempool.
+    ///
+    /// # Panics
+    ///
+    /// Panics if there is no block in flight or if the block number does not match the current
+    /// inflight block.
+    pub fn block_failed(&mut self, block_number: BlockNumber) {
+        assert_eq!(block_number, self.chain_tip.next(), "Blocks must be submitted sequentially");
+
+        let batches = self.block_in_progress.take().expect("No block in progress to be failed");
+
+        // Remove all transactions from the graphs.
+        let purged = self.batches.remove_batches(batches).expect("Batch should be removed");
+        let transactions = purged.into_values().flatten().collect();
+
+        let transactions = self
+            .transactions
+            .remove_transactions(transactions)
+            .expect("Failed transactions should be removed");
+
+        // Rollback state.
+        self.state.revert_transactions(transactions);
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use pretty_assertions::assert_eq;
+
+    use super::*;
+    use crate::test_utils::MockProvenTxBuilder;
+
+    impl Mempool {
+        fn for_tests() -> Self {
+            Self::new(BlockNumber::new(0), Default::default(), Default::default(), 5)
+        }
+    }
+
+    // BATCH REVERSION TESTS
+    // ================================================================================================
+
+    #[test]
+    fn children_of_reverted_batches_are_ignored() {
+        // Batches are proved concurrently. This makes it possible for a child job to complete
+        // after the parent has already been reverted (which also reverts the child job). Such a
+        // child job should be ignored.
+        let txs = MockProvenTxBuilder::sequential();
+
+        let mut uut = Mempool::for_tests();
+        uut.add_transaction(txs[0].clone()).unwrap();
+        let (parent_batch, batch_txs) = uut.select_batch().unwrap();
+        assert_eq!(batch_txs, vec![txs[0].clone()]);
+
+        uut.add_transaction(txs[1].clone()).unwrap();
+        let (child_batch_a, batch_txs) = uut.select_batch().unwrap();
+        assert_eq!(batch_txs, vec![txs[1].clone()]);
+
+        uut.add_transaction(txs[2].clone()).unwrap();
+        let (child_batch_b, batch_txs) = uut.select_batch().unwrap();
+        assert_eq!(batch_txs, vec![txs[2].clone()]);
+
+        // Child batch jobs are now dangling.
+        uut.batch_failed(parent_batch);
+        let reference = uut.clone();
+
+        // Success or failure of the child job should effectively do nothing.
+        uut.batch_failed(child_batch_a);
+        assert_eq!(uut, reference);
+
+        let proof = TransactionBatch::new(
+            vec![txs[2].raw_proven_transaction().clone()],
+            Default::default(),
+        )
+        .unwrap();
+        uut.batch_proved(child_batch_b, proof);
+        assert_eq!(uut, reference);
+    }
+
+    #[test]
+    fn reverted_batch_transactions_are_requeued() {
+        let txs = MockProvenTxBuilder::sequential();
+
+        let mut uut = Mempool::for_tests();
+        uut.add_transaction(txs[0].clone()).unwrap();
+        uut.select_batch().unwrap();
+
+        uut.add_transaction(txs[1].clone()).unwrap();
+        let (failed_batch, _) = uut.select_batch().unwrap();
+
+        uut.add_transaction(txs[2].clone()).unwrap();
+        uut.select_batch().unwrap();
+
+        // Middle batch failed, so it and its child transaction should be re-entered into the queue.
+        uut.batch_failed(failed_batch);
+
+        let mut reference = Mempool::for_tests();
+        reference.add_transaction(txs[0].clone()).unwrap();
+        reference.select_batch().unwrap();
+        reference.add_transaction(txs[1].clone()).unwrap();
+        reference.add_transaction(txs[2].clone()).unwrap();
+        reference.next_batch_id.increment();
+        reference.next_batch_id.increment();
+
+        assert_eq!(uut, reference);
+    }
+}
diff --git a/crates/block-producer/src/mempool/transaction_graph.rs b/crates/block-producer/src/mempool/transaction_graph.rs
new file mode 100644
index 000000000..5709bfdeb
--- /dev/null
+++ b/crates/block-producer/src/mempool/transaction_graph.rs
@@ -0,0 +1,200 @@
+use std::collections::BTreeSet;
+
+use miden_objects::transaction::TransactionId;
+
+use super::{
+    graph::{DependencyGraph, GraphError},
+    BatchBudget, BudgetStatus,
+};
+use crate::domain::transaction::AuthenticatedTransaction;
+
+// TRANSACTION GRAPH
+// ================================================================================================
+
+/// Tracks the dependency graph and status of transactions.
+///
+/// It handles insertion of transactions, keeping them queued until they are ready to be processed.
+/// A transaction is considered eligible for batch selection once all of its parents have also been
+/// selected. Essentially this graph ensures that transaction dependency ordering is adhered to.
+///
+/// Transactions from failed batches may be [re-queued](Self::requeue_transactions) for batch
+/// selection. Successful batches will eventually form part of a committed block at which point the
+/// transaction data may be safely [pruned](Self::commit_transactions).
+///
+/// Transactions may also be outright [purged](Self::remove_transactions) from the graph. This is
+/// useful for transactions which may have become invalid due to external considerations e.g.
+/// expired transactions.
+///
+/// # Transaction lifecycle:
+/// ```text
+///                                        │
+///                                  insert│
+///                                  ┌─────▼─────┐
+///                       ┌─────────►           ┼────┐
+///                       │          └─────┬─────┘    │
+///                       │                │          │
+///   requeue_transactions│    select_batch│          │
+///                       │          ┌─────▼─────┐    │
+///                       └──────────┼ in batch  ┼────┤
+///                                  └─────┬─────┘    │
+///                                        │          │
+///                     commit_transactions│          │remove_transactions
+///                                  ┌─────▼─────┐    │
+///                                  │           ◄────┘
+///                                  └───────────┘
+/// ```
+#[derive(Clone, Debug, Default, PartialEq)]
+pub struct TransactionGraph {
+    inner: DependencyGraph<TransactionId, AuthenticatedTransaction>,
+}
+
+impl TransactionGraph {
+    /// Inserts a new transaction node, with edges to the given parent nodes.
+    ///
+    /// # Errors
+    ///
+    /// Follows the error conditions of [DependencyGraph::insert_pending].
+    pub fn insert(
+        &mut self,
+        transaction: AuthenticatedTransaction,
+        parents: BTreeSet<TransactionId>,
+    ) -> Result<(), GraphError<TransactionId>> {
+        self.inner.insert_pending(transaction.id(), parents)?;
+        self.inner.promote_pending(transaction.id(), transaction)
+    }
+
+    /// Selects a set of transactions for the next batch such that they adhere to the given budget.
+    ///
+    /// Also returns the transactions' parents.
+    ///
+    /// Internally these transactions are considered processed and cannot be emitted in future
+    /// batches.
+    ///
+    /// Note: this may emit empty batches.
+    ///
+    /// See also:
+    ///   - [Self::requeue_transactions]
+    ///   - [Self::commit_transactions]
+    pub fn select_batch(
+        &mut self,
+        mut budget: BatchBudget,
+    ) -> (Vec<AuthenticatedTransaction>, BTreeSet<TransactionId>) {
+        // This strategy just selects arbitrary roots for now. This is valid but not very
+        // interesting or efficient.
+        let mut batch = Vec::with_capacity(budget.transactions);
+        let mut parents = BTreeSet::new();
+
+        while let Some(root) = self.inner.roots().first().cloned() {
+            // SAFETY: Since it was a root, it must definitely have an associated transaction
+            // value.
+            let tx = self.inner.get(&root).unwrap().clone();
+
+            // Adhere to batch budget.
+            if budget.check_then_subtract(&tx) == BudgetStatus::Exceeded {
+                break;
+            }
+
+            // SAFETY: This is definitely a root since we just selected it from the set of roots.
+            self.inner.process_root(root).unwrap();
+            let tx_parents = self.inner.parents(&root).unwrap();
+
+            batch.push(tx);
+            parents.extend(tx_parents);
+        }
+
+        (batch, parents)
+    }
+
+    /// Marks the given transactions as being back in queue.
+    ///
+    /// # Errors
+    ///
+    /// Follows the error conditions of [DependencyGraph::revert_subgraphs].
+    pub fn requeue_transactions(
+        &mut self,
+        transactions: BTreeSet<TransactionId>,
+    ) -> Result<(), GraphError<TransactionId>> {
+        self.inner.revert_subgraphs(transactions)
+    }
+
+    /// Removes the provided transactions from the graph.
+    ///
+    /// # Errors
+    ///
+    /// Follows the error conditions of [DependencyGraph::prune_processed].
+    pub fn commit_transactions(
+        &mut self,
+        tx_ids: &[TransactionId],
+    ) -> Result<(), GraphError<TransactionId>> {
+        // TODO: revisit this api.
+        let tx_ids = tx_ids.iter().cloned().collect();
+        self.inner.prune_processed(tx_ids)?;
+        Ok(())
+    }
+
+    /// Removes the transactions and all their descendants from the graph.
+    ///
+    /// Returns the removed transactions.
+    ///
+    /// # Errors
+    ///
+    /// Follows the error conditions of [DependencyGraph::purge_subgraphs].
+    pub fn remove_transactions(
+        &mut self,
+        transactions: Vec<TransactionId>,
+    ) -> Result<BTreeSet<TransactionId>, GraphError<TransactionId>> {
+        // TODO: revisit this api.
+        let transactions = transactions.into_iter().collect();
+        self.inner.purge_subgraphs(transactions)
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use crate::test_utils::mock_proven_tx;
+
+    // BATCH SELECTION TESTS
+    // ================================================================================================
+
+    #[test]
+    fn select_batch_respects_transaction_limit() {
+        // These transactions are independent and just used to ensure we have more available
+        // transactions than we want in the batch.
+        let txs = (0..10)
+            .map(|i| mock_proven_tx(i, vec![], vec![]))
+            .map(AuthenticatedTransaction::from_inner);
+
+        let mut uut = TransactionGraph::default();
+        for tx in txs {
+            uut.insert(tx, [].into()).unwrap();
+        }
+
+        let (batch, parents) =
+            uut.select_batch(BatchBudget { transactions: 0, ..Default::default() });
+        assert!(batch.is_empty());
+        assert!(parents.is_empty());
+
+        let (batch, parents) =
+            uut.select_batch(BatchBudget { transactions: 3, ..Default::default() });
+        assert_eq!(batch.len(), 3);
+        assert!(parents.is_empty());
+
+        let (batch, parents) =
+            uut.select_batch(BatchBudget { transactions: 4, ..Default::default() });
+        assert_eq!(batch.len(), 4);
+        assert!(parents.is_empty());
+
+        // We expect this to be partially filled.
+        let (batch, parents) =
+            uut.select_batch(BatchBudget { transactions: 4, ..Default::default() });
+        assert_eq!(batch.len(), 3);
+        assert!(parents.is_empty());
+
+        // And thereafter empty.
+        let (batch, parents) =
+            uut.select_batch(BatchBudget { transactions: 100, ..Default::default() });
+        assert!(batch.is_empty());
+        assert!(parents.is_empty());
+    }
+}
diff --git a/crates/block-producer/src/server/mod.rs b/crates/block-producer/src/server/mod.rs
index e81471804..08e72137e 100644
--- a/crates/block-producer/src/server/mod.rs
+++ b/crates/block-producer/src/server/mod.rs
@@ -1,32 +1,30 @@
-use std::{net::ToSocketAddrs, sync::Arc};
+use std::{collections::HashMap, net::ToSocketAddrs};
 
-use miden_node_proto::generated::{block_producer::api_server, store::api_client as store_client};
-use miden_node_utils::errors::ApiError;
-use tokio::net::TcpListener;
+use miden_node_proto::generated::{
+    block_producer::api_server, requests::SubmitProvenTransactionRequest,
+    responses::SubmitProvenTransactionResponse, store::api_client as store_client,
+};
+use miden_node_utils::{
+    errors::ApiError,
+    formatting::{format_input_notes, format_output_notes},
+};
+use miden_objects::{transaction::ProvenTransaction, utils::serde::Deserializable};
+use tokio::{net::TcpListener, sync::Mutex};
 use tokio_stream::wrappers::TcpListenerStream;
-use tracing::info;
+use tonic::Status;
+use tracing::{debug, info, instrument};
 
 use crate::{
-    batch_builder::{DefaultBatchBuilder, DefaultBatchBuilderOptions},
-    block_builder::DefaultBlockBuilder,
+    batch_builder::BatchBuilder,
+    block_builder::BlockBuilder,
     config::BlockProducerConfig,
-    state_view::DefaultStateView,
-    store::DefaultStore,
-    txqueue::{TransactionQueue, TransactionQueueOptions},
-    COMPONENT, SERVER_BATCH_SIZE, SERVER_BLOCK_FREQUENCY, SERVER_BUILD_BATCH_FREQUENCY,
-    SERVER_MAX_BATCHES_PER_BLOCK,
+    domain::transaction::AuthenticatedTransaction,
+    errors::{AddTransactionError, BlockProducerError, VerifyTxError},
+    mempool::{BatchBudget, BlockBudget, BlockNumber, Mempool, SharedMempool},
+    store::{DefaultStore, Store},
+    COMPONENT, SERVER_MEMPOOL_STATE_RETENTION,
 };
 
-pub mod api;
-
-type Api = api::BlockProducerApi<
-    DefaultBatchBuilder<
-        DefaultStore,
-        DefaultBlockBuilder<DefaultStore, DefaultStateView<DefaultStore>>,
-    >,
-    DefaultStateView<DefaultStore>,
->;
-
 /// Represents an initialized block-producer component where the RPC connection is open,
 /// but not yet actively responding to requests.
 ///
@@ -34,8 +32,14 @@ type Api = api::BlockProducerApi<
 /// components to the store without resorting to sleeps or other mechanisms to spawn dependent
 /// components.
 pub struct BlockProducer {
-    api_service: api_server::ApiServer<Api>,
-    listener: TcpListener,
+    batch_builder: BatchBuilder,
+    block_builder: BlockBuilder,
+    batch_budget: BatchBudget,
+    block_budget: BlockBudget,
+    state_retention: usize,
+    rpc_listener: TcpListener,
+    store: DefaultStore,
+    chain_tip: BlockNumber,
 }
 
 impl BlockProducer {
@@ -45,63 +49,198 @@ impl BlockProducer {
     pub async fn init(config: BlockProducerConfig) -> Result<Self, ApiError> {
         info!(target: COMPONENT, %config, "Initializing server");
 
-        let store = Arc::new(DefaultStore::new(
+        let store = DefaultStore::new(
             store_client::ApiClient::connect(config.store_url.to_string())
                 .await
                 .map_err(|err| ApiError::DatabaseConnectionFailed(err.to_string()))?,
-        ));
-        let state_view =
-            Arc::new(DefaultStateView::new(Arc::clone(&store), config.verify_tx_proofs));
-
-        let block_builder = DefaultBlockBuilder::new(Arc::clone(&store), Arc::clone(&state_view));
-        let batch_builder_options = DefaultBatchBuilderOptions {
-            block_frequency: SERVER_BLOCK_FREQUENCY,
-            max_batches_per_block: SERVER_MAX_BATCHES_PER_BLOCK,
-        };
-        let batch_builder = Arc::new(DefaultBatchBuilder::new(
-            Arc::clone(&store),
-            Arc::new(block_builder),
-            batch_builder_options,
-        ));
-
-        let transaction_queue_options = TransactionQueueOptions {
-            build_batch_frequency: SERVER_BUILD_BATCH_FREQUENCY,
-            batch_size: SERVER_BATCH_SIZE,
-        };
-        let queue = Arc::new(TransactionQueue::new(
-            state_view,
-            Arc::clone(&batch_builder),
-            transaction_queue_options,
-        ));
-
-        let api_service =
-            api_server::ApiServer::new(api::BlockProducerApi::new(Arc::clone(&queue)));
+        );
 
-        tokio::spawn(async move { queue.run().await });
-        tokio::spawn(async move { batch_builder.run().await });
+        let latest_header = store
+            .latest_header()
+            .await
+            .map_err(|err| ApiError::DatabaseConnectionFailed(err.to_string()))?;
+        let chain_tip = BlockNumber::new(latest_header.block_num());
 
-        let addr = config
+        let rpc_listener = config
             .endpoint
             .to_socket_addrs()
             .map_err(ApiError::EndpointToSocketFailed)?
             .next()
-            .ok_or_else(|| ApiError::AddressResolutionFailed(config.endpoint.to_string()))?;
-
-        let listener = TcpListener::bind(addr).await?;
+            .ok_or_else(|| ApiError::AddressResolutionFailed(config.endpoint.to_string()))
+            .map(TcpListener::bind)?
+            .await?;
 
         info!(target: COMPONENT, "Server initialized");
 
-        Ok(Self { api_service, listener })
+        Ok(Self {
+            batch_builder: Default::default(),
+            block_builder: BlockBuilder::new(store.clone()),
+            batch_budget: Default::default(),
+            block_budget: Default::default(),
+            state_retention: SERVER_MEMPOOL_STATE_RETENTION,
+            store,
+            rpc_listener,
+            chain_tip,
+        })
+    }
+
+    pub async fn serve(self) -> Result<(), BlockProducerError> {
+        let Self {
+            batch_builder,
+            block_builder,
+            batch_budget,
+            block_budget,
+            state_retention,
+            rpc_listener,
+            store,
+            chain_tip,
+        } = self;
+
+        let mempool = Mempool::shared(chain_tip, batch_budget, block_budget, state_retention);
+
+        // Spawn rpc server and batch and block provers.
+        //
+        // These communicate indirectly via a shared mempool.
+        //
+        // These should run forever, so we combine them into a joinset so that if
+        // any complete or fail, we can shut down the rest (somewhat) gracefully.
+        let mut tasks = tokio::task::JoinSet::new();
+
+        let batch_builder_id = tasks
+            .spawn({
+                let mempool = mempool.clone();
+                async {
+                    batch_builder.run(mempool).await;
+                    Ok(())
+                }
+            })
+            .id();
+        let block_builder_id = tasks
+            .spawn({
+                let mempool = mempool.clone();
+                async {
+                    block_builder.run(mempool).await;
+                    Ok(())
+                }
+            })
+            .id();
+        let rpc_id = tasks
+            .spawn(async move {
+                BlockProducerRpcServer::new(mempool, store).serve(rpc_listener).await
+            })
+            .id();
+
+        let task_ids = HashMap::from([
+            (batch_builder_id, "batch-builder"),
+            (block_builder_id, "block-builder"),
+            (rpc_id, "rpc"),
+        ]);
+
+        // Wait for any task to end. They should run indefinitely, so this is an unexpected result.
+        //
+        // SAFETY: The JoinSet is definitely not empty.
+        let task_result = tasks.join_next_with_id().await.unwrap();
+
+        let task_id = match &task_result {
+            Ok((id, _)) => *id,
+            Err(err) => err.id(),
+        };
+        let task = task_ids.get(&task_id).unwrap_or(&"unknown");
+
+        // We could abort the other tasks here, but not much point as we're probably crashing the
+        // node.
+
+        task_result
+            .map_err(|source| BlockProducerError::JoinError { task, source })
+            .map(|(_, result)| match result {
+                Ok(_) => Err(BlockProducerError::TaskFailedSuccesfully { task }),
+                Err(source) => Err(BlockProducerError::TonicTransportError { task, source }),
+            })
+            .and_then(|x| x)
+    }
+}
+
+/// Serves the block producer's RPC [api](api_server::Api).
+struct BlockProducerRpcServer {
+    /// The mutex effectively rate limits incoming transactions into the mempool by forcing them
+    /// through a queue.
+    ///
+    /// This gives mempool users such as the batch and block builders equal footing with __all__
+    /// incoming transactions combined. Without this, incoming transactions would greatly restrict
+    /// the block-producer's usage of the mempool.
+    mempool: Mutex<SharedMempool>,
+
+    store: DefaultStore,
+}
+
+#[tonic::async_trait]
+impl api_server::Api for BlockProducerRpcServer {
+    async fn submit_proven_transaction(
+        &self,
+        request: tonic::Request<SubmitProvenTransactionRequest>,
+    ) -> Result<tonic::Response<SubmitProvenTransactionResponse>, Status> {
+        self.submit_proven_transaction(request.into_inner())
+            .await
+            .map(tonic::Response::new)
+            // This Status::from mapping takes care of hiding internal errors.
+            .map_err(Into::into)
+    }
+}
+
+impl BlockProducerRpcServer {
+    pub fn new(mempool: SharedMempool, store: DefaultStore) -> Self {
+        Self { mempool: Mutex::new(mempool), store }
+    }
+
+    async fn serve(self, listener: TcpListener) -> Result<(), tonic::transport::Error> {
         tonic::transport::Server::builder()
-            .add_service(self.api_service)
-            .serve_with_incoming(TcpListenerStream::new(self.listener))
+            .add_service(api_server::ApiServer::new(self))
+            .serve_with_incoming(TcpListenerStream::new(listener))
+            .await
+    }
+
+    #[instrument(
+        target = "miden-block-producer",
+        name = "block_producer:submit_proven_transaction",
+        skip_all,
+        err
+    )]
+    async fn submit_proven_transaction(
+        &self,
+        request: SubmitProvenTransactionRequest,
+    ) -> Result<SubmitProvenTransactionResponse, AddTransactionError> {
+        debug!(target: COMPONENT, ?request);
+
+        let tx = ProvenTransaction::read_from_bytes(&request.transaction)
+            .map_err(|err| AddTransactionError::DeserializationError(err.to_string()))?;
+
+        let tx_id = tx.id();
+
+        info!(
+            target: COMPONENT,
+            tx_id = %tx_id.to_hex(),
+            account_id = %tx.account_id().to_hex(),
+            initial_account_hash = %tx.account_update().init_state_hash(),
+            final_account_hash = %tx.account_update().final_state_hash(),
+            input_notes = %format_input_notes(tx.input_notes()),
+            output_notes = %format_output_notes(tx.output_notes()),
+            block_ref = %tx.block_ref(),
+            "Deserialized transaction"
+        );
+        debug!(target: COMPONENT, proof = ?tx.proof());
+
+        let inputs = self.store.get_tx_inputs(&tx).await.map_err(VerifyTxError::from)?;
+
+        // SAFETY: we assume that the rpc component has verified the transaction proof already.
+        let tx = AuthenticatedTransaction::new(tx, inputs)?;
+
+        self.mempool
+            .lock()
+            .await
+            .lock()
             .await
-            .map_err(ApiError::ApiServeFailed)
+            .add_transaction(tx)
+            .map(|block_height| SubmitProvenTransactionResponse { block_height })
     }
 }
diff --git a/crates/block-producer/src/state_view/account_state.rs b/crates/block-producer/src/state_view/account_state.rs
deleted file mode 100644
index b3c538cc9..000000000
--- a/crates/block-producer/src/state_view/account_state.rs
+++ /dev/null
@@ -1,123 +0,0 @@
-use std::collections::{BTreeMap, VecDeque};
-
-use miden_objects::{accounts::AccountId, Digest};
-
-use crate::errors::VerifyTxError;
-
-/// Tracks the list of inflight account updates.
-///
-/// New transactions can be registered with [Self::verify_and_add]. States that are no longer
-/// considered inflight (e.g. due to being applied) may be removed using [Self::remove].
-///
-/// Both functions perform safety checks to ensure the states match what we expect.
-#[derive(Debug, Default)]
-pub struct InflightAccountStates(BTreeMap<AccountId, VecDeque<Digest>>);
-
-impl InflightAccountStates {
-    /// Verifies that the provided initial state matches the latest inflight account state (if any).
-    pub fn verify_update(&self, id: AccountId, init_state: Digest) -> Result<(), VerifyTxError> {
-        if let Some(latest) = self.get(id) {
-            if latest != &init_state {
-                return Err(VerifyTxError::IncorrectAccountInitialHash {
-                    tx_initial_account_hash: init_state,
-                    current_account_hash: Some(*latest),
-                });
-            }
-        }
-
-        Ok(())
-    }
-
-    /// [Verifies](Self::verify_update) the update and appends it to the list of inflight account
-    /// updates.
-    pub fn verify_and_add(
-        &mut self,
-        id: AccountId,
-        init_state: Digest,
-        final_state: Digest,
-    ) -> Result<(), VerifyTxError> {
-        let states = self.0.entry(id).or_default();
-
-        // Ensure the latest state matches the new inital state.
-        if let Some(latest) = states.back() {
-            if latest != &init_state {
-                return Err(VerifyTxError::IncorrectAccountInitialHash {
-                    tx_initial_account_hash: init_state,
-                    current_account_hash: Some(*latest),
-                });
-            }
-        }
-
-        states.push_back(final_state);
-
-        Ok(())
-    }
-
-    /// Remove state transitions from earliest up until a state that matches the given
-    /// final state. Returns an error if no match was found.
-    ///
-    /// In other words, if an account has state transitions `a->b->c->d` then calling `remove(b)`
-    /// would leave behind `c->d`.
-    pub fn remove(&mut self, id: AccountId, final_state: Digest) -> Result<(), ()> {
-        let states = self.0.get_mut(&id).ok_or(())?;
-        let Some(idx) = states.iter().position(|x| x == &final_state) else {
-            return Err(());
-        };
-
-        states.drain(..=idx);
-        // Prevent infinite growth by removing entries which have no
-        // inflight state changes.
-        if states.is_empty() {
-            self.0.remove(&id);
-        }
-
-        Ok(())
-    }
-
-    /// The latest value of the given account.
-    pub fn get(&self, id: AccountId) -> Option<&Digest> {
-        self.0.get(&id).and_then(|states| states.back())
-    }
-
-    /// Number of accounts with inflight transactions.
-    #[cfg(test)]
-    pub fn contains(&self, id: AccountId) -> bool {
-        self.0.contains_key(&id)
-    }
-
-    /// Number of accounts with inflight transactions.
-    #[cfg(test)]
-    pub fn len(&self) -> usize {
-        self.0.len()
-    }
-}
-
-#[cfg(test)]
-mod tests {
-    use miden_air::Felt;
-    use miden_objects::accounts::AccountId;
-
-    use super::*;
-
-    #[test]
-    fn account_states_must_chain() {
-        let account: AccountId = AccountId::new_unchecked(Felt::new(10));
-        const ONE: Digest = Digest::new([Felt::new(1), Felt::new(1), Felt::new(1), Felt::new(1)]);
-        const TWO: Digest = Digest::new([Felt::new(2), Felt::new(2), Felt::new(2), Felt::new(2)]);
-        const THREE: Digest = Digest::new([Felt::new(3), Felt::new(3), Felt::new(3), Felt::new(3)]);
-        let mut uut = InflightAccountStates::default();
-
-        assert!(uut.verify_and_add(account, Digest::default(), ONE).is_ok());
-        assert!(uut.verify_and_add(account, ONE, TWO).is_ok());
-        assert!(uut.verify_and_add(account, TWO, THREE).is_ok());
-        assert!(uut.verify_and_add(account, TWO, ONE).is_err());
-
-        assert!(uut.remove(account, TWO).is_ok());
-        // Repeat removal should fail since this is no longer present.
-        assert!(uut.remove(account, TWO).is_err());
-        assert!(uut.remove(account, THREE).is_ok());
-
-        // Check that cleanup is performed.
-        assert!(uut.0.is_empty());
-    }
-}
diff --git a/crates/block-producer/src/state_view/tests/mod.rs b/crates/block-producer/src/state_view/tests/mod.rs
deleted file mode 100644
index 4fbf87c60..000000000
--- a/crates/block-producer/src/state_view/tests/mod.rs
+++ /dev/null
@@ -1,42 +0,0 @@
-use miden_objects::{Hasher, EMPTY_WORD, ZERO};
-
-use super::*;
-use crate::test_utils::{MockPrivateAccount, MockProvenTxBuilder};
-
-mod apply_block;
-mod verify_tx;
-
-// HELPERS
-// -------------------------------------------------------------------------------------------------
-
-pub fn nullifier_by_index(index: u32) -> Nullifier {
-    Nullifier::new(
-        Hasher::hash(&index.to_be_bytes()),
-        Hasher::hash(
-            &[index.to_be_bytes(), index.to_be_bytes()]
-                .into_iter()
-                .flatten()
-                .collect::<Vec<u8>>(),
-        ),
-        EMPTY_WORD.into(),
-        [ZERO, ZERO, ZERO, index.into()],
-    )
-}
-
-/// Returns `num` transactions, and the corresponding account they modify.
-/// The transactions each consume a single different note
-pub fn get_txs_and_accounts(
-    starting_account_index: u32,
-    num: u32,
-) -> impl Iterator<Item = (ProvenTransaction, MockPrivateAccount)> {
-    (0..num).map(move |index| {
-        let account = MockPrivateAccount::from(starting_account_index + index);
-        let nullifier_starting_index = (starting_account_index + index) as u64;
-        let tx =
-            MockProvenTxBuilder::with_account(account.id, account.states[0], account.states[1])
-                .nullifiers_range(nullifier_starting_index..(nullifier_starting_index + 1))
-                .build();
-
-        (tx, account)
-    })
-}
diff --git a/crates/block-producer/src/store/mod.rs b/crates/block-producer/src/store/mod.rs
index 57e4f88f7..a6af90e0f 100644
--- a/crates/block-producer/src/store/mod.rs
+++ b/crates/block-producer/src/store/mod.rs
@@ -7,14 +7,10 @@ use std::{
 use async_trait::async_trait;
 use itertools::Itertools;
 use miden_node_proto::{
-    domain::notes::NoteAuthenticationInfo,
     errors::{ConversionError, MissingFieldHelper},
     generated::{
         digest,
-        requests::{
-            ApplyBlockRequest, GetBlockInputsRequest, GetNoteAuthenticationInfoRequest,
-            GetTransactionInputsRequest,
-        },
+        requests::{ApplyBlockRequest, GetBlockInputsRequest, GetTransactionInputsRequest},
         responses::{GetTransactionInputsResponse, NullifierTransactionInputRecord},
         store::api_client as store_client,
     },
@@ -25,15 +21,16 @@ use miden_objects::{
     accounts::AccountId,
     block::Block,
     notes::{NoteId, Nullifier},
+    transaction::ProvenTransaction,
     utils::Serializable,
-    Digest,
+    BlockHeader, Digest,
 };
 use miden_processor::crypto::RpoDigest;
 use tonic::transport::Channel;
 use tracing::{debug, info, instrument};
 
 pub use crate::errors::{ApplyBlockError, BlockInputsError, TxInputsError};
-use crate::{block::BlockInputs, errors::NotePathsError, ProvenTransaction, COMPONENT};
+use crate::{block::BlockInputs, COMPONENT};
 
 // STORE TRAIT
 // ================================================================================================
@@ -53,15 +50,6 @@ pub trait Store: ApplyBlock {
         produced_nullifiers: impl Iterator<Item = &Nullifier> + Send,
         notes: impl Iterator<Item = &NoteId> + Send,
     ) -> Result<BlockInputs, BlockInputsError>;
-
-    /// Returns note authentication information for the set of specified notes.
-    ///
-    /// If authentication info for a note does not exist in the store, the note is omitted
-    /// from the returned set of notes.
-    async fn get_note_authentication_info(
-        &self,
-        notes: impl Iterator<Item = &NoteId> + Send,
-    ) -> Result<NoteAuthenticationInfo, NotePathsError>;
 }
 
 #[async_trait]
@@ -154,6 +142,7 @@ impl TryFrom<GetTransactionInputsResponse> for TransactionInputs {
 // DEFAULT STORE IMPLEMENTATION
 // ================================================================================================
 
+#[derive(Clone)]
 pub struct DefaultStore {
     store: store_client::ApiClient<Channel>,
 }
@@ -163,6 +152,20 @@ impl DefaultStore {
     pub fn new(store: store_client::ApiClient<Channel>) -> Self {
         Self { store }
     }
+
+    /// Returns the latest block's header from the store.
+    pub async fn latest_header(&self) -> Result<BlockHeader, String> {
+        // TODO: Consolidate the error types returned by the store (and its trait).
+        let response = self
+            .store
+            .clone()
+            .get_block_header_by_number(tonic::Request::new(Default::default()))
+            .await
+            .map_err(|err| err.to_string())?
+            .into_inner();
+
+        BlockHeader::try_from(response.block_header.unwrap()).map_err(|err| err.to_string())
+    }
 }
 
 #[async_trait]
@@ -249,28 +252,4 @@ impl Store for DefaultStore {
 
         Ok(store_response.try_into()?)
     }
-
-    async fn get_note_authentication_info(
-        &self,
-        notes: impl Iterator<Item = &NoteId> + Send,
-    ) -> Result<NoteAuthenticationInfo, NotePathsError> {
-        let request = tonic::Request::new(GetNoteAuthenticationInfoRequest {
-            note_ids: notes.map(digest::Digest::from).collect(),
-        });
-
-        let store_response = self
-            .store
-            .clone()
-            .get_note_authentication_info(request)
-            .await
-            .map_err(|err| NotePathsError::GrpcClientError(err.message().to_string()))?
-            .into_inner();
-
-        let note_authentication_info = store_response
-            .proofs
-            .ok_or(GetTransactionInputsResponse::missing_field("proofs"))?
-            .try_into()?;
-
-        Ok(note_authentication_info)
-    }
 }
diff --git a/crates/block-producer/src/test_utils/batch.rs b/crates/block-producer/src/test_utils/batch.rs
index 889fc8829..2717a3a79 100644
--- a/crates/block-producer/src/test_utils/batch.rs
+++ b/crates/block-producer/src/test_utils/batch.rs
@@ -1,4 +1,4 @@
-use crate::{test_utils::MockProvenTxBuilder, TransactionBatch};
+use crate::{batch_builder::TransactionBatch, test_utils::MockProvenTxBuilder};
 
 pub trait TransactionBatchConstructor {
     /// Returns a `TransactionBatch` with `notes_per_tx.len()` transactions, where the i'th
diff --git a/crates/block-producer/src/test_utils/block.rs b/crates/block-producer/src/test_utils/block.rs
index 71d15f7e6..35abbe6a0 100644
--- a/crates/block-producer/src/test_utils/block.rs
+++ b/crates/block-producer/src/test_utils/block.rs
@@ -10,10 +10,10 @@ use miden_objects::{
 
 use super::MockStoreSuccess;
 use crate::{
+    batch_builder::TransactionBatch,
     block::BlockInputs,
     block_builder::prover::{block_witness::BlockWitness, BlockProver},
     store::Store,
-    TransactionBatch,
 };
 
 /// Constructs the block we expect to be built given the store state, and a set of transaction
diff --git a/crates/block-producer/src/test_utils/mod.rs b/crates/block-producer/src/test_utils/mod.rs
index 07a7a4b52..e2385e747 100644
--- a/crates/block-producer/src/test_utils/mod.rs
+++ b/crates/block-producer/src/test_utils/mod.rs
@@ -1,7 +1,11 @@
 use std::sync::Arc;
 
-use miden_objects::{accounts::AccountId, Digest};
-use tokio::sync::RwLock;
+use miden_objects::{
+    accounts::AccountId,
+    crypto::rand::{FeltRng, RpoRandomCoin},
+    transaction::TransactionId,
+    Digest,
+};
 
 mod proven_tx;
 
@@ -20,3 +24,31 @@ pub mod block;
 pub mod batch;
 
 pub mod note;
+
+/// Generates random values for tests.
+///
+/// It prints its seed on construction which allows us to reproduce
+/// test failures.
+pub struct Random(RpoRandomCoin);
+
+impl Random {
+    /// Creates a [Random] with a random seed. This seed is logged
+    /// so that it is known for test failures.
+    pub fn with_random_seed() -> Self {
+        let seed: [u32; 4] = rand::random();
+
+        println!("Random::with_random_seed: {seed:?}");
+
+        let seed = Digest::from(seed).into();
+
+        Self(RpoRandomCoin::new(seed))
+    }
+
+    pub fn draw_tx_id(&mut self) -> TransactionId {
+        self.0.draw_word().into()
+    }
+
+    pub fn draw_digest(&mut self) -> Digest {
+        self.0.draw_word().into()
+    }
+}
diff --git a/crates/block-producer/src/test_utils/proven_tx.rs b/crates/block-producer/src/test_utils/proven_tx.rs
index cf804d8d7..271466341 100644
--- a/crates/block-producer/src/test_utils/proven_tx.rs
+++ b/crates/block-producer/src/test_utils/proven_tx.rs
@@ -1,5 +1,6 @@
 use std::ops::Range;
 
+use itertools::Itertools;
 use miden_air::HashFunction;
 use miden_objects::{
     accounts::AccountId,
@@ -8,9 +9,11 @@ use miden_objects::{
     vm::ExecutionProof,
     Digest, Felt, Hasher, ONE,
 };
+use rand::Rng;
 use winterfell::Proof;
 
 use super::MockPrivateAccount;
+use crate::domain::transaction::AuthenticatedTransaction;
 
 pub struct MockProvenTxBuilder {
     account_id: AccountId,
@@ -29,6 +32,25 @@ impl MockProvenTxBuilder {
         Self::with_account(mock_account.id, mock_account.states[0], mock_account.states[1])
     }
 
+    /// Generates 3 random, sequential transactions acting on the same account.
+    pub fn sequential() -> [AuthenticatedTransaction; 3] {
+        let mut rng = rand::thread_rng();
+        let mock_account: MockPrivateAccount<4> = rng.gen::<u32>().into();
+
+        (0..3)
+            .map(|i| {
+                Self::with_account(
+                    mock_account.id,
+                    mock_account.states[i],
+                    mock_account.states[i + 1],
+                )
+            })
+            .map(|tx| AuthenticatedTransaction::from_inner(tx.build()))
+            .collect_vec()
+            .try_into()
+            .expect("Sizes should match")
+    }
+
     pub fn with_account(
         account_id: AccountId,
         initial_account_hash: Digest,
diff --git a/crates/block-producer/src/test_utils/store.rs b/crates/block-producer/src/test_utils/store.rs
index f1fb7fef4..c04ff3923 100644
--- a/crates/block-producer/src/test_utils/store.rs
+++ b/crates/block-producer/src/test_utils/store.rs
@@ -10,21 +10,21 @@ use miden_objects::{
     block::{Block, NoteBatch},
     crypto::merkle::{Mmr, SimpleSmt, Smt, ValuePath},
     notes::{NoteId, NoteInclusionProof, Nullifier},
+    transaction::ProvenTransaction,
     BlockHeader, ACCOUNT_TREE_DEPTH, EMPTY_WORD, ZERO,
 };
+use tokio::sync::RwLock;
 
 use super::*;
 use crate::{
     batch_builder::TransactionBatch,
     block::{AccountWitness, BlockInputs},
-    errors::NotePathsError,
     store::{
         ApplyBlock, ApplyBlockError, BlockInputsError, Store, TransactionInputs, TxInputsError,
     },
     test_utils::block::{
         block_output_notes, flatten_output_notes, note_created_smt_from_note_batches,
     },
-    ProvenTransaction,
 };
 
 /// Builds a [`MockStoreSuccess`]
@@ -351,39 +351,6 @@ impl Store for MockStoreSuccess {
             found_unauthenticated_notes,
         })
     }
-
-    async fn get_note_authentication_info(
-        &self,
-        notes: impl Iterator<Item = &NoteId> + Send,
-    ) -> Result<NoteAuthenticationInfo, NotePathsError> {
-        let locked_notes = self.notes.read().await;
-        let locked_headers = self.block_headers.read().await;
-        let locked_chain_mmr = self.chain_mmr.read().await;
-
-        let note_proofs = notes
-            .filter_map(|id| locked_notes.get(id).map(|proof| (*id, proof.clone())))
-            .collect::<BTreeMap<_, _>>();
-
-        let latest_header =
-            *locked_headers.iter().max_by_key(|(block_num, _)| *block_num).unwrap().1;
-        let chain_length = latest_header.block_num();
-
-        let block_proofs = note_proofs
-            .values()
-            .map(|note_proof| {
-                let block_num = note_proof.location().block_num();
-                let block_header = *locked_headers.get(&block_num).unwrap();
-                let mmr_path = locked_chain_mmr
-                    .open_at(block_num as usize, latest_header.block_num() as usize)
-                    .unwrap()
-                    .merkle_path;
-
-                BlockInclusionProof { block_header, mmr_path, chain_length }
-            })
-            .collect();
-
-        Ok(NoteAuthenticationInfo { block_proofs, note_proofs })
-    }
 }
 
 #[derive(Default)]
@@ -413,11 +380,4 @@ impl Store for MockStoreFailure {
     ) -> Result<BlockInputs, BlockInputsError> {
         Err(BlockInputsError::GrpcClientError(String::new()))
     }
-
-    async fn get_note_authentication_info(
-        &self,
-        _notes: impl Iterator<Item = &NoteId> + Send,
-    ) -> Result<NoteAuthenticationInfo, NotePathsError> {
-        Err(NotePathsError::GrpcClientError(String::new()))
-    }
 }
diff --git a/crates/block-producer/src/txqueue/mod.rs b/crates/block-producer/src/txqueue/mod.rs
deleted file mode 100644
index 5f5f724c5..000000000
--- a/crates/block-producer/src/txqueue/mod.rs
+++ /dev/null
@@ -1,174 +0,0 @@
-use std::{sync::Arc, time::Duration};
-
-use async_trait::async_trait;
-use miden_objects::MAX_OUTPUT_NOTES_PER_BATCH;
-use tokio::{sync::RwLock, time};
-use tracing::{debug, info, info_span, instrument, Instrument};
-
-use crate::{
-    batch_builder::BatchBuilder,
-    errors::{AddTransactionError, VerifyTxError},
-    ProvenTransaction, SharedRwVec, COMPONENT,
-};
-
-#[cfg(test)]
-mod tests;
-
-// TRANSACTION VALIDATOR
-// ================================================================================================
-
-/// Implementations are responsible to track in-flight transactions and verify that new transactions
-/// added to the queue are not conflicting.
-///
-/// See [crate::store::ApplyBlock], that trait's `apply_block` is called when a block is sealed, and
-/// it can determine when transactions are no longer in-flight.
-#[async_trait]
-pub trait TransactionValidator: Send + Sync + 'static {
-    /// Method to receive a `tx` for processing and return the current block height.
-    ///
-    /// This method should:
-    /// - Verify the transaction is valid, against the current's rollup state, and also against
-    ///   in-flight transactions.
-    /// - Track the necessary state of the transaction until it is committed to the `store`, to
-    ///   perform the check above.
-    async fn verify_tx(&self, tx: &ProvenTransaction) -> Result<u32, VerifyTxError>;
-}
-
-// TRANSACTION QUEUE
-// ================================================================================================
-
-pub struct TransactionQueueOptions {
-    /// The frequency at which we try to build batches from transactions in the queue
-    pub build_batch_frequency: Duration,
-
-    /// The size of a batch
-    pub batch_size: usize,
-}
-
-pub struct TransactionQueue<BB, TV> {
-    ready_queue: SharedRwVec<ProvenTransaction>,
-    tx_validator: Arc<TV>,
-    batch_builder: Arc<BB>,
-    options: TransactionQueueOptions,
-}
-
-impl<BB, TV> TransactionQueue<BB, TV>
-where
-    TV: TransactionValidator,
-    BB: BatchBuilder,
-{
-    pub fn new(
-        tx_validator: Arc<TV>,
-        batch_builder: Arc<BB>,
-        options: TransactionQueueOptions,
-    ) -> Self {
-        Self {
-            ready_queue: Arc::new(RwLock::new(Vec::new())),
-            tx_validator,
-            batch_builder,
-            options,
-        }
-    }
-
-    pub async fn run(self: Arc<Self>) {
-        let mut interval = time::interval(self.options.build_batch_frequency);
-
-        info!(target: COMPONENT, period_ms = interval.period().as_millis(), "Transaction queue started");
-
-        loop {
-            interval.tick().await;
-            self.try_build_batches().await;
-        }
-    }
-
-    /// Divides the queue in groups to be batched; those that failed are appended back on the queue
-    #[instrument(target = "miden-block-producer", skip_all)]
-    async fn try_build_batches(&self) {
-        let mut txs: Vec<ProvenTransaction> = {
-            let mut locked_ready_queue = self.ready_queue.write().await;
-
-            // If there are no transactions in the queue, this call is a no-op. The [BatchBuilder]
-            // will produce empty blocks if necessary.
-            if locked_ready_queue.is_empty() {
-                debug!(target: COMPONENT, "Transaction queue empty");
-                return;
-            }
-
-            locked_ready_queue.drain(..).rev().collect()
-        };
-
-        while !txs.is_empty() {
-            let mut batch = Vec::with_capacity(self.options.batch_size);
-            let mut output_notes_in_batch = 0;
-
-            while let Some(tx) = txs.pop() {
-                output_notes_in_batch += tx.output_notes().num_notes();
-
-                debug_assert!(
-                    tx.output_notes().num_notes() <= MAX_OUTPUT_NOTES_PER_BATCH,
-                    "Sanity check, the number of output notes of a single transaction must never be larger than the batch maximum",
-                );
-
-                if output_notes_in_batch > MAX_OUTPUT_NOTES_PER_BATCH
-                    || batch.len() == self.options.batch_size
-                {
-                    // Batch would be too big in number of notes or transactions. Push the tx back
-                    // to the list of available transactions and forward the current batch.
-                    txs.push(tx);
-                    break;
-                }
-
-                // The tx fits in the current batch
-                batch.push(tx)
-            }
-
-            let ready_queue = self.ready_queue.clone();
-            let batch_builder = self.batch_builder.clone();
-
-            tokio::spawn(
-                async move {
-                    match batch_builder.build_batch(batch).await {
-                        Ok(_) => {
-                            // batch was successfully built, do nothing
-                        },
-                        Err(e) => {
-                            // batch building failed, add txs back to the beginning of the queue
-                            let mut locked_ready_queue = ready_queue.write().await;
-                            e.into_transactions()
-                                .into_iter()
-                                .enumerate()
-                                .for_each(|(i, tx)| locked_ready_queue.insert(i, tx));
-                        },
-                    }
-                }
-                .instrument(info_span!(target: COMPONENT, "batch_builder")),
-            );
-        }
-    }
-
-    /// Queues `tx` to be added in a batch and subsequently into a block and returns the current
-    /// block height.
-    ///
-    /// This method will validate the `tx` and ensure it is valid w.r.t. the rollup state, and the
-    /// current in-flight transactions.
-    #[instrument(target = "miden-block-producer", skip_all, err)]
-    pub async fn add_transaction(&self, tx: ProvenTransaction) -> Result<u32, AddTransactionError> {
-        info!(target: COMPONENT, tx_id = %tx.id().to_hex(), account_id = %tx.account_id().to_hex());
-
-        let block_height = self
-            .tx_validator
-            .verify_tx(&tx)
-            .await
-            .map_err(AddTransactionError::VerificationFailed)?;
-
-        let queue_len = {
-            let mut queue_write_guard = self.ready_queue.write().await;
-            queue_write_guard.push(tx);
-            queue_write_guard.len()
-        };
-
-        info!(target: COMPONENT, queue_len, "Transaction added to tx queue");
-
-        Ok(block_height)
-    }
-}
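
Note on the test utilities introduced above: `Random::with_random_seed` prints the seed it
draws so a failing test run can be reproduced, and `MockProvenTxBuilder::sequential`
returns three transactions chained over one mock account's consecutive states. A minimal
usage sketch, hypothetical and not part of this diff; it assumes both helpers are in scope
via the crate's `test_utils` module and that `AuthenticatedTransaction` exposes an
`account_id` accessor:

    #[test]
    fn sketch_of_new_test_helpers() {
        // Reproducible randomness: the seed is printed when the coin is created.
        let mut rng = Random::with_random_seed();
        let _tx_id = rng.draw_tx_id();
        let _digest = rng.draw_digest();

        // Three transactions over states[0] -> states[1] -> states[2] -> states[3]
        // of a single mock account, so all three must target the same account.
        let [tx0, tx1, tx2] = MockProvenTxBuilder::sequential();
        assert_eq!(tx0.account_id(), tx1.account_id());
        assert_eq!(tx1.account_id(), tx2.account_id());
    }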
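For context on the removal above: the deleted `txqueue` drained its FIFO queue on a timer
and greedily packed transactions into batches bounded by two caps, the transaction count
(`batch_size`) and the total output notes (`MAX_OUTPUT_NOTES_PER_BATCH`). A standalone
sketch of just that packing decision, with transactions simplified to their output-note
counts and the cap's value of 4096 assumed:

    // Stand-in for miden_objects::MAX_OUTPUT_NOTES_PER_BATCH (assumed 4096 here).
    const MAX_OUTPUT_NOTES_PER_BATCH: usize = 4096;

    /// Greedily packs `txs` (a stack of per-transaction output-note counts,
    /// popped like the drained queue) into batches honoring both caps.
    /// Assumes `batch_size >= 1` and that no single count exceeds the note cap,
    /// mirroring the deleted code's `debug_assert!`.
    fn pack_batches(mut txs: Vec<usize>, batch_size: usize) -> Vec<Vec<usize>> {
        let mut batches = Vec::new();
        while !txs.is_empty() {
            let mut batch = Vec::with_capacity(batch_size);
            let mut notes_in_batch = 0;
            while let Some(tx_notes) = txs.pop() {
                debug_assert!(tx_notes <= MAX_OUTPUT_NOTES_PER_BATCH);
                if notes_in_batch + tx_notes > MAX_OUTPUT_NOTES_PER_BATCH
                    || batch.len() == batch_size
                {
                    // Either cap would be exceeded: return the transaction to the
                    // stack and close the current batch.
                    txs.push(tx_notes);
                    break;
                }
                notes_in_batch += tx_notes;
                batch.push(tx_notes);
            }
            batches.push(batch);
        }
        batches
    }

The deleted implementation additionally spawned `build_batch` asynchronously and, on
failure, reinserted the failed batch's transactions at the front of the queue; the sketch
keeps only the packing policy itself.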