diff --git a/air/Cargo.toml b/air/Cargo.toml index bba8d01801..7db8686ff5 100644 --- a/air/Cargo.toml +++ b/air/Cargo.toml @@ -27,10 +27,12 @@ harness = false [features] default = ["std"] std = ["vm-core/std", "winter-air/std"] +internals = [] [dependencies] vm-core = { package = "miden-core", path = "../core", version = "0.8", default-features = false } winter-air = { package = "winter-air", version = "0.7", default-features = false } +winter-prover = { package = "winter-prover", version = "0.7", default-features = false } [dev-dependencies] criterion = "0.5" diff --git a/air/src/lib.rs b/air/src/lib.rs index c561f5251d..a01664b019 100644 --- a/air/src/lib.rs +++ b/air/src/lib.rs @@ -14,6 +14,7 @@ use winter_air::{ Air, AirContext, Assertion, AuxTraceRandElements, EvaluationFrame, ProofOptions as WinterProofOptions, TraceInfo, TransitionConstraintDegree, }; +use winter_prover::matrix::ColMatrix; mod constraints; pub use constraints::stack; diff --git a/air/src/trace/main_trace.rs b/air/src/trace/main_trace.rs new file mode 100644 index 0000000000..699e38bf5d --- /dev/null +++ b/air/src/trace/main_trace.rs @@ -0,0 +1,444 @@ +use super::super::ColMatrix; +use super::{ + chiplets::{ + hasher::{DIGEST_LEN, STATE_WIDTH}, + BITWISE_A_COL_IDX, BITWISE_B_COL_IDX, BITWISE_OUTPUT_COL_IDX, HASHER_NODE_INDEX_COL_IDX, + HASHER_STATE_COL_RANGE, MEMORY_ADDR_COL_IDX, MEMORY_CLK_COL_IDX, MEMORY_CTX_COL_IDX, + MEMORY_V_COL_RANGE, + }, + decoder::{ + GROUP_COUNT_COL_IDX, HASHER_STATE_OFFSET, IN_SPAN_COL_IDX, IS_CALL_FLAG_COL_IDX, + IS_LOOP_BODY_FLAG_COL_IDX, IS_LOOP_FLAG_COL_IDX, IS_SYSCALL_FLAG_COL_IDX, + NUM_HASHER_COLUMNS, NUM_OP_BATCH_FLAGS, OP_BATCH_FLAGS_OFFSET, OP_BITS_EXTRA_COLS_OFFSET, + USER_OP_HELPERS_OFFSET, + }, + stack::{B0_COL_IDX, B1_COL_IDX, H0_COL_IDX}, + CHIPLETS_OFFSET, CLK_COL_IDX, CTX_COL_IDX, DECODER_TRACE_OFFSET, FMP_COL_IDX, FN_HASH_OFFSET, + STACK_TRACE_OFFSET, +}; +use core::ops::{Deref, Range}; +#[cfg(any(test, feature = "internals"))] +use 
vm_core::utils::collections::Vec; +use vm_core::{utils::range, Felt, ONE, ZERO}; + +// CONSTANTS +// ================================================================================================ + +const DECODER_HASHER_RANGE: Range = + range(DECODER_TRACE_OFFSET + HASHER_STATE_OFFSET, NUM_HASHER_COLUMNS); + +// HELPER STRUCT AND METHODS +// ================================================================================================ + +pub struct MainTrace { + columns: ColMatrix, +} + +impl Deref for MainTrace { + type Target = ColMatrix; + + fn deref(&self) -> &Self::Target { + &self.columns + } +} + +impl MainTrace { + pub fn new(main_trace: ColMatrix) -> Self { + Self { + columns: main_trace, + } + } + + pub fn num_rows(&self) -> usize { + self.columns.num_rows() + } + + #[cfg(any(test, feature = "internals"))] + pub fn get_column_range(&self, range: Range) -> Vec> { + range.fold(vec![], |mut acc, col_idx| { + acc.push(self.get_column(col_idx).to_vec()); + acc + }) + } + + // SYSTEM COLUMNS + // -------------------------------------------------------------------------------------------- + + /// Returns the value of the clk column at row i. + pub fn clk(&self, i: usize) -> Felt { + self.columns.get_column(CLK_COL_IDX)[i] + } + + /// Returns the value of the fmp column at row i. + pub fn fmp(&self, i: usize) -> Felt { + self.columns.get_column(FMP_COL_IDX)[i] + } + + /// Returns the value of the ctx column at row i. + pub fn ctx(&self, i: usize) -> Felt { + self.columns.get_column(CTX_COL_IDX)[i] + } + + // DECODER COLUMNS + // -------------------------------------------------------------------------------------------- + + /// Returns the value in the block address column at the row i. + pub fn addr(&self, i: usize) -> Felt { + self.columns.get_column(DECODER_TRACE_OFFSET)[i] + } + + /// Helper method to detect change of address. 
+ pub fn is_addr_change(&self, i: usize) -> bool { + self.addr(i) != self.addr(i + 1) + } + + /// First decoder helper register at row i. + pub fn helper_0(&self, i: usize) -> Felt { + self.columns.get_column(DECODER_TRACE_OFFSET + USER_OP_HELPERS_OFFSET)[i] + } + + /// Second decoder helper register at row i. + pub fn helper_1(&self, i: usize) -> Felt { + self.columns.get_column(DECODER_TRACE_OFFSET + USER_OP_HELPERS_OFFSET + 1)[i] + } + + /// Third decoder helper register at row i. + pub fn helper_2(&self, i: usize) -> Felt { + self.columns.get_column(DECODER_TRACE_OFFSET + USER_OP_HELPERS_OFFSET + 2)[i] + } + + /// Returns the hasher state at row i. + pub fn decoder_hasher_state(&self, i: usize) -> [Felt; NUM_HASHER_COLUMNS] { + let mut state = [ZERO; NUM_HASHER_COLUMNS]; + for (idx, col_idx) in DECODER_HASHER_RANGE.enumerate() { + let column = self.columns.get_column(col_idx); + state[idx] = column[i]; + } + state + } + + /// Returns the first half of the hasher state at row i. + pub fn decoder_hasher_state_first_half(&self, i: usize) -> [Felt; DIGEST_LEN] { + let mut state = [ZERO; DIGEST_LEN]; + for (col, s) in state.iter_mut().enumerate() { + *s = self.columns.get_column(DECODER_TRACE_OFFSET + HASHER_STATE_OFFSET + col)[i]; + } + state + } + + /// Returns a specific element from the hasher state at row i. + pub fn decoder_hasher_state_element(&self, element: usize, i: usize) -> Felt { + self.columns.get_column(DECODER_TRACE_OFFSET + HASHER_STATE_OFFSET + element)[i + 1] + } + + /// Returns the current function hash (i.e., root) at row i. + pub fn fn_hash(&self, i: usize) -> [Felt; DIGEST_LEN] { + let mut state = [ZERO; DIGEST_LEN]; + for (col, s) in state.iter_mut().enumerate() { + *s = self.columns.get_column(FN_HASH_OFFSET + col)[i]; + } + state + } + + /// Returns the `is_loop_body` flag at row i. 
+ pub fn is_loop_body_flag(&self, i: usize) -> Felt { + self.columns.get_column(DECODER_TRACE_OFFSET + IS_LOOP_BODY_FLAG_COL_IDX)[i] + } + + /// Returns the `is_loop` flag at row i. + pub fn is_loop_flag(&self, i: usize) -> Felt { + self.columns.get_column(DECODER_TRACE_OFFSET + IS_LOOP_FLAG_COL_IDX)[i] + } + + /// Returns the `is_call` flag at row i. + pub fn is_call_flag(&self, i: usize) -> Felt { + self.columns.get_column(DECODER_TRACE_OFFSET + IS_CALL_FLAG_COL_IDX)[i] + } + + /// Returns the `is_syscall` flag at row i. + pub fn is_syscall_flag(&self, i: usize) -> Felt { + self.columns.get_column(DECODER_TRACE_OFFSET + IS_SYSCALL_FLAG_COL_IDX)[i] + } + + /// Returns the operation batch flags at row i. This indicates the number of op groups in + /// the current batch that is being processed. + pub fn op_batch_flag(&self, i: usize) -> [Felt; NUM_OP_BATCH_FLAGS] { + [ + self.columns.get(DECODER_TRACE_OFFSET + OP_BATCH_FLAGS_OFFSET, i), + self.columns.get(DECODER_TRACE_OFFSET + OP_BATCH_FLAGS_OFFSET + 1, i), + self.columns.get(DECODER_TRACE_OFFSET + OP_BATCH_FLAGS_OFFSET + 2, i), + ] + } + + /// Returns the operation group count. This indicates the number of operation that remain + /// to be executed in the current span block. + pub fn group_count(&self, i: usize) -> Felt { + self.columns.get_column(DECODER_TRACE_OFFSET + GROUP_COUNT_COL_IDX)[i] + } + + /// Returns the delta between the current and next group counts. + pub fn delta_group_count(&self, i: usize) -> Felt { + self.group_count(i) - self.group_count(i + 1) + } + + /// Returns the `in_span` flag at row i. + pub fn is_in_span(&self, i: usize) -> Felt { + self.columns.get_column(DECODER_TRACE_OFFSET + IN_SPAN_COL_IDX)[i] + } + + /// Constructs the i-th op code value from its individual bits. 
+ pub fn get_op_code(&self, i: usize) -> Felt { + let col_b0 = self.columns.get_column(DECODER_TRACE_OFFSET + 1); + let col_b1 = self.columns.get_column(DECODER_TRACE_OFFSET + 2); + let col_b2 = self.columns.get_column(DECODER_TRACE_OFFSET + 3); + let col_b3 = self.columns.get_column(DECODER_TRACE_OFFSET + 4); + let col_b4 = self.columns.get_column(DECODER_TRACE_OFFSET + 5); + let col_b5 = self.columns.get_column(DECODER_TRACE_OFFSET + 6); + let col_b6 = self.columns.get_column(DECODER_TRACE_OFFSET + 7); + let [b0, b1, b2, b3, b4, b5, b6] = + [col_b0[i], col_b1[i], col_b2[i], col_b3[i], col_b4[i], col_b5[i], col_b6[i]]; + b0 + b1.mul_small(2) + + b2.mul_small(4) + + b3.mul_small(8) + + b4.mul_small(16) + + b5.mul_small(32) + + b6.mul_small(64) + } + + /// Returns a flag indicating whether the current operation induces a left shift of the operand + /// stack. + pub fn is_left_shift(&self, i: usize) -> bool { + let b0 = self.columns.get(DECODER_TRACE_OFFSET + 1, i); + let b1 = self.columns.get(DECODER_TRACE_OFFSET + 2, i); + let b2 = self.columns.get(DECODER_TRACE_OFFSET + 3, i); + let b3 = self.columns.get(DECODER_TRACE_OFFSET + 4, i); + let b4 = self.columns.get(DECODER_TRACE_OFFSET + 5, i); + let b5 = self.columns.get(DECODER_TRACE_OFFSET + 6, i); + let b6 = self.columns.get(DECODER_TRACE_OFFSET + 7, i); + let e0 = self.columns.get(DECODER_TRACE_OFFSET + OP_BITS_EXTRA_COLS_OFFSET, i); + let h5 = self.columns.get(DECODER_TRACE_OFFSET + IS_LOOP_FLAG_COL_IDX, i); + + // group with left shift effect grouped by a common prefix + ([b6, b5, b4] == [ZERO, ONE, ZERO])|| + // U32ADD3 or U32MADD + ([b6, b5, b4, b3, b2] == [ONE, ZERO, ZERO, ONE, ONE]) || + // SPLIT or LOOP block + ([e0, b3, b2, b1] == [ONE, ZERO, ONE, ZERO]) || + // REPEAT + ([b6, b5, b4, b3, b2, b1, b0] == [ONE, ONE, ONE, ZERO, ONE, ZERO, ZERO]) || + // END of a loop + ([b6, b5, b4, b3, b2, b1, b0] == [ONE, ONE, ONE, ZERO, ZERO, ZERO, ZERO] && h5 == ONE) + } + + /// Returns a flag indicating whether the 
current operation induces a right shift of the operand + /// stack. + pub fn is_right_shift(&self, i: usize) -> bool { + let b0 = self.columns.get(DECODER_TRACE_OFFSET + 1, i); + let b1 = self.columns.get(DECODER_TRACE_OFFSET + 2, i); + let b2 = self.columns.get(DECODER_TRACE_OFFSET + 3, i); + let b3 = self.columns.get(DECODER_TRACE_OFFSET + 4, i); + let b4 = self.columns.get(DECODER_TRACE_OFFSET + 5, i); + let b5 = self.columns.get(DECODER_TRACE_OFFSET + 6, i); + let b6 = self.columns.get(DECODER_TRACE_OFFSET + 7, i); + + // group with right shift effect grouped by a common prefix + [b6, b5, b4] == [ZERO, ONE, ONE]|| + // u32SPLIT 100_1000 + ([b6, b5, b4, b3, b2, b1, b0] == [ONE, ZERO, ZERO, ONE, ZERO, ZERO, ZERO]) || + // PUSH i.e., 110_0100 + ([b6, b5, b4, b3, b2, b1, b0] == [ONE, ONE, ZERO, ZERO, ONE, ZERO, ZERO]) + } + + // STACK COLUMNS + // -------------------------------------------------------------------------------------------- + + /// Returns the value of the stack depth column at row i. + pub fn stack_depth(&self, i: usize) -> Felt { + self.columns.get_column(STACK_TRACE_OFFSET + B0_COL_IDX)[i] + } + + /// Returns the element at row i in a given stack trace column. + pub fn stack_element(&self, column: usize, i: usize) -> Felt { + self.columns.get_column(STACK_TRACE_OFFSET + column)[i] + } + + /// Returns the address of the top element in the stack overflow table at row i. + pub fn parent_overflow_address(&self, i: usize) -> Felt { + self.columns.get_column(STACK_TRACE_OFFSET + B1_COL_IDX)[i] + } + + /// Returns a flag indicating whether the overflow stack is non-empty. 
+ pub fn is_non_empty_overflow(&self, i: usize) -> bool { + let b0 = self.columns.get_column(STACK_TRACE_OFFSET + B0_COL_IDX)[i]; + let h0 = self.columns.get_column(STACK_TRACE_OFFSET + H0_COL_IDX)[i]; + (b0 - Felt::new(16)) * h0 == ONE + } + + // CHIPLETS COLUMNS + // -------------------------------------------------------------------------------------------- + + /// Returns chiplet column number 0 at row i. + pub fn chiplet_selector_0(&self, i: usize) -> Felt { + self.columns.get_column(CHIPLETS_OFFSET)[i] + } + + /// Returns chiplet column number 1 at row i. + pub fn chiplet_selector_1(&self, i: usize) -> Felt { + self.columns.get_column(CHIPLETS_OFFSET + 1)[i] + } + + /// Returns chiplet column number 2 at row i. + pub fn chiplet_selector_2(&self, i: usize) -> Felt { + self.columns.get_column(CHIPLETS_OFFSET + 2)[i] + } + + /// Returns chiplet column number 3 at row i. + pub fn chiplet_selector_3(&self, i: usize) -> Felt { + self.columns.get_column(CHIPLETS_OFFSET + 3)[i] + } + + /// Returns chiplet column number 4 at row i. + pub fn chiplet_selector_4(&self, i: usize) -> Felt { + self.columns.get_column(CHIPLETS_OFFSET + 4)[i] + } + + /// Returns the (full) state of the hasher chiplet at row i. + pub fn chiplet_hasher_state(&self, i: usize) -> [Felt; STATE_WIDTH] { + let mut state = [ZERO; STATE_WIDTH]; + for (idx, col_idx) in HASHER_STATE_COL_RANGE.enumerate() { + let column = self.columns.get_column(col_idx); + state[idx] = column[i]; + } + state + } + + /// Returns the hasher's node index column at row i + pub fn chiplet_node_index(&self, i: usize) -> Felt { + self.columns.get(HASHER_NODE_INDEX_COL_IDX, i) + } + + /// Returns the bitwise column holding the aggregated value of input `a` at row i. + pub fn chiplet_bitwise_a(&self, i: usize) -> Felt { + self.columns.get_column(BITWISE_A_COL_IDX)[i] + } + + /// Returns the bitwise column holding the aggregated value of input `b` at row i. 
+ pub fn chiplet_bitwise_b(&self, i: usize) -> Felt { + self.columns.get_column(BITWISE_B_COL_IDX)[i] + } + + /// Returns the bitwise column holding the aggregated value of the output at row i. + pub fn chiplet_bitwise_z(&self, i: usize) -> Felt { + self.columns.get_column(BITWISE_OUTPUT_COL_IDX)[i] + } + + /// Returns the i-th row of the chiplet column containing memory context. + pub fn chiplet_memory_ctx(&self, i: usize) -> Felt { + self.columns.get_column(MEMORY_CTX_COL_IDX)[i] + } + + /// Returns the i-th row of the chiplet column containing memory address. + pub fn chiplet_memory_addr(&self, i: usize) -> Felt { + self.columns.get_column(MEMORY_ADDR_COL_IDX)[i] + } + + /// Returns the i-th row of the chiplet column containing clock cycle. + pub fn chiplet_memory_clk(&self, i: usize) -> Felt { + self.columns.get_column(MEMORY_CLK_COL_IDX)[i] + } + + /// Returns the i-th row of the chiplet column containing the zeroth memory value element. + pub fn chiplet_memory_value_0(&self, i: usize) -> Felt { + self.columns.get_column(MEMORY_V_COL_RANGE.start)[i] + } + + /// Returns the i-th row of the chiplet column containing the first memory value element. + pub fn chiplet_memory_value_1(&self, i: usize) -> Felt { + self.columns.get_column(MEMORY_V_COL_RANGE.start + 1)[i] + } + + /// Returns the i-th row of the chiplet column containing the second memory value element. + pub fn chiplet_memory_value_2(&self, i: usize) -> Felt { + self.columns.get_column(MEMORY_V_COL_RANGE.start + 2)[i] + } + + /// Returns the i-th row of the chiplet column containing the third memory value element. + pub fn chiplet_memory_value_3(&self, i: usize) -> Felt { + self.columns.get_column(MEMORY_V_COL_RANGE.start + 3)[i] + } + + /// Returns the i-th row of the chiplet column containing the zeroth element of the kernel + /// procedure root. 
+ pub fn chiplet_kernel_root_0(&self, i: usize) -> Felt { + self.columns.get_column(CHIPLETS_OFFSET + 6)[i] + } + + /// Returns the i-th row of the chiplet column containing the first element of the kernel + /// procedure root. + pub fn chiplet_kernel_root_1(&self, i: usize) -> Felt { + self.columns.get_column(CHIPLETS_OFFSET + 7)[i] + } + + /// Returns the i-th row of the chiplet column containing the second element of the kernel + /// procedure root. + pub fn chiplet_kernel_root_2(&self, i: usize) -> Felt { + self.columns.get_column(CHIPLETS_OFFSET + 8)[i] + } + + /// Returns the i-th row of the chiplet column containing the third element of the kernel + /// procedure root. + pub fn chiplet_kernel_root_3(&self, i: usize) -> Felt { + self.columns.get_column(CHIPLETS_OFFSET + 9)[i] + } + + /// Returns `true` if a row is part of the kernel chiplet. + pub fn is_kernel_row(&self, i: usize) -> bool { + self.chiplet_selector_0(i) == ONE + && self.chiplet_selector_1(i) == ONE + && self.chiplet_selector_2(i) == ONE + && self.chiplet_selector_3(i) == ZERO + } + + // MERKLE PATH HASHING SELECTORS + // -------------------------------------------------------------------------------------------- + + /// Returns `true` if the hasher chiplet flags indicate the initialization of verifying + /// a Merkle path to an old node during Merkle root update procedure (MRUPDATE). + pub fn f_mv(&self, i: usize) -> bool { + (i % 8 == 0) + && self.chiplet_selector_0(i) == ZERO + && self.chiplet_selector_1(i) == ONE + && self.chiplet_selector_2(i) == ONE + && self.chiplet_selector_3(i) == ZERO + } + + /// Returns `true` if the hasher chiplet flags indicate the continuation of verifying + /// a Merkle path to an old node during Merkle root update procedure (MRUPDATE). 
+ pub fn f_mva(&self, i: usize) -> bool { + (i % 8 == 7) + && self.chiplet_selector_0(i) == ZERO + && self.chiplet_selector_1(i) == ONE + && self.chiplet_selector_2(i) == ONE + && self.chiplet_selector_3(i) == ZERO + } + + /// Returns `true` if the hasher chiplet flags indicate the initialization of verifying + /// a Merkle path to a new node during Merkle root update procedure (MRUPDATE). + pub fn f_mu(&self, i: usize) -> bool { + (i % 8 == 0) + && self.chiplet_selector_0(i) == ZERO + && self.chiplet_selector_1(i) == ONE + && self.chiplet_selector_2(i) == ONE + && self.chiplet_selector_3(i) == ONE + } + + /// Returns `true` if the hasher chiplet flags indicate the continuation of verifying + /// a Merkle path to a new node during Merkle root update procedure (MRUPDATE). + pub fn f_mua(&self, i: usize) -> bool { + (i % 8 == 7) + && self.chiplet_selector_0(i) == ZERO + && self.chiplet_selector_1(i) == ONE + && self.chiplet_selector_2(i) == ONE + && self.chiplet_selector_3(i) == ONE + } +} diff --git a/air/src/trace/mod.rs b/air/src/trace/mod.rs index fa908671f4..f61e1cae05 100644 --- a/air/src/trace/mod.rs +++ b/air/src/trace/mod.rs @@ -3,6 +3,7 @@ use vm_core::utils::range; pub mod chiplets; pub mod decoder; +pub mod main_trace; pub mod range; pub mod stack; diff --git a/docs/src/design/lookups/multiset.md b/docs/src/design/lookups/multiset.md index 7f34b57acc..4f99800eca 100644 --- a/docs/src/design/lookups/multiset.md +++ b/docs/src/design/lookups/multiset.md @@ -35,15 +35,32 @@ $$ Then, when row $i$ is added to the table, we'll update the value in the $p$ column like so: $$ -p' = p \cdot r_i +p' = p \cdot (\gamma + r_i) $$ Analogously, when row $i$ is removed from the table, we'll update the value in column $p$ like so: $$ -p' = \frac{p}{r_i} +p' = \frac{p}{(\gamma + r_i)} $$ +It is essential to shift the value $r_i$ by $ \gamma $. Failure to do that can result in the verifier accepting a multiset check even when the sets are not permutations of each other. 
+ +As an example, consider checking that the two sets below are permutations of each other. + +$$ t_1 = [1, 5, 3] $$ +$$ t_2 = [1, 1, 15] $$ + +In this case, although the two sets are not permutations of each other, the multiset check would pass. + +$$ +p = \frac{1 \cdot 5 \cdot 3}{1 \cdot 1 \cdot 15} +$$ + +Shifting the multiplicand by a random $ \gamma $ prevents a cheating prover from passing the check. + +Another benefit of shifting the multiplicand by $ \gamma $ is that it helps the protocol stay secure even when a developer forgets to add a random shift themselves, since $ \gamma $ is already added by the protocol. + ### Virtual tables in Miden VM Miden VM makes use of 6 virtual tables across 4 components: diff --git a/processor/Cargo.toml b/processor/Cargo.toml index 681f2b544a..caae5e15a1 100644 --- a/processor/Cargo.toml +++ b/processor/Cargo.toml @@ -19,7 +19,7 @@ doctest = false [features] concurrent = ["std", "winter-prover/concurrent"] default = ["std"] -internals = [] +internals = ["miden-air/internals"] std = ["tracing/attributes", "vm-core/std", "winter-prover/std"] [dependencies] diff --git a/processor/src/chiplets/aux_trace/bus.rs b/processor/src/chiplets/aux_trace/bus.rs deleted file mode 100644 index 50d603572c..0000000000 --- a/processor/src/chiplets/aux_trace/bus.rs +++ /dev/null @@ -1,281 +0,0 @@ -use super::{ - super::{hasher::HasherLookup, BitwiseLookup, KernelProcLookup, MemoryLookup}, - BTreeMap, BusTraceBuilder, ColMatrix, Felt, FieldElement, LookupTableRow, Vec, -}; - -// CHIPLETS BUS -// ================================================================================================ - -/// The Chiplets bus tracks data requested from or provided by chiplets in the Chiplets module. It -/// processes lookup requests from the stack & decoder and response data from the chiplets. -/// -/// For correct execution, the lookup data used by the stack for each chiplet must be a permutation -/// of the lookups executed by that chiplet so that they cancel out. 
This is ensured by the `b_chip` -/// bus column. When the `b_chip` column is built, requests from the stack must be divided out and -/// lookup results provided by the chiplets must be multiplied in. To ensure that all lookups are -/// attributed to the correct chiplet and operation, a unique chiplet operation label must be -/// included in the lookup row value when it is computed. - -#[derive(Default)] -pub struct ChipletsBus { - lookup_hints: BTreeMap, - requests: Vec, - responses: Vec, - // TODO: remove queued requests by refactoring the hasher/decoder interactions so that the - // lookups are built as they are requested. This will be made easier by removing state info from - // the HasherLookup struct. Primarily it will require a refactor of `hash_span_block`, - // `start_span_block`, `respan`, and `end_span_block`. - queued_requests: Vec, -} - -impl ChipletsBus { - // LOOKUP MUTATORS - // -------------------------------------------------------------------------------------------- - - /// Requests lookups for a single operation at the specified cycle. A Hasher operation request - /// can contain one or more lookups, while Bitwise and Memory requests will only contain a - /// single lookup. - fn request_lookups(&mut self, request_cycle: u32, request_indices: &mut Vec) { - self.lookup_hints - .entry(request_cycle) - .and_modify(|bus_row| { - bus_row.send_requests(request_indices); - }) - .or_insert_with(|| ChipletsBusRow::new(request_indices, None)); - } - - /// Provides lookup data at the specified cycle, which is the row of the Chiplets execution - /// trace that contains this lookup row. 
- fn provide_lookup(&mut self, response_cycle: u32) { - let response_idx = self.responses.len() as u32; - self.lookup_hints - .entry(response_cycle) - .and_modify(|bus_row| { - bus_row.send_response(response_idx); - }) - .or_insert_with(|| ChipletsBusRow::new(&[], Some(response_idx))); - } - - // HASHER LOOKUPS - // -------------------------------------------------------------------------------------------- - - /// Requests lookups at the specified `cycle` for the initial row and result row of Hash - /// operations in the Hash Chiplet. This request is expected to originate from operation - /// executors requesting one or more hash operations for the Stack where all operation lookups - /// must be included at the same cycle. For simple permutations this will require 2 lookups, - /// while for a Merkle root update it will require 4, since two Hash operations are required. - pub(crate) fn request_hasher_operation(&mut self, lookups: &[HasherLookup], cycle: u32) { - debug_assert!( - lookups.len() == 2 || lookups.len() == 4, - "incorrect number of lookup rows for hasher operation request" - ); - let mut request_indices = vec![0; lookups.len()]; - for (idx, lookup) in lookups.iter().enumerate() { - request_indices[idx] = self.requests.len() as u32; - self.requests.push(ChipletLookup::Hasher(*lookup)); - } - self.request_lookups(cycle, &mut request_indices); - } - - /// Requests the specified lookup from the Hash Chiplet at the specified `cycle`. Single lookup - /// requests are expected to originate from the decoder during control block decoding. This - /// lookup can be for either the initial or the final row of the hash operation. 
- pub(crate) fn request_hasher_lookup(&mut self, lookup: HasherLookup, cycle: u32) { - self.request_lookups(cycle, &mut vec![self.requests.len() as u32]); - self.requests.push(ChipletLookup::Hasher(lookup)); - } - - /// Adds the request for the specified lookup to a queue from which it can be sent later when - /// the cycle of the request is known. Queued requests are expected to originate from the - /// decoder, since the hash is computed at the start of each control block (along with all - /// required lookups), but the decoder does not request intermediate and final lookups until the - /// end of the control block or until a `RESPAN`, in the case of `SPAN` blocks with more than - /// one operation batch. - pub(crate) fn enqueue_hasher_request(&mut self, lookup: HasherLookup) { - self.queued_requests.push(lookup); - } - - /// Pops the top HasherLookup request off the queue and sends it to the bus. This request is - /// expected to originate from the decoder as it continues or finalizes control blocks with - /// `RESPAN` or `END`. - pub(crate) fn send_queued_hasher_request(&mut self, cycle: u32) { - let lookup = self.queued_requests.pop(); - debug_assert!(lookup.is_some(), "no queued requests"); - - if let Some(lookup) = lookup { - self.request_hasher_lookup(lookup, cycle); - } - } - - /// Provides the data of a hash chiplet operation contained in the [Hasher] table. The hash - /// lookup value is provided at cycle `response_cycle`, which is the row of the execution trace - /// that contains this Hasher row. It will always be either the first or last row of a Hasher - /// operation cycle. 
- pub(crate) fn provide_hasher_lookup(&mut self, lookup: HasherLookup, response_cycle: u32) { - self.provide_lookup(response_cycle); - self.responses.push(ChipletLookup::Hasher(lookup)); - } - - /// Provides multiple hash lookup values and their response cycles, which are the rows of the - /// execution trace which contains the corresponding hasher row for either the start or end of - /// a hasher operation cycle. - pub(crate) fn provide_hasher_lookups(&mut self, lookups: &[HasherLookup]) { - for lookup in lookups.iter() { - self.provide_hasher_lookup(*lookup, lookup.cycle()); - } - } - - // BITWISE LOOKUPS - // -------------------------------------------------------------------------------------------- - - /// Requests the specified bitwise lookup at the specified `cycle`. This request is expected to - /// originate from operation executors. - pub(crate) fn request_bitwise_operation(&mut self, lookup: BitwiseLookup, cycle: u32) { - self.request_lookups(cycle, &mut vec![self.requests.len() as u32]); - self.requests.push(ChipletLookup::Bitwise(lookup)); - } - - /// Provides the data of a bitwise operation contained in the [Bitwise] table. The bitwise value - /// is provided at cycle `response_cycle`, which is the row of the execution trace that contains - /// this Bitwise row. It will always be the final row of a Bitwise operation cycle. - pub(crate) fn provide_bitwise_operation(&mut self, lookup: BitwiseLookup, response_cycle: u32) { - self.provide_lookup(response_cycle); - self.responses.push(ChipletLookup::Bitwise(lookup)); - } - - // MEMORY LOOKUPS - // -------------------------------------------------------------------------------------------- - - /// Sends the specified memory access requests. There must be exactly one or two requests. The - /// requests are made at the specified `cycle` and are expected to originate from operation - /// executors. 
- pub(crate) fn request_memory_operation(&mut self, lookups: &[MemoryLookup], cycle: u32) { - debug_assert!( - lookups.len() == 1 || lookups.len() == 2, - "invalid number of requested memory operations" - ); - let mut request_indices = vec![0; lookups.len()]; - for (idx, lookup) in lookups.iter().enumerate() { - request_indices[idx] = self.requests.len() as u32; - self.requests.push(ChipletLookup::Memory(*lookup)); - } - self.request_lookups(cycle, &mut request_indices); - } - - /// Provides the data of the specified memory access. The memory access data is provided at - /// cycle `response_cycle`, which is the row of the execution trace that contains this Memory - /// row. - pub(crate) fn provide_memory_operation(&mut self, lookup: MemoryLookup, response_cycle: u32) { - self.provide_lookup(response_cycle); - self.responses.push(ChipletLookup::Memory(lookup)); - } - - // KERNEL ROM LOOKUPS - // -------------------------------------------------------------------------------------------- - - /// Requests the specified kernel procedure lookup at the specified `cycle`. This request is - /// expected to originate from operation executors. - pub(crate) fn request_kernel_proc_call(&mut self, lookup: KernelProcLookup, cycle: u32) { - self.request_lookups(cycle, &mut vec![self.requests.len() as u32]); - self.requests.push(ChipletLookup::KernelRom(lookup)); - } - - /// Provides a kernel procedure call contained in the [KernelRom] chiplet. The procedure access - /// is provided at cycle `response_cycle`, which is the row of the execution trace that contains - /// this [KernelRom] row. 
- pub(crate) fn provide_kernel_proc_call( - &mut self, - lookup: KernelProcLookup, - response_cycle: u32, - ) { - self.provide_lookup(response_cycle); - self.responses.push(ChipletLookup::KernelRom(lookup)); - } - - // AUX TRACE BUILDER GENERATION - // -------------------------------------------------------------------------------------------- - - /// Converts this [ChipletsBus] into an auxiliary trace builder which can be used to construct - /// the auxiliary trace column describing the [Chiplets] lookups at every cycle. - pub(crate) fn into_aux_builder(self) -> BusTraceBuilder { - let lookup_hints = self.lookup_hints.into_iter().collect(); - - BusTraceBuilder::new(lookup_hints, self.requests, self.responses) - } - - // PUBLIC ACCESSORS - // -------------------------------------------------------------------------------------------- - - /// Returns an option with the lookup hint for the specified cycle. - #[cfg(test)] - pub(crate) fn get_lookup_hint(&self, cycle: u32) -> Option<&ChipletsBusRow> { - self.lookup_hints.get(&cycle) - } - - /// Returns the ith lookup response provided by the Chiplets module. - #[cfg(test)] - pub(crate) fn get_response_row(&self, i: usize) -> ChipletLookup { - self.responses[i].clone() - } -} - -// CHIPLETS LOOKUPS -// ================================================================================================ - -/// This represents all communication with the Chiplets Bus at a single cycle. Multiple requests can -/// be sent to the bus in any given cycle, but only one response can be provided. 
-#[derive(Debug, Clone, PartialEq, Eq)] -pub struct ChipletsBusRow { - requests: Vec, - response: Option, -} - -impl ChipletsBusRow { - pub(crate) fn new(requests: &[u32], response: Option) -> Self { - ChipletsBusRow { - requests: requests.to_vec(), - response, - } - } - - pub(super) fn requests(&self) -> &[u32] { - &self.requests - } - - pub(super) fn response(&self) -> Option { - self.response - } - - fn send_requests(&mut self, requests: &mut Vec) { - self.requests.append(requests); - } - - fn send_response(&mut self, response: u32) { - debug_assert!(self.response.is_none(), "bus row already contains a response"); - self.response = Some(response); - } -} - -/// Data representing a single lookup row in one of the [Chiplets]. -#[derive(Debug, Clone, PartialEq, Eq)] -pub(crate) enum ChipletLookup { - Bitwise(BitwiseLookup), - Hasher(HasherLookup), - KernelRom(KernelProcLookup), - Memory(MemoryLookup), -} - -impl LookupTableRow for ChipletLookup { - fn to_value>( - &self, - main_trace: &ColMatrix, - alphas: &[E], - ) -> E { - match self { - ChipletLookup::Bitwise(row) => row.to_value(main_trace, alphas), - ChipletLookup::Hasher(row) => row.to_value(main_trace, alphas), - ChipletLookup::KernelRom(row) => row.to_value(main_trace, alphas), - ChipletLookup::Memory(row) => row.to_value(main_trace, alphas), - } - } -} diff --git a/processor/src/chiplets/aux_trace/mod.rs b/processor/src/chiplets/aux_trace/mod.rs index 03d75f877b..db60818606 100644 --- a/processor/src/chiplets/aux_trace/mod.rs +++ b/processor/src/chiplets/aux_trace/mod.rs @@ -1,29 +1,53 @@ -use super::{ - trace::{build_lookup_table_row_values, AuxColumnBuilder, LookupTableRow}, - BTreeMap, ColMatrix, Felt, FieldElement, StarkField, Vec, Word, +use super::{super::trace::AuxColumnBuilder, Felt, FieldElement, StarkField, Vec}; + +use miden_air::trace::{ + chiplets::{ + bitwise::OP_CYCLE_LEN as BITWISE_OP_CYCLE_LEN, + hasher::{ + CAPACITY_LEN, DIGEST_RANGE, HASH_CYCLE_LEN, LINEAR_HASH_LABEL, MP_VERIFY_LABEL, + 
MR_UPDATE_NEW_LABEL, MR_UPDATE_OLD_LABEL, NUM_ROUNDS, RETURN_HASH_LABEL, + RETURN_STATE_LABEL, STATE_WIDTH, + }, + kernel_rom::KERNEL_PROC_LABEL, + memory::{MEMORY_READ_LABEL, MEMORY_WRITE_LABEL}, + }, + main_trace::MainTrace, }; -mod bus; -pub(crate) use bus::{ChipletLookup, ChipletsBus, ChipletsBusRow}; +use vm_core::{Operation, ONE, ZERO}; -mod virtual_table; -pub(crate) use virtual_table::{ChipletsVTableRow, ChipletsVTableUpdate}; +// CONSTANTS +// ================================================================================================ -/// Contains all relevant information and describes how to construct the execution trace for -/// chiplets-related auxiliary columns (used in multiset checks). -pub struct AuxTraceBuilder { - bus_builder: BusTraceBuilder, - table_builder: ChipletsVTableTraceBuilder, -} +const JOIN: u8 = Operation::Join.op_code(); +const SPLIT: u8 = Operation::Split.op_code(); +const LOOP: u8 = Operation::Loop.op_code(); +const DYN: u8 = Operation::Dyn.op_code(); +const CALL: u8 = Operation::Call.op_code(); +const SYSCALL: u8 = Operation::SysCall.op_code(); +const SPAN: u8 = Operation::Span.op_code(); +const RESPAN: u8 = Operation::Respan.op_code(); +const END: u8 = Operation::End.op_code(); +const AND: u8 = Operation::U32and.op_code(); +const XOR: u8 = Operation::U32xor.op_code(); +const MLOADW: u8 = Operation::MLoadW.op_code(); +const MSTOREW: u8 = Operation::MStoreW.op_code(); +const MLOAD: u8 = Operation::MLoad.op_code(); +const MSTORE: u8 = Operation::MStore.op_code(); +const MSTREAM: u8 = Operation::MStream.op_code(); +const HPERM: u8 = Operation::HPerm.op_code(); +const MPVERIFY: u8 = Operation::MpVerify.op_code(); +const MRUPDATE: u8 = Operation::MrUpdate.op_code(); +const NUM_HEADER_ALPHAS: usize = 4; -impl AuxTraceBuilder { - pub fn new(bus_builder: BusTraceBuilder, table_builder: ChipletsVTableTraceBuilder) -> Self { - Self { - bus_builder, - table_builder, - } - } +// CHIPLETS AUXILIARY TRACE BUILDER +// 
================================================================================================ + +/// Constructs the execution trace for chiplets-related auxiliary columns (used in multiset checks). +#[derive(Default)] +pub struct AuxTraceBuilder {} +impl AuxTraceBuilder { // COLUMN TRACE CONSTRUCTOR // -------------------------------------------------------------------------------------------- @@ -32,11 +56,13 @@ impl AuxTraceBuilder { /// provided by chiplets in the Chiplets module. pub fn build_aux_columns>( &self, - main_trace: &ColMatrix, + main_trace: &MainTrace, rand_elements: &[E], ) -> Vec> { - let t_chip = self.table_builder.build_aux_column(main_trace, rand_elements); - let b_chip = self.bus_builder.build_aux_column(main_trace, rand_elements); + let v_table_col_builder = ChipletsVTableColBuilder::default(); + let bus_col_builder = BusColumnBuilder::default(); + let t_chip = v_table_col_builder.build_aux_column(main_trace, rand_elements); + let b_chip = bus_col_builder.build_aux_column(main_trace, rand_elements); vec![t_chip, b_chip] } } @@ -45,203 +71,857 @@ impl AuxTraceBuilder { // ================================================================================================ /// Describes how to construct the execution trace of the chiplets bus auxiliary trace column. -pub struct BusTraceBuilder { - pub(super) lookup_hints: Vec<(u32, ChipletsBusRow)>, - pub(super) requests: Vec, - pub(super) responses: Vec, -} - -impl BusTraceBuilder { - pub(crate) fn new( - lookup_hints: Vec<(u32, ChipletsBusRow)>, - requests: Vec, - responses: Vec, - ) -> Self { - Self { - lookup_hints, - requests, - responses, - } - } -} - -impl AuxColumnBuilder for BusTraceBuilder { - /// This method is required, but because it is only called inside `build_row_values` which is - /// overridden below, it is not used here and should not be called. 
- fn get_table_rows(&self) -> &[ChipletLookup] { - unimplemented!() - } - - /// Returns hints which describe the [Chiplets] lookup requests and responses during program - /// execution. Each update hint is accompanied by a clock cycle at which the update happened. - /// - /// Internally, each update hint also contains an index of the row into the full list of request - /// rows or response rows, depending on whether it is a request, a response, or both (in which - /// case it contains 2 indices). - fn get_table_hints(&self) -> &[(u32, ChipletsBusRow)] { - &self.lookup_hints - } +#[derive(Default)] +pub struct BusColumnBuilder {} - /// Returns the value by which the running product column should be multiplied for the provided - /// hint value. - fn get_multiplicand>( - &self, - hint: ChipletsBusRow, - row_values: &[E], - inv_row_values: &[E], - ) -> E { - let mut mult = if let Some(response_idx) = hint.response() { - row_values[response_idx as usize] - } else { - E::ONE - }; +impl> AuxColumnBuilder for BusColumnBuilder { + /// Constructs the requests made by the VM-components to the chiplets at row i. 
+ fn get_requests_at(&self, main_trace: &MainTrace, alphas: &[E], i: usize) -> E + where + E: FieldElement, + { + let op_code_felt = main_trace.get_op_code(i); + let op_code = op_code_felt.as_int() as u8; - for request_idx in hint.requests() { - mult *= inv_row_values[*request_idx as usize]; + match op_code { + JOIN | SPLIT | LOOP | DYN | CALL => { + build_control_block_request(main_trace, op_code_felt, alphas, i) + } + SYSCALL => build_syscall_block_request(main_trace, op_code_felt, alphas, i), + SPAN => build_span_block_request(main_trace, alphas, i), + RESPAN => build_respan_block_request(main_trace, alphas, i), + END => build_end_block_request(main_trace, alphas, i), + AND => build_bitwise_request(main_trace, ZERO, alphas, i), + XOR => build_bitwise_request(main_trace, ONE, alphas, i), + MLOADW => build_mem_request(main_trace, MEMORY_READ_LABEL, true, alphas, i), + MSTOREW => build_mem_request(main_trace, MEMORY_WRITE_LABEL, true, alphas, i), + MLOAD => build_mem_request(main_trace, MEMORY_READ_LABEL, false, alphas, i), + MSTORE => build_mem_request(main_trace, MEMORY_WRITE_LABEL, false, alphas, i), + MSTREAM => build_mstream_request(main_trace, alphas, i), + HPERM => build_hperm_request(main_trace, alphas, i), + MPVERIFY => build_mpverify_request(main_trace, alphas, i), + MRUPDATE => build_mrupdate_request(main_trace, alphas, i), + _ => E::ONE, } - - mult } - /// Build the row values and inverse values used to build the auxiliary column. - /// - /// The row values to be included come from the responses and the inverse values come from - /// requests. Since responses are grouped by chiplet, the operation order for the requests and - /// responses will be permutations of each other rather than sharing the same order. Therefore, - /// the `row_values` and `inv_row_values` must be built separately. - fn build_row_values(&self, main_trace: &ColMatrix, alphas: &[E]) -> (Vec, Vec) + /// Constructs the responses from the chiplets to the other VM-components at row i. 
+ fn get_responses_at(&self, main_trace: &MainTrace, alphas: &[E], i: usize) -> E where E: FieldElement, { - // get the row values from the resonse rows - let row_values = self - .responses - .iter() - .map(|response| response.to_value(main_trace, alphas)) - .collect(); - // get the inverse values from the request rows - let (_, inv_row_values) = build_lookup_table_row_values(&self.requests, main_trace, alphas); - - (row_values, inv_row_values) + let selector0 = main_trace.chiplet_selector_0(i); + let selector1 = main_trace.chiplet_selector_1(i); + let selector2 = main_trace.chiplet_selector_2(i); + let selector3 = main_trace.chiplet_selector_3(i); + let selector4 = main_trace.chiplet_selector_4(i); + + if selector0 == ZERO { + build_hasher_chiplet_responses(main_trace, i, alphas, selector1, selector2, selector3) + } else if selector1 == ZERO { + debug_assert_eq!(selector0, ONE); + build_bitwise_chiplet_responses(main_trace, i, selector2, alphas) + } else if selector2 == ZERO { + debug_assert_eq!(selector0, ONE); + debug_assert_eq!(selector1, ONE); + build_memory_chiplet_responses(main_trace, i, selector3, alphas) + } else if selector3 == ZERO && selector4 == ONE { + debug_assert_eq!(selector0, ONE); + debug_assert_eq!(selector1, ONE); + debug_assert_eq!(selector2, ONE); + build_kernel_chiplet_responses(main_trace, i, alphas) + } else { + debug_assert_eq!(selector0, ONE); + debug_assert_eq!(selector1, ONE); + debug_assert_eq!(selector2, ONE); + debug_assert_eq!(selector3, ONE); + E::ONE + } } } // VIRTUAL TABLE TRACE BUILDER // ================================================================================================ -/// Describes how to construct the execution trace of the chiplets virtual table, used to manage -/// internal updates and data required by the chiplets. 
-/// -/// This manages construction of a single column which first represents the state of the sibling -/// table (used in Merkle root update computation), and then is subsequently used to represent the -/// procedures contained in the kernel ROM. Thus, it is expected that the initial value is ONE, the -/// value after all sibling table updates are completed is again ONE, and the value at the end of -/// the trace is the product of the representations of the kernel ROM procedures. -#[derive(Debug, Clone, Default)] -pub struct ChipletsVTableTraceBuilder { - pub(super) hints: Vec<(u32, ChipletsVTableUpdate)>, - pub(super) rows: Vec, -} - -impl ChipletsVTableTraceBuilder { - // STATE MUTATORS - // -------------------------------------------------------------------------------------------- +/// Describes how to construct the execution trace of the chiplets virtual table auxiliary trace +/// column. +#[derive(Default)] +pub struct ChipletsVTableColBuilder {} - /// Specifies that an entry for the provided sibling was added to the chiplets virtual table at - /// the specified step. - /// - /// It is assumed that the table is empty or contains only sibling entries at this point and has - /// not been used for any other chiplet updates. - pub fn sibling_added(&mut self, step: u32, index: Felt, sibling: Word) { - let row_index = self.rows.len(); - let update = ChipletsVTableUpdate::SiblingAdded(row_index as u32); - self.hints.push((step, update)); - self.rows.push(ChipletsVTableRow::new_sibling(index, sibling)); +impl> AuxColumnBuilder for ChipletsVTableColBuilder { + fn get_requests_at(&self, main_trace: &MainTrace, alphas: &[E], i: usize) -> E { + chiplets_vtable_remove_sibling(main_trace, alphas, i) } - /// Specifies that an entry for a sibling was removed from the chiplets virtual table. The entry - /// is defined by the provided offset. For example, if row_offset = 2, the second from the last - /// entry was removed from the table. 
- /// - /// It is assumed that the table contains only sibling entries at this point and has not been - /// used for any other chiplet updates. - pub fn sibling_removed(&mut self, step: u32, row_offset: usize) { - let row_index = self.rows.len() - row_offset - 1; - let update = ChipletsVTableUpdate::SiblingRemoved(row_index as u32); - self.hints.push((step, update)); + fn get_responses_at(&self, main_trace: &MainTrace, alphas: &[E], i: usize) -> E { + chiplets_vtable_add_sibling(main_trace, alphas, i) + * chiplets_kernel_table_include(main_trace, alphas, i) } +} - /// Specifies a kernel procedure that must be added to the virtual table. - /// - /// It is assumed that kernel procedures will only be added after all sibling updates have been - /// completed. - pub fn add_kernel_proc(&mut self, step: u32, addr: Felt, proc_hash: Word) { - let proc_index = self.rows.len(); - let update = ChipletsVTableUpdate::KernelProcAdded(proc_index as u32); - self.hints.push((step, update)); - self.rows.push(ChipletsVTableRow::new_kernel_proc(addr, proc_hash)); - } +// CHIPLETS VIRTUAL TABLE REQUESTS +// ================================================================================================ - // TEST HELPERS - // -------------------------------------------------------------------------------------------- - #[cfg(test)] - pub fn hints(&self) -> &[(u32, ChipletsVTableUpdate)] { - &self.hints - } +/// Constructs the inclusions to the table when the hasher absorbs a new sibling node while +/// computing the old Merkle root. 
+fn chiplets_vtable_add_sibling(main_trace: &MainTrace, alphas: &[E], i: usize) -> E +where + E: FieldElement, +{ + let f_mv: bool = main_trace.f_mv(i); + let f_mva: bool = if i == 0 { false } else { main_trace.f_mva(i - 1) }; - #[cfg(test)] - pub fn rows(&self) -> &[ChipletsVTableRow] { - &self.rows + if f_mv || f_mva { + let index = if f_mva { + main_trace.chiplet_node_index(i - 1) + } else { + main_trace.chiplet_node_index(i) + }; + let lsb = index.as_int() & 1; + if lsb == 0 { + let sibling = &main_trace.chiplet_hasher_state(i)[DIGEST_RANGE.end..]; + alphas[3].mul_base(index) + + alphas[12].mul_base(sibling[0]) + + alphas[13].mul_base(sibling[1]) + + alphas[14].mul_base(sibling[2]) + + alphas[15].mul_base(sibling[3]) + } else { + let sibling = &main_trace.chiplet_hasher_state(i)[DIGEST_RANGE]; + alphas[3].mul_base(index) + + alphas[8].mul_base(sibling[0]) + + alphas[9].mul_base(sibling[1]) + + alphas[10].mul_base(sibling[2]) + + alphas[11].mul_base(sibling[3]) + } + } else { + E::ONE } } -impl AuxColumnBuilder for ChipletsVTableTraceBuilder { - /// Returns a list of rows which were added to and then removed from the chiplets virtual table. - /// - /// The order of the rows in the list is the same as the order in which the rows were added to - /// the table. - fn get_table_rows(&self) -> &[ChipletsVTableRow] { - &self.rows +/// Constructs the removals from the table when the hasher absorbs a new sibling node while +/// computing the new Merkle root. 
+fn chiplets_vtable_remove_sibling(main_trace: &MainTrace, alphas: &[E], i: usize) -> E +where + E: FieldElement, +{ + let f_mu: bool = main_trace.f_mu(i); + let f_mua: bool = if i == 0 { false } else { main_trace.f_mua(i - 1) }; + + if f_mu || f_mua { + let index = if f_mua { + main_trace.chiplet_node_index(i - 1) + } else { + main_trace.chiplet_node_index(i) + }; + let lsb = index.as_int() & 1; + if lsb == 0 { + let sibling = &main_trace.chiplet_hasher_state(i)[DIGEST_RANGE.end..]; + alphas[3].mul_base(index) + + alphas[12].mul_base(sibling[0]) + + alphas[13].mul_base(sibling[1]) + + alphas[14].mul_base(sibling[2]) + + alphas[15].mul_base(sibling[3]) + } else { + let sibling = &main_trace.chiplet_hasher_state(i)[DIGEST_RANGE]; + alphas[3].mul_base(index) + + alphas[8].mul_base(sibling[0]) + + alphas[9].mul_base(sibling[1]) + + alphas[10].mul_base(sibling[2]) + + alphas[11].mul_base(sibling[3]) + } + } else { + E::ONE } +} - /// Returns hints which describe how the chiplets virtual table was updated during program - /// execution. Each update hint is accompanied by a clock cycle at which the update happened. - /// - /// Internally, each update hint also contains an index of the row into the full list of rows - /// which was either added or removed. - fn get_table_hints(&self) -> &[(u32, ChipletsVTableUpdate)] { - &self.hints +/// Constructs the inclusions to the kernel table. 
+fn chiplets_kernel_table_include(main_trace: &MainTrace, alphas: &[E], i: usize) -> E +where + E: FieldElement, +{ + if main_trace.is_kernel_row(i) && main_trace.is_addr_change(i) { + alphas[1].mul_base(main_trace.addr(i)) + + alphas[2].mul_base(main_trace.chiplet_kernel_root_0(i)) + + alphas[3].mul_base(main_trace.chiplet_kernel_root_1(i)) + + alphas[4].mul_base(main_trace.chiplet_kernel_root_2(i)) + + alphas[5].mul_base(main_trace.chiplet_kernel_root_3(i)) + } else { + E::ONE } +} - /// Returns the value by which the running product column should be multiplied for the provided - /// hint value. - fn get_multiplicand>( - &self, - hint: ChipletsVTableUpdate, - row_values: &[E], - inv_row_values: &[E], - ) -> E { - match hint { - ChipletsVTableUpdate::SiblingAdded(inserted_row_idx) => { - row_values[inserted_row_idx as usize] - } - ChipletsVTableUpdate::SiblingRemoved(removed_row_idx) => { - inv_row_values[removed_row_idx as usize] - } - ChipletsVTableUpdate::KernelProcAdded(idx) => row_values[idx as usize], +// CHIPLETS REQUESTS +// ================================================================================================ + +/// Builds requests made to the hasher chiplet at the start of a control block. +fn build_control_block_request>( + main_trace: &MainTrace, + op_code_felt: Felt, + alphas: &[E], + i: usize, +) -> E { + let op_label = LINEAR_HASH_LABEL; + let addr_nxt = main_trace.addr(i + 1); + let first_cycle_row = addr_to_row_index(addr_nxt) % HASH_CYCLE_LEN == 0; + let transition_label = if first_cycle_row { op_label + 16 } else { op_label + 32 }; + + let header = alphas[1].mul_base(Felt::from(transition_label)) + alphas[2].mul_base(addr_nxt); + + let state = main_trace.decoder_hasher_state(i); + + header + build_value(&alphas[8..16], &state) + alphas[5].mul_base(op_code_felt) +} + +/// Builds requests made to kernel ROM chiplet when initializing a syscall block. 
+fn build_syscall_block_request>( + main_trace: &MainTrace, + op_code_felt: Felt, + alphas: &[E], + i: usize, +) -> E { + let factor1 = build_control_block_request(main_trace, op_code_felt, alphas, i); + + let op_label = KERNEL_PROC_LABEL; + let state = main_trace.decoder_hasher_state(i); + let factor2 = alphas[1].mul_base(op_label) + + alphas[2].mul_base(state[0]) + + alphas[3].mul_base(state[1]) + + alphas[4].mul_base(state[2]) + + alphas[5].mul_base(state[3]); + + // TODO: remove difference + let difference = (alphas[0] * alphas[0]) + (alphas[0] * factor2); + (factor1 * factor2) + difference - alphas[0] +} + +/// Builds requests made to the hasher chiplet at the start of a span block. +fn build_span_block_request>( + main_trace: &MainTrace, + alphas: &[E], + i: usize, +) -> E { + let op_label = LINEAR_HASH_LABEL; + let addr_nxt = main_trace.addr(i + 1); + let first_cycle_row = addr_to_row_index(addr_nxt) % HASH_CYCLE_LEN == 0; + let transition_label = if first_cycle_row { op_label + 16 } else { op_label + 32 }; + + let header = alphas[1].mul_base(Felt::from(transition_label)) + alphas[2].mul_base(addr_nxt); + + let state = main_trace.decoder_hasher_state(i); + + header + build_value(&alphas[8..16], &state) +} + +/// Builds requests made to the hasher chiplet at the start of a respan block. 
+fn build_respan_block_request>( + main_trace: &MainTrace, + alphas: &[E], + i: usize, +) -> E { + let op_label = LINEAR_HASH_LABEL; + let addr_nxt = main_trace.addr(i + 1); + + let first_cycle_row = addr_to_row_index(addr_nxt - ONE) % HASH_CYCLE_LEN == 0; + let transition_label = if first_cycle_row { op_label + 16 } else { op_label + 32 }; + + let header = alphas[1].mul_base(Felt::from(transition_label)) + + alphas[2].mul_base(addr_nxt - ONE) + + alphas[3].mul_base(ZERO); + + let state = &main_trace.chiplet_hasher_state(i - 2)[CAPACITY_LEN..]; + let state_nxt = &main_trace.chiplet_hasher_state(i - 1)[CAPACITY_LEN..]; + + header + build_value(&alphas[8..16], state_nxt) - build_value(&alphas[8..16], state) +} + +/// Builds requests made to the hasher chiplet at the end of a block. +fn build_end_block_request>( + main_trace: &MainTrace, + alphas: &[E], + i: usize, +) -> E { + let op_label = RETURN_HASH_LABEL; + let addr = main_trace.addr(i) + Felt::from(NUM_ROUNDS as u8); + + let first_cycle_row = addr_to_row_index(addr) % HASH_CYCLE_LEN == 0; + let transition_label = if first_cycle_row { op_label + 16 } else { op_label + 32 }; + + let header = alphas[1].mul_base(Felt::from(transition_label)) + alphas[2].mul_base(addr); + + let state = main_trace.decoder_hasher_state(i); + let digest = &state[..4]; + + header + build_value(&alphas[8..12], digest) +} + +/// Builds requests made to the bitwise chiplet. This can be either a request for the computation +/// of a `XOR` or an `AND` operation. +fn build_bitwise_request>( + main_trace: &MainTrace, + is_xor: Felt, + alphas: &[E], + i: usize, +) -> E { + let op_label = get_op_label(ONE, ZERO, is_xor, ZERO); + let a = main_trace.stack_element(1, i); + let b = main_trace.stack_element(0, i); + let z = main_trace.stack_element(0, i + 1); + + alphas[1].mul_base(op_label) + + alphas[2].mul_base(a) + + alphas[3].mul_base(b) + + alphas[4].mul_base(z) +} + +/// Builds `MLOAD*` and `MSTORE*` requests made to the memory chiplet. 
+fn build_mem_request>( + main_trace: &MainTrace, + op_label: u8, + word: bool, + alphas: &[E], + i: usize, +) -> E { + let ctx = main_trace.ctx(i); + let clk = main_trace.clk(i); + + let (v0, v1, v2, v3) = if word { + ( + main_trace.stack_element(0, i + 1), + main_trace.stack_element(1, i + 1), + main_trace.stack_element(2, i + 1), + main_trace.stack_element(3, i + 1), + ) + } else { + ( + main_trace.helper_0(i), + main_trace.helper_1(i), + main_trace.helper_2(i), + main_trace.stack_element(0, i + 1), + ) + }; + + let s0_cur = main_trace.stack_element(0, i); + + alphas[1].mul_base(Felt::from(op_label)) + + alphas[2].mul_base(ctx) + + alphas[3].mul_base(s0_cur) + + alphas[4].mul_base(clk) + + alphas[5].mul_base(v3) + + alphas[6].mul_base(v2) + + alphas[7].mul_base(v1) + + alphas[8].mul_base(v0) +} + +/// Builds `MSTREAM` requests made to the memory chiplet. +fn build_mstream_request>( + main_trace: &MainTrace, + alphas: &[E], + i: usize, +) -> E { + let ctx = main_trace.ctx(i); + let clk = main_trace.clk(i); + + let s0_nxt = main_trace.stack_element(0, i + 1); + let s1_nxt = main_trace.stack_element(1, i + 1); + let s2_nxt = main_trace.stack_element(2, i + 1); + let s3_nxt = main_trace.stack_element(3, i + 1); + let s4_nxt = main_trace.stack_element(4, i + 1); + let s5_nxt = main_trace.stack_element(5, i + 1); + let s6_nxt = main_trace.stack_element(6, i + 1); + let s7_nxt = main_trace.stack_element(7, i + 1); + + let s12_cur = main_trace.stack_element(12, i); + + let op_label = MEMORY_READ_LABEL; + + let factor1 = alphas[1].mul_base(Felt::from(op_label)) + + alphas[2].mul_base(ctx) + + alphas[3].mul_base(s12_cur) + + alphas[4].mul_base(clk) + + alphas[5].mul_base(s7_nxt) + + alphas[6].mul_base(s6_nxt) + + alphas[7].mul_base(s5_nxt) + + alphas[8].mul_base(s4_nxt); + + let factor2 = alphas[1].mul_base(Felt::from(op_label)) + + alphas[2].mul_base(ctx) + + alphas[3].mul_base(s12_cur + ONE) + + alphas[4].mul_base(clk) + + alphas[5].mul_base(s3_nxt) + + 
alphas[6].mul_base(s2_nxt) + + alphas[7].mul_base(s1_nxt) + + alphas[8].mul_base(s0_nxt); + + // TODO: remove difference + let difference = (alphas[0] * alphas[0]) + (alphas[0] * factor1) + (alphas[0] * factor2); + factor1 * factor2 + difference - alphas[0] +} + +/// Builds `HPERM` requests made to the hash chiplet. +fn build_hperm_request>( + main_trace: &MainTrace, + alphas: &[E], + i: usize, +) -> E { + let helper_0 = main_trace.helper_0(i); + + let s0_s12_cur = [ + main_trace.stack_element(0, i), + main_trace.stack_element(1, i), + main_trace.stack_element(2, i), + main_trace.stack_element(3, i), + main_trace.stack_element(4, i), + main_trace.stack_element(5, i), + main_trace.stack_element(6, i), + main_trace.stack_element(7, i), + main_trace.stack_element(8, i), + main_trace.stack_element(9, i), + main_trace.stack_element(10, i), + main_trace.stack_element(11, i), + ]; + + let s0_s12_nxt = [ + main_trace.stack_element(0, i + 1), + main_trace.stack_element(1, i + 1), + main_trace.stack_element(2, i + 1), + main_trace.stack_element(3, i + 1), + main_trace.stack_element(4, i + 1), + main_trace.stack_element(5, i + 1), + main_trace.stack_element(6, i + 1), + main_trace.stack_element(7, i + 1), + main_trace.stack_element(8, i + 1), + main_trace.stack_element(9, i + 1), + main_trace.stack_element(10, i + 1), + main_trace.stack_element(11, i + 1), + ]; + + let op_label = LINEAR_HASH_LABEL; + let op_label = if addr_to_hash_cycle(helper_0) == 0 { + op_label + 16 + } else { + op_label + 32 + }; + + let sum_input = alphas[4..16] + .iter() + .rev() + .enumerate() + .fold(E::ZERO, |acc, (i, x)| acc + x.mul_base(s0_s12_cur[i])); + let v_input = + alphas[1].mul_base(Felt::from(op_label)) + alphas[2].mul_base(helper_0) + sum_input; + + let op_label = RETURN_STATE_LABEL; + let op_label = if addr_to_hash_cycle(helper_0 + Felt::new(7)) == 0 { + op_label + 16 + } else { + op_label + 32 + }; + + let sum_output = alphas[4..16] + .iter() + .rev() + .enumerate() + .fold(E::ZERO, 
|acc, (i, x)| acc + x.mul_base(s0_s12_nxt[i])); + let v_output = alphas[1].mul_base(Felt::from(op_label)) + + alphas[2].mul_base(helper_0 + Felt::new(7)) + + sum_output; + + let difference = (alphas[0] * alphas[0]) + (alphas[0] * v_input) + (alphas[0] * v_output); + v_input * v_output + difference - alphas[0] +} + +/// Builds `MPVERIFY` requests made to the hash chiplet. +fn build_mpverify_request>( + main_trace: &MainTrace, + alphas: &[E], + i: usize, +) -> E { + let helper_0 = main_trace.helper_0(i); + + let s0_s3 = [ + main_trace.stack_element(0, i), + main_trace.stack_element(1, i), + main_trace.stack_element(2, i), + main_trace.stack_element(3, i), + ]; + let s4 = main_trace.stack_element(4, i); + let s5 = main_trace.stack_element(5, i); + let s6_s9 = [ + main_trace.stack_element(6, i), + main_trace.stack_element(7, i), + main_trace.stack_element(8, i), + main_trace.stack_element(9, i), + ]; + + let op_label = MP_VERIFY_LABEL; + let op_label = if addr_to_hash_cycle(helper_0) == 0 { + op_label + 16 + } else { + op_label + 32 + }; + + let sum_input = alphas[8..12] + .iter() + .rev() + .enumerate() + .fold(E::ZERO, |acc, (i, x)| acc + x.mul_base(s0_s3[i])); + + let v_input = alphas[1].mul_base(Felt::from(op_label)) + + alphas[2].mul_base(helper_0) + + alphas[3].mul_base(s5) + + sum_input; + + let op_label = RETURN_HASH_LABEL; + let op_label = if (helper_0).as_int() % 8 == 0 { + op_label + 16 + } else { + op_label + 32 + }; + + let sum_output = alphas[8..12] + .iter() + .rev() + .enumerate() + .fold(E::ZERO, |acc, (i, x)| acc + x.mul_base(s6_s9[i])); + let v_output = alphas[1].mul_base(Felt::from(op_label)) + + alphas[2].mul_base(helper_0 + s4.mul_small(8) - ONE) + + sum_output; + + let difference = (alphas[0] * alphas[0]) + (alphas[0] * v_input) + (alphas[0] * v_output); + v_input * v_output + difference - alphas[0] +} + +/// Builds `MRUPDATE` requests made to the hash chiplet. 
+fn build_mrupdate_request>( + main_trace: &MainTrace, + alphas: &[E], + i: usize, +) -> E { + let helper_0 = main_trace.helper_0(i); + + let s0_s3 = [ + main_trace.stack_element(0, i), + main_trace.stack_element(1, i), + main_trace.stack_element(2, i), + main_trace.stack_element(3, i), + ]; + let s0_s3_nxt = [ + main_trace.stack_element(0, i + 1), + main_trace.stack_element(1, i + 1), + main_trace.stack_element(2, i + 1), + main_trace.stack_element(3, i + 1), + ]; + let s4 = main_trace.stack_element(4, i); + let s5 = main_trace.stack_element(5, i); + let s6_s9 = [ + main_trace.stack_element(6, i), + main_trace.stack_element(7, i), + main_trace.stack_element(8, i), + main_trace.stack_element(9, i), + ]; + let s10_s13 = [ + main_trace.stack_element(10, i), + main_trace.stack_element(11, i), + main_trace.stack_element(12, i), + main_trace.stack_element(13, i), + ]; + + let op_label = MR_UPDATE_OLD_LABEL; + let op_label = if addr_to_hash_cycle(helper_0) == 0 { + op_label + 16 + } else { + op_label + 32 + }; + + let sum_input = alphas[8..12] + .iter() + .rev() + .enumerate() + .fold(E::ZERO, |acc, (i, x)| acc + x.mul_base(s0_s3[i])); + let v_input_old = alphas[1].mul_base(Felt::from(op_label)) + + alphas[2].mul_base(helper_0) + + alphas[3].mul_base(s5) + + sum_input; + + let op_label = RETURN_HASH_LABEL; + let op_label = if addr_to_hash_cycle(helper_0 + s4.mul_small(8) - ONE) == 0 { + op_label + 16 + } else { + op_label + 32 + }; + + let sum_output = alphas[8..12] + .iter() + .rev() + .enumerate() + .fold(E::ZERO, |acc, (i, x)| acc + x.mul_base(s6_s9[i])); + let v_output_old = alphas[1].mul_base(Felt::from(op_label)) + + alphas[2].mul_base(helper_0 + s4.mul_small(8) - ONE) + + sum_output; + + let op_label = MR_UPDATE_NEW_LABEL; + let op_label = if addr_to_hash_cycle(helper_0 + s4.mul_small(8)) == 0 { + op_label + 16 + } else { + op_label + 32 + }; + let sum_input = alphas[8..12] + .iter() + .rev() + .enumerate() + .fold(E::ZERO, |acc, (i, x)| acc + 
x.mul_base(s10_s13[i])); + let v_input_new = alphas[1].mul_base(Felt::from(op_label)) + + alphas[2].mul_base(helper_0 + s4.mul_small(8)) + + alphas[3].mul_base(s5) + + sum_input; + + let op_label = RETURN_HASH_LABEL; + let op_label = if addr_to_hash_cycle(helper_0 + s4.mul_small(16) - ONE) == 0 { + op_label + 16 + } else { + op_label + 32 + }; + + let sum_output = alphas[8..12] + .iter() + .rev() + .enumerate() + .fold(E::ZERO, |acc, (i, x)| acc + x.mul_base(s0_s3_nxt[i])); + let v_output_new = alphas[1].mul_base(Felt::from(op_label)) + + alphas[2].mul_base(helper_0 + s4.mul_small(16) - ONE) + + sum_output; + + // calculate difference after factoring out alpha[0] + let r1 = v_input_new; + let r2 = v_input_old; + let r3 = v_output_new; + let r4 = v_output_old; + + let a1 = alphas[0]; + let a2 = a1 * alphas[0]; + let a3 = a2 * alphas[0]; + let a4 = a3 * alphas[0]; + + let difference = a4 + + (a3 * r3) + + (a3 * r4) + + (a2 * r3 * r4) + + (a3 * r1) + + (a2 * r1 * r3) + + (a2 * r1 * r4) + + (a1 * r1 * r3 * r4) + + (a3 * r2) + + (a2 * r2 * r3) + + (a2 * r2 * r4) + + (a1 * r2 * r3 * r4) + + (a2 * r1 * r2) + + (a1 * r1 * r2 * r3) + + (a1 * r1 * r2 * r4); + + (v_input_new * v_input_old * v_output_new * v_output_old) + difference - alphas[0] +} + +// CHIPLETS RESPONSES +// ================================================================================================ + +/// Builds the response from the hasher chiplet at row `i`. 
+fn build_hasher_chiplet_responses( + main_trace: &MainTrace, + i: usize, + alphas: &[E], + selector1: Felt, + selector2: Felt, + selector3: Felt, +) -> E +where + E: FieldElement, +{ + let mut multiplicand = E::ONE; + let selector0 = main_trace.chiplet_selector_0(i); + let op_label = get_op_label(selector0, selector1, selector2, selector3); + + // f_bp, f_mp, f_mv or f_mu == 1 + if i % HASH_CYCLE_LEN == 0 { + let state = main_trace.chiplet_hasher_state(i); + let alphas_state = &alphas[NUM_HEADER_ALPHAS..(NUM_HEADER_ALPHAS + STATE_WIDTH)]; + let node_index = main_trace.chiplet_node_index(i); + let transition_label = op_label + Felt::from(16_u8); + + // f_bp == 1 + // v_all = v_h + v_a + v_b + v_c + if selector1 == ONE && selector2 == ZERO && selector3 == ZERO { + let header = alphas[1].mul_base(transition_label) + + alphas[2].mul_base(Felt::from((i + 1) as u64)) + + alphas[3].mul_base(node_index); + + multiplicand = header + build_value(alphas_state, &state); + } + + // f_mp or f_mv or f_mu == 1 + // v_leaf = v_h + (1 - b) * v_b + b * v_d + if selector1 == ONE && !(selector2 == ZERO && selector3 == ZERO) { + let header = alphas[1].mul_base(transition_label) + + alphas[2].mul_base(Felt::from((i + 1) as u64)) + + alphas[3].mul_base(node_index); + + let bit = node_index.as_int() & 1; + let left_word = build_value(&alphas_state[DIGEST_RANGE], &state[DIGEST_RANGE]); + let right_word = build_value(&alphas_state[DIGEST_RANGE], &state[DIGEST_RANGE.end..]); + + multiplicand = header + E::from(1 - bit).mul(left_word) + E::from(bit).mul(right_word); } } - /// Returns the final value in the auxiliary column. Default implementation of this method - /// returns ONE. 
- fn final_column_value>(&self, row_values: &[E]) -> E { - let mut result = E::ONE; - for (_, table_update) in self.hints.iter() { - if let ChipletsVTableUpdate::KernelProcAdded(idx) = table_update { - result *= row_values[*idx as usize]; - } + // f_hout, f_sout, f_abp == 1 + if i % HASH_CYCLE_LEN == HASH_CYCLE_LEN - 1 { + let state = main_trace.chiplet_hasher_state(i); + let alphas_state = &alphas[NUM_HEADER_ALPHAS..(NUM_HEADER_ALPHAS + STATE_WIDTH)]; + let node_index = main_trace.chiplet_node_index(i); + let transition_label = op_label + Felt::from(32_u8); + + // f_hout == 1 + // v_res = v_h + v_b; + if selector1 == ZERO && selector2 == ZERO && selector3 == ZERO { + let header = alphas[1].mul_base(transition_label) + + alphas[2].mul_base(Felt::from((i + 1) as u64)) + + alphas[3].mul_base(node_index); + + multiplicand = header + build_value(&alphas_state[DIGEST_RANGE], &state[DIGEST_RANGE]); } - result + // f_sout == 1 + // v_all = v_h + v_a + v_b + v_c + if selector1 == ZERO && selector2 == ZERO && selector3 == ONE { + let header = alphas[1].mul_base(transition_label) + + alphas[2].mul_base(Felt::from((i + 1) as u64)) + + alphas[3].mul_base(node_index); + + multiplicand = header + build_value(alphas_state, &state); + } + + // f_abp == 1 + // v_abp = v_h + v_b' + v_c' - v_b - v_c + if selector1 == ONE && selector2 == ZERO && selector3 == ZERO { + let header = alphas[1].mul_base(transition_label) + + alphas[2].mul_base(Felt::from((i + 1) as u64)) + + alphas[3].mul_base(node_index); + + let state_nxt = main_trace.chiplet_hasher_state(i + 1); + + // build the value from the difference of the hasher state's just before and right + // after the absorption of new elements. 
+ let next_state_value = + build_value(&alphas_state[CAPACITY_LEN..], &state_nxt[CAPACITY_LEN..]); + let state_value = build_value(&alphas_state[CAPACITY_LEN..], &state[CAPACITY_LEN..]); + + multiplicand = header + next_state_value - state_value; + } } + multiplicand +} + +/// Builds the response from the bitwise chiplet at row `i`. +fn build_bitwise_chiplet_responses( + main_trace: &MainTrace, + i: usize, + is_xor: Felt, + alphas: &[E], +) -> E +where + E: FieldElement, +{ + if i % BITWISE_OP_CYCLE_LEN == BITWISE_OP_CYCLE_LEN - 1 { + let op_label = get_op_label(ONE, ZERO, is_xor, ZERO); + + let a = main_trace.chiplet_bitwise_a(i); + let b = main_trace.chiplet_bitwise_b(i); + let z = main_trace.chiplet_bitwise_z(i); + + alphas[1].mul_base(op_label) + + alphas[2].mul_base(a) + + alphas[3].mul_base(b) + + alphas[4].mul_base(z) + } else { + E::ONE + } +} + +/// Builds the response from the memory chiplet at row `i`. +fn build_memory_chiplet_responses( + main_trace: &MainTrace, + i: usize, + is_read: Felt, + alphas: &[E], +) -> E +where + E: FieldElement, +{ + let op_label = get_op_label(ONE, ONE, ZERO, is_read); + + let ctx = main_trace.chiplet_memory_ctx(i); + let clk = main_trace.chiplet_memory_clk(i); + let addr = main_trace.chiplet_memory_addr(i); + let value0 = main_trace.chiplet_memory_value_0(i); + let value1 = main_trace.chiplet_memory_value_1(i); + let value2 = main_trace.chiplet_memory_value_2(i); + let value3 = main_trace.chiplet_memory_value_3(i); + + alphas[1].mul_base(op_label) + + alphas[2].mul_base(ctx) + + alphas[3].mul_base(addr) + + alphas[4].mul_base(clk) + + alphas[5].mul_base(value0) + + alphas[6].mul_base(value1) + + alphas[7].mul_base(value2) + + alphas[8].mul_base(value3) +} + +/// Builds the response from the kernel chiplet at row `i`. 
+fn build_kernel_chiplet_responses(main_trace: &MainTrace, i: usize, alphas: &[E]) -> E +where + E: FieldElement, +{ + let op_label = KERNEL_PROC_LABEL; + + let root0 = main_trace.chiplet_kernel_root_0(i); + let root1 = main_trace.chiplet_kernel_root_1(i); + let root2 = main_trace.chiplet_kernel_root_2(i); + let root3 = main_trace.chiplet_kernel_root_3(i); + + alphas[1].mul_base(op_label) + + alphas[2].mul_base(root0) + + alphas[3].mul_base(root1) + + alphas[4].mul_base(root2) + + alphas[5].mul_base(root3) +} + +/// Reduces a slice of elements to a single field element in the field specified by E using a slice +/// of alphas of matching length. This can be used to build the value for a single word or for an +/// entire [HasherState]. +fn build_value>(alphas: &[E], elements: &[Felt]) -> E { + assert_eq!(alphas.len(), elements.len()); + let mut value = E::ZERO; + for (&alpha, &element) in alphas.iter().zip(elements.iter()) { + value += alpha.mul_base(element); + } + value +} + +/// Returns the operation unique label. +fn get_op_label(s0: Felt, s1: Felt, s2: Felt, s3: Felt) -> Felt { + s3.mul_small(1 << 3) + s2.mul_small(1 << 2) + s1.mul_small(2) + s0 + ONE +} + +/// Returns the hash cycle corresponding to the provided Hasher address. +fn addr_to_hash_cycle(addr: Felt) -> usize { + let row = (addr.as_int() - 1) as usize; + let cycle_row = row % HASH_CYCLE_LEN; + debug_assert!(cycle_row == 0 || cycle_row == HASH_CYCLE_LEN - 1, "invalid address for hasher"); + + cycle_row +} + +/// Convenience method to convert from addresses to rows. 
+fn addr_to_row_index(addr: Felt) -> usize { + (addr.as_int() - 1) as usize } diff --git a/processor/src/chiplets/aux_trace/virtual_table.rs b/processor/src/chiplets/aux_trace/virtual_table.rs deleted file mode 100644 index 77d0736c99..0000000000 --- a/processor/src/chiplets/aux_trace/virtual_table.rs +++ /dev/null @@ -1,160 +0,0 @@ -use super::{ColMatrix, Felt, FieldElement, StarkField, Word}; -use crate::trace::LookupTableRow; - -// CHIPLETS VIRTUAL TABLE -// ================================================================================================ - -/// Describes updates to the chiplets virtual table. This includes management of the "sibling table" -/// used by the hasher chiplet and the "kernel procedure table" used by the kernel ROM chiplet. -/// -/// - The sibling table is used to enforce Merkle root update computations. The internal u32 values -/// are indices of added/removed rows in a list of rows sorted chronologically (i.e., from first -/// added row to last). -/// - The kernel procedure table contains all kernel procedures along with the address where they -/// first appear in the kernel ROM trace. Each kernel procedure is expected to be included exactly -/// once, regardless of whether it is ever called or not. 
-#[derive(Debug, Copy, Clone, PartialEq, Eq)] -pub enum ChipletsVTableUpdate { - SiblingAdded(u32), - SiblingRemoved(u32), - KernelProcAdded(u32), -} - -#[derive(Debug, Copy, Clone, PartialEq, Eq)] -pub struct ChipletsVTableRow { - sibling: Option, - kernel_proc: Option, -} - -impl ChipletsVTableRow { - pub fn new_sibling(index: Felt, sibling: Word) -> Self { - Self { - sibling: Some(SiblingTableRow::new(index, sibling)), - kernel_proc: None, - } - } - - pub fn new_kernel_proc(addr: Felt, proc_hash: Word) -> Self { - Self { - sibling: None, - kernel_proc: Some(KernelProc::new(addr, proc_hash)), - } - } - - #[cfg(test)] - pub fn kernel_proc(&self) -> Option { - self.kernel_proc - } -} - -impl LookupTableRow for ChipletsVTableRow { - /// Reduces this row to a single field element in the field specified by E. This requires - /// at least 6 alpha values. - fn to_value>( - &self, - main_trace: &ColMatrix, - alphas: &[E], - ) -> E { - if let Some(sibling) = self.sibling { - debug_assert!( - self.kernel_proc.is_none(), - "a chiplet virtual table row cannot represent both a sibling and a kernel ROM procedure" - ); - sibling.to_value(main_trace, alphas) - } else if let Some(kernel_proc) = self.kernel_proc { - kernel_proc.to_value(main_trace, alphas) - } else { - E::ONE - } - } -} - -// SIBLING TABLE ROW -// ================================================================================================ - -/// Describes a single entry in the sibling table which consists of a tuple `(index, node)` where -/// index is the index of the node at its depth. For example, assume a leaf has index n. For the -/// leaf's parent the index will be n << 1. For the parent of the parent, the index will be -/// n << 2 etc. 
-#[derive(Debug, Copy, Clone, PartialEq, Eq)] -pub struct SiblingTableRow { - index: Felt, - sibling: Word, -} - -impl SiblingTableRow { - pub fn new(index: Felt, sibling: Word) -> Self { - Self { index, sibling } - } -} - -impl LookupTableRow for SiblingTableRow { - /// Reduces this row to a single field element in the field specified by E. This requires - /// at least 6 alpha values. - fn to_value>( - &self, - _main_trace: &ColMatrix, - alphas: &[E], - ) -> E { - // when the least significant bit of the index is 0, the sibling will be in the 3rd word - // of the hasher state, and when the least significant bit is 1, it will be in the 2nd - // word. we compute the value in this way to make constraint evaluation a bit easier since - // we need to compute the 2nd and the 3rd word values for other purposes as well. - let lsb = self.index.as_int() & 1; - if lsb == 0 { - alphas[0] - + alphas[3].mul_base(self.index) - + alphas[12].mul_base(self.sibling[0]) - + alphas[13].mul_base(self.sibling[1]) - + alphas[14].mul_base(self.sibling[2]) - + alphas[15].mul_base(self.sibling[3]) - } else { - alphas[0] - + alphas[3].mul_base(self.index) - + alphas[8].mul_base(self.sibling[0]) - + alphas[9].mul_base(self.sibling[1]) - + alphas[10].mul_base(self.sibling[2]) - + alphas[11].mul_base(self.sibling[3]) - } - } -} - -// KERNEL ROM PROCEDURES -// ================================================================================================ - -/// Describes a single entry in the kernel rom procedure table which consists of a tuple -/// `(addr, proc_hash)` where `addr` is the address of the first entry of the procedure in the -/// kernel ROM table and `proc_hash` is the 4-element root hash of the procedure. 
-#[derive(Debug, Copy, Clone, PartialEq, Eq)] -pub struct KernelProc { - addr: Felt, - proc_hash: Word, -} - -impl KernelProc { - pub fn new(addr: Felt, proc_hash: Word) -> Self { - Self { addr, proc_hash } - } - - #[cfg(test)] - pub fn proc_hash(&self) -> Word { - self.proc_hash - } -} - -impl LookupTableRow for KernelProc { - /// Reduces this row to a single field element in the field specified by E. This requires - /// at least 6 alpha values. - fn to_value>( - &self, - _main_trace: &ColMatrix, - alphas: &[E], - ) -> E { - alphas[0] - + alphas[1].mul_base(self.addr) - + alphas[2].mul_base(self.proc_hash[0]) - + alphas[3].mul_base(self.proc_hash[1]) - + alphas[4].mul_base(self.proc_hash[2]) - + alphas[5].mul_base(self.proc_hash[3]) - } -} diff --git a/processor/src/chiplets/bitwise/mod.rs b/processor/src/chiplets/bitwise/mod.rs index b7d84e47a1..8dd86e85cf 100644 --- a/processor/src/chiplets/bitwise/mod.rs +++ b/processor/src/chiplets/bitwise/mod.rs @@ -1,10 +1,7 @@ -use super::{ - trace::LookupTableRow, utils::get_trace_len, ChipletsBus, ColMatrix, ExecutionError, Felt, - FieldElement, StarkField, TraceFragment, Vec, BITWISE_AND_LABEL, BITWISE_XOR_LABEL, ZERO, -}; +use super::{utils::get_trace_len, ExecutionError, Felt, StarkField, TraceFragment, Vec, ZERO}; use miden_air::trace::chiplets::bitwise::{ - A_COL_IDX, A_COL_RANGE, BITWISE_AND, BITWISE_XOR, B_COL_IDX, B_COL_RANGE, OP_CYCLE_LEN, - OUTPUT_COL_IDX, PREV_OUTPUT_COL_IDX, TRACE_WIDTH, + A_COL_IDX, A_COL_RANGE, BITWISE_AND, BITWISE_XOR, B_COL_IDX, B_COL_RANGE, OUTPUT_COL_IDX, + PREV_OUTPUT_COL_IDX, TRACE_WIDTH, }; #[cfg(test)] @@ -150,43 +147,12 @@ impl Bitwise { // EXECUTION TRACE GENERATION // -------------------------------------------------------------------------------------------- - /// Fills the provided trace fragment with trace data from this bitwise helper instance. 
Each - /// bitwise operation lookup is also sent to the chiplets bus, along with the cycle at which it - /// was provided, which is calculated as an offset from the first row of the Bitwise chiplet. - /// Lookup values come from the last row of each bitwise operation cycle which contains both the - /// aggregated input values and the output result. - pub fn fill_trace( - self, - trace: &mut TraceFragment, - chiplets_bus: &mut ChipletsBus, - bitwise_start_row: usize, - ) { + /// Fills the provided trace fragment with trace data from this bitwise helper instance. + pub fn fill_trace(self, trace: &mut TraceFragment) { // make sure fragment dimensions are consistent with the dimensions of this trace debug_assert_eq!(self.trace_len(), trace.len(), "inconsistent trace lengths"); debug_assert_eq!(TRACE_WIDTH, trace.width(), "inconsistent trace widths"); - // provide the lookup data from the last row in each bitwise cycle - for row in ((OP_CYCLE_LEN - 1)..self.trace_len()).step_by(OP_CYCLE_LEN) { - let a = self.trace[A_COL_IDX][row]; - let b = self.trace[B_COL_IDX][row]; - let z = self.trace[OUTPUT_COL_IDX][row]; - - // get the operation label. 
- let op_selector: Felt = self.trace[0][row]; - let label = if op_selector == BITWISE_AND { - BITWISE_AND_LABEL - } else { - assert!( - op_selector == BITWISE_XOR, - "Unrecognized operation selectors in Bitwise chiplet" - ); - BITWISE_XOR_LABEL - }; - - let lookup = BitwiseLookup::new(label, a, b, z); - chiplets_bus.provide_bitwise_operation(lookup, (bitwise_start_row + row) as u32); - } - // copy trace into the fragment column-by-column // TODO: this can be parallelized to copy columns in multiple threads for (out_column, column) in trace.columns().zip(self.trace) { @@ -239,36 +205,3 @@ pub fn assert_u32(value: Felt) -> Result { Ok(value) } } - -// BITWISE LOOKUPS -// ================================================================================================ -#[derive(Debug, Copy, Clone, PartialEq, Eq)] -pub struct BitwiseLookup { - // unique label identifying the bitwise operation - label: Felt, - a: Felt, - b: Felt, - z: Felt, -} - -impl BitwiseLookup { - pub fn new(label: Felt, a: Felt, b: Felt, z: Felt) -> Self { - Self { label, a, b, z } - } -} - -impl LookupTableRow for BitwiseLookup { - /// Reduces this row to a single field element in the field specified by E. This requires - /// at least 5 alpha values. 
- fn to_value>( - &self, - _main_trace: &ColMatrix, - alphas: &[E], - ) -> E { - alphas[0] - + alphas[1].mul_base(self.label) - + alphas[2].mul_base(self.a) - + alphas[3].mul_base(self.b) - + alphas[4].mul_base(self.z) - } -} diff --git a/processor/src/chiplets/bitwise/tests.rs b/processor/src/chiplets/bitwise/tests.rs index c590359a67..ece71de5ae 100644 --- a/processor/src/chiplets/bitwise/tests.rs +++ b/processor/src/chiplets/bitwise/tests.rs @@ -1,8 +1,7 @@ -use super::{ - super::aux_trace::{ChipletLookup, ChipletsBusRow}, - Bitwise, BitwiseLookup, ChipletsBus, Felt, StarkField, TraceFragment, Vec, A_COL_IDX, - A_COL_RANGE, BITWISE_AND, BITWISE_AND_LABEL, BITWISE_XOR, BITWISE_XOR_LABEL, B_COL_IDX, - B_COL_RANGE, OP_CYCLE_LEN, OUTPUT_COL_IDX, PREV_OUTPUT_COL_IDX, TRACE_WIDTH, +use super::{Bitwise, Felt, StarkField, TraceFragment, Vec}; +use miden_air::trace::chiplets::bitwise::{ + A_COL_IDX, A_COL_RANGE, BITWISE_AND, BITWISE_XOR, B_COL_IDX, B_COL_RANGE, OP_CYCLE_LEN, + OUTPUT_COL_IDX, PREV_OUTPUT_COL_IDX, TRACE_WIDTH, }; use test_utils::rand::rand_value; use vm_core::ZERO; @@ -24,7 +23,7 @@ fn bitwise_and() { assert_eq!(a.as_int() & b.as_int(), result.as_int()); // --- check generated trace ---------------------------------------------- - let (trace, chiplets_bus) = build_trace(bitwise, OP_CYCLE_LEN); + let trace = build_trace(bitwise, OP_CYCLE_LEN); // make sure the selector values specify bitwise AND at each step in the trace for row in 0..OP_CYCLE_LEN { @@ -54,11 +53,6 @@ fn bitwise_and() { prev_result = result; } - - // make sure the lookup was sent to the bus correctly - let bitwise_lookup = - BitwiseLookup::new(BITWISE_AND_LABEL, a, b, Felt::new(a.as_int() & b.as_int())); - verify_bus(&chiplets_bus, 0, (OP_CYCLE_LEN - 1) as u32, &bitwise_lookup); } #[test] @@ -72,7 +66,7 @@ fn bitwise_xor() { assert_eq!(a.as_int() ^ b.as_int(), result.as_int()); // --- check generated trace ---------------------------------------------- - let (trace, chiplets_bus) = 
build_trace(bitwise, OP_CYCLE_LEN); + let trace = build_trace(bitwise, OP_CYCLE_LEN); // make sure the selector values specify bitwise XOR at each step in the trace for row in 0..OP_CYCLE_LEN { @@ -102,11 +96,6 @@ fn bitwise_xor() { prev_result = result; } - - // make sure the lookup was sent to the bus correctly - let bitwise_lookup = - BitwiseLookup::new(BITWISE_XOR_LABEL, a, b, Felt::new(a.as_int() ^ b.as_int())); - verify_bus(&chiplets_bus, 0, (OP_CYCLE_LEN - 1) as u32, &bitwise_lookup); } #[test] @@ -129,7 +118,7 @@ fn bitwise_multiple() { assert_eq!(a[2].as_int() & b[2].as_int(), result2.as_int()); // --- check generated trace ---------------------------------------------- - let (trace, chiplets_bus) = build_trace(bitwise, 3 * OP_CYCLE_LEN); + let trace = build_trace(bitwise, 3 * OP_CYCLE_LEN); // make sure results and results from the trace are the same assert_eq!(result0, trace[OUTPUT_COL_IDX][OP_CYCLE_LEN - 1]); @@ -189,32 +178,18 @@ fn bitwise_multiple() { prev_result = result; } - - // make sure the lookups were sent to the bus correctly - let bitwise_lookup = - BitwiseLookup::new(BITWISE_AND_LABEL, a[0], b[0], Felt::new(a[0].as_int() & b[0].as_int())); - verify_bus(&chiplets_bus, 0, (OP_CYCLE_LEN - 1) as u32, &bitwise_lookup); - - let bitwise_lookup = - BitwiseLookup::new(BITWISE_XOR_LABEL, a[1], b[1], Felt::new(a[1].as_int() ^ b[1].as_int())); - verify_bus(&chiplets_bus, 1, (OP_CYCLE_LEN * 2 - 1) as u32, &bitwise_lookup); - - let bitwise_lookup = - BitwiseLookup::new(BITWISE_AND_LABEL, a[2], b[2], Felt::new(a[2].as_int() & b[2].as_int())); - verify_bus(&chiplets_bus, 2, (OP_CYCLE_LEN * 3 - 1) as u32, &bitwise_lookup); } // HELPER FUNCTIONS // ================================================================================================ /// Builds a trace of the specified length and fills it with data from the provided Bitwise instance. 
-fn build_trace(bitwise: Bitwise, num_rows: usize) -> (Vec>, ChipletsBus) { - let mut chiplets_bus = ChipletsBus::default(); +fn build_trace(bitwise: Bitwise, num_rows: usize) -> Vec> { let mut trace = (0..TRACE_WIDTH).map(|_| vec![ZERO; num_rows]).collect::>(); let mut fragment = TraceFragment::trace_to_fragment(&mut trace); - bitwise.fill_trace(&mut fragment, &mut chiplets_bus, 0); + bitwise.fill_trace(&mut fragment); - (trace, chiplets_bus) + trace } fn check_decomposition(trace: &[Vec], start: usize, a: u64, b: u64) { @@ -253,21 +228,3 @@ fn rand_u32() -> Felt { let value = rand_value::() as u32 as u64; Felt::new(value) } - -/// Verifies that the chiplet bus received the specified BitwiseLookup response at `cycle` which was -/// added to the list of responses at `index`. -fn verify_bus( - chiplets_bus: &ChipletsBus, - index: usize, - cycle: u32, - bitwise_lookup: &BitwiseLookup, -) { - let expected_lookup = ChipletLookup::Bitwise(*bitwise_lookup); - let expected_hint = ChipletsBusRow::new(&[], Some(index as u32)); - - let lookup = chiplets_bus.get_response_row(index); - let hint = chiplets_bus.get_lookup_hint(cycle).unwrap(); - - assert_eq!(expected_lookup, lookup); - assert_eq!(&expected_hint, hint); -} diff --git a/processor/src/chiplets/hasher/lookups.rs b/processor/src/chiplets/hasher/lookups.rs deleted file mode 100644 index 98db0ea177..0000000000 --- a/processor/src/chiplets/hasher/lookups.rs +++ /dev/null @@ -1,197 +0,0 @@ -use super::{ColMatrix, Felt, FieldElement, LookupTableRow, StarkField, Vec, ZERO}; -use core::ops::Range; -use miden_air::trace::chiplets::{ - hasher::{ - CAPACITY_LEN, DIGEST_LEN, DIGEST_RANGE, LINEAR_HASH_LABEL, MP_VERIFY_LABEL, - MR_UPDATE_NEW_LABEL, MR_UPDATE_OLD_LABEL, RATE_LEN, RETURN_HASH_LABEL, RETURN_STATE_LABEL, - STATE_WIDTH, - }, - HASHER_RATE_COL_RANGE, HASHER_STATE_COL_RANGE, -}; - -// CONSTANTS -// ================================================================================================ -const NUM_HEADER_ALPHAS: 
usize = 4; - -// HASHER LOOKUPS -// ================================================================================================ - -/// Specifies the context of the [HasherLookup], indicating whether it describes the beginning of a -/// hash operation, the return of a specified result, or the absorption of additional elements, -/// initiating a new hash cycle with the provided [HasherState]. -#[derive(Debug, Copy, Clone, PartialEq, Eq)] -pub enum HasherLookupContext { - Start, - Absorb, - Return, -} - -/// Contains the data required to describe and verify hash operations. -#[derive(Debug, Copy, Clone, PartialEq, Eq)] -pub struct HasherLookup { - // unique label identifying the hash operation - label: u8, - // row address in the Hasher table - addr: u32, - // node index - index: Felt, - // context - context: HasherLookupContext, -} - -impl HasherLookup { - /// Creates a new HasherLookup. - pub(super) fn new(label: u8, addr: u32, index: Felt, context: HasherLookupContext) -> Self { - Self { - label, - addr, - index, - context, - } - } - - /// The cycle at which the lookup is provided by the hasher. - pub fn cycle(&self) -> u32 { - // the hasher's addresses start from one instead of zero, so the cycle at which each lookup - // is provided is one less than its address - self.addr - 1 - } - - /// Returns the common header value which describes this hash operation. It is a combination of - /// the transition label, the row address, and the node index. - fn get_header_value>(&self, alphas: &[E]) -> E { - let transition_label = match self.context { - HasherLookupContext::Start => E::from(self.label) + E::from(16_u8), - _ => E::from(self.label) + E::from(32_u8), - }; - - alphas[0] - + alphas[1].mul(transition_label) - + alphas[2].mul(E::from(self.addr)) - + alphas[3].mul_base(self.index) - } -} - -impl LookupTableRow for HasherLookup { - /// Reduces this row to a single field element in the field specified by E. This requires - /// at least 16 alpha values. 
- fn to_value>( - &self, - main_trace: &ColMatrix, - alphas: &[E], - ) -> E { - let header = self.get_header_value(&alphas[..NUM_HEADER_ALPHAS]); - // computing the rest of the value requires an alpha for each element in the [HasherState] - let alphas = &alphas[NUM_HEADER_ALPHAS..(NUM_HEADER_ALPHAS + STATE_WIDTH)]; - - match self.context { - HasherLookupContext::Start => { - if self.label == LINEAR_HASH_LABEL { - // include the entire state when initializing a linear hash. - header - + build_value( - alphas, - &get_hasher_state_at(self.addr, main_trace, 0..STATE_WIDTH), - ) - } else { - let state = - &get_hasher_state_at(self.addr, main_trace, CAPACITY_LEN..STATE_WIDTH); - assert!( - self.label == MR_UPDATE_OLD_LABEL - || self.label == MR_UPDATE_NEW_LABEL - || self.label == MP_VERIFY_LABEL, - "unrecognized hash operation" - ); - // build the leaf value by selecting from the left and right words of the state. - // the same alphas must be used in both cases, since whichever word is selected - // by the index bit will be the leaf node, and the value must be computed in - // the same way in both cases. - let bit = (self.index.as_int() >> 1) & 1; - let left_word = build_value(&alphas[DIGEST_RANGE], &state[..DIGEST_LEN]); - let right_word = build_value(&alphas[DIGEST_RANGE], &state[DIGEST_LEN..]); - - header + E::from(1 - bit).mul(left_word) + E::from(bit).mul(right_word) - } - } - HasherLookupContext::Absorb => { - assert!(self.label == LINEAR_HASH_LABEL, "unrecognized hash operation"); - let (curr_hasher_rate, next_hasher_rate) = - get_adjacent_hasher_rates(self.addr, main_trace); - // build the value from the delta of the hasher state's rate before and after the - // absorption of new elements. 
- let next_state_value = build_value(&alphas[CAPACITY_LEN..], &next_hasher_rate); - let state_value = build_value(&alphas[CAPACITY_LEN..], &curr_hasher_rate); - - header + next_state_value - state_value - } - HasherLookupContext::Return => { - if self.label == RETURN_STATE_LABEL { - // build the value from the result, which is the entire state - header - + build_value( - alphas, - &get_hasher_state_at(self.addr, main_trace, 0..STATE_WIDTH), - ) - } else { - assert!(self.label == RETURN_HASH_LABEL, "unrecognized hash operation"); - // build the value from the result, which is the digest portion of the state - header - + build_value( - &alphas[DIGEST_RANGE], - &get_hasher_state_at(self.addr, main_trace, DIGEST_RANGE), - ) - } - } - } - } -} - -// HELPER FUNCTIONS -// ================================================================================================ - -/// Reduces a slice of elements to a single field element in the field specified by E using a slice -/// of alphas of matching length. This can be used to build the value for a single word or for an -/// entire [HasherState]. -fn build_value>(alphas: &[E], elements: &[Felt]) -> E { - let mut value = E::ZERO; - for (&alpha, &element) in alphas.iter().zip(elements.iter()) { - value += alpha.mul_base(element); - } - value -} - -/// Returns the portion of the hasher state at the provided address that is within the provided -/// column range. -fn get_hasher_state_at( - addr: u32, - main_trace: &ColMatrix, - col_range: Range, -) -> Vec { - let row = get_row_from_addr(addr); - col_range - .map(|col| main_trace.get(HASHER_STATE_COL_RANGE.start + col, row)) - .collect::>() -} - -/// Returns the rate portion of the hasher state for the provided row and the next row. 
-fn get_adjacent_hasher_rates( - addr: u32, - main_trace: &ColMatrix, -) -> ([Felt; RATE_LEN], [Felt; RATE_LEN]) { - let row = get_row_from_addr(addr); - - let mut current = [ZERO; RATE_LEN]; - let mut next = [ZERO; RATE_LEN]; - for (idx, col_idx) in HASHER_RATE_COL_RANGE.enumerate() { - let column = main_trace.get_column(col_idx); - current[idx] = column[row]; - next[idx] = column[row + 1]; - } - - (current, next) -} - -/// Gets the row index from the specified row address. -fn get_row_from_addr(addr: u32) -> usize { - addr as usize - 1 -} diff --git a/processor/src/chiplets/hasher/mod.rs b/processor/src/chiplets/hasher/mod.rs index 6ffd083abf..8cbd343c4d 100644 --- a/processor/src/chiplets/hasher/mod.rs +++ b/processor/src/chiplets/hasher/mod.rs @@ -1,19 +1,12 @@ use super::{ - trace::LookupTableRow, BTreeMap, ChipletsVTableTraceBuilder, ColMatrix, Felt, FieldElement, - HasherState, MerklePath, MerkleRootUpdate, OpBatch, StarkField, TraceFragment, Vec, Word, ONE, - ZERO, + BTreeMap, Felt, HasherState, MerklePath, MerkleRootUpdate, OpBatch, StarkField, TraceFragment, + Vec, Word, ONE, ZERO, }; use miden_air::trace::chiplets::hasher::{ - Digest, Selectors, DIGEST_LEN, DIGEST_RANGE, HASH_CYCLE_LEN, LINEAR_HASH, LINEAR_HASH_LABEL, - MP_VERIFY, MP_VERIFY_LABEL, MR_UPDATE_NEW, MR_UPDATE_NEW_LABEL, MR_UPDATE_OLD, - MR_UPDATE_OLD_LABEL, RATE_LEN, RETURN_HASH, RETURN_HASH_LABEL, RETURN_STATE, - RETURN_STATE_LABEL, STATE_WIDTH, TRACE_WIDTH, + Digest, Selectors, DIGEST_LEN, DIGEST_RANGE, LINEAR_HASH, MP_VERIFY, MR_UPDATE_NEW, + MR_UPDATE_OLD, RATE_LEN, RETURN_HASH, RETURN_STATE, STATE_WIDTH, TRACE_WIDTH, }; -mod lookups; -pub use lookups::HasherLookup; -use lookups::HasherLookupContext; - mod trace; use trace::HasherTrace; @@ -55,9 +48,6 @@ mod tests; /// In addition to the execution trace, the hash chiplet also maintains: /// - an auxiliary trace builder, which can be used to construct a running product column describing /// the state of the sibling table (used in Merkle 
root update operations). -/// - a vector of [HasherLookup]s, each of which specifies the data for one of the lookup rows which -/// are required for verification of the communication between the stack/decoder and the Hash -/// Chiplet via the Chiplets Bus. /// - a map of memoized execution trace, which keeps track of start and end rows of the sections of /// the trace of a control or span block that can be copied to be used later for program blocks /// encountered with the same digest instead of building it from scratch everytime. The hash of @@ -65,7 +55,6 @@ mod tests; #[derive(Default)] pub struct Hasher { trace: HasherTrace, - aux_trace: ChipletsVTableTraceBuilder, memoized_trace_map: BTreeMap<[u8; 32], (usize, usize)>, } @@ -78,51 +67,25 @@ impl Hasher { self.trace.trace_len() } - /// Returns the [HasherLookup] from the provided label, index and context inputs. - #[inline(always)] - fn get_lookup(&self, label: u8, index: Felt, context: HasherLookupContext) -> HasherLookup { - let addr = match context { - // when starting a new hash operation, lookups are added before the operation begins. - HasherLookupContext::Start => self.trace.next_row_addr().as_int() as u32, - // in all other cases, they are added after the hash operation has completed. - _ => self.trace_len() as u32, - }; - HasherLookup::new(label, addr, index, context) - } - // HASHING METHODS // -------------------------------------------------------------------------------------------- /// Applies a single permutation of the hash function to the provided state and records the - /// execution trace of this computation as well as the lookups required for verifying the - /// correctness of the permutation so that they can be provided to the Chiplets Bus. + /// execution trace of this computation. /// /// The returned tuple contains the hasher state after the permutation and the row address of /// the execution trace at which the permutation started. 
- pub(super) fn permute( - &mut self, - mut state: HasherState, - lookups: &mut Vec, - ) -> (Felt, HasherState) { + pub(super) fn permute(&mut self, mut state: HasherState) -> (Felt, HasherState) { let addr = self.trace.next_row_addr(); - // add the lookup for the hash initialization. - let lookup = self.get_lookup(LINEAR_HASH_LABEL, ZERO, HasherLookupContext::Start); - lookups.push(lookup); - // perform the hash. self.trace.append_permutation(&mut state, LINEAR_HASH, RETURN_STATE); - // add the lookup for the hash result. - let lookup = self.get_lookup(RETURN_STATE_LABEL, ZERO, HasherLookupContext::Return); - lookups.push(lookup); - (addr, state) } /// Computes the hash of the control block by computing hash(h1, h2) and returns the result. - /// It also records the execution trace of this computation as well as the lookups required for - /// verifying its correctness so that they can be provided to the Chiplets Bus. + /// It also records the execution trace of this computation. /// /// The returned tuple also contains the row address of the execution trace at which the hash /// computation started. @@ -132,15 +95,10 @@ impl Hasher { h2: Word, domain: Felt, expected_hash: Digest, - lookups: &mut Vec, ) -> (Felt, Word) { let addr = self.trace.next_row_addr(); let mut state = init_state_from_words_with_domain(&h1, &h2, domain); - // add the lookup for the hash initialization. - let lookup = self.get_lookup(LINEAR_HASH_LABEL, ZERO, HasherLookupContext::Start); - lookups.push(lookup); - if let Some((start_row, end_row)) = self.get_memoized_trace(expected_hash) { // copy the trace of a block with same hash instead of building it again. self.trace.copy_trace(&mut state, *start_row..*end_row); @@ -151,18 +109,13 @@ impl Hasher { self.insert_to_memoized_trace_map(addr, expected_hash); }; - // add the lookup for the hash result. 
- let lookup = self.get_lookup(RETURN_HASH_LABEL, ZERO, HasherLookupContext::Return); - lookups.push(lookup); - let result = get_digest(&state); (addr, result) } /// Computes a sequential hash of all operation batches in the list and returns the result. It - /// also records the execution trace of this computation, as well as the lookups required for - /// verifying its correctness so that they can be provided to the Chiplets Bus. + /// also records the execution trace of this computation. /// /// The returned tuple also contains the row address of the execution trace at which the hash /// computation started. @@ -170,17 +123,13 @@ impl Hasher { &mut self, op_batches: &[OpBatch], expected_hash: Digest, - lookups: &mut Vec, ) -> (Felt, Word) { const START: Selectors = LINEAR_HASH; - const START_LABEL: u8 = LINEAR_HASH_LABEL; const RETURN: Selectors = RETURN_HASH; - const RETURN_LABEL: u8 = RETURN_HASH_LABEL; // absorb selectors are the same as linear hash selectors, but absorb selectors are // applied on the last row of a permutation cycle, while linear hash selectors are // applied on the first row of a permutation cycle. const ABSORB: Selectors = LINEAR_HASH; - const ABSORB_LABEL: u8 = LINEAR_HASH_LABEL; // to continue linear hash we need retain the 2nd and 3rd selector flags and set the // 1st flag to ZERO. const CONTINUE: Selectors = [ZERO, LINEAR_HASH[1], LINEAR_HASH[2]]; @@ -190,10 +139,6 @@ impl Hasher { // initialize the state and absorb the first operation batch into it let mut state = init_state(op_batches[0].groups(), ZERO); - // add the lookup for the hash initialization. - let lookup = self.get_lookup(START_LABEL, ZERO, HasherLookupContext::Start); - lookups.push(lookup); - // check if a span block with same hash has been encountered before in which case we can // directly copy it's trace. 
let (start_row, end_row, is_memoized) = @@ -226,53 +171,24 @@ impl Hasher { for batch in op_batches.iter().take(num_batches - 1).skip(1) { absorb_into_state(&mut state, batch.groups()); - // add the lookup for absorbing the next operation batch. - let lookup = self.get_lookup(ABSORB_LABEL, ZERO, HasherLookupContext::Absorb); - lookups.push(lookup); - self.trace.append_permutation(&mut state, CONTINUE, ABSORB); } absorb_into_state(&mut state, op_batches[num_batches - 1].groups()); - // add the lookup for absorbing the final operation batch. - let lookup = self.get_lookup(ABSORB_LABEL, ZERO, HasherLookupContext::Absorb); - lookups.push(lookup); - self.trace.append_permutation(&mut state, CONTINUE, RETURN); } self.insert_to_memoized_trace_map(addr, expected_hash); - } else if num_batches == 1 { - self.trace.copy_trace(&mut state, start_row..end_row); } else { - for i in 1..num_batches { - // add the lookup for absorbing the next operation batch. Here we add the - // lookups before actually copying the memoized trace. - let lookup_addr = self.trace_len() + i * HASH_CYCLE_LEN; - let lookup = HasherLookup::new( - ABSORB_LABEL, - lookup_addr as u32, - ZERO, - HasherLookupContext::Absorb, - ); - lookups.push(lookup); - } - self.trace.copy_trace(&mut state, start_row..end_row); } - // add the lookup for the hash result. - let lookup = self.get_lookup(RETURN_LABEL, ZERO, HasherLookupContext::Return); - lookups.push(lookup); - let result = get_digest(&state); (addr, result) } - /// Performs Merkle path verification computation and records its execution trace, as well as - /// the lookups required for verifying its correctness so that they can be provided to the - /// Chiplets Bus. + /// Performs Merkle path verification computation and records its execution trace. /// /// The computation consists of computing a Merkle root of the specified path for a node with /// the specified value, located at the specified index. 
@@ -289,24 +205,16 @@ impl Hasher { value: Word, path: &MerklePath, index: Felt, - lookups: &mut Vec, ) -> (Felt, Word) { let addr = self.trace.next_row_addr(); - let root = self.verify_merkle_path( - value, - path, - index.as_int(), - MerklePathContext::MpVerify, - lookups, - ); + let root = + self.verify_merkle_path(value, path, index.as_int(), MerklePathContext::MpVerify); (addr, root) } - /// Performs Merkle root update computation and records its execution trace, as well as the - /// lookups required for verifying its correctness so that they can be provided to the Chiplets - /// Bus. + /// Performs Merkle root update computation and records its execution trace. /// /// The computation consists of two Merkle path verifications, one for the old value of the /// node (value before the update), and another for the new value (value after the update). @@ -321,25 +229,14 @@ impl Hasher { new_value: Word, path: &MerklePath, index: Felt, - lookups: &mut Vec, ) -> MerkleRootUpdate { let address = self.trace.next_row_addr(); let index = index.as_int(); - let old_root = self.verify_merkle_path( - old_value, - path, - index, - MerklePathContext::MrUpdateOld, - lookups, - ); - let new_root = self.verify_merkle_path( - new_value, - path, - index, - MerklePathContext::MrUpdateNew, - lookups, - ); + let old_root = + self.verify_merkle_path(old_value, path, index, MerklePathContext::MrUpdateOld); + let new_root = + self.verify_merkle_path(new_value, path, index, MerklePathContext::MrUpdateNew); MerkleRootUpdate { address, @@ -353,10 +250,8 @@ impl Hasher { /// Fills the provided trace fragment with trace data from this hasher trace instance. This /// also returns the trace builder for hasher-related auxiliary trace columns. 
- pub(super) fn fill_trace(self, trace: &mut TraceFragment) -> ChipletsVTableTraceBuilder { - self.trace.fill_trace(trace); - - self.aux_trace + pub(super) fn fill_trace(self, trace: &mut TraceFragment) { + self.trace.fill_trace(trace) } // HELPER METHODS @@ -365,8 +260,7 @@ impl Hasher { /// Computes a root of the provided Merkle path in the specified context. The path is assumed /// to be for a node with the specified value at the specified index. /// - /// This also records the execution trace of the Merkle path computation and all lookups - /// required for verifying its correctness. + /// This also records the execution trace of the Merkle path computation. /// /// # Panics /// Panics if: @@ -378,7 +272,6 @@ impl Hasher { path: &MerklePath, mut index: u64, context: MerklePathContext, - lookups: &mut Vec, ) -> Word { assert!(!path.is_empty(), "path is empty"); assert!( @@ -386,7 +279,6 @@ impl Hasher { "invalid index for the path" ); let mut root = value; - let mut depth = path.len() - 1; // determine selectors for the specified context let main_selectors = context.main_selectors(); @@ -395,41 +287,22 @@ impl Hasher { if path.len() == 1 { // handle path of length 1 separately because pattern for init and final selectors // is different from other cases - self.update_sibling_hints(context, index, path[0], depth); - self.verify_mp_leg(root, path[0], &mut index, main_selectors, RETURN_HASH, lookups) + self.verify_mp_leg(root, path[0], &mut index, main_selectors, RETURN_HASH) } else { // process the first node of the path; for this node, init and final selectors are // the same let sibling = path[0]; - self.update_sibling_hints(context, index, sibling, depth); - root = self.verify_mp_leg( - root, - sibling, - &mut index, - main_selectors, - main_selectors, - lookups, - ); - depth -= 1; + root = self.verify_mp_leg(root, sibling, &mut index, main_selectors, main_selectors); // process all other nodes, except for the last one for &sibling in &path[1..path.len() - 1] { 
- self.update_sibling_hints(context, index, sibling, depth); - root = self.verify_mp_leg( - root, - sibling, - &mut index, - part_selectors, - main_selectors, - lookups, - ); - depth -= 1; + root = + self.verify_mp_leg(root, sibling, &mut index, part_selectors, main_selectors); } // process the last node let sibling = path[path.len() - 1]; - self.update_sibling_hints(context, index, sibling, depth); - self.verify_mp_leg(root, sibling, &mut index, part_selectors, RETURN_HASH, lookups) + self.verify_mp_leg(root, sibling, &mut index, part_selectors, RETURN_HASH) } } @@ -437,11 +310,7 @@ impl Hasher { /// /// This function does the following: /// - Builds the initial hasher state based on the least significant bit of the index. - /// - Records the lookup required for verification of the hash initialization if the - /// `init_selectors` indicate that it is the beginning of the Merkle path verification. /// - Applies a permutation to this state and records the resulting trace. - /// - Records the lookup required for verification of the hash result if the `final_selectors` - /// indicate that it is the end of the Merkle path verification. /// - Returns the result of the permutation and updates the index by removing its least /// significant bit. fn verify_mp_leg( @@ -451,19 +320,11 @@ impl Hasher { index: &mut u64, init_selectors: Selectors, final_selectors: Selectors, - lookups: &mut Vec, ) -> Word { // build the hasher state based on the value of the least significant bit of the index let index_bit = *index & 1; let mut state = build_merge_state(&root, &sibling, index_bit); - // add the lookup for the hash initialization if this is the beginning. - let context = HasherLookupContext::Start; - if let Some(label) = get_selector_context_label(init_selectors, context) { - let lookup = self.get_lookup(label, Felt::new(*index), context); - lookups.push(lookup); - } - // determine values for the node index column for this permutation. 
if the first selector // of init_selectors is not ZERO (i.e., we are processing the first leg of the Merkle // path), the index for the first row is different from the index for the other rows; @@ -486,44 +347,9 @@ impl Hasher { // remove the least significant bit from the index and return hash result *index >>= 1; - // add the lookup for the hash result if this is the end. - let context = HasherLookupContext::Return; - if let Some(label) = get_selector_context_label(final_selectors, context) { - let lookup = self.get_lookup(label, Felt::new(*index), context); - lookups.push(lookup); - } - get_digest(&state) } - /// Records an update hint in the auxiliary trace builder to indicate whether the sibling was - /// consumed as a part of computing the new or the old Merkle root. This is relevant only for - /// the Merkle root update computation. - fn update_sibling_hints( - &mut self, - context: MerklePathContext, - index: u64, - sibling: Digest, - depth: usize, - ) { - let step = self.trace.trace_len() as u32; - match context { - MerklePathContext::MrUpdateOld => { - self.aux_trace.sibling_added(step, Felt::new(index), sibling.into()); - } - MerklePathContext::MrUpdateNew => { - // we use node depth as row offset here because siblings are added to the table - // in reverse order of their depth (i.e., the sibling with the greatest depth is - // added first). thus, when removing siblings from the table, we can find the right - // entry by looking at the n-th entry from the end of the table, where n is the - // node's depth (e.g., an entry for the sibling with depth 2, would be in the - // second entry from the end of the table). - self.aux_trace.sibling_removed(step, depth); - } - _ => (), - } - } - /// Checks if a trace for a program block already exists and returns the start and end rows /// of the memoized trace. Returns None otherwise. 
fn get_memoized_trace(&self, hash: Digest) -> Option<&(usize, usize)> { @@ -590,44 +416,6 @@ fn build_merge_state(a: &Word, b: &Word, index_bit: u64) -> HasherState { } } -/// Gets the label for the hash operation from the provided selectors and the specified context. -pub fn get_selector_context_label( - selectors: Selectors, - context: HasherLookupContext, -) -> Option { - match context { - HasherLookupContext::Start => { - if selectors == LINEAR_HASH { - Some(LINEAR_HASH_LABEL) - } else if selectors == MP_VERIFY { - Some(MP_VERIFY_LABEL) - } else if selectors == MR_UPDATE_OLD { - Some(MR_UPDATE_OLD_LABEL) - } else if selectors == MR_UPDATE_NEW { - Some(MR_UPDATE_NEW_LABEL) - } else { - None - } - } - HasherLookupContext::Return => { - if selectors == RETURN_HASH { - Some(RETURN_HASH_LABEL) - } else if selectors == RETURN_STATE { - Some(RETURN_STATE_LABEL) - } else { - None - } - } - _ => { - if selectors == LINEAR_HASH { - Some(LINEAR_HASH_LABEL) - } else { - None - } - } - } -} - // TODO: Move these to another file. 
// HASHER STATE MUTATORS diff --git a/processor/src/chiplets/hasher/tests.rs b/processor/src/chiplets/hasher/tests.rs index 09275089b4..26b7a80310 100644 --- a/processor/src/chiplets/hasher/tests.rs +++ b/processor/src/chiplets/hasher/tests.rs @@ -1,15 +1,11 @@ use super::{ - init_state_from_words, lookups::HasherLookupContext, Digest, Felt, Hasher, HasherLookup, - HasherState, MerklePath, Selectors, TraceFragment, Vec, Word, LINEAR_HASH, MP_VERIFY, - MR_UPDATE_NEW, MR_UPDATE_OLD, RETURN_HASH, RETURN_STATE, TRACE_WIDTH, -}; -use crate::chiplets::aux_trace::{ - ChipletsVTableRow, ChipletsVTableTraceBuilder, ChipletsVTableUpdate, + init_state_from_words, Digest, Felt, Hasher, HasherState, MerklePath, Selectors, TraceFragment, + Vec, Word, LINEAR_HASH, MP_VERIFY, MR_UPDATE_NEW, MR_UPDATE_OLD, RETURN_HASH, RETURN_STATE, + TRACE_WIDTH, }; + use miden_air::trace::chiplets::hasher::{ - DIGEST_LEN, HASH_CYCLE_LEN, LINEAR_HASH_LABEL, MP_VERIFY_LABEL, MR_UPDATE_NEW_LABEL, - MR_UPDATE_OLD_LABEL, NUM_ROUNDS, NUM_SELECTORS, RETURN_HASH_LABEL, RETURN_STATE_LABEL, - STATE_COL_RANGE, + DIGEST_LEN, HASH_CYCLE_LEN, NUM_ROUNDS, NUM_SELECTORS, STATE_COL_RANGE, }; use test_utils::rand::rand_array; use vm_core::{ @@ -29,27 +25,8 @@ fn hasher_permute() { // initialize the hasher and perform one permutation let mut hasher = Hasher::default(); let init_state: HasherState = rand_array(); - let mut lookups = Vec::new(); - let (addr, final_state) = hasher.permute(init_state, &mut lookups); - - let lookup_start_addr = 1; - // there should be two lookups for start and end rows of hasher operation - let expected_lookups_len = 2; - // make sure the lookups have correct labels, addresses, indices and contexts. 
- let expected_lookup_start = - HasherLookup::new(LINEAR_HASH_LABEL, lookup_start_addr, ZERO, HasherLookupContext::Start); - - let expected_lookup_end = HasherLookup::new( - RETURN_STATE_LABEL, - lookup_start_addr + HASH_CYCLE_LEN as u32 - 1, - ZERO, - HasherLookupContext::Return, - ); - check_lookups_validity( - lookups, - expected_lookups_len, - vec![expected_lookup_start, expected_lookup_end], - ); + + let (addr, final_state) = hasher.permute(init_state); // address of the permutation should be ONE (as hasher address starts at ONE) assert_eq!(ONE, addr); @@ -59,28 +36,23 @@ fn hasher_permute() { assert_eq!(expected_state, final_state); // build the trace - let (trace, aux_hints) = build_trace(hasher, 8); + let trace = build_trace(hasher, 8); // make sure the trace is correct check_selector_trace(&trace, 0, LINEAR_HASH, RETURN_STATE); check_hasher_state_trace(&trace, 0, init_state); assert_eq!(trace.last().unwrap(), &[ZERO; 8]); - // make sure aux hints for sibling table are empty - assert!(aux_hints.hints().is_empty()); - assert!(aux_hints.rows().is_empty()); - // --- test two permutations ---------------------------------------------- // initialize the hasher and perform two permutations let mut hasher = Hasher::default(); let init_state1: HasherState = rand_array(); - let mut lookups1 = Vec::new(); - let (addr1, final_state1) = hasher.permute(init_state1, &mut lookups1); - let mut lookups2 = Vec::new(); + let (addr1, final_state1) = hasher.permute(init_state1); + let init_state2: HasherState = rand_array(); - let (addr2, final_state2) = hasher.permute(init_state2, &mut lookups2); + let (addr2, final_state2) = hasher.permute(init_state2); // make sure the returned addresses are correct (they must be 8 rows apart) assert_eq!(ONE, addr1); @@ -94,7 +66,7 @@ fn hasher_permute() { assert_eq!(expected_state2, final_state2); // build the trace - let (trace, aux_hints) = build_trace(hasher, 16); + let trace = build_trace(hasher, 16); // make sure the trace is correct 
check_selector_trace(&trace, 0, LINEAR_HASH, RETURN_STATE); @@ -102,10 +74,6 @@ fn hasher_permute() { check_hasher_state_trace(&trace, 0, init_state1); check_hasher_state_trace(&trace, 8, init_state2); assert_eq!(trace.last().unwrap(), &[ZERO; 16]); - - // make sure aux hints for sibling table are empty - assert!(aux_hints.hints().is_empty()); - assert!(aux_hints.rows().is_empty()); } // MERKLE TREE TESTS @@ -122,51 +90,15 @@ fn hasher_build_merkle_root() { // initialize the hasher and perform two Merkle branch verifications let mut hasher = Hasher::default(); let path0 = tree.get_path(NodeIndex::new(1, 0).unwrap()).unwrap(); - let mut lookups = Vec::new(); - hasher.build_merkle_root(leaves[0], &path0, ZERO, &mut lookups); - - // there should be two lookups for start and end rows of hasher operation - let expected_lookups_len = 2; - // make sure the lookups have correct labels, addresses, indices and contexts. - let lookup_start_addr = 1; - let expected_lookup_start = - HasherLookup::new(MP_VERIFY_LABEL, lookup_start_addr, ZERO, HasherLookupContext::Start); - let expected_lookup_end = HasherLookup::new( - RETURN_HASH_LABEL, - lookup_start_addr + HASH_CYCLE_LEN as u32 - 1, - ZERO, - HasherLookupContext::Return, - ); - check_lookups_validity( - lookups, - expected_lookups_len, - vec![expected_lookup_start, expected_lookup_end], - ); + + hasher.build_merkle_root(leaves[0], &path0, ZERO); let path1 = tree.get_path(NodeIndex::new(1, 1).unwrap()).unwrap(); - let mut lookups = Vec::new(); - hasher.build_merkle_root(leaves[1], &path1, ONE, &mut lookups); - - let lookup_start_addr = 9; - // there should be two lookups for start and end rows of hasher operation - let expected_lookups_len = 2; - // make sure the lookups have correct labels, addresses, indices and contexts. 
- let expected_lookup_start = - HasherLookup::new(MP_VERIFY_LABEL, lookup_start_addr, ONE, HasherLookupContext::Start); - let expected_lookup_end = HasherLookup::new( - RETURN_HASH_LABEL, - lookup_start_addr + HASH_CYCLE_LEN as u32 - 1, - ZERO, - HasherLookupContext::Return, - ); - check_lookups_validity( - lookups, - expected_lookups_len, - vec![expected_lookup_start, expected_lookup_end], - ); + + hasher.build_merkle_root(leaves[1], &path1, ONE); // build the trace - let (trace, aux_hints) = build_trace(hasher, 16); + let trace = build_trace(hasher, 16); // make sure the trace is correct check_selector_trace(&trace, 0, MP_VERIFY, RETURN_HASH); @@ -178,10 +110,6 @@ fn hasher_build_merkle_root() { assert_eq!(node_idx_column[8], ONE); assert_eq!(&node_idx_column[9..], &[ZERO; 7]); - // make sure aux hints for sibling table are empty - assert!(aux_hints.hints().is_empty()); - assert!(aux_hints.rows().is_empty()); - // --- Merkle tree with 8 leaves ------------------------------------------ // build a Merkle tree @@ -191,154 +119,39 @@ fn hasher_build_merkle_root() { // initialize the hasher and perform one Merkle branch verifications let mut hasher = Hasher::default(); let path = tree.get_path(NodeIndex::new(3, 5).unwrap()).unwrap(); - let mut lookups = Vec::new(); - hasher.build_merkle_root(leaves[5], &path, Felt::new(5), &mut lookups); - - let lookup_start_addr = 1; - // there should be two lookups for start and end rows of hasher operation - let expected_lookups_len = 2; - // make sure the lookups have correct labels, addresses, indices and contexts. 
- let expected_lookup_start = HasherLookup::new( - MP_VERIFY_LABEL, - lookup_start_addr, - Felt::new(5), - HasherLookupContext::Start, - ); - let expected_lookup_end = HasherLookup::new( - RETURN_HASH_LABEL, - lookup_start_addr + 3 * HASH_CYCLE_LEN as u32 - 1, - ZERO, - HasherLookupContext::Return, - ); - check_lookups_validity( - lookups, - expected_lookups_len, - vec![expected_lookup_start, expected_lookup_end], - ); + hasher.build_merkle_root(leaves[5], &path, Felt::new(5)); // build and check the trace for validity - let (trace, aux_hints) = build_trace(hasher, 24); + let trace = build_trace(hasher, 24); check_merkle_path(&trace, 0, leaves[5], &path, 5, MP_VERIFY); - // make sure aux hints for sibling table are empty - assert!(aux_hints.hints().is_empty()); - assert!(aux_hints.rows().is_empty()); - // --- Merkle tree with 8 leaves (multiple branches) ---------------------- // initialize the hasher and perform one Merkle branch verifications let mut hasher = Hasher::default(); let path0 = tree.get_path(NodeIndex::new(3, 0).unwrap()).unwrap(); - let mut lookups = Vec::new(); - hasher.build_merkle_root(leaves[0], &path0, ZERO, &mut lookups); - - let lookup_start_addr = 1; - // there should be two lookups for start and end rows of hasher operation - let expected_lookups_len = 2; - // make sure the lookups have correct labels, addresses, indices and contexts. 
- let expected_lookup_start = - HasherLookup::new(MP_VERIFY_LABEL, lookup_start_addr, ZERO, HasherLookupContext::Start); - let expected_lookup_end = HasherLookup::new( - RETURN_HASH_LABEL, - lookup_start_addr + 3 * HASH_CYCLE_LEN as u32 - 1, - ZERO, - HasherLookupContext::Return, - ); - check_lookups_validity( - lookups, - expected_lookups_len, - vec![expected_lookup_start, expected_lookup_end], - ); + + hasher.build_merkle_root(leaves[0], &path0, ZERO); let path3 = tree.get_path(NodeIndex::new(3, 3).unwrap()).unwrap(); - let mut lookups = Vec::new(); - hasher.build_merkle_root(leaves[3], &path3, Felt::new(3), &mut lookups); - - let lookup_start_addr = 25; - // there should be two lookups for start and end rows of hasher operation - let expected_lookups_len = 2; - // make sure the lookups have correct labels, addresses, indices and contexts. - let expected_lookup_start = HasherLookup::new( - MP_VERIFY_LABEL, - lookup_start_addr, - Felt::new(3), - HasherLookupContext::Start, - ); - let expected_lookup_end = HasherLookup::new( - RETURN_HASH_LABEL, - lookup_start_addr + 3 * HASH_CYCLE_LEN as u32 - 1, - ZERO, - HasherLookupContext::Return, - ); - check_lookups_validity( - lookups, - expected_lookups_len, - vec![expected_lookup_start, expected_lookup_end], - ); + + hasher.build_merkle_root(leaves[3], &path3, Felt::new(3)); let path7 = tree.get_path(NodeIndex::new(3, 7).unwrap()).unwrap(); - let mut lookups = Vec::new(); - hasher.build_merkle_root(leaves[7], &path7, Felt::new(7), &mut lookups); - - let lookup_start_addr = 49; - // there should be two lookups for start and end rows of hasher operation - let expected_lookups_len = 2; - // make sure the lookups have correct labels, addresses, indices and contexts. 
- let expected_lookup_start = HasherLookup::new( - MP_VERIFY_LABEL, - lookup_start_addr, - Felt::new(7), - HasherLookupContext::Start, - ); - let expected_lookup_end = HasherLookup::new( - RETURN_HASH_LABEL, - lookup_start_addr + 3 * HASH_CYCLE_LEN as u32 - 1, - ZERO, - HasherLookupContext::Return, - ); - check_lookups_validity( - lookups, - expected_lookups_len, - vec![expected_lookup_start, expected_lookup_end], - ); + + hasher.build_merkle_root(leaves[7], &path7, Felt::new(7)); // path3 again - let mut lookups = Vec::new(); - hasher.build_merkle_root(leaves[3], &path3, Felt::new(3), &mut lookups); - - let lookup_start_addr = 73; - // there should be two lookups for start and end rows of hasher operation - let expected_lookups_len = 2; - // make sure the lookups have correct labels, addresses, indices and contexts. - let expected_lookup_start = HasherLookup::new( - MP_VERIFY_LABEL, - lookup_start_addr, - Felt::new(3), - HasherLookupContext::Start, - ); - let expected_lookup_end = HasherLookup::new( - RETURN_HASH_LABEL, - lookup_start_addr + 3 * HASH_CYCLE_LEN as u32 - 1, - ZERO, - HasherLookupContext::Return, - ); - check_lookups_validity( - lookups, - expected_lookups_len, - vec![expected_lookup_start, expected_lookup_end], - ); + + hasher.build_merkle_root(leaves[3], &path3, Felt::new(3)); // build and check the trace for validity - let (trace, aux_hints) = build_trace(hasher, 96); + let trace = build_trace(hasher, 96); check_merkle_path(&trace, 0, leaves[0], &path0, 0, MP_VERIFY); check_merkle_path(&trace, 24, leaves[3], &path3, 3, MP_VERIFY); check_merkle_path(&trace, 48, leaves[7], &path7, 7, MP_VERIFY); check_merkle_path(&trace, 72, leaves[3], &path3, 3, MP_VERIFY); - - // make sure aux hints for sibling table are empty - assert!(aux_hints.hints().is_empty()); - assert!(aux_hints.rows().is_empty()); } #[test] @@ -354,71 +167,18 @@ fn hasher_update_merkle_root() { let path0 = tree.get_path(NodeIndex::new(1, 0).unwrap()).unwrap(); let new_leaf0 = 
init_leaf(3); - let mut lookups = Vec::new(); - let lookup_start_addr = 1; - hasher.update_merkle_root(leaves[0], new_leaf0, &path0, ZERO, &mut lookups); - tree.update_leaf(0, new_leaf0).unwrap(); - let expected_lookups_len = 4; - // make sure the lookups have correct labels, addresses, indices and contexts. - let expected_lookups = vec![ - HasherLookup::new(MR_UPDATE_OLD_LABEL, lookup_start_addr, ZERO, HasherLookupContext::Start), - HasherLookup::new( - RETURN_HASH_LABEL, - lookup_start_addr + HASH_CYCLE_LEN as u32 - 1, - ZERO, - HasherLookupContext::Return, - ), - HasherLookup::new( - MR_UPDATE_NEW_LABEL, - lookup_start_addr + HASH_CYCLE_LEN as u32, - ZERO, - HasherLookupContext::Start, - ), - HasherLookup::new( - RETURN_HASH_LABEL, - lookup_start_addr + 2 * HASH_CYCLE_LEN as u32 - 1, - ZERO, - HasherLookupContext::Return, - ), - ]; - check_lookups_validity(lookups, expected_lookups_len, expected_lookups); + hasher.update_merkle_root(leaves[0], new_leaf0, &path0, ZERO); + tree.update_leaf(0, new_leaf0).unwrap(); let path1 = tree.get_path(NodeIndex::new(1, 1).unwrap()).unwrap(); let new_leaf1 = init_leaf(4); - let mut lookups = Vec::new(); - hasher.update_merkle_root(leaves[1], new_leaf1, &path1, ONE, &mut lookups); + hasher.update_merkle_root(leaves[1], new_leaf1, &path1, ONE); tree.update_leaf(1, new_leaf1).unwrap(); - let lookup_start_addr = 17; - let expected_lookups_len = 4; - // make sure the lookups have correct labels, addresses, indices and contexts. 
- let expected_lookups = vec![ - HasherLookup::new(MR_UPDATE_OLD_LABEL, lookup_start_addr, ONE, HasherLookupContext::Start), - HasherLookup::new( - RETURN_HASH_LABEL, - lookup_start_addr + HASH_CYCLE_LEN as u32 - 1, - ZERO, - HasherLookupContext::Return, - ), - HasherLookup::new( - MR_UPDATE_NEW_LABEL, - lookup_start_addr + HASH_CYCLE_LEN as u32, - ONE, - HasherLookupContext::Start, - ), - HasherLookup::new( - RETURN_HASH_LABEL, - lookup_start_addr + 2 * HASH_CYCLE_LEN as u32 - 1, - ZERO, - HasherLookupContext::Return, - ), - ]; - check_lookups_validity(lookups, expected_lookups_len, expected_lookups); - // build the trace - let (trace, aux_hints) = build_trace(hasher, 32); + let trace = build_trace(hasher, 32); // make sure the trace is correct check_selector_trace(&trace, 0, MR_UPDATE_OLD, RETURN_HASH); @@ -436,23 +196,6 @@ fn hasher_update_merkle_root() { assert_eq!(node_idx_column[24], ONE); assert_eq!(&node_idx_column[25..], &[ZERO; 7]); - // make sure sibling table hints were built correctly - let expected_hints = vec![ - // first update - (0, ChipletsVTableUpdate::SiblingAdded(0)), - (8, ChipletsVTableUpdate::SiblingRemoved(0)), - // second update - (16, ChipletsVTableUpdate::SiblingAdded(1)), - (24, ChipletsVTableUpdate::SiblingRemoved(1)), - ]; - assert_eq!(expected_hints, aux_hints.hints()); - - let expected_sibling_rows = vec![ - ChipletsVTableRow::new_sibling(ZERO, path0[0].into()), - ChipletsVTableRow::new_sibling(ONE, path1[0].into()), - ]; - assert_eq!(expected_sibling_rows, aux_hints.rows()); - // --- Merkle tree with 8 leaves ------------------------------------------ // build a Merkle tree @@ -464,167 +207,30 @@ fn hasher_update_merkle_root() { let path3 = tree.get_path(NodeIndex::new(3, 3).unwrap()).unwrap(); let new_leaf3 = init_leaf(23); - let mut lookups = Vec::new(); - hasher.update_merkle_root(leaves[3], new_leaf3, &path3, Felt::new(3), &mut lookups); - tree.update_leaf(3, new_leaf3).unwrap(); - let lookup_start_addr = 1; - let 
expected_lookups_len = 4; - // make sure the lookups have correct labels, addresses, indices and contexts. - let expected_lookups = vec![ - HasherLookup::new( - MR_UPDATE_OLD_LABEL, - lookup_start_addr, - Felt::new(3), - HasherLookupContext::Start, - ), - HasherLookup::new( - RETURN_HASH_LABEL, - lookup_start_addr + 3 * HASH_CYCLE_LEN as u32 - 1, - ZERO, - HasherLookupContext::Return, - ), - HasherLookup::new( - MR_UPDATE_NEW_LABEL, - lookup_start_addr + 3 * HASH_CYCLE_LEN as u32, - Felt::new(3), - HasherLookupContext::Start, - ), - HasherLookup::new( - RETURN_HASH_LABEL, - lookup_start_addr + 3 * HASH_CYCLE_LEN as u32 + 3 * HASH_CYCLE_LEN as u32 - 1, - ZERO, - HasherLookupContext::Return, - ), - ]; - check_lookups_validity(lookups, expected_lookups_len, expected_lookups); + hasher.update_merkle_root(leaves[3], new_leaf3, &path3, Felt::new(3)); + tree.update_leaf(3, new_leaf3).unwrap(); let path6 = tree.get_path(NodeIndex::new(3, 6).unwrap()).unwrap(); let new_leaf6 = init_leaf(25); - let mut lookups = Vec::new(); - hasher.update_merkle_root(leaves[6], new_leaf6, &path6, Felt::new(6), &mut lookups); + hasher.update_merkle_root(leaves[6], new_leaf6, &path6, Felt::new(6)); tree.update_leaf(6, new_leaf6).unwrap(); - let lookup_start_addr = 49; - let expected_lookups_len = 4; - // make sure the lookups have correct labels, addresses, indices and contexts. 
- let expected_lookups = vec![ - HasherLookup::new( - MR_UPDATE_OLD_LABEL, - lookup_start_addr, - Felt::new(6), - HasherLookupContext::Start, - ), - HasherLookup::new( - RETURN_HASH_LABEL, - lookup_start_addr + 3 * HASH_CYCLE_LEN as u32 - 1, - ZERO, - HasherLookupContext::Return, - ), - HasherLookup::new( - MR_UPDATE_NEW_LABEL, - lookup_start_addr + 3 * HASH_CYCLE_LEN as u32, - Felt::new(6), - HasherLookupContext::Start, - ), - HasherLookup::new( - RETURN_HASH_LABEL, - lookup_start_addr + 3 * HASH_CYCLE_LEN as u32 + 3 * HASH_CYCLE_LEN as u32 - 1, - ZERO, - HasherLookupContext::Return, - ), - ]; - check_lookups_validity(lookups, expected_lookups_len, expected_lookups); - // update leaf 3 again let path3_2 = tree.get_path(NodeIndex::new(3, 3).unwrap()).unwrap(); let new_leaf3_2 = init_leaf(27); - let mut lookups = Vec::new(); - hasher.update_merkle_root(new_leaf3, new_leaf3_2, &path3_2, Felt::new(3), &mut lookups); + hasher.update_merkle_root(new_leaf3, new_leaf3_2, &path3_2, Felt::new(3)); tree.update_leaf(3, new_leaf3_2).unwrap(); assert_ne!(path3, path3_2); - let lookup_start_addr = 97; - let expected_lookups_len = 4; - // make sure the lookups have correct labels, addresses, indices and contexts. 
- let expected_lookups = vec![ - HasherLookup::new( - MR_UPDATE_OLD_LABEL, - lookup_start_addr, - Felt::new(3), - HasherLookupContext::Start, - ), - HasherLookup::new( - RETURN_HASH_LABEL, - lookup_start_addr + 3 * HASH_CYCLE_LEN as u32 - 1, - ZERO, - HasherLookupContext::Return, - ), - HasherLookup::new( - MR_UPDATE_NEW_LABEL, - lookup_start_addr + 3 * HASH_CYCLE_LEN as u32, - Felt::new(3), - HasherLookupContext::Start, - ), - HasherLookup::new( - RETURN_HASH_LABEL, - lookup_start_addr + 3 * HASH_CYCLE_LEN as u32 + 3 * HASH_CYCLE_LEN as u32 - 1, - ZERO, - HasherLookupContext::Return, - ), - ]; - check_lookups_validity(lookups, expected_lookups_len, expected_lookups); - // build and check the trace for validity - let (trace, aux_hints) = build_trace(hasher, 144); + let trace = build_trace(hasher, 144); check_merkle_path(&trace, 0, leaves[3], &path3, 3, MR_UPDATE_OLD); check_merkle_path(&trace, 24, new_leaf3, &path3, 3, MR_UPDATE_NEW); check_merkle_path(&trace, 48, leaves[6], &path6, 6, MR_UPDATE_OLD); check_merkle_path(&trace, 72, new_leaf6, &path6, 6, MR_UPDATE_NEW); check_merkle_path(&trace, 96, new_leaf3, &path3_2, 3, MR_UPDATE_OLD); check_merkle_path(&trace, 120, new_leaf3_2, &path3_2, 3, MR_UPDATE_NEW); - - // make sure sibling table hints were built correctly - let expected_hints = vec![ - // first update - (0, ChipletsVTableUpdate::SiblingAdded(0)), - (8, ChipletsVTableUpdate::SiblingAdded(1)), - (16, ChipletsVTableUpdate::SiblingAdded(2)), - (24, ChipletsVTableUpdate::SiblingRemoved(0)), - (32, ChipletsVTableUpdate::SiblingRemoved(1)), - (40, ChipletsVTableUpdate::SiblingRemoved(2)), - // second update - (48, ChipletsVTableUpdate::SiblingAdded(3)), - (56, ChipletsVTableUpdate::SiblingAdded(4)), - (64, ChipletsVTableUpdate::SiblingAdded(5)), - (72, ChipletsVTableUpdate::SiblingRemoved(3)), - (80, ChipletsVTableUpdate::SiblingRemoved(4)), - (88, ChipletsVTableUpdate::SiblingRemoved(5)), - // third update - (96, ChipletsVTableUpdate::SiblingAdded(6)), - (104, 
ChipletsVTableUpdate::SiblingAdded(7)), - (112, ChipletsVTableUpdate::SiblingAdded(8)), - (120, ChipletsVTableUpdate::SiblingRemoved(6)), - (128, ChipletsVTableUpdate::SiblingRemoved(7)), - (136, ChipletsVTableUpdate::SiblingRemoved(8)), - ]; - assert_eq!(expected_hints, aux_hints.hints()); - - let expected_sibling_rows = vec![ - // first update - ChipletsVTableRow::new_sibling(Felt::new(3), path3[0].into()), - ChipletsVTableRow::new_sibling(Felt::new(3 >> 1), path3[1].into()), - ChipletsVTableRow::new_sibling(Felt::new(3 >> 2), path3[2].into()), - // second update - ChipletsVTableRow::new_sibling(Felt::new(6), path6[0].into()), - ChipletsVTableRow::new_sibling(Felt::new(6 >> 1), path6[1].into()), - ChipletsVTableRow::new_sibling(Felt::new(6 >> 2), path6[2].into()), - // third update - ChipletsVTableRow::new_sibling(Felt::new(3), path3_2[0].into()), - ChipletsVTableRow::new_sibling(Felt::new(3 >> 1), path3_2[1].into()), - ChipletsVTableRow::new_sibling(Felt::new(3 >> 2), path3_2[2].into()), - ]; - assert_eq!(expected_sibling_rows, aux_hints.rows()); } // MEMOIZATION TESTS @@ -659,23 +265,8 @@ fn hash_memoization_control_blocks() { let expected_hash = join_block.hash(); - let mut lookups = Vec::new(); // builds the trace of the join block. - let (_, final_state) = - hasher.hash_control_block(h1, h2, join_block.domain(), expected_hash, &mut lookups); - - let lookup_start_addr = 1; - let expected_lookups_len = 2; - // make sure the lookups have correct labels, addresses, indices and contexts. 
- let lookup_start = - HasherLookup::new(LINEAR_HASH_LABEL, lookup_start_addr, ZERO, HasherLookupContext::Start); - let lookup_end = HasherLookup::new( - RETURN_HASH_LABEL, - lookup_start_addr + HASH_CYCLE_LEN as u32 - 1, - ZERO, - HasherLookupContext::Return, - ); - check_lookups_validity(lookups, expected_lookups_len, vec![lookup_start, lookup_end]); + let (_, final_state) = hasher.hash_control_block(h1, h2, join_block.domain(), expected_hash); // make sure the hash of the final state is the same as the expected hash. assert_eq!(Digest::new(final_state), expected_hash); @@ -693,23 +284,9 @@ fn hash_memoization_control_blocks() { let expected_hash = split1_block.hash(); - let mut lookups = Vec::new(); // builds the hash execution trace of the first split block from scratch. let (addr, final_state) = - hasher.hash_control_block(h1, h2, split1_block.domain(), expected_hash, &mut lookups); - - let lookup_start_addr = 9; - let expected_lookups_len = 2; - // make sure the lookups have correct labels, addresses, indices and contexts. - let lookup_start = - HasherLookup::new(LINEAR_HASH_LABEL, lookup_start_addr, ZERO, HasherLookupContext::Start); - let lookup_end = HasherLookup::new( - RETURN_HASH_LABEL, - lookup_start_addr + HASH_CYCLE_LEN as u32 - 1, - ZERO, - HasherLookupContext::Return, - ); - check_lookups_validity(lookups, expected_lookups_len, vec![lookup_start, lookup_end]); + hasher.hash_control_block(h1, h2, split1_block.domain(), expected_hash); let first_block_final_state = final_state; @@ -732,24 +309,10 @@ fn hash_memoization_control_blocks() { .expect("Could not convert slice to array"); let expected_hash = split2_block.hash(); - let mut lookups = Vec::new(); // builds the hash execution trace of the second split block by copying it from the trace of // the first split block. 
let (addr, final_state) = - hasher.hash_control_block(h1, h2, split2_block.domain(), expected_hash, &mut lookups); - - let lookup_start_addr = 17; - let expected_lookups_len = 2; - // make sure the lookups have correct labels, addresses, indices and contexts. - let lookup_start = - HasherLookup::new(LINEAR_HASH_LABEL, lookup_start_addr, ZERO, HasherLookupContext::Start); - let lookup_end = HasherLookup::new( - RETURN_HASH_LABEL, - lookup_start_addr + HASH_CYCLE_LEN as u32 - 1, - ZERO, - HasherLookupContext::Return, - ); - check_lookups_validity(lookups, expected_lookups_len, vec![lookup_start, lookup_end]); + hasher.hash_control_block(h1, h2, split2_block.domain(), expected_hash); // make sure the hash of the final state of the second split block is the same as the expected // hash. @@ -760,7 +323,7 @@ fn hash_memoization_control_blocks() { let copied_start_row = addr.as_int() as usize - 1; let copied_end_row = hasher.trace_len() - 1; - let (trace, _) = build_trace(hasher, copied_end_row + 1); + let trace = build_trace(hasher, copied_end_row + 1); // check the row address at which memoized block starts. let hash_cycle_len: u64 = HASH_CYCLE_LEN.try_into().expect("Could not convert usize to u64"); @@ -857,23 +420,9 @@ fn hash_memoization_span_blocks_check(span_block: CodeBlock) { .expect("Could not convert slice to array"); let expected_hash = join1_block.hash(); - let mut lookups = Vec::new(); // builds the trace of the Join1 block. - let (_, final_state) = - hasher.hash_control_block(h1, h2, join1_block.domain(), expected_hash, &mut lookups); - - let lookup_start_addr = 1; - let expected_lookups_len = 2; - // make sure the lookups have correct labels, addresses, indices and contexts. 
- let lookup_start = - HasherLookup::new(LINEAR_HASH_LABEL, lookup_start_addr, ZERO, HasherLookupContext::Start); - let lookup_end = HasherLookup::new( - RETURN_HASH_LABEL, - lookup_start_addr + HASH_CYCLE_LEN as u32 - 1, - ZERO, - HasherLookupContext::Return, - ); - check_lookups_validity(lookups, expected_lookups_len, vec![lookup_start, lookup_end]); + let (_, final_state) = hasher.hash_control_block(h1, h2, join1_block.domain(), expected_hash); + // make sure the hash of the final state of Join1 is the same as the expected hash. assert_eq!(Digest::new(final_state), expected_hash); @@ -889,22 +438,7 @@ fn hash_memoization_span_blocks_check(span_block: CodeBlock) { .expect("Could not convert slice to array"); let expected_hash = join2_block.hash(); - let mut lookups = Vec::new(); - let (_, final_state) = - hasher.hash_control_block(h1, h2, join2_block.domain(), expected_hash, &mut lookups); - - let lookup_start_addr = 9; - let expected_lookups_len = 2; - // make sure the lookups have correct labels, addresses, indices and contexts. - let lookup_start = - HasherLookup::new(LINEAR_HASH_LABEL, lookup_start_addr, ZERO, HasherLookupContext::Start); - let lookup_end = HasherLookup::new( - RETURN_HASH_LABEL, - lookup_start_addr + HASH_CYCLE_LEN as u32 - 1, - ZERO, - HasherLookupContext::Return, - ); - check_lookups_validity(lookups, expected_lookups_len, vec![lookup_start, lookup_end]); + let (_, final_state) = hasher.hash_control_block(h1, h2, join2_block.domain(), expected_hash); // make sure the hash of the final state of Join2 is the same as the expected hash. assert_eq!(Digest::new(final_state), expected_hash); @@ -916,46 +450,10 @@ fn hash_memoization_span_blocks_check(span_block: CodeBlock) { }; // builds the hash execution trace of the first span block from scratch. 
- let mut lookups = Vec::new(); let (addr, final_state) = - hasher.hash_span_block(span1_block_val.op_batches(), span1_block.hash(), &mut lookups); - - let num_batches = span1_block_val.op_batches().len(); - let lookup_start_addr = 17; - - let expected_lookups_len = 2 + num_batches - 1; - - let mut expected_lookups = Vec::new(); - - // add lookup for start of span block - let lookup_start = - HasherLookup::new(LINEAR_HASH_LABEL, lookup_start_addr, ZERO, HasherLookupContext::Start); - expected_lookups.push(lookup_start); + hasher.hash_span_block(span1_block_val.op_batches(), span1_block.hash()); - // add lookups for absorbed batches - for i in 1..num_batches { - let lookup = HasherLookup::new( - LINEAR_HASH_LABEL, - lookup_start_addr + (i * HASH_CYCLE_LEN) as u32 - 1, - ZERO, - HasherLookupContext::Absorb, - ); - expected_lookups.push(lookup); - } - - let last_lookup_addr_memoized_block = - lookup_start_addr + (num_batches * HASH_CYCLE_LEN) as u32 - 1; - - // add lookup for end of span block - let lookup_end = HasherLookup::new( - RETURN_HASH_LABEL, - last_lookup_addr_memoized_block, - ZERO, - HasherLookupContext::Return, - ); - expected_lookups.push(lookup_end); - - check_lookups_validity(lookups, expected_lookups_len, expected_lookups); + let _num_batches = span1_block_val.op_batches().len(); let first_span_block_final_state = final_state; @@ -972,45 +470,12 @@ fn hash_memoization_span_blocks_check(span_block: CodeBlock) { unreachable!() }; - let mut lookups = Vec::new(); // builds the hash execution trace of the second span block by copying the sections of the // trace corresponding to the first span block with the same hash. 
let (addr, final_state) = - hasher.hash_span_block(span2_block_val.op_batches(), span2_block.hash(), &mut lookups); - - let num_batches = span2_block_val.op_batches().len(); - let lookup_start_addr = last_lookup_addr_memoized_block + 1; - - let expected_lookups_len = 2 + num_batches - 1; + hasher.hash_span_block(span2_block_val.op_batches(), span2_block.hash()); - let mut expected_lookups = Vec::new(); - - // add lookup for start of span block - let lookup_start = - HasherLookup::new(LINEAR_HASH_LABEL, lookup_start_addr, ZERO, HasherLookupContext::Start); - expected_lookups.push(lookup_start); - - // add lookups for absorbed batches - for i in 1..num_batches { - let lookup = HasherLookup::new( - LINEAR_HASH_LABEL, - lookup_start_addr + (i * HASH_CYCLE_LEN) as u32 - 1, - ZERO, - HasherLookupContext::Absorb, - ); - expected_lookups.push(lookup); - } - - // add lookup for end of span block - let lookup_end = HasherLookup::new( - RETURN_HASH_LABEL, - lookup_start_addr + (num_batches * HASH_CYCLE_LEN) as u32 - 1, - ZERO, - HasherLookupContext::Return, - ); - expected_lookups.push(lookup_end); - - check_lookups_validity(lookups, expected_lookups_len, expected_lookups); + let _num_batches = span2_block_val.op_batches().len(); let expected_hash = span2_block.hash(); // make sure the hash of the final state of Span2 block is the same as the expected hash. @@ -1022,7 +487,7 @@ fn hash_memoization_span_blocks_check(span_block: CodeBlock) { let copied_start_row = addr.as_int() as usize - 1; let copied_end_row = hasher.trace_len() - 1; - let (trace, _) = build_trace(hasher, copied_end_row + 1); + let trace = build_trace(hasher, copied_end_row + 1); // check correct copy after memoization check_memoized_trace(&trace, start_row, end_row, copied_start_row, copied_end_row); @@ -1033,11 +498,11 @@ fn hash_memoization_span_blocks_check(span_block: CodeBlock) { /// Builds an execution trace for the provided hasher. The trace must have the number of rows /// specified by num_rows. 
-fn build_trace(hasher: Hasher, num_rows: usize) -> (Vec>, ChipletsVTableTraceBuilder) { +fn build_trace(hasher: Hasher, num_rows: usize) -> Vec> { let mut trace = (0..TRACE_WIDTH).map(|_| vec![ZERO; num_rows]).collect::>(); let mut fragment = TraceFragment::trace_to_fragment(&mut trace); - let aux_trace_builder = hasher.fill_trace(&mut fragment); - (trace, aux_trace_builder) + let _aux_trace_builder = hasher.fill_trace(&mut fragment); + trace } /// Makes sure that the provided trace is consistent with verifying the specified Merkle path @@ -1145,24 +610,6 @@ fn check_memoized_trace( } } -/// Makes sure the lookups are built correctly. -fn check_lookups_validity( - lookups: Vec, - expected_lookups_length: usize, - expected_lookups: Vec, -) { - // make sure the length of the lookups is what we expect. - assert_eq!(expected_lookups_length, lookups.len()); - - // make sure the length of lookups and expected lookups is same. - assert_eq!(expected_lookups.len(), lookups.len()); - - for (lookup, expected_lookup) in lookups.iter().zip(expected_lookups) { - // make sure the lookups match with what we expect. - assert_eq!(expected_lookup, *lookup); - } -} - /// Makes sure that a row in the provided trace is equal to the provided values at the specified /// row index. 
fn assert_row_equal(trace: &[Vec], row_idx: usize, values: &[Felt]) { diff --git a/processor/src/chiplets/kernel_rom/mod.rs b/processor/src/chiplets/kernel_rom/mod.rs index ee2a4a2422..8e5c0100ae 100644 --- a/processor/src/chiplets/kernel_rom/mod.rs +++ b/processor/src/chiplets/kernel_rom/mod.rs @@ -1,8 +1,5 @@ -use super::{ - trace::LookupTableRow, BTreeMap, ChipletsBus, ChipletsVTableTraceBuilder, ColMatrix, Digest, - ExecutionError, Felt, FieldElement, Kernel, TraceFragment, Word, ONE, ZERO, -}; -use miden_air::trace::chiplets::kernel_rom::{KERNEL_PROC_LABEL, TRACE_WIDTH}; +use super::{BTreeMap, Digest, ExecutionError, Felt, Kernel, TraceFragment, Word, ONE, ZERO}; +use miden_air::trace::chiplets::kernel_rom::TRACE_WIDTH; #[cfg(test)] mod tests; @@ -99,13 +96,7 @@ impl KernelRom { // -------------------------------------------------------------------------------------------- /// Populates the provided execution trace fragment with execution trace of this kernel ROM. - pub fn fill_trace( - self, - trace: &mut TraceFragment, - chiplets_bus: &mut ChipletsBus, - virtual_table: &mut ChipletsVTableTraceBuilder, - kernel_rom_start_row: usize, - ) { + pub fn fill_trace(self, trace: &mut TraceFragment) { debug_assert_eq!(TRACE_WIDTH, trace.width(), "inconsistent trace fragment width"); let mut row = 0; for (idx, access_info) in self.access_map.values().enumerate() { @@ -113,21 +104,13 @@ impl KernelRom { // write at least one row into the trace for each kernel procedure access_info.write_into_trace(trace, row, idx); - // add the procedure to the virtual table - virtual_table.add_kernel_proc(row as u32, idx, access_info.proc_hash); - // provide the kernel procedure to the chiplets bus, if it was accessed at least once - let lookup = KernelProcLookup::new(access_info.proc_hash); - if access_info.num_accesses >= 1 { - chiplets_bus.provide_kernel_proc_call(lookup, (kernel_rom_start_row + row) as u32); - } row += 1; // if the procedure was accessed more than once, we need 
write a row and provide the // procedure to the bus per additional access for _ in 1..access_info.num_accesses { access_info.write_into_trace(trace, row, idx); - chiplets_bus.provide_kernel_proc_call(lookup, (kernel_rom_start_row + row) as u32); row += 1; } } @@ -171,33 +154,3 @@ impl ProcAccessInfo { trace.set(row, 5, self.proc_hash[3]); } } - -// KERNEL ROM PROCEDURE LOOKUPS -// ================================================================================================ -#[derive(Debug, Copy, Clone, PartialEq, Eq)] -pub struct KernelProcLookup { - proc_hash: Word, -} - -impl KernelProcLookup { - pub fn new(proc_hash: Word) -> Self { - Self { proc_hash } - } -} - -impl LookupTableRow for KernelProcLookup { - /// Reduces this row to a single field element in the field specified by E. This requires - /// at least 6 alpha values. - fn to_value>( - &self, - _main_trace: &ColMatrix, - alphas: &[E], - ) -> E { - alphas[0] - + alphas[1].mul_base(KERNEL_PROC_LABEL) - + alphas[2].mul_base(self.proc_hash[0]) - + alphas[3].mul_base(self.proc_hash[1]) - + alphas[4].mul_base(self.proc_hash[2]) - + alphas[5].mul_base(self.proc_hash[3]) - } -} diff --git a/processor/src/chiplets/kernel_rom/tests.rs b/processor/src/chiplets/kernel_rom/tests.rs index 01f40cc1da..cf7f7d515c 100644 --- a/processor/src/chiplets/kernel_rom/tests.rs +++ b/processor/src/chiplets/kernel_rom/tests.rs @@ -1,11 +1,4 @@ -use super::{ - ChipletsBus, Felt, Kernel, KernelProcLookup, KernelRom, TraceFragment, Word, ONE, TRACE_WIDTH, - ZERO, -}; -use crate::chiplets::{ - aux_trace::{ChipletLookup, ChipletsBusRow}, - ChipletsVTableTraceBuilder, -}; +use super::{Felt, Kernel, KernelRom, TraceFragment, Word, ONE, TRACE_WIDTH, ZERO}; use vm_core::utils::collections::Vec; // CONSTANTS @@ -17,19 +10,6 @@ const PROC2_HASH: Word = [ONE, ONE, ONE, ONE]; // TESTS // ================================================================================================ -#[test] -fn kernel_rom_empty() { - let kernel = 
Kernel::default(); - let rom = KernelRom::new(kernel); - assert_eq!(0, rom.trace_len()); - - // generate trace - let (_, _, virtual_table) = build_trace(rom, 0); - - // make sure the chiplets table includes no kernel procedures - verify_proc_table(&virtual_table, &[]); -} - #[test] fn kernel_rom_invalid_access() { let kernel = build_kernel(); @@ -51,7 +31,7 @@ fn kernel_rom_no_access() { assert_eq!(expected_trace_len, rom.trace_len()); // generate trace - let (trace, _, virtual_table) = build_trace(rom, expected_trace_len); + let trace = build_trace(rom, expected_trace_len); // first row of the trace should correspond to the first procedure let row = 0; @@ -72,9 +52,6 @@ fn kernel_rom_no_access() { assert_eq!(trace[3][row], PROC2_HASH[1]); assert_eq!(trace[4][row], PROC2_HASH[2]); assert_eq!(trace[5][row], PROC2_HASH[3]); - - // make sure the chiplets table includes each kernel procedure exactly once - verify_proc_table(&virtual_table, &[PROC1_HASH, PROC2_HASH]); } #[test] @@ -93,7 +70,7 @@ fn kernel_rom_with_access() { assert_eq!(expected_trace_len, rom.trace_len()); // generate trace - let (trace, chiplets_bus, virtual_table) = build_trace(rom, expected_trace_len); + let trace = build_trace(rom, expected_trace_len); // first 3 rows of the trace should correspond to the first procedure for row in 0..3 { @@ -114,19 +91,6 @@ fn kernel_rom_with_access() { assert_eq!(trace[4][row], PROC2_HASH[2]); assert_eq!(trace[5][row], PROC2_HASH[3]); } - - // make sure the lookups were sent to the bus correctly from the kernel rom chiplet - let proc1_lookup = KernelProcLookup::new(PROC1_HASH); - let proc2_lookup = KernelProcLookup::new(PROC2_HASH); - - verify_bus(&chiplets_bus, 0, 0, &proc1_lookup); - verify_bus(&chiplets_bus, 1, 1, &proc1_lookup); - verify_bus(&chiplets_bus, 2, 2, &proc1_lookup); - verify_bus(&chiplets_bus, 3, 3, &proc2_lookup); - verify_bus(&chiplets_bus, 4, 4, &proc2_lookup); - - // make sure the chiplets table includes each kernel procedure exactly once - 
verify_proc_table(&virtual_table, &[PROC1_HASH, PROC2_HASH]); } // HELPER FUNCTIONS @@ -139,46 +103,10 @@ fn build_kernel() -> Kernel { /// Builds a trace of the specified length and fills it with data from the provided KernelRom /// instance. -fn build_trace( - kernel_rom: KernelRom, - num_rows: usize, -) -> (Vec>, ChipletsBus, ChipletsVTableTraceBuilder) { - let mut chiplets_bus = ChipletsBus::default(); - let mut virtual_table = ChipletsVTableTraceBuilder::default(); +fn build_trace(kernel_rom: KernelRom, num_rows: usize) -> Vec> { let mut trace = (0..TRACE_WIDTH).map(|_| vec![ZERO; num_rows]).collect::>(); let mut fragment = TraceFragment::trace_to_fragment(&mut trace); - kernel_rom.fill_trace(&mut fragment, &mut chiplets_bus, &mut virtual_table, 0); - - (trace, chiplets_bus, virtual_table) -} - -/// Verifies that the chiplet bus received the specified KernelProcLookup response at `cycle` which -/// was added to the list of responses at `index`. -fn verify_bus( - chiplets_bus: &ChipletsBus, - index: usize, - cycle: u32, - proc_lookup: &KernelProcLookup, -) { - let expected_lookup = ChipletLookup::KernelRom(*proc_lookup); - let expected_hint = ChipletsBusRow::new(&[], Some(index as u32)); - - let lookup = chiplets_bus.get_response_row(index); - let hint = chiplets_bus.get_lookup_hint(cycle).unwrap(); - - assert_eq!(expected_lookup, lookup); - assert_eq!(&expected_hint, hint); -} + kernel_rom.fill_trace(&mut fragment); -/// Verifies that the kernel procedure table contains every procedure in the kernel exactly once. 
-fn verify_proc_table(virtual_table: &ChipletsVTableTraceBuilder, proc_hashes: &[Word]) { - // these tests are only for the kernel rom chiplet, so the virtual table should not be used by - // other chiplets in these cases - assert_eq!(virtual_table.rows().len(), proc_hashes.len()); - for (row, proc_hash) in virtual_table.rows().iter().zip(proc_hashes) { - assert!(row.kernel_proc().is_some()); - if let Some(proc) = row.kernel_proc() { - assert_eq!(proc.proc_hash(), *proc_hash); - } - } + trace } diff --git a/processor/src/chiplets/memory/mod.rs b/processor/src/chiplets/memory/mod.rs index b24a957eb4..d9315389b3 100644 --- a/processor/src/chiplets/memory/mod.rs +++ b/processor/src/chiplets/memory/mod.rs @@ -1,8 +1,7 @@ use super::{ - trace::LookupTableRow, utils::{split_element_u32_into_u16, split_u32_into_u16}, - BTreeMap, ChipletsBus, ColMatrix, Felt, FieldElement, RangeChecker, StarkField, TraceFragment, - Vec, Word, EMPTY_WORD, ONE, + BTreeMap, Felt, FieldElement, RangeChecker, StarkField, TraceFragment, Vec, Word, EMPTY_WORD, + ONE, }; use crate::system::ContextId; use miden_air::trace::chiplets::memory::{ @@ -190,12 +189,7 @@ impl Memory { } /// Fills the provided trace fragment with trace data from this memory instance. - pub fn fill_trace( - self, - trace: &mut TraceFragment, - chiplets_bus: &mut ChipletsBus, - memory_start_row: usize, - ) { + pub fn fill_trace(self, trace: &mut TraceFragment) { debug_assert_eq!(self.trace_len(), trace.len(), "inconsistent trace lengths"); // set the pervious address and clock cycle to the first address and clock cycle of the @@ -245,17 +239,6 @@ impl Memory { // TODO: switch to batch inversion to improve efficiency. trace.set(row, D_INV_COL_IDX, delta.inv()); - // provide the memory access data to the chiplets bus. 
- let memory_lookup = MemoryLookup::new( - memory_access.op_label(), - ctx, - Felt::from(addr), - clk, - value, - ); - chiplets_bus - .provide_memory_operation(memory_lookup, (memory_start_row + row) as u32); - // update values for the next iteration of the loop prev_ctx = ctx; prev_addr = felt_addr; @@ -291,62 +274,3 @@ impl Memory { self.trace.iter().fold(0, |acc, (_, s)| acc + s.size()) } } - -// MEMORY LOOKUPS -// ================================================================================================ - -/// Contains the data required to describe a memory read or write. -#[derive(Debug, Copy, Clone, PartialEq, Eq)] -pub struct MemoryLookup { - // unique label identifying the memory operation - label: u8, - ctx: Felt, - addr: Felt, - clk: Felt, - word: Word, -} - -impl MemoryLookup { - pub fn new(label: u8, ctx: Felt, addr: Felt, clk: Felt, word: Word) -> Self { - Self { - label, - ctx, - addr, - clk, - word, - } - } - - pub fn from_ints(label: u8, ctx: ContextId, addr: u32, clk: u32, word: Word) -> Self { - Self { - label, - ctx: Felt::from(ctx), - addr: Felt::from(addr), - clk: Felt::from(clk), - word, - } - } -} - -impl LookupTableRow for MemoryLookup { - /// Reduces this row to a single field element in the field specified by E. This requires - /// at least 9 alpha values. 
- fn to_value>( - &self, - _main_trace: &ColMatrix, - alphas: &[E], - ) -> E { - let word_value = self - .word - .iter() - .enumerate() - .fold(E::ZERO, |acc, (j, element)| acc + alphas[j + 5].mul_base(*element)); - - alphas[0] - + alphas[1].mul_base(Felt::from(self.label)) - + alphas[2].mul_base(self.ctx) - + alphas[3].mul_base(self.addr) - + alphas[4].mul_base(self.clk) - + word_value - } -} diff --git a/processor/src/chiplets/memory/segment.rs b/processor/src/chiplets/memory/segment.rs index 15ceb760ec..f859b320b3 100644 --- a/processor/src/chiplets/memory/segment.rs +++ b/processor/src/chiplets/memory/segment.rs @@ -1,6 +1,5 @@ use miden_air::trace::chiplets::memory::{ - Selectors, MEMORY_COPY_READ, MEMORY_INIT_READ, MEMORY_READ_LABEL, MEMORY_WRITE, - MEMORY_WRITE_LABEL, + Selectors, MEMORY_COPY_READ, MEMORY_INIT_READ, MEMORY_WRITE, }; use super::{BTreeMap, Felt, StarkField, Vec, Word, INIT_MEM_VALUE}; @@ -164,14 +163,6 @@ impl MemorySegmentAccess { } } - /// Returns the operation label of the memory operation used in this memory access. - pub(super) fn op_label(&self) -> u8 { - match self.op { - MemoryOperation::InitRead | MemoryOperation::CopyRead => MEMORY_READ_LABEL, - MemoryOperation::Write => MEMORY_WRITE_LABEL, - } - } - /// Returns the word value for this memory access. 
pub(super) fn value(&self) -> Word { self.value diff --git a/processor/src/chiplets/memory/tests.rs b/processor/src/chiplets/memory/tests.rs index abf550cd0c..7675a6e021 100644 --- a/processor/src/chiplets/memory/tests.rs +++ b/processor/src/chiplets/memory/tests.rs @@ -1,15 +1,13 @@ use crate::ContextId; use super::{ - super::aux_trace::{ChipletLookup, ChipletsBusRow}, - super::ZERO, - ChipletsBus, Felt, FieldElement, Memory, MemoryLookup, TraceFragment, Vec, ADDR_COL_IDX, - CLK_COL_IDX, CTX_COL_IDX, D0_COL_IDX, D1_COL_IDX, D_INV_COL_IDX, EMPTY_WORD, ONE, V_COL_RANGE, + super::ZERO, Felt, FieldElement, Memory, TraceFragment, Vec, ADDR_COL_IDX, CLK_COL_IDX, + CTX_COL_IDX, D0_COL_IDX, D1_COL_IDX, D_INV_COL_IDX, EMPTY_WORD, ONE, V_COL_RANGE, }; use miden_air::trace::chiplets::memory::{ - Selectors, MEMORY_COPY_READ, MEMORY_INIT_READ, MEMORY_READ_LABEL, MEMORY_WRITE, - MEMORY_WRITE_LABEL, TRACE_WIDTH as MEMORY_TRACE_WIDTH, + Selectors, MEMORY_COPY_READ, MEMORY_INIT_READ, MEMORY_WRITE, TRACE_WIDTH as MEMORY_TRACE_WIDTH, }; +use vm_core::Word; #[test] fn mem_init() { @@ -51,30 +49,23 @@ fn mem_read() { // check generated trace and memory data provided to the ChipletsBus; rows should be sorted by // address and then clock cycle - let (trace, chiplets_bus) = build_trace(mem, 4); + let trace = build_trace(mem, 4); // address 0 let mut prev_row = [ZERO; MEMORY_TRACE_WIDTH]; - let memory_access = - MemoryLookup::from_ints(MEMORY_READ_LABEL, ContextId::root(), addr0, 1, EMPTY_WORD); - prev_row = - verify_memory_access(&trace, &chiplets_bus, 0, MEMORY_INIT_READ, &memory_access, prev_row); + let memory_access = MemoryAccess::new(ContextId::root(), addr0, 1, EMPTY_WORD); + prev_row = verify_memory_access(&trace, 0, MEMORY_INIT_READ, &memory_access, prev_row); - let memory_access = - MemoryLookup::from_ints(MEMORY_READ_LABEL, ContextId::root(), addr0, 3, EMPTY_WORD); - prev_row = - verify_memory_access(&trace, &chiplets_bus, 1, MEMORY_COPY_READ, &memory_access, prev_row); + let 
memory_access = MemoryAccess::new(ContextId::root(), addr0, 3, EMPTY_WORD); + prev_row = verify_memory_access(&trace, 1, MEMORY_COPY_READ, &memory_access, prev_row); // address 2 - let memory_access = - MemoryLookup::from_ints(MEMORY_READ_LABEL, ContextId::root(), addr2, 4, EMPTY_WORD); - prev_row = - verify_memory_access(&trace, &chiplets_bus, 2, MEMORY_INIT_READ, &memory_access, prev_row); + let memory_access = MemoryAccess::new(ContextId::root(), addr2, 4, EMPTY_WORD); + prev_row = verify_memory_access(&trace, 2, MEMORY_INIT_READ, &memory_access, prev_row); // address 3 - let memory_access = - MemoryLookup::from_ints(MEMORY_READ_LABEL, ContextId::root(), addr3, 2, EMPTY_WORD); - verify_memory_access(&trace, &chiplets_bus, 3, MEMORY_INIT_READ, &memory_access, prev_row); + let memory_access = MemoryAccess::new(ContextId::root(), addr3, 2, EMPTY_WORD); + verify_memory_access(&trace, 3, MEMORY_INIT_READ, &memory_access, prev_row); } #[test] @@ -114,30 +105,23 @@ fn mem_write() { // check generated trace and memory data provided to the ChipletsBus; rows should be sorted by // address and then clock cycle - let (trace, chiplets_bus) = build_trace(mem, 4); + let trace = build_trace(mem, 4); // address 0 let mut prev_row = [ZERO; MEMORY_TRACE_WIDTH]; - let memory_access = - MemoryLookup::from_ints(MEMORY_WRITE_LABEL, ContextId::root(), addr0, 1, value1); - prev_row = - verify_memory_access(&trace, &chiplets_bus, 0, MEMORY_WRITE, &memory_access, prev_row); + let memory_access = MemoryAccess::new(ContextId::root(), addr0, 1, value1); + prev_row = verify_memory_access(&trace, 0, MEMORY_WRITE, &memory_access, prev_row); - let memory_access = - MemoryLookup::from_ints(MEMORY_WRITE_LABEL, ContextId::root(), addr0, 4, value9); - prev_row = - verify_memory_access(&trace, &chiplets_bus, 1, MEMORY_WRITE, &memory_access, prev_row); + let memory_access = MemoryAccess::new(ContextId::root(), addr0, 4, value9); + prev_row = verify_memory_access(&trace, 1, MEMORY_WRITE, 
&memory_access, prev_row); // address 1 - let memory_access = - MemoryLookup::from_ints(MEMORY_WRITE_LABEL, ContextId::root(), addr1, 3, value7); - prev_row = - verify_memory_access(&trace, &chiplets_bus, 2, MEMORY_WRITE, &memory_access, prev_row); + let memory_access = MemoryAccess::new(ContextId::root(), addr1, 3, value7); + prev_row = verify_memory_access(&trace, 2, MEMORY_WRITE, &memory_access, prev_row); // address 2 - let memory_access = - MemoryLookup::from_ints(MEMORY_WRITE_LABEL, ContextId::root(), addr2, 2, value5); - verify_memory_access(&trace, &chiplets_bus, 3, MEMORY_WRITE, &memory_access, prev_row); + let memory_access = MemoryAccess::new(ContextId::root(), addr2, 2, value5); + verify_memory_access(&trace, 3, MEMORY_WRITE, &memory_access, prev_row); } #[test] @@ -179,54 +163,37 @@ fn mem_write_read() { // check generated trace and memory data provided to the ChipletsBus; rows should be sorted by // address and then clock cycle - let (trace, chiplets_bus) = build_trace(mem, 9); + let trace = build_trace(mem, 9); // address 2 let mut prev_row = [ZERO; MEMORY_TRACE_WIDTH]; - let memory_access = - MemoryLookup::from_ints(MEMORY_WRITE_LABEL, ContextId::root(), addr2, 2, value4); - prev_row = - verify_memory_access(&trace, &chiplets_bus, 0, MEMORY_WRITE, &memory_access, prev_row); - - let memory_access = - MemoryLookup::from_ints(MEMORY_READ_LABEL, ContextId::root(), addr2, 5, value4); - prev_row = - verify_memory_access(&trace, &chiplets_bus, 1, MEMORY_COPY_READ, &memory_access, prev_row); - - let memory_access = - MemoryLookup::from_ints(MEMORY_WRITE_LABEL, ContextId::root(), addr2, 6, value7); - prev_row = - verify_memory_access(&trace, &chiplets_bus, 2, MEMORY_WRITE, &memory_access, prev_row); - - let memory_access = - MemoryLookup::from_ints(MEMORY_READ_LABEL, ContextId::root(), addr2, 8, value7); - prev_row = - verify_memory_access(&trace, &chiplets_bus, 3, MEMORY_COPY_READ, &memory_access, prev_row); + let memory_access = 
MemoryAccess::new(ContextId::root(), addr2, 2, value4); + prev_row = verify_memory_access(&trace, 0, MEMORY_WRITE, &memory_access, prev_row); + + let memory_access = MemoryAccess::new(ContextId::root(), addr2, 5, value4); + prev_row = verify_memory_access(&trace, 1, MEMORY_COPY_READ, &memory_access, prev_row); + + let memory_access = MemoryAccess::new(ContextId::root(), addr2, 6, value7); + prev_row = verify_memory_access(&trace, 2, MEMORY_WRITE, &memory_access, prev_row); + + let memory_access = MemoryAccess::new(ContextId::root(), addr2, 8, value7); + prev_row = verify_memory_access(&trace, 3, MEMORY_COPY_READ, &memory_access, prev_row); // address 5 - let memory_access = - MemoryLookup::from_ints(MEMORY_WRITE_LABEL, ContextId::root(), addr5, 1, value1); - prev_row = - verify_memory_access(&trace, &chiplets_bus, 4, MEMORY_WRITE, &memory_access, prev_row); - - let memory_access = - MemoryLookup::from_ints(MEMORY_READ_LABEL, ContextId::root(), addr5, 3, value1); - prev_row = - verify_memory_access(&trace, &chiplets_bus, 5, MEMORY_COPY_READ, &memory_access, prev_row); - - let memory_access = - MemoryLookup::from_ints(MEMORY_WRITE_LABEL, ContextId::root(), addr5, 4, value2); - prev_row = - verify_memory_access(&trace, &chiplets_bus, 6, MEMORY_WRITE, &memory_access, prev_row); - - let memory_access = - MemoryLookup::from_ints(MEMORY_READ_LABEL, ContextId::root(), addr5, 7, value2); - prev_row = - verify_memory_access(&trace, &chiplets_bus, 7, MEMORY_COPY_READ, &memory_access, prev_row); - - let memory_access = - MemoryLookup::from_ints(MEMORY_READ_LABEL, ContextId::root(), addr5, 9, value2); - verify_memory_access(&trace, &chiplets_bus, 8, MEMORY_COPY_READ, &memory_access, prev_row); + let memory_access = MemoryAccess::new(ContextId::root(), addr5, 1, value1); + prev_row = verify_memory_access(&trace, 4, MEMORY_WRITE, &memory_access, prev_row); + + let memory_access = MemoryAccess::new(ContextId::root(), addr5, 3, value1); + prev_row = verify_memory_access(&trace, 5, 
MEMORY_COPY_READ, &memory_access, prev_row); + + let memory_access = MemoryAccess::new(ContextId::root(), addr5, 4, value2); + prev_row = verify_memory_access(&trace, 6, MEMORY_WRITE, &memory_access, prev_row); + + let memory_access = MemoryAccess::new(ContextId::root(), addr5, 7, value2); + prev_row = verify_memory_access(&trace, 7, MEMORY_COPY_READ, &memory_access, prev_row); + + let memory_access = MemoryAccess::new(ContextId::root(), addr5, 9, value2); + verify_memory_access(&trace, 8, MEMORY_COPY_READ, &memory_access, prev_row); } #[test] @@ -268,31 +235,26 @@ fn mem_multi_context() { // check generated trace and memory data provided to the ChipletsBus; rows should be sorted by // address and then clock cycle - let (trace, chiplets_bus) = build_trace(mem, 5); + let trace = build_trace(mem, 5); // ctx = 0, addr = 0 let mut prev_row = [ZERO; MEMORY_TRACE_WIDTH]; - let memory_access = - MemoryLookup::from_ints(MEMORY_WRITE_LABEL, ContextId::root(), 0, 1, value1); - prev_row = - verify_memory_access(&trace, &chiplets_bus, 0, MEMORY_WRITE, &memory_access, prev_row); + let memory_access = MemoryAccess::new(ContextId::root(), 0, 1, value1); + prev_row = verify_memory_access(&trace, 0, MEMORY_WRITE, &memory_access, prev_row); - let memory_access = MemoryLookup::from_ints(MEMORY_READ_LABEL, ContextId::root(), 0, 9, value1); - prev_row = - verify_memory_access(&trace, &chiplets_bus, 1, MEMORY_COPY_READ, &memory_access, prev_row); + let memory_access = MemoryAccess::new(ContextId::root(), 0, 9, value1); + prev_row = verify_memory_access(&trace, 1, MEMORY_COPY_READ, &memory_access, prev_row); // ctx = 3, addr = 0 - let memory_access = MemoryLookup::from_ints(MEMORY_WRITE_LABEL, 3.into(), 0, 7, value3); - prev_row = - verify_memory_access(&trace, &chiplets_bus, 2, MEMORY_WRITE, &memory_access, prev_row); + let memory_access = MemoryAccess::new(3.into(), 0, 7, value3); + prev_row = verify_memory_access(&trace, 2, MEMORY_WRITE, &memory_access, prev_row); // ctx = 3, addr = 1 
- let memory_access = MemoryLookup::from_ints(MEMORY_WRITE_LABEL, 3.into(), 1, 4, value2); - prev_row = - verify_memory_access(&trace, &chiplets_bus, 3, MEMORY_WRITE, &memory_access, prev_row); + let memory_access = MemoryAccess::new(3.into(), 1, 4, value2); + prev_row = verify_memory_access(&trace, 3, MEMORY_WRITE, &memory_access, prev_row); - let memory_access = MemoryLookup::from_ints(MEMORY_READ_LABEL, 3.into(), 1, 6, value2); - verify_memory_access(&trace, &chiplets_bus, 4, MEMORY_COPY_READ, &memory_access, prev_row); + let memory_access = MemoryAccess::new(3.into(), 1, 6, value2); + verify_memory_access(&trace, 4, MEMORY_COPY_READ, &memory_access, prev_row); } #[test] @@ -334,14 +296,32 @@ fn mem_get_state_at() { // HELPER STRUCT & FUNCTIONS // ================================================================================================ +/// Contains data representing a memory access. +pub struct MemoryAccess { + ctx: ContextId, + addr: Felt, + clk: Felt, + word: [Felt; 4], +} + +impl MemoryAccess { + pub fn new(ctx: ContextId, addr: u32, clk: u32, word: Word) -> Self { + Self { + ctx, + addr: Felt::from(addr), + clk: Felt::from(clk), + word, + } + } +} + /// Builds a trace of the specified length and fills it with data from the provided Memory instance. 
-fn build_trace(mem: Memory, num_rows: usize) -> (Vec>, ChipletsBus) { - let mut chiplets_bus = ChipletsBus::default(); +fn build_trace(mem: Memory, num_rows: usize) -> Vec> { let mut trace = (0..MEMORY_TRACE_WIDTH).map(|_| vec![ZERO; num_rows]).collect::>(); let mut fragment = TraceFragment::trace_to_fragment(&mut trace); - mem.fill_trace(&mut fragment, &mut chiplets_bus, 0); + mem.fill_trace(&mut fragment); - (trace, chiplets_bus) + trace } fn read_trace_row(trace: &[Vec], step: usize) -> [Felt; MEMORY_TRACE_WIDTH] { @@ -353,12 +333,11 @@ fn read_trace_row(trace: &[Vec], step: usize) -> [Felt; MEMORY_TRACE_WIDTH } fn build_trace_row( - memory_access: &MemoryLookup, + memory_access: &MemoryAccess, op_selectors: Selectors, prev_row: [Felt; MEMORY_TRACE_WIDTH], ) -> [Felt; MEMORY_TRACE_WIDTH] { - let MemoryLookup { - label: _, + let MemoryAccess { ctx, addr, clk, @@ -369,8 +348,8 @@ fn build_trace_row( row[0] = op_selectors[0]; row[1] = op_selectors[1]; - row[CTX_COL_IDX] = ctx; - row[ADDR_COL_IDX] = addr; + row[CTX_COL_IDX] = ctx.into(); + row[ADDR_COL_IDX] = Felt::from(addr); row[CLK_COL_IDX] = clk; row[V_COL_RANGE.start] = new_val[0]; row[V_COL_RANGE.start + 1] = new_val[1]; @@ -397,22 +376,13 @@ fn build_trace_row( fn verify_memory_access( trace: &[Vec], - chiplets_bus: &ChipletsBus, row: u32, op_selectors: Selectors, - memory_access: &MemoryLookup, + memory_access: &MemoryAccess, prev_row: [Felt; MEMORY_TRACE_WIDTH], ) -> [Felt; MEMORY_TRACE_WIDTH] { let expected_row = build_trace_row(memory_access, op_selectors, prev_row); - let expected_lookup = ChipletLookup::Memory(*memory_access); - let expected_hint = ChipletsBusRow::new(&[], Some(row)); - - let lookup = chiplets_bus.get_response_row(row as usize); - let hint = chiplets_bus.get_lookup_hint(row).unwrap(); - assert_eq!(expected_row, read_trace_row(trace, row as usize)); - assert_eq!(expected_lookup, lookup); - assert_eq!(&expected_hint, hint); expected_row } diff --git a/processor/src/chiplets/mod.rs 
b/processor/src/chiplets/mod.rs index 92a14d0395..c353d29d7d 100644 --- a/processor/src/chiplets/mod.rs +++ b/processor/src/chiplets/mod.rs @@ -1,19 +1,14 @@ use crate::system::ContextId; use super::{ - crypto::MerklePath, trace, utils, BTreeMap, ChipletsTrace, ColMatrix, ExecutionError, Felt, - FieldElement, RangeChecker, StarkField, TraceFragment, Vec, Word, CHIPLETS_WIDTH, EMPTY_WORD, - ONE, ZERO, -}; -use miden_air::trace::chiplets::{ - bitwise::{BITWISE_AND_LABEL, BITWISE_XOR_LABEL}, - hasher::{Digest, HasherState}, - memory::{MEMORY_READ_LABEL, MEMORY_WRITE_LABEL}, + crypto::MerklePath, utils, BTreeMap, ChipletsTrace, ExecutionError, Felt, FieldElement, + RangeChecker, StarkField, TraceFragment, Vec, Word, CHIPLETS_WIDTH, EMPTY_WORD, ONE, ZERO, }; +use miden_air::trace::chiplets::hasher::{Digest, HasherState}; use vm_core::{code_blocks::OpBatch, Kernel}; mod bitwise; -use bitwise::{Bitwise, BitwiseLookup}; +use bitwise::Bitwise; mod hasher; #[cfg(test)] @@ -21,44 +16,18 @@ pub(crate) use hasher::init_state_from_words; use hasher::Hasher; mod memory; -use memory::{Memory, MemoryLookup}; +use memory::Memory; mod kernel_rom; -use kernel_rom::{KernelProcLookup, KernelRom}; +use kernel_rom::KernelRom; mod aux_trace; -#[cfg(test)] -pub(crate) use aux_trace::ChipletsVTableRow; -pub(crate) use aux_trace::{AuxTraceBuilder, ChipletsBus, ChipletsVTableTraceBuilder}; + +pub(crate) use aux_trace::AuxTraceBuilder; #[cfg(test)] mod tests; -// HELPER STRUCTS -// ================================================================================================ - -/// Result of a merkle tree node update. The result contains the old merkle_root, which -/// corresponding to the old_value, and the new merkle_root, for the updated value. As well as the -/// row address of the execution trace at which the computation started. 
-#[derive(Debug, Copy, Clone)] -pub struct MerkleRootUpdate { - address: Felt, - old_root: Word, - new_root: Word, -} - -impl MerkleRootUpdate { - pub fn get_address(&self) -> Felt { - self.address - } - pub fn get_old_root(&self) -> Word { - self.old_root - } - pub fn get_new_root(&self) -> Word { - self.new_root - } -} - // CHIPLETS MODULE OF HASHER, BITWISE, MEMORY, AND KERNEL ROM CHIPLETS // ================================================================================================ @@ -102,6 +71,46 @@ impl MerkleRootUpdate { /// exactly enough rows remaining for the specified number of random rows. /// - columns 0-3: selector columns with values set to ONE /// - columns 3-17: unused columns padded with ZERO +/// +/// The following is a pictorial representation of the chiplet module: +/// +---+-------------------------------------------------------+-------------+ +/// | 0 | | |-------------| +/// | . | Hash chiplet | Hash chiplet |-------------| +/// | . | internal | 16 columns |-- Padding --| +/// | . | selectors | constraint degree 8 |-------------| +/// | 0 | | |-------------| +/// +---+---+---------------------------------------------------+-------------+ +/// | 1 | 0 | | |-------------| +/// | . | . | Bitwise | Bitwise chiplet |-------------| +/// | . | . | chiplet | 13 columns |-- Padding --| +/// | . | . | internal | constraint degree 13 |-------------| +/// | . | . | selectors | |-------------| +/// | . | 0 | | |-------------| +/// | . +---+---+-----------------------------------------------+-------------+ +/// | . | 1 | 0 | | |-------------| +/// | . | . | . | Memory chiplet | Memory chiplet |-------------| +/// | . | . | . | internal | 12 columns |-- Padding --| +/// | . | . | . | selectors | constraint degree 9 |-------------| +/// | . | . | 0 | | |-------------| +/// | . + . |---+---+-------------------------------------------+-------------+ +/// | . | . | 1 | 0 | | |-------------| +/// | . | . | . | . 
| Kernel ROM | Kernel ROM chiplet |-------------| +/// | . | . | . | . | chiplet internal | 6 columns |-- Padding --| +/// | . | . | . | . | selectors | constraint degree 9 |-------------| +/// | . | . | . | 0 | | |-------------| +/// | . + . | . |---+-------------------------------------------+-------------+ +/// | . | . | . | 1 |---------------------------------------------------------| +/// | . | . | . | . |---------------------------------------------------------| +/// | . | . | . | . |---------------------------------------------------------| +/// | . | . | . | . |---------------------------------------------------------| +/// | . | . | . | . |----------------------- Padding -------------------------| +/// | . + . | . | . |---------------------------------------------------------| +/// | . | . | . | . |---------------------------------------------------------| +/// | . | . | . | . |---------------------------------------------------------| +/// | . | . | . | . |---------------------------------------------------------| +/// | 1 | 1 | 1 | 1 |---------------------------------------------------------| +/// +---+---+---+---+---------------------------------------------------------+ +/// pub struct Chiplets { /// Current clock cycle of the VM. clk: u32, @@ -109,7 +118,6 @@ pub struct Chiplets { bitwise: Bitwise, memory: Memory, kernel_rom: KernelRom, - bus: ChipletsBus, } impl Chiplets { @@ -123,7 +131,6 @@ impl Chiplets { bitwise: Bitwise::default(), memory: Memory::default(), kernel_rom: KernelRom::new(kernel), - bus: ChipletsBus::default(), } } @@ -175,12 +182,7 @@ impl Chiplets { /// The returned tuple contains the hasher state after the permutation and the row address of /// the execution trace at which the permutation started. 
pub fn permute(&mut self, state: HasherState) -> (Felt, HasherState) { - let mut lookups = Vec::new(); - let (addr, return_state) = self.hasher.permute(state, &mut lookups); - self.bus.request_hasher_operation(&lookups, self.clk); - - // provide the responses to the bus - self.bus.provide_hasher_lookups(&lookups); + let (addr, return_state) = self.hasher.permute(state); (addr, return_state) } @@ -201,13 +203,7 @@ impl Chiplets { path: &MerklePath, index: Felt, ) -> (Felt, Word) { - let mut lookups = Vec::new(); - let (addr, root) = self.hasher.build_merkle_root(value, path, index, &mut lookups); - - self.bus.request_hasher_operation(&lookups, self.clk); - - // provide the responses to the bus - self.bus.provide_hasher_lookups(&lookups); + let (addr, root) = self.hasher.build_merkle_root(value, path, index); (addr, root) } @@ -225,16 +221,7 @@ impl Chiplets { path: &MerklePath, index: Felt, ) -> MerkleRootUpdate { - let mut lookups = Vec::new(); - - let merkle_root_update = - self.hasher.update_merkle_root(old_value, new_value, path, index, &mut lookups); - self.bus.request_hasher_operation(&lookups, self.clk); - - // provide the responses to the bus - self.bus.provide_hasher_lookups(&lookups); - - merkle_root_update + self.hasher.update_merkle_root(old_value, new_value, path, index) } // HASH CHIPLET ACCESSORS FOR CONTROL BLOCK DECODING @@ -251,22 +238,11 @@ impl Chiplets { domain: Felt, expected_hash: Digest, ) -> Felt { - let mut lookups = Vec::new(); - let (addr, result) = - self.hasher.hash_control_block(h1, h2, domain, expected_hash, &mut lookups); + let (addr, result) = self.hasher.hash_control_block(h1, h2, domain, expected_hash); // make sure the result computed by the hasher is the same as the expected block hash debug_assert_eq!(expected_hash, result.into()); - // send the request for the hash initialization - self.bus.request_hasher_lookup(lookups[0], self.clk); - - // enqueue the request for the hash result - self.bus.enqueue_hasher_request(lookups[1]); 
- - // provide the responses to the bus - self.bus.provide_hasher_lookups(&lookups); - addr } @@ -275,48 +251,14 @@ impl Chiplets { /// /// It returns the row address of the execution trace at which the hash computation started. pub fn hash_span_block(&mut self, op_batches: &[OpBatch], expected_hash: Digest) -> Felt { - let mut lookups = Vec::new(); - let (addr, result) = self.hasher.hash_span_block(op_batches, expected_hash, &mut lookups); + let (addr, result) = self.hasher.hash_span_block(op_batches, expected_hash); // make sure the result computed by the hasher is the same as the expected block hash debug_assert_eq!(expected_hash, result.into()); - // send the request for the hash initialization - self.bus.request_hasher_lookup(lookups[0], self.clk); - - // enqueue the rest of the requests in reverse order so that the next request is at - // the top of the queue. - for lookup in lookups.iter().skip(1).rev() { - self.bus.enqueue_hasher_request(*lookup); - } - - // provide the responses to the bus - self.bus.provide_hasher_lookups(&lookups); - addr } - /// Sends a request for a [HasherLookup] required for verifying absorption of a new `SPAN` batch - /// to the Chiplets Bus. It's expected to be called by the decoder while processing a `RESPAN`. - /// - /// It's processed by moving the corresponding lookup from the Chiplets bus' queued lookups to - /// its requested lookups. Therefore, the next queued lookup is expected to be a precomputed - /// lookup for absorbing new elements into the hasher state. - pub fn absorb_span_batch(&mut self) { - self.bus.send_queued_hasher_request(self.clk); - } - - /// Sends a request for a control block hash result to the Chiplets Bus. It's expected to be - /// called by the decoder to request the finalization (return hash) of a control block hash - /// computation for the control block it has just finished decoding. 
- /// - /// It's processed by moving the corresponding lookup from the Chiplets bus' queued lookups to - /// its requested lookups. Therefore, the next queued lookup is expected to be a precomputed - /// lookup for returning a hash result. - pub fn read_hash_result(&mut self) { - self.bus.send_queued_hasher_request(self.clk); - } - // BITWISE CHIPLET ACCESSORS // -------------------------------------------------------------------------------------------- @@ -326,9 +268,6 @@ impl Chiplets { pub fn u32and(&mut self, a: Felt, b: Felt) -> Result { let result = self.bitwise.u32and(a, b)?; - let bitwise_lookup = BitwiseLookup::new(BITWISE_AND_LABEL, a, b, result); - self.bus.request_bitwise_operation(bitwise_lookup, self.clk); - Ok(result) } @@ -338,9 +277,6 @@ impl Chiplets { pub fn u32xor(&mut self, a: Felt, b: Felt) -> Result { let result = self.bitwise.u32xor(a, b)?; - let bitwise_lookup = BitwiseLookup::new(BITWISE_XOR_LABEL, a, b, result); - self.bus.request_bitwise_operation(bitwise_lookup, self.clk); - Ok(result) } @@ -354,13 +290,7 @@ impl Chiplets { /// returned. This effectively implies that memory is initialized to ZERO. 
pub fn read_mem(&mut self, ctx: ContextId, addr: u32) -> Word { // read the word from memory - let value = self.memory.read(ctx, addr, self.clk); - - // send the memory read request to the bus - let lookup = MemoryLookup::from_ints(MEMORY_READ_LABEL, ctx, addr, self.clk, value); - self.bus.request_memory_operation(&[lookup], self.clk); - - value + self.memory.read(ctx, addr, self.clk) } /// Returns two words read from consecutive addresses started with `addr` in the specified @@ -371,65 +301,32 @@ impl Chiplets { pub fn read_mem_double(&mut self, ctx: ContextId, addr: u32) -> [Word; 2] { // read two words from memory: from addr and from addr + 1 let addr2 = addr + 1; - let words = [self.memory.read(ctx, addr, self.clk), self.memory.read(ctx, addr2, self.clk)]; - - // create lookups for both memory reads - let lookups = [ - MemoryLookup::from_ints(MEMORY_READ_LABEL, ctx, addr, self.clk, words[0]), - MemoryLookup::from_ints(MEMORY_READ_LABEL, ctx, addr2, self.clk, words[1]), - ]; - - // send lookups to the bus and return the result - self.bus.request_memory_operation(&lookups, self.clk); - words + [self.memory.read(ctx, addr, self.clk), self.memory.read(ctx, addr2, self.clk)] } /// Writes the provided word at the specified context/address. - /// - /// This also modifies the memory access trace and sends a memory lookup request to the bus. pub fn write_mem(&mut self, ctx: ContextId, addr: u32, word: Word) { self.memory.write(ctx, addr, self.clk, word); - - // send the memory write request to the bus - let lookup = MemoryLookup::from_ints(MEMORY_WRITE_LABEL, ctx, addr, self.clk, word); - self.bus.request_memory_operation(&[lookup], self.clk); } /// Writes the provided element into the specified context/address leaving the remaining 3 /// elements of the word previously stored at that address unchanged. - /// - /// This also modifies the memory access trace and sends a memory lookup request to the bus. 
pub fn write_mem_element(&mut self, ctx: ContextId, addr: u32, value: Felt) -> Word { let old_word = self.memory.get_old_value(ctx, addr); let new_word = [value, old_word[1], old_word[2], old_word[3]]; self.memory.write(ctx, addr, self.clk, new_word); - // send the memory write request to the bus - let lookup = MemoryLookup::from_ints(MEMORY_WRITE_LABEL, ctx, addr, self.clk, new_word); - self.bus.request_memory_operation(&[lookup], self.clk); - old_word } /// Writes the two provided words to two consecutive addresses in memory in the specified /// context, starting at the specified address. - /// - /// This also modifies the memory access trace and sends two memory lookup requests to the bus. pub fn write_mem_double(&mut self, ctx: ContextId, addr: u32, words: [Word; 2]) { let addr2 = addr + 1; // write two words to memory at addr and addr + 1 self.memory.write(ctx, addr, self.clk, words[0]); self.memory.write(ctx, addr2, self.clk, words[1]); - - // create lookups for both memory writes - let lookups = [ - MemoryLookup::from_ints(MEMORY_WRITE_LABEL, ctx, addr, self.clk, words[0]), - MemoryLookup::from_ints(MEMORY_WRITE_LABEL, ctx, addr2, self.clk, words[1]), - ]; - - // send lookups to the bus - self.bus.request_memory_operation(&lookups, self.clk); } /// Returns a word located at the specified context/address, or None if the address hasn't @@ -465,10 +362,6 @@ impl Chiplets { pub fn access_kernel_proc(&mut self, proc_hash: Digest) -> Result<(), ExecutionError> { self.kernel_rom.access_proc(proc_hash)?; - // record the access in the chiplet bus - let kernel_proc_lookup = KernelProcLookup::new(proc_hash.into()); - self.bus.request_kernel_proc_call(kernel_proc_lookup, self.clk); - Ok(()) } @@ -484,7 +377,7 @@ impl Chiplets { // -------------------------------------------------------------------------------------------- /// Adds all range checks required by the memory chiplet to the provided [RangeChecker] - /// instance, along with the cycle rows at which the 
processor performs the lookups. + /// instance. pub fn append_range_checks(&self, range_checker: &mut RangeChecker) { self.memory.append_range_checks(self.memory_start(), range_checker); } @@ -504,10 +397,12 @@ impl Chiplets { .collect::>() .try_into() .expect("failed to convert vector to array"); + self.fill_trace(&mut trace); - let aux_builder = self.fill_trace(&mut trace); - - ChipletsTrace { trace, aux_builder } + ChipletsTrace { + trace, + aux_builder: AuxTraceBuilder::default(), + } } // HELPER METHODS @@ -519,7 +414,7 @@ impl Chiplets { /// /// It returns the auxiliary trace builders for generating auxiliary trace columns that depend /// on data from [Chiplets]. - fn fill_trace(self, trace: &mut [Vec; CHIPLETS_WIDTH]) -> AuxTraceBuilder { + fn fill_trace(self, trace: &mut [Vec; CHIPLETS_WIDTH]) { // get the rows where chiplets begin. let bitwise_start = self.bitwise_start(); let memory_start = self.memory_start(); @@ -532,7 +427,6 @@ impl Chiplets { bitwise, memory, kernel_rom, - mut bus, } = self; // populate external selector columns for all chiplets @@ -579,16 +473,34 @@ impl Chiplets { // fill the fragments with the execution trace from each chiplet // TODO: this can be parallelized to fill the traces in multiple threads - let mut table_builder = hasher.fill_trace(&mut hasher_fragment); - bitwise.fill_trace(&mut bitwise_fragment, &mut bus, bitwise_start); - memory.fill_trace(&mut memory_fragment, &mut bus, memory_start); - kernel_rom.fill_trace( - &mut kernel_rom_fragment, - &mut bus, - &mut table_builder, - kernel_rom_start, - ); - - AuxTraceBuilder::new(bus.into_aux_builder(), table_builder) + hasher.fill_trace(&mut hasher_fragment); + bitwise.fill_trace(&mut bitwise_fragment); + memory.fill_trace(&mut memory_fragment); + kernel_rom.fill_trace(&mut kernel_rom_fragment); + } +} + +// HELPER STRUCTS +// ================================================================================================ + +/// Result of a Merkle tree node update. 
The result contains the old Merkle root, which +/// corresponds to the old_value, and the new Merkle root for the updated value, as well as the +/// row address of the execution trace at which the computation started. +#[derive(Debug, Copy, Clone)] +pub struct MerkleRootUpdate { + address: Felt, + old_root: Word, + new_root: Word, +} + +impl MerkleRootUpdate { + pub fn get_address(&self) -> Felt { + self.address + } + pub fn get_old_root(&self) -> Word { + self.old_root + } + pub fn get_new_root(&self) -> Word { + self.new_root } } diff --git a/processor/src/chiplets/tests.rs b/processor/src/chiplets/tests.rs index aaa16d09b4..37fda7e045 100644 --- a/processor/src/chiplets/tests.rs +++ b/processor/src/chiplets/tests.rs @@ -1,6 +1,6 @@ use crate::{ - utils::get_trace_len, CodeBlock, DefaultHost, ExecutionOptions, ExecutionTrace, Kernel, - Operation, Process, StackInputs, Vec, + CodeBlock, DefaultHost, ExecutionOptions, ExecutionTrace, Kernel, Operation, Process, + StackInputs, Vec, }; use miden_air::trace::{ chiplets::{ @@ -117,11 +117,11 @@ fn build_trace( process.execute_code_block(&program, &CodeBlockTable::default()).unwrap(); let (trace, _, _) = ExecutionTrace::test_finalize_trace(process); - let trace_len = get_trace_len(&trace) - ExecutionTrace::NUM_RAND_ROWS; + let trace_len = trace.num_rows() - ExecutionTrace::NUM_RAND_ROWS; ( - trace[CHIPLETS_RANGE] - .to_vec() + trace + .get_column_range(CHIPLETS_RANGE) .try_into() .expect("failed to convert vector to array"), trace_len, diff --git a/processor/src/decoder/aux_hints.rs b/processor/src/decoder/aux_hints.rs deleted file mode 100644 index c6333d37e0..0000000000 --- a/processor/src/decoder/aux_hints.rs +++ /dev/null @@ -1,485 +0,0 @@ -use crate::system::ContextId; - -use super::{ - super::trace::LookupTableRow, get_num_groups_in_next_batch, BlockInfo, ColMatrix, Felt, - FieldElement, StarkField, Vec, Word, EMPTY_WORD, ONE, ZERO, -}; - -// AUXILIARY TRACE HINTS -// 
================================================================================================ - -/// Contains information which can be used to simplify construction of execution traces of -/// decoder-related auxiliary trace segment columns (used in multiset checks). -pub struct AuxTraceHints { - /// A list of updates made to the block stack and block hash tables. Each entry contains a - /// clock cycle at which the update was made, as well as the description of the update. - block_exec_hints: Vec<(u32, BlockTableUpdate)>, - /// A list of rows which were added and then removed from the block stack table. The rows are - /// sorted by `block_id` in ascending order. - block_stack_rows: Vec, - /// A list of rows which were added and then removed from the block hash table. The rows are - /// sorted first by `parent_id` and then by `is_first_child` with the entry where - /// `is_first_child` = true coming first. - block_hash_rows: Vec, - /// A list of updates made to the op group table where each entry is a tuple containing the - /// cycle at which the update was made and the update description. - op_group_hints: Vec<(u32, OpGroupTableUpdate)>, - /// A list of rows which were added to and then removed from the op group table. - op_group_rows: Vec, -} - -impl AuxTraceHints { - // CONSTRUCTOR - // -------------------------------------------------------------------------------------------- - /// Returns an empty [AuxTraceHints] struct. 
- pub fn new() -> Self { - // initialize block hash table with an blank entry, this will be replaced with an entry - // containing the actual program hash at the end of trace generation - let block_hash_rows = vec![BlockHashTableRow::from_program_hash(EMPTY_WORD)]; - - Self { - block_exec_hints: Vec::new(), - block_stack_rows: Vec::new(), - block_hash_rows, - op_group_hints: Vec::new(), - op_group_rows: Vec::new(), - } - } - - // PUBLIC ACCESSORS - // -------------------------------------------------------------------------------------------- - - /// Returns hints which describe how the block stack and block hash tables were updated during - /// program execution. Each hint consists of a clock cycle and the update description for that - /// cycle. The hints are sorted by clock cycle in ascending order. - pub fn block_exec_hints(&self) -> &[(u32, BlockTableUpdate)] { - &self.block_exec_hints - } - - /// Returns a list of table rows which were added to and then removed from the block stack - /// table. We don't specify which cycles these rows were added/removed at because this info - /// can be inferred from execution hints. - /// - /// The rows are sorted by block_id in ascending order. - pub fn block_stack_table_rows(&self) -> &[BlockStackTableRow] { - &self.block_stack_rows - } - - /// Returns a list of table rows which were added to and then removed from the block hash - /// table. We don't specify which cycles these rows were added/removed at because this info - /// can be inferred from execution hints. - /// - /// The rows are sorted first by `parent_id` in ascending order and then by `is_first_child` - /// with the entry where `is_first_child` = true coming first. - pub fn block_hash_table_rows(&self) -> &[BlockHashTableRow] { - &self.block_hash_rows - } - - /// Returns hints which describe how the op group was updated during program execution. Each - /// hint consists of a clock cycle and the update description for that cycle. 
- pub fn op_group_table_hints(&self) -> &[(u32, OpGroupTableUpdate)] { - &self.op_group_hints - } - - /// Returns a list of table rows which were added to and then removed from the op group table. - /// We don't specify which cycles these rows were added/removed at because this info can be - /// inferred from the op group table hints. - pub fn op_group_table_rows(&self) -> &[OpGroupTableRow] { - &self.op_group_rows - } - - /// Returns an index of the row with the specified block_id in the list of block stack table - /// rows. Since the rows in the list are sorted by block_id, we can use binary search to find - /// the relevant row. - /// - /// If the row for the specified block_id is not found, None is returned. - pub fn get_block_stack_row_idx(&self, block_id: Felt) -> Option { - let block_id = block_id.as_int(); - self.block_stack_rows - .binary_search_by_key(&block_id, |row| row.block_id.as_int()) - .ok() - } - - /// Returns an index of the row with the specified parent_id and is_first_child in the list of - /// block hash table rows. Since the rows in the list are sorted by parent_id, we can use - /// binary search to find the relevant row. - /// - /// If the row for the specified parent_id and is_first_child is not found, None is returned. - pub fn get_block_hash_row_idx(&self, parent_id: Felt, is_first_child: bool) -> Option { - let parent_id = parent_id.as_int(); - match self - .block_hash_rows - .binary_search_by_key(&parent_id, |row| row.parent_id.as_int()) - { - Ok(idx) => { - // check if the row for the found index is the right one; we need to do this - // because binary search may return an index for either of the two entries for - // the specified parent_id - if self.block_hash_rows[idx].is_first_child == is_first_child { - Some(idx) - } else if is_first_child { - // if we got here, it means that is_first_child for the row at the found index - // is false. 
thus, the row with is_first_child = true should be right before it - let row = &self.block_hash_rows[idx - 1]; - debug_assert_eq!(row.parent_id.as_int(), parent_id); - debug_assert_eq!(row.is_first_child, is_first_child); - Some(idx - 1) - } else { - // similarly, if we got here, is_first_child for the row at the found index - // must be true. thus, the row with is_first_child = false should be right - // after it - let row = &self.block_hash_rows[idx + 1]; - debug_assert_eq!(row.parent_id.as_int(), parent_id); - debug_assert_eq!(row.is_first_child, is_first_child); - Some(idx + 1) - } - } - Err(_) => None, - } - } - - // STATE MUTATORS - // -------------------------------------------------------------------------------------------- - - /// Specifies that a new code block started executing at the specified clock cycle. This also - /// records the relevant rows for both, block stack and block hash tables. - pub fn block_started( - &mut self, - clk: u32, - block_info: &BlockInfo, - child1_hash: Option, - child2_hash: Option, - ) { - // insert the hint with the relevant update - let hint = BlockTableUpdate::BlockStarted(block_info.num_children()); - self.block_exec_hints.push((clk, hint)); - - // create a row which would be inserted into the block stack table - let bst_row = BlockStackTableRow::new(block_info); - self.block_stack_rows.push(bst_row); - - // create rows for the block hash table. this may result in creation of 0, 1, or 2 rows: - // - no rows are created for SPAN blocks (both child hashes are None). - // - one row is created with is_first_child=false for SPLIT and LOOP blocks. 
- // - two rows are created for JOIN blocks with first row having is_first_child=true, and - // the second row having is_first_child=false - if let Some(child1_hash) = child1_hash { - let is_first_child = child2_hash.is_some(); - let bsh_row1 = BlockHashTableRow::from_parent(block_info, child1_hash, is_first_child); - self.block_hash_rows.push(bsh_row1); - - if let Some(child2_hash) = child2_hash { - let bsh_row2 = BlockHashTableRow::from_parent(block_info, child2_hash, false); - self.block_hash_rows.push(bsh_row2); - } - } - } - - /// Specifies that a code block execution was completed at the specified clock cycle. We also - /// need to specify whether the block was the first child of a JOIN block so that we can find - /// correct block hash table row. - pub fn block_ended(&mut self, clk: u32, is_first_child: bool) { - self.block_exec_hints.push((clk, BlockTableUpdate::BlockEnded(is_first_child))); - } - - /// Specifies that another execution of a loop's body started at the specified clock cycle. - /// This is triggered by the REPEAT operation. - pub fn loop_repeat_started(&mut self, clk: u32) { - self.block_exec_hints.push((clk, BlockTableUpdate::LoopRepeated)); - } - - /// Specifies that execution of a SPAN block was extended at the specified clock cycle. This - /// is triggered by the RESPAN operation. This also adds a row for the new span batch to the - /// block stack table. - pub fn span_extended(&mut self, clk: u32, block_info: &BlockInfo) { - let row = BlockStackTableRow::new(block_info); - self.block_stack_rows.push(row); - self.block_exec_hints.push((clk, BlockTableUpdate::SpanExtended)) - } - - /// Specifies that an operation batch may have been inserted into the op group table at the - /// specified cycle. Operation groups are inserted into the table only if the number of groups - /// left is greater than 1. 
- pub fn insert_op_batch(&mut self, clk: u32, num_groups_left: Felt) { - // compute number of op groups in this batch - let num_batch_groups = get_num_groups_in_next_batch(num_groups_left); - debug_assert!(num_batch_groups > 0, "op batch is empty"); - - // the first op group in a batch is not added to the op_group table, so, we subtract 1 here - let num_inserted_groups = num_batch_groups - 1; - - // if at least one group was inserted, mark the current clock cycle with the number of op - // groups added to the op group table - if num_inserted_groups > 0 { - let update = OpGroupTableUpdate::InsertRows(num_inserted_groups as u32); - self.op_group_hints.push((clk, update)); - } - } - - /// Specifies that an entry for an operation group was removed from the op group table at the - /// specified clock cycle. - pub fn remove_op_group( - &mut self, - clk: u32, - batch_id: Felt, - group_pos: Felt, - group_value: Felt, - ) { - self.op_group_hints.push((clk, OpGroupTableUpdate::RemoveRow)); - // we record a row only when it is deleted because rows are added and deleted in the same - // order. thus, a sequence of deleted rows is exactly the same as the sequence of added - // rows. - self.op_group_rows.push(OpGroupTableRow::new(batch_id, group_pos, group_value)); - } - - /// Inserts the first entry into the block hash table. - pub fn set_program_hash(&mut self, program_hash: Word) { - self.block_hash_rows[0] = BlockHashTableRow::from_program_hash(program_hash); - } -} - -impl Default for AuxTraceHints { - fn default() -> Self { - Self::new() - } -} - -// UPDATE HINTS -// ================================================================================================ - -/// Describes updates to both, block stack and block hash tables as follows: -/// - `BlockStarted` and `BlockEnded` are relevant for both tables. -/// - `SpanExtended` is relevant only for the block stack table. -/// - `LoopRepeated` is relevant only for the block hash table. 
-#[derive(Debug, Copy, Clone, PartialEq, Eq)] -pub enum BlockTableUpdate { - BlockStarted(u32), // inner value contains the number of children for the block: 0, 1, or 2. - SpanExtended, - LoopRepeated, - BlockEnded(bool), // true indicates that the block was the first child of a JOIN block -} - -/// Describes an update to the op group table. There could be two types of updates: -/// - Some number of rows could be added to the table. In this case, the associated value specifies -/// how many rows were added. -/// - A single row could be removed from the table. -#[derive(Debug, Copy, Clone, PartialEq, Eq)] -pub enum OpGroupTableUpdate { - InsertRows(u32), - RemoveRow, -} - -// BLOCK STACK TABLE ROW -// ================================================================================================ - -/// Describes a single entry in the block stack table. -#[derive(Debug, Copy, Clone, PartialEq, Eq)] -pub struct BlockStackTableRow { - block_id: Felt, - parent_id: Felt, - is_loop: bool, - parent_ctx: ContextId, - parent_fn_hash: Word, - parent_fmp: Felt, - parent_stack_depth: u32, - parent_next_overflow_addr: Felt, -} - -impl BlockStackTableRow { - /// Returns a new [BlockStackTableRow] instantiated from the specified block info. - pub fn new(block_info: &BlockInfo) -> Self { - let ctx_info = block_info.ctx_info.unwrap_or_default(); - Self { - block_id: block_info.addr, - parent_id: block_info.parent_addr, - is_loop: block_info.is_entered_loop() == ONE, - parent_ctx: ctx_info.parent_ctx, - parent_fn_hash: ctx_info.parent_fn_hash, - parent_fmp: ctx_info.parent_fmp, - parent_stack_depth: ctx_info.parent_stack_depth, - parent_next_overflow_addr: ctx_info.parent_next_overflow_addr, - } - } - - /// Returns a new [BlockStackTableRow] instantiated with the specified parameters. This is - /// used for test purpose only. 
- #[cfg(test)] - pub fn new_test(block_id: Felt, parent_id: Felt, is_loop: bool) -> Self { - Self { - block_id, - parent_id, - is_loop, - parent_ctx: ContextId::root(), - parent_fn_hash: EMPTY_WORD, - parent_fmp: ZERO, - parent_stack_depth: 0, - parent_next_overflow_addr: ZERO, - } - } - - #[cfg(test)] - /// Returns a new [BlockStackTableRow] corresponding to a CALL code block. This is used for - /// test purpose only. - pub fn new_test_with_ctx( - block_id: Felt, - parent_id: Felt, - is_loop: bool, - ctx_info: super::ExecutionContextInfo, - ) -> Self { - Self { - block_id, - parent_id, - is_loop, - parent_ctx: ctx_info.parent_ctx, - parent_fn_hash: ctx_info.parent_fn_hash, - parent_fmp: ctx_info.parent_fmp, - parent_stack_depth: ctx_info.parent_stack_depth, - parent_next_overflow_addr: ctx_info.parent_next_overflow_addr, - } - } -} - -impl LookupTableRow for BlockStackTableRow { - /// Reduces this row to a single field element in the field specified by E. This requires - /// at least 12 alpha values. - fn to_value>( - &self, - _main_trace: &ColMatrix, - alphas: &[E], - ) -> E { - let is_loop = if self.is_loop { ONE } else { ZERO }; - alphas[0] - + alphas[1].mul_base(self.block_id) - + alphas[2].mul_base(self.parent_id) - + alphas[3].mul_base(is_loop) - + alphas[4].mul_base(Felt::from(self.parent_ctx)) - + alphas[5].mul_base(self.parent_fmp) - + alphas[6].mul_base(Felt::from(self.parent_stack_depth)) - + alphas[7].mul_base(self.parent_next_overflow_addr) - + alphas[8].mul_base(self.parent_fn_hash[0]) - + alphas[9].mul_base(self.parent_fn_hash[1]) - + alphas[10].mul_base(self.parent_fn_hash[2]) - + alphas[11].mul_base(self.parent_fn_hash[3]) - } -} - -// BLOCK HASH TABLE ROW -// ================================================================================================ - -/// Describes a single entry in the block hash table. An entry in the block hash table is a tuple -/// (parent_id, block_hash, is_first_child, is_loop_body). 
-#[derive(Debug, Copy, Clone, PartialEq, Eq)] -pub struct BlockHashTableRow { - parent_id: Felt, - block_hash: Word, - is_first_child: bool, - is_loop_body: bool, -} - -impl BlockHashTableRow { - // CONSTRUCTORS - // -------------------------------------------------------------------------------------------- - /// Returns a new [BlockHashTableRow] instantiated with the specified parameters. - pub fn from_parent(parent_info: &BlockInfo, block_hash: Word, is_first_child: bool) -> Self { - Self { - parent_id: parent_info.addr, - block_hash, - is_first_child, - is_loop_body: parent_info.is_entered_loop() == ONE, - } - } - - /// Returns a new [BlockHashTableRow] containing the hash of the entire program. - pub fn from_program_hash(program_hash: Word) -> Self { - Self { - parent_id: ZERO, - block_hash: program_hash, - is_first_child: false, - is_loop_body: false, - } - } - - /// Returns a new [BlockHashTableRow] instantiated with the specified parameters. This is - /// used for test purpose only. - #[cfg(test)] - pub fn new_test( - parent_id: Felt, - block_hash: Word, - is_first_child: bool, - is_loop_body: bool, - ) -> Self { - Self { - parent_id, - block_hash, - is_first_child, - is_loop_body, - } - } - - // PUBLIC ACCESSORS - // -------------------------------------------------------------------------------------------- - - /// Returns true if this table row is for a block which is the first child of a JOIN block. - pub fn is_first_child(&self) -> bool { - self.is_first_child - } -} - -impl LookupTableRow for BlockHashTableRow { - /// Reduces this row to a single field element in the field specified by E. This requires - /// at least 8 alpha values. 
- fn to_value>( - &self, - _main_trace: &ColMatrix, - alphas: &[E], - ) -> E { - let is_first_child = if self.is_first_child { ONE } else { ZERO }; - let is_loop_body = if self.is_loop_body { ONE } else { ZERO }; - alphas[0] - + alphas[1].mul_base(self.parent_id) - + alphas[2].mul_base(self.block_hash[0]) - + alphas[3].mul_base(self.block_hash[1]) - + alphas[4].mul_base(self.block_hash[2]) - + alphas[5].mul_base(self.block_hash[3]) - + alphas[6].mul_base(is_first_child) - + alphas[7].mul_base(is_loop_body) - } -} - -// OP GROUP TABLE ROW -// ================================================================================================ - -/// Describes a single entry in the op group table. An entry in the op group table is a tuple -/// (batch_id, group_pos, group_value). -#[derive(Debug, Copy, Clone, PartialEq, Eq)] -pub struct OpGroupTableRow { - batch_id: Felt, - group_pos: Felt, - group_value: Felt, -} - -impl OpGroupTableRow { - /// Returns a new [OpGroupTableRow] instantiated with the specified parameters. - pub fn new(batch_id: Felt, group_pos: Felt, group_value: Felt) -> Self { - Self { - batch_id, - group_pos, - group_value, - } - } -} - -impl LookupTableRow for OpGroupTableRow { - /// Reduces this row to a single field element in the field specified by E. This requires - /// at least 4 alpha values. 
- fn to_value>( - &self, - _main_trace: &ColMatrix, - alphas: &[E], - ) -> E { - alphas[0] - + alphas[1].mul_base(self.batch_id) - + alphas[2].mul_base(self.group_pos) - + alphas[3].mul_base(self.group_value) - } -} diff --git a/processor/src/decoder/auxiliary.rs b/processor/src/decoder/auxiliary.rs new file mode 100644 index 0000000000..910cc99701 --- /dev/null +++ b/processor/src/decoder/auxiliary.rs @@ -0,0 +1,552 @@ +use crate::trace::AuxColumnBuilder; + +use super::{Felt, StarkField, Vec, ONE, ZERO}; + +use miden_air::trace::{ + decoder::{OP_BATCH_2_GROUPS, OP_BATCH_4_GROUPS, OP_BATCH_8_GROUPS}, + main_trace::MainTrace, +}; + +use vm_core::{crypto::hash::RpoDigest, FieldElement, Operation}; + +// CONSTANTS +// ================================================================================================ + +const JOIN: u8 = Operation::Join.op_code(); +const SPLIT: u8 = Operation::Split.op_code(); +const LOOP: u8 = Operation::Loop.op_code(); +const REPEAT: u8 = Operation::Repeat.op_code(); +const DYN: u8 = Operation::Dyn.op_code(); +const CALL: u8 = Operation::Call.op_code(); +const SYSCALL: u8 = Operation::SysCall.op_code(); +const SPAN: u8 = Operation::Span.op_code(); +const RESPAN: u8 = Operation::Respan.op_code(); +const PUSH: u8 = Operation::Push(ZERO).op_code(); +const END: u8 = Operation::End.op_code(); +const HALT: u8 = Operation::Halt.op_code(); + +// AUXILIARY TRACE BUILDER +// ================================================================================================ + +/// Constructs the execution traces of stack-related auxiliary trace segment columns +/// (used in multiset checks). +#[derive(Default, Clone, Copy)] +pub struct AuxTraceBuilder {} + +impl AuxTraceBuilder { + /// Builds and returns decoder auxiliary trace columns p1, p2, and p3 describing states of block + /// stack, block hash, and op group tables respectively. 
+ pub fn build_aux_columns>( + &self, + main_trace: &MainTrace, + rand_elements: &[E], + ) -> Vec> { + let block_stack_column_builder = BlockStackColumnBuilder::default(); + let block_hash_column_builder = BlockHashTableColumnBuilder::default(); + let op_group_table_column_builder = OpGroupTableColumnBuilder::default(); + + let p1 = block_stack_column_builder.build_aux_column(main_trace, rand_elements); + let p2 = block_hash_column_builder.build_aux_column(main_trace, rand_elements); + let p3 = op_group_table_column_builder.build_aux_column(main_trace, rand_elements); + + vec![p1, p2, p3] + } +} + +// BLOCK STACK TABLE COLUMN +// ================================================================================================ + +/// Builds the execution trace of the decoder's `p1` column which describes the state of the block +/// stack table via multiset checks. +#[derive(Default)] +pub struct BlockStackColumnBuilder {} + +impl> AuxColumnBuilder for BlockStackColumnBuilder { + fn get_requests_at(&self, main_trace: &MainTrace, alphas: &[E], i: usize) -> E { + block_stack_table_removals(main_trace, alphas, i) + } + + fn get_responses_at(&self, main_trace: &MainTrace, alphas: &[E], i: usize) -> E { + block_stack_table_inclusions(main_trace, alphas, i) + } +} + +/// Adds a row to the block stack table. +fn block_stack_table_inclusions(main_trace: &MainTrace, alphas: &[E], i: usize) -> E +where + E: FieldElement, +{ + let op_code_felt = main_trace.get_op_code(i); + let op_code = op_code_felt.as_int() as u8; + + match op_code { + JOIN | SPLIT | SPAN | DYN | LOOP | RESPAN | CALL | SYSCALL => { + get_block_stack_table_inclusion_multiplicand(main_trace, i, alphas, op_code) + } + _ => E::ONE, + } +} + +/// Removes a row from the block stack table. 
+fn block_stack_table_removals(main_trace: &MainTrace, alphas: &[E], i: usize) -> E
+where
+    E: FieldElement,
+{
+    let op_code_felt = main_trace.get_op_code(i);
+    let op_code = op_code_felt.as_int() as u8;
+
+    match op_code {
+        RESPAN => get_block_stack_table_removal_multiplicand(main_trace, i, true, alphas),
+        END => get_block_stack_table_removal_multiplicand(main_trace, i, false, alphas),
+        _ => E::ONE,
+    }
+}
+
+// BLOCK HASH TABLE COLUMN
+// ================================================================================================
+
+/// Builds the execution trace of the decoder's `p2` column which describes the state of the block
+/// hash table via multiset checks.
+#[derive(Default)]
+pub struct BlockHashTableColumnBuilder {}
+
+impl> AuxColumnBuilder for BlockHashTableColumnBuilder {
+    fn init_responses(&self, main_trace: &MainTrace, alphas: &[E]) -> E {
+        let row_index = (0..main_trace.num_rows())
+            .find(|row| main_trace.get_op_code(*row) == Felt::from(HALT))
+            .expect("execution trace must include at least one occurrence of HALT");
+        let program_hash = main_trace.decoder_hasher_state_first_half(row_index);
+        block_hash_table_initialize(&program_hash.into(), alphas)
+    }
+
+    fn get_requests_at(&self, main_trace: &MainTrace, alphas: &[E], i: usize) -> E {
+        block_hash_table_removals(main_trace, alphas, i)
+    }
+
+    fn get_responses_at(&self, main_trace: &MainTrace, alphas: &[E], i: usize) -> E {
+        block_hash_table_inclusions(main_trace, alphas, i)
+    }
+}
+
+/// Adds a row to the block hash table. 
+fn block_hash_table_inclusions(main_trace: &MainTrace, alphas: &[E], i: usize) -> E
+where
+    E: FieldElement,
+{
+    let op_code_felt = main_trace.get_op_code(i);
+    let op_code = op_code_felt.as_int() as u8;
+
+    match op_code {
+        JOIN => get_block_hash_table_inclusion_multiplicand_join(main_trace, i, alphas),
+        SPLIT => get_block_hash_table_inclusion_multiplicand_split(main_trace, i, alphas),
+        LOOP => get_block_hash_table_inclusion_multiplicand_loop(main_trace, i, alphas),
+        REPEAT => get_block_hash_table_inclusion_multiplicand_repeat(main_trace, i, alphas),
+        DYN => get_block_hash_table_inclusion_multiplicand_dyn(main_trace, i, alphas),
+        _ => E::ONE,
+    }
+}
+
+/// Removes a row from the block hash table.
+fn block_hash_table_removals(main_trace: &MainTrace, alphas: &[E], i: usize) -> E
+where
+    E: FieldElement,
+{
+    let op_code_felt = main_trace.get_op_code(i);
+    let op_code = op_code_felt.as_int() as u8;
+
+    let op_code_felt_next = main_trace.get_op_code(i + 1);
+    let op_code_next = op_code_felt_next.as_int() as u8;
+
+    match op_code {
+        END => get_block_hash_table_removal_multiplicand(main_trace, i, alphas, op_code_next),
+        _ => E::ONE,
+    }
+}
+
+// OP GROUP TABLE COLUMN
+// ================================================================================================
+
+/// Builds the execution trace of the decoder's `p3` column which describes the state of the op
+/// group table via multiset checks.
+#[derive(Default)]
+pub struct OpGroupTableColumnBuilder {}
+
+impl> AuxColumnBuilder for OpGroupTableColumnBuilder {
+    fn get_requests_at(&self, main_trace: &MainTrace, alphas: &[E], i: usize) -> E {
+        op_group_table_removals(main_trace, alphas, i)
+    }
+
+    fn get_responses_at(&self, main_trace: &MainTrace, alphas: &[E], i: usize) -> E {
+        op_group_table_inclusions(main_trace, alphas, i)
+    }
+}
+
+/// Adds a row to the op group table. 
+fn op_group_table_inclusions(main_trace: &MainTrace, alphas: &[E], i: usize) -> E
+where
+    E: FieldElement,
+{
+    let op_code_felt = main_trace.get_op_code(i);
+    let op_code = op_code_felt.as_int() as u8;
+
+    match op_code {
+        SPAN | RESPAN => get_op_group_table_inclusion_multiplicand(main_trace, i, alphas),
+        _ => E::ONE,
+    }
+}
+
+/// Removes a row from the op group table.
+fn op_group_table_removals(main_trace: &MainTrace, alphas: &[E], i: usize) -> E
+where
+    E: FieldElement,
+{
+    let delete_group_flag = main_trace.delta_group_count(i) * main_trace.is_in_span(i);
+
+    if delete_group_flag == ONE {
+        get_op_group_table_removal_multiplicand(main_trace, i, alphas)
+    } else {
+        E::ONE
+    }
+}
+
+// HELPER FUNCTIONS
+// ================================================================================================
+
+/// Computes the multiplicand representing the inclusion of a new row to the block stack table.
+pub fn get_block_stack_table_inclusion_multiplicand>(
+    main_trace: &MainTrace,
+    i: usize,
+    alphas: &[E],
+    op_code: u8,
+) -> E {
+    let block_id = main_trace.addr(i + 1);
+    let parent_id = if op_code == RESPAN {
+        main_trace.decoder_hasher_state_element(1, i + 1)
+    } else {
+        main_trace.addr(i)
+    };
+    let is_loop = if op_code == LOOP {
+        main_trace.stack_element(0, i)
+    } else {
+        ZERO
+    };
+    let elements = if op_code == CALL || op_code == SYSCALL {
+        let parent_ctx = main_trace.ctx(i);
+        let parent_fmp = main_trace.fmp(i);
+        let parent_stack_depth = main_trace.stack_depth(i);
+        let parent_next_overflow_addr = main_trace.parent_overflow_address(i);
+        let parent_fn_hash = main_trace.decoder_hasher_state_first_half(i);
+        [
+            ONE,
+            block_id,
+            parent_id,
+            is_loop,
+            parent_ctx,
+            parent_fmp,
+            parent_stack_depth,
+            parent_next_overflow_addr,
+            parent_fn_hash[0],
+            parent_fn_hash[1],
+            parent_fn_hash[2],
+            parent_fn_hash[3],
+        ]
+    } else {
+        let mut result = [ZERO; 12];
+        result[0] = ONE;
+        result[1] = block_id;
+        result[2] = parent_id;
+        result[3] = 
is_loop;
+        result
+    };
+
+    let mut value = E::ZERO;
+
+    for (&alpha, &element) in alphas.iter().zip(elements.iter()).skip(1) {
+        value += alpha.mul_base(element);
+    }
+    value
+}
+
+/// Computes the multiplicand representing the removal of a row from the block stack table.
+pub fn get_block_stack_table_removal_multiplicand>(
+    main_trace: &MainTrace,
+    i: usize,
+    is_respan: bool,
+    alphas: &[E],
+) -> E {
+    let block_id = main_trace.addr(i);
+    let parent_id = if is_respan {
+        main_trace.decoder_hasher_state_element(1, i + 1)
+    } else {
+        main_trace.addr(i + 1)
+    };
+    let is_loop = main_trace.is_loop_flag(i);
+
+    let elements = if main_trace.is_call_flag(i) == ONE || main_trace.is_syscall_flag(i) == ONE {
+        let parent_ctx = main_trace.ctx(i + 1);
+        let parent_fmp = main_trace.fmp(i + 1);
+        let parent_stack_depth = main_trace.stack_depth(i + 1);
+        let parent_next_overflow_addr = main_trace.parent_overflow_address(i + 1);
+        let parent_fn_hash = main_trace.fn_hash(i);
+
+        [
+            ONE,
+            block_id,
+            parent_id,
+            is_loop,
+            parent_ctx,
+            parent_fmp,
+            parent_stack_depth,
+            parent_next_overflow_addr,
+            parent_fn_hash[0],
+            parent_fn_hash[1],
+            parent_fn_hash[2],
+            parent_fn_hash[3],
+        ]
+    } else {
+        let mut result = [ZERO; 12];
+        result[0] = ONE;
+        result[1] = block_id;
+        result[2] = parent_id;
+        result[3] = is_loop;
+        result
+    };
+
+    let mut value = E::ZERO;
+
+    for (&alpha, &element) in alphas.iter().zip(elements.iter()).skip(1) {
+        value += alpha.mul_base(element);
+    }
+    value
+}
+
+/// Computes the initialization value for the block hash table.
+fn block_hash_table_initialize(program_hash: &RpoDigest, alphas: &[E]) -> E
+where
+    E: FieldElement,
+{
+    alphas[0]
+        + alphas[2].mul_base(program_hash[0])
+        + alphas[3].mul_base(program_hash[1])
+        + alphas[4].mul_base(program_hash[2])
+        + alphas[5].mul_base(program_hash[3])
+}
+
+/// Computes the multiplicand representing the inclusion of a new row representing a JOIN block
+/// to the block hash table. 
+fn get_block_hash_table_inclusion_multiplicand_join>( + main_trace: &MainTrace, + i: usize, + alphas: &[E], +) -> E { + let a_prime = main_trace.addr(i + 1); + let state = main_trace.decoder_hasher_state(i); + let ch1 = alphas[1].mul_base(a_prime) + + alphas[2].mul_base(state[0]) + + alphas[3].mul_base(state[1]) + + alphas[4].mul_base(state[2]) + + alphas[5].mul_base(state[3]); + let ch2 = alphas[1].mul_base(a_prime) + + alphas[2].mul_base(state[4]) + + alphas[3].mul_base(state[5]) + + alphas[4].mul_base(state[6]) + + alphas[5].mul_base(state[7]); + + let difference = (alphas[0] * alphas[0]) + (alphas[0] * (ch1 + alphas[6])) + (alphas[0] * ch2); + (ch1 + alphas[6]) * ch2 + difference - alphas[0] +} + +/// Computes the multiplicand representing the inclusion of a new row representing a SPLIT block +/// to the block hash table. +fn get_block_hash_table_inclusion_multiplicand_split>( + main_trace: &MainTrace, + i: usize, + alphas: &[E], +) -> E { + let s0 = main_trace.stack_element(0, i); + let a_prime = main_trace.addr(i + 1); + let state = main_trace.decoder_hasher_state(i); + + if s0 == ONE { + alphas[1].mul_base(a_prime) + + alphas[2].mul_base(state[0]) + + alphas[3].mul_base(state[1]) + + alphas[4].mul_base(state[2]) + + alphas[5].mul_base(state[3]) + } else { + alphas[1].mul_base(a_prime) + + alphas[2].mul_base(state[4]) + + alphas[3].mul_base(state[5]) + + alphas[4].mul_base(state[6]) + + alphas[5].mul_base(state[7]) + } +} + +/// Computes the multiplicand representing the inclusion of a new row representing a LOOP block +/// to the block hash table. 
+fn get_block_hash_table_inclusion_multiplicand_loop>( + main_trace: &MainTrace, + i: usize, + alphas: &[E], +) -> E { + let s0 = main_trace.stack_element(0, i); + + if s0 == ONE { + let a_prime = main_trace.addr(i + 1); + let state = main_trace.decoder_hasher_state(i); + alphas[1].mul_base(a_prime) + + alphas[2].mul_base(state[0]) + + alphas[3].mul_base(state[1]) + + alphas[4].mul_base(state[2]) + + alphas[5].mul_base(state[3]) + + alphas[7] + } else { + E::ONE + } +} + +/// Computes the multiplicand representing the inclusion of a new row representing a REPEAT +/// to the block hash table. +fn get_block_hash_table_inclusion_multiplicand_repeat>( + main_trace: &MainTrace, + i: usize, + alphas: &[E], +) -> E { + let a_prime = main_trace.addr(i + 1); + let state = main_trace.decoder_hasher_state_first_half(i); + + alphas[1].mul_base(a_prime) + + alphas[2].mul_base(state[0]) + + alphas[3].mul_base(state[1]) + + alphas[4].mul_base(state[2]) + + alphas[5].mul_base(state[3]) + + alphas[7] +} + +/// Computes the multiplicand representing the inclusion of a new row representing a DYN block +/// to the block hash table. +fn get_block_hash_table_inclusion_multiplicand_dyn>( + main_trace: &MainTrace, + i: usize, + alphas: &[E], +) -> E { + let a_prime = main_trace.addr(i + 1); + let s0 = main_trace.stack_element(0, i); + let s1 = main_trace.stack_element(1, i); + let s2 = main_trace.stack_element(2, i); + let s3 = main_trace.stack_element(3, i); + + alphas[1].mul_base(a_prime) + + alphas[2].mul_base(s3) + + alphas[3].mul_base(s2) + + alphas[4].mul_base(s1) + + alphas[5].mul_base(s0) +} + +/// Computes the multiplicand representing the removal of a row from the block hash table. 
+fn get_block_hash_table_removal_multiplicand>( + main_trace: &MainTrace, + i: usize, + alphas: &[E], + op_code_next: u8, +) -> E { + let a = main_trace.addr(i + 1); + let digest = main_trace.decoder_hasher_state_first_half(i); + let is_loop_body = main_trace.is_loop_body_flag(i); + let next_end_or_repeat = + if op_code_next == END || op_code_next == REPEAT || op_code_next == HALT { + E::ZERO + } else { + alphas[6] + }; + + alphas[1].mul_base(a) + + alphas[2].mul_base(digest[0]) + + alphas[3].mul_base(digest[1]) + + alphas[4].mul_base(digest[2]) + + alphas[5].mul_base(digest[3]) + + alphas[7].mul_base(is_loop_body) + + next_end_or_repeat +} + +// TODO: move this to the tests +fn get_difference(alpha: E, rest: Vec) -> E { + // computes + // (alpha + rest_0) * (alpha + rest_1) * ... * (alpha + rest_n) - (rest_1 * rest_2 * ... * rest_n) + let sum_product = rest.iter().fold(E::ONE, |acc, rest_i| acc * (alpha + *rest_i)); + let rest_product = rest.iter().fold(E::ONE, |acc, rest_i| *rest_i * acc); + sum_product - rest_product +} + +/// Computes the multiplicand representing the inclusion of a new row to the op group table. 
+pub fn get_op_group_table_inclusion_multiplicand>( + main_trace: &MainTrace, + i: usize, + alphas: &[E], +) -> E { + let block_id = main_trace.addr(i + 1); + let group_count = main_trace.group_count(i); + let op_batch_flag = main_trace.op_batch_flag(i); + + if op_batch_flag == OP_BATCH_8_GROUPS { + let h = main_trace.decoder_hasher_state(i); + let actual = (1..8_usize).fold(E::ONE, |acc, k| { + acc * (alphas[1].mul_base(block_id) + + alphas[2].mul_base(group_count - Felt::from(k as u64)) + + alphas[3].mul_base(h[k])) + }); + let rest = (1..8_usize) + .map(|k| { + alphas[1].mul_base(block_id) + + alphas[2].mul_base(group_count - Felt::from(k as u64)) + + alphas[3].mul_base(h[k]) + }) + .collect::>(); + actual + get_difference(alphas[0], rest) - alphas[0] + } else if op_batch_flag == OP_BATCH_4_GROUPS { + let h = main_trace.decoder_hasher_state_first_half(i); + let actual = (1..4_usize).fold(E::ONE, |acc, k| { + acc * (alphas[1].mul_base(block_id) + + alphas[2].mul_base(group_count - Felt::from(k as u64)) + + alphas[3].mul_base(h[k])) + }); + let rest = (1..4_usize) + .map(|k| { + alphas[1].mul_base(block_id) + + alphas[2].mul_base(group_count - Felt::from(k as u64)) + + alphas[3].mul_base(h[k]) + }) + .collect::>(); + actual + get_difference(alphas[0], rest) - alphas[0] + } else if op_batch_flag == OP_BATCH_2_GROUPS { + let h = main_trace.decoder_hasher_state_first_half(i); + alphas[1].mul_base(block_id) + + alphas[2].mul_base(group_count - ONE) + + alphas[3].mul_base(h[1]) + } else { + E::ONE + } +} + +/// Computes the multiplicand representing the removal of a row from the op group table. 
+pub fn get_op_group_table_removal_multiplicand>( + main_trace: &MainTrace, + i: usize, + alphas: &[E], +) -> E { + let group_count = main_trace.group_count(i); + let block_id = main_trace.addr(i); + + let op_code = main_trace.get_op_code(i); + let tmp = if op_code == Felt::from(PUSH) { + main_trace.stack_element(0, i + 1) + } else { + let h0 = main_trace.decoder_hasher_state_first_half(i + 1)[0]; + + let op_prime = main_trace.get_op_code(i + 1); + h0.mul_small(1 << 7) + op_prime + }; + alphas[1].mul_base(block_id) + + alphas[2].mul_base(group_count) + + alphas[3].mul_base(tmp) +} diff --git a/processor/src/decoder/block_stack.rs b/processor/src/decoder/block_stack.rs index 4fdc1e995a..8d9bc0e294 100644 --- a/processor/src/decoder/block_stack.rs +++ b/processor/src/decoder/block_stack.rs @@ -139,20 +139,6 @@ impl BlockInfo { _ => ZERO, } } - - /// Returns the number of children a block has. This is an integer between 0 and 2 (both - /// inclusive). - pub fn num_children(&self) -> u32 { - match self.block_type { - BlockType::Join(_) => 2, - BlockType::Split => 1, - BlockType::Loop(is_entered) => u32::from(is_entered), - BlockType::Call => 1, - BlockType::Dyn => 1, - BlockType::SysCall => 1, - BlockType::Span => 0, - } - } } // EXECUTION CONTEXT INFO diff --git a/processor/src/decoder/mod.rs b/processor/src/decoder/mod.rs index ea08cb620e..a626632339 100644 --- a/processor/src/decoder/mod.rs +++ b/processor/src/decoder/mod.rs @@ -1,7 +1,6 @@ use super::{ - Call, ColMatrix, Dyn, ExecutionError, Felt, FieldElement, Host, Join, Loop, OpBatch, Operation, - Process, Span, Split, StarkField, Vec, Word, EMPTY_WORD, MIN_TRACE_LEN, ONE, OP_BATCH_SIZE, - ZERO, + Call, Dyn, ExecutionError, Felt, Host, Join, Loop, OpBatch, Operation, Process, Span, Split, + StarkField, Vec, Word, EMPTY_WORD, MIN_TRACE_LEN, ONE, OP_BATCH_SIZE, ZERO, }; use miden_air::trace::{ chiplets::hasher::DIGEST_LEN, @@ -15,16 +14,14 @@ use vm_core::{code_blocks::get_span_op_group_count, 
stack::STACK_TOP_SIZE, Assem mod trace; use trace::DecoderTrace; -#[cfg(test)] -use miden_air::trace::decoder::NUM_USER_OP_HELPERS; +mod auxiliary; +pub use auxiliary::AuxTraceBuilder; mod block_stack; -use block_stack::{BlockInfo, BlockStack, BlockType, ExecutionContextInfo}; +use block_stack::{BlockStack, BlockType, ExecutionContextInfo}; -mod aux_hints; -pub use aux_hints::{AuxTraceHints, BlockTableUpdate, OpGroupTableUpdate}; #[cfg(test)] -pub(crate) use aux_hints::{BlockHashTableRow, BlockStackTableRow, OpGroupTableRow}; +use miden_air::trace::decoder::NUM_USER_OP_HELPERS; #[cfg(test)] mod tests; @@ -67,9 +64,6 @@ where // executed the rest of the VM state does not change self.decoder.end_control_block(block.hash().into()); - // send the end of control block to the chiplets bus to handle the final hash request. - self.chiplets.read_hash_result(); - self.execute_op(Operation::Noop) } @@ -92,7 +86,7 @@ where // start decoding the SPLIT block. this appends a row with SPLIT operation to the decoder // trace. we also pop the value off the top of the stack and return it. - self.decoder.start_split(child1_hash, child2_hash, addr, condition); + self.decoder.start_split(child1_hash, child2_hash, addr); self.execute_op(Operation::Drop)?; Ok(condition) } @@ -103,9 +97,6 @@ where // executed the rest of the VM state does not change self.decoder.end_control_block(block.hash().into()); - // send the end of control block to the chiplets bus to handle the final hash request. - self.chiplets.read_hash_result(); - self.execute_op(Operation::Noop) } @@ -146,9 +137,6 @@ where // this appends a row with END operation to the decoder trace. self.decoder.end_control_block(block.hash().into()); - // send the end of control block to the chiplets bus to handle the final hash request. - self.chiplets.read_hash_result(); - // if we are exiting a loop, we also need to pop the top value off the stack (and this // value must be ZERO - otherwise, we should have stayed in the loop). 
but, if we never // entered the loop in the first place, the stack would have been popped when the LOOP @@ -221,9 +209,6 @@ where .end_control_block(block.hash().into()) .expect("no execution context"); - // send the end of control block to the chiplets bus to handle the final hash request. - self.chiplets.read_hash_result(); - // when returning from a function call or a syscall, restore the context of the system // registers and the operand stack to what it was prior to the call. self.system.restore_context( @@ -263,9 +248,6 @@ where // executed the rest of the VM state does not change self.decoder.end_control_block(block.hash().into()); - // send the end of control block to the chiplets bus to handle the final hash request. - self.chiplets.read_hash_result(); - self.execute_op(Operation::Noop) } @@ -292,9 +274,6 @@ where /// Continues decoding a SPAN block by absorbing the next batch of operations. pub(super) fn respan(&mut self, op_batch: &OpBatch) { self.decoder.respan(op_batch); - - // send a request to the chiplets to continue the hash and absorb new elements. - self.chiplets.absorb_span_batch(); } /// Ends decoding a SPAN block. @@ -303,9 +282,6 @@ where // executed the rest of the VM state does not change self.decoder.end_span(block.hash().into()); - // send the end of control block to the chiplets bus to handle the final hash request. - self.chiplets.read_hash_result(); - self.execute_op(Operation::Noop) } } @@ -355,7 +331,6 @@ where /// - `be1` is set when the two most significant op bits are ONE. /// /// In addition to the execution trace, the decoder also contains the following: -/// - A set of hints used in construction of decoder-related columns in auxiliary trace segment. /// - An instance of [DebugInfo] which is only populated in debug mode. This debug_info instance /// includes operations executed by the VM and AsmOp decorators. AsmOp decorators are populated /// only when both the processor and assembler are in debug mode. 
@@ -363,7 +338,6 @@ pub struct Decoder { block_stack: BlockStack, span_context: Option, trace: DecoderTrace, - aux_hints: AuxTraceHints, debug_info: DebugInfo, } @@ -376,7 +350,6 @@ impl Decoder { block_stack: BlockStack::default(), span_context: None, trace: DecoderTrace::new(), - aux_hints: AuxTraceHints::new(), debug_info: DebugInfo::new(in_debug_mode), } } @@ -415,24 +388,11 @@ impl Decoder { /// This pushes a block with ID=addr onto the block stack and appends execution of a JOIN /// operation to the trace. pub fn start_join(&mut self, child1_hash: Word, child2_hash: Word, addr: Felt) { - // get the current clock cycle here (before the trace table is updated) - let clk = self.trace_len() as u32; - // append a JOIN row to the execution trace let parent_addr = self.block_stack.push(addr, BlockType::Join(false), None); self.trace .append_block_start(parent_addr, Operation::Join, child1_hash, child2_hash); - // mark this cycle as the cycle at which a new JOIN block began execution (this affects - // block stack and block hash tables). Both children of the JOIN block are expected to - // be executed, and thus we record both of their hashes. - self.aux_hints.block_started( - clk, - self.block_stack.peek(), - Some(child1_hash), - Some(child2_hash), - ); - self.debug_info.append_operation(Operation::Join); } @@ -440,28 +400,12 @@ impl Decoder { /// /// This pushes a block with ID=addr onto the block stack and appends execution of a SPLIT /// operation to the trace. 
- pub fn start_split( - &mut self, - child1_hash: Word, - child2_hash: Word, - addr: Felt, - stack_top: Felt, - ) { - // get the current clock cycle here (before the trace table is updated) - let clk = self.trace_len() as u32; - + pub fn start_split(&mut self, child1_hash: Word, child2_hash: Word, addr: Felt) { // append a SPLIT row to the execution trace let parent_addr = self.block_stack.push(addr, BlockType::Split, None); self.trace .append_block_start(parent_addr, Operation::Split, child1_hash, child2_hash); - // mark this cycle as the cycle at which a SPLIT block began execution (this affects block - // stack and block hash tables). Only one child of the SPLIT block is expected to be - // executed, and thus, we record the hash only for that child. - let taken_branch_hash = if stack_top == ONE { child1_hash } else { child2_hash }; - self.aux_hints - .block_started(clk, self.block_stack.peek(), Some(taken_branch_hash), None); - self.debug_info.append_operation(Operation::Split); } @@ -470,22 +414,12 @@ impl Decoder { /// This pushes a block with ID=addr onto the block stack and appends execution of a LOOP /// operation to the trace. A block is marked as a loop block only if is_loop = ONE. pub fn start_loop(&mut self, loop_body_hash: Word, addr: Felt, stack_top: Felt) { - // get the current clock cycle here (before the trace table is updated) - let clk = self.trace_len() as u32; - // append a LOOP row to the execution trace let enter_loop = stack_top == ONE; let parent_addr = self.block_stack.push(addr, BlockType::Loop(enter_loop), None); self.trace .append_block_start(parent_addr, Operation::Loop, loop_body_hash, EMPTY_WORD); - // mark this cycle as the cycle at which a new LOOP block has started (this may affect - // block hash table). A loop block has a single child only if the body of the loop is - // executed at least once. 
- let executed_loop_body = if enter_loop { Some(loop_body_hash) } else { None }; - self.aux_hints - .block_started(clk, self.block_stack.peek(), executed_loop_body, None); - self.debug_info.append_operation(Operation::Loop); } @@ -493,18 +427,11 @@ impl Decoder { /// /// This appends an execution of a REPEAT operation to the trace. pub fn repeat(&mut self) { - // get the current clock cycle here (before the trace table is updated) - let clk = self.trace_len() as u32; - // append a REPEAT row to the execution trace let block_info = self.block_stack.peek(); debug_assert_eq!(ONE, block_info.is_entered_loop()); self.trace.append_loop_repeat(block_info.addr); - // mark this cycle as the cycle at which a new iteration of a loop started (this affects - // block hash table) - self.aux_hints.loop_repeat_started(clk); - self.debug_info.append_operation(Operation::Repeat); } @@ -513,17 +440,10 @@ impl Decoder { /// This pushes a block with ID=addr onto the block stack and appends execution of a CALL /// operation to the trace. pub fn start_call(&mut self, fn_hash: Word, addr: Felt, ctx_info: ExecutionContextInfo) { - // get the current clock cycle here (before the trace table is updated) - let clk = self.trace_len() as u32; - // push CALL block info onto the block stack and append a CALL row to the execution trace let parent_addr = self.block_stack.push(addr, BlockType::Call, Some(ctx_info)); self.trace.append_block_start(parent_addr, Operation::Call, fn_hash, EMPTY_WORD); - // mark this cycle as the cycle at which a new CALL block began execution (this affects - // block stack and block hash tables). A CALL block has only a single child. - self.aux_hints.block_started(clk, self.block_stack.peek(), Some(fn_hash), None); - self.debug_info.append_operation(Operation::Call); } @@ -532,19 +452,12 @@ impl Decoder { /// This pushes a block with ID=addr onto the block stack and appends execution of a SYSCALL /// operation to the trace. 
pub fn start_syscall(&mut self, fn_hash: Word, addr: Felt, ctx_info: ExecutionContextInfo) { - // get the current clock cycle here (before the trace table is updated) - let clk = self.trace_len() as u32; - // push SYSCALL block info onto the block stack and append a SYSCALL row to the execution // trace let parent_addr = self.block_stack.push(addr, BlockType::SysCall, Some(ctx_info)); self.trace .append_block_start(parent_addr, Operation::SysCall, fn_hash, EMPTY_WORD); - // mark this cycle as the cycle at which a new SYSCALL block began execution (this affects - // block stack and block hash tables). A SYSCALL block has only a single child. - self.aux_hints.block_started(clk, self.block_stack.peek(), Some(fn_hash), None); - self.debug_info.append_operation(Operation::SysCall); } @@ -553,18 +466,10 @@ impl Decoder { /// This pushes a block with ID=addr onto the block stack and appends execution of a DYN /// operation to the trace. pub fn start_dyn(&mut self, dyn_hash: Word, addr: Felt) { - // get the current clock cycle here (before the trace table is updated) - let clk = self.trace_len() as u32; - // push DYN block info onto the block stack and append a DYN row to the execution trace let parent_addr = self.block_stack.push(addr, BlockType::Dyn, None); self.trace.append_block_start(parent_addr, Operation::Dyn, dyn_hash, [ZERO; 4]); - // mark this cycle as the cycle at which a new DYN block began execution (this affects - // block stack and block hash tables). A DYN block has no children but points to the hash - // provided on the stack. - self.aux_hints.block_started(clk, self.block_stack.peek(), Some(dyn_hash), None); - self.debug_info.append_operation(Operation::Dyn); } @@ -577,9 +482,6 @@ impl Decoder { /// execution context and free memory pointers were set before the CALL block started /// executing. For non-CALL blocks these values are set to zeros and should be ignored. 
pub fn end_control_block(&mut self, block_hash: Word) -> Option { - // get the current clock cycle here (before the trace table is updated) - let clk = self.trace_len() as u32; - // remove the block from the top of the block stack and add an END row to the trace let block_info = self.block_stack.pop(); self.trace.append_block_end( @@ -591,9 +493,6 @@ impl Decoder { block_info.is_syscall(), ); - // mark this cycle as the cycle at which block execution has ended - self.aux_hints.block_ended(clk, block_info.is_first_child); - self.debug_info.append_operation(Operation::End); block_info.ctx_info @@ -607,9 +506,6 @@ impl Decoder { debug_assert!(self.span_context.is_none(), "already in span"); let parent_addr = self.block_stack.push(addr, BlockType::Span, None); - // get the current clock cycle here (before the trace table is updated) - let clk = self.trace_len() as u32; - // add a SPAN row to the trace self.trace .append_span_start(parent_addr, first_op_batch.groups(), num_op_groups); @@ -621,22 +517,12 @@ impl Decoder { group_ops_left: first_op_batch.groups()[0], }); - // mark the current cycle as a cycle at which an operation batch may have been inserted - // into the op_group table - self.aux_hints.insert_op_batch(clk, num_op_groups); - - // mark the current cycle as the cycle at which a SPAN block has started; SPAN block has - // no children - self.aux_hints.block_started(clk, self.block_stack.peek(), None, None); - self.debug_info.append_operation(Operation::Span); } /// Starts decoding of the next operation batch in the current SPAN. 
pub fn respan(&mut self, op_batch: &OpBatch) { // get the current clock cycle here (before the trace table is updated) - let clk = self.trace_len() as u32; - // add RESPAN row to the trace self.trace.append_respan(op_batch.groups()); @@ -647,14 +533,6 @@ impl Decoder { let ctx = self.span_context.as_mut().expect("not in span"); - // mark the current cycle as a cycle at which an operation batch may have been inserted - // into the op_group table - self.aux_hints.insert_op_batch(clk, ctx.num_groups_left); - - // mark the current cycle as a cycle at which the ID of the span block was changed (this - // causes an update in the block stack table) - self.aux_hints.span_extended(clk, block_info); - // after RESPAN operation is executed, we decrement the number of remaining groups by ONE // because executing RESPAN consumes the first group of the batch ctx.num_groups_left -= ONE; @@ -665,16 +543,8 @@ impl Decoder { /// Starts decoding a new operation group. pub fn start_op_group(&mut self, op_group: Felt) { - let clk = self.trace_len() as u32; let ctx = self.span_context.as_mut().expect("not in span"); - // mark the cycle of the last operation as a cycle at which an operation group was - // removed from the op_group table. decoding of the removed operation will begin - // at the current cycle. - let group_pos = ctx.num_groups_left; - let batch_id = self.block_stack.peek().addr; - self.aux_hints.remove_op_group(clk - 1, batch_id, group_pos, op_group); - // reset the current group value and decrement the number of left groups by ONE debug_assert_eq!(ZERO, ctx.group_ops_left, "not all ops executed in current group"); ctx.group_ops_left = op_group; @@ -683,9 +553,6 @@ impl Decoder { /// Decodes a user operation (i.e., not a control flow operation). 
pub fn execute_user_op(&mut self, op: Operation, op_idx: usize) { - // get the current clock cycle here (before the trace table is updated) - let clk = self.trace_len() as u32; - let block = self.block_stack.peek(); let ctx = self.span_context.as_mut().expect("not in span"); @@ -706,10 +573,7 @@ impl Decoder { // groups left to decode. this number will be inserted into the trace in the next row. // we also mark the current clock cycle as a cycle at which the immediate value was // removed from the op_group table. - if let Some(imm_value) = op.imm_value() { - let group_pos = ctx.num_groups_left; - self.aux_hints.remove_op_group(clk, block.addr, group_pos, imm_value); - + if op.imm_value().is_some() { ctx.num_groups_left -= ONE; } @@ -728,43 +592,30 @@ impl Decoder { /// Ends decoding of a SPAN block. pub fn end_span(&mut self, block_hash: Word) { - // get the current clock cycle here (before the trace table is updated) - let clk = self.trace_len() as u32; - // remove the block from the stack of executing blocks and add an END row to the // execution trace let block_info = self.block_stack.pop(); self.trace.append_span_end(block_hash, block_info.is_loop_body()); self.span_context = None; - // mark this cycle as the cycle at which block execution has ended - self.aux_hints.block_ended(clk, block_info.is_first_child); - self.debug_info.append_operation(Operation::End); } // TRACE GENERATIONS // -------------------------------------------------------------------------------------------- - /// Returns an array of columns containing an execution trace of this decoder together with - /// hints to be used in construction of decoder-related auxiliary trace segment columns. + /// Returns an array of columns containing an execution trace of this decoder. /// /// Trace columns are extended to match the specified trace length. 
- pub fn into_trace(mut self, trace_len: usize, num_rand_rows: usize) -> super::DecoderTrace { - // once we know the hash of the program, we update the auxiliary trace hints so that the - // block hash table could be initialized properly - self.aux_hints.set_program_hash(self.program_hash()); - + pub fn into_trace(self, trace_len: usize, num_rand_rows: usize) -> super::DecoderTrace { let trace = self .trace .into_vec(trace_len, num_rand_rows) .try_into() .expect("failed to convert vector to array"); + let aux_builder = AuxTraceBuilder::default(); - super::DecoderTrace { - trace, - aux_trace_hints: self.aux_hints, - } + super::DecoderTrace { trace, aux_builder } } // HELPERS diff --git a/processor/src/decoder/tests.rs b/processor/src/decoder/tests.rs index f8b56460c4..4772bfde2e 100644 --- a/processor/src/decoder/tests.rs +++ b/processor/src/decoder/tests.rs @@ -1,12 +1,10 @@ use super::{ super::{ - utils::get_trace_len, ExecutionOptions, ExecutionTrace, Felt, Kernel, Operation, Process, - StackInputs, Word, + ExecutionOptions, ExecutionTrace, Felt, Kernel, Operation, Process, StackInputs, Word, }, - build_op_group, AuxTraceHints, BlockHashTableRow, BlockStackTableRow, BlockTableUpdate, - ExecutionContextInfo, OpGroupTableRow, OpGroupTableUpdate, + build_op_group, }; -use crate::{ContextId, DefaultHost}; +use crate::DefaultHost; use miden_air::trace::{ decoder::{ ADDR_COL_IDX, GROUP_COUNT_COL_IDX, HASHER_STATE_RANGE, IN_SPAN_COL_IDX, NUM_HASHER_COLUMNS, @@ -50,7 +48,7 @@ fn span_block_one_group() { let span = Span::new(ops.clone()); let program = CodeBlock::new_span(ops.clone()); - let (trace, aux_hints, trace_len) = build_trace(&[], &program); + let (trace, trace_len) = build_trace(&[], &program); // --- check block address, op_bits, group count, op_index, and in_span columns --------------- check_op_decoding(&trace, 0, ZERO, Operation::Span, 1, 0, 0); @@ -80,24 +78,6 @@ fn span_block_one_group() { assert_eq!(ONE, trace[OP_BITS_EXTRA_COLS_RANGE.start + 1][i]); 
assert_eq!(program_hash, get_hasher_state1(&trace, i)); } - - // --- check op_group table hints ------------------------------------------------------------- - // op_group table should not have been touched - assert!(&aux_hints.op_group_table_hints().is_empty()); - assert!(aux_hints.op_group_table_rows().is_empty()); - - // --- check block execution hints ------------------------------------------------------------ - let expected_hints = - vec![(0, BlockTableUpdate::BlockStarted(0)), (4, BlockTableUpdate::BlockEnded(false))]; - assert_eq!(expected_hints, aux_hints.block_exec_hints()); - - // --- check block stack table hints ---------------------------------------------------------- - let expected_rows = vec![BlockStackTableRow::new_test(INIT_ADDR, ZERO, false)]; - assert_eq!(expected_rows, aux_hints.block_stack_table_rows()); - - // --- check block hash table hints ---------------------------------------------------------- - let expected_rows = vec![BlockHashTableRow::from_program_hash(program_hash)]; - assert_eq!(expected_rows, aux_hints.block_hash_table_rows()); } #[test] @@ -107,7 +87,7 @@ fn span_block_small() { let span = Span::new(ops.clone()); let program = CodeBlock::new_span(ops.clone()); - let (trace, aux_hints, trace_len) = build_trace(&[], &program); + let (trace, trace_len) = build_trace(&[], &program); // --- check block address, op_bits, group count, op_index, and in_span columns --------------- check_op_decoding(&trace, 0, ZERO, Operation::Span, 4, 0, 0); @@ -141,38 +121,6 @@ fn span_block_small() { assert_eq!(ONE, trace[OP_BITS_EXTRA_COLS_RANGE.start + 1][i]); assert_eq!(program_hash, get_hasher_state1(&trace, i)); } - - // --- check op_group table hints ------------------------------------------------------------- - - // 3 op groups should be inserted at cycle 0, and removed one by one in subsequent cycles - let expected_ogt_hints = vec![ - (0, OpGroupTableUpdate::InsertRows(3)), - (1, OpGroupTableUpdate::RemoveRow), - (2, 
OpGroupTableUpdate::RemoveRow), - (3, OpGroupTableUpdate::RemoveRow), - ]; - assert_eq!(&expected_ogt_hints, aux_hints.op_group_table_hints()); - - // the groups are imm(1), imm(2), and op group with a single NOOP - let expected_ogt_rows = vec![ - OpGroupTableRow::new(INIT_ADDR, Felt::new(3), iv[0]), - OpGroupTableRow::new(INIT_ADDR, TWO, iv[1]), - OpGroupTableRow::new(INIT_ADDR, ONE, ZERO), - ]; - assert_eq!(expected_ogt_rows, aux_hints.op_group_table_rows()); - - // --- check block execution hints ------------------------------------------------------------ - let expected_hints = - vec![(0, BlockTableUpdate::BlockStarted(0)), (5, BlockTableUpdate::BlockEnded(false))]; - assert_eq!(expected_hints, aux_hints.block_exec_hints()); - - // --- check block stack table hints ---------------------------------------------------------- - let expected_rows = vec![BlockStackTableRow::new_test(INIT_ADDR, ZERO, false)]; - assert_eq!(expected_rows, aux_hints.block_stack_table_rows()); - - // --- check block hash table hints ---------------------------------------------------------- - let expected_rows = vec![BlockHashTableRow::from_program_hash(program_hash)]; - assert_eq!(expected_rows, aux_hints.block_hash_table_rows()); } #[test] @@ -194,7 +142,7 @@ fn span_block() { ]; let span = Span::new(ops.clone()); let program = CodeBlock::new_span(ops.clone()); - let (trace, aux_hints, trace_len) = build_trace(&[], &program); + let (trace, trace_len) = build_trace(&[], &program); // --- check block address, op_bits, group count, op_index, and in_span columns --------------- check_op_decoding(&trace, 0, ZERO, Operation::Span, 8, 0, 0); @@ -249,47 +197,6 @@ fn span_block() { assert_eq!(ONE, trace[OP_BITS_EXTRA_COLS_RANGE.start + 1][i]); assert_eq!(program_hash, get_hasher_state1(&trace, i)); } - - // --- check op_group table hints ------------------------------------------------------------- - - let expected_ogt_hints = vec![ - (0, OpGroupTableUpdate::InsertRows(7)), - (1, 
OpGroupTableUpdate::RemoveRow), - (2, OpGroupTableUpdate::RemoveRow), - (3, OpGroupTableUpdate::RemoveRow), - (8, OpGroupTableUpdate::RemoveRow), - (9, OpGroupTableUpdate::RemoveRow), - (10, OpGroupTableUpdate::RemoveRow), - (13, OpGroupTableUpdate::RemoveRow), - ]; - assert_eq!(&expected_ogt_hints, aux_hints.op_group_table_hints()); - - let batch0_groups = &span.op_batches()[0].groups(); - let expected_ogt_rows = vec![ - OpGroupTableRow::new(INIT_ADDR, Felt::new(7), batch0_groups[1]), - OpGroupTableRow::new(INIT_ADDR, Felt::new(6), batch0_groups[2]), - OpGroupTableRow::new(INIT_ADDR, Felt::new(5), batch0_groups[3]), - OpGroupTableRow::new(INIT_ADDR, Felt::new(4), batch0_groups[4]), - OpGroupTableRow::new(INIT_ADDR, Felt::new(3), batch0_groups[5]), - OpGroupTableRow::new(INIT_ADDR, TWO, batch0_groups[6]), - OpGroupTableRow::new(INIT_ADDR, ONE, batch0_groups[7]), - ]; - assert_eq!(expected_ogt_rows, aux_hints.op_group_table_rows()); - - // --- check block execution hints ------------------------------------------------------------ - let expected_hints = vec![ - (0, BlockTableUpdate::BlockStarted(0)), - (15, BlockTableUpdate::BlockEnded(false)), - ]; - assert_eq!(expected_hints, aux_hints.block_exec_hints()); - - // --- check block stack table hints ---------------------------------------------------------- - let expected_rows = vec![BlockStackTableRow::new_test(INIT_ADDR, ZERO, false)]; - assert_eq!(expected_rows, aux_hints.block_stack_table_rows()); - - // --- check block hash table hints ---------------------------------------------------------- - let expected_rows = vec![BlockHashTableRow::from_program_hash(program_hash)]; - assert_eq!(expected_rows, aux_hints.block_hash_table_rows()); } #[test] @@ -320,7 +227,7 @@ fn span_block_with_respan() { ]; let span = Span::new(ops.clone()); let program = CodeBlock::new_span(ops.clone()); - let (trace, aux_hints, trace_len) = build_trace(&[], &program); + let (trace, trace_len) = build_trace(&[], &program); // --- check 
block address, op_bits, group count, op_index, and in_span columns --------------- check_op_decoding(&trace, 0, ZERO, Operation::Span, 12, 0, 0); @@ -377,60 +284,6 @@ fn span_block_with_respan() { assert_eq!(ONE, trace[OP_BITS_EXTRA_COLS_RANGE.start + 1][i]); assert_eq!(program_hash, get_hasher_state1(&trace, i)); } - - // --- check op_group table hints ------------------------------------------------------------- - - let expected_ogt_hints = vec![ - (0, OpGroupTableUpdate::InsertRows(7)), - (1, OpGroupTableUpdate::RemoveRow), - (2, OpGroupTableUpdate::RemoveRow), - (3, OpGroupTableUpdate::RemoveRow), - (4, OpGroupTableUpdate::RemoveRow), - (5, OpGroupTableUpdate::RemoveRow), - (6, OpGroupTableUpdate::RemoveRow), - (7, OpGroupTableUpdate::RemoveRow), - (9, OpGroupTableUpdate::InsertRows(3)), - (10, OpGroupTableUpdate::RemoveRow), - (12, OpGroupTableUpdate::RemoveRow), - (13, OpGroupTableUpdate::RemoveRow), - ]; - assert_eq!(&expected_ogt_hints, aux_hints.op_group_table_hints()); - - let batch0_groups = &span.op_batches()[0].groups(); - let batch1_groups = &span.op_batches()[1].groups(); - let expected_ogt_rows = vec![ - OpGroupTableRow::new(INIT_ADDR, Felt::new(11), batch0_groups[1]), - OpGroupTableRow::new(INIT_ADDR, Felt::new(10), batch0_groups[2]), - OpGroupTableRow::new(INIT_ADDR, Felt::new(9), batch0_groups[3]), - OpGroupTableRow::new(INIT_ADDR, EIGHT, batch0_groups[4]), - OpGroupTableRow::new(INIT_ADDR, Felt::new(7), batch0_groups[5]), - OpGroupTableRow::new(INIT_ADDR, Felt::new(6), batch0_groups[6]), - OpGroupTableRow::new(INIT_ADDR, Felt::new(5), batch0_groups[7]), - // skipping the first group of batch 1 - OpGroupTableRow::new(batch1_addr, Felt::new(3), batch1_groups[1]), - OpGroupTableRow::new(batch1_addr, TWO, batch1_groups[2]), - OpGroupTableRow::new(batch1_addr, ONE, batch1_groups[3]), - ]; - assert_eq!(expected_ogt_rows, aux_hints.op_group_table_rows()); - - // --- check block execution hints 
------------------------------------------------------------ - let expected_hints = vec![ - (0, BlockTableUpdate::BlockStarted(0)), - (9, BlockTableUpdate::SpanExtended), - (15, BlockTableUpdate::BlockEnded(false)), - ]; - assert_eq!(expected_hints, aux_hints.block_exec_hints()); - - // --- check block stack table hints ---------------------------------------------------------- - let expected_rows = vec![ - BlockStackTableRow::new_test(INIT_ADDR, ZERO, false), - BlockStackTableRow::new_test(batch1_addr, ZERO, false), - ]; - assert_eq!(expected_rows, aux_hints.block_stack_table_rows()); - - // --- check block hash table hints ---------------------------------------------------------- - let expected_rows = vec![BlockHashTableRow::from_program_hash(program_hash)]; - assert_eq!(expected_rows, aux_hints.block_hash_table_rows()); } // JOIN BLOCK TESTS @@ -442,7 +295,7 @@ fn join_block() { let span2 = CodeBlock::new_span(vec![Operation::Add]); let program = CodeBlock::new_join([span1.clone(), span2.clone()]); - let (trace, aux_hints, trace_len) = build_trace(&[], &program); + let (trace, trace_len) = build_trace(&[], &program); // --- check block address, op_bits, group count, op_index, and in_span columns --------------- check_op_decoding(&trace, 0, ZERO, Operation::Join, 0, 0, 0); @@ -487,38 +340,6 @@ fn join_block() { assert_eq!(ONE, trace[OP_BITS_EXTRA_COLS_RANGE.start + 1][i]); assert_eq!(program_hash, get_hasher_state1(&trace, i)); } - - // --- check op_group table hints ------------------------------------------------------------- - // op_group table should not have been touched - assert!(&aux_hints.op_group_table_hints().is_empty()); - assert!(aux_hints.op_group_table_rows().is_empty()); - - // --- check block execution hints ------------------------------------------------------------ - let expected_hints = vec![ - (0, BlockTableUpdate::BlockStarted(2)), - (1, BlockTableUpdate::BlockStarted(0)), - (3, BlockTableUpdate::BlockEnded(true)), - (4, 
BlockTableUpdate::BlockStarted(0)), - (6, BlockTableUpdate::BlockEnded(false)), - (7, BlockTableUpdate::BlockEnded(false)), - ]; - assert_eq!(expected_hints, aux_hints.block_exec_hints()); - - // --- check block stack table hints ---------------------------------------------------------- - let expected_rows = vec![ - BlockStackTableRow::new_test(INIT_ADDR, ZERO, false), - BlockStackTableRow::new_test(span1_addr, INIT_ADDR, false), - BlockStackTableRow::new_test(span2_addr, INIT_ADDR, false), - ]; - assert_eq!(expected_rows, aux_hints.block_stack_table_rows()); - - // --- check block hash table hints ---------------------------------------------------------- - let expected_rows = vec![ - BlockHashTableRow::from_program_hash(program_hash), - BlockHashTableRow::new_test(INIT_ADDR, span1_hash, true, false), - BlockHashTableRow::new_test(INIT_ADDR, span2_hash, false, false), - ]; - assert_eq!(expected_rows, aux_hints.block_hash_table_rows()); } // SPLIT BLOCK TESTS @@ -530,7 +351,7 @@ fn split_block_true() { let span2 = CodeBlock::new_span(vec![Operation::Add]); let program = CodeBlock::new_split(span1.clone(), span2.clone()); - let (trace, aux_hints, trace_len) = build_trace(&[1], &program); + let (trace, trace_len) = build_trace(&[1], &program); // --- check block address, op_bits, group count, op_index, and in_span columns --------------- let span_addr = INIT_ADDR + EIGHT; @@ -565,34 +386,6 @@ fn split_block_true() { assert_eq!(ONE, trace[OP_BITS_EXTRA_COLS_RANGE.start + 1][i]); assert_eq!(program_hash, get_hasher_state1(&trace, i)); } - - // --- check op_group table hints ------------------------------------------------------------- - // op_group table should not have been touched - assert!(&aux_hints.op_group_table_hints().is_empty()); - assert!(aux_hints.op_group_table_rows().is_empty()); - - // --- check block execution hints ------------------------------------------------------------ - let expected_hints = vec![ - (0, BlockTableUpdate::BlockStarted(1)), - (1, 
BlockTableUpdate::BlockStarted(0)), - (3, BlockTableUpdate::BlockEnded(false)), - (4, BlockTableUpdate::BlockEnded(false)), - ]; - assert_eq!(expected_hints, aux_hints.block_exec_hints()); - - // --- check block stack table hints ---------------------------------------------------------- - let expected_rows = vec![ - BlockStackTableRow::new_test(INIT_ADDR, ZERO, false), - BlockStackTableRow::new_test(span_addr, INIT_ADDR, false), - ]; - assert_eq!(expected_rows, aux_hints.block_stack_table_rows()); - - // --- check block hash table hints ---------------------------------------------------------- - let expected_rows = vec![ - BlockHashTableRow::from_program_hash(program_hash), - BlockHashTableRow::new_test(INIT_ADDR, span1_hash, false, false), - ]; - assert_eq!(expected_rows, aux_hints.block_hash_table_rows()); } #[test] @@ -601,7 +394,7 @@ fn split_block_false() { let span2 = CodeBlock::new_span(vec![Operation::Add]); let program = CodeBlock::new_split(span1.clone(), span2.clone()); - let (trace, aux_hints, trace_len) = build_trace(&[0], &program); + let (trace, trace_len) = build_trace(&[0], &program); // --- check block address, op_bits, group count, op_index, and in_span columns --------------- let span_addr = INIT_ADDR + EIGHT; @@ -636,34 +429,6 @@ fn split_block_false() { assert_eq!(ONE, trace[OP_BITS_EXTRA_COLS_RANGE.start + 1][i]); assert_eq!(program_hash, get_hasher_state1(&trace, i)); } - - // --- check op_group table hints ------------------------------------------------------------- - // op_group table should not have been touched - assert!(&aux_hints.op_group_table_hints().is_empty()); - assert!(aux_hints.op_group_table_rows().is_empty()); - - // --- check block execution hints ------------------------------------------------------------ - let expected_hints = vec![ - (0, BlockTableUpdate::BlockStarted(1)), - (1, BlockTableUpdate::BlockStarted(0)), - (3, BlockTableUpdate::BlockEnded(false)), - (4, BlockTableUpdate::BlockEnded(false)), - ]; - 
assert_eq!(expected_hints, aux_hints.block_exec_hints()); - - // --- check block stack table hints ---------------------------------------------------------- - let expected_rows = vec![ - BlockStackTableRow::new_test(INIT_ADDR, ZERO, false), - BlockStackTableRow::new_test(span_addr, INIT_ADDR, false), - ]; - assert_eq!(expected_rows, aux_hints.block_stack_table_rows()); - - // --- check block hash table hints ---------------------------------------------------------- - let expected_rows = vec![ - BlockHashTableRow::from_program_hash(program_hash), - BlockHashTableRow::new_test(INIT_ADDR, span2_hash, false, false), - ]; - assert_eq!(expected_rows, aux_hints.block_hash_table_rows()); } // LOOP BLOCK TESTS @@ -674,7 +439,7 @@ fn loop_block() { let loop_body = CodeBlock::new_span(vec![Operation::Pad, Operation::Drop]); let program = CodeBlock::new_loop(loop_body.clone()); - let (trace, aux_hints, trace_len) = build_trace(&[0, 1], &program); + let (trace, trace_len) = build_trace(&[0, 1], &program); // --- check block address, op_bits, group count, op_index, and in_span columns --------------- let body_addr = INIT_ADDR + EIGHT; @@ -711,34 +476,6 @@ fn loop_block() { assert_eq!(ONE, trace[OP_BITS_EXTRA_COLS_RANGE.start + 1][i]); assert_eq!(program_hash, get_hasher_state1(&trace, i)); } - - // --- check op_group table hints ------------------------------------------------------------- - // op_group table should not have been touched - assert!(&aux_hints.op_group_table_hints().is_empty()); - assert!(aux_hints.op_group_table_rows().is_empty()); - - // --- check block execution hints ------------------------------------------------------------ - let expected_hints = vec![ - (0, BlockTableUpdate::BlockStarted(1)), - (1, BlockTableUpdate::BlockStarted(0)), - (4, BlockTableUpdate::BlockEnded(false)), - (5, BlockTableUpdate::BlockEnded(false)), - ]; - assert_eq!(expected_hints, aux_hints.block_exec_hints()); - - // --- check block stack table hints 
---------------------------------------------------------- - let expected_rows = vec![ - BlockStackTableRow::new_test(INIT_ADDR, ZERO, true), - BlockStackTableRow::new_test(body_addr, INIT_ADDR, false), - ]; - assert_eq!(expected_rows, aux_hints.block_stack_table_rows()); - - // --- check block hash table hints ---------------------------------------------------------- - let expected_rows = vec![ - BlockHashTableRow::from_program_hash(program_hash), - BlockHashTableRow::new_test(INIT_ADDR, loop_body_hash, false, true), - ]; - assert_eq!(expected_rows, aux_hints.block_hash_table_rows()); } #[test] @@ -746,7 +483,7 @@ fn loop_block_skip() { let loop_body = CodeBlock::new_span(vec![Operation::Pad, Operation::Drop]); let program = CodeBlock::new_loop(loop_body.clone()); - let (trace, aux_hints, trace_len) = build_trace(&[0], &program); + let (trace, trace_len) = build_trace(&[0], &program); // --- check block address, op_bits, group count, op_index, and in_span columns --------------- check_op_decoding(&trace, 0, ZERO, Operation::Loop, 0, 0, 0); @@ -773,24 +510,6 @@ fn loop_block_skip() { assert_eq!(ONE, trace[OP_BITS_EXTRA_COLS_RANGE.start + 1][i]); assert_eq!(program_hash, get_hasher_state1(&trace, i)); } - - // --- check op_group table hints ------------------------------------------------------------- - // op_group table should not have been touched - assert!(&aux_hints.op_group_table_hints().is_empty()); - assert!(aux_hints.op_group_table_rows().is_empty()); - - // --- check block execution hints ------------------------------------------------------------ - let expected_hints = - vec![(0, BlockTableUpdate::BlockStarted(0)), (1, BlockTableUpdate::BlockEnded(false))]; - assert_eq!(expected_hints, aux_hints.block_exec_hints()); - - // --- check block stack table hints ---------------------------------------------------------- - let expected_rows = vec![BlockStackTableRow::new_test(INIT_ADDR, ZERO, false)]; - assert_eq!(expected_rows, 
aux_hints.block_stack_table_rows()); - - // --- check block hash table hints ---------------------------------------------------------- - let expected_rows = vec![BlockHashTableRow::from_program_hash(program_hash)]; - assert_eq!(expected_rows, aux_hints.block_hash_table_rows()); } #[test] @@ -798,7 +517,7 @@ fn loop_block_repeat() { let loop_body = CodeBlock::new_span(vec![Operation::Pad, Operation::Drop]); let program = CodeBlock::new_loop(loop_body.clone()); - let (trace, aux_hints, trace_len) = build_trace(&[0, 1, 1], &program); + let (trace, trace_len) = build_trace(&[0, 1, 1], &program); // --- check block address, op_bits, group count, op_index, and in_span columns --------------- let iter1_addr = INIT_ADDR + EIGHT; @@ -852,38 +571,6 @@ fn loop_block_repeat() { assert_eq!(ONE, trace[OP_BITS_EXTRA_COLS_RANGE.start + 1][i]); assert_eq!(program_hash, get_hasher_state1(&trace, i)); } - - // --- check op_group table hints ------------------------------------------------------------- - // op_group table should not have been touched - assert!(&aux_hints.op_group_table_hints().is_empty()); - assert!(aux_hints.op_group_table_rows().is_empty()); - - // --- check block execution hints ------------------------------------------------------------ - let expected_hints = vec![ - (0, BlockTableUpdate::BlockStarted(1)), - (1, BlockTableUpdate::BlockStarted(0)), - (4, BlockTableUpdate::BlockEnded(false)), - (5, BlockTableUpdate::LoopRepeated), - (6, BlockTableUpdate::BlockStarted(0)), - (9, BlockTableUpdate::BlockEnded(false)), - (10, BlockTableUpdate::BlockEnded(false)), - ]; - assert_eq!(expected_hints, aux_hints.block_exec_hints()); - - // --- check block stack table hints ---------------------------------------------------------- - let expected_rows = vec![ - BlockStackTableRow::new_test(INIT_ADDR, ZERO, true), - BlockStackTableRow::new_test(iter1_addr, INIT_ADDR, false), - BlockStackTableRow::new_test(iter2_addr, INIT_ADDR, false), - ]; - assert_eq!(expected_rows, 
aux_hints.block_stack_table_rows()); - - // --- check block hash table hints ---------------------------------------------------------- - let expected_rows = vec![ - BlockHashTableRow::from_program_hash(program_hash), - BlockHashTableRow::new_test(INIT_ADDR, loop_body_hash, false, true), - ]; - assert_eq!(expected_rows, aux_hints.block_hash_table_rows()); } // CALL BLOCK TESTS @@ -916,7 +603,7 @@ fn call_block() { let join1 = CodeBlock::new_join([first_span.clone(), foo_call.clone()]); let program = CodeBlock::new_join([join1.clone(), last_span.clone()]); - let (sys_trace, dec_trace, aux_hints, trace_len) = + let (sys_trace, dec_trace, trace_len) = build_call_trace(&program, foo_root.clone(), None); // --- check block address, op_bits, group count, op_index, and in_span columns --------------- @@ -929,10 +616,6 @@ fn call_block() { check_op_decoding(&dec_trace, 2, join1_addr, Operation::Span, 2, 0, 0); check_op_decoding(&dec_trace, 3, first_span_addr, Operation::Push(TWO), 1, 0, 1); check_op_decoding(&dec_trace, 4, first_span_addr, Operation::FmpUpdate, 0, 1, 1); - // as PAD operation is executed, the last item from the stack top moves to the overflow table. - // thus, the overflow address for the top row in the table will be set to the clock cycle at - // which PAD was executed - which is 5. 
- let overflow_addr_after_pad = Felt::new(5); check_op_decoding(&dec_trace, 5, first_span_addr, Operation::Pad, 0, 2, 1); check_op_decoding(&dec_trace, 6, first_span_addr, Operation::End, 0, 0, 0); // starting CALL block @@ -1078,47 +761,6 @@ fn call_block() { for i in 13..trace_len { assert_eq!(get_fn_hash(&sys_trace, i), EMPTY_WORD); } - - // --- check block execution hints ------------------------------------------------------------ - let expected_hints = vec![ - (0, BlockTableUpdate::BlockStarted(2)), - (1, BlockTableUpdate::BlockStarted(2)), - (2, BlockTableUpdate::BlockStarted(0)), - (6, BlockTableUpdate::BlockEnded(true)), - (7, BlockTableUpdate::BlockStarted(1)), - (8, BlockTableUpdate::BlockStarted(0)), - (11, BlockTableUpdate::BlockEnded(false)), - (12, BlockTableUpdate::BlockEnded(false)), - (13, BlockTableUpdate::BlockEnded(true)), - (14, BlockTableUpdate::BlockStarted(0)), - (16, BlockTableUpdate::BlockEnded(false)), - (17, BlockTableUpdate::BlockEnded(false)), - ]; - assert_eq!(expected_hints, aux_hints.block_exec_hints()); - - // --- check block stack table rows ----------------------------------------------------------- - let call_ctx = - ExecutionContextInfo::new(ContextId::root(), EMPTY_WORD, FMP_MIN + TWO, 17, overflow_addr_after_pad); - let expected_rows = vec![ - BlockStackTableRow::new_test(INIT_ADDR, ZERO, false), - BlockStackTableRow::new_test(join1_addr, INIT_ADDR, false), - BlockStackTableRow::new_test(first_span_addr, join1_addr, false), - BlockStackTableRow::new_test_with_ctx(foo_call_addr, join1_addr, false, call_ctx), - BlockStackTableRow::new_test(foo_root_addr, foo_call_addr, false), - BlockStackTableRow::new_test(last_span_addr, INIT_ADDR, false), - ]; - assert_eq!(expected_rows, aux_hints.block_stack_table_rows()); - - // --- check block hash table hints ---------------------------------------------------------- - let expected_rows = vec![ - BlockHashTableRow::from_program_hash(program_hash), - 
BlockHashTableRow::new_test(INIT_ADDR, join1_hash, true, false), - BlockHashTableRow::new_test(INIT_ADDR, last_span_hash, false, false), - BlockHashTableRow::new_test(join1_addr, first_span_hash, true, false), - BlockHashTableRow::new_test(join1_addr, foo_call_hash, false, false), - BlockHashTableRow::new_test(foo_call_addr, foo_root_hash, false, false), - ]; - assert_eq!(expected_rows, aux_hints.block_hash_table_rows()); } // SYSCALL BLOCK TESTS @@ -1166,7 +808,7 @@ fn syscall_block() { let inner_join = CodeBlock::new_join([first_span.clone(), bar_call.clone()]); let program = CodeBlock::new_join([inner_join.clone(), last_span.clone()]); - let (sys_trace, dec_trace, aux_hints, trace_len) = + let (sys_trace, dec_trace, trace_len) = build_call_trace(&program, bar_root.clone(), Some(foo_root.clone())); // --- check block address, op_bits, group count, op_index, and in_span columns --------------- @@ -1179,10 +821,6 @@ fn syscall_block() { check_op_decoding(&dec_trace, 2, inner_join_addr, Operation::Span, 2, 0, 0); check_op_decoding(&dec_trace, 3, first_span_addr, Operation::Push(TWO), 1, 0, 1); check_op_decoding(&dec_trace, 4, first_span_addr, Operation::FmpUpdate, 0, 1, 1); - // as PAD operation is executed, the last item from the stack top moves to the overflow table. - // thus, the overflow address for the top row in the table will be set to the clock cycle at - // which PAD was executed - which is 5. 
- let overflow_addr_after_pad = Felt::new(5); check_op_decoding(&dec_trace, 5, first_span_addr, Operation::Pad, 0, 2, 1); check_op_decoding(&dec_trace, 6, first_span_addr, Operation::End, 0, 0, 0); @@ -1409,60 +1047,6 @@ fn syscall_block() { for i in 21..trace_len { assert_eq!(get_fn_hash(&sys_trace, i), EMPTY_WORD); } - - // --- check block execution hints ------------------------------------------------------------ - let expected_hints = vec![ - (0, BlockTableUpdate::BlockStarted(2)), // join0 - (1, BlockTableUpdate::BlockStarted(2)), // join1 - (2, BlockTableUpdate::BlockStarted(0)), // span0 - (6, BlockTableUpdate::BlockEnded(true)), // end span0 - (7, BlockTableUpdate::BlockStarted(1)), // call - (8, BlockTableUpdate::BlockStarted(2)), // join2 - (9, BlockTableUpdate::BlockStarted(0)), // span1 - (12, BlockTableUpdate::BlockEnded(true)), // end span1 - (13, BlockTableUpdate::BlockStarted(1)), // syscall - (14, BlockTableUpdate::BlockStarted(0)), // span2 - (17, BlockTableUpdate::BlockEnded(false)), // end span2 - (18, BlockTableUpdate::BlockEnded(false)), // end syscall - (19, BlockTableUpdate::BlockEnded(false)), // end join2 - (20, BlockTableUpdate::BlockEnded(false)), // end join1 - (21, BlockTableUpdate::BlockEnded(true)), // end join0 - (22, BlockTableUpdate::BlockStarted(0)), // span3 - (24, BlockTableUpdate::BlockEnded(false)), // end span3 - (25, BlockTableUpdate::BlockEnded(false)), // end program - ]; - assert_eq!(expected_hints, aux_hints.block_exec_hints()); - - // --- check block stack table rows ----------------------------------------------------------- - let call_ctx = - ExecutionContextInfo::new(ContextId::root(), EMPTY_WORD, FMP_MIN + ONE, 17, overflow_addr_after_pad); - let syscall_ctx = ExecutionContextInfo::new(8.into(), bar_root_hash, FMP_MIN + TWO, 16, ZERO); - let expected_rows = vec![ - BlockStackTableRow::new_test(INIT_ADDR, ZERO, false), - BlockStackTableRow::new_test(inner_join_addr, INIT_ADDR, false), - 
BlockStackTableRow::new_test(first_span_addr, inner_join_addr, false), - BlockStackTableRow::new_test_with_ctx(call_addr, inner_join_addr, false, call_ctx), - BlockStackTableRow::new_test(bar_join_addr, call_addr, false), - BlockStackTableRow::new_test(bar_span_addr, bar_join_addr, false), - BlockStackTableRow::new_test_with_ctx(syscall_addr, bar_join_addr, false, syscall_ctx), - BlockStackTableRow::new_test(syscall_span_addr, syscall_addr, false), - BlockStackTableRow::new_test(last_span_addr, INIT_ADDR, false), - ]; - assert_eq!(expected_rows, aux_hints.block_stack_table_rows()); - - // --- check block hash table hints ---------------------------------------------------------- - let expected_rows = vec![ - BlockHashTableRow::from_program_hash(program_hash), - BlockHashTableRow::new_test(INIT_ADDR, inner_join_hash, true, false), - BlockHashTableRow::new_test(INIT_ADDR, last_span_hash, false, false), - BlockHashTableRow::new_test(inner_join_addr, first_span_hash, true, false), - BlockHashTableRow::new_test(inner_join_addr, bar_call_hash, false, false), - BlockHashTableRow::new_test(call_addr, bar_root_hash, false, false), - BlockHashTableRow::new_test(bar_join_addr, bar_span_hash, true, false), - BlockHashTableRow::new_test(bar_join_addr, foo_call_hash, false, false), - BlockHashTableRow::new_test(syscall_addr, foo_root_hash, false, false), - ]; - assert_eq!(expected_rows, aux_hints.block_hash_table_rows()); } // DYN BLOCK TESTS @@ -1480,7 +1064,7 @@ fn dyn_block() { let dyn_block = CodeBlock::new_dyn(); let program = CodeBlock::new_join([join.clone(), dyn_block.clone()]); - let (trace, aux_hints, trace_len) = build_dyn_trace( + let (trace, trace_len) = build_dyn_trace( &[ foo_root.hash()[0].as_int(), foo_root.hash()[1].as_int(), @@ -1574,55 +1158,6 @@ fn dyn_block() { assert_eq!(ONE, trace[OP_BITS_EXTRA_COLS_RANGE.start + 1][i]); assert_eq!(program_hash, get_hasher_state1(&trace, i)); } - - // --- check op_group table hints 
------------------------------------------------------------- - // 1 op group should be inserted at cycle 10, and removed in the subsequent cycle - let expected_ogt_hints = - vec![(10, OpGroupTableUpdate::InsertRows(1)), (11, OpGroupTableUpdate::RemoveRow)]; - assert_eq!(&expected_ogt_hints, aux_hints.op_group_table_hints()); - - // the group is an op group with a single ADD - let expected_ogt_rows = vec![OpGroupTableRow::new(add_span_addr, ONE, ONE)]; - assert_eq!(expected_ogt_rows, aux_hints.op_group_table_rows()); - - // --- check block execution hints ------------------------------------------------------------ - let expected_hints = vec![ - (0, BlockTableUpdate::BlockStarted(2)), // outer join start - (1, BlockTableUpdate::BlockStarted(2)), // inner join start - (2, BlockTableUpdate::BlockStarted(0)), // mul span start - (4, BlockTableUpdate::BlockEnded(true)), // mul span end - (5, BlockTableUpdate::BlockStarted(0)), // save span start - (7, BlockTableUpdate::BlockEnded(false)), // save span end - (8, BlockTableUpdate::BlockEnded(true)), // inner join end - (9, BlockTableUpdate::BlockStarted(1)), // dyn start - (10, BlockTableUpdate::BlockStarted(0)), // foo span start - (13, BlockTableUpdate::BlockEnded(false)), // foo span end - (14, BlockTableUpdate::BlockEnded(false)), // dyn end - (15, BlockTableUpdate::BlockEnded(false)), // outer join end - ]; - assert_eq!(expected_hints, aux_hints.block_exec_hints()); - - // --- check block stack table hints ---------------------------------------------------------- - let expected_rows = vec![ - BlockStackTableRow::new_test(INIT_ADDR, ZERO, false), // join - BlockStackTableRow::new_test(join_addr, INIT_ADDR, false), // inner join - BlockStackTableRow::new_test(mul_span_addr, join_addr, false), // mul span - BlockStackTableRow::new_test(save_span_addr, join_addr, false), // save span - BlockStackTableRow::new_test(dyn_addr, INIT_ADDR, false), // dyn - BlockStackTableRow::new_test(add_span_addr, dyn_addr, false), // foo 
span - ]; - assert_eq!(expected_rows, aux_hints.block_stack_table_rows()); - - // --- check block hash table hints ---------------------------------------------------------- - let expected_rows = vec![ - BlockHashTableRow::from_program_hash(program_hash), - BlockHashTableRow::new_test(INIT_ADDR, join_hash, true, false), - BlockHashTableRow::new_test(INIT_ADDR, dyn_hash, false, false), - BlockHashTableRow::new_test(join_addr, mul_span_hash, true, false), - BlockHashTableRow::new_test(join_addr, save_span_hash, false, false), - BlockHashTableRow::new_test(dyn_addr, foo_hash, false, false), - ]; - assert_eq!(expected_rows, aux_hints.block_hash_table_rows()); } // HELPER REGISTERS TESTS @@ -1658,22 +1193,21 @@ fn set_user_op_helpers_many() { // HELPER FUNCTIONS // ================================================================================================ -fn build_trace(stack_inputs: &[u64], program: &CodeBlock) -> (DecoderTrace, AuxTraceHints, usize) { +fn build_trace(stack_inputs: &[u64], program: &CodeBlock) -> (DecoderTrace, usize) { let stack_inputs = StackInputs::try_from_values(stack_inputs.iter().copied()).unwrap(); let host = DefaultHost::default(); let mut process = Process::new(Kernel::default(), stack_inputs, host, ExecutionOptions::default()); process.execute_code_block(program, &CodeBlockTable::default()).unwrap(); - let (trace, aux_hints, _) = ExecutionTrace::test_finalize_trace(process); - let trace_len = get_trace_len(&trace) - ExecutionTrace::NUM_RAND_ROWS; + let (trace, _, _) = ExecutionTrace::test_finalize_trace(process); + let trace_len = trace.num_rows() - ExecutionTrace::NUM_RAND_ROWS; ( - trace[DECODER_TRACE_RANGE] - .to_vec() + trace + .get_column_range(DECODER_TRACE_RANGE) .try_into() .expect("failed to convert vector to array"), - aux_hints.decoder, trace_len, ) } @@ -1682,7 +1216,7 @@ fn build_dyn_trace( stack_inputs: &[u64], program: &CodeBlock, fn_block: CodeBlock, -) -> (DecoderTrace, AuxTraceHints, usize) { +) -> (DecoderTrace, 
usize) { let stack_inputs = StackInputs::try_from_values(stack_inputs.iter().copied()).unwrap(); let host = DefaultHost::default(); let mut process = @@ -1694,15 +1228,14 @@ fn build_dyn_trace( process.execute_code_block(program, &cb_table).unwrap(); - let (trace, aux_hints, _) = ExecutionTrace::test_finalize_trace(process); - let trace_len = get_trace_len(&trace) - ExecutionTrace::NUM_RAND_ROWS; + let (trace, _, _) = ExecutionTrace::test_finalize_trace(process); + let trace_len = trace.num_rows() - ExecutionTrace::NUM_RAND_ROWS; ( - trace[DECODER_TRACE_RANGE] - .to_vec() + trace + .get_column_range(DECODER_TRACE_RANGE) .try_into() .expect("failed to convert vector to array"), - aux_hints.decoder, trace_len, ) } @@ -1711,7 +1244,7 @@ fn build_call_trace( program: &CodeBlock, fn_block: CodeBlock, kernel_proc: Option, -) -> (SystemTrace, DecoderTrace, AuxTraceHints, usize) { +) -> (SystemTrace, DecoderTrace, usize) { let kernel = match kernel_proc { Some(ref proc) => Kernel::new(&[proc.hash()]).unwrap(), None => Kernel::default(), @@ -1729,20 +1262,20 @@ fn build_call_trace( process.execute_code_block(program, &cb_table).unwrap(); - let (trace, aux_hints, _) = ExecutionTrace::test_finalize_trace(process); - let trace_len = get_trace_len(&trace) - ExecutionTrace::NUM_RAND_ROWS; + let (trace, _, _) = ExecutionTrace::test_finalize_trace(process); + let trace_len = trace.num_rows() - ExecutionTrace::NUM_RAND_ROWS; - let sys_trace = trace[SYS_TRACE_RANGE] - .to_vec() + let sys_trace = trace + .get_column_range(SYS_TRACE_RANGE) .try_into() .expect("failed to convert vector to array"); - let decoder_trace = trace[DECODER_TRACE_RANGE] - .to_vec() + let decoder_trace = trace + .get_column_range(DECODER_TRACE_RANGE) .try_into() .expect("failed to convert vector to array"); - (sys_trace, decoder_trace, aux_hints.decoder, trace_len) + (sys_trace, decoder_trace, trace_len) } // OPCODES diff --git a/processor/src/lib.rs b/processor/src/lib.rs index bb0ea0a956..4f9f17da40 100644 
--- a/processor/src/lib.rs +++ b/processor/src/lib.rs @@ -3,6 +3,7 @@ #[cfg(not(feature = "std"))] #[macro_use] extern crate alloc; +extern crate core; use core::cell::RefCell; @@ -24,7 +25,7 @@ use vm_core::{ CodeBlockTable, Decorator, DecoratorIterator, Felt, FieldElement, StackTopState, StarkField, }; -use winter_prover::matrix::ColMatrix; +pub use winter_prover::matrix::ColMatrix; mod operations; @@ -93,7 +94,7 @@ type SysTrace = [Vec; SYS_TRACE_WIDTH]; pub struct DecoderTrace { trace: [Vec; DECODER_TRACE_WIDTH], - aux_trace_hints: decoder::AuxTraceHints, + aux_builder: decoder::AuxTraceBuilder, } pub struct StackTrace { diff --git a/processor/src/operations/crypto_ops.rs b/processor/src/operations/crypto_ops.rs index fc4a329d4d..8da2df47bd 100644 --- a/processor/src/operations/crypto_ops.rs +++ b/processor/src/operations/crypto_ops.rs @@ -33,8 +33,8 @@ where self.stack.get(0), ]; - let (_addr, output_state) = self.chiplets.permute(input_state); - + let (addr, output_state) = self.chiplets.permute(input_state); + self.decoder.set_user_op_helpers(Operation::HPerm, &[addr]); for (i, &value) in output_state.iter().rev().enumerate() { self.stack.set(i, value); } @@ -196,7 +196,7 @@ mod tests { 1, 0, 0, 0, 0, 0 // padding: ONE followed by the necessary ZEROs ]; let stack = StackInputs::try_from_values(inputs).unwrap(); - let mut process = Process::new_dummy(stack); + let mut process = Process::new_dummy_with_decoder_helpers(stack); let expected: [Felt; STATE_WIDTH] = build_expected_perm(&inputs); process.execute_op(Operation::HPerm).unwrap(); @@ -207,7 +207,7 @@ mod tests { let mut inputs: Vec = vec![values.len() as u64, 0, 0, 0]; inputs.extend_from_slice(&values); let stack = StackInputs::try_from_values(inputs.clone()).unwrap(); - let mut process = Process::new_dummy(stack); + let mut process = Process::new_dummy_with_decoder_helpers(stack); // add the capacity to prepare the input vector let expected: [Felt; STATE_WIDTH] = build_expected_perm(&inputs); @@ -221,7 
+221,7 @@ mod tests { inputs.extend_from_slice(&values); let stack = StackInputs::try_from_values(inputs).unwrap(); - let mut process = Process::new_dummy(stack); + let mut process = Process::new_dummy_with_decoder_helpers(stack); process.execute_op(Operation::HPerm).unwrap(); assert_eq!(expected, &process.stack.trace_state()[12..16]); } diff --git a/processor/src/range/aux_trace.rs b/processor/src/range/aux_trace.rs index 531a6dba55..c2e634add3 100644 --- a/processor/src/range/aux_trace.rs +++ b/processor/src/range/aux_trace.rs @@ -1,4 +1,5 @@ -use super::{uninit_vector, BTreeMap, ColMatrix, Felt, FieldElement, Vec, NUM_RAND_ROWS}; +use super::{uninit_vector, BTreeMap, Felt, FieldElement, Vec, NUM_RAND_ROWS}; +use miden_air::trace::main_trace::MainTrace; use miden_air::trace::range::{M_COL_IDX, V_COL_IDX}; use vm_core::StarkField; @@ -42,7 +43,7 @@ impl AuxTraceBuilder { /// requested by the Stack and Memory processors. pub fn build_aux_columns>( &self, - main_trace: &ColMatrix, + main_trace: &MainTrace, rand_elements: &[E], ) -> Vec> { let b_range = self.build_aux_col_b_range(main_trace, rand_elements); @@ -53,7 +54,7 @@ impl AuxTraceBuilder { /// check lookups performed by user operations match those executed by the Range Checker. 
fn build_aux_col_b_range>( &self, - main_trace: &ColMatrix, + main_trace: &MainTrace, rand_elements: &[E], ) -> Vec { // run batch inversion on the lookup values diff --git a/processor/src/range/mod.rs b/processor/src/range/mod.rs index e1d12fe436..10f6e2c12c 100644 --- a/processor/src/range/mod.rs +++ b/processor/src/range/mod.rs @@ -1,6 +1,6 @@ use super::{ - trace::NUM_RAND_ROWS, utils::uninit_vector, BTreeMap, ColMatrix, Felt, FieldElement, - RangeCheckTrace, Vec, ZERO, + trace::NUM_RAND_ROWS, utils::uninit_vector, BTreeMap, Felt, FieldElement, RangeCheckTrace, Vec, + ZERO, }; mod aux_trace; diff --git a/processor/src/range/request.rs b/processor/src/range/request.rs index 28cf9c0704..c71256eb2e 100644 --- a/processor/src/range/request.rs +++ b/processor/src/range/request.rs @@ -54,7 +54,7 @@ impl CycleRangeChecks { /// element in the field specified by E. pub fn to_stack_value>( &self, - main_trace: &ColMatrix, + main_trace: &MainTrace, alphas: &[E], ) -> E { let mut value = E::ONE; @@ -70,7 +70,7 @@ impl CycleRangeChecks { /// element in the field specified by E. fn to_mem_value>( &self, - main_trace: &ColMatrix, + main_trace: &MainTrace, alphas: &[E], ) -> E { let mut value = E::ONE; @@ -88,7 +88,7 @@ impl LookupTableRow for CycleRangeChecks { /// at least 1 alpha value. Includes all values included at this cycle from all processors. fn to_value>( &self, - main_trace: &ColMatrix, + main_trace: &MainTrace, alphas: &[E], ) -> E { let stack_value = self.to_stack_value(main_trace, alphas); @@ -115,7 +115,7 @@ impl LookupTableRow for RangeCheckRequest { /// at least 1 alpha value. 
fn to_value>( &self, - _main_trace: &ColMatrix, + _main_trace: &MainTrace, alphas: &[E], ) -> E { let alpha: E = alphas[0]; diff --git a/processor/src/stack/aux_trace.rs b/processor/src/stack/aux_trace.rs index d040a63b83..064b0fa941 100644 --- a/processor/src/stack/aux_trace.rs +++ b/processor/src/stack/aux_trace.rs @@ -1,7 +1,7 @@ -use super::{ - super::trace::AuxColumnBuilder, ColMatrix, Felt, FieldElement, OverflowTableRow, - OverflowTableUpdate, Vec, -}; +use crate::trace::AuxColumnBuilder; + +use super::{Felt, FieldElement, OverflowTableRow, Vec}; +use miden_air::trace::main_trace::MainTrace; // AUXILIARY TRACE BUILDER // ================================================================================================ @@ -9,16 +9,10 @@ use super::{ /// Describes how to construct execution traces of stack-related auxiliary trace segment columns /// (used in multiset checks). pub struct AuxTraceBuilder { - /// A list of updates made to the overflow table during program execution. For each update we - /// also track the cycle at which the update happened. - pub(super) overflow_hints: Vec<(u64, OverflowTableUpdate)>, /// A list of all rows that were added to and then removed from the overflow table. pub(super) overflow_table_rows: Vec, /// The number of rows in the overflow table when execution begins. pub(super) num_init_rows: usize, - /// A list of indices into the `all_rows` vector which describes the rows remaining in the - /// overflow table at the end of execution. - pub(super) final_rows: Vec, } impl AuxTraceBuilder { @@ -26,7 +20,7 @@ impl AuxTraceBuilder { /// column p1 describing states of the stack overflow table. 
pub fn build_aux_columns>( &self, - main_trace: &ColMatrix, + main_trace: &MainTrace, rand_elements: &[E], ) -> Vec> { let p1 = self.build_aux_column(main_trace, rand_elements); @@ -34,71 +28,50 @@ impl AuxTraceBuilder { } } -// OVERFLOW TABLE -// ================================================================================================ - -impl AuxColumnBuilder for AuxTraceBuilder { - /// Returns a list of rows which were added to and then removed from the stack overflow table. - /// - /// The order of the rows in the list is the same as the order in which the rows were added to - /// the table. - fn get_table_rows(&self) -> &[OverflowTableRow] { - &self.overflow_table_rows +impl> AuxColumnBuilder for AuxTraceBuilder { + /// Initializes the overflow stack auxiliary column. + fn init_responses(&self, _main_trace: &MainTrace, alphas: &[E]) -> E { + let mut initial_column_value = E::ONE; + for row in self.overflow_table_rows.iter().take(self.num_init_rows) { + // TODO: are we supposed to shift the initial value? + // this can also be fixed from the air side, by preventing the addition of + // alphas[0] to the initial value + let value = (*row).to_value(alphas) + alphas[0]; + initial_column_value *= value; + } + initial_column_value } - /// Returns hints which describe how the stack overflow table was updated during program - /// execution. Each update hint is accompanied by a clock cycle at which the update happened. - /// - /// Internally, each update hint also contains an index of the row into the full list of rows - /// which was either added or removed. - fn get_table_hints(&self) -> &[(u64, OverflowTableUpdate)] { - &self.overflow_hints[self.num_init_rows..] - } + /// Removes a row from the stack overflow table. 
+ fn get_requests_at(&self, main_trace: &MainTrace, alphas: &[E], i: usize) -> E { + let is_left_shift = main_trace.is_left_shift(i); + let is_non_empty_overflow = main_trace.is_non_empty_overflow(i); - /// Returns the value by which the running product column should be multiplied for the provided - /// hint value. - fn get_multiplicand>( - &self, - hint: OverflowTableUpdate, - row_values: &[E], - inv_row_values: &[E], - ) -> E { - match hint { - OverflowTableUpdate::RowInserted(inserted_row_idx) => { - row_values[inserted_row_idx as usize] - } - OverflowTableUpdate::RowRemoved(removed_row_idx) => { - inv_row_values[removed_row_idx as usize] - } + if is_left_shift && is_non_empty_overflow { + let b1 = main_trace.parent_overflow_address(i); + let s15_prime = main_trace.stack_element(15, i + 1); + let b1_prime = main_trace.parent_overflow_address(i + 1); + + let row = OverflowTableRow::new(b1, s15_prime, b1_prime); + row.to_value(alphas) + } else { + E::ONE } } - /// Returns the initial value in the auxiliary column. - fn init_column_value>(&self, row_values: &[E]) -> E { - let mut init_column_value = E::ONE; - // iterate through the elements in the initial table - for (_, hint) in &self.overflow_hints[..self.num_init_rows] { - // no rows should have been removed from the table before execution begins. - if let OverflowTableUpdate::RowInserted(row) = hint { - init_column_value *= row_values[*row as usize]; - } else { - debug_assert!( - false, - "overflow table row incorrectly removed before execution started" - ) - } - } + /// Adds a row to the stack overflow table. + fn get_responses_at(&self, main_trace: &MainTrace, alphas: &[E], i: usize) -> E { + let is_right_shift = main_trace.is_right_shift(i); - init_column_value - } + if is_right_shift { + let k0 = main_trace.clk(i); + let s15 = main_trace.stack_element(15, i); + let b1 = main_trace.parent_overflow_address(i); - /// Returns the final value in the auxiliary column. 
- fn final_column_value>(&self, row_values: &[E]) -> E { - let mut final_column_value = E::ONE; - for &row in &self.final_rows { - final_column_value *= row_values[row]; + let row = OverflowTableRow::new(k0, s15, b1); + row.to_value(alphas) + } else { + E::ONE } - - final_column_value } } diff --git a/processor/src/stack/mod.rs b/processor/src/stack/mod.rs index bbfee3de17..09fda3a85b 100644 --- a/processor/src/stack/mod.rs +++ b/processor/src/stack/mod.rs @@ -1,6 +1,5 @@ use super::{ - BTreeMap, ColMatrix, Felt, FieldElement, StackInputs, StackOutputs, Vec, ONE, - STACK_TRACE_WIDTH, ZERO, + BTreeMap, Felt, FieldElement, StackInputs, StackOutputs, Vec, ONE, STACK_TRACE_WIDTH, ZERO, }; use core::cmp; use vm_core::{stack::STACK_TOP_SIZE, Word, WORD_SIZE}; @@ -10,7 +9,7 @@ use trace::StackTrace; mod overflow; use overflow::OverflowTable; -pub use overflow::{OverflowTableRow, OverflowTableUpdate}; +pub use overflow::OverflowTableRow; mod aux_trace; pub use aux_trace::AuxTraceBuilder; @@ -240,7 +239,7 @@ impl Stack { // Update the overflow table. let to_overflow = self.trace.get_stack_value_at(self.clk, MAX_TOP_IDX); - self.overflow.push(to_overflow, self.clk as u64); + self.overflow.push(to_overflow, Felt::from(self.clk)); // Stack depth always increases on right shift. self.active_depth += 1; diff --git a/processor/src/stack/overflow.rs b/processor/src/stack/overflow.rs index e4d1cc4872..486df385d8 100644 --- a/processor/src/stack/overflow.rs +++ b/processor/src/stack/overflow.rs @@ -1,7 +1,4 @@ -use super::{ - super::trace::LookupTableRow, AuxTraceBuilder, BTreeMap, ColMatrix, Felt, FieldElement, Vec, - ZERO, -}; +use super::{AuxTraceBuilder, BTreeMap, Felt, FieldElement, Vec, ZERO}; use vm_core::{utils::uninit_vector, StarkField}; // OVERFLOW TABLE @@ -19,9 +16,6 @@ pub struct OverflowTable { /// A list of indices into the `all_rows` vector which describes the rows currently in the /// overflow table. 
active_rows: Vec, - /// A list of updates made to the overflow table during program execution. For each update we - /// also track the cycle at which the update happened. - update_trace: Vec<(u64, OverflowTableUpdate)>, /// A map which records the full state of the overflow table at every cycle during which an /// update happened. This map is populated only when `trace_enabled` = true. trace: BTreeMap>, @@ -45,7 +39,6 @@ impl OverflowTable { Self { all_rows: Vec::new(), active_rows: Vec::new(), - update_trace: Vec::new(), trace: BTreeMap::new(), trace_enabled: enable_trace, num_init_rows: 0, @@ -65,7 +58,7 @@ impl OverflowTable { let mut clk = Felt::MODULUS - init_values.len() as u64; for &val in init_values.iter().rev() { - overflow_table.push(val, clk); + overflow_table.push(val, Felt::new(clk)); clk += 1; } @@ -78,12 +71,12 @@ impl OverflowTable { /// Pushes the specified value into the overflow table. /// /// Parameter clk specifies the clock cycle at which the value is added to the table. - pub fn push(&mut self, value: Felt, clk: u64) { + pub fn push(&mut self, value: Felt, clk: Felt) { // ZERO address indicates that the overflow table is empty, and thus, no actual value // should be inserted into the table with this address. This is not a problem since for // every real program, we first execute an operation marking the start of a code block, // and thus, no operation can shift the stack to the right at clk = 0. 
- debug_assert_ne!(clk, 0, "cannot add value to overflow at clk=0"); + debug_assert_ne!(clk, ZERO, "cannot add value to overflow at clk=0"); // create and record the new row, and also put it at the top of the overflow table let row_idx = self.all_rows.len() as u32; @@ -92,14 +85,11 @@ impl OverflowTable { self.active_rows.push(row_idx as usize); // set the last row address to the address of the newly added row - self.last_row_addr = Felt::from(clk); - - // mark this clock cycle as the cycle at which a new row was inserted into the table - self.update_trace.push((clk, OverflowTableUpdate::RowInserted(row_idx))); + self.last_row_addr = clk; if self.trace_enabled { // insert a copy of the current table state into the trace - self.save_current_state(clk); + self.save_current_state(clk.as_int()); } } @@ -122,10 +112,6 @@ impl OverflowTable { let removed_value = last_row.val; self.last_row_addr = last_row.prev; - // mark this clock cycle as the clock cycle at which a row was removed from the table - self.update_trace - .push((clk, OverflowTableUpdate::RowRemoved(last_row_idx as u32))); - if self.trace_enabled { // insert a copy of the current table state into the trace self.save_current_state(clk); @@ -211,9 +197,7 @@ impl OverflowTable { pub fn into_aux_builder(self) -> AuxTraceBuilder { AuxTraceBuilder { num_init_rows: self.num_init_rows, - overflow_hints: self.update_trace, overflow_table_rows: self.all_rows, - final_rows: self.active_rows, } } @@ -257,40 +241,17 @@ pub struct OverflowTableRow { } impl OverflowTableRow { - pub fn new(clk: u64, val: Felt, prev: Felt) -> Self { - Self { - val, - clk: Felt::from(clk), - prev, - } + pub fn new(clk: Felt, val: Felt, prev: Felt) -> Self { + Self { val, clk, prev } } } -impl LookupTableRow for OverflowTableRow { +impl OverflowTableRow { /// Reduces this row to a single field element in the field specified by E. This requires /// at least 4 alpha values. 
- fn to_value>( - &self, - _main_trace: &ColMatrix, - alphas: &[E], - ) -> E { - alphas[0] - + alphas[1].mul_base(self.clk) + pub fn to_value>(&self, alphas: &[E]) -> E { + alphas[1].mul_base(self.clk) + alphas[2].mul_base(self.val) + alphas[3].mul_base(self.prev) } } - -// OVERFLOW TABLE UPDATES -// ================================================================================================ - -/// Describes an update to the stack overflow table. There could be two types of updates: -/// - A single row can be added to the table. This happens during a right shift. -/// - A single row can be removed from the table. This happens during a left shift. -/// -/// For each update we also record the index of the row that was added/removed from the table. -#[derive(Debug, Copy, Clone, PartialEq, Eq)] -pub enum OverflowTableUpdate { - RowInserted(u32), - RowRemoved(u32), -} diff --git a/processor/src/stack/tests.rs b/processor/src/stack/tests.rs index 4319ca4326..70e4411bd9 100644 --- a/processor/src/stack/tests.rs +++ b/processor/src/stack/tests.rs @@ -53,9 +53,9 @@ fn initialize_overflow() { ]; let init_addr = Felt::MODULUS - 3; let expected_overflow_rows = vec![ - OverflowTableRow::new(init_addr, ONE, ZERO), - OverflowTableRow::new(init_addr + 1, Felt::new(2), Felt::new(init_addr)), - OverflowTableRow::new(init_addr + 2, Felt::new(3), Felt::new(init_addr + 1)), + OverflowTableRow::new(Felt::new(init_addr), ONE, ZERO), + OverflowTableRow::new(Felt::new(init_addr + 1), Felt::new(2), Felt::new(init_addr)), + OverflowTableRow::new(Felt::new(init_addr + 2), Felt::new(3), Felt::new(init_addr + 1)), ]; let expected_overflow_active_rows = vec![0, 1, 2]; diff --git a/processor/src/trace/decoder/mod.rs b/processor/src/trace/decoder/mod.rs deleted file mode 100644 index d101665fed..0000000000 --- a/processor/src/trace/decoder/mod.rs +++ /dev/null @@ -1,306 +0,0 @@ -use super::{ - super::decoder::{AuxTraceHints, BlockTableUpdate, OpGroupTableUpdate}, - 
utils::build_lookup_table_row_values, - ColMatrix, Felt, FieldElement, Vec, DECODER_TRACE_OFFSET, -}; -use vm_core::utils::uninit_vector; - -#[cfg(test)] -mod tests; - -// CONSTANTS -// ================================================================================================ - -const ADDR_COL_IDX: usize = DECODER_TRACE_OFFSET + miden_air::trace::decoder::ADDR_COL_IDX; - -// DECODER AUXILIARY TRACE COLUMNS -// ================================================================================================ - -/// Builds and returns decoder auxiliary trace columns p1, p2, and p3 describing states of block -/// stack, block hash, and op group tables respectively. -pub fn build_aux_columns>( - main_trace: &ColMatrix, - aux_trace_hints: &AuxTraceHints, - rand_elements: &[E], -) -> Vec> { - let p1 = build_aux_col_p1(main_trace, aux_trace_hints, rand_elements); - let p2 = build_aux_col_p2(main_trace, aux_trace_hints, rand_elements); - let p3 = build_aux_col_p3(main_trace, main_trace.num_rows(), aux_trace_hints, rand_elements); - vec![p1, p2, p3] -} - -// BLOCK STACK TABLE COLUMN -// ================================================================================================ - -/// Builds the execution trace of the decoder's `p1` column which describes the state of the block -/// stack table via multiset checks. 
-fn build_aux_col_p1>( - main_trace: &ColMatrix, - aux_trace_hints: &AuxTraceHints, - alphas: &[E], -) -> Vec { - // compute row values and their inverses for all rows that were added to the block stack table - let table_rows = aux_trace_hints.block_stack_table_rows(); - let (row_values, inv_row_values) = - build_lookup_table_row_values(table_rows, main_trace, alphas); - - // allocate memory for the running product column and set the initial value to ONE - let mut result = unsafe { uninit_vector(main_trace.num_rows()) }; - result[0] = E::ONE; - - // keep track of the index into the list of block stack table rows for started blocks; we can - // use this index because the sequence in which blocks are started is exactly the same as the - // sequence in which the rows are added to the block stack table. - let mut started_block_idx = 0; - - // keep track of the last updated row in the running product column - let mut result_idx = 0_usize; - - // iterate through the list of updates and apply them one by one - for (clk, update) in aux_trace_hints.block_exec_hints() { - let clk = *clk as usize; - - // if we skipped some cycles since the last update was processed, values in the last - // updated row should by copied over until the current cycle. - if result_idx < clk { - let last_value = result[result_idx]; - result[(result_idx + 1)..=clk].fill(last_value); - } - - // move the result pointer to the next row - result_idx = clk + 1; - - // apply the relevant updates to the column - match update { - BlockTableUpdate::BlockStarted(_) => { - // when a new block is started, multiply the running product by the value - // representing the entry for the block in the block stack table. - result[result_idx] = result[clk] * row_values[started_block_idx]; - started_block_idx += 1; - } - BlockTableUpdate::SpanExtended => { - // when a RESPAN operation is executed, we need to remove the entry for - // the last batch from the block stack table and also add an entry for the - // new batch. 
- let old_row_value_inv = inv_row_values[started_block_idx - 1]; - let new_row_value = row_values[started_block_idx]; - result[result_idx] = result[clk] * old_row_value_inv * new_row_value; - started_block_idx += 1; - } - BlockTableUpdate::BlockEnded(_) => { - // when a block is ended, we need to remove the entry for the block from the - // block stack table; we can look up the index of the entry using the block's - // ID which we get from the current row of the execution trace. - let block_id = get_block_addr(main_trace, clk as u32); - let row_idx = aux_trace_hints - .get_block_stack_row_idx(block_id) - .expect("block stack row not found"); - result[result_idx] = result[clk] * inv_row_values[row_idx]; - } - // REPEAT operation has no effect on the block stack table - BlockTableUpdate::LoopRepeated => result[result_idx] = result[clk], - } - } - - // at this point, block stack table must be empty - so, the last value must be ONE; - // we also fill in all the remaining values in the column with ONE's. - let last_value = result[result_idx]; - assert_eq!(last_value, E::ONE); - if result_idx < result.len() - 1 { - result[(result_idx + 1)..].fill(E::ONE); - } - - result -} - -// BLOCK HASH TABLE COLUMN -// ================================================================================================ - -/// Builds the execution trace of the decoder's `p2` column which describes the state of the block -/// hash table via multiset checks. 
-fn build_aux_col_p2>( - main_trace: &ColMatrix, - aux_trace_hints: &AuxTraceHints, - alphas: &[E], -) -> Vec { - // compute row values and their inverses for all rows that were added to the block hash table - let table_rows = aux_trace_hints.block_hash_table_rows(); - let (row_values, inv_row_values) = - build_lookup_table_row_values(table_rows, main_trace, alphas); - - // initialize memory for the running product column, and set the first value in the column to - // the value of the first row (which represents an entry for the root block of the program) - let mut result = unsafe { uninit_vector(main_trace.num_rows()) }; - result[0] = row_values[0]; - - // keep track of the index into the list of block hash table rows for started blocks; we can - // use this index because the sequence in which blocks are started is exactly the same as the - // sequence in which the rows are added to the block hash table. we start at 1 because the - // first row is already included in the running product above. - let mut started_block_idx = 1; - - // keep track of the last updated row in the running product column - let mut result_idx = 0_usize; - - // iterate through the list of updates and apply them one by one - for (clk, update) in aux_trace_hints.block_exec_hints() { - let clk = *clk as usize; - - // if we skipped some cycles since the last update was processed, values in the last - // updated row should by copied over until the current cycle. - if result_idx < clk { - let last_value = result[result_idx]; - result[(result_idx + 1)..=clk].fill(last_value); - } - - // move the result pointer to the next row - result_idx = clk + 1; - - // apply relevant updates - match update { - BlockTableUpdate::BlockStarted(num_children) => { - // if a new block was started, entries for the block's children are added to the - // table; in case this was a JOIN block with two children, the first child should - // have is_first_child set to true. 
- match *num_children { - 0 => result[result_idx] = result[clk], - 1 => { - debug_assert!(!table_rows[started_block_idx].is_first_child()); - result[result_idx] = result[clk] * row_values[started_block_idx]; - } - 2 => { - debug_assert!(table_rows[started_block_idx].is_first_child()); - debug_assert!(!table_rows[started_block_idx + 1].is_first_child()); - result[result_idx] = result[clk] - * row_values[started_block_idx] - * row_values[started_block_idx + 1]; - } - _ => panic!("invalid number of children for a block"), - } - - // move pointer into the table row list by the number of children - started_block_idx += *num_children as usize; - } - BlockTableUpdate::LoopRepeated => { - // When a REPEAT operation is executed, we need to add an entry for the loop's - // body to the table. Entries for blocks in the block hash table can be identified - // by their parent ID (which is the ID of the executing LOOP block). Parent ID is - // always the address value in the next row of the execution trace after a REPEAT - // operation is executed. Therefore, we can get the parent ID from the execution - // trace at the next row: clk + 1 (which is the same as result_idx), and use it to - // find this entry. - let parent_id = get_block_addr(main_trace, result_idx as u32); - let row_idx = aux_trace_hints - .get_block_hash_row_idx(parent_id, false) - .expect("block hash row not found"); - result[result_idx] = result[clk] * row_values[row_idx]; - } - BlockTableUpdate::BlockEnded(is_first_child) => { - // when END operation is executed, we need to remove an entry for the block from - // the block hash table. we can find the entry by its parent_id, which we can get - // from the trace in the same way as described above. we also need to know whether - // this block is the first or the second child of its parent, because for JOIN - // block, the same parent ID would map to two children. 
- let parent_id = get_block_addr(main_trace, result_idx as u32); - let row_idx = aux_trace_hints - .get_block_hash_row_idx(parent_id, *is_first_child) - .expect("block hash row not found"); - result[result_idx] = result[clk] * inv_row_values[row_idx]; - } - // RESPAN operation has no effect on the block hash table - BlockTableUpdate::SpanExtended => result[result_idx] = result[clk], - } - } - - // at this point, block hash table must be empty - so, the last value must be ONE; - // we also fill in all the remaining values in the column with ONE's. - let last_value = result[result_idx]; - assert_eq!(last_value, E::ONE); - if result_idx < result.len() - 1 { - result[(result_idx + 1)..].fill(E::ONE); - } - - result -} - -// OP GROUP TABLE COLUMN -// ================================================================================================ - -/// Builds the execution trace of the decoder's `p3` column which describes the state of the op -/// group table via multiset checks. -fn build_aux_col_p3>( - main_trace: &ColMatrix, - trace_len: usize, - aux_trace_hints: &AuxTraceHints, - alphas: &[E], -) -> Vec { - // allocate memory for the column and set the starting value to ONE - let mut result = unsafe { uninit_vector(trace_len) }; - result[0] = E::ONE; - - // compute row values and their inverses for all rows which were added to the op group table - let (row_values, inv_row_values) = - build_lookup_table_row_values(aux_trace_hints.op_group_table_rows(), main_trace, alphas); - - // keep track of indexes into the list of op group table rows separately for inserted and - // removed rows - let mut inserted_group_idx = 0_usize; - let mut removed_group_idx = 0_usize; - - // keep track of the last updated row in the running product column - let mut result_idx = 0_usize; - - for (clk, update) in aux_trace_hints.op_group_table_hints() { - let clk = *clk as usize; - - // if we skipped some cycles since the last update was processed, values in the last - // updated row should by 
copied over until the current cycle. - if result_idx < clk { - let last_value = result[result_idx]; - result[(result_idx + 1)..=clk].fill(last_value); - } - - // apply the relevant updates to the column - result_idx = clk + 1; - match update { - OpGroupTableUpdate::InsertRows(num_op_groups) => { - // if the rows were added, multiply the current value in the column by the values - // of all added rows - let mut value = row_values[inserted_group_idx]; - for i in 1..(*num_op_groups as usize) { - value *= row_values[inserted_group_idx + i]; - } - result[result_idx] = result[clk] * value; - - // advance the inserted group pointer by the number of inserted rows - inserted_group_idx += *num_op_groups as usize; - } - OpGroupTableUpdate::RemoveRow => { - // if a row was removed, divide the current value in the column by the value - // of the row - result[result_idx] = result[clk] * inv_row_values[removed_group_idx]; - - // advance the removed group pointer by one - removed_group_idx += 1; - } - } - } - - // at this point, op group table must be empty - so, the last value must be ONE; - // we also fill in all the remaining values in the column with ONE's. - let last_value = result[result_idx]; - assert_eq!(last_value, E::ONE); - if result_idx < result.len() - 1 { - result[(result_idx + 1)..].fill(E::ONE); - } - - result -} - -// HELPER FUNCTIONS -// ================================================================================================ - -/// Returns the value in the block address column at the specified row. 
-fn get_block_addr(main_trace: &ColMatrix, row_idx: u32) -> Felt { - main_trace.get(ADDR_COL_IDX, row_idx as usize) -} diff --git a/processor/src/trace/mod.rs b/processor/src/trace/mod.rs index 078452381e..7dc889f433 100644 --- a/processor/src/trace/mod.rs +++ b/processor/src/trace/mod.rs @@ -1,10 +1,11 @@ use super::{ chiplets::AuxTraceBuilder as ChipletsAuxTraceBuilder, crypto::RpoRandomCoin, - decoder::AuxTraceHints as DecoderAuxTraceHints, + decoder::AuxTraceBuilder as DecoderAuxTraceBuilder, range::AuxTraceBuilder as RangeCheckerAuxTraceBuilder, stack::AuxTraceBuilder as StackAuxTraceBuilder, ColMatrix, Digest, Felt, FieldElement, Host, Process, StackTopState, Vec, }; +use miden_air::trace::main_trace::MainTrace; use miden_air::trace::{ decoder::{NUM_USER_OP_HELPERS, USER_OP_HELPERS_OFFSET}, AUX_TRACE_RAND_ELEMENTS, AUX_TRACE_WIDTH, DECODER_TRACE_OFFSET, MIN_TRACE_LEN, @@ -17,12 +18,7 @@ use winter_prover::{crypto::RandomCoin, EvaluationFrame, Trace, TraceLayout}; use vm_core::StarkField; mod utils; -pub use utils::{ - build_lookup_table_row_values, AuxColumnBuilder, ChipletsLengths, LookupTableRow, - TraceFragment, TraceLenSummary, -}; - -mod decoder; +pub use utils::{AuxColumnBuilder, ChipletsLengths, TraceFragment, TraceLenSummary}; #[cfg(test)] mod tests; @@ -38,8 +34,8 @@ pub const NUM_RAND_ROWS: usize = 1; // VM EXECUTION TRACE // ================================================================================================ -pub struct AuxTraceHints { - pub(crate) decoder: DecoderAuxTraceHints, +pub struct AuxTraceBuilders { + pub(crate) decoder: DecoderAuxTraceBuilder, pub(crate) stack: StackAuxTraceBuilder, pub(crate) range: RangeCheckerAuxTraceBuilder, pub(crate) chiplets: ChipletsAuxTraceBuilder, @@ -55,8 +51,8 @@ pub struct AuxTraceHints { pub struct ExecutionTrace { meta: Vec, layout: TraceLayout, - main_trace: ColMatrix, - aux_trace_hints: AuxTraceHints, + main_trace: MainTrace, + aux_trace_builders: AuxTraceBuilders, program_info: ProgramInfo, 
stack_outputs: StackOutputs, trace_len_summary: TraceLenSummary, @@ -91,8 +87,8 @@ impl ExecutionTrace { Self { meta: Vec::new(), layout: TraceLayout::new(TRACE_WIDTH, [AUX_TRACE_WIDTH], [AUX_TRACE_RAND_ELEMENTS]), - main_trace: ColMatrix::new(main_trace), - aux_trace_hints, + aux_trace_builders: aux_trace_hints, + main_trace, program_info, stack_outputs, trace_len_summary, @@ -178,7 +174,7 @@ impl ExecutionTrace { #[cfg(test)] pub fn test_finalize_trace( process: Process, - ) -> (Vec>, AuxTraceHints, TraceLenSummary) + ) -> (MainTrace, AuxTraceBuilders, TraceLenSummary) where H: Host, { @@ -222,23 +218,24 @@ impl Trace for ExecutionTrace { // TODO: build auxiliary columns in multiple threads // add decoder's running product columns - let decoder_aux_columns = decoder::build_aux_columns( - &self.main_trace, - &self.aux_trace_hints.decoder, - rand_elements, - ); + let decoder_aux_columns = self + .aux_trace_builders + .decoder + .build_aux_columns(&self.main_trace, rand_elements); // add stack's running product columns let stack_aux_columns = - self.aux_trace_hints.stack.build_aux_columns(&self.main_trace, rand_elements); + self.aux_trace_builders.stack.build_aux_columns(&self.main_trace, rand_elements); // add the range checker's running product columns let range_aux_columns = - self.aux_trace_hints.range.build_aux_columns(&self.main_trace, rand_elements); + self.aux_trace_builders.range.build_aux_columns(&self.main_trace, rand_elements); // add the running product columns for the chiplets - let chiplets = - self.aux_trace_hints.chiplets.build_aux_columns(&self.main_trace, rand_elements); + let chiplets = self + .aux_trace_builders + .chiplets + .build_aux_columns(&self.main_trace, rand_elements); // combine all auxiliary columns into a single vector let mut aux_columns = decoder_aux_columns @@ -280,7 +277,7 @@ impl Trace for ExecutionTrace { fn finalize_trace( process: Process, mut rng: RpoRandomCoin, -) -> (Vec>, AuxTraceHints, TraceLenSummary) +) -> (MainTrace, 
AuxTraceBuilders, TraceLenSummary) where H: Host, { @@ -338,12 +335,14 @@ where } } - let aux_trace_hints = AuxTraceHints { - decoder: decoder_trace.aux_trace_hints, + let aux_trace_hints = AuxTraceBuilders { + decoder: decoder_trace.aux_builder, stack: stack_trace.aux_builder, range: range_check_trace.aux_builder, chiplets: chiplets_trace.aux_builder, }; - (trace, aux_trace_hints, trace_len_summary) + let main_trace = MainTrace::new(ColMatrix::new(trace)); + + (main_trace, aux_trace_hints, trace_len_summary) } diff --git a/processor/src/trace/tests/chiplets/hasher.rs b/processor/src/trace/tests/chiplets/hasher.rs index fe7eb3ce40..c799d8e4ef 100644 --- a/processor/src/trace/tests/chiplets/hasher.rs +++ b/processor/src/trace/tests/chiplets/hasher.rs @@ -538,6 +538,219 @@ fn b_chip_mpverify() { } } +/// Tests the generation of the `b_chip` bus column when the hasher performs a Merkle root update +/// requested by the `MrUpdate` user operation. +#[test] +#[allow(clippy::needless_range_loop)] +fn b_chip_mrupdate() { + let index = 5usize; + let leaves = init_leaves(&[1, 2, 3, 4, 5, 6, 7, 8]); + let mut tree = MerkleTree::new(leaves.to_vec()).unwrap(); + + let old_root = tree.root(); + let old_leaf_value = leaves[index]; + + let new_leaf_value = leaves[0]; + + let stack_inputs = [ + new_leaf_value[0].as_int(), + new_leaf_value[1].as_int(), + new_leaf_value[2].as_int(), + new_leaf_value[3].as_int(), + old_root[0].as_int(), + old_root[1].as_int(), + old_root[2].as_int(), + old_root[3].as_int(), + index as u64, + tree.depth() as u64, + old_leaf_value[0].as_int(), + old_leaf_value[1].as_int(), + old_leaf_value[2].as_int(), + old_leaf_value[3].as_int(), + ]; + let stack_inputs = StackInputs::try_from_values(stack_inputs).unwrap(); + let store = MerkleStore::from(&tree); + let advice_inputs = AdviceInputs::default().with_merkle_store(store); + + let mut trace = + build_trace_from_ops_with_inputs(vec![Operation::MrUpdate], stack_inputs, advice_inputs); + let alphas = 
rand_array::(); + let aux_columns = trace.build_aux_segment(&[], &alphas).unwrap(); + let b_chip = aux_columns.get_column(CHIPLETS_AUX_TRACE_OFFSET); + + assert_eq!(trace.length(), b_chip.len()); + assert_eq!(ONE, b_chip[0]); + + // at cycle 0 the following are added for inclusion in the next row: + // - the initialization of the span hash is requested by the decoder + // - the initialization of the span hash is provided by the hasher + + // initialize the request state. + let mut span_state = [ZERO; STATE_WIDTH]; + fill_state_from_decoder_with_domain(&trace, &mut span_state, 0); + // request the initialization of the span hash + let span_init = + build_expected(&alphas, LINEAR_HASH_LABEL, span_state, [ZERO; STATE_WIDTH], ONE, ZERO); + let mut expected = span_init.inv(); + // provide the initialization of the span hash + expected *= build_expected_from_trace(&trace, &alphas, 0); + assert_eq!(expected, b_chip[1]); + + // at cycle 1 a merkle path verification is executed and the initialization and result of the + // hash are both requested by the stack. 
+ let path = tree + .get_path(NodeIndex::new(tree.depth(), index as u64).unwrap()) + .expect("failed to get Merkle tree path"); + let mp_state = init_state_from_words( + &[path[0][0], path[0][1], path[0][2], path[0][3]], + &[leaves[index][0], leaves[index][1], leaves[index][2], leaves[index][3]], + ); + let mp_init_old = build_expected( + &alphas, + MR_UPDATE_OLD_LABEL, + mp_state, + [ZERO; STATE_WIDTH], + Felt::new(9), + Felt::new(index as u64), + ); + // request the initialization of the (first) Merkle path verification + expected *= mp_init_old.inv(); + + let mp_old_verify_complete = HASH_CYCLE_LEN + (tree.depth() as usize) * HASH_CYCLE_LEN; + let mp_result_old = build_expected( + &alphas, + RETURN_HASH_LABEL, + // for the return hash, only the state digest matters, and it should match the root + [ + ZERO, + ZERO, + ZERO, + ZERO, + tree.root()[0], + tree.root()[1], + tree.root()[2], + tree.root()[3], + ZERO, + ZERO, + ZERO, + ZERO, + ], + [ZERO; STATE_WIDTH], + Felt::new(mp_old_verify_complete as u64), + Felt::new(index as u64 >> tree.depth()), + ); + + // request the result of the first Merkle path verification + expected *= mp_result_old.inv(); + + let new_leaf_value = leaves[0]; + tree.update_leaf(index as u64, new_leaf_value).unwrap(); + let new_root = tree.root(); + + // a second merkle path verification is executed and the initialization and result of the + // hash are both requested by the stack. 
+ let path = tree + .get_path(NodeIndex::new(tree.depth(), index as u64).unwrap()) + .expect("failed to get Merkle tree path"); + let mp_state = init_state_from_words( + &[path[0][0], path[0][1], path[0][2], path[0][3]], + &[new_leaf_value[0], new_leaf_value[1], new_leaf_value[2], new_leaf_value[3]], + ); + + let mp_new_verify_complete = mp_old_verify_complete + (tree.depth() as usize) * HASH_CYCLE_LEN; + let mp_init_new = build_expected( + &alphas, + MR_UPDATE_NEW_LABEL, + mp_state, + [ZERO; STATE_WIDTH], + Felt::from(mp_old_verify_complete as u64 + 1), + Felt::new(index as u64), + ); + + // request the initialization of the second Merkle path verification + expected *= mp_init_new.inv(); + + let mp_result_new = build_expected( + &alphas, + RETURN_HASH_LABEL, + // for the return hash, only the state digest matters, and it should match the root + [ + ZERO, + ZERO, + ZERO, + ZERO, + new_root[0], + new_root[1], + new_root[2], + new_root[3], + ZERO, + ZERO, + ZERO, + ZERO, + ], + [ZERO; STATE_WIDTH], + Felt::new(mp_new_verify_complete as u64), + Felt::new(index as u64 >> tree.depth()), + ); + + // request the result of the second Merkle path verification + expected *= mp_result_new.inv(); + assert_eq!(expected, b_chip[2]); + + // at cycle 2 the result of the span hash is requested by the decoder + apply_permutation(&mut span_state); + let span_result = build_expected( + &alphas, + RETURN_HASH_LABEL, + span_state, + [ZERO; STATE_WIDTH], + Felt::new(8), + ZERO, + ); + expected *= span_result.inv(); + assert_eq!(expected, b_chip[3]); + + // Nothing changes when there is no communication with the hash chiplet. 
+ for row in 3..8 { + assert_eq!(expected, b_chip[row]); + } + + // at cycle 7 the result of the span hash is provided by the hasher + expected *= build_expected_from_trace(&trace, &alphas, 7); + assert_eq!(expected, b_chip[8]); + + // at cycle 8 the initialization of the first merkle path is provided by the hasher + expected *= build_expected_from_trace(&trace, &alphas, 8); + assert_eq!(expected, b_chip[9]); + + // Nothing changes when there is no communication with the hash chiplet. + for row in 10..(mp_old_verify_complete) { + assert_eq!(expected, b_chip[row]); + } + + // when the first merkle path verification has been completed the hasher provides the result + expected *= build_expected_from_trace(&trace, &alphas, mp_old_verify_complete - 1); + assert_eq!(expected, b_chip[mp_old_verify_complete]); + + // at cycle 32 the initialization of the second merkle path is provided by the hasher + expected *= build_expected_from_trace(&trace, &alphas, mp_old_verify_complete); + assert_eq!(expected, b_chip[mp_old_verify_complete + 1]); + + // Nothing changes when there is no communication with the hash chiplet. + for row in (mp_old_verify_complete + 1)..(mp_new_verify_complete) { + assert_eq!(expected, b_chip[row]); + } + + // when the merkle path verification has been completed the hasher provides the result + expected *= build_expected_from_trace(&trace, &alphas, mp_new_verify_complete - 1); + assert_eq!(expected, b_chip[mp_new_verify_complete]); + + // The value in b_chip should be ONE now and for the rest of the trace. 
+ for row in (mp_new_verify_complete)..trace.length() - NUM_RAND_ROWS { + assert_eq!(ONE, b_chip[row]); + } +} + // TEST HELPERS // ================================================================================================ @@ -572,7 +785,7 @@ fn build_expected( || label == MR_UPDATE_NEW_LABEL || label == MR_UPDATE_OLD_LABEL ); - let bit = (index.as_int() >> 1) & 1; + let bit = (index.as_int() >> 0) & 1; let left_word = build_value(&alphas[8..12], &state[DIGEST_RANGE]); let right_word = build_value(&alphas[8..12], &state[DIGEST_RANGE.end..]); diff --git a/processor/src/trace/decoder/tests.rs b/processor/src/trace/tests/decoder.rs similarity index 80% rename from processor/src/trace/decoder/tests.rs rename to processor/src/trace/tests/decoder.rs index 71c806a1ef..da0da42ba4 100644 --- a/processor/src/trace/decoder/tests.rs +++ b/processor/src/trace/tests/decoder.rs @@ -2,17 +2,17 @@ use super::{ super::{ tests::{build_trace_from_block, build_trace_from_ops}, utils::build_span_with_respan_ops, - LookupTableRow, Trace, NUM_RAND_ROWS, + Trace, NUM_RAND_ROWS, }, Felt, }; -use crate::decoder::{build_op_group, BlockHashTableRow, BlockStackTableRow, OpGroupTableRow}; +use crate::{decoder::build_op_group, ContextId}; use miden_air::trace::{ decoder::{P1_COL_IDX, P2_COL_IDX, P3_COL_IDX}, AUX_TRACE_RAND_ELEMENTS, }; use test_utils::rand::rand_array; -use vm_core::{code_blocks::CodeBlock, FieldElement, Operation, ONE, ZERO}; +use vm_core::{code_blocks::CodeBlock, FieldElement, Operation, Word, ONE, ZERO}; // BLOCK STACK TABLE TESTS // ================================================================================================ @@ -27,9 +27,8 @@ fn decoder_p1_span_with_respan() { let p1 = aux_columns.get_column(P1_COL_IDX); let row_values = [ - BlockStackTableRow::new_test(ONE, ZERO, false).to_value(&trace.main_trace, &alphas), - BlockStackTableRow::new_test(Felt::new(9), ZERO, false) - .to_value(&trace.main_trace, &alphas), + BlockStackTableRow::new(ONE, ZERO, 
false).to_value(&alphas), + BlockStackTableRow::new(Felt::new(9), ZERO, false).to_value(&alphas), ]; // make sure the first entry is ONE @@ -76,9 +75,9 @@ fn decoder_p1_join() { let a_9 = Felt::new(9); let a_17 = Felt::new(17); let row_values = [ - BlockStackTableRow::new_test(ONE, ZERO, false).to_value(&trace.main_trace, &alphas), - BlockStackTableRow::new_test(a_9, ONE, false).to_value(&trace.main_trace, &alphas), - BlockStackTableRow::new_test(a_17, ONE, false).to_value(&trace.main_trace, &alphas), + BlockStackTableRow::new(ONE, ZERO, false).to_value(&alphas), + BlockStackTableRow::new(a_9, ONE, false).to_value(&alphas), + BlockStackTableRow::new(a_17, ONE, false).to_value(&alphas), ]; // make sure the first entry is ONE @@ -135,8 +134,8 @@ fn decoder_p1_split() { let a_9 = Felt::new(9); let row_values = [ - BlockStackTableRow::new_test(ONE, ZERO, false).to_value(&trace.main_trace, &alphas), - BlockStackTableRow::new_test(a_9, ONE, false).to_value(&trace.main_trace, &alphas), + BlockStackTableRow::new(ONE, ZERO, false).to_value(&alphas), + BlockStackTableRow::new(a_9, ONE, false).to_value(&alphas), ]; // make sure the first entry is ONE @@ -188,13 +187,13 @@ fn decoder_p1_loop_with_repeat() { let a_41 = Felt::new(41); // address of the first SPAN block in the second iteration let a_49 = Felt::new(49); // address of the second SPAN block in the second iteration let row_values = [ - BlockStackTableRow::new_test(ONE, ZERO, true).to_value(&trace.main_trace, &alphas), - BlockStackTableRow::new_test(a_9, ONE, false).to_value(&trace.main_trace, &alphas), - BlockStackTableRow::new_test(a_17, a_9, false).to_value(&trace.main_trace, &alphas), - BlockStackTableRow::new_test(a_25, a_9, false).to_value(&trace.main_trace, &alphas), - BlockStackTableRow::new_test(a_33, ONE, false).to_value(&trace.main_trace, &alphas), - BlockStackTableRow::new_test(a_41, a_33, false).to_value(&trace.main_trace, &alphas), - BlockStackTableRow::new_test(a_49, a_33, 
false).to_value(&trace.main_trace, &alphas), + BlockStackTableRow::new(ONE, ZERO, true).to_value(&alphas), + BlockStackTableRow::new(a_9, ONE, false).to_value(&alphas), + BlockStackTableRow::new(a_17, a_9, false).to_value(&alphas), + BlockStackTableRow::new(a_25, a_9, false).to_value(&alphas), + BlockStackTableRow::new(a_33, ONE, false).to_value(&alphas), + BlockStackTableRow::new(a_41, a_33, false).to_value(&alphas), + BlockStackTableRow::new(a_49, a_33, false).to_value(&alphas), ]; // make sure the first entry is ONE @@ -295,8 +294,8 @@ fn decoder_p2_span_with_respan() { let aux_columns = trace.build_aux_segment(&[], &alphas).unwrap(); let p2 = aux_columns.get_column(P2_COL_IDX); - let row_values = [BlockHashTableRow::new_test(ZERO, span.hash().into(), false, false) - .to_value(&trace.main_trace, &alphas)]; + let row_values = + [BlockHashTableRow::new_test(ZERO, span.hash().into(), false, false).to_value(&alphas)]; // make sure the first entry is initialized to program hash let mut expected_value = row_values[0]; @@ -328,12 +327,9 @@ fn decoder_p2_join() { let p2 = aux_columns.get_column(P2_COL_IDX); let row_values = [ - BlockHashTableRow::new_test(ZERO, program.hash().into(), false, false) - .to_value(&trace.main_trace, &alphas), - BlockHashTableRow::new_test(ONE, span1.hash().into(), true, false) - .to_value(&trace.main_trace, &alphas), - BlockHashTableRow::new_test(ONE, span2.hash().into(), false, false) - .to_value(&trace.main_trace, &alphas), + BlockHashTableRow::new_test(ZERO, program.hash().into(), false, false).to_value(&alphas), + BlockHashTableRow::new_test(ONE, span1.hash().into(), true, false).to_value(&alphas), + BlockHashTableRow::new_test(ONE, span2.hash().into(), false, false).to_value(&alphas), ]; // make sure the first entry is initialized to program hash @@ -384,10 +380,8 @@ fn decoder_p2_split_true() { let p2 = aux_columns.get_column(P2_COL_IDX); let row_values = [ - BlockHashTableRow::new_test(ZERO, program.hash().into(), false, false) - 
.to_value(&trace.main_trace, &alphas), - BlockHashTableRow::new_test(ONE, span1.hash().into(), false, false) - .to_value(&trace.main_trace, &alphas), + BlockHashTableRow::new_test(ZERO, program.hash().into(), false, false).to_value(&alphas), + BlockHashTableRow::new_test(ONE, span1.hash().into(), false, false).to_value(&alphas), ]; // make sure the first entry is initialized to program hash @@ -430,10 +424,8 @@ fn decoder_p2_split_false() { let p2 = aux_columns.get_column(P2_COL_IDX); let row_values = [ - BlockHashTableRow::new_test(ZERO, program.hash().into(), false, false) - .to_value(&trace.main_trace, &alphas), - BlockHashTableRow::new_test(ONE, span2.hash().into(), false, false) - .to_value(&trace.main_trace, &alphas), + BlockHashTableRow::new_test(ZERO, program.hash().into(), false, false).to_value(&alphas), + BlockHashTableRow::new_test(ONE, span2.hash().into(), false, false).to_value(&alphas), ]; // make sure the first entry is initialized to program hash @@ -479,18 +471,12 @@ fn decoder_p2_loop_with_repeat() { let a_9 = Felt::new(9); // address of the JOIN block in the first iteration let a_33 = Felt::new(33); // address of the JOIN block in the second iteration let row_values = [ - BlockHashTableRow::new_test(ZERO, program.hash().into(), false, false) - .to_value(&trace.main_trace, &alphas), - BlockHashTableRow::new_test(ONE, body.hash().into(), false, true) - .to_value(&trace.main_trace, &alphas), - BlockHashTableRow::new_test(a_9, span1.hash().into(), true, false) - .to_value(&trace.main_trace, &alphas), - BlockHashTableRow::new_test(a_9, span2.hash().into(), false, false) - .to_value(&trace.main_trace, &alphas), - BlockHashTableRow::new_test(a_33, span1.hash().into(), true, false) - .to_value(&trace.main_trace, &alphas), - BlockHashTableRow::new_test(a_33, span2.hash().into(), false, false) - .to_value(&trace.main_trace, &alphas), + BlockHashTableRow::new_test(ZERO, program.hash().into(), false, false).to_value(&alphas), + 
BlockHashTableRow::new_test(ONE, body.hash().into(), false, true).to_value(&alphas), + BlockHashTableRow::new_test(a_9, span1.hash().into(), true, false).to_value(&alphas), + BlockHashTableRow::new_test(a_9, span2.hash().into(), false, false).to_value(&alphas), + BlockHashTableRow::new_test(a_33, span1.hash().into(), true, false).to_value(&alphas), + BlockHashTableRow::new_test(a_33, span2.hash().into(), false, false).to_value(&alphas), ]; // make sure the first entry is initialized to program hash @@ -616,12 +602,9 @@ fn decoder_p3_trace_one_batch() { // make sure 3 groups were inserted at clock cycle 1; these entries are for the two immediate // values and the second operation group consisting of [SWAP, MUL, ADD] - let g1_value = - OpGroupTableRow::new(ONE, Felt::new(3), ONE).to_value(&trace.main_trace, &alphas); - let g2_value = - OpGroupTableRow::new(ONE, Felt::new(2), Felt::new(2)).to_value(&trace.main_trace, &alphas); - let g3_value = OpGroupTableRow::new(ONE, ONE, build_op_group(&ops[9..])) - .to_value(&trace.main_trace, &alphas); + let g1_value = OpGroupTableRow::new(ONE, Felt::new(3), ONE).to_value(&alphas); + let g2_value = OpGroupTableRow::new(ONE, Felt::new(2), Felt::new(2)).to_value(&alphas); + let g3_value = OpGroupTableRow::new(ONE, ONE, build_op_group(&ops[9..])).to_value(&alphas); let expected_value = g1_value * g2_value * g3_value; assert_eq!(expected_value, p3[1]); @@ -672,13 +655,13 @@ fn decoder_p3_trace_two_batches() { // --- first batch ---------------------------------------------------------------------------- // make sure entries for 7 groups were inserted at clock cycle 1 let b0_values = [ - OpGroupTableRow::new(ONE, Felt::new(11), iv[0]).to_value(&trace.main_trace, &alphas), - OpGroupTableRow::new(ONE, Felt::new(10), iv[1]).to_value(&trace.main_trace, &alphas), - OpGroupTableRow::new(ONE, Felt::new(9), iv[2]).to_value(&trace.main_trace, &alphas), - OpGroupTableRow::new(ONE, Felt::new(8), iv[3]).to_value(&trace.main_trace, &alphas), - 
OpGroupTableRow::new(ONE, Felt::new(7), iv[4]).to_value(&trace.main_trace, &alphas), - OpGroupTableRow::new(ONE, Felt::new(6), iv[5]).to_value(&trace.main_trace, &alphas), - OpGroupTableRow::new(ONE, Felt::new(5), iv[6]).to_value(&trace.main_trace, &alphas), + OpGroupTableRow::new(ONE, Felt::new(11), iv[0]).to_value(&alphas), + OpGroupTableRow::new(ONE, Felt::new(10), iv[1]).to_value(&alphas), + OpGroupTableRow::new(ONE, Felt::new(9), iv[2]).to_value(&alphas), + OpGroupTableRow::new(ONE, Felt::new(8), iv[3]).to_value(&alphas), + OpGroupTableRow::new(ONE, Felt::new(7), iv[4]).to_value(&alphas), + OpGroupTableRow::new(ONE, Felt::new(6), iv[5]).to_value(&alphas), + OpGroupTableRow::new(ONE, Felt::new(5), iv[6]).to_value(&alphas), ]; let mut expected_value: Felt = b0_values.iter().fold(ONE, |acc, &val| acc * val); assert_eq!(expected_value, p3[1]); @@ -701,9 +684,9 @@ fn decoder_p3_trace_two_batches() { let batch1_addr = ONE + Felt::new(8); let op_group3 = build_op_group(&[Operation::Drop; 2]); let b1_values = [ - OpGroupTableRow::new(batch1_addr, Felt::new(3), iv[7]).to_value(&trace.main_trace, &alphas), - OpGroupTableRow::new(batch1_addr, Felt::new(2), iv[8]).to_value(&trace.main_trace, &alphas), - OpGroupTableRow::new(batch1_addr, ONE, op_group3).to_value(&trace.main_trace, &alphas), + OpGroupTableRow::new(batch1_addr, Felt::new(3), iv[7]).to_value(&alphas), + OpGroupTableRow::new(batch1_addr, Felt::new(2), iv[8]).to_value(&alphas), + OpGroupTableRow::new(batch1_addr, ONE, op_group3).to_value(&alphas), ]; let mut expected_value: Felt = b1_values.iter().fold(ONE, |acc, &val| acc * val); assert_eq!(expected_value, p3[10]); @@ -730,3 +713,133 @@ fn decoder_p3_trace_two_batches() { assert_eq!(ONE, p3[i]); } } + +// HELPER STRUCTS AND METHODS +// ================================================================================================ + +/// Describes a single entry in the block stack table. 
+#[derive(Debug, Copy, Clone, PartialEq, Eq)] +pub struct BlockStackTableRow { + block_id: Felt, + parent_id: Felt, + is_loop: bool, + parent_ctx: ContextId, + parent_fn_hash: Word, + parent_fmp: Felt, + parent_stack_depth: u32, + parent_next_overflow_addr: Felt, +} + +impl BlockStackTableRow { + /// Returns a new [BlockStackTableRow] instantiated with the specified parameters. This is + /// used for test purpose only. + #[cfg(test)] + pub fn new(block_id: Felt, parent_id: Felt, is_loop: bool) -> Self { + Self { + block_id, + parent_id, + is_loop, + parent_ctx: ContextId::root(), + parent_fn_hash: vm_core::EMPTY_WORD, + parent_fmp: ZERO, + parent_stack_depth: 0, + parent_next_overflow_addr: ZERO, + } + } +} + +impl BlockStackTableRow { + /// Reduces this row to a single field element in the field specified by E. This requires + /// at least 12 alpha values. + pub fn to_value>(&self, alphas: &[E]) -> E { + let is_loop = if self.is_loop { ONE } else { ZERO }; + alphas[0] + + alphas[1].mul_base(self.block_id) + + alphas[2].mul_base(self.parent_id) + + alphas[3].mul_base(is_loop) + + alphas[4].mul_base(Felt::from(self.parent_ctx)) + + alphas[5].mul_base(self.parent_fmp) + + alphas[6].mul_base(Felt::from(self.parent_stack_depth)) + + alphas[7].mul_base(self.parent_next_overflow_addr) + + alphas[8].mul_base(self.parent_fn_hash[0]) + + alphas[9].mul_base(self.parent_fn_hash[1]) + + alphas[10].mul_base(self.parent_fn_hash[2]) + + alphas[11].mul_base(self.parent_fn_hash[3]) + } +} + +/// Describes a single entry in the block hash table. An entry in the block hash table is a tuple +/// (parent_id, block_hash, is_first_child, is_loop_body). +#[derive(Debug, Copy, Clone, PartialEq, Eq)] +pub struct BlockHashTableRow { + parent_id: Felt, + block_hash: Word, + is_first_child: bool, + is_loop_body: bool, +} + +impl BlockHashTableRow { + /// Returns a new [BlockHashTableRow] instantiated with the specified parameters. This is + /// used for test purpose only. 
+ pub fn new_test( + parent_id: Felt, + block_hash: Word, + is_first_child: bool, + is_loop_body: bool, + ) -> Self { + Self { + parent_id, + block_hash, + is_first_child, + is_loop_body, + } + } +} + +impl BlockHashTableRow { + /// Reduces this row to a single field element in the field specified by E. This requires + /// at least 8 alpha values. + pub fn to_value>(&self, alphas: &[E]) -> E { + let is_first_child = if self.is_first_child { ONE } else { ZERO }; + let is_loop_body = if self.is_loop_body { ONE } else { ZERO }; + alphas[0] + + alphas[1].mul_base(self.parent_id) + + alphas[2].mul_base(self.block_hash[0]) + + alphas[3].mul_base(self.block_hash[1]) + + alphas[4].mul_base(self.block_hash[2]) + + alphas[5].mul_base(self.block_hash[3]) + + alphas[6].mul_base(is_first_child) + + alphas[7].mul_base(is_loop_body) + } +} + +/// Describes a single entry in the op group table. An entry in the op group table is a tuple +/// (batch_id, group_pos, group_value). +#[derive(Debug, Copy, Clone, PartialEq, Eq)] +pub struct OpGroupTableRow { + batch_id: Felt, + group_pos: Felt, + group_value: Felt, +} + +impl OpGroupTableRow { + /// Returns a new [OpGroupTableRow] instantiated with the specified parameters. + pub fn new(batch_id: Felt, group_pos: Felt, group_value: Felt) -> Self { + Self { + batch_id, + group_pos, + group_value, + } + } +} + +impl OpGroupTableRow { + /// Reduces this row to a single field element in the field specified by E. This requires + /// at least 4 alpha values. 
+ pub fn to_value>(&self, alphas: &[E]) -> E { + alphas[0] + + alphas[1].mul_base(self.batch_id) + + alphas[2].mul_base(self.group_pos) + + alphas[3].mul_base(self.group_value) + } +} diff --git a/processor/src/trace/tests/hasher.rs b/processor/src/trace/tests/hasher.rs index bcb54b7451..8753e67a50 100644 --- a/processor/src/trace/tests/hasher.rs +++ b/processor/src/trace/tests/hasher.rs @@ -1,9 +1,11 @@ use super::{ super::{Trace, NUM_RAND_ROWS}, - build_trace_from_ops_with_inputs, rand_array, AdviceInputs, Felt, LookupTableRow, Operation, - Vec, Word, ONE, ZERO, + build_trace_from_ops_with_inputs, rand_array, AdviceInputs, Felt, Operation, Vec, Word, ONE, + ZERO, }; -use crate::{chiplets::ChipletsVTableRow, StackInputs}; + +use crate::StackInputs; +use miden_air::trace::main_trace::MainTrace; use miden_air::trace::{chiplets::hasher::P1_COL_IDX, AUX_TRACE_RAND_ELEMENTS}; use vm_core::{ crypto::merkle::{MerkleStore, MerkleTree, NodeIndex}, @@ -72,11 +74,10 @@ fn hasher_p1_mr_update() { let p1 = aux_columns.get_column(P1_COL_IDX); let row_values = [ - ChipletsVTableRow::new_sibling(Felt::new(index), path[0].into()) - .to_value(&trace.main_trace, &alphas), - ChipletsVTableRow::new_sibling(Felt::new(index >> 1), path[1].into()) + SiblingTableRow::new(Felt::new(index), path[0].into()).to_value(&trace.main_trace, &alphas), + SiblingTableRow::new(Felt::new(index >> 1), path[1].into()) .to_value(&trace.main_trace, &alphas), - ChipletsVTableRow::new_sibling(Felt::new(index >> 2), path[2].into()) + SiblingTableRow::new(Felt::new(index >> 2), path[2].into()) .to_value(&trace.main_trace, &alphas), ]; @@ -148,7 +149,7 @@ fn hasher_p1_mr_update() { } } -// HELPER FUNCTIONS +// HELPER STRUCTS, METHODS AND FUNCTIONS // ================================================================================================ fn build_merkle_tree() -> (MerkleTree, Vec) { @@ -168,3 +169,48 @@ fn init_leaf(value: u64) -> Word { fn append_word(target: &mut Vec, word: Word) { 
word.iter().rev().for_each(|v| target.push(v.as_int())); } + +/// Describes a single entry in the sibling table which consists of a tuple `(index, node)` where +/// index is the index of the node at its depth. For example, assume a leaf has index n. For the +/// leaf's parent the index will be n << 1. For the parent of the parent, the index will be +/// n << 2 etc. +#[derive(Debug, Copy, Clone, PartialEq, Eq)] +pub struct SiblingTableRow { + index: Felt, + sibling: Word, +} + +impl SiblingTableRow { + pub fn new(index: Felt, sibling: Word) -> Self { + Self { index, sibling } + } + + /// Reduces this row to a single field element in the field specified by E. This requires + /// at least 6 alpha values. + pub fn to_value>( + &self, + _main_trace: &MainTrace, + alphas: &[E], + ) -> E { + // when the least significant bit of the index is 0, the sibling will be in the 3rd word + // of the hasher state, and when the least significant bit is 1, it will be in the 2nd + // word. we compute the value in this way to make constraint evaluation a bit easier since + // we need to compute the 2nd and the 3rd word values for other purposes as well. 
+ let lsb = self.index.as_int() & 1; + if lsb == 0 { + alphas[0] + + alphas[3].mul_base(self.index) + + alphas[12].mul_base(self.sibling[0]) + + alphas[13].mul_base(self.sibling[1]) + + alphas[14].mul_base(self.sibling[2]) + + alphas[15].mul_base(self.sibling[3]) + } else { + alphas[0] + + alphas[3].mul_base(self.index) + + alphas[8].mul_base(self.sibling[0]) + + alphas[9].mul_base(self.sibling[1]) + + alphas[10].mul_base(self.sibling[2]) + + alphas[11].mul_base(self.sibling[3]) + } + } +} diff --git a/processor/src/trace/tests/mod.rs b/processor/src/trace/tests/mod.rs index 8aeac004a9..35f174ec3b 100644 --- a/processor/src/trace/tests/mod.rs +++ b/processor/src/trace/tests/mod.rs @@ -1,6 +1,6 @@ use super::{ - super::chiplets::init_state_from_words, ExecutionTrace, Felt, FieldElement, LookupTableRow, - Process, Trace, Vec, NUM_RAND_ROWS, + super::chiplets::init_state_from_words, ExecutionTrace, Felt, FieldElement, Process, Trace, + Vec, NUM_RAND_ROWS, }; use crate::{AdviceInputs, DefaultHost, ExecutionOptions, MemAdviceProvider, StackInputs}; use test_utils::rand::rand_array; @@ -9,7 +9,9 @@ use vm_core::{ }; mod chiplets; +mod decoder; mod hasher; +mod multiset; mod range; mod stack; diff --git a/processor/src/trace/tests/multiset.rs b/processor/src/trace/tests/multiset.rs new file mode 100644 index 0000000000..5b2292f1b1 --- /dev/null +++ b/processor/src/trace/tests/multiset.rs @@ -0,0 +1,81 @@ +#[cfg(test)] +mod tests { + use crate::trace::AuxColumnBuilder; + use crate::{Felt, FieldElement}; + use miden_air::trace::main_trace::MainTrace; + use vm_core::polynom::mul; + use winter_prover::matrix::ColMatrix; + + struct MultisetTester> { + multiplicands: Vec, + divisors: Vec, + alphas: Vec, + } + + impl> MultisetTester { + fn new(multiplicands: Vec, divisors: Vec) -> Self { + Self { + multiplicands, + divisors, + alphas: vec![Felt::new(83747374).into()], + } + } + } + + impl> AuxColumnBuilder for MultisetTester { + fn get_requests_at(&self, main_trace: &MainTrace, 
alphas: &[E], row_idx: usize) -> E { + self.multiplicands[row_idx] + } + + fn get_responses_at(&self, main_trace: &MainTrace, alphas: &[E], row_idx: usize) -> E { + self.divisors[row_idx] + } + } + + fn main_trace_with_n_rows(n_rows: usize) -> MainTrace { + let trace_columns = vec![(0..n_rows).fold(vec![], |mut acc, row| { + acc.push(Felt::new(row as u64)); + acc + })]; + MainTrace::new(ColMatrix::new(trace_columns)) + } + + #[test] + fn multiset_check_valid_permutation() { + let multiplicands = vec![Felt::new(1), Felt::new(2), Felt::new(3)]; + let divisors = vec![Felt::new(2), Felt::new(1), Felt::new(3)]; + let multiset_tester = MultisetTester::new(multiplicands, divisors); + let aux_column = multiset_tester.build_aux_column( + &main_trace_with_n_rows(4), + multiset_tester.alphas.as_slice() + ); + assert_eq!(aux_column.first().unwrap(), aux_column.last().unwrap()); + } + + #[test] + fn multiset_check_invalid_permutation() { + let multiplicands = vec![Felt::new(1), Felt::new(3), Felt::new(10)]; + let divisors = vec![Felt::new(4), Felt::new(3), Felt::new(10)]; + let multiset_tester = MultisetTester::new(multiplicands, divisors); + let aux_column = multiset_tester.build_aux_column( + &main_trace_with_n_rows(4), + multiset_tester.alphas.as_slice(), + ); + assert_ne!(aux_column.first().unwrap(), aux_column.last().unwrap()); + } + + #[test] + fn multiset_check_invalid_permutation_same_grand_product() { + // ensure that the multiset check is not just the grand product check + // multiplicands and divisors are not permutations of one another + // but have the same grand product + let multiplicands = vec![Felt::new(1), Felt::new(3), Felt::new(5)]; + let divisors = vec![Felt::new(15), Felt::new(1), Felt::new(1)]; + let multiset_tester = MultisetTester::new(multiplicands, divisors); + let aux_column = multiset_tester.build_aux_column( + &main_trace_with_n_rows(4), + multiset_tester.alphas.as_slice(), + ); + assert_ne!(aux_column.first().unwrap(), aux_column.last().unwrap()); 
+ } +} diff --git a/processor/src/trace/tests/stack.rs b/processor/src/trace/tests/stack.rs index 09c22cb09a..a015619e5e 100644 --- a/processor/src/trace/tests/stack.rs +++ b/processor/src/trace/tests/stack.rs @@ -1,6 +1,6 @@ use super::{ - build_trace_from_ops, rand_array, Felt, FieldElement, LookupTableRow, Operation, Trace, Vec, - NUM_RAND_ROWS, ONE, ZERO, + build_trace_from_ops, rand_array, Felt, FieldElement, Operation, Trace, Vec, NUM_RAND_ROWS, + ONE, ZERO, }; use crate::stack::OverflowTableRow; use miden_air::trace::{AUX_TRACE_RAND_ELEMENTS, STACK_AUX_TRACE_OFFSET}; @@ -37,10 +37,10 @@ fn p1_trace() { let p1 = aux_columns.get_column(P1_COL_IDX); let row_values = [ - OverflowTableRow::new(2, ONE, ZERO).to_value(&trace.main_trace, &alphas), - OverflowTableRow::new(3, TWO, TWO).to_value(&trace.main_trace, &alphas), - OverflowTableRow::new(6, TWO, TWO).to_value(&trace.main_trace, &alphas), - OverflowTableRow::new(10, ZERO, ZERO).to_value(&trace.main_trace, &alphas), + OverflowTableRow::new(Felt::new(2), ONE, ZERO).to_value(&alphas) + alphas[0], + OverflowTableRow::new(Felt::new(3), TWO, TWO).to_value(&alphas) + alphas[0], + OverflowTableRow::new(Felt::new(6), TWO, TWO).to_value(&alphas) + alphas[0], + OverflowTableRow::new(Felt::new(10), ZERO, ZERO).to_value(&alphas) + alphas[0], ]; // make sure the first entry is ONE diff --git a/processor/src/trace/utils.rs b/processor/src/trace/utils.rs index 705fa314ba..b5d4a87874 100644 --- a/processor/src/trace/utils.rs +++ b/processor/src/trace/utils.rs @@ -1,7 +1,11 @@ -use super::{ColMatrix, Felt, FieldElement, Vec, NUM_RAND_ROWS}; +use super::{Felt, Vec, NUM_RAND_ROWS}; use crate::chiplets::Chiplets; use core::slice; -use vm_core::utils::uninit_vector; +use miden_air::trace::main_trace::MainTrace; +use vm_core::{utils::uninit_vector, FieldElement}; + +#[cfg(test)] +use vm_core::{utils::ToElements, Operation}; // TRACE FRAGMENT // 
================================================================================================ @@ -68,176 +72,6 @@ impl<'a> TraceFragment<'a> { } } -// LOOKUP TABLES -// ================================================================================================ - -/// Defines a single row in a lookup table defined via multiset checks. -pub trait LookupTableRow { - /// Returns a single element representing the row in the field defined by E. The value is - /// computed using the provided random values. - fn to_value>( - &self, - main_trace: &ColMatrix, - rand_values: &[E], - ) -> E; -} - -/// Computes values as well as inverse value for all specified lookup table rows. -/// -/// To compute the inverses of row values we use a modified version of batch inversion algorithm. -/// The main modification is that we don't need to check for ZERO values, because, assuming -/// random values are drawn from a large enough field, coming across a ZERO value should be -/// computationally infeasible. 
-pub fn build_lookup_table_row_values, R: LookupTableRow>( - rows: &[R], - main_trace: &ColMatrix, - rand_values: &[E], -) -> (Vec, Vec) { - let mut row_values = unsafe { uninit_vector(rows.len()) }; - let mut inv_row_values = unsafe { uninit_vector(rows.len()) }; - - // compute row values and compute their product - let mut acc = E::ONE; - for ((row, value), inv_value) in - rows.iter().zip(row_values.iter_mut()).zip(inv_row_values.iter_mut()) - { - *inv_value = acc; - *value = row.to_value(main_trace, rand_values); - debug_assert_ne!(*value, E::ZERO, "row value cannot be ZERO"); - - acc *= *value; - } - - // invert the accumulated product - acc = acc.inv(); - - // multiply the accumulated value by original values to compute inverses - for i in (0..row_values.len()).rev() { - inv_row_values[i] *= acc; - acc *= row_values[i]; - } - - (row_values, inv_row_values) -} - -// AUX COLUMN BUILDER -// ================================================================================================ - -/// Defines a builder responsible for building a single column in an auxiliary segment of the -/// execution trace. -pub trait AuxColumnBuilder { - // REQUIRED METHODS - // -------------------------------------------------------------------------------------------- - - /// Returns an exhaustive list of rows which are present in the table. - fn get_table_rows(&self) -> &[R]; - - /// Returns a sequence of hints which indicate how the table was updated. Each hint consists - /// of a clock cycle at which the update happened as well as the hint describing the update. - fn get_table_hints(&self) -> &[(U, H)]; - - /// Returns a value by which the current value of the column should be multiplied to get the - /// next value. It is expected that this value should never be ZERO in practice. 
- fn get_multiplicand>( - &self, - hint: H, - row_values: &[E], - inv_row_values: &[E], - ) -> E; - - // PROVIDED METHODS - // -------------------------------------------------------------------------------------------- - - /// Builds and returns the auxiliary trace column managed by this builder. - fn build_aux_column(&self, main_trace: &ColMatrix, alphas: &[E]) -> Vec - where - E: FieldElement, - { - // compute row values and their inverses for all rows that were added to the table - let (row_values, inv_row_values) = self.build_row_values(main_trace, alphas); - - // allocate memory for the running product column and set its initial value - let mut result = unsafe { uninit_vector(main_trace.num_rows()) }; - result[0] = self.init_column_value(&row_values); - - // keep track of the last updated row in the running product column - let mut result_idx = 0_usize; - - // iterate through the list of updates and apply them one by one - for (clk, hint) in self.get_table_hints() { - let clk = clk.as_index(); - - // if we skipped some cycles since the last update was processed, values in the last - // updated row should by copied over until the current cycle. - if result_idx < clk { - let last_value = result[result_idx]; - result[(result_idx + 1)..=clk].fill(last_value); - } - - // move the result pointer to the next row - result_idx = clk + 1; - - // apply the relevant updates to the column; since the multiplicand value should be - // generated by "mixing-in" random values from a large field, the probability that we - // get a ZERO should be negligible (i.e., it should never come up in practice). 
- let multiplicand = self.get_multiplicand(hint.clone(), &row_values, &inv_row_values); - debug_assert_ne!(E::ZERO, multiplicand); - result[result_idx] = result[clk] * multiplicand; - } - - // after all updates have been processed, the table should not change; we make sure that - // the last value in the column is equal to the expected value, and fill in all the - // remaining column values with the last value - let last_value = result[result_idx]; - assert_eq!(last_value, self.final_column_value(&row_values)); - if result_idx < result.len() - 1 { - result[(result_idx + 1)..].fill(last_value); - } - - result - } - - /// Builds and returns row values and their inverses for all rows which were added to the - /// lookup table managed by this column builder. - fn build_row_values(&self, main_trace: &ColMatrix, alphas: &[E]) -> (Vec, Vec) - where - E: FieldElement, - { - build_lookup_table_row_values(self.get_table_rows(), main_trace, alphas) - } - - /// Returns the initial value in the auxiliary column. Default implementation of this method - /// returns ONE. - fn init_column_value>(&self, _row_values: &[E]) -> E { - E::ONE - } - - /// Returns the final value in the auxiliary column. Default implementation of this method - /// returns ONE. - fn final_column_value>(&self, _row_values: &[E]) -> E { - E::ONE - } -} - -/// Defines a simple trait to recognize the possible types of clock cycles associated with auxiliary -/// column update hints. -pub trait HintCycle { - /// Returns the cycle as a `usize` for indexing. 
- fn as_index(&self) -> usize; -} - -impl HintCycle for u32 { - fn as_index(&self) -> usize { - *self as usize - } -} - -impl HintCycle for u64 { - fn as_index(&self) -> usize { - *self as usize - } -} - // TRACE LENGTH SUMMARY // ================================================================================================ @@ -361,10 +195,74 @@ impl ChipletsLengths { } } +// AUXILIARY COLUMN BUILDER +// ================================================================================================ + +/// Defines a builder responsible for building a single column in an auxiliary segment of the +/// execution trace. +pub trait AuxColumnBuilder> { + // REQUIRED METHODS + // -------------------------------------------------------------------------------------------- + + fn get_requests_at(&self, main_trace: &MainTrace, alphas: &[E], row_idx: usize) -> E; + + fn get_responses_at(&self, main_trace: &MainTrace, alphas: &[E], row_idx: usize) -> E; + + // PROVIDED METHODS + // -------------------------------------------------------------------------------------------- + + fn init_requests(&self, _main_trace: &MainTrace, _alphas: &[E]) -> E { + E::ONE + } + + fn init_responses(&self, _main_trace: &MainTrace, _alphas: &[E]) -> E { + E::ONE + } + + /// Builds the chiplets bus auxiliary trace column. 
+ fn build_aux_column(&self, main_trace: &MainTrace, alphas: &[E]) -> Vec { + let mut responses_prod: Vec = unsafe { uninit_vector(main_trace.num_rows()) }; + let mut requests: Vec = unsafe { uninit_vector(main_trace.num_rows()) }; + + responses_prod[0] = self.init_responses(main_trace, alphas); + requests[0] = self.init_requests(main_trace, alphas); + + let mut requests_running_prod = E::ONE; + for row_idx in 0..main_trace.num_rows() - 1 { + let response = self.get_responses_at(main_trace, alphas, row_idx); + let request = self.get_requests_at(main_trace, alphas, row_idx); + + // shift response and request + let shifted_response = if response != E::ONE { + response + alphas[0] + } else { + response + }; + + let shifted_request = if request != E::ONE { + request + alphas[0] + } else { + request + }; + + responses_prod[row_idx + 1] = responses_prod[row_idx] * shifted_response; + requests[row_idx + 1] = shifted_request; + requests_running_prod *= requests[row_idx + 1]; + } + + let mut requests_running_divisor = requests_running_prod.inv(); + let mut result_aux_column = responses_prod; + for i in (0..main_trace.num_rows()).rev() { + result_aux_column[i] *= requests_running_divisor; + requests_running_divisor *= requests[i]; + } + result_aux_column + } +} + // TEST HELPERS // ================================================================================================ -#[cfg(test)] -use vm_core::{utils::ToElements, Operation}; + #[cfg(test)] pub fn build_span_with_respan_ops() -> (Vec, Vec) { let iv = [1, 3, 5, 7, 9, 11, 13, 15, 17].to_elements(); diff --git a/prover/src/gpu.rs b/prover/src/gpu.rs index af04c4cee3..51a0a16abb 100644 --- a/prover/src/gpu.rs +++ b/prover/src/gpu.rs @@ -8,6 +8,7 @@ use super::{ ExecutionProver, ExecutionTrace, Felt, FieldElement, Level, ProcessorAir, PublicInputs, WinterProofOptions, }; +use air::trace::main_trace::MainTrace; use elsa::FrozenVec; use ministark_gpu::{ plan::{gen_rpo_merkle_tree, GpuRpo256RowMajor}, @@ -62,7 +63,7 @@ 
where fn new_trace_lde>( &self, trace_info: &TraceInfo, - main_trace: &ColMatrix, + main_trace: &MainTrace, domain: &StarkDomain, ) -> (Self::TraceLde, TracePolyTable) { MetalRpoTraceLde::new(trace_info, main_trace, domain) @@ -200,7 +201,7 @@ impl> MetalRpoTraceLde { /// segment and the new [DefaultTraceLde]. pub fn new( trace_info: &TraceInfo, - main_trace: &ColMatrix, + main_trace: &MainTrace, domain: &StarkDomain, ) -> (Self, TracePolyTable) { // extend the main execution trace and build a Merkle tree from the extended trace