From c1619b7444190757abc51e452e95f42e684416f6 Mon Sep 17 00:00:00 2001
From: kunxian xia
Date: Wed, 4 Dec 2024 19:28:05 +0800
Subject: [PATCH 01/22] assignment is defined over base field

---
 spartan_parallel/Cargo.toml | 1 +
 spartan_parallel/src/lib.rs | 28 ++++++++--------------------
 spartan_parallel/src/scalar/mod.rs | 6 +++---
 3 files changed, 12 insertions(+), 23 deletions(-)

diff --git a/spartan_parallel/Cargo.toml b/spartan_parallel/Cargo.toml
index 2472d2d5..e61bc5d0 100644
--- a/spartan_parallel/Cargo.toml
+++ b/spartan_parallel/Cargo.toml
@@ -30,6 +30,7 @@ colored = { version = "2", default-features = false, optional = true }
 flate2 = { version = "1" }
 goldilocks = { git = "https://github.com/scroll-tech/ceno-Goldilocks" }
 ff = "0.13.0"
+halo2curves = "0.1.0"
 
 [dev-dependencies]
 criterion = "0.5"
diff --git a/spartan_parallel/src/lib.rs b/spartan_parallel/src/lib.rs
index 67d56c6f..0c378fe6 100644
--- a/spartan_parallel/src/lib.rs
+++ b/spartan_parallel/src/lib.rs
@@ -46,6 +46,7 @@ use std::{
 use dense_mlpoly::{DensePolynomial, PolyEvalProof};
 use errors::{ProofVerifyError, R1CSError};
+use halo2curves::serde::SerdeObject;
 use instance::Instance;
 use itertools::Itertools;
 use math::Math;
@@ -79,34 +80,21 @@ pub struct ComputationDecommitment {
 #[derive(Clone, Serialize, Deserialize)]
 pub struct Assignment<S: SpartanExtensionField> {
   /// Entries of an assignment
-  pub assignment: Vec<S>,
+  pub assignment: Vec<S::BaseField>,
 }
 
 impl<S: SpartanExtensionField> Assignment<S> {
   /// Constructs a new `Assignment` from a vector
   pub fn new(assignment: &[[u8; 32]]) -> Result<Assignment<S>, R1CSError> {
-    let bytes_to_scalar = |vec: &[[u8; 32]]| -> Result<Vec<S>, R1CSError> {
-      let mut vec_scalar: Vec<S> = Vec::new();
-      for v in vec {
-        let val = S::from_bytes(v);
-        if val.is_some().unwrap_u8() == 1 {
-          vec_scalar.push(val.unwrap());
-        } else {
-          return Err(R1CSError::InvalidScalar);
-        }
-      }
-      Ok(vec_scalar)
+    let bytes_to_scalar = |vec: &[[u8; 32]]| -> Result<Vec<S::BaseField>, R1CSError> {
+      vec
+        .into_iter()
+        .map(|v| S::BaseField::from_raw_bytes(v).ok_or(R1CSError::InvalidScalar))
+        .collect()
     };
 
     let assignment_scalar = bytes_to_scalar(assignment);
 
-    // check for any parsing errors
-    if assignment_scalar.is_err() {
-      return Err(R1CSError::InvalidScalar);
-    }
-
-    Ok(Assignment {
-      assignment: assignment_scalar.unwrap(),
-    })
+    assignment_scalar.map(|a| Assignment { assignment: a })
   }
 
   /// Write the assignment into a file
diff --git a/spartan_parallel/src/scalar/mod.rs b/spartan_parallel/src/scalar/mod.rs
index a8a73968..a3a47ddd 100644
--- a/spartan_parallel/src/scalar/mod.rs
+++ b/spartan_parallel/src/scalar/mod.rs
@@ -4,10 +4,10 @@ mod fp2;
 use ff::Field;
 pub use fp::Scalar;
 pub use fp2::ScalarExt2;
-use goldilocks::ExtensionField;
+use goldilocks::{ExtensionField, SmallField};
 use merlin::Transcript;
 use rand::{CryptoRng, RngCore};
-use serde::Serialize;
+use serde::{Deserialize, Serialize};
 use std::fmt;
 use std::{
   cmp::Eq,
@@ -49,7 +49,7 @@ pub trait SpartanExtensionField:
   type InnerType: ExtensionField + Field;
 
   /// Basefield for conserving computational resources
-  type BaseField: Field;
+  type BaseField: SmallField + for<'a> Deserialize<'a>;
 
   /// Return inner Goldilocks field element
   fn inner(&self) -> &Self::InnerType;

From 214fe758ec778c22733d2db1287a861e7d3a7b25 Mon Sep 17 00:00:00 2001
From: kunxian xia
Date: Wed, 4 Dec 2024 19:55:16 +0800
Subject: [PATCH 02/22] dense polynomial can be defined over base field

---
 spartan_parallel/src/dense_mlpoly.rs | 37 +++++++++++++++++++++++-----
 spartan_parallel/src/scalar/mod.rs | 2 +-
 2 files changed, 32 insertions(+), 7 deletions(-)

diff --git
a/spartan_parallel/src/dense_mlpoly.rs b/spartan_parallel/src/dense_mlpoly.rs index 9ef128b2..2ace8880 100644 --- a/spartan_parallel/src/dense_mlpoly.rs +++ b/spartan_parallel/src/dense_mlpoly.rs @@ -1,5 +1,7 @@ #![allow(clippy::too_many_arguments)] use crate::scalar::SpartanExtensionField; +use crate::dense_mlpoly::MLE::Base; +use crate::dense_mlpoly::MLE::Ext; use super::errors::ProofVerifyError; use super::math::Math; @@ -10,15 +12,31 @@ use core::ops::Index; use merlin::Transcript; use serde::{Deserialize, Serialize}; use std::collections::HashMap; +use ff::Field; #[cfg(feature = "multicore")] use rayon::prelude::*; +#[derive(Debug, Clone)] +pub enum MLE { + Base(Vec), + Ext(Vec), +} + +impl MLE { + pub(crate) fn len(&self) -> usize { + match self { + Base(v) => v.len(), + Ext(v) => v.len(), + } + } +} + #[derive(Debug, Clone)] pub struct DensePolynomial { num_vars: usize, // the number of variables in the multilinear polynomial len: usize, - Z: Vec, // evaluations of the polynomial in all the 2^num_vars Boolean inputs + Z: MLE, // evaluations of the polynomial in all the 2^num_vars Boolean inputs } pub struct PolyCommitmentBlinds { @@ -127,7 +145,18 @@ impl DensePolynomial { DensePolynomial { num_vars: Z.len().log_2(), len: Z.len(), - Z, + Z: MLE::Ext(Z), + } + } + + pub fn new_from_base(mut Z: Vec) -> Self { + // If length of Z is not a power of 2, append Z with 0 + let zero = S::BaseField::ZERO; + Z.extend(vec![zero; Z.len().next_power_of_two() - Z.len()]); + DensePolynomial { + num_vars: Z.len().log_2(), + len: Z.len(), + Z: MLE::Base(Z), } } @@ -139,10 +168,6 @@ impl DensePolynomial { self.len } - pub fn clone(&self) -> DensePolynomial { - DensePolynomial::new(self.Z[0..self.len].to_vec()) - } - pub fn split(&self, idx: usize) -> (DensePolynomial, DensePolynomial) { assert!(idx < self.len()); ( diff --git a/spartan_parallel/src/scalar/mod.rs b/spartan_parallel/src/scalar/mod.rs index a3a47ddd..1210e0ab 100644 --- a/spartan_parallel/src/scalar/mod.rs +++ b/spartan_parallel/src/scalar/mod.rs @@ -49,7 +49,7 @@ pub trait SpartanExtensionField: type InnerType: ExtensionField + Field; /// Basefield for conserving computational resources - type BaseField: SmallField + for<'a> Deserialize<'a>; + type BaseField: SmallField + Field + for<'a> Deserialize<'a>; /// Return inner Goldilocks field element fn inner(&self) -> &Self::InnerType; From c86debdfb41b77b3a3e333f272e8840b302d51cf Mon Sep 17 00:00:00 2001 From: Ray Gao Date: Wed, 4 Dec 2024 17:43:00 -0500 Subject: [PATCH 03/22] Remove zk --- spartan_parallel/src/nizk/mod.rs | 9 ++-- spartan_parallel/src/r1csproof.rs | 30 ++++++------- spartan_parallel/src/sumcheck.rs | 74 +++++-------------------------- 3 files changed, 26 insertions(+), 87 deletions(-) diff --git a/spartan_parallel/src/nizk/mod.rs b/spartan_parallel/src/nizk/mod.rs index a57b1d94..50249f07 100644 --- a/spartan_parallel/src/nizk/mod.rs +++ b/spartan_parallel/src/nizk/mod.rs @@ -76,7 +76,6 @@ impl EqualityProof { _v1: &S, s1: &S, _v2: &S, - s2: &S, ) -> EqualityProof { >::append_protocol_name( transcript, @@ -86,7 +85,7 @@ impl EqualityProof { // produce a random Scalar let r = random_tape.random_scalar(b"r"); let c: S = transcript.challenge_scalar(b"c"); - let z = c * (*s1 - *s2) + r; + let z = c * *s1 + r; EqualityProof { z } } @@ -193,10 +192,8 @@ impl DotProductProof { transcript: &mut Transcript, random_tape: &mut RandomTape, x_vec: &[S], - blind_x: &S, a_vec: &[S], _y: &S, - blind_y: &S, ) -> DotProductProof { >::append_protocol_name( transcript, @@ -220,8 +217,8 @@ 
impl DotProductProof { .map(|i| c * x_vec[i] + d_vec[i]) .collect::>(); - let z_delta = c * *blind_x + r_delta; - let z_beta = c * *blind_y + r_beta; + let z_delta = c + r_delta; + let z_beta = c + r_beta; DotProductProof { z, z_delta, z_beta } } diff --git a/spartan_parallel/src/r1csproof.rs b/spartan_parallel/src/r1csproof.rs index 1188c0c9..30de2542 100644 --- a/spartan_parallel/src/r1csproof.rs +++ b/spartan_parallel/src/r1csproof.rs @@ -6,7 +6,7 @@ use super::math::Math; use super::nizk::{EqualityProof, KnowledgeProof, ProductProof}; use super::r1csinstance::R1CSInstance; use super::random::RandomTape; -use super::sumcheck::ZKSumcheckInstanceProof; +use super::sumcheck::R1CSSumcheckInstanceProof; use super::timer::Timer; use super::transcript::ProofTranscript; use crate::scalar::SpartanExtensionField; @@ -17,8 +17,8 @@ use std::cmp::min; #[derive(Serialize, Deserialize, Debug)] pub struct R1CSProof { - sc_proof_phase1: ZKSumcheckInstanceProof, - sc_proof_phase2: ZKSumcheckInstanceProof, + sc_proof_phase1: R1CSSumcheckInstanceProof, + sc_proof_phase2: R1CSSumcheckInstanceProof, pok_claims_phase2: (KnowledgeProof, ProductProof), proof_eq_sc_phase1: EqualityProof, proof_eq_sc_phase2: EqualityProof, @@ -41,15 +41,14 @@ impl R1CSProof { evals_Cz: &mut DensePolynomialPqx, transcript: &mut Transcript, random_tape: &mut RandomTape, - ) -> (ZKSumcheckInstanceProof, Vec, Vec, S) { + ) -> (R1CSSumcheckInstanceProof, Vec, Vec) { let comb_func = |poly_A_comp: &S, poly_B_comp: &S, poly_C_comp: &S, poly_D_comp: &S| -> S { *poly_A_comp * (*poly_B_comp * *poly_C_comp - *poly_D_comp) }; - let (sc_proof_phase_one, r, claims, blind_claim_postsc) = - ZKSumcheckInstanceProof::::prove_cubic_with_additive_term_disjoint_rounds( + let (sc_proof_phase_one, r, claims) = + R1CSSumcheckInstanceProof::::prove_cubic_with_additive_term_disjoint_rounds( &S::field_zero(), // claim is zero - &S::field_zero(), // blind for claim is also zero num_rounds, num_rounds_x_max, num_rounds_q_max, @@ -67,7 +66,7 @@ impl R1CSProof { random_tape, ); - (sc_proof_phase_one, r, claims, blind_claim_postsc) + (sc_proof_phase_one, r, claims) } fn prove_phase_two( @@ -85,14 +84,13 @@ impl R1CSProof { evals_z: &mut DensePolynomialPqx, transcript: &mut Transcript, random_tape: &mut RandomTape, - ) -> (ZKSumcheckInstanceProof, Vec, Vec, S) { + ) -> (R1CSSumcheckInstanceProof, Vec, Vec) { let comb_func = |poly_A_comp: &S, poly_B_comp: &S, poly_C_comp: &S| -> S { *poly_A_comp * *poly_B_comp * *poly_C_comp }; - let (sc_proof_phase_two, r, claims, blind_claim_postsc) = - ZKSumcheckInstanceProof::::prove_cubic_disjoint_rounds( + let (sc_proof_phase_two, r, claims) = + R1CSSumcheckInstanceProof::::prove_cubic_disjoint_rounds( claim, - blind_claim, num_rounds, num_rounds_y_max, num_rounds_w, @@ -108,7 +106,7 @@ impl R1CSProof { random_tape, ); - (sc_proof_phase_two, r, claims, blind_claim_postsc) + (sc_proof_phase_two, r, claims) } fn protocol_name() -> &'static [u8] { @@ -235,7 +233,7 @@ impl R1CSProof { // Sumcheck 1: (Az * Bz - Cz) * eq(x, q, p) = 0 let timer_tmp = Timer::new("prove_sum_check"); - let (sc_proof_phase1, rx, _claims_phase1, blind_claim_postsc1) = R1CSProof::prove_phase_one( + let (sc_proof_phase1, rx, _claims_phase1) = R1CSProof::prove_phase_one( num_rounds_x + num_rounds_q + num_rounds_p, num_rounds_x, num_rounds_q, @@ -303,7 +301,6 @@ impl R1CSProof { &claim_post_phase1, &blind_expected_claim_postsc1, &claim_post_phase1, - &blind_claim_postsc1, ); // Separate the result rx into rp, rq, and rx @@ -380,7 +377,7 @@ impl R1CSProof { 
let mut eq_p_rp_poly = DensePolynomial::new(EqPolynomial::new(rp).evals()); // Sumcheck 2: (rA + rB + rC) * Z * eq(p) = e - let (sc_proof_phase2, ry, claims_phase2, blind_claim_postsc2) = R1CSProof::prove_phase_two( + let (sc_proof_phase2, ry, claims_phase2) = R1CSProof::prove_phase_two( num_rounds_y + num_rounds_w + num_rounds_p, num_rounds_y, num_rounds_w, @@ -553,7 +550,6 @@ impl R1CSProof { &claim_post_phase2, &blind_expected_claim_postsc2, &claim_post_phase2, - &blind_claim_postsc2, ); timer_prove.stop(); diff --git a/spartan_parallel/src/sumcheck.rs b/spartan_parallel/src/sumcheck.rs index 249b1abd..a3b891a9 100644 --- a/spartan_parallel/src/sumcheck.rs +++ b/spartan_parallel/src/sumcheck.rs @@ -71,13 +71,13 @@ impl SumcheckInstanceProof { } #[derive(Serialize, Deserialize, Debug)] -pub struct ZKSumcheckInstanceProof { +pub struct R1CSSumcheckInstanceProof { proofs: Vec>, } -impl ZKSumcheckInstanceProof { +impl R1CSSumcheckInstanceProof { pub fn new(proofs: Vec>) -> Self { - ZKSumcheckInstanceProof { proofs } + R1CSSumcheckInstanceProof { proofs } } pub fn verify( @@ -381,10 +381,9 @@ impl SumcheckInstanceProof { } } -impl ZKSumcheckInstanceProof { +impl R1CSSumcheckInstanceProof { pub fn prove_cubic_disjoint_rounds( claim: &S, - blind_claim: &S, num_rounds: usize, num_rounds_y_max: usize, num_rounds_w: usize, @@ -398,7 +397,7 @@ impl ZKSumcheckInstanceProof { comb_func: F, transcript: &mut Transcript, random_tape: &mut RandomTape, - ) -> (Self, Vec, Vec, S) + ) -> (Self, Vec, Vec) where F: Fn(&S, &S, &S) -> S, { @@ -408,11 +407,6 @@ impl ZKSumcheckInstanceProof { // poly_A is the EQ polynomial of size P * W * Y_max assert_eq!(num_rounds, num_rounds_y_max + num_rounds_w + num_rounds_p); - let (blinds_poly, blinds_evals) = ( - random_tape.random_vector(b"blinds_poly", num_rounds), - random_tape.random_vector(b"blinds_evals", num_rounds), - ); - let mut claim_per_round = *claim; let mut r: Vec = Vec::new(); @@ -577,18 +571,6 @@ impl ZKSumcheckInstanceProof { // compute a weighted sum of the RHS let target = w[0] * claim_per_round + w[1] * eval; - let blind = { - let blind_sc = if j == 0 { - blind_claim - } else { - &blinds_evals[j - 1] - }; - - let blind_eval = &blinds_evals[j]; - - w[0] * *blind_sc + w[1] * *blind_eval - }; - let a = { // the vector to use to decommit for sum-check test let a_sc = { @@ -613,15 +595,7 @@ impl ZKSumcheckInstanceProof { .collect::>() }; - let proof = DotProductProof::prove( - transcript, - random_tape, - &poly.as_vec(), - &blinds_poly[j], - &a, - &target, - &blind, - ); + let proof = DotProductProof::prove(transcript, random_tape, &poly.as_vec(), &a, &target); (proof, eval) }; @@ -632,20 +606,18 @@ impl ZKSumcheckInstanceProof { } ( - ZKSumcheckInstanceProof::new(proofs), + R1CSSumcheckInstanceProof::new(proofs), r, vec![ poly_A[0], poly_B.index(0, 0, 0, 0), poly_C.index(0, 0, 0, 0), ], - blinds_evals[num_rounds - 1], ) } pub fn prove_cubic_with_additive_term_disjoint_rounds( claim: &S, - blind_claim: &S, num_rounds: usize, num_rounds_x_max: usize, num_rounds_q_max: usize, @@ -661,7 +633,7 @@ impl ZKSumcheckInstanceProof { comb_func: F, transcript: &mut Transcript, random_tape: &mut RandomTape, - ) -> (Self, Vec, Vec, S) + ) -> (Self, Vec, Vec) where F: Fn(&S, &S, &S, &S) -> S, { @@ -678,11 +650,6 @@ impl ZKSumcheckInstanceProof { assert_eq!(poly_C.num_witness_secs, 1); assert_eq!(poly_D.num_witness_secs, 1); - let (blinds_poly, blinds_evals) = ( - random_tape.random_vector(b"blinds_poly", num_rounds), - random_tape.random_vector(b"blinds_evals", 
num_rounds), - ); - let mut claim_per_round = *claim; let mut r: Vec = Vec::new(); @@ -864,18 +831,6 @@ impl ZKSumcheckInstanceProof { // compute a weighted sum of the RHS let target = w[0] * claim_per_round + w[1] * eval; - let blind = { - let blind_sc = if j == 0 { - blind_claim - } else { - &blinds_evals[j - 1] - }; - - let blind_eval = &blinds_evals[j]; - - w[0] * *blind_sc + w[1] * *blind_eval - }; - let a = { // the vector to use to decommit for sum-check test let a_sc = { @@ -900,15 +855,7 @@ impl ZKSumcheckInstanceProof { .collect::>() }; - let proof = DotProductProof::prove( - transcript, - random_tape, - &poly.as_vec(), - &blinds_poly[j], - &a, - &target, - &blind, - ); + let proof = DotProductProof::prove(transcript, random_tape, &poly.as_vec(), &a, &target); (proof, eval) }; @@ -919,7 +866,7 @@ impl ZKSumcheckInstanceProof { } ( - ZKSumcheckInstanceProof::new(proofs), + R1CSSumcheckInstanceProof::new(proofs), r, vec![ poly_Ap[0] * poly_Aq[0] * poly_Ax[0], @@ -927,7 +874,6 @@ impl ZKSumcheckInstanceProof { poly_C.index(0, 0, 0, 0), poly_D.index(0, 0, 0, 0), ], - blinds_evals[num_rounds - 1], ) } } From 88029e3f146e5105ec56859d0e5d384911a761f6 Mon Sep 17 00:00:00 2001 From: Ray Gao Date: Wed, 4 Dec 2024 17:47:52 -0500 Subject: [PATCH 04/22] Remove blinds --- spartan_parallel/src/r1csproof.rs | 3 --- 1 file changed, 3 deletions(-) diff --git a/spartan_parallel/src/r1csproof.rs b/spartan_parallel/src/r1csproof.rs index 30de2542..5cd1e650 100644 --- a/spartan_parallel/src/r1csproof.rs +++ b/spartan_parallel/src/r1csproof.rs @@ -78,7 +78,6 @@ impl R1CSProof { num_witness_secs: usize, num_inputs: Vec, claim: &S, - blind_claim: &S, evals_eq: &mut DensePolynomial, evals_ABC: &mut DensePolynomialPqx, evals_z: &mut DensePolynomialPqx, @@ -321,7 +320,6 @@ impl R1CSProof { let r_C: S = transcript.challenge_scalar(b"challenge_Cz"); let claim_phase2 = r_A * *Az_claim + r_B * *Bz_claim + r_C * *Cz_claim; - let blind_claim_phase2 = r_A * Az_blind + r_B * Bz_blind + r_C * Cz_blind; let timer_tmp = Timer::new("prove_abc_gen"); let evals_ABC = { @@ -386,7 +384,6 @@ impl R1CSProof { num_witness_secs, num_inputs.clone(), &claim_phase2, - &blind_claim_phase2, &mut eq_p_rp_poly, &mut ABC_poly, &mut Z_poly, From 6b2df83dd899770e933c219d066c254ccf7618e3 Mon Sep 17 00:00:00 2001 From: Ray Gao Date: Wed, 4 Dec 2024 21:18:42 -0500 Subject: [PATCH 05/22] Add typeset pattern --- circ_blocks/Cargo.lock | 3 +- spartan_parallel/src/dense_mlpoly.rs | 29 ++++----------- spartan_parallel/src/lib.rs | 22 ++++++++---- spartan_parallel/src/mle.rs | 53 ++++++++++++++++++++++++++++ 4 files changed, 76 insertions(+), 31 deletions(-) create mode 100644 spartan_parallel/src/mle.rs diff --git a/circ_blocks/Cargo.lock b/circ_blocks/Cargo.lock index 3ac35621..7d3c4624 100644 --- a/circ_blocks/Cargo.lock +++ b/circ_blocks/Cargo.lock @@ -1,6 +1,6 @@ # This file is automatically @generated by Cargo. # It is not intended for manual editing. 
-version = 3 +version = 4 [[package]] name = "addchain" @@ -1705,6 +1705,7 @@ dependencies = [ "ff 0.13.0", "flate2", "goldilocks", + "halo2curves", "itertools 0.13.0", "merlin", "rand 0.8.5", diff --git a/spartan_parallel/src/dense_mlpoly.rs b/spartan_parallel/src/dense_mlpoly.rs index 2ace8880..8baebb85 100644 --- a/spartan_parallel/src/dense_mlpoly.rs +++ b/spartan_parallel/src/dense_mlpoly.rs @@ -1,8 +1,5 @@ #![allow(clippy::too_many_arguments)] use crate::scalar::SpartanExtensionField; -use crate::dense_mlpoly::MLE::Base; -use crate::dense_mlpoly::MLE::Ext; - use super::errors::ProofVerifyError; use super::math::Math; use super::nizk::DotProductProofLog; @@ -13,30 +10,16 @@ use merlin::Transcript; use serde::{Deserialize, Serialize}; use std::collections::HashMap; use ff::Field; +use crate::mle::{MLE, MLEType, Base, Ext}; #[cfg(feature = "multicore")] use rayon::prelude::*; #[derive(Debug, Clone)] -pub enum MLE { - Base(Vec), - Ext(Vec), -} - -impl MLE { - pub(crate) fn len(&self) -> usize { - match self { - Base(v) => v.len(), - Ext(v) => v.len(), - } - } -} - -#[derive(Debug, Clone)] -pub struct DensePolynomial { +pub struct DensePolynomial { num_vars: usize, // the number of variables in the multilinear polynomial len: usize, - Z: MLE, // evaluations of the polynomial in all the 2^num_vars Boolean inputs + Z: MLE, // evaluations of the polynomial in all the 2^num_vars Boolean inputs } pub struct PolyCommitmentBlinds { @@ -137,7 +120,7 @@ impl IdentityPolynomial { } } -impl DensePolynomial { +impl DensePolynomial { pub fn new(mut Z: Vec) -> Self { // If length of Z is not a power of 2, append Z with 0 let zero = S::field_zero(); @@ -145,7 +128,7 @@ impl DensePolynomial { DensePolynomial { num_vars: Z.len().log_2(), len: Z.len(), - Z: MLE::Ext(Z), + Z: MLE::::new(Z), } } @@ -156,7 +139,7 @@ impl DensePolynomial { DensePolynomial { num_vars: Z.len().log_2(), len: Z.len(), - Z: MLE::Base(Z), + Z: MLE::::new(Z), } } diff --git a/spartan_parallel/src/lib.rs b/spartan_parallel/src/lib.rs index 0c378fe6..d7c74b8c 100644 --- a/spartan_parallel/src/lib.rs +++ b/spartan_parallel/src/lib.rs @@ -21,6 +21,7 @@ extern crate rayon; mod custom_dense_mlpoly; mod dense_mlpoly; +mod mle; mod errors; /// R1CS instance used by libspartan pub mod instance; @@ -845,31 +846,38 @@ impl SNARK { // unwrap the assignments let mut block_vars_mat = block_vars_mat .into_iter() - .map(|a| a.into_iter().map(|v| v.assignment).collect::>>()) + .map(|a| + a.into_iter().map(|v| + v.assignment + .into_iter() + .map(|a| S::from_base(&a)) + .collect::>()) + .collect::>>() + ) .collect::>>>(); let mut exec_inputs_list = exec_inputs_list .into_iter() - .map(|v| v.assignment) + .map(|v| v.assignment.into_iter().map(|a| S::from_base(&a)).collect::>()) .collect::>>(); let mut init_phy_mems_list = init_phy_mems_list .into_iter() - .map(|v| v.assignment) + .map(|v| v.assignment.into_iter().map(|a| S::from_base(&a)).collect::>()) .collect::>>(); let mut init_vir_mems_list = init_vir_mems_list .into_iter() - .map(|v| v.assignment) + .map(|v| v.assignment.into_iter().map(|a| S::from_base(&a)).collect::>()) .collect::>>(); let mut addr_phy_mems_list = addr_phy_mems_list .into_iter() - .map(|v| v.assignment) + .map(|v| v.assignment.into_iter().map(|a| S::from_base(&a)).collect::>()) .collect::>>(); let mut addr_vir_mems_list = addr_vir_mems_list .into_iter() - .map(|v| v.assignment) + .map(|v| v.assignment.into_iter().map(|a| S::from_base(&a)).collect::>()) .collect::>>(); let mut addr_ts_bits_list = addr_ts_bits_list .into_iter() - 
.map(|v| v.assignment) + .map(|v| v.assignment.into_iter().map(|a| S::from_base(&a)).collect::>()) .collect::>>(); // -- diff --git a/spartan_parallel/src/mle.rs b/spartan_parallel/src/mle.rs new file mode 100644 index 00000000..0de49a3c --- /dev/null +++ b/spartan_parallel/src/mle.rs @@ -0,0 +1,53 @@ +use crate::scalar::SpartanExtensionField; +use std::cmp::max; + +pub trait MLEType {} + +pub struct Base; +impl MLEType for Base {} + +pub struct Ext; +impl MLEType for Ext {} + +#[derive(Debug, Clone)] +pub struct MLE { + t: T, + + // Depending on T, one of the following fields will be empty. + // For MLE, field elements can potentially be stored as elements + // in the base field (resource saving) or in the extended field. + ext_vec: Vec, + base_vec: Vec, +} + +// Define general behavior of MLE +impl MLE +{ + pub fn len(&self) -> usize { + max(self.ext_vec.len(), self.base_vec.len()) + } +} + +// Define behavior of MLE when elements are in the base field +impl MLE +{ + pub fn new(vals: Vec) -> Self { + Self { + t: Base, + ext_vec: vec![], + base_vec: vals, + } + } +} + +// Define behavior of MLE when elements are in the extended field +impl MLE +{ + pub fn new(vals: Vec) -> Self { + Self { + t: Ext, + ext_vec: vals, + base_vec: vec![], + } + } +} \ No newline at end of file From d134a9fd18a4f14feba4966b8d48360ea8cbd038 Mon Sep 17 00:00:00 2001 From: Ray Gao Date: Wed, 4 Dec 2024 17:43:00 -0500 Subject: [PATCH 06/22] Remove zk --- spartan_parallel/src/nizk/mod.rs | 9 ++-- spartan_parallel/src/r1csproof.rs | 30 ++++++------- spartan_parallel/src/sumcheck.rs | 74 +++++-------------------------- 3 files changed, 26 insertions(+), 87 deletions(-) diff --git a/spartan_parallel/src/nizk/mod.rs b/spartan_parallel/src/nizk/mod.rs index a57b1d94..50249f07 100644 --- a/spartan_parallel/src/nizk/mod.rs +++ b/spartan_parallel/src/nizk/mod.rs @@ -76,7 +76,6 @@ impl EqualityProof { _v1: &S, s1: &S, _v2: &S, - s2: &S, ) -> EqualityProof { >::append_protocol_name( transcript, @@ -86,7 +85,7 @@ impl EqualityProof { // produce a random Scalar let r = random_tape.random_scalar(b"r"); let c: S = transcript.challenge_scalar(b"c"); - let z = c * (*s1 - *s2) + r; + let z = c * *s1 + r; EqualityProof { z } } @@ -193,10 +192,8 @@ impl DotProductProof { transcript: &mut Transcript, random_tape: &mut RandomTape, x_vec: &[S], - blind_x: &S, a_vec: &[S], _y: &S, - blind_y: &S, ) -> DotProductProof { >::append_protocol_name( transcript, @@ -220,8 +217,8 @@ impl DotProductProof { .map(|i| c * x_vec[i] + d_vec[i]) .collect::>(); - let z_delta = c * *blind_x + r_delta; - let z_beta = c * *blind_y + r_beta; + let z_delta = c + r_delta; + let z_beta = c + r_beta; DotProductProof { z, z_delta, z_beta } } diff --git a/spartan_parallel/src/r1csproof.rs b/spartan_parallel/src/r1csproof.rs index 1188c0c9..30de2542 100644 --- a/spartan_parallel/src/r1csproof.rs +++ b/spartan_parallel/src/r1csproof.rs @@ -6,7 +6,7 @@ use super::math::Math; use super::nizk::{EqualityProof, KnowledgeProof, ProductProof}; use super::r1csinstance::R1CSInstance; use super::random::RandomTape; -use super::sumcheck::ZKSumcheckInstanceProof; +use super::sumcheck::R1CSSumcheckInstanceProof; use super::timer::Timer; use super::transcript::ProofTranscript; use crate::scalar::SpartanExtensionField; @@ -17,8 +17,8 @@ use std::cmp::min; #[derive(Serialize, Deserialize, Debug)] pub struct R1CSProof { - sc_proof_phase1: ZKSumcheckInstanceProof, - sc_proof_phase2: ZKSumcheckInstanceProof, + sc_proof_phase1: R1CSSumcheckInstanceProof, + sc_proof_phase2: 
R1CSSumcheckInstanceProof, pok_claims_phase2: (KnowledgeProof, ProductProof), proof_eq_sc_phase1: EqualityProof, proof_eq_sc_phase2: EqualityProof, @@ -41,15 +41,14 @@ impl R1CSProof { evals_Cz: &mut DensePolynomialPqx, transcript: &mut Transcript, random_tape: &mut RandomTape, - ) -> (ZKSumcheckInstanceProof, Vec, Vec, S) { + ) -> (R1CSSumcheckInstanceProof, Vec, Vec) { let comb_func = |poly_A_comp: &S, poly_B_comp: &S, poly_C_comp: &S, poly_D_comp: &S| -> S { *poly_A_comp * (*poly_B_comp * *poly_C_comp - *poly_D_comp) }; - let (sc_proof_phase_one, r, claims, blind_claim_postsc) = - ZKSumcheckInstanceProof::::prove_cubic_with_additive_term_disjoint_rounds( + let (sc_proof_phase_one, r, claims) = + R1CSSumcheckInstanceProof::::prove_cubic_with_additive_term_disjoint_rounds( &S::field_zero(), // claim is zero - &S::field_zero(), // blind for claim is also zero num_rounds, num_rounds_x_max, num_rounds_q_max, @@ -67,7 +66,7 @@ impl R1CSProof { random_tape, ); - (sc_proof_phase_one, r, claims, blind_claim_postsc) + (sc_proof_phase_one, r, claims) } fn prove_phase_two( @@ -85,14 +84,13 @@ impl R1CSProof { evals_z: &mut DensePolynomialPqx, transcript: &mut Transcript, random_tape: &mut RandomTape, - ) -> (ZKSumcheckInstanceProof, Vec, Vec, S) { + ) -> (R1CSSumcheckInstanceProof, Vec, Vec) { let comb_func = |poly_A_comp: &S, poly_B_comp: &S, poly_C_comp: &S| -> S { *poly_A_comp * *poly_B_comp * *poly_C_comp }; - let (sc_proof_phase_two, r, claims, blind_claim_postsc) = - ZKSumcheckInstanceProof::::prove_cubic_disjoint_rounds( + let (sc_proof_phase_two, r, claims) = + R1CSSumcheckInstanceProof::::prove_cubic_disjoint_rounds( claim, - blind_claim, num_rounds, num_rounds_y_max, num_rounds_w, @@ -108,7 +106,7 @@ impl R1CSProof { random_tape, ); - (sc_proof_phase_two, r, claims, blind_claim_postsc) + (sc_proof_phase_two, r, claims) } fn protocol_name() -> &'static [u8] { @@ -235,7 +233,7 @@ impl R1CSProof { // Sumcheck 1: (Az * Bz - Cz) * eq(x, q, p) = 0 let timer_tmp = Timer::new("prove_sum_check"); - let (sc_proof_phase1, rx, _claims_phase1, blind_claim_postsc1) = R1CSProof::prove_phase_one( + let (sc_proof_phase1, rx, _claims_phase1) = R1CSProof::prove_phase_one( num_rounds_x + num_rounds_q + num_rounds_p, num_rounds_x, num_rounds_q, @@ -303,7 +301,6 @@ impl R1CSProof { &claim_post_phase1, &blind_expected_claim_postsc1, &claim_post_phase1, - &blind_claim_postsc1, ); // Separate the result rx into rp, rq, and rx @@ -380,7 +377,7 @@ impl R1CSProof { let mut eq_p_rp_poly = DensePolynomial::new(EqPolynomial::new(rp).evals()); // Sumcheck 2: (rA + rB + rC) * Z * eq(p) = e - let (sc_proof_phase2, ry, claims_phase2, blind_claim_postsc2) = R1CSProof::prove_phase_two( + let (sc_proof_phase2, ry, claims_phase2) = R1CSProof::prove_phase_two( num_rounds_y + num_rounds_w + num_rounds_p, num_rounds_y, num_rounds_w, @@ -553,7 +550,6 @@ impl R1CSProof { &claim_post_phase2, &blind_expected_claim_postsc2, &claim_post_phase2, - &blind_claim_postsc2, ); timer_prove.stop(); diff --git a/spartan_parallel/src/sumcheck.rs b/spartan_parallel/src/sumcheck.rs index 249b1abd..a3b891a9 100644 --- a/spartan_parallel/src/sumcheck.rs +++ b/spartan_parallel/src/sumcheck.rs @@ -71,13 +71,13 @@ impl SumcheckInstanceProof { } #[derive(Serialize, Deserialize, Debug)] -pub struct ZKSumcheckInstanceProof { +pub struct R1CSSumcheckInstanceProof { proofs: Vec>, } -impl ZKSumcheckInstanceProof { +impl R1CSSumcheckInstanceProof { pub fn new(proofs: Vec>) -> Self { - ZKSumcheckInstanceProof { proofs } + R1CSSumcheckInstanceProof { proofs } } 
pub fn verify( @@ -381,10 +381,9 @@ impl SumcheckInstanceProof { } } -impl ZKSumcheckInstanceProof { +impl R1CSSumcheckInstanceProof { pub fn prove_cubic_disjoint_rounds( claim: &S, - blind_claim: &S, num_rounds: usize, num_rounds_y_max: usize, num_rounds_w: usize, @@ -398,7 +397,7 @@ impl ZKSumcheckInstanceProof { comb_func: F, transcript: &mut Transcript, random_tape: &mut RandomTape, - ) -> (Self, Vec, Vec, S) + ) -> (Self, Vec, Vec) where F: Fn(&S, &S, &S) -> S, { @@ -408,11 +407,6 @@ impl ZKSumcheckInstanceProof { // poly_A is the EQ polynomial of size P * W * Y_max assert_eq!(num_rounds, num_rounds_y_max + num_rounds_w + num_rounds_p); - let (blinds_poly, blinds_evals) = ( - random_tape.random_vector(b"blinds_poly", num_rounds), - random_tape.random_vector(b"blinds_evals", num_rounds), - ); - let mut claim_per_round = *claim; let mut r: Vec = Vec::new(); @@ -577,18 +571,6 @@ impl ZKSumcheckInstanceProof { // compute a weighted sum of the RHS let target = w[0] * claim_per_round + w[1] * eval; - let blind = { - let blind_sc = if j == 0 { - blind_claim - } else { - &blinds_evals[j - 1] - }; - - let blind_eval = &blinds_evals[j]; - - w[0] * *blind_sc + w[1] * *blind_eval - }; - let a = { // the vector to use to decommit for sum-check test let a_sc = { @@ -613,15 +595,7 @@ impl ZKSumcheckInstanceProof { .collect::>() }; - let proof = DotProductProof::prove( - transcript, - random_tape, - &poly.as_vec(), - &blinds_poly[j], - &a, - &target, - &blind, - ); + let proof = DotProductProof::prove(transcript, random_tape, &poly.as_vec(), &a, &target); (proof, eval) }; @@ -632,20 +606,18 @@ impl ZKSumcheckInstanceProof { } ( - ZKSumcheckInstanceProof::new(proofs), + R1CSSumcheckInstanceProof::new(proofs), r, vec![ poly_A[0], poly_B.index(0, 0, 0, 0), poly_C.index(0, 0, 0, 0), ], - blinds_evals[num_rounds - 1], ) } pub fn prove_cubic_with_additive_term_disjoint_rounds( claim: &S, - blind_claim: &S, num_rounds: usize, num_rounds_x_max: usize, num_rounds_q_max: usize, @@ -661,7 +633,7 @@ impl ZKSumcheckInstanceProof { comb_func: F, transcript: &mut Transcript, random_tape: &mut RandomTape, - ) -> (Self, Vec, Vec, S) + ) -> (Self, Vec, Vec) where F: Fn(&S, &S, &S, &S) -> S, { @@ -678,11 +650,6 @@ impl ZKSumcheckInstanceProof { assert_eq!(poly_C.num_witness_secs, 1); assert_eq!(poly_D.num_witness_secs, 1); - let (blinds_poly, blinds_evals) = ( - random_tape.random_vector(b"blinds_poly", num_rounds), - random_tape.random_vector(b"blinds_evals", num_rounds), - ); - let mut claim_per_round = *claim; let mut r: Vec = Vec::new(); @@ -864,18 +831,6 @@ impl ZKSumcheckInstanceProof { // compute a weighted sum of the RHS let target = w[0] * claim_per_round + w[1] * eval; - let blind = { - let blind_sc = if j == 0 { - blind_claim - } else { - &blinds_evals[j - 1] - }; - - let blind_eval = &blinds_evals[j]; - - w[0] * *blind_sc + w[1] * *blind_eval - }; - let a = { // the vector to use to decommit for sum-check test let a_sc = { @@ -900,15 +855,7 @@ impl ZKSumcheckInstanceProof { .collect::>() }; - let proof = DotProductProof::prove( - transcript, - random_tape, - &poly.as_vec(), - &blinds_poly[j], - &a, - &target, - &blind, - ); + let proof = DotProductProof::prove(transcript, random_tape, &poly.as_vec(), &a, &target); (proof, eval) }; @@ -919,7 +866,7 @@ impl ZKSumcheckInstanceProof { } ( - ZKSumcheckInstanceProof::new(proofs), + R1CSSumcheckInstanceProof::new(proofs), r, vec![ poly_Ap[0] * poly_Aq[0] * poly_Ax[0], @@ -927,7 +874,6 @@ impl ZKSumcheckInstanceProof { poly_C.index(0, 0, 0, 0), poly_D.index(0, 0, 
0, 0), ], - blinds_evals[num_rounds - 1], ) } } From d4e022b7fb08d9f9f4b705fe082e5f77bfc6a9fc Mon Sep 17 00:00:00 2001 From: Ray Gao Date: Wed, 4 Dec 2024 17:47:52 -0500 Subject: [PATCH 07/22] Remove blinds --- spartan_parallel/src/r1csproof.rs | 3 --- 1 file changed, 3 deletions(-) diff --git a/spartan_parallel/src/r1csproof.rs b/spartan_parallel/src/r1csproof.rs index 30de2542..5cd1e650 100644 --- a/spartan_parallel/src/r1csproof.rs +++ b/spartan_parallel/src/r1csproof.rs @@ -78,7 +78,6 @@ impl R1CSProof { num_witness_secs: usize, num_inputs: Vec, claim: &S, - blind_claim: &S, evals_eq: &mut DensePolynomial, evals_ABC: &mut DensePolynomialPqx, evals_z: &mut DensePolynomialPqx, @@ -321,7 +320,6 @@ impl R1CSProof { let r_C: S = transcript.challenge_scalar(b"challenge_Cz"); let claim_phase2 = r_A * *Az_claim + r_B * *Bz_claim + r_C * *Cz_claim; - let blind_claim_phase2 = r_A * Az_blind + r_B * Bz_blind + r_C * Cz_blind; let timer_tmp = Timer::new("prove_abc_gen"); let evals_ABC = { @@ -386,7 +384,6 @@ impl R1CSProof { num_witness_secs, num_inputs.clone(), &claim_phase2, - &blind_claim_phase2, &mut eq_p_rp_poly, &mut ABC_poly, &mut Z_poly, From acbc979f796efb4d72a547783f77e079f7d405f5 Mon Sep 17 00:00:00 2001 From: kunxian xia Date: Thu, 5 Dec 2024 15:38:02 +0800 Subject: [PATCH 08/22] remove nizk --- spartan_parallel/src/lib.rs | 1 - spartan_parallel/src/nizk/bullet.rs | 159 ------------- spartan_parallel/src/nizk/mod.rs | 343 ---------------------------- 3 files changed, 503 deletions(-) delete mode 100644 spartan_parallel/src/nizk/bullet.rs delete mode 100644 spartan_parallel/src/nizk/mod.rs diff --git a/spartan_parallel/src/lib.rs b/spartan_parallel/src/lib.rs index 67d56c6f..d1eb04d5 100644 --- a/spartan_parallel/src/lib.rs +++ b/spartan_parallel/src/lib.rs @@ -25,7 +25,6 @@ mod errors; /// R1CS instance used by libspartan pub mod instance; mod math; -mod nizk; mod product_tree; mod r1csinstance; mod r1csproof; diff --git a/spartan_parallel/src/nizk/bullet.rs b/spartan_parallel/src/nizk/bullet.rs deleted file mode 100644 index 549c924d..00000000 --- a/spartan_parallel/src/nizk/bullet.rs +++ /dev/null @@ -1,159 +0,0 @@ -//! This module is an adaptation of code from the bulletproofs crate. -//! See NOTICE.md for more details -#![allow(non_snake_case)] -#![allow(clippy::type_complexity)] -#![allow(clippy::too_many_arguments)] -use super::super::errors::ProofVerifyError; -use super::super::scalar::SpartanExtensionField; -use super::super::transcript::ProofTranscript; -use merlin::Transcript; -use serde::{Deserialize, Serialize}; - -#[derive(Clone, Debug, Serialize, Deserialize)] -pub struct BulletReductionProof { - _phantom: S, -} - -impl BulletReductionProof { - /// Create an inner-product proof. - /// - /// The proof is created with respect to the bases \\(G\\). - /// - /// The `transcript` is passed in as a parameter so that the - /// challenges depend on the *entire* transcript (including parent - /// protocols). - /// - /// The lengths of the vectors must all be the same, and must all be - /// either 0 or a power of 2. - pub fn prove( - transcript: &mut Transcript, - a_vec: &[S], - b_vec: &[S], - blind: &S, - blinds_vec: &[(S, S)], - ) -> (S, S, S) { - // Create slices G, H, a, b backed by their respective - // vectors. This lets us reslice as we compress the lengths - // of the vectors in the main loop below. 
- let mut a: &mut [S] = &mut a_vec.to_owned()[..]; - let mut b: &mut [S] = &mut b_vec.to_owned()[..]; - - let mut blinds_iter = blinds_vec.iter(); - let mut blind_fin: S = *blind; - - let mut n = a.len(); - assert_eq!(a.len(), n); - assert_eq!(b.len(), n); - - while n != 1 { - n /= 2; - let (a_L, a_R) = a.split_at_mut(n); - let (b_L, b_R) = b.split_at_mut(n); - - let _c_L = inner_product(a_L, b_R); - let _c_R = inner_product(a_R, b_L); - - let (blind_L, blind_R) = blinds_iter.next().unwrap(); - - let u: S = transcript.challenge_scalar(b"u"); - - let u_inv = u.invert().unwrap(); - - for i in 0..n { - a_L[i] = a_L[i] * u + u_inv * a_R[i]; - b_L[i] = b_L[i] * u_inv + u * b_R[i]; - } - - blind_fin = blind_fin + *blind_L * u * u + *blind_R * u_inv * u_inv; - - a = a_L; - b = b_L; - } - - (a[0], b[0], blind_fin) - } - - /// Computes three vectors of verification scalars \\([u\_{i}^{2}]\\), \\([u\_{i}^{-2}]\\) and \\([s\_{i}]\\) for combined multiscalar multiplication - /// in a parent protocol. See [inner product protocol notes](index.html#verification-equation) for details. - /// The verifier must provide the input length \\(n\\) explicitly to avoid unbounded allocation within the inner product proof. - fn verification_scalars( - &self, - n: usize, - transcript: &mut Transcript, - ) -> Result<(Vec, Vec, Vec), ProofVerifyError> { - let mut lg_n = 0usize; - assert!(n > 0, "n must not be 0"); - - let mut value = n; - while value > 1 { - value >>= 1; // Divide value by 2 - lg_n += 1; - } - - // 1. Recompute x_k,...,x_1 based on the proof transcript - let mut challenges = Vec::with_capacity(lg_n); - for _i in 0..lg_n { - challenges.push(transcript.challenge_scalar(b"u")); - } - - // 2. Compute 1/(u_k...u_1) and 1/u_k, ..., 1/u_1 - let mut challenges_inv = challenges.clone(); - let allinv = S::batch_invert(&mut challenges_inv); - - // 3. Compute u_i^2 and (1/u_i)^2 - for i in 0..lg_n { - challenges[i] = challenges[i].square(); - challenges_inv[i] = challenges_inv[i].square(); - } - let challenges_sq = challenges; - let challenges_inv_sq = challenges_inv; - - // 4. Compute s values inductively. - let mut s = Vec::with_capacity(n); - s.push(allinv); - for i in 1..n { - let lg_i = (32 - 1 - (i as u32).leading_zeros()) as usize; - let k = 1 << lg_i; - // The challenges are stored in "creation order" as [u_k,...,u_1], - // so u_{lg(i)+1} = is indexed by (lg_n-1) - lg_i - let u_lg_i_sq = challenges_sq[(lg_n - 1) - lg_i]; - s.push(s[i - k] * u_lg_i_sq); - } - - Ok((challenges_sq, challenges_inv_sq, s)) - } - - /// This method is for testing that proof generation work, - /// but for efficiency the actual protocols would use `verification_scalars` - /// method to combine inner product verification with other checks - /// in a single multiscalar multiplication. - pub fn verify( - &self, - n: usize, - a: &[S], - transcript: &mut Transcript, - ) -> Result { - let (_u_sq, _u_inv_sq, s) = self.verification_scalars(n, transcript)?; - - let a_hat = inner_product(a, &s); - - Ok(a_hat) - } -} - -/// Computes an inner product of two vectors -/// \\[ -/// {\langle {\mathbf{a}}, {\mathbf{b}} \rangle} = \sum\_{i=0}^{n-1} a\_i \cdot b\_i. -/// \\] -/// Panics if the lengths of \\(\mathbf{a}\\) and \\(\mathbf{b}\\) are not equal. 
-pub fn inner_product(a: &[S], b: &[S]) -> S { - assert!( - a.len() == b.len(), - "inner_product(a,b): lengths of vectors do not match" - ); - let mut out = S::field_zero(); - for i in 0..a.len() { - out = out + a[i] * b[i]; - } - out -} diff --git a/spartan_parallel/src/nizk/mod.rs b/spartan_parallel/src/nizk/mod.rs deleted file mode 100644 index 50249f07..00000000 --- a/spartan_parallel/src/nizk/mod.rs +++ /dev/null @@ -1,343 +0,0 @@ -#![allow(clippy::too_many_arguments)] -use crate::scalar::SpartanExtensionField; - -use super::errors::ProofVerifyError; -use super::math::Math; -use super::random::RandomTape; -use super::transcript::ProofTranscript; -use merlin::Transcript; -use serde::{Deserialize, Serialize}; -mod bullet; -use bullet::BulletReductionProof; - -#[derive(Serialize, Deserialize, Debug)] -pub struct KnowledgeProof { - z1: S, - z2: S, -} - -impl KnowledgeProof { - fn protocol_name() -> &'static [u8] { - b"knowledge proof" - } - - pub fn prove( - transcript: &mut Transcript, - random_tape: &mut RandomTape, - x: &S, - r: &S, - ) -> KnowledgeProof { - >::append_protocol_name( - transcript, - KnowledgeProof::::protocol_name(), - ); - - // produce two random Scalars - let t1 = random_tape.random_scalar(b"t1"); - let t2 = random_tape.random_scalar(b"t2"); - - let c: S = transcript.challenge_scalar(b"c"); - - let z1 = *x * c + t1; - let z2 = *r * c + t2; - - KnowledgeProof { z1, z2 } - } - - pub fn verify(&self, transcript: &mut Transcript) -> Result<(), ProofVerifyError> { - // Transcript operations to preserve consistency for the verify function - { - >::append_protocol_name( - transcript, - KnowledgeProof::::protocol_name(), - ); - - let _c: S = transcript.challenge_scalar(b"c"); - } - - // TODO: Alternative PCS Verification - Ok(()) - } -} - -#[derive(Serialize, Deserialize, Debug)] -pub struct EqualityProof { - z: S, -} - -impl EqualityProof { - fn protocol_name() -> &'static [u8] { - b"equality proof" - } - - pub fn prove( - transcript: &mut Transcript, - random_tape: &mut RandomTape, - _v1: &S, - s1: &S, - _v2: &S, - ) -> EqualityProof { - >::append_protocol_name( - transcript, - EqualityProof::::protocol_name(), - ); - - // produce a random Scalar - let r = random_tape.random_scalar(b"r"); - let c: S = transcript.challenge_scalar(b"c"); - let z = c * *s1 + r; - - EqualityProof { z } - } - - pub fn verify(&self, transcript: &mut Transcript) -> Result<(), ProofVerifyError> { - // Transcript operations to preserve consistency for the verify function - { - >::append_protocol_name( - transcript, - EqualityProof::::protocol_name(), - ); - - let _c: S = transcript.challenge_scalar(b"c"); - } - - // TODO: Alternative PCS Verification - Ok(()) - } -} - -#[derive(Serialize, Deserialize, Debug)] -pub struct ProductProof { - z: [S; 5], -} - -impl ProductProof { - fn protocol_name() -> &'static [u8] { - b"product proof" - } - - pub fn prove( - transcript: &mut Transcript, - random_tape: &mut RandomTape, - x: &S, - rX: &S, - y: &S, - rY: &S, - _z: &S, - rZ: &S, - ) -> ProductProof { - >::append_protocol_name( - transcript, - ProductProof::::protocol_name(), - ); - - // produce five random Scalar - let b1 = random_tape.random_scalar(b"b1"); - let b2 = random_tape.random_scalar(b"b2"); - let b3 = random_tape.random_scalar(b"b3"); - let b4 = random_tape.random_scalar(b"b4"); - let b5 = random_tape.random_scalar(b"b5"); - - let c: S = transcript.challenge_scalar(b"c"); - - let z1 = b1 + c * *x; - let z2 = b2 + c * *rX; - let z3 = b3 + c * *y; - let z4 = b4 + c * *rY; - let z5 = b5 + c * (*rZ 
- *rX * *y); - let z = [z1, z2, z3, z4, z5]; - - ProductProof { z } - } - - fn _check_equality(_c: &S, _z1: &S, _z2: &S) -> bool { - // TODO: Alternative PCS Verification - true - } - - pub fn verify(&self, transcript: &mut Transcript) -> Result<(), ProofVerifyError> { - // Transcript operations to preserve consistency for the verify function - { - >::append_protocol_name( - transcript, - ProductProof::::protocol_name(), - ); - - let _c: S = transcript.challenge_scalar(b"c"); - } - - // TODO: Alternative PCS Verification - Ok(()) - } -} - -#[derive(Debug, Serialize, Deserialize)] -pub struct DotProductProof { - z: Vec, - z_delta: S, - z_beta: S, -} - -impl DotProductProof { - fn protocol_name() -> &'static [u8] { - b"dot product proof" - } - - pub fn compute_dotproduct(a: &[S], b: &[S]) -> S { - assert_eq!(a.len(), b.len()); - (0..a.len()).map(|i| a[i] * b[i]).sum() - } - - pub fn prove( - transcript: &mut Transcript, - random_tape: &mut RandomTape, - x_vec: &[S], - a_vec: &[S], - _y: &S, - ) -> DotProductProof { - >::append_protocol_name( - transcript, - DotProductProof::::protocol_name(), - ); - - let n = x_vec.len(); - assert_eq!(x_vec.len(), a_vec.len()); - - // produce randomness for the proofs - let d_vec = random_tape.random_vector(b"d_vec", n); - let r_delta = random_tape.random_scalar(b"r_delta"); - let r_beta = random_tape.random_scalar(b"r_beta"); - - let _dotproduct_a_d = DotProductProof::compute_dotproduct(a_vec, &d_vec); - - S::append_field_vector_to_transcript(b"a", transcript, a_vec); - let c: S = transcript.challenge_scalar(b"c"); - - let z = (0..d_vec.len()) - .map(|i| c * x_vec[i] + d_vec[i]) - .collect::>(); - - let z_delta = c + r_delta; - let z_beta = c + r_beta; - - DotProductProof { z, z_delta, z_beta } - } - - pub fn verify(&self, transcript: &mut Transcript, a: &[S]) -> Result<(), ProofVerifyError> { - // Transcript operations to preserve consistency for the verify function - { - >::append_protocol_name( - transcript, - DotProductProof::::protocol_name(), - ); - S::append_field_vector_to_transcript(b"a", transcript, a); - let _c: S = transcript.challenge_scalar(b"c"); - } - - let _dotproduct_z_a = DotProductProof::compute_dotproduct(&self.z, a); - - // TODO: Alternative PCS Verification - Ok(()) - } -} - -#[derive(Clone, Debug, Serialize, Deserialize)] -pub struct DotProductProofLog { - z1: S, - z2: S, -} - -impl DotProductProofLog { - fn protocol_name() -> &'static [u8] { - b"dot product proof (log)" - } - - pub fn compute_dotproduct(a: &[S], b: &[S]) -> S { - assert_eq!(a.len(), b.len()); - (0..a.len()).map(|i| a[i] * b[i]).sum() - } - - pub fn prove( - transcript: &mut Transcript, - random_tape: &mut RandomTape, - x_vec: &[S], - blind_x: &S, - a_vec: &[S], - _y: &S, - blind_y: &S, - ) -> DotProductProofLog { - >::append_protocol_name( - transcript, - DotProductProofLog::::protocol_name(), - ); - - let n = x_vec.len(); - assert_eq!(x_vec.len(), a_vec.len()); - - // produce randomness for generating a proof - let d = random_tape.random_scalar(b"d"); - let r_delta = random_tape.random_scalar(b"r_delta"); - let r_beta = random_tape.random_scalar(b"r_delta"); - let blinds_vec = { - let v1 = random_tape.random_vector(b"blinds_vec_1", 2 * n.log_2()); - let v2 = random_tape.random_vector(b"blinds_vec_2", 2 * n.log_2()); - (0..v1.len()) - .map(|i| (v1[i], v2[i])) - .collect::>() - }; - S::append_field_vector_to_transcript(b"a", transcript, a_vec); - - // sample a random base and scale the generator used for - // the output of the inner product - let r: S = 
transcript.challenge_scalar(b"r"); - - let blind_Gamma: S = *blind_x + r * *blind_y; - let (x_hat, a_hat, rhat_Gamma) = - BulletReductionProof::prove(transcript, x_vec, a_vec, &blind_Gamma, &blinds_vec); - - let y_hat = x_hat * a_hat; - - let c: S = transcript.challenge_scalar(b"c"); - - let z1 = d + c * y_hat; - let z2 = a_hat * (c * rhat_Gamma + r_beta) + r_delta; - - DotProductProofLog { z1, z2 } - } - - pub fn verify( - &self, - n: usize, - transcript: &mut Transcript, - a: &[S], - ) -> Result<(), ProofVerifyError> { - assert_eq!(a.len(), n); - - // Transcript operations to preserve consistency for the verify function - { - >::append_protocol_name( - transcript, - DotProductProofLog::::protocol_name(), - ); - - S::append_field_vector_to_transcript(b"a", transcript, a); - - // sample a random base and scale the generator used for - // the output of the inner product - let _r: S = transcript.challenge_scalar(b"r"); - - // BulletReductionProof - verification_scalars - let mut m = a.len(); - while m != 1 { - m /= 2; - - let _u: S = transcript.challenge_scalar(b"u"); - } - - let _c: S = transcript.challenge_scalar(b"c"); - } - - // TODO: Alternative PCS Verification - Ok(()) - } -} From 4b160ca9693daaf0c57acbc7d571301d7cb1203f Mon Sep 17 00:00:00 2001 From: Ray Gao Date: Thu, 5 Dec 2024 03:11:24 -0500 Subject: [PATCH 09/22] Adapt proving methods onto SumcheckInstanceProof. Remove ZK proof struct. --- spartan_parallel/src/dense_mlpoly.rs | 6 +- spartan_parallel/src/r1csproof.rs | 16 +-- spartan_parallel/src/sumcheck.rs | 188 +++------------------------ 3 files changed, 27 insertions(+), 183 deletions(-) diff --git a/spartan_parallel/src/dense_mlpoly.rs b/spartan_parallel/src/dense_mlpoly.rs index 9ef128b2..c2253269 100644 --- a/spartan_parallel/src/dense_mlpoly.rs +++ b/spartan_parallel/src/dense_mlpoly.rs @@ -3,9 +3,9 @@ use crate::scalar::SpartanExtensionField; use super::errors::ProofVerifyError; use super::math::Math; -use super::nizk::DotProductProofLog; use super::random::RandomTape; use super::transcript::ProofTranscript; +use super::unipoly::CompressedUniPoly; use core::ops::Index; use merlin::Transcript; use serde::{Deserialize, Serialize}; @@ -309,9 +309,9 @@ impl Index for DensePolynomial { } } -#[derive(Clone, Debug, Serialize, Deserialize)] +#[derive(Debug, Serialize, Deserialize)] pub struct PolyEvalProof { - proof: DotProductProofLog, + polys: Vec>, } impl PolyEvalProof { diff --git a/spartan_parallel/src/r1csproof.rs b/spartan_parallel/src/r1csproof.rs index 5cd1e650..50bff1e0 100644 --- a/spartan_parallel/src/r1csproof.rs +++ b/spartan_parallel/src/r1csproof.rs @@ -3,10 +3,9 @@ use super::custom_dense_mlpoly::DensePolynomialPqx; use super::dense_mlpoly::{DensePolynomial, EqPolynomial, PolyEvalProof}; use super::errors::ProofVerifyError; use super::math::Math; -use super::nizk::{EqualityProof, KnowledgeProof, ProductProof}; use super::r1csinstance::R1CSInstance; use super::random::RandomTape; -use super::sumcheck::R1CSSumcheckInstanceProof; +use super::sumcheck::SumcheckInstanceProof; use super::timer::Timer; use super::transcript::ProofTranscript; use crate::scalar::SpartanExtensionField; @@ -17,8 +16,8 @@ use std::cmp::min; #[derive(Serialize, Deserialize, Debug)] pub struct R1CSProof { - sc_proof_phase1: R1CSSumcheckInstanceProof, - sc_proof_phase2: R1CSSumcheckInstanceProof, + sc_proof_phase1: SumcheckInstanceProof, + sc_proof_phase2: SumcheckInstanceProof, pok_claims_phase2: (KnowledgeProof, ProductProof), proof_eq_sc_phase1: EqualityProof, proof_eq_sc_phase2: 
EqualityProof, @@ -41,13 +40,13 @@ impl R1CSProof { evals_Cz: &mut DensePolynomialPqx, transcript: &mut Transcript, random_tape: &mut RandomTape, - ) -> (R1CSSumcheckInstanceProof, Vec, Vec) { + ) -> (SumcheckInstanceProof, Vec, Vec) { let comb_func = |poly_A_comp: &S, poly_B_comp: &S, poly_C_comp: &S, poly_D_comp: &S| -> S { *poly_A_comp * (*poly_B_comp * *poly_C_comp - *poly_D_comp) }; let (sc_proof_phase_one, r, claims) = - R1CSSumcheckInstanceProof::::prove_cubic_with_additive_term_disjoint_rounds( + SumcheckInstanceProof::::prove_cubic_with_additive_term_disjoint_rounds( &S::field_zero(), // claim is zero num_rounds, num_rounds_x_max, @@ -83,12 +82,12 @@ impl R1CSProof { evals_z: &mut DensePolynomialPqx, transcript: &mut Transcript, random_tape: &mut RandomTape, - ) -> (R1CSSumcheckInstanceProof, Vec, Vec) { + ) -> (SumcheckInstanceProof, Vec, Vec) { let comb_func = |poly_A_comp: &S, poly_B_comp: &S, poly_C_comp: &S| -> S { *poly_A_comp * *poly_B_comp * *poly_C_comp }; let (sc_proof_phase_two, r, claims) = - R1CSSumcheckInstanceProof::::prove_cubic_disjoint_rounds( + SumcheckInstanceProof::::prove_cubic_disjoint_rounds( claim, num_rounds, num_rounds_y_max, @@ -102,7 +101,6 @@ impl R1CSProof { evals_z, comb_func, transcript, - random_tape, ); (sc_proof_phase_two, r, claims) diff --git a/spartan_parallel/src/sumcheck.rs b/spartan_parallel/src/sumcheck.rs index a3b891a9..36295c27 100644 --- a/spartan_parallel/src/sumcheck.rs +++ b/spartan_parallel/src/sumcheck.rs @@ -6,7 +6,6 @@ use crate::scalar::SpartanExtensionField; use super::dense_mlpoly::DensePolynomial; use super::errors::ProofVerifyError; -use super::nizk::DotProductProof; use super::random::RandomTape; use super::transcript::{AppendToTranscript, ProofTranscript}; use super::unipoly::{CompressedUniPoly, UniPoly}; @@ -70,67 +69,6 @@ impl SumcheckInstanceProof { } } -#[derive(Serialize, Deserialize, Debug)] -pub struct R1CSSumcheckInstanceProof { - proofs: Vec>, -} - -impl R1CSSumcheckInstanceProof { - pub fn new(proofs: Vec>) -> Self { - R1CSSumcheckInstanceProof { proofs } - } - - pub fn verify( - &self, - num_rounds: usize, - degree_bound: usize, - transcript: &mut Transcript, - ) -> Result, ProofVerifyError> { - let mut r: Vec = Vec::new(); - - for i in 0..num_rounds { - // derive the verifier's challenge for the next round - let r_i = transcript.challenge_scalar(b"challenge_nextround"); - - // verify the proof of sum-check and evals - let _res = { - // produce two weights - let w: Vec = transcript.challenge_vector(b"combine_two_claims_to_one", 2); - - let a = { - // the vector to use to decommit for sum-check test - let a_sc = { - let mut a = vec![S::field_one(); degree_bound + 1]; - a[0] = a[0] + S::field_one(); - a - }; - - // the vector to use to decommit for evaluation - let a_eval = { - let mut a = vec![S::field_one(); degree_bound + 1]; - for j in 1..a.len() { - a[j] = a[j - 1] * r_i; - } - a - }; - - // take weighted sum of the two vectors using w - assert_eq!(a_sc.len(), a_eval.len()); - (0..a_sc.len()) - .map(|i| w[0] * a_sc[i] + w[1] * a_eval[i]) - .collect::>() - }; - - self.proofs[i].verify(transcript, &a).is_ok() - }; - - r.push(r_i); - } - - Ok(r) - } -} - impl SumcheckInstanceProof { pub fn prove_cubic( claim: &S, @@ -379,9 +317,7 @@ impl SumcheckInstanceProof { claims_dotp, ) } -} -impl R1CSSumcheckInstanceProof { pub fn prove_cubic_disjoint_rounds( claim: &S, num_rounds: usize, @@ -396,7 +332,6 @@ impl R1CSSumcheckInstanceProof { poly_C: &mut DensePolynomialPqx, comb_func: F, transcript: &mut Transcript, - 
random_tape: &mut RandomTape, ) -> (Self, Vec, Vec) where F: Fn(&S, &S, &S) -> S, @@ -410,7 +345,7 @@ impl R1CSSumcheckInstanceProof { let mut claim_per_round = *claim; let mut r: Vec = Vec::new(); - let mut proofs: Vec> = Vec::new(); + let mut cubic_polys: Vec> = Vec::new(); let mut inputs_len = num_rounds_y_max.pow2(); let mut witness_secs_len = num_rounds_w.pow2(); @@ -540,8 +475,12 @@ impl R1CSSumcheckInstanceProof { poly }; + // append the prover's message to the transcript + poly.append_to_transcript(b"poly", transcript); + //derive the verifier's challenge for the next round let r_j = transcript.challenge_scalar(b"challenge_nextround"); + r.push(r_j); // bound all tables to the verifier's challenege if mode == MODE_P { @@ -552,61 +491,12 @@ impl R1CSSumcheckInstanceProof { } poly_C.bound_poly(&r_j, mode); - // produce a proof of sum-check and of evaluation - let (proof, claim_next_round) = { - let eval = poly.evaluate(&r_j); - - // we need to prove the following under homomorphic commitments: - // (1) poly(0) + poly(1) = claim_per_round - // (2) poly(r_j) = eval - - // Our technique is to leverage dot product proofs: - // (1) we can prove: = claim_per_round - // (2) we can prove: = transcript.challenge_vector(b"combine_two_claims_to_one", 2); - - // compute a weighted sum of the RHS - let target = w[0] * claim_per_round + w[1] * eval; - - let a = { - // the vector to use to decommit for sum-check test - let a_sc = { - let mut a = vec![S::field_one(); poly.degree() + 1]; - a[0] = a[0] + S::field_one(); - a - }; - - // the vector to use to decommit for evaluation - let a_eval = { - let mut a = vec![S::field_one(); poly.degree() + 1]; - for j in 1..a.len() { - a[j] = a[j - 1] * r_j; - } - a - }; - - // take weighted sum of the two vectors using w - assert_eq!(a_sc.len(), a_eval.len()); - (0..a_sc.len()) - .map(|i| w[0] * a_sc[i] + w[1] * a_eval[i]) - .collect::>() - }; - - let proof = DotProductProof::prove(transcript, random_tape, &poly.as_vec(), &a, &target); - - (proof, eval) - }; - - proofs.push(proof); - claim_per_round = claim_next_round; - r.push(r_j); + claim_per_round = poly.evaluate(&r_j); + cubic_polys.push(poly.compress()); } ( - R1CSSumcheckInstanceProof::new(proofs), + SumcheckInstanceProof::new(cubic_polys), r, vec![ poly_A[0], @@ -653,7 +543,7 @@ impl R1CSSumcheckInstanceProof { let mut claim_per_round = *claim; let mut r: Vec = Vec::new(); - let mut proofs: Vec> = Vec::new(); + let mut cubic_polys: Vec> = Vec::new(); let mut cons_len = num_rounds_x_max.pow2(); let mut proof_len = num_rounds_q_max.pow2(); @@ -798,8 +688,12 @@ impl R1CSSumcheckInstanceProof { poly }; + // append the prover's message to the transcript + poly.append_to_transcript(b"poly", transcript); + //derive the verifier's challenge for the next round let r_j = transcript.challenge_scalar(b"challenge_nextround"); + r.push(r_j); // bound all tables to the verifier's challenege if mode == 1 { @@ -813,60 +707,12 @@ impl R1CSSumcheckInstanceProof { poly_C.bound_poly(&r_j, mode); poly_D.bound_poly(&r_j, mode); - let (proof, claim_next_round) = { - let eval = poly.evaluate(&r_j); - - // we need to prove the following under homomorphic commitments: - // (1) poly(0) + poly(1) = claim_per_round - // (2) poly(r_j) = eval - - // Our technique is to leverage dot product proofs: - // (1) we can prove: = claim_per_round - // (2) we can prove: = transcript.challenge_vector(b"combine_two_claims_to_one", 2); - - // compute a weighted sum of the RHS - let target = w[0] * claim_per_round + w[1] * eval; - - let a = { - // 
the vector to use to decommit for sum-check test - let a_sc = { - let mut a = vec![S::field_one(); poly.degree() + 1]; - a[0] = a[0] + S::field_one(); - a - }; - - // the vector to use to decommit for evaluation - let a_eval = { - let mut a = vec![S::field_one(); poly.degree() + 1]; - for j in 1..a.len() { - a[j] = a[j - 1] * r_j; - } - a - }; - - // take weighted sum of the two vectors using w - assert_eq!(a_sc.len(), a_eval.len()); - (0..a_sc.len()) - .map(|i| w[0] * a_sc[i] + w[1] * a_eval[i]) - .collect::>() - }; - - let proof = DotProductProof::prove(transcript, random_tape, &poly.as_vec(), &a, &target); - - (proof, eval) - }; - - proofs.push(proof); - claim_per_round = claim_next_round; - r.push(r_j); + claim_per_round = poly.evaluate(&r_j); + cubic_polys.push(poly.compress()); } ( - R1CSSumcheckInstanceProof::new(proofs), + SumcheckInstanceProof::new(cubic_polys), r, vec![ poly_Ap[0] * poly_Aq[0] * poly_Ax[0], @@ -876,4 +722,4 @@ impl R1CSSumcheckInstanceProof { ], ) } -} +} \ No newline at end of file From d1bfaca7d874e4693d75b2dab83d329af74a4f96 Mon Sep 17 00:00:00 2001 From: Ray Gao Date: Thu, 5 Dec 2024 18:59:47 -0500 Subject: [PATCH 10/22] Revert "Adapt proving methods onto SumcheckInstanceProof. Remove ZK proof struct." This reverts commit 4b160ca9693daaf0c57acbc7d571301d7cb1203f. --- spartan_parallel/src/dense_mlpoly.rs | 6 +- spartan_parallel/src/r1csproof.rs | 16 ++- spartan_parallel/src/sumcheck.rs | 188 ++++++++++++++++++++++++--- 3 files changed, 183 insertions(+), 27 deletions(-) diff --git a/spartan_parallel/src/dense_mlpoly.rs b/spartan_parallel/src/dense_mlpoly.rs index c2253269..9ef128b2 100644 --- a/spartan_parallel/src/dense_mlpoly.rs +++ b/spartan_parallel/src/dense_mlpoly.rs @@ -3,9 +3,9 @@ use crate::scalar::SpartanExtensionField; use super::errors::ProofVerifyError; use super::math::Math; +use super::nizk::DotProductProofLog; use super::random::RandomTape; use super::transcript::ProofTranscript; -use super::unipoly::CompressedUniPoly; use core::ops::Index; use merlin::Transcript; use serde::{Deserialize, Serialize}; @@ -309,9 +309,9 @@ impl Index for DensePolynomial { } } -#[derive(Debug, Serialize, Deserialize)] +#[derive(Clone, Debug, Serialize, Deserialize)] pub struct PolyEvalProof { - polys: Vec>, + proof: DotProductProofLog, } impl PolyEvalProof { diff --git a/spartan_parallel/src/r1csproof.rs b/spartan_parallel/src/r1csproof.rs index 50bff1e0..5cd1e650 100644 --- a/spartan_parallel/src/r1csproof.rs +++ b/spartan_parallel/src/r1csproof.rs @@ -3,9 +3,10 @@ use super::custom_dense_mlpoly::DensePolynomialPqx; use super::dense_mlpoly::{DensePolynomial, EqPolynomial, PolyEvalProof}; use super::errors::ProofVerifyError; use super::math::Math; +use super::nizk::{EqualityProof, KnowledgeProof, ProductProof}; use super::r1csinstance::R1CSInstance; use super::random::RandomTape; -use super::sumcheck::SumcheckInstanceProof; +use super::sumcheck::R1CSSumcheckInstanceProof; use super::timer::Timer; use super::transcript::ProofTranscript; use crate::scalar::SpartanExtensionField; @@ -16,8 +17,8 @@ use std::cmp::min; #[derive(Serialize, Deserialize, Debug)] pub struct R1CSProof { - sc_proof_phase1: SumcheckInstanceProof, - sc_proof_phase2: SumcheckInstanceProof, + sc_proof_phase1: R1CSSumcheckInstanceProof, + sc_proof_phase2: R1CSSumcheckInstanceProof, pok_claims_phase2: (KnowledgeProof, ProductProof), proof_eq_sc_phase1: EqualityProof, proof_eq_sc_phase2: EqualityProof, @@ -40,13 +41,13 @@ impl R1CSProof { evals_Cz: &mut DensePolynomialPqx, transcript: &mut 
Transcript, random_tape: &mut RandomTape, - ) -> (SumcheckInstanceProof, Vec, Vec) { + ) -> (R1CSSumcheckInstanceProof, Vec, Vec) { let comb_func = |poly_A_comp: &S, poly_B_comp: &S, poly_C_comp: &S, poly_D_comp: &S| -> S { *poly_A_comp * (*poly_B_comp * *poly_C_comp - *poly_D_comp) }; let (sc_proof_phase_one, r, claims) = - SumcheckInstanceProof::::prove_cubic_with_additive_term_disjoint_rounds( + R1CSSumcheckInstanceProof::::prove_cubic_with_additive_term_disjoint_rounds( &S::field_zero(), // claim is zero num_rounds, num_rounds_x_max, @@ -82,12 +83,12 @@ impl R1CSProof { evals_z: &mut DensePolynomialPqx, transcript: &mut Transcript, random_tape: &mut RandomTape, - ) -> (SumcheckInstanceProof, Vec, Vec) { + ) -> (R1CSSumcheckInstanceProof, Vec, Vec) { let comb_func = |poly_A_comp: &S, poly_B_comp: &S, poly_C_comp: &S| -> S { *poly_A_comp * *poly_B_comp * *poly_C_comp }; let (sc_proof_phase_two, r, claims) = - SumcheckInstanceProof::::prove_cubic_disjoint_rounds( + R1CSSumcheckInstanceProof::::prove_cubic_disjoint_rounds( claim, num_rounds, num_rounds_y_max, @@ -101,6 +102,7 @@ impl R1CSProof { evals_z, comb_func, transcript, + random_tape, ); (sc_proof_phase_two, r, claims) diff --git a/spartan_parallel/src/sumcheck.rs b/spartan_parallel/src/sumcheck.rs index 36295c27..a3b891a9 100644 --- a/spartan_parallel/src/sumcheck.rs +++ b/spartan_parallel/src/sumcheck.rs @@ -6,6 +6,7 @@ use crate::scalar::SpartanExtensionField; use super::dense_mlpoly::DensePolynomial; use super::errors::ProofVerifyError; +use super::nizk::DotProductProof; use super::random::RandomTape; use super::transcript::{AppendToTranscript, ProofTranscript}; use super::unipoly::{CompressedUniPoly, UniPoly}; @@ -69,6 +70,67 @@ impl SumcheckInstanceProof { } } +#[derive(Serialize, Deserialize, Debug)] +pub struct R1CSSumcheckInstanceProof { + proofs: Vec>, +} + +impl R1CSSumcheckInstanceProof { + pub fn new(proofs: Vec>) -> Self { + R1CSSumcheckInstanceProof { proofs } + } + + pub fn verify( + &self, + num_rounds: usize, + degree_bound: usize, + transcript: &mut Transcript, + ) -> Result, ProofVerifyError> { + let mut r: Vec = Vec::new(); + + for i in 0..num_rounds { + // derive the verifier's challenge for the next round + let r_i = transcript.challenge_scalar(b"challenge_nextround"); + + // verify the proof of sum-check and evals + let _res = { + // produce two weights + let w: Vec = transcript.challenge_vector(b"combine_two_claims_to_one", 2); + + let a = { + // the vector to use to decommit for sum-check test + let a_sc = { + let mut a = vec![S::field_one(); degree_bound + 1]; + a[0] = a[0] + S::field_one(); + a + }; + + // the vector to use to decommit for evaluation + let a_eval = { + let mut a = vec![S::field_one(); degree_bound + 1]; + for j in 1..a.len() { + a[j] = a[j - 1] * r_i; + } + a + }; + + // take weighted sum of the two vectors using w + assert_eq!(a_sc.len(), a_eval.len()); + (0..a_sc.len()) + .map(|i| w[0] * a_sc[i] + w[1] * a_eval[i]) + .collect::>() + }; + + self.proofs[i].verify(transcript, &a).is_ok() + }; + + r.push(r_i); + } + + Ok(r) + } +} + impl SumcheckInstanceProof { pub fn prove_cubic( claim: &S, @@ -317,7 +379,9 @@ impl SumcheckInstanceProof { claims_dotp, ) } +} +impl R1CSSumcheckInstanceProof { pub fn prove_cubic_disjoint_rounds( claim: &S, num_rounds: usize, @@ -332,6 +396,7 @@ impl SumcheckInstanceProof { poly_C: &mut DensePolynomialPqx, comb_func: F, transcript: &mut Transcript, + random_tape: &mut RandomTape, ) -> (Self, Vec, Vec) where F: Fn(&S, &S, &S) -> S, @@ -345,7 +410,7 @@ impl 
SumcheckInstanceProof { let mut claim_per_round = *claim; let mut r: Vec = Vec::new(); - let mut cubic_polys: Vec> = Vec::new(); + let mut proofs: Vec> = Vec::new(); let mut inputs_len = num_rounds_y_max.pow2(); let mut witness_secs_len = num_rounds_w.pow2(); @@ -475,12 +540,8 @@ impl SumcheckInstanceProof { poly }; - // append the prover's message to the transcript - poly.append_to_transcript(b"poly", transcript); - //derive the verifier's challenge for the next round let r_j = transcript.challenge_scalar(b"challenge_nextround"); - r.push(r_j); // bound all tables to the verifier's challenege if mode == MODE_P { @@ -491,12 +552,61 @@ impl SumcheckInstanceProof { } poly_C.bound_poly(&r_j, mode); - claim_per_round = poly.evaluate(&r_j); - cubic_polys.push(poly.compress()); + // produce a proof of sum-check and of evaluation + let (proof, claim_next_round) = { + let eval = poly.evaluate(&r_j); + + // we need to prove the following under homomorphic commitments: + // (1) poly(0) + poly(1) = claim_per_round + // (2) poly(r_j) = eval + + // Our technique is to leverage dot product proofs: + // (1) we can prove: = claim_per_round + // (2) we can prove: = transcript.challenge_vector(b"combine_two_claims_to_one", 2); + + // compute a weighted sum of the RHS + let target = w[0] * claim_per_round + w[1] * eval; + + let a = { + // the vector to use to decommit for sum-check test + let a_sc = { + let mut a = vec![S::field_one(); poly.degree() + 1]; + a[0] = a[0] + S::field_one(); + a + }; + + // the vector to use to decommit for evaluation + let a_eval = { + let mut a = vec![S::field_one(); poly.degree() + 1]; + for j in 1..a.len() { + a[j] = a[j - 1] * r_j; + } + a + }; + + // take weighted sum of the two vectors using w + assert_eq!(a_sc.len(), a_eval.len()); + (0..a_sc.len()) + .map(|i| w[0] * a_sc[i] + w[1] * a_eval[i]) + .collect::>() + }; + + let proof = DotProductProof::prove(transcript, random_tape, &poly.as_vec(), &a, &target); + + (proof, eval) + }; + + proofs.push(proof); + claim_per_round = claim_next_round; + r.push(r_j); } ( - SumcheckInstanceProof::new(cubic_polys), + R1CSSumcheckInstanceProof::new(proofs), r, vec![ poly_A[0], @@ -543,7 +653,7 @@ impl SumcheckInstanceProof { let mut claim_per_round = *claim; let mut r: Vec = Vec::new(); - let mut cubic_polys: Vec> = Vec::new(); + let mut proofs: Vec> = Vec::new(); let mut cons_len = num_rounds_x_max.pow2(); let mut proof_len = num_rounds_q_max.pow2(); @@ -688,12 +798,8 @@ impl SumcheckInstanceProof { poly }; - // append the prover's message to the transcript - poly.append_to_transcript(b"poly", transcript); - //derive the verifier's challenge for the next round let r_j = transcript.challenge_scalar(b"challenge_nextround"); - r.push(r_j); // bound all tables to the verifier's challenege if mode == 1 { @@ -707,12 +813,60 @@ impl SumcheckInstanceProof { poly_C.bound_poly(&r_j, mode); poly_D.bound_poly(&r_j, mode); - claim_per_round = poly.evaluate(&r_j); - cubic_polys.push(poly.compress()); + let (proof, claim_next_round) = { + let eval = poly.evaluate(&r_j); + + // we need to prove the following under homomorphic commitments: + // (1) poly(0) + poly(1) = claim_per_round + // (2) poly(r_j) = eval + + // Our technique is to leverage dot product proofs: + // (1) we can prove: = claim_per_round + // (2) we can prove: = transcript.challenge_vector(b"combine_two_claims_to_one", 2); + + // compute a weighted sum of the RHS + let target = w[0] * claim_per_round + w[1] * eval; + + let a = { + // the vector to use to decommit for sum-check test + 
let a_sc = { + let mut a = vec![S::field_one(); poly.degree() + 1]; + a[0] = a[0] + S::field_one(); + a + }; + + // the vector to use to decommit for evaluation + let a_eval = { + let mut a = vec![S::field_one(); poly.degree() + 1]; + for j in 1..a.len() { + a[j] = a[j - 1] * r_j; + } + a + }; + + // take weighted sum of the two vectors using w + assert_eq!(a_sc.len(), a_eval.len()); + (0..a_sc.len()) + .map(|i| w[0] * a_sc[i] + w[1] * a_eval[i]) + .collect::>() + }; + + let proof = DotProductProof::prove(transcript, random_tape, &poly.as_vec(), &a, &target); + + (proof, eval) + }; + + proofs.push(proof); + claim_per_round = claim_next_round; + r.push(r_j); } ( - SumcheckInstanceProof::new(cubic_polys), + R1CSSumcheckInstanceProof::new(proofs), r, vec![ poly_Ap[0] * poly_Aq[0] * poly_Ax[0], @@ -722,4 +876,4 @@ impl SumcheckInstanceProof { ], ) } -} \ No newline at end of file +} From 6b398537b933f95ab1e1a62efec1e37c05295731 Mon Sep 17 00:00:00 2001 From: Ray Gao Date: Thu, 5 Dec 2024 20:06:35 -0500 Subject: [PATCH 11/22] Resolve compilation issues --- spartan_parallel/src/dense_mlpoly.rs | 165 ++++------------------- spartan_parallel/src/lib.rs | 4 - spartan_parallel/src/r1csproof.rs | 129 ++++++++++-------- spartan_parallel/src/sparse_mlpoly.rs | 6 - spartan_parallel/src/sumcheck.rs | 187 +++----------------------- 5 files changed, 115 insertions(+), 376 deletions(-) diff --git a/spartan_parallel/src/dense_mlpoly.rs b/spartan_parallel/src/dense_mlpoly.rs index 9ef128b2..0d0a09f8 100644 --- a/spartan_parallel/src/dense_mlpoly.rs +++ b/spartan_parallel/src/dense_mlpoly.rs @@ -3,7 +3,6 @@ use crate::scalar::SpartanExtensionField; use super::errors::ProofVerifyError; use super::math::Math; -use super::nizk::DotProductProofLog; use super::random::RandomTape; use super::transcript::ProofTranscript; use core::ops::Index; @@ -21,10 +20,6 @@ pub struct DensePolynomial { Z: Vec, // evaluations of the polynomial in all the 2^num_vars Boolean inputs } -pub struct PolyCommitmentBlinds { - pub(crate) blinds: Vec, -} - pub struct EqPolynomial { r: Vec, } @@ -258,7 +253,12 @@ impl DensePolynomial { assert_eq!(r.len(), self.get_num_vars()); let chis = EqPolynomial::new(r.to_vec()).evals(); assert_eq!(chis.len(), self.Z.len()); - DotProductProofLog::compute_dotproduct(&self.Z, &chis) + Self::compute_dotproduct(&self.Z, &chis) + } + + fn compute_dotproduct(a: &[S], b: &[S]) -> S { + assert_eq!(a.len(), b.len()); + (0..a.len()).map(|i| a[i] * b[i]).sum() } fn vec(&self) -> &Vec { @@ -311,7 +311,7 @@ impl Index for DensePolynomial { #[derive(Clone, Debug, Serialize, Deserialize)] pub struct PolyEvalProof { - proof: DotProductProofLog, + v: Vec, } impl PolyEvalProof { @@ -321,12 +321,10 @@ impl PolyEvalProof { pub fn prove( poly: &DensePolynomial, - blinds_opt: Option<&PolyCommitmentBlinds>, r: &[S], // point at which the polynomial is evaluated - Zr: &S, // evaluation of \widetilde{Z}(r) - blind_Zr_opt: Option<&S>, // specifies a blind for Zr + _Zr: &S, // evaluation of \widetilde{Z}(r) transcript: &mut Transcript, - random_tape: &mut RandomTape, + _random_tape: &mut RandomTape, ) -> PolyEvalProof { >::append_protocol_name( transcript, @@ -340,32 +338,16 @@ impl PolyEvalProof { let L_size = left_num_vars.pow2(); let R_size = right_num_vars.pow2(); - let default_blinds = PolyCommitmentBlinds { - blinds: vec![S::field_zero(); L_size], - }; - let blinds = blinds_opt.map_or(&default_blinds, |p| p); - - assert_eq!(blinds.blinds.len(), L_size); - - let zero = S::field_zero(); - let blind_Zr = 
blind_Zr_opt.map_or(&zero, |p| p); - // compute the L and R vectors let eq = EqPolynomial::new(r.to_vec()); let (L, R) = eq.compute_factored_evals(); assert_eq!(L.len(), L_size); assert_eq!(R.len(), R_size); - // compute the vector underneath L*Z and the L*blinds // compute vector-matrix product between L and Z viewed as a matrix let LZ = poly.bound(&L); - let LZ_blind: S = (0..L.len()).map(|i| blinds.blinds[i] * L[i]).sum(); - - // a dot product proof of size R_size - let proof = - DotProductProofLog::prove(transcript, random_tape, &LZ, &LZ_blind, &R, Zr, blind_Zr); - PolyEvalProof { proof } + PolyEvalProof { v: LZ } } pub fn verify( @@ -380,9 +362,7 @@ impl PolyEvalProof { // compute L and R let eq = EqPolynomial::new(r.to_vec()); - let (L, R) = eq.compute_factored_evals(); - - let _ = self.proof.verify(R.len(), transcript, &R); + let (_L, _R) = eq.compute_factored_evals(); // TODO: Alternative PCS Verification Ok(()) @@ -403,12 +383,10 @@ impl PolyEvalProof { // Evaluation of multiple points on the same instance pub fn prove_batched_points( poly: &DensePolynomial, - blinds_opt: Option<&PolyCommitmentBlinds>, r_list: Vec>, // point at which the polynomial is evaluated Zr_list: Vec, // evaluation of \widetilde{Z}(r) on each point - blind_Zr_opt: Option<&S>, // specifies a blind for Zr transcript: &mut Transcript, - random_tape: &mut RandomTape, + _random_tape: &mut RandomTape, ) -> Vec> { >::append_protocol_name( transcript, @@ -425,16 +403,6 @@ impl PolyEvalProof { let L_size = left_num_vars.pow2(); let R_size = right_num_vars.pow2(); - let default_blinds = PolyCommitmentBlinds { - blinds: vec![S::field_zero(); L_size], - }; - let blinds = blinds_opt.map_or(&default_blinds, |p| p); - - assert_eq!(blinds.blinds.len(), L_size); - - let zero = S::field_zero(); - let blind_Zr = blind_Zr_opt.map_or(&zero, |p| p); - // compute the L and R vectors // We can perform batched opening if L is the same, so we regroup the proofs by L vector // Map from the left half of the r to index in L_list @@ -468,31 +436,14 @@ impl PolyEvalProof { let mut proof_list = Vec::new(); for i in 0..L_list.len() { let L = &L_list[i]; - let R = &R_list[i]; - // compute the vector underneath L*Z and the L*blinds + let _R = &R_list[i]; // compute vector-matrix product between L and Z viewed as a matrix let LZ = poly.bound(L); - let LZ_blind: S = (0..L.len()).map(|i| blinds.blinds[i] * L[i]).sum(); - - // a dot product proof of size R_size - let proof = DotProductProofLog::prove( - transcript, - random_tape, - &LZ, - &LZ_blind, - R, - &Zc_list[i], - blind_Zr, - ); - proof_list.push(proof); + + proof_list.push(PolyEvalProof{ v: LZ }); } proof_list - .iter() - .map(|proof| PolyEvalProof { - proof: proof.clone(), - }) - .collect() } pub fn verify_plain_batched_points( @@ -546,12 +497,10 @@ impl PolyEvalProof { // Size of each instance might be different, but all are larger than the evaluation point pub fn prove_batched_instances( poly_list: &Vec>, // list of instances - blinds_opt: Option<&PolyCommitmentBlinds>, r_list: Vec<&Vec>, // point at which the polynomial is evaluated Zr_list: &Vec, // evaluation of \widetilde{Z}(r) on each instance - blind_Zr_opt: Option<&S>, // specifies a blind for Zr transcript: &mut Transcript, - random_tape: &mut RandomTape, + _random_tape: &mut RandomTape, ) -> Vec> { >::append_protocol_name( transcript, @@ -611,31 +560,10 @@ impl PolyEvalProof { } let mut proof_list = Vec::new(); - for i in 0..LZ_list.len() { - let L = &L_list[i]; - let L_size = L.len(); - - let default_blinds = 
PolyCommitmentBlinds { - blinds: vec![S::field_zero(); L_size], - }; - let blinds = blinds_opt.map_or(&default_blinds, |p| p); - assert_eq!(blinds.blinds.len(), L_size); - let blind_Zr = blind_Zr_opt.map_or(&zero, |p| p); - let LZ_blind: S = (0..L.len()).map(|i| blinds.blinds[i] * L[i]).sum(); - - // a dot product proof of size R_size - let proof = DotProductProofLog::prove( - transcript, - random_tape, - &LZ_list[i], - &LZ_blind, - &R_list[i], - &Zc_list[i], - blind_Zr, - ); - proof_list.push(PolyEvalProof { proof }); + for v in LZ_list.into_iter() { + proof_list.push(PolyEvalProof { v }); } - + proof_list } @@ -700,13 +628,11 @@ impl PolyEvalProof { poly_list: &Vec<&DensePolynomial>, num_proofs_list: &Vec, num_inputs_list: &Vec, - blinds_opt: Option<&PolyCommitmentBlinds>, rq: &[S], ry: &[S], Zr_list: &Vec, - blind_Zr_opt: Option<&S>, transcript: &mut Transcript, - random_tape: &mut RandomTape, + _random_tape: &mut RandomTape, ) -> Vec> { >::append_protocol_name( transcript, @@ -771,32 +697,8 @@ impl PolyEvalProof { } let mut proof_list = Vec::new(); - - for i in 0..LZ_list.len() { - let L = &L_list[i]; - let L_size = L.len(); - let default_blinds = PolyCommitmentBlinds { - blinds: vec![S::field_zero(); L_size], - }; - let blinds = blinds_opt.map_or(&default_blinds, |p| p); - - assert_eq!(blinds.blinds.len(), L_size); - - let blind_Zr = blind_Zr_opt.map_or(&zero, |p| p); - let LZ_blind: S = (0..L.len()).map(|i| blinds.blinds[i] * L[i]).sum(); - - // a dot product proof of size R_size - let proof = DotProductProofLog::prove( - transcript, - random_tape, - &LZ_list[i], - &LZ_blind, - &R_list[i], - &Zc_list[i], - blind_Zr, - ); - - proof_list.push(PolyEvalProof { proof }); + for v in LZ_list.into_iter() { + proof_list.push(PolyEvalProof{ v }); } proof_list @@ -871,7 +773,7 @@ impl PolyEvalProof { for i in 0..LZ_list.len() { let R = &R_list[i]; - proof_list[i].proof.verify(R.len(), transcript, R)?; + proof_list[i].verify(transcript, R)?; } Ok(()) @@ -883,7 +785,7 @@ impl PolyEvalProof { r: &S, // point at which the polynomial is evaluated Zr: &Vec, // evaluation of \widetilde{Z}(r) transcript: &mut Transcript, - random_tape: &mut RandomTape, + _random_tape: &mut RandomTape, ) -> PolyEvalProof { >::append_protocol_name( transcript, @@ -904,7 +806,7 @@ impl PolyEvalProof { let R_size = right_num_vars.pow2(); // compute R = <1, r, r^2, ...> - let R = { + let _R = { let mut r_base = S::field_one(); let mut R = Vec::new(); for _ in 0..R_size { @@ -951,18 +853,7 @@ impl PolyEvalProof { c = c * c_base; } - // a dot product proof of size R_size - let proof = DotProductProofLog::prove( - transcript, - random_tape, - &LZ_comb, - &zero, - &R, - &Zr_comb, - &zero, - ); - - PolyEvalProof { proof } + PolyEvalProof { v: LZ_comb } } pub fn verify_uni_batched_instances( @@ -1021,7 +912,7 @@ impl PolyEvalProof { c = c * c_base; } - self.proof.verify(R.len(), transcript, &R) + self.verify(transcript, &R) } } @@ -1049,7 +940,7 @@ mod tests { .collect::>(); // compute dot product between LZ and R - DotProductProofLog::compute_dotproduct(&LZ, &R) + DensePolynomial::compute_dotproduct(&LZ, &R) } #[test] diff --git a/spartan_parallel/src/lib.rs b/spartan_parallel/src/lib.rs index d1eb04d5..eb4b747c 100644 --- a/spartan_parallel/src/lib.rs +++ b/spartan_parallel/src/lib.rs @@ -204,7 +204,6 @@ impl IOProofs { // batch prove all proofs let proofs = PolyEvalProof::prove_batched_points( exec_poly_inputs, - None, [ vec![ 0, // input valid @@ -230,7 +229,6 @@ impl IOProofs { live_input, ] .concat(), - None, transcript, 
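These batched point openings assert that the multilinear extension of the committed execution-input polynomial, evaluated at the bit pattern of each listed index, equals the corresponding input/output value. A minimal standalone sketch of that fact, with u128 arithmetic modulo a toy prime standing in for the scalar field (names and modulus are illustrative, not crate code):

// Toy sketch only.
const P: u128 = 101;

// Multilinear extension of a table Z over {0,1}^n, evaluated at r:
// Z~(r) = sum_i chi_i(r) * Z[i], where chi_i is the Lagrange basis of the cube.
fn mle_eval(z: &[u128], r: &[u128]) -> u128 {
  assert_eq!(z.len(), 1usize << r.len());
  let mut acc = 0u128;
  for (i, zi) in z.iter().enumerate() {
    let mut chi = 1u128;
    for (j, rj) in r.iter().enumerate() {
      let bit = (i >> (r.len() - 1 - j)) & 1; // big-endian bit j of index i
      let term = if bit == 1 { *rj } else { (1 + P - *rj) % P };
      chi = chi * term % P;
    }
    acc = (acc + chi * zi) % P;
  }
  acc
}

fn main() {
  // A committed table of eight "inputs"; open it at index 5 = 0b101.
  let z = vec![10, 20, 30, 40, 50, 60, 70, 80];
  let r_bits = vec![1, 0, 1]; // the bits of the index, given as field elements
  assert_eq!(mle_eval(&z, &r_bits), z[5]);
}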
random_tape, ); @@ -2199,10 +2197,8 @@ impl SNARK { .collect(); let proof_eval_perm_poly_prod_list = PolyEvalProof::prove_batched_instances( &perm_poly_w3_prover.poly_w, - None, r_list, &perm_poly_poly_list, - None, transcript, &mut random_tape, ); diff --git a/spartan_parallel/src/r1csproof.rs b/spartan_parallel/src/r1csproof.rs index 5cd1e650..866f1642 100644 --- a/spartan_parallel/src/r1csproof.rs +++ b/spartan_parallel/src/r1csproof.rs @@ -3,10 +3,9 @@ use super::custom_dense_mlpoly::DensePolynomialPqx; use super::dense_mlpoly::{DensePolynomial, EqPolynomial, PolyEvalProof}; use super::errors::ProofVerifyError; use super::math::Math; -use super::nizk::{EqualityProof, KnowledgeProof, ProductProof}; use super::r1csinstance::R1CSInstance; use super::random::RandomTape; -use super::sumcheck::R1CSSumcheckInstanceProof; +use super::sumcheck::SumcheckInstanceProof; use super::timer::Timer; use super::transcript::ProofTranscript; use crate::scalar::SpartanExtensionField; @@ -17,11 +16,13 @@ use std::cmp::min; #[derive(Serialize, Deserialize, Debug)] pub struct R1CSProof { - sc_proof_phase1: R1CSSumcheckInstanceProof, - sc_proof_phase2: R1CSSumcheckInstanceProof, - pok_claims_phase2: (KnowledgeProof, ProductProof), - proof_eq_sc_phase1: EqualityProof, - proof_eq_sc_phase2: EqualityProof, + sc_proof_phase1: SumcheckInstanceProof, + sc_proof_phase2: SumcheckInstanceProof, + claims_phase2: (S, S, S), + // debug_zk + // pok_claims_phase2: (KnowledgeProof, ProductProof), + // proof_eq_sc_phase1: EqualityProof, + // proof_eq_sc_phase2: EqualityProof, proof_eval_vars_at_ry_list: Vec>, } @@ -41,13 +42,13 @@ impl R1CSProof { evals_Cz: &mut DensePolynomialPqx, transcript: &mut Transcript, random_tape: &mut RandomTape, - ) -> (R1CSSumcheckInstanceProof, Vec, Vec) { + ) -> (SumcheckInstanceProof, Vec, Vec) { let comb_func = |poly_A_comp: &S, poly_B_comp: &S, poly_C_comp: &S, poly_D_comp: &S| -> S { *poly_A_comp * (*poly_B_comp * *poly_C_comp - *poly_D_comp) }; let (sc_proof_phase_one, r, claims) = - R1CSSumcheckInstanceProof::::prove_cubic_with_additive_term_disjoint_rounds( + SumcheckInstanceProof::::prove_cubic_with_additive_term_disjoint_rounds( &S::field_zero(), // claim is zero num_rounds, num_rounds_x_max, @@ -83,12 +84,12 @@ impl R1CSProof { evals_z: &mut DensePolynomialPqx, transcript: &mut Transcript, random_tape: &mut RandomTape, - ) -> (R1CSSumcheckInstanceProof, Vec, Vec) { + ) -> (SumcheckInstanceProof, Vec, Vec) { let comb_func = |poly_A_comp: &S, poly_B_comp: &S, poly_C_comp: &S| -> S { *poly_A_comp * *poly_B_comp * *poly_C_comp }; let (sc_proof_phase_two, r, claims) = - R1CSSumcheckInstanceProof::::prove_cubic_disjoint_rounds( + SumcheckInstanceProof::::prove_cubic_disjoint_rounds( claim, num_rounds, num_rounds_y_max, @@ -272,21 +273,23 @@ impl R1CSProof { random_tape.random_scalar(b"prod_Az_Bz_blind"), ); - let pok_Cz_claim = { KnowledgeProof::prove(transcript, random_tape, Cz_claim, &Cz_blind) }; - - let proof_prod = { - let prod = *Az_claim * *Bz_claim; - ProductProof::prove( - transcript, - random_tape, - Az_claim, - &Az_blind, - Bz_claim, - &Bz_blind, - &prod, - &prod_Az_Bz_blind, - ) - }; + // debug_zk + // let pok_Cz_claim = { KnowledgeProof::prove(transcript, random_tape, Cz_claim, &Cz_blind) }; + + // debug_zk + // let proof_prod = { + // let prod = *Az_claim * *Bz_claim; + // ProductProof::prove( + // transcript, + // random_tape, + // Az_claim, + // &Az_blind, + // Bz_claim, + // &Bz_blind, + // &prod, + // &prod_Az_Bz_blind, + // ) + // }; // prove the final step of sum-check #1 
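With the equality proof commented out, this final step amounts to recomputing (Az_claim * Bz_claim - Cz_claim) * taus_bound_rx in the clear, where taus_bound_rx is the equality polynomial eq(tau, r_x) evaluated as a per-coordinate product. The toy sketch below checks that product form against the sum-over-the-cube definition of eq, over a small prime modulus (all names illustrative, not crate APIs):

// Toy sketch only.
const P: u128 = 101;
fn addm(a: u128, b: u128) -> u128 { (a + b) % P }
fn mulm(a: u128, b: u128) -> u128 { a * b % P }
fn one_minus(a: u128) -> u128 { (1 + P - a) % P }

// Product form, as computed by the verifier:
// eq(tau, r) = prod_i (tau_i * r_i + (1 - tau_i) * (1 - r_i)).
fn eq_prod(tau: &[u128], r: &[u128]) -> u128 {
  tau.iter().zip(r).fold(1, |acc, (t, x)| {
    mulm(acc, addm(mulm(*t, *x), mulm(one_minus(*t), one_minus(*x))))
  })
}

// The same value written as a sum of Lagrange-basis products over the cube.
fn eq_sum(tau: &[u128], r: &[u128]) -> u128 {
  let n = tau.len();
  let mut acc = 0;
  for b in 0..(1usize << n) {
    let mut term = 1;
    for i in 0..n {
      let bit = (b >> (n - 1 - i)) & 1;
      let (t, x) = if bit == 1 { (tau[i], r[i]) } else { (one_minus(tau[i]), one_minus(r[i])) };
      term = mulm(term, mulm(t, x));
    }
    acc = addm(acc, term);
  }
  acc
}

fn main() {
  let tau = vec![3, 97, 15];
  let r = vec![42, 8, 77];
  assert_eq!(eq_prod(&tau, &r), eq_sum(&tau, &r));
}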
let taus_bound_rx = tau_claim; @@ -294,13 +297,14 @@ impl R1CSProof { let blind_expected_claim_postsc1 = *taus_bound_rx * (prod_Az_Bz_blind - Cz_blind); let claim_post_phase1 = (*Az_claim * *Bz_claim - *Cz_claim) * *taus_bound_rx; - let proof_eq_sc_phase1 = EqualityProof::prove( - transcript, - random_tape, - &claim_post_phase1, - &blind_expected_claim_postsc1, - &claim_post_phase1, - ); + // debug_zk + // let proof_eq_sc_phase1 = EqualityProof::prove( + // transcript, + // random_tape, + // &claim_post_phase1, + // &blind_expected_claim_postsc1, + // &claim_post_phase1, + // ); // Separate the result rx into rp, rq, and rx let (rx_rev, rq_rev) = rx.split_at(num_rounds_x); @@ -464,11 +468,9 @@ impl R1CSProof { &poly_list, &num_proofs_list, &num_inputs_list, - None, &rq, &ry, &Zr_list, - None, transcript, random_tape, ); @@ -541,25 +543,29 @@ impl R1CSProof { let blind_expected_claim_postsc2 = S::field_zero(); let claim_post_phase2 = claims_phase2[0] * claims_phase2[1] * claims_phase2[2]; - let proof_eq_sc_phase2 = EqualityProof::prove( - transcript, - random_tape, - &claim_post_phase2, - &blind_expected_claim_postsc2, - &claim_post_phase2, - ); + // debug_zk + // let proof_eq_sc_phase2 = EqualityProof::prove( + // transcript, + // random_tape, + // &claim_post_phase2, + // &blind_expected_claim_postsc2, + // &claim_post_phase2, + // ); timer_prove.stop(); - let pok_claims_phase2 = (pok_Cz_claim, proof_prod); + // debug_zk + // let pok_claims_phase2 = (pok_Cz_claim, proof_prod); ( R1CSProof { sc_proof_phase1, sc_proof_phase2, - pok_claims_phase2, - proof_eq_sc_phase1, - proof_eq_sc_phase2, + claims_phase2: (*Az_claim, *Bz_claim, *Cz_claim), + // debug_zk + // pok_claims_phase2, + // proof_eq_sc_phase1, + // proof_eq_sc_phase2, proof_eval_vars_at_ry_list, }, [rp, rq_rev, rx, [rw, ry].concat()], @@ -612,16 +618,18 @@ impl R1CSProof { let tau_q = transcript.challenge_vector(b"challenge_tau_q", num_rounds_q); let tau_x = transcript.challenge_vector(b"challenge_tau_x", num_rounds_x); - let rx = + let (_, rx) = self .sc_proof_phase1 - .verify(num_rounds_x + num_rounds_q + num_rounds_p, 3, transcript)?; + .verify(S::field_zero(), num_rounds_x + num_rounds_q + num_rounds_p, 3, transcript)?; + // debug_zk // perform the intermediate sum-check test with claimed Az, Bz, and Cz - let (pok_Cz_claim, proof_prod) = &self.pok_claims_phase2; + // let (pok_Cz_claim, proof_prod) = &self.pok_claims_phase2; - pok_Cz_claim.verify(transcript)?; - proof_prod.verify(transcript)?; + // debug_zk + // pok_Cz_claim.verify(transcript)?; + // proof_prod.verify(transcript)?; // Separate the result rx into rp_round1, rq, and rx let (rx_rev, rq_rev) = rx.split_at(num_rounds_x); @@ -645,19 +653,23 @@ impl R1CSProof { .product(); let _taus_bound_rx = taus_bound_rp * taus_bound_rq * taus_bound_rx; + // debug_zk // verify proof that expected_claim_post_phase1 == claim_post_phase1 - self.proof_eq_sc_phase1.verify(transcript)?; + // self.proof_eq_sc_phase1.verify(transcript)?; // derive three public challenges and then derive a joint claim - let _r_A: S = transcript.challenge_scalar(b"challenge_Az"); - let _r_B: S = transcript.challenge_scalar(b"challenge_Bz"); - let _r_C: S = transcript.challenge_scalar(b"challenge_Cz"); + let r_A: S = transcript.challenge_scalar(b"challenge_Az"); + let r_B: S = transcript.challenge_scalar(b"challenge_Bz"); + let r_C: S = transcript.challenge_scalar(b"challenge_Cz"); + + let (Az_claim, Bz_claim, Cz_claim) = self.claims_phase2; + let claim_phase2 = r_A * Az_claim + r_B * Bz_claim + r_C * 
Cz_claim; // verify the joint claim with a sum-check protocol - let ry = + let (_, ry) = self .sc_proof_phase2 - .verify(num_rounds_y + num_rounds_w + num_rounds_p, 3, transcript)?; + .verify(claim_phase2, num_rounds_y + num_rounds_w + num_rounds_p, 3, transcript)?; // Separate ry into rp, rw, and ry let (ry_rev, rw) = ry.split_at(num_rounds_y); @@ -747,8 +759,9 @@ impl R1CSProof { timer_commit_opening.stop(); + // debug_zk // verify proof that expected_claim_post_phase2 == claim_post_phase2 - self.proof_eq_sc_phase2.verify(transcript)?; + // self.proof_eq_sc_phase2.verify(transcript)?; Ok([rp, rq_rev, rx, [rw, ry].concat()]) } diff --git a/spartan_parallel/src/sparse_mlpoly.rs b/spartan_parallel/src/sparse_mlpoly.rs index cff1f6c6..1f181685 100644 --- a/spartan_parallel/src/sparse_mlpoly.rs +++ b/spartan_parallel/src/sparse_mlpoly.rs @@ -106,10 +106,8 @@ impl DerefsEvalProof { let proof_derefs = PolyEvalProof::prove( joint_poly, - None, &r_joint, &eval_joint, - None, transcript, random_tape, ); @@ -764,10 +762,8 @@ impl HashLayerProof { let proof_ops = PolyEvalProof::prove( &dense.comb_ops, - None, &r_joint_ops, &joint_claim_eval_ops, - None, transcript, random_tape, ); @@ -791,10 +787,8 @@ impl HashLayerProof { let proof_mem = PolyEvalProof::prove( &dense.comb_mem, - None, &r_joint_mem, &joint_claim_eval_mem, - None, transcript, random_tape, ); diff --git a/spartan_parallel/src/sumcheck.rs b/spartan_parallel/src/sumcheck.rs index a3b891a9..cff58b7f 100644 --- a/spartan_parallel/src/sumcheck.rs +++ b/spartan_parallel/src/sumcheck.rs @@ -6,7 +6,6 @@ use crate::scalar::SpartanExtensionField; use super::dense_mlpoly::DensePolynomial; use super::errors::ProofVerifyError; -use super::nizk::DotProductProof; use super::random::RandomTape; use super::transcript::{AppendToTranscript, ProofTranscript}; use super::unipoly::{CompressedUniPoly, UniPoly}; @@ -70,67 +69,6 @@ impl SumcheckInstanceProof { } } -#[derive(Serialize, Deserialize, Debug)] -pub struct R1CSSumcheckInstanceProof { - proofs: Vec>, -} - -impl R1CSSumcheckInstanceProof { - pub fn new(proofs: Vec>) -> Self { - R1CSSumcheckInstanceProof { proofs } - } - - pub fn verify( - &self, - num_rounds: usize, - degree_bound: usize, - transcript: &mut Transcript, - ) -> Result, ProofVerifyError> { - let mut r: Vec = Vec::new(); - - for i in 0..num_rounds { - // derive the verifier's challenge for the next round - let r_i = transcript.challenge_scalar(b"challenge_nextround"); - - // verify the proof of sum-check and evals - let _res = { - // produce two weights - let w: Vec = transcript.challenge_vector(b"combine_two_claims_to_one", 2); - - let a = { - // the vector to use to decommit for sum-check test - let a_sc = { - let mut a = vec![S::field_one(); degree_bound + 1]; - a[0] = a[0] + S::field_one(); - a - }; - - // the vector to use to decommit for evaluation - let a_eval = { - let mut a = vec![S::field_one(); degree_bound + 1]; - for j in 1..a.len() { - a[j] = a[j - 1] * r_i; - } - a - }; - - // take weighted sum of the two vectors using w - assert_eq!(a_sc.len(), a_eval.len()); - (0..a_sc.len()) - .map(|i| w[0] * a_sc[i] + w[1] * a_eval[i]) - .collect::>() - }; - - self.proofs[i].verify(transcript, &a).is_ok() - }; - - r.push(r_i); - } - - Ok(r) - } -} - impl SumcheckInstanceProof { pub fn prove_cubic( claim: &S, @@ -379,9 +317,7 @@ impl SumcheckInstanceProof { claims_dotp, ) } -} -impl R1CSSumcheckInstanceProof { pub fn prove_cubic_disjoint_rounds( claim: &S, num_rounds: usize, @@ -410,7 +346,7 @@ impl R1CSSumcheckInstanceProof { let mut 
claim_per_round = *claim; let mut r: Vec = Vec::new(); - let mut proofs: Vec> = Vec::new(); + let mut polys: Vec> = Vec::new(); let mut inputs_len = num_rounds_y_max.pow2(); let mut witness_secs_len = num_rounds_w.pow2(); @@ -540,8 +476,12 @@ impl R1CSSumcheckInstanceProof { poly }; + // append the prover's message to the transcript + poly.append_to_transcript(b"poly", transcript); + //derive the verifier's challenge for the next round let r_j = transcript.challenge_scalar(b"challenge_nextround"); + r.push(r_j); // bound all tables to the verifier's challenege if mode == MODE_P { @@ -551,62 +491,12 @@ impl R1CSSumcheckInstanceProof { poly_B.bound_poly(&r_j, mode); } poly_C.bound_poly(&r_j, mode); - - // produce a proof of sum-check and of evaluation - let (proof, claim_next_round) = { - let eval = poly.evaluate(&r_j); - - // we need to prove the following under homomorphic commitments: - // (1) poly(0) + poly(1) = claim_per_round - // (2) poly(r_j) = eval - - // Our technique is to leverage dot product proofs: - // (1) we can prove: = claim_per_round - // (2) we can prove: = transcript.challenge_vector(b"combine_two_claims_to_one", 2); - - // compute a weighted sum of the RHS - let target = w[0] * claim_per_round + w[1] * eval; - - let a = { - // the vector to use to decommit for sum-check test - let a_sc = { - let mut a = vec![S::field_one(); poly.degree() + 1]; - a[0] = a[0] + S::field_one(); - a - }; - - // the vector to use to decommit for evaluation - let a_eval = { - let mut a = vec![S::field_one(); poly.degree() + 1]; - for j in 1..a.len() { - a[j] = a[j - 1] * r_j; - } - a - }; - - // take weighted sum of the two vectors using w - assert_eq!(a_sc.len(), a_eval.len()); - (0..a_sc.len()) - .map(|i| w[0] * a_sc[i] + w[1] * a_eval[i]) - .collect::>() - }; - - let proof = DotProductProof::prove(transcript, random_tape, &poly.as_vec(), &a, &target); - - (proof, eval) - }; - - proofs.push(proof); - claim_per_round = claim_next_round; - r.push(r_j); + claim_per_round = poly.evaluate(&r_j); + polys.push(poly.compress()); } ( - R1CSSumcheckInstanceProof::new(proofs), + SumcheckInstanceProof::new(polys), r, vec![ poly_A[0], @@ -653,7 +543,7 @@ impl R1CSSumcheckInstanceProof { let mut claim_per_round = *claim; let mut r: Vec = Vec::new(); - let mut proofs: Vec> = Vec::new(); + let mut polys: Vec> = Vec::new(); let mut cons_len = num_rounds_x_max.pow2(); let mut proof_len = num_rounds_q_max.pow2(); @@ -798,8 +688,12 @@ impl R1CSSumcheckInstanceProof { poly }; + // append the prover's message to the transcript + poly.append_to_transcript(b"poly", transcript); + //derive the verifier's challenge for the next round let r_j = transcript.challenge_scalar(b"challenge_nextround"); + r.push(r_j); // bound all tables to the verifier's challenege if mode == 1 { @@ -812,61 +706,12 @@ impl R1CSSumcheckInstanceProof { poly_B.bound_poly(&r_j, mode); poly_C.bound_poly(&r_j, mode); poly_D.bound_poly(&r_j, mode); - - let (proof, claim_next_round) = { - let eval = poly.evaluate(&r_j); - - // we need to prove the following under homomorphic commitments: - // (1) poly(0) + poly(1) = claim_per_round - // (2) poly(r_j) = eval - - // Our technique is to leverage dot product proofs: - // (1) we can prove: = claim_per_round - // (2) we can prove: = transcript.challenge_vector(b"combine_two_claims_to_one", 2); - - // compute a weighted sum of the RHS - let target = w[0] * claim_per_round + w[1] * eval; - - let a = { - // the vector to use to decommit for sum-check test - let a_sc = { - let mut a = vec![S::field_one(); 
poly.degree() + 1]; - a[0] = a[0] + S::field_one(); - a - }; - - // the vector to use to decommit for evaluation - let a_eval = { - let mut a = vec![S::field_one(); poly.degree() + 1]; - for j in 1..a.len() { - a[j] = a[j - 1] * r_j; - } - a - }; - - // take weighted sum of the two vectors using w - assert_eq!(a_sc.len(), a_eval.len()); - (0..a_sc.len()) - .map(|i| w[0] * a_sc[i] + w[1] * a_eval[i]) - .collect::>() - }; - - let proof = DotProductProof::prove(transcript, random_tape, &poly.as_vec(), &a, &target); - - (proof, eval) - }; - - proofs.push(proof); - claim_per_round = claim_next_round; - r.push(r_j); + claim_per_round = poly.evaluate(&r_j); + polys.push(poly.compress()); } ( - R1CSSumcheckInstanceProof::new(proofs), + SumcheckInstanceProof::new(polys), r, vec![ poly_Ap[0] * poly_Aq[0] * poly_Ax[0], From a8ba63f53b58532d088cb732a362562cac80ec39 Mon Sep 17 00:00:00 2001 From: Kunming Jiang Date: Fri, 6 Dec 2024 11:44:58 -0500 Subject: [PATCH 12/22] Remove additional commitment proofs --- spartan_parallel/src/lib.rs | 8 ++++++-- spartan_parallel/src/r1csproof.rs | 8 ++++++-- 2 files changed, 12 insertions(+), 4 deletions(-) diff --git a/spartan_parallel/src/lib.rs b/spartan_parallel/src/lib.rs index eb4b747c..7d75e430 100644 --- a/spartan_parallel/src/lib.rs +++ b/spartan_parallel/src/lib.rs @@ -610,7 +610,7 @@ pub struct SNARK { perm_poly_poly_list: Vec, proof_eval_perm_poly_prod_list: Vec>, - shift_proof: ShiftProofs, + // shift_proof: ShiftProofs, io_proof: IOProofs, } @@ -2260,6 +2260,7 @@ impl SNARK { shifted_polys.push(&vir_mem_addr_w3_shifted_prover.poly_w[0]); header_len_list.push(6); } + /* let shift_proof = ShiftProofs::prove( orig_polys, shifted_polys, @@ -2268,6 +2269,7 @@ impl SNARK { &mut random_tape, ); shift_proof + */ }; timer_proof.stop(); @@ -2314,7 +2316,7 @@ impl SNARK { perm_poly_poly_list, proof_eval_perm_poly_prod_list, - shift_proof, + // shift_proof, io_proof, } } @@ -3256,9 +3258,11 @@ impl SNARK { header_len_list.push(6); } + /* self .shift_proof .verify(poly_size_list, shift_size_list, header_len_list, transcript)?; + */ } timer_proof.stop(); diff --git a/spartan_parallel/src/r1csproof.rs b/spartan_parallel/src/r1csproof.rs index 866f1642..c0a07f5b 100644 --- a/spartan_parallel/src/r1csproof.rs +++ b/spartan_parallel/src/r1csproof.rs @@ -23,7 +23,7 @@ pub struct R1CSProof { // pok_claims_phase2: (KnowledgeProof, ProductProof), // proof_eq_sc_phase1: EqualityProof, // proof_eq_sc_phase2: EqualityProof, - proof_eval_vars_at_ry_list: Vec>, + // proof_eval_vars_at_ry_list: Vec>, } impl R1CSProof { @@ -464,6 +464,7 @@ impl R1CSProof { } } + /* let proof_eval_vars_at_ry_list = PolyEvalProof::prove_batched_instances_disjoint_rounds( &poly_list, &num_proofs_list, @@ -474,6 +475,7 @@ impl R1CSProof { transcript, random_tape, ); + */ // Bind the resulting witness list to rp // poly_vars stores the result of each witness matrix bounded to (rq_short ++ ry) @@ -566,7 +568,7 @@ impl R1CSProof { // pok_claims_phase2, // proof_eq_sc_phase1, // proof_eq_sc_phase2, - proof_eval_vars_at_ry_list, + // proof_eval_vars_at_ry_list, }, [rp, rq_rev, rx, [rw, ry].concat()], ) @@ -706,6 +708,7 @@ impl R1CSProof { } } + /* PolyEvalProof::verify_batched_instances_disjoint_rounds( &self.proof_eval_vars_at_ry_list, &num_proofs_list, @@ -714,6 +717,7 @@ impl R1CSProof { &rq, &ry, )?; + */ // Then on rp for p in 0..num_instances { From 7d123387780770b2f0660bd4b384ad7219fabb4a Mon Sep 17 00:00:00 2001 From: Ray Gao Date: Fri, 6 Dec 2024 12:57:13 -0500 Subject: [PATCH 13/22] fmtr --- 
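The sum-check changes above settle on the standard non-zero-knowledge round flow: each round the prover appends the round polynomial to the transcript, the verifier checks p(0) + p(1) against the running claim, a challenge r is drawn, the claim becomes p(r), and every table is bound to r. A self-contained toy version of that loop for a multilinear polynomial given by its evaluation table, using a small prime modulus and fixed stand-in challenges (nothing here is crate code):

// Toy sketch only.
const P: u128 = 101;
fn addm(a: u128, b: u128) -> u128 { (a + b) % P }
fn subm(a: u128, b: u128) -> u128 { (a + P - b) % P }
fn mulm(a: u128, b: u128) -> u128 { a * b % P }

// Bind the top variable of an evaluation table to r:
// Z'[i] = Z[i] + r * (Z[i + half] - Z[i]).
fn bind_top(z: &mut Vec<u128>, r: u128) {
  let half = z.len() / 2;
  for i in 0..half {
    z[i] = addm(z[i], mulm(r, subm(z[i + half], z[i])));
  }
  z.truncate(half);
}

fn main() {
  let mut z = vec![3, 1, 4, 1, 5, 9, 2, 6]; // evaluations on {0,1}^3
  let mut claim: u128 = z.iter().fold(0, |a, v| addm(a, *v)); // claimed sum over the cube
  let challenges = [10, 20, 30]; // stand-ins for transcript challenges

  for &r in &challenges {
    let half = z.len() / 2;
    // The prover's round polynomial p(t) is linear here, so p(0) and p(1) determine it.
    let p0: u128 = z[..half].iter().fold(0, |a, v| addm(a, *v));
    let p1: u128 = z[half..].iter().fold(0, |a, v| addm(a, *v));
    // Verifier-side consistency test, then reduce the claim to p(r) and bind the table.
    assert_eq!(addm(p0, p1), claim);
    claim = addm(p0, mulm(r, subm(p1, p0)));
    bind_top(&mut z, r);
  }

  // After the last round the table is a single value: the polynomial at the challenge point.
  assert_eq!(z, vec![claim]);
}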
spartan_parallel/src/dense_mlpoly.rs | 18 ++++----- spartan_parallel/src/r1csproof.rs | 53 ++++++++++++++------------- spartan_parallel/src/sparse_mlpoly.rs | 9 +---- 3 files changed, 39 insertions(+), 41 deletions(-) diff --git a/spartan_parallel/src/dense_mlpoly.rs b/spartan_parallel/src/dense_mlpoly.rs index 0d0a09f8..a94924c1 100644 --- a/spartan_parallel/src/dense_mlpoly.rs +++ b/spartan_parallel/src/dense_mlpoly.rs @@ -321,8 +321,8 @@ impl PolyEvalProof { pub fn prove( poly: &DensePolynomial, - r: &[S], // point at which the polynomial is evaluated - _Zr: &S, // evaluation of \widetilde{Z}(r) + r: &[S], // point at which the polynomial is evaluated + _Zr: &S, // evaluation of \widetilde{Z}(r) transcript: &mut Transcript, _random_tape: &mut RandomTape, ) -> PolyEvalProof { @@ -383,8 +383,8 @@ impl PolyEvalProof { // Evaluation of multiple points on the same instance pub fn prove_batched_points( poly: &DensePolynomial, - r_list: Vec>, // point at which the polynomial is evaluated - Zr_list: Vec, // evaluation of \widetilde{Z}(r) on each point + r_list: Vec>, // point at which the polynomial is evaluated + Zr_list: Vec, // evaluation of \widetilde{Z}(r) on each point transcript: &mut Transcript, _random_tape: &mut RandomTape, ) -> Vec> { @@ -440,7 +440,7 @@ impl PolyEvalProof { // compute vector-matrix product between L and Z viewed as a matrix let LZ = poly.bound(L); - proof_list.push(PolyEvalProof{ v: LZ }); + proof_list.push(PolyEvalProof { v: LZ }); } proof_list @@ -497,8 +497,8 @@ impl PolyEvalProof { // Size of each instance might be different, but all are larger than the evaluation point pub fn prove_batched_instances( poly_list: &Vec>, // list of instances - r_list: Vec<&Vec>, // point at which the polynomial is evaluated - Zr_list: &Vec, // evaluation of \widetilde{Z}(r) on each instance + r_list: Vec<&Vec>, // point at which the polynomial is evaluated + Zr_list: &Vec, // evaluation of \widetilde{Z}(r) on each instance transcript: &mut Transcript, _random_tape: &mut RandomTape, ) -> Vec> { @@ -563,7 +563,7 @@ impl PolyEvalProof { for v in LZ_list.into_iter() { proof_list.push(PolyEvalProof { v }); } - + proof_list } @@ -698,7 +698,7 @@ impl PolyEvalProof { let mut proof_list = Vec::new(); for v in LZ_list.into_iter() { - proof_list.push(PolyEvalProof{ v }); + proof_list.push(PolyEvalProof { v }); } proof_list diff --git a/spartan_parallel/src/r1csproof.rs b/spartan_parallel/src/r1csproof.rs index c0a07f5b..6671e392 100644 --- a/spartan_parallel/src/r1csproof.rs +++ b/spartan_parallel/src/r1csproof.rs @@ -88,23 +88,22 @@ impl R1CSProof { let comb_func = |poly_A_comp: &S, poly_B_comp: &S, poly_C_comp: &S| -> S { *poly_A_comp * *poly_B_comp * *poly_C_comp }; - let (sc_proof_phase_two, r, claims) = - SumcheckInstanceProof::::prove_cubic_disjoint_rounds( - claim, - num_rounds, - num_rounds_y_max, - num_rounds_w, - num_rounds_p, - single_inst, - num_witness_secs, - num_inputs, - evals_eq, - evals_ABC, - evals_z, - comb_func, - transcript, - random_tape, - ); + let (sc_proof_phase_two, r, claims) = SumcheckInstanceProof::::prove_cubic_disjoint_rounds( + claim, + num_rounds, + num_rounds_y_max, + num_rounds_w, + num_rounds_p, + single_inst, + num_witness_secs, + num_inputs, + evals_eq, + evals_ABC, + evals_z, + comb_func, + transcript, + random_tape, + ); (sc_proof_phase_two, r, claims) } @@ -620,10 +619,12 @@ impl R1CSProof { let tau_q = transcript.challenge_vector(b"challenge_tau_q", num_rounds_q); let tau_x = transcript.challenge_vector(b"challenge_tau_x", num_rounds_x); - let (_, 
rx) = - self - .sc_proof_phase1 - .verify(S::field_zero(), num_rounds_x + num_rounds_q + num_rounds_p, 3, transcript)?; + let (_, rx) = self.sc_proof_phase1.verify( + S::field_zero(), + num_rounds_x + num_rounds_q + num_rounds_p, + 3, + transcript, + )?; // debug_zk // perform the intermediate sum-check test with claimed Az, Bz, and Cz @@ -668,10 +669,12 @@ impl R1CSProof { let claim_phase2 = r_A * Az_claim + r_B * Bz_claim + r_C * Cz_claim; // verify the joint claim with a sum-check protocol - let (_, ry) = - self - .sc_proof_phase2 - .verify(claim_phase2, num_rounds_y + num_rounds_w + num_rounds_p, 3, transcript)?; + let (_, ry) = self.sc_proof_phase2.verify( + claim_phase2, + num_rounds_y + num_rounds_w + num_rounds_p, + 3, + transcript, + )?; // Separate ry into rp, rw, and ry let (ry_rev, rw) = ry.split_at(num_rounds_y); diff --git a/spartan_parallel/src/sparse_mlpoly.rs b/spartan_parallel/src/sparse_mlpoly.rs index 1f181685..830d2803 100644 --- a/spartan_parallel/src/sparse_mlpoly.rs +++ b/spartan_parallel/src/sparse_mlpoly.rs @@ -104,13 +104,8 @@ impl DerefsEvalProof { // decommit the joint polynomial at r_joint S::append_field_to_transcript(b"joint_claim_eval", transcript, eval_joint); - let proof_derefs = PolyEvalProof::prove( - joint_poly, - &r_joint, - &eval_joint, - transcript, - random_tape, - ); + let proof_derefs = + PolyEvalProof::prove(joint_poly, &r_joint, &eval_joint, transcript, random_tape); proof_derefs } From ce6cd4e8b6ca170e3cf09fcf7d0f6ea8bd08abaa Mon Sep 17 00:00:00 2001 From: Ray Gao Date: Mon, 9 Dec 2024 00:55:20 -0500 Subject: [PATCH 14/22] Remove debug flag --- spartan_parallel/src/r1csproof.rs | 70 +------------------------------ 1 file changed, 1 insertion(+), 69 deletions(-) diff --git a/spartan_parallel/src/r1csproof.rs b/spartan_parallel/src/r1csproof.rs index 6671e392..acbc054e 100644 --- a/spartan_parallel/src/r1csproof.rs +++ b/spartan_parallel/src/r1csproof.rs @@ -20,9 +20,6 @@ pub struct R1CSProof { sc_proof_phase2: SumcheckInstanceProof, claims_phase2: (S, S, S), // debug_zk - // pok_claims_phase2: (KnowledgeProof, ProductProof), - // proof_eq_sc_phase1: EqualityProof, - // proof_eq_sc_phase2: EqualityProof, // proof_eval_vars_at_ry_list: Vec>, } @@ -272,39 +269,9 @@ impl R1CSProof { random_tape.random_scalar(b"prod_Az_Bz_blind"), ); - // debug_zk - // let pok_Cz_claim = { KnowledgeProof::prove(transcript, random_tape, Cz_claim, &Cz_blind) }; - - // debug_zk - // let proof_prod = { - // let prod = *Az_claim * *Bz_claim; - // ProductProof::prove( - // transcript, - // random_tape, - // Az_claim, - // &Az_blind, - // Bz_claim, - // &Bz_blind, - // &prod, - // &prod_Az_Bz_blind, - // ) - // }; - // prove the final step of sum-check #1 let taus_bound_rx = tau_claim; - let blind_expected_claim_postsc1 = *taus_bound_rx * (prod_Az_Bz_blind - Cz_blind); - let claim_post_phase1 = (*Az_claim * *Bz_claim - *Cz_claim) * *taus_bound_rx; - - // debug_zk - // let proof_eq_sc_phase1 = EqualityProof::prove( - // transcript, - // random_tape, - // &claim_post_phase1, - // &blind_expected_claim_postsc1, - // &claim_post_phase1, - // ); - // Separate the result rx into rp, rq, and rx let (rx_rev, rq_rev) = rx.split_at(num_rounds_x); let (rq_rev, rp) = rq_rev.split_at(num_rounds_q); @@ -540,33 +507,14 @@ impl R1CSProof { let poly_vars = DensePolynomial::new(eval_vars_comb_list); let _eval_vars_at_ry = poly_vars.evaluate(&rp); - // prove the final step of sum-check #2 - let blind_expected_claim_postsc2 = S::field_zero(); - let claim_post_phase2 = claims_phase2[0] * 
claims_phase2[1] * claims_phase2[2]; - - // debug_zk - // let proof_eq_sc_phase2 = EqualityProof::prove( - // transcript, - // random_tape, - // &claim_post_phase2, - // &blind_expected_claim_postsc2, - // &claim_post_phase2, - // ); - timer_prove.stop(); - // debug_zk - // let pok_claims_phase2 = (pok_Cz_claim, proof_prod); - ( R1CSProof { sc_proof_phase1, sc_proof_phase2, claims_phase2: (*Az_claim, *Bz_claim, *Cz_claim), // debug_zk - // pok_claims_phase2, - // proof_eq_sc_phase1, - // proof_eq_sc_phase2, // proof_eval_vars_at_ry_list, }, [rp, rq_rev, rx, [rw, ry].concat()], @@ -626,14 +574,6 @@ impl R1CSProof { transcript, )?; - // debug_zk - // perform the intermediate sum-check test with claimed Az, Bz, and Cz - // let (pok_Cz_claim, proof_prod) = &self.pok_claims_phase2; - - // debug_zk - // pok_Cz_claim.verify(transcript)?; - // proof_prod.verify(transcript)?; - // Separate the result rx into rp_round1, rq, and rx let (rx_rev, rq_rev) = rx.split_at(num_rounds_x); let (rq_rev, rp_round1) = rq_rev.split_at(num_rounds_q); @@ -655,11 +595,7 @@ impl R1CSProof { .map(|i| rx_rev[i] * tau_x[i] + (S::field_one() - rx_rev[i]) * (S::field_one() - tau_x[i])) .product(); let _taus_bound_rx = taus_bound_rp * taus_bound_rq * taus_bound_rx; - - // debug_zk - // verify proof that expected_claim_post_phase1 == claim_post_phase1 - // self.proof_eq_sc_phase1.verify(transcript)?; - + // derive three public challenges and then derive a joint claim let r_A: S = transcript.challenge_scalar(b"challenge_Az"); let r_B: S = transcript.challenge_scalar(b"challenge_Bz"); @@ -766,10 +702,6 @@ impl R1CSProof { timer_commit_opening.stop(); - // debug_zk - // verify proof that expected_claim_post_phase2 == claim_post_phase2 - // self.proof_eq_sc_phase2.verify(transcript)?; - Ok([rp, rq_rev, rx, [rw, ry].concat()]) } } From aa193624178ce7f51165d6f7e4156e2d8c3c26b3 Mon Sep 17 00:00:00 2001 From: Ray Gao Date: Mon, 9 Dec 2024 00:56:00 -0500 Subject: [PATCH 15/22] Remove blinds --- spartan_parallel/src/r1csproof.rs | 15 ++++----------- 1 file changed, 4 insertions(+), 11 deletions(-) diff --git a/spartan_parallel/src/r1csproof.rs b/spartan_parallel/src/r1csproof.rs index acbc054e..2497081f 100644 --- a/spartan_parallel/src/r1csproof.rs +++ b/spartan_parallel/src/r1csproof.rs @@ -262,15 +262,8 @@ impl R1CSProof { &poly_Cz.index(0, 0, 0, 0), ); - let (Az_blind, Bz_blind, Cz_blind, prod_Az_Bz_blind) = ( - random_tape.random_scalar(b"Az_blind"), - random_tape.random_scalar(b"Bz_blind"), - random_tape.random_scalar(b"Cz_blind"), - random_tape.random_scalar(b"prod_Az_Bz_blind"), - ); - // prove the final step of sum-check #1 - let taus_bound_rx = tau_claim; + let _taus_bound_rx = tau_claim; // Separate the result rx into rp, rq, and rx let (rx_rev, rq_rev) = rx.split_at(num_rounds_x); @@ -345,7 +338,7 @@ impl R1CSProof { let mut eq_p_rp_poly = DensePolynomial::new(EqPolynomial::new(rp).evals()); // Sumcheck 2: (rA + rB + rC) * Z * eq(p) = e - let (sc_proof_phase2, ry, claims_phase2) = R1CSProof::prove_phase_two( + let (sc_proof_phase2, ry, _claims_phase2) = R1CSProof::prove_phase_two( num_rounds_y + num_rounds_w + num_rounds_p, num_rounds_y, num_rounds_w, @@ -579,7 +572,7 @@ impl R1CSProof { let (rq_rev, rp_round1) = rq_rev.split_at(num_rounds_q); let rx: Vec = rx_rev.iter().copied().rev().collect(); let rq_rev = rq_rev.to_vec(); - let rq: Vec = rq_rev.iter().copied().rev().collect(); + let _rq: Vec = rq_rev.iter().copied().rev().collect(); let rp_round1 = rp_round1.to_vec(); // taus_bound_rx is really taus_bound_rx_rq_rp @@ 
-595,7 +588,7 @@ impl R1CSProof { .map(|i| rx_rev[i] * tau_x[i] + (S::field_one() - rx_rev[i]) * (S::field_one() - tau_x[i])) .product(); let _taus_bound_rx = taus_bound_rp * taus_bound_rq * taus_bound_rx; - + // derive three public challenges and then derive a joint claim let r_A: S = transcript.challenge_scalar(b"challenge_Az"); let r_B: S = transcript.challenge_scalar(b"challenge_Bz"); From e9057206b703b335b526b068a1e5f505ef58cd30 Mon Sep 17 00:00:00 2001 From: Ray Gao Date: Mon, 9 Dec 2024 01:09:15 -0500 Subject: [PATCH 16/22] Remove random tape --- spartan_parallel/src/lib.rs | 3 --- spartan_parallel/src/r1csproof.rs | 11 +---------- spartan_parallel/src/sumcheck.rs | 2 -- 3 files changed, 1 insertion(+), 15 deletions(-) diff --git a/spartan_parallel/src/lib.rs b/spartan_parallel/src/lib.rs index 7d75e430..3c41496b 100644 --- a/spartan_parallel/src/lib.rs +++ b/spartan_parallel/src/lib.rs @@ -1854,7 +1854,6 @@ impl SNARK { block_wit_secs, &block_inst.inst, transcript, - &mut random_tape, ) }; @@ -1969,7 +1968,6 @@ impl SNARK { ], &pairwise_check_inst.inst, transcript, - &mut random_tape, ) }; @@ -2096,7 +2094,6 @@ impl SNARK { ], &perm_root_inst.inst, transcript, - &mut random_tape, ) }; diff --git a/spartan_parallel/src/r1csproof.rs b/spartan_parallel/src/r1csproof.rs index 2497081f..05a52d94 100644 --- a/spartan_parallel/src/r1csproof.rs +++ b/spartan_parallel/src/r1csproof.rs @@ -1,6 +1,6 @@ #![allow(clippy::too_many_arguments)] use super::custom_dense_mlpoly::DensePolynomialPqx; -use super::dense_mlpoly::{DensePolynomial, EqPolynomial, PolyEvalProof}; +use super::dense_mlpoly::{DensePolynomial, EqPolynomial}; use super::errors::ProofVerifyError; use super::math::Math; use super::r1csinstance::R1CSInstance; @@ -19,7 +19,6 @@ pub struct R1CSProof { sc_proof_phase1: SumcheckInstanceProof, sc_proof_phase2: SumcheckInstanceProof, claims_phase2: (S, S, S), - // debug_zk // proof_eval_vars_at_ry_list: Vec>, } @@ -38,7 +37,6 @@ impl R1CSProof { evals_Bz: &mut DensePolynomialPqx, evals_Cz: &mut DensePolynomialPqx, transcript: &mut Transcript, - random_tape: &mut RandomTape, ) -> (SumcheckInstanceProof, Vec, Vec) { let comb_func = |poly_A_comp: &S, poly_B_comp: &S, poly_C_comp: &S, poly_D_comp: &S| -> S { *poly_A_comp * (*poly_B_comp * *poly_C_comp - *poly_D_comp) @@ -61,7 +59,6 @@ impl R1CSProof { evals_Cz, comb_func, transcript, - random_tape, ); (sc_proof_phase_one, r, claims) @@ -80,7 +77,6 @@ impl R1CSProof { evals_ABC: &mut DensePolynomialPqx, evals_z: &mut DensePolynomialPqx, transcript: &mut Transcript, - random_tape: &mut RandomTape, ) -> (SumcheckInstanceProof, Vec, Vec) { let comb_func = |poly_A_comp: &S, poly_B_comp: &S, poly_C_comp: &S| -> S { *poly_A_comp * *poly_B_comp * *poly_C_comp @@ -99,7 +95,6 @@ impl R1CSProof { evals_z, comb_func, transcript, - random_tape, ); (sc_proof_phase_two, r, claims) @@ -131,7 +126,6 @@ impl R1CSProof { // INSTANCES inst: &R1CSInstance, transcript: &mut Transcript, - random_tape: &mut RandomTape, ) -> (R1CSProof, [Vec; 4]) { let timer_prove = Timer::new("R1CSProof::prove"); >::append_protocol_name( @@ -243,7 +237,6 @@ impl R1CSProof { &mut poly_Bz, &mut poly_Cz, transcript, - random_tape, ); assert_eq!(poly_tau_p.len(), 1); @@ -351,7 +344,6 @@ impl R1CSProof { &mut ABC_poly, &mut Z_poly, transcript, - random_tape, ); timer_sc_proof_phase2.stop(); @@ -507,7 +499,6 @@ impl R1CSProof { sc_proof_phase1, sc_proof_phase2, claims_phase2: (*Az_claim, *Bz_claim, *Cz_claim), - // debug_zk // proof_eval_vars_at_ry_list, }, [rp, rq_rev, rx, [rw, 
ry].concat()], diff --git a/spartan_parallel/src/sumcheck.rs b/spartan_parallel/src/sumcheck.rs index cff58b7f..57b11cf4 100644 --- a/spartan_parallel/src/sumcheck.rs +++ b/spartan_parallel/src/sumcheck.rs @@ -332,7 +332,6 @@ impl SumcheckInstanceProof { poly_C: &mut DensePolynomialPqx, comb_func: F, transcript: &mut Transcript, - random_tape: &mut RandomTape, ) -> (Self, Vec, Vec) where F: Fn(&S, &S, &S) -> S, @@ -522,7 +521,6 @@ impl SumcheckInstanceProof { poly_D: &mut DensePolynomialPqx, comb_func: F, transcript: &mut Transcript, - random_tape: &mut RandomTape, ) -> (Self, Vec, Vec) where F: Fn(&S, &S, &S, &S) -> S, From 1a4fdb3de65709ffcd9acf0d9fa1afdea6ac2db8 Mon Sep 17 00:00:00 2001 From: Ray Gao Date: Mon, 9 Dec 2024 13:45:46 -0500 Subject: [PATCH 17/22] Remove PolyEvalProof prove, verify logic --- spartan_parallel/src/dense_mlpoly.rs | 587 +++------------------------ 1 file changed, 58 insertions(+), 529 deletions(-) diff --git a/spartan_parallel/src/dense_mlpoly.rs b/spartan_parallel/src/dense_mlpoly.rs index a94924c1..c1c0a5cf 100644 --- a/spartan_parallel/src/dense_mlpoly.rs +++ b/spartan_parallel/src/dense_mlpoly.rs @@ -311,7 +311,7 @@ impl Index for DensePolynomial { #[derive(Clone, Debug, Serialize, Deserialize)] pub struct PolyEvalProof { - v: Vec, + _phantom: S, } impl PolyEvalProof { @@ -320,34 +320,14 @@ impl PolyEvalProof { } pub fn prove( - poly: &DensePolynomial, - r: &[S], // point at which the polynomial is evaluated + _poly: &DensePolynomial, + _r: &[S], // point at which the polynomial is evaluated _Zr: &S, // evaluation of \widetilde{Z}(r) - transcript: &mut Transcript, + _transcript: &mut Transcript, _random_tape: &mut RandomTape, ) -> PolyEvalProof { - >::append_protocol_name( - transcript, - PolyEvalProof::::protocol_name(), - ); - - // assert vectors are of the right size - assert_eq!(poly.get_num_vars(), r.len()); - - let (left_num_vars, right_num_vars) = EqPolynomial::::compute_factored_lens(r.len()); - let L_size = left_num_vars.pow2(); - let R_size = right_num_vars.pow2(); - - // compute the L and R vectors - let eq = EqPolynomial::new(r.to_vec()); - let (L, R) = eq.compute_factored_evals(); - assert_eq!(L.len(), L_size); - assert_eq!(R.len(), R_size); - - // compute vector-matrix product between L and Z viewed as a matrix - let LZ = poly.bound(&L); - - PolyEvalProof { v: LZ } + // TODO: Alternative evaluation proof scheme + PolyEvalProof { _phantom: S::field_zero() } } pub fn verify( @@ -355,16 +335,7 @@ impl PolyEvalProof { transcript: &mut Transcript, r: &[S], // point at which the polynomial is evaluated ) -> Result<(), ProofVerifyError> { - >::append_protocol_name( - transcript, - PolyEvalProof::::protocol_name(), - ); - - // compute L and R - let eq = EqPolynomial::new(r.to_vec()); - let (_L, _R) = eq.compute_factored_evals(); - - // TODO: Alternative PCS Verification + // TODO: Alternative evaluation proof scheme Ok(()) } @@ -374,545 +345,103 @@ impl PolyEvalProof { r: &[S], // point at which the polynomial is evaluated _Zr: &S, // evaluation \widetilde{Z}(r) ) -> Result<(), ProofVerifyError> { - self.verify(transcript, r); - - // TODO: Alternative PCS Verification - Ok(()) + self.verify(transcript, r) } // Evaluation of multiple points on the same instance pub fn prove_batched_points( - poly: &DensePolynomial, - r_list: Vec>, // point at which the polynomial is evaluated - Zr_list: Vec, // evaluation of \widetilde{Z}(r) on each point - transcript: &mut Transcript, + _poly: &DensePolynomial, + _r_list: Vec>, // point at which the polynomial is 
evaluated + _Zr_list: Vec, // evaluation of \widetilde{Z}(r) on each point + _transcript: &mut Transcript, _random_tape: &mut RandomTape, ) -> Vec> { - >::append_protocol_name( - transcript, - PolyEvalProof::::protocol_name(), - ); - - // assert vectors are of the right size - assert_eq!(r_list.len(), Zr_list.len()); - for r in &r_list { - assert_eq!(poly.get_num_vars(), r.len()); - } - - let (left_num_vars, right_num_vars) = EqPolynomial::::compute_factored_lens(r_list[0].len()); - let L_size = left_num_vars.pow2(); - let R_size = right_num_vars.pow2(); - - // compute the L and R vectors - // We can perform batched opening if L is the same, so we regroup the proofs by L vector - // Map from the left half of the r to index in L_list - let mut index_map: HashMap, usize> = HashMap::new(); - let mut L_list: Vec> = Vec::new(); - let mut R_list: Vec> = Vec::new(); - let mut Zc_list: Vec = Vec::new(); - - let c_base: S = transcript.challenge_scalar(b"challenge_c"); - let mut c = S::field_one(); - for i in 0..r_list.len() { - let eq = EqPolynomial::new(r_list[i].to_vec()); - let (Li, Ri) = eq.compute_factored_evals(); - assert_eq!(Li.len(), L_size); - assert_eq!(Ri.len(), R_size); - if let Some(index) = index_map.get(&r_list[i][..left_num_vars]) { - // L already exist - // generate coefficient for RLC - c = c * c_base; - R_list[*index] = (0..R_size).map(|j| R_list[*index][j] + c * Ri[j]).collect(); - Zc_list[*index] = Zc_list[*index] + c * Zr_list[i]; - } else { - let next_index = L_list.len(); - index_map.insert(r_list[i][..left_num_vars].to_vec(), next_index); - L_list.push(Li); - R_list.push(Ri); - Zc_list.push(Zr_list[i]); - } - } - - let mut proof_list = Vec::new(); - for i in 0..L_list.len() { - let L = &L_list[i]; - let _R = &R_list[i]; - // compute vector-matrix product between L and Z viewed as a matrix - let LZ = poly.bound(L); - - proof_list.push(PolyEvalProof { v: LZ }); - } - - proof_list + // TODO: Alternative evaluation proof scheme + vec![] } pub fn verify_plain_batched_points( - proof_list: &Vec>, - transcript: &mut Transcript, - r_list: Vec>, // point at which the polynomial is evaluated - Zr_list: Vec, // commitment to \widetilde{Z}(r) on each point + _proof_list: &Vec>, + _transcript: &mut Transcript, + _r_list: Vec>, // point at which the polynomial is evaluated + _Zr_list: Vec, // commitment to \widetilde{Z}(r) on each point ) -> Result<(), ProofVerifyError> { - >::append_protocol_name( - transcript, - PolyEvalProof::::protocol_name(), - ); - - let (left_num_vars, _) = EqPolynomial::::compute_factored_lens(r_list[0].len()); - - // compute the L and R - // We can perform batched opening if L is the same, so we regroup the proofs by L vector - // Map from the left half of the r to index in L_list - let mut index_map: HashMap, usize> = HashMap::new(); - let mut L_list: Vec> = Vec::new(); - let mut R_list: Vec> = Vec::new(); - let mut Zc_list: Vec = Vec::new(); - - let c_base: S = transcript.challenge_scalar(b"challenge_c"); - let mut c = S::field_one(); - for i in 0..r_list.len() { - let eq = EqPolynomial::new(r_list[i].to_vec()); - let (Li, Ri) = eq.compute_factored_evals(); - if let Some(index) = index_map.get(&r_list[i][..left_num_vars]) { - // L already exist - // generate coefficient for RLC - c = c * c_base; - R_list[*index] = (0..Ri.len()) - .map(|j| R_list[*index][j] + c * Ri[j]) - .collect(); - Zc_list[*index] = Zc_list[*index] + c * Zr_list[i]; - } else { - let next_index = L_list.len(); - index_map.insert(r_list[i][..left_num_vars].to_vec(), next_index); - 
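The grouping logic being removed here folds openings that share the same left vector L: their right vectors and claimed values are combined with powers of a transcript challenge c, so a single dot-product relation implies all of the original claims up to the usual random-linear-combination soundness error. A toy check of the linearity this relies on (illustrative names, toy modulus, not crate code):

// Toy sketch only.
const P: u128 = 101;
fn addm(a: u128, b: u128) -> u128 { (a + b) % P }
fn mulm(a: u128, b: u128) -> u128 { a * b % P }

fn dot(x: &[u128], y: &[u128]) -> u128 {
  x.iter().zip(y).fold(0, |acc, (a, b)| addm(acc, mulm(*a, *b)))
}

fn main() {
  // One shared "LZ" vector and several right vectors with their claimed dot products.
  let lz = vec![5, 9, 2, 7];
  let rs = vec![vec![1, 2, 3, 4], vec![4, 3, 2, 1], vec![7, 0, 1, 6]];
  let zs: Vec<u128> = rs.iter().map(|r| dot(&lz, r)).collect();

  // Fold the right vectors and the claims with powers of one challenge c.
  let c: u128 = 13;
  let mut c_pow = 1u128;
  let mut r_comb = vec![0u128; lz.len()];
  let mut z_comb = 0u128;
  for (r, z) in rs.iter().zip(&zs) {
    for j in 0..r_comb.len() {
      r_comb[j] = addm(r_comb[j], mulm(c_pow, r[j]));
    }
    z_comb = addm(z_comb, mulm(c_pow, *z));
    c_pow = mulm(c_pow, c);
  }

  // A single dot-product relation now covers all of the original claims.
  assert_eq!(dot(&lz, &r_comb), z_comb);
}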
L_list.push(Li); - R_list.push(Ri); - Zc_list.push(Zr_list[i]); - } - } - assert_eq!(L_list.len(), proof_list.len()); - + // TODO: Alternative evaluation proof scheme Ok(()) } // Evaluation on multiple instances, each at different point // Size of each instance might be different, but all are larger than the evaluation point pub fn prove_batched_instances( - poly_list: &Vec>, // list of instances - r_list: Vec<&Vec>, // point at which the polynomial is evaluated - Zr_list: &Vec, // evaluation of \widetilde{Z}(r) on each instance - transcript: &mut Transcript, + _poly_list: &Vec>, // list of instances + _r_list: Vec<&Vec>, // point at which the polynomial is evaluated + _Zr_list: &Vec, // evaluation of \widetilde{Z}(r) on each instance + _transcript: &mut Transcript, _random_tape: &mut RandomTape, ) -> Vec> { - >::append_protocol_name( - transcript, - PolyEvalProof::::protocol_name(), - ); - - // assert vectors are of the right size - assert_eq!(poly_list.len(), r_list.len()); - assert_eq!(poly_list.len(), Zr_list.len()); - - // We need one proof per poly size & R - let mut index_map: HashMap<(usize, Vec), usize> = HashMap::new(); - let mut LZ_list: Vec> = Vec::new(); - let mut Zc_list = Vec::new(); - let mut L_list: Vec> = Vec::new(); - let mut R_list: Vec> = Vec::new(); - - // generate coefficient for RLC - let c_base: S = transcript.challenge_scalar(b"challenge_c"); - let mut c = S::field_one(); - let zero = S::field_zero(); - for i in 0..poly_list.len() { - let poly = &poly_list[i]; - let num_vars = poly.get_num_vars(); - - // compute L and R - let (L, R) = { - let r = r_list[i]; - // pad or trim r to correct length - let r = { - if num_vars >= r.len() { - [vec![zero; num_vars - r.len()], r.to_vec()].concat() - } else { - r[r.len() - num_vars..].to_vec() - } - }; - let eq = EqPolynomial::new(r); - eq.compute_factored_evals() - }; - - if let Some(index) = index_map.get(&(num_vars, R.clone())) { - c = c * c_base; - let LZ = poly.bound(&L); - LZ_list[*index] = (0..LZ.len()) - .map(|j| LZ_list[*index][j] + c * LZ[j]) - .collect(); - Zc_list[*index] = Zc_list[*index] + c * Zr_list[i]; - } else { - index_map.insert((num_vars, R.clone()), LZ_list.len()); - Zc_list.push(Zr_list[i]); - // compute a weighted sum of commitments and L - let LZ = poly.bound(&L); - L_list.push(L); - R_list.push(R); - LZ_list.push(LZ); - } - } - - let mut proof_list = Vec::new(); - for v in LZ_list.into_iter() { - proof_list.push(PolyEvalProof { v }); - } - - proof_list + // TODO: Alternative evaluation proof scheme + vec![] } pub fn verify_plain_batched_instances( - proof_list: &Vec>, - transcript: &mut Transcript, - r_list: Vec<&Vec>, // point at which the polynomial is evaluated - Zr_list: &Vec, // commitment to \widetilde{Z}(r) of each instance - num_vars_list: &Vec, // size of each polynomial + _proof_list: &Vec>, + _transcript: &mut Transcript, + _r_list: Vec<&Vec>, // point at which the polynomial is evaluated + _Zr_list: &Vec, // commitment to \widetilde{Z}(r) of each instance + _num_vars_list: &Vec, // size of each polynomial ) -> Result<(), ProofVerifyError> { - >::append_protocol_name( - transcript, - PolyEvalProof::::protocol_name(), - ); - - // We need one proof per poly size + L size - let mut index_map: HashMap<(usize, Vec), usize> = HashMap::new(); - let mut Zc_list = Vec::new(); - let mut L_list: Vec> = Vec::new(); - let mut R_list: Vec> = Vec::new(); - - // generate coefficient for RLC - let c_base: S = transcript.challenge_scalar(b"challenge_c"); - let mut c = S::field_one(); - let zero = 
S::field_zero(); - - for i in 0..r_list.len() { - let num_vars = num_vars_list[i]; - - // compute L and R - let (L, R) = { - let r = r_list[i]; - // pad or trim r to correct length - let r = { - if num_vars >= r.len() { - [vec![zero; num_vars - r.len()], r.to_vec()].concat() - } else { - r[r.len() - num_vars..].to_vec() - } - }; - let eq = EqPolynomial::new(r); - eq.compute_factored_evals() - }; - - if let Some(index) = index_map.get(&(num_vars, R.clone())) { - c = c * c_base; - Zc_list[*index] = Zc_list[*index] + c * Zr_list[i]; - } else { - Zc_list.push(Zr_list[i]); - // compute a weighted sum of commitments and L - L_list.push(L); - R_list.push(R); - } - } - + // TODO: Alternative evaluation proof scheme Ok(()) } // Like prove_batched_instances, but r is divided into rq ++ ry // Each polynomial is supplemented with num_proofs and num_inputs pub fn prove_batched_instances_disjoint_rounds( - poly_list: &Vec<&DensePolynomial>, - num_proofs_list: &Vec, - num_inputs_list: &Vec, - rq: &[S], - ry: &[S], - Zr_list: &Vec, - transcript: &mut Transcript, + _poly_list: &Vec<&DensePolynomial>, + _num_proofs_list: &Vec, + _num_inputs_list: &Vec, + _rq: &[S], + _ry: &[S], + _Zr_list: &Vec, + _transcript: &mut Transcript, _random_tape: &mut RandomTape, ) -> Vec> { - >::append_protocol_name( - transcript, - PolyEvalProof::::protocol_name(), - ); - - // assert vectors are of the right size - assert_eq!(poly_list.len(), Zr_list.len()); - - // We need one proof per (num_proofs, num_inputs) pair - let mut index_map: HashMap<(usize, usize), usize> = HashMap::new(); - let mut LZ_list: Vec> = Vec::new(); - let mut Zc_list = Vec::new(); - let mut L_list: Vec> = Vec::new(); - let mut R_list = Vec::new(); - - // generate coefficient for RLC - let c_base: S = transcript.challenge_scalar(b"challenge_c"); - let mut c = S::field_one(); - let zero = S::field_zero(); - for i in 0..poly_list.len() { - let poly = poly_list[i]; - let num_proofs = num_proofs_list[i]; - let num_inputs = num_inputs_list[i]; - if let Some(index) = index_map.get(&(num_proofs, num_inputs)) { - c = c * c_base; - let L = &L_list[*index].to_vec(); - let LZ = poly.bound(&L); - LZ_list[*index] = (0..LZ.len()) - .map(|j| LZ_list[*index][j] + c * LZ[j]) - .collect(); - Zc_list[*index] = Zc_list[*index] + c * Zr_list[i]; - } else { - index_map.insert((num_proofs, num_inputs), LZ_list.len()); - Zc_list.push(Zr_list[i]); - let num_vars_q = num_proofs.log_2(); - let num_vars_y = num_inputs.log_2(); - // pad or trim rq and ry to correct length - let (L, R) = { - let ry_short = { - if num_vars_y >= ry.len() { - let ry_pad = &vec![zero; num_vars_y - ry.len()]; - [ry_pad, ry].concat() - } - // Else ry_short is the last w.num_inputs[p].log_2() entries of ry - // thus, to obtain the actual ry, need to multiply by (1 - ry2)(1 - ry3)..., which is ry_factors[num_rounds_y - w.num_inputs[p]] - else { - ry[ry.len() - num_vars_y..].to_vec() - } - }; - let rq_short = rq[rq.len() - num_vars_q..].to_vec(); - let r = [rq_short, ry_short.clone()].concat(); - let eq = EqPolynomial::new(r); - eq.compute_factored_evals() - }; - // compute a weighted sum of commitments and L - let LZ = poly.bound(&L); - L_list.push(L); - R_list.push(R); - LZ_list.push(LZ); - } - } - - let mut proof_list = Vec::new(); - for v in LZ_list.into_iter() { - proof_list.push(PolyEvalProof { v }); - } - - proof_list + // TODO: Alternative evaluation proof scheme + vec![] } pub fn verify_batched_instances_disjoint_rounds( - proof_list: &Vec>, - num_proofs_list: &Vec, - num_inputs_list: &Vec, - 
transcript: &mut Transcript, - rq: &[S], - ry: &[S], + _proof_list: &Vec>, + _num_proofs_list: &Vec, + _num_inputs_list: &Vec, + _transcript: &mut Transcript, + _rq: &[S], + _ry: &[S], ) -> Result<(), ProofVerifyError> { - >::append_protocol_name( - transcript, - PolyEvalProof::::protocol_name(), - ); - - // We need one proof per poly size - let mut index_map: HashMap<(usize, usize), usize> = HashMap::new(); - let mut LZ_list: Vec = Vec::new(); - let mut L_list = Vec::new(); - let mut R_list = Vec::new(); - - // generate coefficient for RLC - let c_base: S = transcript.challenge_scalar(b"challenge_c"); - let mut c = S::field_one(); - let zero = S::field_zero(); - - for i in 0..num_proofs_list.len() { - let num_proofs = num_proofs_list[i]; - let num_inputs = num_inputs_list[i]; - if let Some(index) = index_map.get(&(num_proofs, num_inputs)) { - c = c * c_base; - let _L = &L_list[*index]; - - let LZ = S::field_zero(); - LZ_list[*index] = LZ_list[*index] + c * LZ; - } else { - index_map.insert((num_proofs, num_inputs), LZ_list.len()); - let num_vars_q = num_proofs.log_2(); - let num_vars_y = num_inputs.log_2(); - // pad or trim rq and ry to correct length - let (L, R) = { - let ry_short = { - if num_vars_y >= ry.len() { - let ry_pad = &vec![zero; num_vars_y - ry.len()]; - [ry_pad, ry].concat() - } - // Else ry_short is the last w.num_inputs[p].log_2() entries of ry - // thus, to obtain the actual ry, need to multiply by (1 - ry2)(1 - ry3)..., which is ry_factors[num_rounds_y - w.num_inputs[p]] - else { - ry[ry.len() - num_vars_y..].to_vec() - } - }; - let rq_short = rq[rq.len() - num_vars_q..].to_vec(); - let r = [rq_short, ry_short.clone()].concat(); - let eq = EqPolynomial::new(r); - eq.compute_factored_evals() - }; - // compute a weighted sum of commitments and L - let LZ = S::field_zero(); - L_list.push(L); - R_list.push(R); - LZ_list.push(LZ); - } - } - - assert_eq!(LZ_list.len(), proof_list.len()); - - // Verify proofs - for i in 0..LZ_list.len() { - let R = &R_list[i]; - - proof_list[i].verify(transcript, R)?; - } - + // TODO: Alternative evaluation proof scheme Ok(()) } // Treat the polynomial(s) as univariate and open on a single point pub fn prove_uni_batched_instances( - poly_list: &Vec<&DensePolynomial>, - r: &S, // point at which the polynomial is evaluated - Zr: &Vec, // evaluation of \widetilde{Z}(r) - transcript: &mut Transcript, + _poly_list: &Vec<&DensePolynomial>, + _r: &S, // point at which the polynomial is evaluated + _Zr: &Vec, // evaluation of \widetilde{Z}(r) + _transcript: &mut Transcript, _random_tape: &mut RandomTape, ) -> PolyEvalProof { - >::append_protocol_name( - transcript, - PolyEvalProof::::protocol_name(), - ); - - let max_num_vars = poly_list.iter().fold(0, |m, p| { - if p.get_num_vars() > m { - p.get_num_vars() - } else { - m - } - }); - let zero = S::field_zero(); - - // L differs depending on size of the polynomial, but R always stay the same - let (_, right_num_vars) = EqPolynomial::::compute_factored_lens(max_num_vars); - let R_size = right_num_vars.pow2(); - - // compute R = <1, r, r^2, ...> - let _R = { - let mut r_base = S::field_one(); - let mut R = Vec::new(); - for _ in 0..R_size { - R.push(r_base); - r_base = r_base * *r; - } - R - }; - let mut L_map: HashMap> = HashMap::new(); - - // compute the vector underneath L*Z - // compute vector-matrix product between L and Z viewed as a matrix - let c_base: S = transcript.challenge_scalar(b"challenge_c"); - let mut c = S::field_one(); - let mut LZ_comb = vec![zero; R_size]; - let mut Zr_comb = zero; - 
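// Illustrative sketch, not code from this patch: the prover logic being removed here
// batches many claimed evaluations into one opening with a random linear combination
// (RLC). Powers of a single transcript challenge `c_base` weight the claims, exactly as
// the `c = c * c_base` updates in the removed lines do; the helper name and the generic
// bound below are assumptions standing in for the crate's `SpartanExtensionField`.
fn rlc_combine<F>(zero: F, one: F, c_base: F, claims: &[F]) -> F
where
  F: Copy + core::ops::Add<Output = F> + core::ops::Mul<Output = F>,
{
  let mut c = one; // the first claim is weighted by 1, later ones by c_base^i
  let mut acc = zero;
  for claim in claims {
    acc = acc + *claim * c;
    c = c * c_base;
  }
  acc
}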
- for i in 0..poly_list.len() { - let poly = &poly_list[i]; - let num_vars = poly.get_num_vars(); - let L = if let Some(L) = L_map.get(&num_vars) { - L - } else { - let (left_num_vars, right_num_vars) = EqPolynomial::::compute_factored_lens(num_vars); - let L_size = left_num_vars.pow2(); - let R_size = right_num_vars.pow2(); - let r_base = (0..R_size).fold(S::field_one(), |p, _| p * *r); - // L is 1, r^k, r^2k, ... - let mut l_base = S::field_one(); - let mut L = Vec::new(); - for _ in 0..L_size { - L.push(l_base); - l_base = l_base * r_base; - } - L_map.insert(num_vars, L.clone()); - L_map.get(&num_vars).unwrap() - }; - - let LZ = poly.bound(&L); - LZ_comb = (0..R_size) - .map(|i| LZ_comb[i] + if i < LZ.len() { c * LZ[i] } else { zero }) - .collect(); - Zr_comb = Zr_comb + c * Zr[i]; - c = c * c_base; - } - - PolyEvalProof { v: LZ_comb } + // TODO: Alternative evaluation proof scheme + PolyEvalProof { _phantom: S::field_zero() } } pub fn verify_uni_batched_instances( &self, - transcript: &mut Transcript, - r: &S, // point at which the polynomial is evaluated - poly_size: Vec, + _transcript: &mut Transcript, + _r: &S, // point at which the polynomial is evaluated + _poly_size: Vec, ) -> Result<(), ProofVerifyError> { - >::append_protocol_name( - transcript, - PolyEvalProof::::protocol_name(), - ); - - let max_poly_size = poly_size.iter().fold(0, |m, i| if *i > m { *i } else { m }); - // compute L and R - let (_, right_num_vars) = - EqPolynomial::::compute_factored_lens(max_poly_size.next_power_of_two().log_2()); - let R_size = right_num_vars.pow2(); - - // compute R = <1, r, r^2, ...> - let R = { - let mut r_base = S::field_one(); - let mut R = Vec::new(); - for _ in 0..R_size { - R.push(r_base); - r_base = r_base * *r; - } - R - }; - let mut L_map: HashMap> = HashMap::new(); - - // compute a weighted sum of commitments and L - let c_base: S = transcript.challenge_scalar(b"challenge_c"); - let mut c = S::field_one(); - - for i in 0..poly_size.len() { - let num_vars = poly_size[i].next_power_of_two().log_2(); - let _L = if let Some(L) = L_map.get(&num_vars) { - L - } else { - let (left_num_vars, right_num_vars) = EqPolynomial::::compute_factored_lens(num_vars); - let L_size = left_num_vars.pow2(); - let R_size = right_num_vars.pow2(); - let r_base = (0..R_size).fold(S::field_one(), |p, _| p * *r); - // L is 1, r^k, r^2k, ... 
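// Illustrative sketch, not code from this patch: the removed univariate opening views the
// evaluation table Z as an L_size x R_size matrix and factors the power basis into
// R = <1, r, r^2, ...> and L = <1, r^R_size, r^(2*R_size), ...>, so that
// sum_k r^k * Z[k] = L * Z * R. The helper below restates that identity under assumed
// names; the crate instead computed L * Z via `poly.bound(&L)` and dotted it with R.
fn eval_univariate_factored<F>(zero: F, one: F, r: F, z: &[F], l_size: usize, r_size: usize) -> F
where
  F: Copy + core::ops::Add<Output = F> + core::ops::Mul<Output = F>,
{
  assert_eq!(z.len(), l_size * r_size);
  // R = <1, r, r^2, ...>
  let mut pow = one;
  let r_vec: Vec<F> = (0..r_size).map(|_| { let p = pow; pow = pow * r; p }).collect();
  let r_base = pow; // r^{R_size}
  // L = <1, r^R_size, r^(2*R_size), ...>
  let mut lpow = one;
  let l_vec: Vec<F> = (0..l_size).map(|_| { let p = lpow; lpow = lpow * r_base; p }).collect();
  // LZ = L * Z (row vector of length R_size), then <LZ, R>
  let mut acc = zero;
  for j in 0..r_size {
    let mut lz_j = zero;
    for i in 0..l_size {
      lz_j = lz_j + l_vec[i] * z[i * r_size + j];
    }
    acc = acc + lz_j * r_vec[j];
  }
  acc
}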
- let mut l_base = S::field_one(); - let mut L = Vec::new(); - for _ in 0..L_size { - L.push(l_base); - l_base = l_base * r_base; - } - L_map.insert(num_vars, L.clone()); - L_map.get(&num_vars).unwrap() - }; - - c = c * c_base; - } - - self.verify(transcript, &R) + // TODO: Alternative evaluation proof scheme + Ok(()) } } From abef32af712e4a2693eb45938be6985df05a6819 Mon Sep 17 00:00:00 2001 From: Ray Gao Date: Mon, 9 Dec 2024 13:46:16 -0500 Subject: [PATCH 18/22] fmt --- spartan_parallel/src/dense_mlpoly.rs | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/spartan_parallel/src/dense_mlpoly.rs b/spartan_parallel/src/dense_mlpoly.rs index c1c0a5cf..78a0886f 100644 --- a/spartan_parallel/src/dense_mlpoly.rs +++ b/spartan_parallel/src/dense_mlpoly.rs @@ -322,12 +322,14 @@ impl PolyEvalProof { pub fn prove( _poly: &DensePolynomial, _r: &[S], // point at which the polynomial is evaluated - _Zr: &S, // evaluation of \widetilde{Z}(r) + _Zr: &S, // evaluation of \widetilde{Z}(r) _transcript: &mut Transcript, _random_tape: &mut RandomTape, ) -> PolyEvalProof { // TODO: Alternative evaluation proof scheme - PolyEvalProof { _phantom: S::field_zero() } + PolyEvalProof { + _phantom: S::field_zero(), + } } pub fn verify( @@ -431,7 +433,9 @@ impl PolyEvalProof { _random_tape: &mut RandomTape, ) -> PolyEvalProof { // TODO: Alternative evaluation proof scheme - PolyEvalProof { _phantom: S::field_zero() } + PolyEvalProof { + _phantom: S::field_zero(), + } } pub fn verify_uni_batched_instances( From 8ff185218ac7d9acb87a52c44e6a9bf232c920e1 Mon Sep 17 00:00:00 2001 From: Ray Gao Date: Mon, 9 Dec 2024 20:12:11 -0500 Subject: [PATCH 19/22] Use typeset. Resolve compilation issues --- circ_blocks/Cargo.lock | 1 + circ_blocks/Cargo.toml | 1 + circ_blocks/examples/zxc.rs | 37 +++-- spartan_parallel/src/custom_dense_mlpoly.rs | 6 +- spartan_parallel/src/dense_mlpoly.rs | 75 +++++----- spartan_parallel/src/lib.rs | 91 ++++++++---- spartan_parallel/src/mle.rs | 147 ++++++++++++++++---- spartan_parallel/src/product_tree.rs | 39 +++--- spartan_parallel/src/r1csproof.rs | 9 +- spartan_parallel/src/sparse_mlpoly.rs | 60 ++++---- spartan_parallel/src/sumcheck.rs | 27 ++-- 11 files changed, 331 insertions(+), 162 deletions(-) diff --git a/circ_blocks/Cargo.lock b/circ_blocks/Cargo.lock index 7d3c4624..cc080398 100644 --- a/circ_blocks/Cargo.lock +++ b/circ_blocks/Cargo.lock @@ -314,6 +314,7 @@ dependencies = [ "gmp-mpfr-sys", "good_lp", "group 0.12.1", + "halo2curves", "ieee754", "im", "itertools 0.10.5", diff --git a/circ_blocks/Cargo.toml b/circ_blocks/Cargo.toml index 7a97c52a..92860d8e 100644 --- a/circ_blocks/Cargo.toml +++ b/circ_blocks/Cargo.toml @@ -29,6 +29,7 @@ thiserror = "1" bellman = { git = "https://github.com/alex-ozdemir/bellman.git", branch = "mirage", optional = true } rayon = { version = "1", optional = true } ff = { version = "0.12", optional = true } +halo2curves = "0.1.0" fxhash = "0.2" good_lp = { version = "1.10", features = [ "lp-solvers", diff --git a/circ_blocks/examples/zxc.rs b/circ_blocks/examples/zxc.rs index c2627f31..9baeaa19 100644 --- a/circ_blocks/examples/zxc.rs +++ b/circ_blocks/examples/zxc.rs @@ -12,6 +12,7 @@ use circ::target::r1cs::wit_comp::StagedWitCompEvaluator; use circ::target::r1cs::ProverData; use circ::target::r1cs::{Lc, VarType}; use core::cmp::min; +use halo2curves::serde::SerdeObject; use libspartan::scalar::{ScalarExt2, SpartanExtensionField}; use rug::Integer; @@ -33,7 +34,6 @@ use libspartan::{ use merlin::Transcript; use serde::{Deserialize, 
Serialize}; use std::time::*; -use std::time::*; // How many reserved variables (EXCLUDING V) are in front of the actual input / output? // %BN, %RET, %TS, %AS, %SP, %BP @@ -437,7 +437,10 @@ impl RunTimeKnowledge { for exec in block { writeln!(&mut f, "EXEC {}", exec_counter)?; for assg in &exec.assignment { - write!(&mut f, "{} ", bytes_to_integer(&assg.to_bytes()))?; + let mut padded = [0; 32]; + padded[..8].copy_from_slice(&assg.to_raw_bytes()); + + write!(&mut f, "{} ", bytes_to_integer(&padded))?; } writeln!(&mut f)?; exec_counter += 1; @@ -449,7 +452,10 @@ impl RunTimeKnowledge { for exec in &self.exec_inputs { writeln!(&mut f, "EXEC {}", exec_counter)?; for assg in &exec.assignment { - write!(&mut f, "{} ", bytes_to_integer(&assg.to_bytes()))?; + let mut padded = [0; 32]; + padded[..8].copy_from_slice(&assg.to_raw_bytes()); + + write!(&mut f, "{} ", bytes_to_integer(&padded))?; } writeln!(&mut f)?; exec_counter += 1; @@ -459,7 +465,10 @@ impl RunTimeKnowledge { for addr in &self.init_phy_mems_list { writeln!(&mut f, "ACCESS {}", addr_counter)?; for assg in &addr.assignment { - write!(&mut f, "{} ", bytes_to_integer(&assg.to_bytes()))?; + let mut padded = [0; 32]; + padded[..8].copy_from_slice(&assg.to_raw_bytes()); + + write!(&mut f, "{} ", bytes_to_integer(&padded))?; } writeln!(&mut f)?; addr_counter += 1; @@ -469,7 +478,10 @@ impl RunTimeKnowledge { for addr in &self.init_vir_mems_list { writeln!(&mut f, "ACCESS {}", addr_counter)?; for assg in &addr.assignment { - write!(&mut f, "{} ", bytes_to_integer(&assg.to_bytes()))?; + let mut padded = [0; 32]; + padded[..8].copy_from_slice(&assg.to_raw_bytes()); + + write!(&mut f, "{} ", bytes_to_integer(&padded))?; } writeln!(&mut f)?; addr_counter += 1; @@ -479,7 +491,10 @@ impl RunTimeKnowledge { for addr in &self.addr_phy_mems_list { writeln!(&mut f, "ACCESS {}", addr_counter)?; for assg in &addr.assignment { - write!(&mut f, "{} ", bytes_to_integer(&assg.to_bytes()))?; + let mut padded = [0; 32]; + padded[..8].copy_from_slice(&assg.to_raw_bytes()); + + write!(&mut f, "{} ", bytes_to_integer(&padded))?; } writeln!(&mut f)?; addr_counter += 1; @@ -489,7 +504,10 @@ impl RunTimeKnowledge { for addr in &self.addr_vir_mems_list { writeln!(&mut f, "ACCESS {}", addr_counter)?; for assg in &addr.assignment { - write!(&mut f, "{} ", bytes_to_integer(&assg.to_bytes()))?; + let mut padded = [0; 32]; + padded[..8].copy_from_slice(&assg.to_raw_bytes()); + + write!(&mut f, "{} ", bytes_to_integer(&padded))?; } writeln!(&mut f)?; addr_counter += 1; @@ -499,7 +517,10 @@ impl RunTimeKnowledge { for addr in &self.addr_ts_bits_list { writeln!(&mut f, "ACCESS {}", addr_counter)?; for assg in &addr.assignment { - write!(&mut f, "{} ", bytes_to_integer(&assg.to_bytes()))?; + let mut padded = [0; 32]; + padded[..8].copy_from_slice(&assg.to_raw_bytes()); + + write!(&mut f, "{} ", bytes_to_integer(&padded))?; } writeln!(&mut f)?; addr_counter += 1; diff --git a/spartan_parallel/src/custom_dense_mlpoly.rs b/spartan_parallel/src/custom_dense_mlpoly.rs index 18489e98..6c8c5b66 100644 --- a/spartan_parallel/src/custom_dense_mlpoly.rs +++ b/spartan_parallel/src/custom_dense_mlpoly.rs @@ -1,11 +1,11 @@ #![allow(clippy::too_many_arguments)] use std::cmp::min; +use super::math::Math; use crate::dense_mlpoly::DensePolynomial; +use crate::mle::Ext; use crate::scalar::SpartanExtensionField; -use super::math::Math; - const MODE_P: usize = 1; const MODE_Q: usize = 2; const MODE_W: usize = 3; @@ -328,7 +328,7 @@ impl DensePolynomialPqx { } // Convert to a (p, q_rev, x_rev) 
regular dense poly of form (p, q, x) - pub fn to_dense_poly(&self) -> DensePolynomial { + pub fn to_dense_poly(&self) -> DensePolynomial { let mut Z_poly = vec![ S::field_zero(); diff --git a/spartan_parallel/src/dense_mlpoly.rs b/spartan_parallel/src/dense_mlpoly.rs index abdcfbc0..bf2e977c 100644 --- a/spartan_parallel/src/dense_mlpoly.rs +++ b/spartan_parallel/src/dense_mlpoly.rs @@ -1,15 +1,15 @@ #![allow(clippy::too_many_arguments)] -use crate::scalar::SpartanExtensionField; use super::errors::ProofVerifyError; use super::math::Math; use super::random::RandomTape; use super::transcript::ProofTranscript; +use crate::mle::{Base, Ext, MLEType, MLE}; +use crate::scalar::SpartanExtensionField; use core::ops::Index; +use ff::Field; use merlin::Transcript; use serde::{Deserialize, Serialize}; -use std::collections::HashMap; -use ff::Field; -use crate::mle::{MLE, MLEType, Base, Ext}; +use std::{collections::HashMap, process::Output}; #[cfg(feature = "multicore")] use rayon::prelude::*; @@ -115,7 +115,7 @@ impl IdentityPolynomial { } } -impl DensePolynomial { +impl DensePolynomial { pub fn new(mut Z: Vec) -> Self { // If length of Z is not a power of 2, append Z with 0 let zero = S::field_zero(); @@ -127,17 +127,6 @@ impl DensePolynomial { } } - pub fn new_from_base(mut Z: Vec) -> Self { - // If length of Z is not a power of 2, append Z with 0 - let zero = S::BaseField::ZERO; - Z.extend(vec![zero; Z.len().next_power_of_two() - Z.len()]); - DensePolynomial { - num_vars: Z.len().log_2(), - len: Z.len(), - Z: MLE::::new(Z), - } - } - pub fn get_num_vars(&self) -> usize { self.num_vars } @@ -146,7 +135,7 @@ impl DensePolynomial { self.len } - pub fn split(&self, idx: usize) -> (DensePolynomial, DensePolynomial) { + pub fn split(&self, idx: usize) -> (DensePolynomial, DensePolynomial) { assert!(idx < self.len()); ( DensePolynomial::new(self.Z[..idx].to_vec()), @@ -261,7 +250,7 @@ impl DensePolynomial { assert_eq!(r.len(), self.get_num_vars()); let chis = EqPolynomial::new(r.to_vec()).evals(); assert_eq!(chis.len(), self.Z.len()); - Self::compute_dotproduct(&self.Z, &chis) + Self::compute_dotproduct(&self.Z[0..], &chis) } fn compute_dotproduct(a: &[S], b: &[S]) -> S { @@ -270,10 +259,10 @@ impl DensePolynomial { } fn vec(&self) -> &Vec { - &self.Z + &self.Z.inner_ref() } - pub fn extend(&mut self, other: &DensePolynomial) { + pub fn extend(&mut self, other: &DensePolynomial) { // TODO: allow extension even when some vars are bound assert_eq!(self.Z.len(), self.len); let other_vec = other.vec(); @@ -284,9 +273,9 @@ impl DensePolynomial { assert_eq!(self.Z.len(), self.len); } - pub fn merge<'a, I>(polys: I) -> DensePolynomial + pub fn merge<'a, I>(polys: I) -> DensePolynomial where - I: IntoIterator>, + I: IntoIterator>, { let mut Z: Vec = Vec::new(); for poly in polys.into_iter() { @@ -308,12 +297,34 @@ impl DensePolynomial { } } -impl Index for DensePolynomial { +impl DensePolynomial { + pub fn new_from_base(mut Z: Vec) -> Self { + // If length of Z is not a power of 2, append Z with 0 + let zero = S::BaseField::ZERO; + Z.extend(vec![zero; Z.len().next_power_of_two() - Z.len()]); + DensePolynomial { + num_vars: Z.len().log_2(), + len: Z.len(), + Z: MLE::::new(Z), + } + } +} + +impl Index for DensePolynomial { type Output = S; #[inline(always)] - fn index(&self, _index: usize) -> &S { - &(self.Z[_index]) + fn index(&self, index: usize) -> &Self::Output { + &(self.Z[index]) + } +} + +impl Index for DensePolynomial { + type Output = S::BaseField; + + #[inline(always)] + fn index(&self, index: usize) -> 
&Self::Output { + &(self.Z[index]) } } @@ -328,7 +339,7 @@ impl PolyEvalProof { } pub fn prove( - _poly: &DensePolynomial, + _poly: &DensePolynomial, _r: &[S], // point at which the polynomial is evaluated _Zr: &S, // evaluation of \widetilde{Z}(r) _transcript: &mut Transcript, @@ -360,7 +371,7 @@ impl PolyEvalProof { // Evaluation of multiple points on the same instance pub fn prove_batched_points( - _poly: &DensePolynomial, + _poly: &DensePolynomial, _r_list: Vec>, // point at which the polynomial is evaluated _Zr_list: Vec, // evaluation of \widetilde{Z}(r) on each point _transcript: &mut Transcript, @@ -383,9 +394,9 @@ impl PolyEvalProof { // Evaluation on multiple instances, each at different point // Size of each instance might be different, but all are larger than the evaluation point pub fn prove_batched_instances( - _poly_list: &Vec>, // list of instances - _r_list: Vec<&Vec>, // point at which the polynomial is evaluated - _Zr_list: &Vec, // evaluation of \widetilde{Z}(r) on each instance + _poly_list: &Vec>, // list of instances + _r_list: Vec<&Vec>, // point at which the polynomial is evaluated + _Zr_list: &Vec, // evaluation of \widetilde{Z}(r) on each instance _transcript: &mut Transcript, _random_tape: &mut RandomTape, ) -> Vec> { @@ -407,7 +418,7 @@ impl PolyEvalProof { // Like prove_batched_instances, but r is divided into rq ++ ry // Each polynomial is supplemented with num_proofs and num_inputs pub fn prove_batched_instances_disjoint_rounds( - _poly_list: &Vec<&DensePolynomial>, + _poly_list: &Vec<&DensePolynomial>, _num_proofs_list: &Vec, _num_inputs_list: &Vec, _rq: &[S], @@ -434,7 +445,7 @@ impl PolyEvalProof { // Treat the polynomial(s) as univariate and open on a single point pub fn prove_uni_batched_instances( - _poly_list: &Vec<&DensePolynomial>, + _poly_list: &Vec<&DensePolynomial>, _r: &S, // point at which the polynomial is evaluated _Zr: &Vec, // evaluation of \widetilde{Z}(r) _transcript: &mut Transcript, diff --git a/spartan_parallel/src/lib.rs b/spartan_parallel/src/lib.rs index 80f72d68..95269de2 100644 --- a/spartan_parallel/src/lib.rs +++ b/spartan_parallel/src/lib.rs @@ -21,11 +21,11 @@ extern crate rayon; mod custom_dense_mlpoly; mod dense_mlpoly; -mod mle; mod errors; /// R1CS instance used by libspartan pub mod instance; mod math; +mod mle; mod product_tree; mod r1csinstance; mod r1csproof; @@ -46,11 +46,13 @@ use std::{ use dense_mlpoly::{DensePolynomial, PolyEvalProof}; use errors::{ProofVerifyError, R1CSError}; +use goldilocks::SmallField; use halo2curves::serde::SerdeObject; use instance::Instance; use itertools::Itertools; use math::Math; use merlin::Transcript; +use mle::Ext; use r1csinstance::{R1CSCommitment, R1CSDecommitment, R1CSEvalProof, R1CSInstance}; use r1csproof::R1CSProof; use random::RandomTape; @@ -87,10 +89,12 @@ impl Assignment { /// Constructs a new `Assignment` from a vector pub fn new(assignment: &[[u8; 32]]) -> Result, R1CSError> { let bytes_to_scalar = |vec: &[[u8; 32]]| -> Result, R1CSError> { - vec - .into_iter() - .map(|v| S::BaseField::from_raw_bytes(v).ok_or(R1CSError::InvalidScalar)) - .collect() + Ok( + vec + .into_iter() + .map(|v| S::BaseField::from_raw_bytes_unchecked(v)) + .collect::>(), + ) }; let assignment_scalar = bytes_to_scalar(assignment); @@ -100,7 +104,10 @@ impl Assignment { /// Write the assignment into a file pub fn write(&self, f: &File) -> std::io::Result<()> { for assg in &self.assignment { - write_bytes(f, &assg.to_bytes())?; + let mut padded = [0; 32]; + 
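// Illustrative sketch, not part of the patch: `SerdeObject::to_raw_bytes` yields the raw
// little-endian encoding of a Goldilocks base-field element (8 bytes), which the writer
// below and the writers in circ_blocks/examples/zxc.rs pad into the 32-byte layout the
// existing readers expect. The repeated pattern could be factored into a helper like this
// (hypothetical name, not in the crate):
fn pad_raw_bytes_to_32(raw: &[u8]) -> [u8; 32] {
  debug_assert!(raw.len() <= 32);
  let mut padded = [0u8; 32];
  padded[..raw.len()].copy_from_slice(raw);
  padded
}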
padded[..8].copy_from_slice(&assg.to_raw_bytes()); + + write_bytes(f, &padded)?; } Ok(()) } @@ -142,7 +149,7 @@ struct IOProofs { impl IOProofs { // Given the polynomial in execution order, generate all proofs fn prove( - exec_poly_inputs: &DensePolynomial, + exec_poly_inputs: &DensePolynomial, num_ios: usize, num_inputs_unpadded: usize, @@ -314,8 +321,8 @@ struct ShiftProofs { impl ShiftProofs { fn prove( - orig_polys: Vec<&DensePolynomial>, - shifted_polys: Vec<&DensePolynomial>, + orig_polys: Vec<&DensePolynomial>, + shifted_polys: Vec<&DensePolynomial>, // For each orig_poly, how many entries at the front of proof 0 are non-zero? header_len_list: Vec, transcript: &mut Transcript, @@ -410,11 +417,11 @@ struct ProverWitnessSecInfo { // num_instances x num_proofs x num_inputs hypermatrix for all values w_mat: Vec>>, // One dense polynomial per instance - poly_w: Vec>, + poly_w: Vec>, } impl ProverWitnessSecInfo { - fn new(w_mat: Vec>>, poly_w: Vec>) -> ProverWitnessSecInfo { + fn new(w_mat: Vec>>, poly_w: Vec>) -> ProverWitnessSecInfo { ProverWitnessSecInfo { num_inputs: w_mat.iter().map(|i| i[0].len()).collect(), w_mat, @@ -843,38 +850,70 @@ impl SNARK { // unwrap the assignments let mut block_vars_mat = block_vars_mat .into_iter() - .map(|a| - a.into_iter().map(|v| - v.assignment - .into_iter() - .map(|a| S::from_base(&a)) - .collect::>()) - .collect::>>() - ) + .map(|a| { + a.into_iter() + .map(|v| { + v.assignment + .into_iter() + .map(|a| S::from_base(&a)) + .collect::>() + }) + .collect::>>() + }) .collect::>>>(); let mut exec_inputs_list = exec_inputs_list .into_iter() - .map(|v| v.assignment.into_iter().map(|a| S::from_base(&a)).collect::>()) + .map(|v| { + v.assignment + .into_iter() + .map(|a| S::from_base(&a)) + .collect::>() + }) .collect::>>(); let mut init_phy_mems_list = init_phy_mems_list .into_iter() - .map(|v| v.assignment.into_iter().map(|a| S::from_base(&a)).collect::>()) + .map(|v| { + v.assignment + .into_iter() + .map(|a| S::from_base(&a)) + .collect::>() + }) .collect::>>(); let mut init_vir_mems_list = init_vir_mems_list .into_iter() - .map(|v| v.assignment.into_iter().map(|a| S::from_base(&a)).collect::>()) + .map(|v| { + v.assignment + .into_iter() + .map(|a| S::from_base(&a)) + .collect::>() + }) .collect::>>(); let mut addr_phy_mems_list = addr_phy_mems_list .into_iter() - .map(|v| v.assignment.into_iter().map(|a| S::from_base(&a)).collect::>()) + .map(|v| { + v.assignment + .into_iter() + .map(|a| S::from_base(&a)) + .collect::>() + }) .collect::>>(); let mut addr_vir_mems_list = addr_vir_mems_list .into_iter() - .map(|v| v.assignment.into_iter().map(|a| S::from_base(&a)).collect::>()) + .map(|v| { + v.assignment + .into_iter() + .map(|a| S::from_base(&a)) + .collect::>() + }) .collect::>>(); let mut addr_ts_bits_list = addr_ts_bits_list .into_iter() - .map(|v| v.assignment.into_iter().map(|a| S::from_base(&a)).collect::>()) + .map(|v| { + v.assignment + .into_iter() + .map(|a| S::from_base(&a)) + .collect::>() + }) .collect::>>(); // -- @@ -2162,7 +2201,7 @@ impl SNARK { // PHY_MEM_BLOCK takes r = 4, VIR_MEM_BLOCK takes r = 6, everything else takes r = 2 let perm_poly_poly_list: Vec = (0..inst_map.len()) .map(|i| { - let p: &DensePolynomial = &perm_poly_w3_prover.poly_w[i]; + let p: &DensePolynomial = &perm_poly_w3_prover.poly_w[i]; let i = inst_map[i]; if i == vm_bl_id { p[6] diff --git a/spartan_parallel/src/mle.rs b/spartan_parallel/src/mle.rs index 0de49a3c..1bc349a8 100644 --- a/spartan_parallel/src/mle.rs +++ b/spartan_parallel/src/mle.rs @@ -1,53 
+1,142 @@ use crate::scalar::SpartanExtensionField; use std::cmp::max; +use std::ops::{Index, IndexMut, Range, RangeFrom, RangeTo}; pub trait MLEType {} +#[derive(Debug, Clone)] pub struct Base; impl MLEType for Base {} +#[derive(Debug, Clone)] pub struct Ext; impl MLEType for Ext {} #[derive(Debug, Clone)] pub struct MLE { - t: T, + t: T, - // Depending on T, one of the following fields will be empty. - // For MLE, field elements can potentially be stored as elements - // in the base field (resource saving) or in the extended field. - ext_vec: Vec, - base_vec: Vec, + // Depending on T, one of the following fields will be empty. + // For MLE, field elements can potentially be stored as elements + // in the base field (resource saving) or in the extended field. + ext_vec: Vec, + base_vec: Vec, } -// Define general behavior of MLE -impl MLE -{ - pub fn len(&self) -> usize { - max(self.ext_vec.len(), self.base_vec.len()) - } +// Define universal behavior of MLE +impl MLE { + pub fn len(&self) -> usize { + max(self.ext_vec.len(), self.base_vec.len()) + } } // Define behavior of MLE when elements are in the base field -impl MLE -{ - pub fn new(vals: Vec) -> Self { - Self { - t: Base, - ext_vec: vec![], - base_vec: vals, - } +impl MLE { + pub fn new(vals: Vec) -> Self { + Self { + t: Base, + ext_vec: vec![], + base_vec: vals, } + } + + pub fn inner_ref(&self) -> &Vec { + &self.base_vec + } +} + +impl Index for MLE { + type Output = S::BaseField; + + fn index(&self, index: usize) -> &Self::Output { + &self.base_vec[index] + } +} + +impl Index> for MLE { + type Output = [S::BaseField]; + + fn index(&self, index: Range) -> &Self::Output { + &self.base_vec[index] + } +} + +impl Index> for MLE { + type Output = [S::BaseField]; + + fn index(&self, range: RangeTo) -> &Self::Output { + &self.base_vec[range] + } +} + +impl Index> for MLE { + type Output = [S::BaseField]; + + fn index(&self, range: RangeFrom) -> &Self::Output { + &self.base_vec[range] + } +} + +impl IndexMut for MLE { + fn index_mut(&mut self, index: usize) -> &mut Self::Output { + &mut self.base_vec[index] + } } // Define behavior of MLE when elements are in the extended field -impl MLE -{ - pub fn new(vals: Vec) -> Self { - Self { - t: Ext, - ext_vec: vals, - base_vec: vec![], - } +impl MLE { + pub fn new(vals: Vec) -> Self { + Self { + t: Ext, + ext_vec: vals, + base_vec: vec![], } -} \ No newline at end of file + } + + pub fn inner_ref(&self) -> &Vec { + &self.ext_vec + } + + pub fn extend(&mut self, other_vec: &Vec) -> &Vec { + self.ext_vec.extend(other_vec); + &self.ext_vec + } +} + +impl Index for MLE { + type Output = S; + + fn index(&self, index: usize) -> &Self::Output { + &self.ext_vec[index] + } +} + +impl Index> for MLE { + type Output = [S]; + + fn index(&self, index: Range) -> &Self::Output { + &self.ext_vec[index] + } +} + +impl Index> for MLE { + type Output = [S]; + + fn index(&self, range: RangeTo) -> &Self::Output { + &self.ext_vec[range] + } +} + +impl Index> for MLE { + type Output = [S]; + + fn index(&self, range: RangeFrom) -> &Self::Output { + &self.ext_vec[range] + } +} + +impl IndexMut for MLE { + fn index_mut(&mut self, index: usize) -> &mut Self::Output { + &mut self.ext_vec[index] + } +} diff --git a/spartan_parallel/src/product_tree.rs b/spartan_parallel/src/product_tree.rs index c42d9a79..70f53a43 100644 --- a/spartan_parallel/src/product_tree.rs +++ b/spartan_parallel/src/product_tree.rs @@ -1,4 +1,5 @@ #![allow(dead_code)] +use crate::mle::Ext; use crate::scalar::SpartanExtensionField; use 
super::dense_mlpoly::DensePolynomial; @@ -11,15 +12,15 @@ use serde::{Deserialize, Serialize}; #[derive(Debug, Clone)] pub struct ProductCircuit { - left_vec: Vec>, - right_vec: Vec>, + left_vec: Vec>, + right_vec: Vec>, } impl ProductCircuit { fn compute_layer( - inp_left: &DensePolynomial, - inp_right: &DensePolynomial, - ) -> (DensePolynomial, DensePolynomial) { + inp_left: &DensePolynomial, + inp_right: &DensePolynomial, + ) -> (DensePolynomial, DensePolynomial) { let len = inp_left.len() + inp_right.len(); let outp_left = (0..len / 4) .map(|i| inp_left[i] * inp_right[i]) @@ -34,9 +35,9 @@ impl ProductCircuit { ) } - pub fn new(poly: &DensePolynomial) -> Self { - let mut left_vec: Vec> = Vec::new(); - let mut right_vec: Vec> = Vec::new(); + pub fn new(poly: &DensePolynomial) -> Self { + let mut left_vec: Vec> = Vec::new(); + let mut right_vec: Vec> = Vec::new(); let num_layers = poly.len().log_2(); let (outp_left, outp_right) = poly.split(poly.len() / 2); @@ -66,16 +67,16 @@ impl ProductCircuit { #[derive(Clone)] pub struct DotProductCircuit { - left: DensePolynomial, - right: DensePolynomial, - weight: DensePolynomial, + left: DensePolynomial, + right: DensePolynomial, + weight: DensePolynomial, } impl DotProductCircuit { pub fn new( - left: DensePolynomial, - right: DensePolynomial, - weight: DensePolynomial, + left: DensePolynomial, + right: DensePolynomial, + weight: DensePolynomial, ) -> Self { assert_eq!(left.len(), right.len()); assert_eq!(left.len(), weight.len()); @@ -281,8 +282,8 @@ impl ProductCircuitEvalProofBatched { *poly_A_comp * *poly_B_comp * *poly_C_comp }; - let mut poly_A_batched_par: Vec<&mut DensePolynomial> = Vec::new(); - let mut poly_B_batched_par: Vec<&mut DensePolynomial> = Vec::new(); + let mut poly_A_batched_par: Vec<&mut DensePolynomial> = Vec::new(); + let mut poly_B_batched_par: Vec<&mut DensePolynomial> = Vec::new(); for prod_circuit in prod_circuit_vec.iter_mut() { poly_A_batched_par.push(&mut prod_circuit.left_vec[layer_id]); poly_B_batched_par.push(&mut prod_circuit.right_vec[layer_id]) @@ -294,9 +295,9 @@ impl ProductCircuitEvalProofBatched { ); // prepare sequential instances that don't share poly_C - let mut poly_A_batched_seq: Vec<&mut DensePolynomial> = Vec::new(); - let mut poly_B_batched_seq: Vec<&mut DensePolynomial> = Vec::new(); - let mut poly_C_batched_seq: Vec<&mut DensePolynomial> = Vec::new(); + let mut poly_A_batched_seq: Vec<&mut DensePolynomial> = Vec::new(); + let mut poly_B_batched_seq: Vec<&mut DensePolynomial> = Vec::new(); + let mut poly_C_batched_seq: Vec<&mut DensePolynomial> = Vec::new(); if layer_id == 0 && !dotp_circuit_vec.is_empty() { // add additional claims for item in dotp_circuit_vec.iter() { diff --git a/spartan_parallel/src/r1csproof.rs b/spartan_parallel/src/r1csproof.rs index 05a52d94..2eb1b9e3 100644 --- a/spartan_parallel/src/r1csproof.rs +++ b/spartan_parallel/src/r1csproof.rs @@ -8,6 +8,7 @@ use super::random::RandomTape; use super::sumcheck::SumcheckInstanceProof; use super::timer::Timer; use super::transcript::ProofTranscript; +use crate::mle::Ext; use crate::scalar::SpartanExtensionField; use crate::{ProverWitnessSecInfo, VerifierWitnessSecInfo}; use merlin::Transcript; @@ -30,9 +31,9 @@ impl R1CSProof { num_rounds_p: usize, num_proofs: &Vec, num_cons: &Vec, - evals_tau_p: &mut DensePolynomial, - evals_tau_q: &mut DensePolynomial, - evals_tau_x: &mut DensePolynomial, + evals_tau_p: &mut DensePolynomial, + evals_tau_q: &mut DensePolynomial, + evals_tau_x: &mut DensePolynomial, evals_Az: &mut 
DensePolynomialPqx, evals_Bz: &mut DensePolynomialPqx, evals_Cz: &mut DensePolynomialPqx, @@ -73,7 +74,7 @@ impl R1CSProof { num_witness_secs: usize, num_inputs: Vec, claim: &S, - evals_eq: &mut DensePolynomial, + evals_eq: &mut DensePolynomial, evals_ABC: &mut DensePolynomialPqx, evals_z: &mut DensePolynomialPqx, transcript: &mut Transcript, diff --git a/spartan_parallel/src/sparse_mlpoly.rs b/spartan_parallel/src/sparse_mlpoly.rs index 830d2803..fc85f109 100644 --- a/spartan_parallel/src/sparse_mlpoly.rs +++ b/spartan_parallel/src/sparse_mlpoly.rs @@ -1,6 +1,7 @@ #![allow(clippy::type_complexity)] #![allow(clippy::too_many_arguments)] #![allow(clippy::needless_range_loop)] +use crate::mle::Ext; use crate::scalar::SpartanExtensionField; use super::dense_mlpoly::DensePolynomial; @@ -36,13 +37,16 @@ pub struct SparseMatPolynomial { } pub struct Derefs { - row_ops_val: Vec>, - col_ops_val: Vec>, - comb: DensePolynomial, + row_ops_val: Vec>, + col_ops_val: Vec>, + comb: DensePolynomial, } impl Derefs { - pub fn new(row_ops_val: Vec>, col_ops_val: Vec>) -> Self { + pub fn new( + row_ops_val: Vec>, + col_ops_val: Vec>, + ) -> Self { assert_eq!(row_ops_val.len(), col_ops_val.len()); let ret_row_ops_val = row_ops_val.clone(); @@ -74,7 +78,7 @@ impl DerefsEvalProof { } fn prove_single( - joint_poly: &DensePolynomial, + joint_poly: &DensePolynomial, r: &[S], evals: Vec, transcript: &mut Transcript, @@ -187,9 +191,9 @@ impl DerefsEvalProof { #[derive(Clone)] struct AddrTimestamps { ops_addr_usize: Vec>, - ops_addr: Vec>, - read_ts: Vec>, - audit_ts: DensePolynomial, + ops_addr: Vec>, + read_ts: Vec>, + audit_ts: DensePolynomial, } impl AddrTimestamps { @@ -199,8 +203,8 @@ impl AddrTimestamps { } let mut audit_ts = vec![0usize; num_cells]; - let mut ops_addr_vec: Vec> = Vec::new(); - let mut read_ts_vec: Vec> = Vec::new(); + let mut ops_addr_vec: Vec> = Vec::new(); + let mut read_ts_vec: Vec> = Vec::new(); for ops_addr_inst in ops_addr.iter() { let mut read_ts = vec![0usize; num_ops]; @@ -228,7 +232,7 @@ impl AddrTimestamps { } } - fn deref_mem(addr: &[usize], mem_val: &[S]) -> DensePolynomial { + fn deref_mem(addr: &[usize], mem_val: &[S]) -> DensePolynomial { DensePolynomial::new( (0..addr.len()) .map(|i| { @@ -239,20 +243,20 @@ impl AddrTimestamps { ) } - pub fn deref(&self, mem_val: &[S]) -> Vec> { + pub fn deref(&self, mem_val: &[S]) -> Vec> { (0..self.ops_addr.len()) .map(|i| AddrTimestamps::deref_mem(&self.ops_addr_usize[i], mem_val)) - .collect::>>() + .collect::>>() } } pub struct MultiSparseMatPolynomialAsDense { batch_size: usize, - val: Vec>, + val: Vec>, row: AddrTimestamps, col: AddrTimestamps, - comb_ops: DensePolynomial, - comb_mem: DensePolynomial, + comb_ops: DensePolynomial, + comb_mem: DensePolynomial, } #[derive(Debug, Serialize, Deserialize, Clone)] @@ -314,7 +318,7 @@ impl SparseMatPolynomial { let mut ops_row_vec: Vec> = Vec::new(); let mut ops_col_vec: Vec> = Vec::new(); - let mut val_vec: Vec> = Vec::new(); + let mut val_vec: Vec> = Vec::new(); for poly in sparse_polys { let (ops_row, ops_col, val) = poly.sparse_to_dense_vecs(N); ops_row_vec.push(ops_row); @@ -502,16 +506,16 @@ struct Layers { impl Layers { fn build_hash_layer( eval_table: &[S], - addrs_vec: &[DensePolynomial], - derefs_vec: &[DensePolynomial], - read_ts_vec: &[DensePolynomial], - audit_ts: &DensePolynomial, + addrs_vec: &[DensePolynomial], + derefs_vec: &[DensePolynomial], + read_ts_vec: &[DensePolynomial], + audit_ts: &DensePolynomial, r_mem_check: &(S, S), ) -> ( - DensePolynomial, - Vec>, - Vec>, - 
DensePolynomial, + DensePolynomial, + Vec>, + Vec>, + DensePolynomial, ) { let (r_hash, r_multiset_check) = r_mem_check; @@ -539,8 +543,8 @@ impl Layers { ); // hash read and write that depends on #instances - let mut poly_read_hashed_vec: Vec> = Vec::new(); - let mut poly_write_hashed_vec: Vec> = Vec::new(); + let mut poly_read_hashed_vec: Vec> = Vec::new(); + let mut poly_write_hashed_vec: Vec> = Vec::new(); for i in 0..addrs_vec.len() { let (addrs, derefs, read_ts) = (&addrs_vec[i], &derefs_vec[i], &read_ts_vec[i]); assert_eq!(addrs.len(), derefs.len()); @@ -578,7 +582,7 @@ impl Layers { pub fn new( eval_table: &[S], addr_timestamps: &AddrTimestamps, - poly_ops_val: &[DensePolynomial], + poly_ops_val: &[DensePolynomial], r_mem_check: &(S, S), ) -> Self { let (poly_init_hashed, poly_read_hashed_vec, poly_write_hashed_vec, poly_audit_hashed) = diff --git a/spartan_parallel/src/sumcheck.rs b/spartan_parallel/src/sumcheck.rs index 57b11cf4..db549eb0 100644 --- a/spartan_parallel/src/sumcheck.rs +++ b/spartan_parallel/src/sumcheck.rs @@ -2,6 +2,7 @@ #![allow(clippy::type_complexity)] use crate::custom_dense_mlpoly::DensePolynomialPqx; use crate::math::Math; +use crate::mle::Ext; use crate::scalar::SpartanExtensionField; use super::dense_mlpoly::DensePolynomial; @@ -73,9 +74,9 @@ impl SumcheckInstanceProof { pub fn prove_cubic( claim: &S, num_rounds: usize, - poly_A: &mut DensePolynomial, - poly_B: &mut DensePolynomial, - poly_C: &mut DensePolynomial, + poly_A: &mut DensePolynomial, + poly_B: &mut DensePolynomial, + poly_C: &mut DensePolynomial, comb_func: F, transcript: &mut Transcript, ) -> (Self, Vec, Vec) @@ -147,14 +148,14 @@ impl SumcheckInstanceProof { claim: &S, num_rounds: usize, poly_vec_par: ( - &mut Vec<&mut DensePolynomial>, - &mut Vec<&mut DensePolynomial>, - &mut DensePolynomial, + &mut Vec<&mut DensePolynomial>, + &mut Vec<&mut DensePolynomial>, + &mut DensePolynomial, ), poly_vec_seq: ( - &mut Vec<&mut DensePolynomial>, - &mut Vec<&mut DensePolynomial>, - &mut Vec<&mut DensePolynomial>, + &mut Vec<&mut DensePolynomial>, + &mut Vec<&mut DensePolynomial>, + &mut Vec<&mut DensePolynomial>, ), coeffs: &[S], comb_func: F, @@ -327,7 +328,7 @@ impl SumcheckInstanceProof { single_inst: bool, // indicates whether poly_B only has one instance num_witness_secs: usize, mut num_inputs: Vec, - poly_A: &mut DensePolynomial, + poly_A: &mut DensePolynomial, poly_B: &mut DensePolynomialPqx, poly_C: &mut DensePolynomialPqx, comb_func: F, @@ -513,9 +514,9 @@ impl SumcheckInstanceProof { num_rounds_p: usize, mut num_proofs: Vec, mut num_cons: Vec, - poly_Ap: &mut DensePolynomial, - poly_Aq: &mut DensePolynomial, - poly_Ax: &mut DensePolynomial, + poly_Ap: &mut DensePolynomial, + poly_Aq: &mut DensePolynomial, + poly_Ax: &mut DensePolynomial, poly_B: &mut DensePolynomialPqx, poly_C: &mut DensePolynomialPqx, poly_D: &mut DensePolynomialPqx, From daa75d40638eef76c5422125eb5d31fef9da3169 Mon Sep 17 00:00:00 2001 From: Ray Gao Date: Mon, 9 Dec 2024 20:34:09 -0500 Subject: [PATCH 20/22] Remove merge duplicates --- spartan_parallel/src/dense_mlpoly.rs | 11 ----------- 1 file changed, 11 deletions(-) diff --git a/spartan_parallel/src/dense_mlpoly.rs b/spartan_parallel/src/dense_mlpoly.rs index 66a6c072..bf2e977c 100644 --- a/spartan_parallel/src/dense_mlpoly.rs +++ b/spartan_parallel/src/dense_mlpoly.rs @@ -349,10 +349,6 @@ impl PolyEvalProof { PolyEvalProof { _phantom: S::field_zero(), } - // TODO: Alternative evaluation proof scheme - PolyEvalProof { - _phantom: S::field_zero(), - } } pub fn verify( @@ 
-360,7 +356,6 @@ impl PolyEvalProof { transcript: &mut Transcript, r: &[S], // point at which the polynomial is evaluated ) -> Result<(), ProofVerifyError> { - // TODO: Alternative evaluation proof scheme // TODO: Alternative evaluation proof scheme Ok(()) } @@ -384,8 +379,6 @@ impl PolyEvalProof { ) -> Vec> { // TODO: Alternative evaluation proof scheme vec![] - // TODO: Alternative evaluation proof scheme - vec![] } pub fn verify_plain_batched_points( @@ -393,10 +386,6 @@ impl PolyEvalProof { _transcript: &mut Transcript, _r_list: Vec>, // point at which the polynomial is evaluated _Zr_list: Vec, // commitment to \widetilde{Z}(r) on each point - _proof_list: &Vec>, - _transcript: &mut Transcript, - _r_list: Vec>, // point at which the polynomial is evaluated - _Zr_list: Vec, // commitment to \widetilde{Z}(r) on each point ) -> Result<(), ProofVerifyError> { // TODO: Alternative evaluation proof scheme Ok(()) From dad8e14d72e3fcdfbe4b98dde07625bd5b6cdb12 Mon Sep 17 00:00:00 2001 From: kunxian xia Date: Wed, 11 Dec 2024 15:07:18 +0800 Subject: [PATCH 21/22] more base field backed objects --- spartan_parallel/src/sparse_mlpoly.rs | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/spartan_parallel/src/sparse_mlpoly.rs b/spartan_parallel/src/sparse_mlpoly.rs index fc85f109..f968ca09 100644 --- a/spartan_parallel/src/sparse_mlpoly.rs +++ b/spartan_parallel/src/sparse_mlpoly.rs @@ -13,6 +13,7 @@ use super::random::RandomTape; use super::timer::Timer; use super::transcript::{AppendToTranscript, ProofTranscript}; use core::cmp::Ordering; +use ff::Field; use merlin::Transcript; use serde::{Deserialize, Serialize}; @@ -20,11 +21,11 @@ use serde::{Deserialize, Serialize}; pub struct SparseMatEntry { row: usize, col: usize, - val: S, + val: S::BaseField, } impl SparseMatEntry { - pub fn new(row: usize, col: usize, val: S) -> Self { + pub fn new(row: usize, col: usize, val: S::BaseField) -> Self { SparseMatEntry { row, col, val } } } @@ -264,6 +265,7 @@ pub struct SparseMatPolyCommitment { batch_size: usize, num_ops: usize, num_mem_cells: usize, + // TODO: add mpcs commitment _phantom: S, } @@ -288,11 +290,11 @@ impl SparseMatPolynomial { self.M.len().next_power_of_two() } - fn sparse_to_dense_vecs(&self, N: usize) -> (Vec, Vec, Vec) { + fn sparse_to_dense_vecs(&self, N: usize) -> (Vec, Vec, Vec) { assert!(N >= self.get_num_nz_entries()); let mut ops_row: Vec = vec![0; N]; let mut ops_col: Vec = vec![0; N]; - let mut val: Vec = vec![S::field_zero(); N]; + let mut val: Vec = vec![S::BaseField::ZERO; N]; for i in 0..self.M.len() { ops_row[i] = self.M[i].row; From ec71f082a7ff6f42a87ad0c14beb8711362daf79 Mon Sep 17 00:00:00 2001 From: kunxian xia Date: Wed, 11 Dec 2024 19:48:10 +0800 Subject: [PATCH 22/22] more base field backed objects --- spartan_parallel/src/sparse_mlpoly.rs | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/spartan_parallel/src/sparse_mlpoly.rs b/spartan_parallel/src/sparse_mlpoly.rs index f968ca09..4213f498 100644 --- a/spartan_parallel/src/sparse_mlpoly.rs +++ b/spartan_parallel/src/sparse_mlpoly.rs @@ -1,7 +1,7 @@ #![allow(clippy::type_complexity)] #![allow(clippy::too_many_arguments)] #![allow(clippy::needless_range_loop)] -use crate::mle::Ext; +use crate::mle::{Base, Ext}; use crate::scalar::SpartanExtensionField; use super::dense_mlpoly::DensePolynomial; @@ -192,9 +192,9 @@ impl DerefsEvalProof { #[derive(Clone)] struct AddrTimestamps { ops_addr_usize: Vec>, - ops_addr: Vec>, - read_ts: Vec>, - audit_ts: 
DensePolynomial, + ops_addr: Vec>, + read_ts: Vec>, + audit_ts: DensePolynomial, } impl AddrTimestamps { @@ -204,8 +204,8 @@ impl AddrTimestamps { } let mut audit_ts = vec![0usize; num_cells]; - let mut ops_addr_vec: Vec> = Vec::new(); - let mut read_ts_vec: Vec> = Vec::new(); + let mut ops_addr_vec: Vec> = Vec::new(); + let mut read_ts_vec: Vec> = Vec::new(); for ops_addr_inst in ops_addr.iter() { let mut read_ts = vec![0usize; num_ops]; @@ -253,11 +253,11 @@ impl AddrTimestamps { pub struct MultiSparseMatPolynomialAsDense { batch_size: usize, - val: Vec>, + val: Vec>, row: AddrTimestamps, col: AddrTimestamps, - comb_ops: DensePolynomial, - comb_mem: DensePolynomial, + comb_ops: DensePolynomial, + comb_mem: DensePolynomial, } #[derive(Debug, Serialize, Deserialize, Clone)]
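// Illustrative sketch, not code from these patches: the Base/Ext marker types let data such
// as sparse-matrix values, address/timestamp polynomials, and witness assignments stay in
// the Goldilocks base field, and get lifted into the extension field only where a protocol
// step needs extension-field arithmetic. A minimal lifting helper, assuming only the
// `SpartanExtensionField::from_base` conversion already used in lib.rs, might look like:
use crate::scalar::SpartanExtensionField;

fn lift_to_ext<S: SpartanExtensionField>(base_vals: &[S::BaseField]) -> Vec<S> {
  base_vals.iter().map(|b| S::from_base(b)).collect()
}
// e.g. the base-field vector exposed by `MLE::<S, Base>::inner_ref()` could be lifted this
// way before being wrapped in an extension-field `DensePolynomial::new(...)`.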