From 5a675ead34f4eb2b4030e06a90ffdfac0d96d006 Mon Sep 17 00:00:00 2001 From: Srinath Setty Date: Mon, 18 Dec 2023 16:55:54 -0500 Subject: [PATCH 1/7] Support for multilinear KZG commitments (#269) * multilinear KZG PCS as a provider; builds * fix two tests * fix third test; cut duplicate code * Tidy up source code comments Signed-off-by: Greg Zaverucha * impl PairingGroup for bn256 * remove unneeded imports * simplify CommitmentKey * fix build; migrate G1Affine * fmt * checkpoint * migrate G2Affine and pairing * fix clippy; use unimplemented! * switch to affine form for compressed commitments * add a test with mlkzg * cargo fmt * cleanup * go back to compressed group * address clippy * rename * cleanup * add an alias * deduplicate * Revert "add an alias" This reverts commit 97cade6c8751deacbc8b5b0e0df1579e3baa1477. * Use an alias for PreprocessedGroupElements Signed-off-by: Greg Zaverucha * cargo fmt * update README.md --------- Signed-off-by: Greg Zaverucha Co-authored-by: Greg Zaverucha --- README.md | 2 +- src/lib.rs | 11 +- src/provider/bn256_grumpkin.rs | 96 +++- src/provider/mlkzg.rs | 866 +++++++++++++++++++++++++++++++++ src/provider/mod.rs | 1 + src/provider/pasta.rs | 16 + src/provider/traits.rs | 38 +- 7 files changed, 1024 insertions(+), 6 deletions(-) create mode 100644 src/provider/mlkzg.rs diff --git a/README.md b/README.md index e36179af8..d6ac2d137 100644 --- a/README.md +++ b/README.md @@ -1,4 +1,4 @@ -# Nova: Recursive SNARKs without trusted setup +# Nova: High-speed recursive arguments from folding schemes > [!NOTE] > This repository is a fork of the original hosted at [https://github.com/microsoft/nova](https://github.com/microsoft/nova). It's an incubator for experimenting with more advanced variants of the original software and working out the kinks in them. 
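For orientation before the diffs: a minimal end-to-end sketch of how the new multilinear KZG engine is exercised, modeled on the tests added in src/provider/mlkzg.rs further down in this patch. Type names, module paths, and the (2, 2) -> 9 evaluation pair are taken from patch 1; this is an illustrative sketch, not additional code in the patch.

use crate::{
  provider::{
    keccak::Keccak256Transcript,
    mlkzg::{Bn256EngineKZG, CommitmentEngine, CommitmentKey, EvaluationEngine},
  },
  traits::{
    commitment::CommitmentEngineTrait, evaluation::EvaluationEngineTrait, Engine,
    TranscriptEngineTrait,
  },
};

type E = Bn256EngineKZG;
type Fr = <E as Engine>::Scalar;

fn mlkzg_roundtrip() {
  // Test-only trusted setup for up to 4 coefficients.
  let ck: CommitmentKey<E> = CommitmentEngine::<E>::setup(b"test", 4);
  let (pk, vk) = EvaluationEngine::<E>::setup(&ck);

  // Multilinear polynomial in evaluation form over {0,1}^2: 1 + X1 + X2 + X1*X2.
  let poly = vec![Fr::from(1), Fr::from(2), Fr::from(2), Fr::from(4)];
  let comm = CommitmentEngine::<E>::commit(&ck, &poly);

  // Prove and verify poly(2, 2) = 9, one of the pairs exercised in the tests below.
  let point = vec![Fr::from(2), Fr::from(2)];
  let eval = Fr::from(9);
  let mut p_tr = Keccak256Transcript::<E>::new(b"TestEval");
  let proof =
    EvaluationEngine::<E>::prove(&ck, &pk, &mut p_tr, &comm, &poly, &point, &eval).unwrap();
  let mut v_tr = Keccak256Transcript::<E>::new(b"TestEval");
  assert!(EvaluationEngine::<E>::verify(&vk, &mut v_tr, &comm, &point, &eval, &proof).is_ok());
}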
diff --git a/src/lib.rs b/src/lib.rs index 5b38d10c8..5d9d6e610 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -1000,8 +1000,8 @@ mod tests { use super::*; use crate::{ provider::{ - non_hiding_zeromorph::ZMPCS, traits::DlogGroup, Bn256Engine, Bn256EngineZM, GrumpkinEngine, - PallasEngine, Secp256k1Engine, Secq256k1Engine, VestaEngine, + mlkzg::Bn256EngineKZG, non_hiding_zeromorph::ZMPCS, traits::DlogGroup, Bn256Engine, + Bn256EngineZM, GrumpkinEngine, PallasEngine, Secp256k1Engine, Secq256k1Engine, VestaEngine, }, traits::{evaluation::EvaluationEngineTrait, snark::default_ck_hint}, }; @@ -1387,6 +1387,13 @@ mod tests { ZMPCS, EE<_>, >(); + + test_ivc_nontrivial_with_spark_compression_with::< + Bn256EngineKZG, + GrumpkinEngine, + provider::mlkzg::EvaluationEngine<_>, + EE<_>, + >(); } fn test_ivc_nontrivial_with_spark_compression_with() diff --git a/src/provider/bn256_grumpkin.rs b/src/provider/bn256_grumpkin.rs index 88f7bec45..d80d7b590 100644 --- a/src/provider/bn256_grumpkin.rs +++ b/src/provider/bn256_grumpkin.rs @@ -3,7 +3,7 @@ use crate::{ impl_traits, provider::{ msm::cpu_best_msm, - traits::{CompressedGroup, DlogGroup}, + traits::{CompressedGroup, DlogGroup, PairingGroup}, }, traits::{Group, PrimeFieldExt, TranscriptReprTrait}, }; @@ -19,7 +19,8 @@ use sha3::Shake256; use std::io::Read; use halo2curves::bn256::{ - G1Affine as Bn256Affine, G1Compressed as Bn256Compressed, G1 as Bn256Point, + pairing, G1Affine as Bn256Affine, G1Compressed as Bn256Compressed, G2Affine, G2Compressed, Gt, + G1 as Bn256Point, G2, }; use halo2curves::grumpkin::{ G1Affine as GrumpkinAffine, G1Compressed as GrumpkinCompressed, G1 as GrumpkinPoint, @@ -52,3 +53,94 @@ impl_traits!( "30644e72e131a029b85045b68181585d97816a916871ca8d3c208c16d87cfd47", "30644e72e131a029b85045b68181585d2833e84879b9709143e1f593f0000001" ); + +impl PairingGroup for Bn256Point { + type G2 = G2; + type GT = Gt; + + fn pairing(p: &Self, q: &Self::G2) -> Self::GT { + pairing(&p.to_affine(), &q.to_affine()) + } +} + +impl Group for G2 { + type Base = bn256::Base; + type Scalar = bn256::Scalar; + + fn group_params() -> (Self::Base, Self::Base, BigInt, BigInt) { + let A = bn256::Point::a(); + let B = bn256::Point::b(); + let order = BigInt::from_str_radix( + "30644e72e131a029b85045b68181585d2833e84879b9709143e1f593f0000001", + 16, + ) + .unwrap(); + let base = BigInt::from_str_radix( + "30644e72e131a029b85045b68181585d97816a916871ca8d3c208c16d87cfd47", + 16, + ) + .unwrap(); + + (A, B, order, base) + } +} + +impl DlogGroup for G2 { + type CompressedGroupElement = G2Compressed; + type PreprocessedGroupElement = G2Affine; + + fn vartime_multiscalar_mul( + scalars: &[Self::Scalar], + bases: &[Self::PreprocessedGroupElement], + ) -> Self { + cpu_best_msm(scalars, bases) + } + + fn preprocessed(&self) -> Self::PreprocessedGroupElement { + self.to_affine() + } + + fn group(p: &Self::PreprocessedGroupElement) -> Self { + G2::from(*p) + } + + fn compress(&self) -> Self::CompressedGroupElement { + self.to_bytes() + } + + fn from_label(_label: &'static [u8], _n: usize) -> Vec { + unimplemented!() + } + + fn zero() -> Self { + G2::identity() + } + + fn gen() -> Self { + G2::generator() + } + + fn to_coordinates(&self) -> (Self::Base, Self::Base, bool) { + unimplemented!() + } +} + +impl TranscriptReprTrait for G2Compressed { + fn to_transcript_bytes(&self) -> Vec { + self.as_ref().to_vec() + } +} + +impl CompressedGroup for G2Compressed { + type GroupElement = G2; + + fn decompress(&self) -> Option { + Some(G2::from_bytes(self).unwrap()) + } +} + +impl 
TranscriptReprTrait for G2Affine { + fn to_transcript_bytes(&self) -> Vec { + unimplemented!() + } +} diff --git a/src/provider/mlkzg.rs b/src/provider/mlkzg.rs new file mode 100644 index 000000000..90a5114ad --- /dev/null +++ b/src/provider/mlkzg.rs @@ -0,0 +1,866 @@ +//! This module implements Nova's evaluation engine using multilinear KZG +#![allow(non_snake_case)] +use crate::{ + errors::NovaError, + provider::{ + keccak::Keccak256Transcript, + poseidon::{PoseidonRO, PoseidonROCircuit}, + traits::{CompressedGroup, DlogGroup, PairingGroup}, + }, + traits::{ + commitment::{CommitmentEngineTrait, CommitmentTrait, Len}, + evaluation::EvaluationEngineTrait, + AbsorbInROTrait, Engine, ROTrait, TranscriptEngineTrait, TranscriptReprTrait, + }, +}; +use abomonation_derive::Abomonation; +use core::{ + marker::PhantomData, + ops::{Add, Mul, MulAssign}, +}; +use ff::Field; +use halo2curves::bn256::{Fq as Bn256Fq, Fr as Bn256Fr, G1 as Bn256G1}; +use itertools::Itertools as _; +use rand_core::OsRng; +use rayon::prelude::*; +use serde::{Deserialize, Serialize}; + +/// Alias to points on G1 that are in preprocessed form +type G1 = <::GE as DlogGroup>::PreprocessedGroupElement; + +/// Alias to points on G1 that are in preprocessed form +type G2 = <<::GE as PairingGroup>::G2 as DlogGroup>::PreprocessedGroupElement; + +/// KZG commitment key +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Abomonation)] +#[abomonation_omit_bounds] +pub struct CommitmentKey +where + E::GE: PairingGroup, +{ + #[abomonate_with(Vec<[u64; 8]>)] // this is a hack; we just assume the size of the element. + ck: Vec<::PreprocessedGroupElement>, + #[abomonate_with(Vec<[u64; 16]>)] // this is a hack; we just assume the size of the element. + tau_H: <::G2 as DlogGroup>::PreprocessedGroupElement, // needed only for the verifier key +} + +impl Len for CommitmentKey +where + E::GE: PairingGroup, +{ + fn length(&self) -> usize { + self.ck.len() + } +} + +/// A KZG commitment +#[derive(Clone, Copy, Debug, PartialEq, Eq, Serialize, Deserialize, Abomonation)] +#[serde(bound = "")] +#[abomonation_omit_bounds] +pub struct Commitment +where + E: Engine, + E::GE: PairingGroup, +{ + #[abomonate_with(Vec<[u64; 12]>)] // this is a hack; we just assume the size of the element. 
+ comm: ::GE, +} + +/// A compressed commitment (suitable for serialization) +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] +pub struct CompressedCommitment +where + E: Engine, + E::GE: PairingGroup, +{ + comm: ::CompressedGroupElement, +} + +impl CommitmentTrait for Commitment +where + E: Engine, + E::GE: PairingGroup, +{ + type CompressedCommitment = CompressedCommitment; + + fn compress(&self) -> Self::CompressedCommitment { + CompressedCommitment { + comm: self.comm.compress(), + } + } + + fn to_coordinates(&self) -> (E::Base, E::Base, bool) { + self.comm.to_coordinates() + } + + fn decompress(c: &Self::CompressedCommitment) -> Result { + let comm = <::GE as DlogGroup>::CompressedGroupElement::decompress(&c.comm); + if comm.is_none() { + return Err(NovaError::DecompressionError); + } + Ok(Commitment { + comm: comm.unwrap(), + }) + } +} + +impl Default for Commitment +where + E: Engine, + E::GE: PairingGroup, +{ + fn default() -> Self { + Commitment { + comm: E::GE::zero(), + } + } +} + +impl TranscriptReprTrait for Commitment +where + E: Engine, + E::GE: PairingGroup, +{ + fn to_transcript_bytes(&self) -> Vec { + let (x, y, is_infinity) = self.comm.to_coordinates(); + let is_infinity_byte = (!is_infinity).into(); + [ + x.to_transcript_bytes(), + y.to_transcript_bytes(), + [is_infinity_byte].to_vec(), + ] + .concat() + } +} + +impl AbsorbInROTrait for Commitment +where + E: Engine, + E::GE: PairingGroup, +{ + fn absorb_in_ro(&self, ro: &mut E::RO) { + let (x, y, is_infinity) = self.comm.to_coordinates(); + ro.absorb(x); + ro.absorb(y); + ro.absorb(if is_infinity { + E::Base::ONE + } else { + E::Base::ZERO + }); + } +} + +impl TranscriptReprTrait for CompressedCommitment +where + E::GE: PairingGroup, +{ + fn to_transcript_bytes(&self) -> Vec { + self.comm.to_transcript_bytes() + } +} + +impl MulAssign for Commitment +where + E: Engine, + E::GE: PairingGroup, +{ + fn mul_assign(&mut self, scalar: E::Scalar) { + let result = (self as &Commitment).comm * scalar; + *self = Commitment { comm: result }; + } +} + +impl<'a, 'b, E> Mul<&'b E::Scalar> for &'a Commitment +where + E: Engine, + E::GE: PairingGroup, +{ + type Output = Commitment; + + fn mul(self, scalar: &'b E::Scalar) -> Commitment { + Commitment { + comm: self.comm * scalar, + } + } +} + +impl Mul for Commitment +where + E: Engine, + E::GE: PairingGroup, +{ + type Output = Commitment; + + fn mul(self, scalar: E::Scalar) -> Commitment { + Commitment { + comm: self.comm * scalar, + } + } +} + +impl Add for Commitment +where + E: Engine, + E::GE: PairingGroup, +{ + type Output = Commitment; + + fn add(self, other: Commitment) -> Commitment { + Commitment { + comm: self.comm + other.comm, + } + } +} + +/// Provides a commitment engine +#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] +pub struct CommitmentEngine { + _p: PhantomData, +} + +impl CommitmentEngineTrait for CommitmentEngine +where + E: Engine, + E::GE: PairingGroup, +{ + type Commitment = Commitment; + type CommitmentKey = CommitmentKey; + + fn setup(_label: &'static [u8], n: usize) -> Self::CommitmentKey { + // NOTE: this is for testing purposes and should not be used in production + // TODO: we need to decide how to generate load/store parameters + let tau = E::Scalar::random(OsRng); + let num_gens = n.next_power_of_two(); + + // Compute powers of tau in E::Scalar, then scalar muls in parallel + let mut powers_of_tau: Vec = Vec::with_capacity(num_gens); + powers_of_tau.insert(0, E::Scalar::ONE); + for i in 1..num_gens { + 
powers_of_tau.insert(i, powers_of_tau[i - 1] * tau); + } + + let ck: Vec> = (0..num_gens) + .into_par_iter() + .map(|i| (::gen() * powers_of_tau[i]).preprocessed()) + .collect(); + + let tau_H = (<::G2 as DlogGroup>::gen() * tau).preprocessed(); + + Self::CommitmentKey { ck, tau_H } + } + + fn commit(ck: &Self::CommitmentKey, v: &[E::Scalar]) -> Self::Commitment { + assert!(ck.ck.len() >= v.len()); + Commitment { + comm: E::GE::vartime_multiscalar_mul(v, &ck.ck[..v.len()]), + } + } +} + +/// Provides an implementation of generators for proving evaluations +#[derive(Clone, Debug, Serialize, Deserialize, Abomonation)] +#[serde(bound = "")] +#[abomonation_omit_bounds] +pub struct ProverKey { + _p: PhantomData, +} + +/// A verifier key +#[derive(Clone, Debug, Serialize, Deserialize, Abomonation)] +#[serde(bound = "")] +#[abomonation_omit_bounds] +pub struct VerifierKey +where + E::GE: PairingGroup, +{ + #[abomonate_with([u64; 12])] // this is a hack; we just assume the size of the element. + G: G1, + #[abomonate_with([u64; 24])] // this is a hack; we just assume the size of the element. + H: G2, + #[abomonate_with([u64; 24])] // this is a hack; we just assume the size of the element. + tau_H: G2, +} + +/// Provides an implementation of a polynomial evaluation argument +#[derive(Clone, Debug, Serialize, Deserialize)] +#[serde(bound = "")] +pub struct EvaluationArgument +where + E::GE: PairingGroup, +{ + com: Vec>, + w: Vec>, + v: Vec>, +} + +/// Provides an implementation of a polynomial evaluation engine using KZG +#[derive(Clone, Debug, Serialize, Deserialize)] +pub struct EvaluationEngine { + _p: PhantomData, +} + +impl EvaluationEngine +where + E: Engine, + E::GE: PairingGroup, +{ + // This impl block defines helper functions that are not a part of + // EvaluationEngineTrait, but that we will use to implement the trait methods. 
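+  // The three helpers below derive the Fiat-Shamir challenges used by prove() and verify(), in order:
+  //   r = compute_challenge(C, y, com)       -- fixes the opening point set u = {r, -r, r^2}
+  //   q = get_batch_challenge(C, u, v)       -- folds the k committed polynomials into B(X) = f_0(X) + q*f_1(X) + ... + q^(k-1)*f_{k-1}(X)
+  //   d = verifier_second_challenge(C_B, W)  -- batches the three openings of B(X) with powers of d
+  // Both sides absorb the same data, so the prover and verifier transcripts stay in the same state.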
+ fn compute_challenge( + C: &G1, + y: &E::Scalar, + com: &[G1], + transcript: &mut ::TE, + ) -> E::Scalar { + transcript.absorb(b"C", C); + transcript.absorb(b"y", y); + transcript.absorb(b"c", &com.to_vec().as_slice()); + + transcript.squeeze(b"c").unwrap() + } + + // Compute challenge q = Hash(vk, C0, ..., C_{k-1}, u0, ...., u_{t-1}, + // (f_i(u_j))_{i=0..k-1,j=0..t-1}) + fn get_batch_challenge( + C: &[G1], + u: &[E::Scalar], + v: &[Vec], + transcript: &mut ::TE, + ) -> E::Scalar { + transcript.absorb(b"C", &C.to_vec().as_slice()); + transcript.absorb(b"u", &u.to_vec().as_slice()); + transcript.absorb( + b"v", + &v.iter() + .flatten() + .cloned() + .collect::>() + .as_slice(), + ); + + transcript.squeeze(b"r").unwrap() + } + + fn batch_challenge_powers(q: E::Scalar, k: usize) -> Vec { + // Compute powers of q : (1, q, q^2, ..., q^(k-1)) + let mut q_powers = vec![E::Scalar::ONE; k]; + for i in 1..k { + q_powers[i] = q_powers[i - 1] * q; + } + q_powers + } + + fn verifier_second_challenge( + C_B: &G1, + W: &[G1], + transcript: &mut ::TE, + ) -> E::Scalar { + transcript.absorb(b"C_b", C_B); + transcript.absorb(b"W", &W.to_vec().as_slice()); + + transcript.squeeze(b"d").unwrap() + } +} + +impl EvaluationEngineTrait for EvaluationEngine +where + E: Engine>, + E::GE: PairingGroup, +{ + type EvaluationArgument = EvaluationArgument; + type ProverKey = ProverKey; + type VerifierKey = VerifierKey; + + fn setup( + ck: &>::CommitmentKey, + ) -> (Self::ProverKey, Self::VerifierKey) { + let pk = ProverKey { + _p: Default::default(), + }; + + let vk = VerifierKey { + G: E::GE::gen().preprocessed(), + H: <::G2 as DlogGroup>::gen().preprocessed(), + tau_H: ck.tau_H.clone(), + }; + + (pk, vk) + } + + fn prove( + ck: &CommitmentKey, + _pk: &Self::ProverKey, + transcript: &mut ::TE, + C: &Commitment, + hat_P: &[E::Scalar], + point: &[E::Scalar], + eval: &E::Scalar, + ) -> Result { + let x: Vec = point.to_vec(); + + //////////////// begin helper closures ////////// + let kzg_open = |f: &[E::Scalar], u: E::Scalar| -> G1 { + // On input f(x) and u compute the witness polynomial used to prove + // that f(u) = v. The main part of this is to compute the + // division (f(x) - f(u)) / (x - u), but we don't use a general + // division algorithm, we make use of the fact that the division + // never has a remainder, and that the denominator is always a linear + // polynomial. The cost is (d-1) mults + (d-1) adds in E::Scalar, where + // d is the degree of f. + // + // We use the fact that if we compute the quotient of f(x)/(x-u), + // there will be a remainder, but it'll be v = f(u). Put another way + // the quotient of f(x)/(x-u) and (f(x) - f(v))/(x-u) is the + // same. One advantage is that computing f(u) could be decoupled + // from kzg_open, it could be done later or separate from computing W. 
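+      // Worked example of the synthetic division below: with f(x) = 1 + 2x + 3x^2
+      // (f = [1, 2, 3]) and u = 2, the loop yields h = [8, 3, 0], i.e. h(x) = 3x + 8;
+      // indeed f(x) = (x - 2)*h(x) + 17, and the dropped remainder 17 equals f(2).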
+ + let compute_witness_polynomial = |f: &[E::Scalar], u: E::Scalar| -> Vec { + let d = f.len(); + + // Compute h(x) = f(x)/(x - u) + let mut h = vec![E::Scalar::ZERO; d]; + for i in (1..d).rev() { + h[i - 1] = f[i] + h[i] * u; + } + + h + }; + + let h = compute_witness_polynomial(f, u); + + E::CE::commit(ck, &h).comm.preprocessed() + }; + + let kzg_open_batch = |C: &[G1], + f: &[Vec], + u: &[E::Scalar], + transcript: &mut ::TE| + -> (Vec>, Vec>) { + let poly_eval = |f: &[E::Scalar], u: E::Scalar| -> E::Scalar { + let mut v = f[0]; + let mut u_power = E::Scalar::ONE; + + for fi in f.iter().skip(1) { + u_power *= u; + v += u_power * fi; + } + + v + }; + + let scalar_vector_muladd = |a: &mut Vec, v: &Vec, s: E::Scalar| { + assert!(a.len() >= v.len()); + for i in 0..v.len() { + a[i] += s * v[i]; + } + }; + + let kzg_compute_batch_polynomial = |f: &[Vec], q: E::Scalar| -> Vec { + let k = f.len(); // Number of polynomials we're batching + + let q_powers = Self::batch_challenge_powers(q, k); + + // Compute B(x) = f[0] + q*f[1] + q^2 * f[2] + ... q^(k-1) * f[k-1] + let mut B = f[0].clone(); + for i in 1..k { + scalar_vector_muladd(&mut B, &f[i], q_powers[i]); // B += q_powers[i] * f[i] + } + + B + }; + ///////// END kzg_open_batch closure helpers + + let k = f.len(); + let t = u.len(); + assert!(C.len() == k); + + // The verifier needs f_i(u_j), so we compute them here + // (V will compute B(u_j) itself) + let mut v = vec![vec!(E::Scalar::ZERO; k); t]; + for i in 0..t { + // for each point u + for (j, f_j) in f.iter().enumerate().take(k) { + // for each poly f + v[i][j] = poly_eval(f_j, u[i]); // = f_j(u_i) + } + } + + let q = Self::get_batch_challenge(C, u, &v, transcript); + let B = kzg_compute_batch_polynomial(f, q); + + // Now open B at u0, ..., u_{t-1} + let mut w = Vec::with_capacity(t); + for ui in u { + let wi = kzg_open(&B, *ui); + w.push(wi); + } + + // Compute the commitment to the batched polynomial B(X) + let q_powers = Self::batch_challenge_powers(q, k); + let C_B = (::group(&C[0]) + + E::GE::vartime_multiscalar_mul(&q_powers[1..k], &C[1..k])) + .preprocessed(); + + // The prover computes the challenge to keep the transcript in the same + // state as that of the verifier + let _d_0 = Self::verifier_second_challenge(&C_B, &w, transcript); + + (w, v) + }; + + ///// END helper closures ////////// + + let ell = x.len(); + let n = hat_P.len(); + assert_eq!(n, 1 << ell); // Below we assume that n is a power of two + + // Phase 1 -- create commitments com_1, ..., com_\ell + let mut polys: Vec> = Vec::new(); + polys.push(hat_P.to_vec()); + for i in 0..ell { + let Pi_len = polys[i].len() / 2; + let mut Pi = vec![E::Scalar::ZERO; Pi_len]; + + #[allow(clippy::needless_range_loop)] + for j in 0..Pi_len { + Pi[j] = x[ell-i-1] * polys[i][2*j + 1] // Odd part of P^(i-1) + + (E::Scalar::ONE - x[ell-i-1]) * polys[i][2*j]; // Even part of P^(i-1) + } + + if i == ell - 1 && *eval != Pi[0] { + return Err(NovaError::UnSat); + } + + polys.push(Pi); + } + + // We do not need to commit to the first polynomial as it is already committed. + // Compute commitments in parallel + let com: Vec> = (1..polys.len()) + .into_par_iter() + .map(|i| E::CE::commit(ck, &polys[i]).comm.preprocessed()) + .collect(); + + // Phase 2 + // We do not need to add x to the transcript, because in our context x was + // obtained from the transcript. 
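+    // Openings will be requested at u = {r, -r, r^2}: writing P(X) = even(X^2) + X*odd(X^2),
+    // the values P(r) and P(-r) determine even(r^2) = (P(r) + P(-r))/2 and
+    // odd(r^2) = (P(r) - P(-r))/(2r). Each folded polynomial P' built in Phase 1 from P
+    // with multiplier x therefore satisfies, after clearing denominators,
+    //   2*r*P'(r^2) = r*(1 - x)*(P(r) + P(-r)) + x*(P(r) - P(-r)),
+    // which is exactly the consistency check performed in verify().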
+ let r = Self::compute_challenge(&C.comm.preprocessed(), eval, &com, transcript); + let u = vec![r, -r, r * r]; + + // Phase 3 -- create response + let mut com_all = com.clone(); + com_all.insert(0, C.comm.preprocessed()); + let (w, v) = kzg_open_batch(&com_all, &polys, &u, transcript); + + Ok(EvaluationArgument { com, w, v }) + } + + /// A method to verify purported evaluations of a batch of polynomials + fn verify( + vk: &Self::VerifierKey, + transcript: &mut ::TE, + C: &Commitment, + point: &[E::Scalar], + P_of_x: &E::Scalar, + pi: &Self::EvaluationArgument, + ) -> Result<(), NovaError> { + let x = point.to_vec(); + let y = P_of_x; + + // vk is hashed in transcript already, so we do not add it here + + let kzg_verify_batch = |vk: &VerifierKey, + C: &Vec>, + W: &Vec>, + u: &Vec, + v: &Vec>, + transcript: &mut ::TE| + -> bool { + let k = C.len(); + let t = u.len(); + + let q = Self::get_batch_challenge(C, u, v, transcript); + let q_powers = Self::batch_challenge_powers(q, k); // 1, q, q^2, ..., q^(k-1) + + // Compute the commitment to the batched polynomial B(X) + let C_B = (::group(&C[0]) + + E::GE::vartime_multiscalar_mul(&q_powers[1..k], &C[1..k])) + .preprocessed(); + + // Compute the batched openings + // compute B(u_i) = v[i][0] + q*v[i][1] + ... + q^(t-1) * v[i][t-1] + let B_u = (0..t) + .map(|i| { + assert_eq!(q_powers.len(), v[i].len()); + q_powers + .iter() + .zip_eq(v[i].iter()) + .map(|(a, b)| *a * *b) + .sum() + }) + .collect::>(); + + let d_0 = Self::verifier_second_challenge(&C_B, W, transcript); + let d = [d_0, d_0 * d_0]; + + // Shorthand to convert from preprocessed G1 elements to non-preprocessed + let from_ppG1 = |P: &G1| ::group(P); + // Shorthand to convert from preprocessed G2 elements to non-preprocessed + let from_ppG2 = |P: &G2| <::G2 as DlogGroup>::group(P); + + assert!(t == 3); + // We write a special case for t=3, since this what is required for + // mlkzg. Following the paper directly, we must compute: + // let L0 = C_B - vk.G * B_u[0] + W[0] * u[0]; + // let L1 = C_B - vk.G * B_u[1] + W[1] * u[1]; + // let L2 = C_B - vk.G * B_u[2] + W[2] * u[2]; + // let R0 = -W[0]; + // let R1 = -W[1]; + // let R2 = -W[2]; + // let L = L0 + L1*d[0] + L2*d[1]; + // let R = R0 + R1*d[0] + R2*d[1]; + // + // We group terms to reduce the number of scalar mults (to seven): + // In Rust, we could use MSMs for these, and speed up verification. 
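+      // Each single-point check is the standard KZG identity
+      //   e(C_B - B(u_i)*G + u_i*W_i, H) == e(W_i, tau*H),
+      // i.e. L_i = C_B - vk.G * B_u[i] + W[i] * u[i] paired against R_i = W[i].
+      // Taking the random combination L = L_0 + d[0]*L_1 + d[1]*L_2 (and likewise R)
+      // and collecting the C_B and vk.G terms yields the seven scalar multiplications below.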
+ let L = from_ppG1(&C_B) * (E::Scalar::ONE + d[0] + d[1]) + - from_ppG1(&vk.G) * (B_u[0] + d[0] * B_u[1] + d[1] * B_u[2]) + + from_ppG1(&W[0]) * u[0] + + from_ppG1(&W[1]) * (u[1] * d[0]) + + from_ppG1(&W[2]) * (u[2] * d[1]); + + let R0 = from_ppG1(&W[0]); + let R1 = from_ppG1(&W[1]); + let R2 = from_ppG1(&W[2]); + let R = R0 + R1 * d[0] + R2 * d[1]; + + // Check that e(L, vk.H) == e(R, vk.tau_H) + (::pairing(&L, &from_ppG2(&vk.H))) + == (::pairing(&R, &from_ppG2(&vk.tau_H))) + }; + ////// END verify() closure helpers + + let ell = x.len(); + + let mut com = pi.com.clone(); + + // we do not need to add x to the transcript, because in our context x was + // obtained from the transcript + let r = Self::compute_challenge(&C.comm.preprocessed(), y, &com, transcript); + + if r == E::Scalar::ZERO || C.comm == E::GE::zero() { + return Err(NovaError::ProofVerifyError); + } + com.insert(0, C.comm.preprocessed()); // set com_0 = C, shifts other commitments to the right + + let u = vec![r, -r, r * r]; + + // Setup vectors (Y, ypos, yneg) from pi.v + let v = &pi.v; + if v.len() != 3 { + return Err(NovaError::ProofVerifyError); + } + if v[0].len() != ell + 1 || v[1].len() != ell + 1 || v[2].len() != ell + 1 { + return Err(NovaError::ProofVerifyError); + } + let ypos = &v[0]; + let yneg = &v[1]; + let Y = &v[2]; + + // Check consistency of (Y, ypos, yneg) + if Y[ell] != *y { + return Err(NovaError::ProofVerifyError); + } + + let two = E::Scalar::from(2u64); + for i in 0..ell { + if two * r * Y[i + 1] + != r * (E::Scalar::ONE - x[ell - i - 1]) * (ypos[i] + yneg[i]) + + x[ell - i - 1] * (ypos[i] - yneg[i]) + { + return Err(NovaError::ProofVerifyError); + } + // Note that we don't make any checks about Y[0] here, but our batching + // check below requires it + } + + // Check commitments to (Y, ypos, yneg) are valid + if !kzg_verify_batch(vk, &com, &pi.w, &u, &pi.v, transcript) { + return Err(NovaError::ProofVerifyError); + } + + Ok(()) + } +} + +/// An implementation of Nova traits with multilinear KZG over the BN256 curve +#[derive(Clone, Copy, Debug, Eq, PartialEq)] +pub struct Bn256EngineKZG; + +impl Engine for Bn256EngineKZG { + type Base = Bn256Fq; + type Scalar = Bn256Fr; + type GE = Bn256G1; + type RO = PoseidonRO; + type ROCircuit = PoseidonROCircuit; + type TE = Keccak256Transcript; + type CE = CommitmentEngine; +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::{ + provider::keccak::Keccak256Transcript, spartan::polys::multilinear::MultilinearPolynomial, + }; + use bincode::Options; + use group::Curve; + use rand::SeedableRng; + + type E = Bn256EngineKZG; + type Fr = ::Scalar; + + #[test] + fn test_mlkzg_eval() { + // Test with poly(X1, X2) = 1 + X1 + X2 + X1*X2 + let n = 4; + let ck: CommitmentKey = CommitmentEngine::setup(b"test", n); + let (pk, _vk): (ProverKey, VerifierKey) = EvaluationEngine::setup(&ck); + + // poly is in eval. representation; evaluated at [(0,0), (0,1), (1,0), (1,1)] + let poly = vec![Fr::from(1), Fr::from(2), Fr::from(2), Fr::from(4)]; + + let C = CommitmentEngine::commit(&ck, &poly); + let mut tr = Keccak256Transcript::new(b"TestEval"); + + // Call the prover with a (point, eval) pair. 
The prover recomputes + // poly(point) = eval', and fails if eval' != eval + let point = vec![Fr::from(0), Fr::from(0)]; + let eval = Fr::ONE; + assert!(EvaluationEngine::prove(&ck, &pk, &mut tr, &C, &poly, &point, &eval).is_ok()); + + let point = vec![Fr::from(0), Fr::from(1)]; + let eval = Fr::from(2); + assert!(EvaluationEngine::prove(&ck, &pk, &mut tr, &C, &poly, &point, &eval).is_ok()); + + let point = vec![Fr::from(1), Fr::from(1)]; + let eval = Fr::from(4); + assert!(EvaluationEngine::prove(&ck, &pk, &mut tr, &C, &poly, &point, &eval).is_ok()); + + let point = vec![Fr::from(0), Fr::from(2)]; + let eval = Fr::from(3); + assert!(EvaluationEngine::prove(&ck, &pk, &mut tr, &C, &poly, &point, &eval).is_ok()); + + let point = vec![Fr::from(2), Fr::from(2)]; + let eval = Fr::from(9); + assert!(EvaluationEngine::prove(&ck, &pk, &mut tr, &C, &poly, &point, &eval).is_ok()); + + // Try a couple incorrect evaluations and expect failure + let point = vec![Fr::from(2), Fr::from(2)]; + let eval = Fr::from(50); + assert!(EvaluationEngine::prove(&ck, &pk, &mut tr, &C, &poly, &point, &eval).is_err()); + + let point = vec![Fr::from(0), Fr::from(2)]; + let eval = Fr::from(4); + assert!(EvaluationEngine::prove(&ck, &pk, &mut tr, &C, &poly, &point, &eval).is_err()); + } + + #[test] + fn test_mlkzg() { + let n = 4; + + // poly = [1, 2, 1, 4] + let poly = vec![Fr::ONE, Fr::from(2), Fr::from(1), Fr::from(4)]; + + // point = [4,3] + let point = vec![Fr::from(4), Fr::from(3)]; + + // eval = 28 + let eval = Fr::from(28); + + let ck: CommitmentKey = CommitmentEngine::setup(b"test", n); + let (pk, vk) = EvaluationEngine::setup(&ck); + + // make a commitment + let C = CommitmentEngine::commit(&ck, &poly); + + // prove an evaluation + let mut prover_transcript = Keccak256Transcript::new(b"TestEval"); + let proof = + EvaluationEngine::::prove(&ck, &pk, &mut prover_transcript, &C, &poly, &point, &eval) + .unwrap(); + let post_c_p = prover_transcript.squeeze(b"c").unwrap(); + + // verify the evaluation + let mut verifier_transcript = Keccak256Transcript::new(b"TestEval"); + assert!( + EvaluationEngine::verify(&vk, &mut verifier_transcript, &C, &point, &eval, &proof).is_ok() + ); + let post_c_v = verifier_transcript.squeeze(b"c").unwrap(); + + // check if the prover transcript and verifier transcript are kept in the + // same state + assert_eq!(post_c_p, post_c_v); + + let my_options = bincode::DefaultOptions::new() + .with_big_endian() + .with_fixint_encoding(); + let mut output_bytes = my_options.serialize(&vk).unwrap(); + output_bytes.append(&mut my_options.serialize(&C.compress()).unwrap()); + output_bytes.append(&mut my_options.serialize(&point).unwrap()); + output_bytes.append(&mut my_options.serialize(&eval).unwrap()); + output_bytes.append(&mut my_options.serialize(&proof).unwrap()); + println!("total output = {} bytes", output_bytes.len()); + + // Change the proof and expect verification to fail + let mut bad_proof = proof.clone(); + bad_proof.com[0] = (bad_proof.com[0] + bad_proof.com[1]).to_affine(); + let mut verifier_transcript2 = Keccak256Transcript::new(b"TestEval"); + assert!(EvaluationEngine::verify( + &vk, + &mut verifier_transcript2, + &C, + &point, + &eval, + &bad_proof + ) + .is_err()); + } + + #[test] + fn test_mlkzg_more() { + // test the mlkzg prover and verifier with random instances (derived from a seed) + for ell in [4, 5, 6] { + let mut rng = rand::rngs::StdRng::seed_from_u64(ell as u64); + + let n = 1 << ell; // n = 2^ell + + let poly = (0..n).map(|_| Fr::random(&mut rng)).collect::>(); + 
let point = (0..ell).map(|_| Fr::random(&mut rng)).collect::>(); + let eval = MultilinearPolynomial::evaluate_with(&poly, &point); + + let ck: CommitmentKey = CommitmentEngine::setup(b"test", n); + let (pk, vk) = EvaluationEngine::setup(&ck); + + // make a commitment + let C = CommitmentEngine::commit(&ck, &poly); + + // prove an evaluation + let mut prover_transcript = Keccak256Transcript::new(b"TestEval"); + let proof: EvaluationArgument = + EvaluationEngine::prove(&ck, &pk, &mut prover_transcript, &C, &poly, &point, &eval) + .unwrap(); + + // verify the evaluation + let mut verifier_tr = Keccak256Transcript::new(b"TestEval"); + assert!(EvaluationEngine::verify(&vk, &mut verifier_tr, &C, &point, &eval, &proof).is_ok()); + + // Change the proof and expect verification to fail + let mut bad_proof = proof.clone(); + bad_proof.com[0] = (bad_proof.com[0] + bad_proof.com[1]).to_affine(); + let mut verifier_tr2 = Keccak256Transcript::new(b"TestEval"); + assert!( + EvaluationEngine::verify(&vk, &mut verifier_tr2, &C, &point, &eval, &bad_proof).is_err() + ); + } + } +} diff --git a/src/provider/mod.rs b/src/provider/mod.rs index 1ba2e27b6..6be433424 100644 --- a/src/provider/mod.rs +++ b/src/provider/mod.rs @@ -2,6 +2,7 @@ // public modules to be used as an evaluation engine with Spartan pub mod ipa_pc; +pub mod mlkzg; pub mod non_hiding_zeromorph; // crate-public modules, made crate-public mostly for tests diff --git a/src/provider/pasta.rs b/src/provider/pasta.rs index 0adfe4db3..d9944f896 100644 --- a/src/provider/pasta.rs +++ b/src/provider/pasta.rs @@ -97,6 +97,10 @@ macro_rules! impl_traits { self.to_affine() } + fn group(p: &Self::PreprocessedGroupElement) -> Self { + $name::Point::from(*p) + } + fn compress(&self) -> Self::CompressedGroupElement { $name_compressed::new(self.to_bytes()) } @@ -151,6 +155,10 @@ macro_rules! impl_traits { $name::Point::identity() } + fn gen() -> Self { + $name::Point::generator() + } + fn to_coordinates(&self) -> (Self::Base, Self::Base, bool) { let coordinates = self.to_affine().coordinates(); if coordinates.is_some().unwrap_u8() == 1 { @@ -187,6 +195,14 @@ macro_rules! 
impl_traits { self.to_repr().to_vec() } } + + impl TranscriptReprTrait for $name::Affine { + fn to_transcript_bytes(&self) -> Vec { + let coords = self.coordinates().unwrap(); + + [coords.x().to_repr(), coords.y().to_repr()].concat() + } + } }; } diff --git a/src/provider/traits.rs b/src/provider/traits.rs index aca4d2903..fd9761a6d 100644 --- a/src/provider/traits.rs +++ b/src/provider/traits.rs @@ -65,7 +65,8 @@ pub trait DlogGroup: + Send + Sync + Serialize - + for<'de> Deserialize<'de>; + + for<'de> Deserialize<'de> + + TranscriptReprTrait; /// A method to compute a multiexponentation fn vartime_multiscalar_mul( @@ -82,13 +83,32 @@ pub trait DlogGroup: /// Produces a preprocessed element fn preprocessed(&self) -> Self::PreprocessedGroupElement; + /// Returns a group element from a preprocessed group element + fn group(p: &Self::PreprocessedGroupElement) -> Self; + /// Returns an element that is the additive identity of the group fn zero() -> Self; + /// Returns the generator of the group + fn gen() -> Self; + /// Returns the affine coordinates (x, y, infinty) for the point fn to_coordinates(&self) -> (::Base, ::Base, bool); } +/// A trait that defines extensions to the DlogGroup trait, to be implemented for +/// elliptic curve groups that are pairing friendly +pub trait PairingGroup: DlogGroup { + /// A type representing the second group + type G2: DlogGroup; + + /// A type representing the target group + type GT: PartialEq + Eq; + + /// A method to compute a pairing + fn pairing(p: &Self, q: &Self::G2) -> Self::GT; +} + /// This implementation behaves in ways specific to the halo2curves suite of curves in: // - to_coordinates, // - vartime_multiscalar_mul, where it does not call into accelerated implementations. @@ -132,6 +152,10 @@ macro_rules! impl_traits { self.to_affine() } + fn group(p: &Self::PreprocessedGroupElement) -> Self { + $name::Point::from(*p) + } + fn compress(&self) -> Self::CompressedGroupElement { self.to_bytes() } @@ -186,6 +210,10 @@ macro_rules! impl_traits { $name::Point::identity() } + fn gen() -> Self { + $name::Point::generator() + } + fn to_coordinates(&self) -> (Self::Base, Self::Base, bool) { let coordinates = self.to_affine().coordinates(); if coordinates.is_some().unwrap_u8() == 1 @@ -224,5 +252,13 @@ macro_rules! impl_traits { self.to_repr().to_vec() } } + + impl TranscriptReprTrait for $name::Affine { + fn to_transcript_bytes(&self) -> Vec { + let coords = self.coordinates().unwrap(); + + [coords.x().to_repr(), coords.y().to_repr()].concat() + } + } }; } From f618173e5aebed22667ae667ab5e9b82c004959d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fran=C3=A7ois=20Garillot?= Date: Mon, 18 Dec 2023 16:55:57 -0500 Subject: [PATCH 2/7] refactor: clean up the needed scaffolding in MLKZG Summary: - THe MLKZG implementation re-implements some group traits, so as to give it maximum generality and depende maximally on the Nova traits. - However, the way in which it imports a pairing (using pairing::Engine) already implicitly constrains perfrectly usable group implementations to be available on the same types. This commit therefore removes the boilerplate and uses those external traits. - Finally, so as to mutualize part of the pairing implementation, this commit also leverages the MultiMillerLoop trait, a subtrait of `pairing::Engine`. - In sum, this commit only moves types - no actual data was harmed in its making. In detail: - Removed the `PairingGroup` trait and its related implementations from the `traits.rs` and `bn256_grumpkin.rs` files. 
- Simplified the imports from `halo2curves::bn256` in `bn256_grumpkin.rs` and removed unused types such as `pairing`, `G2Affine`, `G2Compressed`, `Gt`, and `G2`. - Deleted substantial amount of code associated with `G2` from `bn256_grumpkin.rs`. --- src/lib.rs | 6 +- src/provider/bn256_grumpkin.rs | 96 +---- src/provider/mlkzg.rs | 618 ++++++++++----------------------- src/provider/mod.rs | 13 + src/provider/pasta.rs | 8 - src/provider/traits.rs | 27 -- 6 files changed, 205 insertions(+), 563 deletions(-) diff --git a/src/lib.rs b/src/lib.rs index 5d9d6e610..585b27a91 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -1000,8 +1000,8 @@ mod tests { use super::*; use crate::{ provider::{ - mlkzg::Bn256EngineKZG, non_hiding_zeromorph::ZMPCS, traits::DlogGroup, Bn256Engine, - Bn256EngineZM, GrumpkinEngine, PallasEngine, Secp256k1Engine, Secq256k1Engine, VestaEngine, + non_hiding_zeromorph::ZMPCS, traits::DlogGroup, Bn256Engine, Bn256EngineKZG, Bn256EngineZM, + GrumpkinEngine, PallasEngine, Secp256k1Engine, Secq256k1Engine, VestaEngine, }, traits::{evaluation::EvaluationEngineTrait, snark::default_ck_hint}, }; @@ -1391,7 +1391,7 @@ mod tests { test_ivc_nontrivial_with_spark_compression_with::< Bn256EngineKZG, GrumpkinEngine, - provider::mlkzg::EvaluationEngine<_>, + provider::mlkzg::EvaluationEngine, EE<_>, >(); } diff --git a/src/provider/bn256_grumpkin.rs b/src/provider/bn256_grumpkin.rs index d80d7b590..88f7bec45 100644 --- a/src/provider/bn256_grumpkin.rs +++ b/src/provider/bn256_grumpkin.rs @@ -3,7 +3,7 @@ use crate::{ impl_traits, provider::{ msm::cpu_best_msm, - traits::{CompressedGroup, DlogGroup, PairingGroup}, + traits::{CompressedGroup, DlogGroup}, }, traits::{Group, PrimeFieldExt, TranscriptReprTrait}, }; @@ -19,8 +19,7 @@ use sha3::Shake256; use std::io::Read; use halo2curves::bn256::{ - pairing, G1Affine as Bn256Affine, G1Compressed as Bn256Compressed, G2Affine, G2Compressed, Gt, - G1 as Bn256Point, G2, + G1Affine as Bn256Affine, G1Compressed as Bn256Compressed, G1 as Bn256Point, }; use halo2curves::grumpkin::{ G1Affine as GrumpkinAffine, G1Compressed as GrumpkinCompressed, G1 as GrumpkinPoint, @@ -53,94 +52,3 @@ impl_traits!( "30644e72e131a029b85045b68181585d97816a916871ca8d3c208c16d87cfd47", "30644e72e131a029b85045b68181585d2833e84879b9709143e1f593f0000001" ); - -impl PairingGroup for Bn256Point { - type G2 = G2; - type GT = Gt; - - fn pairing(p: &Self, q: &Self::G2) -> Self::GT { - pairing(&p.to_affine(), &q.to_affine()) - } -} - -impl Group for G2 { - type Base = bn256::Base; - type Scalar = bn256::Scalar; - - fn group_params() -> (Self::Base, Self::Base, BigInt, BigInt) { - let A = bn256::Point::a(); - let B = bn256::Point::b(); - let order = BigInt::from_str_radix( - "30644e72e131a029b85045b68181585d2833e84879b9709143e1f593f0000001", - 16, - ) - .unwrap(); - let base = BigInt::from_str_radix( - "30644e72e131a029b85045b68181585d97816a916871ca8d3c208c16d87cfd47", - 16, - ) - .unwrap(); - - (A, B, order, base) - } -} - -impl DlogGroup for G2 { - type CompressedGroupElement = G2Compressed; - type PreprocessedGroupElement = G2Affine; - - fn vartime_multiscalar_mul( - scalars: &[Self::Scalar], - bases: &[Self::PreprocessedGroupElement], - ) -> Self { - cpu_best_msm(scalars, bases) - } - - fn preprocessed(&self) -> Self::PreprocessedGroupElement { - self.to_affine() - } - - fn group(p: &Self::PreprocessedGroupElement) -> Self { - G2::from(*p) - } - - fn compress(&self) -> Self::CompressedGroupElement { - self.to_bytes() - } - - fn from_label(_label: &'static [u8], _n: usize) -> Vec { - 
unimplemented!() - } - - fn zero() -> Self { - G2::identity() - } - - fn gen() -> Self { - G2::generator() - } - - fn to_coordinates(&self) -> (Self::Base, Self::Base, bool) { - unimplemented!() - } -} - -impl TranscriptReprTrait for G2Compressed { - fn to_transcript_bytes(&self) -> Vec { - self.as_ref().to_vec() - } -} - -impl CompressedGroup for G2Compressed { - type GroupElement = G2; - - fn decompress(&self) -> Option { - Some(G2::from_bytes(self).unwrap()) - } -} - -impl TranscriptReprTrait for G2Affine { - fn to_transcript_bytes(&self) -> Vec { - unimplemented!() - } -} diff --git a/src/provider/mlkzg.rs b/src/provider/mlkzg.rs index 90a5114ad..b0b8768f8 100644 --- a/src/provider/mlkzg.rs +++ b/src/provider/mlkzg.rs @@ -3,315 +3,59 @@ use crate::{ errors::NovaError, provider::{ - keccak::Keccak256Transcript, - poseidon::{PoseidonRO, PoseidonROCircuit}, - traits::{CompressedGroup, DlogGroup, PairingGroup}, + kzg_commitment::KZGCommitmentEngine, + non_hiding_kzg::{UVKZGProverKey, UVKZGVerifierKey, UVUniversalKZGParam}, + pedersen::Commitment, + traits::DlogGroup, }, traits::{ - commitment::{CommitmentEngineTrait, CommitmentTrait, Len}, + commitment::{CommitmentEngineTrait, Len}, evaluation::EvaluationEngineTrait, - AbsorbInROTrait, Engine, ROTrait, TranscriptEngineTrait, TranscriptReprTrait, + Engine as NovaEngine, Group, TranscriptEngineTrait, TranscriptReprTrait, }, }; -use abomonation_derive::Abomonation; -use core::{ - marker::PhantomData, - ops::{Add, Mul, MulAssign}, -}; -use ff::Field; -use halo2curves::bn256::{Fq as Bn256Fq, Fr as Bn256Fr, G1 as Bn256G1}; +use core::marker::PhantomData; +use ff::{Field, PrimeFieldBits}; +use group::{Curve, Group as _}; use itertools::Itertools as _; -use rand_core::OsRng; +use pairing::{Engine, MillerLoopResult, MultiMillerLoop}; use rayon::prelude::*; -use serde::{Deserialize, Serialize}; - -/// Alias to points on G1 that are in preprocessed form -type G1 = <::GE as DlogGroup>::PreprocessedGroupElement; - -/// Alias to points on G1 that are in preprocessed form -type G2 = <<::GE as PairingGroup>::G2 as DlogGroup>::PreprocessedGroupElement; - -/// KZG commitment key -#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Abomonation)] -#[abomonation_omit_bounds] -pub struct CommitmentKey -where - E::GE: PairingGroup, -{ - #[abomonate_with(Vec<[u64; 8]>)] // this is a hack; we just assume the size of the element. - ck: Vec<::PreprocessedGroupElement>, - #[abomonate_with(Vec<[u64; 16]>)] // this is a hack; we just assume the size of the element. - tau_H: <::G2 as DlogGroup>::PreprocessedGroupElement, // needed only for the verifier key -} - -impl Len for CommitmentKey -where - E::GE: PairingGroup, -{ - fn length(&self) -> usize { - self.ck.len() - } -} - -/// A KZG commitment -#[derive(Clone, Copy, Debug, PartialEq, Eq, Serialize, Deserialize, Abomonation)] -#[serde(bound = "")] -#[abomonation_omit_bounds] -pub struct Commitment -where - E: Engine, - E::GE: PairingGroup, -{ - #[abomonate_with(Vec<[u64; 12]>)] // this is a hack; we just assume the size of the element. 
- comm: ::GE, -} - -/// A compressed commitment (suitable for serialization) -#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] -pub struct CompressedCommitment -where - E: Engine, - E::GE: PairingGroup, -{ - comm: ::CompressedGroupElement, -} - -impl CommitmentTrait for Commitment -where - E: Engine, - E::GE: PairingGroup, -{ - type CompressedCommitment = CompressedCommitment; - - fn compress(&self) -> Self::CompressedCommitment { - CompressedCommitment { - comm: self.comm.compress(), - } - } - - fn to_coordinates(&self) -> (E::Base, E::Base, bool) { - self.comm.to_coordinates() - } - - fn decompress(c: &Self::CompressedCommitment) -> Result { - let comm = <::GE as DlogGroup>::CompressedGroupElement::decompress(&c.comm); - if comm.is_none() { - return Err(NovaError::DecompressionError); - } - Ok(Commitment { - comm: comm.unwrap(), - }) - } -} - -impl Default for Commitment -where - E: Engine, - E::GE: PairingGroup, -{ - fn default() -> Self { - Commitment { - comm: E::GE::zero(), - } - } -} - -impl TranscriptReprTrait for Commitment -where - E: Engine, - E::GE: PairingGroup, -{ - fn to_transcript_bytes(&self) -> Vec { - let (x, y, is_infinity) = self.comm.to_coordinates(); - let is_infinity_byte = (!is_infinity).into(); - [ - x.to_transcript_bytes(), - y.to_transcript_bytes(), - [is_infinity_byte].to_vec(), - ] - .concat() - } -} - -impl AbsorbInROTrait for Commitment -where - E: Engine, - E::GE: PairingGroup, -{ - fn absorb_in_ro(&self, ro: &mut E::RO) { - let (x, y, is_infinity) = self.comm.to_coordinates(); - ro.absorb(x); - ro.absorb(y); - ro.absorb(if is_infinity { - E::Base::ONE - } else { - E::Base::ZERO - }); - } -} - -impl TranscriptReprTrait for CompressedCommitment -where - E::GE: PairingGroup, -{ - fn to_transcript_bytes(&self) -> Vec { - self.comm.to_transcript_bytes() - } -} - -impl MulAssign for Commitment -where - E: Engine, - E::GE: PairingGroup, -{ - fn mul_assign(&mut self, scalar: E::Scalar) { - let result = (self as &Commitment).comm * scalar; - *self = Commitment { comm: result }; - } -} - -impl<'a, 'b, E> Mul<&'b E::Scalar> for &'a Commitment -where - E: Engine, - E::GE: PairingGroup, -{ - type Output = Commitment; - - fn mul(self, scalar: &'b E::Scalar) -> Commitment { - Commitment { - comm: self.comm * scalar, - } - } -} - -impl Mul for Commitment -where - E: Engine, - E::GE: PairingGroup, -{ - type Output = Commitment; - - fn mul(self, scalar: E::Scalar) -> Commitment { - Commitment { - comm: self.comm * scalar, - } - } -} - -impl Add for Commitment -where - E: Engine, - E::GE: PairingGroup, -{ - type Output = Commitment; - - fn add(self, other: Commitment) -> Commitment { - Commitment { - comm: self.comm + other.comm, - } - } -} - -/// Provides a commitment engine -#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] -pub struct CommitmentEngine { - _p: PhantomData, -} - -impl CommitmentEngineTrait for CommitmentEngine -where - E: Engine, - E::GE: PairingGroup, -{ - type Commitment = Commitment; - type CommitmentKey = CommitmentKey; - - fn setup(_label: &'static [u8], n: usize) -> Self::CommitmentKey { - // NOTE: this is for testing purposes and should not be used in production - // TODO: we need to decide how to generate load/store parameters - let tau = E::Scalar::random(OsRng); - let num_gens = n.next_power_of_two(); - - // Compute powers of tau in E::Scalar, then scalar muls in parallel - let mut powers_of_tau: Vec = Vec::with_capacity(num_gens); - powers_of_tau.insert(0, E::Scalar::ONE); - for i in 1..num_gens { - 
powers_of_tau.insert(i, powers_of_tau[i - 1] * tau); - } - - let ck: Vec> = (0..num_gens) - .into_par_iter() - .map(|i| (::gen() * powers_of_tau[i]).preprocessed()) - .collect(); - - let tau_H = (<::G2 as DlogGroup>::gen() * tau).preprocessed(); - - Self::CommitmentKey { ck, tau_H } - } - - fn commit(ck: &Self::CommitmentKey, v: &[E::Scalar]) -> Self::Commitment { - assert!(ck.ck.len() >= v.len()); - Commitment { - comm: E::GE::vartime_multiscalar_mul(v, &ck.ck[..v.len()]), - } - } -} - -/// Provides an implementation of generators for proving evaluations -#[derive(Clone, Debug, Serialize, Deserialize, Abomonation)] -#[serde(bound = "")] -#[abomonation_omit_bounds] -pub struct ProverKey { - _p: PhantomData, -} - -/// A verifier key -#[derive(Clone, Debug, Serialize, Deserialize, Abomonation)] -#[serde(bound = "")] -#[abomonation_omit_bounds] -pub struct VerifierKey -where - E::GE: PairingGroup, -{ - #[abomonate_with([u64; 12])] // this is a hack; we just assume the size of the element. - G: G1, - #[abomonate_with([u64; 24])] // this is a hack; we just assume the size of the element. - H: G2, - #[abomonate_with([u64; 24])] // this is a hack; we just assume the size of the element. - tau_H: G2, -} +use serde::{de::DeserializeOwned, Deserialize, Serialize}; /// Provides an implementation of a polynomial evaluation argument #[derive(Clone, Debug, Serialize, Deserialize)] -#[serde(bound = "")] -pub struct EvaluationArgument -where - E::GE: PairingGroup, -{ - com: Vec>, - w: Vec>, - v: Vec>, +#[serde(bound( + serialize = "E::G1Affine: Serialize, E::Fr: Serialize", + deserialize = "E::G1Affine: Deserialize<'de>, E::Fr: Deserialize<'de>" +))] +pub struct EvaluationArgument { + com: Vec, + w: Vec, + v: Vec>, } /// Provides an implementation of a polynomial evaluation engine using KZG #[derive(Clone, Debug, Serialize, Deserialize)] -pub struct EvaluationEngine { - _p: PhantomData, +pub struct EvaluationEngine { + _p: PhantomData<(E, NE)>, } -impl EvaluationEngine +impl EvaluationEngine where E: Engine, - E::GE: PairingGroup, + NE: NovaEngine, + E::G1: DlogGroup, + E::Fr: TranscriptReprTrait, + E::G1Affine: TranscriptReprTrait, // TODO: this bound on DlogGroup is really unusable! { // This impl block defines helper functions that are not a part of // EvaluationEngineTrait, but that we will use to implement the trait methods. 
fn compute_challenge( - C: &G1, - y: &E::Scalar, - com: &[G1], - transcript: &mut ::TE, - ) -> E::Scalar { + C: &E::G1Affine, + y: &E::Fr, + com: &[E::G1Affine], + transcript: &mut impl TranscriptEngineTrait, + ) -> E::Fr { transcript.absorb(b"C", C); transcript.absorb(b"y", y); transcript.absorb(b"c", &com.to_vec().as_slice()); @@ -322,11 +66,11 @@ where // Compute challenge q = Hash(vk, C0, ..., C_{k-1}, u0, ...., u_{t-1}, // (f_i(u_j))_{i=0..k-1,j=0..t-1}) fn get_batch_challenge( - C: &[G1], - u: &[E::Scalar], - v: &[Vec], - transcript: &mut ::TE, - ) -> E::Scalar { + C: &[E::G1Affine], + u: &[E::Fr], + v: &[Vec], + transcript: &mut impl TranscriptEngineTrait, + ) -> E::Fr { transcript.absorb(b"C", &C.to_vec().as_slice()); transcript.absorb(b"u", &u.to_vec().as_slice()); transcript.absorb( @@ -334,16 +78,16 @@ where &v.iter() .flatten() .cloned() - .collect::>() + .collect::>() .as_slice(), ); transcript.squeeze(b"r").unwrap() } - fn batch_challenge_powers(q: E::Scalar, k: usize) -> Vec { + fn batch_challenge_powers(q: E::Fr, k: usize) -> Vec { // Compute powers of q : (1, q, q^2, ..., q^(k-1)) - let mut q_powers = vec![E::Scalar::ONE; k]; + let mut q_powers = vec![E::Fr::ONE; k]; for i in 1..k { q_powers[i] = q_powers[i - 1] * q; } @@ -351,10 +95,10 @@ where } fn verifier_second_challenge( - C_B: &G1, - W: &[G1], - transcript: &mut ::TE, - ) -> E::Scalar { + C_B: &E::G1Affine, + W: &[E::G1Affine], + transcript: &mut impl TranscriptEngineTrait, + ) -> E::Fr { transcript.absorb(b"C_b", C_B); transcript.absorb(b"W", &W.to_vec().as_slice()); @@ -362,44 +106,40 @@ where } } -impl EvaluationEngineTrait for EvaluationEngine +impl EvaluationEngineTrait for EvaluationEngine where - E: Engine>, - E::GE: PairingGroup, + E: MultiMillerLoop, + NE: NovaEngine>, + E::Fr: Serialize + DeserializeOwned, + E::G1Affine: Serialize + DeserializeOwned, + E::G2Affine: Serialize + DeserializeOwned, + E::G1: DlogGroup, + ::Base: TranscriptReprTrait, // Note: due to the move of the bound TranscriptReprTrait on G::Base from Group to Engine + E::Fr: PrimeFieldBits, // TODO due to use of gen_srs_for_testing, make optional + E::Fr: TranscriptReprTrait, + E::G1Affine: TranscriptReprTrait, { type EvaluationArgument = EvaluationArgument; - type ProverKey = ProverKey; - type VerifierKey = VerifierKey; - - fn setup( - ck: &>::CommitmentKey, - ) -> (Self::ProverKey, Self::VerifierKey) { - let pk = ProverKey { - _p: Default::default(), - }; + type ProverKey = UVKZGProverKey; + type VerifierKey = UVKZGVerifierKey; - let vk = VerifierKey { - G: E::GE::gen().preprocessed(), - H: <::G2 as DlogGroup>::gen().preprocessed(), - tau_H: ck.tau_H.clone(), - }; - - (pk, vk) + fn setup(ck: &UVUniversalKZGParam) -> (Self::ProverKey, Self::VerifierKey) { + ck.trim(ck.length() - 1) } fn prove( - ck: &CommitmentKey, + ck: &UVUniversalKZGParam, _pk: &Self::ProverKey, - transcript: &mut ::TE, - C: &Commitment, - hat_P: &[E::Scalar], - point: &[E::Scalar], - eval: &E::Scalar, + transcript: &mut ::TE, + C: &Commitment, + hat_P: &[E::Fr], + point: &[E::Fr], + eval: &E::Fr, ) -> Result { - let x: Vec = point.to_vec(); + let x: Vec = point.to_vec(); //////////////// begin helper closures ////////// - let kzg_open = |f: &[E::Scalar], u: E::Scalar| -> G1 { + let kzg_open = |f: &[E::Fr], u: E::Fr| -> E::G1Affine { // On input f(x) and u compute the witness polynomial used to prove // that f(u) = v. The main part of this is to compute the // division (f(x) - f(u)) / (x - u), but we don't use a general @@ -414,11 +154,11 @@ where // same. 
One advantage is that computing f(u) could be decoupled // from kzg_open, it could be done later or separate from computing W. - let compute_witness_polynomial = |f: &[E::Scalar], u: E::Scalar| -> Vec { + let compute_witness_polynomial = |f: &[E::Fr], u: E::Fr| -> Vec { let d = f.len(); // Compute h(x) = f(x)/(x - u) - let mut h = vec![E::Scalar::ZERO; d]; + let mut h = vec![E::Fr::ZERO; d]; for i in (1..d).rev() { h[i - 1] = f[i] + h[i] * u; } @@ -428,17 +168,19 @@ where let h = compute_witness_polynomial(f, u); - E::CE::commit(ck, &h).comm.preprocessed() + >::commit(ck, &h) + .comm + .preprocessed() }; - let kzg_open_batch = |C: &[G1], - f: &[Vec], - u: &[E::Scalar], - transcript: &mut ::TE| - -> (Vec>, Vec>) { - let poly_eval = |f: &[E::Scalar], u: E::Scalar| -> E::Scalar { + let kzg_open_batch = |C: &[E::G1Affine], + f: &[Vec], + u: &[E::Fr], + transcript: &mut ::TE| + -> (Vec, Vec>) { + let poly_eval = |f: &[E::Fr], u: E::Fr| -> E::Fr { let mut v = f[0]; - let mut u_power = E::Scalar::ONE; + let mut u_power = E::Fr::ONE; for fi in f.iter().skip(1) { u_power *= u; @@ -448,14 +190,14 @@ where v }; - let scalar_vector_muladd = |a: &mut Vec, v: &Vec, s: E::Scalar| { + let scalar_vector_muladd = |a: &mut Vec, v: &Vec, s: E::Fr| { assert!(a.len() >= v.len()); for i in 0..v.len() { a[i] += s * v[i]; } }; - let kzg_compute_batch_polynomial = |f: &[Vec], q: E::Scalar| -> Vec { + let kzg_compute_batch_polynomial = |f: &[Vec], q: E::Fr| -> Vec { let k = f.len(); // Number of polynomials we're batching let q_powers = Self::batch_challenge_powers(q, k); @@ -476,7 +218,7 @@ where // The verifier needs f_i(u_j), so we compute them here // (V will compute B(u_j) itself) - let mut v = vec![vec!(E::Scalar::ZERO; k); t]; + let mut v = vec![vec!(E::Fr::ZERO; k); t]; for i in 0..t { // for each point u for (j, f_j) in f.iter().enumerate().take(k) { @@ -497,9 +239,8 @@ where // Compute the commitment to the batched polynomial B(X) let q_powers = Self::batch_challenge_powers(q, k); - let C_B = (::group(&C[0]) - + E::GE::vartime_multiscalar_mul(&q_powers[1..k], &C[1..k])) - .preprocessed(); + let c_0: E::G1 = C[0].into(); + let C_B = (c_0 + NE::GE::vartime_multiscalar_mul(&q_powers[1..k], &C[1..k])).preprocessed(); // The prover computes the challenge to keep the transcript in the same // state as that of the verifier @@ -515,16 +256,16 @@ where assert_eq!(n, 1 << ell); // Below we assume that n is a power of two // Phase 1 -- create commitments com_1, ..., com_\ell - let mut polys: Vec> = Vec::new(); + let mut polys: Vec> = Vec::new(); polys.push(hat_P.to_vec()); for i in 0..ell { let Pi_len = polys[i].len() / 2; - let mut Pi = vec![E::Scalar::ZERO; Pi_len]; + let mut Pi = vec![E::Fr::ZERO; Pi_len]; #[allow(clippy::needless_range_loop)] for j in 0..Pi_len { Pi[j] = x[ell-i-1] * polys[i][2*j + 1] // Odd part of P^(i-1) - + (E::Scalar::ONE - x[ell-i-1]) * polys[i][2*j]; // Even part of P^(i-1) + + (E::Fr::ONE - x[ell-i-1]) * polys[i][2*j]; // Even part of P^(i-1) } if i == ell - 1 && *eval != Pi[0] { @@ -536,9 +277,13 @@ where // We do not need to commit to the first polynomial as it is already committed. 
// Compute commitments in parallel - let com: Vec> = (1..polys.len()) + let com: Vec = (1..polys.len()) .into_par_iter() - .map(|i| E::CE::commit(ck, &polys[i]).comm.preprocessed()) + .map(|i| { + >::commit(ck, &polys[i]) + .comm + .preprocessed() + }) .collect(); // Phase 2 @@ -558,10 +303,10 @@ where /// A method to verify purported evaluations of a batch of polynomials fn verify( vk: &Self::VerifierKey, - transcript: &mut ::TE, - C: &Commitment, - point: &[E::Scalar], - P_of_x: &E::Scalar, + transcript: &mut ::TE, + C: &Commitment, + point: &[E::Fr], + P_of_x: &E::Fr, pi: &Self::EvaluationArgument, ) -> Result<(), NovaError> { let x = point.to_vec(); @@ -569,12 +314,12 @@ where // vk is hashed in transcript already, so we do not add it here - let kzg_verify_batch = |vk: &VerifierKey, - C: &Vec>, - W: &Vec>, - u: &Vec, - v: &Vec>, - transcript: &mut ::TE| + let kzg_verify_batch = |vk: &UVKZGVerifierKey, + C: &Vec, + W: &Vec, + u: &Vec, + v: &Vec>, + transcript: &mut ::TE| -> bool { let k = C.len(); let t = u.len(); @@ -583,9 +328,8 @@ where let q_powers = Self::batch_challenge_powers(q, k); // 1, q, q^2, ..., q^(k-1) // Compute the commitment to the batched polynomial B(X) - let C_B = (::group(&C[0]) - + E::GE::vartime_multiscalar_mul(&q_powers[1..k], &C[1..k])) - .preprocessed(); + let c_0: E::G1 = C[0].into(); + let C_B = (c_0 + NE::GE::vartime_multiscalar_mul(&q_powers[1..k], &C[1..k])).preprocessed(); // Compute the batched openings // compute B(u_i) = v[i][0] + q*v[i][1] + ... + q^(t-1) * v[i][t-1] @@ -598,16 +342,11 @@ where .map(|(a, b)| *a * *b) .sum() }) - .collect::>(); + .collect::>(); let d_0 = Self::verifier_second_challenge(&C_B, W, transcript); let d = [d_0, d_0 * d_0]; - // Shorthand to convert from preprocessed G1 elements to non-preprocessed - let from_ppG1 = |P: &G1| ::group(P); - // Shorthand to convert from preprocessed G2 elements to non-preprocessed - let from_ppG2 = |P: &G2| <::G2 as DlogGroup>::group(P); - assert!(t == 3); // We write a special case for t=3, since this what is required for // mlkzg. Following the paper directly, we must compute: @@ -622,20 +361,25 @@ where // // We group terms to reduce the number of scalar mults (to seven): // In Rust, we could use MSMs for these, and speed up verification. 
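+      // The equality e(L, H) == e(R, beta*H) is checked as e(L, -H) * e(R, beta*H) == 1,
+      // evaluating both Miller loops together via MultiMillerLoop and applying a single
+      // final exponentiation, then testing for the identity in the target group.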
- let L = from_ppG1(&C_B) * (E::Scalar::ONE + d[0] + d[1]) - - from_ppG1(&vk.G) * (B_u[0] + d[0] * B_u[1] + d[1] * B_u[2]) - + from_ppG1(&W[0]) * u[0] - + from_ppG1(&W[1]) * (u[1] * d[0]) - + from_ppG1(&W[2]) * (u[2] * d[1]); - - let R0 = from_ppG1(&W[0]); - let R1 = from_ppG1(&W[1]); - let R2 = from_ppG1(&W[2]); + let L = E::G1::from(C_B) * (E::Fr::ONE + d[0] + d[1]) + - E::G1::from(vk.g) * (B_u[0] + d[0] * B_u[1] + d[1] * B_u[2]) + + E::G1::from(W[0]) * u[0] + + E::G1::from(W[1]) * (u[1] * d[0]) + + E::G1::from(W[2]) * (u[2] * d[1]); + + let R0 = E::G1::from(W[0]); + let R1 = E::G1::from(W[1]); + let R2 = E::G1::from(W[2]); let R = R0 + R1 * d[0] + R2 * d[1]; // Check that e(L, vk.H) == e(R, vk.tau_H) - (::pairing(&L, &from_ppG2(&vk.H))) - == (::pairing(&R, &from_ppG2(&vk.tau_H))) + let pairing_inputs = [ + (&L.to_affine(), &E::G2Prepared::from(-vk.h)), + (&R.to_affine(), &E::G2Prepared::from(vk.beta_h)), + ]; + + let pairing_result = E::multi_miller_loop(&pairing_inputs).final_exponentiation(); + pairing_result.is_identity().into() }; ////// END verify() closure helpers @@ -647,7 +391,7 @@ where // obtained from the transcript let r = Self::compute_challenge(&C.comm.preprocessed(), y, &com, transcript); - if r == E::Scalar::ZERO || C.comm == E::GE::zero() { + if r == E::Fr::ZERO || C.comm == E::G1::zero() { return Err(NovaError::ProofVerifyError); } com.insert(0, C.comm.preprocessed()); // set com_0 = C, shifts other commitments to the right @@ -671,10 +415,10 @@ where return Err(NovaError::ProofVerifyError); } - let two = E::Scalar::from(2u64); + let two = E::Fr::from(2u64); for i in 0..ell { if two * r * Y[i + 1] - != r * (E::Scalar::ONE - x[ell - i - 1]) * (ypos[i] + yneg[i]) + != r * (E::Fr::ONE - x[ell - i - 1]) * (ypos[i] + yneg[i]) + x[ell - i - 1] * (ypos[i] - yneg[i]) { return Err(NovaError::ProofVerifyError); @@ -692,76 +436,64 @@ where } } -/// An implementation of Nova traits with multilinear KZG over the BN256 curve -#[derive(Clone, Copy, Debug, Eq, PartialEq)] -pub struct Bn256EngineKZG; - -impl Engine for Bn256EngineKZG { - type Base = Bn256Fq; - type Scalar = Bn256Fr; - type GE = Bn256G1; - type RO = PoseidonRO; - type ROCircuit = PoseidonROCircuit; - type TE = Keccak256Transcript; - type CE = CommitmentEngine; -} - #[cfg(test)] mod tests { use super::*; use crate::{ provider::keccak::Keccak256Transcript, spartan::polys::multilinear::MultilinearPolynomial, + traits::commitment::CommitmentTrait, CommitmentKey, }; use bincode::Options; - use group::Curve; use rand::SeedableRng; - type E = Bn256EngineKZG; - type Fr = ::Scalar; + type E = halo2curves::bn256::Bn256; + type NE = crate::provider::Bn256EngineKZG; + type Fr = ::Scalar; #[test] fn test_mlkzg_eval() { // Test with poly(X1, X2) = 1 + X1 + X2 + X1*X2 let n = 4; - let ck: CommitmentKey = CommitmentEngine::setup(b"test", n); - let (pk, _vk): (ProverKey, VerifierKey) = EvaluationEngine::setup(&ck); + let ck: CommitmentKey = + as CommitmentEngineTrait>::setup(b"test", n); + let (pk, _vk): (UVKZGProverKey, UVKZGVerifierKey) = EvaluationEngine::::setup(&ck); // poly is in eval. representation; evaluated at [(0,0), (0,1), (1,0), (1,1)] let poly = vec![Fr::from(1), Fr::from(2), Fr::from(2), Fr::from(4)]; - let C = CommitmentEngine::commit(&ck, &poly); - let mut tr = Keccak256Transcript::new(b"TestEval"); + let C = as CommitmentEngineTrait>::commit(&ck, &poly); + let mut tr = Keccak256Transcript::::new(b"TestEval"); // Call the prover with a (point, eval) pair. 
The prover recomputes // poly(point) = eval', and fails if eval' != eval let point = vec![Fr::from(0), Fr::from(0)]; let eval = Fr::ONE; - assert!(EvaluationEngine::prove(&ck, &pk, &mut tr, &C, &poly, &point, &eval).is_ok()); + assert!(EvaluationEngine::::prove(&ck, &pk, &mut tr, &C, &poly, &point, &eval).is_ok()); let point = vec![Fr::from(0), Fr::from(1)]; let eval = Fr::from(2); - assert!(EvaluationEngine::prove(&ck, &pk, &mut tr, &C, &poly, &point, &eval).is_ok()); + assert!(EvaluationEngine::::prove(&ck, &pk, &mut tr, &C, &poly, &point, &eval).is_ok()); let point = vec![Fr::from(1), Fr::from(1)]; let eval = Fr::from(4); - assert!(EvaluationEngine::prove(&ck, &pk, &mut tr, &C, &poly, &point, &eval).is_ok()); + assert!(EvaluationEngine::::prove(&ck, &pk, &mut tr, &C, &poly, &point, &eval).is_ok()); let point = vec![Fr::from(0), Fr::from(2)]; let eval = Fr::from(3); - assert!(EvaluationEngine::prove(&ck, &pk, &mut tr, &C, &poly, &point, &eval).is_ok()); + assert!(EvaluationEngine::::prove(&ck, &pk, &mut tr, &C, &poly, &point, &eval).is_ok()); let point = vec![Fr::from(2), Fr::from(2)]; let eval = Fr::from(9); - assert!(EvaluationEngine::prove(&ck, &pk, &mut tr, &C, &poly, &point, &eval).is_ok()); + assert!(EvaluationEngine::::prove(&ck, &pk, &mut tr, &C, &poly, &point, &eval).is_ok()); // Try a couple incorrect evaluations and expect failure let point = vec![Fr::from(2), Fr::from(2)]; let eval = Fr::from(50); - assert!(EvaluationEngine::prove(&ck, &pk, &mut tr, &C, &poly, &point, &eval).is_err()); + assert!(EvaluationEngine::::prove(&ck, &pk, &mut tr, &C, &poly, &point, &eval).is_err()); let point = vec![Fr::from(0), Fr::from(2)]; let eval = Fr::from(4); - assert!(EvaluationEngine::prove(&ck, &pk, &mut tr, &C, &poly, &point, &eval).is_err()); + assert!(EvaluationEngine::::prove(&ck, &pk, &mut tr, &C, &poly, &point, &eval).is_err()); } #[test] @@ -777,24 +509,31 @@ mod tests { // eval = 28 let eval = Fr::from(28); - let ck: CommitmentKey = CommitmentEngine::setup(b"test", n); - let (pk, vk) = EvaluationEngine::setup(&ck); + let ck: CommitmentKey = + as CommitmentEngineTrait>::setup(b"test", n); + let (pk, vk): (UVKZGProverKey, UVKZGVerifierKey) = EvaluationEngine::::setup(&ck); // make a commitment - let C = CommitmentEngine::commit(&ck, &poly); + let C = KZGCommitmentEngine::commit(&ck, &poly); // prove an evaluation let mut prover_transcript = Keccak256Transcript::new(b"TestEval"); let proof = - EvaluationEngine::::prove(&ck, &pk, &mut prover_transcript, &C, &poly, &point, &eval) + EvaluationEngine::::prove(&ck, &pk, &mut prover_transcript, &C, &poly, &point, &eval) .unwrap(); let post_c_p = prover_transcript.squeeze(b"c").unwrap(); // verify the evaluation - let mut verifier_transcript = Keccak256Transcript::new(b"TestEval"); - assert!( - EvaluationEngine::verify(&vk, &mut verifier_transcript, &C, &point, &eval, &proof).is_ok() - ); + let mut verifier_transcript = Keccak256Transcript::::new(b"TestEval"); + assert!(EvaluationEngine::::verify( + &vk, + &mut verifier_transcript, + &C, + &point, + &eval, + &proof + ) + .is_ok()); let post_c_v = verifier_transcript.squeeze(b"c").unwrap(); // check if the prover transcript and verifier transcript are kept in the @@ -814,8 +553,8 @@ mod tests { // Change the proof and expect verification to fail let mut bad_proof = proof.clone(); bad_proof.com[0] = (bad_proof.com[0] + bad_proof.com[1]).to_affine(); - let mut verifier_transcript2 = Keccak256Transcript::new(b"TestEval"); - assert!(EvaluationEngine::verify( + let mut verifier_transcript2 = 
Keccak256Transcript::::new(b"TestEval"); + assert!(EvaluationEngine::::verify( &vk, &mut verifier_transcript2, &C, @@ -838,29 +577,46 @@ mod tests { let point = (0..ell).map(|_| Fr::random(&mut rng)).collect::>(); let eval = MultilinearPolynomial::evaluate_with(&poly, &point); - let ck: CommitmentKey = CommitmentEngine::setup(b"test", n); - let (pk, vk) = EvaluationEngine::setup(&ck); + let ck: CommitmentKey = + as CommitmentEngineTrait>::setup(b"test", n); + let (pk, vk): (UVKZGProverKey, UVKZGVerifierKey) = + EvaluationEngine::::setup(&ck); // make a commitment - let C = CommitmentEngine::commit(&ck, &poly); + let C = as CommitmentEngineTrait>::commit(&ck, &poly); // prove an evaluation - let mut prover_transcript = Keccak256Transcript::new(b"TestEval"); - let proof: EvaluationArgument = - EvaluationEngine::prove(&ck, &pk, &mut prover_transcript, &C, &poly, &point, &eval) - .unwrap(); + let mut prover_transcript = Keccak256Transcript::::new(b"TestEval"); + let proof: EvaluationArgument = EvaluationEngine::::prove( + &ck, + &pk, + &mut prover_transcript, + &C, + &poly, + &point, + &eval, + ) + .unwrap(); // verify the evaluation - let mut verifier_tr = Keccak256Transcript::new(b"TestEval"); - assert!(EvaluationEngine::verify(&vk, &mut verifier_tr, &C, &point, &eval, &proof).is_ok()); + let mut verifier_tr = Keccak256Transcript::::new(b"TestEval"); + assert!( + EvaluationEngine::::verify(&vk, &mut verifier_tr, &C, &point, &eval, &proof).is_ok() + ); // Change the proof and expect verification to fail let mut bad_proof = proof.clone(); bad_proof.com[0] = (bad_proof.com[0] + bad_proof.com[1]).to_affine(); - let mut verifier_tr2 = Keccak256Transcript::new(b"TestEval"); - assert!( - EvaluationEngine::verify(&vk, &mut verifier_tr2, &C, &point, &eval, &bad_proof).is_err() - ); + let mut verifier_tr2 = Keccak256Transcript::::new(b"TestEval"); + assert!(EvaluationEngine::::verify( + &vk, + &mut verifier_tr2, + &C, + &point, + &eval, + &bad_proof + ) + .is_err()); } } } diff --git a/src/provider/mod.rs b/src/provider/mod.rs index 6be433424..e5ee00d3a 100644 --- a/src/provider/mod.rs +++ b/src/provider/mod.rs @@ -77,6 +77,19 @@ impl Engine for Bn256EngineZM { type TE = Keccak256Transcript; type CE = KZGCommitmentEngine; } +/// An implementation of Nova traits with multilinear KZG over the BN256 curve +#[derive(Clone, Copy, Debug, Eq, PartialEq)] +pub struct Bn256EngineKZG; + +impl Engine for Bn256EngineKZG { + type Base = bn256::Base; + type Scalar = bn256::Scalar; + type GE = bn256::Point; + type RO = PoseidonRO; + type ROCircuit = PoseidonROCircuit; + type TE = Keccak256Transcript; + type CE = KZGCommitmentEngine; +} /// An implementation of the Nova `Engine` trait with Secp256k1 curve and Pedersen commitment scheme #[derive(Clone, Copy, Debug, Eq, PartialEq)] diff --git a/src/provider/pasta.rs b/src/provider/pasta.rs index d9944f896..7978a77fe 100644 --- a/src/provider/pasta.rs +++ b/src/provider/pasta.rs @@ -97,10 +97,6 @@ macro_rules! impl_traits { self.to_affine() } - fn group(p: &Self::PreprocessedGroupElement) -> Self { - $name::Point::from(*p) - } - fn compress(&self) -> Self::CompressedGroupElement { $name_compressed::new(self.to_bytes()) } @@ -155,10 +151,6 @@ macro_rules! 
impl_traits { $name::Point::identity() } - fn gen() -> Self { - $name::Point::generator() - } - fn to_coordinates(&self) -> (Self::Base, Self::Base, bool) { let coordinates = self.to_affine().coordinates(); if coordinates.is_some().unwrap_u8() == 1 { diff --git a/src/provider/traits.rs b/src/provider/traits.rs index fd9761a6d..1a6a2cf1c 100644 --- a/src/provider/traits.rs +++ b/src/provider/traits.rs @@ -83,32 +83,13 @@ pub trait DlogGroup: /// Produces a preprocessed element fn preprocessed(&self) -> Self::PreprocessedGroupElement; - /// Returns a group element from a preprocessed group element - fn group(p: &Self::PreprocessedGroupElement) -> Self; - /// Returns an element that is the additive identity of the group fn zero() -> Self; - /// Returns the generator of the group - fn gen() -> Self; - /// Returns the affine coordinates (x, y, infinty) for the point fn to_coordinates(&self) -> (::Base, ::Base, bool); } -/// A trait that defines extensions to the DlogGroup trait, to be implemented for -/// elliptic curve groups that are pairing friendly -pub trait PairingGroup: DlogGroup { - /// A type representing the second group - type G2: DlogGroup; - - /// A type representing the target group - type GT: PartialEq + Eq; - - /// A method to compute a pairing - fn pairing(p: &Self, q: &Self::G2) -> Self::GT; -} - /// This implementation behaves in ways specific to the halo2curves suite of curves in: // - to_coordinates, // - vartime_multiscalar_mul, where it does not call into accelerated implementations. @@ -152,10 +133,6 @@ macro_rules! impl_traits { self.to_affine() } - fn group(p: &Self::PreprocessedGroupElement) -> Self { - $name::Point::from(*p) - } - fn compress(&self) -> Self::CompressedGroupElement { self.to_bytes() } @@ -210,10 +187,6 @@ macro_rules! impl_traits { $name::Point::identity() } - fn gen() -> Self { - $name::Point::generator() - } - fn to_coordinates(&self) -> (Self::Base, Self::Base, bool) { let coordinates = self.to_affine().coordinates(); if coordinates.is_some().unwrap_u8() == 1 From 2e3de784c90f33f94df70c874da2daa615083744 Mon Sep 17 00:00:00 2001 From: Srinath Setty Date: Mon, 18 Dec 2023 16:56:00 -0500 Subject: [PATCH 3/7] make Minroot example generic over the supported curve cycles (#272) * make Minroot example generic over the supported curve cycles * upgrade version --- examples/minroot.rs | 84 ++++++++++++++++++++++++--------------------- 1 file changed, 44 insertions(+), 40 deletions(-) diff --git a/examples/minroot.rs b/examples/minroot.rs index 74cee7e5a..c81397018 100644 --- a/examples/minroot.rs +++ b/examples/minroot.rs @@ -2,28 +2,27 @@ //! iterations of the `MinRoot` function, thereby realizing a Nova-based verifiable delay function (VDF). //! We execute a configurable number of iterations of the `MinRoot` function per step of Nova's recursion. 
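// (Editorial note, hedged — not part of the patch): one MinRoot step maps the
// state (x_i, y_i) to (x_{i+1}, y_{i+1}) = ((x_i + y_i)^{1/5}, x_i), with the
// fifth root taken in the scalar field. Making the example generic therefore
// mostly amounts to deriving the fifth-root exponent from the group order at
// runtime (see `MinRootIteration::new` below) instead of hard-coding the
// value for Pallas' scalar field.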
use arecibo::{ - provider::{PallasEngine, VestaEngine}, + provider::{Bn256EngineKZG, GrumpkinEngine}, traits::{ circuit::{StepCircuit, TrivialCircuit}, - snark::default_ck_hint, - Engine, + snark::RelaxedR1CSSNARKTrait, + Engine, Group, }, CompressedSNARK, PublicParams, RecursiveSNARK, }; use bellpepper_core::{num::AllocatedNum, ConstraintSystem, SynthesisError}; -use ff::PrimeField; +use ff::Field; use flate2::{write::ZlibEncoder, Compression}; +use halo2curves::bn256::Bn256; use num_bigint::BigUint; use std::time::Instant; use tracing_subscriber::{fmt, prelude::*, EnvFilter, Registry}; use tracing_texray::TeXRayLayer; -type E1 = PallasEngine; -type E2 = VestaEngine; - #[cfg(feature = "abomonate")] mod utils { use super::*; + use ff::PrimeField; use std::{io::Write, mem::size_of}; pub const FILEPATH: &str = "/tmp/data"; @@ -49,7 +48,7 @@ mod utils { std::ptr::write(f, std::ptr::read(mine)); rest } - impl abomonation::Abomonation for MinRootIteration { + impl abomonation::Abomonation for MinRootIteration { unsafe fn entomb(&self, bytes: &mut W) -> std::io::Result<()> { entomb_F(&self.x_i, bytes)?; entomb_F(&self.y_i, bytes)?; @@ -73,29 +72,32 @@ mod utils { } #[derive(Clone, Debug, PartialEq)] -struct MinRootIteration { - x_i: F, - y_i: F, - x_i_plus_1: F, - y_i_plus_1: F, +struct MinRootIteration { + x_i: G::Scalar, + y_i: G::Scalar, + x_i_plus_1: G::Scalar, + y_i_plus_1: G::Scalar, } -impl MinRootIteration { +impl MinRootIteration { // produces a sample non-deterministic advice, executing one invocation of MinRoot per step - fn new(num_iters: usize, x_0: &F, y_0: &F) -> (Vec, Vec) { - // although this code is written generically, it is tailored to Pallas' scalar field - // (p - 3 / 5) - let exp = BigUint::parse_bytes( - b"23158417847463239084714197001737581570690445185553317903743794198714690358477", - 10, - ) - .unwrap(); + fn new(num_iters: usize, x_0: &G::Scalar, y_0: &G::Scalar) -> (Vec, Vec) { + // exp = (p - 3 / 5), where p is the order of the group + // x^{exp} mod p provides the fifth root of x + let exp = { + let p = G::group_params().2.to_biguint().unwrap(); + let two = BigUint::parse_bytes(b"2", 10).unwrap(); + let three = BigUint::parse_bytes(b"3", 10).unwrap(); + let five = BigUint::parse_bytes(b"5", 10).unwrap(); + let five_inv = five.modpow(&(&p - &two), &p); + (&five_inv * (&p - &three)) % &p + }; let mut res = Vec::new(); let mut x_i = *x_0; let mut y_i = *y_0; for _i in 0..num_iters { - let x_i_plus_1 = (x_i + y_i).pow_vartime(exp.to_u64_digits()); // computes the fifth root of x_i + y_i + let x_i_plus_1 = (x_i + y_i).pow_vartime(&exp.to_u64_digits()); // computes the fifth root of x_i + y_i // sanity check if cfg!(debug_assertions) { @@ -125,21 +127,21 @@ impl MinRootIteration { } #[derive(Clone, Debug, PartialEq)] -struct MinRootCircuit { - seq: Vec>, +struct MinRootCircuit { + seq: Vec>, } -impl StepCircuit for MinRootCircuit { +impl StepCircuit for MinRootCircuit { fn arity(&self) -> usize { 2 } - fn synthesize>( + fn synthesize>( &self, cs: &mut CS, - z: &[AllocatedNum], - ) -> Result>, SynthesisError> { - let mut z_out: Result>, SynthesisError> = + z: &[AllocatedNum], + ) -> Result>, SynthesisError> { + let mut z_out: Result>, SynthesisError> = Err(SynthesisError::AssignmentMissing); // use the provided inputs @@ -220,13 +222,13 @@ fn main() { let pp = PublicParams::< E1, E2, - MinRootCircuit<::Scalar>, + MinRootCircuit<::GE>, TrivialCircuit<::Scalar>, >::setup( &circuit_primary, &circuit_secondary, - &*default_ck_hint(), - &*default_ck_hint(), + &*S1::ck_floor(), 
+ &*S2::ck_floor(), ); println!("PublicParams::setup, took {:?} ", start.elapsed()); #[cfg(feature = "abomonate")] @@ -271,7 +273,7 @@ fn main() { PublicParams< E1, E2, - MinRootCircuit<::Scalar>, + MinRootCircuit<::GE>, TrivialCircuit<::Scalar>, >, >(&mut bytes) @@ -284,7 +286,7 @@ fn main() { } // produce non-deterministic advice - let (z0_primary, minroot_iterations) = MinRootIteration::new( + let (z0_primary, minroot_iterations) = MinRootIteration::<::GE>::new( num_iters_per_step * num_steps, &::Scalar::zero(), &::Scalar::one(), @@ -304,7 +306,7 @@ fn main() { let z0_secondary = vec![::Scalar::zero()]; - type C1 = MinRootCircuit<::Scalar>; + type C1 = MinRootCircuit<::GE>; type C2 = TrivialCircuit<::Scalar>; // produce a recursive SNARK println!("Generating a RecursiveSNARK..."); @@ -342,14 +344,16 @@ fn main() { assert!(res.is_ok()); // produce a compressed SNARK - println!("Generating a CompressedSNARK using Spartan with IPA-PC..."); + println!("Generating a CompressedSNARK using Spartan with multilinear KZG..."); let (pk, vk) = CompressedSNARK::<_, _, _, _, S1, S2>::setup(&pp).unwrap(); let start = Instant::now(); - type EE1 = arecibo::provider::ipa_pc::EvaluationEngine; + type E1 = Bn256EngineKZG; + type E2 = GrumpkinEngine; + type EE1 = arecibo::provider::mlkzg::EvaluationEngine; type EE2 = arecibo::provider::ipa_pc::EvaluationEngine; - type S1 = arecibo::spartan::snark::RelaxedR1CSSNARK; - type S2 = arecibo::spartan::snark::RelaxedR1CSSNARK; + type S1 = arecibo::spartan::snark::RelaxedR1CSSNARK; // non-preprocessing SNARK + type S2 = arecibo::spartan::snark::RelaxedR1CSSNARK; // non-preprocessing SNARK let res = CompressedSNARK::<_, _, _, _, S1, S2>::prove(&pp, &pk, &recursive_snark); println!( From f796b4eeed9e4c5d11cb5326631655bb4f132f5b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fran=C3=A7ois=20Garillot?= Date: Mon, 18 Dec 2023 16:56:03 -0500 Subject: [PATCH 4/7] refactor: Refactor and enhance point infinity handling in `to_transcript_bytes` - Enhanced the functionality of `to_transcript_bytes` method in `TranscriptReprTrait` for `Affine` in both `pasta.rs` and `traits.rs`. - Combined the x and y coordinates with the `is_infinity_byte` into a single byte stream for ease of handling. - Integrated additional checks for 'infinity' conditions to ensure accurate extractions of coordinate values. --- src/provider/pasta.rs | 21 ++++++++++++++++++--- src/provider/traits.rs | 18 +++++++++++++++--- 2 files changed, 33 insertions(+), 6 deletions(-) diff --git a/src/provider/pasta.rs b/src/provider/pasta.rs index 7978a77fe..e6113d9ac 100644 --- a/src/provider/pasta.rs +++ b/src/provider/pasta.rs @@ -190,9 +190,24 @@ macro_rules! impl_traits { impl TranscriptReprTrait for $name::Affine { fn to_transcript_bytes(&self) -> Vec { - let coords = self.coordinates().unwrap(); - - [coords.x().to_repr(), coords.y().to_repr()].concat() + let (x, y, is_infinity_byte) = { + let coordinates = self.coordinates(); + if coordinates.is_some().unwrap_u8() == 1 { + ( + *coordinates.unwrap().x(), + *coordinates.unwrap().y(), + u8::from(false), + ) + } else { + ($name::Base::zero(), $name::Base::zero(), u8::from(true)) + } + }; + + x.to_repr() + .into_iter() + .chain(y.to_repr().into_iter()) + .chain(std::iter::once(is_infinity_byte)) + .collect() } } }; diff --git a/src/provider/traits.rs b/src/provider/traits.rs index 1a6a2cf1c..22ba4696c 100644 --- a/src/provider/traits.rs +++ b/src/provider/traits.rs @@ -228,9 +228,21 @@ macro_rules! 
impl_traits { impl TranscriptReprTrait for $name::Affine { fn to_transcript_bytes(&self) -> Vec { - let coords = self.coordinates().unwrap(); - - [coords.x().to_repr(), coords.y().to_repr()].concat() + let (x, y, is_infinity_byte) = { + let coordinates = self.coordinates(); + if coordinates.is_some().unwrap_u8() == 1 && ($name_curve_affine::identity() != *self) { + let c = coordinates.unwrap(); + (*c.x(), *c.y(), u8::from(false)) + } else { + ($name::Base::zero(), $name::Base::zero(), u8::from(false)) + } + }; + + x.to_repr() + .into_iter() + .chain(y.to_repr().into_iter()) + .chain(std::iter::once(is_infinity_byte)) + .collect() } } }; From 5e726de92fc82d042b11e45ef5b0e7f7b0508dd7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fran=C3=A7ois=20Garillot?= Date: Mon, 18 Dec 2023 16:56:07 -0500 Subject: [PATCH 5/7] refactor: Relocate multi-scalar multiplication module - Restructure the `provider` module by moving `msm` to the `util` subdirectory. --- src/provider/bn256_grumpkin.rs | 2 +- src/provider/mod.rs | 3 +-- src/provider/pasta.rs | 2 +- src/provider/secp_secq.rs | 2 +- src/provider/util/mod.rs | 3 ++- src/provider/{ => util}/msm.rs | 0 6 files changed, 6 insertions(+), 6 deletions(-) rename src/provider/{ => util}/msm.rs (100%) diff --git a/src/provider/bn256_grumpkin.rs b/src/provider/bn256_grumpkin.rs index 88f7bec45..4b72e2976 100644 --- a/src/provider/bn256_grumpkin.rs +++ b/src/provider/bn256_grumpkin.rs @@ -2,8 +2,8 @@ use crate::{ impl_traits, provider::{ - msm::cpu_best_msm, traits::{CompressedGroup, DlogGroup}, + util::msm::cpu_best_msm, }, traits::{Group, PrimeFieldExt, TranscriptReprTrait}, }; diff --git a/src/provider/mod.rs b/src/provider/mod.rs index e5ee00d3a..6bbf9bec5 100644 --- a/src/provider/mod.rs +++ b/src/provider/mod.rs @@ -19,7 +19,6 @@ mod util; // crate-private modules mod keccak; -mod msm; use crate::{ provider::{ @@ -151,9 +150,9 @@ impl Engine for VestaEngine { mod tests { use crate::provider::{ bn256_grumpkin::{bn256, grumpkin}, - msm::cpu_best_msm, secp_secq::{secp256k1, secq256k1}, traits::DlogGroup, + util::msm::cpu_best_msm, }; use digest::{ExtendableOutput, Update}; use group::{ff::Field, Curve, Group}; diff --git a/src/provider/pasta.rs b/src/provider/pasta.rs index e6113d9ac..8be7c95ad 100644 --- a/src/provider/pasta.rs +++ b/src/provider/pasta.rs @@ -1,8 +1,8 @@ //! This module implements the Nova traits for `pallas::Point`, `pallas::Scalar`, `vesta::Point`, `vesta::Scalar`. 
use crate::{ provider::{ - msm::cpu_best_msm, traits::{CompressedGroup, DlogGroup}, + util::msm::cpu_best_msm, }, traits::{Group, PrimeFieldExt, TranscriptReprTrait}, }; diff --git a/src/provider/secp_secq.rs b/src/provider/secp_secq.rs index 39b582096..5be35bd6b 100644 --- a/src/provider/secp_secq.rs +++ b/src/provider/secp_secq.rs @@ -2,8 +2,8 @@ use crate::{ impl_traits, provider::{ - msm::cpu_best_msm, traits::{CompressedGroup, DlogGroup}, + util::msm::cpu_best_msm, }, traits::{Group, PrimeFieldExt, TranscriptReprTrait}, }; diff --git a/src/provider/util/mod.rs b/src/provider/util/mod.rs index 43a544123..40a0443fa 100644 --- a/src/provider/util/mod.rs +++ b/src/provider/util/mod.rs @@ -1,2 +1,3 @@ /// Utilities for provider module -pub(crate) mod fb_msm; +pub(in crate::provider) mod fb_msm; +pub(in crate::provider) mod msm; diff --git a/src/provider/msm.rs b/src/provider/util/msm.rs similarity index 100% rename from src/provider/msm.rs rename to src/provider/util/msm.rs From 624cbb98c10719c3ca82ccfd3404d2c0c82fc771 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fran=C3=A7ois=20Garillot?= Date: Mon, 18 Dec 2023 16:56:10 -0500 Subject: [PATCH 6/7] chore: Rename UV(KZG{ProverKey, VerifierKey}|UniversalKZGParam) -> \1 --- src/provider/kzg_commitment.rs | 6 ++--- src/provider/mlkzg.rs | 19 +++++++------- src/provider/non_hiding_kzg.rs | 38 ++++++++++++++-------------- src/provider/non_hiding_zeromorph.rs | 22 ++++++++-------- 4 files changed, 42 insertions(+), 43 deletions(-) diff --git a/src/provider/kzg_commitment.rs b/src/provider/kzg_commitment.rs index f1e6fa5ab..3663a379b 100644 --- a/src/provider/kzg_commitment.rs +++ b/src/provider/kzg_commitment.rs @@ -16,7 +16,7 @@ use crate::traits::{ }; use crate::provider::{ - non_hiding_kzg::{UVKZGCommitment, UVUniversalKZGParam}, + non_hiding_kzg::{UVKZGCommitment, UniversalKZGParam}, pedersen::Commitment, traits::DlogGroup, }; @@ -35,7 +35,7 @@ where E::G2Affine: Serialize + for<'de> Deserialize<'de>, E::Fr: PrimeFieldBits, // TODO due to use of gen_srs_for_testing, make optional { - type CommitmentKey = UVUniversalKZGParam; + type CommitmentKey = UniversalKZGParam; type Commitment = Commitment; fn setup(label: &'static [u8], n: usize) -> Self::CommitmentKey { @@ -44,7 +44,7 @@ where let len = label.len().min(32); bytes[..len].copy_from_slice(&label[..len]); let rng = &mut StdRng::from_seed(bytes); - UVUniversalKZGParam::gen_srs_for_testing(rng, n.next_power_of_two()) + UniversalKZGParam::gen_srs_for_testing(rng, n.next_power_of_two()) } fn commit(ck: &Self::CommitmentKey, v: &[::Scalar]) -> Self::Commitment { diff --git a/src/provider/mlkzg.rs b/src/provider/mlkzg.rs index b0b8768f8..838af9a56 100644 --- a/src/provider/mlkzg.rs +++ b/src/provider/mlkzg.rs @@ -4,7 +4,7 @@ use crate::{ errors::NovaError, provider::{ kzg_commitment::KZGCommitmentEngine, - non_hiding_kzg::{UVKZGProverKey, UVKZGVerifierKey, UVUniversalKZGParam}, + non_hiding_kzg::{KZGProverKey, KZGVerifierKey, UniversalKZGParam}, pedersen::Commitment, traits::DlogGroup, }, @@ -120,15 +120,15 @@ where E::G1Affine: TranscriptReprTrait, { type EvaluationArgument = EvaluationArgument; - type ProverKey = UVKZGProverKey; - type VerifierKey = UVKZGVerifierKey; + type ProverKey = KZGProverKey; + type VerifierKey = KZGVerifierKey; - fn setup(ck: &UVUniversalKZGParam) -> (Self::ProverKey, Self::VerifierKey) { + fn setup(ck: &UniversalKZGParam) -> (Self::ProverKey, Self::VerifierKey) { ck.trim(ck.length() - 1) } fn prove( - ck: &UVUniversalKZGParam, + ck: &UniversalKZGParam, _pk: 
&Self::ProverKey, transcript: &mut ::TE, C: &Commitment, @@ -314,7 +314,7 @@ where // vk is hashed in transcript already, so we do not add it here - let kzg_verify_batch = |vk: &UVKZGVerifierKey, + let kzg_verify_batch = |vk: &KZGVerifierKey, C: &Vec, W: &Vec, u: &Vec, @@ -456,7 +456,7 @@ mod tests { let n = 4; let ck: CommitmentKey = as CommitmentEngineTrait>::setup(b"test", n); - let (pk, _vk): (UVKZGProverKey, UVKZGVerifierKey) = EvaluationEngine::::setup(&ck); + let (pk, _vk): (KZGProverKey, KZGVerifierKey) = EvaluationEngine::::setup(&ck); // poly is in eval. representation; evaluated at [(0,0), (0,1), (1,0), (1,1)] let poly = vec![Fr::from(1), Fr::from(2), Fr::from(2), Fr::from(4)]; @@ -511,7 +511,7 @@ mod tests { let ck: CommitmentKey = as CommitmentEngineTrait>::setup(b"test", n); - let (pk, vk): (UVKZGProverKey, UVKZGVerifierKey) = EvaluationEngine::::setup(&ck); + let (pk, vk): (KZGProverKey, KZGVerifierKey) = EvaluationEngine::::setup(&ck); // make a commitment let C = KZGCommitmentEngine::commit(&ck, &poly); @@ -579,8 +579,7 @@ mod tests { let ck: CommitmentKey = as CommitmentEngineTrait>::setup(b"test", n); - let (pk, vk): (UVKZGProverKey, UVKZGVerifierKey) = - EvaluationEngine::::setup(&ck); + let (pk, vk): (KZGProverKey, KZGVerifierKey) = EvaluationEngine::::setup(&ck); // make a commitment let C = as CommitmentEngineTrait>::commit(&ck, &poly); diff --git a/src/provider/non_hiding_kzg.rs b/src/provider/non_hiding_kzg.rs index ff6bb57a4..f9b7af9a2 100644 --- a/src/provider/non_hiding_kzg.rs +++ b/src/provider/non_hiding_kzg.rs @@ -21,7 +21,7 @@ use crate::{ deserialize = "E::G1Affine: Deserialize<'de>, E::G2Affine: Deserialize<'de>" ))] #[abomonation_omit_bounds] -pub struct UVUniversalKZGParam { +pub struct UniversalKZGParam { /// Group elements of the form `{ β^i G }`, where `i` ranges from 0 to /// `degree`. #[abomonate_with(Vec<[u64; 8]>)] // // this is a hack; we just assume the size of the element. @@ -32,14 +32,14 @@ pub struct UVUniversalKZGParam { pub powers_of_h: Vec, } -impl PartialEq for UVUniversalKZGParam { - fn eq(&self, other: &UVUniversalKZGParam) -> bool { +impl PartialEq for UniversalKZGParam { + fn eq(&self, other: &UniversalKZGParam) -> bool { self.powers_of_g == other.powers_of_g && self.powers_of_h == other.powers_of_h } } // for the purpose of the Len trait, we count commitment bases, i.e. G1 elements -impl Len for UVUniversalKZGParam { +impl Len for UniversalKZGParam { fn length(&self) -> usize { self.powers_of_g.len() } @@ -52,7 +52,7 @@ impl Len for UVUniversalKZGParam { serialize = "E::G1Affine: Serialize", deserialize = "E::G1Affine: Deserialize<'de>" ))] -pub struct UVKZGProverKey { +pub struct KZGProverKey { /// generators #[abomonate_with(Vec<[u64; 8]>)] // this is a hack; we just assume the size of the element. pub powers_of_g: Vec, @@ -66,7 +66,7 @@ pub struct UVKZGProverKey { serialize = "E::G1Affine: Serialize, E::G2Affine: Serialize", deserialize = "E::G1Affine: Deserialize<'de>, E::G2Affine: Deserialize<'de>" ))] -pub struct UVKZGVerifierKey { +pub struct KZGVerifierKey { /// The generator of G1. #[abomonate_with([u64; 8])] // this is a hack; we just assume the size of the element. 
pub g: E::G1Affine, @@ -78,7 +78,7 @@ pub struct UVKZGVerifierKey { pub beta_h: E::G2Affine, } -impl UVUniversalKZGParam { +impl UniversalKZGParam { /// Returns the maximum supported degree pub fn max_degree(&self) -> usize { self.powers_of_g.len() @@ -88,21 +88,21 @@ impl UVUniversalKZGParam { /// /// # Panics /// if `supported_size` is greater than `self.max_degree()` - pub fn extract_prover_key(&self, supported_size: usize) -> UVKZGProverKey { + pub fn extract_prover_key(&self, supported_size: usize) -> KZGProverKey { let powers_of_g = self.powers_of_g[..=supported_size].to_vec(); - UVKZGProverKey { powers_of_g } + KZGProverKey { powers_of_g } } /// Returns the verifier parameters /// /// # Panics /// If self.prover_params is empty. - pub fn extract_verifier_key(&self, supported_size: usize) -> UVKZGVerifierKey { + pub fn extract_verifier_key(&self, supported_size: usize) -> KZGVerifierKey { assert!( self.powers_of_g.len() >= supported_size, "supported_size is greater than self.max_degree()" ); - UVKZGVerifierKey { + KZGVerifierKey { g: self.powers_of_g[0], h: self.powers_of_h[0], beta_h: self.powers_of_h[1], @@ -116,11 +116,11 @@ impl UVUniversalKZGParam { /// /// # Panics /// If `supported_size` is greater than `self.max_degree()`, or `self.max_degree()` is zero. - pub fn trim(&self, supported_size: usize) -> (UVKZGProverKey, UVKZGVerifierKey) { + pub fn trim(&self, supported_size: usize) -> (KZGProverKey, KZGVerifierKey) { let powers_of_g = self.powers_of_g[..=supported_size].to_vec(); - let pk = UVKZGProverKey { powers_of_g }; - let vk = UVKZGVerifierKey { + let pk = KZGProverKey { powers_of_g }; + let vk = KZGVerifierKey { g: self.powers_of_g[0], h: self.powers_of_h[0], beta_h: self.powers_of_h[1], @@ -129,7 +129,7 @@ impl UVUniversalKZGParam { } } -impl UVUniversalKZGParam +impl UniversalKZGParam where E::Fr: PrimeFieldBits, { @@ -239,7 +239,7 @@ where /// Generate a commitment for a polynomial /// Note that the scheme is not hidding pub fn commit( - prover_param: impl Borrow>, + prover_param: impl Borrow>, poly: &UVKZGPoly, ) -> Result, NovaError> { let prover_param = prover_param.borrow(); @@ -257,7 +257,7 @@ where /// On input a polynomial `p` and a point `point`, outputs a proof for the /// same. pub fn open( - prover_param: impl Borrow>, + prover_param: impl Borrow>, polynomial: &UVKZGPoly, point: &E::Fr, ) -> Result<(UVKZGProof, UVKZGEvaluation), NovaError> { @@ -287,7 +287,7 @@ where /// committed inside `comm`. 
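/// (Editorial note, hedged — not part of the patch:) concretely, single-point
/// KZG verification is the pairing check
///   e(C - v*g, h) == e(W, beta_h - x*h),
/// which establishes, in the exponent, that p(X) - v is divisible by (X - x),
/// i.e. that v = p(x). Here g, h, and beta_h are the `KZGVerifierKey`
/// elements, C is the commitment, and W denotes the opening proof.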
#[allow(dead_code)] pub fn verify( - verifier_param: impl Borrow>, + verifier_param: impl Borrow>, commitment: &UVKZGCommitment, point: &E::Fr, proof: &UVKZGProof, @@ -334,7 +334,7 @@ mod tests { let mut rng = &mut thread_rng(); let degree = rng.gen_range(2..20); - let pp = UVUniversalKZGParam::::gen_srs_for_testing(&mut rng, degree); + let pp = UniversalKZGParam::::gen_srs_for_testing(&mut rng, degree); let (ck, vk) = pp.trim(degree); let p = random(degree, rng); let comm = UVKZGPCS::::commit(&ck, &p)?; diff --git a/src/provider/non_hiding_zeromorph.rs b/src/provider/non_hiding_zeromorph.rs index 7f9de2e73..d0d30b745 100644 --- a/src/provider/non_hiding_zeromorph.rs +++ b/src/provider/non_hiding_zeromorph.rs @@ -6,8 +6,8 @@ use crate::{ errors::{NovaError, PCSError}, provider::{ non_hiding_kzg::{ - UVKZGCommitment, UVKZGEvaluation, UVKZGPoly, UVKZGProof, UVKZGProverKey, UVKZGVerifierKey, - UVUniversalKZGParam, UVKZGPCS, + KZGProverKey, KZGVerifierKey, UVKZGCommitment, UVKZGEvaluation, UVKZGPoly, UVKZGProof, + UniversalKZGParam, UVKZGPCS, }, traits::DlogGroup, }, @@ -41,8 +41,8 @@ use crate::provider::kzg_commitment::KZGCommitmentEngine; deserialize = "E::G1Affine: Deserialize<'de>" ))] pub struct ZMProverKey { - commit_pp: UVKZGProverKey, - open_pp: UVKZGProverKey, + commit_pp: KZGProverKey, + open_pp: KZGProverKey, } /// `ZMVerifierKey` is used to check evaluation proofs for a given @@ -54,7 +54,7 @@ pub struct ZMProverKey { deserialize = "E::G1Affine: Deserialize<'de>, E::G2Affine: Deserialize<'de>" ))] pub struct ZMVerifierKey { - vp: UVKZGVerifierKey, + vp: KZGVerifierKey, #[abomonate_with([u64; 16])] // this is a hack; we just assume the size of the element. s_offset_h: E::G2Affine, } @@ -70,14 +70,14 @@ pub struct ZMVerifierKey { // TODO: important, we need a better way to handle that the commitment key should be 2^max_degree sized, // see the runtime error in commit() below pub fn trim( - params: &UVUniversalKZGParam, + params: &UniversalKZGParam, max_degree: usize, ) -> (ZMProverKey, ZMVerifierKey) { let (commit_pp, vp) = params.trim(max_degree); let offset = params.powers_of_g.len() - max_degree; let open_pp = { let offset_powers_of_g1 = params.powers_of_g[offset..].to_vec(); - UVKZGProverKey { + KZGProverKey { powers_of_g: offset_powers_of_g1, } }; @@ -474,12 +474,12 @@ where type EvaluationArgument = ZMProof; - fn setup(ck: &UVUniversalKZGParam) -> (Self::ProverKey, Self::VerifierKey) { + fn setup(ck: &UniversalKZGParam) -> (Self::ProverKey, Self::VerifierKey) { trim(ck, ck.length() - 1) } fn prove( - _ck: &UVUniversalKZGParam, + _ck: &UniversalKZGParam, pk: &Self::ProverKey, transcript: &mut NE::TE, comm: &Commitment, @@ -529,7 +529,7 @@ mod test { use crate::{ provider::{ keccak::Keccak256Transcript, - non_hiding_kzg::{UVKZGPoly, UVUniversalKZGParam}, + non_hiding_kzg::{UVKZGPoly, UniversalKZGParam}, non_hiding_zeromorph::{ batched_lifted_degree_quotient, eval_and_quotient_scalars, trim, ZMEvaluation, ZMPCS, }, @@ -549,7 +549,7 @@ mod test { let max_vars = 16; let mut rng = thread_rng(); let max_poly_size = 1 << (max_vars + 1); - let universal_setup = UVUniversalKZGParam::::gen_srs_for_testing(&mut rng, max_poly_size); + let universal_setup = UniversalKZGParam::::gen_srs_for_testing(&mut rng, max_poly_size); for num_vars in 3..max_vars { // Setup From 0ccb6c8af5a69b1cf11aae0445351e6b5260594c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fran=C3=A7ois=20Garillot?= Date: Mon, 18 Dec 2023 16:56:13 -0500 Subject: [PATCH 7/7] refactor: Apply univariate polynomial evaluation - chore: 
move comment - fix: standardize power sequences computation - fix: parallelize several poly computations refactor: Refactor `EvaluationArgument` struct in mlkzg.rs - Renamed several fields in `EvaluationArgument` struct within `src/provider/mlkzg.rs` for increased clarity. - Adjusted the `prove` and `verify` methods in `src/provider/mlkzg.rs` to reflect these name changes. - Modified test code to align with the updates in the `EvaluationArgument` structure. --- src/provider/mlkzg.rs | 93 +++++++++++++++++++------------------------ src/spartan/mod.rs | 9 ++--- 2 files changed, 45 insertions(+), 57 deletions(-) diff --git a/src/provider/mlkzg.rs b/src/provider/mlkzg.rs index 838af9a56..530246500 100644 --- a/src/provider/mlkzg.rs +++ b/src/provider/mlkzg.rs @@ -8,11 +8,13 @@ use crate::{ pedersen::Commitment, traits::DlogGroup, }, + spartan::polys::univariate::UniPoly, traits::{ commitment::{CommitmentEngineTrait, Len}, evaluation::EvaluationEngineTrait, Engine as NovaEngine, Group, TranscriptEngineTrait, TranscriptReprTrait, }, + zip_with, }; use core::marker::PhantomData; use ff::{Field, PrimeFieldBits}; @@ -20,6 +22,7 @@ use group::{Curve, Group as _}; use itertools::Itertools as _; use pairing::{Engine, MillerLoopResult, MultiMillerLoop}; use rayon::prelude::*; +use ref_cast::RefCast as _; use serde::{de::DeserializeOwned, Deserialize, Serialize}; /// Provides an implementation of a polynomial evaluation argument @@ -29,9 +32,9 @@ use serde::{de::DeserializeOwned, Deserialize, Serialize}; deserialize = "E::G1Affine: Deserialize<'de>, E::Fr: Deserialize<'de>" ))] pub struct EvaluationArgument { - com: Vec, - w: Vec, - v: Vec>, + evals_r: Vec, + evals_neg_r: Vec, + evals_r_squared: Vec>, } /// Provides an implementation of a polynomial evaluation engine using KZG @@ -40,6 +43,8 @@ pub struct EvaluationEngine { _p: PhantomData<(E, NE)>, } +// This impl block defines helper functions that are not a part of +// EvaluationEngineTrait, but that we will use to implement the trait methods. impl EvaluationEngine where E: Engine, @@ -48,8 +53,6 @@ where E::Fr: TranscriptReprTrait, E::G1Affine: TranscriptReprTrait, // TODO: this bound on DlogGroup is really unusable! { - // This impl block defines helper functions that are not a part of - // EvaluationEngineTrait, but that we will use to implement the trait methods. 
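  // (Editorial note, hedged): these helpers implement the Fiat-Shamir side of
  // the argument. `compute_challenge` binds the commitment C, the claimed
  // evaluation y, and the per-round commitments into the transcript before
  // squeezing the evaluation-point challenge r; `get_batch_challenge` and
  // `batch_challenge_powers` do the same for the batching challenge q and its
  // powers 1, q, ..., q^(k-1); `verifier_second_challenge` derives the final
  // combination challenge d from the batched commitment and openings.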
fn compute_challenge( C: &E::G1Affine, y: &E::Fr, @@ -87,11 +90,9 @@ where fn batch_challenge_powers(q: E::Fr, k: usize) -> Vec { // Compute powers of q : (1, q, q^2, ..., q^(k-1)) - let mut q_powers = vec![E::Fr::ONE; k]; - for i in 1..k { - q_powers[i] = q_powers[i - 1] * q; - } - q_powers + std::iter::successors(Some(E::Fr::ONE), |&x| Some(x * q)) + .take(k) + .collect() } fn verifier_second_challenge( @@ -178,23 +179,12 @@ where u: &[E::Fr], transcript: &mut ::TE| -> (Vec, Vec>) { - let poly_eval = |f: &[E::Fr], u: E::Fr| -> E::Fr { - let mut v = f[0]; - let mut u_power = E::Fr::ONE; - - for fi in f.iter().skip(1) { - u_power *= u; - v += u_power * fi; - } - - v - }; - let scalar_vector_muladd = |a: &mut Vec, v: &Vec, s: E::Fr| { assert!(a.len() >= v.len()); - for i in 0..v.len() { - a[i] += s * v[i]; - } + #[allow(clippy::disallowed_methods)] + a.par_iter_mut() + .zip(v.par_iter()) + .for_each(|(c, v)| *c += s * v); }; let kzg_compute_batch_polynomial = |f: &[Vec], q: E::Fr| -> Vec { @@ -219,23 +209,19 @@ where // The verifier needs f_i(u_j), so we compute them here // (V will compute B(u_j) itself) let mut v = vec![vec!(E::Fr::ZERO; k); t]; - for i in 0..t { + v.par_iter_mut().enumerate().for_each(|(i, v_i)| { // for each point u - for (j, f_j) in f.iter().enumerate().take(k) { + v_i.par_iter_mut().zip_eq(f).for_each(|(v_ij, f)| { // for each poly f - v[i][j] = poly_eval(f_j, u[i]); // = f_j(u_i) - } - } + *v_ij = UniPoly::ref_cast(f).evaluate(&u[i]); + }); + }); let q = Self::get_batch_challenge(C, u, &v, transcript); let B = kzg_compute_batch_polynomial(f, q); // Now open B at u0, ..., u_{t-1} - let mut w = Vec::with_capacity(t); - for ui in u { - let wi = kzg_open(&B, *ui); - w.push(wi); - } + let w = u.par_iter().map(|ui| kzg_open(&B, *ui)).collect::>(); // Compute the commitment to the batched polynomial B(X) let q_powers = Self::batch_challenge_powers(q, k); @@ -297,7 +283,11 @@ where com_all.insert(0, C.comm.preprocessed()); let (w, v) = kzg_open_batch(&com_all, &polys, &u, transcript); - Ok(EvaluationArgument { com, w, v }) + Ok(EvaluationArgument { + evals_r: com, + evals_neg_r: w, + evals_r_squared: v, + }) } /// A method to verify purported evaluations of a batch of polynomials @@ -333,15 +323,9 @@ where // Compute the batched openings // compute B(u_i) = v[i][0] + q*v[i][1] + ... 
+ q^(t-1) * v[i][t-1] - let B_u = (0..t) - .map(|i| { - assert_eq!(q_powers.len(), v[i].len()); - q_powers - .iter() - .zip_eq(v[i].iter()) - .map(|(a, b)| *a * *b) - .sum() - }) + let B_u = v + .iter() + .map(|v_i| zip_with!(iter, (q_powers, v_i), |a, b| *a * *b).sum()) .collect::>(); let d_0 = Self::verifier_second_challenge(&C_B, W, transcript); @@ -374,7 +358,7 @@ where // Check that e(L, vk.H) == e(R, vk.tau_H) let pairing_inputs = [ - (&L.to_affine(), &E::G2Prepared::from(-vk.h)), + (&(-L).to_affine(), &E::G2Prepared::from(vk.h)), (&R.to_affine(), &E::G2Prepared::from(vk.beta_h)), ]; @@ -385,7 +369,7 @@ where let ell = x.len(); - let mut com = pi.com.clone(); + let mut com = pi.evals_r.clone(); // we do not need to add x to the transcript, because in our context x was // obtained from the transcript @@ -399,7 +383,7 @@ where let u = vec![r, -r, r * r]; // Setup vectors (Y, ypos, yneg) from pi.v - let v = &pi.v; + let v = &pi.evals_r_squared; if v.len() != 3 { return Err(NovaError::ProofVerifyError); } @@ -428,7 +412,14 @@ where } // Check commitments to (Y, ypos, yneg) are valid - if !kzg_verify_batch(vk, &com, &pi.w, &u, &pi.v, transcript) { + if !kzg_verify_batch( + vk, + &com, + &pi.evals_neg_r, + &u, + &pi.evals_r_squared, + transcript, + ) { return Err(NovaError::ProofVerifyError); } @@ -552,7 +543,7 @@ mod tests { // Change the proof and expect verification to fail let mut bad_proof = proof.clone(); - bad_proof.com[0] = (bad_proof.com[0] + bad_proof.com[1]).to_affine(); + bad_proof.evals_r[0] = (bad_proof.evals_r[0] + bad_proof.evals_r[1]).to_affine(); let mut verifier_transcript2 = Keccak256Transcript::::new(b"TestEval"); assert!(EvaluationEngine::::verify( &vk, @@ -605,7 +596,7 @@ mod tests { // Change the proof and expect verification to fail let mut bad_proof = proof.clone(); - bad_proof.com[0] = (bad_proof.com[0] + bad_proof.com[1]).to_affine(); + bad_proof.evals_r[0] = (bad_proof.evals_r[0] + bad_proof.evals_r[1]).to_affine(); let mut verifier_tr2 = Keccak256Transcript::::new(b"TestEval"); assert!(EvaluationEngine::::verify( &vk, diff --git a/src/spartan/mod.rs b/src/spartan/mod.rs index 215ba13d9..1e97b2f5f 100644 --- a/src/spartan/mod.rs +++ b/src/spartan/mod.rs @@ -30,12 +30,9 @@ use rayon::{iter::IntoParallelRefIterator, prelude::*}; // Creates a vector of the first `n` powers of `s`. fn powers(s: &E::Scalar, n: usize) -> Vec { assert!(n >= 1); - let mut powers = Vec::with_capacity(n); - powers.push(E::Scalar::ONE); - for i in 1..n { - powers.push(powers[i - 1] * s); - } - powers + std::iter::successors(Some(E::Scalar::ONE), |&x| Some(x * s)) + .take(n) + .collect() } /// A type that holds a witness to a polynomial evaluation instance
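// (Editorial sketch, hedged — not part of the patch): the `successors`-based
// rewrite of `powers` above builds [1, s, s^2, ..., s^(n-1)] lazily instead of
// through an indexed loop. A self-contained equivalent over any `ff::Field`,
// with an illustrative expansion, would look like:
//
//   use ff::Field;
//
//   fn powers_of<F: Field>(s: F, n: usize) -> Vec<F> {
//     std::iter::successors(Some(F::ONE), |&x| Some(x * s))
//       .take(n)
//       .collect()
//   }
//
//   // e.g. powers_of(s, 4) == vec![F::ONE, s, s * s, s * s * s]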