diff --git a/Cargo.toml b/Cargo.toml index 8d6b1079..7e59fe23 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -36,6 +36,9 @@ plonk-core = { path = "plonk-core" } plonk-hashing = { path = "plonk-hashing" } [dev-dependencies] +ark-ff = "0.3" +ark-sponge = "0.3" +ark-std = "0.3" ark-bls12-377 = "0.3" ark-bls12-381 = "0.3" ark-ec = "0.3" @@ -49,6 +52,8 @@ paste = "1.0.6" rand = "0.8.0" rand_core = "0.6" tempdir = "0.3" +ark-vesta = "0.3" + [[bench]] name = "plonk" diff --git a/benches/plonk.rs b/benches/plonk.rs index 3e67d60c..a9d11b78 100644 --- a/benches/plonk.rs +++ b/benches/plonk.rs @@ -11,54 +11,47 @@ use ark_bls12_381::{Bls12_381, Fr as BlsScalar}; use ark_ec::{PairingEngine, TEModelParameters}; use ark_ed_on_bls12_381::EdwardsParameters; -use ark_poly::univariate::DensePolynomial; -use ark_poly_commit::kzg10::KZG10; +use ark_ff::{FftField, PrimeField}; +use ark_poly_commit::PolynomialCommitment; use core::marker::PhantomData; use criterion::{criterion_group, criterion_main, BenchmarkId, Criterion}; +use plonk::commitment::KZG10; use plonk::prelude::*; -use rand_core::OsRng; +use rand::rngs::OsRng; /// Benchmark Circuit #[derive(derivative::Derivative)] #[derivative(Debug, Default)] -pub struct BenchCircuit -where - E: PairingEngine, - P: TEModelParameters, -{ +pub struct BenchCircuit { /// Circuit Size size: usize, - /// Type Parameter Marker - __: PhantomData<(E, P)>, + /// Field and parameters + _phantom: PhantomData<(F, P)>, } -impl BenchCircuit -where - E: PairingEngine, - P: TEModelParameters, -{ +impl BenchCircuit { /// Builds a new circuit with a constraint count of `2^degree`. #[inline] pub fn new(degree: usize) -> Self { Self { size: 1 << degree, - __: PhantomData, + _phantom: PhantomData::<(F, P)>, } } } -impl Circuit for BenchCircuit +impl Circuit for BenchCircuit where - E: PairingEngine, - P: TEModelParameters, + F: FftField + PrimeField, + P: TEModelParameters, { const CIRCUIT_ID: [u8; 32] = [0xff; 32]; #[inline] fn gadget( &mut self, - composer: &mut StandardComposer, + composer: &mut StandardComposer, ) -> Result<(), Error> { while composer.circuit_size() < self.size - 1 { composer.add_dummy_constraints(); @@ -79,23 +72,28 @@ fn constraint_system_benchmark(c: &mut Criterion) { const MINIMUM_DEGREE: usize = 5; const MAXIMUM_DEGREE: usize = 19; - let pp = KZG10::>::setup( + let pp = KZG10::::setup( // +1 per wire, +2 for the permutation poly 1 << MAXIMUM_DEGREE + 6, - false, + None, &mut OsRng, ) .expect("Unable to sample public parameters."); let mut compiling_benchmarks = c.benchmark_group("compile"); for degree in MINIMUM_DEGREE..MAXIMUM_DEGREE { - let mut circuit = BenchCircuit::<_, EdwardsParameters>::new(degree); + let mut circuit = BenchCircuit::< + BlsScalar, + ark_ed_on_bls12_381::EdwardsParameters, + >::new(degree); compiling_benchmarks.bench_with_input( BenchmarkId::from_parameter(degree), °ree, |b, _| { b.iter(|| { - circuit.compile(&pp).expect("Unable to compile circuit.") + circuit + .compile::>(&pp) + .expect("Unable to compile circuit.") }) }, ); @@ -104,14 +102,26 @@ fn constraint_system_benchmark(c: &mut Criterion) { let mut proving_benchmarks = c.benchmark_group("prove"); for degree in MINIMUM_DEGREE..MAXIMUM_DEGREE { - let mut circuit = BenchCircuit::<_, EdwardsParameters>::new(degree); - let (pk_p, _) = - circuit.compile(&pp).expect("Unable to compile circuit."); + let mut circuit = BenchCircuit::< + BlsScalar, + ark_ed_on_bls12_381::EdwardsParameters, + >::new(degree); + let (pk_p, _) = circuit + .compile::>(&pp) + .expect("Unable to compile circuit."); 
proving_benchmarks.bench_with_input( BenchmarkId::from_parameter(degree), °ree, |b, _| { - b.iter(|| circuit.gen_proof(&pp, pk_p.clone(), &label).unwrap()) + b.iter(|| { + circuit + .gen_proof::>( + &pp, + pk_p.clone(), + &label, + ) + .unwrap() + }) }, ); } @@ -119,23 +129,27 @@ fn constraint_system_benchmark(c: &mut Criterion) { let mut verifying_benchmarks = c.benchmark_group("verify"); for degree in MINIMUM_DEGREE..MAXIMUM_DEGREE { - let mut circuit = BenchCircuit::<_, EdwardsParameters>::new(degree); + let mut circuit = BenchCircuit::< + BlsScalar, + ark_ed_on_bls12_381::EdwardsParameters, + >::new(degree); let (pk_p, verifier_data) = circuit.compile(&pp).expect("Unable to compile circuit."); - let proof = circuit.gen_proof(&pp, pk_p.clone(), &label).unwrap(); + let proof = circuit + .gen_proof::>(&pp, pk_p.clone(), &label) + .unwrap(); let VerifierData { key, pi_pos } = verifier_data; verifying_benchmarks.bench_with_input( BenchmarkId::from_parameter(degree), °ree, |b, _| { b.iter(|| { - plonk::circuit::verify_proof( - &pp, - key.clone(), - &proof, - &[], - &pi_pos, - &label, + plonk::circuit::verify_proof::< + ::Fr, + EdwardsParameters, + KZG10, + >( + &pp, key.clone(), &proof, &[], &pi_pos, &label ) .expect("Unable to verify benchmark circuit."); }) diff --git a/plonk-core/Cargo.toml b/plonk-core/Cargo.toml index 3ce6b61c..5595a2fd 100644 --- a/plonk-core/Cargo.toml +++ b/plonk-core/Cargo.toml @@ -22,7 +22,7 @@ default = [ "asm", "itertools/default", "parallel", - "rand_core/std", + "rand/std", ] # Raw Assembly @@ -49,6 +49,9 @@ trace = [] trace-print = ["trace"] [dependencies] +ark-bls12-381 = "0.3" +ark-std = { version = "0.3", features = ["std"] } +blake2 = "0.9" ark-ec = { version = "0.3", default-features = false } ark-ff = { version = "0.3", default-features = false } ark-poly = { version = "0.3" } @@ -59,7 +62,7 @@ hashbrown = { version = "0.11.2", default-features = false, features = ["ahash"] itertools = { version = "0.10.1", default-features = false } merlin = { version = "3.0", default-features = false } num-traits = { version = "0.2.14" } -rand_core = { version = "0.6", default-features = false, features = ["getrandom"] } +rand = { version = "0.8", default-features = false, features = ["getrandom"] } [dev-dependencies] ark-bls12-377 = "0.3" diff --git a/plonk-core/src/circuit.rs b/plonk-core/src/circuit.rs index d1a8ab33..a3ac5930 100644 --- a/plonk-core/src/circuit.rs +++ b/plonk-core/src/circuit.rs @@ -6,39 +6,25 @@ //! 
Tools & traits for PLONK circuits -use crate::error::Error; -use crate::prelude::StandardComposer; -use crate::proof_system::{Proof, Prover, ProverKey, Verifier, VerifierKey}; -use ark_ec::models::TEModelParameters; +use crate::{ + commitment::HomomorphicCommitment, + error::{to_pc_error, Error}, + prelude::StandardComposer, + proof_system::{Proof, Prover, ProverKey, Verifier, VerifierKey}, +}; use ark_ec::{ - twisted_edwards_extended::{GroupAffine, GroupProjective}, - PairingEngine, ProjectiveCurve, + models::{SWModelParameters, TEModelParameters}, + short_weierstrass_jacobian::{ + GroupAffine as SWGroupAffine, GroupProjective as SWGroupProjective, + }, + twisted_edwards_extended::{ + GroupAffine as TEGroupAffine, GroupProjective as TEGroupProjective, + }, + ProjectiveCurve, }; -use ark_ff::PrimeField; -use ark_poly::univariate::DensePolynomial; -use ark_poly_commit::kzg10::{self, Powers, UniversalParams}; -use ark_poly_commit::sonic_pc::SonicKZG10; -use ark_poly_commit::PolynomialCommitment; +use ark_ff::{Field, PrimeField}; use ark_serialize::*; -/// Field Element Into Public Input -/// -/// The reason for introducing these two traits, `FeIntoPubInput` and -/// `GeIntoPubInput` is to have a workaround for not being able to -/// implement `From<_> for Values` for both `PrimeField` and `GroupAffine`. The -/// reason why this is not possible is because both the trait `PrimeField` and -/// the struct `GroupAffine` are external to the crate, and therefore the -/// compiler cannot be sure that `PrimeField` will never be implemented for -/// `GroupAffine`. In which case, the two implementations of `From` would be -/// inconsistent. To this end, we create to helper traits, `FeIntoPubInput` and -/// `GeIntoPubInput`, that stand for "Field Element Into Public Input" and -/// "Group Element Into Public Input" respectively. -pub trait FeIntoPubInput { - /// Ad hoc `Into` implementation. Serves the same purpose as `Into`, but as - /// a different trait. Read documentation of Trait for more details. - fn into_pi(self) -> T; -} - /// Group Element Into Public Input /// /// The reason for introducing these two traits is to have a workaround for not @@ -60,42 +46,62 @@ pub trait GeIntoPubInput { /// scalar representation. #[derive(CanonicalDeserialize, CanonicalSerialize, derivative::Derivative)] #[derivative(Clone, Debug, Default)] -pub struct PublicInputValue

+pub struct PublicInputValue<F>
 where
-    P: TEModelParameters,
+    F: Field,
 {
     /// Field Values
-    pub(crate) values: Vec<P::BaseField>,
+    pub(crate) values: Vec<F>,
 }

-impl<P> FeIntoPubInput<PublicInputValue<P>> for P::BaseField
+impl<F> From<F> for PublicInputValue<F>
+where
+    F: Field,
+{
+    fn from(p: F) -> PublicInputValue<F> {
+        PublicInputValue { values: vec![p] }
+    }
+}
+
+impl<P> GeIntoPubInput<PublicInputValue<P::BaseField>> for TEGroupAffine<P>
 where
     P: TEModelParameters,
 {
     #[inline]
-    fn into_pi(self) -> PublicInputValue<P> {
-        PublicInputValue { values: vec![self] }
+    fn into_pi(self) -> PublicInputValue<P::BaseField> {
+        PublicInputValue {
+            values: vec![self.x, self.y],
+        }
+    }
 }

-impl<P> GeIntoPubInput<PublicInputValue<P>> for GroupAffine<P>
+impl<P> GeIntoPubInput<PublicInputValue<P::BaseField>> for TEGroupProjective<P>
 where
     P: TEModelParameters,
 {
     #[inline]
-    fn into_pi(self) -> PublicInputValue<P> {
+    fn into_pi(self) -> PublicInputValue<P::BaseField> {
+        GeIntoPubInput::into_pi(self.into_affine())
+    }
+}
+impl<P> GeIntoPubInput<PublicInputValue<P::BaseField>> for SWGroupAffine<P>
+where
+    P: SWModelParameters,
+{
+    #[inline]
+    fn into_pi(self) -> PublicInputValue<P::BaseField> {
         PublicInputValue {
             values: vec![self.x, self.y],
         }
     }
 }

-impl<P> GeIntoPubInput<PublicInputValue<P>> for GroupProjective<P>
+impl<P> GeIntoPubInput<PublicInputValue<P::BaseField>> for SWGroupProjective<P>
 where
-    P: TEModelParameters,
+    P: SWModelParameters,
 {
     #[inline]
-    fn into_pi(self) -> PublicInputValue<P>
{ + fn into_pi(self) -> PublicInputValue { GeIntoPubInput::into_pi(self.into_affine()) } } @@ -107,35 +113,35 @@ where #[derive(CanonicalDeserialize, CanonicalSerialize, derivative::Derivative)] #[derivative( Clone(bound = ""), - Debug(bound = ""), - Eq(bound = ""), - PartialEq(bound = "") + Debug(bound = "VerifierKey: std::fmt::Debug"), + Eq(bound = "VerifierKey: Eq"), + PartialEq(bound = "VerifierKey: PartialEq") )] -pub struct VerifierData +pub struct VerifierData where - E: PairingEngine, - P: TEModelParameters, + F: PrimeField, + PC: HomomorphicCommitment, { /// Verifier Key - pub key: VerifierKey, + pub key: VerifierKey, /// Public Input Positions pub pi_pos: Vec, } -impl VerifierData +impl VerifierData where - E: PairingEngine, - P: TEModelParameters, + F: PrimeField, + PC: HomomorphicCommitment, { /// Creates a new `VerifierData` from a [`VerifierKey`] and the public /// input positions of the circuit that it represents. - pub fn new(key: VerifierKey, pi_pos: Vec) -> Self { + pub fn new(key: VerifierKey, pi_pos: Vec) -> Self { Self { key, pi_pos } } /// Returns a reference to the contained [`VerifierKey`]. - pub fn key(&self) -> &VerifierKey { + pub fn key(&self) -> &VerifierKey { &self.key } @@ -160,13 +166,16 @@ where /// EdwardsAffine as JubJubAffine, EdwardsParameters as JubJubParameters, /// EdwardsProjective as JubJubProjective, Fr as JubJubScalar, /// }; -/// use ark_ff::{PrimeField, BigInteger}; +/// use ark_ff::{FftField, PrimeField, BigInteger}; +/// use plonk_core::circuit::{Circuit, PublicInputValue, verify_proof, GeIntoPubInput}; +/// use plonk_core::constraint_system::StandardComposer; +/// use plonk_core::error::{to_pc_error,Error}; /// use ark_poly::polynomial::univariate::DensePolynomial; -/// use ark_poly_commit::kzg10::KZG10; +/// use ark_poly_commit::{PolynomialCommitment, sonic_pc::SonicKZG10}; +/// use plonk_core::prelude::*; /// use ark_serialize::{CanonicalDeserialize, CanonicalSerialize}; /// use num_traits::{Zero, One}; -/// use plonk_core::prelude::*; -/// use rand_core::OsRng; +/// use rand::rngs::OsRng; /// /// fn main() -> Result<(), Error> { /// // Implements a circuit that checks: @@ -177,28 +186,29 @@ where /// // 5) JubJub::GENERATOR * e(JubJubScalar) = f where F is a PI /// #[derive(derivative::Derivative)] /// #[derivative(Debug(bound = ""), Default(bound = ""))] -/// pub struct TestCircuit< -/// E: PairingEngine, -/// P: TEModelParameters, -/// > { -/// a: E::Fr, -/// b: E::Fr, -/// c: E::Fr, -/// d: E::Fr, +/// pub struct TestCircuit +/// where +/// F: PrimeField, +/// P: TEModelParameters, +/// { +/// a: F, +/// b: F, +/// c: F, +/// d: F, /// e: P::ScalarField, /// f: GroupAffine

, /// } /// -/// impl Circuit for TestCircuit -/// where -/// E: PairingEngine, -/// P: TEModelParameters, +/// impl Circuit for TestCircuit +/// where +/// F: PrimeField, +/// P: TEModelParameters, /// { /// const CIRCUIT_ID: [u8; 32] = [0xff; 32]; /// /// fn gadget( /// &mut self, -/// composer: &mut StandardComposer, +/// composer: &mut StandardComposer, /// ) -> Result<(), Error> { /// let a = composer.add_input(self.a); /// let b = composer.add_input(self.b); @@ -207,7 +217,7 @@ where /// // Make first constraint a + b = c (as public input) /// composer.arithmetic_gate(|gate| { /// gate.witness(a, b, Some(zero)) -/// .add(E::Fr::one(), E::Fr::one()) +/// .add(F::one(), F::one()) /// .pi(-self.c) /// }); /// @@ -216,15 +226,14 @@ where /// composer.range_gate(b, 1 << 5); /// // Make second constraint a * b = d /// composer.arithmetic_gate(|gate| { -/// gate.witness(a, b, Some(zero)).mul(E::Fr::one()).pi(-self.d) +/// gate.witness(a, b, Some(zero)).mul(F::one()).pi(-self.d) /// }); /// let e = composer -/// .add_input(from_embedded_curve_scalar::(self.e)); +/// .add_input(from_embedded_curve_scalar::(self.e)); /// let (x, y) = P::AFFINE_GENERATOR_COEFFS; /// let generator = GroupAffine::new(x, y); /// let scalar_mul_result = /// composer.fixed_base_scalar_mul(e, generator); -/// /// // Apply the constrain /// composer.assert_equal_public_point(scalar_mul_result, self.f); /// Ok(()) @@ -236,15 +245,14 @@ where /// } /// /// // Generate CRS -/// let pp = KZG10::>::setup( -/// 1 << 12, -/// false, -/// &mut OsRng, -/// )?; +/// type PC = SonicKZG10::>; +/// let pp = PC::setup( +/// 1 << 12, None, &mut OsRng +/// )?; /// -/// let mut circuit = TestCircuit::::default(); +/// let mut circuit = TestCircuit::::default(); /// // Compile the circuit -/// let (pk_p, verifier_data) = circuit.compile(&pp)?; +/// let (pk_p, verifier_data) = circuit.compile::(&pp)?; /// /// let (x, y) = JubJubParameters::AFFINE_GENERATOR_COEFFS; /// let generator: GroupAffine = GroupAffine::new(x, y); @@ -255,7 +263,7 @@ where /// .into_affine(); /// // Prover POV /// let proof = { -/// let mut circuit: TestCircuit = TestCircuit { +/// let mut circuit: TestCircuit = TestCircuit { /// a: BlsScalar::from(20u64), /// b: BlsScalar::from(5u64), /// c: BlsScalar::from(25u64), @@ -263,27 +271,27 @@ where /// e: JubJubScalar::from(2u64), /// f: point_f_pi, /// }; +/// circuit.gen_proof::(&pp, pk_p, b"Test") +/// }?; /// -/// circuit.gen_proof(&pp, pk_p, b"Test")? -/// }; /// // Test serialisation for verifier_data /// let mut verifier_data_bytes = Vec::new(); /// verifier_data.serialize(&mut verifier_data_bytes).unwrap(); /// -/// let verif_data: VerifierData = +/// let verif_data: VerifierData = /// VerifierData::deserialize(verifier_data_bytes.as_slice()).unwrap(); /// -/// assert!(verif_data == verifier_data); +/// // assert!(verif_data == verifier_data); /// // Verifier POV -/// let public_inputs: Vec> = vec![ -/// BlsScalar::from(25u64).into_pi(), -/// BlsScalar::from(100u64).into_pi(), +/// let public_inputs: Vec> = vec![ +/// BlsScalar::from(25u64).into(), +/// BlsScalar::from(100u64).into(), /// GeIntoPubInput::into_pi(point_f_pi), /// ]; /// /// let VerifierData { key, pi_pos } = verifier_data; /// // TODO: non-ideal hack for a first functional version. 
-/// verify_proof::( +/// verify_proof::( /// &pp, /// key, /// &proof, @@ -293,10 +301,10 @@ where /// ) /// } /// ``` -pub trait Circuit +pub trait Circuit where - E: PairingEngine, - P: TEModelParameters, + F: PrimeField, + P: TEModelParameters, { /// Circuit identifier associated constant. const CIRCUIT_ID: [u8; 32]; @@ -304,43 +312,41 @@ where /// Gadget implementation used to fill the composer. fn gadget( &mut self, - composer: &mut StandardComposer, + composer: &mut StandardComposer, ) -> Result<(), Error>; /// Compiles the circuit by using a function that returns a `Result` /// with the `ProverKey`, `VerifierKey` and the circuit size. #[allow(clippy::type_complexity)] // NOTE: Clippy is too hash here. - fn compile( + fn compile( &mut self, - u_params: &UniversalParams, - ) -> Result<(ProverKey, VerifierData), Error> { + u_params: &PC::UniversalParams, + ) -> Result<(ProverKey, VerifierData), Error> + where + F: PrimeField, + PC: HomomorphicCommitment, + { // Setup PublicParams - // XXX: KZG10 does not have a trim function so we use sonics and - // then do a transformation between sonic CommiterKey to KZG10 - // powers let circuit_size = self.padded_circuit_size(); - let (ck, _) = SonicKZG10::>::trim( + let (ck, _) = PC::trim( u_params, // +1 per wire, +2 for the permutation poly circuit_size + 6, 0, None, ) - .unwrap(); - let powers = Powers { - powers_of_g: ck.powers_of_g.into(), - powers_of_gamma_g: ck.powers_of_gamma_g.into(), - }; + .map_err(to_pc_error::)?; + //Generate & save `ProverKey` with some random values. - let mut prover = Prover::new(b"CircuitCompilation"); + let mut prover = Prover::::new(b"CircuitCompilation"); self.gadget(prover.mut_cs())?; let pi_pos = prover.mut_cs().pi_positions(); - prover.preprocess(&powers)?; + prover.preprocess(&ck)?; // Generate & save `VerifierKey` with some random values. let mut verifier = Verifier::new(b"CircuitCompilation"); self.gadget(verifier.mut_cs())?; - verifier.preprocess(&powers)?; + verifier.preprocess(&ck)?; Ok(( prover .prover_key @@ -356,35 +362,33 @@ where /// Generates a proof using the provided `CircuitInputs` & `ProverKey` /// instances. - fn gen_proof( + fn gen_proof( &mut self, - u_params: &UniversalParams, - prover_key: ProverKey, + u_params: &PC::UniversalParams, + prover_key: ProverKey, transcript_init: &'static [u8], - ) -> Result, Error> { - // XXX: KZG10 does not have a trim function so we use sonics and - // then do a transformation between sonic CommiterKey to KZG10 - // powers + ) -> Result, Error> + where + F: PrimeField, + P: TEModelParameters, + PC: HomomorphicCommitment, + { let circuit_size = self.padded_circuit_size(); - let (ck, _) = SonicKZG10::>::trim( + let (ck, _) = PC::trim( u_params, // +1 per wire, +2 for the permutation poly circuit_size + 6, 0, None, ) - .unwrap(); - let powers = Powers { - powers_of_g: ck.powers_of_g.into(), - powers_of_gamma_g: ck.powers_of_gamma_g.into(), - }; + .map_err(to_pc_error::)?; // New Prover instance let mut prover = Prover::new(transcript_init); // Fill witnesses for Prover self.gadget(prover.mut_cs())?; // Add ProverKey to Prover prover.prover_key = Some(prover_key); - prover.prove(&powers) + prover.prove(&ck) } /// Returns the Circuit size padded to the next power of two. @@ -393,39 +397,30 @@ where /// Verifies a proof using the provided `CircuitInputs` & `VerifierKey` /// instances. -pub fn verify_proof( - u_params: &UniversalParams, - plonk_verifier_key: VerifierKey, - proof: &Proof, - pub_inputs_values: &[PublicInputValue

], +pub fn verify_proof( + u_params: &PC::UniversalParams, + plonk_verifier_key: VerifierKey, + proof: &Proof, + pub_inputs_values: &[PublicInputValue], pub_inputs_positions: &[usize], transcript_init: &'static [u8], ) -> Result<(), Error> where - E: PairingEngine, - P: TEModelParameters, + F: PrimeField, + P: TEModelParameters, + PC: HomomorphicCommitment, { - let mut verifier: Verifier = Verifier::new(transcript_init); + let mut verifier: Verifier = Verifier::new(transcript_init); let padded_circuit_size = plonk_verifier_key.padded_circuit_size(); - // let key: VerifierKey = *plonk_verifier_key; verifier.verifier_key = Some(plonk_verifier_key); - let (_, sonic_vk) = SonicKZG10::>::trim( + let (_, vk) = PC::trim( u_params, // +1 per wire, +2 for the permutation poly padded_circuit_size + 6, 0, None, ) - .unwrap(); - - let vk = kzg10::VerifierKey { - g: sonic_vk.g, - gamma_g: sonic_vk.gamma_g, - h: sonic_vk.h, - beta_h: sonic_vk.beta_h, - prepared_h: sonic_vk.prepared_h, - prepared_beta_h: sonic_vk.prepared_beta_h, - }; + .map_err(to_pc_error::)?; verifier.verify( proof, @@ -436,14 +431,13 @@ where } /// Build PI vector for Proof verifications. -fn build_pi( - pub_input_values: &[PublicInputValue

], +fn build_pi( + pub_input_values: &[PublicInputValue], pub_input_pos: &[usize], trim_size: usize, ) -> Vec where - F: PrimeField, - P: TEModelParameters, + F: Field, { let mut pi = vec![F::zero(); trim_size]; pub_input_values @@ -462,11 +456,11 @@ mod test { use crate::{constraint_system::StandardComposer, util}; use ark_bls12_377::Bls12_377; use ark_bls12_381::Bls12_381; - use ark_ec::twisted_edwards_extended::GroupAffine; - use ark_ec::AffineCurve; - use ark_poly_commit::kzg10::KZG10; - use num_traits::One; - use rand_core::OsRng; + use ark_ec::{ + twisted_edwards_extended::GroupAffine, AffineCurve, PairingEngine, + }; + use ark_ff::{FftField, PrimeField}; + use rand::rngs::OsRng; // Implements a circuit that checks: // 1) a + b = c where C is a PI @@ -476,28 +470,25 @@ mod test { // 5) JubJub::GENERATOR * e(JubJubScalar) = f where F is a PI #[derive(derivative::Derivative)] #[derivative(Debug(bound = ""), Default(bound = ""))] - pub struct TestCircuit< - E: PairingEngine, - P: TEModelParameters, - > { - a: E::Fr, - b: E::Fr, - c: E::Fr, - d: E::Fr, + pub struct TestCircuit> { + a: F, + b: F, + c: F, + d: F, e: P::ScalarField, f: GroupAffine

, } - impl Circuit for TestCircuit + impl Circuit for TestCircuit where - E: PairingEngine, - P: TEModelParameters, + F: PrimeField, + P: TEModelParameters, { const CIRCUIT_ID: [u8; 32] = [0xff; 32]; fn gadget( &mut self, - composer: &mut StandardComposer, + composer: &mut StandardComposer, ) -> Result<(), Error> { let a = composer.add_input(self.a); let b = composer.add_input(self.b); @@ -506,7 +497,7 @@ mod test { // Make first constraint a + b = c (as public input) composer.arithmetic_gate(|gate| { gate.witness(a, b, Some(zero)) - .add(E::Fr::one(), E::Fr::one()) + .add(F::one(), F::one()) .pi(-self.c) }); @@ -515,10 +506,10 @@ mod test { composer.range_gate(b, 1 << 5); // Make second constraint a * b = d composer.arithmetic_gate(|gate| { - gate.witness(a, b, Some(zero)).mul(E::Fr::one()).pi(-self.d) + gate.witness(a, b, Some(zero)).mul(F::one()).pi(-self.d) }); let e = composer - .add_input(util::from_embedded_curve_scalar::(self.e)); + .add_input(util::from_embedded_curve_scalar::(self.e)); let (x, y) = P::AFFINE_GENERATOR_COEFFS; let generator = GroupAffine::new(x, y); let scalar_mul_result = @@ -530,23 +521,25 @@ mod test { } fn padded_circuit_size(&self) -> usize { - 1 << 11 + 1 << 9 } } - fn test_full>( - ) -> Result<(), Error> { + fn test_full() -> Result<(), Error> + where + F: PrimeField, + P: TEModelParameters, + PC: HomomorphicCommitment, + VerifierData: PartialEq, + { // Generate CRS - let pp = KZG10::>::setup( - 1 << 12, - false, - &mut OsRng, - )?; + let pp = PC::setup(1 << 19, None, &mut OsRng) + .map_err(to_pc_error::)?; - let mut circuit = TestCircuit::::default(); + let mut circuit = TestCircuit::::default(); // Compile the circuit - let (pk_p, verifier_data) = circuit.compile(&pp)?; + let (pk_p, verifier_data) = circuit.compile::(&pp)?; let (x, y) = P::AFFINE_GENERATOR_COEFFS; let generator: GroupAffine

= GroupAffine::new(x, y); @@ -558,38 +551,38 @@ mod test { // Prover POV let proof = { - let mut circuit: TestCircuit = TestCircuit { - a: E::Fr::from(20u64), - b: E::Fr::from(5u64), - c: E::Fr::from(25u64), - d: E::Fr::from(100u64), + let mut circuit: TestCircuit = TestCircuit { + a: F::from(20u64), + b: F::from(5u64), + c: F::from(25u64), + d: F::from(100u64), e: P::ScalarField::from(2u64), f: point_f_pi, }; - circuit.gen_proof(&pp, pk_p, b"Test")? + circuit.gen_proof::(&pp, pk_p, b"Test")? }; // Test serialisation for verifier_data let mut verifier_data_bytes = Vec::new(); verifier_data.serialize(&mut verifier_data_bytes).unwrap(); - let verif_data: VerifierData = + let deserialized_verifier_data: VerifierData = VerifierData::deserialize(verifier_data_bytes.as_slice()).unwrap(); - assert!(verif_data == verifier_data); + assert!(deserialized_verifier_data == verifier_data); // Verifier POV - let public_inputs: Vec> = vec![ - E::Fr::from(25u64).into_pi(), - E::Fr::from(100u64).into_pi(), + let public_inputs: Vec> = vec![ + F::from(25u64).into(), + F::from(100u64).into(), GeIntoPubInput::into_pi(point_f_pi), ]; let VerifierData { key, pi_pos } = verifier_data; // TODO: non-ideal hack for a first functional version. - assert!(verify_proof::( + assert!(verify_proof::( &pp, key, &proof, @@ -605,12 +598,45 @@ mod test { #[test] #[allow(non_snake_case)] fn test_full_on_Bls12_381() -> Result<(), Error> { - test_full::() + test_full::< + ::Fr, + ark_ed_on_bls12_381::EdwardsParameters, + crate::commitment::KZG10, + >() + } + + #[test] + #[allow(non_snake_case)] + fn test_full_on_Bls12_381_ipa() -> Result<(), Error> { + test_full::< + ::Fr, + ark_ed_on_bls12_381::EdwardsParameters, + crate::commitment::IPA< + ::G1Affine, + blake2::Blake2b, + >, + >() } #[test] #[allow(non_snake_case)] fn test_full_on_Bls12_377() -> Result<(), Error> { - test_full::() + test_full::< + ::Fr, + ark_ed_on_bls12_377::EdwardsParameters, + crate::commitment::KZG10, + >() + } + #[test] + #[allow(non_snake_case)] + fn test_full_on_Bls12_377_ipa() -> Result<(), Error> { + test_full::< + ::Fr, + ark_ed_on_bls12_377::EdwardsParameters, + crate::commitment::IPA< + ::G1Affine, + blake2::Blake2b, + >, + >() } } diff --git a/plonk-core/src/commitment.rs b/plonk-core/src/commitment.rs new file mode 100644 index 00000000..308d3e7e --- /dev/null +++ b/plonk-core/src/commitment.rs @@ -0,0 +1,129 @@ +//! 
Useful commitment stuff +use ark_ec::{msm::VariableBaseMSM, AffineCurve, PairingEngine}; +use ark_ff::{Field, PrimeField}; +use ark_poly::univariate::DensePolynomial; +use ark_poly_commit::{sonic_pc::SonicKZG10, PolynomialCommitment}; + +/// A homomorphic polynomial commitment +pub trait HomomorphicCommitment: + PolynomialCommitment> +where + F: PrimeField, + Self::VerifierKey: std::fmt::Debug, +{ + /// Combine a linear combination of homomorphic commitments + fn multi_scalar_mul( + commitments: &[Self::Commitment], + scalars: &[F], + ) -> Self::Commitment; +} + +/// The Default KZG-style commitment scheme +pub type KZG10 = SonicKZG10::Fr>>; +/// A single KZG10 commitment +pub type KZG10Commitment = as PolynomialCommitment< + ::Fr, + DensePolynomial<::Fr>, +>>::Commitment; + +impl HomomorphicCommitment for KZG10 +where + E: PairingEngine, +{ + fn multi_scalar_mul( + commitments: &[KZG10Commitment], + scalars: &[E::Fr], + ) -> KZG10Commitment { + let scalars_repr = scalars + .iter() + .map(::into_repr) + .collect::>(); + + let points_repr = commitments.iter().map(|c| c.0).collect::>(); + + ark_poly_commit::kzg10::Commitment::( + VariableBaseMSM::multi_scalar_mul(&points_repr, &scalars_repr) + .into(), + ) + } +} + +/// Shortened type for Inner Product Argument polynomial commitment schemes +pub type IPA = ark_poly_commit::ipa_pc::InnerProductArgPC< + G, + D, + DensePolynomial<::ScalarField>, +>; +/// Shortened type for an Inner Product Argument polynomial commitment +pub type IPACommitment = as PolynomialCommitment< + ::ScalarField, + DensePolynomial<::ScalarField>, +>>::Commitment; + +use blake2::digest::Digest; +impl HomomorphicCommitment<::ScalarField> + for IPA +where + G: AffineCurve, + D: Digest, +{ + fn multi_scalar_mul( + commitments: &[IPACommitment], + scalars: &[::ScalarField], + ) -> IPACommitment { + let scalars_repr = scalars + .iter() + .map(::ScalarField::into_repr) + .collect::>(); + + let points_repr = + commitments.iter().map(|c| c.comm).collect::>(); + + IPACommitment:: { + comm: VariableBaseMSM::multi_scalar_mul( + &points_repr, + &scalars_repr, + ) + .into(), + shifted_comm: None, // TODO: support degree bounds? + } + } +} + +/// Computes a linear combination of the polynomial evaluations and polynomial +/// commitments provided a challenge. +// TODO: complete doc +pub fn linear_combination( + evals: &[F], + commitments: &[H::Commitment], + challenge: F, +) -> (H::Commitment, F) +where + F: PrimeField, + H: HomomorphicCommitment, +{ + assert_eq!(evals.len(), commitments.len()); + let powers = crate::util::powers_of(challenge) + .take(evals.len()) + .collect::>(); + let combined_eval = evals + .iter() + .zip(powers.iter()) + .map(|(&eval, power)| eval * power) + .sum(); + let combined_commitment = H::multi_scalar_mul(commitments, &powers); + (combined_commitment, combined_eval) +} + +/// Aggregate polynomials +pub fn aggregate_polynomials( + polynomials: &[DensePolynomial], + challenge: F, +) -> DensePolynomial { + use num_traits::Zero; + use std::ops::Add; + crate::util::powers_of(challenge) + .zip(polynomials) + .map(|(challenge, poly)| poly * challenge) + .fold(Zero::zero(), Add::add) +} diff --git a/plonk-core/src/constraint_system/arithmetic.rs b/plonk-core/src/constraint_system/arithmetic.rs index 9f1128e1..f59b3bc4 100644 --- a/plonk-core/src/constraint_system/arithmetic.rs +++ b/plonk-core/src/constraint_system/arithmetic.rs @@ -6,37 +6,45 @@ //! 
Simple Arithmetic Gates -use crate::constraint_system::StandardComposer; -use crate::constraint_system::Variable; -use ark_ec::{PairingEngine, TEModelParameters}; -use num_traits::{One, Zero}; +use crate::constraint_system::{StandardComposer, Variable}; +use ark_ec::TEModelParameters; +use ark_ff::PrimeField; #[derive(Debug, Clone, Copy)] -pub struct ArithmeticGate { +pub struct ArithmeticGate +where + F: PrimeField, +{ pub(crate) witness: Option<(Variable, Variable, Option)>, - pub(crate) fan_in_3: Option<(E::Fr, Variable)>, - pub(crate) mul_selector: E::Fr, - pub(crate) add_selectors: (E::Fr, E::Fr), - pub(crate) out_selector: E::Fr, - pub(crate) const_selector: E::Fr, - pub(crate) pi: Option, + pub(crate) fan_in_3: Option<(F, Variable)>, + pub(crate) mul_selector: F, + pub(crate) add_selectors: (F, F), + pub(crate) out_selector: F, + pub(crate) const_selector: F, + pub(crate) pi: Option, } -impl Default for ArithmeticGate { +impl Default for ArithmeticGate +where + F: PrimeField, +{ fn default() -> Self { Self { witness: None, fan_in_3: None, - mul_selector: E::Fr::zero(), - add_selectors: (E::Fr::zero(), E::Fr::zero()), - out_selector: -E::Fr::one(), - const_selector: E::Fr::zero(), + mul_selector: F::zero(), + add_selectors: (F::zero(), F::zero()), + out_selector: -F::one(), + const_selector: F::zero(), pi: None, } } } -impl ArithmeticGate { +impl ArithmeticGate +where + F: PrimeField, +{ pub fn new() -> Self { Self::default() } @@ -51,32 +59,32 @@ impl ArithmeticGate { self } - pub fn fan_in_3(&mut self, q_4: E::Fr, w_4: Variable) -> &mut Self { + pub fn fan_in_3(&mut self, q_4: F, w_4: Variable) -> &mut Self { self.fan_in_3 = Some((q_4, w_4)); self } - pub fn mul(&mut self, q_m: E::Fr) -> &mut Self { + pub fn mul(&mut self, q_m: F) -> &mut Self { self.mul_selector = q_m; self } - pub fn add(&mut self, q_l: E::Fr, q_r: E::Fr) -> &mut Self { + pub fn add(&mut self, q_l: F, q_r: F) -> &mut Self { self.add_selectors = (q_l, q_r); self } - pub fn out(&mut self, q_o: E::Fr) -> &mut Self { + pub fn out(&mut self, q_o: F) -> &mut Self { self.out_selector = q_o; self } - pub fn constant(&mut self, q_c: E::Fr) -> &mut Self { + pub fn constant(&mut self, q_c: F) -> &mut Self { self.const_selector = q_c; self } - pub fn pi(&mut self, pi: E::Fr) -> &mut Self { + pub fn pi(&mut self, pi: F) -> &mut Self { self.pi = Some(pi); self } @@ -86,16 +94,18 @@ impl ArithmeticGate { } } -impl> - StandardComposer +impl StandardComposer +where + F: PrimeField, + P: TEModelParameters, { /// Function used to generate any arithmetic gate with fan-in-2 or fan-in-3. 
- pub fn arithmetic_gate(&mut self, func: F) -> Variable + pub fn arithmetic_gate(&mut self, func: Fn) -> Variable where - F: FnOnce(&mut ArithmeticGate) -> &mut ArithmeticGate, + Fn: FnOnce(&mut ArithmeticGate) -> &mut ArithmeticGate, { let gate = { - let mut gate = ArithmeticGate::::new(); + let mut gate = ArithmeticGate::::new(); func(&mut gate).build() }; @@ -103,7 +113,7 @@ impl> panic!("Missing left and right wire witnesses") } - let (q4, w4) = gate.fan_in_3.unwrap_or((E::Fr::zero(), self.zero_var)); + let (q4, w4) = gate.fan_in_3.unwrap_or((F::zero(), self.zero_var)); self.w_4.push(w4); self.q_4.push(q4); @@ -118,11 +128,11 @@ impl> self.q_o.push(gate.out_selector); self.q_c.push(gate.const_selector); - self.q_arith.push(E::Fr::one()); - self.q_range.push(E::Fr::zero()); - self.q_logic.push(E::Fr::zero()); - self.q_fixed_group_add.push(E::Fr::zero()); - self.q_variable_group_add.push(E::Fr::zero()); + self.q_arith.push(F::one()); + self.q_range.push(F::zero()); + self.q_logic.push(F::zero()); + self.q_fixed_group_add.push(F::zero()); + self.q_variable_group_add.push(F::zero()); if let Some(pi) = gate.pi { let insert_res = self.public_inputs_sparse_store.insert(self.n, pi); @@ -162,44 +172,44 @@ impl> #[cfg(test)] mod test { use super::*; - use crate::batch_test; - use crate::constraint_system::helper::*; + use crate::{ + batch_test, commitment::HomomorphicCommitment, + constraint_system::helper::*, + }; use ark_bls12_377::Bls12_377; use ark_bls12_381::Bls12_381; - use ark_ec::PairingEngine; - use ark_ec::TEModelParameters; - use num_traits::One; - fn test_public_inputs() + fn test_public_inputs() where - E: PairingEngine, - P: TEModelParameters, + F: PrimeField, + P: TEModelParameters, + PC: HomomorphicCommitment, { - let res = gadget_tester( - |composer: &mut StandardComposer| { - let var_one = composer.add_input(E::Fr::one()); + let res = gadget_tester::( + |composer: &mut StandardComposer| { + let var_one = composer.add_input(F::one()); let should_be_three = composer.arithmetic_gate(|gate| { gate.witness(var_one, var_one, None) - .add(E::Fr::one(), E::Fr::one()) - .pi(E::Fr::one()) + .add(F::one(), F::one()) + .pi(F::one()) }); composer.constrain_to_constant( should_be_three, - E::Fr::from(3u64), + F::from(3u64), None, ); let should_be_four = composer.arithmetic_gate(|gate| { gate.witness(var_one, var_one, None) - .add(E::Fr::one(), E::Fr::one()) - .pi(E::Fr::from(2u64)) + .add(F::one(), F::one()) + .pi(F::from(2u64)) }); composer.constrain_to_constant( should_be_four, - E::Fr::from(4u64), + F::from(4u64), None, ); }, @@ -208,29 +218,30 @@ mod test { assert!(res.is_ok(), "{:?}", res.err().unwrap()); } - fn test_correct_add_mul_gate() + fn test_correct_add_mul_gate() where - E: PairingEngine, - P: TEModelParameters, + F: PrimeField, + P: TEModelParameters, + PC: HomomorphicCommitment, { - let res = gadget_tester( - |composer: &mut StandardComposer| { + let res = gadget_tester::( + |composer: &mut StandardComposer| { // Verify that (4+5+5) * (6+7+7) = 280 - let four = composer.add_input(E::Fr::from(4u64)); - let five = composer.add_input(E::Fr::from(5u64)); - let six = composer.add_input(E::Fr::from(6u64)); - let seven = composer.add_input(E::Fr::from(7u64)); + let four = composer.add_input(F::from(4u64)); + let five = composer.add_input(F::from(5u64)); + let six = composer.add_input(F::from(6u64)); + let seven = composer.add_input(F::from(7u64)); let fourteen = composer.arithmetic_gate(|gate| { gate.witness(four, five, None) - .add(E::Fr::one(), E::Fr::one()) - 
.pi(E::Fr::from(5u64)) + .add(F::one(), F::one()) + .pi(F::from(5u64)) }); let twenty = composer.arithmetic_gate(|gate| { gate.witness(six, seven, None) - .add(E::Fr::one(), E::Fr::one()) - .fan_in_3(E::Fr::one(), seven) + .add(F::one(), F::one()) + .fan_in_3(F::one(), seven) }); // There are quite a few ways to check the equation is correct, @@ -241,102 +252,97 @@ mod test { // is public, we can also constrain the output wire of the mul // gate to it. This is what this test does let output = composer.arithmetic_gate(|gate| { - gate.witness(fourteen, twenty, None).mul(E::Fr::one()) + gate.witness(fourteen, twenty, None).mul(F::one()) }); - composer.constrain_to_constant( - output, - E::Fr::from(280u64), - None, - ); + composer.constrain_to_constant(output, F::from(280u64), None); }, 200, ); assert!(res.is_ok(), "{:?}", res.err().unwrap()); } - fn test_correct_add_gate() + fn test_correct_add_gate() where - E: PairingEngine, - P: TEModelParameters, + F: PrimeField, + P: TEModelParameters, + PC: HomomorphicCommitment, { - let res = gadget_tester( - |composer: &mut StandardComposer| { + let res = gadget_tester::( + |composer: &mut StandardComposer| { let zero = composer.zero_var(); - let one = composer.add_input(E::Fr::one()); + let one = composer.add_input(F::one()); let c = composer.arithmetic_gate(|gate| { gate.witness(one, zero, None) - .add(E::Fr::one(), E::Fr::one()) - .constant(E::Fr::from(2u64)) + .add(F::one(), F::one()) + .constant(F::from(2u64)) }); - composer.constrain_to_constant(c, E::Fr::from(3u64), None); + composer.constrain_to_constant(c, F::from(3u64), None); }, 32, ); assert!(res.is_ok(), "{:?}", res.err().unwrap()); } - fn test_correct_big_add_mul_gate() + fn test_correct_big_add_mul_gate() where - E: PairingEngine, - P: TEModelParameters, + F: PrimeField, + P: TEModelParameters, + PC: HomomorphicCommitment, { - let res = gadget_tester( - |composer: &mut StandardComposer| { + let res = gadget_tester::( + |composer: &mut StandardComposer| { // Verify that (4+5+5) * (6+7+7) + (8*9) = 352 - let four = composer.add_input(E::Fr::from(4u64)); - let five = composer.add_input(E::Fr::from(5u64)); - let six = composer.add_input(E::Fr::from(6u64)); - let seven = composer.add_input(E::Fr::from(7u64)); - let nine = composer.add_input(E::Fr::from(9u64)); + let four = composer.add_input(F::from(4u64)); + let five = composer.add_input(F::from(5u64)); + let six = composer.add_input(F::from(6u64)); + let seven = composer.add_input(F::from(7u64)); + let nine = composer.add_input(F::from(9u64)); let fourteen = composer.arithmetic_gate(|gate| { gate.witness(four, five, None) - .add(E::Fr::one(), E::Fr::one()) - .fan_in_3(E::Fr::one(), five) + .add(F::one(), F::one()) + .fan_in_3(F::one(), five) }); let twenty = composer.arithmetic_gate(|gate| { gate.witness(six, seven, None) - .add(E::Fr::one(), E::Fr::one()) - .fan_in_3(E::Fr::one(), seven) + .add(F::one(), F::one()) + .fan_in_3(F::one(), seven) }); let output = composer.arithmetic_gate(|gate| { gate.witness(fourteen, twenty, None) - .mul(E::Fr::one()) - .fan_in_3(E::Fr::from(8u64), nine) + .mul(F::one()) + .fan_in_3(F::from(8u64), nine) }); - composer.constrain_to_constant( - output, - E::Fr::from(352u64), - None, - ); + composer.constrain_to_constant(output, F::from(352u64), None); }, 200, ); assert!(res.is_ok()); } - fn test_correct_big_arith_gate() + fn test_correct_big_arith_gate() where - E: PairingEngine, - P: TEModelParameters, + F: PrimeField, + P: TEModelParameters, + PC: HomomorphicCommitment, { - let res = gadget_tester( - 
|composer: &mut StandardComposer| { + let res = gadget_tester::( + |composer: &mut StandardComposer| { // Verify that (4*5)*6 + 4*7 + 5*8 + 9*10 + 11 = 289 - let a = composer.add_input(E::Fr::from(4u64)); - let b = composer.add_input(E::Fr::from(5u64)); - let q_m = E::Fr::from(6u64); - let q_l = E::Fr::from(7u64); - let q_r = E::Fr::from(8u64); - let d = composer.add_input(E::Fr::from(9u64)); - let q_4 = E::Fr::from(10u64); - let q_c = E::Fr::from(11u64); + let a = composer.add_input(F::from(4u64)); + let b = composer.add_input(F::from(5u64)); + let q_m = F::from(6u64); + let q_l = F::from(7u64); + let q_r = F::from(8u64); + let d = composer.add_input(F::from(9u64)); + let q_4 = F::from(10u64); + let q_c = F::from(11u64); let output = composer.arithmetic_gate(|gate| { gate.witness(a, b, None) @@ -346,33 +352,30 @@ mod test { .constant(q_c) }); - composer.constrain_to_constant( - output, - E::Fr::from(289u64), - None, - ); + composer.constrain_to_constant(output, F::from(289u64), None); }, 200, ); assert!(res.is_ok()); } - fn test_incorrect_big_arith_gate() + fn test_incorrect_big_arith_gate() where - E: PairingEngine, - P: TEModelParameters, + F: PrimeField, + P: TEModelParameters, + PC: HomomorphicCommitment, { - let res = gadget_tester( - |composer: &mut StandardComposer| { + let res = gadget_tester::( + |composer: &mut StandardComposer| { // Verify that (4*5)*6 + 4*7 + 5*8 + 9*12 + 11 != 289 - let a = composer.add_input(E::Fr::from(4u64)); - let b = composer.add_input(E::Fr::from(5u64)); - let q_m = E::Fr::from(6u64); - let q_l = E::Fr::from(7u64); - let q_r = E::Fr::from(8u64); - let d = composer.add_input(E::Fr::from(9u64)); - let q_4 = E::Fr::from(12u64); - let q_c = E::Fr::from(11u64); + let a = composer.add_input(F::from(4u64)); + let b = composer.add_input(F::from(5u64)); + let q_m = F::from(6u64); + let q_l = F::from(7u64); + let q_r = F::from(8u64); + let d = composer.add_input(F::from(9u64)); + let q_4 = F::from(12u64); + let q_c = F::from(11u64); let output = composer.arithmetic_gate(|gate| { gate.witness(a, b, None) @@ -382,49 +385,40 @@ mod test { .constant(q_c) }); - composer.constrain_to_constant( - output, - E::Fr::from(289u64), - None, - ); + composer.constrain_to_constant(output, F::from(289u64), None); }, 200, ); assert!(res.is_err()); } - fn test_incorrect_add_mul_gate() + fn test_incorrect_add_mul_gate() where - E: PairingEngine, - P: TEModelParameters, + F: PrimeField, + P: TEModelParameters, + PC: HomomorphicCommitment, { - let res = gadget_tester( - |composer: &mut StandardComposer| { + let res = gadget_tester::( + |composer: &mut StandardComposer| { // Verify that (5+5) * (6+7) != 117 - let five = composer.add_input(E::Fr::from(5u64)); - let six = composer.add_input(E::Fr::from(6u64)); - let seven = composer.add_input(E::Fr::from(7u64)); + let five = composer.add_input(F::from(5u64)); + let six = composer.add_input(F::from(6u64)); + let seven = composer.add_input(F::from(7u64)); let five_plus_five = composer.arithmetic_gate(|gate| { - gate.witness(five, five, None) - .add(E::Fr::one(), E::Fr::one()) + gate.witness(five, five, None).add(F::one(), F::one()) }); let six_plus_seven = composer.arithmetic_gate(|gate| { - gate.witness(six, seven, None) - .add(E::Fr::one(), E::Fr::one()) + gate.witness(six, seven, None).add(F::one(), F::one()) }); let output = composer.arithmetic_gate(|gate| { gate.witness(five_plus_five, six_plus_seven, None) - .add(E::Fr::one(), E::Fr::one()) + .add(F::one(), F::one()) }); - composer.constrain_to_constant( - output, - 
E::Fr::from(117u64), - None, - ); + composer.constrain_to_constant(output, F::from(117u64), None); }, 200, ); @@ -443,8 +437,7 @@ mod test { test_incorrect_big_arith_gate ], [] => ( - Bls12_381, - ark_ed_on_bls12_381::EdwardsParameters + Bls12_381, ark_ed_on_bls12_381::EdwardsParameters ) ); @@ -460,8 +453,7 @@ mod test { test_incorrect_big_arith_gate ], [] => ( - Bls12_377, - ark_ed_on_bls12_377::EdwardsParameters + Bls12_377, ark_ed_on_bls12_377::EdwardsParameters ) ); } diff --git a/plonk-core/src/constraint_system/boolean.rs b/plonk-core/src/constraint_system/boolean.rs index 3d96a515..57de0e93 100644 --- a/plonk-core/src/constraint_system/boolean.rs +++ b/plonk-core/src/constraint_system/boolean.rs @@ -7,13 +7,13 @@ //! Boolean Gates use crate::constraint_system::{StandardComposer, Variable}; -use ark_ec::{PairingEngine, TEModelParameters}; -use num_traits::{One, Zero}; +use ark_ec::ModelParameters; +use ark_ff::PrimeField; -impl StandardComposer +impl StandardComposer where - E: PairingEngine, - P: TEModelParameters, + F: PrimeField, + P: ModelParameters, { /// Adds a boolean constraint (also known as binary constraint) where /// the gate eq. will enforce that the [`Variable`] received is either `0` @@ -28,18 +28,18 @@ where self.w_o.push(a); self.w_4.push(self.zero_var); - self.q_m.push(E::Fr::one()); - self.q_l.push(E::Fr::zero()); - self.q_r.push(E::Fr::zero()); - self.q_o.push(-E::Fr::one()); - self.q_c.push(E::Fr::zero()); - self.q_4.push(E::Fr::zero()); - self.q_arith.push(E::Fr::one()); + self.q_m.push(F::one()); + self.q_l.push(F::zero()); + self.q_r.push(F::zero()); + self.q_o.push(-F::one()); + self.q_c.push(F::zero()); + self.q_4.push(F::zero()); + self.q_arith.push(F::one()); - self.q_range.push(E::Fr::zero()); - self.q_logic.push(E::Fr::zero()); - self.q_fixed_group_add.push(E::Fr::zero()); - self.q_variable_group_add.push(E::Fr::zero()); + self.q_range.push(F::zero()); + self.q_logic.push(F::zero()); + self.q_fixed_group_add.push(F::zero()); + self.q_variable_group_add.push(F::zero()); self.perm .add_variables_to_map(a, a, a, self.zero_var, self.n); @@ -53,21 +53,24 @@ where #[cfg(test)] mod test { use super::*; - use crate::batch_test; - use crate::constraint_system::helper::*; + use crate::{ + batch_test, commitment::HomomorphicCommitment, + constraint_system::helper::*, + }; use ark_bls12_377::Bls12_377; use ark_bls12_381::Bls12_381; - use num_traits::One; + use ark_ec::TEModelParameters; - fn test_correct_bool_gate() + fn test_correct_bool_gate() where - E: PairingEngine, - P: TEModelParameters, + F: PrimeField, + P: TEModelParameters, + PC: HomomorphicCommitment, { - let res = gadget_tester( - |composer: &mut StandardComposer| { + let res = gadget_tester::( + |composer: &mut StandardComposer| { let zero = composer.zero_var(); - let one = composer.add_input(E::Fr::one()); + let one = composer.add_input(F::one()); composer.boolean_gate(zero); composer.boolean_gate(one); }, @@ -76,15 +79,16 @@ mod test { assert!(res.is_ok()) } - fn test_incorrect_bool_gate() + fn test_incorrect_bool_gate() where - E: PairingEngine, - P: TEModelParameters, + F: PrimeField, + P: TEModelParameters, + PC: HomomorphicCommitment, { - let res = gadget_tester( - |composer: &mut StandardComposer| { - let zero = composer.add_input(E::Fr::from(5u64)); - let one = composer.add_input(E::Fr::one()); + let res = gadget_tester::( + |composer: &mut StandardComposer| { + let zero = composer.add_input(F::from(5u64)); + let one = composer.add_input(F::one()); composer.boolean_gate(zero); 
composer.boolean_gate(one); }, @@ -100,8 +104,7 @@ mod test { test_incorrect_bool_gate ], [] => ( - Bls12_381, - ark_ed_on_bls12_381::EdwardsParameters + Bls12_381, ark_ed_on_bls12_381::EdwardsParameters ) ); @@ -112,8 +115,6 @@ mod test { test_incorrect_bool_gate ], [] => ( - Bls12_377, - ark_ed_on_bls12_377::EdwardsParameters - ) + Bls12_377, ark_ed_on_bls12_377::EdwardsParameters ) ); } diff --git a/plonk-core/src/constraint_system/composer.rs b/plonk-core/src/constraint_system/composer.rs index 2c99b7a3..414e8ec9 100644 --- a/plonk-core/src/constraint_system/composer.rs +++ b/plonk-core/src/constraint_system/composer.rs @@ -14,17 +14,13 @@ //! It allows us not only to build Add and Mul constraints but also to build //! ECC op. gates, Range checks, Logical gates (Bitwise ops) etc. -use crate::constraint_system::Variable; -use crate::permutation::Permutation; +use crate::{constraint_system::Variable, permutation::Permutation}; use alloc::collections::BTreeMap; -use ark_ec::models::TEModelParameters; -use ark_ec::PairingEngine; -use ark_ff::Field; -#[cfg(feature = "trace")] -use ark_ff::{BigInteger, PrimeField}; + +use ark_ec::{models::TEModelParameters, ModelParameters}; +use ark_ff::PrimeField; use core::marker::PhantomData; use hashbrown::HashMap; -use num_traits::{One, Zero}; /// The StandardComposer is the circuit-builder tool that the `plonk` repository /// provides so that circuit descriptions can be written, stored and transformed @@ -53,41 +49,41 @@ use num_traits::{One, Zero}; /// the StandardComposer as a builder. #[derive(derivative::Derivative)] #[derivative(Debug)] -pub struct StandardComposer +pub struct StandardComposer where - E: PairingEngine, - P: TEModelParameters, + F: PrimeField, + P: ModelParameters, { /// Number of arithmetic gates in the circuit pub(crate) n: usize, // Selector vectors /// Multiplier selector - pub(crate) q_m: Vec, + pub(crate) q_m: Vec, /// Left wire selector - pub(crate) q_l: Vec, + pub(crate) q_l: Vec, /// Right wire selector - pub(crate) q_r: Vec, + pub(crate) q_r: Vec, /// Output wire selector - pub(crate) q_o: Vec, + pub(crate) q_o: Vec, /// Fourth wire selector - pub(crate) q_4: Vec, + pub(crate) q_4: Vec, /// Constant wire selector - pub(crate) q_c: Vec, + pub(crate) q_c: Vec, /// Arithmetic wire selector - pub(crate) q_arith: Vec, + pub(crate) q_arith: Vec, /// Range selector - pub(crate) q_range: Vec, + pub(crate) q_range: Vec, /// Logic selector - pub(crate) q_logic: Vec, + pub(crate) q_logic: Vec, /// Fixed base group addition selector - pub(crate) q_fixed_group_add: Vec, + pub(crate) q_fixed_group_add: Vec, /// Variable base group addition selector - pub(crate) q_variable_group_add: Vec, + pub(crate) q_variable_group_add: Vec, /// Sparse representation of the Public Inputs linking the positions of the /// non-zero ones to it's actual values. - pub(crate) public_inputs_sparse_store: BTreeMap, + pub(crate) public_inputs_sparse_store: BTreeMap, // Witness vectors /// Left wire witness vector. @@ -106,19 +102,19 @@ where pub(crate) zero_var: Variable, /// These are the actual variable values. - pub(crate) variables: HashMap, + pub(crate) variables: HashMap, /// Permutation argument. - pub(crate) perm: Permutation, + pub(crate) perm: Permutation, /// Type Parameter Marker __: PhantomData

, } -impl StandardComposer +impl StandardComposer where - E: PairingEngine, - P: TEModelParameters, + F: PrimeField, + P: ModelParameters, { /// Returns the number of gates in the circuit pub fn circuit_size(&self) -> usize { @@ -127,8 +123,8 @@ where /// Constructs a dense vector of the Public Inputs from the positions and /// the sparse vector that contains the values. - pub fn construct_dense_pi_vec(&self) -> Vec { - let mut pi = vec![E::Fr::zero(); self.n]; + pub fn construct_dense_pi_vec(&self) -> Vec { + let mut pi = vec![F::zero(); self.n]; self.public_inputs_sparse_store .iter() .for_each(|(pos, value)| { @@ -146,10 +142,10 @@ where } } -impl Default for StandardComposer +impl Default for StandardComposer where - E: PairingEngine, - P: TEModelParameters, + F: PrimeField, + P: TEModelParameters, { #[inline] fn default() -> Self { @@ -157,10 +153,10 @@ where } } -impl StandardComposer +impl StandardComposer where - E: PairingEngine, - P: TEModelParameters, + F: PrimeField, + P: TEModelParameters, { /// Generates a new empty `StandardComposer` with all of it's fields /// set to hold an initial capacity of 0. @@ -170,16 +166,13 @@ where /// The usage of this may cause lots of re-allocations since the `Composer` /// holds `Vec` for every polynomial, and these will need to be re-allocated /// each time the circuit grows considerably. - pub fn new() -> StandardComposer { - StandardComposer::with_expected_size(0) + pub fn new() -> Self { + Self::with_expected_size(0) } /// Fixes a [`Variable`] in the witness to be a part of the circuit /// description. - pub fn add_witness_to_circuit_description( - &mut self, - value: E::Fr, - ) -> Variable { + pub fn add_witness_to_circuit_description(&mut self, value: F) -> Variable { let var = self.add_input(value); self.constrain_to_constant(var, value, None); var @@ -190,7 +183,7 @@ where /// since the `Vec`s will already have an appropriate allocation at the /// beginning of the composing stage. pub fn with_expected_size(expected_size: usize) -> Self { - let mut composer = StandardComposer { + let mut composer = Self { n: 0, q_m: Vec::with_capacity(expected_size), q_l: Vec::with_capacity(expected_size), @@ -211,12 +204,12 @@ where zero_var: Variable(0), variables: HashMap::with_capacity(expected_size), perm: Permutation::new(), - __: PhantomData, + __: PhantomData::

, }; // Reserve the first variable to be zero composer.zero_var = - composer.add_witness_to_circuit_description(E::Fr::zero()); + composer.add_witness_to_circuit_description(F::zero()); // Add dummy constraints composer.add_dummy_constraints(); @@ -232,14 +225,12 @@ where /// Add Input first calls the Permutation /// to generate and allocate a new [`Variable`] `var`. /// - /// The Composer then links the variable to the [`E::Fr`] + /// The Composer then links the variable to the [`PrimeField`] /// and returns it for its use in the system. - /// - /// [`E::Fr`]: PairingEngine::Fr - pub fn add_input(&mut self, s: E::Fr) -> Variable { + pub fn add_input(&mut self, s: F) -> Variable { // Get a new Variable from the permutation let var = self.perm.new_variable(); - // The composer now links the E::Fr to the Variable returned from + // The composer now links the F to the Variable returned from // the Permutation self.variables.insert(var, s); @@ -260,12 +251,12 @@ where a: Variable, b: Variable, c: Variable, - q_m: E::Fr, - q_l: E::Fr, - q_r: E::Fr, - q_o: E::Fr, - q_c: E::Fr, - pi: Option, + q_m: F, + q_l: F, + q_r: F, + q_o: F, + q_c: F, + pi: Option, ) -> (Variable, Variable, Variable) { self.w_l.push(a); self.w_r.push(b); @@ -278,13 +269,13 @@ where self.q_m.push(q_m); self.q_o.push(q_o); self.q_c.push(q_c); - self.q_4.push(E::Fr::zero()); - self.q_arith.push(E::Fr::one()); + self.q_4.push(F::zero()); + self.q_arith.push(F::one()); - self.q_range.push(E::Fr::zero()); - self.q_logic.push(E::Fr::zero()); - self.q_fixed_group_add.push(E::Fr::zero()); - self.q_variable_group_add.push(E::Fr::zero()); + self.q_range.push(F::zero()); + self.q_logic.push(F::zero()); + self.q_fixed_group_add.push(F::zero()); + self.q_variable_group_add.push(F::zero()); if let Some(pi) = pi { assert!(self @@ -307,17 +298,17 @@ where pub fn constrain_to_constant( &mut self, a: Variable, - constant: E::Fr, - pi: Option, + constant: F, + pi: Option, ) { self.poly_gate( a, a, a, - E::Fr::zero(), - E::Fr::one(), - E::Fr::zero(), - E::Fr::zero(), + F::zero(), + F::one(), + F::zero(), + F::zero(), -constant, pi, ); @@ -330,11 +321,11 @@ where a, b, self.zero_var, - E::Fr::zero(), - E::Fr::one(), - -E::Fr::one(), - E::Fr::zero(), - E::Fr::zero(), + F::zero(), + F::one(), + -F::one(), + F::zero(), + F::zero(), None, ); } @@ -344,10 +335,10 @@ where pub fn is_zero_with_output(&mut self, a: Variable) -> Variable { // Get relevant field values let a_value = self.variables.get(&a).unwrap(); - let y_value = a_value.inverse().unwrap_or_else(E::Fr::one); + let y_value = a_value.inverse().unwrap_or_else(F::one); // This has value 1 if input value is zero, value 0 otherwise - let b_value = E::Fr::one() - *a_value * y_value; + let b_value = F::one() - *a_value * y_value; let y = self.add_input(y_value); let b = self.add_input(b_value); @@ -358,14 +349,14 @@ where // a * b = 0 // where y is auxiliary and b is the boolean (a == 0). let _a_times_b = self.arithmetic_gate(|gate| { - gate.witness(a, b, Some(zero)).mul(E::Fr::one()) + gate.witness(a, b, Some(zero)).mul(F::one()) }); let _first_constraint = self.arithmetic_gate(|gate| { gate.witness(a, y, Some(zero)) - .mul(E::Fr::one()) - .fan_in_3(E::Fr::one(), b) - .constant(-E::Fr::one()) + .mul(F::one()) + .fan_in_3(F::one(), b) + .constant(-F::one()) }); b @@ -375,7 +366,7 @@ where /// two input variables have equal values and whose value is 0 otherwise. 
pub fn is_eq_with_output(&mut self, a: Variable, b: Variable) -> Variable { let difference = self.arithmetic_gate(|gate| { - gate.witness(a, b, None).add(E::Fr::one(), -E::Fr::one()) + gate.witness(a, b, None).add(F::one(), -F::one()) }); self.is_zero_with_output(difference) } @@ -399,25 +390,25 @@ where let zero = self.zero_var; // bit * choice_a let bit_times_a = self.arithmetic_gate(|gate| { - gate.witness(bit, choice_a, None).mul(E::Fr::one()) + gate.witness(bit, choice_a, None).mul(F::one()) }); // 1 - bit let one_min_bit = self.arithmetic_gate(|gate| { gate.witness(bit, zero, None) - .add(-E::Fr::one(), E::Fr::zero()) - .constant(E::Fr::one()) + .add(-F::one(), F::zero()) + .constant(F::one()) }); // (1 - bit) * b let one_min_bit_choice_b = self.arithmetic_gate(|gate| { - gate.witness(one_min_bit, choice_b, None).mul(E::Fr::one()) + gate.witness(one_min_bit, choice_b, None).mul(F::one()) }); // [ (1 - bit) * b ] + [ bit * a ] self.arithmetic_gate(|gate| { gate.witness(one_min_bit_choice_b, bit_times_a, None) - .add(E::Fr::one(), E::Fr::one()) + .add(F::one(), F::one()) }) } @@ -437,7 +428,7 @@ where ) -> Variable { // returns bit * value self.arithmetic_gate(|gate| { - gate.witness(bit, value, None).mul(E::Fr::one()) + gate.witness(bit, value, None).mul(F::one()) }) } @@ -458,19 +449,18 @@ where let value_scalar = self.variables.get(&value).unwrap(); let bit_scalar = self.variables.get(&bit).unwrap(); - let f_x_scalar = - E::Fr::one() - bit_scalar + (*bit_scalar * value_scalar); + let f_x_scalar = F::one() - bit_scalar + (*bit_scalar * value_scalar); let f_x = self.add_input(f_x_scalar); self.poly_gate( bit, value, f_x, - E::Fr::one(), - -E::Fr::one(), - E::Fr::zero(), - -E::Fr::one(), - E::Fr::one(), + F::one(), + -F::one(), + F::zero(), + -F::one(), + F::one(), None, ); @@ -482,21 +472,21 @@ where /// description which are guaranteed to always satisfy the gate equation. 
pub fn add_dummy_constraints(&mut self) { // Add a dummy constraint so that we do not have zero polynomials - self.q_m.push(E::Fr::from(1u64)); - self.q_l.push(E::Fr::from(2u64)); - self.q_r.push(E::Fr::from(3u64)); - self.q_o.push(E::Fr::from(4u64)); - self.q_c.push(E::Fr::from(4u64)); - self.q_4.push(E::Fr::one()); - self.q_arith.push(E::Fr::one()); - self.q_range.push(E::Fr::zero()); - self.q_logic.push(E::Fr::zero()); - self.q_fixed_group_add.push(E::Fr::zero()); - self.q_variable_group_add.push(E::Fr::zero()); - let var_six = self.add_input(E::Fr::from(6u64)); - let var_one = self.add_input(E::Fr::from(1u64)); - let var_seven = self.add_input(E::Fr::from(7u64)); - let var_min_twenty = self.add_input(-E::Fr::from(20u64)); + self.q_m.push(F::from(1u64)); + self.q_l.push(F::from(2u64)); + self.q_r.push(F::from(3u64)); + self.q_o.push(F::from(4u64)); + self.q_c.push(F::from(4u64)); + self.q_4.push(F::one()); + self.q_arith.push(F::one()); + self.q_range.push(F::zero()); + self.q_logic.push(F::zero()); + self.q_fixed_group_add.push(F::zero()); + self.q_variable_group_add.push(F::zero()); + let var_six = self.add_input(F::from(6u64)); + let var_one = self.add_input(F::from(1u64)); + let var_seven = self.add_input(F::from(7u64)); + let var_min_twenty = self.add_input(-F::from(20u64)); self.w_l.push(var_six); self.w_r.push(var_seven); self.w_o.push(var_min_twenty); @@ -511,17 +501,17 @@ where self.n += 1; //Add another dummy constraint so that we do not get the identity // permutation - self.q_m.push(E::Fr::from(1u64)); - self.q_l.push(E::Fr::from(1u64)); - self.q_r.push(E::Fr::from(1u64)); - self.q_o.push(E::Fr::from(1u64)); - self.q_c.push(E::Fr::from(127u64)); - self.q_4.push(E::Fr::zero()); - self.q_arith.push(E::Fr::one()); - self.q_range.push(E::Fr::zero()); - self.q_logic.push(E::Fr::zero()); - self.q_fixed_group_add.push(E::Fr::zero()); - self.q_variable_group_add.push(E::Fr::zero()); + self.q_m.push(F::from(1u64)); + self.q_l.push(F::from(1u64)); + self.q_r.push(F::from(1u64)); + self.q_o.push(F::from(1u64)); + self.q_c.push(F::from(127u64)); + self.q_4.push(F::zero()); + self.q_arith.push(F::one()); + self.q_range.push(F::zero()); + self.q_logic.push(F::zero()); + self.q_fixed_group_add.push(F::zero()); + self.q_variable_group_add.push(F::zero()); self.w_l.push(var_min_twenty); self.w_r.push(var_six); self.w_o.push(var_seven); @@ -549,35 +539,36 @@ where /// the cause is an unsatisfied gate equation, the function will panic. 
#[cfg(feature = "trace")] pub fn check_circuit_satisfied(&self) { - let w_l: Vec<&E::Fr> = self + use ark_ff::BigInteger; + let w_l: Vec<&F> = self .w_l .iter() .map(|w_l_i| self.variables.get(w_l_i).unwrap()) .collect(); - let w_r: Vec<&E::Fr> = self + let w_r: Vec<&F> = self .w_r .iter() .map(|w_r_i| self.variables.get(w_r_i).unwrap()) .collect(); - let w_o: Vec<&E::Fr> = self + let w_o: Vec<&F> = self .w_o .iter() .map(|w_o_i| self.variables.get(w_o_i).unwrap()) .collect(); - let w_4: Vec<&E::Fr> = self + let w_4: Vec<&F> = self .w_4 .iter() .map(|w_4_i| self.variables.get(w_4_i).unwrap()) .collect(); // Computes f(f-1)(f-2)(f-3) - let delta = |f: E::Fr| -> E::Fr { - let f_1 = f - E::Fr::one(); - let f_2 = f - E::Fr::from(2u64); - let f_3 = f - E::Fr::from(3u64); + let delta = |f: F| -> F { + let f_1 = f - F::one(); + let f_2 = f - F::from(2u64); + let f_3 = f - F::from(3u64); f * f_1 * f_2 * f_3 }; let pi_vec = self.construct_dense_pi_vec(); - let four = E::Fr::from(4u64); + let four = F::from(4u64); for i in 0..self.n { let qm = self.q_m[i]; let ql = self.q_l[i]; @@ -641,66 +632,66 @@ where d ); - let k = - qarith - * ((qm * a * b) - + (ql * a) - + (qr * b) - + (qo * c) - + (q4 * d) - + pi - + qc) - + qlogic - * (((delta(*a_next - four * a) - - delta(*b_next - four * b)) - * c) - + delta(*a_next - four * a) - + delta(*b_next - four * b) - + delta(*d_next - four * d) - + match ( - qlogic == E::Fr::one(), - qlogic == -E::Fr::one(), - ) { - (true, false) => { - let a_bits = a.into_repr().to_bits_le(); - let b_bits = b.into_repr().to_bits_le(); - let a_and_b = a_bits - .iter() - .zip(b_bits) - .map(|(a_bit, b_bit)| a_bit & b_bit) - .collect::>(); - - E::Fr::from_repr( - ::BigInt::from_bits_le( + let k = qarith + * ((qm * a * b) + + (ql * a) + + (qr * b) + + (qo * c) + + (q4 * d) + + pi + + qc) + + qlogic + * (((delta(*a_next - four * a) + - delta(*b_next - four * b)) + * c) + + delta(*a_next - four * a) + + delta(*b_next - four * b) + + delta(*d_next - four * d) + + match (qlogic == F::one(), qlogic == -F::one()) { + (true, false) => { + let a_bits = a.into_repr().to_bits_le(); + let b_bits = b.into_repr().to_bits_le(); + let a_and_b = a_bits + .iter() + .zip(b_bits) + .map(|(a_bit, b_bit)| a_bit & b_bit) + .collect::>(); + + F::from_repr( + ::BigInt::from_bits_le( &a_and_b, ), - ).unwrap() - *d - } - (false, true) => { - let a_bits = a.into_repr().to_bits_le(); - let b_bits = b.into_repr().to_bits_le(); - let a_xor_b = a_bits - .iter() - .zip(b_bits) - .map(|(a_bit, b_bit)| a_bit ^ b_bit) - .collect::>(); - - E::Fr::from_repr( - ::BigInt::from_bits_le( + ) + .unwrap() + - *d + } + (false, true) => { + let a_bits = a.into_repr().to_bits_le(); + let b_bits = b.into_repr().to_bits_le(); + let a_xor_b = a_bits + .iter() + .zip(b_bits) + .map(|(a_bit, b_bit)| a_bit ^ b_bit) + .collect::>(); + + F::from_repr( + ::BigInt::from_bits_le( &a_xor_b, ), - ).unwrap() - *d - } - (false, false) => E::Fr::zero(), - _ => unreachable!(), - }) - + qrange - * (delta(*c - four * d) - + delta(*b - four * c) - + delta(*a - four * b) - + delta(*d_next - four * a)); - - assert_eq!(k, E::Fr::zero(), "Check failed at gate {}", i,); + ) + .unwrap() + - *d + } + (false, false) => F::zero(), + _ => unreachable!(), + }) + + qrange + * (delta(*c - four * d) + + delta(*b - four * c) + + delta(*a - four * b) + + delta(*d_next - four * a)); + + assert_eq!(k, F::zero(), "Check failed at gate {}", i,); } } } @@ -708,22 +699,21 @@ where #[cfg(test)] mod test { use super::*; - use crate::batch_test; - use 
crate::constraint_system::helper::*; - use crate::proof_system::{Prover, Verifier}; + use crate::{ + batch_test, batch_test_field_params, + commitment::HomomorphicCommitment, + constraint_system::helper::*, + proof_system::{Prover, Verifier}, + }; use ark_bls12_377::Bls12_377; use ark_bls12_381::Bls12_381; - use ark_poly::univariate::DensePolynomial; - use ark_poly_commit::kzg10::{self, Powers, UniversalParams, KZG10}; - use ark_poly_commit::sonic_pc::SonicKZG10; - use ark_poly_commit::PolynomialCommitment; - use rand_core::OsRng; + use rand::rngs::OsRng; /// Tests that a circuit initially has 3 gates. - fn test_initial_circuit_size() + fn test_initial_circuit_size() where - E: PairingEngine, - P: TEModelParameters, + F: PrimeField, + P: TEModelParameters, { // NOTE: Circuit size is n+3 because // - We have an extra gate which forces the first witness to be zero. @@ -732,29 +722,32 @@ mod test { // not the identity and // - Another gate which ensures that the selector polynomials are not // all zeroes - assert_eq!(3, StandardComposer::::new().circuit_size()) + assert_eq!(3, StandardComposer::::new().circuit_size()) } /// Tests that an empty circuit proof passes. - fn test_prove_verify() + fn test_prove_verify() where - E: PairingEngine, - P: TEModelParameters, + F: PrimeField, + P: TEModelParameters, + PC: HomomorphicCommitment, { // NOTE: Does nothing except add the dummy constraints. - let res = gadget_tester(|_: &mut StandardComposer| {}, 200); + let res = + gadget_tester::(|_: &mut StandardComposer| {}, 200); assert!(res.is_ok()); } - fn test_correct_is_zero_with_output() + fn test_correct_is_zero_with_output() where - E: PairingEngine, - P: TEModelParameters, + F: PrimeField, + P: TEModelParameters, + PC: HomomorphicCommitment, { // Check that it gives true on zero input: - let res = gadget_tester( - |composer: &mut StandardComposer| { - let one = composer.add_input(E::Fr::one()); + let res = gadget_tester::( + |composer: &mut StandardComposer| { + let one = composer.add_input(F::one()); let is_zero = composer.is_zero_with_output(composer.zero_var()); composer.assert_equal(is_zero, one); }, @@ -762,9 +755,9 @@ mod test { ); // Check that it gives false on non-zero input: - let res2 = gadget_tester( - |composer: &mut StandardComposer| { - let one = composer.add_input(E::Fr::one()); + let res2 = gadget_tester::( + |composer: &mut StandardComposer| { + let one = composer.add_input(F::one()); let is_zero = composer.is_zero_with_output(one); composer.assert_equal(is_zero, composer.zero_var()); }, @@ -774,17 +767,18 @@ mod test { assert!(res.is_ok() && res2.is_ok()) } - fn test_correct_is_eq_with_output() + fn test_correct_is_eq_with_output() where - E: PairingEngine, - P: TEModelParameters, + F: PrimeField, + P: TEModelParameters, + PC: HomomorphicCommitment, { // Check that it gives true on equal inputs: - let res = gadget_tester( - |composer: &mut StandardComposer| { - let one = composer.add_input(E::Fr::one()); + let res = gadget_tester::( + |composer: &mut StandardComposer| { + let one = composer.add_input(F::one()); - let field_element = E::Fr::one().double(); + let field_element = F::one().double(); let a = composer.add_input(field_element); let b = composer.add_input(field_element); let is_eq = composer.is_eq_with_output(a, b); @@ -794,9 +788,9 @@ mod test { ); // Check that it gives false on non-equal inputs: - let res2 = gadget_tester( - |composer: &mut StandardComposer| { - let field_element = E::Fr::one().double(); + let res2 = gadget_tester::( + |composer: &mut 
StandardComposer| { + let field_element = F::one().double(); let a = composer.add_input(field_element); let b = composer.add_input(field_element.double()); let is_eq = composer.is_eq_with_output(a, b); @@ -808,18 +802,19 @@ mod test { assert!(res.is_ok() && res2.is_ok()) } - fn test_conditional_select() + fn test_conditional_select() where - E: PairingEngine, - P: TEModelParameters, + F: PrimeField, + P: TEModelParameters, + PC: HomomorphicCommitment, { - let res = gadget_tester( - |composer: &mut StandardComposer| { - let bit_1 = composer.add_input(E::Fr::one()); + let res = gadget_tester::( + |composer: &mut StandardComposer| { + let bit_1 = composer.add_input(F::one()); let bit_0 = composer.zero_var(); - let choice_a = composer.add_input(E::Fr::from(10u64)); - let choice_b = composer.add_input(E::Fr::from(20u64)); + let choice_a = composer.add_input(F::from(10u64)); + let choice_b = composer.add_input(F::from(20u64)); let choice = composer.conditional_select(bit_1, choice_a, choice_b); @@ -835,40 +830,25 @@ mod test { } // FIXME: Move this to integration tests - fn test_multiple_proofs() + fn test_multiple_proofs() where - E: PairingEngine, - P: TEModelParameters, + F: PrimeField, + P: TEModelParameters, + PC: HomomorphicCommitment, { - let u_params: UniversalParams = - KZG10::>::setup( - 2 * 30, - false, - &mut OsRng, - ) - .unwrap(); + let u_params = PC::setup(2 * 30, None, &mut OsRng).unwrap(); // Create a prover struct - let mut prover: Prover = Prover::new(b"demo"); + let mut prover: Prover = Prover::new(b"demo"); // Add gadgets dummy_gadget(10, prover.mut_cs()); // Commit Key - let (ck, _) = SonicKZG10::>::trim( - &u_params, - 2 * 20, - 0, - None, - ) - .unwrap(); - let powers = Powers { - powers_of_g: ck.powers_of_g.into(), - powers_of_gamma_g: ck.powers_of_gamma_g.into(), - }; + let (ck, vk) = PC::trim(&u_params, 2 * 20, 0, None).unwrap(); // Preprocess circuit - prover.preprocess(&powers).unwrap(); + prover.preprocess(&ck).unwrap(); let public_inputs = prover.cs.construct_dense_pi_vec(); @@ -876,7 +856,7 @@ mod test { // Compute multiple proofs for _ in 0..3 { - proofs.push(prover.prove(&powers).unwrap()); + proofs.push(prover.prove(&ck).unwrap()); // Add another witness instance dummy_gadget(10, prover.mut_cs()); @@ -884,46 +864,45 @@ mod test { // Verifier // - let mut verifier: Verifier = Verifier::new(b"demo"); + let mut verifier = Verifier::::new(b"demo"); // Add gadgets dummy_gadget(10, verifier.mut_cs()); - // Commit and Verifier Key - let (sonic_ck, sonic_vk) = - SonicKZG10::>::trim( - &u_params, - 2 * 20, - 0, - None, - ) - .unwrap(); - let powers = Powers { - powers_of_g: sonic_ck.powers_of_g.into(), - powers_of_gamma_g: sonic_ck.powers_of_gamma_g.into(), - }; - - let vk = kzg10::VerifierKey { - g: sonic_vk.g, - gamma_g: sonic_vk.gamma_g, - h: sonic_vk.h, - beta_h: sonic_vk.beta_h, - prepared_h: sonic_vk.prepared_h, - prepared_beta_h: sonic_vk.prepared_beta_h, - }; - // Preprocess - verifier.preprocess(&powers).unwrap(); + verifier.preprocess(&ck).unwrap(); for proof in proofs { assert!(verifier.verify(&proof, &vk, &public_inputs).is_ok()); } } + // Tests for Bls12_381 + batch_test_field_params!( + [ + test_initial_circuit_size + ], + [] => ( + Bls12_381, + ark_ed_on_bls12_381::EdwardsParameters + + ) + ); + + // Tests for Bls12_377 + batch_test_field_params!( + [ + test_initial_circuit_size + ], + [] => ( + Bls12_377, + ark_ed_on_bls12_377::EdwardsParameters + ) + ); + // Tests for Bls12_381 batch_test!( [ - test_initial_circuit_size, test_prove_verify, 
test_correct_is_zero_with_output, test_correct_is_eq_with_output, @@ -939,7 +918,6 @@ mod test { // Tests for Bls12_377 batch_test!( [ - test_initial_circuit_size, test_prove_verify, test_correct_is_zero_with_output, test_correct_is_eq_with_output, diff --git a/plonk-core/src/constraint_system/ecc/curve_addition/fixed_base_gate.rs b/plonk-core/src/constraint_system/ecc/curve_addition/fixed_base_gate.rs index f9cc2b68..d60472b4 100644 --- a/plonk-core/src/constraint_system/ecc/curve_addition/fixed_base_gate.rs +++ b/plonk-core/src/constraint_system/ecc/curve_addition/fixed_base_gate.rs @@ -6,21 +6,17 @@ //! Fixed-Base Curve Addition Gate -use crate::constraint_system::StandardComposer; -use crate::constraint_system::Variable; +use crate::constraint_system::{StandardComposer, Variable}; use ark_ec::models::TEModelParameters; -use ark_ec::PairingEngine; -use core::marker::PhantomData; -use num_traits::{One, Zero}; +use ark_ff::PrimeField; /// Contains all of the components needed to verify that a bit scalar /// multiplication was computed correctly. #[derive(derivative::Derivative)] #[derivative(Clone, Copy, Debug)] -pub struct WnafRound +pub struct WnafRound
<P>
where - E: PairingEngine, - P: TEModelParameters, + P: TEModelParameters, { /// This is the accumulated x coordinate point that we wish to add (so /// far, it depends on where you are in the scalar mul) it is linked to @@ -49,15 +45,12 @@ where /// This is the multiplication of x_\beta * y_\beta pub xy_beta: P::BaseField, - - /// Type Parameter Marker - __: PhantomData, } -impl StandardComposer +impl StandardComposer where - E: PairingEngine, - P: TEModelParameters, + F: PrimeField, + P: TEModelParameters, { /// Generates a new structure for preparing a [`WnafRound`] ROUND. pub(crate) fn new_wnaf( @@ -65,10 +58,10 @@ where acc_y: Variable, accumulated_bit: Variable, xy_alpha: Variable, - x_beta: P::BaseField, - y_beta: P::BaseField, - xy_beta: P::BaseField, - ) -> WnafRound { + x_beta: F, + y_beta: F, + xy_beta: F, + ) -> WnafRound
<P>
{ WnafRound { acc_x, acc_y, @@ -77,12 +70,11 @@ where x_beta, y_beta, xy_beta, - __: PhantomData, } } /// Fixed group addition of a point. - pub(crate) fn fixed_group_add(&mut self, wnaf_round: WnafRound) { + pub(crate) fn fixed_group_add(&mut self, wnaf_round: WnafRound
<P>
) { self.w_l.push(wnaf_round.acc_x); self.w_r.push(wnaf_round.acc_y); self.w_o.push(wnaf_round.xy_alpha); @@ -92,15 +84,15 @@ where self.q_r.push(wnaf_round.y_beta); self.q_c.push(wnaf_round.xy_beta); - self.q_o.push(E::Fr::zero()); - self.q_fixed_group_add.push(E::Fr::one()); - self.q_variable_group_add.push(E::Fr::zero()); - - self.q_m.push(E::Fr::zero()); - self.q_4.push(E::Fr::zero()); - self.q_arith.push(E::Fr::zero()); - self.q_range.push(E::Fr::zero()); - self.q_logic.push(E::Fr::zero()); + self.q_o.push(F::zero()); + self.q_fixed_group_add.push(F::one()); + self.q_variable_group_add.push(F::zero()); + + self.q_m.push(F::zero()); + self.q_4.push(F::zero()); + self.q_arith.push(F::zero()); + self.q_range.push(F::zero()); + self.q_logic.push(F::zero()); self.perm.add_variables_to_map( wnaf_round.acc_x, diff --git a/plonk-core/src/constraint_system/ecc/curve_addition/variable_base_gate.rs b/plonk-core/src/constraint_system/ecc/curve_addition/variable_base_gate.rs index 71767a6e..796c2a00 100644 --- a/plonk-core/src/constraint_system/ecc/curve_addition/variable_base_gate.rs +++ b/plonk-core/src/constraint_system/ecc/curve_addition/variable_base_gate.rs @@ -6,17 +6,16 @@ //! Variable-base Curve Addition Gate -use crate::constraint_system::ecc::Point; -use crate::constraint_system::StandardComposer; -use ark_ec::models::twisted_edwards_extended::GroupAffine; -use ark_ec::models::TEModelParameters; -use ark_ec::PairingEngine; -use num_traits::{One, Zero}; - -impl StandardComposer +use crate::constraint_system::{ecc::Point, StandardComposer}; +use ark_ec::models::{ + twisted_edwards_extended::GroupAffine as TEGroupAffine, TEModelParameters, +}; +use ark_ff::PrimeField; + +impl StandardComposer where - E: PairingEngine, - P: TEModelParameters, + F: PrimeField, + P: TEModelParameters, { /// Adds two curve points together using a curve addition gate /// Note that since the points are not fixed the generator is not a part of @@ -24,9 +23,9 @@ where /// width of 4. pub fn point_addition_gate( &mut self, - point_a: Point, - point_b: Point, - ) -> Point { + point_a: Point
<P>
, + point_b: Point
<P>
, + ) -> Point
<P>
{ // In order to verify that two points were correctly added // without going over a degree 4 polynomial, we will need // x_1, y_1, x_2, y_2 @@ -43,8 +42,8 @@ where let x_2_scalar = self.variables.get(&x_2).unwrap(); let y_2_scalar = self.variables.get(&y_2).unwrap(); - let p1 = GroupAffine::
<P>
::new(*x_1_scalar, *y_1_scalar); - let p2 = GroupAffine::
<P>
::new(*x_2_scalar, *y_2_scalar); + let p1 = TEGroupAffine::
<P>
::new(*x_1_scalar, *y_1_scalar); + let p2 = TEGroupAffine::
<P>
::new(*x_2_scalar, *y_2_scalar); let point = p1 + p2; let x_3_scalar = point.x; @@ -61,7 +60,7 @@ where self.w_r.extend(&[y_1, y_3]); self.w_o.extend(&[x_2, self.zero_var]); self.w_4.extend(&[y_2, x_1_y_2]); - let zeros = [E::Fr::zero(), E::Fr::zero()]; + let zeros = [F::zero(), F::zero()]; self.q_l.extend(&zeros); self.q_r.extend(&zeros); @@ -74,8 +73,8 @@ where self.q_logic.extend(&zeros); self.q_fixed_group_add.extend(&zeros); - self.q_variable_group_add.push(E::Fr::one()); - self.q_variable_group_add.push(E::Fr::zero()); + self.q_variable_group_add.push(F::one()); + self.q_variable_group_add.push(F::zero()); self.perm.add_variables_to_map(x_1, y_1, x_2, y_2, self.n); self.n += 1; @@ -89,7 +88,7 @@ where ); self.n += 1; - Point::::new(x_3, y_3) + Point::
<P>
::new(x_3, y_3) } } @@ -99,19 +98,20 @@ mod test { use crate::{batch_test, constraint_system::helper::*}; use ark_bls12_377::Bls12_377; use ark_bls12_381::Bls12_381; - use ark_ff::Field; + + use crate::commitment::HomomorphicCommitment; /// Adds two curve points together using the classical point addition /// algorithm. This method is slower than WNAF and is just meant to be the /// source of truth to test the WNAF method. - pub fn classical_point_addition( - composer: &mut StandardComposer, - point_a: Point, - point_b: Point, - ) -> Point + pub fn classical_point_addition( + composer: &mut StandardComposer, + point_a: Point
<P>
, + point_b: Point
<P>
, + ) -> Point
<P>
where - E: PairingEngine, - P: TEModelParameters, + F: PrimeField, + P: TEModelParameters, { let zero = composer.zero_var; let x1 = point_a.x; @@ -121,21 +121,17 @@ mod test { let y2 = point_b.y; // x1 * y2 - let x1_y2 = composer.arithmetic_gate(|gate| { - gate.mul(E::Fr::one()).witness(x1, y2, None) - }); + let x1_y2 = composer + .arithmetic_gate(|gate| gate.mul(F::one()).witness(x1, y2, None)); // y1 * x2 - let y1_x2 = composer.arithmetic_gate(|gate| { - gate.mul(E::Fr::one()).witness(y1, x2, None) - }); + let y1_x2 = composer + .arithmetic_gate(|gate| gate.mul(F::one()).witness(y1, x2, None)); // y1 * y2 - let y1_y2 = composer.arithmetic_gate(|gate| { - gate.mul(E::Fr::one()).witness(y1, y2, None) - }); + let y1_y2 = composer + .arithmetic_gate(|gate| gate.mul(F::one()).witness(y1, y2, None)); // x1 * x2 - let x1_x2 = composer.arithmetic_gate(|gate| { - gate.mul(E::Fr::one()).witness(x1, x2, None) - }); + let x1_x2 = composer + .arithmetic_gate(|gate| gate.mul(F::one()).witness(x1, x2, None)); // d x1x2 * y1y2 let d_x1_x2_y1_y2 = composer.arithmetic_gate(|gate| { gate.mul(P::COEFF_D).witness(x1_x2, y1_y2, None) @@ -143,21 +139,19 @@ mod test { // x1y2 + y1x2 let x_numerator = composer.arithmetic_gate(|gate| { - gate.witness(x1_y2, y1_x2, None) - .add(E::Fr::one(), E::Fr::one()) + gate.witness(x1_y2, y1_x2, None).add(F::one(), F::one()) }); // y1y2 - a * x1x2 let y_numerator = composer.arithmetic_gate(|gate| { - gate.witness(y1_y2, x1_x2, None) - .add(E::Fr::one(), -P::COEFF_A) + gate.witness(y1_y2, x1_x2, None).add(F::one(), -P::COEFF_A) }); // 1 + dx1x2y1y2 let x_denominator = composer.arithmetic_gate(|gate| { gate.witness(d_x1_x2_y1_y2, zero, None) - .add(E::Fr::one(), E::Fr::zero()) - .constant(E::Fr::one()) + .add(F::one(), F::zero()) + .constant(F::one()) }); // Compute the inverse @@ -173,15 +167,15 @@ mod test { // inv_x * x = 1 composer.arithmetic_gate(|gate| { gate.witness(x_denominator, inv_x_denom, Some(zero)) - .mul(E::Fr::one()) - .constant(-E::Fr::one()) + .mul(F::one()) + .constant(-F::one()) }); // 1 - dx1x2y1y2 let y_denominator = composer.arithmetic_gate(|gate| { gate.witness(d_x1_x2_y1_y2, zero, None) - .add(-E::Fr::one(), E::Fr::zero()) - .constant(E::Fr::one()) + .add(-F::one(), F::zero()) + .constant(F::one()) }); let inv_y_denom = composer @@ -195,35 +189,34 @@ mod test { // Assert that we actually have the inverse // inv_y * y = 1 composer.arithmetic_gate(|gate| { - gate.mul(E::Fr::one()) + gate.mul(F::one()) .witness(y_denominator, inv_y_denom, Some(zero)) - .constant(-E::Fr::one()) + .constant(-F::one()) }); // We can now use the inverses let x_3 = composer.arithmetic_gate(|gate| { - gate.mul(E::Fr::one()) - .witness(inv_x_denom, x_numerator, None) + gate.mul(F::one()).witness(inv_x_denom, x_numerator, None) }); let y_3 = composer.arithmetic_gate(|gate| { - gate.mul(E::Fr::one()) - .witness(inv_y_denom, y_numerator, None) + gate.mul(F::one()).witness(inv_y_denom, y_numerator, None) }); Point::new(x_3, y_3) } - fn test_curve_addition() + fn test_curve_addition() where - E: PairingEngine, - P: TEModelParameters, + F: PrimeField, + P: TEModelParameters, + PC: HomomorphicCommitment, { - let res = gadget_tester( - |composer: &mut StandardComposer| { + let res = gadget_tester::( + |composer: &mut StandardComposer| { let (x, y) = P::AFFINE_GENERATOR_COEFFS; - let generator = GroupAffine::new(x, y); + let generator = TEGroupAffine::
<P>
::new(x, y); let x_var = composer.add_input(x); let y_var = composer.add_input(y); let expected_point = generator + generator; diff --git a/plonk-core/src/constraint_system/ecc/mod.rs b/plonk-core/src/constraint_system/ecc/mod.rs index 3488213e..572a107c 100644 --- a/plonk-core/src/constraint_system/ecc/mod.rs +++ b/plonk-core/src/constraint_system/ecc/mod.rs @@ -11,18 +11,18 @@ pub mod scalar_mul; use crate::constraint_system::{variable::Variable, StandardComposer}; use ark_ec::{ - twisted_edwards_extended::GroupAffine, PairingEngine, TEModelParameters, + twisted_edwards_extended::GroupAffine as TEGroupAffine, ModelParameters, + TEModelParameters, }; +use ark_ff::PrimeField; use core::marker::PhantomData; -use num_traits::{One, Zero}; /// Represents a point of the embeded curve in the circuit #[derive(derivative::Derivative)] #[derivative(Clone, Copy, Debug)] -pub struct Point +pub struct Point
<P>
where - E: PairingEngine, - P: TEModelParameters, + P: ModelParameters, { /// `X`-coordinate x: Variable, @@ -31,13 +31,13 @@ where y: Variable, /// Type Parameter Marker - __: PhantomData<(E, P)>, + __: PhantomData
<P>
, } -impl Point +impl Point
<P>
where - E: PairingEngine, - P: TEModelParameters, + F: PrimeField, + P: TEModelParameters, { /// Builds a new [`Point`] from `X` and `Y` coordinates. /// @@ -55,8 +55,9 @@ where } /// Returns an identity point. - pub fn identity(composer: &mut StandardComposer) -> Self { - let one = composer.add_witness_to_circuit_description(E::Fr::one()); + pub fn identity(composer: &mut StandardComposer) -> Self { + let one = + composer.add_witness_to_circuit_description(P::BaseField::one()); Self::new(composer.zero_var, one) } @@ -71,23 +72,23 @@ where } } -impl StandardComposer +impl StandardComposer where - E: PairingEngine, - P: TEModelParameters, + F: PrimeField, + P: TEModelParameters, { /// Converts an embeded curve point into a constraint system Point /// without constraining the values - pub fn add_affine(&mut self, affine: GroupAffine
<P>
) -> Point { + pub fn add_affine(&mut self, affine: TEGroupAffine
<P>
) -> Point
<P>
{ Point::new(self.add_input(affine.x), self.add_input(affine.y)) } /// Converts an embeded curve point into a constraint system Point /// without constraining the values - pub fn add_public_affine(&mut self, affine: GroupAffine
<P>
) -> Point { + pub fn add_public_affine(&mut self, affine: TEGroupAffine
<P>
) -> Point
<P>
{ let point = self.add_affine(affine); - self.constrain_to_constant(point.x, E::Fr::zero(), Some(-affine.x)); - self.constrain_to_constant(point.y, E::Fr::zero(), Some(-affine.y)); + self.constrain_to_constant(point.x, F::zero(), Some(-affine.x)); + self.constrain_to_constant(point.y, F::zero(), Some(-affine.y)); point } @@ -95,8 +96,8 @@ where /// constrained witness value pub fn add_affine_to_circuit_description( &mut self, - affine: GroupAffine
<P>
, - ) -> Point { + affine: TEGroupAffine
<P>
, + ) -> Point
<P>
{ // NOTE: Not using individual gates because one of these may be zero. Point::new( self.add_witness_to_circuit_description(affine.x), @@ -108,24 +109,22 @@ where /// point. pub fn assert_equal_public_point( &mut self, - point: Point, - public_point: GroupAffine
<P>
, + point: Point
<P>
, + public_point: TEGroupAffine
<P>
, ) { - self.constrain_to_constant( - point.x, - E::Fr::zero(), - Some(-public_point.x), - ); - self.constrain_to_constant( - point.y, - E::Fr::zero(), - Some(-public_point.y), - ); + self.constrain_to_constant(point.x, F::zero(), Some(-public_point.x)); + self.constrain_to_constant(point.y, F::zero(), Some(-public_point.y)); } +} +impl StandardComposer +where + F: PrimeField, + P: TEModelParameters, +{ /// Asserts that a point in the circuit is equal to another point in the /// circuit. - pub fn assert_equal_point(&mut self, lhs: Point, rhs: Point) { + pub fn assert_equal_point(&mut self, lhs: Point
<P>
, rhs: Point
<P>
) { self.assert_equal(lhs.x, rhs.x); self.assert_equal(lhs.y, rhs.y); } @@ -145,10 +144,10 @@ where /// See: [`StandardComposer::boolean_gate`]. pub fn conditional_point_select( &mut self, - point_1: Point, - point_0: Point, + point_1: Point
<P>
, + point_0: Point
<P>
, bit: Variable, - ) -> Point { + ) -> Point
<P>
{ Point::new( self.conditional_select(bit, point_1.x, point_0.x), self.conditional_select(bit, point_1.y, point_0.y), @@ -166,16 +165,15 @@ where pub fn conditional_point_neg( &mut self, bit: Variable, - point_b: Point, - ) -> Point { + point_b: Point
<P>
, + ) -> Point
<P>
{ let zero = self.zero_var; let x = point_b.x; let y = point_b.y; // negation of point (x, y) is (-x, y) let x_neg = self.arithmetic_gate(|gate| { - gate.witness(x, zero, None) - .add(-E::Fr::one(), E::Fr::zero()) + gate.witness(x, zero, None).add(-F::one(), F::zero()) }); let x_updated = self.conditional_select(bit, x_neg, x); @@ -199,8 +197,8 @@ where fn conditional_select_identity( &mut self, bit: Variable, - point: Point, - ) -> Point { + point: Point
<P>
, + ) -> Point
<P>
{ Point::new( self.conditional_select_zero(bit, point.x), self.conditional_select_one(bit, point.y), @@ -211,24 +209,28 @@ where #[cfg(test)] mod test { use super::*; - use crate::{batch_test, constraint_system::helper::*}; + use crate::{ + batch_test, commitment::HomomorphicCommitment, + constraint_system::helper::*, + }; use ark_bls12_377::Bls12_377; use ark_bls12_381::Bls12_381; - fn test_conditional_select_point() + fn test_conditional_select_point() where - E: PairingEngine, - P: TEModelParameters, + F: PrimeField, + P: TEModelParameters, + PC: HomomorphicCommitment, { - let res = gadget_tester( - |composer: &mut StandardComposer| { - let bit_1 = composer.add_input(E::Fr::one()); + let res = gadget_tester::( + |composer: &mut StandardComposer| { + let bit_1 = composer.add_input(F::one()); let bit_0 = composer.zero_var(); - let point_a = Point::identity(composer); + let point_a = Point::
<P>
::identity(composer); let point_b = Point::new( - composer.add_input(E::Fr::from(10u64)), - composer.add_input(E::Fr::from(20u64)), + composer.add_input(F::from(10u64)), + composer.add_input(F::from(20u64)), ); let choice = @@ -245,18 +247,19 @@ mod test { assert!(res.is_ok()); } - fn test_conditional_point_neg() + fn test_conditional_point_neg() where - E: PairingEngine, - P: TEModelParameters, + F: PrimeField, + P: TEModelParameters, + PC: HomomorphicCommitment, { - gadget_tester( - |composer: &mut StandardComposer| { - let bit_1 = composer.add_input(E::Fr::one()); + gadget_tester::( + |composer: &mut StandardComposer| { + let bit_1 = composer.add_input(F::one()); let bit_0 = composer.zero_var(); let point = - GroupAffine::new(E::Fr::from(10u64), E::Fr::from(20u64)); + TEGroupAffine::
<P>
::new(F::from(10u64), F::from(20u64)); let point_var = Point::new( composer.add_input(point.x), composer.add_input(point.y), diff --git a/plonk-core/src/constraint_system/ecc/scalar_mul/fixed_base.rs b/plonk-core/src/constraint_system/ecc/scalar_mul/fixed_base.rs index e45cd878..8aafc0f9 100644 --- a/plonk-core/src/constraint_system/ecc/scalar_mul/fixed_base.rs +++ b/plonk-core/src/constraint_system/ecc/scalar_mul/fixed_base.rs @@ -6,23 +6,25 @@ //! Fixed-base Scalar Multiplication Gate -use crate::constraint_system::ecc::Point; -use crate::constraint_system::{variable::Variable, StandardComposer}; -use ark_ec::models::twisted_edwards_extended::{GroupAffine, GroupProjective}; -use ark_ec::models::TEModelParameters; -use ark_ec::{PairingEngine, ProjectiveCurve}; +use crate::constraint_system::{ + ecc::Point, variable::Variable, StandardComposer, +}; +use ark_ec::models::twisted_edwards_extended::{ + GroupAffine as TEGroupAffine, GroupProjective as TEGroupProjective, +}; +use ark_ec::{models::TEModelParameters, ProjectiveCurve}; use ark_ff::{BigInteger, FpParameters, PrimeField}; -use num_traits::{One, Zero}; +use num_traits::Zero; fn compute_wnaf_point_multiples
<P>
( - base_point: GroupProjective
<P>
, -) -> Vec> + base_point: TEGroupProjective
<P>
, +) -> Vec> where P: TEModelParameters, P::BaseField: PrimeField, { let mut multiples = vec![ - GroupProjective::
<P>
::default(); + TEGroupProjective::
<P>
::default(); ::Params::MODULUS_BITS as usize ]; @@ -33,10 +35,10 @@ where ProjectiveCurve::batch_normalization_into_affine(&multiples) } -impl StandardComposer +impl StandardComposer where - E: PairingEngine, - P: TEModelParameters, + F: PrimeField, + P: TEModelParameters, { /// Adds an elliptic curve scalar multiplication gate to the circuit /// description. @@ -49,10 +51,9 @@ where pub fn fixed_base_scalar_mul( &mut self, scalar: Variable, - base_point: GroupAffine
<P>
, - ) -> Point { - let num_bits = - ::Params::MODULUS_BITS as usize; + base_point: TEGroupAffine
<P>
, + ) -> Point
<P>
{ + let num_bits = ::Params::MODULUS_BITS as usize; // compute 2^iG let mut point_multiples = compute_wnaf_point_multiples(base_point.into()); @@ -68,17 +69,17 @@ where // Initialise the accumulators let mut scalar_acc = Vec::with_capacity(num_bits); - scalar_acc.push(E::Fr::zero()); + scalar_acc.push(F::zero()); let mut point_acc = Vec::with_capacity(num_bits); - point_acc.push(GroupAffine::
<P>
::zero()); + point_acc.push(TEGroupAffine::
<P>
::zero()); // Auxillary point to help with checks on the backend let mut xy_alphas = Vec::with_capacity(num_bits); let n_trailing_zeros = num_bits - wnaf_entries.len(); - scalar_acc.extend(vec![E::Fr::zero(); n_trailing_zeros]); - point_acc.extend(vec![GroupAffine::
<P>
::zero(); n_trailing_zeros]); - xy_alphas.extend(vec![E::Fr::zero(); n_trailing_zeros]); + scalar_acc.extend(vec![F::zero(); n_trailing_zeros]); + point_acc.extend(vec![TEGroupAffine::
<P>
::zero(); n_trailing_zeros]); + xy_alphas.extend(vec![F::zero(); n_trailing_zeros]); // Load values into accumulators based on wnaf entries for (i, entry) in wnaf_entries.iter().rev().enumerate() { @@ -87,14 +88,14 @@ where // Based on the WNAF, we decide what scalar and point to add let (scalar_to_add, point_to_add) = match entry { - 0 => { (E::Fr::zero(), GroupAffine::
<P>
::zero())}, - -1 => {(-E::Fr::one(), -point_multiples[index])}, - 1 => {(E::Fr::one(), point_multiples[index])}, + 0 => { (F::zero(), TEGroupAffine::
<P>
::zero())}, + -1 => {(-F::one(), -point_multiples[index])}, + 1 => {(F::one(), point_multiples[index])}, _ => unreachable!("Currently WNAF_2(k) is supported. The possible values are 1, -1 and 0. Current entry is {}", entry), }; - let prev_accumulator = E::Fr::from(2u64) * scalar_acc[index]; + let prev_accumulator = F::from(2u64) * scalar_acc[index]; scalar_acc.push(prev_accumulator + scalar_to_add); point_acc.push(point_acc[index] + point_to_add); @@ -113,13 +114,9 @@ where // We constrain the point accumulator to start from the Identity // point and the Scalar accumulator to start from zero if i == 0 { - self.constrain_to_constant(acc_x, E::Fr::zero(), None); - self.constrain_to_constant(acc_y, E::Fr::one(), None); - self.constrain_to_constant( - accumulated_bit, - E::Fr::zero(), - None, - ); + self.constrain_to_constant(acc_x, F::zero(), None); + self.constrain_to_constant(acc_y, F::one(), None); + self.constrain_to_constant(accumulated_bit, F::zero(), None); } let x_beta = point_multiples[i].x; @@ -129,7 +126,7 @@ where let xy_beta = x_beta * y_beta; - let wnaf_round = StandardComposer::::new_wnaf( + let wnaf_round = StandardComposer::::new_wnaf( acc_x, acc_y, accumulated_bit, @@ -151,8 +148,8 @@ where self.arithmetic_gate(|gate| { gate.witness(acc_x, acc_y, Some(xy_alpha)) - .fan_in_3(E::Fr::zero(), last_accumulated_bit) - .out(E::Fr::zero()) + .fan_in_3(F::zero(), last_accumulated_bit) + .out(F::zero()) }); // Constrain the last element in the accumulator to be equal to the @@ -166,20 +163,23 @@ where #[cfg(test)] mod tests { use super::*; - use crate::{batch_test, constraint_system::helper::*, util}; + use crate::{ + batch_test, commitment::HomomorphicCommitment, + constraint_system::helper::*, util, + }; use ark_bls12_377::Bls12_377; use ark_bls12_381::Bls12_381; use ark_ec::{group::Group, AffineCurve}; - use ark_ff::PrimeField; - fn test_ecc_constraint() + fn test_ecc_constraint() where - E: PairingEngine, - P: TEModelParameters, + F: PrimeField, + P: TEModelParameters, + PC: HomomorphicCommitment, { - let res = gadget_tester( - |composer: &mut StandardComposer| { - let scalar = E::Fr::from_le_bytes_mod_order(&[ + let res = gadget_tester::( + |composer: &mut StandardComposer| { + let scalar = F::from_le_bytes_mod_order(&[ 182, 44, 247, 214, 94, 14, 151, 208, 130, 16, 200, 204, 147, 32, 104, 166, 0, 59, 52, 1, 1, 59, 103, 6, 169, 175, 51, 101, 234, 180, 125, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, @@ -189,10 +189,10 @@ mod tests { let secret_scalar = composer.add_input(scalar); let (x, y) = P::AFFINE_GENERATOR_COEFFS; - let generator = GroupAffine::new(x, y); - let expected_point: GroupAffine
<P>
= AffineCurve::mul( + let generator = TEGroupAffine::new(x, y); + let expected_point: TEGroupAffine
<P>
= AffineCurve::mul( &generator, - util::to_embedded_curve_scalar::(scalar), + util::to_embedded_curve_scalar::(scalar), ) .into(); @@ -207,21 +207,22 @@ mod tests { assert!(res.is_ok()); } - fn test_ecc_constraint_zero() + fn test_ecc_constraint_zero() where - E: PairingEngine, - P: TEModelParameters, + F: PrimeField, + P: TEModelParameters, + PC: HomomorphicCommitment, { - let res = gadget_tester( - |composer: &mut StandardComposer| { - let scalar = E::Fr::zero(); + let res = gadget_tester::( + |composer: &mut StandardComposer| { + let scalar = F::zero(); let secret_scalar = composer.add_input(scalar); let (x, y) = P::AFFINE_GENERATOR_COEFFS; - let generator = GroupAffine::new(x, y); - let expected_point: GroupAffine
<P>
= AffineCurve::mul( + let generator = TEGroupAffine::new(x, y); + let expected_point = AffineCurve::mul( &generator, - util::to_embedded_curve_scalar::(scalar), + util::to_embedded_curve_scalar::(scalar), ) .into(); @@ -236,24 +237,25 @@ mod tests { assert!(res.is_ok()); } - fn test_ecc_constraint_should_fail() + fn test_ecc_constraint_should_fail() where - E: PairingEngine, - P: TEModelParameters, + F: PrimeField, + P: TEModelParameters, + PC: HomomorphicCommitment, { - let res = gadget_tester( - |composer: &mut StandardComposer| { - let scalar = E::Fr::from(100u64); + let res = gadget_tester::( + |composer: &mut StandardComposer| { + let scalar = F::from(100u64); let secret_scalar = composer.add_input(scalar); // Fails because we are not multiplying by the GENERATOR, it is // double let (x, y) = P::AFFINE_GENERATOR_COEFFS; - let generator = GroupAffine::new(x, y); + let generator = TEGroupAffine::new(x, y); let double_gen = generator.double(); - let expected_point: GroupAffine
<P>
= AffineCurve::mul( + let expected_point: TEGroupAffine
<P>
= AffineCurve::mul( &double_gen, - util::to_embedded_curve_scalar::(scalar), + util::to_embedded_curve_scalar::(scalar), ) .into(); @@ -269,15 +271,16 @@ mod tests { assert!(res.is_err()); } - fn test_point_addition() + fn test_point_addition() where - E: PairingEngine, - P: TEModelParameters, + F: PrimeField, + P: TEModelParameters, + PC: HomomorphicCommitment, { - let res = gadget_tester( - |composer: &mut StandardComposer| { + let res = gadget_tester::( + |composer: &mut StandardComposer| { let (x, y) = P::AFFINE_GENERATOR_COEFFS; - let generator = GroupAffine::new(x, y); + let generator = TEGroupAffine::new(x, y); let point_a = generator; let point_b = point_a.double(); @@ -289,10 +292,10 @@ mod tests { let var_point_a_x = composer.add_input(affine_point_a.x); let var_point_a_y = composer.add_input(affine_point_a.y); - let point_a = Point::::new(var_point_a_x, var_point_a_y); + let point_a = Point::
<P>
::new(var_point_a_x, var_point_a_y); let var_point_b_x = composer.add_input(affine_point_b.x); let var_point_b_y = composer.add_input(affine_point_b.y); - let point_b = Point::::new(var_point_b_x, var_point_b_y); + let point_b = Point::
<P>
::new(var_point_b_x, var_point_b_y); let new_point = composer.point_addition_gate(point_a, point_b); composer.assert_equal_public_point( @@ -306,42 +309,43 @@ mod tests { assert!(res.is_ok()); } - fn test_pedersen_hash() + fn test_pedersen_hash() where - E: PairingEngine, - P: TEModelParameters, + F: PrimeField, + P: TEModelParameters, + PC: HomomorphicCommitment, { - let res = gadget_tester( - |composer: &mut StandardComposer| { + let res = gadget_tester::( + |composer: &mut StandardComposer| { let (x, y) = P::AFFINE_GENERATOR_COEFFS; - let generator = GroupAffine::new(x, y); + let generator = TEGroupAffine::new(x, y); // First component - let scalar_a = E::Fr::from(112233u64); + let scalar_a = F::from(112233u64); let secret_scalar_a = composer.add_input(scalar_a); let point_a = generator; - let expected_component_a: GroupAffine
<P>
= AffineCurve::mul( + let expected_component_a: TEGroupAffine
<P>
= AffineCurve::mul( &point_a, - util::to_embedded_curve_scalar::(scalar_a), + util::to_embedded_curve_scalar::(scalar_a), ) .into(); // Second component - let scalar_b = E::Fr::from(445566u64); + let scalar_b = F::from(445566u64); let secret_scalar_b = composer.add_input(scalar_b); let point_b = point_a.double() + point_a; - let expected_component_b: GroupAffine
<P>
= AffineCurve::mul( + let expected_component_b: TEGroupAffine
<P>
= AffineCurve::mul( &point_b, - util::to_embedded_curve_scalar::(scalar_b), + util::to_embedded_curve_scalar::(scalar_b), ) .into(); // Expected pedersen hash - let expected_point: GroupAffine
<P>
= (AffineCurve::mul( + let expected_point = (AffineCurve::mul( &point_a, - util::to_embedded_curve_scalar::(scalar_a), + util::to_embedded_curve_scalar::(scalar_a), ) + AffineCurve::mul( &point_b, - util::to_embedded_curve_scalar::(scalar_b), + util::to_embedded_curve_scalar::(scalar_b), )) .into(); @@ -380,40 +384,41 @@ mod tests { assert!(res.is_ok()); } - fn test_pedersen_balance() + fn test_pedersen_balance() where - E: PairingEngine, - P: TEModelParameters, + F: PrimeField, + P: TEModelParameters, + PC: HomomorphicCommitment, { - let res = gadget_tester( - |composer: &mut StandardComposer| { + let res = gadget_tester::( + |composer: &mut StandardComposer| { let (x, y) = P::AFFINE_GENERATOR_COEFFS; - let generator = GroupAffine::new(x, y); + let generator = TEGroupAffine::new(x, y); // First component - let scalar_a = E::Fr::from(25u64); + let scalar_a = F::from(25u64); let secret_scalar_a = composer.add_input(scalar_a); // Second component - let scalar_b = E::Fr::from(30u64); + let scalar_b = F::from(30u64); let secret_scalar_b = composer.add_input(scalar_b); // Third component - let scalar_c = E::Fr::from(10u64); + let scalar_c = F::from(10u64); let secret_scalar_c = composer.add_input(scalar_c); // Fourth component - let scalar_d = E::Fr::from(45u64); + let scalar_d = F::from(45u64); let secret_scalar_d = composer.add_input(scalar_d); let (x, y) = P::AFFINE_GENERATOR_COEFFS; - let gen: GroupAffine
<P>
= GroupAffine::new(x, y); + let gen = TEGroupAffine::
<P>
::new(x, y); - let expected_lhs: GroupAffine
<P>
= AffineCurve::mul( + let expected_lhs: TEGroupAffine
<P>
= AffineCurve::mul( &gen, - util::to_embedded_curve_scalar::(scalar_a + scalar_b), + util::to_embedded_curve_scalar::(scalar_a + scalar_b), ) .into(); - let expected_rhs: GroupAffine
<P>
= AffineCurve::mul( + let expected_rhs: TEGroupAffine
<P>
= AffineCurve::mul( &gen, - util::to_embedded_curve_scalar::(scalar_c + scalar_d), + util::to_embedded_curve_scalar::(scalar_c + scalar_d), ) .into(); diff --git a/plonk-core/src/constraint_system/ecc/scalar_mul/variable_base.rs b/plonk-core/src/constraint_system/ecc/scalar_mul/variable_base.rs index ab958c5f..db19122d 100644 --- a/plonk-core/src/constraint_system/ecc/scalar_mul/variable_base.rs +++ b/plonk-core/src/constraint_system/ecc/scalar_mul/variable_base.rs @@ -6,17 +6,16 @@ //! Variable-base Scalar Multiplication Gate -use crate::constraint_system::ecc::Point; -use crate::constraint_system::{variable::Variable, StandardComposer}; -use ark_ec::models::TEModelParameters; -use ark_ec::PairingEngine; -use ark_ff::{BigInteger, Field, FpParameters, PrimeField}; -use num_traits::{One, Zero}; - -impl StandardComposer +use crate::constraint_system::{ + ecc::Point, variable::Variable, StandardComposer, +}; +use ark_ec::TEModelParameters; +use ark_ff::{BigInteger, FpParameters, PrimeField}; + +impl StandardComposer where - E: PairingEngine, - P: TEModelParameters, + F: PrimeField, + P: TEModelParameters, { /// Adds a variable-base scalar multiplication to the circuit description. /// @@ -28,8 +27,8 @@ where pub fn variable_base_scalar_mul( &mut self, curve_var: Variable, - point: Point, - ) -> Point { + point: Point
<P>
, + ) -> Point
<P>
{ // Turn scalar into bits let raw_scalar = *self .variables @@ -57,7 +56,7 @@ where fn scalar_decomposition( &mut self, witness_var: Variable, - witness_scalar: E::Fr, + witness_scalar: F, ) -> Vec { // Decompose the bits let scalar_bits_iter = witness_scalar.into_repr().to_bits_le(); @@ -65,30 +64,30 @@ where // Add all the bits into the composer let scalar_bits_var: Vec = scalar_bits_iter .iter() - .map(|bit| self.add_input(E::Fr::from(*bit as u64))) + .map(|bit| self.add_input(F::from(*bit as u64))) .collect(); // Take the first 252 bits let scalar_bits_var = scalar_bits_var - [..::Params::MODULUS_BITS as usize] + [..::Params::MODULUS_BITS as usize] .to_vec(); // Now ensure that the bits correctly accumulate to the witness given let mut accumulator_var = self.zero_var; - let mut accumulator_scalar = E::Fr::zero(); + let mut accumulator_scalar = F::zero(); for (power, bit) in scalar_bits_var.iter().enumerate() { self.boolean_gate(*bit); - let two_pow = E::Fr::from(2u64).pow([power as u64, 0, 0, 0]); + let two_pow = F::from(2u64).pow([power as u64, 0, 0, 0]); accumulator_var = self.arithmetic_gate(|gate| { gate.witness(*bit, accumulator_var, None) - .add(two_pow, E::Fr::one()) + .add(two_pow, F::one()) }); accumulator_scalar += - two_pow * E::Fr::from(scalar_bits_iter[power] as u64); + two_pow * F::from(scalar_bits_iter[power] as u64); } self.assert_equal(accumulator_var, witness_var); @@ -99,19 +98,26 @@ where #[cfg(test)] mod test { use super::*; - use crate::{batch_test, constraint_system::helper::*, util}; + use crate::{ + batch_test, commitment::HomomorphicCommitment, + constraint_system::helper::*, util, + }; use ark_bls12_377::Bls12_377; use ark_bls12_381::Bls12_381; - use ark_ec::{twisted_edwards_extended::GroupAffine, AffineCurve}; + use ark_ec::{ + twisted_edwards_extended::GroupAffine as TEGroupAffine, AffineCurve, + TEModelParameters, + }; - fn test_var_base_scalar_mul() + fn test_var_base_scalar_mul() where - E: PairingEngine, - P: TEModelParameters, + F: PrimeField, + P: TEModelParameters, + PC: HomomorphicCommitment, { - let res = gadget_tester( - |composer: &mut StandardComposer| { - let scalar = E::Fr::from_le_bytes_mod_order(&[ + let res = gadget_tester::( + |composer: &mut StandardComposer| { + let scalar = F::from_le_bytes_mod_order(&[ 182, 44, 247, 214, 94, 14, 151, 208, 130, 16, 200, 204, 147, 32, 104, 166, 0, 59, 52, 1, 1, 59, 103, 6, 169, 175, 51, 101, 234, 180, 125, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, @@ -121,11 +127,11 @@ mod test { let secret_scalar = composer.add_input(scalar); let (x, y) = P::AFFINE_GENERATOR_COEFFS; - let generator = GroupAffine::new(x, y); + let generator = TEGroupAffine::new(x, y); - let expected_point: GroupAffine
<P>
= AffineCurve::mul( + let expected_point: TEGroupAffine
<P>
= AffineCurve::mul( &generator, - util::to_embedded_curve_scalar::(scalar), + util::to_embedded_curve_scalar::(scalar), ) .into(); diff --git a/plonk-core/src/constraint_system/helper.rs b/plonk-core/src/constraint_system/helper.rs index 14eee33f..26323683 100644 --- a/plonk-core/src/constraint_system/helper.rs +++ b/plonk-core/src/constraint_system/helper.rs @@ -5,31 +5,30 @@ // Copyright (c) DUSK NETWORK. All rights reserved. use super::StandardComposer; -use crate::error::Error; -use crate::proof_system::{Prover, Verifier}; -use ark_ec::{PairingEngine, TEModelParameters}; -use ark_poly::univariate::DensePolynomial; -use ark_poly_commit::kzg10::{self, Powers, KZG10}; -use ark_poly_commit::sonic_pc::SonicKZG10; -use ark_poly_commit::PolynomialCommitment; -use num_traits::One; -use rand_core::OsRng; +use crate::{ + commitment::HomomorphicCommitment, + error::{to_pc_error, Error}, + proof_system::{Prover, Verifier}, +}; +use ark_ec::TEModelParameters; +use rand::rngs::OsRng; + +use ark_ff::PrimeField; /// Adds dummy constraints using arithmetic gates. #[allow(dead_code)] -pub(crate) fn dummy_gadget( +pub(crate) fn dummy_gadget( n: usize, - composer: &mut StandardComposer, + composer: &mut StandardComposer, ) where - E: PairingEngine, - P: TEModelParameters, + F: PrimeField, + P: TEModelParameters, { - let one = E::Fr::one(); + let one = F::one(); let var_one = composer.add_input(one); for _ in 0..n { composer.arithmetic_gate(|gate| { - gate.witness(var_one, var_one, None) - .add(E::Fr::one(), E::Fr::one()) + gate.witness(var_one, var_one, None).add(F::one(), F::one()) }); } } @@ -37,25 +36,27 @@ pub(crate) fn dummy_gadget( /// Takes a generic gadget function with no auxillary input and tests whether it /// passes an end-to-end test. #[allow(dead_code)] -pub(crate) fn gadget_tester( - gadget: fn(&mut StandardComposer), +pub(crate) fn gadget_tester( + gadget: fn(&mut StandardComposer), n: usize, -) -> Result<(), Error> +) -> Result, Error> where - E: PairingEngine, - P: TEModelParameters, + F: PrimeField, + P: TEModelParameters, + PC: HomomorphicCommitment, { // Common View - let universal_params = KZG10::>::setup( - // +1 per wire, +2 for the permutation poly - 2 * n + 6, - false, + let universal_params = PC::setup( + 2 * n + 6, // +1 per wire, +2 for the permutation poly + None, &mut OsRng, - )?; + ) + .map_err(to_pc_error::)?; + // Provers View let (proof, public_inputs) = { // Create a prover struct - let mut prover = Prover::new(b"demo"); + let mut prover = Prover::::new(b"demo"); // Additionally key the transcript prover.key_transcript(b"key", b"additional seed information"); @@ -64,27 +65,24 @@ where gadget(prover.mut_cs()); // Commit Key - let (ck, _) = SonicKZG10::>::trim( + let (ck, _) = PC::trim( &universal_params, // +1 per wire, +2 for the permutation poly prover.circuit_size().next_power_of_two() + 6, 0, None, ) - .unwrap(); - let powers = Powers { - powers_of_g: ck.powers_of_g.into(), - powers_of_gamma_g: ck.powers_of_gamma_g.into(), - }; + .map_err(to_pc_error::)?; + // Preprocess circuit - prover.preprocess(&powers)?; + prover.preprocess(&ck)?; // Once the prove method is called, the public inputs are cleared // So pre-fetch these before calling Prove let public_inputs = prover.cs.construct_dense_pi_vec(); // Compute Proof - (prover.prove(&powers)?, public_inputs) + (prover.prove(&ck)?, public_inputs) }; // Verifiers view // @@ -98,29 +96,18 @@ where gadget(verifier.mut_cs()); // Compute Commit and Verifier Key - let (sonic_ck, sonic_vk) = SonicKZG10::>::trim( + let (ck, vk) 
= PC::trim( &universal_params, verifier.circuit_size().next_power_of_two() + 6, 0, None, ) - .unwrap(); - let powers = Powers { - powers_of_g: sonic_ck.powers_of_g.into(), - powers_of_gamma_g: sonic_ck.powers_of_gamma_g.into(), - }; + .map_err(to_pc_error::)?; - let vk = kzg10::VerifierKey { - g: sonic_vk.g, - gamma_g: sonic_vk.gamma_g, - h: sonic_vk.h, - beta_h: sonic_vk.beta_h, - prepared_h: sonic_vk.prepared_h, - prepared_beta_h: sonic_vk.prepared_beta_h, - }; // Preprocess circuit - verifier.preprocess(&powers)?; + verifier.preprocess(&ck)?; // Verify proof - verifier.verify(&proof, &vk, &public_inputs) + verifier.verify(&proof, &vk, &public_inputs)?; + Ok(proof) } diff --git a/plonk-core/src/constraint_system/logic.rs b/plonk-core/src/constraint_system/logic.rs index a2df75e5..8d7fee0a 100644 --- a/plonk-core/src/constraint_system/logic.rs +++ b/plonk-core/src/constraint_system/logic.rs @@ -10,14 +10,13 @@ //! `AND` gate. use crate::constraint_system::{StandardComposer, Variable, WireData}; -use ark_ec::{PairingEngine, TEModelParameters}; +use ark_ec::TEModelParameters; use ark_ff::{BigInteger, PrimeField}; -use num_traits::{One, Zero}; -impl StandardComposer +impl StandardComposer where - E: PairingEngine, - P: TEModelParameters, + F: PrimeField, + P: TEModelParameters, { /// Performs a logical AND or XOR op between the inputs provided for the /// specified number of bits. @@ -48,9 +47,9 @@ where // representing both numbers. let num_quads = num_bits >> 1; // Allocate accumulators for gate construction. - let mut left_accumulator = E::Fr::zero(); - let mut right_accumulator = E::Fr::zero(); - let mut out_accumulator = E::Fr::zero(); + let mut left_accumulator = F::zero(); + let mut right_accumulator = F::zero(); + let mut out_accumulator = F::zero(); let mut left_quad: u8; let mut right_quad: u8; // Get vars as bits and reverse them to get the Little Endian repr. @@ -122,21 +121,21 @@ where let idx = i << 1; ((b_bits[idx] as u8) << 1) + (b_bits[idx + 1] as u8) }; - let left_quad_fr = E::Fr::from(left_quad as u64); - let right_quad_fr = E::Fr::from(right_quad as u64); + let left_quad_fr = F::from(left_quad as u64); + let right_quad_fr = F::from(right_quad as u64); // The `out_quad` is the result of the bitwise ops `&` or `^` // between the left and right quads. The op is decided // with a boolean flag set as input of the function. let out_quad_fr = match is_xor_gate { - true => E::Fr::from((left_quad ^ right_quad) as u64), - false => E::Fr::from((left_quad & right_quad) as u64), + true => F::from((left_quad ^ right_quad) as u64), + false => F::from((left_quad & right_quad) as u64), }; // We also need to allocate a helper item which is the result // of the product between the left and right quads. 
// This param is identified as `w` in the program memory and // is needed to prevent the degree of our quotient polynomial from // blowing up - let prod_quad_fr = E::Fr::from((left_quad * right_quad) as u64); + let prod_quad_fr = F::from((left_quad * right_quad) as u64); // Now that we've computed this round results, we need to apply the // logic transition constraint that will check the following: @@ -173,24 +172,24 @@ where // i === (bits/2 - j) // j = 0 // - left_accumulator *= E::Fr::from(4u64); + left_accumulator *= F::from(4u64); left_accumulator += left_quad_fr; - right_accumulator *= E::Fr::from(4u64); + right_accumulator *= F::from(4u64); right_accumulator += right_quad_fr; - out_accumulator *= E::Fr::from(4u64); + out_accumulator *= F::from(4u64); out_accumulator += out_quad_fr; // Apply logic transition constraints. assert!( - left_accumulator - (prev_left_accum * E::Fr::from(4u64)) - < E::Fr::from(4u64) + left_accumulator - (prev_left_accum * F::from(4u64)) + < F::from(4u64) ); assert!( - right_accumulator - (prev_right_accum * E::Fr::from(4u64)) - < E::Fr::from(4u64) + right_accumulator - (prev_right_accum * F::from(4u64)) + < F::from(4u64) ); assert!( - out_accumulator - (prev_out_accum * E::Fr::from(4u64)) - < E::Fr::from(4u64) + out_accumulator - (prev_out_accum * F::from(4u64)) + < F::from(4u64) ); // Get variables pointing to the previous accumulated values. @@ -239,39 +238,39 @@ where // Now we just need to extend the selector polynomials with the // appropriate coefficients to form complete logic gates. for _ in 0..num_quads { - self.q_m.push(E::Fr::zero()); - self.q_l.push(E::Fr::zero()); - self.q_r.push(E::Fr::zero()); - self.q_arith.push(E::Fr::zero()); - self.q_o.push(E::Fr::zero()); - self.q_4.push(E::Fr::zero()); - self.q_range.push(E::Fr::zero()); - self.q_fixed_group_add.push(E::Fr::zero()); - self.q_variable_group_add.push(E::Fr::zero()); + self.q_m.push(F::zero()); + self.q_l.push(F::zero()); + self.q_r.push(F::zero()); + self.q_arith.push(F::zero()); + self.q_o.push(F::zero()); + self.q_4.push(F::zero()); + self.q_range.push(F::zero()); + self.q_fixed_group_add.push(F::zero()); + self.q_variable_group_add.push(F::zero()); match is_xor_gate { true => { - self.q_c.push(-E::Fr::one()); - self.q_logic.push(-E::Fr::one()); + self.q_c.push(-F::one()); + self.q_logic.push(-F::one()); } false => { - self.q_c.push(E::Fr::one()); - self.q_logic.push(E::Fr::one()); + self.q_c.push(F::one()); + self.q_logic.push(F::one()); } }; } // For the last gate, `q_c` and `q_logic` we use no-op values (Zero). - self.q_m.push(E::Fr::zero()); - self.q_l.push(E::Fr::zero()); - self.q_r.push(E::Fr::zero()); - self.q_arith.push(E::Fr::zero()); - self.q_o.push(E::Fr::zero()); - self.q_4.push(E::Fr::zero()); - self.q_range.push(E::Fr::zero()); - self.q_fixed_group_add.push(E::Fr::zero()); - self.q_variable_group_add.push(E::Fr::zero()); + self.q_m.push(F::zero()); + self.q_l.push(F::zero()); + self.q_r.push(F::zero()); + self.q_arith.push(F::zero()); + self.q_o.push(F::zero()); + self.q_4.push(F::zero()); + self.q_range.push(F::zero()); + self.q_fixed_group_add.push(F::zero()); + self.q_variable_group_add.push(F::zero()); - self.q_c.push(E::Fr::zero()); - self.q_logic.push(E::Fr::zero()); + self.q_c.push(F::zero()); + self.q_logic.push(F::zero()); // Now we need to assert that the sum of accumulated values // matches the original values provided to the fn. 
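The selector assignments in this hunk build the AND/XOR gate by consuming the two inputs one quad (two bits) per constraint while carrying base-4 accumulators, so after num_bits / 2 rounds the accumulators hold the (truncated) inputs and the accumulated output. A minimal out-of-circuit sketch of that recurrence, assuming an even bit-width; the helper name below is illustrative and not something exported by the crate:

fn accumulate_logic_quads(
    a: u64,
    b: u64,
    num_bits: usize,
    is_xor: bool,
) -> (u64, u64, u64) {
    // The sketch, like the gate, only handles an even bit-width (see
    // test_logical_gate_odd_bit_num for the odd case).
    assert!(num_bits % 2 == 0 && num_bits <= 64);
    let (mut acc_a, mut acc_b, mut acc_out) = (0u64, 0u64, 0u64);
    // Quads are consumed from the most significant end, mirroring the
    // left/right/out accumulator updates in the gate construction above.
    for i in (0..num_bits / 2).rev() {
        let quad_a = (a >> (2 * i)) & 3;
        let quad_b = (b >> (2 * i)) & 3;
        let quad_out = if is_xor { quad_a ^ quad_b } else { quad_a & quad_b };
        // This is the transition q_logic enforces: every new accumulator
        // differs from four times the previous one by a quad in [0, 4).
        acc_a = 4 * acc_a + quad_a;
        acc_b = 4 * acc_b + quad_b;
        acc_out = 4 * acc_out + quad_out;
    }
    (acc_a, acc_b, acc_out)
}

For example, accumulate_logic_quads(500, 357, 10, true).2 equals 500 ^ 357, the constant checked in test_logic_xor_and_constraint below.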
@@ -293,15 +292,15 @@ where // assert_eq!( // self.variables[&a].into_repr() - // & (E::Fr::from(2u64).pow(&[(num_bits) as u64, 0, 0, 0]) - // - E::Fr::one()) + // & (F::from(2u64).pow(&[(num_bits) as u64, 0, 0, 0]) + // - F::one()) // .into_repr(), // self.variables[&self.w_l[self.n - 1]] // ); // assert_eq!( // self.variables[&b] - // & (E::Fr::from(2u64).pow(&[(num_bits) as u64, 0, 0, 0]) - // - E::Fr::one()), + // & (F::from(2u64).pow(&[(num_bits) as u64, 0, 0, 0]) + // - F::one()), // self.variables[&self.w_r[self.n - 1]] // ); @@ -346,27 +345,30 @@ where #[cfg(test)] mod test { - use super::*; - use crate::constraint_system::helper::*; - use crate::{batch_test, constraint_system::StandardComposer}; + use crate::{ + batch_test, commitment::HomomorphicCommitment, + constraint_system::helper::*, constraint_system::StandardComposer, + }; use ark_bls12_377::Bls12_377; use ark_bls12_381::Bls12_381; - - fn test_logic_xor_and_constraint() + use ark_ec::TEModelParameters; + use ark_ff::PrimeField; + fn test_logic_xor_and_constraint() where - E: PairingEngine, - P: TEModelParameters, + F: PrimeField, + P: TEModelParameters, + PC: HomomorphicCommitment, { // Should pass since the XOR result is correct and the bit-num is even. - let res = gadget_tester( - |composer: &mut StandardComposer| { - let witness_a = composer.add_input(E::Fr::from(500u64)); - let witness_b = composer.add_input(E::Fr::from(357u64)); + let res = gadget_tester::( + |composer: &mut StandardComposer| { + let witness_a = composer.add_input(F::from(500u64)); + let witness_b = composer.add_input(F::from(357u64)); let xor_res = composer.xor_gate(witness_a, witness_b, 10); // Check that the XOR result is indeed what we are expecting. composer.constrain_to_constant( xor_res, - E::Fr::from(500u64 ^ 357u64), + F::from(500u64 ^ 357u64), None, ); }, @@ -375,15 +377,15 @@ mod test { assert!(res.is_ok()); // Should pass since the AND result is correct even the bit-num is even. - let res = gadget_tester( - |composer: &mut StandardComposer| { - let witness_a = composer.add_input(E::Fr::from(469u64)); - let witness_b = composer.add_input(E::Fr::from(321u64)); + let res = gadget_tester::( + |composer: &mut StandardComposer| { + let witness_a = composer.add_input(F::from(469u64)); + let witness_b = composer.add_input(F::from(321u64)); let xor_res = composer.and_gate(witness_a, witness_b, 10); // Check that the AND result is indeed what we are expecting. composer.constrain_to_constant( xor_res, - E::Fr::from(469u64 & 321u64), + F::from(469u64 & 321u64), None, ); }, @@ -393,15 +395,15 @@ mod test { // Should not pass since the XOR result is not correct even the bit-num // is even. - let res = gadget_tester( - |composer: &mut StandardComposer| { - let witness_a = composer.add_input(E::Fr::from(139u64)); - let witness_b = composer.add_input(E::Fr::from(33u64)); + let res = gadget_tester::( + |composer: &mut StandardComposer| { + let witness_a = composer.add_input(F::from(139u64)); + let witness_b = composer.add_input(F::from(33u64)); let xor_res = composer.xor_gate(witness_a, witness_b, 10); // Check that the XOR result is indeed what we are expecting. 
composer.constrain_to_constant( xor_res, - E::Fr::from(139u64 & 33u64), + F::from(139u64 & 33u64), None, ); }, @@ -410,15 +412,15 @@ mod test { assert!(res.is_err()); // Should pass even the bitnum is less than the number bit-size - let res = gadget_tester( - |composer: &mut StandardComposer| { - let witness_a = composer.add_input(E::Fr::from(256u64)); - let witness_b = composer.add_input(E::Fr::from(235u64)); + let res = gadget_tester::( + |composer: &mut StandardComposer| { + let witness_a = composer.add_input(F::from(256u64)); + let witness_b = composer.add_input(F::from(235u64)); let xor_res = composer.xor_gate(witness_a, witness_b, 2); // Check that the XOR result is indeed what we are expecting. composer.constrain_to_constant( xor_res, - E::Fr::from(256u64 ^ 235u64), + F::from(256u64 ^ 235u64), None, ); }, @@ -427,23 +429,20 @@ mod test { assert!(res.is_err()); } - fn test_logical_gate_odd_bit_num() + fn test_logical_gate_odd_bit_num() where - E: PairingEngine, - P: TEModelParameters, + F: PrimeField, + P: TEModelParameters, + PC: HomomorphicCommitment, { // Should fail since the bit-num is odd. - let _ = gadget_tester( - |composer: &mut StandardComposer| { - let witness_a = composer.add_input(E::Fr::from(500u64)); - let witness_b = composer.add_input(E::Fr::from(499u64)); + let _ = gadget_tester::( + |composer: &mut StandardComposer| { + let witness_a = composer.add_input(F::from(500u64)); + let witness_b = composer.add_input(F::from(499u64)); let xor_res = composer.xor_gate(witness_a, witness_b, 9); // Check that the XOR result is indeed what we are expecting. - composer.constrain_to_constant( - xor_res, - E::Fr::from(7u64), - None, - ); + composer.constrain_to_constant(xor_res, F::from(7u64), None); }, 200, ); @@ -454,9 +453,7 @@ mod test { [test_logic_xor_and_constraint], [test_logical_gate_odd_bit_num] => ( - Bls12_381, - ark_ed_on_bls12_381::EdwardsParameters - ) + Bls12_381, ark_ed_on_bls12_381::EdwardsParameters ) ); // Test for Bls12_377 @@ -464,8 +461,6 @@ mod test { [test_logic_xor_and_constraint], [test_logical_gate_odd_bit_num] => ( - Bls12_377, - ark_ed_on_bls12_377::EdwardsParameters - ) + Bls12_377, ark_ed_on_bls12_377::EdwardsParameters ) ); } diff --git a/plonk-core/src/constraint_system/range.rs b/plonk-core/src/constraint_system/range.rs index b1b9d137..9a204bce 100644 --- a/plonk-core/src/constraint_system/range.rs +++ b/plonk-core/src/constraint_system/range.rs @@ -7,14 +7,13 @@ //! Range Gate use crate::constraint_system::{StandardComposer, Variable, WireData}; -use ark_ec::{PairingEngine, TEModelParameters}; +use ark_ec::TEModelParameters; use ark_ff::{BigInteger, PrimeField}; -use num_traits::{One, Zero}; -impl StandardComposer +impl StandardComposer where - E: PairingEngine, - P: TEModelParameters, + F: PrimeField, + P: TEModelParameters, { /// Adds a range-constraint gate that checks and constrains a /// [`Variable`] to be inside of the range \[0,num_bits\]. 
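For intuition, the range check documented above relies on the same base-4 accumulator trick. The sketch below is a plain-Rust model with illustrative names (`fits_in_range` is ours, not the crate's API): a witness fits in `num_bits` bits exactly when rebuilding it from its 2-bit quads reproduces the witness.

    // Illustration only: out-of-circuit model of the range-gate accumulator.
    fn fits_in_range(witness: u64, num_bits: usize) -> bool {
        assert!(num_bits % 2 == 0, "the gate works on an even number of bits");
        let mut accumulator = 0u64;
        // Absorb the witness two bits at a time, most significant quad first.
        for i in (0..num_bits / 2).rev() {
            let quad = (witness >> (2 * i)) & 3;
            accumulator = 4 * accumulator + quad;
        }
        // In-circuit, the final accumulator is constrained to equal the witness;
        // bits above `num_bits` are never absorbed, so out-of-range values fail.
        accumulator == witness
    }

    fn main() {
        assert!(fits_in_range(u32::MAX as u64, 32));
        assert!(!fits_in_range((u32::MAX as u64) + 1, 32)); // 2^32 needs 33 bits
        assert!(fits_in_range(2u64.pow(34) - 1, 34));
    }

These are the same boundary cases exercised by `test_range_constraint` in the test hunks that follow.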
@@ -28,7 +27,7 @@ where pub fn range_gate(&mut self, witness: Variable, num_bits: usize) { // Adds `variable` into the appropriate witness position // based on the accumulator number a_i - let add_wire = |composer: &mut StandardComposer, + let add_wire = |composer: &mut StandardComposer, i: usize, variable: Variable| { // Since four quads can fit into one gate, the gate index does @@ -135,8 +134,8 @@ where // We collect the set of accumulators to return back to the user // and keep a running count of the current accumulator let mut accumulators: Vec = Vec::new(); - let mut accumulator = E::Fr::zero(); - let four = E::Fr::from(4u64); + let mut accumulator = F::zero(); + let four = F::from(4u64); // First we pad our gates by the necessary amount for i in 0..pad { @@ -152,7 +151,7 @@ where // Compute the next accumulator term accumulator = four * accumulator; - accumulator += E::Fr::from(quad); + accumulator += F::from(quad); let accumulator_var = self.add_input(accumulator); accumulators.push(accumulator_var); @@ -161,8 +160,8 @@ where } // Set the selector polynomials for all of the gates we used - let zeros = vec![E::Fr::zero(); used_gates]; - let ones = vec![E::Fr::one(); used_gates]; + let zeros = vec![F::zero(); used_gates]; + let ones = vec![F::one(); used_gates]; self.q_m.extend(zeros.iter()); self.q_l.extend(zeros.iter()); @@ -181,7 +180,7 @@ where // last gate Remember; it will contain one quad in the fourth // wire, which will be used in the gate before it // Furthermore, we set the left, right and output wires to zero - *self.q_range.last_mut().unwrap() = E::Fr::zero(); + *self.q_range.last_mut().unwrap() = F::zero(); self.w_l.push(self.zero_var); self.w_r.push(self.zero_var); self.w_o.push(self.zero_var); @@ -198,20 +197,25 @@ where #[cfg(test)] mod test { use super::*; - use crate::{batch_test, constraint_system::helper::*}; + use crate::{ + batch_test, commitment::HomomorphicCommitment, + constraint_system::helper::*, + }; use ark_bls12_377::Bls12_377; use ark_bls12_381::Bls12_381; - - fn test_range_constraint() + use ark_ec::models::TEModelParameters; + use ark_ff::PrimeField; + fn test_range_constraint() where - E: PairingEngine, - P: TEModelParameters, + F: PrimeField, + P: TEModelParameters, + PC: HomomorphicCommitment, { // Should fail as the number is not 32 bits - let res = gadget_tester( - |composer: &mut StandardComposer| { - let witness = composer - .add_input(E::Fr::from((u32::max_value() as u64) + 1)); + let res = gadget_tester::( + |composer: &mut StandardComposer| { + let witness = + composer.add_input(F::from((u32::max_value() as u64) + 1)); composer.range_gate(witness, 32); }, 200, @@ -219,9 +223,9 @@ mod test { assert!(res.is_err()); // Should fail as number is greater than 32 bits - let res = gadget_tester( - |composer: &mut StandardComposer| { - let witness = composer.add_input(E::Fr::from(u64::max_value())); + let res = gadget_tester::( + |composer: &mut StandardComposer| { + let witness = composer.add_input(F::from(u64::max_value())); composer.range_gate(witness, 32); }, 200, @@ -229,9 +233,9 @@ mod test { assert!(res.is_err()); // Should pass as the number is within 34 bits - let res = gadget_tester( - |composer: &mut StandardComposer| { - let witness = composer.add_input(E::Fr::from(2u64.pow(34) - 1)); + let res = gadget_tester::( + |composer: &mut StandardComposer| { + let witness = composer.add_input(F::from(2u64.pow(34) - 1)); composer.range_gate(witness, 34); }, 200, @@ -239,16 +243,17 @@ mod test { assert!(res.is_ok()); } - fn test_odd_bit_range() + 
fn test_odd_bit_range() where - E: PairingEngine, - P: TEModelParameters, + F: PrimeField, + P: TEModelParameters, + PC: HomomorphicCommitment, { // Should fail as the number we we need a even number of bits - let _ok = gadget_tester( - |composer: &mut StandardComposer| { + let _ok = gadget_tester::( + |composer: &mut StandardComposer| { let witness = - composer.add_input(E::Fr::from(u32::max_value() as u64)); + composer.add_input(F::from(u32::max_value() as u64)); composer.range_gate(witness, 33); }, 200, @@ -260,8 +265,7 @@ mod test { [test_range_constraint], [test_odd_bit_range] => ( - Bls12_381, - ark_ed_on_bls12_381::EdwardsParameters + Bls12_381, ark_ed_on_bls12_381::EdwardsParameters ) ); @@ -270,8 +274,7 @@ mod test { [test_range_constraint], [test_odd_bit_range] => ( - Bls12_377, - ark_ed_on_bls12_377::EdwardsParameters + Bls12_377, ark_ed_on_bls12_377::EdwardsParameters ) ); } diff --git a/plonk-core/src/error.rs b/plonk-core/src/error.rs index c1e2fa51..ccb3eee7 100644 --- a/plonk-core/src/error.rs +++ b/plonk-core/src/error.rs @@ -15,8 +15,8 @@ pub enum Error { InvalidEvalDomainSize { /// Log size of the group log_size_of_group: u32, - /// Two adacity generated - adacity: u32, + /// Two adicity generated + adicity: u32, }, // Prover/Verifier errors @@ -42,7 +42,7 @@ pub enum Error { /// Polynomial Commitment errors PCError { /// Polynomial Commitment errors - error: ark_poly_commit::error::Error, + error: String, }, // KZG10 errors @@ -78,7 +78,23 @@ pub enum Error { impl From for Error { fn from(error: ark_poly_commit::error::Error) -> Self { - Self::PCError { error } + Self::PCError { + error: format!("Polynomial Commitment Error: {:?}", error), + } + } +} + +/// Convert an ark_poly_commit error +pub fn to_pc_error(error: PC::Error) -> Error +where + F: ark_ff::Field, + PC: ark_poly_commit::PolynomialCommitment< + F, + ark_poly::univariate::DensePolynomial, + >, +{ + Error::PCError { + error: format!("Polynomial Commitment Error: {:?}", error), } } @@ -87,12 +103,12 @@ impl std::fmt::Display for Error { match self { Self::InvalidEvalDomainSize { log_size_of_group, - adacity, + adicity, } => write!( f, - "Log-size of the EvaluationDomain group > TWO_ADACITY\ - Size: {:?} > TWO_ADACITY = {:?}", - log_size_of_group, adacity + "Log-size of the EvaluationDomain group > TWO_ADICITY\ + Size: {:?} > TWO_ADICITY = {:?}", + log_size_of_group, adicity ), Self::ProofVerificationError => { write!(f, "proof verification failed") diff --git a/plonk-core/src/lib.rs b/plonk-core/src/lib.rs index 1a3822eb..602dd43b 100644 --- a/plonk-core/src/lib.rs +++ b/plonk-core/src/lib.rs @@ -40,6 +40,7 @@ mod transcript; mod util; pub mod circuit; +pub mod commitment; pub mod constraint_system; pub mod error; pub mod prelude; diff --git a/plonk-core/src/permutation/constants.rs b/plonk-core/src/permutation/constants.rs index 4e0a8248..038c84ae 100644 --- a/plonk-core/src/permutation/constants.rs +++ b/plonk-core/src/permutation/constants.rs @@ -9,16 +9,16 @@ #![allow(non_snake_case)] -use ark_ff::PrimeField; +use ark_ff::FftField; -pub(crate) fn K1() -> F { +pub(crate) fn K1() -> F { F::from(7_u64) } -pub(crate) fn K2() -> F { +pub(crate) fn K2() -> F { F::from(13_u64) } -pub(crate) fn K3() -> F { +pub(crate) fn K3() -> F { F::from(17_u64) } diff --git a/plonk-core/src/permutation/mod.rs b/plonk-core/src/permutation/mod.rs index d0ceb24e..0e970fe9 100644 --- a/plonk-core/src/permutation/mod.rs +++ b/plonk-core/src/permutation/mod.rs @@ -9,35 +9,28 @@ pub(crate) mod constants; use 
crate::constraint_system::{Variable, WireData}; -use ark_ff::PrimeField; -use ark_poly::domain::{EvaluationDomain, GeneralEvaluationDomain}; -use ark_poly::{univariate::DensePolynomial, UVPolynomial}; +use ark_ff::FftField; +use ark_poly::{ + domain::{EvaluationDomain, GeneralEvaluationDomain}, + univariate::DensePolynomial, + UVPolynomial, +}; use constants::*; -use core::marker::PhantomData; use hashbrown::HashMap; use itertools::izip; -use rand_core::RngCore; +use rand::RngCore; /// Permutation provides the necessary state information and functions /// to create the permutation polynomial. In the literature, Z(X) is the /// "accumulator", this is what this codebase calls the permutation polynomial. #[derive(derivative::Derivative)] #[derivative(Debug)] -pub(crate) struct Permutation -where - F: PrimeField, -{ +pub(crate) struct Permutation { /// Maps a variable to the wires that it is associated to. pub variable_map: HashMap>, - - /// Type Parameter Marker - __: PhantomData, } -impl Permutation -where - F: PrimeField, -{ +impl Permutation { /// Creates a Permutation struct with an expected capacity of zero. pub fn new() -> Self { Permutation::with_capacity(0) @@ -47,7 +40,6 @@ where pub fn with_capacity(expected_size: usize) -> Self { Self { variable_map: HashMap::with_capacity(expected_size), - __: PhantomData, } } @@ -105,7 +97,6 @@ where let vec_wire_data = self.variable_map.get_mut(&var).unwrap(); vec_wire_data.push(wire_data); } - /// Performs shift by one permutation and computes `sigma_1`, `sigma_2` and /// `sigma_3`, `sigma_4` permutations from the variable maps. pub(super) fn compute_sigma_permutations( @@ -144,8 +135,9 @@ where sigmas } - - fn compute_permutation_lagrange( +} +impl Permutation { + fn compute_permutation_lagrange( &self, sigma_mapping: &[WireData], domain: &GeneralEvaluationDomain, @@ -179,7 +171,7 @@ where /// Computes the sigma polynomials which are used to build the permutation /// polynomial. - pub fn compute_sigma_polynomials( + pub fn compute_sigma_polynomials( &mut self, n: usize, domain: &GeneralEvaluationDomain, @@ -222,7 +214,7 @@ where } #[allow(dead_code)] - fn compute_slow_permutation_poly( + fn compute_slow_permutation_poly( &self, domain: &GeneralEvaluationDomain, w_l: I, @@ -412,7 +404,7 @@ where } #[allow(dead_code)] - fn compute_fast_permutation_poly( + fn compute_fast_permutation_poly( &self, domain: &GeneralEvaluationDomain, w_l: &[F], @@ -635,11 +627,17 @@ where // These are the formulas for the irreducible factors used in the product // argument - fn numerator_irreducible(root: F, w: F, k: F, beta: F, gamma: F) -> F { + fn numerator_irreducible( + root: F, + w: F, + k: F, + beta: F, + gamma: F, + ) -> F { w + beta * k * root + gamma } - fn denominator_irreducible( + fn denominator_irreducible( _root: F, w: F, sigma: F, @@ -652,7 +650,7 @@ where // This can be adapted into a general product argument // for any number of wires, with specific formulas defined // in the numerator_irreducible and denominator_irreducible functions - pub fn compute_permutation_poly( + pub fn compute_permutation_poly( &self, domain: &GeneralEvaluationDomain, wires: (&[F], &[F], &[F], &[F]), @@ -758,43 +756,41 @@ where /// The `bls_12-381` library does not provide a `random` method for `F`. /// We wil use this helper function to compensate. 
#[allow(dead_code)] -pub(crate) fn random_scalar(rng: &mut R) -> F { +pub(crate) fn random_scalar(rng: &mut R) -> F { F::rand(rng) } #[cfg(test)] mod test { use super::*; - use crate::batch_test; + use crate::{batch_test_field, batch_test_field_params}; use crate::{ constraint_system::StandardComposer, util::EvaluationDomainExt, }; use ark_bls12_377::Bls12_377; use ark_bls12_381::Bls12_381; - use ark_ff::Field; - use ark_ff::UniformRand; + use ark_ec::TEModelParameters; + use ark_ff::{Field, PrimeField}; use ark_poly::univariate::DensePolynomial; use ark_poly::Polynomial; - use num_traits::{One, Zero}; - // use rand::{rngs::StdRng, SeedableRng}; - use ark_ec::{PairingEngine, TEModelParameters}; - use rand_core::OsRng; - - fn test_multizip_permutation_poly< - E: PairingEngine, - P: TEModelParameters, - >() { - let mut cs: StandardComposer = - StandardComposer::with_expected_size(4); - - let zero = E::Fr::zero(); - let one = E::Fr::one(); + use rand::rngs::OsRng; + + fn test_multizip_permutation_poly() + where + F: PrimeField, + P: TEModelParameters, + { + let mut cs: StandardComposer = + StandardComposer::::with_expected_size(4); + + let zero = F::zero(); + let one = F::one(); let two = one + one; - let x1 = cs.add_input(E::Fr::from(4u64)); - let x2 = cs.add_input(E::Fr::from(12u64)); - let x3 = cs.add_input(E::Fr::from(8u64)); - let x4 = cs.add_input(E::Fr::from(3u64)); + let x1 = cs.add_input(F::from(4u64)); + let x2 = cs.add_input(F::from(12u64)); + let x3 = cs.add_input(F::from(8u64)); + let x4 = cs.add_input(F::from(3u64)); // x1 * x4 = x2 cs.poly_gate(x1, x4, x2, one, zero, zero, -one, zero, None); @@ -809,15 +805,15 @@ mod test { cs.poly_gate(x3, x4, x2, one, zero, zero, -two, zero, None); let domain = - GeneralEvaluationDomain::::new(cs.circuit_size()).unwrap(); - let pad = vec![E::Fr::zero(); domain.size() - cs.w_l.len()]; - let mut w_l_scalar: Vec = + GeneralEvaluationDomain::::new(cs.circuit_size()).unwrap(); + let pad = vec![F::zero(); domain.size() - cs.w_l.len()]; + let mut w_l_scalar: Vec = cs.w_l.iter().map(|v| cs.variables[v]).collect(); - let mut w_r_scalar: Vec = + let mut w_r_scalar: Vec = cs.w_r.iter().map(|v| cs.variables[v]).collect(); - let mut w_o_scalar: Vec = + let mut w_o_scalar: Vec = cs.w_o.iter().map(|v| cs.variables[v]).collect(); - let mut w_4_scalar: Vec = + let mut w_4_scalar: Vec = cs.w_4.iter().map(|v| cs.variables[v]).collect(); w_l_scalar.extend(&pad); @@ -825,17 +821,17 @@ mod test { w_o_scalar.extend(&pad); w_4_scalar.extend(&pad); - let sigmas: Vec> = cs + let sigmas: Vec> = cs .perm .compute_sigma_permutations(7) .iter() .map(|wd| cs.perm.compute_permutation_lagrange(wd, &domain)) .collect(); - let beta = E::Fr::rand(&mut OsRng); - let gamma = E::Fr::rand(&mut OsRng); + let beta = F::rand(&mut OsRng); + let gamma = F::rand(&mut OsRng); - let sigma_polys: Vec> = sigmas + let sigma_polys: Vec> = sigmas .iter() .map(|v| DensePolynomial::from_coefficients_vec(domain.ifft(v))) .collect(); @@ -871,14 +867,13 @@ mod test { ), )); - assert!(mz == old_z); + assert_eq!(mz, old_z); } - fn test_permutation_format< - E: PairingEngine, - P: TEModelParameters, - >() { - let mut perm: Permutation = Permutation::new(); + #[test] + #[allow(non_snake_case)] + fn test_permutation_format() { + let mut perm: Permutation = Permutation::new(); let num_variables = 10u8; for i in 0..num_variables { @@ -909,11 +904,8 @@ mod test { } } - fn test_permutation_compute_sigmas_only_left_wires< - E: PairingEngine, - P: TEModelParameters, - >() { - let mut perm = 
Permutation::::new(); + fn test_permutation_compute_sigmas_only_left_wires() { + let mut perm = Permutation::new(); let var_zero = perm.new_variable(); let var_two = perm.new_variable(); @@ -979,7 +971,7 @@ mod test { assert_eq!(fourth_sigma[3], WireData::Fourth(0)); let domain = - GeneralEvaluationDomain::::new(num_wire_mappings).unwrap(); + GeneralEvaluationDomain::::new(num_wire_mappings).unwrap(); let w = domain.group_gen(); let w_squared = w.pow(&[2, 0, 0, 0]); let w_cubed = w.pow(&[3, 0, 0, 0]); @@ -989,10 +981,10 @@ mod test { // Should turn into {1 * K1, w^2, w^3, 1} let encoded_left_sigma = perm.compute_permutation_lagrange(left_sigma, &domain); - assert_eq!(encoded_left_sigma[0], E::Fr::one() * K1::()); + assert_eq!(encoded_left_sigma[0], F::one() * K1::()); assert_eq!(encoded_left_sigma[1], w_squared); assert_eq!(encoded_left_sigma[2], w_cubed); - assert_eq!(encoded_left_sigma[3], E::Fr::one()); + assert_eq!(encoded_left_sigma[3], F::one()); // Check the right sigmas have been encoded properly // Right_sigma = {L1, R1, R2, R3} @@ -1000,9 +992,9 @@ mod test { let encoded_right_sigma = perm.compute_permutation_lagrange(right_sigma, &domain); assert_eq!(encoded_right_sigma[0], w); - assert_eq!(encoded_right_sigma[1], w * K1::()); - assert_eq!(encoded_right_sigma[2], w_squared * K1::()); - assert_eq!(encoded_right_sigma[3], w_cubed * K1::()); + assert_eq!(encoded_right_sigma[1], w * K1::()); + assert_eq!(encoded_right_sigma[2], w_squared * K1::()); + assert_eq!(encoded_right_sigma[3], w_cubed * K1::()); // Check the output sigmas have been encoded properly // Out_sigma = {O0, O1, O2, O3} @@ -1010,31 +1002,26 @@ mod test { let encoded_output_sigma = perm.compute_permutation_lagrange(out_sigma, &domain); - assert_eq!(encoded_output_sigma[0], E::Fr::one() * K2::()); - assert_eq!(encoded_output_sigma[1], w * K2::()); - assert_eq!(encoded_output_sigma[2], w_squared * K2::()); - assert_eq!(encoded_output_sigma[3], w_cubed * K2::()); + assert_eq!(encoded_output_sigma[0], F::one() * K2::()); + assert_eq!(encoded_output_sigma[1], w * K2::()); + assert_eq!(encoded_output_sigma[2], w_squared * K2::()); + assert_eq!(encoded_output_sigma[3], w_cubed * K2::()); // Check the fourth sigmas have been encoded properly // Out_sigma = {F1, F2, F3, F0} // Should turn into {w * K3, w^2 * K3, w^3 * K3, 1 * K3} let encoded_fourth_sigma = perm.compute_permutation_lagrange(fourth_sigma, &domain); - assert_eq!(encoded_fourth_sigma[0], w * K3::()); - assert_eq!(encoded_fourth_sigma[1], w_squared * K3::()); - assert_eq!(encoded_fourth_sigma[2], w_cubed * K3::()); + assert_eq!(encoded_fourth_sigma[0], w * K3::()); + assert_eq!(encoded_fourth_sigma[1], w_squared * K3::()); + assert_eq!(encoded_fourth_sigma[2], w_cubed * K3::()); assert_eq!(encoded_fourth_sigma[3], K3()); - let w_l = vec![ - E::Fr::from(2u64), - E::Fr::from(2u64), - E::Fr::from(2u64), - E::Fr::from(2u64), - ]; - let w_r = - vec![E::Fr::from(2_u64), E::Fr::one(), E::Fr::one(), E::Fr::one()]; - let w_o = vec![E::Fr::one(), E::Fr::one(), E::Fr::one(), E::Fr::one()]; - let w_4 = vec![E::Fr::one(), E::Fr::one(), E::Fr::one(), E::Fr::one()]; + let w_l = + vec![F::from(2u64), F::from(2u64), F::from(2u64), F::from(2u64)]; + let w_r = vec![F::from(2_u64), F::one(), F::one(), F::one()]; + let w_o = vec![F::one(), F::one(), F::one(), F::one()]; + let w_4 = vec![F::one(), F::one(), F::one(), F::one()]; test_correct_permutation_poly( num_wire_mappings, @@ -1046,11 +1033,8 @@ mod test { w_4, ); } - fn test_permutation_compute_sigmas< - E: PairingEngine, - P: 
TEModelParameters, - >() { - let mut perm: Permutation = Permutation::new(); + fn test_permutation_compute_sigmas() { + let mut perm: Permutation = Permutation::new(); let var_one = perm.new_variable(); let var_two = perm.new_variable(); @@ -1118,7 +1102,7 @@ mod test { When encoded using w, K1, K2,K3 we have {w * K3, w^2 * K3, w^3 * K3, 1 * K3} */ let domain = - GeneralEvaluationDomain::::new(num_wire_mappings).unwrap(); + GeneralEvaluationDomain::::new(num_wire_mappings).unwrap(); let w = domain.group_gen(); let w_squared = w.pow(&[2, 0, 0, 0]); let w_cubed = w.pow(&[3, 0, 0, 0]); @@ -1126,43 +1110,40 @@ mod test { let encoded_left_sigma = perm.compute_permutation_lagrange(left_sigma, &domain); assert_eq!(encoded_left_sigma[0], K1()); - assert_eq!(encoded_left_sigma[1], w * K2::()); - assert_eq!(encoded_left_sigma[2], w_squared * K1::()); - assert_eq!(encoded_left_sigma[3], E::Fr::one() * K2::()); + assert_eq!(encoded_left_sigma[1], w * K2::()); + assert_eq!(encoded_left_sigma[2], w_squared * K1::()); + assert_eq!(encoded_left_sigma[3], F::one() * K2::()); // check the right sigmas have been encoded properly let encoded_right_sigma = perm.compute_permutation_lagrange(right_sigma, &domain); - assert_eq!(encoded_right_sigma[0], w * K1::()); - assert_eq!(encoded_right_sigma[1], w_squared * K2::()); - assert_eq!(encoded_right_sigma[2], w_cubed * K2::()); - assert_eq!(encoded_right_sigma[3], E::Fr::one()); + assert_eq!(encoded_right_sigma[0], w * K1::()); + assert_eq!(encoded_right_sigma[1], w_squared * K2::()); + assert_eq!(encoded_right_sigma[2], w_cubed * K2::()); + assert_eq!(encoded_right_sigma[3], F::one()); // check the output sigmas have been encoded properly let encoded_output_sigma = perm.compute_permutation_lagrange(out_sigma, &domain); assert_eq!(encoded_output_sigma[0], w); assert_eq!(encoded_output_sigma[1], w_cubed); - assert_eq!(encoded_output_sigma[2], w_cubed * K1::()); + assert_eq!(encoded_output_sigma[2], w_cubed * K1::()); assert_eq!(encoded_output_sigma[3], w_squared); // check the fourth sigmas have been encoded properly let encoded_fourth_sigma = perm.compute_permutation_lagrange(fourth_sigma, &domain); - assert_eq!(encoded_fourth_sigma[0], w * K3::()); - assert_eq!(encoded_fourth_sigma[1], w_squared * K3::()); - assert_eq!(encoded_fourth_sigma[2], w_cubed * K3::()); + assert_eq!(encoded_fourth_sigma[0], w * K3::()); + assert_eq!(encoded_fourth_sigma[1], w_squared * K3::()); + assert_eq!(encoded_fourth_sigma[2], w_cubed * K3::()); assert_eq!(encoded_fourth_sigma[3], K3()); } - fn test_basic_slow_permutation_poly< - E: PairingEngine, - P: TEModelParameters, - >() { + fn test_basic_slow_permutation_poly() { let num_wire_mappings = 2; let mut perm = Permutation::new(); let domain = - GeneralEvaluationDomain::::new(num_wire_mappings).unwrap(); + GeneralEvaluationDomain::::new(num_wire_mappings).unwrap(); let var_one = perm.new_variable(); let var_two = perm.new_variable(); @@ -1172,10 +1153,10 @@ mod test { perm.add_variables_to_map(var_one, var_two, var_three, var_four, 0); perm.add_variables_to_map(var_three, var_two, var_one, var_four, 1); - let w_l = vec![E::Fr::one(), E::Fr::from(3u64)]; - let w_r = vec![E::Fr::from(2u64), E::Fr::from(2u64)]; - let w_o = vec![E::Fr::from(3u64), E::Fr::one()]; - let w_4 = vec![E::Fr::one(), E::Fr::one()]; + let w_l = vec![F::one(), F::from(3u64)]; + let w_r = vec![F::from(2u64), F::from(2u64)]; + let w_o = vec![F::from(3u64), F::one()]; + let w_4 = vec![F::one(), F::one()]; test_correct_permutation_poly( num_wire_mappings, @@ -1189,16 
+1170,16 @@ mod test { } // shifts the polynomials by one root of unity - fn shift_poly_by_one(z_coefficients: Vec) -> Vec { + fn shift_poly_by_one(z_coefficients: Vec) -> Vec { let mut shifted_z_coefficients = z_coefficients; shifted_z_coefficients.push(shifted_z_coefficients[0]); shifted_z_coefficients.remove(0); shifted_z_coefficients } - fn test_correct_permutation_poly( + fn test_correct_permutation_poly( n: usize, - mut perm: Permutation, + mut perm: Permutation, domain: &GeneralEvaluationDomain, w_l: Vec, w_r: Vec, @@ -1336,32 +1317,48 @@ mod test { } // Test on Bls12-381 - batch_test!( - [test_multizip_permutation_poly, - test_permutation_format, - test_permutation_compute_sigmas_only_left_wires, + batch_test_field!( + [test_permutation_compute_sigmas_only_left_wires, test_permutation_compute_sigmas, test_basic_slow_permutation_poly ], [] => ( - Bls12_381, - ark_ed_on_bls12_381::EdwardsParameters + Bls12_381 ) ); // Test on Bls12-377 - batch_test!( - [test_multizip_permutation_poly, - test_permutation_format, - test_permutation_compute_sigmas_only_left_wires, + batch_test_field!( + [test_permutation_compute_sigmas_only_left_wires, test_permutation_compute_sigmas, test_basic_slow_permutation_poly ], [] => ( - Bls12_377, - ark_ed_on_bls12_377::EdwardsParameters + Bls12_377 + ) + ); + + // Test on Bls12-381 + batch_test_field_params!( + [test_multizip_permutation_poly + ], + [] + => ( + Bls12_381, + ark_ed_on_bls12_381::EdwardsParameters + ) + ); + + // Test on Bls12-377 + batch_test_field_params!( + [test_multizip_permutation_poly + ], + [] + => ( + Bls12_377, + ark_ed_on_bls12_377::EdwardsParameters ) ); } diff --git a/plonk-core/src/prelude.rs b/plonk-core/src/prelude.rs index 74743970..71b24fc7 100644 --- a/plonk-core/src/prelude.rs +++ b/plonk-core/src/prelude.rs @@ -11,8 +11,8 @@ pub use crate::{ circuit::{ - self, verify_proof, Circuit, FeIntoPubInput, GeIntoPubInput, - PublicInputValue, VerifierData, + self, verify_proof, Circuit, GeIntoPubInput, PublicInputValue, + VerifierData, }, constraint_system::{ecc::Point, StandardComposer, Variable}, error::Error, diff --git a/plonk-core/src/proof_system/linearisation_poly.rs b/plonk-core/src/proof_system/linearisation_poly.rs index 926344b8..58b9e910 100644 --- a/plonk-core/src/proof_system/linearisation_poly.rs +++ b/plonk-core/src/proof_system/linearisation_poly.rs @@ -4,19 +4,22 @@ // // Copyright (c) DUSK NETWORK. All rights reserved. -use crate::proof_system::ecc::{CurveAddition, FixedBaseScalarMul}; -use crate::proof_system::logic::Logic; -use crate::proof_system::range::Range; -use crate::proof_system::widget::GateConstraint; -use crate::proof_system::GateValues; -use crate::proof_system::ProverKey; -use crate::util::EvaluationDomainExt; +use crate::{ + error::Error, + proof_system::{ + ecc::{CurveAddition, FixedBaseScalarMul}, + logic::Logic, + range::Range, + widget::GateConstraint, + GateValues, ProverKey, + }, + util::EvaluationDomainExt, +}; use ark_ec::TEModelParameters; -use ark_ff::Field; -use ark_ff::PrimeField; -use ark_poly::EvaluationDomain; +use ark_ff::{FftField, Field}; use ark_poly::{ - univariate::DensePolynomial, GeneralEvaluationDomain, Polynomial, + univariate::DensePolynomial, EvaluationDomain, GeneralEvaluationDomain, + Polynomial, }; use ark_serialize::{ CanonicalDeserialize, CanonicalSerialize, Read, SerializationError, Write, @@ -28,7 +31,7 @@ use ark_serialize::{ /// * w` where `w` is a root of unit. 
pub struct Evaluations where - F: PrimeField, + F: Field, { /// Proof-relevant Evaluations pub proof: ProofEvaluations, @@ -101,7 +104,7 @@ where /// Compute the linearisation polynomial. pub fn compute( domain: &GeneralEvaluationDomain, - prover_key: &ProverKey, + prover_key: &ProverKey, alpha: &F, beta: &F, gamma: &F, @@ -116,9 +119,9 @@ pub fn compute( w_4_poly: &DensePolynomial, t_x_poly: &DensePolynomial, z_poly: &DensePolynomial, -) -> (DensePolynomial, Evaluations) +) -> Result<(DensePolynomial, Evaluations), Error> where - F: PrimeField, + F: FftField, P: TEModelParameters, { let quot_eval = t_x_poly.evaluate(z_challenge); @@ -145,7 +148,7 @@ where let d_next_eval = w_4_poly.evaluate(&shifted_z_challenge); let permutation_eval = z_poly.evaluate(&shifted_z_challenge); - let gate_constraints = compute_gate_constraint_satisfiability( + let gate_constraints = compute_gate_constraint_satisfiability::( range_separation_challenge, logic_separation_challenge, fixed_base_separation_challenge, @@ -172,13 +175,13 @@ where (left_sigma_eval, right_sigma_eval, out_sigma_eval), permutation_eval, z_poly, - ); + )?; let linearisation_polynomial = gate_constraints + permutation; let linearisation_polynomial_eval = linearisation_polynomial.evaluate(z_challenge); - ( + Ok(( linearisation_polynomial, Evaluations { proof: ProofEvaluations { @@ -201,7 +204,7 @@ where }, quot_eval, }, - ) + )) } /// Computes the gate constraint satisfiability portion of the linearisation @@ -222,10 +225,10 @@ fn compute_gate_constraint_satisfiability( q_c_eval: F, q_l_eval: F, q_r_eval: F, - prover_key: &ProverKey, + prover_key: &ProverKey, ) -> DensePolynomial where - F: PrimeField, + F: FftField, P: TEModelParameters, { let values = GateValues { @@ -248,7 +251,6 @@ where d_eval, q_arith_eval, ); - let range = Range::linearisation_term( &prover_key.range_selector.0, *range_separation_challenge, @@ -261,13 +263,13 @@ where values, ); - let fixed_base_scalar_mul = FixedBaseScalarMul::<_, P>::linearisation_term( + let fixed_base_scalar_mul = FixedBaseScalarMul::::linearisation_term( &prover_key.fixed_group_add_selector.0, *fixed_base_separation_challenge, values, ); - let curve_addition = CurveAddition::<_, P>::linearisation_term( + let curve_addition = CurveAddition::::linearisation_term( &prover_key.variable_group_add_selector.0, *var_base_separation_challenge, values, diff --git a/plonk-core/src/proof_system/permutation.rs b/plonk-core/src/proof_system/permutation.rs index 7e2b8a94..c446f7a9 100644 --- a/plonk-core/src/proof_system/permutation.rs +++ b/plonk-core/src/proof_system/permutation.rs @@ -6,14 +6,17 @@ //! 
PLONK Permutation Prover and Verifier Data -use crate::permutation::constants::{K1, K2, K3}; -use crate::proof_system::linearisation_poly::ProofEvaluations; -use ark_ec::PairingEngine; -use ark_ff::Field; -use ark_ff::PrimeField; -use ark_poly::polynomial::univariate::DensePolynomial; -use ark_poly::{EvaluationDomain, Evaluations, GeneralEvaluationDomain}; -use ark_poly_commit::sonic_pc::Commitment; +use crate::{ + error::Error, + permutation::constants::{K1, K2, K3}, + proof_system::linearisation_poly::ProofEvaluations, +}; +use ark_ff::FftField; +use ark_poly::{ + polynomial::univariate::DensePolynomial, EvaluationDomain, Evaluations, + GeneralEvaluationDomain, +}; +use ark_poly_commit::PCCommitment; use ark_serialize::*; /// Permutation Prover Key @@ -26,7 +29,7 @@ use ark_serialize::*; )] pub struct ProverKey where - F: PrimeField, + F: FftField, { /// Left Permutation pub left_sigma: (DensePolynomial, Evaluations), @@ -52,7 +55,7 @@ where impl ProverKey where - F: PrimeField, + F: FftField, { /// Computes the quotient polynomial at the `i`th domain point. pub fn compute_quotient_i( @@ -158,7 +161,7 @@ where (sigma_1_eval, sigma_2_eval, sigma_3_eval): (F, F, F), z_eval: F, z_poly: &DensePolynomial, - ) -> DensePolynomial { + ) -> Result, Error> { let a = self.compute_lineariser_identity_range_check( (a_eval, b_eval, c_eval, d_eval), z_challenge, @@ -174,14 +177,18 @@ where (alpha, beta, gamma), &self.fourth_sigma.0, ); - let domain = GeneralEvaluationDomain::new(n).unwrap(); + let domain = GeneralEvaluationDomain::new(n).ok_or(Error::InvalidEvalDomainSize { + log_size_of_group: n.trailing_zeros(), + adicity: + <::FftParams as ark_ff::FftParameters>::TWO_ADICITY, + })?; let c = self.compute_lineariser_check_is_one( &domain, z_challenge, alpha.square(), z_poly, ); - &(&a + &b) + &c + Ok(&(&a + &b) + &c) } /// Computes the following: @@ -290,41 +297,41 @@ where #[derive(CanonicalDeserialize, CanonicalSerialize, derivative::Derivative)] #[derivative( Clone(bound = ""), - Debug(bound = ""), - Eq(bound = ""), - PartialEq(bound = "") + Debug(bound = "PCC: std::fmt::Debug"), + Eq(bound = "PCC: Eq"), + PartialEq(bound = "PCC: PartialEq") )] -pub struct VerifierKey +pub struct VerifierKey where - E: PairingEngine, + PCC: PCCommitment + Default, { /// Left Permutation Commitment - pub left_sigma: Commitment, + pub left_sigma: PCC, /// Right Permutation Commitment - pub right_sigma: Commitment, + pub right_sigma: PCC, /// Output Permutation Commitment - pub out_sigma: Commitment, + pub out_sigma: PCC, /// Fourth Permutation Commitment - pub fourth_sigma: Commitment, + pub fourth_sigma: PCC, } -impl VerifierKey +impl VerifierKey where - E: PairingEngine, + PCC: PCCommitment + Default, { /// Computes the linearisation commitments. 
- pub fn compute_linearisation_commitment( + pub fn compute_linearisation_commitment( &self, - scalars: &mut Vec, - points: &mut Vec, - evaluations: &ProofEvaluations, - z_challenge: E::Fr, - (alpha, beta, gamma): (E::Fr, E::Fr, E::Fr), - l1_eval: E::Fr, - z_comm: E::G1Affine, + scalars: &mut Vec, + points: &mut Vec, + evaluations: &ProofEvaluations, + z_challenge: F, + (alpha, beta, gamma): (F, F, F), + l1_eval: F, + z_comm: PCC, ) { let alpha_sq = alpha.square(); @@ -335,13 +342,13 @@ where let beta_z = beta * z_challenge; let q_0 = evaluations.a_eval + beta_z + gamma; - let beta_k1_z = beta * K1::() * z_challenge; + let beta_k1_z = beta * K1::() * z_challenge; let q_1 = evaluations.b_eval + beta_k1_z + gamma; - let beta_k2_z = beta * K2::() * z_challenge; + let beta_k2_z = beta * K2::() * z_challenge; let q_2 = evaluations.c_eval + beta_k2_z + gamma; - let beta_k3_z = beta * K3::() * z_challenge; + let beta_k3_z = beta * K3::() * z_challenge; let q_3 = (evaluations.d_eval + beta_k3_z + gamma) * alpha; q_0 * q_1 * q_2 * q_3 @@ -372,6 +379,6 @@ where }; scalars.push(y); - points.push(self.fourth_sigma.0); + points.push(self.fourth_sigma.clone()); } } diff --git a/plonk-core/src/proof_system/preprocess.rs b/plonk-core/src/proof_system/preprocess.rs index 1b652533..98e3fc26 100644 --- a/plonk-core/src/proof_system/preprocess.rs +++ b/plonk-core/src/proof_system/preprocess.rs @@ -6,16 +6,21 @@ //! Methods to preprocess the constraint system for use in a proof. -use crate::constraint_system::StandardComposer; -use crate::error::Error; -use crate::proof_system::{widget, ProverKey}; -use crate::transcript::TranscriptWrapper; -use ark_ec::{PairingEngine, TEModelParameters}; -use ark_ff::PrimeField; -use ark_poly::{polynomial::univariate::DensePolynomial, UVPolynomial}; -use ark_poly::{EvaluationDomain, Evaluations, GeneralEvaluationDomain}; -use ark_poly_commit::kzg10::{Powers, KZG10}; -use num_traits::{One, Zero}; +use crate::{ + commitment::HomomorphicCommitment, + constraint_system::StandardComposer, + error::{to_pc_error, Error}, + label_polynomial, + proof_system::{widget, ProverKey}, +}; +use ark_ec::TEModelParameters; +use ark_ff::{FftField, PrimeField}; +use ark_poly::{ + polynomial::univariate::DensePolynomial, EvaluationDomain, Evaluations, + GeneralEvaluationDomain, UVPolynomial, +}; +use core::marker::PhantomData; +use merlin::Transcript; /// Struct that contains all of the selector and permutation [`Polynomial`]s in /// PLONK. @@ -23,7 +28,7 @@ use num_traits::{One, Zero}; /// [`Polynomial`]: DensePolynomial pub struct SelectorPolynomials where - F: PrimeField, + F: FftField, { q_m: DensePolynomial, q_l: DensePolynomial, @@ -42,10 +47,10 @@ where fourth_sigma: DensePolynomial, } -impl StandardComposer +impl StandardComposer where - E: PairingEngine, - P: TEModelParameters, + F: PrimeField, + P: TEModelParameters, { /// Pads the circuit to the next power of two. /// @@ -53,7 +58,7 @@ where /// `diff` is the difference between circuit size and next power of two. 
fn pad(&mut self, diff: usize) { // Add a zero variable to circuit - let zero_scalar = E::Fr::zero(); + let zero_scalar = F::zero(); let zero_var = self.zero_var(); let zeroes_scalar = vec![zero_scalar; diff]; @@ -104,21 +109,34 @@ where Err(Error::MismatchedPolyLen) } } - +} +impl StandardComposer +where + F: PrimeField, + P: TEModelParameters, +{ /// These are the parts of preprocessing that the prover must compute /// Although the prover does not need the verification key, he must compute /// the commitments in order to seed the transcript, allowing both the /// prover and verifier to have the same view - pub fn preprocess_prover( + pub fn preprocess_prover( &mut self, - commit_key: &Powers, - transcript: &mut TranscriptWrapper, - ) -> Result, Error> { + commit_key: &PC::CommitterKey, + transcript: &mut Transcript, + _pc: PhantomData, + ) -> Result, Error> + where + PC: HomomorphicCommitment, + { let (_, selectors, domain) = - self.preprocess_shared(commit_key, transcript)?; + self.preprocess_shared(commit_key, transcript, _pc)?; let domain_8n = - GeneralEvaluationDomain::new(8 * domain.size()).unwrap(); + GeneralEvaluationDomain::new(8 * domain.size()).ok_or(Error::InvalidEvalDomainSize { + log_size_of_group: (8 * domain.size()).trailing_zeros(), + adicity: + <::FftParams as ark_ff::FftParameters>::TWO_ADICITY, + })?; let q_m_eval_8n = Evaluations::from_vec_and_domain( domain_8n.coset_fft(&selectors.q_m), domain_8n, @@ -182,7 +200,7 @@ where ); // XXX: Remove this and compute it on the fly let linear_eval_8n = Evaluations::from_vec_and_domain( - domain_8n.coset_fft(&[E::Fr::zero(), E::Fr::one()]), + domain_8n.coset_fft(&[F::zero(), F::one()]), domain_8n, ); @@ -214,14 +232,18 @@ where /// The verifier only requires the commitments in order to verify a /// [`Proof`](super::Proof) We can therefore speed up preprocessing for the - /// verifier by skipping the FFTs needed to compute the 8n evaluations. - pub fn preprocess_verifier( + /// verifier by skipping the FFTs needed to compute the 4n evaluations. + pub fn preprocess_verifier( &mut self, - commit_key: &Powers, - transcript: &mut TranscriptWrapper, - ) -> Result, Error> { + commit_key: &PC::CommitterKey, + transcript: &mut Transcript, + _pc: PhantomData, + ) -> Result, Error> + where + PC: HomomorphicCommitment, + { let (verifier_key, _, _) = - self.preprocess_shared(commit_key, transcript)?; + self.preprocess_shared(commit_key, transcript, _pc)?; Ok(verifier_key) } @@ -230,59 +252,66 @@ where /// polynomials in order to commit to them and have the same transcript /// view. #[allow(clippy::type_complexity)] // FIXME: Add struct for prover side (last two tuple items). - fn preprocess_shared( + fn preprocess_shared( &mut self, - commit_key: &Powers, - transcript: &mut TranscriptWrapper, + commit_key: &PC::CommitterKey, + transcript: &mut Transcript, + _pc: PhantomData, ) -> Result< ( - widget::VerifierKey, - SelectorPolynomials, - GeneralEvaluationDomain, + widget::VerifierKey, + SelectorPolynomials, + GeneralEvaluationDomain, ), Error, - > { - let domain = GeneralEvaluationDomain::new(self.circuit_size()).unwrap(); - + > + where + PC: HomomorphicCommitment, + { + let domain = GeneralEvaluationDomain::new(self.circuit_size()).ok_or(Error::InvalidEvalDomainSize { + log_size_of_group: (self.circuit_size()).trailing_zeros(), + adicity: + <::FftParams as ark_ff::FftParameters>::TWO_ADICITY, + })?; // Check that the length of the wires is consistent. self.check_poly_same_len()?; // 1. 
Pad circuit to a power of two self.pad(domain.size() as usize - self.n); - let q_m_poly: DensePolynomial = + let q_m_poly: DensePolynomial = DensePolynomial::from_coefficients_vec(domain.ifft(&self.q_m)); - let q_r_poly: DensePolynomial = + let q_r_poly: DensePolynomial = DensePolynomial::from_coefficients_vec(domain.ifft(&self.q_r)); - let q_l_poly: DensePolynomial = + let q_l_poly: DensePolynomial = DensePolynomial::from_coefficients_vec(domain.ifft(&self.q_l)); - let q_o_poly: DensePolynomial = + let q_o_poly: DensePolynomial = DensePolynomial::from_coefficients_vec(domain.ifft(&self.q_o)); - let q_c_poly: DensePolynomial = + let q_c_poly: DensePolynomial = DensePolynomial::from_coefficients_vec(domain.ifft(&self.q_c)); - let q_4_poly: DensePolynomial = + let q_4_poly: DensePolynomial = DensePolynomial::from_coefficients_vec(domain.ifft(&self.q_4)); - let q_arith_poly: DensePolynomial = + let q_arith_poly: DensePolynomial = DensePolynomial::from_coefficients_vec(domain.ifft(&self.q_arith)); - let q_range_poly: DensePolynomial = + let q_range_poly: DensePolynomial = DensePolynomial::from_coefficients_vec(domain.ifft(&self.q_range)); - let q_logic_poly: DensePolynomial = + let q_logic_poly: DensePolynomial = DensePolynomial::from_coefficients_vec(domain.ifft(&self.q_logic)); - let q_fixed_group_add_poly: DensePolynomial = + let q_fixed_group_add_poly: DensePolynomial = DensePolynomial::from_coefficients_vec( domain.ifft(&self.q_fixed_group_add), ); - let q_variable_group_add_poly: DensePolynomial = + let q_variable_group_add_poly: DensePolynomial = DensePolynomial::from_coefficients_vec( domain.ifft(&self.q_variable_group_add), ); @@ -295,115 +324,47 @@ where fourth_sigma_poly, ) = self.perm.compute_sigma_polynomials(self.n, &domain); - let q_m_poly_commit = KZG10::>::commit( - commit_key, &q_m_poly, None, None, - )?; - - let q_l_poly_commit = KZG10::>::commit( - commit_key, &q_l_poly, None, None, - )?; - - let q_r_poly_commit = KZG10::>::commit( - commit_key, &q_r_poly, None, None, - )?; - - let q_o_poly_commit = KZG10::>::commit( - commit_key, &q_o_poly, None, None, - )?; - - let q_c_poly_commit = KZG10::>::commit( - commit_key, &q_c_poly, None, None, - )?; - - let q_4_poly_commit = KZG10::>::commit( - commit_key, &q_4_poly, None, None, - )?; - - let q_arith_poly_commit = KZG10::>::commit( + let (commitments, _) = PC::commit( commit_key, - &q_arith_poly, - None, + [ + label_polynomial!(q_m_poly), + label_polynomial!(q_l_poly), + label_polynomial!(q_r_poly), + label_polynomial!(q_o_poly), + label_polynomial!(q_4_poly), + label_polynomial!(q_c_poly), + label_polynomial!(q_arith_poly), + label_polynomial!(q_range_poly), + label_polynomial!(q_logic_poly), + label_polynomial!(q_fixed_group_add_poly), + label_polynomial!(q_variable_group_add_poly), + label_polynomial!(left_sigma_poly), + label_polynomial!(right_sigma_poly), + label_polynomial!(out_sigma_poly), + label_polynomial!(fourth_sigma_poly), + ] + .iter(), None, - )?; - - let q_range_poly_commit = KZG10::>::commit( - commit_key, - &q_range_poly, - None, - None, - )?; - - let q_logic_poly_commit = KZG10::>::commit( - commit_key, - &q_logic_poly, - None, - None, - )?; - - let q_fixed_group_add_poly_commit = - KZG10::>::commit( - commit_key, - &q_fixed_group_add_poly, - None, - None, - )?; - - let q_variable_group_add_poly_commit = - KZG10::>::commit( - commit_key, - &q_variable_group_add_poly, - None, - None, - )?; - - let left_sigma_poly_commit = - KZG10::>::commit( - commit_key, - &left_sigma_poly, - None, - None, - )?; - - let 
right_sigma_poly_commit = - KZG10::>::commit( - commit_key, - &right_sigma_poly, - None, - None, - )?; - - let out_sigma_poly_commit = KZG10::>::commit( - commit_key, - &out_sigma_poly, - None, - None, - )?; - - let fourth_sigma_poly_commit = - KZG10::>::commit( - commit_key, - &fourth_sigma_poly, - None, - None, - )?; + ) + .map_err(to_pc_error::)?; let verifier_key = widget::VerifierKey::from_polynomial_commitments( self.circuit_size(), - q_m_poly_commit.0, - q_l_poly_commit.0, - q_r_poly_commit.0, - q_o_poly_commit.0, - q_4_poly_commit.0, - q_c_poly_commit.0, - q_arith_poly_commit.0, - q_range_poly_commit.0, - q_logic_poly_commit.0, - q_fixed_group_add_poly_commit.0, - q_variable_group_add_poly_commit.0, - left_sigma_poly_commit.0, - right_sigma_poly_commit.0, - out_sigma_poly_commit.0, - fourth_sigma_poly_commit.0, + commitments[0].commitment().clone(), // q_m_poly_commit.0, + commitments[1].commitment().clone(), // q_l_poly_commit.0, + commitments[2].commitment().clone(), // q_r_poly_commit.0, + commitments[3].commitment().clone(), // q_o_poly_commit.0, + commitments[4].commitment().clone(), // q_4_poly_commit.0, + commitments[5].commitment().clone(), // q_c_poly_commit.0, + commitments[6].commitment().clone(), // q_arith_poly_commit.0, + commitments[7].commitment().clone(), // q_range_poly_commit.0, + commitments[8].commitment().clone(), // q_logic_poly_commit.0, + commitments[9].commitment().clone(), /* q_fixed_group_add_poly_commit.0, */ + commitments[10].commitment().clone(), /* q_variable_group_add_poly_commit.0, */ + commitments[11].commitment().clone(), // left_sigma_poly_commit.0, + commitments[12].commitment().clone(), // right_sigma_poly_commit.0, + commitments[13].commitment().clone(), // out_sigma_poly_commit.0, + commitments[14].commitment().clone(), /* fourth_sigma_poly_commit.0, */ ); let selectors = SelectorPolynomials { @@ -439,7 +400,7 @@ pub fn compute_vanishing_poly_over_coset( poly_degree: u64, // degree of the vanishing polynomial ) -> Evaluations where - F: PrimeField, + F: FftField, D: EvaluationDomain, { assert!( @@ -462,18 +423,18 @@ where #[cfg(test)] mod test { use super::*; - use crate::{batch_test, constraint_system::helper::*}; + use crate::{batch_test_field_params, constraint_system::helper::*}; use ark_bls12_377::Bls12_377; use ark_bls12_381::Bls12_381; /// Tests that the circuit gets padded to the correct length. // FIXME: We can do this test without dummy_gadget method. 
- fn test_pad() + fn test_pad() where - E: PairingEngine, - P: TEModelParameters, + F: PrimeField, + P: TEModelParameters, { - let mut composer: StandardComposer = StandardComposer::new(); + let mut composer: StandardComposer = StandardComposer::new(); dummy_gadget(100, &mut composer); // Pad the circuit to next power of two @@ -482,23 +443,23 @@ mod test { let size = composer.n; assert!(size.is_power_of_two()); - assert!(composer.q_m.len() == size); - assert!(composer.q_l.len() == size); - assert!(composer.q_o.len() == size); - assert!(composer.q_r.len() == size); - assert!(composer.q_c.len() == size); - assert!(composer.q_arith.len() == size); - assert!(composer.q_range.len() == size); - assert!(composer.q_logic.len() == size); - assert!(composer.q_fixed_group_add.len() == size); - assert!(composer.q_variable_group_add.len() == size); - assert!(composer.w_l.len() == size); - assert!(composer.w_r.len() == size); - assert!(composer.w_o.len() == size); + assert_eq!(composer.q_m.len(), size); + assert_eq!(composer.q_l.len(), size); + assert_eq!(composer.q_o.len(), size); + assert_eq!(composer.q_r.len(), size); + assert_eq!(composer.q_c.len(), size); + assert_eq!(composer.q_arith.len(), size); + assert_eq!(composer.q_range.len(), size); + assert_eq!(composer.q_logic.len(), size); + assert_eq!(composer.q_fixed_group_add.len(), size); + assert_eq!(composer.q_variable_group_add.len(), size); + assert_eq!(composer.w_l.len(), size); + assert_eq!(composer.w_r.len(), size); + assert_eq!(composer.w_o.len(), size); } // Bls12-381 tests - batch_test!( + batch_test_field_params!( [test_pad], [] => ( Bls12_381, @@ -507,7 +468,7 @@ mod test { ); // Bls12-377 tests - batch_test!( + batch_test_field_params!( [test_pad], [] => ( Bls12_377, diff --git a/plonk-core/src/proof_system/proof.rs b/plonk-core/src/proof_system/proof.rs index e021b17f..5d9b67df 100644 --- a/plonk-core/src/proof_system/proof.rs +++ b/plonk-core/src/proof_system/proof.rs @@ -10,29 +10,28 @@ //! This module contains the implementation of the `StandardComposer`s //! `Proof` structure and it's methods. 
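As a reading aid for the hunks below (a sketch of ours, not code from this diff): the two KZG opening commitments `w_z_comm` and `w_zw_comm` are replaced by scheme-agnostic batch openings `aw_opening` and `saw_opening`, and the verifier recombines the claimed evaluations, and likewise the commitments, with powers of a transcript challenge before a single `PC::check` per opening point. The toy modulus and `linear_combination` helper below are illustrative stand-ins for the scalar field and that aggregation step.

    // Hedged sketch of the powers-of-challenge aggregation used for batch openings.
    const MODULUS: u128 = 2_147_483_647; // 2^31 - 1, a toy stand-in for the scalar field

    fn linear_combination(values: &[u128], challenge: u128) -> u128 {
        let mut power = 1u128;
        let mut acc = 0u128;
        for &value in values {
            acc = (acc + value % MODULUS * power) % MODULUS;
            power = power * challenge % MODULUS;
        }
        acc
    }

    fn main() {
        // Claimed evaluations of t, the linearisation polynomial, sigmas, wires, ...
        let evals = [11u128, 7, 3, 42];
        let v = 5u128; // the `aggregate_witness` challenge drawn from the transcript
        // Commitments are combined with the same powers of `v`, so one opening
        // check at the evaluation challenge covers the whole batch.
        assert_eq!(linear_combination(&evals, v), 11 + 7 * 5 + 3 * 25 + 42 * 125);
    }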
-use crate::proof_system::ecc::CurveAddition; -use crate::proof_system::ecc::FixedBaseScalarMul; -use crate::proof_system::linearisation_poly::ProofEvaluations; -use crate::proof_system::logic::Logic; -use crate::proof_system::range::Range; -use crate::proof_system::GateConstraint; -use crate::proof_system::VerifierKey as PlonkVerifierKey; -use crate::transcript::TranscriptProtocol; -use crate::util; -use crate::util::EvaluationDomainExt; -use crate::{error::Error, transcript::TranscriptWrapper}; -use ark_ec::{msm::VariableBaseMSM, AffineCurve, TEModelParameters}; -use ark_ec::{PairingEngine, ProjectiveCurve}; -use ark_ff::{fields::batch_inversion, Field, PrimeField}; -use ark_poly::univariate::DensePolynomial; +use crate::{ + commitment::HomomorphicCommitment, + error::Error, + label_commitment, + proof_system::{ + ecc::{CurveAddition, FixedBaseScalarMul}, + linearisation_poly::ProofEvaluations, + logic::Logic, + range::Range, + GateConstraint, VerifierKey as PlonkVerifierKey, + }, + transcript::TranscriptProtocol, + util::EvaluationDomainExt, +}; +use ark_ec::TEModelParameters; + +use ark_ff::{fields::batch_inversion, FftField, PrimeField}; use ark_poly::{EvaluationDomain, GeneralEvaluationDomain}; -use ark_poly_commit::kzg10; -use ark_poly_commit::kzg10::{Commitment, VerifierKey, KZG10}; use ark_serialize::{ CanonicalDeserialize, CanonicalSerialize, Read, SerializationError, Write, }; -use core::marker::PhantomData; -use rand_core::OsRng; +use merlin::Transcript; /// A Proof is a composition of `Commitment`s to the Witness, Permutation, /// Quotient, Shifted and Opening polynomials as well as the @@ -46,73 +45,77 @@ use rand_core::OsRng; /// construct the Proof. #[derive(CanonicalDeserialize, CanonicalSerialize, derivative::Derivative)] #[derivative( - Clone(bound = ""), - Debug(bound = ""), - Default(bound = ""), - Eq(bound = ""), - PartialEq(bound = "") + Clone(bound = "PC::Commitment: Clone, PC::Proof: Clone"), + Debug( + bound = "PC::Commitment: std::fmt::Debug, PC::Proof: std::fmt::Debug" + ), + Default(bound = "PC::Commitment: Default, PC::Proof: Default"), + Eq(bound = "PC::Commitment: Eq, PC::Proof: Eq"), + PartialEq(bound = "PC::Commitment: PartialEq, PC::Proof: PartialEq") )] -pub struct Proof +pub struct Proof where - E: PairingEngine, - P: TEModelParameters, + F: PrimeField, + PC: HomomorphicCommitment, { /// Commitment to the witness polynomial for the left wires. - pub(crate) a_comm: Commitment, + pub(crate) a_comm: PC::Commitment, /// Commitment to the witness polynomial for the right wires. - pub(crate) b_comm: Commitment, + pub(crate) b_comm: PC::Commitment, /// Commitment to the witness polynomial for the output wires. - pub(crate) c_comm: Commitment, + pub(crate) c_comm: PC::Commitment, /// Commitment to the witness polynomial for the fourth wires. - pub(crate) d_comm: Commitment, + pub(crate) d_comm: PC::Commitment, /// Commitment to the permutation polynomial. - pub(crate) z_comm: Commitment, + pub(crate) z_comm: PC::Commitment, /// Commitment to the quotient polynomial. - pub(crate) t_1_comm: Commitment, + pub(crate) t_1_comm: PC::Commitment, /// Commitment to the quotient polynomial. - pub(crate) t_2_comm: Commitment, + pub(crate) t_2_comm: PC::Commitment, /// Commitment to the quotient polynomial. - pub(crate) t_3_comm: Commitment, + pub(crate) t_3_comm: PC::Commitment, /// Commitment to the quotient polynomial. - pub(crate) t_4_comm: Commitment, + pub(crate) t_4_comm: PC::Commitment, - /// Commitment to the opening proof polynomial. 
- pub(crate) w_z_comm: Commitment, + /// Batch opening proof of the aggregated witnesses + pub aw_opening: PC::Proof, - /// Commitment to the shifted opening proof polynomial. - pub(crate) w_zw_comm: Commitment, + /// Batch opening proof of the shifted aggregated witnesses + pub saw_opening: PC::Proof, /// Subset of all of the evaluations added to the proof. - pub(crate) evaluations: ProofEvaluations, - - /// Type Parameter Marker - pub(crate) __: PhantomData
<P>
, + pub(crate) evaluations: ProofEvaluations, } -impl Proof +impl Proof where - E: PairingEngine, - P: TEModelParameters, + F: PrimeField, + PC: HomomorphicCommitment, { /// Performs the verification of a [`Proof`] returning a boolean result. - pub(crate) fn verify( + pub(crate) fn verify
<P>
( &self, - plonk_verifier_key: &PlonkVerifierKey, - transcript: &mut TranscriptWrapper, - verifier_key: &VerifierKey, - pub_inputs: &[E::Fr], - ) -> Result<(), Error> { + plonk_verifier_key: &PlonkVerifierKey, + transcript: &mut Transcript, + verifier_key: &PC::VerifierKey, + pub_inputs: &[F], + ) -> Result<(), Error> + where + P: TEModelParameters, + { let domain = - GeneralEvaluationDomain::::new(plonk_verifier_key.n) - .unwrap(); + GeneralEvaluationDomain::::new(plonk_verifier_key.n).ok_or(Error::InvalidEvalDomainSize { + log_size_of_group: plonk_verifier_key.n.trailing_zeros(), + adicity: <::FftParams as ark_ff::FftParameters>::TWO_ADICITY, + })?; // Subgroup checks are done when the proof is deserialised. @@ -124,20 +127,20 @@ where // same challenges // // Add commitment to witness polynomials to transcript - transcript.append_commitment(b"w_l", &self.a_comm); - transcript.append_commitment(b"w_r", &self.b_comm); - transcript.append_commitment(b"w_o", &self.c_comm); - transcript.append_commitment(b"w_4", &self.d_comm); + transcript.append(b"w_l", &self.a_comm); + transcript.append(b"w_r", &self.b_comm); + transcript.append(b"w_o", &self.c_comm); + transcript.append(b"w_4", &self.d_comm); // Compute beta and gamma challenges let beta = transcript.challenge_scalar(b"beta"); - transcript.append_scalar(b"beta", &beta); + transcript.append(b"beta", &beta); let gamma = transcript.challenge_scalar(b"gamma"); assert!(beta != gamma, "challenges must be different"); // Add commitment to permutation polynomial to transcript - transcript.append_commitment(b"z", &self.z_comm); + transcript.append(b"z", &self.z_comm); // Compute quotient challenge let alpha = transcript.challenge_scalar(b"alpha"); @@ -151,10 +154,10 @@ where transcript.challenge_scalar(b"variable base separation challenge"); // Add commitment to quotient polynomial to transcript - transcript.append_commitment(b"t_1", &self.t_1_comm); - transcript.append_commitment(b"t_2", &self.t_2_comm); - transcript.append_commitment(b"t_3", &self.t_3_comm); - transcript.append_commitment(b"t_4", &self.t_4_comm); + transcript.append(b"t_1", &self.t_1_comm); + transcript.append(b"t_2", &self.t_2_comm); + transcript.append(b"t_3", &self.t_3_comm); + transcript.append(b"t_4", &self.t_4_comm); // Compute evaluation point challenge let z_challenge = transcript.challenge_scalar(b"z"); @@ -186,36 +189,28 @@ where self.compute_quotient_commitment(&z_challenge, domain.size()); // Add evaluations to transcript - transcript.append_scalar(b"a_eval", &self.evaluations.a_eval); - transcript.append_scalar(b"b_eval", &self.evaluations.b_eval); - transcript.append_scalar(b"c_eval", &self.evaluations.c_eval); - transcript.append_scalar(b"d_eval", &self.evaluations.d_eval); - transcript.append_scalar(b"a_next_eval", &self.evaluations.a_next_eval); - transcript.append_scalar(b"b_next_eval", &self.evaluations.b_next_eval); - transcript.append_scalar(b"d_next_eval", &self.evaluations.d_next_eval); + transcript.append(b"a_eval", &self.evaluations.a_eval); + transcript.append(b"b_eval", &self.evaluations.b_eval); + transcript.append(b"c_eval", &self.evaluations.c_eval); + transcript.append(b"d_eval", &self.evaluations.d_eval); + transcript.append(b"a_next_eval", &self.evaluations.a_next_eval); + transcript.append(b"b_next_eval", &self.evaluations.b_next_eval); + transcript.append(b"d_next_eval", &self.evaluations.d_next_eval); + transcript.append(b"left_sig_eval", &self.evaluations.left_sigma_eval); transcript - .append_scalar(b"left_sig_eval", 
&self.evaluations.left_sigma_eval); - transcript.append_scalar( - b"right_sig_eval", - &self.evaluations.right_sigma_eval, - ); - transcript - .append_scalar(b"out_sig_eval", &self.evaluations.out_sigma_eval); - transcript - .append_scalar(b"q_arith_eval", &self.evaluations.q_arith_eval); - transcript.append_scalar(b"q_c_eval", &self.evaluations.q_c_eval); - transcript.append_scalar(b"q_l_eval", &self.evaluations.q_l_eval); - transcript.append_scalar(b"q_r_eval", &self.evaluations.q_r_eval); + .append(b"right_sig_eval", &self.evaluations.right_sigma_eval); + transcript.append(b"out_sig_eval", &self.evaluations.out_sigma_eval); + transcript.append(b"q_arith_eval", &self.evaluations.q_arith_eval); + transcript.append(b"q_c_eval", &self.evaluations.q_c_eval); + transcript.append(b"q_l_eval", &self.evaluations.q_l_eval); + transcript.append(b"q_r_eval", &self.evaluations.q_r_eval); + transcript.append(b"perm_eval", &self.evaluations.permutation_eval); + transcript.append(b"t_eval", &t_eval); transcript - .append_scalar(b"perm_eval", &self.evaluations.permutation_eval); - transcript.append_scalar(b"t_eval", &t_eval); - transcript.append_scalar( - b"r_eval", - &self.evaluations.linearisation_polynomial_eval, - ); + .append(b"r_eval", &self.evaluations.linearisation_polynomial_eval); // Compute linearisation commitment - let r_comm = self.compute_linearisation_commitment( + let lin_comm = self.compute_linearisation_commitment::
<P>
( alpha, beta, gamma, @@ -243,117 +238,92 @@ where // Reconstruct the Aggregated Proof commitments and evals // The proof consists of the witness commitment with no blinder - let (aggregate_proof_commitment, aggregate_proof_eval) = self - .gen_aggregate_proof( - t_eval, - t_comm, - r_comm, - plonk_verifier_key, - transcript, - ); - let aggregate_proof = kzg10::Proof { - w: self.w_z_comm.0, - random_v: None, - }; - // Reconstruct the Aggregated Shift Proof commitments and evals - // The proof consists of the witness commitment with no blinder - let (aggregate_shift_proof_commitment, aggregate_shift_proof_eval) = - self.gen_shift_aggregate_proof(transcript); - - let aggregate_shift_proof = kzg10::Proof { - w: self.w_zw_comm.0, - random_v: None, - }; - - // Add commitment to openings to transcript - transcript.append_commitment(b"w_z", &self.w_z_comm); - transcript.append_commitment(b"w_z_w", &self.w_zw_comm); - - let group_gen = domain.group_gen(); - - match KZG10::<_, DensePolynomial<_>>::batch_check( + // Compute aggregate witness to polynomials evaluated at the evaluation + // challenge `z` + let aw_challenge: F = transcript.challenge_scalar(b"aggregate_witness"); + + let aw_commits = [ + label_commitment!(t_comm), + label_commitment!(lin_comm), + label_commitment!(plonk_verifier_key.permutation.left_sigma), + label_commitment!(plonk_verifier_key.permutation.right_sigma), + label_commitment!(plonk_verifier_key.permutation.out_sigma), + label_commitment!(self.a_comm), + label_commitment!(self.b_comm), + label_commitment!(self.c_comm), + label_commitment!(self.d_comm), + ]; + + let aw_evals = [ + t_eval, + self.evaluations.linearisation_polynomial_eval, + self.evaluations.left_sigma_eval, + self.evaluations.right_sigma_eval, + self.evaluations.out_sigma_eval, + self.evaluations.a_eval, + self.evaluations.b_eval, + self.evaluations.c_eval, + self.evaluations.d_eval, + ]; + + let saw_challenge: F = + transcript.challenge_scalar(b"aggregate_witness"); + + let saw_commits = [ + label_commitment!(self.z_comm), + label_commitment!(self.a_comm), + label_commitment!(self.b_comm), + label_commitment!(self.d_comm), + ]; + let saw_evals = [ + self.evaluations.permutation_eval, + self.evaluations.a_next_eval, + self.evaluations.b_next_eval, + self.evaluations.d_next_eval, + ]; + match PC::check( verifier_key, - &[aggregate_proof_commitment, aggregate_shift_proof_commitment], - &[z_challenge, (z_challenge * group_gen)], - &[aggregate_proof_eval, aggregate_shift_proof_eval], - &[aggregate_proof, aggregate_shift_proof], - &mut OsRng, + &aw_commits, + &z_challenge, + aw_evals, + &self.aw_opening, + aw_challenge, + None, ) { Ok(true) => Ok(()), Ok(false) => Err(Error::ProofVerificationError), Err(e) => panic!("{:?}", e), } + .and_then(|_| { + match PC::check( + verifier_key, + &saw_commits, + &(z_challenge * domain.element(1)), + saw_evals, + &self.saw_opening, + saw_challenge, + None, + ) { + Ok(true) => Ok(()), + Ok(false) => Err(Error::ProofVerificationError), + Err(e) => panic!("{:?}", e), + } + }) } - - // TODO: Doc this - fn gen_aggregate_proof( - &self, - t_eval: E::Fr, - t_comm: Commitment, - r_comm: Commitment, - plonk_verifier_key: &PlonkVerifierKey, - transcript: &mut TranscriptWrapper, - ) -> (Commitment, E::Fr) { - let challenge = transcript.challenge_scalar(b"aggregate_witness"); - util::linear_combination( - &[ - t_eval, - self.evaluations.linearisation_polynomial_eval, - self.evaluations.a_eval, - self.evaluations.b_eval, - self.evaluations.c_eval, - self.evaluations.d_eval, - 
self.evaluations.left_sigma_eval, - self.evaluations.right_sigma_eval, - self.evaluations.out_sigma_eval, - ], - &[ - t_comm, - r_comm, - self.a_comm, - self.b_comm, - self.c_comm, - self.d_comm, - plonk_verifier_key.permutation.left_sigma, - plonk_verifier_key.permutation.right_sigma, - plonk_verifier_key.permutation.out_sigma, - ], - challenge, - ) - } - - // TODO: Doc this - fn gen_shift_aggregate_proof( - &self, - transcript: &mut TranscriptWrapper, - ) -> (Commitment, E::Fr) { - let challenge = transcript.challenge_scalar(b"aggregate_witness"); - util::linear_combination( - &[ - self.evaluations.permutation_eval, - self.evaluations.a_next_eval, - self.evaluations.b_next_eval, - self.evaluations.d_next_eval, - ], - &[self.z_comm, self.a_comm, self.b_comm, self.d_comm], - challenge, - ) - } - // TODO: Doc this fn compute_quotient_evaluation( &self, - domain: &GeneralEvaluationDomain, - pub_inputs: &[E::Fr], - alpha: E::Fr, - beta: E::Fr, - gamma: E::Fr, - z_challenge: E::Fr, - z_h_eval: E::Fr, - l1_eval: E::Fr, - z_hat_eval: E::Fr, - ) -> E::Fr { + domain: &GeneralEvaluationDomain, + pub_inputs: &[F], + alpha: F, + beta: F, + gamma: F, + z_challenge: F, + z_h_eval: F, + l1_eval: F, + z_hat_eval: F, + ) -> F { // Compute the public input polynomial evaluated at `z_challenge` let pi_eval = compute_barycentric_eval(pub_inputs, z_challenge, domain); @@ -384,38 +354,45 @@ where // Return t_eval (a - b - c) * z_h_eval.inverse().unwrap() } - /// Computes the quotient polynomial commitment at `z_challenge`. fn compute_quotient_commitment( &self, - z_challenge: &E::Fr, + z_challenge: &F, n: usize, - ) -> Commitment { + ) -> PC::Commitment { let n = n as u64; let z_n = z_challenge.pow(&[n, 0, 0, 0]); let z_two_n = z_challenge.pow(&[2 * n, 0, 0, 0]); let z_three_n = z_challenge.pow(&[3 * n, 0, 0, 0]); - let t_comm = self.t_1_comm.0.into_projective() - + self.t_2_comm.0.mul(z_n.into_repr()) - + self.t_3_comm.0.mul(z_two_n.into_repr()) - + self.t_4_comm.0.mul(z_three_n.into_repr()); - Commitment(t_comm.into_affine()) + + PC::multi_scalar_mul( + &[ + self.t_1_comm.clone(), + self.t_2_comm.clone(), + self.t_3_comm.clone(), + self.t_4_comm.clone(), + ], + &[F::one(), z_n, z_two_n, z_three_n], + ) } /// Computes the commitment to `[r]_1`. - fn compute_linearisation_commitment( + fn compute_linearisation_commitment
<P>
( &self, - alpha: E::Fr, - beta: E::Fr, - gamma: E::Fr, - range_sep_challenge: E::Fr, - logic_sep_challenge: E::Fr, - fixed_base_sep_challenge: E::Fr, - var_base_sep_challenge: E::Fr, - z_challenge: E::Fr, - l1_eval: E::Fr, - plonk_verifier_key: &PlonkVerifierKey, - ) -> Commitment { + alpha: F, + beta: F, + gamma: F, + range_sep_challenge: F, + logic_sep_challenge: F, + fixed_base_sep_challenge: F, + var_base_sep_challenge: F, + z_challenge: F, + l1_eval: F, + plonk_verifier_key: &PlonkVerifierKey, + ) -> PC::Commitment + where + P: TEModelParameters, + { let mut scalars = Vec::with_capacity(6); let mut points = Vec::with_capacity(6); @@ -426,39 +403,36 @@ where &mut points, &self.evaluations, ); - - Range::extend_linearisation_commitment::( - plonk_verifier_key.range_selector_commitment, + Range::extend_linearisation_commitment::( + &plonk_verifier_key.range_selector_commitment, range_sep_challenge, &self.evaluations, &mut scalars, &mut points, ); - Logic::extend_linearisation_commitment::( - plonk_verifier_key.logic_selector_commitment, + Logic::extend_linearisation_commitment::( + &plonk_verifier_key.logic_selector_commitment, logic_sep_challenge, &self.evaluations, &mut scalars, &mut points, ); - FixedBaseScalarMul::<_, P>::extend_linearisation_commitment::( - plonk_verifier_key.fixed_group_add_selector_commitment, + FixedBaseScalarMul::<_, P>::extend_linearisation_commitment::( + &plonk_verifier_key.fixed_group_add_selector_commitment, fixed_base_sep_challenge, &self.evaluations, &mut scalars, &mut points, ); - - CurveAddition::<_, P>::extend_linearisation_commitment::( - plonk_verifier_key.variable_group_add_selector_commitment, + CurveAddition::<_, P>::extend_linearisation_commitment::( + &plonk_verifier_key.variable_group_add_selector_commitment, var_base_sep_challenge, &self.evaluations, &mut scalars, &mut points, ); - plonk_verifier_key .permutation .compute_linearisation_commitment( @@ -468,15 +442,9 @@ where z_challenge, (alpha, beta, gamma), l1_eval, - self.z_comm.0, + self.z_comm.clone(), ); - - let scalars_repr = - scalars.iter().map(E::Fr::into_repr).collect::>(); - - Commitment( - VariableBaseMSM::multi_scalar_mul(&points, &scalars_repr).into(), - ) + PC::multi_scalar_mul(&points, &scalars) } } @@ -556,74 +524,45 @@ where #[cfg(test)] mod test { use super::*; - use crate::batch_test; + use crate::batch_test_kzg; use ark_bls12_377::Bls12_377; use ark_bls12_381::Bls12_381; - use ark_ff::UniformRand; - use rand_core::OsRng; - fn test_serde_proof() + fn test_serde_proof() where - E: PairingEngine, - P: TEModelParameters, + F: PrimeField, + P: TEModelParameters, + PC: HomomorphicCommitment, + Proof: std::fmt::Debug + PartialEq, { - let proof = Proof:: { - a_comm: Default::default(), - b_comm: Default::default(), - c_comm: Default::default(), - d_comm: Default::default(), - z_comm: Default::default(), - t_1_comm: Default::default(), - t_2_comm: Default::default(), - t_3_comm: Default::default(), - t_4_comm: Default::default(), - w_z_comm: Default::default(), - w_zw_comm: Default::default(), - evaluations: ProofEvaluations { - a_eval: E::Fr::rand(&mut OsRng), - b_eval: E::Fr::rand(&mut OsRng), - c_eval: E::Fr::rand(&mut OsRng), - d_eval: E::Fr::rand(&mut OsRng), - a_next_eval: E::Fr::rand(&mut OsRng), - b_next_eval: E::Fr::rand(&mut OsRng), - d_next_eval: E::Fr::rand(&mut OsRng), - q_arith_eval: E::Fr::rand(&mut OsRng), - q_c_eval: E::Fr::rand(&mut OsRng), - q_l_eval: E::Fr::rand(&mut OsRng), - q_r_eval: E::Fr::rand(&mut OsRng), - left_sigma_eval: E::Fr::rand(&mut OsRng), - 
right_sigma_eval: E::Fr::rand(&mut OsRng), - out_sigma_eval: E::Fr::rand(&mut OsRng), - linearisation_polynomial_eval: E::Fr::rand(&mut OsRng), - permutation_eval: E::Fr::rand(&mut OsRng), - }, - __: PhantomData, - }; + let proof = + crate::constraint_system::helper::gadget_tester::( + |_: &mut crate::constraint_system::StandardComposer| {}, + 200, + ) + .expect("Empty circuit failed"); let mut proof_bytes = vec![]; proof.serialize(&mut proof_bytes).unwrap(); let obtained_proof = - Proof::deserialize(proof_bytes.as_slice()).unwrap(); + Proof::::deserialize(proof_bytes.as_slice()).unwrap(); - assert!(proof == obtained_proof); + assert_eq!(proof, obtained_proof); } // Bls12-381 tests - batch_test!( + batch_test_kzg!( [test_serde_proof], [] => ( - Bls12_381, - ark_ed_on_bls12_381::EdwardsParameters + Bls12_381, ark_ed_on_bls12_381::EdwardsParameters ) ); - // Bls12-377 tests - batch_test!( + batch_test_kzg!( [test_serde_proof], [] => ( - Bls12_377, - ark_ed_on_bls12_377::EdwardsParameters + Bls12_377, ark_ed_on_bls12_377::EdwardsParameters ) ); } diff --git a/plonk-core/src/proof_system/prover.rs b/plonk-core/src/proof_system/prover.rs index f1d7c141..4433972f 100644 --- a/plonk-core/src/proof_system/prover.rs +++ b/plonk-core/src/proof_system/prover.rs @@ -7,57 +7,60 @@ //! Prover-side of the PLONK Proving System use crate::{ + commitment::HomomorphicCommitment, constraint_system::{StandardComposer, Variable}, - error::Error, + error::{to_pc_error, Error}, + label_polynomial, proof_system::{ linearisation_poly, proof::Proof, quotient_poly, ProverKey, }, - transcript::{TranscriptProtocol, TranscriptWrapper}, - util, + transcript::TranscriptProtocol, }; -use ark_ec::{PairingEngine, TEModelParameters}; -use ark_ff::{Field, UniformRand}; +use ark_ec::{ModelParameters, TEModelParameters}; +use ark_ff::PrimeField; use ark_poly::{ univariate::{DensePolynomial, SparsePolynomial}, EvaluationDomain, GeneralEvaluationDomain, UVPolynomial, }; -use ark_poly_commit::kzg10::{Powers, KZG10}; use core::marker::PhantomData; -use core::ops::Add; -use num_traits::{One, Zero}; -use rand_core::OsRng; +use merlin::Transcript; +use rand::rngs::OsRng; /// Abstraction structure designed to construct a circuit and generate /// [`Proof`]s for it. -pub struct Prover +pub struct Prover where - E: PairingEngine, - P: TEModelParameters, + F: PrimeField, + P: ModelParameters, + PC: HomomorphicCommitment, { /// Proving Key which is used to create proofs about a specific PLONK /// circuit. - pub prover_key: Option>, + pub prover_key: Option>, /// Circuit Description - pub(crate) cs: StandardComposer, + pub(crate) cs: StandardComposer, /// Store the messages exchanged during the preprocessing stage. /// /// This is copied each time, we make a proof. - pub preprocessed_transcript: TranscriptWrapper, -} + pub preprocessed_transcript: Transcript, -impl Prover + _phantom: PhantomData, +} +impl Prover where - E: PairingEngine, - P: TEModelParameters, + F: PrimeField, + P: TEModelParameters, + PC: HomomorphicCommitment, { /// Creates a new `Prover` instance. 
pub fn new(label: &'static [u8]) -> Self { Self { prover_key: None, cs: StandardComposer::new(), - preprocessed_transcript: TranscriptWrapper::new(label), + preprocessed_transcript: Transcript::new(label), + _phantom: PhantomData::, } } @@ -66,12 +69,13 @@ where Self { prover_key: None, cs: StandardComposer::with_expected_size(size), - preprocessed_transcript: TranscriptWrapper::new(label), + preprocessed_transcript: Transcript::new(label), + _phantom: PhantomData::, } } /// Returns a mutable copy of the underlying [`StandardComposer`]. - pub fn mut_cs(&mut self) -> &mut StandardComposer { + pub fn mut_cs(&mut self) -> &mut StandardComposer { &mut self.cs } @@ -82,13 +86,18 @@ where } /// Preprocesses the underlying constraint system. - pub fn preprocess(&mut self, commit_key: &Powers) -> Result<(), Error> { + pub fn preprocess( + &mut self, + commit_key: &PC::CommitterKey, + ) -> Result<(), Error> { if self.prover_key.is_some() { return Err(Error::CircuitAlreadyPreprocessed); } - let pk = self - .cs - .preprocess_prover(commit_key, &mut self.preprocessed_transcript)?; + let pk = self.cs.preprocess_prover( + commit_key, + &mut self.preprocessed_transcript, + PhantomData::, + )?; self.prover_key = Some(pk); Ok(()) } @@ -99,12 +108,12 @@ where fn split_tx_poly( &self, n: usize, - t_x: &DensePolynomial, + t_x: &DensePolynomial, ) -> ( - DensePolynomial, - DensePolynomial, - DensePolynomial, - DensePolynomial, + DensePolynomial, + DensePolynomial, + DensePolynomial, + DensePolynomial, ) { ( DensePolynomial::from_coefficients_vec(t_x[0..n].to_vec()), @@ -117,12 +126,12 @@ where /// Computes the quotient Opening [`DensePolynomial`]. fn compute_quotient_opening_poly( n: usize, - t_1_poly: &DensePolynomial, - t_2_poly: &DensePolynomial, - t_3_poly: &DensePolynomial, - t_4_poly: &DensePolynomial, - z_challenge: &E::Fr, - ) -> DensePolynomial { + t_1_poly: &DensePolynomial, + t_2_poly: &DensePolynomial, + t_3_poly: &DensePolynomial, + t_4_poly: &DensePolynomial, + z_challenge: &F, + ) -> DensePolynomial { // Compute z^n , z^2n , z^3n let z_n = z_challenge.pow(&[n as u64, 0, 0, 0]); let z_two_n = z_challenge.pow(&[2 * n as u64, 0, 0, 0]); @@ -135,7 +144,7 @@ where } /// Convert variables to their actual witness values. - fn to_scalars(&self, vars: &[Variable]) -> Vec { + fn to_scalars(&self, vars: &[Variable]) -> Vec { vars.iter().map(|var| self.cs.variables[var]).collect() } @@ -154,7 +163,7 @@ where pub fn clear(&mut self) { self.clear_witness(); self.prover_key = None; - self.preprocessed_transcript = TranscriptWrapper::new(b"plonk"); + self.preprocessed_transcript = Transcript::new(b"plonk"); } /// Keys the [`Transcript`] with additional seed information @@ -163,27 +172,7 @@ where /// [`Transcript`]: merlin::Transcript /// [`Transcript::append_message`]: merlin::Transcript::append_message pub fn key_transcript(&mut self, label: &'static [u8], message: &[u8]) { - self.preprocessed_transcript - .transcript - .append_message(label, message); - } - - /// Computes a single witness for multiple polynomials at the same point, by - /// taking a random linear combination of the individual witnesses. - /// - /// The result does not depend on `z`, thus we can remove the term `f(z)`. 
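// Illustrative sketch (not part of this diff) of the random linear combination
// described in the removed doc comment above: the polynomials are folded
// together with increasing powers of an aggregation challenge before a single
// opening witness is produced. The concrete field `Fr` and the helper name
// `fold_with_challenge` are assumptions made only for this example.
use ark_bls12_381::Fr;
use ark_ff::One;
use ark_poly::{univariate::DensePolynomial, UVPolynomial};

fn fold_with_challenge(
    polynomials: &[DensePolynomial<Fr>],
    challenge: Fr,
) -> DensePolynomial<Fr> {
    let mut acc = DensePolynomial::from_coefficients_vec(vec![]);
    let mut power = Fr::one();
    for poly in polynomials {
        // Scale each polynomial by the next power of the challenge and accumulate.
        acc = &acc + &(poly * power);
        power *= challenge;
    }
    acc
}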
- fn compute_aggregate_witness( - polynomials: &[DensePolynomial], - point: &E::Fr, - challenge: E::Fr, - ) -> DensePolynomial { - util::ruffini( - util::powers_of(challenge) - .zip(polynomials) - .map(|(challenge, poly)| poly * challenge) - .fold(Zero::zero(), Add::add), - *point, - ) + self.preprocessed_transcript.append_message(label, message); } /// Adds to a given polynomial a blinder term of the form: @@ -191,34 +180,33 @@ where /// where k is the hiding_degree and Z_h = X^n - 1, the vanishing /// polynomial. fn add_blinder( - polynomial: &DensePolynomial, + polynomial: &DensePolynomial, n: usize, hiding_degree: usize, - ) -> DensePolynomial { + ) -> DensePolynomial { if hiding_degree < n / 2 { - let z_h: DensePolynomial = + let z_h: DensePolynomial = SparsePolynomial::from_coefficients_slice(&[ - (0, -E::Fr::one()), - (n, E::Fr::one()), + (0, -F::one()), + (n, F::one()), ]) .into(); - let rand_poly = DensePolynomial::from_coefficients_vec(vec![ - E::Fr::rand( - &mut OsRng - ); + let rand_poly = + DensePolynomial::from_coefficients_vec(vec![ + F::rand(&mut OsRng); hiding_degree + 1 ]); let blinder_poly = &rand_poly * &z_h; polynomial + &blinder_poly } else { let mut sparse_blinder_vec = - vec![(0, E::Fr::zero()); 2 * (hiding_degree + 1)]; + vec![(0, F::zero()); 2 * (hiding_degree + 1)]; // Computes the multiplication of (b0 + b1X + ..+ bk X^k) (X^n -1) // = (- b0 -b1 X ... -bk X^k ..., b0 X^n + b1 X^(n+1) + ... bk // X^(n+k) as long as k< n/2 for i in 0..=hiding_degree { - let random_blinder = E::Fr::rand(&mut OsRng); + let random_blinder = F::rand(&mut OsRng); sparse_blinder_vec[i] = (i, -random_blinder); sparse_blinder_vec[hiding_degree + 1 + i] = (n + i, random_blinder); @@ -240,11 +228,15 @@ where /// This is automatically done when [`Prover::prove`] is called. pub fn prove_with_preprocessed( &self, - commit_key: &Powers, - prover_key: &ProverKey, - ) -> Result, Error> { + commit_key: &PC::CommitterKey, + prover_key: &ProverKey, + _data: PhantomData, + ) -> Result, Error> { let domain = - GeneralEvaluationDomain::new(self.cs.circuit_size()).unwrap(); + GeneralEvaluationDomain::new(self.cs.circuit_size()).ok_or(Error::InvalidEvalDomainSize { + log_size_of_group: self.cs.circuit_size().trailing_zeros(), + adicity: <::FftParams as ark_ff::FftParameters>::TWO_ADICITY, + })?; let n = domain.size(); // Since the caller is passing a pre-processed circuit @@ -256,7 +248,7 @@ where // // Convert Variables to scalars padding them to the // correct domain size. - let pad = vec![E::Fr::zero(); n - self.cs.w_l.len()]; + let pad = vec![F::zero(); n - self.cs.w_l.len()]; let w_l_scalar = &[&self.to_scalars(&self.cs.w_l)[..], &pad].concat(); let w_r_scalar = &[&self.to_scalars(&self.cs.w_r)[..], &pad].concat(); let w_o_scalar = &[&self.to_scalars(&self.cs.w_o)[..], &pad].concat(); @@ -278,24 +270,29 @@ where w_r_poly = Self::add_blinder(&w_r_poly, n, 1); w_o_poly = Self::add_blinder(&w_o_poly, n, 1); w_4_poly = Self::add_blinder(&w_4_poly, n, 1); + let w_polys = [ + label_polynomial!(w_l_poly), + label_polynomial!(w_r_poly), + label_polynomial!(w_o_poly), + label_polynomial!(w_4_poly), + ]; // Commit to witness polynomials. 
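// Sketch (illustrative only) of the blinding step applied a few lines above:
// a random degree-k polynomial times the vanishing polynomial Z_H = X^n - 1 is
// added to the witness polynomial, which leaves its evaluations over the domain
// unchanged while hiding its coefficients. `Fr` and the helper name `blind`
// are assumptions for this example, not the crate's API.
use ark_bls12_381::Fr;
use ark_ff::One;
use ark_poly::{
    univariate::{DensePolynomial, SparsePolynomial},
    UVPolynomial,
};
use rand::rngs::OsRng;

fn blind(
    poly: &DensePolynomial<Fr>,
    n: usize,
    hiding_degree: usize,
) -> DensePolynomial<Fr> {
    let z_h: DensePolynomial<Fr> = SparsePolynomial::from_coefficients_slice(&[
        (0, -Fr::one()),
        (n, Fr::one()),
    ])
    .into();
    let blinder = &DensePolynomial::rand(hiding_degree, &mut OsRng) * &z_h;
    poly + &blinder
}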
- let w_l_poly_commit = KZG10::commit(commit_key, &w_l_poly, None, None)?; - let w_r_poly_commit = KZG10::commit(commit_key, &w_r_poly, None, None)?; - let w_o_poly_commit = KZG10::commit(commit_key, &w_o_poly, None, None)?; - let w_4_poly_commit = KZG10::commit(commit_key, &w_4_poly, None, None)?; + let (w_commits, w_rands) = PC::commit(commit_key, w_polys.iter(), None) + .map_err(to_pc_error::)?; // Add witness polynomial commitments to transcript. - transcript.append_commitment(b"w_l", &w_l_poly_commit.0); - transcript.append_commitment(b"w_r", &w_r_poly_commit.0); - transcript.append_commitment(b"w_o", &w_o_poly_commit.0); - transcript.append_commitment(b"w_4", &w_4_poly_commit.0); + //transcript.append_commitments(&*w_commits, PhantomData::); + transcript.append(b"w_l", w_commits[0].commitment()); + transcript.append(b"w_r", w_commits[1].commitment()); + transcript.append(b"w_o", w_commits[2].commitment()); + transcript.append(b"w_4", w_commits[3].commitment()); // 2. Compute permutation polynomial // // Compute permutation challenges; `beta` and `gamma`. let beta = transcript.challenge_scalar(b"beta"); - transcript.append_scalar(b"beta", &beta); + transcript.append(b"beta", &beta); let gamma = transcript.challenge_scalar(b"gamma"); assert!(beta != gamma, "challenges must be different"); @@ -319,12 +316,12 @@ where z_poly = Self::add_blinder(&z_poly, n, 2); // Commit to permutation polynomial. - let z_poly_commit = KZG10::>::commit( - commit_key, &z_poly, None, None, - )?; + let (z_poly_commit, _) = + PC::commit(commit_key, &[label_polynomial!(z_poly)], None) + .map_err(to_pc_error::)?; // Add permutation polynomial commitment to transcript. - transcript.append_commitment(b"z", &z_poly_commit.0); + transcript.append(b"z", z_poly_commit[0].commitment()); // 3. Compute public inputs polynomial. let pi_poly = DensePolynomial::from_coefficients_vec( @@ -345,7 +342,7 @@ where let var_base_sep_challenge = transcript.challenge_scalar(b"variable base separation challenge"); - let t_poly = quotient_poly::compute( + let t_poly = quotient_poly::compute::( &domain, prover_key, &z_poly, @@ -368,23 +365,30 @@ where self.split_tx_poly(n, &t_poly); // Commit to splitted quotient polynomial - let t_1_commit = KZG10::commit(commit_key, &t_1_poly, None, None)?; - let t_2_commit = KZG10::commit(commit_key, &t_2_poly, None, None)?; - let t_3_commit = KZG10::commit(commit_key, &t_3_poly, None, None)?; - let t_4_commit = KZG10::commit(commit_key, &t_4_poly, None, None)?; + let (t_commits, _) = PC::commit( + commit_key, + &[ + label_polynomial!(t_1_poly), + label_polynomial!(t_2_poly), + label_polynomial!(t_3_poly), + label_polynomial!(t_4_poly), + ], + None, + ) + .map_err(to_pc_error::)?; // Add quotient polynomial commitments to transcript - transcript.append_commitment(b"t_1", &t_1_commit.0); - transcript.append_commitment(b"t_2", &t_2_commit.0); - transcript.append_commitment(b"t_3", &t_3_commit.0); - transcript.append_commitment(b"t_4", &t_4_commit.0); + transcript.append(b"t_1", t_commits[0].commitment()); + transcript.append(b"t_2", t_commits[1].commitment()); + transcript.append(b"t_3", t_commits[2].commitment()); + transcript.append(b"t_4", t_commits[3].commitment()); // 4. Compute linearisation polynomial // // Compute evaluation challenge; `z`. 
let z_challenge = transcript.challenge_scalar(b"z"); - let (lin_poly, evaluations) = linearisation_poly::compute( + let (lin_poly, evaluations) = linearisation_poly::compute::( &domain, prover_key, &alpha, @@ -401,38 +405,27 @@ where &w_4_poly, &t_poly, &z_poly, - ); + )?; // Add evaluations to transcript. - transcript.append_scalar(b"a_eval", &evaluations.proof.a_eval); - transcript.append_scalar(b"b_eval", &evaluations.proof.b_eval); - transcript.append_scalar(b"c_eval", &evaluations.proof.c_eval); - transcript.append_scalar(b"d_eval", &evaluations.proof.d_eval); - transcript - .append_scalar(b"a_next_eval", &evaluations.proof.a_next_eval); - transcript - .append_scalar(b"b_next_eval", &evaluations.proof.b_next_eval); + transcript.append(b"a_eval", &evaluations.proof.a_eval); + transcript.append(b"b_eval", &evaluations.proof.b_eval); + transcript.append(b"c_eval", &evaluations.proof.c_eval); + transcript.append(b"d_eval", &evaluations.proof.d_eval); + transcript.append(b"a_next_eval", &evaluations.proof.a_next_eval); + transcript.append(b"b_next_eval", &evaluations.proof.b_next_eval); + transcript.append(b"d_next_eval", &evaluations.proof.d_next_eval); + transcript.append(b"left_sig_eval", &evaluations.proof.left_sigma_eval); transcript - .append_scalar(b"d_next_eval", &evaluations.proof.d_next_eval); - transcript.append_scalar( - b"left_sig_eval", - &evaluations.proof.left_sigma_eval, - ); - transcript.append_scalar( - b"right_sig_eval", - &evaluations.proof.right_sigma_eval, - ); - transcript - .append_scalar(b"out_sig_eval", &evaluations.proof.out_sigma_eval); - transcript - .append_scalar(b"q_arith_eval", &evaluations.proof.q_arith_eval); - transcript.append_scalar(b"q_c_eval", &evaluations.proof.q_c_eval); - transcript.append_scalar(b"q_l_eval", &evaluations.proof.q_l_eval); - transcript.append_scalar(b"q_r_eval", &evaluations.proof.q_r_eval); - transcript - .append_scalar(b"perm_eval", &evaluations.proof.permutation_eval); - transcript.append_scalar(b"t_eval", &evaluations.quot_eval); - transcript.append_scalar( + .append(b"right_sig_eval", &evaluations.proof.right_sigma_eval); + transcript.append(b"out_sig_eval", &evaluations.proof.out_sigma_eval); + transcript.append(b"q_arith_eval", &evaluations.proof.q_arith_eval); + transcript.append(b"q_c_eval", &evaluations.proof.q_c_eval); + transcript.append(b"q_l_eval", &evaluations.proof.q_l_eval); + transcript.append(b"q_r_eval", &evaluations.proof.q_r_eval); + transcript.append(b"perm_eval", &evaluations.proof.permutation_eval); + transcript.append(b"t_eval", &evaluations.quot_eval); + transcript.append( b"r_eval", &evaluations.proof.linearisation_polynomial_eval, ); @@ -441,6 +434,7 @@ where // // We merge the quotient polynomial using the `z_challenge` so the SRS // is linear in the circuit size `n` + let quot = Self::compute_quotient_opening_poly( n, &t_1_poly, @@ -452,60 +446,67 @@ where // Compute aggregate witness to polynomials evaluated at the evaluation // challenge `z` - let aw_challenge: E::Fr = - transcript.challenge_scalar(b"aggregate_witness"); - let aggregate_witness = Self::compute_aggregate_witness( - &[ - quot, - lin_poly, - w_l_poly.clone(), - w_r_poly.clone(), - w_o_poly, - w_4_poly.clone(), - prover_key.permutation.left_sigma.0.clone(), - prover_key.permutation.right_sigma.0.clone(), - prover_key.permutation.out_sigma.0.clone(), - ], + let aw_challenge: F = transcript.challenge_scalar(b"aggregate_witness"); + + let aw_polys = [ + label_polynomial!(quot), + label_polynomial!(lin_poly), + 
label_polynomial!(prover_key.permutation.left_sigma.0.clone()), + label_polynomial!(prover_key.permutation.right_sigma.0.clone()), + label_polynomial!(prover_key.permutation.out_sigma.0.clone()), + ]; + + let (aw_commits, aw_rands) = PC::commit(commit_key, &aw_polys, None) + .map_err(to_pc_error::)?; + + let aw_opening = PC::open( + commit_key, + aw_polys.iter().chain(w_polys.iter()), + aw_commits.iter().chain(w_commits.iter()), &z_challenge, aw_challenge, - ); - let w_z_comm = KZG10::>::commit( - commit_key, - &aggregate_witness, + aw_rands.iter().chain(w_rands.iter()), None, - None, - )?; + ) + .map_err(to_pc_error::)?; - // Compute aggregate witness to polynomials evaluated at the shifted - // evaluation challenge - let saw_challenge: E::Fr = + let saw_challenge: F = transcript.challenge_scalar(b"aggregate_witness"); - let shifted_aggregate_witness = Self::compute_aggregate_witness( - &[z_poly, w_l_poly, w_r_poly, w_4_poly], + + let saw_polys = [ + label_polynomial!(z_poly), + label_polynomial!(w_l_poly), + label_polynomial!(w_r_poly), + label_polynomial!(w_4_poly), + ]; + + let (saw_commits, saw_rands) = PC::commit(commit_key, &saw_polys, None) + .map_err(to_pc_error::)?; + + let saw_opening = PC::open( + commit_key, + &saw_polys, + &saw_commits, &(z_challenge * domain.element(1)), saw_challenge, - ); - let w_zw_comm = KZG10::>::commit( - commit_key, - &shifted_aggregate_witness, + &saw_rands, None, - None, - )?; + ) + .map_err(to_pc_error::)?; Ok(Proof { - a_comm: w_l_poly_commit.0, - b_comm: w_r_poly_commit.0, - c_comm: w_o_poly_commit.0, - d_comm: w_4_poly_commit.0, - z_comm: z_poly_commit.0, - t_1_comm: t_1_commit.0, - t_2_comm: t_2_commit.0, - t_3_comm: t_3_commit.0, - t_4_comm: t_4_commit.0, - w_z_comm: w_z_comm.0, - w_zw_comm: w_zw_comm.0, + a_comm: w_commits[0].commitment().clone(), + b_comm: w_commits[1].commitment().clone(), + c_comm: w_commits[2].commitment().clone(), + d_comm: w_commits[3].commitment().clone(), + z_comm: saw_commits[0].commitment().clone(), + t_1_comm: t_commits[0].commitment().clone(), + t_2_comm: t_commits[1].commitment().clone(), + t_3_comm: t_commits[2].commitment().clone(), + t_4_comm: t_commits[3].commitment().clone(), + aw_opening, + saw_opening, evaluations: evaluations.proof, - __: PhantomData, }) } @@ -514,20 +515,25 @@ where /// also be computed. pub fn prove( &mut self, - commit_key: &Powers, - ) -> Result, Error> { + commit_key: &PC::CommitterKey, + ) -> Result, Error> { if self.prover_key.is_none() { // Preprocess circuit and store preprocessed circuit and transcript // in the Prover. self.prover_key = Some(self.cs.preprocess_prover( commit_key, &mut self.preprocessed_transcript, + PhantomData::, )?); } let prover_key = self.prover_key.as_ref().unwrap(); - let proof = self.prove_with_preprocessed(commit_key, prover_key)?; + let proof = self.prove_with_preprocessed( + commit_key, + prover_key, + PhantomData::, + )?; // Clear witness and reset composer variables self.clear_witness(); @@ -536,10 +542,11 @@ where } } -impl Default for Prover +impl Default for Prover where - E: PairingEngine, - P: TEModelParameters, + F: PrimeField, + P: TEModelParameters, + PC: HomomorphicCommitment, { #[inline] fn default() -> Self { diff --git a/plonk-core/src/proof_system/quotient_poly.rs b/plonk-core/src/proof_system/quotient_poly.rs index ab64f2c7..30472cce 100644 --- a/plonk-core/src/proof_system/quotient_poly.rs +++ b/plonk-core/src/proof_system/quotient_poly.rs @@ -4,15 +4,18 @@ // // Copyright (c) DUSK NETWORK. All rights reserved. 
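// Background sketch (assumed field `Fr`) for the quotient computation in
// quotient_poly.rs below: work in evaluation form over an extended coset,
// combine the constraint values pointwise, divide by the vanishing
// polynomial's coset evaluations, and interpolate back. Names here are
// illustrative, not the crate's API.
use ark_bls12_381::Fr;
use ark_ff::Field;
use ark_poly::{
    univariate::DensePolynomial, EvaluationDomain, GeneralEvaluationDomain,
    UVPolynomial,
};

fn divide_on_coset(
    numerator: &DensePolynomial<Fr>,
    v_h_coset_evals: &[Fr],
    domain_8n: &GeneralEvaluationDomain<Fr>,
) -> DensePolynomial<Fr> {
    let mut evals = domain_8n.coset_fft(numerator);
    for (e, v) in evals.iter_mut().zip(v_h_coset_evals) {
        // Z_H is non-zero on the coset, so the inverse exists.
        *e *= v.inverse().expect("vanishing polynomial non-zero off the domain");
    }
    DensePolynomial::from_coefficients_vec(domain_8n.coset_ifft(&evals))
}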
-use crate::proof_system::ecc::CurveAddition; -use crate::proof_system::ecc::FixedBaseScalarMul; -use crate::proof_system::logic::Logic; -use crate::proof_system::range::Range; -use crate::proof_system::widget::GateConstraint; -use crate::proof_system::GateValues; -use crate::{error::Error, proof_system::ProverKey}; +use crate::{ + error::Error, + proof_system::{ + ecc::{CurveAddition, FixedBaseScalarMul}, + logic::Logic, + range::Range, + widget::GateConstraint, + GateValues, ProverKey, + }, +}; use ark_ec::TEModelParameters; -use ark_ff::PrimeField; +use ark_ff::{FftField, PrimeField}; use ark_poly::{ univariate::DensePolynomial, EvaluationDomain, GeneralEvaluationDomain, UVPolynomial, @@ -22,7 +25,7 @@ use ark_poly::{ /// [`ProverKey`], and some other info. pub fn compute( domain: &GeneralEvaluationDomain, - prover_key: &ProverKey, + prover_key: &ProverKey, z_poly: &DensePolynomial, w_l_poly: &DensePolynomial, w_r_poly: &DensePolynomial, @@ -41,8 +44,12 @@ where F: PrimeField, P: TEModelParameters, { - let domain_8n = - GeneralEvaluationDomain::::new(8 * domain.size()).unwrap(); + let domain_8n = GeneralEvaluationDomain::::new(8 * domain.size()) + .ok_or(Error::InvalidEvalDomainSize { + log_size_of_group: (8 * domain.size()).trailing_zeros(), + adicity: + <::FftParams as ark_ff::FftParameters>::TWO_ADICITY, + })?; let mut z_eval_8n = domain_8n.coset_fft(z_poly); z_eval_8n.push(z_eval_8n[0]); @@ -86,7 +93,7 @@ where w4_eval_8n.push(w4_eval_8n[6]); w4_eval_8n.push(w4_eval_8n[7]); - let gate_constraints = compute_gate_constraint_satisfiability( + let gate_constraints = compute_gate_constraint_satisfiability::( domain, *range_challenge, *logic_challenge, @@ -98,9 +105,9 @@ where &wo_eval_8n, &w4_eval_8n, public_inputs_poly, - ); + )?; - let permutation = compute_permutation_checks( + let permutation = compute_permutation_checks::( domain, prover_key, &wl_eval_8n, @@ -111,7 +118,7 @@ where *alpha, *beta, *gamma, - ); + )?; let quotient = (0..domain_8n.size()) .map(|i| { @@ -133,22 +140,26 @@ fn compute_gate_constraint_satisfiability( logic_challenge: F, fixed_base_challenge: F, var_base_challenge: F, - prover_key: &ProverKey, + prover_key: &ProverKey, wl_eval_8n: &[F], wr_eval_8n: &[F], wo_eval_8n: &[F], w4_eval_8n: &[F], pi_poly: &DensePolynomial, -) -> Vec +) -> Result, Error> where F: PrimeField, P: TEModelParameters, { - let domain_8n = - GeneralEvaluationDomain::::new(8 * domain.size()).unwrap(); + let domain_8n = GeneralEvaluationDomain::::new(8 * domain.size()) + .ok_or(Error::InvalidEvalDomainSize { + log_size_of_group: (8 * domain.size()).trailing_zeros(), + adicity: + <::FftParams as ark_ff::FftParameters>::TWO_ADICITY, + })?; let pi_eval_8n = domain_8n.coset_fft(pi_poly); - (0..domain_8n.size()) + Ok((0..domain_8n.size()) .map(|i| { let values = GateValues { left: wl_eval_8n[i], @@ -202,14 +213,14 @@ where + fixed_base_scalar_mul + curve_addition }) - .collect() + .collect()) } /// Computes the permutation contribution to the quotient polynomial over /// `domain`. 
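// Context for the `ok_or(Error::InvalidEvalDomainSize { .. })` guards added in
// this file: a radix-2 evaluation domain of size 8n only exists while 8n stays
// within the two-adicity of the field, so the former `unwrap()` calls are
// replaced by a descriptive error. Minimal standalone version of the check,
// assuming the BLS12-381 scalar field:
use ark_bls12_381::Fr;
use ark_ff::{FftField, FftParameters};
use ark_poly::{EvaluationDomain, GeneralEvaluationDomain};

fn extended_domain(n: usize) -> Result<GeneralEvaluationDomain<Fr>, String> {
    GeneralEvaluationDomain::<Fr>::new(8 * n).ok_or_else(|| {
        format!(
            "cannot build domain: log2(8n) = {} exceeds two-adicity {}",
            (8 * n).trailing_zeros(),
            <<Fr as FftField>::FftParams as FftParameters>::TWO_ADICITY
        )
    })
}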
-fn compute_permutation_checks( +fn compute_permutation_checks( domain: &GeneralEvaluationDomain, - prover_key: &ProverKey, + prover_key: &ProverKey, wl_eval_8n: &[F], wr_eval_8n: &[F], wo_eval_8n: &[F], @@ -218,18 +229,21 @@ fn compute_permutation_checks( alpha: F, beta: F, gamma: F, -) -> Vec +) -> Result, Error> where - F: PrimeField, - P: TEModelParameters, + F: FftField, { - let domain_8n = - GeneralEvaluationDomain::::new(8 * domain.size()).unwrap(); + let domain_8n = GeneralEvaluationDomain::::new(8 * domain.size()) + .ok_or(Error::InvalidEvalDomainSize { + log_size_of_group: (8 * domain.size()).trailing_zeros(), + adicity: + <::FftParams as ark_ff::FftParameters>::TWO_ADICITY, + })?; let l1_poly_alpha = compute_first_lagrange_poly_scaled(domain, alpha.square()); let l1_alpha_sq_evals = domain_8n.coset_fft(&l1_poly_alpha.coeffs); - (0..domain_8n.size()) + Ok((0..domain_8n.size()) .map(|i| { prover_key.permutation.compute_quotient_i( i, @@ -245,7 +259,7 @@ where gamma, ) }) - .collect() + .collect()) } /// Computes the first lagrange polynomial with the given `scale` over `domain`. @@ -254,7 +268,7 @@ fn compute_first_lagrange_poly_scaled( scale: F, ) -> DensePolynomial where - F: PrimeField, + F: FftField, { let mut x_evals = vec![F::zero(); domain.size()]; x_evals[0] = scale; diff --git a/plonk-core/src/proof_system/verifier.rs b/plonk-core/src/proof_system/verifier.rs index 0c85866b..5aa0e5f7 100644 --- a/plonk-core/src/proof_system/verifier.rs +++ b/plonk-core/src/proof_system/verifier.rs @@ -6,25 +6,30 @@ //! Verifier-side of the PLONK Proving System -use crate::constraint_system::StandardComposer; -use crate::error::Error; -use crate::proof_system::widget::VerifierKey as PlonkVerifierKey; -use crate::proof_system::Proof; -use crate::transcript::TranscriptWrapper; -use ark_ec::{PairingEngine, TEModelParameters}; -use ark_poly_commit::kzg10::{Powers, VerifierKey}; +//use crate::circuit::EmbeddedCurve; +use crate::{ + commitment::HomomorphicCommitment, + constraint_system::StandardComposer, + error::Error, + proof_system::{widget::VerifierKey as PlonkVerifierKey, Proof}, +}; +use ark_ec::TEModelParameters; +use ark_ff::PrimeField; +use core::marker::PhantomData; +use merlin::Transcript; /// Abstraction structure designed verify [`Proof`]s. -pub struct Verifier +pub struct Verifier where - E: PairingEngine, - P: TEModelParameters, + F: PrimeField, + P: TEModelParameters, + PC: HomomorphicCommitment, { /// VerificationKey which is used to verify a specific PLONK circuit - pub verifier_key: Option>, + pub verifier_key: Option>, /// Circuit Description - pub(crate) cs: StandardComposer, + pub(crate) cs: StandardComposer, /// Store the messages exchanged during the preprocessing stage. /// @@ -32,20 +37,21 @@ where /// verifier to verify multiple proofs from the same circuit. If this is /// not copied, then the verification procedure will modify the transcript, /// making it unusable for future proofs. - pub preprocessed_transcript: TranscriptWrapper, + pub preprocessed_transcript: Transcript, } -impl Verifier +impl Verifier where - E: PairingEngine, - P: TEModelParameters, + F: PrimeField, + P: TEModelParameters, + PC: HomomorphicCommitment, { /// Creates a new `Verifier` instance. 
pub fn new(label: &'static [u8]) -> Self { Self { verifier_key: None, cs: StandardComposer::new(), - preprocessed_transcript: TranscriptWrapper::new(label), + preprocessed_transcript: Transcript::new(label), } } @@ -54,7 +60,7 @@ where Self { verifier_key: None, cs: StandardComposer::with_expected_size(size), - preprocessed_transcript: TranscriptWrapper::new(label), + preprocessed_transcript: Transcript::new(label), } } @@ -64,17 +70,21 @@ where } /// Returns a mutable copy of the underlying composer. - pub fn mut_cs(&mut self) -> &mut StandardComposer { + pub fn mut_cs(&mut self) -> &mut StandardComposer { &mut self.cs } - /// Preprocess a circuit to obtain a [`VerifierKey`] and a circuit - /// descriptor so that the `Verifier` instance can verify [`Proof`]s - /// for this circuit descriptor instance. - pub fn preprocess(&mut self, commit_key: &Powers) -> Result<(), Error> { + /// Preprocess a circuit to obtain a [`PlonkVerifierKey`] and a + /// circuit descriptor so that the `Verifier` instance can verify + /// [`Proof`]s for this circuit descriptor instance. + pub fn preprocess( + &mut self, + commit_key: &PC::CommitterKey, + ) -> Result<(), Error> { let vk = self.cs.preprocess_verifier( commit_key, &mut self.preprocessed_transcript, + PhantomData::, )?; self.verifier_key = Some(vk); @@ -87,19 +97,17 @@ where /// [`Transcript`]: merlin::Transcript /// [`Transcript::append_message`]: merlin::Transcript::append_message pub fn key_transcript(&mut self, label: &'static [u8], message: &[u8]) { - self.preprocessed_transcript - .transcript - .append_message(label, message); + self.preprocessed_transcript.append_message(label, message); } /// Verifies a [`Proof`] using `pc_verifier_key` and `public_inputs`. pub fn verify( &self, - proof: &Proof, - pc_verifier_key: &VerifierKey, - public_inputs: &[E::Fr], + proof: &Proof, + pc_verifier_key: &PC::VerifierKey, + public_inputs: &[F], ) -> Result<(), Error> { - proof.verify( + proof.verify::
<P>
( self.verifier_key.as_ref().unwrap(), &mut self.preprocessed_transcript.clone(), pc_verifier_key, @@ -108,13 +116,14 @@ where } } -impl Default for Verifier +impl Default for Verifier where - E: PairingEngine, - P: TEModelParameters, + F: PrimeField, + P: TEModelParameters, + PC: HomomorphicCommitment, { #[inline] - fn default() -> Verifier { + fn default() -> Verifier { Verifier::new(b"plonk") } } diff --git a/plonk-core/src/proof_system/widget/arithmetic.rs b/plonk-core/src/proof_system/widget/arithmetic.rs index 22d9eaeb..bf8d46ef 100644 --- a/plonk-core/src/proof_system/widget/arithmetic.rs +++ b/plonk-core/src/proof_system/widget/arithmetic.rs @@ -7,11 +7,9 @@ //! Arithmetic Gates use crate::proof_system::linearisation_poly::ProofEvaluations; -use ark_ec::PairingEngine; -use ark_ff::PrimeField; -use ark_poly::polynomial::univariate::DensePolynomial; -use ark_poly::Evaluations; -use ark_poly_commit::sonic_pc::Commitment; +use ark_ff::{FftField, PrimeField}; +use ark_poly::{polynomial::univariate::DensePolynomial, Evaluations}; +use ark_poly_commit::PolynomialCommitment; use ark_serialize::*; /// Arithmetic Gates Prover Key @@ -19,7 +17,7 @@ use ark_serialize::*; #[derivative(Clone, Debug, Eq, PartialEq)] pub struct ProverKey where - F: PrimeField, + F: FftField, { /// Multiplication Selector pub q_m: (DensePolynomial, Evaluations), @@ -45,7 +43,7 @@ where impl ProverKey where - F: PrimeField, + F: FftField, { /// Computes the arithmetic gate contribution to the quotient polynomial at /// the element of the domain at the given `index`. @@ -88,63 +86,71 @@ where /// Arithmetic Gates Verifier Key #[derive(CanonicalDeserialize, CanonicalSerialize, derivative::Derivative)] -#[derivative(Clone, Copy, Debug, Eq, PartialEq)] -pub struct VerifierKey +#[derivative( + Clone, + Copy(bound = "PC::Commitment: Copy"), + Debug(bound = "PC::Commitment: std::fmt::Debug"), + Eq(bound = "PC::Commitment: Eq"), + PartialEq(bound = "PC::Commitment: PartialEq") +)] +pub struct VerifierKey where - E: PairingEngine, + F: PrimeField, + PC: PolynomialCommitment>, { /// Multiplication Selector Commitment - pub q_m: Commitment, + pub q_m: PC::Commitment, /// Left Selector Commitment - pub q_l: Commitment, + pub q_l: PC::Commitment, /// Right Selector Commitment - pub q_r: Commitment, + pub q_r: PC::Commitment, /// Output Selector Commitment - pub q_o: Commitment, + pub q_o: PC::Commitment, /// Fourth Selector Commitment - pub q_4: Commitment, + pub q_4: PC::Commitment, /// Constant Selector Commitment - pub q_c: Commitment, + pub q_c: PC::Commitment, /// Arithmetic Selector Commitment - pub q_arith: Commitment, + pub q_arith: PC::Commitment, } -impl VerifierKey +impl VerifierKey where - E: PairingEngine, + F: PrimeField, + PC: PolynomialCommitment>, { /// Computes arithmetic gate contribution to the linearisation polynomial /// commitment. 
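// For context (concrete BLS12-381 types, illustrative only): the `scalars` and
// `points` pairs accumulated by these helpers are ultimately collapsed with a
// single multi-scalar multiplication, which is what the generic
// `PC::multi_scalar_mul(&points, &scalars)` call abstracts over.
use ark_bls12_381::{Fr, G1Affine, G1Projective};
use ark_ec::msm::VariableBaseMSM;
use ark_ff::PrimeField;

fn combine(points: &[G1Affine], scalars: &[Fr]) -> G1Projective {
    // Convert scalars to their bigint representation, then run the MSM.
    let repr: Vec<_> = scalars.iter().map(|s| s.into_repr()).collect();
    VariableBaseMSM::multi_scalar_mul(points, &repr)
}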
pub fn compute_linearisation_commitment( &self, - scalars: &mut Vec, - points: &mut Vec, - evaluations: &ProofEvaluations, + scalars: &mut Vec, + points: &mut Vec, + evaluations: &ProofEvaluations, ) { let q_arith_eval = evaluations.q_arith_eval; scalars.push(evaluations.a_eval * evaluations.b_eval * q_arith_eval); - points.push(self.q_m.0); + points.push(self.q_m.clone()); scalars.push(evaluations.a_eval * q_arith_eval); - points.push(self.q_l.0); + points.push(self.q_l.clone()); scalars.push(evaluations.b_eval * q_arith_eval); - points.push(self.q_r.0); + points.push(self.q_r.clone()); scalars.push(evaluations.c_eval * q_arith_eval); - points.push(self.q_o.0); + points.push(self.q_o.clone()); scalars.push(evaluations.d_eval * q_arith_eval); - points.push(self.q_4.0); + points.push(self.q_4.clone()); scalars.push(q_arith_eval); - points.push(self.q_c.0); + points.push(self.q_c.clone()); } } diff --git a/plonk-core/src/proof_system/widget/ecc/curve_addition.rs b/plonk-core/src/proof_system/widget/ecc/curve_addition.rs index 03243e23..8e5ef71d 100644 --- a/plonk-core/src/proof_system/widget/ecc/curve_addition.rs +++ b/plonk-core/src/proof_system/widget/ecc/curve_addition.rs @@ -7,7 +7,7 @@ //! Elliptic Curve Point Addition Gate use crate::proof_system::widget::{GateConstraint, GateValues}; -use ark_ec::TEModelParameters; +use ark_ec::{ModelParameters, TEModelParameters}; use ark_ff::Field; use core::marker::PhantomData; @@ -17,7 +17,7 @@ use core::marker::PhantomData; pub struct CurveAddition(PhantomData<(F, P)>) where F: Field, - P: TEModelParameters; + P: ModelParameters; impl GateConstraint for CurveAddition where diff --git a/plonk-core/src/proof_system/widget/ecc/fixed_base_scalar_mul.rs b/plonk-core/src/proof_system/widget/ecc/fixed_base_scalar_mul.rs index 3a446775..ff8368ef 100644 --- a/plonk-core/src/proof_system/widget/ecc/fixed_base_scalar_mul.rs +++ b/plonk-core/src/proof_system/widget/ecc/fixed_base_scalar_mul.rs @@ -19,7 +19,7 @@ //! base2 bit. use crate::proof_system::widget::{GateConstraint, GateValues}; -use ark_ec::TEModelParameters; +use ark_ec::{ModelParameters, TEModelParameters}; use ark_ff::Field; use core::marker::PhantomData; @@ -29,7 +29,7 @@ use core::marker::PhantomData; pub struct FixedBaseScalarMul(PhantomData<(F, P)>) where F: Field, - P: TEModelParameters; + P: ModelParameters; impl GateConstraint for FixedBaseScalarMul where diff --git a/plonk-core/src/proof_system/widget/mod.rs b/plonk-core/src/proof_system/widget/mod.rs index 19e81a23..9b17a292 100644 --- a/plonk-core/src/proof_system/widget/mod.rs +++ b/plonk-core/src/proof_system/widget/mod.rs @@ -11,15 +11,15 @@ pub mod ecc; pub mod logic; pub mod range; -use crate::proof_system::linearisation_poly::ProofEvaluations; -use crate::proof_system::permutation; -use crate::transcript::TranscriptProtocol; -use ark_ec::{PairingEngine, TEModelParameters}; -use ark_ff::{Field, PrimeField}; +use crate::{ + commitment::HomomorphicCommitment, + proof_system::{linearisation_poly::ProofEvaluations, permutation}, + transcript::TranscriptProtocol, +}; +use ark_ff::{FftField, Field, PrimeField}; use ark_poly::{univariate::DensePolynomial, Evaluations}; -use ark_poly_commit::sonic_pc::Commitment; +use ark_poly_commit::PolynomialCommitment; use ark_serialize::*; -use core::marker::PhantomData; /// Gate Values /// @@ -105,14 +105,14 @@ where /// Extends `scalars` and `points` to build the linearisation commitment /// with the given instantiation of `evaluations` and /// `separation_challenge`. 
- fn extend_linearisation_commitment( - selector_commitment: Commitment, - separation_challenge: E::Fr, - evaluations: &ProofEvaluations, - scalars: &mut Vec, - points: &mut Vec, + fn extend_linearisation_commitment( + selector_commitment: &PC::Commitment, + separation_challenge: F, + evaluations: &ProofEvaluations, + scalars: &mut Vec, + points: &mut Vec, ) where - E: PairingEngine, + PC: PolynomialCommitment>, { let coefficient = Self::constraints( separation_challenge, @@ -130,7 +130,7 @@ where }, ); scalars.push(coefficient); - points.push(selector_commitment.0); + points.push(selector_commitment.clone()); } } @@ -141,65 +141,66 @@ where #[derive(CanonicalDeserialize, CanonicalSerialize, derivative::Derivative)] #[derivative( Clone(bound = ""), - Debug(bound = ""), - Eq(bound = ""), - PartialEq(bound = "") + Debug( + bound = "arithmetic::VerifierKey: std::fmt::Debug, PC::Commitment: std::fmt::Debug" + ), + Eq(bound = "arithmetic::VerifierKey: Eq, PC::Commitment: Eq"), + PartialEq( + bound = "arithmetic::VerifierKey: PartialEq, PC::Commitment: PartialEq" + ) )] -pub struct VerifierKey +pub struct VerifierKey where - E: PairingEngine, - P: TEModelParameters, + F: PrimeField, + PC: HomomorphicCommitment, { /// Circuit size (not padded to a power of two). pub(crate) n: usize, /// Arithmetic Verifier Key - pub(crate) arithmetic: arithmetic::VerifierKey, + pub(crate) arithmetic: arithmetic::VerifierKey, /// Range Gate Selector Commitment - pub(crate) range_selector_commitment: Commitment, + pub(crate) range_selector_commitment: PC::Commitment, /// Logic Gate Selector Commitment - pub(crate) logic_selector_commitment: Commitment, + pub(crate) logic_selector_commitment: PC::Commitment, /// Fixed Group Addition Selector Commitment - pub(crate) fixed_group_add_selector_commitment: Commitment, + pub(crate) fixed_group_add_selector_commitment: PC::Commitment, /// Variable Group Addition Selector Commitment - pub(crate) variable_group_add_selector_commitment: Commitment, + pub(crate) variable_group_add_selector_commitment: PC::Commitment, /// VerifierKey for permutation checks - pub(crate) permutation: permutation::VerifierKey, - - /// Type Parameter Marker - __: PhantomData

, + pub(crate) permutation: permutation::VerifierKey, } -impl VerifierKey +impl VerifierKey where - E: PairingEngine, - P: TEModelParameters, + F: PrimeField, + PC: HomomorphicCommitment, { /// Constructs a [`VerifierKey`] from the widget VerifierKey's that are /// constructed based on the selector polynomial commitments and the /// sigma polynomial commitments. pub(crate) fn from_polynomial_commitments( n: usize, - q_m: Commitment, - q_l: Commitment, - q_r: Commitment, - q_o: Commitment, - q_4: Commitment, - q_c: Commitment, - q_arith: Commitment, - q_range: Commitment, - q_logic: Commitment, - q_fixed_group_add: Commitment, - q_variable_group_add: Commitment, - left_sigma: Commitment, - right_sigma: Commitment, - out_sigma: Commitment, - fourth_sigma: Commitment, + q_m: PC::Commitment, + q_l: PC::Commitment, + q_r: PC::Commitment, + q_o: PC::Commitment, + q_4: PC::Commitment, + q_c: PC::Commitment, + q_arith: PC::Commitment, + q_range: PC::Commitment, + q_logic: PC::Commitment, + q_fixed_group_add: PC::Commitment, + q_variable_group_add: PC::Commitment, + left_sigma: PC::Commitment, + right_sigma: PC::Commitment, + out_sigma: PC::Commitment, + fourth_sigma: PC::Commitment, ) -> Self { Self { n, @@ -222,7 +223,6 @@ where out_sigma, fourth_sigma, }, - __: PhantomData, } } @@ -232,42 +232,37 @@ where } } -impl VerifierKey +impl VerifierKey where - E: PairingEngine, - P: TEModelParameters, + F: PrimeField, + PC: HomomorphicCommitment, { /// Adds the circuit description to the transcript. pub(crate) fn seed_transcript(&self, transcript: &mut T) where - T: TranscriptProtocol, + T: TranscriptProtocol, { - transcript.append_commitment(b"q_m", &self.arithmetic.q_m); - transcript.append_commitment(b"q_l", &self.arithmetic.q_l); - transcript.append_commitment(b"q_r", &self.arithmetic.q_r); - transcript.append_commitment(b"q_o", &self.arithmetic.q_o); - transcript.append_commitment(b"q_c", &self.arithmetic.q_c); - transcript.append_commitment(b"q_4", &self.arithmetic.q_4); - transcript.append_commitment(b"q_arith", &self.arithmetic.q_arith); - transcript - .append_commitment(b"q_range", &self.range_selector_commitment); - transcript - .append_commitment(b"q_logic", &self.logic_selector_commitment); - transcript.append_commitment( + transcript.append(b"q_m", &self.arithmetic.q_m); + transcript.append(b"q_l", &self.arithmetic.q_l); + transcript.append(b"q_r", &self.arithmetic.q_r); + transcript.append(b"q_o", &self.arithmetic.q_o); + transcript.append(b"q_c", &self.arithmetic.q_c); + transcript.append(b"q_4", &self.arithmetic.q_4); + transcript.append(b"q_arith", &self.arithmetic.q_arith); + transcript.append(b"q_range", &self.range_selector_commitment); + transcript.append(b"q_logic", &self.logic_selector_commitment); + transcript.append( b"q_variable_group_add", &self.variable_group_add_selector_commitment, ); - transcript.append_commitment( + transcript.append( b"q_fixed_group_add", &self.fixed_group_add_selector_commitment, ); - transcript - .append_commitment(b"left_sigma", &self.permutation.left_sigma); - transcript - .append_commitment(b"right_sigma", &self.permutation.right_sigma); - transcript.append_commitment(b"out_sigma", &self.permutation.out_sigma); - transcript - .append_commitment(b"fourth_sigma", &self.permutation.fourth_sigma); + transcript.append(b"left_sigma", &self.permutation.left_sigma); + transcript.append(b"right_sigma", &self.permutation.right_sigma); + transcript.append(b"out_sigma", &self.permutation.out_sigma); + transcript.append(b"fourth_sigma", 
&self.permutation.fourth_sigma); transcript.circuit_domain_sep(self.n as u64); } } @@ -283,10 +278,9 @@ where Eq(bound = ""), PartialEq(bound = "") )] -pub struct ProverKey +pub struct ProverKey where - F: PrimeField, - P: TEModelParameters, + F: FftField, { /// Circuit size pub(crate) n: usize, @@ -317,15 +311,11 @@ where /// in their evaluation phase and divide by the quotient /// polynomial without having to perform IFFT pub(crate) v_h_coset_8n: Evaluations, - - /// Type Parameter Marker - __: PhantomData

, } -impl ProverKey +impl ProverKey where - F: PrimeField, - P: TEModelParameters, + F: FftField, { pub(crate) fn v_h_coset_8n(&self) -> &Evaluations { &self.v_h_coset_8n @@ -377,7 +367,6 @@ where linear_evaluations, }, v_h_coset_8n, - __: PhantomData, } } } @@ -385,39 +374,34 @@ where #[cfg(test)] mod test { use super::*; + use crate::batch_test; + use ark_bls12_377::Bls12_377; use ark_bls12_381::Bls12_381; - use ark_bls12_381::Fr as BlsScalar; - use ark_bls12_381::G1Affine; - use ark_ed_on_bls12_381::EdwardsParameters; - use ark_ff::{Fp256, UniformRand}; + use ark_ec::models::TEModelParameters; use ark_poly::polynomial::univariate::DensePolynomial; use ark_poly::{EvaluationDomain, GeneralEvaluationDomain, UVPolynomial}; - use ark_poly_commit::kzg10::Commitment; - use rand_core::OsRng; + use rand::rngs::OsRng; - fn rand_poly_eval( - n: usize, - ) -> ( - DensePolynomial>, - Evaluations>, - ) { + fn rand_poly_eval(n: usize) -> (DensePolynomial, Evaluations) + where + F: FftField, + { let polynomial = DensePolynomial::rand(n, &mut OsRng); (polynomial, rand_evaluations(n)) } - fn rand_evaluations( - n: usize, - ) -> Evaluations> { - let domain: GeneralEvaluationDomain< - Fp256, - > = GeneralEvaluationDomain::new(4 * n).unwrap(); - let values: Vec<_> = - (0..8 * n).map(|_| BlsScalar::rand(&mut OsRng)).collect(); + fn rand_evaluations(n: usize) -> Evaluations + where + F: FftField, + { + let domain = GeneralEvaluationDomain::new(4 * n).unwrap(); + let values: Vec<_> = (0..8 * n).map(|_| F::rand(&mut OsRng)).collect(); Evaluations::from_vec_and_domain(values, domain) } #[test] fn test_serialise_deserialise_prover_key() { + type F = ark_bls12_381::Fr; let n = 1 << 11; let q_m = rand_poly_eval(n); @@ -466,37 +450,40 @@ mod test { .serialize_unchecked(&mut prover_key_bytes) .unwrap(); - let obtained_pk: ProverKey< - Fp256, - ark_ed_on_bls12_381::EdwardsParameters, - > = ProverKey::deserialize_unchecked(prover_key_bytes.as_slice()) - .unwrap(); + let obtained_pk: ProverKey = + ProverKey::deserialize_unchecked(prover_key_bytes.as_slice()) + .unwrap(); - assert!(prover_key == obtained_pk); + assert_eq!(prover_key, obtained_pk); } - #[test] - fn test_serialise_deserialise_verifier_key() { + fn test_serialise_deserialise_verifier_key() + where + F: PrimeField, + P: TEModelParameters, + PC: HomomorphicCommitment, + VerifierKey: PartialEq, + { let n = 2usize.pow(5); - let q_m = Commitment::(G1Affine::default()); - let q_l = Commitment(G1Affine::default()); - let q_r = Commitment(G1Affine::default()); - let q_o = Commitment(G1Affine::default()); - let q_4 = Commitment(G1Affine::default()); - let q_c = Commitment(G1Affine::default()); - let q_arith = Commitment(G1Affine::default()); - let q_range = Commitment(G1Affine::default()); - let q_logic = Commitment(G1Affine::default()); - let q_fixed_group_add = Commitment(G1Affine::default()); - let q_variable_group_add = Commitment(G1Affine::default()); - - let left_sigma = Commitment(G1Affine::default()); - let right_sigma = Commitment(G1Affine::default()); - let out_sigma = Commitment(G1Affine::default()); - let fourth_sigma = Commitment(G1Affine::default()); - - let verifier_key = VerifierKey::from_polynomial_commitments( + let q_m = PC::Commitment::default(); + let q_l = PC::Commitment::default(); + let q_r = PC::Commitment::default(); + let q_o = PC::Commitment::default(); + let q_4 = PC::Commitment::default(); + let q_c = PC::Commitment::default(); + let q_arith = PC::Commitment::default(); + let q_range = PC::Commitment::default(); + let q_logic = 
PC::Commitment::default(); + let q_fixed_group_add = PC::Commitment::default(); + let q_variable_group_add = PC::Commitment::default(); + + let left_sigma = PC::Commitment::default(); + let right_sigma = PC::Commitment::default(); + let out_sigma = PC::Commitment::default(); + let fourth_sigma = PC::Commitment::default(); + + let verifier_key = VerifierKey::::from_polynomial_commitments( n, q_m, q_l, @@ -520,10 +507,24 @@ mod test { .serialize_unchecked(&mut verifier_key_bytes) .unwrap(); - let obtained_vk: VerifierKey = + let obtained_vk: VerifierKey = VerifierKey::deserialize_unchecked(verifier_key_bytes.as_slice()) .unwrap(); assert!(verifier_key == obtained_vk); } + + // Test for Bls12_381 + batch_test!( + [test_serialise_deserialise_verifier_key], + [] => ( + Bls12_381, ark_ed_on_bls12_381::EdwardsParameters ) + ); + + // Test for Bls12_377 + batch_test!( + [test_serialise_deserialise_verifier_key], + [] => ( + Bls12_377, ark_ed_on_bls12_377::EdwardsParameters ) + ); } diff --git a/plonk-core/src/proof_system/widget/range.rs b/plonk-core/src/proof_system/widget/range.rs index 4cbae32c..f666d49d 100644 --- a/plonk-core/src/proof_system/widget/range.rs +++ b/plonk-core/src/proof_system/widget/range.rs @@ -6,8 +6,7 @@ //! Range Gate -use crate::proof_system::GateConstraint; -use crate::proof_system::GateValues; +use crate::proof_system::{GateConstraint, GateValues}; use ark_ff::Field; use core::marker::PhantomData; diff --git a/plonk-core/src/test.rs b/plonk-core/src/test.rs index b1763e6f..6c80a7b7 100644 --- a/plonk-core/src/test.rs +++ b/plonk-core/src/test.rs @@ -10,6 +10,76 @@ /// /// The set of tests is split in two. The first set between `[]` is for regular /// tests that should not panic. The second set is for tests that should panic. + +#[macro_export] +macro_rules! batch_test_field { + ( [$($test_set:ident),*], [$($test_panic_set:ident),*] => ($engine:ty) ) => { + paste::item! { + $( + #[test] + #[allow(non_snake_case)] + fn [< $test_set _on_ $engine>]() { + $test_set::<<$engine as ark_ec::PairingEngine>::Fr>() + } + )* + $( + #[test] + #[should_panic] + #[allow(non_snake_case)] + fn [< $test_panic_set _on_ $engine>]() { + $test_panic_set::<<$engine as ark_ec::PairingEngine>::Fr>() + } + )* + } + } +} + +#[macro_export] +macro_rules! batch_test_field_params { + ( [$($test_set:ident),*], [$($test_panic_set:ident),*] => ($engine:ty, $params:ty) ) => { + paste::item! { + $( + #[test] + #[allow(non_snake_case)] + fn [< $test_set _on_ $engine>]() { + $test_set::<<$engine as ark_ec::PairingEngine>::Fr, $params>() + } + )* + $( + #[test] + #[should_panic] + #[allow(non_snake_case)] + fn [< $test_panic_set _on_ $engine>]() { + $test_panic_set::<<$engine as ark_ec::PairingEngine>::Fr, $params>() + } + )* + } + } +} + +#[macro_export] +macro_rules! batch_test_kzg { + ( [$($test_set:ident),*], [$($test_panic_set:ident),*] => ($engine:ty, $params:ty) ) => { + paste::item! { + $( + #[test] + #[allow(non_snake_case)] + fn [< $test_set _on_ $engine>]() { + $test_set::<<$engine as ark_ec::PairingEngine>::Fr, $params, crate::commitment::KZG10<$engine>>() + } + )* + $( + #[test] + #[should_panic] + #[allow(non_snake_case)] + fn [< $test_panic_set _on_ $engine>]() { + $test_panic_set::<<$engine as ark_ec::PairingEngine>::Fr, $params, crate::commitment::KZG10<$engine>>() + } + )* + } + } +} + #[macro_export] macro_rules! batch_test { ( [$($test_set:ident),*], [$($test_panic_set:ident),*] => ($engine:ty, $params:ty) ) => { @@ -18,7 +88,12 @@ macro_rules! 
batch_test { #[test] #[allow(non_snake_case)] fn [< $test_set _on_ $engine>]() { - $test_set::<$engine, $params>() + $test_set::<<$engine as ark_ec::PairingEngine>::Fr, $params, crate::commitment::KZG10<$engine>>() + } + #[test] + #[allow(non_snake_case)] + fn [< $test_set _on_ $engine _ipa>]() { + $test_set::<<$engine as ark_ec::PairingEngine>::Fr, $params, ark_poly_commit::ipa_pc::InnerProductArgPC<<$engine as ark_ec::PairingEngine>::G1Affine, blake2::Blake2s, ark_poly::univariate::DensePolynomial<<$engine as ark_ec::PairingEngine>::Fr>>>() } )* $( @@ -26,7 +101,13 @@ macro_rules! batch_test { #[should_panic] #[allow(non_snake_case)] fn [< $test_panic_set _on_ $engine>]() { - $test_panic_set::<$engine, $params>() + $test_panic_set::<<$engine as ark_ec::PairingEngine>::Fr, $params, crate::commitment::KZG10<$engine>>() + } + #[test] + #[should_panic] + #[allow(non_snake_case)] + fn [< $test_panic_set _on_ $engine _ipa>]() { + $test_panic_set::<<$engine as ark_ec::PairingEngine>::Fr, $params, ark_poly_commit::ipa_pc::InnerProductArgPC<<$engine as ark_ec::PairingEngine>::G1Affine, blake2::Blake2s, ark_poly::univariate::DensePolynomial<<$engine as ark_ec::PairingEngine>::Fr>>>() } )* } diff --git a/plonk-core/src/transcript.rs b/plonk-core/src/transcript.rs index c69c2d36..7b99fb57 100644 --- a/plonk-core/src/transcript.rs +++ b/plonk-core/src/transcript.rs @@ -7,91 +7,76 @@ //! This is an extension over the [Merlin Transcript](Transcript) which adds a //! few extra functionalities. -use ark_ec::PairingEngine; use ark_ff::{Field, PrimeField}; -use ark_poly_commit::kzg10::Commitment; +use ark_poly::univariate::DensePolynomial; +use ark_poly_commit::{LabeledCommitment, PolynomialCommitment}; use ark_serialize::CanonicalSerialize; use core::marker::PhantomData; use merlin::Transcript; -/// Wrapper around [`Transcript`] -#[derive(derivative::Derivative)] -#[derivative(Clone)] -pub struct TranscriptWrapper -where - E: PairingEngine, -{ - /// Base Transcript - pub transcript: Transcript, - - /// Type Parameter Marker - __: PhantomData, -} - -impl TranscriptWrapper -where - E: PairingEngine, -{ - /// Builds a new [`TranscriptWrapper`] with the given `label`. - #[inline] - pub fn new(label: &'static [u8]) -> Self { - Self { - transcript: Transcript::new(label), - __: PhantomData, - } - } -} - /// Transcript adds an abstraction over the Merlin transcript /// For convenience -pub(crate) trait TranscriptProtocol -where - E: PairingEngine, -{ - /// Append a `commitment` with the given `label`. - fn append_commitment(&mut self, label: &'static [u8], comm: &Commitment); +pub(crate) trait TranscriptProtocol { + /// Append an `item` with the given `label`. + fn append(&mut self, label: &'static [u8], item: &impl CanonicalSerialize); - /// Append a scalar with the given `label`. - fn append_scalar(&mut self, label: &'static [u8], s: &E::Fr); + /// Append some number of LabeledCommitments + fn append_commitments<'a, F, PC>( + &mut self, + commitments: impl IntoIterator>, + _phantom: PhantomData, + ) where + F: Field, + PC: 'a + PolynomialCommitment>; /// Compute a `label`ed challenge variable. - fn challenge_scalar(&mut self, label: &'static [u8]) -> E::Fr; + fn challenge_scalar(&mut self, label: &'static [u8]) -> F; /// Append domain separator for the circuit size. 
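// Usage sketch (assumed BLS12-381 scalar field) of the transcript pattern this
// trait is being reworked around: items are appended as canonical bytes, and
// challenges are squeezed with `challenge_bytes` and mapped back into the
// field with `from_random_bytes`. The function name is illustrative only.
use ark_bls12_381::Fr;
use ark_ff::{Field, PrimeField};
use ark_serialize::CanonicalSerialize;
use merlin::Transcript;

fn squeeze_challenge(transcript: &mut Transcript, x: &Fr) -> Fr {
    let mut bytes = Vec::new();
    x.serialize(&mut bytes).unwrap();
    transcript.append_message(b"x", &bytes);
    // Draw size_in_bits()/8 bytes so the sample maps into the field.
    let mut buf = vec![0u8; Fr::size_in_bits() / 8];
    transcript.challenge_bytes(b"challenge", &mut buf);
    Fr::from_random_bytes(&buf).expect("valid field element")
}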
diff --git a/plonk-core/src/transcript.rs b/plonk-core/src/transcript.rs
index c69c2d36..7b99fb57 100644
--- a/plonk-core/src/transcript.rs
+++ b/plonk-core/src/transcript.rs
@@ -7,91 +7,76 @@
 //! This is an extension over the [Merlin Transcript](Transcript) which adds a
 //! few extra functionalities.
 
-use ark_ec::PairingEngine;
 use ark_ff::{Field, PrimeField};
-use ark_poly_commit::kzg10::Commitment;
+use ark_poly::univariate::DensePolynomial;
+use ark_poly_commit::{LabeledCommitment, PolynomialCommitment};
 use ark_serialize::CanonicalSerialize;
 use core::marker::PhantomData;
 use merlin::Transcript;
 
-/// Wrapper around [`Transcript`]
-#[derive(derivative::Derivative)]
-#[derivative(Clone)]
-pub struct TranscriptWrapper<E>
-where
-    E: PairingEngine,
-{
-    /// Base Transcript
-    pub transcript: Transcript,
-
-    /// Type Parameter Marker
-    __: PhantomData<E>,
-}
-
-impl<E> TranscriptWrapper<E>
-where
-    E: PairingEngine,
-{
-    /// Builds a new [`TranscriptWrapper`] with the given `label`.
-    #[inline]
-    pub fn new(label: &'static [u8]) -> Self {
-        Self {
-            transcript: Transcript::new(label),
-            __: PhantomData,
-        }
-    }
-}
-
 /// Transcript adds an abstraction over the Merlin transcript
 /// For convenience
-pub(crate) trait TranscriptProtocol<E>
-where
-    E: PairingEngine,
-{
-    /// Append a `commitment` with the given `label`.
-    fn append_commitment(&mut self, label: &'static [u8], comm: &Commitment<E>);
+pub(crate) trait TranscriptProtocol {
+    /// Append an `item` with the given `label`.
+    fn append(&mut self, label: &'static [u8], item: &impl CanonicalSerialize);
 
-    /// Append a scalar with the given `label`.
-    fn append_scalar(&mut self, label: &'static [u8], s: &E::Fr);
+    /// Append some number of LabeledCommitments
+    fn append_commitments<'a, F, PC>(
+        &mut self,
+        commitments: impl IntoIterator<Item = &'a LabeledCommitment<PC::Commitment>>,
+        _phantom: PhantomData<PC>,
+    ) where
+        F: Field,
+        PC: 'a + PolynomialCommitment<F, DensePolynomial<F>>;
 
     /// Compute a `label`ed challenge variable.
-    fn challenge_scalar(&mut self, label: &'static [u8]) -> E::Fr;
+    fn challenge_scalar<F: PrimeField>(&mut self, label: &'static [u8]) -> F;
 
     /// Append domain separator for the circuit size.
     fn circuit_domain_sep(&mut self, n: u64);
 }
 
-impl<E> TranscriptProtocol<E> for TranscriptWrapper<E>
-where
-    E: PairingEngine,
-{
-    fn append_commitment(
-        &mut self,
-        label: &'static [u8],
-        comm: &Commitment<E>,
-    ) {
+impl TranscriptProtocol for Transcript {
+    fn append(&mut self, label: &'static [u8], item: &impl CanonicalSerialize) {
         let mut bytes = Vec::new();
-        comm.0.serialize(&mut bytes).unwrap();
-        self.transcript.append_message(label, &bytes);
+        item.serialize(&mut bytes).unwrap();
+        self.append_message(label, &bytes)
     }
-
-    fn append_scalar(&mut self, label: &'static [u8], s: &E::Fr) {
-        let mut bytes = Vec::new();
-        s.serialize(&mut bytes).unwrap();
-        self.transcript.append_message(label, &bytes)
+    fn append_commitments<'a, F, PC>(
+        &mut self,
+        commitments: impl IntoIterator<Item = &'a LabeledCommitment<PC::Commitment>>,
+        _phantom: PhantomData<PC>,
+    ) where
+        F: Field,
+        PC: 'a + PolynomialCommitment<F, DensePolynomial<F>>,
+    {
+        for commitment in commitments {
+            self.append(
+                // TODO: don't leak memory here by allowing
+                // Transcript::append_message to take non-static lifetimes
+                Box::leak::<'static>(
+                    commitment.label().clone().into_boxed_str(),
+                )
+                .as_bytes(),
+                commitment.commitment(),
+            )
+        }
     }
 
-    fn challenge_scalar(&mut self, label: &'static [u8]) -> E::Fr {
+    fn challenge_scalar<F>(&mut self, label: &'static [u8]) -> F
+    where
+        F: PrimeField,
+    {
         // XXX: review this: assure from_random_bytes returnes a valid Field
         // element
-        let size = E::Fr::size_in_bits() / 8;
+        let size = F::size_in_bits() / 8;
         let mut buf = vec![0u8; size];
-        self.transcript.challenge_bytes(label, &mut buf);
-        E::Fr::from_random_bytes(&buf).unwrap()
+        self.challenge_bytes(label, &mut buf);
+        F::from_random_bytes(&buf).unwrap()
     }
 
     fn circuit_domain_sep(&mut self, n: u64) {
-        self.transcript.append_message(b"dom-sep", b"circuit_size");
-        self.transcript.append_u64(b"n", n);
+        self.append_message(b"dom-sep", b"circuit_size");
+        self.append_u64(b"n", n);
     }
 }
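A sketch of how the reworked transcript is driven from inside the crate (the trait is `pub(crate)`, so this only compiles within `plonk-core`); the labels, the appended value, and the module path of the import are placeholders:

    use ark_bls12_381::Fr;
    use merlin::Transcript;

    use crate::transcript::TranscriptProtocol;

    fn sample_challenge() -> Fr {
        let mut transcript = Transcript::new(b"plonk-example");
        // Domain-separate by circuit size, then absorb a prover message.
        transcript.circuit_domain_sep(1 << 5);
        transcript.append(b"a_eval", &Fr::from(42u64));
        // Squeeze a verifier challenge from the accumulated transcript state.
        transcript.challenge_scalar(b"alpha")
    }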
diff --git a/plonk-core/src/util.rs b/plonk-core/src/util.rs
index 1c0ddcb5..6fee8555 100644
--- a/plonk-core/src/util.rs
+++ b/plonk-core/src/util.rs
@@ -4,13 +4,9 @@
 //
 // Copyright (c) DUSK NETWORK. All rights reserved.
 
-use ark_ec::{AffineCurve, ModelParameters, PairingEngine, TEModelParameters};
+use ark_ec::{ModelParameters, TEModelParameters};
 use ark_ff::{BigInteger, FftField, Field, FpParameters, PrimeField};
-use ark_poly::{
-    univariate::DensePolynomial, EvaluationDomain, GeneralEvaluationDomain,
-    Polynomial, UVPolynomial,
-};
-use ark_poly_commit::kzg10::Commitment;
+use ark_poly::{EvaluationDomain, GeneralEvaluationDomain};
 
 /// Returns an iterator over increasing powers of the given `scalar` starting
 /// at `0`.
@@ -22,35 +18,6 @@ where
     core::iter::successors(Some(F::one()), move |p| Some(*p * scalar))
 }
 
-/// Performs polynomial division by `(x - z)` with `x` indeterminant using
-/// Ruffini's algorithm.
-pub fn ruffini<F>(poly: DensePolynomial<F>, z: F) -> DensePolynomial<F>
-where
-    F: PrimeField,
-{
-    let mut quotient = Vec::with_capacity(poly.degree());
-    let mut k = F::zero();
-
-    // Reverse the results and use Ruffini's method to compute the quotient
-    // The coefficients must be reversed as Ruffini's method
-    // starts with the leading coefficient, while Polynomials
-    // are stored in increasing order i.e. the leading coefficient is the
-    // last element
-    for coeff in poly.coeffs.into_iter().rev() {
-        let t = coeff + k;
-        quotient.push(t);
-        k = z * t;
-    }
-
-    // Pop off the last element, it is the remainder term
-    // For PLONK, we only care about perfect factors
-    quotient.pop();
-
-    // Reverse the results for storage in the Polynomial struct
-    quotient.reverse();
-    DensePolynomial::from_coefficients_vec(quotient)
-}
-
 /// Evaluation Domain Extension Trait
 pub trait EvaluationDomainExt<F>: EvaluationDomain<F>
 where
@@ -123,18 +90,17 @@ where
 /// curve. Panics if the embedded scalar is greater than the modulus of the
 /// pairing firendly curve scalar field
 #[allow(dead_code)]
-pub fn from_embedded_curve_scalar<E, P>(
+pub fn from_embedded_curve_scalar<F, P>(
     embedded_scalar: <P as ModelParameters>::ScalarField,
-) -> E::Fr
+) -> F
 where
-    E: PairingEngine,
-    P: TEModelParameters<BaseField = E::Fr>,
+    F: PrimeField,
+    P: TEModelParameters<BaseField = F>,
 {
     let scalar_repr = embedded_scalar.into_repr();
-
-    let modulus = <<E::Fr as PrimeField>::Params as FpParameters>::MODULUS;
+    let modulus = <<F as PrimeField>::Params as FpParameters>::MODULUS;
     if modulus.num_bits() >= scalar_repr.num_bits() {
-        let s = <<E::Fr as PrimeField>::BigInt as BigInteger>::from_bits_le(
+        let s = <<F as PrimeField>::BigInt as BigInteger>::from_bits_le(
             &scalar_repr.to_bits_le(),
         );
         assert!(s < modulus,
@@ -146,19 +112,17 @@ where
         assert!(scalar_repr < m,
         "The embedded scalar exceeds the capacity representation of the outter curve scalar");
     }
-    E::Fr::from_le_bytes_mod_order(&scalar_repr.to_bytes_le())
+    F::from_le_bytes_mod_order(&scalar_repr.to_bytes_le())
 }
 
 /// Get a embedded curve scalar `P::ScalarField` from a scalar of the pariring
 /// friendly curve. Panics if the pairing frindly curve scalar is greater than
 /// the modulus of the embedded curve scalar field
 #[allow(dead_code)]
-pub(crate) fn to_embedded_curve_scalar<E, P>(
-    pfc_scalar: E::Fr,
-) -> P::ScalarField
+pub(crate) fn to_embedded_curve_scalar<F, P>(pfc_scalar: F) -> P::ScalarField
 where
-    E: PairingEngine,
-    P: TEModelParameters<BaseField = E::Fr>,
+    F: PrimeField,
+    P: TEModelParameters<BaseField = F>,
 {
     let scalar_repr = pfc_scalar.into_repr();
     let modulus =
@@ -170,7 +134,7 @@ where
         assert!(s < modulus,
         "The embedded scalar exceeds the capacity representation of the outter curve scalar");
     } else {
-        let m = <<E::Fr as PrimeField>::BigInt as BigInteger>::from_bits_le(
+        let m = <<F as PrimeField>::BigInt as BigInteger>::from_bits_le(
             &modulus.to_bits_le(),
         );
         assert!(scalar_repr < m,
@@ -179,31 +143,27 @@ where
     P::ScalarField::from_le_bytes_mod_order(&scalar_repr.to_bytes_le())
 }
 
-/// Computes a linear combination of the polynomial evaluations and polynomial
-/// commitments provided a challenge.
-// TODO: complete doc
-pub fn linear_combination<E>(
-    evals: &[E::Fr],
-    commitments: &[Commitment<E>],
-    challenge: E::Fr,
-) -> (Commitment<E>, E::Fr)
-where
-    E: PairingEngine,
-{
-    assert_eq!(evals.len(), commitments.len());
-    let powers = powers_of(challenge).take(evals.len()).collect::<Vec<_>>();
-    let combined_eval = evals
-        .iter()
-        .zip(powers.iter())
-        .map(|(&eval, power)| eval * power)
-        .sum();
-    let combined_commitment = Commitment(
-        commitments
-            .iter()
-            .zip(powers.iter())
-            .map(|(commit, &power)| commit.0.mul(power))
-            .sum::<E::G1Projective>()
-            .into(),
-    );
-    (combined_commitment, combined_eval)
+/// Macro to quickly label polynomials
+#[macro_export]
+macro_rules! label_polynomial {
+    ($poly:expr) => {
+        ark_poly_commit::LabeledPolynomial::new(
+            stringify!($poly).to_owned(),
+            $poly.clone(),
+            None,
+            None,
+        )
+    };
+}
+
+/// Macro to quickly label polynomial commitments
+#[macro_export]
+macro_rules! label_commitment {
+    ($comm:expr) => {
+        ark_poly_commit::LabeledCommitment::new(
+            stringify!($comm).to_owned(),
+            $comm.clone(),
+            None,
+        )
+    };
 }
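The two labelling macros exist so that polynomials and commitments handed to `ark-poly-commit` carry the name of the variable they were built from as their label, with no degree or hiding bounds. A small sketch of the intent (the polynomial and field choice are arbitrary, and the macro is assumed to be in scope at the crate root):

    use ark_bls12_381::Fr;
    use ark_poly::{univariate::DensePolynomial, UVPolynomial};

    fn labelling_example() {
        // 1 + 2x over the BLS12-381 scalar field.
        let w_l_poly = DensePolynomial::<Fr>::from_coefficients_vec(vec![
            Fr::from(1u64),
            Fr::from(2u64),
        ]);
        // Expands to `LabeledPolynomial::new("w_l_poly".to_owned(), w_l_poly.clone(), None, None)`.
        let labeled = label_polynomial!(w_l_poly);
        assert_eq!(labeled.label(), "w_l_poly");
    }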