diff --git a/README.md b/README.md index e0fdf8b..a3ac355 100644 --- a/README.md +++ b/README.md @@ -20,11 +20,16 @@ Required inputs: ```rust use dep::kzg_verifier::verify_kzg_commit; -fn main() -> bool { + assert(verify_kzg_commit(srs, x, y, commitment, proof)) -} ``` +## Limitations + +- Only BLS12_381 curve is supported currently. + +- Only commitment verification is available. Other operations such as commit, and open are yet to be supported. + ## Acknowledgements [Lamdaworks](https://github.com/RajeshRk18/lambdaworks) diff --git a/src/lib.nr b/src/lib.nr index bec03c6..1862bce 100644 --- a/src/lib.nr +++ b/src/lib.nr @@ -32,7 +32,7 @@ pub fn verify_kzg_commit( } #[test] -fn test_verify_works() { +fn test_verify1() { let toxic_waste = PrimeField::from_u56(100 as u56); let BLS12_381 { curve } = bls12_381(); @@ -46,7 +46,7 @@ fn test_verify_works() { for i in 0..50 { let mut exponent = PrimeField::one(); - for j in 0..i { + for _j in 0..i { exponent = toxic_waste.mul(exponent); } g1_points.push(g1_scalar_mul(g1_curve.gen, exponent)); @@ -78,4 +78,3 @@ fn test_verify_works() { let proof = g1_curve.gen; assert(verify_kzg_commit(srs, x, y, commitment, proof)); } - diff --git a/src/pairing.nr b/src/pairing.nr index d7e7cf3..e753e4b 100644 --- a/src/pairing.nr +++ b/src/pairing.nr @@ -7,7 +7,7 @@ use dep::bls12_381::curve::{bls12_381, bls12_381_g2, BLS12_381, BLS12_381G2 }; use dep::std; // f: G_1 -> x * G_1 -fn g1_scalar_mul(point: G1Point, scalar: PrimeField) -> G1Point { +pub(crate) fn g1_scalar_mul(point: G1Point, scalar: PrimeField) -> G1Point { let BLS12_381 { curve } = bls12_381(); let mut res = G1Point::zero(); @@ -23,7 +23,7 @@ fn g1_scalar_mul(point: G1Point, scalar: PrimeField) -> G1Point { } // f: G_2 -> x * G_2 -fn g2_scalar_mul(point: G2Point, scalar: PrimeField) -> G2Point { +pub(crate) fn g2_scalar_mul(point: G2Point, scalar: PrimeField) -> G2Point { let BLS12_381G2 { curve } = bls12_381_g2(); let mut res = G2Point::zero(); @@ -39,21 +39,21 @@ fn 
g2_scalar_mul(point: G2Point, scalar: PrimeField) -> G2Point { } // f: (G_1, G_1) -> G_1 + G_1 -fn g1_add(point1: G1Point, point2: G1Point) -> G1Point { +pub(crate) fn g1_add(point1: G1Point, point2: G1Point) -> G1Point { let BLS12_381 { curve } = bls12_381(); curve.add(point1, point2) } // f: (G_2, G_2) -> G_2 + G_2 -fn g2_add(point1: G2Point, point2: G2Point) -> G2Point { +pub(crate) fn g2_add(point1: G2Point, point2: G2Point) -> G2Point { let BLS12_381G2 { curve } = bls12_381_g2(); curve.add(point1, point2) } // computes pairing with 2 pairs. -fn compute_pairing(pair1: (G1Point, G2Point), pair2: (G1Point, G2Point)) -> Fp12 { +pub(crate) fn compute_pairing(pair1: (G1Point, G2Point), pair2: (G1Point, G2Point)) -> Fp12 { let pair1_miller = miller_loop(pair1.1, pair1.0); let pair2_miller = miller_loop(pair2.1, pair2.0); diff --git a/src/poly.nr b/src/poly.nr index 865d804..4a74673 100644 --- a/src/poly.nr +++ b/src/poly.nr @@ -6,12 +6,13 @@ use dep::bls12_381::curve::pairing::{pair, final_exponentiation, miller_loop}; use dep::bls12_381::curve::{bls12_381, bls12_381_g2, BLS12_381, BLS12_381G2 }; use dep::std::collections::vec::Vec; -/// This wont work if the polynomial is of size larger than MAX_POLY_SIZE as the loop is unbounded and the compiler expects it to be bounded in compole time. +/// This won't work if the polynomial is of size larger than MAX_POLY_SIZE as the loop is unbounded and the compiler expects it to be bounded at compile time. /// This should not impact the verifier api because verifier does not need to evaluate any polynomial. /// If we want to work with polynomials of size larger than MAX_POLY_SIZE, maybe we can do the following: /// - sub divide a polynomial of size MAX_SUB_POLY_SIZE and iterating upto `MAX_ITERATIONS` for ops. /// - With this trick, we can operate over polynomials upto degree MAX_SUB_POLY_SIZE * MAX_ITERATIONS - 1. +/// We can have a Rust, or any other language, wrapper which does the subdividing. 
/// NOTE: This trick will add lots of complexities to the code. /// As the loop needs to be bounded, we restrict the polynomial size. This is highly inefficient and generates more constraints than needed