diff --git a/mk/generate_curves.py b/mk/generate_curves.py
index 86a882401..c1939da02 100644
--- a/mk/generate_curves.py
+++ b/mk/generate_curves.py
@@ -33,7 +33,7 @@
 use super::{
     elem::{binary_op, binary_op_assign},
-    elem_sqr_mul, elem_sqr_mul_acc, Modulus, *,
+    elem_sqr_mul, elem_sqr_mul_acc, PublicModulus, *,
 };
 
 pub(super) const NUM_LIMBS: usize = (%(bits)d + LIMB_BITS - 1) / LIMB_BITS;
@@ -42,9 +42,9 @@
     num_limbs: elem::NumLimbs::P%(bits)s,
     order_bits: %(bits)d,
 
-    q: Modulus {
+    q: PublicModulus {
         p: limbs_from_hex("%(q)x"),
-        rr: limbs_from_hex(%(q_rr)s),
+        rr: PublicElem::from_hex(%(q_rr)s),
     },
     n: PublicElem::from_hex("%(n)x"),
 
diff --git a/src/ec/suite_b.rs b/src/ec/suite_b.rs
index b0069c2f5..8f3809ecb 100644
--- a/src/ec/suite_b.rs
+++ b/src/ec/suite_b.rs
@@ -31,10 +31,12 @@ use crate::{arithmetic::montgomery::*, cpu, ec, error, io::der, pkcs8};
 //
 fn verify_affine_point_is_on_the_curve(
     ops: &CommonOps,
+    q: &Modulus<Q>,
     (x, y): (&Elem<R>, &Elem<R>),
 ) -> Result<(), error::Unspecified> {
     verify_affine_point_is_on_the_curve_scaled(
         ops,
+        q,
         (x, y),
         &Elem::from(&ops.a),
         &Elem::from(&ops.b),
@@ -52,6 +54,7 @@ fn verify_affine_point_is_on_the_curve(
 // This function also verifies that the point is not at infinity.
 fn verify_jacobian_point_is_on_the_curve(
     ops: &CommonOps,
+    q: &Modulus<Q>,
     p: &Point,
 ) -> Result<Elem<R>, error::Unspecified> {
     let z = ops.point_z(p);
@@ -109,7 +112,7 @@ fn verify_jacobian_point_is_on_the_curve(
     let z4_a = ops.elem_product(&z4, &Elem::from(&ops.a));
     let z6 = ops.elem_product(&z4, &z2);
     let z6_b = ops.elem_product(&z6, &Elem::from(&ops.b));
-    verify_affine_point_is_on_the_curve_scaled(ops, (&x, &y), &z4_a, &z6_b)?;
+    verify_affine_point_is_on_the_curve_scaled(ops, q, (&x, &y), &z4_a, &z6_b)?;
     Ok(z2)
 }
 
@@ -140,6 +143,7 @@ fn verify_jacobian_point_is_on_the_curve(
 // Jean-Pierre Seifert.
 fn verify_affine_point_is_on_the_curve_scaled(
     ops: &CommonOps,
+    q: &Modulus<Q>,
     (x, y): (&Elem<R>, &Elem<R>),
     a_scaled: &Elem<R>,
     b_scaled: &Elem<R>,
@@ -147,9 +151,9 @@ fn verify_affine_point_is_on_the_curve_scaled(
     let lhs = ops.elem_squared(y);
 
     let mut rhs = ops.elem_squared(x);
-    ops.elem_add(&mut rhs, a_scaled);
+    q.elem_add(&mut rhs, a_scaled);
     ops.elem_mul(&mut rhs, x);
-    ops.elem_add(&mut rhs, b_scaled);
+    q.elem_add(&mut rhs, b_scaled);
 
     if !ops.elems_are_equal(&lhs, &rhs).leak() {
         return Err(error::Unspecified);
diff --git a/src/ec/suite_b/ecdh.rs b/src/ec/suite_b/ecdh.rs
index 62f626422..855df5ed9 100644
--- a/src/ec/suite_b/ecdh.rs
+++ b/src/ec/suite_b/ecdh.rs
@@ -93,6 +93,8 @@ fn ecdh(
     // The "NSA Guide" steps are from section 3.1 of the NSA guide, "Ephemeral
     // Unified Model."
 
+    let q = public_key_ops.common.elem_modulus();
+
     // NSA Guide Step 1 is handled separately.
 
     // NIST SP 800-56Ar2 5.6.2.2.2.
@@ -101,7 +103,7 @@ fn ecdh(
     // `parse_uncompressed_point` verifies that the point is not at infinity
     // and that it is on the curve, using the Partial Public-Key Validation
     // Routine.
-    let peer_public_key = parse_uncompressed_point(public_key_ops, peer_public_key, cpu)?;
+    let peer_public_key = parse_uncompressed_point(public_key_ops, &q, peer_public_key, cpu)?;
 
     // NIST SP 800-56Ar2 Step 1.
     // NSA Guide Step 3 (except point at infinity check).
@@ -134,7 +136,7 @@ fn ecdh(
     // `big_endian_affine_from_jacobian` verifies that the result is not at
     // infinity and also does an extra check to verify that the point is on
     // the curve.
-    big_endian_affine_from_jacobian(private_key_ops, out, None, &product, cpu)
+    big_endian_affine_from_jacobian(private_key_ops, &q, out, None, &product, cpu)
 
     // NSA Guide Step 5 & 6 are deferred to the caller. Again, we have a
     // pretty liberal interpretation of the NIST's spec's "Destroy" that
diff --git a/src/ec/suite_b/ecdsa/digest_scalar.rs b/src/ec/suite_b/ecdsa/digest_scalar.rs
index 76206d9ee..fbcb28fb4 100644
--- a/src/ec/suite_b/ecdsa/digest_scalar.rs
+++ b/src/ec/suite_b/ecdsa/digest_scalar.rs
@@ -42,18 +42,18 @@ use crate::{digest, ec::suite_b::ops::*};
 /// right will give a value less than 2**255, which is less than `n`. The
 /// analogous argument applies for P-384. However, it does *not* apply in
 /// general; for example, it doesn't apply to P-521.
-pub fn digest_scalar(ops: &ScalarOps, msg: digest::Digest) -> Scalar {
-    digest_scalar_(ops, msg.as_ref())
+pub fn digest_scalar(ops: &ScalarOps, n: &Modulus<N>, msg: digest::Digest) -> Scalar {
+    digest_scalar_(ops, n, msg.as_ref())
 }
 
 #[cfg(test)]
-pub(crate) fn digest_bytes_scalar(ops: &ScalarOps, digest: &[u8]) -> Scalar {
-    digest_scalar_(ops, digest)
+pub(crate) fn digest_bytes_scalar(ops: &ScalarOps, n: &Modulus<N>, digest: &[u8]) -> Scalar {
+    digest_scalar_(ops, n, digest)
 }
 
 // This is a separate function solely so that we can test specific digest
 // values like all-zero values and values larger than `n`.
-fn digest_scalar_(ops: &ScalarOps, digest: &[u8]) -> Scalar {
+fn digest_scalar_(ops: &ScalarOps, n: &Modulus<N>, digest: &[u8]) -> Scalar {
     let len = ops.scalar_bytes_len();
     let digest = if digest.len() > len {
         &digest[..len]
@@ -61,11 +61,8 @@ fn digest_scalar_(ops: &ScalarOps, digest: &[u8]) -> Scalar {
         digest
     };
 
-    scalar_parse_big_endian_partially_reduced_variable_consttime(
-        ops.common,
-        untrusted::Input::from(digest),
-    )
-    .unwrap()
+    scalar_parse_big_endian_partially_reduced_variable_consttime(n, untrusted::Input::from(digest))
+        .unwrap()
 }
 
 #[cfg(test)]
@@ -94,6 +91,7 @@ mod tests {
                     panic!("Unsupported curve+digest: {}+{}", curve_name, digest_name);
                 }
             };
+            let n = ops.scalar_ops.scalar_modulus();
 
             assert_eq!(input.len(), digest_alg.output_len());
             assert_eq!(output.len(), ops.scalar_ops.scalar_bytes_len());
@@ -105,7 +103,7 @@ mod tests {
             )
             .unwrap();
 
-            let actual = digest_bytes_scalar(ops.scalar_ops, &input);
+            let actual = digest_bytes_scalar(ops.scalar_ops, &n, &input);
             assert_eq!(
                 ops.scalar_ops.leak_limbs(&actual),
                 ops.scalar_ops.leak_limbs(&expected)
diff --git a/src/ec/suite_b/ecdsa/signing.rs b/src/ec/suite_b/ecdsa/signing.rs
index 4a625ef66..558970248 100644
--- a/src/ec/suite_b/ecdsa/signing.rs
+++ b/src/ec/suite_b/ecdsa/signing.rs
@@ -238,6 +238,8 @@ impl EcdsaKeyPair {
         let scalar_ops = ops.scalar_ops;
         let cops = scalar_ops.common;
         let private_key_ops = self.alg.private_key_ops;
+        let q = cops.elem_modulus();
+        let n = scalar_ops.scalar_modulus();
 
         for _ in 0..100 {
             // XXX: iteration conut?
@@ -250,9 +252,9 @@ impl EcdsaKeyPair {
 
             // Step 3.
             let r = {
-                let (x, _) = private_key::affine_from_jacobian(private_key_ops, &r, cpu)?;
+                let (x, _) = private_key::affine_from_jacobian(private_key_ops, &q, &r, cpu)?;
                 let x = cops.elem_unencoded(&x);
-                elem_reduced_to_scalar(cops, &x)
+                n.elem_reduced_to_scalar(&x)
             };
             if cops.is_zero(&r) {
                 continue;
@@ -261,12 +263,12 @@ impl EcdsaKeyPair {
             // Step 4 is done by the caller.
 
             // Step 5.
-            let e = digest_scalar(scalar_ops, h);
+            let e = digest_scalar(scalar_ops, &n, h);
 
             // Step 6.
             let s = {
-                let dr = scalar_ops.scalar_product(&self.d, &r, cpu);
-                let e_plus_dr = scalar_sum(cops, &e, dr);
+                let mut e_plus_dr = scalar_ops.scalar_product(&self.d, &r, cpu);
+                n.elem_add(&mut e_plus_dr, &e);
                 scalar_ops.scalar_product(&k_inv, &e_plus_dr, cpu)
             };
             if cops.is_zero(&s) {
diff --git a/src/ec/suite_b/ecdsa/verification.rs b/src/ec/suite_b/ecdsa/verification.rs
index 6d9ecb102..d93aacfe0 100644
--- a/src/ec/suite_b/ecdsa/verification.rs
+++ b/src/ec/suite_b/ecdsa/verification.rs
@@ -63,7 +63,8 @@ impl signature::VerificationAlgorithm for EcdsaVerificationAlgorithm {
 
             // NSA Guide Step 3: "Convert the bit string H to an integer e as
             // described in Appendix B.2."
-            digest_scalar(self.ops.scalar_ops, h)
+            let n = self.ops.scalar_ops.scalar_modulus();
+            digest_scalar(self.ops.scalar_ops, &n, h)
         };
 
         self.verify_digest(public_key, e, signature)
@@ -84,6 +85,7 @@ impl EcdsaVerificationAlgorithm {
 
         let public_key_ops = self.ops.public_key_ops;
         let scalar_ops = self.ops.scalar_ops;
+        let q = public_key_ops.common.elem_modulus();
 
         // NSA Guide Prerequisites:
         //
@@ -102,7 +104,7 @@ impl EcdsaVerificationAlgorithm {
         // can do. Prerequisite #2 is handled implicitly as the domain
         // parameters are hard-coded into the source. Prerequisite #3 is
         // handled by `parse_uncompressed_point`.
-        let peer_pub_key = parse_uncompressed_point(public_key_ops, public_key, cpu)?;
+        let peer_pub_key = parse_uncompressed_point(public_key_ops, &q, public_key, cpu)?;
 
         let (r, s) = signature.read_all(error::Unspecified, |input| {
             (self.split_rs)(scalar_ops, input)
@@ -134,7 +136,7 @@ impl EcdsaVerificationAlgorithm {
         // `verify_affine_point_is_on_the_curve_scaled` for details on why).
         // But, we're going to avoid converting to affine for performance
        // reasons, so we do the verification using the Jacobian coordinates.
-        let z2 = verify_jacobian_point_is_on_the_curve(public_key_ops.common, &product)?;
+        let z2 = verify_jacobian_point_is_on_the_curve(public_key_ops.common, &q, &product)?;
 
         // NSA Guide Step 7: "Compute v = xR mod n."
         // NSA Guide Step 8: "Compare v and r0. If v = r0, output VALID;
@@ -158,9 +160,9 @@ impl EcdsaVerificationAlgorithm {
         if sig_r_equals_x(self.ops, &r, &x, &z2) {
             return Ok(());
         }
-        if self.ops.elem_less_than_vartime(&r, &self.ops.q_minus_n) {
+        if q.elem_less_than_vartime(&r, &self.ops.q_minus_n) {
             let n = Elem::from(self.ops.n());
-            self.ops.scalar_ops.common.elem_add(&mut r, &n);
+            q.elem_add(&mut r, &n);
             if sig_r_equals_x(self.ops, &r, &x, &z2) {
                 return Ok(());
             }
         }
@@ -316,9 +318,11 @@ mod tests {
                    panic!("Unsupported curve: {}", curve_name);
                 }
             };
+            let n = alg.ops.scalar_ops.scalar_modulus();
 
             let digest = super::super::digest_scalar::digest_bytes_scalar(
                 alg.ops.scalar_ops,
+                &n,
                 &digest[..],
             );
             let actual_result = alg.verify_digest(
diff --git a/src/ec/suite_b/ops.rs b/src/ec/suite_b/ops.rs
index a9a6366d0..a1796b9e4 100644
--- a/src/ec/suite_b/ops.rs
+++ b/src/ec/suite_b/ops.rs
@@ -38,6 +38,13 @@ type PublicScalar<E> = elem::PublicElem<N, E>;
 #[derive(Clone, Copy)]
 pub enum N {}
 
+pub(super) struct Modulus<M> {
+    // TODO: [Limb; elem::NumLimbs::MAX]
+    limbs: &'static [Limb; elem::NumLimbs::MAX],
+    num_limbs: elem::NumLimbs,
+    m: PhantomData<M>,
+}
+
 pub struct Point {
     // The coordinates are stored in a contiguous array, where the first
     // `ops.num_limbs` elements are the X coordinate, the next
@@ -58,7 +65,7 @@ impl Point {
 /// Operations and values needed by all curve operations.
 pub struct CommonOps {
     num_limbs: elem::NumLimbs,
-    q: Modulus,
+    q: PublicModulus,
     n: PublicElem<Unencoded>,
 
     pub a: PublicElem<R>, // Must be -3 mod q
@@ -72,6 +79,15 @@ pub struct CommonOps {
 }
 
 impl CommonOps {
+    pub(super) fn elem_modulus(&'static self) -> Modulus<Q> {
+        Modulus {
+            // TODO: limbs: self.q.p.map(Limb::from),
+            limbs: &self.q.p,
+            num_limbs: self.num_limbs,
+            m: PhantomData,
+        }
+    }
+
     // The length of a field element, which is the same as the length of a
     // scalar, in bytes.
     pub fn len(&self) -> usize {
@@ -82,17 +98,21 @@ impl CommonOps {
     pub(super) fn n_limbs(&self) -> &[Limb] {
         &self.n.limbs[..self.num_limbs.into()]
     }
+}
 
+impl<M> Modulus<M> {
     #[inline]
-    pub fn elem_add<E: Encoding>(&self, a: &mut Elem<E>, b: &Elem<E>) {
+    pub fn elem_add<E: Encoding>(&self, a: &mut elem::Elem<M, E>, b: &elem::Elem<M, E>) {
         let num_limbs = self.num_limbs.into();
         limbs_add_assign_mod(
             &mut a.limbs[..num_limbs],
             &b.limbs[..num_limbs],
-            &self.q.p[..num_limbs],
+            &self.limbs[..num_limbs],
         );
     }
+}
 
+impl CommonOps {
     #[inline]
     pub fn elems_are_equal(&self, a: &Elem<R>, b: &Elem<R>) -> LimbMask {
         let num_limbs = self.num_limbs.into();
@@ -175,9 +195,9 @@ impl CommonOps {
     }
 }
 
-struct Modulus {
+struct PublicModulus {
     p: [LeakyLimb; elem::NumLimbs::MAX],
-    rr: [LeakyLimb; elem::NumLimbs::MAX],
+    rr: PublicElem<RR>,
 }
 
 /// Operations on private keys, for ECDH and ECDSA signing.
@@ -249,13 +269,14 @@ impl PublicKeyOps {
         let encoded_value = input.read_bytes(self.common.len())?;
         let parsed = elem_parse_big_endian_fixed_consttime(self.common, encoded_value)?;
         let mut r = Elem::zero();
+        let rr = Elem::from(&self.common.q.rr);
         // Montgomery encode (elem_to_mont).
         // TODO: do something about this.
         unsafe {
             (self.common.elem_mul_mont)(
                 r.limbs.as_mut_ptr(),
                 parsed.limbs.as_ptr(),
-                self.common.q.rr.as_ptr(),
+                rr.limbs.as_ptr(),
             )
         }
         Ok(r)
@@ -271,11 +292,22 @@ pub struct ScalarOps {
 }
 
 impl ScalarOps {
+    pub(super) fn scalar_modulus(&'static self) -> Modulus<N> {
+        Modulus {
+            // TODO: limbs: self.n.limbs.map(Limb::from),
+            limbs: &self.common.n.limbs,
+            num_limbs: self.common.num_limbs,
+            m: PhantomData,
+        }
+    }
+
     // The (maximum) length of a scalar, not including any padding.
     pub fn scalar_bytes_len(&self) -> usize {
         self.common.len()
     }
+}
 
+impl ScalarOps {
     pub fn leak_limbs<'s>(&self, s: &'s Scalar) -> &'s [Limb] {
         &s.limbs[..self.common.num_limbs.into()]
     }
@@ -325,14 +357,19 @@ impl PublicScalarOps {
 
     pub fn elem_equals_vartime(&self, a: &Elem<Unencoded>, b: &Elem<Unencoded>) -> bool {
         let num_limbs = self.public_key_ops.common.num_limbs.into();
-        a.limbs[..num_limbs] == b.limbs[..num_limbs]
+        limbs_equal_limbs_consttime(&a.limbs[..num_limbs], &b.limbs[..num_limbs]).leak()
     }
+}
 
+impl Modulus<Q> {
     pub fn elem_less_than_vartime(&self, a: &Elem<Unencoded>, b: &PublicElem<Unencoded>) -> bool {
-        let num_limbs = self.public_key_ops.common.num_limbs.into();
+        let num_limbs = self.num_limbs.into();
+        // TODO: let b = Elem::from(b);
         limbs_less_than_limbs_vartime(&a.limbs[..num_limbs], &b.limbs[..num_limbs])
     }
+}
 
+impl PublicScalarOps {
     pub(super) fn scalar_inv_to_mont_vartime(
         &self,
         s: &Scalar,
@@ -379,27 +416,19 @@ fn twin_mul_inefficient(
 }
 
 // This assumes n < q < 2*n.
-pub fn elem_reduced_to_scalar(ops: &CommonOps, elem: &Elem<Unencoded>) -> Scalar {
-    let num_limbs = ops.num_limbs.into();
-    let mut r_limbs = elem.limbs;
-    limbs_reduce_once_constant_time(&mut r_limbs[..num_limbs], &ops.n.limbs[..num_limbs]);
-    Scalar {
-        limbs: r_limbs,
-        m: PhantomData,
-        encoding: PhantomData,
+impl Modulus<N> {
+    pub fn elem_reduced_to_scalar(&self, elem: &Elem<Unencoded>) -> Scalar {
+        let num_limbs = self.num_limbs.into();
+        let mut r_limbs = elem.limbs;
+        limbs_reduce_once_constant_time(&mut r_limbs[..num_limbs], &self.limbs[..num_limbs]);
+        Scalar {
+            limbs: r_limbs,
+            m: PhantomData,
+            encoding: PhantomData,
+        }
     }
 }
 
-pub fn scalar_sum(ops: &CommonOps, a: &Scalar, mut b: Scalar) -> Scalar {
-    let num_limbs = ops.num_limbs.into();
-    limbs_add_assign_mod(
-        &mut b.limbs[..num_limbs],
-        &a.limbs[..num_limbs],
-        &ops.n.limbs[..num_limbs],
-    );
-    b
-}
-
 // Returns (`a` squared `squarings` times) * `b`.
 fn elem_sqr_mul(ops: &CommonOps, a: &Elem<R>, squarings: LeakyWord, b: &Elem<R>) -> Elem<R> {
     debug_assert!(squarings >= 1);
@@ -454,16 +483,15 @@ pub fn scalar_parse_big_endian_variable(
 }
 
 pub fn scalar_parse_big_endian_partially_reduced_variable_consttime(
-    ops: &CommonOps,
+    n: &Modulus<N>,
     bytes: untrusted::Input,
 ) -> Result<Scalar, error::Unspecified> {
-    let num_limbs = ops.num_limbs.into();
+    let num_limbs = n.num_limbs.into();
     let mut r = Scalar::zero();
-
     {
         let r = &mut r.limbs[..num_limbs];
         parse_big_endian_and_pad_consttime(bytes, r)?;
-        limbs_reduce_once_constant_time(r, &ops.n.limbs[..num_limbs]);
+        limbs_reduce_once_constant_time(r, &n.limbs[..num_limbs]);
     }
 
     Ok(r)
@@ -522,8 +550,9 @@ mod tests {
 
     fn q_minus_n_plus_n_equals_0_test(ops: &PublicScalarOps) {
         let cops = ops.scalar_ops.common;
+        let q = cops.elem_modulus();
         let mut x = Elem::from(&ops.q_minus_n);
-        cops.elem_add(&mut x, &Elem::from(&cops.n));
+        q.elem_add(&mut x, &Elem::from(&cops.n));
         assert!(cops.is_zero(&x));
     }
 
@@ -554,20 +583,21 @@ mod tests {
     }
 
     fn elem_add_test(ops: &PublicScalarOps, test_file: test::File) {
+        let cops = ops.public_key_ops.common;
+        let q = cops.elem_modulus();
        test::run(test_file, |section, test_case| {
             assert_eq!(section, "");
 
-            let cops = ops.public_key_ops.common;
             let a = consume_elem(cops, test_case, "a");
             let b = consume_elem(cops, test_case, "b");
             let expected_sum = consume_elem(cops, test_case, "r");
 
             let mut actual_sum = a;
-            ops.public_key_ops.common.elem_add(&mut actual_sum, &b);
+            q.elem_add(&mut actual_sum, &b);
             assert_limbs_are_equal(cops, &actual_sum.limbs, &expected_sum.limbs);
 
             let mut actual_sum = b;
-            ops.public_key_ops.common.elem_add(&mut actual_sum, &a);
+            q.elem_add(&mut actual_sum, &a);
             assert_limbs_are_equal(cops, &actual_sum.limbs, &expected_sum.limbs);
 
             Ok(())
@@ -778,7 +808,7 @@ mod tests {
     #[test]
     fn p256_scalar_square_test() {
         prefixed_extern! {
-            fn p256_scalar_sqr_rep_mont(r: *mut Limb, a: *const Limb, rep: Limb);
+            fn p256_scalar_sqr_rep_mont(r: *mut Limb, a: *const Limb, rep: LeakyWord);
         }
         scalar_square_test(
             &p256::SCALAR_OPS,
@@ -792,7 +822,7 @@ mod tests {
 
     fn scalar_square_test(
         ops: &ScalarOps,
-        sqr_rep: unsafe extern "C" fn(r: *mut Limb, a: *const Limb, rep: Limb),
+        sqr_rep: unsafe extern "C" fn(r: *mut Limb, a: *const Limb, rep: LeakyWord),
         test_file: test::File,
     ) {
         test::run(test_file, |section, test_case| {
@@ -1013,7 +1043,7 @@ mod tests {
     ) {
         let cpu = cpu::features();
         let cops = pub_ops.common;
-
+        let q = cops.elem_modulus();
         test::run(test_file, |section, test_case| {
             assert_eq!(section, "");
             let p_scalar = consume_scalar(cops, test_case, "p_scalar");
@@ -1021,6 +1051,7 @@ mod tests {
             let p = test_case.consume_bytes("p");
             let p = super::super::public_key::parse_uncompressed_point(
                 pub_ops,
+                &q,
                 untrusted::Input::from(&p),
                 cpu,
             )
             .unwrap();
@@ -1035,6 +1066,7 @@ mod tests {
             let (x, y) = actual_result[1..].split_at_mut(cops.len());
             super::super::private_key::big_endian_affine_from_jacobian(
                 priv_ops,
+                &q,
                 x,
                 Some(y),
                 &product,
diff --git a/src/ec/suite_b/ops/elem.rs b/src/ec/suite_b/ops/elem.rs
index 25eb36b17..0deb05d05 100644
--- a/src/ec/suite_b/ops/elem.rs
+++ b/src/ec/suite_b/ops/elem.rs
@@ -122,11 +122,7 @@ pub fn binary_op<M, E: Encoding>(
     a: &Elem<M, E>,
     b: &Elem<M, E>,
 ) -> Elem<M, E> {
-    let mut r = Elem {
-        limbs: [0; NumLimbs::MAX],
-        m: PhantomData,
-        encoding: PhantomData,
-    };
+    let mut r = Elem::zero();
     unsafe { f(r.limbs.as_mut_ptr(), a.limbs.as_ptr(), b.limbs.as_ptr()) }
     r
 }
@@ -147,11 +143,7 @@ pub fn unary_op<M, E: Encoding>(
     f: unsafe extern "C" fn(r: *mut Limb, a: *const Limb),
     a: &Elem<M, E>,
 ) -> Elem<M, E> {
-    let mut r = Elem {
-        limbs: [0; NumLimbs::MAX],
-        m: PhantomData,
-        encoding: PhantomData,
-    };
+    let mut r = Elem::zero();
     unsafe { f(r.limbs.as_mut_ptr(), a.limbs.as_ptr()) }
     r
 }
diff --git a/src/ec/suite_b/ops/p256.rs b/src/ec/suite_b/ops/p256.rs
index d586587a4..4648a1e0c 100644
--- a/src/ec/suite_b/ops/p256.rs
+++ b/src/ec/suite_b/ops/p256.rs
@@ -14,7 +14,7 @@
 use super::{
     elem::{binary_op, binary_op_assign},
-    elem_sqr_mul, elem_sqr_mul_acc, Modulus, *,
+    elem_sqr_mul, elem_sqr_mul_acc, PublicModulus, *,
 };
 
 pub(super) const NUM_LIMBS: usize = 256 / LIMB_BITS;
@@ -22,9 +22,9 @@ pub static COMMON_OPS: CommonOps = CommonOps {
     num_limbs: elem::NumLimbs::P256,
 
-    q: Modulus {
+    q: PublicModulus {
         p: limbs_from_hex("ffffffff00000001000000000000000000000000ffffffffffffffffffffffff"),
-        rr: limbs_from_hex("4fffffffdfffffffffffffffefffffffbffffffff0000000000000003"),
+        rr: PublicElem::from_hex("4fffffffdfffffffffffffffefffffffbffffffff0000000000000003"),
     },
     n: PublicElem::from_hex("ffffffff00000000ffffffffffffffffbce6faada7179e84f3b9cac2fc632551"),
 
@@ -301,7 +301,7 @@ prefixed_extern! {
     fn p256_scalar_sqr_rep_mont(
         r: *mut Limb,   // [COMMON_OPS.num_limbs]
         a: *const Limb, // [COMMON_OPS.num_limbs]
-        rep: Limb,
+        rep: LeakyWord,
     );
 }
 
diff --git a/src/ec/suite_b/ops/p384.rs b/src/ec/suite_b/ops/p384.rs
index faef4e07c..d7d7b68d4 100644
--- a/src/ec/suite_b/ops/p384.rs
+++ b/src/ec/suite_b/ops/p384.rs
@@ -14,7 +14,7 @@
 use super::{
     elem::{binary_op, binary_op_assign},
-    elem_sqr_mul, elem_sqr_mul_acc, Modulus, *,
+    elem_sqr_mul, elem_sqr_mul_acc, PublicModulus, *,
 };
 
 pub(super) const NUM_LIMBS: usize = 384 / LIMB_BITS;
@@ -22,9 +22,9 @@ pub static COMMON_OPS: CommonOps = CommonOps {
     num_limbs: elem::NumLimbs::P384,
 
-    q: Modulus {
+    q: PublicModulus {
         p: limbs_from_hex("fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffeffffffff0000000000000000ffffffff"),
-        rr: limbs_from_hex("10000000200000000fffffffe000000000000000200000000fffffffe00000001"),
+        rr: PublicElem::from_hex("10000000200000000fffffffe000000000000000200000000fffffffe00000001"),
     },
     n: PublicElem::from_hex("ffffffffffffffffffffffffffffffffffffffffffffffffc7634d81f4372ddf581a0db248b0a77aecec196accc52973"),
 
diff --git a/src/ec/suite_b/private_key.rs b/src/ec/suite_b/private_key.rs
index 043208b7e..212e11b3d 100644
--- a/src/ec/suite_b/private_key.rs
+++ b/src/ec/suite_b/private_key.rs
@@ -130,6 +130,7 @@ pub(super) fn public_from_private(
     my_private_key: &ec::Seed,
     cpu: cpu::Features,
 ) -> Result<(), error::Unspecified> {
+    let q = ops.common.elem_modulus();
     let elem_and_scalar_bytes = ops.common.len();
     debug_assert_eq!(public_out.len(), 1 + (2 * elem_and_scalar_bytes));
     let my_private_key = private_key_as_scalar(ops, my_private_key);
@@ -139,11 +140,12 @@ pub(super) fn public_from_private(
 
     // `big_endian_affine_from_jacobian` verifies that the point is not at
     // infinity and is on the curve.
-    big_endian_affine_from_jacobian(ops, x_out, Some(y_out), &my_public_key, cpu)
+    big_endian_affine_from_jacobian(ops, &q, x_out, Some(y_out), &my_public_key, cpu)
 }
 
 pub(super) fn affine_from_jacobian(
     ops: &PrivateKeyOps,
+    q: &Modulus<Q>,
     p: &Point,
     cpu: cpu::Features,
 ) -> Result<(Elem<R>, Elem<R>), error::Unspecified> {
@@ -173,19 +175,20 @@ pub(super) fn affine_from_jacobian(
     // If we validated our inputs correctly and then computed (x, y, z), then
     // (x, y, z) will be on the curve. See
     // `verify_affine_point_is_on_the_curve_scaled` for the motivation.
-    verify_affine_point_is_on_the_curve(ops.common, (&x_aff, &y_aff))?;
+    verify_affine_point_is_on_the_curve(ops.common, q, (&x_aff, &y_aff))?;
 
     Ok((x_aff, y_aff))
 }
 
 pub(super) fn big_endian_affine_from_jacobian(
     ops: &PrivateKeyOps,
+    q: &Modulus<Q>,
     x_out: &mut [u8],
     y_out: Option<&mut [u8]>,
     p: &Point,
     cpu: cpu::Features,
 ) -> Result<(), error::Unspecified> {
-    let (x_aff, y_aff) = affine_from_jacobian(ops, p, cpu)?;
+    let (x_aff, y_aff) = affine_from_jacobian(ops, q, p, cpu)?;
     let x = ops.common.elem_unencoded(&x_aff);
     limb::big_endian_from_limbs(ops.leak_limbs(&x), x_out);
     if let Some(y_out) = y_out {
diff --git a/src/ec/suite_b/public_key.rs b/src/ec/suite_b/public_key.rs
index f62727ebd..a5cebaeb9 100644
--- a/src/ec/suite_b/public_key.rs
+++ b/src/ec/suite_b/public_key.rs
@@ -28,6 +28,7 @@ use crate::{arithmetic::montgomery::*, cpu, error};
 /// http://nvlpubs.nist.gov/nistpubs/SpecialPublications/NIST.SP.800-56Ar2.pdf
 pub(super) fn parse_uncompressed_point(
     ops: &PublicKeyOps,
+    q: &Modulus<Q>,
     input: untrusted::Input,
     cpu: cpu::Features,
 ) -> Result<(Elem<R>, Elem<R>), error::Unspecified> {
@@ -52,7 +53,7 @@ pub(super) fn parse_uncompressed_point(
     // NIST SP 800-56A Step 3: "If q is an odd prime p, verify that
     // yQ**2 = xQ**3 + axQ + b in GF(p), where the arithmetic is performed
     // modulo p."
-    verify_affine_point_is_on_the_curve(ops.common, (&x, &y))?;
+    verify_affine_point_is_on_the_curve(ops.common, q, (&x, &y))?;
 
     // NIST SP 800-56A Note: "Since its order is not verified, there is no
     // check that the public key is in the correct EC subgroup."
@@ -85,8 +86,9 @@ mod tests {
             let is_valid = test_case.consume_string("Result") == "P";
 
             let curve_ops = public_key_ops_from_curve_name(&curve_name);
+            let q = curve_ops.common.elem_modulus();
 
-            let result = parse_uncompressed_point(curve_ops, public_key, cpu);
+            let result = parse_uncompressed_point(curve_ops, &q, public_key, cpu);
             assert_eq!(is_valid, result.is_ok());
 
             // TODO: Verify that we when we re-serialize the parsed (x, y), the