From d7a133b7e032f1536c1e0a8d370b85779f8e2864 Mon Sep 17 00:00:00 2001
From: Brian Smith
Date: Fri, 6 Dec 2024 16:25:16 -0800
Subject: [PATCH] ec/suite_b: Make more LeakyLimb->Limb conversions explicit.

Rename `Modulus` to `PublicModulus` and add a new `Modulus`:

    PublicModulus::limbs: [LeakyLimb; _]
    Modulus::limbs: [Limb; _]

Refactor the callers to (approximately) minimize the number of times a
`Modulus` is constructed, as it will require a copy if/when `Limb`
becomes a type distinct from `LeakyLimb`.

(In the future, it is likely that more methods of `CommonOps` and
`ScalarOps` will move to `Modulus<Q>` and `Modulus<N>`, respectively.)
---
 mk/generate_curves.py                 |   6 +-
 src/ec/suite_b.rs                     |  10 +-
 src/ec/suite_b/ecdh.rs                |   9 +-
 src/ec/suite_b/ecdsa/digest_scalar.rs |  25 ++-
 src/ec/suite_b/ecdsa/signing.rs       |  18 +-
 src/ec/suite_b/ecdsa/verification.rs  |  23 +--
 src/ec/suite_b/ops.rs                 | 268 +++++++++++++++-----------
 src/ec/suite_b/ops/elem.rs            |  12 +-
 src/ec/suite_b/ops/p256.rs            |   8 +-
 src/ec/suite_b/ops/p384.rs            |   6 +-
 src/ec/suite_b/private_key.rs         |  32 +--
 src/ec/suite_b/public_key.rs          |  10 +-
 12 files changed, 240 insertions(+), 187 deletions(-)

diff --git a/mk/generate_curves.py b/mk/generate_curves.py
index 86a882401a..c1939da02f 100644
--- a/mk/generate_curves.py
+++ b/mk/generate_curves.py
@@ -33,7 +33,7 @@
 
 use super::{
     elem::{binary_op, binary_op_assign},
-    elem_sqr_mul, elem_sqr_mul_acc, Modulus, *,
+    elem_sqr_mul, elem_sqr_mul_acc, PublicModulus, *,
 };
 
 pub(super) const NUM_LIMBS: usize = (%(bits)d + LIMB_BITS - 1) / LIMB_BITS;
@@ -42,9 +42,9 @@
     num_limbs: elem::NumLimbs::P%(bits)s,
     order_bits: %(bits)d,
 
-    q: Modulus {
+    q: PublicModulus {
         p: limbs_from_hex("%(q)x"),
-        rr: limbs_from_hex(%(q_rr)s),
+        rr: PublicElem::from_hex(%(q_rr)s),
     },
     n: PublicElem::from_hex("%(n)x"),
 
diff --git a/src/ec/suite_b.rs b/src/ec/suite_b.rs
index b0069c2f5b..8f3809ecb0 100644
--- a/src/ec/suite_b.rs
+++ b/src/ec/suite_b.rs
@@ -31,10 +31,12 @@ use crate::{arithmetic::montgomery::*, cpu, ec, error, io::der, pkcs8};
 //
 fn verify_affine_point_is_on_the_curve(
     ops: &CommonOps,
+    q: &Modulus<Q>,
     (x, y): (&Elem<R>, &Elem<R>),
 ) -> Result<(), error::Unspecified> {
     verify_affine_point_is_on_the_curve_scaled(
         ops,
+        q,
         (x, y),
         &Elem::from(&ops.a),
         &Elem::from(&ops.b),
@@ -52,6 +54,7 @@ fn verify_affine_point_is_on_the_curve(
 // This function also verifies that the point is not at infinity.
 fn verify_jacobian_point_is_on_the_curve(
     ops: &CommonOps,
+    q: &Modulus<Q>,
     p: &Point,
 ) -> Result<Elem<R>, error::Unspecified> {
     let z = ops.point_z(p);
@@ -109,7 +112,7 @@ fn verify_jacobian_point_is_on_the_curve(
     let z4_a = ops.elem_product(&z4, &Elem::from(&ops.a));
     let z6 = ops.elem_product(&z4, &z2);
     let z6_b = ops.elem_product(&z6, &Elem::from(&ops.b));
-    verify_affine_point_is_on_the_curve_scaled(ops, (&x, &y), &z4_a, &z6_b)?;
+    verify_affine_point_is_on_the_curve_scaled(ops, q, (&x, &y), &z4_a, &z6_b)?;
     Ok(z2)
 }
 
@@ -140,6 +143,7 @@ fn verify_jacobian_point_is_on_the_curve(
 // Jean-Pierre Seifert.
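// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the patch): a minimal, self-contained model
// of the type split described in the commit message, with simplified names and
// limb widths standing in for the real `ring` definitions. Static curve
// constants stay in `PublicModulus` as `LeakyLimb`s, and a type-tagged
// `Modulus<M>` holding `Limb`s is constructed from them once per high-level
// operation. Today `Limb` and `LeakyLimb` are the same type, so the modulus can
// simply borrow the limbs; if they ever diverge, the copy happens in one place.
use core::marker::PhantomData;

type LeakyLimb = usize; // public, timing-leak-tolerant values
type Limb = usize; // secret-dependent values; may become a distinct type later

const MAX_LIMBS: usize = 6;

// Marker types distinguishing the field modulus q from the group order n.
enum Q {}
enum N {}

struct PublicModulus {
    p: [LeakyLimb; MAX_LIMBS],
}

struct Modulus<M> {
    limbs: &'static [Limb; MAX_LIMBS],
    m: PhantomData<M>,
}

struct CommonOps {
    q: PublicModulus,
    n: PublicModulus,
}

impl CommonOps {
    // Built once per operation; a future LeakyLimb->Limb conversion goes here.
    fn elem_modulus(&'static self) -> Modulus<Q> {
        Modulus { limbs: &self.q.p, m: PhantomData }
    }

    fn scalar_modulus(&'static self) -> Modulus<N> {
        Modulus { limbs: &self.n.p, m: PhantomData }
    }
}

static OPS: CommonOps = CommonOps {
    q: PublicModulus { p: [0; MAX_LIMBS] },
    n: PublicModulus { p: [0; MAX_LIMBS] },
};

fn demo() {
    // Callers construct the moduli once and pass `&Modulus<Q>` / `&Modulus<N>` down.
    let q: Modulus<Q> = OPS.elem_modulus();
    let n: Modulus<N> = OPS.scalar_modulus();
    assert_eq!(q.limbs.len(), n.limbs.len());
}
// ---------------------------------------------------------------------------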
fn verify_affine_point_is_on_the_curve_scaled( ops: &CommonOps, + q: &Modulus, (x, y): (&Elem, &Elem), a_scaled: &Elem, b_scaled: &Elem, @@ -147,9 +151,9 @@ fn verify_affine_point_is_on_the_curve_scaled( let lhs = ops.elem_squared(y); let mut rhs = ops.elem_squared(x); - ops.elem_add(&mut rhs, a_scaled); + q.elem_add(&mut rhs, a_scaled); ops.elem_mul(&mut rhs, x); - ops.elem_add(&mut rhs, b_scaled); + q.elem_add(&mut rhs, b_scaled); if !ops.elems_are_equal(&lhs, &rhs).leak() { return Err(error::Unspecified); diff --git a/src/ec/suite_b/ecdh.rs b/src/ec/suite_b/ecdh.rs index 62f626422f..b40186d91b 100644 --- a/src/ec/suite_b/ecdh.rs +++ b/src/ec/suite_b/ecdh.rs @@ -93,6 +93,8 @@ fn ecdh( // The "NSA Guide" steps are from section 3.1 of the NSA guide, "Ephemeral // Unified Model." + let q = public_key_ops.common.elem_modulus(); + // NSA Guide Step 1 is handled separately. // NIST SP 800-56Ar2 5.6.2.2.2. @@ -101,7 +103,7 @@ fn ecdh( // `parse_uncompressed_point` verifies that the point is not at infinity // and that it is on the curve, using the Partial Public-Key Validation // Routine. - let peer_public_key = parse_uncompressed_point(public_key_ops, peer_public_key, cpu)?; + let peer_public_key = parse_uncompressed_point(public_key_ops, &q, peer_public_key, cpu)?; // NIST SP 800-56Ar2 Step 1. // NSA Guide Step 3 (except point at infinity check). @@ -123,7 +125,8 @@ fn ecdh( // information about their values can be recovered. This doesn't meet the // NSA guide's explicit requirement to "zeroize" them though. // TODO: this only needs common scalar ops - let my_private_key = private_key_as_scalar(private_key_ops, my_private_key); + let n = private_key_ops.common.scalar_modulus(); + let my_private_key = private_key_as_scalar(&n, my_private_key); let product = private_key_ops.point_mul(&my_private_key, &peer_public_key, cpu); // NIST SP 800-56Ar2 Steps 2, 3, 4, and 5. @@ -134,7 +137,7 @@ fn ecdh( // `big_endian_affine_from_jacobian` verifies that the result is not at // infinity and also does an extra check to verify that the point is on // the curve. - big_endian_affine_from_jacobian(private_key_ops, out, None, &product, cpu) + big_endian_affine_from_jacobian(private_key_ops, &q, out, None, &product, cpu) // NSA Guide Step 5 & 6 are deferred to the caller. Again, we have a // pretty liberal interpretation of the NIST's spec's "Destroy" that diff --git a/src/ec/suite_b/ecdsa/digest_scalar.rs b/src/ec/suite_b/ecdsa/digest_scalar.rs index 76206d9ee5..0cc2ca66e5 100644 --- a/src/ec/suite_b/ecdsa/digest_scalar.rs +++ b/src/ec/suite_b/ecdsa/digest_scalar.rs @@ -42,30 +42,27 @@ use crate::{digest, ec::suite_b::ops::*}; /// right will give a value less than 2**255, which is less than `n`. The /// analogous argument applies for P-384. However, it does *not* apply in /// general; for example, it doesn't apply to P-521. -pub fn digest_scalar(ops: &ScalarOps, msg: digest::Digest) -> Scalar { - digest_scalar_(ops, msg.as_ref()) +pub(super) fn digest_scalar(n: &Modulus, msg: digest::Digest) -> Scalar { + digest_scalar_(n, msg.as_ref()) } #[cfg(test)] -pub(crate) fn digest_bytes_scalar(ops: &ScalarOps, digest: &[u8]) -> Scalar { - digest_scalar_(ops, digest) +pub(super) fn digest_bytes_scalar(n: &Modulus, digest: &[u8]) -> Scalar { + digest_scalar_(n, digest) } // This is a separate function solely so that we can test specific digest // values like all-zero values and values larger than `n`. 
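// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the patch): the "truncate, then reduce
// once" behavior documented above, modeled with plain 64-bit integers instead
// of ring's fixed-length constant-time limb code. The single conditional
// subtraction is a *full* reduction only because, for P-256 and P-384, the
// truncated digest is guaranteed to be less than 2*n; as the comment notes,
// that argument does not hold in general (e.g. it fails for P-521).
fn digest_to_scalar_toy(digest: &[u8], n: u64) -> u64 {
    // Keep only the leftmost (most significant) scalar-sized prefix of the
    // digest; shorter digests are zero-padded on the left, mirroring
    // `parse_big_endian_and_pad_consttime`.
    let digest = if digest.len() > 8 { &digest[..8] } else { digest };
    let mut bytes = [0u8; 8];
    bytes[8 - digest.len()..].copy_from_slice(digest);
    let e = u64::from_be_bytes(bytes);

    // "Reduce once": subtract n at most one time.
    if e >= n {
        e - n
    } else {
        e
    }
}
// ---------------------------------------------------------------------------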
-fn digest_scalar_(ops: &ScalarOps, digest: &[u8]) -> Scalar { - let len = ops.scalar_bytes_len(); +fn digest_scalar_(n: &Modulus, digest: &[u8]) -> Scalar { + let len = n.bytes_len(); let digest = if digest.len() > len { &digest[..len] } else { digest }; - scalar_parse_big_endian_partially_reduced_variable_consttime( - ops.common, - untrusted::Input::from(digest), - ) - .unwrap() + scalar_parse_big_endian_partially_reduced_variable_consttime(n, untrusted::Input::from(digest)) + .unwrap() } #[cfg(test)] @@ -94,18 +91,20 @@ mod tests { panic!("Unsupported curve+digest: {}+{}", curve_name, digest_name); } }; + let n = ops.scalar_ops.scalar_modulus(); assert_eq!(input.len(), digest_alg.output_len()); assert_eq!(output.len(), ops.scalar_ops.scalar_bytes_len()); + assert_eq!(output.len(), n.bytes_len()); let expected = scalar_parse_big_endian_variable( - ops.public_key_ops.common, + &n, limb::AllowZero::Yes, untrusted::Input::from(&output), ) .unwrap(); - let actual = digest_bytes_scalar(ops.scalar_ops, &input); + let actual = digest_bytes_scalar(&n, &input); assert_eq!( ops.scalar_ops.leak_limbs(&actual), ops.scalar_ops.leak_limbs(&expected) diff --git a/src/ec/suite_b/ecdsa/signing.rs b/src/ec/suite_b/ecdsa/signing.rs index 4a625ef66a..ba4763cf6e 100644 --- a/src/ec/suite_b/ecdsa/signing.rs +++ b/src/ec/suite_b/ecdsa/signing.rs @@ -154,8 +154,10 @@ impl EcdsaKeyPair { rng: &dyn rand::SecureRandom, ) -> Result { let cpu = cpu::features(); + let (seed, public_key) = key_pair.split(); - let d = private_key::private_key_as_scalar(alg.private_key_ops, &seed); + let n = alg.private_scalar_ops.scalar_ops.scalar_modulus(); + let d = private_key::private_key_as_scalar(&n, &seed); let d = alg.private_scalar_ops.to_mont(&d, cpu); let nonce_key = NonceRandomKey::new(alg, &seed, rng)?; @@ -238,11 +240,13 @@ impl EcdsaKeyPair { let scalar_ops = ops.scalar_ops; let cops = scalar_ops.common; let private_key_ops = self.alg.private_key_ops; + let q = cops.elem_modulus(); + let n = scalar_ops.scalar_modulus(); for _ in 0..100 { // XXX: iteration conut? // Step 1. - let k = private_key::random_scalar(self.alg.private_key_ops, rng)?; + let k = private_key::random_scalar(self.alg.private_key_ops, &n, rng)?; let k_inv = ops.scalar_inv_to_mont(&k, cpu); // Step 2. @@ -250,9 +254,9 @@ impl EcdsaKeyPair { // Step 3. let r = { - let (x, _) = private_key::affine_from_jacobian(private_key_ops, &r, cpu)?; + let (x, _) = private_key::affine_from_jacobian(private_key_ops, &q, &r, cpu)?; let x = cops.elem_unencoded(&x); - elem_reduced_to_scalar(cops, &x) + n.elem_reduced_to_scalar(&x) }; if cops.is_zero(&r) { continue; @@ -261,12 +265,12 @@ impl EcdsaKeyPair { // Step 4 is done by the caller. // Step 5. - let e = digest_scalar(scalar_ops, h); + let e = digest_scalar(&n, h); // Step 6. let s = { - let dr = scalar_ops.scalar_product(&self.d, &r, cpu); - let e_plus_dr = scalar_sum(cops, &e, dr); + let mut e_plus_dr = scalar_ops.scalar_product(&self.d, &r, cpu); + n.elem_add(&mut e_plus_dr, &e); scalar_ops.scalar_product(&k_inv, &e_plus_dr, cpu) }; if cops.is_zero(&s) { diff --git a/src/ec/suite_b/ecdsa/verification.rs b/src/ec/suite_b/ecdsa/verification.rs index 6d9ecb1024..7c6181b7f7 100644 --- a/src/ec/suite_b/ecdsa/verification.rs +++ b/src/ec/suite_b/ecdsa/verification.rs @@ -63,7 +63,8 @@ impl signature::VerificationAlgorithm for EcdsaVerificationAlgorithm { // NSA Guide Step 3: "Convert the bit string H to an integer e as // described in Appendix B.2." 
- digest_scalar(self.ops.scalar_ops, h) + let n = self.ops.scalar_ops.scalar_modulus(); + digest_scalar(&n, h) }; self.verify_digest(public_key, e, signature) @@ -84,6 +85,8 @@ impl EcdsaVerificationAlgorithm { let public_key_ops = self.ops.public_key_ops; let scalar_ops = self.ops.scalar_ops; + let q = public_key_ops.common.elem_modulus(); + let n = scalar_ops.scalar_modulus(); // NSA Guide Prerequisites: // @@ -102,7 +105,7 @@ impl EcdsaVerificationAlgorithm { // can do. Prerequisite #2 is handled implicitly as the domain // parameters are hard-coded into the source. Prerequisite #3 is // handled by `parse_uncompressed_point`. - let peer_pub_key = parse_uncompressed_point(public_key_ops, public_key, cpu)?; + let peer_pub_key = parse_uncompressed_point(public_key_ops, &q, public_key, cpu)?; let (r, s) = signature.read_all(error::Unspecified, |input| { (self.split_rs)(scalar_ops, input) @@ -110,8 +113,8 @@ impl EcdsaVerificationAlgorithm { // NSA Guide Step 1: "If r and s are not both integers in the interval // [1, n − 1], output INVALID." - let r = scalar_parse_big_endian_variable(public_key_ops.common, limb::AllowZero::No, r)?; - let s = scalar_parse_big_endian_variable(public_key_ops.common, limb::AllowZero::No, s)?; + let r = scalar_parse_big_endian_variable(&n, limb::AllowZero::No, r)?; + let s = scalar_parse_big_endian_variable(&n, limb::AllowZero::No, s)?; // NSA Guide Step 4: "Compute w = s**−1 mod n, using the routine in // Appendix B.1." @@ -134,7 +137,7 @@ impl EcdsaVerificationAlgorithm { // `verify_affine_point_is_on_the_curve_scaled` for details on why). // But, we're going to avoid converting to affine for performance // reasons, so we do the verification using the Jacobian coordinates. - let z2 = verify_jacobian_point_is_on_the_curve(public_key_ops.common, &product)?; + let z2 = verify_jacobian_point_is_on_the_curve(public_key_ops.common, &q, &product)?; // NSA Guide Step 7: "Compute v = xR mod n." // NSA Guide Step 8: "Compare v and r0. If v = r0, output VALID; @@ -158,9 +161,9 @@ impl EcdsaVerificationAlgorithm { if sig_r_equals_x(self.ops, &r, &x, &z2) { return Ok(()); } - if self.ops.elem_less_than_vartime(&r, &self.ops.q_minus_n) { + if q.elem_less_than_vartime(&r, &self.ops.q_minus_n) { let n = Elem::from(self.ops.n()); - self.ops.scalar_ops.common.elem_add(&mut r, &n); + q.elem_add(&mut r, &n); if sig_r_equals_x(self.ops, &r, &x, &z2) { return Ok(()); } @@ -316,11 +319,9 @@ mod tests { panic!("Unsupported curve: {}", curve_name); } }; + let n = alg.ops.scalar_ops.scalar_modulus(); - let digest = super::super::digest_scalar::digest_bytes_scalar( - alg.ops.scalar_ops, - &digest[..], - ); + let digest = super::super::digest_scalar::digest_bytes_scalar(&n, &digest[..]); let actual_result = alg.verify_digest( untrusted::Input::from(&public_key[..]), digest, diff --git a/src/ec/suite_b/ops.rs b/src/ec/suite_b/ops.rs index a9a6366d0a..d153782858 100644 --- a/src/ec/suite_b/ops.rs +++ b/src/ec/suite_b/ops.rs @@ -38,6 +38,13 @@ type PublicScalar = elem::PublicElem; #[derive(Clone, Copy)] pub enum N {} +pub(super) struct Modulus { + // TODO: [Limb; elem::NumLimbs::MAX] + limbs: &'static [Limb; elem::NumLimbs::MAX], + num_limbs: elem::NumLimbs, + m: PhantomData, +} + pub struct Point { // The coordinates are stored in a contiguous array, where the first // `ops.num_limbs` elements are the X coordinate, the next @@ -58,7 +65,7 @@ impl Point { /// Operations and values needed by all curve operations. 
pub struct CommonOps { num_limbs: elem::NumLimbs, - q: Modulus, + q: PublicModulus, n: PublicElem, pub a: PublicElem, // Must be -3 mod q @@ -72,9 +79,28 @@ pub struct CommonOps { } impl CommonOps { + pub(super) fn elem_modulus(&'static self) -> Modulus { + Modulus { + // TODO: limbs: self.q.p.map(Limb::from), + limbs: &self.q.p, + num_limbs: self.num_limbs, + m: PhantomData, + } + } + + pub(super) fn scalar_modulus(&'static self) -> Modulus { + Modulus { + // TODO: limbs: self.n.limbs.map(Limb::from), + limbs: &self.n.limbs, + num_limbs: self.num_limbs, + m: PhantomData, + } + } + // The length of a field element, which is the same as the length of a // scalar, in bytes. pub fn len(&self) -> usize { + // Keep in sync with `Modulus::len()` self.num_limbs.into() * LIMB_BYTES } @@ -82,17 +108,26 @@ impl CommonOps { pub(super) fn n_limbs(&self) -> &[Limb] { &self.n.limbs[..self.num_limbs.into()] } +} + +impl Modulus { + // Keep in sync with `CommonOps::len()`. + pub fn bytes_len(&self) -> usize { + self.num_limbs.into() * LIMB_BYTES + } #[inline] - pub fn elem_add(&self, a: &mut Elem, b: &Elem) { + pub fn elem_add(&self, a: &mut elem::Elem, b: &elem::Elem) { let num_limbs = self.num_limbs.into(); limbs_add_assign_mod( &mut a.limbs[..num_limbs], &b.limbs[..num_limbs], - &self.q.p[..num_limbs], + &self.limbs[..num_limbs], ); } +} +impl CommonOps { #[inline] pub fn elems_are_equal(&self, a: &Elem, b: &Elem) -> LimbMask { let num_limbs = self.num_limbs.into(); @@ -175,9 +210,9 @@ impl CommonOps { } } -struct Modulus { +struct PublicModulus { p: [LeakyLimb; elem::NumLimbs::MAX], - rr: [LeakyLimb; elem::NumLimbs::MAX], + rr: PublicElem, } /// Operations on private keys, for ECDH and ECDSA signing. @@ -242,20 +277,22 @@ impl PublicKeyOps { // in the interval [0, p-1] in the case that q is an odd prime p[.]" pub(super) fn elem_parse( &self, + q: &Modulus, input: &mut untrusted::Reader, _cpu: cpu::Features, ) -> Result, error::Unspecified> { let _cpu = cpu::features(); let encoded_value = input.read_bytes(self.common.len())?; - let parsed = elem_parse_big_endian_fixed_consttime(self.common, encoded_value)?; + let parsed = elem_parse_big_endian_fixed_consttime(q, encoded_value)?; let mut r = Elem::zero(); + let rr = Elem::from(&self.common.q.rr); // Montgomery encode (elem_to_mont). // TODO: do something about this. unsafe { (self.common.elem_mul_mont)( r.limbs.as_mut_ptr(), parsed.limbs.as_ptr(), - self.common.q.rr.as_ptr(), + rr.limbs.as_ptr(), ) } Ok(r) @@ -271,11 +308,17 @@ pub struct ScalarOps { } impl ScalarOps { + pub(super) fn scalar_modulus(&'static self) -> Modulus { + self.common.scalar_modulus() + } + // The (maximum) length of a scalar, not including any padding. 
pub fn scalar_bytes_len(&self) -> usize { self.common.len() } +} +impl ScalarOps { pub fn leak_limbs<'s>(&self, s: &'s Scalar) -> &'s [Limb] { &s.limbs[..self.common.num_limbs.into()] } @@ -325,14 +368,19 @@ impl PublicScalarOps { pub fn elem_equals_vartime(&self, a: &Elem, b: &Elem) -> bool { let num_limbs = self.public_key_ops.common.num_limbs.into(); - a.limbs[..num_limbs] == b.limbs[..num_limbs] + limbs_equal_limbs_consttime(&a.limbs[..num_limbs], &b.limbs[..num_limbs]).leak() } +} +impl Modulus { pub fn elem_less_than_vartime(&self, a: &Elem, b: &PublicElem) -> bool { - let num_limbs = self.public_key_ops.common.num_limbs.into(); + let num_limbs = self.num_limbs.into(); + // TODO: let b = Elem::from(b); limbs_less_than_limbs_vartime(&a.limbs[..num_limbs], &b.limbs[..num_limbs]) } +} +impl PublicScalarOps { pub(super) fn scalar_inv_to_mont_vartime( &self, s: &Scalar, @@ -379,27 +427,19 @@ fn twin_mul_inefficient( } // This assumes n < q < 2*n. -pub fn elem_reduced_to_scalar(ops: &CommonOps, elem: &Elem) -> Scalar { - let num_limbs = ops.num_limbs.into(); - let mut r_limbs = elem.limbs; - limbs_reduce_once_constant_time(&mut r_limbs[..num_limbs], &ops.n.limbs[..num_limbs]); - Scalar { - limbs: r_limbs, - m: PhantomData, - encoding: PhantomData, +impl Modulus { + pub fn elem_reduced_to_scalar(&self, elem: &Elem) -> Scalar { + let num_limbs = self.num_limbs.into(); + let mut r_limbs = elem.limbs; + limbs_reduce_once_constant_time(&mut r_limbs[..num_limbs], &self.limbs[..num_limbs]); + Scalar { + limbs: r_limbs, + m: PhantomData, + encoding: PhantomData, + } } } -pub fn scalar_sum(ops: &CommonOps, a: &Scalar, mut b: Scalar) -> Scalar { - let num_limbs = ops.num_limbs.into(); - limbs_add_assign_mod( - &mut b.limbs[..num_limbs], - &a.limbs[..num_limbs], - &ops.n.limbs[..num_limbs], - ); - b -} - // Returns (`a` squared `squarings` times) * `b`. 
fn elem_sqr_mul(ops: &CommonOps, a: &Elem, squarings: LeakyWord, b: &Elem) -> Elem { debug_assert!(squarings >= 1); @@ -420,72 +460,67 @@ fn elem_sqr_mul_acc(ops: &CommonOps, acc: &mut Elem, squarings: LeakyWord, b: } #[inline] -pub fn elem_parse_big_endian_fixed_consttime( - ops: &CommonOps, +pub(super) fn elem_parse_big_endian_fixed_consttime( + q: &Modulus, bytes: untrusted::Input, ) -> Result, error::Unspecified> { - parse_big_endian_fixed_consttime(ops, bytes, AllowZero::Yes, &ops.q.p) + parse_big_endian_fixed_consttime(q, bytes, AllowZero::Yes) } #[inline] -pub fn scalar_parse_big_endian_fixed_consttime( - ops: &CommonOps, +pub(super) fn scalar_parse_big_endian_fixed_consttime( + n: &Modulus, bytes: untrusted::Input, ) -> Result { - parse_big_endian_fixed_consttime(ops, bytes, AllowZero::No, &ops.n.limbs) + parse_big_endian_fixed_consttime(n, bytes, AllowZero::No) } #[inline] -pub fn scalar_parse_big_endian_variable( - ops: &CommonOps, +pub(super) fn scalar_parse_big_endian_variable( + n: &Modulus, allow_zero: AllowZero, bytes: untrusted::Input, ) -> Result { - let num_limbs = ops.num_limbs.into(); - let n = ops.n.limbs.map(Limb::from); + let num_limbs = n.num_limbs.into(); let mut r = Scalar::zero(); parse_big_endian_in_range_and_pad_consttime( bytes, allow_zero, - &n[..num_limbs], + &n.limbs[..num_limbs], &mut r.limbs[..num_limbs], )?; Ok(r) } -pub fn scalar_parse_big_endian_partially_reduced_variable_consttime( - ops: &CommonOps, +pub(super) fn scalar_parse_big_endian_partially_reduced_variable_consttime( + n: &Modulus, bytes: untrusted::Input, ) -> Result { - let num_limbs = ops.num_limbs.into(); + let num_limbs = n.num_limbs.into(); let mut r = Scalar::zero(); - { let r = &mut r.limbs[..num_limbs]; parse_big_endian_and_pad_consttime(bytes, r)?; - limbs_reduce_once_constant_time(r, &ops.n.limbs[..num_limbs]); + limbs_reduce_once_constant_time(r, &n.limbs[..num_limbs]); } Ok(r) } fn parse_big_endian_fixed_consttime( - ops: &CommonOps, + m: &Modulus, bytes: untrusted::Input, allow_zero: AllowZero, - max_exclusive: &[LeakyLimb; elem::NumLimbs::MAX], ) -> Result, error::Unspecified> { - let num_limbs = ops.num_limbs.into(); - let max_exclusive = max_exclusive.map(Limb::from); - - if bytes.len() != ops.len() { + let num_limbs = m.num_limbs.into(); + if bytes.len() != m.bytes_len() { return Err(error::Unspecified); } let mut r = elem::Elem::zero(); parse_big_endian_in_range_and_pad_consttime( bytes, allow_zero, - &max_exclusive[..num_limbs], + &m.limbs[..num_limbs], &mut r.limbs[..num_limbs], )?; Ok(r) @@ -522,8 +557,9 @@ mod tests { fn q_minus_n_plus_n_equals_0_test(ops: &PublicScalarOps) { let cops = ops.scalar_ops.common; + let q = cops.elem_modulus(); let mut x = Elem::from(&ops.q_minus_n); - cops.elem_add(&mut x, &Elem::from(&cops.n)); + q.elem_add(&mut x, &Elem::from(&cops.n)); assert!(cops.is_zero(&x)); } @@ -554,20 +590,21 @@ mod tests { } fn elem_add_test(ops: &PublicScalarOps, test_file: test::File) { + let cops = ops.public_key_ops.common; + let q = &cops.elem_modulus(); test::run(test_file, |section, test_case| { assert_eq!(section, ""); - let cops = ops.public_key_ops.common; - let a = consume_elem(cops, test_case, "a"); - let b = consume_elem(cops, test_case, "b"); - let expected_sum = consume_elem(cops, test_case, "r"); + let a = consume_elem(q, test_case, "a"); + let b = consume_elem(q, test_case, "b"); + let expected_sum = consume_elem(q, test_case, "r"); let mut actual_sum = a; - ops.public_key_ops.common.elem_add(&mut actual_sum, &b); + q.elem_add(&mut actual_sum, 
&b); assert_limbs_are_equal(cops, &actual_sum.limbs, &expected_sum.limbs); let mut actual_sum = b; - ops.public_key_ops.common.elem_add(&mut actual_sum, &a); + q.elem_add(&mut actual_sum, &a); assert_limbs_are_equal(cops, &actual_sum.limbs, &expected_sum.limbs); Ok(()) @@ -590,16 +627,17 @@ mod tests { } fn elem_sub_test( - ops: &CommonOps, + ops: &'static CommonOps, elem_sub: unsafe extern "C" fn(r: *mut Limb, a: *const Limb, b: *const Limb), test_file: test::File, ) { + let q = &ops.elem_modulus(); test::run(test_file, |section, test_case| { assert_eq!(section, ""); - let a = consume_elem(ops, test_case, "a"); - let b = consume_elem(ops, test_case, "b"); - let r = consume_elem(ops, test_case, "r"); + let a = consume_elem(q, test_case, "a"); + let b = consume_elem(q, test_case, "b"); + let r = consume_elem(q, test_case, "r"); let mut actual_difference = Elem::::zero(); unsafe { @@ -641,15 +679,16 @@ mod tests { } fn elem_div_by_2_test( - ops: &CommonOps, + ops: &'static CommonOps, elem_div_by_2: unsafe extern "C" fn(r: *mut Limb, a: *const Limb), test_file: test::File, ) { + let q = &ops.elem_modulus(); test::run(test_file, |section, test_case| { assert_eq!(section, ""); - let a = consume_elem(ops, test_case, "a"); - let r = consume_elem(ops, test_case, "r"); + let a = consume_elem(q, test_case, "a"); + let r = consume_elem(q, test_case, "r"); let mut actual_result = Elem::::zero(); unsafe { @@ -688,15 +727,16 @@ mod tests { } fn elem_neg_test( - ops: &CommonOps, + ops: &'static CommonOps, elem_neg: unsafe extern "C" fn(r: *mut Limb, a: *const Limb), test_file: test::File, ) { + let q = &ops.elem_modulus(); test::run(test_file, |section, test_case| { assert_eq!(section, ""); - let a = consume_elem(ops, test_case, "a"); - let b = consume_elem(ops, test_case, "b"); + let a = consume_elem(q, test_case, "a"); + let b = consume_elem(q, test_case, "b"); // Verify -a == b. { @@ -730,13 +770,14 @@ mod tests { elem_mul_test(&p384::COMMON_OPS, test_file!("ops/p384_elem_mul_tests.txt")); } - fn elem_mul_test(ops: &CommonOps, test_file: test::File) { + fn elem_mul_test(ops: &'static CommonOps, test_file: test::File) { + let q = &ops.elem_modulus(); test::run(test_file, |section, test_case| { assert_eq!(section, ""); - let mut a = consume_elem(ops, test_case, "a"); - let b = consume_elem(ops, test_case, "b"); - let r = consume_elem(ops, test_case, "r"); + let mut a = consume_elem(q, test_case, "a"); + let b = consume_elem(q, test_case, "b"); + let r = consume_elem(q, test_case, "r"); ops.elem_mul(&mut a, &b); assert_limbs_are_equal(ops, &a.limbs, &r.limbs); @@ -762,12 +803,13 @@ mod tests { fn scalar_mul_test(ops: &ScalarOps, test_file: test::File) { let cpu = cpu::features(); + let cops = ops.common; + let n = cops.scalar_modulus(); test::run(test_file, |section, test_case| { assert_eq!(section, ""); - let cops = ops.common; - let a = consume_scalar(cops, test_case, "a"); - let b = consume_scalar_mont(cops, test_case, "b"); - let expected_result = consume_scalar(cops, test_case, "r"); + let a = consume_scalar(&n, test_case, "a"); + let b = consume_scalar_mont(&n, test_case, "b"); + let expected_result = consume_scalar(&n, test_case, "r"); let actual_result = ops.scalar_product(&a, &b, cpu); assert_limbs_are_equal(cops, &actual_result.limbs, &expected_result.limbs); @@ -778,7 +820,7 @@ mod tests { #[test] fn p256_scalar_square_test() { prefixed_extern! 
{ - fn p256_scalar_sqr_rep_mont(r: *mut Limb, a: *const Limb, rep: Limb); + fn p256_scalar_sqr_rep_mont(r: *mut Limb, a: *const Limb, rep: LeakyWord); } scalar_square_test( &p256::SCALAR_OPS, @@ -792,15 +834,16 @@ mod tests { fn scalar_square_test( ops: &ScalarOps, - sqr_rep: unsafe extern "C" fn(r: *mut Limb, a: *const Limb, rep: Limb), + sqr_rep: unsafe extern "C" fn(r: *mut Limb, a: *const Limb, rep: LeakyWord), test_file: test::File, ) { + let cops = ops.common; + let n = cops.scalar_modulus(); test::run(test_file, |section, test_case| { assert_eq!(section, ""); let cpu = cpu::features(); - let cops = &ops.common; - let a = consume_scalar(cops, test_case, "a"); - let expected_result = consume_scalar(cops, test_case, "r"); + let a = consume_scalar(&n, test_case, "a"); + let expected_result = consume_scalar(&n, test_case, "r"); { let mut actual_result: Scalar = Scalar { @@ -1013,14 +1056,16 @@ mod tests { ) { let cpu = cpu::features(); let cops = pub_ops.common; - + let q = cops.elem_modulus(); + let n = cops.scalar_modulus(); test::run(test_file, |section, test_case| { assert_eq!(section, ""); - let p_scalar = consume_scalar(cops, test_case, "p_scalar"); + let p_scalar = consume_scalar(&n, test_case, "p_scalar"); let p = test_case.consume_bytes("p"); let p = super::super::public_key::parse_uncompressed_point( pub_ops, + &q, untrusted::Input::from(&p), cpu, ) @@ -1035,6 +1080,7 @@ mod tests { let (x, y) = actual_result[1..].split_at_mut(cops.len()); super::super::private_key::big_endian_affine_from_jacobian( priv_ops, + &q, x, Some(y), &product, @@ -1073,9 +1119,10 @@ mod tests { test_file: test::File, ) { let cpu = cpu::features(); + let n = ops.common.scalar_modulus(); test::run(test_file, |section, test_case| { assert_eq!(section, ""); - let g_scalar = consume_scalar(ops.common, test_case, "g_scalar"); + let g_scalar = consume_scalar(&n, test_case, "g_scalar"); let expected_result: TestPoint = consume_point(ops, test_case, "r"); let actual_result = f(&g_scalar, cpu); assert_point_actual_equals_expected(ops, &actual_result, &expected_result); @@ -1124,13 +1171,14 @@ mod tests { test_case: &mut test::TestCase, name: &str, ) -> Point { + let q = &ops.common.elem_modulus(); let input = test_case.consume_string(name); let elems = input.split(", ").collect::>(); assert_eq!(elems.len(), 3); let mut p = Point::new_at_infinity(); - consume_point_elem(ops.common, &mut p.xyz, &elems, 0); - consume_point_elem(ops.common, &mut p.xyz, &elems, 1); - consume_point_elem(ops.common, &mut p.xyz, &elems, 2); + consume_point_elem(q, &mut p.xyz, &elems, 0); + consume_point_elem(q, &mut p.xyz, &elems, 1); + consume_point_elem(q, &mut p.xyz, &elems, 2); p } @@ -1143,22 +1191,23 @@ mod tests { test_case: &mut test::TestCase, name: &str, ) -> AffinePoint { + let q = &ops.common.elem_modulus(); let input = test_case.consume_string(name); let elems = input.split(", ").collect::>(); assert_eq!(elems.len(), 2); let mut p = AffinePoint { xy: [0; 2 * elem::NumLimbs::MAX], }; - consume_point_elem(ops.common, &mut p.xy, &elems, 0); - consume_point_elem(ops.common, &mut p.xy, &elems, 1); + consume_point_elem(q, &mut p.xy, &elems, 0); + consume_point_elem(q, &mut p.xy, &elems, 1); p } - fn consume_point_elem(ops: &CommonOps, limbs_out: &mut [Limb], elems: &[&str], i: usize) { - let num_limbs = ops.num_limbs.into(); + fn consume_point_elem(q: &Modulus, limbs_out: &mut [Limb], elems: &[&str], i: usize) { + let num_limbs = q.num_limbs.into(); let bytes = test::from_hex(elems[i]).unwrap(); let bytes = 
untrusted::Input::from(&bytes); - let r: Elem = elem_parse_big_endian_fixed_consttime(ops, bytes).unwrap(); + let r: Elem = elem_parse_big_endian_fixed_consttime(q, bytes).unwrap(); // XXX: “Transmute” this to `Elem` limbs. limbs_out[(i * num_limbs)..((i + 1) * num_limbs)].copy_from_slice(&r.limbs[..num_limbs]); } @@ -1173,11 +1222,12 @@ mod tests { test_case: &mut test::TestCase, name: &str, ) -> TestPoint { - fn consume_point_elem(ops: &CommonOps, elems: &[&str], i: usize) -> Elem { + let q = &ops.common.elem_modulus(); + fn consume_point_elem(q: &Modulus, elems: &[&str], i: usize) -> Elem { let bytes = test::from_hex(elems[i]).unwrap(); let bytes = untrusted::Input::from(&bytes); let unencoded: Elem = - elem_parse_big_endian_fixed_consttime(ops, bytes).unwrap(); + elem_parse_big_endian_fixed_consttime(q, bytes).unwrap(); // XXX: “Transmute” this to `Elem` limbs. Elem { limbs: unencoded.limbs, @@ -1192,8 +1242,8 @@ mod tests { } let elems = input.split(", ").collect::>(); assert_eq!(elems.len(), 2); - let x = consume_point_elem(ops.common, &elems, 0); - let y = consume_point_elem(ops.common, &elems, 1); + let x = consume_point_elem(q, &elems, 0); + let y = consume_point_elem(q, &elems, 1); TestPoint::Affine(x, y) } @@ -1224,10 +1274,13 @@ mod tests { } } - fn consume_elem(ops: &CommonOps, test_case: &mut test::TestCase, name: &str) -> Elem { - let bytes = consume_padded_bytes(ops, test_case, name); + fn consume_elem(q: &Modulus, test_case: &mut test::TestCase, name: &str) -> Elem { + let unpadded_bytes = test_case.consume_bytes(name); + let mut bytes = vec![0; q.bytes_len() - unpadded_bytes.len()]; + bytes.extend(&unpadded_bytes); + let bytes = untrusted::Input::from(&bytes); - let r: Elem = elem_parse_big_endian_fixed_consttime(ops, bytes).unwrap(); + let r: Elem = elem_parse_big_endian_fixed_consttime(q, bytes).unwrap(); // XXX: “Transmute” this to an `Elem`. Elem { limbs: r.limbs, @@ -1236,20 +1289,20 @@ mod tests { } } - fn consume_scalar(ops: &CommonOps, test_case: &mut test::TestCase, name: &str) -> Scalar { + fn consume_scalar(n: &Modulus, test_case: &mut test::TestCase, name: &str) -> Scalar { let bytes = test_case.consume_bytes(name); let bytes = untrusted::Input::from(&bytes); - scalar_parse_big_endian_variable(ops, AllowZero::Yes, bytes).unwrap() + scalar_parse_big_endian_variable(n, AllowZero::Yes, bytes).unwrap() } fn consume_scalar_mont( - ops: &CommonOps, + n: &Modulus, test_case: &mut test::TestCase, name: &str, ) -> Scalar { let bytes = test_case.consume_bytes(name); let bytes = untrusted::Input::from(&bytes); - let s = scalar_parse_big_endian_variable(ops, AllowZero::Yes, bytes).unwrap(); + let s = scalar_parse_big_endian_variable(n, AllowZero::Yes, bytes).unwrap(); // “Transmute” it to a `Scalar`. 
Scalar { limbs: s.limbs, @@ -1257,17 +1310,6 @@ mod tests { encoding: PhantomData, } } - - fn consume_padded_bytes( - ops: &CommonOps, - test_case: &mut test::TestCase, - name: &str, - ) -> Vec { - let unpadded_bytes = test_case.consume_bytes(name); - let mut bytes = vec![0; ops.len() - unpadded_bytes.len()]; - bytes.extend(&unpadded_bytes); - bytes - } } mod elem; diff --git a/src/ec/suite_b/ops/elem.rs b/src/ec/suite_b/ops/elem.rs index 25eb36b17c..0deb05d050 100644 --- a/src/ec/suite_b/ops/elem.rs +++ b/src/ec/suite_b/ops/elem.rs @@ -122,11 +122,7 @@ pub fn binary_op( a: &Elem, b: &Elem, ) -> Elem { - let mut r = Elem { - limbs: [0; NumLimbs::MAX], - m: PhantomData, - encoding: PhantomData, - }; + let mut r = Elem::zero(); unsafe { f(r.limbs.as_mut_ptr(), a.limbs.as_ptr(), b.limbs.as_ptr()) } r } @@ -147,11 +143,7 @@ pub fn unary_op( f: unsafe extern "C" fn(r: *mut Limb, a: *const Limb), a: &Elem, ) -> Elem { - let mut r = Elem { - limbs: [0; NumLimbs::MAX], - m: PhantomData, - encoding: PhantomData, - }; + let mut r = Elem::zero(); unsafe { f(r.limbs.as_mut_ptr(), a.limbs.as_ptr()) } r } diff --git a/src/ec/suite_b/ops/p256.rs b/src/ec/suite_b/ops/p256.rs index d586587a47..4648a1e0c7 100644 --- a/src/ec/suite_b/ops/p256.rs +++ b/src/ec/suite_b/ops/p256.rs @@ -14,7 +14,7 @@ use super::{ elem::{binary_op, binary_op_assign}, - elem_sqr_mul, elem_sqr_mul_acc, Modulus, *, + elem_sqr_mul, elem_sqr_mul_acc, PublicModulus, *, }; pub(super) const NUM_LIMBS: usize = 256 / LIMB_BITS; @@ -22,9 +22,9 @@ pub(super) const NUM_LIMBS: usize = 256 / LIMB_BITS; pub static COMMON_OPS: CommonOps = CommonOps { num_limbs: elem::NumLimbs::P256, - q: Modulus { + q: PublicModulus { p: limbs_from_hex("ffffffff00000001000000000000000000000000ffffffffffffffffffffffff"), - rr: limbs_from_hex("4fffffffdfffffffffffffffefffffffbffffffff0000000000000003"), + rr: PublicElem::from_hex("4fffffffdfffffffffffffffefffffffbffffffff0000000000000003"), }, n: PublicElem::from_hex("ffffffff00000000ffffffffffffffffbce6faada7179e84f3b9cac2fc632551"), @@ -301,7 +301,7 @@ prefixed_extern! 
{ fn p256_scalar_sqr_rep_mont( r: *mut Limb, // [COMMON_OPS.num_limbs] a: *const Limb, // [COMMON_OPS.num_limbs] - rep: Limb, + rep: LeakyWord, ); } diff --git a/src/ec/suite_b/ops/p384.rs b/src/ec/suite_b/ops/p384.rs index faef4e07c5..d7d7b68d44 100644 --- a/src/ec/suite_b/ops/p384.rs +++ b/src/ec/suite_b/ops/p384.rs @@ -14,7 +14,7 @@ use super::{ elem::{binary_op, binary_op_assign}, - elem_sqr_mul, elem_sqr_mul_acc, Modulus, *, + elem_sqr_mul, elem_sqr_mul_acc, PublicModulus, *, }; pub(super) const NUM_LIMBS: usize = 384 / LIMB_BITS; @@ -22,9 +22,9 @@ pub(super) const NUM_LIMBS: usize = 384 / LIMB_BITS; pub static COMMON_OPS: CommonOps = CommonOps { num_limbs: elem::NumLimbs::P384, - q: Modulus { + q: PublicModulus { p: limbs_from_hex("fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffeffffffff0000000000000000ffffffff"), - rr: limbs_from_hex("10000000200000000fffffffe000000000000000200000000fffffffe00000001"), + rr: PublicElem::from_hex("10000000200000000fffffffe000000000000000200000000fffffffe00000001"), }, n: PublicElem::from_hex("ffffffffffffffffffffffffffffffffffffffffffffffffc7634d81f4372ddf581a0db248b0a77aecec196accc52973"), diff --git a/src/ec/suite_b/private_key.rs b/src/ec/suite_b/private_key.rs index 043208b7eb..bbe854bd94 100644 --- a/src/ec/suite_b/private_key.rs +++ b/src/ec/suite_b/private_key.rs @@ -19,14 +19,15 @@ use super::{ops::*, verify_affine_point_is_on_the_curve}; use crate::{arithmetic::montgomery::R, cpu, ec, error, limb, rand}; /// Generates a random scalar in the range [1, n). -pub fn random_scalar( +pub(super) fn random_scalar( ops: &PrivateKeyOps, + n: &Modulus, rng: &dyn rand::SecureRandom, ) -> Result { let mut bytes = [0; ec::SCALAR_MAX_BYTES]; let bytes = &mut bytes[..ops.common.len()]; generate_private_scalar_bytes(ops, rng, bytes)?; - scalar_from_big_endian_bytes(ops, bytes) + scalar_from_big_endian_bytes(n, bytes) } pub fn generate_private_scalar_bytes( @@ -84,17 +85,18 @@ pub fn generate_private_scalar_bytes( // private key that way, which means we have to convert it to a Scalar whenever // we need to use it. #[inline] -pub fn private_key_as_scalar(ops: &PrivateKeyOps, private_key: &ec::Seed) -> Scalar { +pub(super) fn private_key_as_scalar(n: &Modulus, private_key: &ec::Seed) -> Scalar { // This cannot fail because we know the private key is valid. - scalar_from_big_endian_bytes(ops, private_key.bytes_less_safe()).unwrap() + scalar_from_big_endian_bytes(n, private_key.bytes_less_safe()).unwrap() } -pub fn check_scalar_big_endian_bytes( +pub(super) fn check_scalar_big_endian_bytes( ops: &PrivateKeyOps, bytes: &[u8], ) -> Result<(), error::Unspecified> { debug_assert_eq!(bytes.len(), ops.common.len()); - scalar_from_big_endian_bytes(ops, bytes).map(|_| ()) + let n = ops.common.scalar_modulus(); + scalar_from_big_endian_bytes(&n, bytes).map(|_| ()) } // Parses a fixed-length (zero-padded) big-endian-encoded scalar in the range @@ -102,8 +104,8 @@ pub fn check_scalar_big_endian_bytes( // value is actually in range. In other words, this won't leak anything about a // valid value, but it might leak small amounts of information about an invalid // value (which constraint it failed). 
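// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the patch): the range rule described in the
// comment above and enforced by `scalar_from_big_endian_bytes` below, using
// plain integers rather than ring's fixed-length constant-time limb parsing.
// A zero-padded big-endian encoding is a valid private scalar iff its value d
// satisfies 1 <= d < n; checking it as "d != 0 && d < n" means n - 1 never has
// to be computed or stored.
fn scalar_in_range_toy(bytes: [u8; 8], n: u64) -> Result<u64, ()> {
    let d = u64::from_be_bytes(bytes);
    if d != 0 && d < n {
        Ok(d)
    } else {
        Err(()) // out of range: either zero or >= n
    }
}
// ---------------------------------------------------------------------------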
-pub fn scalar_from_big_endian_bytes( - ops: &PrivateKeyOps, +pub(super) fn scalar_from_big_endian_bytes( + n: &Modulus, bytes: &[u8], ) -> Result { // [NSA Suite B Implementer's Guide to ECDSA] Appendix A.1.2, and @@ -121,7 +123,7 @@ pub fn scalar_from_big_endian_bytes( // way, we avoid needing to compute or store the value (n - 1), we avoid the // need to implement a function to add one to a scalar, and we avoid needing // to convert the scalar back into an array of bytes. - scalar_parse_big_endian_fixed_consttime(ops.common, untrusted::Input::from(bytes)) + scalar_parse_big_endian_fixed_consttime(n, untrusted::Input::from(bytes)) } pub(super) fn public_from_private( @@ -130,20 +132,23 @@ pub(super) fn public_from_private( my_private_key: &ec::Seed, cpu: cpu::Features, ) -> Result<(), error::Unspecified> { + let q = ops.common.elem_modulus(); let elem_and_scalar_bytes = ops.common.len(); debug_assert_eq!(public_out.len(), 1 + (2 * elem_and_scalar_bytes)); - let my_private_key = private_key_as_scalar(ops, my_private_key); + let n = ops.common.scalar_modulus(); + let my_private_key = private_key_as_scalar(&n, my_private_key); let my_public_key = ops.point_mul_base(&my_private_key, cpu); public_out[0] = 4; // Uncompressed encoding. let (x_out, y_out) = public_out[1..].split_at_mut(elem_and_scalar_bytes); // `big_endian_affine_from_jacobian` verifies that the point is not at // infinity and is on the curve. - big_endian_affine_from_jacobian(ops, x_out, Some(y_out), &my_public_key, cpu) + big_endian_affine_from_jacobian(ops, &q, x_out, Some(y_out), &my_public_key, cpu) } pub(super) fn affine_from_jacobian( ops: &PrivateKeyOps, + q: &Modulus, p: &Point, cpu: cpu::Features, ) -> Result<(Elem, Elem), error::Unspecified> { @@ -173,19 +178,20 @@ pub(super) fn affine_from_jacobian( // If we validated our inputs correctly and then computed (x, y, z), then // (x, y, z) will be on the curve. See // `verify_affine_point_is_on_the_curve_scaled` for the motivation. 
- verify_affine_point_is_on_the_curve(ops.common, (&x_aff, &y_aff))?; + verify_affine_point_is_on_the_curve(ops.common, q, (&x_aff, &y_aff))?; Ok((x_aff, y_aff)) } pub(super) fn big_endian_affine_from_jacobian( ops: &PrivateKeyOps, + q: &Modulus, x_out: &mut [u8], y_out: Option<&mut [u8]>, p: &Point, cpu: cpu::Features, ) -> Result<(), error::Unspecified> { - let (x_aff, y_aff) = affine_from_jacobian(ops, p, cpu)?; + let (x_aff, y_aff) = affine_from_jacobian(ops, q, p, cpu)?; let x = ops.common.elem_unencoded(&x_aff); limb::big_endian_from_limbs(ops.leak_limbs(&x), x_out); if let Some(y_out) = y_out { diff --git a/src/ec/suite_b/public_key.rs b/src/ec/suite_b/public_key.rs index f62727ebd0..fba7466fdb 100644 --- a/src/ec/suite_b/public_key.rs +++ b/src/ec/suite_b/public_key.rs @@ -28,6 +28,7 @@ use crate::{arithmetic::montgomery::*, cpu, error}; /// http://nvlpubs.nist.gov/nistpubs/SpecialPublications/NIST.SP.800-56Ar2.pdf pub(super) fn parse_uncompressed_point( ops: &PublicKeyOps, + q: &Modulus, input: untrusted::Input, cpu: cpu::Features, ) -> Result<(Elem, Elem), error::Unspecified> { @@ -44,15 +45,15 @@ pub(super) fn parse_uncompressed_point( // NIST SP 800-56A Step 2: "Verify that xQ and yQ are integers in the // interval [0, p-1] in the case that q is an odd prime p[.]" - let x = ops.elem_parse(input, cpu)?; - let y = ops.elem_parse(input, cpu)?; + let x = ops.elem_parse(q, input, cpu)?; + let y = ops.elem_parse(q, input, cpu)?; Ok((x, y)) })?; // NIST SP 800-56A Step 3: "If q is an odd prime p, verify that // yQ**2 = xQ**3 + axQ + b in GF(p), where the arithmetic is performed // modulo p." - verify_affine_point_is_on_the_curve(ops.common, (&x, &y))?; + verify_affine_point_is_on_the_curve(ops.common, q, (&x, &y))?; // NIST SP 800-56A Note: "Since its order is not verified, there is no // check that the public key is in the correct EC subgroup." @@ -85,8 +86,9 @@ mod tests { let is_valid = test_case.consume_string("Result") == "P"; let curve_ops = public_key_ops_from_curve_name(&curve_name); + let q = curve_ops.common.elem_modulus(); - let result = parse_uncompressed_point(curve_ops, public_key, cpu); + let result = parse_uncompressed_point(curve_ops, &q, public_key, cpu); assert_eq!(is_valid, result.is_ok()); // TODO: Verify that we when we re-serialize the parsed (x, y), the