From eb6bebaee3947f8bca46816fa6bf6182085f1b56 Mon Sep 17 00:00:00 2001
From: Jonas Nick
Date: Wed, 4 Jan 2023 11:21:04 +0000
Subject: [PATCH] scalar: restrict split_lambda args, improve doc and VERIFY_CHECKs

VERIFY_CHECK(r1 != r2) is added because otherwise the verify_scalar_split fails.
---
 src/scalar.h      | 7 ++++---
 src/scalar_impl.h | 8 ++++++--
 2 files changed, 10 insertions(+), 5 deletions(-)

diff --git a/src/scalar.h b/src/scalar.h
index b9cb6b059c06d..63c0d646a3c26 100644
--- a/src/scalar.h
+++ b/src/scalar.h
@@ -88,9 +88,10 @@ static int secp256k1_scalar_eq(const secp256k1_scalar *a, const secp256k1_scalar
 /** Find r1 and r2 such that r1+r2*2^128 = k. */
 static void secp256k1_scalar_split_128(secp256k1_scalar *r1, secp256k1_scalar *r2, const secp256k1_scalar *k);
 
-/** Find r1 and r2 such that r1+r2*lambda = k,
- * where r1 and r2 or their negations are maximum 128 bits long (see secp256k1_ge_mul_lambda). */
-static void secp256k1_scalar_split_lambda(secp256k1_scalar *r1, secp256k1_scalar *r2, const secp256k1_scalar *k);
+/** Find r1 and r2 such that r1+r2*lambda = k, where r1 and r2 or their
+ * negations are maximum 128 bits long (see secp256k1_ge_mul_lambda). It is
+ * required that r1, r2, and k all point to different objects. */
+static void secp256k1_scalar_split_lambda(secp256k1_scalar * SECP256K1_RESTRICT r1, secp256k1_scalar * SECP256K1_RESTRICT r2, const secp256k1_scalar * SECP256K1_RESTRICT k);
 
 /** Multiply a and b (without taking the modulus!), divide by 2**shift, and round to the nearest integer. Shift must be at least 256. */
 static void secp256k1_scalar_mul_shift_var(secp256k1_scalar *r, const secp256k1_scalar *a, const secp256k1_scalar *b, unsigned int shift);
diff --git a/src/scalar_impl.h b/src/scalar_impl.h
index 9e72df2e50e6b..bed7f95fcb0df 100644
--- a/src/scalar_impl.h
+++ b/src/scalar_impl.h
@@ -52,7 +52,10 @@ static int secp256k1_scalar_set_b32_seckey(secp256k1_scalar *r, const unsigned c
  * nontrivial to get full test coverage for the exhaustive tests. We therefore
  * (arbitrarily) set r2 = k + 5 (mod n) and r1 = k - r2 * lambda (mod n).
  */
-static void secp256k1_scalar_split_lambda(secp256k1_scalar *r1, secp256k1_scalar *r2, const secp256k1_scalar *k) {
+static void secp256k1_scalar_split_lambda(secp256k1_scalar * SECP256K1_RESTRICT r1, secp256k1_scalar * SECP256K1_RESTRICT r2, const secp256k1_scalar * SECP256K1_RESTRICT k) {
+    VERIFY_CHECK(r1 != k);
+    VERIFY_CHECK(r2 != k);
+    VERIFY_CHECK(r1 != r2);
     *r2 = (*k + 5) % EXHAUSTIVE_TEST_ORDER;
     *r1 = (*k + (EXHAUSTIVE_TEST_ORDER - *r2) * EXHAUSTIVE_TEST_LAMBDA) % EXHAUSTIVE_TEST_ORDER;
 }
@@ -119,7 +122,7 @@ static void secp256k1_scalar_split_lambda_verify(const secp256k1_scalar *r1, con
  *
  * See proof below.
  */
-static void secp256k1_scalar_split_lambda(secp256k1_scalar *r1, secp256k1_scalar *r2, const secp256k1_scalar *k) {
+static void secp256k1_scalar_split_lambda(secp256k1_scalar * SECP256K1_RESTRICT r1, secp256k1_scalar * SECP256K1_RESTRICT r2, const secp256k1_scalar * SECP256K1_RESTRICT k) {
     secp256k1_scalar c1, c2;
     static const secp256k1_scalar minus_b1 = SECP256K1_SCALAR_CONST(
         0x00000000UL, 0x00000000UL, 0x00000000UL, 0x00000000UL,
@@ -139,6 +142,7 @@ static void secp256k1_scalar_split_lambda(secp256k1_scalar *r1, secp256k1_scalar
     );
     VERIFY_CHECK(r1 != k);
     VERIFY_CHECK(r2 != k);
+    VERIFY_CHECK(r1 != r2);
     /* these _var calls are constant time since the shift amount is constant */
     secp256k1_scalar_mul_shift_var(&c1, k, &g1, 384);
     secp256k1_scalar_mul_shift_var(&c2, k, &g2, 384);
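
Illustration (not part of the patch, and not libsecp256k1 code): the change documents and enforces that r1, r2 and k must not alias, via the library's SECP256K1_RESTRICT qualifier macro plus VERIFY_CHECKs that are active in VERIFY builds. The standalone C sketch below shows the same caller-side contract with standard C99 restrict and assert; the split function and its toy integer types are hypothetical and only mirror the shape of secp256k1_scalar_split_lambda's interface.

/* Toy analogue of the aliasing contract added by the patch: the two output
 * pointers and the input pointer must refer to distinct objects. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical "split": r1 receives the low 32 bits of k, r2 the high 32 bits.
 * The restrict qualifiers promise no aliasing; the asserts check the promise
 * at runtime, analogous to the patch's VERIFY_CHECKs. */
static void split(uint32_t * restrict r1, uint32_t * restrict r2, const uint64_t * restrict k) {
    assert((const void *)r1 != (const void *)k);
    assert((const void *)r2 != (const void *)k);
    assert(r1 != r2);
    *r1 = (uint32_t)(*k & 0xffffffffu);
    *r2 = (uint32_t)(*k >> 32);
}

int main(void) {
    uint64_t k = 0x1122334455667788ULL;
    uint32_t r1, r2; /* distinct objects, as the contract requires */
    split(&r1, &r2, &k);
    printf("r1=%08x r2=%08x\n", (unsigned)r1, (unsigned)r2);
    return 0;
}

Passing the same object for r1 and r2, or reusing k as an output, would break the restrict promise and trip the checks, which is the kind of misuse the added VERIFY_CHECK(r1 != r2) catches in VERIFY builds.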