From 246ba22f788dfb71cdda247c4b18352ab0046820 Mon Sep 17 00:00:00 2001 From: "Jan Winkelmann (keks)" Date: Tue, 5 Nov 2024 15:57:32 +0100 Subject: [PATCH 01/18] add proc-macro helper crate from hacl-rs --- Cargo.lock | 4 +++ Cargo.toml | 1 + libcrux-hacl-rs-krml/Cargo.toml | 11 ++++++++ libcrux-hacl-rs-krml/src/lib.rs | 46 +++++++++++++++++++++++++++++++++ 4 files changed, 62 insertions(+) create mode 100644 libcrux-hacl-rs-krml/Cargo.toml create mode 100644 libcrux-hacl-rs-krml/src/lib.rs diff --git a/Cargo.lock b/Cargo.lock index 4bd462b69..d51b56b59 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -990,6 +990,10 @@ dependencies = [ "wasm-bindgen-test", ] +[[package]] +name = "libcrux-hacl-rs-krml" +version = "0.1.0" + [[package]] name = "libcrux-hkdf" version = "0.0.2-beta.2" diff --git a/Cargo.toml b/Cargo.toml index 22a1c40af..835db1f78 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -17,6 +17,7 @@ members = [ "libcrux-ecdh", "libcrux-psq", "cavp", + "libcrux-hacl-rs-krml", ] [workspace.package] diff --git a/libcrux-hacl-rs-krml/Cargo.toml b/libcrux-hacl-rs-krml/Cargo.toml new file mode 100644 index 000000000..0c4972b18 --- /dev/null +++ b/libcrux-hacl-rs-krml/Cargo.toml @@ -0,0 +1,11 @@ +[package] +name = "libcrux-hacl-rs-krml" +version = "0.1.0" +edition = "2021" + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] + +[lib] +proc-macro=true diff --git a/libcrux-hacl-rs-krml/src/lib.rs b/libcrux-hacl-rs-krml/src/lib.rs new file mode 100644 index 000000000..18f6fe094 --- /dev/null +++ b/libcrux-hacl-rs-krml/src/lib.rs @@ -0,0 +1,46 @@ +use proc_macro::{TokenStream,TokenTree,Delimiter}; + +fn skip_comma>(ts: &mut T) { + match ts.next() { + | Some (TokenTree::Punct(p)) => assert_eq!(p.as_char(), ','), + | _ => panic!("Expected comma") + } +} + +fn accept_token>(ts: &mut T) -> TokenTree { + match ts.next() { + | Some(t) => t, + | _ => panic!("early end") + } +} + +fn brace(ts: TokenStream) -> TokenTree { + 
TokenTree::Group(proc_macro::Group::new(Delimiter::Brace, ts)) +} + +#[proc_macro] +pub fn unroll_for(ts: TokenStream) -> TokenStream { + let mut i = ts.into_iter(); + let n_loops = accept_token(&mut i).to_string().parse::().unwrap(); + skip_comma(&mut i); + let var = accept_token(&mut i).to_string(); + let var = &var[1..var.len()-1]; + skip_comma(&mut i); + let start = accept_token(&mut i).to_string(); + skip_comma(&mut i); + let increment = accept_token(&mut i).to_string(); + skip_comma(&mut i); + let grouped_body = brace(TokenStream::from_iter(i)); + let chunks = + (0..n_loops).map(|i| { + let chunks = [ + format!("const {}: u32 = {} + {} * {};", var, start, i, increment).parse().unwrap(), + TokenStream::from(grouped_body.clone()), + ";".parse().unwrap() + ]; + TokenStream::from(brace(TokenStream::from_iter(chunks))) + }) + ; + TokenStream::from(brace(TokenStream::from_iter(chunks.into_iter().flatten()))) + // "{ let i = 0; println!(\"FROM MACRO{}\", i); }".parse().unwrap() +} From 8f21c13cfd155799a422b9d0194abc1e58a27ab2 Mon Sep 17 00:00:00 2001 From: "Jan Winkelmann (keks)" Date: Tue, 5 Nov 2024 16:02:49 +0100 Subject: [PATCH 02/18] add hacl-rs hashing code, make libcrux use hacl-rs impl for sha2 --- Cargo.lock | 1 + Cargo.toml | 67 +- src/digest.rs | 44 +- src/hacl_rs/fstar.rs | 5 + src/hacl_rs/fstar/uint128.rs | 79 ++ src/hacl_rs/fstar/uint16.rs | 21 + src/hacl_rs/fstar/uint32.rs | 21 + src/hacl_rs/fstar/uint64.rs | 21 + src/hacl_rs/fstar/uint8.rs | 22 + src/hacl_rs/hash_sha1.rs | 378 +++++++++ src/hacl_rs/hash_sha2.rs | 1310 +++++++++++++++++++++++++++++ src/hacl_rs/hkdf.rs | 452 ++++++++++ src/hacl_rs/hmac.rs | 764 +++++++++++++++++ src/hacl_rs/lowstar.rs | 2 + src/hacl_rs/lowstar/endianness.rs | 53 ++ src/hacl_rs/lowstar/ignore.rs | 1 + src/hacl_rs/mod.rs | 9 + src/hacl_rs/streaming_types.rs | 41 + src/lib.rs | 1 + tests/sha2.rs | 9 +- 20 files changed, 3264 insertions(+), 37 deletions(-) create mode 100644 src/hacl_rs/fstar.rs create mode 100644 
src/hacl_rs/fstar/uint128.rs create mode 100644 src/hacl_rs/fstar/uint16.rs create mode 100644 src/hacl_rs/fstar/uint32.rs create mode 100644 src/hacl_rs/fstar/uint64.rs create mode 100644 src/hacl_rs/fstar/uint8.rs create mode 100644 src/hacl_rs/hash_sha1.rs create mode 100644 src/hacl_rs/hash_sha2.rs create mode 100644 src/hacl_rs/hkdf.rs create mode 100644 src/hacl_rs/hmac.rs create mode 100644 src/hacl_rs/lowstar.rs create mode 100644 src/hacl_rs/lowstar/endianness.rs create mode 100644 src/hacl_rs/lowstar/ignore.rs create mode 100644 src/hacl_rs/mod.rs create mode 100644 src/hacl_rs/streaming_types.rs diff --git a/Cargo.lock b/Cargo.lock index d51b56b59..5a9dc41ce 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -940,6 +940,7 @@ dependencies = [ "libcrux", "libcrux-ecdh", "libcrux-hacl", + "libcrux-hacl-rs-krml", "libcrux-hkdf", "libcrux-hmac", "libcrux-kem", diff --git a/Cargo.toml b/Cargo.toml index 835db1f78..ae8f8bfc6 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,23 +1,23 @@ [workspace] members = [ - "sys/hacl", - "sys/libjade", - "sys/platform", - "sys/pqclean", - "sys/lib25519", - "benchmarks", - "fuzz", - "libcrux-ml-kem", - "libcrux-sha3", - "libcrux-ml-dsa", - "libcrux-intrinsics", - "libcrux-kem", - "libcrux-hmac", - "libcrux-hkdf", - "libcrux-ecdh", - "libcrux-psq", - "cavp", + "sys/hacl", + "sys/libjade", + "sys/platform", + "sys/pqclean", + "sys/lib25519", + "benchmarks", + "fuzz", + "libcrux-ml-kem", + "libcrux-sha3", + "libcrux-ml-dsa", + "libcrux-intrinsics", + "libcrux-kem", + "libcrux-hmac", + "libcrux-hkdf", + "libcrux-ecdh", + "libcrux-psq", "libcrux-hacl-rs-krml", + "cavp", ] [workspace.package] @@ -44,15 +44,15 @@ readme.workspace = true documentation = "https://docs.rs/libcrux/" description = "The Formally Verified Cryptography Library" exclude = [ - "/tests", - "/specs", - "/proofs", - "/*.py", - "/wasm-demo", - "/fuzz", - "/git-hooks", - "/architecture", - "/libcrux.fst.config.json", + "/tests", + "/specs", + "/proofs", + "/*.py", + 
"/wasm-demo", + "/fuzz", + "/git-hooks", + "/architecture", + "/libcrux.fst.config.json", ] [lib] @@ -63,6 +63,7 @@ bench = false # so libtest doesn't eat the argumen libcrux-platform = { version = "=0.0.2-beta.2", path = "sys/platform" } [dependencies] +krml = { package = "libcrux-hacl-rs-krml", path = "libcrux-hacl-rs-krml" } libcrux-hacl = { version = "=0.0.2-beta.2", path = "sys/hacl" } libcrux-platform = { version = "=0.0.2-beta.2", path = "sys/platform" } libcrux-hkdf = { version = "=0.0.2-beta.2", path = "libcrux-hkdf" } @@ -113,11 +114,11 @@ panic = "abort" [lints.rust] unexpected_cfgs = { level = "warn", check-cfg = [ - 'cfg(hax)', - 'cfg(eurydice)', - 'cfg(doc_cfg)', - 'cfg(libjade)', - 'cfg(simd128)', - 'cfg(simd256)', - 'cfg(aes_ni)', + 'cfg(hax)', + 'cfg(eurydice)', + 'cfg(doc_cfg)', + 'cfg(libjade)', + 'cfg(simd128)', + 'cfg(simd256)', + 'cfg(aes_ni)', ] } diff --git a/src/digest.rs b/src/digest.rs index 3f8dc4a4e..f40702a2f 100644 --- a/src/digest.rs +++ b/src/digest.rs @@ -291,10 +291,52 @@ macro_rules! impl_streaming { }; } impl_streaming!(Sha2_224, Sha224, Sha2_224Digest); -impl_streaming!(Sha2_256, Sha256, Sha2_256Digest); impl_streaming!(Sha2_384, Sha384, Sha2_384Digest); impl_streaming!(Sha2_512, Sha512, Sha2_512Digest); +// Streaming API - This is the recommended one. +// For implementations based on hacl_rs (over hacl-c) +macro_rules! impl_streaming_hacl_rs { + ($name:ident, $state:ty, $result:ty) => { + #[derive(Clone)] + pub struct $name { + state: $state, + } + impl $name { + /// Initialize a new digest state. + pub fn new() -> Self { + Self { + state: <$state>::new(), + } + } + + /// Add the `payload` to the digest. + pub fn update(&mut self, payload: &[u8]) { + self.state.update(payload); + } + + /// Get the digest. + /// + /// Note that the digest state can be continued to be used, to extend the + /// digest. 
+ pub fn finish(&self, digest: &mut $result) { + self.state.finish(digest) + } + } + + impl Default for $name { + fn default() -> Self { + Self::new() + } + } + }; +} + +impl_streaming_hacl_rs!( + Sha2_256, + crate::hacl_rs::hash_sha2::HaclRs_Sha2_Sha256, + Sha2_256Digest +); // SHAKE messages from SHA 3 #[cfg(simd256)] diff --git a/src/hacl_rs/fstar.rs b/src/hacl_rs/fstar.rs new file mode 100644 index 000000000..e15b79144 --- /dev/null +++ b/src/hacl_rs/fstar.rs @@ -0,0 +1,5 @@ +pub mod uint128; +pub mod uint16; +pub mod uint32; +pub mod uint64; +pub mod uint8; diff --git a/src/hacl_rs/fstar/uint128.rs b/src/hacl_rs/fstar/uint128.rs new file mode 100644 index 000000000..1c8b2446c --- /dev/null +++ b/src/hacl_rs/fstar/uint128.rs @@ -0,0 +1,79 @@ +#![allow(non_camel_case_types)] + +pub type uint128 = u128; + +pub fn add(x: uint128, y: uint128) -> uint128 { + x.wrapping_add(y) +} +pub fn add_mod(x: uint128, y: uint128) -> uint128 { + x.wrapping_add(y) +} +pub fn sub(x: uint128, y: uint128) -> uint128 { + x.wrapping_sub(y) +} +pub fn sub_mod(x: uint128, y: uint128) -> uint128 { + x.wrapping_sub(y) +} +pub fn logand(x: uint128, y: uint128) -> uint128 { + x & y +} +pub fn logxor(x: uint128, y: uint128) -> uint128 { + x ^ y +} +pub fn logor(x: uint128, y: uint128) -> uint128 { + x | y +} +pub fn lognot(x: uint128) -> uint128 { + !x +} +pub fn shift_left(x: uint128, y: u32) -> uint128 { + x.wrapping_shl(y) +} +pub fn shift_right(x: uint128, y: u32) -> uint128 { + x.wrapping_shr(y) +} +pub fn eq(x: uint128, y: uint128) -> bool { + x == y +} +pub fn gt(x: uint128, y: uint128) -> bool { + x > y +} +pub fn lt(x: uint128, y: uint128) -> bool { + x < y +} +pub fn gte(x: uint128, y: uint128) -> bool { + x >= y +} +pub fn lte(x: uint128, y: uint128) -> bool { + x <= y +} +pub fn eq_mask(a: uint128, b: uint128) -> uint128 { + let x = a ^ b; + let minus_x = (!x).wrapping_add(1u128); + let x_or_minus_x = x | minus_x; + let xnx = x_or_minus_x.wrapping_shr(127); + 
xnx.wrapping_sub(1u128) +} +pub fn gte_mask(a: uint128, b: uint128) -> uint128 { + let x = a; + let y = b; + let x_xor_y = x ^ y; + let x_sub_y = x.wrapping_sub(y); + let x_sub_y_xor_y = x_sub_y ^ y; + let q = x_xor_y | x_sub_y_xor_y; + let x_xor_q = x ^ q; + let x_xor_q_ = x_xor_q.wrapping_shr(127); + x_xor_q_.wrapping_sub(1u128) +} +pub fn uint64_to_uint128(x: u64) -> uint128 { + x as u128 +} +pub fn uint128_to_uint64(x: uint128) -> u64 { + x as u64 +} +pub fn mul32(x: u64, y: u32) -> uint128 { + (x as u128) * (y as u128) +} +pub fn mul_wide(x: u64, y: u64) -> uint128 { + (x as u128) * (y as u128) +} diff --git a/src/hacl_rs/fstar/uint16.rs b/src/hacl_rs/fstar/uint16.rs new file mode 100644 index 000000000..138e938f7 --- /dev/null +++ b/src/hacl_rs/fstar/uint16.rs @@ -0,0 +1,21 @@ +pub fn eq_mask(a: u16, b: u16) -> u16 +{ + let x = a ^ b; + let minus_x = (!x).wrapping_add(1u16); + let x_or_minus_x = x | minus_x; + let xnx = x_or_minus_x.wrapping_shr(15); + xnx.wrapping_sub(1u16) +} + +pub fn gte_mask(a: u16, b: u16) -> u16 +{ + let x = a; + let y = b; + let x_xor_y = x ^ y; + let x_sub_y = x.wrapping_sub(y); + let x_sub_y_xor_y = x_sub_y ^ y; + let q = x_xor_y | x_sub_y_xor_y; + let x_xor_q = x ^ q; + let x_xor_q_ = x_xor_q.wrapping_shr(15); + x_xor_q_.wrapping_sub(1u16) +} diff --git a/src/hacl_rs/fstar/uint32.rs b/src/hacl_rs/fstar/uint32.rs new file mode 100644 index 000000000..95520a4fd --- /dev/null +++ b/src/hacl_rs/fstar/uint32.rs @@ -0,0 +1,21 @@ +pub fn eq_mask(a: u32, b: u32) -> u32 +{ + let x = a ^ b; + let minus_x = (!x).wrapping_add(1u32); + let x_or_minus_x = x | minus_x; + let xnx = x_or_minus_x.wrapping_shr(31); + xnx.wrapping_sub(1u32) +} + +pub fn gte_mask(a: u32, b: u32) -> u32 +{ + let x = a; + let y = b; + let x_xor_y = x ^ y; + let x_sub_y = x.wrapping_sub(y); + let x_sub_y_xor_y = x_sub_y ^ y; + let q = x_xor_y | x_sub_y_xor_y; + let x_xor_q = x ^ q; + let x_xor_q_ = x_xor_q.wrapping_shr(31); + x_xor_q_.wrapping_sub(1u32) +} diff --git 
a/src/hacl_rs/fstar/uint64.rs b/src/hacl_rs/fstar/uint64.rs new file mode 100644 index 000000000..0c7d45896 --- /dev/null +++ b/src/hacl_rs/fstar/uint64.rs @@ -0,0 +1,21 @@ +pub fn eq_mask(a: u64, b: u64) -> u64 +{ + let x = a ^ b; + let minus_x = (!x).wrapping_add(1u64); + let x_or_minus_x = x | minus_x; + let xnx = x_or_minus_x.wrapping_shr(63); + xnx.wrapping_sub(1u64) +} + +pub fn gte_mask(a: u64, b: u64) -> u64 +{ + let x = a; + let y = b; + let x_xor_y = x ^ y; + let x_sub_y = x.wrapping_sub(y); + let x_sub_y_xor_y = x_sub_y ^ y; + let q = x_xor_y | x_sub_y_xor_y; + let x_xor_q = x ^ q; + let x_xor_q_ = x_xor_q.wrapping_shr(63); + x_xor_q_.wrapping_sub(1u64) +} diff --git a/src/hacl_rs/fstar/uint8.rs b/src/hacl_rs/fstar/uint8.rs new file mode 100644 index 000000000..0c80314d8 --- /dev/null +++ b/src/hacl_rs/fstar/uint8.rs @@ -0,0 +1,22 @@ +pub fn eq_mask(a: u8, b: u8) -> u8 +{ + let x = a ^ b; + let minus_x = (!x).wrapping_add(1u8); + let x_or_minus_x = x | minus_x; + let xnx = x_or_minus_x.wrapping_shr(7); + xnx.wrapping_sub(1u8) +} + +pub fn gte_mask(a: u8, b: u8) -> u8 +{ + let x = a; + let y = b; + let x_xor_y = x ^ y; + let x_sub_y = x.wrapping_sub(y); + let x_sub_y_xor_y = x_sub_y ^ y; + let q = x_xor_y | x_sub_y_xor_y; + let x_xor_q = x ^ q; + let x_xor_q_ = x_xor_q.wrapping_shr(7); + x_xor_q_.wrapping_sub(1u8) +} + diff --git a/src/hacl_rs/hash_sha1.rs b/src/hacl_rs/hash_sha1.rs new file mode 100644 index 000000000..2c6d4ffdf --- /dev/null +++ b/src/hacl_rs/hash_sha1.rs @@ -0,0 +1,378 @@ +#![allow(non_snake_case)] +#![allow(non_upper_case_globals)] +#![allow(non_camel_case_types)] +#![allow(unused_assignments)] +#![allow(unreachable_patterns)] + +use super::lowstar; + +const _h0: [u32; 5] = [ + 0x67452301u32, + 0xefcdab89u32, + 0x98badcfeu32, + 0x10325476u32, + 0xc3d2e1f0u32, +]; + +pub(crate) fn init(s: &mut [u32]) { + krml::unroll_for!( + 5, + "i", + 0u32, + 1u32, + s[i as usize] = (&crate::hacl_rs::hash_sha1::_h0)[i as usize] + ) +} + +fn update(h: 
&mut [u32], l: &[u8]) { + let ha: u32 = h[0usize]; + let hb: u32 = h[1usize]; + let hc: u32 = h[2usize]; + let hd: u32 = h[3usize]; + let he: u32 = h[4usize]; + let mut _w: [u32; 80] = [0u32; 80usize]; + for i in 0u32..80u32 { + let v: u32 = if i < 16u32 { + let b: (&[u8], &[u8]) = l.split_at(i.wrapping_mul(4u32) as usize); + let u: u32 = lowstar::endianness::load32_be(b.1); + u + } else { + let wmit3: u32 = (&_w)[i.wrapping_sub(3u32) as usize]; + let wmit8: u32 = (&_w)[i.wrapping_sub(8u32) as usize]; + let wmit14: u32 = (&_w)[i.wrapping_sub(14u32) as usize]; + let wmit16: u32 = (&_w)[i.wrapping_sub(16u32) as usize]; + (wmit3 ^ (wmit8 ^ (wmit14 ^ wmit16))).wrapping_shl(1u32) + | (wmit3 ^ (wmit8 ^ (wmit14 ^ wmit16))).wrapping_shr(31u32) + }; + (&mut _w)[i as usize] = v + } + for i in 0u32..80u32 { + let _a: u32 = h[0usize]; + let _b: u32 = h[1usize]; + let _c: u32 = h[2usize]; + let _d: u32 = h[3usize]; + let _e: u32 = h[4usize]; + let wmit: u32 = (&_w)[i as usize]; + let ite: u32 = if i < 20u32 { + _b & _c ^ !_b & _d + } else if 39u32 < i && i < 60u32 { + _b & _c ^ (_b & _d ^ _c & _d) + } else { + _b ^ (_c ^ _d) + }; + let ite0: u32 = if i < 20u32 { + 0x5a827999u32 + } else if i < 40u32 { + 0x6ed9eba1u32 + } else if i < 60u32 { + 0x8f1bbcdcu32 + } else { + 0xca62c1d6u32 + }; + let _T: u32 = (_a.wrapping_shl(5u32) | _a.wrapping_shr(27u32)) + .wrapping_add(ite) + .wrapping_add(_e) + .wrapping_add(ite0) + .wrapping_add(wmit); + h[0usize] = _T; + h[1usize] = _a; + h[2usize] = _b.wrapping_shl(30u32) | _b.wrapping_shr(2u32); + h[3usize] = _c; + h[4usize] = _d + } + for i in 0u32..80u32 { + (&mut _w)[i as usize] = 0u32 + } + let sta: u32 = h[0usize]; + let stb: u32 = h[1usize]; + let stc: u32 = h[2usize]; + let std: u32 = h[3usize]; + let ste: u32 = h[4usize]; + h[0usize] = sta.wrapping_add(ha); + h[1usize] = stb.wrapping_add(hb); + h[2usize] = stc.wrapping_add(hc); + h[3usize] = std.wrapping_add(hd); + h[4usize] = ste.wrapping_add(he) +} + +fn pad(len: u64, dst: &mut 
[u8]) { + let dst1: (&mut [u8], &mut [u8]) = dst.split_at_mut(0usize); + dst1.1[0usize] = 0x80u8; + let dst2: (&mut [u8], &mut [u8]) = dst1.1.split_at_mut(1usize); + for i in 0u32..128u32 + .wrapping_sub(9u32.wrapping_add(len.wrapping_rem(64u32 as u64) as u32)) + .wrapping_rem(64u32) + { + dst2.1[i as usize] = 0u8 + } + let dst3: (&mut [u8], &mut [u8]) = dst2.1.split_at_mut( + 128u32 + .wrapping_sub(9u32.wrapping_add(len.wrapping_rem(64u32 as u64) as u32)) + .wrapping_rem(64u32) as usize, + ); + lowstar::endianness::store64_be(dst3.1, len.wrapping_shl(3u32)) +} + +pub(crate) fn finish(s: &[u32], dst: &mut [u8]) { + krml::unroll_for!( + 5, + "i", + 0u32, + 1u32, + lowstar::endianness::store32_be( + &mut dst[i.wrapping_mul(4u32) as usize..], + (&s[0usize..])[i as usize] + ) + ) +} + +pub(crate) fn update_multi(s: &mut [u32], blocks: &[u8], n_blocks: u32) { + for i in 0u32..n_blocks { + let sz: u32 = 64u32; + let block: (&[u8], &[u8]) = blocks.split_at(sz.wrapping_mul(i) as usize); + crate::hacl_rs::hash_sha1::update(s, block.1) + } +} + +pub(crate) fn update_last(s: &mut [u32], prev_len: u64, input: &[u8], input_len: u32) { + let blocks_n: u32 = input_len.wrapping_div(64u32); + let blocks_len: u32 = blocks_n.wrapping_mul(64u32); + let blocks: (&[u8], &[u8]) = input.split_at(0usize); + let rest_len: u32 = input_len.wrapping_sub(blocks_len); + let rest: (&[u8], &[u8]) = blocks.1.split_at(blocks_len as usize); + crate::hacl_rs::hash_sha1::update_multi(s, rest.0, blocks_n); + let total_input_len: u64 = prev_len.wrapping_add(input_len as u64); + let pad_len: u32 = 1u32 + .wrapping_add( + 128u32 + .wrapping_sub(9u32.wrapping_add(total_input_len.wrapping_rem(64u32 as u64) as u32)) + .wrapping_rem(64u32), + ) + .wrapping_add(8u32); + let tmp_len: u32 = rest_len.wrapping_add(pad_len); + let mut tmp_twoblocks: [u8; 128] = [0u8; 128usize]; + let tmp: (&mut [u8], &mut [u8]) = tmp_twoblocks.split_at_mut(0usize); + let tmp_rest: (&mut [u8], &mut [u8]) = tmp.1.split_at_mut(0usize); 
+ let tmp_pad: (&mut [u8], &mut [u8]) = tmp_rest.1.split_at_mut(rest_len as usize); + (tmp_pad.0[0usize..rest_len as usize]).copy_from_slice(&rest.1[0usize..rest_len as usize]); + crate::hacl_rs::hash_sha1::pad(total_input_len, tmp_pad.1); + crate::hacl_rs::hash_sha1::update_multi(s, tmp.1, tmp_len.wrapping_div(64u32)) +} + +pub(crate) fn hash_oneshot(output: &mut [u8], input: &[u8], input_len: u32) { + let mut s: [u32; 5] = [ + 0x67452301u32, + 0xefcdab89u32, + 0x98badcfeu32, + 0x10325476u32, + 0xc3d2e1f0u32, + ]; + let blocks_n: u32 = input_len.wrapping_div(64u32); + let blocks_n1: u32 = if input_len.wrapping_rem(64u32) == 0u32 && blocks_n > 0u32 { + blocks_n.wrapping_sub(1u32) + } else { + blocks_n + }; + let blocks_len: u32 = blocks_n1.wrapping_mul(64u32); + let blocks: (&[u8], &[u8]) = input.split_at(0usize); + let rest_len: u32 = input_len.wrapping_sub(blocks_len); + let rest: (&[u8], &[u8]) = blocks.1.split_at(blocks_len as usize); + let blocks_n0: u32 = blocks_n1; + let blocks_len0: u32 = blocks_len; + let blocks0: &[u8] = rest.0; + let rest_len0: u32 = rest_len; + let rest0: &[u8] = rest.1; + crate::hacl_rs::hash_sha1::update_multi(&mut s, blocks0, blocks_n0); + crate::hacl_rs::hash_sha1::update_last(&mut s, blocks_len0 as u64, rest0, rest_len0); + crate::hacl_rs::hash_sha1::finish(&s, output) +} + +pub type state_t = crate::hacl_rs::streaming_types::state_32; + +pub fn malloc() -> Box<[crate::hacl_rs::streaming_types::state_32]> { + let buf: Box<[u8]> = vec![0u8; 64usize].into_boxed_slice(); + let mut block_state: Box<[u32]> = vec![0u32; 5usize].into_boxed_slice(); + crate::hacl_rs::hash_sha1::init(&mut block_state); + let s: crate::hacl_rs::streaming_types::state_32 = crate::hacl_rs::streaming_types::state_32 { + block_state, + buf, + total_len: 0u32 as u64, + }; + let p: Box<[crate::hacl_rs::streaming_types::state_32]> = vec![s].into_boxed_slice(); + p +} + +pub fn reset(state: &mut [crate::hacl_rs::streaming_types::state_32]) { + let block_state: &mut 
[u32] = &mut (state[0usize]).block_state; + crate::hacl_rs::hash_sha1::init(block_state); + let total_len: u64 = 0u32 as u64; + (state[0usize]).total_len = total_len +} + +/** +0 = success, 1 = max length exceeded +*/ +pub fn update0( + state: &mut [crate::hacl_rs::streaming_types::state_32], + chunk: &[u8], + chunk_len: u32, +) -> crate::hacl_rs::streaming_types::error_code { + let block_state: &mut [u32] = &mut (state[0usize]).block_state; + let total_len: u64 = (state[0usize]).total_len; + if chunk_len as u64 > 2305843009213693951u64.wrapping_sub(total_len) { + crate::hacl_rs::streaming_types::error_code::MaximumLengthExceeded + } else { + let sz: u32 = if total_len.wrapping_rem(64u32 as u64) == 0u64 && total_len > 0u64 { + 64u32 + } else { + total_len.wrapping_rem(64u32 as u64) as u32 + }; + if chunk_len <= 64u32.wrapping_sub(sz) { + let buf: &mut [u8] = &mut (state[0usize]).buf; + let total_len1: u64 = (state[0usize]).total_len; + let sz1: u32 = if total_len1.wrapping_rem(64u32 as u64) == 0u64 && total_len1 > 0u64 { + 64u32 + } else { + total_len1.wrapping_rem(64u32 as u64) as u32 + }; + let buf2: (&mut [u8], &mut [u8]) = buf.split_at_mut(sz1 as usize); + (buf2.1[0usize..chunk_len as usize]) + .copy_from_slice(&chunk[0usize..chunk_len as usize]); + let total_len2: u64 = total_len1.wrapping_add(chunk_len as u64); + (state[0usize]).total_len = total_len2 + } else if sz == 0u32 { + let buf: &mut [u8] = &mut (state[0usize]).buf; + let total_len1: u64 = (state[0usize]).total_len; + let sz1: u32 = if total_len1.wrapping_rem(64u32 as u64) == 0u64 && total_len1 > 0u64 { + 64u32 + } else { + total_len1.wrapping_rem(64u32 as u64) as u32 + }; + if sz1 != 0u32 { + crate::hacl_rs::hash_sha1::update_multi(block_state, buf, 1u32) + }; + let ite: u32 = if (chunk_len as u64).wrapping_rem(64u32 as u64) == 0u64 + && chunk_len as u64 > 0u64 + { + 64u32 + } else { + (chunk_len as u64).wrapping_rem(64u32 as u64) as u32 + }; + let n_blocks: u32 = 
chunk_len.wrapping_sub(ite).wrapping_div(64u32); + let data1_len: u32 = n_blocks.wrapping_mul(64u32); + let data2_len: u32 = chunk_len.wrapping_sub(data1_len); + let data1: (&[u8], &[u8]) = chunk.split_at(0usize); + let data2: (&[u8], &[u8]) = data1.1.split_at(data1_len as usize); + crate::hacl_rs::hash_sha1::update_multi( + block_state, + data2.0, + data1_len.wrapping_div(64u32), + ); + let dst: (&mut [u8], &mut [u8]) = buf.split_at_mut(0usize); + (dst.1[0usize..data2_len as usize]) + .copy_from_slice(&data2.1[0usize..data2_len as usize]); + (state[0usize]).total_len = total_len1.wrapping_add(chunk_len as u64) + } else { + let diff: u32 = 64u32.wrapping_sub(sz); + let chunk1: (&[u8], &[u8]) = chunk.split_at(0usize); + let chunk2: (&[u8], &[u8]) = chunk1.1.split_at(diff as usize); + let buf: &mut [u8] = &mut (state[0usize]).buf; + let total_len1: u64 = (state[0usize]).total_len; + let sz1: u32 = if total_len1.wrapping_rem(64u32 as u64) == 0u64 && total_len1 > 0u64 { + 64u32 + } else { + total_len1.wrapping_rem(64u32 as u64) as u32 + }; + let buf2: (&mut [u8], &mut [u8]) = buf.split_at_mut(sz1 as usize); + (buf2.1[0usize..diff as usize]).copy_from_slice(&chunk2.0[0usize..diff as usize]); + let total_len2: u64 = total_len1.wrapping_add(diff as u64); + (state[0usize]).total_len = total_len2; + let buf0: &mut [u8] = &mut (state[0usize]).buf; + let total_len10: u64 = (state[0usize]).total_len; + let sz10: u32 = if total_len10.wrapping_rem(64u32 as u64) == 0u64 && total_len10 > 0u64 + { + 64u32 + } else { + total_len10.wrapping_rem(64u32 as u64) as u32 + }; + if sz10 != 0u32 { + crate::hacl_rs::hash_sha1::update_multi(block_state, buf0, 1u32) + }; + let ite: u32 = if (chunk_len.wrapping_sub(diff) as u64).wrapping_rem(64u32 as u64) + == 0u64 + && chunk_len.wrapping_sub(diff) as u64 > 0u64 + { + 64u32 + } else { + (chunk_len.wrapping_sub(diff) as u64).wrapping_rem(64u32 as u64) as u32 + }; + let n_blocks: u32 = chunk_len + .wrapping_sub(diff) + .wrapping_sub(ite) + 
.wrapping_div(64u32); + let data1_len: u32 = n_blocks.wrapping_mul(64u32); + let data2_len: u32 = chunk_len.wrapping_sub(diff).wrapping_sub(data1_len); + let data1: (&[u8], &[u8]) = chunk2.1.split_at(0usize); + let data2: (&[u8], &[u8]) = data1.1.split_at(data1_len as usize); + crate::hacl_rs::hash_sha1::update_multi( + block_state, + data2.0, + data1_len.wrapping_div(64u32), + ); + let dst: (&mut [u8], &mut [u8]) = buf0.split_at_mut(0usize); + (dst.1[0usize..data2_len as usize]) + .copy_from_slice(&data2.1[0usize..data2_len as usize]); + (state[0usize]).total_len = + total_len10.wrapping_add(chunk_len.wrapping_sub(diff) as u64) + }; + crate::hacl_rs::streaming_types::error_code::Success + } +} + +pub fn digest(state: &[crate::hacl_rs::streaming_types::state_32], output: &mut [u8]) { + let block_state: &[u32] = &(state[0usize]).block_state; + let buf_: &[u8] = &(state[0usize]).buf; + let total_len: u64 = (state[0usize]).total_len; + let r: u32 = if total_len.wrapping_rem(64u32 as u64) == 0u64 && total_len > 0u64 { + 64u32 + } else { + total_len.wrapping_rem(64u32 as u64) as u32 + }; + let buf_1: (&[u8], &[u8]) = buf_.split_at(0usize); + let mut tmp_block_state: [u32; 5] = [0u32; 5usize]; + ((&mut tmp_block_state)[0usize..5usize]).copy_from_slice(&block_state[0usize..5usize]); + let buf_multi: (&[u8], &[u8]) = buf_1.1.split_at(0usize); + let ite: u32 = if r.wrapping_rem(64u32) == 0u32 && r > 0u32 { + 64u32 + } else { + r.wrapping_rem(64u32) + }; + let buf_last: (&[u8], &[u8]) = buf_multi.1.split_at(r.wrapping_sub(ite) as usize); + crate::hacl_rs::hash_sha1::update_multi(&mut tmp_block_state, buf_last.0, 0u32); + let prev_len_last: u64 = total_len.wrapping_sub(r as u64); + crate::hacl_rs::hash_sha1::update_last(&mut tmp_block_state, prev_len_last, buf_last.1, r); + crate::hacl_rs::hash_sha1::finish(&tmp_block_state, output) +} + +pub fn copy( + state: &[crate::hacl_rs::streaming_types::state_32], +) -> Box<[crate::hacl_rs::streaming_types::state_32]> { + let 
block_state0: &[u32] = &(state[0usize]).block_state; + let buf0: &[u8] = &(state[0usize]).buf; + let total_len0: u64 = (state[0usize]).total_len; + let mut buf: Box<[u8]> = vec![0u8; 64usize].into_boxed_slice(); + ((&mut buf)[0usize..64usize]).copy_from_slice(&buf0[0usize..64usize]); + let mut block_state: Box<[u32]> = vec![0u32; 5usize].into_boxed_slice(); + ((&mut block_state)[0usize..5usize]).copy_from_slice(&block_state0[0usize..5usize]); + let s: crate::hacl_rs::streaming_types::state_32 = crate::hacl_rs::streaming_types::state_32 { + block_state, + buf, + total_len: total_len0, + }; + let p: Box<[crate::hacl_rs::streaming_types::state_32]> = vec![s].into_boxed_slice(); + p +} + +pub fn hash(output: &mut [u8], input: &[u8], input_len: u32) { + crate::hacl_rs::hash_sha1::hash_oneshot(output, input, input_len) +} diff --git a/src/hacl_rs/hash_sha2.rs b/src/hacl_rs/hash_sha2.rs new file mode 100644 index 000000000..362deda67 --- /dev/null +++ b/src/hacl_rs/hash_sha2.rs @@ -0,0 +1,1310 @@ +#![allow(non_snake_case)] +#![allow(non_upper_case_globals)] +#![allow(non_camel_case_types)] +#![allow(unused_assignments)] +#![allow(unreachable_patterns)] + +use super::fstar; +use super::lowstar; + +pub(crate) const h224: [u32; 8] = [ + 0xc1059ed8u32, + 0x367cd507u32, + 0x3070dd17u32, + 0xf70e5939u32, + 0xffc00b31u32, + 0x68581511u32, + 0x64f98fa7u32, + 0xbefa4fa4u32, +]; + +pub(crate) const h256: [u32; 8] = [ + 0x6a09e667u32, + 0xbb67ae85u32, + 0x3c6ef372u32, + 0xa54ff53au32, + 0x510e527fu32, + 0x9b05688cu32, + 0x1f83d9abu32, + 0x5be0cd19u32, +]; + +pub(crate) const h384: [u64; 8] = [ + 0xcbbb9d5dc1059ed8u64, + 0x629a292a367cd507u64, + 0x9159015a3070dd17u64, + 0x152fecd8f70e5939u64, + 0x67332667ffc00b31u64, + 0x8eb44a8768581511u64, + 0xdb0c2e0d64f98fa7u64, + 0x47b5481dbefa4fa4u64, +]; + +pub(crate) const h512: [u64; 8] = [ + 0x6a09e667f3bcc908u64, + 0xbb67ae8584caa73bu64, + 0x3c6ef372fe94f82bu64, + 0xa54ff53a5f1d36f1u64, + 0x510e527fade682d1u64, + 0x9b05688c2b3e6c1fu64, + 
0x1f83d9abfb41bd6bu64, + 0x5be0cd19137e2179u64, +]; + +pub(crate) const k224_256: [u32; 64] = [ + 0x428a2f98u32, + 0x71374491u32, + 0xb5c0fbcfu32, + 0xe9b5dba5u32, + 0x3956c25bu32, + 0x59f111f1u32, + 0x923f82a4u32, + 0xab1c5ed5u32, + 0xd807aa98u32, + 0x12835b01u32, + 0x243185beu32, + 0x550c7dc3u32, + 0x72be5d74u32, + 0x80deb1feu32, + 0x9bdc06a7u32, + 0xc19bf174u32, + 0xe49b69c1u32, + 0xefbe4786u32, + 0x0fc19dc6u32, + 0x240ca1ccu32, + 0x2de92c6fu32, + 0x4a7484aau32, + 0x5cb0a9dcu32, + 0x76f988dau32, + 0x983e5152u32, + 0xa831c66du32, + 0xb00327c8u32, + 0xbf597fc7u32, + 0xc6e00bf3u32, + 0xd5a79147u32, + 0x06ca6351u32, + 0x14292967u32, + 0x27b70a85u32, + 0x2e1b2138u32, + 0x4d2c6dfcu32, + 0x53380d13u32, + 0x650a7354u32, + 0x766a0abbu32, + 0x81c2c92eu32, + 0x92722c85u32, + 0xa2bfe8a1u32, + 0xa81a664bu32, + 0xc24b8b70u32, + 0xc76c51a3u32, + 0xd192e819u32, + 0xd6990624u32, + 0xf40e3585u32, + 0x106aa070u32, + 0x19a4c116u32, + 0x1e376c08u32, + 0x2748774cu32, + 0x34b0bcb5u32, + 0x391c0cb3u32, + 0x4ed8aa4au32, + 0x5b9cca4fu32, + 0x682e6ff3u32, + 0x748f82eeu32, + 0x78a5636fu32, + 0x84c87814u32, + 0x8cc70208u32, + 0x90befffau32, + 0xa4506cebu32, + 0xbef9a3f7u32, + 0xc67178f2u32, +]; + +pub(crate) const k384_512: [u64; 80] = [ + 0x428a2f98d728ae22u64, + 0x7137449123ef65cdu64, + 0xb5c0fbcfec4d3b2fu64, + 0xe9b5dba58189dbbcu64, + 0x3956c25bf348b538u64, + 0x59f111f1b605d019u64, + 0x923f82a4af194f9bu64, + 0xab1c5ed5da6d8118u64, + 0xd807aa98a3030242u64, + 0x12835b0145706fbeu64, + 0x243185be4ee4b28cu64, + 0x550c7dc3d5ffb4e2u64, + 0x72be5d74f27b896fu64, + 0x80deb1fe3b1696b1u64, + 0x9bdc06a725c71235u64, + 0xc19bf174cf692694u64, + 0xe49b69c19ef14ad2u64, + 0xefbe4786384f25e3u64, + 0x0fc19dc68b8cd5b5u64, + 0x240ca1cc77ac9c65u64, + 0x2de92c6f592b0275u64, + 0x4a7484aa6ea6e483u64, + 0x5cb0a9dcbd41fbd4u64, + 0x76f988da831153b5u64, + 0x983e5152ee66dfabu64, + 0xa831c66d2db43210u64, + 0xb00327c898fb213fu64, + 0xbf597fc7beef0ee4u64, + 0xc6e00bf33da88fc2u64, + 0xd5a79147930aa725u64, + 
0x06ca6351e003826fu64, + 0x142929670a0e6e70u64, + 0x27b70a8546d22ffcu64, + 0x2e1b21385c26c926u64, + 0x4d2c6dfc5ac42aedu64, + 0x53380d139d95b3dfu64, + 0x650a73548baf63deu64, + 0x766a0abb3c77b2a8u64, + 0x81c2c92e47edaee6u64, + 0x92722c851482353bu64, + 0xa2bfe8a14cf10364u64, + 0xa81a664bbc423001u64, + 0xc24b8b70d0f89791u64, + 0xc76c51a30654be30u64, + 0xd192e819d6ef5218u64, + 0xd69906245565a910u64, + 0xf40e35855771202au64, + 0x106aa07032bbd1b8u64, + 0x19a4c116b8d2d0c8u64, + 0x1e376c085141ab53u64, + 0x2748774cdf8eeb99u64, + 0x34b0bcb5e19b48a8u64, + 0x391c0cb3c5c95a63u64, + 0x4ed8aa4ae3418acbu64, + 0x5b9cca4f7763e373u64, + 0x682e6ff3d6b2b8a3u64, + 0x748f82ee5defb2fcu64, + 0x78a5636f43172f60u64, + 0x84c87814a1f0ab72u64, + 0x8cc702081a6439ecu64, + 0x90befffa23631e28u64, + 0xa4506cebde82bde9u64, + 0xbef9a3f7b2c67915u64, + 0xc67178f2e372532bu64, + 0xca273eceea26619cu64, + 0xd186b8c721c0c207u64, + 0xeada7dd6cde0eb1eu64, + 0xf57d4f7fee6ed178u64, + 0x06f067aa72176fbau64, + 0x0a637dc5a2c898a6u64, + 0x113f9804bef90daeu64, + 0x1b710b35131c471bu64, + 0x28db77f523047d84u64, + 0x32caab7b40c72493u64, + 0x3c9ebe0a15c9bebcu64, + 0x431d67c49c100d4cu64, + 0x4cc5d4becb3e42b6u64, + 0x597f299cfc657e2au64, + 0x5fcb6fab3ad6faecu64, + 0x6c44198c4a475817u64, +]; + +pub(crate) fn sha256_init(hash: &mut [u32]) { + krml::unroll_for!(8, "i", 0u32, 1u32, { + let x: u32 = (&crate::hacl_rs::hash_sha2::h256)[i as usize]; + let os: (&mut [u32], &mut [u32]) = hash.split_at_mut(0usize); + os.1[i as usize] = x + }) +} + +#[inline] +fn sha256_update(b: &[u8], hash: &mut [u32]) { + let mut hash_old: [u32; 8] = [0u32; 8usize]; + let mut ws: [u32; 16] = [0u32; 16usize]; + ((&mut hash_old)[0usize..8usize]).copy_from_slice(&hash[0usize..8usize]); + let b1: &[u8] = b; + let u: u32 = lowstar::endianness::load32_be(&b1[0usize..]); + (&mut ws)[0usize] = u; + let u0: u32 = lowstar::endianness::load32_be(&b1[4usize..]); + (&mut ws)[1usize] = u0; + let u1: u32 = lowstar::endianness::load32_be(&b1[8usize..]); + (&mut 
ws)[2usize] = u1; + let u2: u32 = lowstar::endianness::load32_be(&b1[12usize..]); + (&mut ws)[3usize] = u2; + let u3: u32 = lowstar::endianness::load32_be(&b1[16usize..]); + (&mut ws)[4usize] = u3; + let u4: u32 = lowstar::endianness::load32_be(&b1[20usize..]); + (&mut ws)[5usize] = u4; + let u5: u32 = lowstar::endianness::load32_be(&b1[24usize..]); + (&mut ws)[6usize] = u5; + let u6: u32 = lowstar::endianness::load32_be(&b1[28usize..]); + (&mut ws)[7usize] = u6; + let u7: u32 = lowstar::endianness::load32_be(&b1[32usize..]); + (&mut ws)[8usize] = u7; + let u8: u32 = lowstar::endianness::load32_be(&b1[36usize..]); + (&mut ws)[9usize] = u8; + let u9: u32 = lowstar::endianness::load32_be(&b1[40usize..]); + (&mut ws)[10usize] = u9; + let u10: u32 = lowstar::endianness::load32_be(&b1[44usize..]); + (&mut ws)[11usize] = u10; + let u11: u32 = lowstar::endianness::load32_be(&b1[48usize..]); + (&mut ws)[12usize] = u11; + let u12: u32 = lowstar::endianness::load32_be(&b1[52usize..]); + (&mut ws)[13usize] = u12; + let u13: u32 = lowstar::endianness::load32_be(&b1[56usize..]); + (&mut ws)[14usize] = u13; + let u14: u32 = lowstar::endianness::load32_be(&b1[60usize..]); + (&mut ws)[15usize] = u14; + krml::unroll_for!(4, "i", 0u32, 1u32, { + krml::unroll_for!(16, "i0", 0u32, 1u32, { + let k_t: u32 = (&crate::hacl_rs::hash_sha2::k224_256) + [16u32.wrapping_mul(i).wrapping_add(i0) as usize]; + let ws_t: u32 = (&ws)[i0 as usize]; + let a0: u32 = hash[0usize]; + let b0: u32 = hash[1usize]; + let c0: u32 = hash[2usize]; + let d0: u32 = hash[3usize]; + let e0: u32 = hash[4usize]; + let f0: u32 = hash[5usize]; + let g0: u32 = hash[6usize]; + let h02: u32 = hash[7usize]; + let k_e_t: u32 = k_t; + let t1: u32 = h02 + .wrapping_add( + (e0.wrapping_shl(26u32) | e0.wrapping_shr(6u32)) + ^ ((e0.wrapping_shl(21u32) | e0.wrapping_shr(11u32)) + ^ (e0.wrapping_shl(7u32) | e0.wrapping_shr(25u32))), + ) + .wrapping_add(e0 & f0 ^ !e0 & g0) + .wrapping_add(k_e_t) + .wrapping_add(ws_t); + let t2: u32 
= ((a0.wrapping_shl(30u32) | a0.wrapping_shr(2u32)) + ^ ((a0.wrapping_shl(19u32) | a0.wrapping_shr(13u32)) + ^ (a0.wrapping_shl(10u32) | a0.wrapping_shr(22u32)))) + .wrapping_add(a0 & b0 ^ (a0 & c0 ^ b0 & c0)); + let a1: u32 = t1.wrapping_add(t2); + let b10: u32 = a0; + let c1: u32 = b0; + let d1: u32 = c0; + let e1: u32 = d0.wrapping_add(t1); + let f1: u32 = e0; + let g1: u32 = f0; + let h12: u32 = g0; + hash[0usize] = a1; + hash[1usize] = b10; + hash[2usize] = c1; + hash[3usize] = d1; + hash[4usize] = e1; + hash[5usize] = f1; + hash[6usize] = g1; + hash[7usize] = h12 + }); + if i < 3u32 { + krml::unroll_for!(16, "i0", 0u32, 1u32, { + let t16: u32 = (&ws)[i0 as usize]; + let t15: u32 = (&ws)[i0.wrapping_add(1u32).wrapping_rem(16u32) as usize]; + let t7: u32 = (&ws)[i0.wrapping_add(9u32).wrapping_rem(16u32) as usize]; + let t2: u32 = (&ws)[i0.wrapping_add(14u32).wrapping_rem(16u32) as usize]; + let s1: u32 = (t2.wrapping_shl(15u32) | t2.wrapping_shr(17u32)) + ^ ((t2.wrapping_shl(13u32) | t2.wrapping_shr(19u32)) ^ t2.wrapping_shr(10u32)); + let s0: u32 = (t15.wrapping_shl(25u32) | t15.wrapping_shr(7u32)) + ^ ((t15.wrapping_shl(14u32) | t15.wrapping_shr(18u32)) + ^ t15.wrapping_shr(3u32)); + (&mut ws)[i0 as usize] = s1.wrapping_add(t7).wrapping_add(s0).wrapping_add(t16) + }) + } + }); + krml::unroll_for!(8, "i", 0u32, 1u32, { + let x: u32 = (hash[i as usize]).wrapping_add((&hash_old)[i as usize]); + let os: (&mut [u32], &mut [u32]) = hash.split_at_mut(0usize); + os.1[i as usize] = x + }) +} + +pub(crate) fn sha256_update_nblocks(len: u32, b: &[u8], st: &mut [u32]) { + let blocks: u32 = len.wrapping_div(64u32); + for i in 0u32..blocks { + let b0: &[u8] = b; + let mb: (&[u8], &[u8]) = b0.split_at(i.wrapping_mul(64u32) as usize); + crate::hacl_rs::hash_sha2::sha256_update(mb.1, st) + } +} + +pub(crate) fn sha256_update_last(totlen: u64, len: u32, b: &[u8], hash: &mut [u32]) { + let blocks: u32 = if len.wrapping_add(8u32).wrapping_add(1u32) <= 64u32 { + 1u32 + } else { + 
2u32 + }; + let fin: u32 = blocks.wrapping_mul(64u32); + let mut last: [u8; 128] = [0u8; 128usize]; + let mut totlen_buf: [u8; 8] = [0u8; 8usize]; + let total_len_bits: u64 = totlen.wrapping_shl(3u32); + lowstar::endianness::store64_be(&mut totlen_buf, total_len_bits); + let b0: &[u8] = b; + ((&mut last)[0usize..len as usize]).copy_from_slice(&b0[0usize..len as usize]); + (&mut last)[len as usize] = 0x80u8; + ((&mut last)[fin.wrapping_sub(8u32) as usize..fin.wrapping_sub(8u32) as usize + 8usize]) + .copy_from_slice(&(&totlen_buf)[0usize..8usize]); + let last0: (&[u8], &[u8]) = last.split_at(0usize); + let last1: (&[u8], &[u8]) = last0.1.split_at(64usize); + let l0: &[u8] = last1.0; + let l1: &[u8] = last1.1; + let lb0: &[u8] = l0; + let lb1: &[u8] = l1; + let last00: &[u8] = lb0; + let last10: &[u8] = lb1; + crate::hacl_rs::hash_sha2::sha256_update(last00, hash); + if blocks > 1u32 { + crate::hacl_rs::hash_sha2::sha256_update(last10, hash) + } +} + +pub(crate) fn sha256_finish(st: &[u32], h: &mut [u8]) { + let mut hbuf: [u8; 32] = [0u8; 32usize]; + krml::unroll_for!( + 8, + "i", + 0u32, + 1u32, + lowstar::endianness::store32_be( + &mut (&mut hbuf)[i.wrapping_mul(4u32) as usize..], + st[i as usize] + ) + ); + (h[0usize..32usize]).copy_from_slice(&(&(&hbuf)[0usize..])[0usize..32usize]) +} + +#[inline] +fn sha224_init(hash: &mut [u32]) { + krml::unroll_for!(8, "i", 0u32, 1u32, { + let x: u32 = (&crate::hacl_rs::hash_sha2::h224)[i as usize]; + let os: (&mut [u32], &mut [u32]) = hash.split_at_mut(0usize); + os.1[i as usize] = x + }) +} + +#[inline] +fn sha224_update_nblocks(len: u32, b: &[u8], st: &mut [u32]) { + crate::hacl_rs::hash_sha2::sha256_update_nblocks(len, b, st) +} + +fn sha224_update_last(totlen: u64, len: u32, b: &[u8], st: &mut [u32]) { + crate::hacl_rs::hash_sha2::sha256_update_last(totlen, len, b, st) +} + +#[inline] +fn sha224_finish(st: &[u32], h: &mut [u8]) { + let mut hbuf: [u8; 32] = [0u8; 32usize]; + krml::unroll_for!( + 8, + "i", + 0u32, + 1u32, + 
lowstar::endianness::store32_be( + &mut (&mut hbuf)[i.wrapping_mul(4u32) as usize..], + st[i as usize] + ) + ); + (h[0usize..28usize]).copy_from_slice(&(&(&hbuf)[0usize..])[0usize..28usize]) +} + +pub(crate) fn sha512_init(hash: &mut [u64]) { + krml::unroll_for!(8, "i", 0u32, 1u32, { + let x: u64 = (&crate::hacl_rs::hash_sha2::h512)[i as usize]; + let os: (&mut [u64], &mut [u64]) = hash.split_at_mut(0usize); + os.1[i as usize] = x + }) +} + +#[inline] +fn sha512_update(b: &[u8], hash: &mut [u64]) { + let mut hash_old: [u64; 8] = [0u64; 8usize]; + let mut ws: [u64; 16] = [0u64; 16usize]; + ((&mut hash_old)[0usize..8usize]).copy_from_slice(&hash[0usize..8usize]); + let b1: &[u8] = b; + let u: u64 = lowstar::endianness::load64_be(&b1[0usize..]); + (&mut ws)[0usize] = u; + let u0: u64 = lowstar::endianness::load64_be(&b1[8usize..]); + (&mut ws)[1usize] = u0; + let u1: u64 = lowstar::endianness::load64_be(&b1[16usize..]); + (&mut ws)[2usize] = u1; + let u2: u64 = lowstar::endianness::load64_be(&b1[24usize..]); + (&mut ws)[3usize] = u2; + let u3: u64 = lowstar::endianness::load64_be(&b1[32usize..]); + (&mut ws)[4usize] = u3; + let u4: u64 = lowstar::endianness::load64_be(&b1[40usize..]); + (&mut ws)[5usize] = u4; + let u5: u64 = lowstar::endianness::load64_be(&b1[48usize..]); + (&mut ws)[6usize] = u5; + let u6: u64 = lowstar::endianness::load64_be(&b1[56usize..]); + (&mut ws)[7usize] = u6; + let u7: u64 = lowstar::endianness::load64_be(&b1[64usize..]); + (&mut ws)[8usize] = u7; + let u8: u64 = lowstar::endianness::load64_be(&b1[72usize..]); + (&mut ws)[9usize] = u8; + let u9: u64 = lowstar::endianness::load64_be(&b1[80usize..]); + (&mut ws)[10usize] = u9; + let u10: u64 = lowstar::endianness::load64_be(&b1[88usize..]); + (&mut ws)[11usize] = u10; + let u11: u64 = lowstar::endianness::load64_be(&b1[96usize..]); + (&mut ws)[12usize] = u11; + let u12: u64 = lowstar::endianness::load64_be(&b1[104usize..]); + (&mut ws)[13usize] = u12; + let u13: u64 = 
lowstar::endianness::load64_be(&b1[112usize..]); + (&mut ws)[14usize] = u13; + let u14: u64 = lowstar::endianness::load64_be(&b1[120usize..]); + (&mut ws)[15usize] = u14; + krml::unroll_for!(5, "i", 0u32, 1u32, { + krml::unroll_for!(16, "i0", 0u32, 1u32, { + let k_t: u64 = (&crate::hacl_rs::hash_sha2::k384_512) + [16u32.wrapping_mul(i).wrapping_add(i0) as usize]; + let ws_t: u64 = (&ws)[i0 as usize]; + let a0: u64 = hash[0usize]; + let b0: u64 = hash[1usize]; + let c0: u64 = hash[2usize]; + let d0: u64 = hash[3usize]; + let e0: u64 = hash[4usize]; + let f0: u64 = hash[5usize]; + let g0: u64 = hash[6usize]; + let h02: u64 = hash[7usize]; + let k_e_t: u64 = k_t; + let t1: u64 = h02 + .wrapping_add( + (e0.wrapping_shl(50u32) | e0.wrapping_shr(14u32)) + ^ ((e0.wrapping_shl(46u32) | e0.wrapping_shr(18u32)) + ^ (e0.wrapping_shl(23u32) | e0.wrapping_shr(41u32))), + ) + .wrapping_add(e0 & f0 ^ !e0 & g0) + .wrapping_add(k_e_t) + .wrapping_add(ws_t); + let t2: u64 = ((a0.wrapping_shl(36u32) | a0.wrapping_shr(28u32)) + ^ ((a0.wrapping_shl(30u32) | a0.wrapping_shr(34u32)) + ^ (a0.wrapping_shl(25u32) | a0.wrapping_shr(39u32)))) + .wrapping_add(a0 & b0 ^ (a0 & c0 ^ b0 & c0)); + let a1: u64 = t1.wrapping_add(t2); + let b10: u64 = a0; + let c1: u64 = b0; + let d1: u64 = c0; + let e1: u64 = d0.wrapping_add(t1); + let f1: u64 = e0; + let g1: u64 = f0; + let h12: u64 = g0; + hash[0usize] = a1; + hash[1usize] = b10; + hash[2usize] = c1; + hash[3usize] = d1; + hash[4usize] = e1; + hash[5usize] = f1; + hash[6usize] = g1; + hash[7usize] = h12 + }); + if i < 4u32 { + krml::unroll_for!(16, "i0", 0u32, 1u32, { + let t16: u64 = (&ws)[i0 as usize]; + let t15: u64 = (&ws)[i0.wrapping_add(1u32).wrapping_rem(16u32) as usize]; + let t7: u64 = (&ws)[i0.wrapping_add(9u32).wrapping_rem(16u32) as usize]; + let t2: u64 = (&ws)[i0.wrapping_add(14u32).wrapping_rem(16u32) as usize]; + let s1: u64 = (t2.wrapping_shl(45u32) | t2.wrapping_shr(19u32)) + ^ ((t2.wrapping_shl(3u32) | t2.wrapping_shr(61u32)) ^ 
t2.wrapping_shr(6u32)); + let s0: u64 = (t15.wrapping_shl(63u32) | t15.wrapping_shr(1u32)) + ^ ((t15.wrapping_shl(56u32) | t15.wrapping_shr(8u32)) ^ t15.wrapping_shr(7u32)); + (&mut ws)[i0 as usize] = s1.wrapping_add(t7).wrapping_add(s0).wrapping_add(t16) + }) + } + }); + krml::unroll_for!(8, "i", 0u32, 1u32, { + let x: u64 = (hash[i as usize]).wrapping_add((&hash_old)[i as usize]); + let os: (&mut [u64], &mut [u64]) = hash.split_at_mut(0usize); + os.1[i as usize] = x + }) +} + +pub(crate) fn sha512_update_nblocks(len: u32, b: &[u8], st: &mut [u64]) { + let blocks: u32 = len.wrapping_div(128u32); + for i in 0u32..blocks { + let b0: &[u8] = b; + let mb: (&[u8], &[u8]) = b0.split_at(i.wrapping_mul(128u32) as usize); + crate::hacl_rs::hash_sha2::sha512_update(mb.1, st) + } +} + +pub(crate) fn sha512_update_last( + totlen: fstar::uint128::uint128, + len: u32, + b: &[u8], + hash: &mut [u64], +) { + let blocks: u32 = if len.wrapping_add(16u32).wrapping_add(1u32) <= 128u32 { + 1u32 + } else { + 2u32 + }; + let fin: u32 = blocks.wrapping_mul(128u32); + let mut last: [u8; 256] = [0u8; 256usize]; + let mut totlen_buf: [u8; 16] = [0u8; 16usize]; + let total_len_bits: fstar::uint128::uint128 = fstar::uint128::shift_left(totlen, 3u32); + lowstar::endianness::store128_be(&mut totlen_buf, total_len_bits); + let b0: &[u8] = b; + ((&mut last)[0usize..len as usize]).copy_from_slice(&b0[0usize..len as usize]); + (&mut last)[len as usize] = 0x80u8; + ((&mut last)[fin.wrapping_sub(16u32) as usize..fin.wrapping_sub(16u32) as usize + 16usize]) + .copy_from_slice(&(&totlen_buf)[0usize..16usize]); + let last0: (&[u8], &[u8]) = last.split_at(0usize); + let last1: (&[u8], &[u8]) = last0.1.split_at(128usize); + let l0: &[u8] = last1.0; + let l1: &[u8] = last1.1; + let lb0: &[u8] = l0; + let lb1: &[u8] = l1; + let last00: &[u8] = lb0; + let last10: &[u8] = lb1; + crate::hacl_rs::hash_sha2::sha512_update(last00, hash); + if blocks > 1u32 { + crate::hacl_rs::hash_sha2::sha512_update(last10, 
hash) + } +} + +pub(crate) fn sha512_finish(st: &[u64], h: &mut [u8]) { + let mut hbuf: [u8; 64] = [0u8; 64usize]; + krml::unroll_for!( + 8, + "i", + 0u32, + 1u32, + lowstar::endianness::store64_be( + &mut (&mut hbuf)[i.wrapping_mul(8u32) as usize..], + st[i as usize] + ) + ); + (h[0usize..64usize]).copy_from_slice(&(&(&hbuf)[0usize..])[0usize..64usize]) +} + +pub(crate) fn sha384_init(hash: &mut [u64]) { + krml::unroll_for!(8, "i", 0u32, 1u32, { + let x: u64 = (&crate::hacl_rs::hash_sha2::h384)[i as usize]; + let os: (&mut [u64], &mut [u64]) = hash.split_at_mut(0usize); + os.1[i as usize] = x + }) +} + +pub(crate) fn sha384_update_nblocks(len: u32, b: &[u8], st: &mut [u64]) { + crate::hacl_rs::hash_sha2::sha512_update_nblocks(len, b, st) +} + +pub(crate) fn sha384_update_last( + totlen: fstar::uint128::uint128, + len: u32, + b: &[u8], + st: &mut [u64], +) { + crate::hacl_rs::hash_sha2::sha512_update_last(totlen, len, b, st) +} + +pub(crate) fn sha384_finish(st: &[u64], h: &mut [u8]) { + let mut hbuf: [u8; 64] = [0u8; 64usize]; + krml::unroll_for!( + 8, + "i", + 0u32, + 1u32, + lowstar::endianness::store64_be( + &mut (&mut hbuf)[i.wrapping_mul(8u32) as usize..], + st[i as usize] + ) + ); + (h[0usize..48usize]).copy_from_slice(&(&(&hbuf)[0usize..])[0usize..48usize]) +} + +pub type state_t_224 = crate::hacl_rs::streaming_types::state_32; + +pub type state_t_256 = crate::hacl_rs::streaming_types::state_32; + +pub type state_t_384 = crate::hacl_rs::streaming_types::state_64; + +pub type state_t_512 = crate::hacl_rs::streaming_types::state_64; + +/** +Allocate initial state for the SHA2_256 hash. The state is to be freed by +calling `free_256`. 
+*/ +pub fn malloc_256() -> Box<[crate::hacl_rs::streaming_types::state_32]> { + let buf: Box<[u8]> = vec![0u8; 64usize].into_boxed_slice(); + let mut block_state: Box<[u32]> = vec![0u32; 8usize].into_boxed_slice(); + crate::hacl_rs::hash_sha2::sha256_init(&mut block_state); + let s: crate::hacl_rs::streaming_types::state_32 = crate::hacl_rs::streaming_types::state_32 { + block_state, + buf, + total_len: 0u32 as u64, + }; + let p: Box<[crate::hacl_rs::streaming_types::state_32]> = vec![s].into_boxed_slice(); + p +} + +/** +Copies the state passed as argument into a newly allocated state (deep copy). +The state is to be freed by calling `free_256`. Cloning the state this way is +useful, for instance, if your control-flow diverges and you need to feed +more (different) data into the hash in each branch. +*/ +pub fn copy_256( + state: &[crate::hacl_rs::streaming_types::state_32], +) -> Box<[crate::hacl_rs::streaming_types::state_32]> { + let block_state0: &[u32] = &(state[0usize]).block_state; + let buf0: &[u8] = &(state[0usize]).buf; + let total_len0: u64 = (state[0usize]).total_len; + let mut buf: Box<[u8]> = vec![0u8; 64usize].into_boxed_slice(); + ((&mut buf)[0usize..64usize]).copy_from_slice(&buf0[0usize..64usize]); + let mut block_state: Box<[u32]> = vec![0u32; 8usize].into_boxed_slice(); + ((&mut block_state)[0usize..8usize]).copy_from_slice(&block_state0[0usize..8usize]); + let s: crate::hacl_rs::streaming_types::state_32 = crate::hacl_rs::streaming_types::state_32 { + block_state, + buf, + total_len: total_len0, + }; + let p: Box<[crate::hacl_rs::streaming_types::state_32]> = vec![s].into_boxed_slice(); + p +} + +/** +Reset an existing state to the initial hash state with empty data. 
+*/ +pub fn reset_256(state: &mut [crate::hacl_rs::streaming_types::state_32]) { + let block_state: &mut [u32] = &mut (state[0usize]).block_state; + crate::hacl_rs::hash_sha2::sha256_init(block_state); + let total_len: u64 = 0u32 as u64; + (state[0usize]).total_len = total_len +} + +#[inline] +fn update_224_256( + state: &mut [crate::hacl_rs::streaming_types::state_32], + chunk: &[u8], + chunk_len: u32, +) -> crate::hacl_rs::streaming_types::error_code { + let block_state: &mut [u32] = &mut (state[0usize]).block_state; + let total_len: u64 = (state[0usize]).total_len; + if chunk_len as u64 > 2305843009213693951u64.wrapping_sub(total_len) { + crate::hacl_rs::streaming_types::error_code::MaximumLengthExceeded + } else { + let sz: u32 = if total_len.wrapping_rem(64u32 as u64) == 0u64 && total_len > 0u64 { + 64u32 + } else { + total_len.wrapping_rem(64u32 as u64) as u32 + }; + if chunk_len <= 64u32.wrapping_sub(sz) { + let buf: &mut [u8] = &mut (state[0usize]).buf; + let total_len1: u64 = (state[0usize]).total_len; + let sz1: u32 = if total_len1.wrapping_rem(64u32 as u64) == 0u64 && total_len1 > 0u64 { + 64u32 + } else { + total_len1.wrapping_rem(64u32 as u64) as u32 + }; + let buf2: (&mut [u8], &mut [u8]) = buf.split_at_mut(sz1 as usize); + (buf2.1[0usize..chunk_len as usize]) + .copy_from_slice(&chunk[0usize..chunk_len as usize]); + let total_len2: u64 = total_len1.wrapping_add(chunk_len as u64); + (state[0usize]).total_len = total_len2 + } else if sz == 0u32 { + let buf: &mut [u8] = &mut (state[0usize]).buf; + let total_len1: u64 = (state[0usize]).total_len; + let sz1: u32 = if total_len1.wrapping_rem(64u32 as u64) == 0u64 && total_len1 > 0u64 { + 64u32 + } else { + total_len1.wrapping_rem(64u32 as u64) as u32 + }; + if sz1 != 0u32 { + crate::hacl_rs::hash_sha2::sha256_update_nblocks(64u32, buf, block_state) + }; + let ite: u32 = if (chunk_len as u64).wrapping_rem(64u32 as u64) == 0u64 + && chunk_len as u64 > 0u64 + { + 64u32 + } else { + (chunk_len as 
u64).wrapping_rem(64u32 as u64) as u32 + }; + let n_blocks: u32 = chunk_len.wrapping_sub(ite).wrapping_div(64u32); + let data1_len: u32 = n_blocks.wrapping_mul(64u32); + let data2_len: u32 = chunk_len.wrapping_sub(data1_len); + let data1: (&[u8], &[u8]) = chunk.split_at(0usize); + let data2: (&[u8], &[u8]) = data1.1.split_at(data1_len as usize); + crate::hacl_rs::hash_sha2::sha256_update_nblocks( + data1_len.wrapping_div(64u32).wrapping_mul(64u32), + data2.0, + block_state, + ); + let dst: (&mut [u8], &mut [u8]) = buf.split_at_mut(0usize); + (dst.1[0usize..data2_len as usize]) + .copy_from_slice(&data2.1[0usize..data2_len as usize]); + (state[0usize]).total_len = total_len1.wrapping_add(chunk_len as u64) + } else { + let diff: u32 = 64u32.wrapping_sub(sz); + let chunk1: (&[u8], &[u8]) = chunk.split_at(0usize); + let chunk2: (&[u8], &[u8]) = chunk1.1.split_at(diff as usize); + let buf: &mut [u8] = &mut (state[0usize]).buf; + let total_len1: u64 = (state[0usize]).total_len; + let sz1: u32 = if total_len1.wrapping_rem(64u32 as u64) == 0u64 && total_len1 > 0u64 { + 64u32 + } else { + total_len1.wrapping_rem(64u32 as u64) as u32 + }; + let buf2: (&mut [u8], &mut [u8]) = buf.split_at_mut(sz1 as usize); + (buf2.1[0usize..diff as usize]).copy_from_slice(&chunk2.0[0usize..diff as usize]); + let total_len2: u64 = total_len1.wrapping_add(diff as u64); + (state[0usize]).total_len = total_len2; + let buf0: &mut [u8] = &mut (state[0usize]).buf; + let total_len10: u64 = (state[0usize]).total_len; + let sz10: u32 = if total_len10.wrapping_rem(64u32 as u64) == 0u64 && total_len10 > 0u64 + { + 64u32 + } else { + total_len10.wrapping_rem(64u32 as u64) as u32 + }; + if sz10 != 0u32 { + crate::hacl_rs::hash_sha2::sha256_update_nblocks(64u32, buf0, block_state) + }; + let ite: u32 = if (chunk_len.wrapping_sub(diff) as u64).wrapping_rem(64u32 as u64) + == 0u64 + && chunk_len.wrapping_sub(diff) as u64 > 0u64 + { + 64u32 + } else { + (chunk_len.wrapping_sub(diff) as u64).wrapping_rem(64u32 
as u64) as u32 + }; + let n_blocks: u32 = chunk_len + .wrapping_sub(diff) + .wrapping_sub(ite) + .wrapping_div(64u32); + let data1_len: u32 = n_blocks.wrapping_mul(64u32); + let data2_len: u32 = chunk_len.wrapping_sub(diff).wrapping_sub(data1_len); + let data1: (&[u8], &[u8]) = chunk2.1.split_at(0usize); + let data2: (&[u8], &[u8]) = data1.1.split_at(data1_len as usize); + crate::hacl_rs::hash_sha2::sha256_update_nblocks( + data1_len.wrapping_div(64u32).wrapping_mul(64u32), + data2.0, + block_state, + ); + let dst: (&mut [u8], &mut [u8]) = buf0.split_at_mut(0usize); + (dst.1[0usize..data2_len as usize]) + .copy_from_slice(&data2.1[0usize..data2_len as usize]); + (state[0usize]).total_len = + total_len10.wrapping_add(chunk_len.wrapping_sub(diff) as u64) + }; + crate::hacl_rs::streaming_types::error_code::Success + } +} + +/** +Feed an arbitrary amount of data into the hash. This function returns 0 for +success, or 1 if the combined length of all of the data passed to `update_256` +(since the last call to `reset_256`) exceeds 2^61-1 bytes. + +This function is identical to the update function for SHA2_224. +*/ +pub fn update_256( + state: &mut [crate::hacl_rs::streaming_types::state_32], + input: &[u8], + input_len: u32, +) -> crate::hacl_rs::streaming_types::error_code { + crate::hacl_rs::hash_sha2::update_224_256(state, input, input_len) +} + +/** +Write the resulting hash into `output`, an array of 32 bytes. The state remains +valid after a call to `digest_256`, meaning the user may feed more data into +the hash via `update_256`. (The digest_256 function operates on an internal copy of +the state and therefore does not invalidate the client-held state `p`.) 
+*/ +pub fn digest_256(state: &[crate::hacl_rs::streaming_types::state_32], output: &mut [u8]) { + let block_state: &[u32] = &(state[0usize]).block_state; + let buf_: &[u8] = &(state[0usize]).buf; + let total_len: u64 = (state[0usize]).total_len; + let r: u32 = if total_len.wrapping_rem(64u32 as u64) == 0u64 && total_len > 0u64 { + 64u32 + } else { + total_len.wrapping_rem(64u32 as u64) as u32 + }; + let buf_1: (&[u8], &[u8]) = buf_.split_at(0usize); + let mut tmp_block_state: [u32; 8] = [0u32; 8usize]; + ((&mut tmp_block_state)[0usize..8usize]).copy_from_slice(&block_state[0usize..8usize]); + let buf_multi: (&[u8], &[u8]) = buf_1.1.split_at(0usize); + let ite: u32 = if r.wrapping_rem(64u32) == 0u32 && r > 0u32 { + 64u32 + } else { + r.wrapping_rem(64u32) + }; + let buf_last: (&[u8], &[u8]) = buf_multi.1.split_at(r.wrapping_sub(ite) as usize); + crate::hacl_rs::hash_sha2::sha256_update_nblocks(0u32, buf_last.0, &mut tmp_block_state); + let prev_len_last: u64 = total_len.wrapping_sub(r as u64); + crate::hacl_rs::hash_sha2::sha256_update_last( + prev_len_last.wrapping_add(r as u64), + r, + buf_last.1, + &mut tmp_block_state, + ); + crate::hacl_rs::hash_sha2::sha256_finish(&tmp_block_state, output) +} + +/** +Hash `input`, of len `input_len`, into `output`, an array of 32 bytes. 
+*/ +pub fn hash_256(output: &mut [u8], input: &[u8], input_len: u32) { + let ib: &[u8] = input; + let rb: &mut [u8] = output; + let mut st: [u32; 8] = [0u32; 8usize]; + crate::hacl_rs::hash_sha2::sha256_init(&mut st); + let rem: u32 = input_len.wrapping_rem(64u32); + let lenĀ·: u64 = input_len as u64; + crate::hacl_rs::hash_sha2::sha256_update_nblocks(input_len, ib, &mut st); + let rem1: u32 = input_len.wrapping_rem(64u32); + let b0: &[u8] = ib; + let lb: (&[u8], &[u8]) = b0.split_at(input_len.wrapping_sub(rem1) as usize); + crate::hacl_rs::hash_sha2::sha256_update_last(lenĀ·, rem, lb.1, &mut st); + crate::hacl_rs::hash_sha2::sha256_finish(&st, rb) +} + +pub fn malloc_224() -> Box<[crate::hacl_rs::streaming_types::state_32]> { + let buf: Box<[u8]> = vec![0u8; 64usize].into_boxed_slice(); + let mut block_state: Box<[u32]> = vec![0u32; 8usize].into_boxed_slice(); + crate::hacl_rs::hash_sha2::sha224_init(&mut block_state); + let s: crate::hacl_rs::streaming_types::state_32 = crate::hacl_rs::streaming_types::state_32 { + block_state, + buf, + total_len: 0u32 as u64, + }; + let p: Box<[crate::hacl_rs::streaming_types::state_32]> = vec![s].into_boxed_slice(); + p +} + +pub fn reset_224(state: &mut [crate::hacl_rs::streaming_types::state_32]) { + let block_state: &mut [u32] = &mut (state[0usize]).block_state; + crate::hacl_rs::hash_sha2::sha224_init(block_state); + let total_len: u64 = 0u32 as u64; + (state[0usize]).total_len = total_len +} + +pub fn update_224( + state: &mut [crate::hacl_rs::streaming_types::state_32], + input: &[u8], + input_len: u32, +) -> crate::hacl_rs::streaming_types::error_code { + crate::hacl_rs::hash_sha2::update_224_256(state, input, input_len) +} + +/** +Write the resulting hash into `output`, an array of 28 bytes. The state remains +valid after a call to `digest_224`, meaning the user may feed more data into +the hash via `update_224`. 
+*/ +pub fn digest_224(state: &[crate::hacl_rs::streaming_types::state_32], output: &mut [u8]) { + let block_state: &[u32] = &(state[0usize]).block_state; + let buf_: &[u8] = &(state[0usize]).buf; + let total_len: u64 = (state[0usize]).total_len; + let r: u32 = if total_len.wrapping_rem(64u32 as u64) == 0u64 && total_len > 0u64 { + 64u32 + } else { + total_len.wrapping_rem(64u32 as u64) as u32 + }; + let buf_1: (&[u8], &[u8]) = buf_.split_at(0usize); + let mut tmp_block_state: [u32; 8] = [0u32; 8usize]; + ((&mut tmp_block_state)[0usize..8usize]).copy_from_slice(&block_state[0usize..8usize]); + let buf_multi: (&[u8], &[u8]) = buf_1.1.split_at(0usize); + let ite: u32 = if r.wrapping_rem(64u32) == 0u32 && r > 0u32 { + 64u32 + } else { + r.wrapping_rem(64u32) + }; + let buf_last: (&[u8], &[u8]) = buf_multi.1.split_at(r.wrapping_sub(ite) as usize); + crate::hacl_rs::hash_sha2::sha224_update_nblocks(0u32, buf_last.0, &mut tmp_block_state); + let prev_len_last: u64 = total_len.wrapping_sub(r as u64); + crate::hacl_rs::hash_sha2::sha224_update_last( + prev_len_last.wrapping_add(r as u64), + r, + buf_last.1, + &mut tmp_block_state, + ); + crate::hacl_rs::hash_sha2::sha224_finish(&tmp_block_state, output) +} + +/** +Hash `input`, of len `input_len`, into `output`, an array of 28 bytes. 
+*/ +pub fn hash_224(output: &mut [u8], input: &[u8], input_len: u32) { + let ib: &[u8] = input; + let rb: &mut [u8] = output; + let mut st: [u32; 8] = [0u32; 8usize]; + crate::hacl_rs::hash_sha2::sha224_init(&mut st); + let rem: u32 = input_len.wrapping_rem(64u32); + let lenĀ·: u64 = input_len as u64; + crate::hacl_rs::hash_sha2::sha224_update_nblocks(input_len, ib, &mut st); + let rem1: u32 = input_len.wrapping_rem(64u32); + let b0: &[u8] = ib; + let lb: (&[u8], &[u8]) = b0.split_at(input_len.wrapping_sub(rem1) as usize); + crate::hacl_rs::hash_sha2::sha224_update_last(lenĀ·, rem, lb.1, &mut st); + crate::hacl_rs::hash_sha2::sha224_finish(&st, rb) +} + +pub fn malloc_512() -> Box<[crate::hacl_rs::streaming_types::state_64]> { + let buf: Box<[u8]> = vec![0u8; 128usize].into_boxed_slice(); + let mut block_state: Box<[u64]> = vec![0u64; 8usize].into_boxed_slice(); + crate::hacl_rs::hash_sha2::sha512_init(&mut block_state); + let s: crate::hacl_rs::streaming_types::state_64 = crate::hacl_rs::streaming_types::state_64 { + block_state, + buf, + total_len: 0u32 as u64, + }; + let p: Box<[crate::hacl_rs::streaming_types::state_64]> = vec![s].into_boxed_slice(); + p +} + +/** +Copies the state passed as argument into a newly allocated state (deep copy). +The state is to be freed by calling `free_512`. Cloning the state this way is +useful, for instance, if your control-flow diverges and you need to feed +more (different) data into the hash in each branch. 
+*/ +pub fn copy_512( + state: &[crate::hacl_rs::streaming_types::state_64], +) -> Box<[crate::hacl_rs::streaming_types::state_64]> { + let block_state0: &[u64] = &(state[0usize]).block_state; + let buf0: &[u8] = &(state[0usize]).buf; + let total_len0: u64 = (state[0usize]).total_len; + let mut buf: Box<[u8]> = vec![0u8; 128usize].into_boxed_slice(); + ((&mut buf)[0usize..128usize]).copy_from_slice(&buf0[0usize..128usize]); + let mut block_state: Box<[u64]> = vec![0u64; 8usize].into_boxed_slice(); + ((&mut block_state)[0usize..8usize]).copy_from_slice(&block_state0[0usize..8usize]); + let s: crate::hacl_rs::streaming_types::state_64 = crate::hacl_rs::streaming_types::state_64 { + block_state, + buf, + total_len: total_len0, + }; + let p: Box<[crate::hacl_rs::streaming_types::state_64]> = vec![s].into_boxed_slice(); + p +} + +pub fn reset_512(state: &mut [crate::hacl_rs::streaming_types::state_64]) { + let block_state: &mut [u64] = &mut (state[0usize]).block_state; + crate::hacl_rs::hash_sha2::sha512_init(block_state); + let total_len: u64 = 0u32 as u64; + (state[0usize]).total_len = total_len +} + +#[inline] +fn update_384_512( + state: &mut [crate::hacl_rs::streaming_types::state_64], + chunk: &[u8], + chunk_len: u32, +) -> crate::hacl_rs::streaming_types::error_code { + let block_state: &mut [u64] = &mut (state[0usize]).block_state; + let total_len: u64 = (state[0usize]).total_len; + if chunk_len as u64 > 18446744073709551615u64.wrapping_sub(total_len) { + crate::hacl_rs::streaming_types::error_code::MaximumLengthExceeded + } else { + let sz: u32 = if total_len.wrapping_rem(128u32 as u64) == 0u64 && total_len > 0u64 { + 128u32 + } else { + total_len.wrapping_rem(128u32 as u64) as u32 + }; + if chunk_len <= 128u32.wrapping_sub(sz) { + let buf: &mut [u8] = &mut (state[0usize]).buf; + let total_len1: u64 = (state[0usize]).total_len; + let sz1: u32 = if total_len1.wrapping_rem(128u32 as u64) == 0u64 && total_len1 > 0u64 { + 128u32 + } else { + 
total_len1.wrapping_rem(128u32 as u64) as u32 + }; + let buf2: (&mut [u8], &mut [u8]) = buf.split_at_mut(sz1 as usize); + (buf2.1[0usize..chunk_len as usize]) + .copy_from_slice(&chunk[0usize..chunk_len as usize]); + let total_len2: u64 = total_len1.wrapping_add(chunk_len as u64); + (state[0usize]).total_len = total_len2 + } else if sz == 0u32 { + let buf: &mut [u8] = &mut (state[0usize]).buf; + let total_len1: u64 = (state[0usize]).total_len; + let sz1: u32 = if total_len1.wrapping_rem(128u32 as u64) == 0u64 && total_len1 > 0u64 { + 128u32 + } else { + total_len1.wrapping_rem(128u32 as u64) as u32 + }; + if sz1 != 0u32 { + crate::hacl_rs::hash_sha2::sha512_update_nblocks(128u32, buf, block_state) + }; + let ite: u32 = if (chunk_len as u64).wrapping_rem(128u32 as u64) == 0u64 + && chunk_len as u64 > 0u64 + { + 128u32 + } else { + (chunk_len as u64).wrapping_rem(128u32 as u64) as u32 + }; + let n_blocks: u32 = chunk_len.wrapping_sub(ite).wrapping_div(128u32); + let data1_len: u32 = n_blocks.wrapping_mul(128u32); + let data2_len: u32 = chunk_len.wrapping_sub(data1_len); + let data1: (&[u8], &[u8]) = chunk.split_at(0usize); + let data2: (&[u8], &[u8]) = data1.1.split_at(data1_len as usize); + crate::hacl_rs::hash_sha2::sha512_update_nblocks( + data1_len.wrapping_div(128u32).wrapping_mul(128u32), + data2.0, + block_state, + ); + let dst: (&mut [u8], &mut [u8]) = buf.split_at_mut(0usize); + (dst.1[0usize..data2_len as usize]) + .copy_from_slice(&data2.1[0usize..data2_len as usize]); + (state[0usize]).total_len = total_len1.wrapping_add(chunk_len as u64) + } else { + let diff: u32 = 128u32.wrapping_sub(sz); + let chunk1: (&[u8], &[u8]) = chunk.split_at(0usize); + let chunk2: (&[u8], &[u8]) = chunk1.1.split_at(diff as usize); + let buf: &mut [u8] = &mut (state[0usize]).buf; + let total_len1: u64 = (state[0usize]).total_len; + let sz1: u32 = if total_len1.wrapping_rem(128u32 as u64) == 0u64 && total_len1 > 0u64 { + 128u32 + } else { + total_len1.wrapping_rem(128u32 as u64) 
as u32 + }; + let buf2: (&mut [u8], &mut [u8]) = buf.split_at_mut(sz1 as usize); + (buf2.1[0usize..diff as usize]).copy_from_slice(&chunk2.0[0usize..diff as usize]); + let total_len2: u64 = total_len1.wrapping_add(diff as u64); + (state[0usize]).total_len = total_len2; + let buf0: &mut [u8] = &mut (state[0usize]).buf; + let total_len10: u64 = (state[0usize]).total_len; + let sz10: u32 = if total_len10.wrapping_rem(128u32 as u64) == 0u64 && total_len10 > 0u64 + { + 128u32 + } else { + total_len10.wrapping_rem(128u32 as u64) as u32 + }; + if sz10 != 0u32 { + crate::hacl_rs::hash_sha2::sha512_update_nblocks(128u32, buf0, block_state) + }; + let ite: u32 = if (chunk_len.wrapping_sub(diff) as u64).wrapping_rem(128u32 as u64) + == 0u64 + && chunk_len.wrapping_sub(diff) as u64 > 0u64 + { + 128u32 + } else { + (chunk_len.wrapping_sub(diff) as u64).wrapping_rem(128u32 as u64) as u32 + }; + let n_blocks: u32 = chunk_len + .wrapping_sub(diff) + .wrapping_sub(ite) + .wrapping_div(128u32); + let data1_len: u32 = n_blocks.wrapping_mul(128u32); + let data2_len: u32 = chunk_len.wrapping_sub(diff).wrapping_sub(data1_len); + let data1: (&[u8], &[u8]) = chunk2.1.split_at(0usize); + let data2: (&[u8], &[u8]) = data1.1.split_at(data1_len as usize); + crate::hacl_rs::hash_sha2::sha512_update_nblocks( + data1_len.wrapping_div(128u32).wrapping_mul(128u32), + data2.0, + block_state, + ); + let dst: (&mut [u8], &mut [u8]) = buf0.split_at_mut(0usize); + (dst.1[0usize..data2_len as usize]) + .copy_from_slice(&data2.1[0usize..data2_len as usize]); + (state[0usize]).total_len = + total_len10.wrapping_add(chunk_len.wrapping_sub(diff) as u64) + }; + crate::hacl_rs::streaming_types::error_code::Success + } +} + +/** +Feed an arbitrary amount of data into the hash. This function returns 0 for +success, or 1 if the combined length of all of the data passed to `update_512` +(since the last call to `reset_512`) exceeds 2^125-1 bytes. + +This function is identical to the update function for SHA2_384. 
+*/ +pub fn update_512( + state: &mut [crate::hacl_rs::streaming_types::state_64], + input: &[u8], + input_len: u32, +) -> crate::hacl_rs::streaming_types::error_code { + crate::hacl_rs::hash_sha2::update_384_512(state, input, input_len) +} + +/** +Write the resulting hash into `output`, an array of 64 bytes. The state remains +valid after a call to `digest_512`, meaning the user may feed more data into +the hash via `update_512`. (The digest_512 function operates on an internal copy of +the state and therefore does not invalidate the client-held state `p`.) +*/ +pub fn digest_512(state: &[crate::hacl_rs::streaming_types::state_64], output: &mut [u8]) { + let block_state: &[u64] = &(state[0usize]).block_state; + let buf_: &[u8] = &(state[0usize]).buf; + let total_len: u64 = (state[0usize]).total_len; + let r: u32 = if total_len.wrapping_rem(128u32 as u64) == 0u64 && total_len > 0u64 { + 128u32 + } else { + total_len.wrapping_rem(128u32 as u64) as u32 + }; + let buf_1: (&[u8], &[u8]) = buf_.split_at(0usize); + let mut tmp_block_state: [u64; 8] = [0u64; 8usize]; + ((&mut tmp_block_state)[0usize..8usize]).copy_from_slice(&block_state[0usize..8usize]); + let buf_multi: (&[u8], &[u8]) = buf_1.1.split_at(0usize); + let ite: u32 = if r.wrapping_rem(128u32) == 0u32 && r > 0u32 { + 128u32 + } else { + r.wrapping_rem(128u32) + }; + let buf_last: (&[u8], &[u8]) = buf_multi.1.split_at(r.wrapping_sub(ite) as usize); + crate::hacl_rs::hash_sha2::sha512_update_nblocks(0u32, buf_last.0, &mut tmp_block_state); + let prev_len_last: u64 = total_len.wrapping_sub(r as u64); + crate::hacl_rs::hash_sha2::sha512_update_last( + fstar::uint128::add( + fstar::uint128::uint64_to_uint128(prev_len_last), + fstar::uint128::uint64_to_uint128(r as u64), + ), + r, + buf_last.1, + &mut tmp_block_state, + ); + crate::hacl_rs::hash_sha2::sha512_finish(&tmp_block_state, output) +} + +/** +Hash `input`, of len `input_len`, into `output`, an array of 64 bytes. 
*/
pub fn hash_512(output: &mut [u8], input: &[u8], input_len: u32) {
    let ib: &[u8] = input;
    let rb: &mut [u8] = output;
    let mut st: [u64; 8] = [0u64; 8usize];
    crate::hacl_rs::hash_sha2::sha512_init(&mut st);
    let rem: u32 = input_len.wrapping_rem(128u32);
    let lenĀ·: fstar::uint128::uint128 = fstar::uint128::uint64_to_uint128(input_len as u64);
    // Process all full 128-byte blocks, then pad and absorb the trailing bytes.
    crate::hacl_rs::hash_sha2::sha512_update_nblocks(input_len, ib, &mut st);
    let rem1: u32 = input_len.wrapping_rem(128u32);
    let b0: &[u8] = ib;
    let lb: (&[u8], &[u8]) = b0.split_at(input_len.wrapping_sub(rem1) as usize);
    crate::hacl_rs::hash_sha2::sha512_update_last(lenĀ·, rem, lb.1, &mut st);
    crate::hacl_rs::hash_sha2::sha512_finish(&st, rb)
}

// Allocate a fresh SHA2-384 streaming state (8-word block state + one
// 128-byte buffer), initialized to the SHA-384 IV.
pub fn malloc_384() -> Box<[crate::hacl_rs::streaming_types::state_64]> {
    let buf: Box<[u8]> = vec![0u8; 128usize].into_boxed_slice();
    let mut block_state: Box<[u64]> = vec![0u64; 8usize].into_boxed_slice();
    crate::hacl_rs::hash_sha2::sha384_init(&mut block_state);
    let s: crate::hacl_rs::streaming_types::state_64 = crate::hacl_rs::streaming_types::state_64 {
        block_state,
        buf,
        total_len: 0u32 as u64,
    };
    let p: Box<[crate::hacl_rs::streaming_types::state_64]> = vec![s].into_boxed_slice();
    p
}

// Re-initialize an existing SHA2-384 streaming state for a new hash.
pub fn reset_384(state: &mut [crate::hacl_rs::streaming_types::state_64]) {
    let block_state: &mut [u64] = &mut (state[0usize]).block_state;
    crate::hacl_rs::hash_sha2::sha384_init(block_state);
    let total_len: u64 = 0u32 as u64;
    (state[0usize]).total_len = total_len
}

// Feed data into a SHA2-384 streaming state; shares the SHA2-512 update body.
pub fn update_384(
    state: &mut [crate::hacl_rs::streaming_types::state_64],
    input: &[u8],
    input_len: u32,
) -> crate::hacl_rs::streaming_types::error_code {
    crate::hacl_rs::hash_sha2::update_384_512(state, input, input_len)
}

/**
Write the resulting hash into `output`, an array of 48 bytes. The state remains
valid after a call to `digest_384`, meaning the user may feed more data into
the hash via `update_384`.
*/
pub fn digest_384(state: &[crate::hacl_rs::streaming_types::state_64], output: &mut [u8]) {
    let block_state: &[u64] = &(state[0usize]).block_state;
    let buf_: &[u8] = &(state[0usize]).buf;
    let total_len: u64 = (state[0usize]).total_len;
    // r = number of buffered bytes (128 when total_len is a non-zero multiple
    // of the block size, since a full block is kept buffered).
    let r: u32 = if total_len.wrapping_rem(128u32 as u64) == 0u64 && total_len > 0u64 {
        128u32
    } else {
        total_len.wrapping_rem(128u32 as u64) as u32
    };
    let buf_1: (&[u8], &[u8]) = buf_.split_at(0usize);
    // Finalize on a copy so the caller-held state remains usable.
    let mut tmp_block_state: [u64; 8] = [0u64; 8usize];
    ((&mut tmp_block_state)[0usize..8usize]).copy_from_slice(&block_state[0usize..8usize]);
    let buf_multi: (&[u8], &[u8]) = buf_1.1.split_at(0usize);
    let ite: u32 = if r.wrapping_rem(128u32) == 0u32 && r > 0u32 {
        128u32
    } else {
        r.wrapping_rem(128u32)
    };
    let buf_last: (&[u8], &[u8]) = buf_multi.1.split_at(r.wrapping_sub(ite) as usize);
    crate::hacl_rs::hash_sha2::sha384_update_nblocks(0u32, buf_last.0, &mut tmp_block_state);
    let prev_len_last: u64 = total_len.wrapping_sub(r as u64);
    crate::hacl_rs::hash_sha2::sha384_update_last(
        fstar::uint128::add(
            fstar::uint128::uint64_to_uint128(prev_len_last),
            fstar::uint128::uint64_to_uint128(r as u64),
        ),
        r,
        buf_last.1,
        &mut tmp_block_state,
    );
    crate::hacl_rs::hash_sha2::sha384_finish(&tmp_block_state, output)
}

/**
Hash `input`, of len `input_len`, into `output`, an array of 48 bytes.
+*/ +pub fn hash_384(output: &mut [u8], input: &[u8], input_len: u32) { + let ib: &[u8] = input; + let rb: &mut [u8] = output; + let mut st: [u64; 8] = [0u64; 8usize]; + crate::hacl_rs::hash_sha2::sha384_init(&mut st); + let rem: u32 = input_len.wrapping_rem(128u32); + let lenĀ·: fstar::uint128::uint128 = fstar::uint128::uint64_to_uint128(input_len as u64); + crate::hacl_rs::hash_sha2::sha384_update_nblocks(input_len, ib, &mut st); + let rem1: u32 = input_len.wrapping_rem(128u32); + let b0: &[u8] = ib; + let lb: (&[u8], &[u8]) = b0.split_at(input_len.wrapping_sub(rem1) as usize); + crate::hacl_rs::hash_sha2::sha384_update_last(lenĀ·, rem, lb.1, &mut st); + crate::hacl_rs::hash_sha2::sha384_finish(&st, rb) +} + +// END GENERATED CODE + +macro_rules! impl_hash { + ($name:ident, $digest_size:literal, $state:ty, $malloc:expr, $reset:expr, $update:expr, $finish:expr, $copy:expr, $hash:expr) => { + pub struct $name { + state: $state, + } + + impl $name { + /// Initialize a new digest state for streaming use. + pub fn new() -> $name { + $name { state: $malloc() } + } + + /// Add the `payload` to the digest. + pub fn update(&mut self, payload: &[u8]) { + $update(self.state.as_mut(), payload, payload.len() as u32); + } + + /// Get the digest. + /// + /// Note that the digest state can be continued to be used, to extend the + /// digest. + pub fn finish(&self, digest: &mut [u8]) { + $finish(self.state.as_ref(), digest); + } + + /// Return the digest for the given input byte slice, in immediate mode. 
+ pub fn hash(digest: &mut [u8], input: &[u8]) { + $hash(digest, input, input.len() as u32) + } + } + + impl Clone for $name { + fn clone(&self) -> Self { + Self { + state: $copy(self.state.as_ref()), + } + } + } + }; +} + +impl_hash!( + HaclRs_Sha2_Sha256, + 32, + Box<[crate::hacl_rs::streaming_types::state_32]>, + malloc_256, + wat, + update_256, + digest_256, + copy_256, + hash_256 +); diff --git a/src/hacl_rs/hkdf.rs b/src/hacl_rs/hkdf.rs new file mode 100644 index 000000000..9b51de11b --- /dev/null +++ b/src/hacl_rs/hkdf.rs @@ -0,0 +1,452 @@ +#![allow(non_snake_case)] +#![allow(non_upper_case_globals)] +#![allow(non_camel_case_types)] +#![allow(unused_assignments)] +#![allow(unreachable_patterns)] + +/** +Expand pseudorandom key to desired length. + +@param okm Pointer to `len` bytes of memory where output keying material is written to. +@param prk Pointer to at least `HashLen` bytes of memory where pseudorandom key is read from. Usually, this points to the output from the extract step. +@param prklen Length of pseudorandom key. +@param info Pointer to `infolen` bytes of memory where context and application specific information is read from. Can be a zero-length string. +@param infolen Length of context and application specific information. +@param len Length of output keying material. 
*/
pub fn expand_sha2_256(
    okm: &mut [u8],
    prk: &[u8],
    prklen: u32,
    info: &[u8],
    infolen: u32,
    len: u32,
) {
    // tlen is HashLen for SHA2-256; n full output blocks plus an optional
    // partial block cover `len` bytes (HKDF-Expand, RFC 5869).
    let tlen: u32 = 32u32;
    let n: u32 = len.wrapping_div(tlen);
    let output: (&mut [u8], &mut [u8]) = okm.split_at_mut(0usize);
    // text = T(i-1) || info || counter byte.
    let mut text: Box<[u8]> =
        vec![0u8; tlen.wrapping_add(infolen).wrapping_add(1u32) as usize].into_boxed_slice();
    let text0: (&mut [u8], &mut [u8]) = text.split_at_mut(tlen as usize);
    // NOTE(review): `0usize - tlen as usize` underflows usize (panic in debug,
    // wrap in release); likely a transcription artifact — verify against the
    // generated upstream source before merging.
    let tag: (&mut [u8], &mut [u8]) = text0.1.split_at_mut(0usize - tlen as usize);
    let ctr: (&mut [u8], &mut [u8]) = tag.1.split_at_mut(tlen.wrapping_add(infolen) as usize);
    ((&mut (&mut text)[tlen as usize..])[0usize..infolen as usize])
        .copy_from_slice(&info[0usize..infolen as usize]);
    for i in 0u32..n {
        // Counter byte is 1-based per RFC 5869.
        ctr.1[0usize] = i.wrapping_add(1u32) as u8;
        if i == 0u32 {
            // T(1) = HMAC(prk, info || 0x01) — no previous tag prepended.
            crate::hacl_rs::hmac::compute_sha2_256(
                ctr.0,
                prk,
                prklen,
                tag.0,
                infolen.wrapping_add(1u32),
            )
        } else {
            crate::hacl_rs::hmac::compute_sha2_256(
                ctr.0,
                prk,
                prklen,
                &text,
                tlen.wrapping_add(infolen).wrapping_add(1u32),
            )
        };
        ((&mut output.1[i.wrapping_mul(tlen) as usize..])[0usize..tlen as usize])
            .copy_from_slice(&ctr.0[0usize..tlen as usize])
    }
    // Final, possibly partial, output block.
    if n.wrapping_mul(tlen) < len {
        ctr.1[0usize] = n.wrapping_add(1u32) as u8;
        if n == 0u32 {
            crate::hacl_rs::hmac::compute_sha2_256(
                ctr.0,
                prk,
                prklen,
                tag.0,
                infolen.wrapping_add(1u32),
            )
        } else {
            crate::hacl_rs::hmac::compute_sha2_256(
                ctr.0,
                prk,
                prklen,
                &text,
                tlen.wrapping_add(infolen).wrapping_add(1u32),
            )
        };
        let block: (&mut [u8], &mut [u8]) = output.1.split_at_mut(n.wrapping_mul(tlen) as usize);
        (block.1[0usize..len.wrapping_sub(n.wrapping_mul(tlen)) as usize]).copy_from_slice(
            &(&ctr.0[0usize..])[0usize..len.wrapping_sub(n.wrapping_mul(tlen)) as usize],
        )
    }
}

/**
Extract a fixed-length pseudorandom key from input keying material.

@param prk Pointer to `HashLen` bytes of memory where pseudorandom key is written to.
@param salt Pointer to `saltlen` bytes of memory where salt value is read from.
@param saltlen Length of salt value.
@param ikm Pointer to `ikmlen` bytes of memory where input keying material is read from.
@param ikmlen Length of input keying material.
*/
pub fn extract_sha2_256(prk: &mut [u8], salt: &[u8], saltlen: u32, ikm: &[u8], ikmlen: u32) {
    // HKDF-Extract is a single HMAC invocation: PRK = HMAC-SHA2-256(salt, IKM).
    crate::hacl_rs::hmac::compute_sha2_256(prk, salt, saltlen, ikm, ikmlen)
}

/**
Expand pseudorandom key to desired length.

@param okm Pointer to `len` bytes of memory where output keying material is written to.
@param prk Pointer to at least `HashLen` bytes of memory where pseudorandom key is read from. Usually, this points to the output from the extract step.
@param prklen Length of pseudorandom key.
@param info Pointer to `infolen` bytes of memory where context and application specific information is read from. Can be a zero-length string.
@param infolen Length of context and application specific information.
@param len Length of output keying material.
*/
pub fn expand_sha2_384(
    okm: &mut [u8],
    prk: &[u8],
    prklen: u32,
    info: &[u8],
    infolen: u32,
    len: u32,
) {
    // tlen is HashLen for SHA2-384 (HKDF-Expand, RFC 5869).
    let tlen: u32 = 48u32;
    let n: u32 = len.wrapping_div(tlen);
    let output: (&mut [u8], &mut [u8]) = okm.split_at_mut(0usize);
    // text = T(i-1) || info || counter byte.
    let mut text: Box<[u8]> =
        vec![0u8; tlen.wrapping_add(infolen).wrapping_add(1u32) as usize].into_boxed_slice();
    let text0: (&mut [u8], &mut [u8]) = text.split_at_mut(tlen as usize);
    // NOTE(review): `0usize - tlen as usize` underflows usize — same concern
    // as in expand_sha2_256; verify against the generated upstream source.
    let tag: (&mut [u8], &mut [u8]) = text0.1.split_at_mut(0usize - tlen as usize);
    let ctr: (&mut [u8], &mut [u8]) = tag.1.split_at_mut(tlen.wrapping_add(infolen) as usize);
    ((&mut (&mut text)[tlen as usize..])[0usize..infolen as usize])
        .copy_from_slice(&info[0usize..infolen as usize]);
    for i in 0u32..n {
        // Counter byte is 1-based per RFC 5869.
        ctr.1[0usize] = i.wrapping_add(1u32) as u8;
        if i == 0u32 {
            crate::hacl_rs::hmac::compute_sha2_384(
                ctr.0,
                prk,
                prklen,
                tag.0,
                infolen.wrapping_add(1u32),
            )
        } else {
            crate::hacl_rs::hmac::compute_sha2_384(
                ctr.0,
                prk,
                prklen,
                &text,
                tlen.wrapping_add(infolen).wrapping_add(1u32),
            )
        };
        ((&mut output.1[i.wrapping_mul(tlen) as usize..])[0usize..tlen as usize])
            .copy_from_slice(&ctr.0[0usize..tlen as usize])
    }
    // Final, possibly partial, output block.
    if n.wrapping_mul(tlen) < len {
        ctr.1[0usize] = n.wrapping_add(1u32) as u8;
        if n == 0u32 {
            crate::hacl_rs::hmac::compute_sha2_384(
                ctr.0,
                prk,
                prklen,
                tag.0,
                infolen.wrapping_add(1u32),
            )
        } else {
            crate::hacl_rs::hmac::compute_sha2_384(
                ctr.0,
                prk,
                prklen,
                &text,
                tlen.wrapping_add(infolen).wrapping_add(1u32),
            )
        };
        let block: (&mut [u8], &mut [u8]) = output.1.split_at_mut(n.wrapping_mul(tlen) as usize);
        (block.1[0usize..len.wrapping_sub(n.wrapping_mul(tlen)) as usize]).copy_from_slice(
            &(&ctr.0[0usize..])[0usize..len.wrapping_sub(n.wrapping_mul(tlen)) as usize],
        )
    }
}

/**
Extract a fixed-length pseudorandom key from input keying material.

@param prk Pointer to `HashLen` bytes of memory where pseudorandom key is written to.
@param salt Pointer to `saltlen` bytes of memory where salt value is read from.
@param saltlen Length of salt value.
@param ikm Pointer to `ikmlen` bytes of memory where input keying material is read from.
@param ikmlen Length of input keying material.
*/
pub fn extract_sha2_384(prk: &mut [u8], salt: &[u8], saltlen: u32, ikm: &[u8], ikmlen: u32) {
    // HKDF-Extract is a single HMAC invocation: PRK = HMAC-SHA2-384(salt, IKM).
    crate::hacl_rs::hmac::compute_sha2_384(prk, salt, saltlen, ikm, ikmlen)
}

/**
Expand pseudorandom key to desired length.

@param okm Pointer to `len` bytes of memory where output keying material is written to.
@param prk Pointer to at least `HashLen` bytes of memory where pseudorandom key is read from. Usually, this points to the output from the extract step.
@param prklen Length of pseudorandom key.
@param info Pointer to `infolen` bytes of memory where context and application specific information is read from. Can be a zero-length string.
@param infolen Length of context and application specific information.
@param len Length of output keying material.
*/
pub fn expand_sha2_512(
    okm: &mut [u8],
    prk: &[u8],
    prklen: u32,
    info: &[u8],
    infolen: u32,
    len: u32,
) {
    // tlen is HashLen for SHA2-512 (HKDF-Expand, RFC 5869).
    let tlen: u32 = 64u32;
    let n: u32 = len.wrapping_div(tlen);
    let output: (&mut [u8], &mut [u8]) = okm.split_at_mut(0usize);
    // text = T(i-1) || info || counter byte.
    let mut text: Box<[u8]> =
        vec![0u8; tlen.wrapping_add(infolen).wrapping_add(1u32) as usize].into_boxed_slice();
    let text0: (&mut [u8], &mut [u8]) = text.split_at_mut(tlen as usize);
    // NOTE(review): `0usize - tlen as usize` underflows usize — same concern
    // as in expand_sha2_256; verify against the generated upstream source.
    let tag: (&mut [u8], &mut [u8]) = text0.1.split_at_mut(0usize - tlen as usize);
    let ctr: (&mut [u8], &mut [u8]) = tag.1.split_at_mut(tlen.wrapping_add(infolen) as usize);
    ((&mut (&mut text)[tlen as usize..])[0usize..infolen as usize])
        .copy_from_slice(&info[0usize..infolen as usize]);
    for i in 0u32..n {
        // Counter byte is 1-based per RFC 5869.
        ctr.1[0usize] = i.wrapping_add(1u32) as u8;
        if i == 0u32 {
            crate::hacl_rs::hmac::compute_sha2_512(
                ctr.0,
                prk,
                prklen,
                tag.0,
                infolen.wrapping_add(1u32),
            )
        } else {
            crate::hacl_rs::hmac::compute_sha2_512(
                ctr.0,
                prk,
                prklen,
                &text,
                tlen.wrapping_add(infolen).wrapping_add(1u32),
            )
        };
        ((&mut output.1[i.wrapping_mul(tlen) as usize..])[0usize..tlen as usize])
            .copy_from_slice(&ctr.0[0usize..tlen as usize])
    }
    // Final, possibly partial, output block.
    if n.wrapping_mul(tlen) < len {
        ctr.1[0usize] = n.wrapping_add(1u32) as u8;
        if n == 0u32 {
            crate::hacl_rs::hmac::compute_sha2_512(
                ctr.0,
                prk,
                prklen,
                tag.0,
                infolen.wrapping_add(1u32),
            )
        } else {
            crate::hacl_rs::hmac::compute_sha2_512(
                ctr.0,
                prk,
                prklen,
                &text,
                tlen.wrapping_add(infolen).wrapping_add(1u32),
            )
        };
        let block: (&mut [u8], &mut [u8]) = output.1.split_at_mut(n.wrapping_mul(tlen) as usize);
        (block.1[0usize..len.wrapping_sub(n.wrapping_mul(tlen)) as usize]).copy_from_slice(
            &(&ctr.0[0usize..])[0usize..len.wrapping_sub(n.wrapping_mul(tlen)) as usize],
        )
    }
}

/**
Extract a fixed-length pseudorandom key from input keying material.

@param prk Pointer to `HashLen` bytes of memory where pseudorandom key is written to.
@param salt Pointer to `saltlen` bytes of memory where salt value is read from.
@param saltlen Length of salt value.
@param ikm Pointer to `ikmlen` bytes of memory where input keying material is read from.
@param ikmlen Length of input keying material.
*/
pub fn extract_sha2_512(prk: &mut [u8], salt: &[u8], saltlen: u32, ikm: &[u8], ikmlen: u32) {
    // HKDF-Extract is a single HMAC invocation: PRK = HMAC-SHA2-512(salt, IKM).
    crate::hacl_rs::hmac::compute_sha2_512(prk, salt, saltlen, ikm, ikmlen)
}

/* no blake2 for now
/**
Expand pseudorandom key to desired length.

@param okm Pointer to `len` bytes of memory where output keying material is written to.
@param prk Pointer to at least `HashLen` bytes of memory where pseudorandom key is read from. Usually, this points to the output from the extract step.
@param prklen Length of pseudorandom key.
@param info Pointer to `infolen` bytes of memory where context and application specific information is read from. Can be a zero-length string.
@param infolen Length of context and application specific information.
@param len Length of output keying material.
+*/ +pub fn expand_blake2s_32( + okm: &mut [u8], + prk: &[u8], + prklen: u32, + info: &[u8], + infolen: u32, + len: u32, +) { + let tlen: u32 = 32u32; + let n: u32 = len.wrapping_div(tlen); + let output: (&mut [u8], &mut [u8]) = okm.split_at_mut(0usize); + let mut text: Box<[u8]> = + vec![0u8; tlen.wrapping_add(infolen).wrapping_add(1u32) as usize].into_boxed_slice(); + let text0: (&mut [u8], &mut [u8]) = text.split_at_mut(tlen as usize); + let tag: (&mut [u8], &mut [u8]) = text0.1.split_at_mut(0usize - tlen as usize); + let ctr: (&mut [u8], &mut [u8]) = tag.1.split_at_mut(tlen.wrapping_add(infolen) as usize); + ((&mut (&mut text)[tlen as usize..])[0usize..infolen as usize]) + .copy_from_slice(&info[0usize..infolen as usize]); + for i in 0u32..n { + ctr.1[0usize] = i.wrapping_add(1u32) as u8; + if i == 0u32 { + crate::hacl_rs::hmac::compute_blake2s_32( + ctr.0, + prk, + prklen, + tag.0, + infolen.wrapping_add(1u32), + ) + } else { + crate::hacl_rs::hmac::compute_blake2s_32( + ctr.0, + prk, + prklen, + &text, + tlen.wrapping_add(infolen).wrapping_add(1u32), + ) + }; + ((&mut output.1[i.wrapping_mul(tlen) as usize..])[0usize..tlen as usize]) + .copy_from_slice(&ctr.0[0usize..tlen as usize]) + } + if n.wrapping_mul(tlen) < len { + ctr.1[0usize] = n.wrapping_add(1u32) as u8; + if n == 0u32 { + crate::hacl_rs::hmac::compute_blake2s_32( + ctr.0, + prk, + prklen, + tag.0, + infolen.wrapping_add(1u32), + ) + } else { + crate::hacl_rs::hmac::compute_blake2s_32( + ctr.0, + prk, + prklen, + &text, + tlen.wrapping_add(infolen).wrapping_add(1u32), + ) + }; + let block: (&mut [u8], &mut [u8]) = output.1.split_at_mut(n.wrapping_mul(tlen) as usize); + (block.1[0usize..len.wrapping_sub(n.wrapping_mul(tlen)) as usize]).copy_from_slice( + &(&ctr.0[0usize..])[0usize..len.wrapping_sub(n.wrapping_mul(tlen)) as usize], + ) + } +} + +/** +Extract a fixed-length pseudorandom key from input keying material. 
+ +@param prk Pointer to `HashLen` bytes of memory where pseudorandom key is written to. +@param salt Pointer to `saltlen` bytes of memory where salt value is read from. +@param saltlen Length of salt value. +@param ikm Pointer to `ikmlen` bytes of memory where input keying material is read from. +@param ikmlen Length of input keying material. +*/ +pub fn extract_blake2s_32(prk: &mut [u8], salt: &[u8], saltlen: u32, ikm: &[u8], ikmlen: u32) { + crate::hacl_rs::hmac::compute_blake2s_32(prk, salt, saltlen, ikm, ikmlen) +} + +/** +Expand pseudorandom key to desired length. + +@param okm Pointer to `len` bytes of memory where output keying material is written to. +@param prk Pointer to at least `HashLen` bytes of memory where pseudorandom key is read from. Usually, this points to the output from the extract step. +@param prklen Length of pseudorandom key. +@param info Pointer to `infolen` bytes of memory where context and application specific information is read from. Can be a zero-length string. +@param infolen Length of context and application specific information. +@param len Length of output keying material. 
+*/ +pub fn expand_blake2b_32( + okm: &mut [u8], + prk: &[u8], + prklen: u32, + info: &[u8], + infolen: u32, + len: u32, +) { + let tlen: u32 = 64u32; + let n: u32 = len.wrapping_div(tlen); + let output: (&mut [u8], &mut [u8]) = okm.split_at_mut(0usize); + let mut text: Box<[u8]> = + vec![0u8; tlen.wrapping_add(infolen).wrapping_add(1u32) as usize].into_boxed_slice(); + let text0: (&mut [u8], &mut [u8]) = text.split_at_mut(tlen as usize); + let tag: (&mut [u8], &mut [u8]) = text0.1.split_at_mut(0usize - tlen as usize); + let ctr: (&mut [u8], &mut [u8]) = tag.1.split_at_mut(tlen.wrapping_add(infolen) as usize); + ((&mut (&mut text)[tlen as usize..])[0usize..infolen as usize]) + .copy_from_slice(&info[0usize..infolen as usize]); + for i in 0u32..n { + ctr.1[0usize] = i.wrapping_add(1u32) as u8; + if i == 0u32 { + crate::hacl_rs::hmac::compute_blake2b_32( + ctr.0, + prk, + prklen, + tag.0, + infolen.wrapping_add(1u32), + ) + } else { + crate::hacl_rs::hmac::compute_blake2b_32( + ctr.0, + prk, + prklen, + &text, + tlen.wrapping_add(infolen).wrapping_add(1u32), + ) + }; + ((&mut output.1[i.wrapping_mul(tlen) as usize..])[0usize..tlen as usize]) + .copy_from_slice(&ctr.0[0usize..tlen as usize]) + } + if n.wrapping_mul(tlen) < len { + ctr.1[0usize] = n.wrapping_add(1u32) as u8; + if n == 0u32 { + crate::hacl_rs::hmac::compute_blake2b_32( + ctr.0, + prk, + prklen, + tag.0, + infolen.wrapping_add(1u32), + ) + } else { + crate::hacl_rs::hmac::compute_blake2b_32( + ctr.0, + prk, + prklen, + &text, + tlen.wrapping_add(infolen).wrapping_add(1u32), + ) + }; + let block: (&mut [u8], &mut [u8]) = output.1.split_at_mut(n.wrapping_mul(tlen) as usize); + (block.1[0usize..len.wrapping_sub(n.wrapping_mul(tlen)) as usize]).copy_from_slice( + &(&ctr.0[0usize..])[0usize..len.wrapping_sub(n.wrapping_mul(tlen)) as usize], + ) + } +} + +/** +Extract a fixed-length pseudorandom key from input keying material. 

@param prk Pointer to `HashLen` bytes of memory where pseudorandom key is written to.
@param salt Pointer to `saltlen` bytes of memory where salt value is read from.
@param saltlen Length of salt value.
@param ikm Pointer to `ikmlen` bytes of memory where input keying material is read from.
@param ikmlen Length of input keying material.
*/
pub fn extract_blake2b_32(prk: &mut [u8], salt: &[u8], saltlen: u32, ikm: &[u8], ikmlen: u32) {
    crate::hacl_rs::hmac::compute_blake2b_32(prk, salt, saltlen, ikm, ikmlen)
}
*/
diff --git a/src/hacl_rs/hmac.rs b/src/hacl_rs/hmac.rs
new file mode 100644
index 000000000..a30347d58
--- /dev/null
+++ b/src/hacl_rs/hmac.rs
@@ -0,0 +1,764 @@
#![allow(non_snake_case)]
#![allow(non_upper_case_globals)]
#![allow(non_camel_case_types)]
#![allow(unused_assignments)]
#![allow(unreachable_patterns)]

use super::fstar;
use super::lowstar;

// Generated pair type mirroring the (u32, u32) tuple of the Low* source;
// used below to carry a (full-block count, trailing length) pair.
#[derive(PartialEq, Clone, Copy)]
pub(crate) struct __uint32_t_uint32_t {
    pub fst: u32,
    pub snd: u32,
}

/**
Write the HMAC-SHA-1 MAC of a message (`data`) by using a key (`key`) into `dst`.

The key can be any length and will be hashed if it is longer and padded if it is shorter than 64 bytes.
`dst` must point to 20 bytes of memory.
*/
pub fn compute_sha1(dst: &mut [u8], key: &[u8], key_len: u32, data: &[u8], data_len: u32) {
    // l = SHA-1 block size; the key is normalized to exactly one block:
    // copied verbatim if it fits, otherwise replaced by its 20-byte hash,
    // zero-padded in both cases (key_block starts zeroed).
    let l: u32 = 64u32;
    let mut key_block: Box<[u8]> = vec![0x00u8; l as usize].into_boxed_slice();
    let nkey: (&mut [u8], &mut [u8]) = key_block.split_at_mut(0usize);
    let ite: u32 = if key_len <= 64u32 { key_len } else { 20u32 };
    let zeroes: (&mut [u8], &mut [u8]) = nkey.1.split_at_mut(ite as usize);
    lowstar::ignore::ignore::<&[u8]>(zeroes.1);
    if key_len <= 64u32 {
        (zeroes.0[0usize..key_len as usize]).copy_from_slice(&key[0usize..key_len as usize])
    } else {
        crate::hacl_rs::hash_sha1::hash_oneshot(zeroes.0, key, key_len)
    };
    // ipad / opad = normalized key XOR 0x36 / 0x5c (RFC 2104).
    let mut ipad: Box<[u8]> = vec![0x36u8; l as usize].into_boxed_slice();
    for i in 0u32..l {
        let xi: u8 = (&ipad)[i as usize];
        let yi: u8 = (&key_block)[i as usize];
        (&mut ipad)[i as usize] = xi ^ yi
    }
    let mut opad: Box<[u8]> = vec![0x5cu8; l as usize].into_boxed_slice();
    for i in 0u32..l {
        let xi: u8 = (&opad)[i as usize];
        let yi: u8 = (&key_block)[i as usize];
        (&mut opad)[i as usize] = xi ^ yi
    }
    // SHA-1 initial state (FIPS 180-4 IV).
    let mut s: [u32; 5] = [
        0x67452301u32,
        0xefcdab89u32,
        0x98badcfeu32,
        0x10325476u32,
        0xc3d2e1f0u32,
    ];
    // Inner hash: H(ipad || data).
    if data_len == 0u32 {
        crate::hacl_rs::hash_sha1::update_last(&mut s, 0u64, &ipad, 64u32)
    } else {
        let block_len: u32 = 64u32;
        let n_blocks: u32 = data_len.wrapping_div(block_len);
        let rem: u32 = data_len.wrapping_rem(block_len);
        // If data is an exact multiple of the block size, hold back one full
        // block so update_last always has input to pad.
        let scrut: crate::hacl_rs::hmac::__uint32_t_uint32_t = if n_blocks > 0u32 && rem == 0u32 {
            let n_blocksĀ·: u32 = n_blocks.wrapping_sub(1u32);
            crate::hacl_rs::hmac::__uint32_t_uint32_t {
                fst: n_blocksĀ·,
                snd: data_len.wrapping_sub(n_blocksĀ·.wrapping_mul(block_len)),
            }
        } else {
            crate::hacl_rs::hmac::__uint32_t_uint32_t {
                fst: n_blocks,
                snd: rem,
            }
        };
        let n_blocks0: u32 = scrut.fst;
        let rem_len: u32 = scrut.snd;
        let full_blocks_len: u32 = n_blocks0.wrapping_mul(block_len);
        let full_blocks: (&[u8], &[u8]) = data.split_at(0usize);
        let rem0: (&[u8], &[u8]) = full_blocks.1.split_at(full_blocks_len as usize);
        crate::hacl_rs::hash_sha1::update_multi(&mut s, &ipad, 1u32);
        crate::hacl_rs::hash_sha1::update_multi(&mut s, rem0.0, n_blocks0);
        crate::hacl_rs::hash_sha1::update_last(
            &mut s,
            (64u32 as u64).wrapping_add(full_blocks_len as u64),
            rem0.1,
            rem_len,
        )
    };
    // The inner digest is written into the start of the ipad buffer (reused
    // as scratch), then the outer hash H(opad || inner) is computed.
    let dst1: (&mut [u8], &mut [u8]) = ipad.split_at_mut(0usize);
    crate::hacl_rs::hash_sha1::finish(&s, dst1.1);
    let hash1: (&[u8], &[u8]) = dst1.1.split_at(0usize);
    crate::hacl_rs::hash_sha1::init(&mut s);
    let block_len: u32 = 64u32;
    let n_blocks: u32 = 20u32.wrapping_div(block_len);
    let rem: u32 = 20u32.wrapping_rem(block_len);
    let scrut: crate::hacl_rs::hmac::__uint32_t_uint32_t = if n_blocks > 0u32 && rem == 0u32 {
        let n_blocksĀ·: u32 = n_blocks.wrapping_sub(1u32);
        crate::hacl_rs::hmac::__uint32_t_uint32_t {
            fst: n_blocksĀ·,
            snd: 20u32.wrapping_sub(n_blocksĀ·.wrapping_mul(block_len)),
        }
    } else {
        crate::hacl_rs::hmac::__uint32_t_uint32_t {
            fst: n_blocks,
            snd: rem,
        }
    };
    let n_blocks0: u32 = scrut.fst;
    let rem_len: u32 = scrut.snd;
    let full_blocks_len: u32 = n_blocks0.wrapping_mul(block_len);
    let full_blocks: (&[u8], &[u8]) = hash1.1.split_at(0usize);
    let rem0: (&[u8], &[u8]) = full_blocks.1.split_at(full_blocks_len as usize);
    crate::hacl_rs::hash_sha1::update_multi(&mut s, &opad, 1u32);
    crate::hacl_rs::hash_sha1::update_multi(&mut s, rem0.0, n_blocks0);
    crate::hacl_rs::hash_sha1::update_last(
        &mut s,
        (64u32 as u64).wrapping_add(full_blocks_len as u64),
        rem0.1,
        rem_len,
    );
    crate::hacl_rs::hash_sha1::finish(&s, dst)
}

/**
Write the HMAC-SHA-2-256 MAC of a message (`data`) by using a key (`key`) into `dst`.

The key can be any length and will be hashed if it is longer and padded if it is shorter than 64 bytes.
`dst` must point to 32 bytes of memory.
*/
pub fn compute_sha2_256(dst: &mut [u8], key: &[u8], key_len: u32, data: &[u8], data_len: u32) {
    // l = SHA2-256 block size; normalize the key to one block (copy or hash,
    // zero-padded — key_block starts zeroed).
    let l: u32 = 64u32;
    let mut key_block: Box<[u8]> = vec![0x00u8; l as usize].into_boxed_slice();
    let nkey: (&mut [u8], &mut [u8]) = key_block.split_at_mut(0usize);
    let ite: u32 = if key_len <= 64u32 { key_len } else { 32u32 };
    let zeroes: (&mut [u8], &mut [u8]) = nkey.1.split_at_mut(ite as usize);
    lowstar::ignore::ignore::<&[u8]>(zeroes.1);
    if key_len <= 64u32 {
        (zeroes.0[0usize..key_len as usize]).copy_from_slice(&key[0usize..key_len as usize])
    } else {
        crate::hacl_rs::hash_sha2::hash_256(zeroes.0, key, key_len)
    };
    // ipad / opad = normalized key XOR 0x36 / 0x5c (RFC 2104).
    let mut ipad: Box<[u8]> = vec![0x36u8; l as usize].into_boxed_slice();
    for i in 0u32..l {
        let xi: u8 = (&ipad)[i as usize];
        let yi: u8 = (&key_block)[i as usize];
        (&mut ipad)[i as usize] = xi ^ yi
    }
    let mut opad: Box<[u8]> = vec![0x5cu8; l as usize].into_boxed_slice();
    for i in 0u32..l {
        let xi: u8 = (&opad)[i as usize];
        let yi: u8 = (&key_block)[i as usize];
        (&mut opad)[i as usize] = xi ^ yi
    }
    // Initialize the state from the SHA-256 IV constants.
    let mut st: [u32; 8] = [0u32; 8usize];
    krml::unroll_for!(8, "i", 0u32, 1u32, {
        let x: u32 = (&crate::hacl_rs::hash_sha2::h256)[i as usize];
        let os: (&mut [u32], &mut [u32]) = st.split_at_mut(0usize);
        os.1[i as usize] = x
    });
    let s: &mut [u32] = &mut st;
    // Inner hash: H(ipad || data).
    if data_len == 0u32 {
        crate::hacl_rs::hash_sha2::sha256_update_last(
            0u64.wrapping_add(64u32 as u64),
            64u32,
            &ipad,
            s,
        )
    } else {
        let block_len: u32 = 64u32;
        let n_blocks: u32 = data_len.wrapping_div(block_len);
        let rem: u32 = data_len.wrapping_rem(block_len);
        // If data is an exact multiple of the block size, hold back one full
        // block so update_last always has input to pad.
        let scrut: crate::hacl_rs::hmac::__uint32_t_uint32_t = if n_blocks > 0u32 && rem == 0u32 {
            let n_blocksĀ·: u32 = n_blocks.wrapping_sub(1u32);
            crate::hacl_rs::hmac::__uint32_t_uint32_t {
                fst: n_blocksĀ·,
                snd: data_len.wrapping_sub(n_blocksĀ·.wrapping_mul(block_len)),
            }
        } else {
            crate::hacl_rs::hmac::__uint32_t_uint32_t {
                fst: n_blocks,
                snd: rem,
            }
        };
        let n_blocks0: u32 = scrut.fst;
        let rem_len: u32 = scrut.snd;
        let full_blocks_len: u32 = n_blocks0.wrapping_mul(block_len);
        let full_blocks: (&[u8], &[u8]) = data.split_at(0usize);
        let rem0: (&[u8], &[u8]) = full_blocks.1.split_at(full_blocks_len as usize);
        crate::hacl_rs::hash_sha2::sha256_update_nblocks(64u32, &ipad, s);
        crate::hacl_rs::hash_sha2::sha256_update_nblocks(n_blocks0.wrapping_mul(64u32), rem0.0, s);
        crate::hacl_rs::hash_sha2::sha256_update_last(
            (64u32 as u64)
                .wrapping_add(full_blocks_len as u64)
                .wrapping_add(rem_len as u64),
            rem_len,
            rem0.1,
            s,
        )
    };
    // The inner digest is written into the start of the ipad buffer (reused
    // as scratch), then the outer hash H(opad || inner) is computed.
    let dst1: (&mut [u8], &mut [u8]) = ipad.split_at_mut(0usize);
    crate::hacl_rs::hash_sha2::sha256_finish(s, dst1.1);
    let hash1: (&[u8], &[u8]) = dst1.1.split_at(0usize);
    crate::hacl_rs::hash_sha2::sha256_init(s);
    let block_len: u32 = 64u32;
    let n_blocks: u32 = 32u32.wrapping_div(block_len);
    let rem: u32 = 32u32.wrapping_rem(block_len);
    let scrut: crate::hacl_rs::hmac::__uint32_t_uint32_t = if n_blocks > 0u32 && rem == 0u32 {
        let n_blocksĀ·: u32 = n_blocks.wrapping_sub(1u32);
        crate::hacl_rs::hmac::__uint32_t_uint32_t {
            fst: n_blocksĀ·,
            snd: 32u32.wrapping_sub(n_blocksĀ·.wrapping_mul(block_len)),
        }
    } else {
        crate::hacl_rs::hmac::__uint32_t_uint32_t {
            fst: n_blocks,
            snd: rem,
        }
    };
    let n_blocks0: u32 = scrut.fst;
    let rem_len: u32 = scrut.snd;
    let full_blocks_len: u32 = n_blocks0.wrapping_mul(block_len);
    let full_blocks: (&[u8], &[u8]) = hash1.1.split_at(0usize);
    let rem0: (&[u8], &[u8]) = full_blocks.1.split_at(full_blocks_len as usize);
    crate::hacl_rs::hash_sha2::sha256_update_nblocks(64u32, &opad, s);
    crate::hacl_rs::hash_sha2::sha256_update_nblocks(n_blocks0.wrapping_mul(64u32), rem0.0, s);
    crate::hacl_rs::hash_sha2::sha256_update_last(
        (64u32 as u64)
            .wrapping_add(full_blocks_len as u64)
            .wrapping_add(rem_len as u64),
        rem_len,
        rem0.1,
        s,
    );
    crate::hacl_rs::hash_sha2::sha256_finish(s, dst)
}

/**
Write the
HMAC-SHA-2-384 MAC of a message (`data`) by using a key (`key`) into `dst`.

The key can be any length and will be hashed if it is longer and padded if it is shorter than 128 bytes.
`dst` must point to 48 bytes of memory.
*/
pub fn compute_sha2_384(dst: &mut [u8], key: &[u8], key_len: u32, data: &[u8], data_len: u32) {
    // l = SHA2-384 block size; normalize the key to one block (copy or hash,
    // zero-padded — key_block starts zeroed).
    let l: u32 = 128u32;
    let mut key_block: Box<[u8]> = vec![0x00u8; l as usize].into_boxed_slice();
    let nkey: (&mut [u8], &mut [u8]) = key_block.split_at_mut(0usize);
    let ite: u32 = if key_len <= 128u32 { key_len } else { 48u32 };
    let zeroes: (&mut [u8], &mut [u8]) = nkey.1.split_at_mut(ite as usize);
    lowstar::ignore::ignore::<&[u8]>(zeroes.1);
    if key_len <= 128u32 {
        (zeroes.0[0usize..key_len as usize]).copy_from_slice(&key[0usize..key_len as usize])
    } else {
        crate::hacl_rs::hash_sha2::hash_384(zeroes.0, key, key_len)
    };
    // ipad / opad = normalized key XOR 0x36 / 0x5c (RFC 2104).
    let mut ipad: Box<[u8]> = vec![0x36u8; l as usize].into_boxed_slice();
    for i in 0u32..l {
        let xi: u8 = (&ipad)[i as usize];
        let yi: u8 = (&key_block)[i as usize];
        (&mut ipad)[i as usize] = xi ^ yi
    }
    let mut opad: Box<[u8]> = vec![0x5cu8; l as usize].into_boxed_slice();
    for i in 0u32..l {
        let xi: u8 = (&opad)[i as usize];
        let yi: u8 = (&key_block)[i as usize];
        (&mut opad)[i as usize] = xi ^ yi
    }
    // Initialize the state from the SHA-384 IV constants.
    let mut st: [u64; 8] = [0u64; 8usize];
    krml::unroll_for!(8, "i", 0u32, 1u32, {
        let x: u64 = (&crate::hacl_rs::hash_sha2::h384)[i as usize];
        let os: (&mut [u64], &mut [u64]) = st.split_at_mut(0usize);
        os.1[i as usize] = x
    });
    let s: &mut [u64] = &mut st;
    // Inner hash: H(ipad || data). SHA-384 padding uses a 128-bit length.
    if data_len == 0u32 {
        crate::hacl_rs::hash_sha2::sha384_update_last(
            fstar::uint128::add(
                fstar::uint128::uint64_to_uint128(0u64),
                fstar::uint128::uint64_to_uint128(128u32 as u64),
            ),
            128u32,
            &ipad,
            s,
        )
    } else {
        let block_len: u32 = 128u32;
        let n_blocks: u32 = data_len.wrapping_div(block_len);
        let rem: u32 = data_len.wrapping_rem(block_len);
        // If data is an exact multiple of the block size, hold back one full
        // block so update_last always has input to pad.
        let scrut: crate::hacl_rs::hmac::__uint32_t_uint32_t = if n_blocks > 0u32 && rem == 0u32 {
            let n_blocksĀ·: u32 = n_blocks.wrapping_sub(1u32);
            crate::hacl_rs::hmac::__uint32_t_uint32_t {
                fst: n_blocksĀ·,
                snd: data_len.wrapping_sub(n_blocksĀ·.wrapping_mul(block_len)),
            }
        } else {
            crate::hacl_rs::hmac::__uint32_t_uint32_t {
                fst: n_blocks,
                snd: rem,
            }
        };
        let n_blocks0: u32 = scrut.fst;
        let rem_len: u32 = scrut.snd;
        let full_blocks_len: u32 = n_blocks0.wrapping_mul(block_len);
        let full_blocks: (&[u8], &[u8]) = data.split_at(0usize);
        let rem0: (&[u8], &[u8]) = full_blocks.1.split_at(full_blocks_len as usize);
        crate::hacl_rs::hash_sha2::sha384_update_nblocks(128u32, &ipad, s);
        crate::hacl_rs::hash_sha2::sha384_update_nblocks(n_blocks0.wrapping_mul(128u32), rem0.0, s);
        crate::hacl_rs::hash_sha2::sha384_update_last(
            fstar::uint128::add(
                fstar::uint128::add(
                    fstar::uint128::uint64_to_uint128(128u32 as u64),
                    fstar::uint128::uint64_to_uint128(full_blocks_len as u64),
                ),
                fstar::uint128::uint64_to_uint128(rem_len as u64),
            ),
            rem_len,
            rem0.1,
            s,
        )
    };
    // The inner digest is written into the start of the ipad buffer (reused
    // as scratch), then the outer hash H(opad || inner) is computed.
    let dst1: (&mut [u8], &mut [u8]) = ipad.split_at_mut(0usize);
    crate::hacl_rs::hash_sha2::sha384_finish(s, dst1.1);
    let hash1: (&[u8], &[u8]) = dst1.1.split_at(0usize);
    crate::hacl_rs::hash_sha2::sha384_init(s);
    let block_len: u32 = 128u32;
    let n_blocks: u32 = 48u32.wrapping_div(block_len);
    let rem: u32 = 48u32.wrapping_rem(block_len);
    let scrut: crate::hacl_rs::hmac::__uint32_t_uint32_t = if n_blocks > 0u32 && rem == 0u32 {
        let n_blocksĀ·: u32 = n_blocks.wrapping_sub(1u32);
        crate::hacl_rs::hmac::__uint32_t_uint32_t {
            fst: n_blocksĀ·,
            snd: 48u32.wrapping_sub(n_blocksĀ·.wrapping_mul(block_len)),
        }
    } else {
        crate::hacl_rs::hmac::__uint32_t_uint32_t {
            fst: n_blocks,
            snd: rem,
        }
    };
    let n_blocks0: u32 = scrut.fst;
    let rem_len: u32 = scrut.snd;
    let full_blocks_len: u32 = n_blocks0.wrapping_mul(block_len);
    let full_blocks: (&[u8], &[u8]) = hash1.1.split_at(0usize);
    let rem0: (&[u8], &[u8]) = full_blocks.1.split_at(full_blocks_len as usize);
    crate::hacl_rs::hash_sha2::sha384_update_nblocks(128u32, &opad, s);
    crate::hacl_rs::hash_sha2::sha384_update_nblocks(n_blocks0.wrapping_mul(128u32), rem0.0, s);
    crate::hacl_rs::hash_sha2::sha384_update_last(
        fstar::uint128::add(
            fstar::uint128::add(
                fstar::uint128::uint64_to_uint128(128u32 as u64),
                fstar::uint128::uint64_to_uint128(full_blocks_len as u64),
            ),
            fstar::uint128::uint64_to_uint128(rem_len as u64),
        ),
        rem_len,
        rem0.1,
        s,
    );
    crate::hacl_rs::hash_sha2::sha384_finish(s, dst)
}

/**
Write the HMAC-SHA-2-512 MAC of a message (`data`) by using a key (`key`) into `dst`.

The key can be any length and will be hashed if it is longer and padded if it is shorter than 128 bytes.
`dst` must point to 64 bytes of memory.
*/
pub fn compute_sha2_512(dst: &mut [u8], key: &[u8], key_len: u32, data: &[u8], data_len: u32) {
    let l: u32 = 128u32;
    let mut key_block: Box<[u8]> = vec![0x00u8; l as usize].into_boxed_slice();
    let nkey: (&mut [u8], &mut [u8]) = key_block.split_at_mut(0usize);
    let ite: u32 = if key_len <= 128u32 { key_len } else { 64u32 };
    let zeroes: (&mut [u8], &mut [u8]) = nkey.1.split_at_mut(ite as usize);
    lowstar::ignore::ignore::<&[u8]>(zeroes.1);
    if key_len <= 128u32 {
        (zeroes.0[0usize..key_len as usize]).copy_from_slice(&key[0usize..key_len as usize])
    } else {
        crate::hacl_rs::hash_sha2::hash_512(zeroes.0, key, key_len)
    };
    let mut ipad: Box<[u8]> = vec![0x36u8; l as usize].into_boxed_slice();
    for i in 0u32..l {
        let xi: u8 = (&ipad)[i as usize];
        let yi: u8 = (&key_block)[i as usize];
        (&mut ipad)[i as usize] = xi ^ yi
    }
    let mut opad: Box<[u8]> = vec![0x5cu8; l as usize].into_boxed_slice();
    for i in 0u32..l {
        let xi: u8 = (&opad)[i as usize];
        let yi: u8 = (&key_block)[i as usize];
        (&mut opad)[i as usize] = xi ^ yi
    }
    let mut st: [u64; 8] = [0u64; 8usize];
    krml::unroll_for!(8, "i", 0u32, 1u32, {
        let x: u64 =
(&crate::hacl_rs::hash_sha2::h512)[i as usize]; + let os: (&mut [u64], &mut [u64]) = st.split_at_mut(0usize); + os.1[i as usize] = x + }); + let s: &mut [u64] = &mut st; + if data_len == 0u32 { + crate::hacl_rs::hash_sha2::sha512_update_last( + fstar::uint128::add( + fstar::uint128::uint64_to_uint128(0u64), + fstar::uint128::uint64_to_uint128(128u32 as u64), + ), + 128u32, + &ipad, + s, + ) + } else { + let block_len: u32 = 128u32; + let n_blocks: u32 = data_len.wrapping_div(block_len); + let rem: u32 = data_len.wrapping_rem(block_len); + let scrut: crate::hacl_rs::hmac::__uint32_t_uint32_t = if n_blocks > 0u32 && rem == 0u32 { + let n_blocksĀ·: u32 = n_blocks.wrapping_sub(1u32); + crate::hacl_rs::hmac::__uint32_t_uint32_t { + fst: n_blocksĀ·, + snd: data_len.wrapping_sub(n_blocksĀ·.wrapping_mul(block_len)), + } + } else { + crate::hacl_rs::hmac::__uint32_t_uint32_t { + fst: n_blocks, + snd: rem, + } + }; + let n_blocks0: u32 = scrut.fst; + let rem_len: u32 = scrut.snd; + let full_blocks_len: u32 = n_blocks0.wrapping_mul(block_len); + let full_blocks: (&[u8], &[u8]) = data.split_at(0usize); + let rem0: (&[u8], &[u8]) = full_blocks.1.split_at(full_blocks_len as usize); + crate::hacl_rs::hash_sha2::sha512_update_nblocks(128u32, &ipad, s); + crate::hacl_rs::hash_sha2::sha512_update_nblocks(n_blocks0.wrapping_mul(128u32), rem0.0, s); + crate::hacl_rs::hash_sha2::sha512_update_last( + fstar::uint128::add( + fstar::uint128::add( + fstar::uint128::uint64_to_uint128(128u32 as u64), + fstar::uint128::uint64_to_uint128(full_blocks_len as u64), + ), + fstar::uint128::uint64_to_uint128(rem_len as u64), + ), + rem_len, + rem0.1, + s, + ) + }; + let dst1: (&mut [u8], &mut [u8]) = ipad.split_at_mut(0usize); + crate::hacl_rs::hash_sha2::sha512_finish(s, dst1.1); + let hash1: (&[u8], &[u8]) = dst1.1.split_at(0usize); + crate::hacl_rs::hash_sha2::sha512_init(s); + let block_len: u32 = 128u32; + let n_blocks: u32 = 64u32.wrapping_div(block_len); + let rem: u32 = 
64u32.wrapping_rem(block_len); + let scrut: crate::hacl_rs::hmac::__uint32_t_uint32_t = if n_blocks > 0u32 && rem == 0u32 { + let n_blocksĀ·: u32 = n_blocks.wrapping_sub(1u32); + crate::hacl_rs::hmac::__uint32_t_uint32_t { + fst: n_blocksĀ·, + snd: 64u32.wrapping_sub(n_blocksĀ·.wrapping_mul(block_len)), + } + } else { + crate::hacl_rs::hmac::__uint32_t_uint32_t { + fst: n_blocks, + snd: rem, + } + }; + let n_blocks0: u32 = scrut.fst; + let rem_len: u32 = scrut.snd; + let full_blocks_len: u32 = n_blocks0.wrapping_mul(block_len); + let full_blocks: (&[u8], &[u8]) = hash1.1.split_at(0usize); + let rem0: (&[u8], &[u8]) = full_blocks.1.split_at(full_blocks_len as usize); + crate::hacl_rs::hash_sha2::sha512_update_nblocks(128u32, &opad, s); + crate::hacl_rs::hash_sha2::sha512_update_nblocks(n_blocks0.wrapping_mul(128u32), rem0.0, s); + crate::hacl_rs::hash_sha2::sha512_update_last( + fstar::uint128::add( + fstar::uint128::add( + fstar::uint128::uint64_to_uint128(128u32 as u64), + fstar::uint128::uint64_to_uint128(full_blocks_len as u64), + ), + fstar::uint128::uint64_to_uint128(rem_len as u64), + ), + rem_len, + rem0.1, + s, + ); + crate::hacl_rs::hash_sha2::sha512_finish(s, dst) +} + +/* no blake2 for now + +/** +Write the HMAC-BLAKE2s MAC of a message (`data`) by using a key (`key`) into `dst`. + +The key can be any length and will be hashed if it is longer and padded if it is shorter than 64 bytes. +`dst` must point to 32 bytes of memory. 
+*/ +pub fn compute_blake2s_32(dst: &mut [u8], key: &[u8], key_len: u32, data: &[u8], data_len: u32) { + let l: u32 = 64u32; + let mut key_block: Box<[u8]> = vec![0x00u8; l as usize].into_boxed_slice(); + let nkey: (&mut [u8], &mut [u8]) = key_block.split_at_mut(0usize); + let ite: u32 = if key_len <= 64u32 { key_len } else { 32u32 }; + let zeroes: (&mut [u8], &mut [u8]) = nkey.1.split_at_mut(ite as usize); + lowstar::ignore::ignore::<&[u8]>(zeroes.1); + if key_len <= 64u32 { + (zeroes.0[0usize..key_len as usize]).copy_from_slice(&key[0usize..key_len as usize]) + } else { + crate::hacl_rs::hash_blake2s::hash_with_key(zeroes.0, 32u32, key, key_len, &[], 0u32) + }; + let mut ipad: Box<[u8]> = vec![0x36u8; l as usize].into_boxed_slice(); + for i in 0u32..l { + let xi: u8 = (&ipad)[i as usize]; + let yi: u8 = (&key_block)[i as usize]; + (&mut ipad)[i as usize] = xi ^ yi + } + let mut opad: Box<[u8]> = vec![0x5cu8; l as usize].into_boxed_slice(); + for i in 0u32..l { + let xi: u8 = (&opad)[i as usize]; + let yi: u8 = (&key_block)[i as usize]; + (&mut opad)[i as usize] = xi ^ yi + } + let mut s: [u32; 16] = [0u32; 16usize]; + crate::hacl_rs::hash_blake2s::init(&mut s, 0u32, 32u32); + let s0: &mut [u32] = &mut s; + if data_len == 0u32 { + let mut wv: [u32; 16] = [0u32; 16usize]; + crate::hacl_rs::hash_blake2s::update_last(64u32, &mut wv, s0, false, 0u64, 64u32, &ipad) + } else { + let block_len: u32 = 64u32; + let n_blocks: u32 = data_len.wrapping_div(block_len); + let rem: u32 = data_len.wrapping_rem(block_len); + let scrut: crate::hacl_rs::hmac::__uint32_t_uint32_t = if n_blocks > 0u32 && rem == 0u32 { + let n_blocksĀ·: u32 = n_blocks.wrapping_sub(1u32); + crate::hacl_rs::hmac::__uint32_t_uint32_t { + fst: n_blocksĀ·, + snd: data_len.wrapping_sub(n_blocksĀ·.wrapping_mul(block_len)), + } + } else { + crate::hacl_rs::hmac::__uint32_t_uint32_t { + fst: n_blocks, + snd: rem, + } + }; + let n_blocks0: u32 = scrut.fst; + let rem_len: u32 = scrut.snd; + let full_blocks_len: 
u32 = n_blocks0.wrapping_mul(block_len); + let full_blocks: (&[u8], &[u8]) = data.split_at(0usize); + let rem0: (&[u8], &[u8]) = full_blocks.1.split_at(full_blocks_len as usize); + let mut wv: [u32; 16] = [0u32; 16usize]; + crate::hacl_rs::hash_blake2s::update_multi(64u32, &mut wv, s0, 0u64, &ipad, 1u32); + let mut wv0: [u32; 16] = [0u32; 16usize]; + crate::hacl_rs::hash_blake2s::update_multi( + n_blocks0.wrapping_mul(64u32), + &mut wv0, + s0, + block_len as u64, + rem0.0, + n_blocks0, + ); + let mut wv1: [u32; 16] = [0u32; 16usize]; + crate::hacl_rs::hash_blake2s::update_last( + rem_len, + &mut wv1, + s0, + false, + (64u32 as u64).wrapping_add(full_blocks_len as u64), + rem_len, + rem0.1, + ) + }; + let dst1: (&mut [u8], &mut [u8]) = ipad.split_at_mut(0usize); + crate::hacl_rs::hash_blake2s::finish(32u32, dst1.1, s0); + let hash1: (&[u8], &[u8]) = dst1.1.split_at(0usize); + crate::hacl_rs::hash_blake2s::init(s0, 0u32, 32u32); + let block_len: u32 = 64u32; + let n_blocks: u32 = 32u32.wrapping_div(block_len); + let rem: u32 = 32u32.wrapping_rem(block_len); + let scrut: crate::hacl_rs::hmac::__uint32_t_uint32_t = if n_blocks > 0u32 && rem == 0u32 { + let n_blocksĀ·: u32 = n_blocks.wrapping_sub(1u32); + crate::hacl_rs::hmac::__uint32_t_uint32_t { + fst: n_blocksĀ·, + snd: 32u32.wrapping_sub(n_blocksĀ·.wrapping_mul(block_len)), + } + } else { + crate::hacl_rs::hmac::__uint32_t_uint32_t { + fst: n_blocks, + snd: rem, + } + }; + let n_blocks0: u32 = scrut.fst; + let rem_len: u32 = scrut.snd; + let full_blocks_len: u32 = n_blocks0.wrapping_mul(block_len); + let full_blocks: (&[u8], &[u8]) = hash1.1.split_at(0usize); + let rem0: (&[u8], &[u8]) = full_blocks.1.split_at(full_blocks_len as usize); + let mut wv: [u32; 16] = [0u32; 16usize]; + crate::hacl_rs::hash_blake2s::update_multi(64u32, &mut wv, s0, 0u64, &opad, 1u32); + let mut wv0: [u32; 16] = [0u32; 16usize]; + crate::hacl_rs::hash_blake2s::update_multi( + n_blocks0.wrapping_mul(64u32), + &mut wv0, + s0, + block_len as 
u64, + rem0.0, + n_blocks0, + ); + let mut wv1: [u32; 16] = [0u32; 16usize]; + crate::hacl_rs::hash_blake2s::update_last( + rem_len, + &mut wv1, + s0, + false, + (64u32 as u64).wrapping_add(full_blocks_len as u64), + rem_len, + rem0.1, + ); + crate::hacl_rs::hash_blake2s::finish(32u32, dst, s0) +} + +/** +Write the HMAC-BLAKE2b MAC of a message (`data`) by using a key (`key`) into `dst`. + +The key can be any length and will be hashed if it is longer and padded if it is shorter than 128 bytes. +`dst` must point to 64 bytes of memory. +*/ +pub fn compute_blake2b_32(dst: &mut [u8], key: &[u8], key_len: u32, data: &[u8], data_len: u32) { + let l: u32 = 128u32; + let mut key_block: Box<[u8]> = vec![0x00u8; l as usize].into_boxed_slice(); + let nkey: (&mut [u8], &mut [u8]) = key_block.split_at_mut(0usize); + let ite: u32 = if key_len <= 128u32 { key_len } else { 64u32 }; + let zeroes: (&mut [u8], &mut [u8]) = nkey.1.split_at_mut(ite as usize); + lowstar::ignore::ignore::<&[u8]>(zeroes.1); + if key_len <= 128u32 { + (zeroes.0[0usize..key_len as usize]).copy_from_slice(&key[0usize..key_len as usize]) + } else { + crate::hacl_rs::hash_blake2b::hash_with_key(zeroes.0, 64u32, key, key_len, &[], 0u32) + }; + let mut ipad: Box<[u8]> = vec![0x36u8; l as usize].into_boxed_slice(); + for i in 0u32..l { + let xi: u8 = (&ipad)[i as usize]; + let yi: u8 = (&key_block)[i as usize]; + (&mut ipad)[i as usize] = xi ^ yi + } + let mut opad: Box<[u8]> = vec![0x5cu8; l as usize].into_boxed_slice(); + for i in 0u32..l { + let xi: u8 = (&opad)[i as usize]; + let yi: u8 = (&key_block)[i as usize]; + (&mut opad)[i as usize] = xi ^ yi + } + let mut s: [u64; 16] = [0u64; 16usize]; + crate::hacl_rs::hash_blake2b::init(&mut s, 0u32, 64u32); + let s0: &mut [u64] = &mut s; + if data_len == 0u32 { + let mut wv: [u64; 16] = [0u64; 16usize]; + crate::hacl_rs::hash_blake2b::update_last( + 128u32, + &mut wv, + s0, + false, + fstar::uint128::uint64_to_uint128(0u64), + 128u32, + &ipad, + ) + } else { + let 
block_len: u32 = 128u32; + let n_blocks: u32 = data_len.wrapping_div(block_len); + let rem: u32 = data_len.wrapping_rem(block_len); + let scrut: crate::hacl_rs::hmac::__uint32_t_uint32_t = if n_blocks > 0u32 && rem == 0u32 { + let n_blocksĀ·: u32 = n_blocks.wrapping_sub(1u32); + crate::hacl_rs::hmac::__uint32_t_uint32_t { + fst: n_blocksĀ·, + snd: data_len.wrapping_sub(n_blocksĀ·.wrapping_mul(block_len)), + } + } else { + crate::hacl_rs::hmac::__uint32_t_uint32_t { + fst: n_blocks, + snd: rem, + } + }; + let n_blocks0: u32 = scrut.fst; + let rem_len: u32 = scrut.snd; + let full_blocks_len: u32 = n_blocks0.wrapping_mul(block_len); + let full_blocks: (&[u8], &[u8]) = data.split_at(0usize); + let rem0: (&[u8], &[u8]) = full_blocks.1.split_at(full_blocks_len as usize); + let mut wv: [u64; 16] = [0u64; 16usize]; + crate::hacl_rs::hash_blake2b::update_multi( + 128u32, + &mut wv, + s0, + fstar::uint128::uint64_to_uint128(0u64), + &ipad, + 1u32, + ); + let mut wv0: [u64; 16] = [0u64; 16usize]; + crate::hacl_rs::hash_blake2b::update_multi( + n_blocks0.wrapping_mul(128u32), + &mut wv0, + s0, + fstar::uint128::uint64_to_uint128(block_len as u64), + rem0.0, + n_blocks0, + ); + let mut wv1: [u64; 16] = [0u64; 16usize]; + crate::hacl_rs::hash_blake2b::update_last( + rem_len, + &mut wv1, + s0, + false, + fstar::uint128::add( + fstar::uint128::uint64_to_uint128(128u32 as u64), + fstar::uint128::uint64_to_uint128(full_blocks_len as u64), + ), + rem_len, + rem0.1, + ) + }; + let dst1: (&mut [u8], &mut [u8]) = ipad.split_at_mut(0usize); + crate::hacl_rs::hash_blake2b::finish(64u32, dst1.1, s0); + let hash1: (&[u8], &[u8]) = dst1.1.split_at(0usize); + crate::hacl_rs::hash_blake2b::init(s0, 0u32, 64u32); + let block_len: u32 = 128u32; + let n_blocks: u32 = 64u32.wrapping_div(block_len); + let rem: u32 = 64u32.wrapping_rem(block_len); + let scrut: crate::hacl_rs::hmac::__uint32_t_uint32_t = if n_blocks > 0u32 && rem == 0u32 { + let n_blocksĀ·: u32 = n_blocks.wrapping_sub(1u32); + 
crate::hacl_rs::hmac::__uint32_t_uint32_t { + fst: n_blocksĀ·, + snd: 64u32.wrapping_sub(n_blocksĀ·.wrapping_mul(block_len)), + } + } else { + crate::hacl_rs::hmac::__uint32_t_uint32_t { + fst: n_blocks, + snd: rem, + } + }; + let n_blocks0: u32 = scrut.fst; + let rem_len: u32 = scrut.snd; + let full_blocks_len: u32 = n_blocks0.wrapping_mul(block_len); + let full_blocks: (&[u8], &[u8]) = hash1.1.split_at(0usize); + let rem0: (&[u8], &[u8]) = full_blocks.1.split_at(full_blocks_len as usize); + let mut wv: [u64; 16] = [0u64; 16usize]; + crate::hacl_rs::hash_blake2b::update_multi( + 128u32, + &mut wv, + s0, + fstar::uint128::uint64_to_uint128(0u64), + &opad, + 1u32, + ); + let mut wv0: [u64; 16] = [0u64; 16usize]; + crate::hacl_rs::hash_blake2b::update_multi( + n_blocks0.wrapping_mul(128u32), + &mut wv0, + s0, + fstar::uint128::uint64_to_uint128(block_len as u64), + rem0.0, + n_blocks0, + ); + let mut wv1: [u64; 16] = [0u64; 16usize]; + crate::hacl_rs::hash_blake2b::update_last( + rem_len, + &mut wv1, + s0, + false, + fstar::uint128::add( + fstar::uint128::uint64_to_uint128(128u32 as u64), + fstar::uint128::uint64_to_uint128(full_blocks_len as u64), + ), + rem_len, + rem0.1, + ); + crate::hacl_rs::hash_blake2b::finish(64u32, dst, s0) +} +*/ diff --git a/src/hacl_rs/lowstar.rs b/src/hacl_rs/lowstar.rs new file mode 100644 index 000000000..f63af5cbe --- /dev/null +++ b/src/hacl_rs/lowstar.rs @@ -0,0 +1,2 @@ +pub mod endianness; +pub mod ignore; diff --git a/src/hacl_rs/lowstar/endianness.rs b/src/hacl_rs/lowstar/endianness.rs new file mode 100644 index 000000000..00d3ea9c5 --- /dev/null +++ b/src/hacl_rs/lowstar/endianness.rs @@ -0,0 +1,53 @@ +use std::convert::TryInto; + +// Little Endian + +pub fn load16_le(bytes: &[u8]) -> u16 { + u16::from_le_bytes(bytes[0..2].try_into().unwrap()) +} + +pub fn store16_le(bytes: &mut[u8], x: u16) { + bytes[0..2].copy_from_slice(&u16::to_le_bytes(x)) +} + +pub fn load32_le(bytes: &[u8]) -> u32 { + 
u32::from_le_bytes(bytes[0..4].try_into().unwrap()) +} + +pub fn store32_le(bytes: &mut[u8], x: u32) { + bytes[0..4].copy_from_slice(&u32::to_le_bytes(x)) +} + +pub fn load64_le(bytes: &[u8]) -> u64 { + u64::from_le_bytes(bytes[0..8].try_into().unwrap()) +} + +pub fn store64_le(bytes: &mut[u8], x: u64) { + bytes[0..8].copy_from_slice(&u64::to_le_bytes(x)) +} + +// Big Endian + +pub fn load32_be(bytes: &[u8]) -> u32 { + u32::from_be_bytes(bytes[0..4].try_into().unwrap()) +} + +pub fn store32_be(bytes: &mut[u8], x: u32) { + bytes[0..4].copy_from_slice(&u32::to_be_bytes(x)) +} + +pub fn load64_be(bytes: &[u8]) -> u64 { + u64::from_be_bytes(bytes[0..8].try_into().unwrap()) +} + +pub fn store64_be(bytes: &mut[u8], x: u64) { + bytes[0..8].copy_from_slice(&u64::to_be_bytes(x)) +} + +pub fn load128_be(bytes: &[u8]) -> u128 { + u128::from_be_bytes(bytes[0..16].try_into().unwrap()) +} + +pub fn store128_be(bytes: &mut[u8], x: u128) { + bytes[0..16].copy_from_slice(&u128::to_be_bytes(x)) +} diff --git a/src/hacl_rs/lowstar/ignore.rs b/src/hacl_rs/lowstar/ignore.rs new file mode 100644 index 000000000..919eb52f9 --- /dev/null +++ b/src/hacl_rs/lowstar/ignore.rs @@ -0,0 +1 @@ +pub fn ignore(_: T) {} diff --git a/src/hacl_rs/mod.rs b/src/hacl_rs/mod.rs new file mode 100644 index 000000000..aa1ef1d51 --- /dev/null +++ b/src/hacl_rs/mod.rs @@ -0,0 +1,9 @@ +// Utility modules. In the generated hacl-rs, these are individual crates. 
+mod fstar; +mod lowstar; + +pub(crate) mod hash_sha1; +pub(crate) mod hash_sha2; +//pub(crate) mod hkdf; +pub(crate) mod hmac; +pub(crate) mod streaming_types; diff --git a/src/hacl_rs/streaming_types.rs b/src/hacl_rs/streaming_types.rs new file mode 100644 index 000000000..886aad904 --- /dev/null +++ b/src/hacl_rs/streaming_types.rs @@ -0,0 +1,41 @@ +#![allow(non_snake_case)] +#![allow(non_upper_case_globals)] +#![allow(non_camel_case_types)] +#![allow(unused_assignments)] +#![allow(unreachable_patterns)] + +#[derive(PartialEq, Clone, Copy)] +pub enum hash_alg +{ + SHA2_224, + SHA2_256, + SHA2_384, + SHA2_512, + SHA1, + MD5, + Blake2S, + Blake2B, + SHA3_256, + SHA3_224, + SHA3_384, + SHA3_512, + Shake128, + Shake256 +} + +#[derive(PartialEq, Clone, Copy)] +pub enum error_code +{ + Success, + InvalidAlgorithm, + InvalidLength, + MaximumLengthExceeded +} + +#[derive(PartialEq, Clone)] +pub struct state_32 +{ pub block_state: Box<[u32]>, pub buf: Box<[u8]>, pub total_len: u64 } + +#[derive(PartialEq, Clone)] +pub struct state_64 +{ pub block_state: Box<[u64]>, pub buf: Box<[u8]>, pub total_len: u64 } diff --git a/src/lib.rs b/src/lib.rs index 8642eaa91..cf2e6304b 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -14,6 +14,7 @@ pub(crate) mod jasmin; // HACL pub(crate) mod hacl; +pub(crate) mod hacl_rs; // libcrux pub mod aead; diff --git a/tests/sha2.rs b/tests/sha2.rs index 1e504ff3e..ac1e44ded 100644 --- a/tests/sha2.rs +++ b/tests/sha2.rs @@ -4,8 +4,9 @@ #[test] fn sha256_kat_streaming() { let mut digest = libcrux::digest::Sha2_256::new(); + let mut d = [0u8; 32]; digest.update(b"libcrux sha2 256 tests"); - let d = digest.finish(); + digest.finish(&mut d); let expected = "8683520e19e5b33db33c8fb90918c0c96fcdfd9a17c695ce0f0ea2eaa0c95956"; assert_eq!(hex::encode(&d), expected); @@ -38,8 +39,10 @@ fn sha2_clone() { let mut hasher256_2 = hasher_256.clone(); hasher_256.update(b"more 256"); hasher256_2.update(b"more 256"); - let digest = hasher_256.finish(); - let digest_2 = 
hasher256_2.finish(); + let mut digest = [0u8; 32]; + let mut digest_2 = [0u8; 32]; + hasher_256.finish(&mut digest); + hasher256_2.finish(&mut digest_2); assert_eq!(digest, digest_2); assert_eq!(digest, libcrux::digest::sha2_256(b"test 256more 256")); From 879ebea3ed052d4650b9f17e5488056a189d4081 Mon Sep 17 00:00:00 2001 From: "Jan Winkelmann (keks)" Date: Tue, 5 Nov 2024 16:49:26 +0100 Subject: [PATCH 03/18] no hacl-c sha2 anymore --- src/digest.rs | 100 +++++++++++++++++---------------------- src/hacl_rs/hash_sha2.rs | 47 ++++++++++++++++-- tests/sha2.rs | 18 ++++--- 3 files changed, 99 insertions(+), 66 deletions(-) diff --git a/src/digest.rs b/src/digest.rs index f40702a2f..4a561b97f 100644 --- a/src/digest.rs +++ b/src/digest.rs @@ -13,14 +13,7 @@ //! On x64 CPUs the libjade implementation is used and if AVX2 is available, the //! optimised libjade implementation is used. -use crate::hacl::{ - blake2, - sha2::{ - self, - streaming::{Sha224, Sha256, Sha384, Sha512}, - }, - sha3, -}; +use crate::hacl::{blake2, sha3}; use libcrux_platform::{simd128_support, simd256_support}; @@ -179,10 +172,10 @@ pub fn hash(alg: Algorithm, payload: &[u8]) -> Vec { // So we only use streaming. 
match alg { Algorithm::Sha1 => todo!(), - Algorithm::Sha224 => sha2::sha224(payload).into(), - Algorithm::Sha256 => sha2::sha256(payload).into(), - Algorithm::Sha384 => sha2::sha384(payload).into(), - Algorithm::Sha512 => sha2::sha512(payload).into(), + Algorithm::Sha224 => sha2_224(payload).into(), + Algorithm::Sha256 => sha2_256(payload).into(), + Algorithm::Sha384 => sha2_384(payload).into(), + Algorithm::Sha512 => sha2_512(payload).into(), Algorithm::Blake2s => blake2s(payload, &[]), Algorithm::Blake2b => blake2b(payload, &[]), Algorithm::Sha3_224 => sha3_224(payload).into(), @@ -236,64 +229,34 @@ fn blake2b(payload: &[u8], key: &[u8]) -> Vec { /// SHA2 224 pub fn sha2_224(payload: &[u8]) -> Sha2_224Digest { - sha2::sha224(payload) + let mut digest = Sha2_224Digest::default(); + Sha2_224::hash(&mut digest, payload); + digest } /// SHA2 256 pub fn sha2_256(payload: &[u8]) -> Sha2_256Digest { - sha2::sha256(payload) + let mut digest = Sha2_256Digest::default(); + Sha2_256::hash(&mut digest, payload); + digest } /// SHA2 384 pub fn sha2_384(payload: &[u8]) -> Sha2_384Digest { - sha2::sha384(payload) + // NB: this doesn't have default and we can't implement it. + let mut digest = [0; 48]; + Sha2_384::hash(&mut digest, payload); + digest } /// SHA2 512 pub fn sha2_512(payload: &[u8]) -> Sha2_512Digest { - sha2::sha512(payload) + // NB: this doesn't have default and we can't implement it. + let mut digest = [0; 64]; + Sha2_512::hash(&mut digest, payload); + digest } -// Streaming API - This is the recommended one. -macro_rules! impl_streaming { - ($name:ident, $state:ty, $result:ty) => { - #[derive(Clone)] - pub struct $name { - state: $state, - } - impl $name { - /// Initialize a new digest state. - pub fn new() -> Self { - Self { - state: <$state>::new(), - } - } - - /// Add the `payload` to the digest. - pub fn update(&mut self, payload: &[u8]) { - self.state.update(payload); - } - - /// Get the digest. 
- /// - /// Note that the digest state can be continued to be used, to extend the - /// digest. - pub fn finish(&mut self) -> $result { - self.state.finish() - } - } - - impl Default for $name { - fn default() -> Self { - Self::new() - } - } - }; -} -impl_streaming!(Sha2_224, Sha224, Sha2_224Digest); -impl_streaming!(Sha2_384, Sha384, Sha2_384Digest); -impl_streaming!(Sha2_512, Sha512, Sha2_512Digest); - // Streaming API - This is the recommended one. // For implementations based on hacl_rs (over hacl-c) macro_rules! impl_streaming_hacl_rs { @@ -303,6 +266,11 @@ macro_rules! impl_streaming_hacl_rs { state: $state, } impl $name { + /// Return the digest for the given input byte slice, in immediate mode. + pub fn hash(digest: &mut [u8], input: &[u8]) { + <$state>::hash(digest, input) + } + /// Initialize a new digest state. pub fn new() -> Self { Self { @@ -322,6 +290,11 @@ macro_rules! impl_streaming_hacl_rs { pub fn finish(&self, digest: &mut $result) { self.state.finish(digest) } + + /// Reset the digest state. + pub fn reset(&mut self) { + self.state.reset() + } } impl Default for $name { @@ -332,11 +305,26 @@ macro_rules! impl_streaming_hacl_rs { }; } +impl_streaming_hacl_rs!( + Sha2_224, + crate::hacl_rs::hash_sha2::HaclRs_Sha2_Sha256_224, + Sha2_224Digest +); impl_streaming_hacl_rs!( Sha2_256, crate::hacl_rs::hash_sha2::HaclRs_Sha2_Sha256, Sha2_256Digest ); +impl_streaming_hacl_rs!( + Sha2_384, + crate::hacl_rs::hash_sha2::HaclRs_Sha2_Sha512_384, + Sha2_384Digest +); +impl_streaming_hacl_rs!( + Sha2_512, + crate::hacl_rs::hash_sha2::HaclRs_Sha2_Sha512, + Sha2_512Digest +); // SHAKE messages from SHA 3 #[cfg(simd256)] diff --git a/src/hacl_rs/hash_sha2.rs b/src/hacl_rs/hash_sha2.rs index 362deda67..6307289e5 100644 --- a/src/hacl_rs/hash_sha2.rs +++ b/src/hacl_rs/hash_sha2.rs @@ -1263,6 +1263,11 @@ macro_rules! impl_hash { } impl $name { + /// Return the digest for the given input byte slice, in immediate mode. 
+ pub fn hash(digest: &mut [u8], input: &[u8]) { + $hash(digest, input, input.len() as u32) + } + /// Initialize a new digest state for streaming use. pub fn new() -> $name { $name { state: $malloc() } @@ -1281,9 +1286,9 @@ macro_rules! impl_hash { $finish(self.state.as_ref(), digest); } - /// Return the digest for the given input byte slice, in immediate mode. - pub fn hash(digest: &mut [u8], input: &[u8]) { - $hash(digest, input, input.len() as u32) + /// Reset the digest state. + pub fn reset(&mut self) { + $reset(self.state.as_mut()); } } @@ -1302,9 +1307,43 @@ impl_hash!( 32, Box<[crate::hacl_rs::streaming_types::state_32]>, malloc_256, - wat, + reset_256, update_256, digest_256, copy_256, hash_256 ); +impl_hash!( + HaclRs_Sha2_Sha256_224, + 28, + Box<[crate::hacl_rs::streaming_types::state_32]>, + malloc_224, + reset_224, + update_224, + digest_224, + copy_256, + hash_224 +); + +impl_hash!( + HaclRs_Sha2_Sha512, + 64, + Box<[crate::hacl_rs::streaming_types::state_64]>, + malloc_512, + reset_512, + update_512, + digest_512, + copy_512, + hash_512 +); +impl_hash!( + HaclRs_Sha2_Sha512_384, + 48, + Box<[crate::hacl_rs::streaming_types::state_64]>, + malloc_384, + reset_384, + update_384, + digest_384, + copy_512, + hash_384 +); diff --git a/tests/sha2.rs b/tests/sha2.rs index ac1e44ded..9e66da586 100644 --- a/tests/sha2.rs +++ b/tests/sha2.rs @@ -28,8 +28,10 @@ fn sha2_clone() { let mut hasher224_2 = hasher_224.clone(); hasher_224.update(b"more 224"); hasher224_2.update(b"more 224"); - let digest = hasher_224.finish(); - let digest_2 = hasher224_2.finish(); + let mut digest = [0u8; 28]; + let mut digest_2 = [0u8; 28]; + hasher_224.finish(&mut digest); + hasher224_2.finish(&mut digest_2); assert_eq!(digest, digest_2); assert_eq!(digest, libcrux::digest::sha2_224(b"test 224more 224")); @@ -52,8 +54,10 @@ fn sha2_clone() { let mut hasher384_2 = hasher_384.clone(); hasher_384.update(b"more 384"); hasher384_2.update(b"more 384"); - let digest = hasher_384.finish(); - 
let digest_2 = hasher384_2.finish(); + let mut digest = [0u8; 48]; + let mut digest_2 = [0u8; 48]; + hasher_384.finish(&mut digest); + hasher384_2.finish(&mut digest_2); assert_eq!(digest, digest_2); assert_eq!(digest, libcrux::digest::sha2_384(b"test 384more 384")); @@ -63,8 +67,10 @@ fn sha2_clone() { let mut hasher512_2 = hasher_512.clone(); hasher_512.update(b"more 512"); hasher512_2.update(b"more 512"); - let digest = hasher_512.finish(); - let digest_2 = hasher512_2.finish(); + let mut digest = [0u8; 64]; + let mut digest_2 = [0u8; 64]; + hasher_512.finish(&mut digest); + hasher512_2.finish(&mut digest_2); assert_eq!(digest, digest_2); assert_eq!(digest, libcrux::digest::sha2_512(b"test 512more 512")); From eec025eca7afb66890435f0f388ac416ffdac8d9 Mon Sep 17 00:00:00 2001 From: "Jan Winkelmann (keks)" Date: Tue, 5 Nov 2024 16:50:23 +0100 Subject: [PATCH 04/18] completely remove hacl-c sha2 --- src/hacl.rs | 1 - src/hacl/sha2.rs | 185 ----------------------------------------------- 2 files changed, 186 deletions(-) delete mode 100644 src/hacl/sha2.rs diff --git a/src/hacl.rs b/src/hacl.rs index 3cd78a864..4dce96182 100644 --- a/src/hacl.rs +++ b/src/hacl.rs @@ -18,7 +18,6 @@ pub(crate) mod drbg; pub(crate) mod ed25519; pub(crate) mod p256; -pub(crate) mod sha2; pub(crate) mod sha3; /// Unified error type. diff --git a/src/hacl/sha2.rs b/src/hacl/sha2.rs deleted file mode 100644 index c824e2783..000000000 --- a/src/hacl/sha2.rs +++ /dev/null @@ -1,185 +0,0 @@ -use libcrux_hacl::{ - Hacl_Hash_SHA2_hash_224, Hacl_Hash_SHA2_hash_256, Hacl_Hash_SHA2_hash_384, - Hacl_Hash_SHA2_hash_512, -}; - -/// SHA2 224 -/// -/// Note the function panics when `payload` is larger than 2^32 bytes. 
-pub fn sha224(payload: &[u8]) -> [u8; 28] { - let mut digest = [0u8; 28]; - unsafe { - Hacl_Hash_SHA2_hash_224( - digest.as_mut_ptr(), - payload.as_ptr() as _, - payload.len().try_into().unwrap(), - ); - } - digest -} - -/// SHA2 256 -/// -/// Note the function panics when `payload` is larger than 2^32 bytes. -pub fn sha256(payload: &[u8]) -> [u8; 32] { - let mut digest = [0u8; 32]; - unsafe { - Hacl_Hash_SHA2_hash_256( - digest.as_mut_ptr(), - payload.as_ptr() as _, - payload.len().try_into().unwrap(), - ); - } - digest -} - -/// SHA2 384 -/// -/// Note the function panics when `payload` is larger than 2^32 bytes. -pub fn sha384(payload: &[u8]) -> [u8; 48] { - let mut digest = [0u8; 48]; - unsafe { - Hacl_Hash_SHA2_hash_384( - digest.as_mut_ptr(), - payload.as_ptr() as _, - payload.len().try_into().unwrap(), - ); - } - digest -} - -/// SHA2 512 -/// -/// Note the function panics when `payload` is larger than 2^32 bytes. -pub fn sha512(payload: &[u8]) -> [u8; 64] { - let mut digest = [0u8; 64]; - unsafe { - Hacl_Hash_SHA2_hash_512( - digest.as_mut_ptr(), - payload.as_ptr() as _, - payload.len().try_into().unwrap(), - ); - } - digest -} - -pub mod streaming { - use libcrux_hacl::{ - Hacl_Hash_SHA2_copy_256, Hacl_Hash_SHA2_copy_512, Hacl_Hash_SHA2_digest_224, - Hacl_Hash_SHA2_digest_256, Hacl_Hash_SHA2_digest_384, Hacl_Hash_SHA2_digest_512, - Hacl_Hash_SHA2_free_224, Hacl_Hash_SHA2_free_256, Hacl_Hash_SHA2_free_384, - Hacl_Hash_SHA2_free_512, Hacl_Hash_SHA2_malloc_224, Hacl_Hash_SHA2_malloc_256, - Hacl_Hash_SHA2_malloc_384, Hacl_Hash_SHA2_malloc_512, Hacl_Hash_SHA2_reset_224, - Hacl_Hash_SHA2_reset_256, Hacl_Hash_SHA2_reset_384, Hacl_Hash_SHA2_reset_512, - Hacl_Hash_SHA2_state_t_224, Hacl_Hash_SHA2_state_t_384, Hacl_Hash_SHA2_update_224, - Hacl_Hash_SHA2_update_256, Hacl_Hash_SHA2_update_384, Hacl_Hash_SHA2_update_512, - }; - - macro_rules! 
impl_streaming { - ($name:ident, $digest_size:literal, $state:ty, $malloc:expr, $reset:expr, $update:expr, $finish:expr, $free:expr, $copy:expr) => { - pub struct $name { - state: *mut $state, - } - - impl $name { - /// Initialize a new digest state. - pub fn new() -> $name { - let state = $name { - state: unsafe { $malloc() }, - }; - unsafe { $reset(state.state) }; - state - } - - /// Add the `payload` to the digest. - pub fn update(&mut self, payload: &[u8]) { - // Note that we don't really need mut here because the mutability is - // only in unsafe C code. - // But this way we force the borrow checker to do the right thing. - unsafe { $update(self.state, payload.as_ptr() as _, payload.len() as u32) }; - } - - /// Get the digest. - /// - /// Note that the digest state can be continued to be used, to extend the - /// digest. - pub fn finish(&mut self) -> [u8; $digest_size] { - // Note that we don't really need mut here because the mutability is - // only in unsafe C code. - // But this way we force the borrow checker to do the right thing. 
- let mut digest = [0u8; $digest_size]; - unsafe { - $finish(self.state, digest.as_mut_ptr()); - } - digest - } - } - - impl Drop for $name { - fn drop(&mut self) { - unsafe { $free(self.state) }; - } - } - - impl Clone for $name { - fn clone(&self) -> Self { - unsafe { - Self { - state: $copy(self.state), - } - } - } - } - - unsafe impl Send for $name {} - }; - } - - impl_streaming!( - Sha224, - 28, - Hacl_Hash_SHA2_state_t_224, - Hacl_Hash_SHA2_malloc_224, - Hacl_Hash_SHA2_reset_224, - Hacl_Hash_SHA2_update_224, - Hacl_Hash_SHA2_digest_224, - Hacl_Hash_SHA2_free_224, - Hacl_Hash_SHA2_copy_256 - ); - - impl_streaming!( - Sha256, - 32, - Hacl_Hash_SHA2_state_t_224, - Hacl_Hash_SHA2_malloc_256, - Hacl_Hash_SHA2_reset_256, - Hacl_Hash_SHA2_update_256, - Hacl_Hash_SHA2_digest_256, - Hacl_Hash_SHA2_free_256, - Hacl_Hash_SHA2_copy_256 - ); - - impl_streaming!( - Sha384, - 48, - Hacl_Hash_SHA2_state_t_384, - Hacl_Hash_SHA2_malloc_384, - Hacl_Hash_SHA2_reset_384, - Hacl_Hash_SHA2_update_384, - Hacl_Hash_SHA2_digest_384, - Hacl_Hash_SHA2_free_384, - Hacl_Hash_SHA2_copy_512 - ); - - impl_streaming!( - Sha512, - 64, - Hacl_Hash_SHA2_state_t_384, - Hacl_Hash_SHA2_malloc_512, - Hacl_Hash_SHA2_reset_512, - Hacl_Hash_SHA2_update_512, - Hacl_Hash_SHA2_digest_512, - Hacl_Hash_SHA2_free_512, - Hacl_Hash_SHA2_copy_512 - ); -} From 5e48543f0e016ea140f6db52aa6cde2d3541a6eb Mon Sep 17 00:00:00 2001 From: "Jan Winkelmann (keks)" Date: Tue, 5 Nov 2024 16:59:09 +0100 Subject: [PATCH 05/18] remove hash state wrapper for sha2 --- src/digest.rs | 69 +++------------------------------------- src/hacl_rs/hash_sha2.rs | 6 ++-- tests/sha2.rs | 4 +-- 3 files changed, 9 insertions(+), 70 deletions(-) diff --git a/src/digest.rs b/src/digest.rs index 4a561b97f..27a3f1291 100644 --- a/src/digest.rs +++ b/src/digest.rs @@ -259,72 +259,11 @@ pub fn sha2_512(payload: &[u8]) -> Sha2_512Digest { // Streaming API - This is the recommended one. 
// For implementations based on hacl_rs (over hacl-c) -macro_rules! impl_streaming_hacl_rs { - ($name:ident, $state:ty, $result:ty) => { - #[derive(Clone)] - pub struct $name { - state: $state, - } - impl $name { - /// Return the digest for the given input byte slice, in immediate mode. - pub fn hash(digest: &mut [u8], input: &[u8]) { - <$state>::hash(digest, input) - } - - /// Initialize a new digest state. - pub fn new() -> Self { - Self { - state: <$state>::new(), - } - } - - /// Add the `payload` to the digest. - pub fn update(&mut self, payload: &[u8]) { - self.state.update(payload); - } - - /// Get the digest. - /// - /// Note that the digest state can be continued to be used, to extend the - /// digest. - pub fn finish(&self, digest: &mut $result) { - self.state.finish(digest) - } - - /// Reset the digest state. - pub fn reset(&mut self) { - self.state.reset() - } - } - - impl Default for $name { - fn default() -> Self { - Self::new() - } - } - }; -} +pub use crate::hacl_rs::hash_sha2::HaclRs_Sha2_Sha256 as Sha2_256; +pub use crate::hacl_rs::hash_sha2::HaclRs_Sha2_Sha256_224 as Sha2_224; +pub use crate::hacl_rs::hash_sha2::HaclRs_Sha2_Sha512 as Sha2_512; +pub use crate::hacl_rs::hash_sha2::HaclRs_Sha2_Sha512_384 as Sha2_384; -impl_streaming_hacl_rs!( - Sha2_224, - crate::hacl_rs::hash_sha2::HaclRs_Sha2_Sha256_224, - Sha2_224Digest -); -impl_streaming_hacl_rs!( - Sha2_256, - crate::hacl_rs::hash_sha2::HaclRs_Sha2_Sha256, - Sha2_256Digest -); -impl_streaming_hacl_rs!( - Sha2_384, - crate::hacl_rs::hash_sha2::HaclRs_Sha2_Sha512_384, - Sha2_384Digest -); -impl_streaming_hacl_rs!( - Sha2_512, - crate::hacl_rs::hash_sha2::HaclRs_Sha2_Sha512, - Sha2_512Digest -); // SHAKE messages from SHA 3 #[cfg(simd256)] diff --git a/src/hacl_rs/hash_sha2.rs b/src/hacl_rs/hash_sha2.rs index 6307289e5..c6fa143e4 100644 --- a/src/hacl_rs/hash_sha2.rs +++ b/src/hacl_rs/hash_sha2.rs @@ -1264,8 +1264,8 @@ macro_rules! 
impl_hash { impl $name { /// Return the digest for the given input byte slice, in immediate mode. - pub fn hash(digest: &mut [u8], input: &[u8]) { - $hash(digest, input, input.len() as u32) + pub fn hash(digest: &mut [u8; $digest_size], payload: &[u8]) { + $hash(digest, payload, payload.len() as u32) } /// Initialize a new digest state for streaming use. @@ -1282,7 +1282,7 @@ macro_rules! impl_hash { /// /// Note that the digest state can be continued to be used, to extend the /// digest. - pub fn finish(&self, digest: &mut [u8]) { + pub fn finish(&self, digest: &mut [u8; $digest_size]) { $finish(self.state.as_ref(), digest); } diff --git a/tests/sha2.rs b/tests/sha2.rs index 9e66da586..6209410b6 100644 --- a/tests/sha2.rs +++ b/tests/sha2.rs @@ -9,7 +9,7 @@ fn sha256_kat_streaming() { digest.finish(&mut d); let expected = "8683520e19e5b33db33c8fb90918c0c96fcdfd9a17c695ce0f0ea2eaa0c95956"; - assert_eq!(hex::encode(&d), expected); + assert_eq!(hex::encode(d), expected); } // #[cfg_attr(target_arch = "wasm32", wasm_bindgen_test::wasm_bindgen_test)] @@ -18,7 +18,7 @@ fn sha256_kat_oneshot() { let d = libcrux::digest::sha2_256(b"libcrux sha2 256 tests"); let expected = "8683520e19e5b33db33c8fb90918c0c96fcdfd9a17c695ce0f0ea2eaa0c95956"; - assert_eq!(hex::encode(&d), expected); + assert_eq!(hex::encode(d), expected); } #[test] From cc2f9c2aea6d2cccc494de09c614e4ea0447938b Mon Sep 17 00:00:00 2001 From: "Jan Winkelmann (keks)" Date: Tue, 5 Nov 2024 17:26:49 +0100 Subject: [PATCH 06/18] move hacl-rs to own subcrate --- Cargo.lock | 9 +- Cargo.toml | 2 +- libcrux-hacl-rs/Cargo.toml | 11 + {src/hacl_rs => libcrux-hacl-rs/src}/fstar.rs | 0 .../src}/fstar/uint128.rs | 0 .../src}/fstar/uint16.rs | 0 .../src}/fstar/uint32.rs | 0 .../src}/fstar/uint64.rs | 0 .../src}/fstar/uint8.rs | 0 .../src}/hash_sha1.rs | 72 ++-- .../src}/hash_sha2.rs | 322 +++++++----------- {src/hacl_rs => libcrux-hacl-rs/src}/hkdf.rs | 86 ++--- {src/hacl_rs => libcrux-hacl-rs/src}/hmac.rs | 219 
++++++------ .../mod.rs => libcrux-hacl-rs/src/lib.rs | 8 +- .../src}/lowstar.rs | 0 .../src}/lowstar/endianness.rs | 0 .../src}/lowstar/ignore.rs | 0 .../src}/streaming_types.rs | 24 +- src/digest.rs | 96 +++++- src/lib.rs | 1 - 20 files changed, 407 insertions(+), 443 deletions(-) create mode 100644 libcrux-hacl-rs/Cargo.toml rename {src/hacl_rs => libcrux-hacl-rs/src}/fstar.rs (100%) rename {src/hacl_rs => libcrux-hacl-rs/src}/fstar/uint128.rs (100%) rename {src/hacl_rs => libcrux-hacl-rs/src}/fstar/uint16.rs (100%) rename {src/hacl_rs => libcrux-hacl-rs/src}/fstar/uint32.rs (100%) rename {src/hacl_rs => libcrux-hacl-rs/src}/fstar/uint64.rs (100%) rename {src/hacl_rs => libcrux-hacl-rs/src}/fstar/uint8.rs (100%) rename {src/hacl_rs => libcrux-hacl-rs/src}/hash_sha1.rs (83%) rename {src/hacl_rs => libcrux-hacl-rs/src}/hash_sha2.rs (80%) rename {src/hacl_rs => libcrux-hacl-rs/src}/hkdf.rs (86%) rename {src/hacl_rs => libcrux-hacl-rs/src}/hmac.rs (77%) rename src/hacl_rs/mod.rs => libcrux-hacl-rs/src/lib.rs (53%) rename {src/hacl_rs => libcrux-hacl-rs/src}/lowstar.rs (100%) rename {src/hacl_rs => libcrux-hacl-rs/src}/lowstar/endianness.rs (100%) rename {src/hacl_rs => libcrux-hacl-rs/src}/lowstar/ignore.rs (100%) rename {src/hacl_rs => libcrux-hacl-rs/src}/streaming_types.rs (63%) diff --git a/Cargo.lock b/Cargo.lock index 5a9dc41ce..36051777b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -940,7 +940,7 @@ dependencies = [ "libcrux", "libcrux-ecdh", "libcrux-hacl", - "libcrux-hacl-rs-krml", + "libcrux-hacl-rs", "libcrux-hkdf", "libcrux-hmac", "libcrux-kem", @@ -991,6 +991,13 @@ dependencies = [ "wasm-bindgen-test", ] +[[package]] +name = "libcrux-hacl-rs" +version = "0.1.0" +dependencies = [ + "libcrux-hacl-rs-krml", +] + [[package]] name = "libcrux-hacl-rs-krml" version = "0.1.0" diff --git a/Cargo.toml b/Cargo.toml index ae8f8bfc6..029c434c4 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -63,7 +63,7 @@ bench = false # so libtest doesn't eat the argumen libcrux-platform 
= { version = "=0.0.2-beta.2", path = "sys/platform" } [dependencies] -krml = { package = "libcrux-hacl-rs-krml", path = "libcrux-hacl-rs-krml" } +libcrux-hacl-rs = { path = "libcrux-hacl-rs" } libcrux-hacl = { version = "=0.0.2-beta.2", path = "sys/hacl" } libcrux-platform = { version = "=0.0.2-beta.2", path = "sys/platform" } libcrux-hkdf = { version = "=0.0.2-beta.2", path = "libcrux-hkdf" } diff --git a/libcrux-hacl-rs/Cargo.toml b/libcrux-hacl-rs/Cargo.toml new file mode 100644 index 000000000..7f0fbc790 --- /dev/null +++ b/libcrux-hacl-rs/Cargo.toml @@ -0,0 +1,11 @@ +[package] +name = "libcrux-hacl-rs" +version = "0.1.0" +edition = "2021" + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] +krml = { package = "libcrux-hacl-rs-krml", path = "../libcrux-hacl-rs-krml" } + +[lib] diff --git a/src/hacl_rs/fstar.rs b/libcrux-hacl-rs/src/fstar.rs similarity index 100% rename from src/hacl_rs/fstar.rs rename to libcrux-hacl-rs/src/fstar.rs diff --git a/src/hacl_rs/fstar/uint128.rs b/libcrux-hacl-rs/src/fstar/uint128.rs similarity index 100% rename from src/hacl_rs/fstar/uint128.rs rename to libcrux-hacl-rs/src/fstar/uint128.rs diff --git a/src/hacl_rs/fstar/uint16.rs b/libcrux-hacl-rs/src/fstar/uint16.rs similarity index 100% rename from src/hacl_rs/fstar/uint16.rs rename to libcrux-hacl-rs/src/fstar/uint16.rs diff --git a/src/hacl_rs/fstar/uint32.rs b/libcrux-hacl-rs/src/fstar/uint32.rs similarity index 100% rename from src/hacl_rs/fstar/uint32.rs rename to libcrux-hacl-rs/src/fstar/uint32.rs diff --git a/src/hacl_rs/fstar/uint64.rs b/libcrux-hacl-rs/src/fstar/uint64.rs similarity index 100% rename from src/hacl_rs/fstar/uint64.rs rename to libcrux-hacl-rs/src/fstar/uint64.rs diff --git a/src/hacl_rs/fstar/uint8.rs b/libcrux-hacl-rs/src/fstar/uint8.rs similarity index 100% rename from src/hacl_rs/fstar/uint8.rs rename to libcrux-hacl-rs/src/fstar/uint8.rs diff --git a/src/hacl_rs/hash_sha1.rs 
b/libcrux-hacl-rs/src/hash_sha1.rs similarity index 83% rename from src/hacl_rs/hash_sha1.rs rename to libcrux-hacl-rs/src/hash_sha1.rs index 2c6d4ffdf..8df310ae5 100644 --- a/src/hacl_rs/hash_sha1.rs +++ b/libcrux-hacl-rs/src/hash_sha1.rs @@ -20,7 +20,7 @@ pub(crate) fn init(s: &mut [u32]) { "i", 0u32, 1u32, - s[i as usize] = (&crate::hacl_rs::hash_sha1::_h0)[i as usize] + s[i as usize] = (&crate::hash_sha1::_h0)[i as usize] ) } @@ -130,7 +130,7 @@ pub(crate) fn update_multi(s: &mut [u32], blocks: &[u8], n_blocks: u32) { for i in 0u32..n_blocks { let sz: u32 = 64u32; let block: (&[u8], &[u8]) = blocks.split_at(sz.wrapping_mul(i) as usize); - crate::hacl_rs::hash_sha1::update(s, block.1) + crate::hash_sha1::update(s, block.1) } } @@ -140,7 +140,7 @@ pub(crate) fn update_last(s: &mut [u32], prev_len: u64, input: &[u8], input_len: let blocks: (&[u8], &[u8]) = input.split_at(0usize); let rest_len: u32 = input_len.wrapping_sub(blocks_len); let rest: (&[u8], &[u8]) = blocks.1.split_at(blocks_len as usize); - crate::hacl_rs::hash_sha1::update_multi(s, rest.0, blocks_n); + crate::hash_sha1::update_multi(s, rest.0, blocks_n); let total_input_len: u64 = prev_len.wrapping_add(input_len as u64); let pad_len: u32 = 1u32 .wrapping_add( @@ -155,8 +155,8 @@ pub(crate) fn update_last(s: &mut [u32], prev_len: u64, input: &[u8], input_len: let tmp_rest: (&mut [u8], &mut [u8]) = tmp.1.split_at_mut(0usize); let tmp_pad: (&mut [u8], &mut [u8]) = tmp_rest.1.split_at_mut(rest_len as usize); (tmp_pad.0[0usize..rest_len as usize]).copy_from_slice(&rest.1[0usize..rest_len as usize]); - crate::hacl_rs::hash_sha1::pad(total_input_len, tmp_pad.1); - crate::hacl_rs::hash_sha1::update_multi(s, tmp.1, tmp_len.wrapping_div(64u32)) + crate::hash_sha1::pad(total_input_len, tmp_pad.1); + crate::hash_sha1::update_multi(s, tmp.1, tmp_len.wrapping_div(64u32)) } pub(crate) fn hash_oneshot(output: &mut [u8], input: &[u8], input_len: u32) { @@ -182,29 +182,29 @@ pub(crate) fn hash_oneshot(output: &mut 
[u8], input: &[u8], input_len: u32) { let blocks0: &[u8] = rest.0; let rest_len0: u32 = rest_len; let rest0: &[u8] = rest.1; - crate::hacl_rs::hash_sha1::update_multi(&mut s, blocks0, blocks_n0); - crate::hacl_rs::hash_sha1::update_last(&mut s, blocks_len0 as u64, rest0, rest_len0); - crate::hacl_rs::hash_sha1::finish(&s, output) + crate::hash_sha1::update_multi(&mut s, blocks0, blocks_n0); + crate::hash_sha1::update_last(&mut s, blocks_len0 as u64, rest0, rest_len0); + crate::hash_sha1::finish(&s, output) } -pub type state_t = crate::hacl_rs::streaming_types::state_32; +pub type state_t = crate::streaming_types::state_32; -pub fn malloc() -> Box<[crate::hacl_rs::streaming_types::state_32]> { +pub fn malloc() -> Box<[crate::streaming_types::state_32]> { let buf: Box<[u8]> = vec![0u8; 64usize].into_boxed_slice(); let mut block_state: Box<[u32]> = vec![0u32; 5usize].into_boxed_slice(); - crate::hacl_rs::hash_sha1::init(&mut block_state); - let s: crate::hacl_rs::streaming_types::state_32 = crate::hacl_rs::streaming_types::state_32 { + crate::hash_sha1::init(&mut block_state); + let s: crate::streaming_types::state_32 = crate::streaming_types::state_32 { block_state, buf, total_len: 0u32 as u64, }; - let p: Box<[crate::hacl_rs::streaming_types::state_32]> = vec![s].into_boxed_slice(); + let p: Box<[crate::streaming_types::state_32]> = vec![s].into_boxed_slice(); p } -pub fn reset(state: &mut [crate::hacl_rs::streaming_types::state_32]) { +pub fn reset(state: &mut [crate::streaming_types::state_32]) { let block_state: &mut [u32] = &mut (state[0usize]).block_state; - crate::hacl_rs::hash_sha1::init(block_state); + crate::hash_sha1::init(block_state); let total_len: u64 = 0u32 as u64; (state[0usize]).total_len = total_len } @@ -213,14 +213,14 @@ pub fn reset(state: &mut [crate::hacl_rs::streaming_types::state_32]) { 0 = success, 1 = max length exceeded */ pub fn update0( - state: &mut [crate::hacl_rs::streaming_types::state_32], + state: &mut 
[crate::streaming_types::state_32], chunk: &[u8], chunk_len: u32, -) -> crate::hacl_rs::streaming_types::error_code { +) -> crate::streaming_types::error_code { let block_state: &mut [u32] = &mut (state[0usize]).block_state; let total_len: u64 = (state[0usize]).total_len; if chunk_len as u64 > 2305843009213693951u64.wrapping_sub(total_len) { - crate::hacl_rs::streaming_types::error_code::MaximumLengthExceeded + crate::streaming_types::error_code::MaximumLengthExceeded } else { let sz: u32 = if total_len.wrapping_rem(64u32 as u64) == 0u64 && total_len > 0u64 { 64u32 @@ -249,7 +249,7 @@ pub fn update0( total_len1.wrapping_rem(64u32 as u64) as u32 }; if sz1 != 0u32 { - crate::hacl_rs::hash_sha1::update_multi(block_state, buf, 1u32) + crate::hash_sha1::update_multi(block_state, buf, 1u32) }; let ite: u32 = if (chunk_len as u64).wrapping_rem(64u32 as u64) == 0u64 && chunk_len as u64 > 0u64 @@ -263,11 +263,7 @@ pub fn update0( let data2_len: u32 = chunk_len.wrapping_sub(data1_len); let data1: (&[u8], &[u8]) = chunk.split_at(0usize); let data2: (&[u8], &[u8]) = data1.1.split_at(data1_len as usize); - crate::hacl_rs::hash_sha1::update_multi( - block_state, - data2.0, - data1_len.wrapping_div(64u32), - ); + crate::hash_sha1::update_multi(block_state, data2.0, data1_len.wrapping_div(64u32)); let dst: (&mut [u8], &mut [u8]) = buf.split_at_mut(0usize); (dst.1[0usize..data2_len as usize]) .copy_from_slice(&data2.1[0usize..data2_len as usize]); @@ -296,7 +292,7 @@ pub fn update0( total_len10.wrapping_rem(64u32 as u64) as u32 }; if sz10 != 0u32 { - crate::hacl_rs::hash_sha1::update_multi(block_state, buf0, 1u32) + crate::hash_sha1::update_multi(block_state, buf0, 1u32) }; let ite: u32 = if (chunk_len.wrapping_sub(diff) as u64).wrapping_rem(64u32 as u64) == 0u64 @@ -314,22 +310,18 @@ pub fn update0( let data2_len: u32 = chunk_len.wrapping_sub(diff).wrapping_sub(data1_len); let data1: (&[u8], &[u8]) = chunk2.1.split_at(0usize); let data2: (&[u8], &[u8]) = data1.1.split_at(data1_len 
as usize); - crate::hacl_rs::hash_sha1::update_multi( - block_state, - data2.0, - data1_len.wrapping_div(64u32), - ); + crate::hash_sha1::update_multi(block_state, data2.0, data1_len.wrapping_div(64u32)); let dst: (&mut [u8], &mut [u8]) = buf0.split_at_mut(0usize); (dst.1[0usize..data2_len as usize]) .copy_from_slice(&data2.1[0usize..data2_len as usize]); (state[0usize]).total_len = total_len10.wrapping_add(chunk_len.wrapping_sub(diff) as u64) }; - crate::hacl_rs::streaming_types::error_code::Success + crate::streaming_types::error_code::Success } } -pub fn digest(state: &[crate::hacl_rs::streaming_types::state_32], output: &mut [u8]) { +pub fn digest(state: &[crate::streaming_types::state_32], output: &mut [u8]) { let block_state: &[u32] = &(state[0usize]).block_state; let buf_: &[u8] = &(state[0usize]).buf; let total_len: u64 = (state[0usize]).total_len; @@ -348,15 +340,13 @@ pub fn digest(state: &[crate::hacl_rs::streaming_types::state_32], output: &mut r.wrapping_rem(64u32) }; let buf_last: (&[u8], &[u8]) = buf_multi.1.split_at(r.wrapping_sub(ite) as usize); - crate::hacl_rs::hash_sha1::update_multi(&mut tmp_block_state, buf_last.0, 0u32); + crate::hash_sha1::update_multi(&mut tmp_block_state, buf_last.0, 0u32); let prev_len_last: u64 = total_len.wrapping_sub(r as u64); - crate::hacl_rs::hash_sha1::update_last(&mut tmp_block_state, prev_len_last, buf_last.1, r); - crate::hacl_rs::hash_sha1::finish(&tmp_block_state, output) + crate::hash_sha1::update_last(&mut tmp_block_state, prev_len_last, buf_last.1, r); + crate::hash_sha1::finish(&tmp_block_state, output) } -pub fn copy( - state: &[crate::hacl_rs::streaming_types::state_32], -) -> Box<[crate::hacl_rs::streaming_types::state_32]> { +pub fn copy(state: &[crate::streaming_types::state_32]) -> Box<[crate::streaming_types::state_32]> { let block_state0: &[u32] = &(state[0usize]).block_state; let buf0: &[u8] = &(state[0usize]).buf; let total_len0: u64 = (state[0usize]).total_len; @@ -364,15 +354,15 @@ pub fn copy( 
((&mut buf)[0usize..64usize]).copy_from_slice(&buf0[0usize..64usize]); let mut block_state: Box<[u32]> = vec![0u32; 5usize].into_boxed_slice(); ((&mut block_state)[0usize..5usize]).copy_from_slice(&block_state0[0usize..5usize]); - let s: crate::hacl_rs::streaming_types::state_32 = crate::hacl_rs::streaming_types::state_32 { + let s: crate::streaming_types::state_32 = crate::streaming_types::state_32 { block_state, buf, total_len: total_len0, }; - let p: Box<[crate::hacl_rs::streaming_types::state_32]> = vec![s].into_boxed_slice(); + let p: Box<[crate::streaming_types::state_32]> = vec![s].into_boxed_slice(); p } pub fn hash(output: &mut [u8], input: &[u8], input_len: u32) { - crate::hacl_rs::hash_sha1::hash_oneshot(output, input, input_len) + crate::hash_sha1::hash_oneshot(output, input, input_len) } diff --git a/src/hacl_rs/hash_sha2.rs b/libcrux-hacl-rs/src/hash_sha2.rs similarity index 80% rename from src/hacl_rs/hash_sha2.rs rename to libcrux-hacl-rs/src/hash_sha2.rs index c6fa143e4..56aa9eef4 100644 --- a/src/hacl_rs/hash_sha2.rs +++ b/libcrux-hacl-rs/src/hash_sha2.rs @@ -203,7 +203,7 @@ pub(crate) const k384_512: [u64; 80] = [ pub(crate) fn sha256_init(hash: &mut [u32]) { krml::unroll_for!(8, "i", 0u32, 1u32, { - let x: u32 = (&crate::hacl_rs::hash_sha2::h256)[i as usize]; + let x: u32 = (&crate::hash_sha2::h256)[i as usize]; let os: (&mut [u32], &mut [u32]) = hash.split_at_mut(0usize); os.1[i as usize] = x }) @@ -249,8 +249,8 @@ fn sha256_update(b: &[u8], hash: &mut [u32]) { (&mut ws)[15usize] = u14; krml::unroll_for!(4, "i", 0u32, 1u32, { krml::unroll_for!(16, "i0", 0u32, 1u32, { - let k_t: u32 = (&crate::hacl_rs::hash_sha2::k224_256) - [16u32.wrapping_mul(i).wrapping_add(i0) as usize]; + let k_t: u32 = + (&crate::hash_sha2::k224_256)[16u32.wrapping_mul(i).wrapping_add(i0) as usize]; let ws_t: u32 = (&ws)[i0 as usize]; let a0: u32 = hash[0usize]; let b0: u32 = hash[1usize]; @@ -318,7 +318,7 @@ pub(crate) fn sha256_update_nblocks(len: u32, b: &[u8], st: &mut 
[u32]) { for i in 0u32..blocks { let b0: &[u8] = b; let mb: (&[u8], &[u8]) = b0.split_at(i.wrapping_mul(64u32) as usize); - crate::hacl_rs::hash_sha2::sha256_update(mb.1, st) + crate::hash_sha2::sha256_update(mb.1, st) } } @@ -346,9 +346,9 @@ pub(crate) fn sha256_update_last(totlen: u64, len: u32, b: &[u8], hash: &mut [u3 let lb1: &[u8] = l1; let last00: &[u8] = lb0; let last10: &[u8] = lb1; - crate::hacl_rs::hash_sha2::sha256_update(last00, hash); + crate::hash_sha2::sha256_update(last00, hash); if blocks > 1u32 { - crate::hacl_rs::hash_sha2::sha256_update(last10, hash) + crate::hash_sha2::sha256_update(last10, hash) } } @@ -370,7 +370,7 @@ pub(crate) fn sha256_finish(st: &[u32], h: &mut [u8]) { #[inline] fn sha224_init(hash: &mut [u32]) { krml::unroll_for!(8, "i", 0u32, 1u32, { - let x: u32 = (&crate::hacl_rs::hash_sha2::h224)[i as usize]; + let x: u32 = (&crate::hash_sha2::h224)[i as usize]; let os: (&mut [u32], &mut [u32]) = hash.split_at_mut(0usize); os.1[i as usize] = x }) @@ -378,11 +378,11 @@ fn sha224_init(hash: &mut [u32]) { #[inline] fn sha224_update_nblocks(len: u32, b: &[u8], st: &mut [u32]) { - crate::hacl_rs::hash_sha2::sha256_update_nblocks(len, b, st) + crate::hash_sha2::sha256_update_nblocks(len, b, st) } fn sha224_update_last(totlen: u64, len: u32, b: &[u8], st: &mut [u32]) { - crate::hacl_rs::hash_sha2::sha256_update_last(totlen, len, b, st) + crate::hash_sha2::sha256_update_last(totlen, len, b, st) } #[inline] @@ -403,7 +403,7 @@ fn sha224_finish(st: &[u32], h: &mut [u8]) { pub(crate) fn sha512_init(hash: &mut [u64]) { krml::unroll_for!(8, "i", 0u32, 1u32, { - let x: u64 = (&crate::hacl_rs::hash_sha2::h512)[i as usize]; + let x: u64 = (&crate::hash_sha2::h512)[i as usize]; let os: (&mut [u64], &mut [u64]) = hash.split_at_mut(0usize); os.1[i as usize] = x }) @@ -449,8 +449,8 @@ fn sha512_update(b: &[u8], hash: &mut [u64]) { (&mut ws)[15usize] = u14; krml::unroll_for!(5, "i", 0u32, 1u32, { krml::unroll_for!(16, "i0", 0u32, 1u32, { - let k_t: u64 
= (&crate::hacl_rs::hash_sha2::k384_512) - [16u32.wrapping_mul(i).wrapping_add(i0) as usize]; + let k_t: u64 = + (&crate::hash_sha2::k384_512)[16u32.wrapping_mul(i).wrapping_add(i0) as usize]; let ws_t: u64 = (&ws)[i0 as usize]; let a0: u64 = hash[0usize]; let b0: u64 = hash[1usize]; @@ -517,7 +517,7 @@ pub(crate) fn sha512_update_nblocks(len: u32, b: &[u8], st: &mut [u64]) { for i in 0u32..blocks { let b0: &[u8] = b; let mb: (&[u8], &[u8]) = b0.split_at(i.wrapping_mul(128u32) as usize); - crate::hacl_rs::hash_sha2::sha512_update(mb.1, st) + crate::hash_sha2::sha512_update(mb.1, st) } } @@ -550,9 +550,9 @@ pub(crate) fn sha512_update_last( let lb1: &[u8] = l1; let last00: &[u8] = lb0; let last10: &[u8] = lb1; - crate::hacl_rs::hash_sha2::sha512_update(last00, hash); + crate::hash_sha2::sha512_update(last00, hash); if blocks > 1u32 { - crate::hacl_rs::hash_sha2::sha512_update(last10, hash) + crate::hash_sha2::sha512_update(last10, hash) } } @@ -573,14 +573,14 @@ pub(crate) fn sha512_finish(st: &[u64], h: &mut [u8]) { pub(crate) fn sha384_init(hash: &mut [u64]) { krml::unroll_for!(8, "i", 0u32, 1u32, { - let x: u64 = (&crate::hacl_rs::hash_sha2::h384)[i as usize]; + let x: u64 = (&crate::hash_sha2::h384)[i as usize]; let os: (&mut [u64], &mut [u64]) = hash.split_at_mut(0usize); os.1[i as usize] = x }) } pub(crate) fn sha384_update_nblocks(len: u32, b: &[u8], st: &mut [u64]) { - crate::hacl_rs::hash_sha2::sha512_update_nblocks(len, b, st) + crate::hash_sha2::sha512_update_nblocks(len, b, st) } pub(crate) fn sha384_update_last( @@ -589,7 +589,7 @@ pub(crate) fn sha384_update_last( b: &[u8], st: &mut [u64], ) { - crate::hacl_rs::hash_sha2::sha512_update_last(totlen, len, b, st) + crate::hash_sha2::sha512_update_last(totlen, len, b, st) } pub(crate) fn sha384_finish(st: &[u64], h: &mut [u8]) { @@ -607,28 +607,28 @@ pub(crate) fn sha384_finish(st: &[u64], h: &mut [u8]) { (h[0usize..48usize]).copy_from_slice(&(&(&hbuf)[0usize..])[0usize..48usize]) } -pub type state_t_224 = 
crate::hacl_rs::streaming_types::state_32; +pub type state_t_224 = crate::streaming_types::state_32; -pub type state_t_256 = crate::hacl_rs::streaming_types::state_32; +pub type state_t_256 = crate::streaming_types::state_32; -pub type state_t_384 = crate::hacl_rs::streaming_types::state_64; +pub type state_t_384 = crate::streaming_types::state_64; -pub type state_t_512 = crate::hacl_rs::streaming_types::state_64; +pub type state_t_512 = crate::streaming_types::state_64; /** Allocate initial state for the SHA2_256 hash. The state is to be freed by calling `free_256`. */ -pub fn malloc_256() -> Box<[crate::hacl_rs::streaming_types::state_32]> { +pub fn malloc_256() -> Box<[crate::streaming_types::state_32]> { let buf: Box<[u8]> = vec![0u8; 64usize].into_boxed_slice(); let mut block_state: Box<[u32]> = vec![0u32; 8usize].into_boxed_slice(); - crate::hacl_rs::hash_sha2::sha256_init(&mut block_state); - let s: crate::hacl_rs::streaming_types::state_32 = crate::hacl_rs::streaming_types::state_32 { + crate::hash_sha2::sha256_init(&mut block_state); + let s: crate::streaming_types::state_32 = crate::streaming_types::state_32 { block_state, buf, total_len: 0u32 as u64, }; - let p: Box<[crate::hacl_rs::streaming_types::state_32]> = vec![s].into_boxed_slice(); + let p: Box<[crate::streaming_types::state_32]> = vec![s].into_boxed_slice(); p } @@ -639,8 +639,8 @@ useful, for instance, if your control-flow diverges and you need to feed more (different) data into the hash in each branch. 
*/ pub fn copy_256( - state: &[crate::hacl_rs::streaming_types::state_32], -) -> Box<[crate::hacl_rs::streaming_types::state_32]> { + state: &[crate::streaming_types::state_32], +) -> Box<[crate::streaming_types::state_32]> { let block_state0: &[u32] = &(state[0usize]).block_state; let buf0: &[u8] = &(state[0usize]).buf; let total_len0: u64 = (state[0usize]).total_len; @@ -648,35 +648,35 @@ pub fn copy_256( ((&mut buf)[0usize..64usize]).copy_from_slice(&buf0[0usize..64usize]); let mut block_state: Box<[u32]> = vec![0u32; 8usize].into_boxed_slice(); ((&mut block_state)[0usize..8usize]).copy_from_slice(&block_state0[0usize..8usize]); - let s: crate::hacl_rs::streaming_types::state_32 = crate::hacl_rs::streaming_types::state_32 { + let s: crate::streaming_types::state_32 = crate::streaming_types::state_32 { block_state, buf, total_len: total_len0, }; - let p: Box<[crate::hacl_rs::streaming_types::state_32]> = vec![s].into_boxed_slice(); + let p: Box<[crate::streaming_types::state_32]> = vec![s].into_boxed_slice(); p } /** Reset an existing state to the initial hash state with empty data. 
*/ -pub fn reset_256(state: &mut [crate::hacl_rs::streaming_types::state_32]) { +pub fn reset_256(state: &mut [crate::streaming_types::state_32]) { let block_state: &mut [u32] = &mut (state[0usize]).block_state; - crate::hacl_rs::hash_sha2::sha256_init(block_state); + crate::hash_sha2::sha256_init(block_state); let total_len: u64 = 0u32 as u64; (state[0usize]).total_len = total_len } #[inline] fn update_224_256( - state: &mut [crate::hacl_rs::streaming_types::state_32], + state: &mut [crate::streaming_types::state_32], chunk: &[u8], chunk_len: u32, -) -> crate::hacl_rs::streaming_types::error_code { +) -> crate::streaming_types::error_code { let block_state: &mut [u32] = &mut (state[0usize]).block_state; let total_len: u64 = (state[0usize]).total_len; if chunk_len as u64 > 2305843009213693951u64.wrapping_sub(total_len) { - crate::hacl_rs::streaming_types::error_code::MaximumLengthExceeded + crate::streaming_types::error_code::MaximumLengthExceeded } else { let sz: u32 = if total_len.wrapping_rem(64u32 as u64) == 0u64 && total_len > 0u64 { 64u32 @@ -705,7 +705,7 @@ fn update_224_256( total_len1.wrapping_rem(64u32 as u64) as u32 }; if sz1 != 0u32 { - crate::hacl_rs::hash_sha2::sha256_update_nblocks(64u32, buf, block_state) + crate::hash_sha2::sha256_update_nblocks(64u32, buf, block_state) }; let ite: u32 = if (chunk_len as u64).wrapping_rem(64u32 as u64) == 0u64 && chunk_len as u64 > 0u64 @@ -719,7 +719,7 @@ fn update_224_256( let data2_len: u32 = chunk_len.wrapping_sub(data1_len); let data1: (&[u8], &[u8]) = chunk.split_at(0usize); let data2: (&[u8], &[u8]) = data1.1.split_at(data1_len as usize); - crate::hacl_rs::hash_sha2::sha256_update_nblocks( + crate::hash_sha2::sha256_update_nblocks( data1_len.wrapping_div(64u32).wrapping_mul(64u32), data2.0, block_state, @@ -752,7 +752,7 @@ fn update_224_256( total_len10.wrapping_rem(64u32 as u64) as u32 }; if sz10 != 0u32 { - crate::hacl_rs::hash_sha2::sha256_update_nblocks(64u32, buf0, block_state) + 
crate::hash_sha2::sha256_update_nblocks(64u32, buf0, block_state) }; let ite: u32 = if (chunk_len.wrapping_sub(diff) as u64).wrapping_rem(64u32 as u64) == 0u64 @@ -770,7 +770,7 @@ fn update_224_256( let data2_len: u32 = chunk_len.wrapping_sub(diff).wrapping_sub(data1_len); let data1: (&[u8], &[u8]) = chunk2.1.split_at(0usize); let data2: (&[u8], &[u8]) = data1.1.split_at(data1_len as usize); - crate::hacl_rs::hash_sha2::sha256_update_nblocks( + crate::hash_sha2::sha256_update_nblocks( data1_len.wrapping_div(64u32).wrapping_mul(64u32), data2.0, block_state, @@ -781,7 +781,7 @@ fn update_224_256( (state[0usize]).total_len = total_len10.wrapping_add(chunk_len.wrapping_sub(diff) as u64) }; - crate::hacl_rs::streaming_types::error_code::Success + crate::streaming_types::error_code::Success } } @@ -793,11 +793,11 @@ success, or 1 if the combined length of all of the data passed to `update_256` This function is identical to the update function for SHA2_224. */ pub fn update_256( - state: &mut [crate::hacl_rs::streaming_types::state_32], + state: &mut [crate::streaming_types::state_32], input: &[u8], input_len: u32, -) -> crate::hacl_rs::streaming_types::error_code { - crate::hacl_rs::hash_sha2::update_224_256(state, input, input_len) +) -> crate::streaming_types::error_code { + crate::hash_sha2::update_224_256(state, input, input_len) } /** @@ -806,7 +806,7 @@ valid after a call to `digest_256`, meaning the user may feed more data into the hash via `update_256`. (The digest_256 function operates on an internal copy of the state and therefore does not invalidate the client-held state `p`.) 
*/ -pub fn digest_256(state: &[crate::hacl_rs::streaming_types::state_32], output: &mut [u8]) { +pub fn digest_256(state: &[crate::streaming_types::state_32], output: &mut [u8]) { let block_state: &[u32] = &(state[0usize]).block_state; let buf_: &[u8] = &(state[0usize]).buf; let total_len: u64 = (state[0usize]).total_len; @@ -825,15 +825,15 @@ pub fn digest_256(state: &[crate::hacl_rs::streaming_types::state_32], output: & r.wrapping_rem(64u32) }; let buf_last: (&[u8], &[u8]) = buf_multi.1.split_at(r.wrapping_sub(ite) as usize); - crate::hacl_rs::hash_sha2::sha256_update_nblocks(0u32, buf_last.0, &mut tmp_block_state); + crate::hash_sha2::sha256_update_nblocks(0u32, buf_last.0, &mut tmp_block_state); let prev_len_last: u64 = total_len.wrapping_sub(r as u64); - crate::hacl_rs::hash_sha2::sha256_update_last( + crate::hash_sha2::sha256_update_last( prev_len_last.wrapping_add(r as u64), r, buf_last.1, &mut tmp_block_state, ); - crate::hacl_rs::hash_sha2::sha256_finish(&tmp_block_state, output) + crate::hash_sha2::sha256_finish(&tmp_block_state, output) } /** @@ -843,43 +843,43 @@ pub fn hash_256(output: &mut [u8], input: &[u8], input_len: u32) { let ib: &[u8] = input; let rb: &mut [u8] = output; let mut st: [u32; 8] = [0u32; 8usize]; - crate::hacl_rs::hash_sha2::sha256_init(&mut st); + crate::hash_sha2::sha256_init(&mut st); let rem: u32 = input_len.wrapping_rem(64u32); let lenĀ·: u64 = input_len as u64; - crate::hacl_rs::hash_sha2::sha256_update_nblocks(input_len, ib, &mut st); + crate::hash_sha2::sha256_update_nblocks(input_len, ib, &mut st); let rem1: u32 = input_len.wrapping_rem(64u32); let b0: &[u8] = ib; let lb: (&[u8], &[u8]) = b0.split_at(input_len.wrapping_sub(rem1) as usize); - crate::hacl_rs::hash_sha2::sha256_update_last(lenĀ·, rem, lb.1, &mut st); - crate::hacl_rs::hash_sha2::sha256_finish(&st, rb) + crate::hash_sha2::sha256_update_last(lenĀ·, rem, lb.1, &mut st); + crate::hash_sha2::sha256_finish(&st, rb) } -pub fn malloc_224() -> 
Box<[crate::hacl_rs::streaming_types::state_32]> { +pub fn malloc_224() -> Box<[crate::streaming_types::state_32]> { let buf: Box<[u8]> = vec![0u8; 64usize].into_boxed_slice(); let mut block_state: Box<[u32]> = vec![0u32; 8usize].into_boxed_slice(); - crate::hacl_rs::hash_sha2::sha224_init(&mut block_state); - let s: crate::hacl_rs::streaming_types::state_32 = crate::hacl_rs::streaming_types::state_32 { + crate::hash_sha2::sha224_init(&mut block_state); + let s: crate::streaming_types::state_32 = crate::streaming_types::state_32 { block_state, buf, total_len: 0u32 as u64, }; - let p: Box<[crate::hacl_rs::streaming_types::state_32]> = vec![s].into_boxed_slice(); + let p: Box<[crate::streaming_types::state_32]> = vec![s].into_boxed_slice(); p } -pub fn reset_224(state: &mut [crate::hacl_rs::streaming_types::state_32]) { +pub fn reset_224(state: &mut [crate::streaming_types::state_32]) { let block_state: &mut [u32] = &mut (state[0usize]).block_state; - crate::hacl_rs::hash_sha2::sha224_init(block_state); + crate::hash_sha2::sha224_init(block_state); let total_len: u64 = 0u32 as u64; (state[0usize]).total_len = total_len } pub fn update_224( - state: &mut [crate::hacl_rs::streaming_types::state_32], + state: &mut [crate::streaming_types::state_32], input: &[u8], input_len: u32, -) -> crate::hacl_rs::streaming_types::error_code { - crate::hacl_rs::hash_sha2::update_224_256(state, input, input_len) +) -> crate::streaming_types::error_code { + crate::hash_sha2::update_224_256(state, input, input_len) } /** @@ -887,7 +887,7 @@ Write the resulting hash into `output`, an array of 28 bytes. The state remains valid after a call to `digest_224`, meaning the user may feed more data into the hash via `update_224`. 
*/ -pub fn digest_224(state: &[crate::hacl_rs::streaming_types::state_32], output: &mut [u8]) { +pub fn digest_224(state: &[crate::streaming_types::state_32], output: &mut [u8]) { let block_state: &[u32] = &(state[0usize]).block_state; let buf_: &[u8] = &(state[0usize]).buf; let total_len: u64 = (state[0usize]).total_len; @@ -906,15 +906,15 @@ pub fn digest_224(state: &[crate::hacl_rs::streaming_types::state_32], output: & r.wrapping_rem(64u32) }; let buf_last: (&[u8], &[u8]) = buf_multi.1.split_at(r.wrapping_sub(ite) as usize); - crate::hacl_rs::hash_sha2::sha224_update_nblocks(0u32, buf_last.0, &mut tmp_block_state); + crate::hash_sha2::sha224_update_nblocks(0u32, buf_last.0, &mut tmp_block_state); let prev_len_last: u64 = total_len.wrapping_sub(r as u64); - crate::hacl_rs::hash_sha2::sha224_update_last( + crate::hash_sha2::sha224_update_last( prev_len_last.wrapping_add(r as u64), r, buf_last.1, &mut tmp_block_state, ); - crate::hacl_rs::hash_sha2::sha224_finish(&tmp_block_state, output) + crate::hash_sha2::sha224_finish(&tmp_block_state, output) } /** @@ -924,27 +924,27 @@ pub fn hash_224(output: &mut [u8], input: &[u8], input_len: u32) { let ib: &[u8] = input; let rb: &mut [u8] = output; let mut st: [u32; 8] = [0u32; 8usize]; - crate::hacl_rs::hash_sha2::sha224_init(&mut st); + crate::hash_sha2::sha224_init(&mut st); let rem: u32 = input_len.wrapping_rem(64u32); let lenĀ·: u64 = input_len as u64; - crate::hacl_rs::hash_sha2::sha224_update_nblocks(input_len, ib, &mut st); + crate::hash_sha2::sha224_update_nblocks(input_len, ib, &mut st); let rem1: u32 = input_len.wrapping_rem(64u32); let b0: &[u8] = ib; let lb: (&[u8], &[u8]) = b0.split_at(input_len.wrapping_sub(rem1) as usize); - crate::hacl_rs::hash_sha2::sha224_update_last(lenĀ·, rem, lb.1, &mut st); - crate::hacl_rs::hash_sha2::sha224_finish(&st, rb) + crate::hash_sha2::sha224_update_last(lenĀ·, rem, lb.1, &mut st); + crate::hash_sha2::sha224_finish(&st, rb) } -pub fn malloc_512() -> 
Box<[crate::hacl_rs::streaming_types::state_64]> { +pub fn malloc_512() -> Box<[crate::streaming_types::state_64]> { let buf: Box<[u8]> = vec![0u8; 128usize].into_boxed_slice(); let mut block_state: Box<[u64]> = vec![0u64; 8usize].into_boxed_slice(); - crate::hacl_rs::hash_sha2::sha512_init(&mut block_state); - let s: crate::hacl_rs::streaming_types::state_64 = crate::hacl_rs::streaming_types::state_64 { + crate::hash_sha2::sha512_init(&mut block_state); + let s: crate::streaming_types::state_64 = crate::streaming_types::state_64 { block_state, buf, total_len: 0u32 as u64, }; - let p: Box<[crate::hacl_rs::streaming_types::state_64]> = vec![s].into_boxed_slice(); + let p: Box<[crate::streaming_types::state_64]> = vec![s].into_boxed_slice(); p } @@ -955,8 +955,8 @@ useful, for instance, if your control-flow diverges and you need to feed more (different) data into the hash in each branch. */ pub fn copy_512( - state: &[crate::hacl_rs::streaming_types::state_64], -) -> Box<[crate::hacl_rs::streaming_types::state_64]> { + state: &[crate::streaming_types::state_64], +) -> Box<[crate::streaming_types::state_64]> { let block_state0: &[u64] = &(state[0usize]).block_state; let buf0: &[u8] = &(state[0usize]).buf; let total_len0: u64 = (state[0usize]).total_len; @@ -964,32 +964,32 @@ pub fn copy_512( ((&mut buf)[0usize..128usize]).copy_from_slice(&buf0[0usize..128usize]); let mut block_state: Box<[u64]> = vec![0u64; 8usize].into_boxed_slice(); ((&mut block_state)[0usize..8usize]).copy_from_slice(&block_state0[0usize..8usize]); - let s: crate::hacl_rs::streaming_types::state_64 = crate::hacl_rs::streaming_types::state_64 { + let s: crate::streaming_types::state_64 = crate::streaming_types::state_64 { block_state, buf, total_len: total_len0, }; - let p: Box<[crate::hacl_rs::streaming_types::state_64]> = vec![s].into_boxed_slice(); + let p: Box<[crate::streaming_types::state_64]> = vec![s].into_boxed_slice(); p } -pub fn reset_512(state: &mut 
[crate::hacl_rs::streaming_types::state_64]) { +pub fn reset_512(state: &mut [crate::streaming_types::state_64]) { let block_state: &mut [u64] = &mut (state[0usize]).block_state; - crate::hacl_rs::hash_sha2::sha512_init(block_state); + crate::hash_sha2::sha512_init(block_state); let total_len: u64 = 0u32 as u64; (state[0usize]).total_len = total_len } #[inline] fn update_384_512( - state: &mut [crate::hacl_rs::streaming_types::state_64], + state: &mut [crate::streaming_types::state_64], chunk: &[u8], chunk_len: u32, -) -> crate::hacl_rs::streaming_types::error_code { +) -> crate::streaming_types::error_code { let block_state: &mut [u64] = &mut (state[0usize]).block_state; let total_len: u64 = (state[0usize]).total_len; if chunk_len as u64 > 18446744073709551615u64.wrapping_sub(total_len) { - crate::hacl_rs::streaming_types::error_code::MaximumLengthExceeded + crate::streaming_types::error_code::MaximumLengthExceeded } else { let sz: u32 = if total_len.wrapping_rem(128u32 as u64) == 0u64 && total_len > 0u64 { 128u32 @@ -1018,7 +1018,7 @@ fn update_384_512( total_len1.wrapping_rem(128u32 as u64) as u32 }; if sz1 != 0u32 { - crate::hacl_rs::hash_sha2::sha512_update_nblocks(128u32, buf, block_state) + crate::hash_sha2::sha512_update_nblocks(128u32, buf, block_state) }; let ite: u32 = if (chunk_len as u64).wrapping_rem(128u32 as u64) == 0u64 && chunk_len as u64 > 0u64 @@ -1032,7 +1032,7 @@ fn update_384_512( let data2_len: u32 = chunk_len.wrapping_sub(data1_len); let data1: (&[u8], &[u8]) = chunk.split_at(0usize); let data2: (&[u8], &[u8]) = data1.1.split_at(data1_len as usize); - crate::hacl_rs::hash_sha2::sha512_update_nblocks( + crate::hash_sha2::sha512_update_nblocks( data1_len.wrapping_div(128u32).wrapping_mul(128u32), data2.0, block_state, @@ -1065,7 +1065,7 @@ fn update_384_512( total_len10.wrapping_rem(128u32 as u64) as u32 }; if sz10 != 0u32 { - crate::hacl_rs::hash_sha2::sha512_update_nblocks(128u32, buf0, block_state) + 
crate::hash_sha2::sha512_update_nblocks(128u32, buf0, block_state) }; let ite: u32 = if (chunk_len.wrapping_sub(diff) as u64).wrapping_rem(128u32 as u64) == 0u64 @@ -1083,7 +1083,7 @@ fn update_384_512( let data2_len: u32 = chunk_len.wrapping_sub(diff).wrapping_sub(data1_len); let data1: (&[u8], &[u8]) = chunk2.1.split_at(0usize); let data2: (&[u8], &[u8]) = data1.1.split_at(data1_len as usize); - crate::hacl_rs::hash_sha2::sha512_update_nblocks( + crate::hash_sha2::sha512_update_nblocks( data1_len.wrapping_div(128u32).wrapping_mul(128u32), data2.0, block_state, @@ -1094,7 +1094,7 @@ fn update_384_512( (state[0usize]).total_len = total_len10.wrapping_add(chunk_len.wrapping_sub(diff) as u64) }; - crate::hacl_rs::streaming_types::error_code::Success + crate::streaming_types::error_code::Success } } @@ -1106,11 +1106,11 @@ success, or 1 if the combined length of all of the data passed to `update_512` This function is identical to the update function for SHA2_384. */ pub fn update_512( - state: &mut [crate::hacl_rs::streaming_types::state_64], + state: &mut [crate::streaming_types::state_64], input: &[u8], input_len: u32, -) -> crate::hacl_rs::streaming_types::error_code { - crate::hacl_rs::hash_sha2::update_384_512(state, input, input_len) +) -> crate::streaming_types::error_code { + crate::hash_sha2::update_384_512(state, input, input_len) } /** @@ -1119,7 +1119,7 @@ valid after a call to `digest_512`, meaning the user may feed more data into the hash via `update_512`. (The digest_512 function operates on an internal copy of the state and therefore does not invalidate the client-held state `p`.) 
*/ -pub fn digest_512(state: &[crate::hacl_rs::streaming_types::state_64], output: &mut [u8]) { +pub fn digest_512(state: &[crate::streaming_types::state_64], output: &mut [u8]) { let block_state: &[u64] = &(state[0usize]).block_state; let buf_: &[u8] = &(state[0usize]).buf; let total_len: u64 = (state[0usize]).total_len; @@ -1138,9 +1138,9 @@ pub fn digest_512(state: &[crate::hacl_rs::streaming_types::state_64], output: & r.wrapping_rem(128u32) }; let buf_last: (&[u8], &[u8]) = buf_multi.1.split_at(r.wrapping_sub(ite) as usize); - crate::hacl_rs::hash_sha2::sha512_update_nblocks(0u32, buf_last.0, &mut tmp_block_state); + crate::hash_sha2::sha512_update_nblocks(0u32, buf_last.0, &mut tmp_block_state); let prev_len_last: u64 = total_len.wrapping_sub(r as u64); - crate::hacl_rs::hash_sha2::sha512_update_last( + crate::hash_sha2::sha512_update_last( fstar::uint128::add( fstar::uint128::uint64_to_uint128(prev_len_last), fstar::uint128::uint64_to_uint128(r as u64), @@ -1149,7 +1149,7 @@ pub fn digest_512(state: &[crate::hacl_rs::streaming_types::state_64], output: & buf_last.1, &mut tmp_block_state, ); - crate::hacl_rs::hash_sha2::sha512_finish(&tmp_block_state, output) + crate::hash_sha2::sha512_finish(&tmp_block_state, output) } /** @@ -1159,43 +1159,43 @@ pub fn hash_512(output: &mut [u8], input: &[u8], input_len: u32) { let ib: &[u8] = input; let rb: &mut [u8] = output; let mut st: [u64; 8] = [0u64; 8usize]; - crate::hacl_rs::hash_sha2::sha512_init(&mut st); + crate::hash_sha2::sha512_init(&mut st); let rem: u32 = input_len.wrapping_rem(128u32); let lenĀ·: fstar::uint128::uint128 = fstar::uint128::uint64_to_uint128(input_len as u64); - crate::hacl_rs::hash_sha2::sha512_update_nblocks(input_len, ib, &mut st); + crate::hash_sha2::sha512_update_nblocks(input_len, ib, &mut st); let rem1: u32 = input_len.wrapping_rem(128u32); let b0: &[u8] = ib; let lb: (&[u8], &[u8]) = b0.split_at(input_len.wrapping_sub(rem1) as usize); - 
crate::hacl_rs::hash_sha2::sha512_update_last(lenĀ·, rem, lb.1, &mut st); - crate::hacl_rs::hash_sha2::sha512_finish(&st, rb) + crate::hash_sha2::sha512_update_last(lenĀ·, rem, lb.1, &mut st); + crate::hash_sha2::sha512_finish(&st, rb) } -pub fn malloc_384() -> Box<[crate::hacl_rs::streaming_types::state_64]> { +pub fn malloc_384() -> Box<[crate::streaming_types::state_64]> { let buf: Box<[u8]> = vec![0u8; 128usize].into_boxed_slice(); let mut block_state: Box<[u64]> = vec![0u64; 8usize].into_boxed_slice(); - crate::hacl_rs::hash_sha2::sha384_init(&mut block_state); - let s: crate::hacl_rs::streaming_types::state_64 = crate::hacl_rs::streaming_types::state_64 { + crate::hash_sha2::sha384_init(&mut block_state); + let s: crate::streaming_types::state_64 = crate::streaming_types::state_64 { block_state, buf, total_len: 0u32 as u64, }; - let p: Box<[crate::hacl_rs::streaming_types::state_64]> = vec![s].into_boxed_slice(); + let p: Box<[crate::streaming_types::state_64]> = vec![s].into_boxed_slice(); p } -pub fn reset_384(state: &mut [crate::hacl_rs::streaming_types::state_64]) { +pub fn reset_384(state: &mut [crate::streaming_types::state_64]) { let block_state: &mut [u64] = &mut (state[0usize]).block_state; - crate::hacl_rs::hash_sha2::sha384_init(block_state); + crate::hash_sha2::sha384_init(block_state); let total_len: u64 = 0u32 as u64; (state[0usize]).total_len = total_len } pub fn update_384( - state: &mut [crate::hacl_rs::streaming_types::state_64], + state: &mut [crate::streaming_types::state_64], input: &[u8], input_len: u32, -) -> crate::hacl_rs::streaming_types::error_code { - crate::hacl_rs::hash_sha2::update_384_512(state, input, input_len) +) -> crate::streaming_types::error_code { + crate::hash_sha2::update_384_512(state, input, input_len) } /** @@ -1203,7 +1203,7 @@ Write the resulting hash into `output`, an array of 48 bytes. The state remains valid after a call to `digest_384`, meaning the user may feed more data into the hash via `update_384`. 
*/ -pub fn digest_384(state: &[crate::hacl_rs::streaming_types::state_64], output: &mut [u8]) { +pub fn digest_384(state: &[crate::streaming_types::state_64], output: &mut [u8]) { let block_state: &[u64] = &(state[0usize]).block_state; let buf_: &[u8] = &(state[0usize]).buf; let total_len: u64 = (state[0usize]).total_len; @@ -1222,9 +1222,9 @@ pub fn digest_384(state: &[crate::hacl_rs::streaming_types::state_64], output: & r.wrapping_rem(128u32) }; let buf_last: (&[u8], &[u8]) = buf_multi.1.split_at(r.wrapping_sub(ite) as usize); - crate::hacl_rs::hash_sha2::sha384_update_nblocks(0u32, buf_last.0, &mut tmp_block_state); + crate::hash_sha2::sha384_update_nblocks(0u32, buf_last.0, &mut tmp_block_state); let prev_len_last: u64 = total_len.wrapping_sub(r as u64); - crate::hacl_rs::hash_sha2::sha384_update_last( + crate::hash_sha2::sha384_update_last( fstar::uint128::add( fstar::uint128::uint64_to_uint128(prev_len_last), fstar::uint128::uint64_to_uint128(r as u64), @@ -1233,7 +1233,7 @@ pub fn digest_384(state: &[crate::hacl_rs::streaming_types::state_64], output: & buf_last.1, &mut tmp_block_state, ); - crate::hacl_rs::hash_sha2::sha384_finish(&tmp_block_state, output) + crate::hash_sha2::sha384_finish(&tmp_block_state, output) } /** @@ -1243,107 +1243,13 @@ pub fn hash_384(output: &mut [u8], input: &[u8], input_len: u32) { let ib: &[u8] = input; let rb: &mut [u8] = output; let mut st: [u64; 8] = [0u64; 8usize]; - crate::hacl_rs::hash_sha2::sha384_init(&mut st); + crate::hash_sha2::sha384_init(&mut st); let rem: u32 = input_len.wrapping_rem(128u32); let lenĀ·: fstar::uint128::uint128 = fstar::uint128::uint64_to_uint128(input_len as u64); - crate::hacl_rs::hash_sha2::sha384_update_nblocks(input_len, ib, &mut st); + crate::hash_sha2::sha384_update_nblocks(input_len, ib, &mut st); let rem1: u32 = input_len.wrapping_rem(128u32); let b0: &[u8] = ib; let lb: (&[u8], &[u8]) = b0.split_at(input_len.wrapping_sub(rem1) as usize); - 
crate::hacl_rs::hash_sha2::sha384_update_last(lenĀ·, rem, lb.1, &mut st); - crate::hacl_rs::hash_sha2::sha384_finish(&st, rb) + crate::hash_sha2::sha384_update_last(lenĀ·, rem, lb.1, &mut st); + crate::hash_sha2::sha384_finish(&st, rb) } - -// END GENERATED CODE - -macro_rules! impl_hash { - ($name:ident, $digest_size:literal, $state:ty, $malloc:expr, $reset:expr, $update:expr, $finish:expr, $copy:expr, $hash:expr) => { - pub struct $name { - state: $state, - } - - impl $name { - /// Return the digest for the given input byte slice, in immediate mode. - pub fn hash(digest: &mut [u8; $digest_size], payload: &[u8]) { - $hash(digest, payload, payload.len() as u32) - } - - /// Initialize a new digest state for streaming use. - pub fn new() -> $name { - $name { state: $malloc() } - } - - /// Add the `payload` to the digest. - pub fn update(&mut self, payload: &[u8]) { - $update(self.state.as_mut(), payload, payload.len() as u32); - } - - /// Get the digest. - /// - /// Note that the digest state can be continued to be used, to extend the - /// digest. - pub fn finish(&self, digest: &mut [u8; $digest_size]) { - $finish(self.state.as_ref(), digest); - } - - /// Reset the digest state. 
- pub fn reset(&mut self) { - $reset(self.state.as_mut()); - } - } - - impl Clone for $name { - fn clone(&self) -> Self { - Self { - state: $copy(self.state.as_ref()), - } - } - } - }; -} - -impl_hash!( - HaclRs_Sha2_Sha256, - 32, - Box<[crate::hacl_rs::streaming_types::state_32]>, - malloc_256, - reset_256, - update_256, - digest_256, - copy_256, - hash_256 -); -impl_hash!( - HaclRs_Sha2_Sha256_224, - 28, - Box<[crate::hacl_rs::streaming_types::state_32]>, - malloc_224, - reset_224, - update_224, - digest_224, - copy_256, - hash_224 -); - -impl_hash!( - HaclRs_Sha2_Sha512, - 64, - Box<[crate::hacl_rs::streaming_types::state_64]>, - malloc_512, - reset_512, - update_512, - digest_512, - copy_512, - hash_512 -); -impl_hash!( - HaclRs_Sha2_Sha512_384, - 48, - Box<[crate::hacl_rs::streaming_types::state_64]>, - malloc_384, - reset_384, - update_384, - digest_384, - copy_512, - hash_384 -); diff --git a/src/hacl_rs/hkdf.rs b/libcrux-hacl-rs/src/hkdf.rs similarity index 86% rename from src/hacl_rs/hkdf.rs rename to libcrux-hacl-rs/src/hkdf.rs index 9b51de11b..d05f39fe6 100644 --- a/src/hacl_rs/hkdf.rs +++ b/libcrux-hacl-rs/src/hkdf.rs @@ -35,15 +35,9 @@ pub fn expand_sha2_256( for i in 0u32..n { ctr.1[0usize] = i.wrapping_add(1u32) as u8; if i == 0u32 { - crate::hacl_rs::hmac::compute_sha2_256( - ctr.0, - prk, - prklen, - tag.0, - infolen.wrapping_add(1u32), - ) + crate::hmac::compute_sha2_256(ctr.0, prk, prklen, tag.0, infolen.wrapping_add(1u32)) } else { - crate::hacl_rs::hmac::compute_sha2_256( + crate::hmac::compute_sha2_256( ctr.0, prk, prklen, @@ -57,15 +51,9 @@ pub fn expand_sha2_256( if n.wrapping_mul(tlen) < len { ctr.1[0usize] = n.wrapping_add(1u32) as u8; if n == 0u32 { - crate::hacl_rs::hmac::compute_sha2_256( - ctr.0, - prk, - prklen, - tag.0, - infolen.wrapping_add(1u32), - ) + crate::hmac::compute_sha2_256(ctr.0, prk, prklen, tag.0, infolen.wrapping_add(1u32)) } else { - crate::hacl_rs::hmac::compute_sha2_256( + crate::hmac::compute_sha2_256( ctr.0, prk, 
prklen, @@ -90,7 +78,7 @@ Extract a fixed-length pseudorandom key from input keying material. @param ikmlen Length of input keying material. */ pub fn extract_sha2_256(prk: &mut [u8], salt: &[u8], saltlen: u32, ikm: &[u8], ikmlen: u32) { - crate::hacl_rs::hmac::compute_sha2_256(prk, salt, saltlen, ikm, ikmlen) + crate::hmac::compute_sha2_256(prk, salt, saltlen, ikm, ikmlen) } /** @@ -124,15 +112,9 @@ pub fn expand_sha2_384( for i in 0u32..n { ctr.1[0usize] = i.wrapping_add(1u32) as u8; if i == 0u32 { - crate::hacl_rs::hmac::compute_sha2_384( - ctr.0, - prk, - prklen, - tag.0, - infolen.wrapping_add(1u32), - ) + crate::hmac::compute_sha2_384(ctr.0, prk, prklen, tag.0, infolen.wrapping_add(1u32)) } else { - crate::hacl_rs::hmac::compute_sha2_384( + crate::hmac::compute_sha2_384( ctr.0, prk, prklen, @@ -146,15 +128,9 @@ pub fn expand_sha2_384( if n.wrapping_mul(tlen) < len { ctr.1[0usize] = n.wrapping_add(1u32) as u8; if n == 0u32 { - crate::hacl_rs::hmac::compute_sha2_384( - ctr.0, - prk, - prklen, - tag.0, - infolen.wrapping_add(1u32), - ) + crate::hmac::compute_sha2_384(ctr.0, prk, prklen, tag.0, infolen.wrapping_add(1u32)) } else { - crate::hacl_rs::hmac::compute_sha2_384( + crate::hmac::compute_sha2_384( ctr.0, prk, prklen, @@ -179,7 +155,7 @@ Extract a fixed-length pseudorandom key from input keying material. @param ikmlen Length of input keying material. 
*/ pub fn extract_sha2_384(prk: &mut [u8], salt: &[u8], saltlen: u32, ikm: &[u8], ikmlen: u32) { - crate::hacl_rs::hmac::compute_sha2_384(prk, salt, saltlen, ikm, ikmlen) + crate::hmac::compute_sha2_384(prk, salt, saltlen, ikm, ikmlen) } /** @@ -213,15 +189,9 @@ pub fn expand_sha2_512( for i in 0u32..n { ctr.1[0usize] = i.wrapping_add(1u32) as u8; if i == 0u32 { - crate::hacl_rs::hmac::compute_sha2_512( - ctr.0, - prk, - prklen, - tag.0, - infolen.wrapping_add(1u32), - ) + crate::hmac::compute_sha2_512(ctr.0, prk, prklen, tag.0, infolen.wrapping_add(1u32)) } else { - crate::hacl_rs::hmac::compute_sha2_512( + crate::hmac::compute_sha2_512( ctr.0, prk, prklen, @@ -235,15 +205,9 @@ pub fn expand_sha2_512( if n.wrapping_mul(tlen) < len { ctr.1[0usize] = n.wrapping_add(1u32) as u8; if n == 0u32 { - crate::hacl_rs::hmac::compute_sha2_512( - ctr.0, - prk, - prklen, - tag.0, - infolen.wrapping_add(1u32), - ) + crate::hmac::compute_sha2_512(ctr.0, prk, prklen, tag.0, infolen.wrapping_add(1u32)) } else { - crate::hacl_rs::hmac::compute_sha2_512( + crate::hmac::compute_sha2_512( ctr.0, prk, prklen, @@ -268,7 +232,7 @@ Extract a fixed-length pseudorandom key from input keying material. @param ikmlen Length of input keying material. 
*/ pub fn extract_sha2_512(prk: &mut [u8], salt: &[u8], saltlen: u32, ikm: &[u8], ikmlen: u32) { - crate::hacl_rs::hmac::compute_sha2_512(prk, salt, saltlen, ikm, ikmlen) + crate::hmac::compute_sha2_512(prk, salt, saltlen, ikm, ikmlen) } /* no blake2 for now @@ -303,7 +267,7 @@ pub fn expand_blake2s_32( for i in 0u32..n { ctr.1[0usize] = i.wrapping_add(1u32) as u8; if i == 0u32 { - crate::hacl_rs::hmac::compute_blake2s_32( + crate::hmac::compute_blake2s_32( ctr.0, prk, prklen, @@ -311,7 +275,7 @@ pub fn expand_blake2s_32( infolen.wrapping_add(1u32), ) } else { - crate::hacl_rs::hmac::compute_blake2s_32( + crate::hmac::compute_blake2s_32( ctr.0, prk, prklen, @@ -325,7 +289,7 @@ pub fn expand_blake2s_32( if n.wrapping_mul(tlen) < len { ctr.1[0usize] = n.wrapping_add(1u32) as u8; if n == 0u32 { - crate::hacl_rs::hmac::compute_blake2s_32( + crate::hmac::compute_blake2s_32( ctr.0, prk, prklen, @@ -333,7 +297,7 @@ pub fn expand_blake2s_32( infolen.wrapping_add(1u32), ) } else { - crate::hacl_rs::hmac::compute_blake2s_32( + crate::hmac::compute_blake2s_32( ctr.0, prk, prklen, @@ -358,7 +322,7 @@ Extract a fixed-length pseudorandom key from input keying material. @param ikmlen Length of input keying material. 
*/ pub fn extract_blake2s_32(prk: &mut [u8], salt: &[u8], saltlen: u32, ikm: &[u8], ikmlen: u32) { - crate::hacl_rs::hmac::compute_blake2s_32(prk, salt, saltlen, ikm, ikmlen) + crate::hmac::compute_blake2s_32(prk, salt, saltlen, ikm, ikmlen) } /** @@ -392,7 +356,7 @@ pub fn expand_blake2b_32( for i in 0u32..n { ctr.1[0usize] = i.wrapping_add(1u32) as u8; if i == 0u32 { - crate::hacl_rs::hmac::compute_blake2b_32( + crate::hmac::compute_blake2b_32( ctr.0, prk, prklen, @@ -400,7 +364,7 @@ pub fn expand_blake2b_32( infolen.wrapping_add(1u32), ) } else { - crate::hacl_rs::hmac::compute_blake2b_32( + crate::hmac::compute_blake2b_32( ctr.0, prk, prklen, @@ -414,7 +378,7 @@ pub fn expand_blake2b_32( if n.wrapping_mul(tlen) < len { ctr.1[0usize] = n.wrapping_add(1u32) as u8; if n == 0u32 { - crate::hacl_rs::hmac::compute_blake2b_32( + crate::hmac::compute_blake2b_32( ctr.0, prk, prklen, @@ -422,7 +386,7 @@ pub fn expand_blake2b_32( infolen.wrapping_add(1u32), ) } else { - crate::hacl_rs::hmac::compute_blake2b_32( + crate::hmac::compute_blake2b_32( ctr.0, prk, prklen, @@ -447,6 +411,6 @@ Extract a fixed-length pseudorandom key from input keying material. @param ikmlen Length of input keying material. 
*/ pub fn extract_blake2b_32(prk: &mut [u8], salt: &[u8], saltlen: u32, ikm: &[u8], ikmlen: u32) { - crate::hacl_rs::hmac::compute_blake2b_32(prk, salt, saltlen, ikm, ikmlen) + crate::hmac::compute_blake2b_32(prk, salt, saltlen, ikm, ikmlen) } */ diff --git a/src/hacl_rs/hmac.rs b/libcrux-hacl-rs/src/hmac.rs similarity index 77% rename from src/hacl_rs/hmac.rs rename to libcrux-hacl-rs/src/hmac.rs index a30347d58..e380456e3 100644 --- a/src/hacl_rs/hmac.rs +++ b/libcrux-hacl-rs/src/hmac.rs @@ -29,7 +29,7 @@ pub fn compute_sha1(dst: &mut [u8], key: &[u8], key_len: u32, data: &[u8], data_ if key_len <= 64u32 { (zeroes.0[0usize..key_len as usize]).copy_from_slice(&key[0usize..key_len as usize]) } else { - crate::hacl_rs::hash_sha1::hash_oneshot(zeroes.0, key, key_len) + crate::hash_sha1::hash_oneshot(zeroes.0, key, key_len) }; let mut ipad: Box<[u8]> = vec![0x36u8; l as usize].into_boxed_slice(); for i in 0u32..l { @@ -51,19 +51,19 @@ pub fn compute_sha1(dst: &mut [u8], key: &[u8], key_len: u32, data: &[u8], data_ 0xc3d2e1f0u32, ]; if data_len == 0u32 { - crate::hacl_rs::hash_sha1::update_last(&mut s, 0u64, &ipad, 64u32) + crate::hash_sha1::update_last(&mut s, 0u64, &ipad, 64u32) } else { let block_len: u32 = 64u32; let n_blocks: u32 = data_len.wrapping_div(block_len); let rem: u32 = data_len.wrapping_rem(block_len); - let scrut: crate::hacl_rs::hmac::__uint32_t_uint32_t = if n_blocks > 0u32 && rem == 0u32 { + let scrut: crate::hmac::__uint32_t_uint32_t = if n_blocks > 0u32 && rem == 0u32 { let n_blocksĀ·: u32 = n_blocks.wrapping_sub(1u32); - crate::hacl_rs::hmac::__uint32_t_uint32_t { + crate::hmac::__uint32_t_uint32_t { fst: n_blocksĀ·, snd: data_len.wrapping_sub(n_blocksĀ·.wrapping_mul(block_len)), } } else { - crate::hacl_rs::hmac::__uint32_t_uint32_t { + crate::hmac::__uint32_t_uint32_t { fst: n_blocks, snd: rem, } @@ -73,9 +73,9 @@ pub fn compute_sha1(dst: &mut [u8], key: &[u8], key_len: u32, data: &[u8], data_ let full_blocks_len: u32 = 
n_blocks0.wrapping_mul(block_len); let full_blocks: (&[u8], &[u8]) = data.split_at(0usize); let rem0: (&[u8], &[u8]) = full_blocks.1.split_at(full_blocks_len as usize); - crate::hacl_rs::hash_sha1::update_multi(&mut s, &ipad, 1u32); - crate::hacl_rs::hash_sha1::update_multi(&mut s, rem0.0, n_blocks0); - crate::hacl_rs::hash_sha1::update_last( + crate::hash_sha1::update_multi(&mut s, &ipad, 1u32); + crate::hash_sha1::update_multi(&mut s, rem0.0, n_blocks0); + crate::hash_sha1::update_last( &mut s, (64u32 as u64).wrapping_add(full_blocks_len as u64), rem0.1, @@ -83,20 +83,20 @@ pub fn compute_sha1(dst: &mut [u8], key: &[u8], key_len: u32, data: &[u8], data_ ) }; let dst1: (&mut [u8], &mut [u8]) = ipad.split_at_mut(0usize); - crate::hacl_rs::hash_sha1::finish(&s, dst1.1); + crate::hash_sha1::finish(&s, dst1.1); let hash1: (&[u8], &[u8]) = dst1.1.split_at(0usize); - crate::hacl_rs::hash_sha1::init(&mut s); + crate::hash_sha1::init(&mut s); let block_len: u32 = 64u32; let n_blocks: u32 = 20u32.wrapping_div(block_len); let rem: u32 = 20u32.wrapping_rem(block_len); - let scrut: crate::hacl_rs::hmac::__uint32_t_uint32_t = if n_blocks > 0u32 && rem == 0u32 { + let scrut: crate::hmac::__uint32_t_uint32_t = if n_blocks > 0u32 && rem == 0u32 { let n_blocksĀ·: u32 = n_blocks.wrapping_sub(1u32); - crate::hacl_rs::hmac::__uint32_t_uint32_t { + crate::hmac::__uint32_t_uint32_t { fst: n_blocksĀ·, snd: 20u32.wrapping_sub(n_blocksĀ·.wrapping_mul(block_len)), } } else { - crate::hacl_rs::hmac::__uint32_t_uint32_t { + crate::hmac::__uint32_t_uint32_t { fst: n_blocks, snd: rem, } @@ -106,15 +106,15 @@ pub fn compute_sha1(dst: &mut [u8], key: &[u8], key_len: u32, data: &[u8], data_ let full_blocks_len: u32 = n_blocks0.wrapping_mul(block_len); let full_blocks: (&[u8], &[u8]) = hash1.1.split_at(0usize); let rem0: (&[u8], &[u8]) = full_blocks.1.split_at(full_blocks_len as usize); - crate::hacl_rs::hash_sha1::update_multi(&mut s, &opad, 1u32); - crate::hacl_rs::hash_sha1::update_multi(&mut 
s, rem0.0, n_blocks0); - crate::hacl_rs::hash_sha1::update_last( + crate::hash_sha1::update_multi(&mut s, &opad, 1u32); + crate::hash_sha1::update_multi(&mut s, rem0.0, n_blocks0); + crate::hash_sha1::update_last( &mut s, (64u32 as u64).wrapping_add(full_blocks_len as u64), rem0.1, rem_len, ); - crate::hacl_rs::hash_sha1::finish(&s, dst) + crate::hash_sha1::finish(&s, dst) } /** @@ -133,7 +133,7 @@ pub fn compute_sha2_256(dst: &mut [u8], key: &[u8], key_len: u32, data: &[u8], d if key_len <= 64u32 { (zeroes.0[0usize..key_len as usize]).copy_from_slice(&key[0usize..key_len as usize]) } else { - crate::hacl_rs::hash_sha2::hash_256(zeroes.0, key, key_len) + crate::hash_sha2::hash_256(zeroes.0, key, key_len) }; let mut ipad: Box<[u8]> = vec![0x36u8; l as usize].into_boxed_slice(); for i in 0u32..l { @@ -149,30 +149,25 @@ pub fn compute_sha2_256(dst: &mut [u8], key: &[u8], key_len: u32, data: &[u8], d } let mut st: [u32; 8] = [0u32; 8usize]; krml::unroll_for!(8, "i", 0u32, 1u32, { - let x: u32 = (&crate::hacl_rs::hash_sha2::h256)[i as usize]; + let x: u32 = (&crate::hash_sha2::h256)[i as usize]; let os: (&mut [u32], &mut [u32]) = st.split_at_mut(0usize); os.1[i as usize] = x }); let s: &mut [u32] = &mut st; if data_len == 0u32 { - crate::hacl_rs::hash_sha2::sha256_update_last( - 0u64.wrapping_add(64u32 as u64), - 64u32, - &ipad, - s, - ) + crate::hash_sha2::sha256_update_last(0u64.wrapping_add(64u32 as u64), 64u32, &ipad, s) } else { let block_len: u32 = 64u32; let n_blocks: u32 = data_len.wrapping_div(block_len); let rem: u32 = data_len.wrapping_rem(block_len); - let scrut: crate::hacl_rs::hmac::__uint32_t_uint32_t = if n_blocks > 0u32 && rem == 0u32 { + let scrut: crate::hmac::__uint32_t_uint32_t = if n_blocks > 0u32 && rem == 0u32 { let n_blocksĀ·: u32 = n_blocks.wrapping_sub(1u32); - crate::hacl_rs::hmac::__uint32_t_uint32_t { + crate::hmac::__uint32_t_uint32_t { fst: n_blocksĀ·, snd: data_len.wrapping_sub(n_blocksĀ·.wrapping_mul(block_len)), } } else { - 
crate::hacl_rs::hmac::__uint32_t_uint32_t { + crate::hmac::__uint32_t_uint32_t { fst: n_blocks, snd: rem, } @@ -182,9 +177,9 @@ pub fn compute_sha2_256(dst: &mut [u8], key: &[u8], key_len: u32, data: &[u8], d let full_blocks_len: u32 = n_blocks0.wrapping_mul(block_len); let full_blocks: (&[u8], &[u8]) = data.split_at(0usize); let rem0: (&[u8], &[u8]) = full_blocks.1.split_at(full_blocks_len as usize); - crate::hacl_rs::hash_sha2::sha256_update_nblocks(64u32, &ipad, s); - crate::hacl_rs::hash_sha2::sha256_update_nblocks(n_blocks0.wrapping_mul(64u32), rem0.0, s); - crate::hacl_rs::hash_sha2::sha256_update_last( + crate::hash_sha2::sha256_update_nblocks(64u32, &ipad, s); + crate::hash_sha2::sha256_update_nblocks(n_blocks0.wrapping_mul(64u32), rem0.0, s); + crate::hash_sha2::sha256_update_last( (64u32 as u64) .wrapping_add(full_blocks_len as u64) .wrapping_add(rem_len as u64), @@ -194,20 +189,20 @@ pub fn compute_sha2_256(dst: &mut [u8], key: &[u8], key_len: u32, data: &[u8], d ) }; let dst1: (&mut [u8], &mut [u8]) = ipad.split_at_mut(0usize); - crate::hacl_rs::hash_sha2::sha256_finish(s, dst1.1); + crate::hash_sha2::sha256_finish(s, dst1.1); let hash1: (&[u8], &[u8]) = dst1.1.split_at(0usize); - crate::hacl_rs::hash_sha2::sha256_init(s); + crate::hash_sha2::sha256_init(s); let block_len: u32 = 64u32; let n_blocks: u32 = 32u32.wrapping_div(block_len); let rem: u32 = 32u32.wrapping_rem(block_len); - let scrut: crate::hacl_rs::hmac::__uint32_t_uint32_t = if n_blocks > 0u32 && rem == 0u32 { + let scrut: crate::hmac::__uint32_t_uint32_t = if n_blocks > 0u32 && rem == 0u32 { let n_blocksĀ·: u32 = n_blocks.wrapping_sub(1u32); - crate::hacl_rs::hmac::__uint32_t_uint32_t { + crate::hmac::__uint32_t_uint32_t { fst: n_blocksĀ·, snd: 32u32.wrapping_sub(n_blocksĀ·.wrapping_mul(block_len)), } } else { - crate::hacl_rs::hmac::__uint32_t_uint32_t { + crate::hmac::__uint32_t_uint32_t { fst: n_blocks, snd: rem, } @@ -217,9 +212,9 @@ pub fn compute_sha2_256(dst: &mut [u8], key: &[u8], 
key_len: u32, data: &[u8], d let full_blocks_len: u32 = n_blocks0.wrapping_mul(block_len); let full_blocks: (&[u8], &[u8]) = hash1.1.split_at(0usize); let rem0: (&[u8], &[u8]) = full_blocks.1.split_at(full_blocks_len as usize); - crate::hacl_rs::hash_sha2::sha256_update_nblocks(64u32, &opad, s); - crate::hacl_rs::hash_sha2::sha256_update_nblocks(n_blocks0.wrapping_mul(64u32), rem0.0, s); - crate::hacl_rs::hash_sha2::sha256_update_last( + crate::hash_sha2::sha256_update_nblocks(64u32, &opad, s); + crate::hash_sha2::sha256_update_nblocks(n_blocks0.wrapping_mul(64u32), rem0.0, s); + crate::hash_sha2::sha256_update_last( (64u32 as u64) .wrapping_add(full_blocks_len as u64) .wrapping_add(rem_len as u64), @@ -227,7 +222,7 @@ pub fn compute_sha2_256(dst: &mut [u8], key: &[u8], key_len: u32, data: &[u8], d rem0.1, s, ); - crate::hacl_rs::hash_sha2::sha256_finish(s, dst) + crate::hash_sha2::sha256_finish(s, dst) } /** @@ -246,7 +241,7 @@ pub fn compute_sha2_384(dst: &mut [u8], key: &[u8], key_len: u32, data: &[u8], d if key_len <= 128u32 { (zeroes.0[0usize..key_len as usize]).copy_from_slice(&key[0usize..key_len as usize]) } else { - crate::hacl_rs::hash_sha2::hash_384(zeroes.0, key, key_len) + crate::hash_sha2::hash_384(zeroes.0, key, key_len) }; let mut ipad: Box<[u8]> = vec![0x36u8; l as usize].into_boxed_slice(); for i in 0u32..l { @@ -262,13 +257,13 @@ pub fn compute_sha2_384(dst: &mut [u8], key: &[u8], key_len: u32, data: &[u8], d } let mut st: [u64; 8] = [0u64; 8usize]; krml::unroll_for!(8, "i", 0u32, 1u32, { - let x: u64 = (&crate::hacl_rs::hash_sha2::h384)[i as usize]; + let x: u64 = (&crate::hash_sha2::h384)[i as usize]; let os: (&mut [u64], &mut [u64]) = st.split_at_mut(0usize); os.1[i as usize] = x }); let s: &mut [u64] = &mut st; if data_len == 0u32 { - crate::hacl_rs::hash_sha2::sha384_update_last( + crate::hash_sha2::sha384_update_last( fstar::uint128::add( fstar::uint128::uint64_to_uint128(0u64), fstar::uint128::uint64_to_uint128(128u32 as u64), @@ -281,14 
+276,14 @@ pub fn compute_sha2_384(dst: &mut [u8], key: &[u8], key_len: u32, data: &[u8], d let block_len: u32 = 128u32; let n_blocks: u32 = data_len.wrapping_div(block_len); let rem: u32 = data_len.wrapping_rem(block_len); - let scrut: crate::hacl_rs::hmac::__uint32_t_uint32_t = if n_blocks > 0u32 && rem == 0u32 { + let scrut: crate::hmac::__uint32_t_uint32_t = if n_blocks > 0u32 && rem == 0u32 { let n_blocksĀ·: u32 = n_blocks.wrapping_sub(1u32); - crate::hacl_rs::hmac::__uint32_t_uint32_t { + crate::hmac::__uint32_t_uint32_t { fst: n_blocksĀ·, snd: data_len.wrapping_sub(n_blocksĀ·.wrapping_mul(block_len)), } } else { - crate::hacl_rs::hmac::__uint32_t_uint32_t { + crate::hmac::__uint32_t_uint32_t { fst: n_blocks, snd: rem, } @@ -298,9 +293,9 @@ pub fn compute_sha2_384(dst: &mut [u8], key: &[u8], key_len: u32, data: &[u8], d let full_blocks_len: u32 = n_blocks0.wrapping_mul(block_len); let full_blocks: (&[u8], &[u8]) = data.split_at(0usize); let rem0: (&[u8], &[u8]) = full_blocks.1.split_at(full_blocks_len as usize); - crate::hacl_rs::hash_sha2::sha384_update_nblocks(128u32, &ipad, s); - crate::hacl_rs::hash_sha2::sha384_update_nblocks(n_blocks0.wrapping_mul(128u32), rem0.0, s); - crate::hacl_rs::hash_sha2::sha384_update_last( + crate::hash_sha2::sha384_update_nblocks(128u32, &ipad, s); + crate::hash_sha2::sha384_update_nblocks(n_blocks0.wrapping_mul(128u32), rem0.0, s); + crate::hash_sha2::sha384_update_last( fstar::uint128::add( fstar::uint128::add( fstar::uint128::uint64_to_uint128(128u32 as u64), @@ -314,20 +309,20 @@ pub fn compute_sha2_384(dst: &mut [u8], key: &[u8], key_len: u32, data: &[u8], d ) }; let dst1: (&mut [u8], &mut [u8]) = ipad.split_at_mut(0usize); - crate::hacl_rs::hash_sha2::sha384_finish(s, dst1.1); + crate::hash_sha2::sha384_finish(s, dst1.1); let hash1: (&[u8], &[u8]) = dst1.1.split_at(0usize); - crate::hacl_rs::hash_sha2::sha384_init(s); + crate::hash_sha2::sha384_init(s); let block_len: u32 = 128u32; let n_blocks: u32 = 
48u32.wrapping_div(block_len); let rem: u32 = 48u32.wrapping_rem(block_len); - let scrut: crate::hacl_rs::hmac::__uint32_t_uint32_t = if n_blocks > 0u32 && rem == 0u32 { + let scrut: crate::hmac::__uint32_t_uint32_t = if n_blocks > 0u32 && rem == 0u32 { let n_blocksĀ·: u32 = n_blocks.wrapping_sub(1u32); - crate::hacl_rs::hmac::__uint32_t_uint32_t { + crate::hmac::__uint32_t_uint32_t { fst: n_blocksĀ·, snd: 48u32.wrapping_sub(n_blocksĀ·.wrapping_mul(block_len)), } } else { - crate::hacl_rs::hmac::__uint32_t_uint32_t { + crate::hmac::__uint32_t_uint32_t { fst: n_blocks, snd: rem, } @@ -337,9 +332,9 @@ pub fn compute_sha2_384(dst: &mut [u8], key: &[u8], key_len: u32, data: &[u8], d let full_blocks_len: u32 = n_blocks0.wrapping_mul(block_len); let full_blocks: (&[u8], &[u8]) = hash1.1.split_at(0usize); let rem0: (&[u8], &[u8]) = full_blocks.1.split_at(full_blocks_len as usize); - crate::hacl_rs::hash_sha2::sha384_update_nblocks(128u32, &opad, s); - crate::hacl_rs::hash_sha2::sha384_update_nblocks(n_blocks0.wrapping_mul(128u32), rem0.0, s); - crate::hacl_rs::hash_sha2::sha384_update_last( + crate::hash_sha2::sha384_update_nblocks(128u32, &opad, s); + crate::hash_sha2::sha384_update_nblocks(n_blocks0.wrapping_mul(128u32), rem0.0, s); + crate::hash_sha2::sha384_update_last( fstar::uint128::add( fstar::uint128::add( fstar::uint128::uint64_to_uint128(128u32 as u64), @@ -351,7 +346,7 @@ pub fn compute_sha2_384(dst: &mut [u8], key: &[u8], key_len: u32, data: &[u8], d rem0.1, s, ); - crate::hacl_rs::hash_sha2::sha384_finish(s, dst) + crate::hash_sha2::sha384_finish(s, dst) } /** @@ -370,7 +365,7 @@ pub fn compute_sha2_512(dst: &mut [u8], key: &[u8], key_len: u32, data: &[u8], d if key_len <= 128u32 { (zeroes.0[0usize..key_len as usize]).copy_from_slice(&key[0usize..key_len as usize]) } else { - crate::hacl_rs::hash_sha2::hash_512(zeroes.0, key, key_len) + crate::hash_sha2::hash_512(zeroes.0, key, key_len) }; let mut ipad: Box<[u8]> = vec![0x36u8; l as 
usize].into_boxed_slice(); for i in 0u32..l { @@ -386,13 +381,13 @@ pub fn compute_sha2_512(dst: &mut [u8], key: &[u8], key_len: u32, data: &[u8], d } let mut st: [u64; 8] = [0u64; 8usize]; krml::unroll_for!(8, "i", 0u32, 1u32, { - let x: u64 = (&crate::hacl_rs::hash_sha2::h512)[i as usize]; + let x: u64 = (&crate::hash_sha2::h512)[i as usize]; let os: (&mut [u64], &mut [u64]) = st.split_at_mut(0usize); os.1[i as usize] = x }); let s: &mut [u64] = &mut st; if data_len == 0u32 { - crate::hacl_rs::hash_sha2::sha512_update_last( + crate::hash_sha2::sha512_update_last( fstar::uint128::add( fstar::uint128::uint64_to_uint128(0u64), fstar::uint128::uint64_to_uint128(128u32 as u64), @@ -405,14 +400,14 @@ pub fn compute_sha2_512(dst: &mut [u8], key: &[u8], key_len: u32, data: &[u8], d let block_len: u32 = 128u32; let n_blocks: u32 = data_len.wrapping_div(block_len); let rem: u32 = data_len.wrapping_rem(block_len); - let scrut: crate::hacl_rs::hmac::__uint32_t_uint32_t = if n_blocks > 0u32 && rem == 0u32 { + let scrut: crate::hmac::__uint32_t_uint32_t = if n_blocks > 0u32 && rem == 0u32 { let n_blocksĀ·: u32 = n_blocks.wrapping_sub(1u32); - crate::hacl_rs::hmac::__uint32_t_uint32_t { + crate::hmac::__uint32_t_uint32_t { fst: n_blocksĀ·, snd: data_len.wrapping_sub(n_blocksĀ·.wrapping_mul(block_len)), } } else { - crate::hacl_rs::hmac::__uint32_t_uint32_t { + crate::hmac::__uint32_t_uint32_t { fst: n_blocks, snd: rem, } @@ -422,9 +417,9 @@ pub fn compute_sha2_512(dst: &mut [u8], key: &[u8], key_len: u32, data: &[u8], d let full_blocks_len: u32 = n_blocks0.wrapping_mul(block_len); let full_blocks: (&[u8], &[u8]) = data.split_at(0usize); let rem0: (&[u8], &[u8]) = full_blocks.1.split_at(full_blocks_len as usize); - crate::hacl_rs::hash_sha2::sha512_update_nblocks(128u32, &ipad, s); - crate::hacl_rs::hash_sha2::sha512_update_nblocks(n_blocks0.wrapping_mul(128u32), rem0.0, s); - crate::hacl_rs::hash_sha2::sha512_update_last( + crate::hash_sha2::sha512_update_nblocks(128u32, &ipad, 
s); + crate::hash_sha2::sha512_update_nblocks(n_blocks0.wrapping_mul(128u32), rem0.0, s); + crate::hash_sha2::sha512_update_last( fstar::uint128::add( fstar::uint128::add( fstar::uint128::uint64_to_uint128(128u32 as u64), @@ -438,20 +433,20 @@ pub fn compute_sha2_512(dst: &mut [u8], key: &[u8], key_len: u32, data: &[u8], d ) }; let dst1: (&mut [u8], &mut [u8]) = ipad.split_at_mut(0usize); - crate::hacl_rs::hash_sha2::sha512_finish(s, dst1.1); + crate::hash_sha2::sha512_finish(s, dst1.1); let hash1: (&[u8], &[u8]) = dst1.1.split_at(0usize); - crate::hacl_rs::hash_sha2::sha512_init(s); + crate::hash_sha2::sha512_init(s); let block_len: u32 = 128u32; let n_blocks: u32 = 64u32.wrapping_div(block_len); let rem: u32 = 64u32.wrapping_rem(block_len); - let scrut: crate::hacl_rs::hmac::__uint32_t_uint32_t = if n_blocks > 0u32 && rem == 0u32 { + let scrut: crate::hmac::__uint32_t_uint32_t = if n_blocks > 0u32 && rem == 0u32 { let n_blocksĀ·: u32 = n_blocks.wrapping_sub(1u32); - crate::hacl_rs::hmac::__uint32_t_uint32_t { + crate::hmac::__uint32_t_uint32_t { fst: n_blocksĀ·, snd: 64u32.wrapping_sub(n_blocksĀ·.wrapping_mul(block_len)), } } else { - crate::hacl_rs::hmac::__uint32_t_uint32_t { + crate::hmac::__uint32_t_uint32_t { fst: n_blocks, snd: rem, } @@ -461,9 +456,9 @@ pub fn compute_sha2_512(dst: &mut [u8], key: &[u8], key_len: u32, data: &[u8], d let full_blocks_len: u32 = n_blocks0.wrapping_mul(block_len); let full_blocks: (&[u8], &[u8]) = hash1.1.split_at(0usize); let rem0: (&[u8], &[u8]) = full_blocks.1.split_at(full_blocks_len as usize); - crate::hacl_rs::hash_sha2::sha512_update_nblocks(128u32, &opad, s); - crate::hacl_rs::hash_sha2::sha512_update_nblocks(n_blocks0.wrapping_mul(128u32), rem0.0, s); - crate::hacl_rs::hash_sha2::sha512_update_last( + crate::hash_sha2::sha512_update_nblocks(128u32, &opad, s); + crate::hash_sha2::sha512_update_nblocks(n_blocks0.wrapping_mul(128u32), rem0.0, s); + crate::hash_sha2::sha512_update_last( fstar::uint128::add( 
fstar::uint128::add( fstar::uint128::uint64_to_uint128(128u32 as u64), @@ -475,7 +470,7 @@ pub fn compute_sha2_512(dst: &mut [u8], key: &[u8], key_len: u32, data: &[u8], d rem0.1, s, ); - crate::hacl_rs::hash_sha2::sha512_finish(s, dst) + crate::hash_sha2::sha512_finish(s, dst) } /* no blake2 for now @@ -496,7 +491,7 @@ pub fn compute_blake2s_32(dst: &mut [u8], key: &[u8], key_len: u32, data: &[u8], if key_len <= 64u32 { (zeroes.0[0usize..key_len as usize]).copy_from_slice(&key[0usize..key_len as usize]) } else { - crate::hacl_rs::hash_blake2s::hash_with_key(zeroes.0, 32u32, key, key_len, &[], 0u32) + crate::hash_blake2s::hash_with_key(zeroes.0, 32u32, key, key_len, &[], 0u32) }; let mut ipad: Box<[u8]> = vec![0x36u8; l as usize].into_boxed_slice(); for i in 0u32..l { @@ -511,23 +506,23 @@ pub fn compute_blake2s_32(dst: &mut [u8], key: &[u8], key_len: u32, data: &[u8], (&mut opad)[i as usize] = xi ^ yi } let mut s: [u32; 16] = [0u32; 16usize]; - crate::hacl_rs::hash_blake2s::init(&mut s, 0u32, 32u32); + crate::hash_blake2s::init(&mut s, 0u32, 32u32); let s0: &mut [u32] = &mut s; if data_len == 0u32 { let mut wv: [u32; 16] = [0u32; 16usize]; - crate::hacl_rs::hash_blake2s::update_last(64u32, &mut wv, s0, false, 0u64, 64u32, &ipad) + crate::hash_blake2s::update_last(64u32, &mut wv, s0, false, 0u64, 64u32, &ipad) } else { let block_len: u32 = 64u32; let n_blocks: u32 = data_len.wrapping_div(block_len); let rem: u32 = data_len.wrapping_rem(block_len); - let scrut: crate::hacl_rs::hmac::__uint32_t_uint32_t = if n_blocks > 0u32 && rem == 0u32 { + let scrut: crate::hmac::__uint32_t_uint32_t = if n_blocks > 0u32 && rem == 0u32 { let n_blocksĀ·: u32 = n_blocks.wrapping_sub(1u32); - crate::hacl_rs::hmac::__uint32_t_uint32_t { + crate::hmac::__uint32_t_uint32_t { fst: n_blocksĀ·, snd: data_len.wrapping_sub(n_blocksĀ·.wrapping_mul(block_len)), } } else { - crate::hacl_rs::hmac::__uint32_t_uint32_t { + crate::hmac::__uint32_t_uint32_t { fst: n_blocks, snd: rem, } @@ -538,9 
+533,9 @@ pub fn compute_blake2s_32(dst: &mut [u8], key: &[u8], key_len: u32, data: &[u8], let full_blocks: (&[u8], &[u8]) = data.split_at(0usize); let rem0: (&[u8], &[u8]) = full_blocks.1.split_at(full_blocks_len as usize); let mut wv: [u32; 16] = [0u32; 16usize]; - crate::hacl_rs::hash_blake2s::update_multi(64u32, &mut wv, s0, 0u64, &ipad, 1u32); + crate::hash_blake2s::update_multi(64u32, &mut wv, s0, 0u64, &ipad, 1u32); let mut wv0: [u32; 16] = [0u32; 16usize]; - crate::hacl_rs::hash_blake2s::update_multi( + crate::hash_blake2s::update_multi( n_blocks0.wrapping_mul(64u32), &mut wv0, s0, @@ -549,7 +544,7 @@ pub fn compute_blake2s_32(dst: &mut [u8], key: &[u8], key_len: u32, data: &[u8], n_blocks0, ); let mut wv1: [u32; 16] = [0u32; 16usize]; - crate::hacl_rs::hash_blake2s::update_last( + crate::hash_blake2s::update_last( rem_len, &mut wv1, s0, @@ -560,20 +555,20 @@ pub fn compute_blake2s_32(dst: &mut [u8], key: &[u8], key_len: u32, data: &[u8], ) }; let dst1: (&mut [u8], &mut [u8]) = ipad.split_at_mut(0usize); - crate::hacl_rs::hash_blake2s::finish(32u32, dst1.1, s0); + crate::hash_blake2s::finish(32u32, dst1.1, s0); let hash1: (&[u8], &[u8]) = dst1.1.split_at(0usize); - crate::hacl_rs::hash_blake2s::init(s0, 0u32, 32u32); + crate::hash_blake2s::init(s0, 0u32, 32u32); let block_len: u32 = 64u32; let n_blocks: u32 = 32u32.wrapping_div(block_len); let rem: u32 = 32u32.wrapping_rem(block_len); - let scrut: crate::hacl_rs::hmac::__uint32_t_uint32_t = if n_blocks > 0u32 && rem == 0u32 { + let scrut: crate::hmac::__uint32_t_uint32_t = if n_blocks > 0u32 && rem == 0u32 { let n_blocksĀ·: u32 = n_blocks.wrapping_sub(1u32); - crate::hacl_rs::hmac::__uint32_t_uint32_t { + crate::hmac::__uint32_t_uint32_t { fst: n_blocksĀ·, snd: 32u32.wrapping_sub(n_blocksĀ·.wrapping_mul(block_len)), } } else { - crate::hacl_rs::hmac::__uint32_t_uint32_t { + crate::hmac::__uint32_t_uint32_t { fst: n_blocks, snd: rem, } @@ -584,9 +579,9 @@ pub fn compute_blake2s_32(dst: &mut [u8], key: &[u8], 
key_len: u32, data: &[u8], let full_blocks: (&[u8], &[u8]) = hash1.1.split_at(0usize); let rem0: (&[u8], &[u8]) = full_blocks.1.split_at(full_blocks_len as usize); let mut wv: [u32; 16] = [0u32; 16usize]; - crate::hacl_rs::hash_blake2s::update_multi(64u32, &mut wv, s0, 0u64, &opad, 1u32); + crate::hash_blake2s::update_multi(64u32, &mut wv, s0, 0u64, &opad, 1u32); let mut wv0: [u32; 16] = [0u32; 16usize]; - crate::hacl_rs::hash_blake2s::update_multi( + crate::hash_blake2s::update_multi( n_blocks0.wrapping_mul(64u32), &mut wv0, s0, @@ -595,7 +590,7 @@ pub fn compute_blake2s_32(dst: &mut [u8], key: &[u8], key_len: u32, data: &[u8], n_blocks0, ); let mut wv1: [u32; 16] = [0u32; 16usize]; - crate::hacl_rs::hash_blake2s::update_last( + crate::hash_blake2s::update_last( rem_len, &mut wv1, s0, @@ -604,7 +599,7 @@ pub fn compute_blake2s_32(dst: &mut [u8], key: &[u8], key_len: u32, data: &[u8], rem_len, rem0.1, ); - crate::hacl_rs::hash_blake2s::finish(32u32, dst, s0) + crate::hash_blake2s::finish(32u32, dst, s0) } /** @@ -623,7 +618,7 @@ pub fn compute_blake2b_32(dst: &mut [u8], key: &[u8], key_len: u32, data: &[u8], if key_len <= 128u32 { (zeroes.0[0usize..key_len as usize]).copy_from_slice(&key[0usize..key_len as usize]) } else { - crate::hacl_rs::hash_blake2b::hash_with_key(zeroes.0, 64u32, key, key_len, &[], 0u32) + crate::hash_blake2b::hash_with_key(zeroes.0, 64u32, key, key_len, &[], 0u32) }; let mut ipad: Box<[u8]> = vec![0x36u8; l as usize].into_boxed_slice(); for i in 0u32..l { @@ -638,11 +633,11 @@ pub fn compute_blake2b_32(dst: &mut [u8], key: &[u8], key_len: u32, data: &[u8], (&mut opad)[i as usize] = xi ^ yi } let mut s: [u64; 16] = [0u64; 16usize]; - crate::hacl_rs::hash_blake2b::init(&mut s, 0u32, 64u32); + crate::hash_blake2b::init(&mut s, 0u32, 64u32); let s0: &mut [u64] = &mut s; if data_len == 0u32 { let mut wv: [u64; 16] = [0u64; 16usize]; - crate::hacl_rs::hash_blake2b::update_last( + crate::hash_blake2b::update_last( 128u32, &mut wv, s0, @@ -655,14 
+650,14 @@ pub fn compute_blake2b_32(dst: &mut [u8], key: &[u8], key_len: u32, data: &[u8], let block_len: u32 = 128u32; let n_blocks: u32 = data_len.wrapping_div(block_len); let rem: u32 = data_len.wrapping_rem(block_len); - let scrut: crate::hacl_rs::hmac::__uint32_t_uint32_t = if n_blocks > 0u32 && rem == 0u32 { + let scrut: crate::hmac::__uint32_t_uint32_t = if n_blocks > 0u32 && rem == 0u32 { let n_blocksĀ·: u32 = n_blocks.wrapping_sub(1u32); - crate::hacl_rs::hmac::__uint32_t_uint32_t { + crate::hmac::__uint32_t_uint32_t { fst: n_blocksĀ·, snd: data_len.wrapping_sub(n_blocksĀ·.wrapping_mul(block_len)), } } else { - crate::hacl_rs::hmac::__uint32_t_uint32_t { + crate::hmac::__uint32_t_uint32_t { fst: n_blocks, snd: rem, } @@ -673,7 +668,7 @@ pub fn compute_blake2b_32(dst: &mut [u8], key: &[u8], key_len: u32, data: &[u8], let full_blocks: (&[u8], &[u8]) = data.split_at(0usize); let rem0: (&[u8], &[u8]) = full_blocks.1.split_at(full_blocks_len as usize); let mut wv: [u64; 16] = [0u64; 16usize]; - crate::hacl_rs::hash_blake2b::update_multi( + crate::hash_blake2b::update_multi( 128u32, &mut wv, s0, @@ -682,7 +677,7 @@ pub fn compute_blake2b_32(dst: &mut [u8], key: &[u8], key_len: u32, data: &[u8], 1u32, ); let mut wv0: [u64; 16] = [0u64; 16usize]; - crate::hacl_rs::hash_blake2b::update_multi( + crate::hash_blake2b::update_multi( n_blocks0.wrapping_mul(128u32), &mut wv0, s0, @@ -691,7 +686,7 @@ pub fn compute_blake2b_32(dst: &mut [u8], key: &[u8], key_len: u32, data: &[u8], n_blocks0, ); let mut wv1: [u64; 16] = [0u64; 16usize]; - crate::hacl_rs::hash_blake2b::update_last( + crate::hash_blake2b::update_last( rem_len, &mut wv1, s0, @@ -705,20 +700,20 @@ pub fn compute_blake2b_32(dst: &mut [u8], key: &[u8], key_len: u32, data: &[u8], ) }; let dst1: (&mut [u8], &mut [u8]) = ipad.split_at_mut(0usize); - crate::hacl_rs::hash_blake2b::finish(64u32, dst1.1, s0); + crate::hash_blake2b::finish(64u32, dst1.1, s0); let hash1: (&[u8], &[u8]) = dst1.1.split_at(0usize); - 
crate::hacl_rs::hash_blake2b::init(s0, 0u32, 64u32); + crate::hash_blake2b::init(s0, 0u32, 64u32); let block_len: u32 = 128u32; let n_blocks: u32 = 64u32.wrapping_div(block_len); let rem: u32 = 64u32.wrapping_rem(block_len); - let scrut: crate::hacl_rs::hmac::__uint32_t_uint32_t = if n_blocks > 0u32 && rem == 0u32 { + let scrut: crate::hmac::__uint32_t_uint32_t = if n_blocks > 0u32 && rem == 0u32 { let n_blocksĀ·: u32 = n_blocks.wrapping_sub(1u32); - crate::hacl_rs::hmac::__uint32_t_uint32_t { + crate::hmac::__uint32_t_uint32_t { fst: n_blocksĀ·, snd: 64u32.wrapping_sub(n_blocksĀ·.wrapping_mul(block_len)), } } else { - crate::hacl_rs::hmac::__uint32_t_uint32_t { + crate::hmac::__uint32_t_uint32_t { fst: n_blocks, snd: rem, } @@ -729,7 +724,7 @@ pub fn compute_blake2b_32(dst: &mut [u8], key: &[u8], key_len: u32, data: &[u8], let full_blocks: (&[u8], &[u8]) = hash1.1.split_at(0usize); let rem0: (&[u8], &[u8]) = full_blocks.1.split_at(full_blocks_len as usize); let mut wv: [u64; 16] = [0u64; 16usize]; - crate::hacl_rs::hash_blake2b::update_multi( + crate::hash_blake2b::update_multi( 128u32, &mut wv, s0, @@ -738,7 +733,7 @@ pub fn compute_blake2b_32(dst: &mut [u8], key: &[u8], key_len: u32, data: &[u8], 1u32, ); let mut wv0: [u64; 16] = [0u64; 16usize]; - crate::hacl_rs::hash_blake2b::update_multi( + crate::hash_blake2b::update_multi( n_blocks0.wrapping_mul(128u32), &mut wv0, s0, @@ -747,7 +742,7 @@ pub fn compute_blake2b_32(dst: &mut [u8], key: &[u8], key_len: u32, data: &[u8], n_blocks0, ); let mut wv1: [u64; 16] = [0u64; 16usize]; - crate::hacl_rs::hash_blake2b::update_last( + crate::hash_blake2b::update_last( rem_len, &mut wv1, s0, @@ -759,6 +754,6 @@ pub fn compute_blake2b_32(dst: &mut [u8], key: &[u8], key_len: u32, data: &[u8], rem_len, rem0.1, ); - crate::hacl_rs::hash_blake2b::finish(64u32, dst, s0) + crate::hash_blake2b::finish(64u32, dst, s0) } */ diff --git a/src/hacl_rs/mod.rs b/libcrux-hacl-rs/src/lib.rs similarity index 53% rename from src/hacl_rs/mod.rs 
rename to libcrux-hacl-rs/src/lib.rs index aa1ef1d51..59fc9a8e6 100644 --- a/src/hacl_rs/mod.rs +++ b/libcrux-hacl-rs/src/lib.rs @@ -2,8 +2,8 @@ mod fstar; mod lowstar; -pub(crate) mod hash_sha1; -pub(crate) mod hash_sha2; +pub mod hash_sha1; +pub mod hash_sha2; //pub(crate) mod hkdf; -pub(crate) mod hmac; -pub(crate) mod streaming_types; +pub mod hmac; +pub mod streaming_types; diff --git a/src/hacl_rs/lowstar.rs b/libcrux-hacl-rs/src/lowstar.rs similarity index 100% rename from src/hacl_rs/lowstar.rs rename to libcrux-hacl-rs/src/lowstar.rs diff --git a/src/hacl_rs/lowstar/endianness.rs b/libcrux-hacl-rs/src/lowstar/endianness.rs similarity index 100% rename from src/hacl_rs/lowstar/endianness.rs rename to libcrux-hacl-rs/src/lowstar/endianness.rs diff --git a/src/hacl_rs/lowstar/ignore.rs b/libcrux-hacl-rs/src/lowstar/ignore.rs similarity index 100% rename from src/hacl_rs/lowstar/ignore.rs rename to libcrux-hacl-rs/src/lowstar/ignore.rs diff --git a/src/hacl_rs/streaming_types.rs b/libcrux-hacl-rs/src/streaming_types.rs similarity index 63% rename from src/hacl_rs/streaming_types.rs rename to libcrux-hacl-rs/src/streaming_types.rs index 886aad904..0c53b03f2 100644 --- a/src/hacl_rs/streaming_types.rs +++ b/libcrux-hacl-rs/src/streaming_types.rs @@ -5,8 +5,7 @@ #![allow(unreachable_patterns)] #[derive(PartialEq, Clone, Copy)] -pub enum hash_alg -{ +pub enum hash_alg { SHA2_224, SHA2_256, SHA2_384, @@ -20,22 +19,27 @@ pub enum hash_alg SHA3_384, SHA3_512, Shake128, - Shake256 + Shake256, } #[derive(PartialEq, Clone, Copy)] -pub enum error_code -{ +pub enum error_code { Success, InvalidAlgorithm, InvalidLength, - MaximumLengthExceeded + MaximumLengthExceeded, } #[derive(PartialEq, Clone)] -pub struct state_32 -{ pub block_state: Box<[u32]>, pub buf: Box<[u8]>, pub total_len: u64 } +pub struct state_32 { + pub block_state: Box<[u32]>, + pub buf: Box<[u8]>, + pub total_len: u64, +} #[derive(PartialEq, Clone)] -pub struct state_64 -{ pub block_state: Box<[u64]>, pub 
buf: Box<[u8]>, pub total_len: u64 } +pub struct state_64 { + pub block_state: Box<[u64]>, + pub buf: Box<[u8]>, + pub total_len: u64, +} diff --git a/src/digest.rs b/src/digest.rs index 27a3f1291..a45a0e3b3 100644 --- a/src/digest.rs +++ b/src/digest.rs @@ -259,10 +259,98 @@ pub fn sha2_512(payload: &[u8]) -> Sha2_512Digest { // Streaming API - This is the recommended one. // For implementations based on hacl_rs (over hacl-c) -pub use crate::hacl_rs::hash_sha2::HaclRs_Sha2_Sha256 as Sha2_256; -pub use crate::hacl_rs::hash_sha2::HaclRs_Sha2_Sha256_224 as Sha2_224; -pub use crate::hacl_rs::hash_sha2::HaclRs_Sha2_Sha512 as Sha2_512; -pub use crate::hacl_rs::hash_sha2::HaclRs_Sha2_Sha512_384 as Sha2_384; +macro_rules! impl_hash { + ($name:ident, $digest_size:literal, $state:ty, $malloc:expr, $reset:expr, $update:expr, $finish:expr, $copy:expr, $hash:expr) => { + #[allow(non_camel_case_types)] + pub struct $name { + state: $state, + } + + impl $name { + /// Return the digest for the given input byte slice, in immediate mode. + pub fn hash(digest: &mut [u8; $digest_size], payload: &[u8]) { + $hash(digest, payload, payload.len() as u32) + } + + /// Initialize a new digest state for streaming use. + pub fn new() -> $name { + $name { state: $malloc() } + } + + /// Add the `payload` to the digest. + pub fn update(&mut self, payload: &[u8]) { + $update(self.state.as_mut(), payload, payload.len() as u32); + } + + /// Get the digest. + /// + /// Note that the digest state can be continued to be used, to extend the + /// digest. + pub fn finish(&self, digest: &mut [u8; $digest_size]) { + $finish(self.state.as_ref(), digest); + } + + /// Reset the digest state. 
+ pub fn reset(&mut self) { + $reset(self.state.as_mut()); + } + } + + impl Clone for $name { + fn clone(&self) -> Self { + Self { + state: $copy(self.state.as_ref()), + } + } + } + }; +} + +impl_hash!( + Sha2_256, + 32, + Box<[libcrux_hacl_rs::streaming_types::state_32]>, + libcrux_hacl_rs::hash_sha2::malloc_256, + libcrux_hacl_rs::hash_sha2::reset_256, + libcrux_hacl_rs::hash_sha2::update_256, + libcrux_hacl_rs::hash_sha2::digest_256, + libcrux_hacl_rs::hash_sha2::copy_256, + libcrux_hacl_rs::hash_sha2::hash_256 +); +impl_hash!( + Sha2_224, + 28, + Box<[libcrux_hacl_rs::streaming_types::state_32]>, + libcrux_hacl_rs::hash_sha2::malloc_224, + libcrux_hacl_rs::hash_sha2::reset_224, + libcrux_hacl_rs::hash_sha2::update_224, + libcrux_hacl_rs::hash_sha2::digest_224, + libcrux_hacl_rs::hash_sha2::copy_256, + libcrux_hacl_rs::hash_sha2::hash_224 +); + +impl_hash!( + Sha2_512, + 64, + Box<[libcrux_hacl_rs::streaming_types::state_64]>, + libcrux_hacl_rs::hash_sha2::malloc_512, + libcrux_hacl_rs::hash_sha2::reset_512, + libcrux_hacl_rs::hash_sha2::update_512, + libcrux_hacl_rs::hash_sha2::digest_512, + libcrux_hacl_rs::hash_sha2::copy_512, + libcrux_hacl_rs::hash_sha2::hash_512 +); +impl_hash!( + Sha2_384, + 48, + Box<[libcrux_hacl_rs::streaming_types::state_64]>, + libcrux_hacl_rs::hash_sha2::malloc_384, + libcrux_hacl_rs::hash_sha2::reset_384, + libcrux_hacl_rs::hash_sha2::update_384, + libcrux_hacl_rs::hash_sha2::digest_384, + libcrux_hacl_rs::hash_sha2::copy_512, + libcrux_hacl_rs::hash_sha2::hash_384 +); // SHAKE messages from SHA 3 diff --git a/src/lib.rs b/src/lib.rs index cf2e6304b..8642eaa91 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -14,7 +14,6 @@ pub(crate) mod jasmin; // HACL pub(crate) mod hacl; -pub(crate) mod hacl_rs; // libcrux pub mod aead; From 56523a520d9d596e9b14b9c33d73d8388ac214ce Mon Sep 17 00:00:00 2001 From: "Jan Winkelmann (keks)" Date: Tue, 5 Nov 2024 17:44:25 +0100 Subject: [PATCH 07/18] make hmac crate use hacl-rs --- Cargo.lock | 1 + 
libcrux-hmac/Cargo.toml | 1 + libcrux-hmac/src/hacl_hmac.rs | 30 ------------------------------ libcrux-hmac/src/hmac.rs | 17 +++++++++++------ 4 files changed, 13 insertions(+), 36 deletions(-) delete mode 100644 libcrux-hmac/src/hacl_hmac.rs diff --git a/Cargo.lock b/Cargo.lock index 36051777b..59cbb1e11 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1014,6 +1014,7 @@ name = "libcrux-hmac" version = "0.0.2-beta.2" dependencies = [ "libcrux-hacl", + "libcrux-hacl-rs", "libcrux-hkdf", ] diff --git a/libcrux-hmac/Cargo.toml b/libcrux-hmac/Cargo.toml index a29de12d0..b0a3004b2 100644 --- a/libcrux-hmac/Cargo.toml +++ b/libcrux-hmac/Cargo.toml @@ -15,3 +15,4 @@ path = "src/hmac.rs" [dependencies] libcrux-hkdf = { version = "=0.0.2-beta.2", path = "../libcrux-hkdf" } libcrux-hacl = { version = "=0.0.2-beta.2", path = "../sys/hacl" } +libcrux-hacl-rs = { path = "../libcrux-hacl-rs/" } diff --git a/libcrux-hmac/src/hacl_hmac.rs b/libcrux-hmac/src/hacl_hmac.rs deleted file mode 100644 index 65136aa26..000000000 --- a/libcrux-hmac/src/hacl_hmac.rs +++ /dev/null @@ -1,30 +0,0 @@ -use libcrux_hacl::{ - Hacl_HMAC_compute_sha1, Hacl_HMAC_compute_sha2_256, Hacl_HMAC_compute_sha2_384, - Hacl_HMAC_compute_sha2_512, -}; - -macro_rules! impl_hmac { - ($name:ident,$fun:expr,$tag_len:literal) => { - /// Compute HMAC. - /// - /// Note that this function panics if `key` or `data` is larger than 2**32 bytes. 
- pub fn $name(key: &[u8], data: &[u8]) -> [u8; $tag_len] { - let mut dst = [0u8; $tag_len]; - unsafe { - $fun( - dst.as_mut_ptr(), - key.as_ptr() as _, - key.len().try_into().unwrap(), - data.as_ptr() as _, - data.len().try_into().unwrap(), - ) - } - dst - } - }; -} - -impl_hmac!(sha1, Hacl_HMAC_compute_sha1, 20); -impl_hmac!(sha2_256, Hacl_HMAC_compute_sha2_256, 32); -impl_hmac!(sha2_384, Hacl_HMAC_compute_sha2_384, 48); -impl_hmac!(sha2_512, Hacl_HMAC_compute_sha2_512, 64); diff --git a/libcrux-hmac/src/hmac.rs b/libcrux-hmac/src/hmac.rs index 3eea90a22..dab739650 100644 --- a/libcrux-hmac/src/hmac.rs +++ b/libcrux-hmac/src/hmac.rs @@ -3,7 +3,11 @@ //! This crate implements HMAC on SHA 1 and SHA 2 (except for SHA 224). use libcrux_hkdf as hkdf; -pub(crate) mod hacl_hmac; + +use libcrux_hacl_rs::hmac::compute_sha1 as hmac_sha1; +use libcrux_hacl_rs::hmac::compute_sha2_256 as hmac_sha256; +use libcrux_hacl_rs::hmac::compute_sha2_384 as hmac_sha384; +use libcrux_hacl_rs::hmac::compute_sha2_512 as hmac_sha512; /// The HMAC algorithm defining the used hash function. 
#[derive(Copy, Clone, Debug, PartialEq)] @@ -45,11 +49,12 @@ pub fn hmac(alg: Algorithm, key: &[u8], data: &[u8], tag_length: Option) Some(v) => v, None => native_tag_length, }; - let mut dst: Vec<_> = match alg { - Algorithm::Sha1 => crate::hacl_hmac::sha1(key, data).into(), - Algorithm::Sha256 => crate::hacl_hmac::sha2_256(key, data).into(), - Algorithm::Sha384 => crate::hacl_hmac::sha2_384(key, data).into(), - Algorithm::Sha512 => crate::hacl_hmac::sha2_512(key, data).into(), + let mut dst = vec![0u8; native_tag_length]; + match alg { + Algorithm::Sha1 => hmac_sha1(&mut dst, key, key.len() as u32, data, data.len() as u32), + Algorithm::Sha256 => hmac_sha256(&mut dst, key, key.len() as u32, data, data.len() as u32), + Algorithm::Sha384 => hmac_sha384(&mut dst, key, key.len() as u32, data, data.len() as u32), + Algorithm::Sha512 => hmac_sha512(&mut dst, key, key.len() as u32, data, data.len() as u32), }; dst.truncate(tag_length); dst From dfe85d2db943475f81729beddcfb65f832d3b44b Mon Sep 17 00:00:00 2001 From: "Jan Winkelmann (keks)" Date: Tue, 5 Nov 2024 18:23:32 +0100 Subject: [PATCH 08/18] use ed25519 from hacl-rs --- libcrux-hacl-rs/src/bignum.rs | 12 + libcrux-hacl-rs/src/bignum/bignum.rs | 2147 +++++++++++++++++ libcrux-hacl-rs/src/bignum/bignum256.rs | 1295 ++++++++++ libcrux-hacl-rs/src/bignum/bignum256_32.rs | 1332 ++++++++++ libcrux-hacl-rs/src/bignum/bignum32.rs | 753 ++++++ libcrux-hacl-rs/src/bignum/bignum4096.rs | 1245 ++++++++++ libcrux-hacl-rs/src/bignum/bignum4096_32.rs | 1247 ++++++++++ libcrux-hacl-rs/src/bignum/bignum64.rs | 651 +++++ libcrux-hacl-rs/src/bignum/bignum_base.rs | 467 ++++ libcrux-hacl-rs/src/bignum25519_51.rs | 724 ++++++ libcrux-hacl-rs/src/curve25519_51.rs | 340 +++ libcrux-hacl-rs/src/ed25519.rs | 1931 +++++++++++++++ libcrux-hacl-rs/src/ed25519_precomptable.rs | 478 ++++ libcrux-hacl-rs/src/lib.rs | 12 +- libcrux-hacl-rs/src/util.rs | 2 + .../src/util/inttypes_intrinsics.rs | 37 + libcrux-hacl-rs/src/util/memzero0.rs | 6 + 
src/signature.rs | 30 +- 18 files changed, 12698 insertions(+), 11 deletions(-) create mode 100644 libcrux-hacl-rs/src/bignum.rs create mode 100644 libcrux-hacl-rs/src/bignum/bignum.rs create mode 100644 libcrux-hacl-rs/src/bignum/bignum256.rs create mode 100644 libcrux-hacl-rs/src/bignum/bignum256_32.rs create mode 100644 libcrux-hacl-rs/src/bignum/bignum32.rs create mode 100644 libcrux-hacl-rs/src/bignum/bignum4096.rs create mode 100644 libcrux-hacl-rs/src/bignum/bignum4096_32.rs create mode 100644 libcrux-hacl-rs/src/bignum/bignum64.rs create mode 100644 libcrux-hacl-rs/src/bignum/bignum_base.rs create mode 100644 libcrux-hacl-rs/src/bignum25519_51.rs create mode 100644 libcrux-hacl-rs/src/curve25519_51.rs create mode 100644 libcrux-hacl-rs/src/ed25519.rs create mode 100644 libcrux-hacl-rs/src/ed25519_precomptable.rs create mode 100644 libcrux-hacl-rs/src/util.rs create mode 100644 libcrux-hacl-rs/src/util/inttypes_intrinsics.rs create mode 100644 libcrux-hacl-rs/src/util/memzero0.rs diff --git a/libcrux-hacl-rs/src/bignum.rs b/libcrux-hacl-rs/src/bignum.rs new file mode 100644 index 000000000..2d4a00fb1 --- /dev/null +++ b/libcrux-hacl-rs/src/bignum.rs @@ -0,0 +1,12 @@ +pub mod bignum_base; +pub mod bignum; +pub mod bignum32; +pub mod bignum64; +pub mod bignum256; +pub mod bignum256_32; +pub mod bignum4096; +pub mod bignum4096_32; + +pub mod test { + // pub mod bignum4096; +} diff --git a/libcrux-hacl-rs/src/bignum/bignum.rs b/libcrux-hacl-rs/src/bignum/bignum.rs new file mode 100644 index 000000000..87e097a97 --- /dev/null +++ b/libcrux-hacl-rs/src/bignum/bignum.rs @@ -0,0 +1,2147 @@ +#![allow(non_snake_case)] +#![allow(non_upper_case_globals)] +#![allow(non_camel_case_types)] +#![allow(unused_assignments)] +#![allow(unreachable_patterns)] + +use crate::fstar; +use crate::lowstar; +use crate::util as lib; + +pub(crate) fn bn_karatsuba_mul_uint32( + aLen: u32, + a: &[u32], + b: &[u32], + tmp: &mut [u32], + res: &mut [u32], +) { + if aLen < 32u32 || 
aLen.wrapping_rem(2u32) == 1u32 { + super::bignum_base::bn_mul_u32(aLen, a, aLen, b, res) + } else { + let len2: u32 = aLen.wrapping_div(2u32); + let a0: (&[u32], &[u32]) = a.split_at(0usize); + let a1: (&[u32], &[u32]) = a0.1.split_at(len2 as usize); + let b0: (&[u32], &[u32]) = b.split_at(0usize); + let b1: (&[u32], &[u32]) = b0.1.split_at(len2 as usize); + let t0: (&mut [u32], &mut [u32]) = tmp.split_at_mut(0usize); + let t1: (&mut [u32], &mut [u32]) = t0.1.split_at_mut(len2 as usize); + let tmpĀ·: (&mut [u32], &mut [u32]) = t1.1.split_at_mut(aLen as usize - len2 as usize); + let c0: u32 = super::bignum_base::bn_sub_eq_len_u32(len2, a1.0, a1.1, tmpĀ·.1); + let c1: u32 = super::bignum_base::bn_sub_eq_len_u32(len2, a1.1, a1.0, t1.0); + for i in 0u32..len2 { + let x: u32 = 0u32.wrapping_sub(c0) & t1.0[i as usize] + | !0u32.wrapping_sub(c0) & tmpĀ·.1[i as usize]; + let os: (&mut [u32], &mut [u32]) = t1.0.split_at_mut(0usize); + os.1[i as usize] = x + } + lowstar::ignore::ignore::(c1); + let c00: u32 = c0; + let c01: u32 = super::bignum_base::bn_sub_eq_len_u32(len2, b1.0, b1.1, tmpĀ·.1); + let c10: u32 = super::bignum_base::bn_sub_eq_len_u32(len2, b1.1, b1.0, tmpĀ·.0); + for i in 0u32..len2 { + let x: u32 = 0u32.wrapping_sub(c01) & tmpĀ·.0[i as usize] + | !0u32.wrapping_sub(c01) & tmpĀ·.1[i as usize]; + let os: (&mut [u32], &mut [u32]) = tmpĀ·.0.split_at_mut(0usize); + os.1[i as usize] = x + } + lowstar::ignore::ignore::(c10); + let c11: u32 = c01; + let t23: (&mut [u32], &mut [u32]) = tmpĀ·.1.split_at_mut(0usize); + let tmp1: (&mut [u32], &mut [u32]) = t23 + .1 + .split_at_mut(aLen.wrapping_add(aLen) as usize - aLen as usize); + super::bignum::bn_karatsuba_mul_uint32(len2, t1.0, tmpĀ·.0, tmp1.1, tmp1.0); + let r01: (&mut [u32], &mut [u32]) = res.split_at_mut(0usize); + let r23: (&mut [u32], &mut [u32]) = r01.1.split_at_mut(aLen as usize); + super::bignum::bn_karatsuba_mul_uint32(len2, a1.0, b1.0, tmp1.1, r23.0); + super::bignum::bn_karatsuba_mul_uint32(len2, a1.1, 
b1.1, tmp1.1, r23.1); + lowstar::ignore::ignore::<&[u32]>(res); + lowstar::ignore::ignore::<&[u32]>(tmp); + let r011: (&[u32], &[u32]) = res.split_at(0usize); + let r231: (&[u32], &[u32]) = r011.1.split_at(aLen as usize); + let t01: (&mut [u32], &mut [u32]) = tmp.split_at_mut(0usize); + let t231: (&mut [u32], &mut [u32]) = t01.1.split_at_mut(aLen as usize); + let t45: (&mut [u32], &mut [u32]) = t231 + .1 + .split_at_mut(2u32.wrapping_mul(aLen) as usize - aLen as usize); + let t67: (&mut [u32], &mut [u32]) = t45 + .1 + .split_at_mut(3u32.wrapping_mul(aLen) as usize - 2u32.wrapping_mul(aLen) as usize); + let c2: u32 = super::bignum_base::bn_add_eq_len_u32(aLen, r231.0, r231.1, t231.0); + let c_sign: u32 = c00 ^ c11; + let c3: u32 = super::bignum_base::bn_sub_eq_len_u32(aLen, t231.0, t45.0, t67.1); + let c31: u32 = c2.wrapping_sub(c3); + let c4: u32 = super::bignum_base::bn_add_eq_len_u32(aLen, t231.0, t45.0, t67.0); + let c41: u32 = c2.wrapping_add(c4); + let mask: u32 = 0u32.wrapping_sub(c_sign); + for i in 0u32..aLen { + let x: u32 = mask & t67.0[i as usize] | !mask & t67.1[i as usize]; + let os: (&mut [u32], &mut [u32]) = t67.0.split_at_mut(0usize); + os.1[i as usize] = x + } + let c5: u32 = mask & c41 | !mask & c31; + let aLen2: u32 = aLen.wrapping_div(2u32); + lowstar::ignore::ignore::<&[u32]>(res); + let r: (&mut [u32], &mut [u32]) = res.split_at_mut(aLen2 as usize); + let mut a_copy: Box<[u32]> = vec![0u32; aLen as usize].into_boxed_slice(); + let mut b_copy: Box<[u32]> = vec![0u32; aLen as usize].into_boxed_slice(); + ((&mut a_copy)[0usize..aLen as usize]).copy_from_slice(&r.1[0usize..aLen as usize]); + ((&mut b_copy)[0usize..aLen as usize]).copy_from_slice(&t67.0[0usize..aLen as usize]); + let r1: u32 = super::bignum_base::bn_add_eq_len_u32(aLen, &a_copy, &b_copy, r.1); + let r10: u32 = r1; + let c: u32 = r10; + let c6: u32 = c; + let c7: u32 = c5.wrapping_add(c6); + lowstar::ignore::ignore::<&[u32]>(res); + let r0: (&mut [u32], &mut [u32]) = 
res.split_at_mut(aLen.wrapping_add(aLen2) as usize); + let c010: u32 = + lib::inttypes_intrinsics::add_carry_u32(0u32, r0.1[0usize], c7, &mut r0.1[0usize..]); + let r11: u32 = if 1u32 + < aLen + .wrapping_add(aLen) + .wrapping_sub(aLen.wrapping_add(aLen2)) + { + let res1: (&mut [u32], &mut [u32]) = r0.1.split_at_mut(1usize); + let mut c8: [u32; 1] = [c010; 1usize]; + for i in 0u32..aLen + .wrapping_add(aLen) + .wrapping_sub(aLen.wrapping_add(aLen2)) + .wrapping_sub(1u32) + .wrapping_div(4u32) + { + let t11: u32 = res1.1[4u32.wrapping_mul(i) as usize]; + let res_i: (&mut [u32], &mut [u32]) = + res1.1.split_at_mut(4u32.wrapping_mul(i) as usize); + (&mut c8)[0usize] = + lib::inttypes_intrinsics::add_carry_u32((&c8)[0usize], t11, 0u32, res_i.1); + let t110: u32 = res1.1[4u32.wrapping_mul(i).wrapping_add(1u32) as usize]; + let res_i0: (&mut [u32], &mut [u32]) = + res1.1.split_at_mut(4u32.wrapping_mul(i) as usize + 1usize); + (&mut c8)[0usize] = + lib::inttypes_intrinsics::add_carry_u32((&c8)[0usize], t110, 0u32, res_i0.1); + let t111: u32 = res1.1[4u32.wrapping_mul(i).wrapping_add(2u32) as usize]; + let res_i1: (&mut [u32], &mut [u32]) = + res1.1.split_at_mut(4u32.wrapping_mul(i) as usize + 2usize); + (&mut c8)[0usize] = + lib::inttypes_intrinsics::add_carry_u32((&c8)[0usize], t111, 0u32, res_i1.1); + let t112: u32 = res1.1[4u32.wrapping_mul(i).wrapping_add(3u32) as usize]; + let res_i2: (&mut [u32], &mut [u32]) = + res1.1.split_at_mut(4u32.wrapping_mul(i) as usize + 3usize); + (&mut c8)[0usize] = + lib::inttypes_intrinsics::add_carry_u32((&c8)[0usize], t112, 0u32, res_i2.1) + } + for i in aLen + .wrapping_add(aLen) + .wrapping_sub(aLen.wrapping_add(aLen2)) + .wrapping_sub(1u32) + .wrapping_div(4u32) + .wrapping_mul(4u32) + ..aLen + .wrapping_add(aLen) + .wrapping_sub(aLen.wrapping_add(aLen2)) + .wrapping_sub(1u32) + { + let t11: u32 = res1.1[i as usize]; + let res_i: (&mut [u32], &mut [u32]) = res1.1.split_at_mut(i as usize); + (&mut c8)[0usize] = + 
                    lib::inttypes_intrinsics::add_carry_u32((&c8)[0usize], t11, 0u32, res_i.1)
            }
            let c110: u32 = (&c8)[0usize];
            c110
        } else {
            c010
        };
        let c8: u32 = r11;
        let c9: u32 = c8;
        let c12: u32 = c9;
        lowstar::ignore::ignore::<u32>(c12)
    }
}

// Karatsuba multiplication on u64 limbs: res (2*aLen limbs) = a * b.
// Falls back to schoolbook bn_mul_u64 when aLen < 32 or aLen is odd; the
// split_at_mut chain below implies `tmp` holds at least 4*aLen limbs.
// NOTE(review): identifier `tmpĀ·` looks like a mis-decoded `tmp·` (karamel
// primed name) — confirm encoding against the generated source.
pub(crate) fn bn_karatsuba_mul_uint64(
    aLen: u32,
    a: &[u64],
    b: &[u64],
    tmp: &mut [u64],
    res: &mut [u64],
) {
    if aLen < 32u32 || aLen.wrapping_rem(2u32) == 1u32 {
        super::bignum_base::bn_mul_u64(aLen, a, aLen, b, res)
    } else {
        let len2: u32 = aLen.wrapping_div(2u32);
        let a0: (&[u64], &[u64]) = a.split_at(0usize);
        let a1: (&[u64], &[u64]) = a0.1.split_at(len2 as usize);
        let b0: (&[u64], &[u64]) = b.split_at(0usize);
        let b1: (&[u64], &[u64]) = b0.1.split_at(len2 as usize);
        let t0: (&mut [u64], &mut [u64]) = tmp.split_at_mut(0usize);
        let t1: (&mut [u64], &mut [u64]) = t0.1.split_at_mut(len2 as usize);
        let tmpĀ·: (&mut [u64], &mut [u64]) = t1.1.split_at_mut(aLen as usize - len2 as usize);
        // |a_lo - a_hi| and |b_lo - b_hi|; both subtraction directions are
        // computed and the non-negative one is kept via an all-ones/zero mask
        // (branchless, constant-time). c0/c01 record the signs.
        let c0: u64 = super::bignum_base::bn_sub_eq_len_u64(len2, a1.0, a1.1, tmpĀ·.1);
        let c1: u64 = super::bignum_base::bn_sub_eq_len_u64(len2, a1.1, a1.0, t1.0);
        for i in 0u32..len2 {
            let x: u64 = 0u64.wrapping_sub(c0) & t1.0[i as usize]
                | !0u64.wrapping_sub(c0) & tmpĀ·.1[i as usize];
            let os: (&mut [u64], &mut [u64]) = t1.0.split_at_mut(0usize);
            os.1[i as usize] = x
        }
        lowstar::ignore::ignore::<u64>(c1);
        let c00: u64 = c0;
        let c01: u64 = super::bignum_base::bn_sub_eq_len_u64(len2, b1.0, b1.1, tmpĀ·.1);
        let c10: u64 = super::bignum_base::bn_sub_eq_len_u64(len2, b1.1, b1.0, tmpĀ·.0);
        for i in 0u32..len2 {
            let x: u64 = 0u64.wrapping_sub(c01) & tmpĀ·.0[i as usize]
                | !0u64.wrapping_sub(c01) & tmpĀ·.1[i as usize];
            let os: (&mut [u64], &mut [u64]) = tmpĀ·.0.split_at_mut(0usize);
            os.1[i as usize] = x
        }
        lowstar::ignore::ignore::<u64>(c10);
        let c11: u64 = c01;
        // Three half-size recursive products: |Δa|·|Δb| into scratch, and the
        // low/high half products directly into the low/high halves of res.
        let t23: (&mut [u64], &mut [u64]) = tmpĀ·.1.split_at_mut(0usize);
        let tmp1: (&mut [u64], &mut [u64]) = t23
            .1
            .split_at_mut(aLen.wrapping_add(aLen) as usize - aLen as usize);
        super::bignum::bn_karatsuba_mul_uint64(len2, t1.0, tmpĀ·.0, tmp1.1, tmp1.0);
        let r01: (&mut [u64], &mut [u64]) = res.split_at_mut(0usize);
        let r23: (&mut [u64], &mut [u64]) = r01.1.split_at_mut(aLen as usize);
        super::bignum::bn_karatsuba_mul_uint64(len2, a1.0, b1.0, tmp1.1, r23.0);
        super::bignum::bn_karatsuba_mul_uint64(len2, a1.1, b1.1, tmp1.1, r23.1);
        lowstar::ignore::ignore::<&[u64]>(res);
        lowstar::ignore::ignore::<&[u64]>(tmp);
        let r011: (&[u64], &[u64]) = res.split_at(0usize);
        let r231: (&[u64], &[u64]) = r011.1.split_at(aLen as usize);
        let t01: (&mut [u64], &mut [u64]) = tmp.split_at_mut(0usize);
        let t231: (&mut [u64], &mut [u64]) = t01.1.split_at_mut(aLen as usize);
        let t45: (&mut [u64], &mut [u64]) = t231
            .1
            .split_at_mut(2u32.wrapping_mul(aLen) as usize - aLen as usize);
        let t67: (&mut [u64], &mut [u64]) = t45
            .1
            .split_at_mut(3u32.wrapping_mul(aLen) as usize - 2u32.wrapping_mul(aLen) as usize);
        // Middle term: lo·lo + hi·hi combined with |Δa|·|Δb|, which is added or
        // subtracted depending on the product of the difference signs (c_sign);
        // both variants are computed and mask-selected (branchless).
        let c2: u64 = super::bignum_base::bn_add_eq_len_u64(aLen, r231.0, r231.1, t231.0);
        let c_sign: u64 = c00 ^ c11;
        let c3: u64 = super::bignum_base::bn_sub_eq_len_u64(aLen, t231.0, t45.0, t67.1);
        let c31: u64 = c2.wrapping_sub(c3);
        let c4: u64 = super::bignum_base::bn_add_eq_len_u64(aLen, t231.0, t45.0, t67.0);
        let c41: u64 = c2.wrapping_add(c4);
        let mask: u64 = 0u64.wrapping_sub(c_sign);
        for i in 0u32..aLen {
            let x: u64 = mask & t67.0[i as usize] | !mask & t67.1[i as usize];
            let os: (&mut [u64], &mut [u64]) = t67.0.split_at_mut(0usize);
            os.1[i as usize] = x
        }
        let c5: u64 = mask & c41 | !mask & c31;
        let aLen2: u32 = aLen.wrapping_div(2u32);
        lowstar::ignore::ignore::<&[u64]>(res);
        // Add the middle term into res at limb offset aLen/2.
        let r: (&mut [u64], &mut [u64]) = res.split_at_mut(aLen2 as usize);
        let mut a_copy: Box<[u64]> = vec![0u64; aLen as usize].into_boxed_slice();
        let mut b_copy: Box<[u64]> = vec![0u64; aLen as usize].into_boxed_slice();
        ((&mut a_copy)[0usize..aLen as usize]).copy_from_slice(&r.1[0usize..aLen as usize]);
        ((&mut b_copy)[0usize..aLen as usize]).copy_from_slice(&t67.0[0usize..aLen as usize]);
        let r1: u64 = super::bignum_base::bn_add_eq_len_u64(aLen, &a_copy, &b_copy, r.1);
        let r10: u64 = r1;
        let c: u64 = r10;
        let c6: u64 = c;
        let c7: u64 = c5.wrapping_add(c6);
        lowstar::ignore::ignore::<&[u64]>(res);
        // Propagate the final carry through the remaining top limbs of res
        // (4-way unrolled carry chain plus a remainder loop).
        let r0: (&mut [u64], &mut [u64]) = res.split_at_mut(aLen.wrapping_add(aLen2) as usize);
        let c010: u64 =
            lib::inttypes_intrinsics::add_carry_u64(0u64, r0.1[0usize], c7, &mut r0.1[0usize..]);
        let r11: u64 = if 1u32
            < aLen
                .wrapping_add(aLen)
                .wrapping_sub(aLen.wrapping_add(aLen2))
        {
            let res1: (&mut [u64], &mut [u64]) = r0.1.split_at_mut(1usize);
            let mut c8: [u64; 1] = [c010; 1usize];
            for i in 0u32..aLen
                .wrapping_add(aLen)
                .wrapping_sub(aLen.wrapping_add(aLen2))
                .wrapping_sub(1u32)
                .wrapping_div(4u32)
            {
                let t11: u64 = res1.1[4u32.wrapping_mul(i) as usize];
                let res_i: (&mut [u64], &mut [u64]) =
                    res1.1.split_at_mut(4u32.wrapping_mul(i) as usize);
                (&mut c8)[0usize] =
                    lib::inttypes_intrinsics::add_carry_u64((&c8)[0usize], t11, 0u64, res_i.1);
                let t110: u64 = res1.1[4u32.wrapping_mul(i).wrapping_add(1u32) as usize];
                let res_i0: (&mut [u64], &mut [u64]) =
                    res1.1.split_at_mut(4u32.wrapping_mul(i) as usize + 1usize);
                (&mut c8)[0usize] =
                    lib::inttypes_intrinsics::add_carry_u64((&c8)[0usize], t110, 0u64, res_i0.1);
                let t111: u64 = res1.1[4u32.wrapping_mul(i).wrapping_add(2u32) as usize];
                let res_i1: (&mut [u64], &mut [u64]) =
                    res1.1.split_at_mut(4u32.wrapping_mul(i) as usize + 2usize);
                (&mut c8)[0usize] =
                    lib::inttypes_intrinsics::add_carry_u64((&c8)[0usize], t111, 0u64, res_i1.1);
                let t112: u64 = res1.1[4u32.wrapping_mul(i).wrapping_add(3u32) as usize];
                let res_i2: (&mut [u64], &mut [u64]) =
                    res1.1.split_at_mut(4u32.wrapping_mul(i) as usize + 3usize);
                (&mut c8)[0usize] =
                    lib::inttypes_intrinsics::add_carry_u64((&c8)[0usize], t112, 0u64, res_i2.1)
            }
            for i in aLen
                .wrapping_add(aLen)
                .wrapping_sub(aLen.wrapping_add(aLen2))
                .wrapping_sub(1u32)
                .wrapping_div(4u32)
                .wrapping_mul(4u32)
                ..aLen
                    .wrapping_add(aLen)
                    .wrapping_sub(aLen.wrapping_add(aLen2))
                    .wrapping_sub(1u32)
            {
                let t11: u64 = res1.1[i as usize];
                let res_i: (&mut [u64], &mut [u64]) = res1.1.split_at_mut(i as usize);
                (&mut c8)[0usize] =
                    lib::inttypes_intrinsics::add_carry_u64((&c8)[0usize], t11, 0u64, res_i.1)
            }
            let c110: u64 = (&c8)[0usize];
            c110
        } else {
            c010
        };
        let c8: u64 = r11;
        let c9: u64 = c8;
        let c12: u64 = c9;
        lowstar::ignore::ignore::<u64>(c12)
    }
}

// Karatsuba squaring on u32 limbs: res (2*aLen limbs) = a², scratch in `tmp`.
// Same recursion shape as the multiply above, but only |a_lo - a_hi| is needed
// and the |Δ|² middle contribution is always subtracted (a square is sign-free).
pub(crate) fn bn_karatsuba_sqr_uint32(aLen: u32, a: &[u32], tmp: &mut [u32], res: &mut [u32]) {
    if aLen < 32u32 || aLen.wrapping_rem(2u32) == 1u32 {
        super::bignum_base::bn_sqr_u32(aLen, a, res)
    } else {
        let len2: u32 = aLen.wrapping_div(2u32);
        let a0: (&[u32], &[u32]) = a.split_at(0usize);
        let a1: (&[u32], &[u32]) = a0.1.split_at(len2 as usize);
        let t0: (&mut [u32], &mut [u32]) = tmp.split_at_mut(0usize);
        let tmpĀ·: (&mut [u32], &mut [u32]) = t0.1.split_at_mut(aLen as usize);
        let c0: u32 = super::bignum_base::bn_sub_eq_len_u32(len2, a1.0, a1.1, tmpĀ·.1);
        let c1: u32 = super::bignum_base::bn_sub_eq_len_u32(len2, a1.1, a1.0, tmpĀ·.0);
        for i in 0u32..len2 {
            let x: u32 = 0u32.wrapping_sub(c0) & tmpĀ·.0[i as usize]
                | !0u32.wrapping_sub(c0) & tmpĀ·.1[i as usize];
            let os: (&mut [u32], &mut [u32]) = tmpĀ·.0.split_at_mut(0usize);
            os.1[i as usize] = x
        }
        lowstar::ignore::ignore::<u32>(c1);
        let c00: u32 = c0;
        lowstar::ignore::ignore::<u32>(c00);
        let t23: (&mut [u32], &mut [u32]) = tmpĀ·.1.split_at_mut(0usize);
        let tmp1: (&mut [u32], &mut [u32]) = t23
            .1
            .split_at_mut(aLen.wrapping_add(aLen) as usize - aLen as usize);
        super::bignum::bn_karatsuba_sqr_uint32(len2, tmpĀ·.0, tmp1.1, tmp1.0);
        let r01: (&mut [u32], &mut [u32]) = res.split_at_mut(0usize);
        let r23: (&mut [u32], &mut [u32]) = r01.1.split_at_mut(aLen as usize);
        super::bignum::bn_karatsuba_sqr_uint32(len2, a1.0, tmp1.1, r23.0);
        super::bignum::bn_karatsuba_sqr_uint32(len2, a1.1, tmp1.1, r23.1);
        lowstar::ignore::ignore::<&[u32]>(res);
        lowstar::ignore::ignore::<&[u32]>(tmp);
        let r011: (&[u32], &[u32]) = res.split_at(0usize);
        let r231: (&[u32], &[u32]) = r011.1.split_at(aLen as usize);
        let t01: (&mut [u32], &mut [u32]) = tmp.split_at_mut(0usize);
        let t231: (&mut [u32], &mut [u32]) = t01.1.split_at_mut(aLen as usize);
        let t45: (&mut [u32], &mut [u32]) = t231
            .1
            .split_at_mut(2u32.wrapping_mul(aLen) as usize - aLen as usize);
        // Middle term for squaring: lo² + hi² − |a_lo − a_hi|² (no sign select
        // is needed, unlike the multiply).
        let c2: u32 = super::bignum_base::bn_add_eq_len_u32(aLen, r231.0, r231.1, t231.0);
        let c3: u32 = super::bignum_base::bn_sub_eq_len_u32(aLen, t231.0, t45.0, t45.1);
        let c5: u32 = c2.wrapping_sub(c3);
        let aLen2: u32 = aLen.wrapping_div(2u32);
        lowstar::ignore::ignore::<&[u32]>(res);
        // Add the middle term into res at limb offset aLen/2.
        let r: (&mut [u32], &mut [u32]) = res.split_at_mut(aLen2 as usize);
        let mut a_copy: Box<[u32]> = vec![0u32; aLen as usize].into_boxed_slice();
        let mut b_copy: Box<[u32]> = vec![0u32; aLen as usize].into_boxed_slice();
        ((&mut a_copy)[0usize..aLen as usize]).copy_from_slice(&r.1[0usize..aLen as usize]);
        ((&mut b_copy)[0usize..aLen as usize]).copy_from_slice(&t45.1[0usize..aLen as usize]);
        let r1: u32 = super::bignum_base::bn_add_eq_len_u32(aLen, &a_copy, &b_copy, r.1);
        let r10: u32 = r1;
        let c: u32 = r10;
        let c6: u32 = c;
        let c7: u32 = c5.wrapping_add(c6);
        lowstar::ignore::ignore::<&[u32]>(res);
        // Propagate the final carry through the remaining top limbs of res.
        let r0: (&mut [u32], &mut [u32]) = res.split_at_mut(aLen.wrapping_add(aLen2) as usize);
        let c01: u32 =
            lib::inttypes_intrinsics::add_carry_u32(0u32, r0.1[0usize], c7, &mut r0.1[0usize..]);
        let r11: u32 = if 1u32
            < aLen
                .wrapping_add(aLen)
                .wrapping_sub(aLen.wrapping_add(aLen2))
        {
            let res1: (&mut [u32], &mut [u32]) = r0.1.split_at_mut(1usize);
            let mut c4: [u32; 1] = [c01; 1usize];
            for i in 0u32..aLen
                .wrapping_add(aLen)
                .wrapping_sub(aLen.wrapping_add(aLen2))
                .wrapping_sub(1u32)
                .wrapping_div(4u32)
            {
                let t1: u32 = res1.1[4u32.wrapping_mul(i) as usize];
                let res_i: (&mut [u32], &mut [u32]) =
                    res1.1.split_at_mut(4u32.wrapping_mul(i) as usize);
                (&mut c4)[0usize] =
                    lib::inttypes_intrinsics::add_carry_u32((&c4)[0usize], t1, 0u32, res_i.1);
                let t10: u32 = res1.1[4u32.wrapping_mul(i).wrapping_add(1u32) as usize];
                let res_i0: (&mut [u32], &mut [u32]) =
                    res1.1.split_at_mut(4u32.wrapping_mul(i) as usize + 1usize);
                (&mut c4)[0usize] =
                    lib::inttypes_intrinsics::add_carry_u32((&c4)[0usize], t10, 0u32, res_i0.1);
                let t11: u32 = res1.1[4u32.wrapping_mul(i).wrapping_add(2u32) as usize];
                let res_i1: (&mut [u32], &mut [u32]) =
                    res1.1.split_at_mut(4u32.wrapping_mul(i) as usize + 2usize);
                (&mut c4)[0usize] =
                    lib::inttypes_intrinsics::add_carry_u32((&c4)[0usize], t11, 0u32, res_i1.1);
                let t12: u32 = res1.1[4u32.wrapping_mul(i).wrapping_add(3u32) as usize];
                let res_i2: (&mut [u32], &mut [u32]) =
                    res1.1.split_at_mut(4u32.wrapping_mul(i) as usize + 3usize);
                (&mut c4)[0usize] =
                    lib::inttypes_intrinsics::add_carry_u32((&c4)[0usize], t12, 0u32, res_i2.1)
            }
            for i in aLen
                .wrapping_add(aLen)
                .wrapping_sub(aLen.wrapping_add(aLen2))
                .wrapping_sub(1u32)
                .wrapping_div(4u32)
                .wrapping_mul(4u32)
                ..aLen
                    .wrapping_add(aLen)
                    .wrapping_sub(aLen.wrapping_add(aLen2))
                    .wrapping_sub(1u32)
            {
                let t1: u32 = res1.1[i as usize];
                let res_i: (&mut [u32], &mut [u32]) = res1.1.split_at_mut(i as usize);
                (&mut c4)[0usize] =
                    lib::inttypes_intrinsics::add_carry_u32((&c4)[0usize], t1, 0u32, res_i.1)
            }
            let c10: u32 = (&c4)[0usize];
            c10
        } else {
            c01
        };
        let c8: u32 = r11;
        let c4: u32 = c8;
        let c9: u32 = c4;
        lowstar::ignore::ignore::<u32>(c9)
    }
}

// Karatsuba squaring on u64 limbs: res (2*aLen limbs) = a², scratch in `tmp`.
// u64 twin of bn_karatsuba_sqr_uint32 above.
// NOTE(review): identifier `tmpĀ·` looks like a mis-decoded `tmp·` (karamel
// primed name) — confirm encoding against the generated source.
pub(crate) fn bn_karatsuba_sqr_uint64(aLen: u32, a: &[u64], tmp: &mut [u64], res: &mut [u64]) {
    if aLen < 32u32 || aLen.wrapping_rem(2u32) == 1u32 {
        super::bignum_base::bn_sqr_u64(aLen, a, res)
    } else {
        let len2: u32 = aLen.wrapping_div(2u32);
        let a0: (&[u64], &[u64]) = a.split_at(0usize);
        let a1: (&[u64], &[u64]) = a0.1.split_at(len2 as usize);
        let t0: (&mut [u64], &mut [u64]) = tmp.split_at_mut(0usize);
        let tmpĀ·: (&mut [u64], &mut [u64]) = t0.1.split_at_mut(aLen as usize);
        // |a_lo - a_hi| kept branchlessly via the all-ones/zero mask from c0.
        let c0: u64 = super::bignum_base::bn_sub_eq_len_u64(len2, a1.0, a1.1, tmpĀ·.1);
        let c1: u64 = super::bignum_base::bn_sub_eq_len_u64(len2, a1.1, a1.0, tmpĀ·.0);
        for i in 0u32..len2 {
            let x: u64 = 0u64.wrapping_sub(c0) & tmpĀ·.0[i as usize]
                | !0u64.wrapping_sub(c0) & tmpĀ·.1[i as usize];
            let os: (&mut [u64], &mut [u64]) = tmpĀ·.0.split_at_mut(0usize);
            os.1[i as usize] = x
        }
        lowstar::ignore::ignore::<u64>(c1);
        let c00: u64 = c0;
        lowstar::ignore::ignore::<u64>(c00);
        let t23: (&mut [u64], &mut [u64]) = tmpĀ·.1.split_at_mut(0usize);
        let tmp1: (&mut [u64], &mut [u64]) = t23
            .1
            .split_at_mut(aLen.wrapping_add(aLen) as usize - aLen as usize);
        super::bignum::bn_karatsuba_sqr_uint64(len2, tmpĀ·.0, tmp1.1, tmp1.0);
        let r01: (&mut [u64], &mut [u64]) = res.split_at_mut(0usize);
        let r23: (&mut [u64], &mut [u64]) = r01.1.split_at_mut(aLen as usize);
        super::bignum::bn_karatsuba_sqr_uint64(len2, a1.0, tmp1.1, r23.0);
        super::bignum::bn_karatsuba_sqr_uint64(len2, a1.1, tmp1.1, r23.1);
        lowstar::ignore::ignore::<&[u64]>(res);
        lowstar::ignore::ignore::<&[u64]>(tmp);
        let r011: (&[u64], &[u64]) = res.split_at(0usize);
        let r231: (&[u64], &[u64]) = r011.1.split_at(aLen as usize);
        let t01: (&mut [u64], &mut [u64]) = tmp.split_at_mut(0usize);
        let t231: (&mut [u64], &mut [u64]) = t01.1.split_at_mut(aLen as usize);
        let t45: (&mut [u64], &mut [u64]) = t231
            .1
            .split_at_mut(2u32.wrapping_mul(aLen) as usize - aLen as usize);
        // Middle term: lo² + hi² − |a_lo − a_hi|².
        let c2: u64 = super::bignum_base::bn_add_eq_len_u64(aLen, r231.0, r231.1, t231.0);
        let c3: u64 = super::bignum_base::bn_sub_eq_len_u64(aLen, t231.0, t45.0, t45.1);
        let c5: u64 = c2.wrapping_sub(c3);
        let aLen2: u32 = aLen.wrapping_div(2u32);
        lowstar::ignore::ignore::<&[u64]>(res);
        let r: (&mut [u64], &mut [u64]) = res.split_at_mut(aLen2 as usize);
        let mut a_copy: Box<[u64]> = vec![0u64; aLen as usize].into_boxed_slice();
        let mut b_copy: Box<[u64]> = vec![0u64; aLen as usize].into_boxed_slice();
        ((&mut a_copy)[0usize..aLen as usize]).copy_from_slice(&r.1[0usize..aLen as usize]);
        ((&mut b_copy)[0usize..aLen as usize]).copy_from_slice(&t45.1[0usize..aLen as usize]);
        let r1: u64 = super::bignum_base::bn_add_eq_len_u64(aLen, &a_copy, &b_copy, r.1);
        let r10: u64 = r1;
        let c: u64 = r10;
        let c6: u64 = c;
        let c7: u64 = c5.wrapping_add(c6);
        lowstar::ignore::ignore::<&[u64]>(res);
        // Propagate the final carry through the remaining top limbs of res.
        let r0: (&mut [u64], &mut [u64]) = res.split_at_mut(aLen.wrapping_add(aLen2) as usize);
        let c01: u64 =
            lib::inttypes_intrinsics::add_carry_u64(0u64, r0.1[0usize], c7, &mut r0.1[0usize..]);
        let r11: u64 = if 1u32
            < aLen
                .wrapping_add(aLen)
                .wrapping_sub(aLen.wrapping_add(aLen2))
        {
            let res1: (&mut [u64], &mut [u64]) = r0.1.split_at_mut(1usize);
            let mut c4: [u64; 1] = [c01; 1usize];
            for i in 0u32..aLen
                .wrapping_add(aLen)
                .wrapping_sub(aLen.wrapping_add(aLen2))
                .wrapping_sub(1u32)
                .wrapping_div(4u32)
            {
                let t1: u64 = res1.1[4u32.wrapping_mul(i) as usize];
                let res_i: (&mut [u64], &mut [u64]) =
                    res1.1.split_at_mut(4u32.wrapping_mul(i) as usize);
                (&mut c4)[0usize] =
                    lib::inttypes_intrinsics::add_carry_u64((&c4)[0usize], t1, 0u64, res_i.1);
                let t10: u64 = res1.1[4u32.wrapping_mul(i).wrapping_add(1u32) as usize];
                let res_i0: (&mut [u64], &mut [u64]) =
                    res1.1.split_at_mut(4u32.wrapping_mul(i) as usize + 1usize);
                (&mut c4)[0usize] =
                    lib::inttypes_intrinsics::add_carry_u64((&c4)[0usize], t10, 0u64, res_i0.1);
                let t11: u64 = res1.1[4u32.wrapping_mul(i).wrapping_add(2u32) as usize];
                let res_i1: (&mut [u64], &mut [u64]) =
                    res1.1.split_at_mut(4u32.wrapping_mul(i) as usize + 2usize);
                (&mut c4)[0usize] =
                    lib::inttypes_intrinsics::add_carry_u64((&c4)[0usize], t11, 0u64, res_i1.1);
                let t12: u64 = res1.1[4u32.wrapping_mul(i).wrapping_add(3u32) as usize];
                let res_i2: (&mut [u64], &mut [u64]) =
                    res1.1.split_at_mut(4u32.wrapping_mul(i) as usize + 3usize);
                (&mut c4)[0usize] =
                    lib::inttypes_intrinsics::add_carry_u64((&c4)[0usize], t12, 0u64, res_i2.1)
            }
            for i in aLen
                .wrapping_add(aLen)
                .wrapping_sub(aLen.wrapping_add(aLen2))
                .wrapping_sub(1u32)
                .wrapping_div(4u32)
                .wrapping_mul(4u32)
                ..aLen
                    .wrapping_add(aLen)
                    .wrapping_sub(aLen.wrapping_add(aLen2))
                    .wrapping_sub(1u32)
            {
                let t1: u64 = res1.1[i as usize];
                let res_i: (&mut [u64], &mut [u64]) = res1.1.split_at_mut(i as usize);
                (&mut c4)[0usize] =
                    lib::inttypes_intrinsics::add_carry_u64((&c4)[0usize], t1, 0u64, res_i.1)
            }
            let c10: u64 = (&c4)[0usize];
            c10
        } else {
            c01
        };
        let c8: u64 = r11;
        let c4: u64 = c8;
        let c9: u64 = c4;
        lowstar::ignore::ignore::<u64>(c9)
    }
}

/**
ATTENTION: this function is public, but is intended for internal use within this workspace; callers should not rely on the availability of this function, or its behavior!
*/
// res = (a + b) mod n over len1 u32 limbs (presumably a, b < n — TODO confirm
// with callers). Phase 1: res = a + b, carry in c0 (4-way unrolled chain).
// Phase 2: tmp = res - n, borrow in c10. The final mask select keeps a + b
// when the subtraction borrowed past the carry (a + b < n), else a + b - n —
// branchless / constant-time.
pub fn bn_add_mod_n_u32(len1: u32, n: &[u32], a: &[u32], b: &[u32], res: &mut [u32]) {
    let mut c: [u32; 1] = [0u32; 1usize];
    for i in 0u32..len1.wrapping_div(4u32) {
        let t1: u32 = a[4u32.wrapping_mul(i) as usize];
        let t2: u32 = b[4u32.wrapping_mul(i) as usize];
        let res_i: (&mut [u32], &mut [u32]) = res.split_at_mut(4u32.wrapping_mul(i) as usize);
        (&mut c)[0usize] = lib::inttypes_intrinsics::add_carry_u32((&c)[0usize], t1, t2, res_i.1);
        let t10: u32 = a[4u32.wrapping_mul(i).wrapping_add(1u32) as usize];
        let t20: u32 = b[4u32.wrapping_mul(i).wrapping_add(1u32) as usize];
        let res_i0: (&mut [u32], &mut [u32]) = res_i.1.split_at_mut(1usize);
        (&mut c)[0usize] =
            lib::inttypes_intrinsics::add_carry_u32((&c)[0usize], t10, t20, res_i0.1);
        let t11: u32 = a[4u32.wrapping_mul(i).wrapping_add(2u32) as usize];
        let t21: u32 = b[4u32.wrapping_mul(i).wrapping_add(2u32) as usize];
        let res_i1: (&mut [u32], &mut [u32]) = res_i0.1.split_at_mut(1usize);
        (&mut c)[0usize] =
            lib::inttypes_intrinsics::add_carry_u32((&c)[0usize], t11, t21, res_i1.1);
        let t12: u32 = a[4u32.wrapping_mul(i).wrapping_add(3u32) as usize];
        let t22: u32 = b[4u32.wrapping_mul(i).wrapping_add(3u32) as usize];
        let res_i2: (&mut [u32], &mut [u32]) = res_i1.1.split_at_mut(1usize);
        (&mut c)[0usize] = lib::inttypes_intrinsics::add_carry_u32((&c)[0usize], t12, t22, res_i2.1)
    }
    // Remainder limbs not covered by the 4-way unrolling.
    for i in len1.wrapping_div(4u32).wrapping_mul(4u32)..len1 {
        let t1: u32 = a[i as usize];
        let t2: u32 = b[i as usize];
        let res_i: (&mut [u32], &mut [u32]) = res.split_at_mut(i as usize);
        (&mut c)[0usize] = lib::inttypes_intrinsics::add_carry_u32((&c)[0usize], t1, t2, res_i.1)
    }
    let c0: u32 = (&c)[0usize];
    // tmp = res - n (trial subtraction of the modulus).
    let mut tmp: Box<[u32]> = vec![0u32; len1 as usize].into_boxed_slice();
    let mut c1: [u32; 1] = [0u32; 1usize];
    for i in 0u32..len1.wrapping_div(4u32) {
        let t1: u32 = res[4u32.wrapping_mul(i) as usize];
        let t2: u32 = n[4u32.wrapping_mul(i) as usize];
        let res_i: (&mut [u32], &mut [u32]) = tmp.split_at_mut(4u32.wrapping_mul(i) as usize);
        (&mut c1)[0usize] =
            lib::inttypes_intrinsics::sub_borrow_u32((&c1)[0usize], t1, t2, res_i.1);
        let t10: u32 = res[4u32.wrapping_mul(i).wrapping_add(1u32) as usize];
        let t20: u32 = n[4u32.wrapping_mul(i).wrapping_add(1u32) as usize];
        let res_i0: (&mut [u32], &mut [u32]) = res_i.1.split_at_mut(1usize);
        (&mut c1)[0usize] =
            lib::inttypes_intrinsics::sub_borrow_u32((&c1)[0usize], t10, t20, res_i0.1);
        let t11: u32 = res[4u32.wrapping_mul(i).wrapping_add(2u32) as usize];
        let t21: u32 = n[4u32.wrapping_mul(i).wrapping_add(2u32) as usize];
        let res_i1: (&mut [u32], &mut [u32]) = res_i0.1.split_at_mut(1usize);
        (&mut c1)[0usize] =
            lib::inttypes_intrinsics::sub_borrow_u32((&c1)[0usize], t11, t21, res_i1.1);
        let t12: u32 = res[4u32.wrapping_mul(i).wrapping_add(3u32) as usize];
        let t22: u32 = n[4u32.wrapping_mul(i).wrapping_add(3u32) as usize];
        let res_i2: (&mut [u32], &mut [u32]) = res_i1.1.split_at_mut(1usize);
        (&mut c1)[0usize] =
            lib::inttypes_intrinsics::sub_borrow_u32((&c1)[0usize], t12, t22, res_i2.1)
    }
    for i in len1.wrapping_div(4u32).wrapping_mul(4u32)..len1 {
        let t1: u32 = res[i as usize];
        let t2: u32 = n[i as usize];
        let res_i: (&mut [u32], &mut [u32]) = tmp.split_at_mut(i as usize);
        (&mut c1)[0usize] = lib::inttypes_intrinsics::sub_borrow_u32((&c1)[0usize], t1, t2, res_i.1)
    }
    let c10: u32 = (&c1)[0usize];
    // c2 is all-ones iff the trial subtraction underflowed relative to the
    // addition carry (a + b < n): keep res; otherwise take res - n.
    let c2: u32 = c0.wrapping_sub(c10);
    for i in 0u32..len1 {
        let x: u32 = c2 & res[i as usize] | !c2 & (&tmp)[i as usize];
        let os: (&mut [u32], &mut [u32]) = res.split_at_mut(0usize);
        os.1[i as usize] = x
    }
}

/**
ATTENTION: this function is public, but is intended for internal use within this workspace; callers should not rely on the availability of this function, or its behavior!
*/
// res = (a + b) mod n over len1 u64 limbs — u64 twin of bn_add_mod_n_u32:
// unrolled add with carry, trial subtraction of n, branchless final select.
pub fn bn_add_mod_n_u64(len1: u32, n: &[u64], a: &[u64], b: &[u64], res: &mut [u64]) {
    let mut c: [u64; 1] = [0u64; 1usize];
    for i in 0u32..len1.wrapping_div(4u32) {
        let t1: u64 = a[4u32.wrapping_mul(i) as usize];
        let t2: u64 = b[4u32.wrapping_mul(i) as usize];
        let res_i: (&mut [u64], &mut [u64]) = res.split_at_mut(4u32.wrapping_mul(i) as usize);
        (&mut c)[0usize] = lib::inttypes_intrinsics::add_carry_u64((&c)[0usize], t1, t2, res_i.1);
        let t10: u64 = a[4u32.wrapping_mul(i).wrapping_add(1u32) as usize];
        let t20: u64 = b[4u32.wrapping_mul(i).wrapping_add(1u32) as usize];
        let res_i0: (&mut [u64], &mut [u64]) = res_i.1.split_at_mut(1usize);
        (&mut c)[0usize] =
            lib::inttypes_intrinsics::add_carry_u64((&c)[0usize], t10, t20, res_i0.1);
        let t11: u64 = a[4u32.wrapping_mul(i).wrapping_add(2u32) as usize];
        let t21: u64 = b[4u32.wrapping_mul(i).wrapping_add(2u32) as usize];
        let res_i1: (&mut [u64], &mut [u64]) = res_i0.1.split_at_mut(1usize);
        (&mut c)[0usize] =
            lib::inttypes_intrinsics::add_carry_u64((&c)[0usize], t11, t21, res_i1.1);
        let t12: u64 = a[4u32.wrapping_mul(i).wrapping_add(3u32) as usize];
        let t22: u64 = b[4u32.wrapping_mul(i).wrapping_add(3u32) as usize];
        let res_i2: (&mut [u64], &mut [u64]) = res_i1.1.split_at_mut(1usize);
        (&mut c)[0usize] = lib::inttypes_intrinsics::add_carry_u64((&c)[0usize], t12, t22, res_i2.1)
    }
    // Remainder limbs not covered by the 4-way unrolling.
    for i in len1.wrapping_div(4u32).wrapping_mul(4u32)..len1 {
        let t1: u64 = a[i as usize];
        let t2: u64 = b[i as usize];
        let res_i: (&mut [u64], &mut [u64]) = res.split_at_mut(i as usize);
        (&mut c)[0usize] = lib::inttypes_intrinsics::add_carry_u64((&c)[0usize], t1, t2, res_i.1)
    }
    let c0: u64 = (&c)[0usize];
    // tmp = res - n (trial subtraction of the modulus).
    let mut tmp: Box<[u64]> = vec![0u64; len1 as usize].into_boxed_slice();
    let mut c1: [u64; 1] = [0u64; 1usize];
    for i in 0u32..len1.wrapping_div(4u32) {
        let t1: u64 = res[4u32.wrapping_mul(i) as usize];
        let t2: u64 = n[4u32.wrapping_mul(i) as usize];
        let res_i: (&mut [u64], &mut [u64]) = tmp.split_at_mut(4u32.wrapping_mul(i) as usize);
        (&mut c1)[0usize] =
            lib::inttypes_intrinsics::sub_borrow_u64((&c1)[0usize], t1, t2, res_i.1);
        let t10: u64 = res[4u32.wrapping_mul(i).wrapping_add(1u32) as usize];
        let t20: u64 = n[4u32.wrapping_mul(i).wrapping_add(1u32) as usize];
        let res_i0: (&mut [u64], &mut [u64]) = res_i.1.split_at_mut(1usize);
        (&mut c1)[0usize] =
            lib::inttypes_intrinsics::sub_borrow_u64((&c1)[0usize], t10, t20, res_i0.1);
        let t11: u64 = res[4u32.wrapping_mul(i).wrapping_add(2u32) as usize];
        let t21: u64 = n[4u32.wrapping_mul(i).wrapping_add(2u32) as usize];
        let res_i1: (&mut [u64], &mut [u64]) = res_i0.1.split_at_mut(1usize);
        (&mut c1)[0usize] =
            lib::inttypes_intrinsics::sub_borrow_u64((&c1)[0usize], t11, t21, res_i1.1);
        let t12: u64 = res[4u32.wrapping_mul(i).wrapping_add(3u32) as usize];
        let t22: u64 = n[4u32.wrapping_mul(i).wrapping_add(3u32) as usize];
        let res_i2: (&mut [u64], &mut [u64]) = res_i1.1.split_at_mut(1usize);
        (&mut c1)[0usize] =
            lib::inttypes_intrinsics::sub_borrow_u64((&c1)[0usize], t12, t22, res_i2.1)
    }
    for i in len1.wrapping_div(4u32).wrapping_mul(4u32)..len1 {
        let t1: u64 = res[i as usize];
        let t2: u64 = n[i as usize];
        let res_i: (&mut [u64], &mut [u64]) = tmp.split_at_mut(i as usize);
        (&mut c1)[0usize] = lib::inttypes_intrinsics::sub_borrow_u64((&c1)[0usize], t1, t2, res_i.1)
    }
    let c10: u64 = (&c1)[0usize];
    // c2 all-ones iff a + b < n: keep res, else take res - n (branchless).
    let c2: u64 = c0.wrapping_sub(c10);
    for i in 0u32..len1 {
        let x: u64 = c2 & res[i as usize] | !c2 & (&tmp)[i as usize];
        let os: (&mut [u64], &mut [u64]) = res.split_at_mut(0usize);
        os.1[i as usize] = x
    }
}

/**
ATTENTION: this function is public, but is intended for internal use within this workspace; callers should not rely on the availability of this function, or its behavior!
*/
// res = (a - b) mod n over len1 u32 limbs. Phase 1: res = a - b, borrow in c0.
// Phase 2: tmp = res + n. If the subtraction borrowed (c2 = all-ones mask),
// the final select takes res + n, otherwise res — branchless / constant-time.
pub fn bn_sub_mod_n_u32(len1: u32, n: &[u32], a: &[u32], b: &[u32], res: &mut [u32]) {
    let mut c: [u32; 1] = [0u32; 1usize];
    for i in 0u32..len1.wrapping_div(4u32) {
        let t1: u32 = a[4u32.wrapping_mul(i) as usize];
        let t2: u32 = b[4u32.wrapping_mul(i) as usize];
        let res_i: (&mut [u32], &mut [u32]) = res.split_at_mut(4u32.wrapping_mul(i) as usize);
        (&mut c)[0usize] = lib::inttypes_intrinsics::sub_borrow_u32((&c)[0usize], t1, t2, res_i.1);
        let t10: u32 = a[4u32.wrapping_mul(i).wrapping_add(1u32) as usize];
        let t20: u32 = b[4u32.wrapping_mul(i).wrapping_add(1u32) as usize];
        let res_i0: (&mut [u32], &mut [u32]) = res_i.1.split_at_mut(1usize);
        (&mut c)[0usize] =
            lib::inttypes_intrinsics::sub_borrow_u32((&c)[0usize], t10, t20, res_i0.1);
        let t11: u32 = a[4u32.wrapping_mul(i).wrapping_add(2u32) as usize];
        let t21: u32 = b[4u32.wrapping_mul(i).wrapping_add(2u32) as usize];
        let res_i1: (&mut [u32], &mut [u32]) = res_i0.1.split_at_mut(1usize);
        (&mut c)[0usize] =
            lib::inttypes_intrinsics::sub_borrow_u32((&c)[0usize], t11, t21, res_i1.1);
        let t12: u32 = a[4u32.wrapping_mul(i).wrapping_add(3u32) as usize];
        let t22: u32 = b[4u32.wrapping_mul(i).wrapping_add(3u32) as usize];
        let res_i2: (&mut [u32], &mut [u32]) = res_i1.1.split_at_mut(1usize);
        (&mut c)[0usize] =
            lib::inttypes_intrinsics::sub_borrow_u32((&c)[0usize], t12, t22, res_i2.1)
    }
    // Remainder limbs not covered by the 4-way unrolling.
    for i in len1.wrapping_div(4u32).wrapping_mul(4u32)..len1 {
        let t1: u32 = a[i as usize];
        let t2: u32 = b[i as usize];
        let res_i: (&mut [u32], &mut [u32]) = res.split_at_mut(i as usize);
        (&mut c)[0usize] = lib::inttypes_intrinsics::sub_borrow_u32((&c)[0usize], t1, t2, res_i.1)
    }
    let c0: u32 = (&c)[0usize];
    // tmp = res + n (correction term in case the subtraction underflowed).
    let mut tmp: Box<[u32]> = vec![0u32; len1 as usize].into_boxed_slice();
    let mut c1: [u32; 1] = [0u32; 1usize];
    for i in 0u32..len1.wrapping_div(4u32) {
        let t1: u32 = res[4u32.wrapping_mul(i) as usize];
        let t2: u32 = n[4u32.wrapping_mul(i) as usize];
        let res_i: (&mut [u32], &mut [u32]) = tmp.split_at_mut(4u32.wrapping_mul(i) as usize);
        (&mut c1)[0usize] = lib::inttypes_intrinsics::add_carry_u32((&c1)[0usize], t1, t2, res_i.1);
        let t10: u32 = res[4u32.wrapping_mul(i).wrapping_add(1u32) as usize];
        let t20: u32 = n[4u32.wrapping_mul(i).wrapping_add(1u32) as usize];
        let res_i0: (&mut [u32], &mut [u32]) = res_i.1.split_at_mut(1usize);
        (&mut c1)[0usize] =
            lib::inttypes_intrinsics::add_carry_u32((&c1)[0usize], t10, t20, res_i0.1);
        let t11: u32 = res[4u32.wrapping_mul(i).wrapping_add(2u32) as usize];
        let t21: u32 = n[4u32.wrapping_mul(i).wrapping_add(2u32) as usize];
        let res_i1: (&mut [u32], &mut [u32]) = res_i0.1.split_at_mut(1usize);
        (&mut c1)[0usize] =
            lib::inttypes_intrinsics::add_carry_u32((&c1)[0usize], t11, t21, res_i1.1);
        let t12: u32 = res[4u32.wrapping_mul(i).wrapping_add(3u32) as usize];
        let t22: u32 = n[4u32.wrapping_mul(i).wrapping_add(3u32) as usize];
        let res_i2: (&mut [u32], &mut [u32]) = res_i1.1.split_at_mut(1usize);
        (&mut c1)[0usize] =
            lib::inttypes_intrinsics::add_carry_u32((&c1)[0usize], t12, t22, res_i2.1)
    }
    for i in len1.wrapping_div(4u32).wrapping_mul(4u32)..len1 {
        let t1: u32 = res[i as usize];
        let t2: u32 = n[i as usize];
        let res_i: (&mut [u32], &mut [u32]) = tmp.split_at_mut(i as usize);
        (&mut c1)[0usize] = lib::inttypes_intrinsics::add_carry_u32((&c1)[0usize], t1, t2, res_i.1)
    }
    let c10: u32 = (&c1)[0usize];
    lowstar::ignore::ignore::<u32>(c10);
    // c2 all-ones iff phase 1 borrowed (a < b): take res + n, else res.
    let c2: u32 = 0u32.wrapping_sub(c0);
    for i in 0u32..len1 {
        let x: u32 = c2 & (&tmp)[i as usize] | !c2 & res[i as usize];
        let os: (&mut [u32], &mut [u32]) = res.split_at_mut(0usize);
        os.1[i as usize] = x
    }
}

/**
ATTENTION: this function is public, but is intended for internal use within this workspace; callers should not rely on the availability of this function, or its behavior!
*/
// res = (a - b) mod n over len1 u64 limbs — u64 twin of bn_sub_mod_n_u32:
// borrowing subtraction, then a correcting addition of n selected branchlessly
// when the subtraction underflowed.
pub fn bn_sub_mod_n_u64(len1: u32, n: &[u64], a: &[u64], b: &[u64], res: &mut [u64]) {
    let mut c: [u64; 1] = [0u64; 1usize];
    for i in 0u32..len1.wrapping_div(4u32) {
        let t1: u64 = a[4u32.wrapping_mul(i) as usize];
        let t2: u64 = b[4u32.wrapping_mul(i) as usize];
        let res_i: (&mut [u64], &mut [u64]) = res.split_at_mut(4u32.wrapping_mul(i) as usize);
        (&mut c)[0usize] = lib::inttypes_intrinsics::sub_borrow_u64((&c)[0usize], t1, t2, res_i.1);
        let t10: u64 = a[4u32.wrapping_mul(i).wrapping_add(1u32) as usize];
        let t20: u64 = b[4u32.wrapping_mul(i).wrapping_add(1u32) as usize];
        let res_i0: (&mut [u64], &mut [u64]) = res_i.1.split_at_mut(1usize);
        (&mut c)[0usize] =
            lib::inttypes_intrinsics::sub_borrow_u64((&c)[0usize], t10, t20, res_i0.1);
        let t11: u64 = a[4u32.wrapping_mul(i).wrapping_add(2u32) as usize];
        let t21: u64 = b[4u32.wrapping_mul(i).wrapping_add(2u32) as usize];
        let res_i1: (&mut [u64], &mut [u64]) = res_i0.1.split_at_mut(1usize);
        (&mut c)[0usize] =
            lib::inttypes_intrinsics::sub_borrow_u64((&c)[0usize], t11, t21, res_i1.1);
        let t12: u64 = a[4u32.wrapping_mul(i).wrapping_add(3u32) as usize];
        let t22: u64 = b[4u32.wrapping_mul(i).wrapping_add(3u32) as usize];
        let res_i2: (&mut [u64], &mut [u64]) = res_i1.1.split_at_mut(1usize);
        (&mut c)[0usize] =
            lib::inttypes_intrinsics::sub_borrow_u64((&c)[0usize], t12, t22, res_i2.1)
    }
    // Remainder limbs not covered by the 4-way unrolling.
    for i in len1.wrapping_div(4u32).wrapping_mul(4u32)..len1 {
        let t1: u64 = a[i as usize];
        let t2: u64 = b[i as usize];
        let res_i: (&mut [u64], &mut [u64]) = res.split_at_mut(i as usize);
        (&mut c)[0usize] = lib::inttypes_intrinsics::sub_borrow_u64((&c)[0usize], t1, t2, res_i.1)
    }
    let c0: u64 = (&c)[0usize];
    // tmp = res + n (correction term in case the subtraction underflowed).
    let mut tmp: Box<[u64]> = vec![0u64; len1 as usize].into_boxed_slice();
    let mut c1: [u64; 1] = [0u64; 1usize];
    for i in 0u32..len1.wrapping_div(4u32) {
        let t1: u64 = res[4u32.wrapping_mul(i) as usize];
        let t2: u64 = n[4u32.wrapping_mul(i) as usize];
        let res_i: (&mut [u64], &mut [u64]) = tmp.split_at_mut(4u32.wrapping_mul(i) as usize);
        (&mut c1)[0usize] = lib::inttypes_intrinsics::add_carry_u64((&c1)[0usize], t1, t2, res_i.1);
        let t10: u64 = res[4u32.wrapping_mul(i).wrapping_add(1u32) as usize];
        let t20: u64 = n[4u32.wrapping_mul(i).wrapping_add(1u32) as usize];
        let res_i0: (&mut [u64], &mut [u64]) = res_i.1.split_at_mut(1usize);
        (&mut c1)[0usize] =
            lib::inttypes_intrinsics::add_carry_u64((&c1)[0usize], t10, t20, res_i0.1);
        let t11: u64 = res[4u32.wrapping_mul(i).wrapping_add(2u32) as usize];
        let t21: u64 = n[4u32.wrapping_mul(i).wrapping_add(2u32) as usize];
        let res_i1: (&mut [u64], &mut [u64]) = res_i0.1.split_at_mut(1usize);
        (&mut c1)[0usize] =
            lib::inttypes_intrinsics::add_carry_u64((&c1)[0usize], t11, t21, res_i1.1);
        let t12: u64 = res[4u32.wrapping_mul(i).wrapping_add(3u32) as usize];
        let t22: u64 = n[4u32.wrapping_mul(i).wrapping_add(3u32) as usize];
        let res_i2: (&mut [u64], &mut [u64]) = res_i1.1.split_at_mut(1usize);
        (&mut c1)[0usize] =
            lib::inttypes_intrinsics::add_carry_u64((&c1)[0usize], t12, t22, res_i2.1)
    }
    for i in len1.wrapping_div(4u32).wrapping_mul(4u32)..len1 {
        let t1: u64 = res[i as usize];
        let t2: u64 = n[i as usize];
        let res_i: (&mut [u64], &mut [u64]) = tmp.split_at_mut(i as usize);
        (&mut c1)[0usize] = lib::inttypes_intrinsics::add_carry_u64((&c1)[0usize], t1, t2, res_i.1)
    }
    let c10: u64 = (&c1)[0usize];
    lowstar::ignore::ignore::<u64>(c10);
    // c2 all-ones iff phase 1 borrowed (a < b): take res + n, else res.
    let c2: u64 = 0u64.wrapping_sub(c0);
    for i in 0u32..len1 {
        let x: u64 = c2 & (&tmp)[i as usize] | !c2 & res[i as usize];
        let os: (&mut [u64], &mut [u64]) = res.split_at_mut(0usize);
        os.1[i as usize] = x
    }
}

/**
ATTENTION: this function is public, but is intended for internal use within this workspace; callers should not rely on the availability of this function, or its behavior!
+*/ +pub fn mod_inv_uint32(n0: u32) -> u32 { + let alpha: u32 = 2147483648u32; + let beta: u32 = n0; + let mut ub: [u32; 1] = [0u32; 1usize]; + let mut vb: [u32; 1] = [0u32; 1usize]; + (&mut ub)[0usize] = 1u32; + (&mut vb)[0usize] = 0u32; + krml::unroll_for!(32, "_i", 0u32, 1u32, { + let us: u32 = (&ub)[0usize]; + let vs: u32 = (&vb)[0usize]; + let u_is_odd: u32 = 0u32.wrapping_sub(us & 1u32); + let beta_if_u_is_odd: u32 = beta & u_is_odd; + (&mut ub)[0usize] = (us ^ beta_if_u_is_odd) + .wrapping_shr(1u32) + .wrapping_add(us & beta_if_u_is_odd); + let alpha_if_u_is_odd: u32 = alpha & u_is_odd; + (&mut vb)[0usize] = vs.wrapping_shr(1u32).wrapping_add(alpha_if_u_is_odd) + }); + (&vb)[0usize] +} + +/** +ATTENTION: this function is public, but is intended for internal use within this workspace; callers should not rely on the availability of this function, or its behavior! +*/ +pub fn mod_inv_uint64(n0: u64) -> u64 { + let alpha: u64 = 9223372036854775808u64; + let beta: u64 = n0; + let mut ub: [u64; 1] = [0u64; 1usize]; + let mut vb: [u64; 1] = [0u64; 1usize]; + (&mut ub)[0usize] = 1u64; + (&mut vb)[0usize] = 0u64; + for _i in 0u32..64u32 { + let us: u64 = (&ub)[0usize]; + let vs: u64 = (&vb)[0usize]; + let u_is_odd: u64 = 0u64.wrapping_sub(us & 1u64); + let beta_if_u_is_odd: u64 = beta & u_is_odd; + (&mut ub)[0usize] = (us ^ beta_if_u_is_odd) + .wrapping_shr(1u32) + .wrapping_add(us & beta_if_u_is_odd); + let alpha_if_u_is_odd: u64 = alpha & u_is_odd; + (&mut vb)[0usize] = vs.wrapping_shr(1u32).wrapping_add(alpha_if_u_is_odd) + } + (&vb)[0usize] +} + +/** +ATTENTION: this function is public, but is intended for internal use within this workspace; callers should not rely on the availability of this function, or its behavior! 
+*/ +pub fn bn_check_modulus_u32(len: u32, n: &[u32]) -> u32 { + let mut one: Box<[u32]> = vec![0u32; len as usize].into_boxed_slice(); + ((&mut one)[0usize..len as usize]) + .copy_from_slice(&vec![0u32; len as usize].into_boxed_slice()); + (&mut one)[0usize] = 1u32; + let bit0: u32 = n[0usize] & 1u32; + let m0: u32 = 0u32.wrapping_sub(bit0); + let mut acc: [u32; 1] = [0u32; 1usize]; + for i in 0u32..len { + let beq: u32 = fstar::uint32::eq_mask((&one)[i as usize], n[i as usize]); + let blt: u32 = !fstar::uint32::gte_mask((&one)[i as usize], n[i as usize]); + (&mut acc)[0usize] = beq & (&acc)[0usize] | !beq & blt + } + let m1: u32 = (&acc)[0usize]; + m0 & m1 +} + +/** +ATTENTION: this function is public, but is intended for internal use within this workspace; callers should not rely on the availability of this function, or its behavior! +*/ +pub fn bn_precomp_r2_mod_n_u32(len: u32, nBits: u32, n: &[u32], res: &mut [u32]) { + (res[0usize..len as usize]).copy_from_slice(&vec![0u32; len as usize].into_boxed_slice()); + let i: u32 = nBits.wrapping_div(32u32); + let j: u32 = nBits.wrapping_rem(32u32); + res[i as usize] |= 1u32.wrapping_shl(j); + for _i in 0u32..64u32.wrapping_mul(len).wrapping_sub(nBits) { + let mut a_copy: Box<[u32]> = vec![0u32; len as usize].into_boxed_slice(); + let mut b_copy: Box<[u32]> = vec![0u32; len as usize].into_boxed_slice(); + ((&mut a_copy)[0usize..len as usize]).copy_from_slice(&res[0usize..len as usize]); + ((&mut b_copy)[0usize..len as usize]).copy_from_slice(&res[0usize..len as usize]); + super::bignum::bn_add_mod_n_u32(len, n, &a_copy, &b_copy, res) + } +} + +fn bn_mont_reduction_u32(len: u32, n: &[u32], nInv: u32, c: &mut [u32], res: &mut [u32]) { + let mut c0: [u32; 1] = [0u32; 1usize]; + for i in 0u32..len { + let qj: u32 = nInv.wrapping_mul(c[i as usize]); + let res_j: (&mut [u32], &mut [u32]) = c.split_at_mut(i as usize); + let mut c1: [u32; 1] = [0u32; 1usize]; + for i0 in 0u32..len.wrapping_div(4u32) { + let a_i: u32 = 
n[4u32.wrapping_mul(i0) as usize]; + let res_i: (&mut [u32], &mut [u32]) = + res_j.1.split_at_mut(4u32.wrapping_mul(i0) as usize); + (&mut c1)[0usize] = + super::bignum_base::mul_wide_add2_u32(a_i, qj, (&c1)[0usize], res_i.1); + let a_i0: u32 = n[4u32.wrapping_mul(i0).wrapping_add(1u32) as usize]; + let res_i0: (&mut [u32], &mut [u32]) = res_i.1.split_at_mut(1usize); + (&mut c1)[0usize] = + super::bignum_base::mul_wide_add2_u32(a_i0, qj, (&c1)[0usize], res_i0.1); + let a_i1: u32 = n[4u32.wrapping_mul(i0).wrapping_add(2u32) as usize]; + let res_i1: (&mut [u32], &mut [u32]) = res_i0.1.split_at_mut(1usize); + (&mut c1)[0usize] = + super::bignum_base::mul_wide_add2_u32(a_i1, qj, (&c1)[0usize], res_i1.1); + let a_i2: u32 = n[4u32.wrapping_mul(i0).wrapping_add(3u32) as usize]; + let res_i2: (&mut [u32], &mut [u32]) = res_i1.1.split_at_mut(1usize); + (&mut c1)[0usize] = + super::bignum_base::mul_wide_add2_u32(a_i2, qj, (&c1)[0usize], res_i2.1) + } + for i0 in len.wrapping_div(4u32).wrapping_mul(4u32)..len { + let a_i: u32 = n[i0 as usize]; + let res_i: (&mut [u32], &mut [u32]) = res_j.1.split_at_mut(i0 as usize); + (&mut c1)[0usize] = + super::bignum_base::mul_wide_add2_u32(a_i, qj, (&c1)[0usize], res_i.1) + } + let r: u32 = (&c1)[0usize]; + let c10: u32 = r; + let res_j0: u32 = c[len.wrapping_add(i) as usize]; + let resb: (&mut [u32], &mut [u32]) = c.split_at_mut(len.wrapping_add(i) as usize); + (&mut c0)[0usize] = + lib::inttypes_intrinsics::add_carry_u32((&c0)[0usize], c10, res_j0, resb.1) + } + (res[0usize..len.wrapping_add(len).wrapping_sub(len) as usize]).copy_from_slice( + &(&c[len as usize..])[0usize..len.wrapping_add(len).wrapping_sub(len) as usize], + ); + let c00: u32 = (&c0)[0usize]; + let mut tmp: Box<[u32]> = vec![0u32; len as usize].into_boxed_slice(); + let mut c1: [u32; 1] = [0u32; 1usize]; + for i in 0u32..len.wrapping_div(4u32) { + let t1: u32 = res[4u32.wrapping_mul(i) as usize]; + let t2: u32 = n[4u32.wrapping_mul(i) as usize]; + let res_i: (&mut 
[u32], &mut [u32]) = tmp.split_at_mut(4u32.wrapping_mul(i) as usize); + (&mut c1)[0usize] = + lib::inttypes_intrinsics::sub_borrow_u32((&c1)[0usize], t1, t2, res_i.1); + let t10: u32 = res[4u32.wrapping_mul(i).wrapping_add(1u32) as usize]; + let t20: u32 = n[4u32.wrapping_mul(i).wrapping_add(1u32) as usize]; + let res_i0: (&mut [u32], &mut [u32]) = res_i.1.split_at_mut(1usize); + (&mut c1)[0usize] = + lib::inttypes_intrinsics::sub_borrow_u32((&c1)[0usize], t10, t20, res_i0.1); + let t11: u32 = res[4u32.wrapping_mul(i).wrapping_add(2u32) as usize]; + let t21: u32 = n[4u32.wrapping_mul(i).wrapping_add(2u32) as usize]; + let res_i1: (&mut [u32], &mut [u32]) = res_i0.1.split_at_mut(1usize); + (&mut c1)[0usize] = + lib::inttypes_intrinsics::sub_borrow_u32((&c1)[0usize], t11, t21, res_i1.1); + let t12: u32 = res[4u32.wrapping_mul(i).wrapping_add(3u32) as usize]; + let t22: u32 = n[4u32.wrapping_mul(i).wrapping_add(3u32) as usize]; + let res_i2: (&mut [u32], &mut [u32]) = res_i1.1.split_at_mut(1usize); + (&mut c1)[0usize] = + lib::inttypes_intrinsics::sub_borrow_u32((&c1)[0usize], t12, t22, res_i2.1) + } + for i in len.wrapping_div(4u32).wrapping_mul(4u32)..len { + let t1: u32 = res[i as usize]; + let t2: u32 = n[i as usize]; + let res_i: (&mut [u32], &mut [u32]) = tmp.split_at_mut(i as usize); + (&mut c1)[0usize] = lib::inttypes_intrinsics::sub_borrow_u32((&c1)[0usize], t1, t2, res_i.1) + } + let c10: u32 = (&c1)[0usize]; + let c2: u32 = c00.wrapping_sub(c10); + for i in 0u32..len { + let x: u32 = c2 & res[i as usize] | !c2 & (&tmp)[i as usize]; + let os: (&mut [u32], &mut [u32]) = res.split_at_mut(0usize); + os.1[i as usize] = x + } +} + +/** +ATTENTION: this function is public, but is intended for internal use within this workspace; callers should not rely on the availability of this function, or its behavior! 
+*/ +pub fn bn_to_mont_u32(len: u32, n: &[u32], nInv: u32, r2: &[u32], a: &[u32], aM: &mut [u32]) { + let mut c: Box<[u32]> = vec![0u32; len.wrapping_add(len) as usize].into_boxed_slice(); + let mut tmp: Box<[u32]> = vec![0u32; 4u32.wrapping_mul(len) as usize].into_boxed_slice(); + super::bignum::bn_karatsuba_mul_uint32(len, a, r2, &mut tmp, &mut c); + super::bignum::bn_mont_reduction_u32(len, n, nInv, &mut c, aM) +} + +/** +ATTENTION: this function is public, but is intended for internal use within this workspace; callers should not rely on the availability of this function, or its behavior! +*/ +pub fn bn_from_mont_u32(len: u32, n: &[u32], nInv_u64: u32, aM: &[u32], a: &mut [u32]) { + let mut tmp: Box<[u32]> = vec![0u32; len.wrapping_add(len) as usize].into_boxed_slice(); + ((&mut tmp)[0usize..len as usize]).copy_from_slice(&aM[0usize..len as usize]); + super::bignum::bn_mont_reduction_u32(len, n, nInv_u64, &mut tmp, a) +} + +/** +ATTENTION: this function is public, but is intended for internal use within this workspace; callers should not rely on the availability of this function, or its behavior! +*/ +pub fn bn_mont_mul_u32( + len: u32, + n: &[u32], + nInv_u64: u32, + aM: &[u32], + bM: &[u32], + resM: &mut [u32], +) { + let mut c: Box<[u32]> = vec![0u32; len.wrapping_add(len) as usize].into_boxed_slice(); + let mut tmp: Box<[u32]> = vec![0u32; 4u32.wrapping_mul(len) as usize].into_boxed_slice(); + super::bignum::bn_karatsuba_mul_uint32(len, aM, bM, &mut tmp, &mut c); + super::bignum::bn_mont_reduction_u32(len, n, nInv_u64, &mut c, resM) +} + +/** +ATTENTION: this function is public, but is intended for internal use within this workspace; callers should not rely on the availability of this function, or its behavior! 
+*/ +pub fn bn_mont_sqr_u32(len: u32, n: &[u32], nInv_u64: u32, aM: &[u32], resM: &mut [u32]) { + let mut c: Box<[u32]> = vec![0u32; len.wrapping_add(len) as usize].into_boxed_slice(); + let mut tmp: Box<[u32]> = vec![0u32; 4u32.wrapping_mul(len) as usize].into_boxed_slice(); + super::bignum::bn_karatsuba_sqr_uint32(len, aM, &mut tmp, &mut c); + super::bignum::bn_mont_reduction_u32(len, n, nInv_u64, &mut c, resM) +} + +/** +ATTENTION: this function is public, but is intended for internal use within this workspace; callers should not rely on the availability of this function, or its behavior! +*/ +pub fn bn_check_modulus_u64(len: u32, n: &[u64]) -> u64 { + let mut one: Box<[u64]> = vec![0u64; len as usize].into_boxed_slice(); + ((&mut one)[0usize..len as usize]) + .copy_from_slice(&vec![0u64; len as usize].into_boxed_slice()); + (&mut one)[0usize] = 1u64; + let bit0: u64 = n[0usize] & 1u64; + let m0: u64 = 0u64.wrapping_sub(bit0); + let mut acc: [u64; 1] = [0u64; 1usize]; + for i in 0u32..len { + let beq: u64 = fstar::uint64::eq_mask((&one)[i as usize], n[i as usize]); + let blt: u64 = !fstar::uint64::gte_mask((&one)[i as usize], n[i as usize]); + (&mut acc)[0usize] = beq & (&acc)[0usize] | !beq & blt + } + let m1: u64 = (&acc)[0usize]; + m0 & m1 +} + +/** +ATTENTION: this function is public, but is intended for internal use within this workspace; callers should not rely on the availability of this function, or its behavior! 
+*/ +pub fn bn_precomp_r2_mod_n_u64(len: u32, nBits: u32, n: &[u64], res: &mut [u64]) { + (res[0usize..len as usize]).copy_from_slice(&vec![0u64; len as usize].into_boxed_slice()); + let i: u32 = nBits.wrapping_div(64u32); + let j: u32 = nBits.wrapping_rem(64u32); + res[i as usize] |= 1u64.wrapping_shl(j); + for _i in 0u32..128u32.wrapping_mul(len).wrapping_sub(nBits) { + let mut a_copy: Box<[u64]> = vec![0u64; len as usize].into_boxed_slice(); + let mut b_copy: Box<[u64]> = vec![0u64; len as usize].into_boxed_slice(); + ((&mut a_copy)[0usize..len as usize]).copy_from_slice(&res[0usize..len as usize]); + ((&mut b_copy)[0usize..len as usize]).copy_from_slice(&res[0usize..len as usize]); + super::bignum::bn_add_mod_n_u64(len, n, &a_copy, &b_copy, res) + } +} + +fn bn_mont_reduction_u64(len: u32, n: &[u64], nInv: u64, c: &mut [u64], res: &mut [u64]) { + let mut c0: [u64; 1] = [0u64; 1usize]; + for i in 0u32..len { + let qj: u64 = nInv.wrapping_mul(c[i as usize]); + let res_j: (&mut [u64], &mut [u64]) = c.split_at_mut(i as usize); + let mut c1: [u64; 1] = [0u64; 1usize]; + for i0 in 0u32..len.wrapping_div(4u32) { + let a_i: u64 = n[4u32.wrapping_mul(i0) as usize]; + let res_i: (&mut [u64], &mut [u64]) = + res_j.1.split_at_mut(4u32.wrapping_mul(i0) as usize); + (&mut c1)[0usize] = + super::bignum_base::mul_wide_add2_u64(a_i, qj, (&c1)[0usize], res_i.1); + let a_i0: u64 = n[4u32.wrapping_mul(i0).wrapping_add(1u32) as usize]; + let res_i0: (&mut [u64], &mut [u64]) = res_i.1.split_at_mut(1usize); + (&mut c1)[0usize] = + super::bignum_base::mul_wide_add2_u64(a_i0, qj, (&c1)[0usize], res_i0.1); + let a_i1: u64 = n[4u32.wrapping_mul(i0).wrapping_add(2u32) as usize]; + let res_i1: (&mut [u64], &mut [u64]) = res_i0.1.split_at_mut(1usize); + (&mut c1)[0usize] = + super::bignum_base::mul_wide_add2_u64(a_i1, qj, (&c1)[0usize], res_i1.1); + let a_i2: u64 = n[4u32.wrapping_mul(i0).wrapping_add(3u32) as usize]; + let res_i2: (&mut [u64], &mut [u64]) = res_i1.1.split_at_mut(1usize); + 
(&mut c1)[0usize] = + super::bignum_base::mul_wide_add2_u64(a_i2, qj, (&c1)[0usize], res_i2.1) + } + for i0 in len.wrapping_div(4u32).wrapping_mul(4u32)..len { + let a_i: u64 = n[i0 as usize]; + let res_i: (&mut [u64], &mut [u64]) = res_j.1.split_at_mut(i0 as usize); + (&mut c1)[0usize] = + super::bignum_base::mul_wide_add2_u64(a_i, qj, (&c1)[0usize], res_i.1) + } + let r: u64 = (&c1)[0usize]; + let c10: u64 = r; + let res_j0: u64 = c[len.wrapping_add(i) as usize]; + let resb: (&mut [u64], &mut [u64]) = c.split_at_mut(len.wrapping_add(i) as usize); + (&mut c0)[0usize] = + lib::inttypes_intrinsics::add_carry_u64((&c0)[0usize], c10, res_j0, resb.1) + } + (res[0usize..len.wrapping_add(len).wrapping_sub(len) as usize]).copy_from_slice( + &(&c[len as usize..])[0usize..len.wrapping_add(len).wrapping_sub(len) as usize], + ); + let c00: u64 = (&c0)[0usize]; + let mut tmp: Box<[u64]> = vec![0u64; len as usize].into_boxed_slice(); + let mut c1: [u64; 1] = [0u64; 1usize]; + for i in 0u32..len.wrapping_div(4u32) { + let t1: u64 = res[4u32.wrapping_mul(i) as usize]; + let t2: u64 = n[4u32.wrapping_mul(i) as usize]; + let res_i: (&mut [u64], &mut [u64]) = tmp.split_at_mut(4u32.wrapping_mul(i) as usize); + (&mut c1)[0usize] = + lib::inttypes_intrinsics::sub_borrow_u64((&c1)[0usize], t1, t2, res_i.1); + let t10: u64 = res[4u32.wrapping_mul(i).wrapping_add(1u32) as usize]; + let t20: u64 = n[4u32.wrapping_mul(i).wrapping_add(1u32) as usize]; + let res_i0: (&mut [u64], &mut [u64]) = res_i.1.split_at_mut(1usize); + (&mut c1)[0usize] = + lib::inttypes_intrinsics::sub_borrow_u64((&c1)[0usize], t10, t20, res_i0.1); + let t11: u64 = res[4u32.wrapping_mul(i).wrapping_add(2u32) as usize]; + let t21: u64 = n[4u32.wrapping_mul(i).wrapping_add(2u32) as usize]; + let res_i1: (&mut [u64], &mut [u64]) = res_i0.1.split_at_mut(1usize); + (&mut c1)[0usize] = + lib::inttypes_intrinsics::sub_borrow_u64((&c1)[0usize], t11, t21, res_i1.1); + let t12: u64 = res[4u32.wrapping_mul(i).wrapping_add(3u32) as 
usize]; + let t22: u64 = n[4u32.wrapping_mul(i).wrapping_add(3u32) as usize]; + let res_i2: (&mut [u64], &mut [u64]) = res_i1.1.split_at_mut(1usize); + (&mut c1)[0usize] = + lib::inttypes_intrinsics::sub_borrow_u64((&c1)[0usize], t12, t22, res_i2.1) + } + for i in len.wrapping_div(4u32).wrapping_mul(4u32)..len { + let t1: u64 = res[i as usize]; + let t2: u64 = n[i as usize]; + let res_i: (&mut [u64], &mut [u64]) = tmp.split_at_mut(i as usize); + (&mut c1)[0usize] = lib::inttypes_intrinsics::sub_borrow_u64((&c1)[0usize], t1, t2, res_i.1) + } + let c10: u64 = (&c1)[0usize]; + let c2: u64 = c00.wrapping_sub(c10); + for i in 0u32..len { + let x: u64 = c2 & res[i as usize] | !c2 & (&tmp)[i as usize]; + let os: (&mut [u64], &mut [u64]) = res.split_at_mut(0usize); + os.1[i as usize] = x + } +} + +/** +ATTENTION: this function is public, but is intended for internal use within this workspace; callers should not rely on the availability of this function, or its behavior! +*/ +pub fn bn_to_mont_u64(len: u32, n: &[u64], nInv: u64, r2: &[u64], a: &[u64], aM: &mut [u64]) { + let mut c: Box<[u64]> = vec![0u64; len.wrapping_add(len) as usize].into_boxed_slice(); + let mut tmp: Box<[u64]> = vec![0u64; 4u32.wrapping_mul(len) as usize].into_boxed_slice(); + super::bignum::bn_karatsuba_mul_uint64(len, a, r2, &mut tmp, &mut c); + super::bignum::bn_mont_reduction_u64(len, n, nInv, &mut c, aM) +} + +/** +ATTENTION: this function is public, but is intended for internal use within this workspace; callers should not rely on the availability of this function, or its behavior! 
+*/ +pub fn bn_from_mont_u64(len: u32, n: &[u64], nInv_u64: u64, aM: &[u64], a: &mut [u64]) { + let mut tmp: Box<[u64]> = vec![0u64; len.wrapping_add(len) as usize].into_boxed_slice(); + ((&mut tmp)[0usize..len as usize]).copy_from_slice(&aM[0usize..len as usize]); + super::bignum::bn_mont_reduction_u64(len, n, nInv_u64, &mut tmp, a) +} + +/** +ATTENTION: this function is public, but is intended for internal use within this workspace; callers should not rely on the availability of this function, or its behavior! +*/ +pub fn bn_mont_mul_u64( + len: u32, + n: &[u64], + nInv_u64: u64, + aM: &[u64], + bM: &[u64], + resM: &mut [u64], +) { + let mut c: Box<[u64]> = vec![0u64; len.wrapping_add(len) as usize].into_boxed_slice(); + let mut tmp: Box<[u64]> = vec![0u64; 4u32.wrapping_mul(len) as usize].into_boxed_slice(); + super::bignum::bn_karatsuba_mul_uint64(len, aM, bM, &mut tmp, &mut c); + super::bignum::bn_mont_reduction_u64(len, n, nInv_u64, &mut c, resM) +} + +/** +ATTENTION: this function is public, but is intended for internal use within this workspace; callers should not rely on the availability of this function, or its behavior! 
+*/ +pub fn bn_mont_sqr_u64(len: u32, n: &[u64], nInv_u64: u64, aM: &[u64], resM: &mut [u64]) { + let mut c: Box<[u64]> = vec![0u64; len.wrapping_add(len) as usize].into_boxed_slice(); + let mut tmp: Box<[u64]> = vec![0u64; 4u32.wrapping_mul(len) as usize].into_boxed_slice(); + super::bignum::bn_karatsuba_sqr_uint64(len, aM, &mut tmp, &mut c); + super::bignum::bn_mont_reduction_u64(len, n, nInv_u64, &mut c, resM) +} + +pub(crate) fn bn_almost_mont_reduction_u32( + len: u32, + n: &[u32], + nInv: u32, + c: &mut [u32], + res: &mut [u32], +) { + let mut c0: [u32; 1] = [0u32; 1usize]; + for i in 0u32..len { + let qj: u32 = nInv.wrapping_mul(c[i as usize]); + let res_j: (&mut [u32], &mut [u32]) = c.split_at_mut(i as usize); + let mut c1: [u32; 1] = [0u32; 1usize]; + for i0 in 0u32..len.wrapping_div(4u32) { + let a_i: u32 = n[4u32.wrapping_mul(i0) as usize]; + let res_i: (&mut [u32], &mut [u32]) = + res_j.1.split_at_mut(4u32.wrapping_mul(i0) as usize); + (&mut c1)[0usize] = + super::bignum_base::mul_wide_add2_u32(a_i, qj, (&c1)[0usize], res_i.1); + let a_i0: u32 = n[4u32.wrapping_mul(i0).wrapping_add(1u32) as usize]; + let res_i0: (&mut [u32], &mut [u32]) = res_i.1.split_at_mut(1usize); + (&mut c1)[0usize] = + super::bignum_base::mul_wide_add2_u32(a_i0, qj, (&c1)[0usize], res_i0.1); + let a_i1: u32 = n[4u32.wrapping_mul(i0).wrapping_add(2u32) as usize]; + let res_i1: (&mut [u32], &mut [u32]) = res_i0.1.split_at_mut(1usize); + (&mut c1)[0usize] = + super::bignum_base::mul_wide_add2_u32(a_i1, qj, (&c1)[0usize], res_i1.1); + let a_i2: u32 = n[4u32.wrapping_mul(i0).wrapping_add(3u32) as usize]; + let res_i2: (&mut [u32], &mut [u32]) = res_i1.1.split_at_mut(1usize); + (&mut c1)[0usize] = + super::bignum_base::mul_wide_add2_u32(a_i2, qj, (&c1)[0usize], res_i2.1) + } + for i0 in len.wrapping_div(4u32).wrapping_mul(4u32)..len { + let a_i: u32 = n[i0 as usize]; + let res_i: (&mut [u32], &mut [u32]) = res_j.1.split_at_mut(i0 as usize); + (&mut c1)[0usize] = + 
super::bignum_base::mul_wide_add2_u32(a_i, qj, (&c1)[0usize], res_i.1) + } + let r: u32 = (&c1)[0usize]; + let c10: u32 = r; + let res_j0: u32 = c[len.wrapping_add(i) as usize]; + let resb: (&mut [u32], &mut [u32]) = c.split_at_mut(len.wrapping_add(i) as usize); + (&mut c0)[0usize] = + lib::inttypes_intrinsics::add_carry_u32((&c0)[0usize], c10, res_j0, resb.1) + } + (res[0usize..len.wrapping_add(len).wrapping_sub(len) as usize]).copy_from_slice( + &(&c[len as usize..])[0usize..len.wrapping_add(len).wrapping_sub(len) as usize], + ); + let c00: u32 = (&c0)[0usize]; + let mut tmp: Box<[u32]> = vec![0u32; len as usize].into_boxed_slice(); + let c1: u32 = super::bignum_base::bn_sub_eq_len_u32(len, res, n, &mut tmp); + lowstar::ignore::ignore::(c1); + let m: u32 = 0u32.wrapping_sub(c00); + for i in 0u32..len { + let x: u32 = m & (&tmp)[i as usize] | !m & res[i as usize]; + let os: (&mut [u32], &mut [u32]) = res.split_at_mut(0usize); + os.1[i as usize] = x + } +} + +fn bn_almost_mont_mul_u32( + len: u32, + n: &[u32], + nInv_u64: u32, + aM: &[u32], + bM: &[u32], + resM: &mut [u32], +) { + let mut c: Box<[u32]> = vec![0u32; len.wrapping_add(len) as usize].into_boxed_slice(); + let mut tmp: Box<[u32]> = vec![0u32; 4u32.wrapping_mul(len) as usize].into_boxed_slice(); + super::bignum::bn_karatsuba_mul_uint32(len, aM, bM, &mut tmp, &mut c); + super::bignum::bn_almost_mont_reduction_u32(len, n, nInv_u64, &mut c, resM) +} + +fn bn_almost_mont_sqr_u32(len: u32, n: &[u32], nInv_u64: u32, aM: &[u32], resM: &mut [u32]) { + let mut c: Box<[u32]> = vec![0u32; len.wrapping_add(len) as usize].into_boxed_slice(); + let mut tmp: Box<[u32]> = vec![0u32; 4u32.wrapping_mul(len) as usize].into_boxed_slice(); + super::bignum::bn_karatsuba_sqr_uint32(len, aM, &mut tmp, &mut c); + super::bignum::bn_almost_mont_reduction_u32(len, n, nInv_u64, &mut c, resM) +} + +pub(crate) fn bn_almost_mont_reduction_u64( + len: u32, + n: &[u64], + nInv: u64, + c: &mut [u64], + res: &mut [u64], +) { + let mut c0: 
[u64; 1] = [0u64; 1usize]; + for i in 0u32..len { + let qj: u64 = nInv.wrapping_mul(c[i as usize]); + let res_j: (&mut [u64], &mut [u64]) = c.split_at_mut(i as usize); + let mut c1: [u64; 1] = [0u64; 1usize]; + for i0 in 0u32..len.wrapping_div(4u32) { + let a_i: u64 = n[4u32.wrapping_mul(i0) as usize]; + let res_i: (&mut [u64], &mut [u64]) = + res_j.1.split_at_mut(4u32.wrapping_mul(i0) as usize); + (&mut c1)[0usize] = + super::bignum_base::mul_wide_add2_u64(a_i, qj, (&c1)[0usize], res_i.1); + let a_i0: u64 = n[4u32.wrapping_mul(i0).wrapping_add(1u32) as usize]; + let res_i0: (&mut [u64], &mut [u64]) = res_i.1.split_at_mut(1usize); + (&mut c1)[0usize] = + super::bignum_base::mul_wide_add2_u64(a_i0, qj, (&c1)[0usize], res_i0.1); + let a_i1: u64 = n[4u32.wrapping_mul(i0).wrapping_add(2u32) as usize]; + let res_i1: (&mut [u64], &mut [u64]) = res_i0.1.split_at_mut(1usize); + (&mut c1)[0usize] = + super::bignum_base::mul_wide_add2_u64(a_i1, qj, (&c1)[0usize], res_i1.1); + let a_i2: u64 = n[4u32.wrapping_mul(i0).wrapping_add(3u32) as usize]; + let res_i2: (&mut [u64], &mut [u64]) = res_i1.1.split_at_mut(1usize); + (&mut c1)[0usize] = + super::bignum_base::mul_wide_add2_u64(a_i2, qj, (&c1)[0usize], res_i2.1) + } + for i0 in len.wrapping_div(4u32).wrapping_mul(4u32)..len { + let a_i: u64 = n[i0 as usize]; + let res_i: (&mut [u64], &mut [u64]) = res_j.1.split_at_mut(i0 as usize); + (&mut c1)[0usize] = + super::bignum_base::mul_wide_add2_u64(a_i, qj, (&c1)[0usize], res_i.1) + } + let r: u64 = (&c1)[0usize]; + let c10: u64 = r; + let res_j0: u64 = c[len.wrapping_add(i) as usize]; + let resb: (&mut [u64], &mut [u64]) = c.split_at_mut(len.wrapping_add(i) as usize); + (&mut c0)[0usize] = + lib::inttypes_intrinsics::add_carry_u64((&c0)[0usize], c10, res_j0, resb.1) + } + (res[0usize..len.wrapping_add(len).wrapping_sub(len) as usize]).copy_from_slice( + &(&c[len as usize..])[0usize..len.wrapping_add(len).wrapping_sub(len) as usize], + ); + let c00: u64 = (&c0)[0usize]; + let mut 
tmp: Box<[u64]> = vec![0u64; len as usize].into_boxed_slice(); + let c1: u64 = super::bignum_base::bn_sub_eq_len_u64(len, res, n, &mut tmp); + lowstar::ignore::ignore::(c1); + let m: u64 = 0u64.wrapping_sub(c00); + for i in 0u32..len { + let x: u64 = m & (&tmp)[i as usize] | !m & res[i as usize]; + let os: (&mut [u64], &mut [u64]) = res.split_at_mut(0usize); + os.1[i as usize] = x + } +} + +fn bn_almost_mont_mul_u64( + len: u32, + n: &[u64], + nInv_u64: u64, + aM: &[u64], + bM: &[u64], + resM: &mut [u64], +) { + let mut c: Box<[u64]> = vec![0u64; len.wrapping_add(len) as usize].into_boxed_slice(); + let mut tmp: Box<[u64]> = vec![0u64; 4u32.wrapping_mul(len) as usize].into_boxed_slice(); + super::bignum::bn_karatsuba_mul_uint64(len, aM, bM, &mut tmp, &mut c); + super::bignum::bn_almost_mont_reduction_u64(len, n, nInv_u64, &mut c, resM) +} + +fn bn_almost_mont_sqr_u64(len: u32, n: &[u64], nInv_u64: u64, aM: &[u64], resM: &mut [u64]) { + let mut c: Box<[u64]> = vec![0u64; len.wrapping_add(len) as usize].into_boxed_slice(); + let mut tmp: Box<[u64]> = vec![0u64; 4u32.wrapping_mul(len) as usize].into_boxed_slice(); + super::bignum::bn_karatsuba_sqr_uint64(len, aM, &mut tmp, &mut c); + super::bignum::bn_almost_mont_reduction_u64(len, n, nInv_u64, &mut c, resM) +} + +pub(crate) fn bn_check_mod_exp_u32(len: u32, n: &[u32], a: &[u32], bBits: u32, b: &[u32]) -> u32 { + let mut one: Box<[u32]> = vec![0u32; len as usize].into_boxed_slice(); + ((&mut one)[0usize..len as usize]) + .copy_from_slice(&vec![0u32; len as usize].into_boxed_slice()); + (&mut one)[0usize] = 1u32; + let bit0: u32 = n[0usize] & 1u32; + let m0: u32 = 0u32.wrapping_sub(bit0); + let mut acc: [u32; 1] = [0u32; 1usize]; + for i in 0u32..len { + let beq: u32 = fstar::uint32::eq_mask((&one)[i as usize], n[i as usize]); + let blt: u32 = !fstar::uint32::gte_mask((&one)[i as usize], n[i as usize]); + (&mut acc)[0usize] = beq & (&acc)[0usize] | !beq & blt + } + let m1: u32 = (&acc)[0usize]; + let m00: u32 = m0 & 
m1; + let bLen: u32 = if bBits == 0u32 { + 1u32 + } else { + bBits + .wrapping_sub(1u32) + .wrapping_div(32u32) + .wrapping_add(1u32) + }; + let m10: u32 = if bBits < 32u32.wrapping_mul(bLen) { + let mut b2: Box<[u32]> = vec![0u32; bLen as usize].into_boxed_slice(); + let i: u32 = bBits.wrapping_div(32u32); + let j: u32 = bBits.wrapping_rem(32u32); + (&mut b2)[i as usize] = (&b2)[i as usize] | 1u32.wrapping_shl(j); + let mut acc0: [u32; 1] = [0u32; 1usize]; + for i0 in 0u32..bLen { + let beq: u32 = fstar::uint32::eq_mask(b[i0 as usize], (&b2)[i0 as usize]); + let blt: u32 = !fstar::uint32::gte_mask(b[i0 as usize], (&b2)[i0 as usize]); + (&mut acc0)[0usize] = beq & (&acc0)[0usize] | !beq & blt + } + let res: u32 = (&acc0)[0usize]; + res + } else { + 0xFFFFFFFFu32 + }; + let mut acc0: [u32; 1] = [0u32; 1usize]; + for i in 0u32..len { + let beq: u32 = fstar::uint32::eq_mask(a[i as usize], n[i as usize]); + let blt: u32 = !fstar::uint32::gte_mask(a[i as usize], n[i as usize]); + (&mut acc0)[0usize] = beq & (&acc0)[0usize] | !beq & blt + } + let m2: u32 = (&acc0)[0usize]; + let m: u32 = m10 & m2; + m00 & m +} + +pub(crate) fn bn_mod_exp_vartime_precomp_u32( + len: u32, + n: &[u32], + mu: u32, + r2: &[u32], + a: &[u32], + bBits: u32, + b: &[u32], + res: &mut [u32], +) { + if bBits < 200u32 { + let mut aM: Box<[u32]> = vec![0u32; len as usize].into_boxed_slice(); + super::bignum::bn_to_mont_u32(len, n, mu, r2, a, &mut aM); + let mut resM: Box<[u32]> = vec![0u32; len as usize].into_boxed_slice(); + let mut ctx: Box<[u32]> = vec![0u32; len.wrapping_add(len) as usize].into_boxed_slice(); + ((&mut ctx)[0usize..len as usize]).copy_from_slice(&n[0usize..len as usize]); + ((&mut ctx)[len as usize..len as usize + len as usize]) + .copy_from_slice(&r2[0usize..len as usize]); + let ctx_n: (&[u32], &[u32]) = ctx.split_at(0usize); + let ctx_r2: (&[u32], &[u32]) = ctx_n.1.split_at(len as usize); + super::bignum::bn_from_mont_u32(len, ctx_r2.0, mu, ctx_r2.1, &mut resM); + 
lowstar::ignore::ignore::<&[u32]>(&ctx); + for i in 0u32..bBits { + let i1: u32 = i.wrapping_div(32u32); + let j: u32 = i.wrapping_rem(32u32); + let tmp: u32 = b[i1 as usize]; + let bit: u32 = tmp.wrapping_shr(j) & 1u32; + if bit != 0u32 { + let mut aM_copy: Box<[u32]> = vec![0u32; len as usize].into_boxed_slice(); + ((&mut aM_copy)[0usize..len as usize]) + .copy_from_slice(&(&resM)[0usize..len as usize]); + let ctx_n0: (&[u32], &[u32]) = ctx.split_at(0usize); + super::bignum::bn_almost_mont_mul_u32(len, ctx_n0.1, mu, &aM_copy, &aM, &mut resM); + lowstar::ignore::ignore::<&[u32]>(&ctx) + }; + let mut aM_copy: Box<[u32]> = vec![0u32; len as usize].into_boxed_slice(); + ((&mut aM_copy)[0usize..len as usize]).copy_from_slice(&(&aM)[0usize..len as usize]); + let ctx_n0: (&[u32], &[u32]) = ctx.split_at(0usize); + super::bignum::bn_almost_mont_sqr_u32(len, ctx_n0.1, mu, &aM_copy, &mut aM); + lowstar::ignore::ignore::<&[u32]>(&ctx) + } + super::bignum::bn_from_mont_u32(len, n, mu, &resM, res) + } else { + let mut aM: Box<[u32]> = vec![0u32; len as usize].into_boxed_slice(); + super::bignum::bn_to_mont_u32(len, n, mu, r2, a, &mut aM); + let mut resM: Box<[u32]> = vec![0u32; len as usize].into_boxed_slice(); + let bLen: u32 = if bBits == 0u32 { + 1u32 + } else { + bBits + .wrapping_sub(1u32) + .wrapping_div(32u32) + .wrapping_add(1u32) + }; + let mut ctx: Box<[u32]> = vec![0u32; len.wrapping_add(len) as usize].into_boxed_slice(); + ((&mut ctx)[0usize..len as usize]).copy_from_slice(&n[0usize..len as usize]); + ((&mut ctx)[len as usize..len as usize + len as usize]) + .copy_from_slice(&r2[0usize..len as usize]); + let mut table: Box<[u32]> = vec![0u32; 16u32.wrapping_mul(len) as usize].into_boxed_slice(); + let mut tmp: Box<[u32]> = vec![0u32; len as usize].into_boxed_slice(); + let t0: (&mut [u32], &mut [u32]) = table.split_at_mut(0usize); + let t1: (&mut [u32], &mut [u32]) = t0.1.split_at_mut(len as usize); + let ctx_n: (&[u32], &[u32]) = ctx.split_at(0usize); + let 
ctx_r2: (&[u32], &[u32]) = ctx_n.1.split_at(len as usize); + super::bignum::bn_from_mont_u32(len, ctx_r2.0, mu, ctx_r2.1, t1.0); + lowstar::ignore::ignore::<&[u32]>(&ctx); + (t1.1[0usize..len as usize]).copy_from_slice(&(&aM)[0usize..len as usize]); + lowstar::ignore::ignore::<&[u32]>(&table); + krml::unroll_for!(7, "i", 0u32, 1u32, { + let t11: (&[u32], &[u32]) = + table.split_at(i.wrapping_add(1u32).wrapping_mul(len) as usize); + let mut aM_copy: Box<[u32]> = vec![0u32; len as usize].into_boxed_slice(); + ((&mut aM_copy)[0usize..len as usize]).copy_from_slice(&t11.1[0usize..len as usize]); + let ctx_n0: (&[u32], &[u32]) = ctx.split_at(0usize); + super::bignum::bn_almost_mont_sqr_u32(len, ctx_n0.1, mu, &aM_copy, &mut tmp); + lowstar::ignore::ignore::<&[u32]>(&ctx); + ((&mut table)[2u32.wrapping_mul(i).wrapping_add(2u32).wrapping_mul(len) as usize + ..2u32.wrapping_mul(i).wrapping_add(2u32).wrapping_mul(len) as usize + + len as usize]) + .copy_from_slice(&(&tmp)[0usize..len as usize]); + let t2: (&[u32], &[u32]) = + table.split_at(2u32.wrapping_mul(i).wrapping_add(2u32).wrapping_mul(len) as usize); + let mut aM_copy0: Box<[u32]> = vec![0u32; len as usize].into_boxed_slice(); + ((&mut aM_copy0)[0usize..len as usize]).copy_from_slice(&(&aM)[0usize..len as usize]); + let ctx_n1: (&[u32], &[u32]) = ctx.split_at(0usize); + super::bignum::bn_almost_mont_mul_u32(len, ctx_n1.1, mu, &aM_copy0, t2.1, &mut tmp); + lowstar::ignore::ignore::<&[u32]>(&ctx); + ((&mut table)[2u32.wrapping_mul(i).wrapping_add(3u32).wrapping_mul(len) as usize + ..2u32.wrapping_mul(i).wrapping_add(3u32).wrapping_mul(len) as usize + + len as usize]) + .copy_from_slice(&(&tmp)[0usize..len as usize]) + }); + if bBits.wrapping_rem(4u32) != 0u32 { + let i: u32 = bBits.wrapping_div(4u32).wrapping_mul(4u32); + let bits_c: u32 = super::bignum_base::bn_get_bits_u32(bLen, b, i, 4u32); + let bits_l32: u32 = bits_c; + let a_bits_l: (&[u32], &[u32]) = table.split_at(bits_l32.wrapping_mul(len) as usize); + ((&mut 
resM)[0usize..len as usize]).copy_from_slice(&a_bits_l.1[0usize..len as usize]) + } else { + let ctx_n0: (&[u32], &[u32]) = ctx.split_at(0usize); + let ctx_r20: (&[u32], &[u32]) = ctx_n0.1.split_at(len as usize); + super::bignum::bn_from_mont_u32(len, ctx_r20.0, mu, ctx_r20.1, &mut resM); + lowstar::ignore::ignore::<&[u32]>(&ctx) + }; + let mut tmp0: Box<[u32]> = vec![0u32; len as usize].into_boxed_slice(); + for i in 0u32..bBits.wrapping_div(4u32) { + krml::unroll_for!(4, "_i", 0u32, 1u32, { + let mut aM_copy: Box<[u32]> = vec![0u32; len as usize].into_boxed_slice(); + ((&mut aM_copy)[0usize..len as usize]) + .copy_from_slice(&(&resM)[0usize..len as usize]); + let ctx_n0: (&[u32], &[u32]) = ctx.split_at(0usize); + super::bignum::bn_almost_mont_sqr_u32(len, ctx_n0.1, mu, &aM_copy, &mut resM); + lowstar::ignore::ignore::<&[u32]>(&ctx) + }); + let k: u32 = bBits + .wrapping_sub(bBits.wrapping_rem(4u32)) + .wrapping_sub(4u32.wrapping_mul(i)) + .wrapping_sub(4u32); + let bits_l: u32 = super::bignum_base::bn_get_bits_u32(bLen, b, k, 4u32); + lowstar::ignore::ignore::<&[u32]>(&table); + let bits_l32: u32 = bits_l; + let a_bits_l: (&[u32], &[u32]) = table.split_at(bits_l32.wrapping_mul(len) as usize); + ((&mut tmp0)[0usize..len as usize]).copy_from_slice(&a_bits_l.1[0usize..len as usize]); + let mut aM_copy: Box<[u32]> = vec![0u32; len as usize].into_boxed_slice(); + ((&mut aM_copy)[0usize..len as usize]).copy_from_slice(&(&resM)[0usize..len as usize]); + let ctx_n0: (&[u32], &[u32]) = ctx.split_at(0usize); + super::bignum::bn_almost_mont_mul_u32(len, ctx_n0.1, mu, &aM_copy, &tmp0, &mut resM); + lowstar::ignore::ignore::<&[u32]>(&ctx) + } + super::bignum::bn_from_mont_u32(len, n, mu, &resM, res) + } +} + +pub(crate) fn bn_mod_exp_consttime_precomp_u32( + len: u32, + n: &[u32], + mu: u32, + r2: &[u32], + a: &[u32], + bBits: u32, + b: &[u32], + res: &mut [u32], +) { + if bBits < 200u32 { + let mut aM: Box<[u32]> = vec![0u32; len as usize].into_boxed_slice(); + 
super::bignum::bn_to_mont_u32(len, n, mu, r2, a, &mut aM); + let mut resM: Box<[u32]> = vec![0u32; len as usize].into_boxed_slice(); + let mut ctx: Box<[u32]> = vec![0u32; len.wrapping_add(len) as usize].into_boxed_slice(); + ((&mut ctx)[0usize..len as usize]).copy_from_slice(&n[0usize..len as usize]); + ((&mut ctx)[len as usize..len as usize + len as usize]) + .copy_from_slice(&r2[0usize..len as usize]); + let mut sw: [u32; 1] = [0u32; 1usize]; + let ctx_n: (&[u32], &[u32]) = ctx.split_at(0usize); + let ctx_r2: (&[u32], &[u32]) = ctx_n.1.split_at(len as usize); + super::bignum::bn_from_mont_u32(len, ctx_r2.0, mu, ctx_r2.1, &mut resM); + lowstar::ignore::ignore::<&[u32]>(&ctx); + for i in 0u32..bBits { + let i1: u32 = bBits.wrapping_sub(i).wrapping_sub(1u32).wrapping_div(32u32); + let j: u32 = bBits.wrapping_sub(i).wrapping_sub(1u32).wrapping_rem(32u32); + let tmp: u32 = b[i1 as usize]; + let bit: u32 = tmp.wrapping_shr(j) & 1u32; + let sw1: u32 = bit ^ (&sw)[0usize]; + for i0 in 0u32..len { + let dummy: u32 = + 0u32.wrapping_sub(sw1) & ((&resM)[i0 as usize] ^ (&aM)[i0 as usize]); + (&mut resM)[i0 as usize] = (&resM)[i0 as usize] ^ dummy; + (&mut aM)[i0 as usize] = (&aM)[i0 as usize] ^ dummy + } + let mut aM_copy: Box<[u32]> = vec![0u32; len as usize].into_boxed_slice(); + ((&mut aM_copy)[0usize..len as usize]).copy_from_slice(&(&aM)[0usize..len as usize]); + let ctx_n0: (&[u32], &[u32]) = ctx.split_at(0usize); + super::bignum::bn_almost_mont_mul_u32(len, ctx_n0.1, mu, &aM_copy, &resM, &mut aM); + lowstar::ignore::ignore::<&[u32]>(&ctx); + let mut aM_copy0: Box<[u32]> = vec![0u32; len as usize].into_boxed_slice(); + ((&mut aM_copy0)[0usize..len as usize]).copy_from_slice(&(&resM)[0usize..len as usize]); + let ctx_n1: (&[u32], &[u32]) = ctx.split_at(0usize); + super::bignum::bn_almost_mont_sqr_u32(len, ctx_n1.1, mu, &aM_copy0, &mut resM); + lowstar::ignore::ignore::<&[u32]>(&ctx); + (&mut sw)[0usize] = bit + } + let sw0: u32 = (&sw)[0usize]; + for i in 0u32..len { + 
let dummy: u32 = 0u32.wrapping_sub(sw0) & ((&resM)[i as usize] ^ (&aM)[i as usize]); + (&mut resM)[i as usize] = (&resM)[i as usize] ^ dummy; + (&mut aM)[i as usize] = (&aM)[i as usize] ^ dummy + } + super::bignum::bn_from_mont_u32(len, n, mu, &resM, res) + } else { + let mut aM: Box<[u32]> = vec![0u32; len as usize].into_boxed_slice(); + super::bignum::bn_to_mont_u32(len, n, mu, r2, a, &mut aM); + let mut resM: Box<[u32]> = vec![0u32; len as usize].into_boxed_slice(); + let bLen: u32 = if bBits == 0u32 { + 1u32 + } else { + bBits + .wrapping_sub(1u32) + .wrapping_div(32u32) + .wrapping_add(1u32) + }; + let mut ctx: Box<[u32]> = vec![0u32; len.wrapping_add(len) as usize].into_boxed_slice(); + ((&mut ctx)[0usize..len as usize]).copy_from_slice(&n[0usize..len as usize]); + ((&mut ctx)[len as usize..len as usize + len as usize]) + .copy_from_slice(&r2[0usize..len as usize]); + let mut table: Box<[u32]> = vec![0u32; 16u32.wrapping_mul(len) as usize].into_boxed_slice(); + let mut tmp: Box<[u32]> = vec![0u32; len as usize].into_boxed_slice(); + let t0: (&mut [u32], &mut [u32]) = table.split_at_mut(0usize); + let t1: (&mut [u32], &mut [u32]) = t0.1.split_at_mut(len as usize); + let ctx_n: (&[u32], &[u32]) = ctx.split_at(0usize); + let ctx_r2: (&[u32], &[u32]) = ctx_n.1.split_at(len as usize); + super::bignum::bn_from_mont_u32(len, ctx_r2.0, mu, ctx_r2.1, t1.0); + lowstar::ignore::ignore::<&[u32]>(&ctx); + (t1.1[0usize..len as usize]).copy_from_slice(&(&aM)[0usize..len as usize]); + lowstar::ignore::ignore::<&[u32]>(&table); + krml::unroll_for!(7, "i", 0u32, 1u32, { + let t11: (&[u32], &[u32]) = + table.split_at(i.wrapping_add(1u32).wrapping_mul(len) as usize); + let mut aM_copy: Box<[u32]> = vec![0u32; len as usize].into_boxed_slice(); + ((&mut aM_copy)[0usize..len as usize]).copy_from_slice(&t11.1[0usize..len as usize]); + let ctx_n0: (&[u32], &[u32]) = ctx.split_at(0usize); + super::bignum::bn_almost_mont_sqr_u32(len, ctx_n0.1, mu, &aM_copy, &mut tmp); + 
lowstar::ignore::ignore::<&[u32]>(&ctx); + ((&mut table)[2u32.wrapping_mul(i).wrapping_add(2u32).wrapping_mul(len) as usize + ..2u32.wrapping_mul(i).wrapping_add(2u32).wrapping_mul(len) as usize + + len as usize]) + .copy_from_slice(&(&tmp)[0usize..len as usize]); + let t2: (&[u32], &[u32]) = + table.split_at(2u32.wrapping_mul(i).wrapping_add(2u32).wrapping_mul(len) as usize); + let mut aM_copy0: Box<[u32]> = vec![0u32; len as usize].into_boxed_slice(); + ((&mut aM_copy0)[0usize..len as usize]).copy_from_slice(&(&aM)[0usize..len as usize]); + let ctx_n1: (&[u32], &[u32]) = ctx.split_at(0usize); + super::bignum::bn_almost_mont_mul_u32(len, ctx_n1.1, mu, &aM_copy0, t2.1, &mut tmp); + lowstar::ignore::ignore::<&[u32]>(&ctx); + ((&mut table)[2u32.wrapping_mul(i).wrapping_add(3u32).wrapping_mul(len) as usize + ..2u32.wrapping_mul(i).wrapping_add(3u32).wrapping_mul(len) as usize + + len as usize]) + .copy_from_slice(&(&tmp)[0usize..len as usize]) + }); + if bBits.wrapping_rem(4u32) != 0u32 { + let i: u32 = bBits.wrapping_div(4u32).wrapping_mul(4u32); + let bits_c: u32 = super::bignum_base::bn_get_bits_u32(bLen, b, i, 4u32); + ((&mut resM)[0usize..len as usize]).copy_from_slice( + &(&(&table)[0u32.wrapping_mul(len) as usize..] 
as &[u32])[0usize..len as usize], + ); + krml::unroll_for!(15, "i0", 0u32, 1u32, { + let c: u32 = fstar::uint32::eq_mask(bits_c, i0.wrapping_add(1u32)); + let res_j: (&[u32], &[u32]) = + table.split_at(i0.wrapping_add(1u32).wrapping_mul(len) as usize); + for i1 in 0u32..len { + let x: u32 = c & res_j.1[i1 as usize] | !c & (&resM)[i1 as usize]; + let os: (&mut [u32], &mut [u32]) = resM.split_at_mut(0usize); + os.1[i1 as usize] = x + } + }) + } else { + let ctx_n0: (&[u32], &[u32]) = ctx.split_at(0usize); + let ctx_r20: (&[u32], &[u32]) = ctx_n0.1.split_at(len as usize); + super::bignum::bn_from_mont_u32(len, ctx_r20.0, mu, ctx_r20.1, &mut resM); + lowstar::ignore::ignore::<&[u32]>(&ctx) + }; + let mut tmp0: Box<[u32]> = vec![0u32; len as usize].into_boxed_slice(); + for i in 0u32..bBits.wrapping_div(4u32) { + krml::unroll_for!(4, "_i", 0u32, 1u32, { + let mut aM_copy: Box<[u32]> = vec![0u32; len as usize].into_boxed_slice(); + ((&mut aM_copy)[0usize..len as usize]) + .copy_from_slice(&(&resM)[0usize..len as usize]); + let ctx_n0: (&[u32], &[u32]) = ctx.split_at(0usize); + super::bignum::bn_almost_mont_sqr_u32(len, ctx_n0.1, mu, &aM_copy, &mut resM); + lowstar::ignore::ignore::<&[u32]>(&ctx) + }); + let k: u32 = bBits + .wrapping_sub(bBits.wrapping_rem(4u32)) + .wrapping_sub(4u32.wrapping_mul(i)) + .wrapping_sub(4u32); + let bits_l: u32 = super::bignum_base::bn_get_bits_u32(bLen, b, k, 4u32); + lowstar::ignore::ignore::<&[u32]>(&table); + ((&mut tmp0)[0usize..len as usize]).copy_from_slice( + &(&(&table)[0u32.wrapping_mul(len) as usize..] 
as &[u32])[0usize..len as usize], + ); + krml::unroll_for!(15, "i0", 0u32, 1u32, { + let c: u32 = fstar::uint32::eq_mask(bits_l, i0.wrapping_add(1u32)); + let res_j: (&[u32], &[u32]) = + table.split_at(i0.wrapping_add(1u32).wrapping_mul(len) as usize); + for i1 in 0u32..len { + let x: u32 = c & res_j.1[i1 as usize] | !c & (&tmp0)[i1 as usize]; + let os: (&mut [u32], &mut [u32]) = tmp0.split_at_mut(0usize); + os.1[i1 as usize] = x + } + }); + let mut aM_copy: Box<[u32]> = vec![0u32; len as usize].into_boxed_slice(); + ((&mut aM_copy)[0usize..len as usize]).copy_from_slice(&(&resM)[0usize..len as usize]); + let ctx_n0: (&[u32], &[u32]) = ctx.split_at(0usize); + super::bignum::bn_almost_mont_mul_u32(len, ctx_n0.1, mu, &aM_copy, &tmp0, &mut resM); + lowstar::ignore::ignore::<&[u32]>(&ctx) + } + super::bignum::bn_from_mont_u32(len, n, mu, &resM, res) + } +} + +pub(crate) fn bn_mod_exp_vartime_u32( + len: u32, + nBits: u32, + n: &[u32], + a: &[u32], + bBits: u32, + b: &[u32], + res: &mut [u32], +) { + let mut r2: Box<[u32]> = vec![0u32; len as usize].into_boxed_slice(); + super::bignum::bn_precomp_r2_mod_n_u32(len, nBits, n, &mut r2); + let mu: u32 = super::bignum::mod_inv_uint32(n[0usize]); + super::bignum::bn_mod_exp_vartime_precomp_u32(len, n, mu, &r2, a, bBits, b, res) +} + +pub(crate) fn bn_mod_exp_consttime_u32( + len: u32, + nBits: u32, + n: &[u32], + a: &[u32], + bBits: u32, + b: &[u32], + res: &mut [u32], +) { + let mut r2: Box<[u32]> = vec![0u32; len as usize].into_boxed_slice(); + super::bignum::bn_precomp_r2_mod_n_u32(len, nBits, n, &mut r2); + let mu: u32 = super::bignum::mod_inv_uint32(n[0usize]); + super::bignum::bn_mod_exp_consttime_precomp_u32(len, n, mu, &r2, a, bBits, b, res) +} + +pub(crate) fn bn_check_mod_exp_u64(len: u32, n: &[u64], a: &[u64], bBits: u32, b: &[u64]) -> u64 { + let mut one: Box<[u64]> = vec![0u64; len as usize].into_boxed_slice(); + ((&mut one)[0usize..len as usize]) + .copy_from_slice(&vec![0u64; len as usize].into_boxed_slice()); 
+ (&mut one)[0usize] = 1u64; + let bit0: u64 = n[0usize] & 1u64; + let m0: u64 = 0u64.wrapping_sub(bit0); + let mut acc: [u64; 1] = [0u64; 1usize]; + for i in 0u32..len { + let beq: u64 = fstar::uint64::eq_mask((&one)[i as usize], n[i as usize]); + let blt: u64 = !fstar::uint64::gte_mask((&one)[i as usize], n[i as usize]); + (&mut acc)[0usize] = beq & (&acc)[0usize] | !beq & blt + } + let m1: u64 = (&acc)[0usize]; + let m00: u64 = m0 & m1; + let bLen: u32 = if bBits == 0u32 { + 1u32 + } else { + bBits + .wrapping_sub(1u32) + .wrapping_div(64u32) + .wrapping_add(1u32) + }; + let m10: u64 = if bBits < 64u32.wrapping_mul(bLen) { + let mut b2: Box<[u64]> = vec![0u64; bLen as usize].into_boxed_slice(); + let i: u32 = bBits.wrapping_div(64u32); + let j: u32 = bBits.wrapping_rem(64u32); + (&mut b2)[i as usize] = (&b2)[i as usize] | 1u64.wrapping_shl(j); + let mut acc0: [u64; 1] = [0u64; 1usize]; + for i0 in 0u32..bLen { + let beq: u64 = fstar::uint64::eq_mask(b[i0 as usize], (&b2)[i0 as usize]); + let blt: u64 = !fstar::uint64::gte_mask(b[i0 as usize], (&b2)[i0 as usize]); + (&mut acc0)[0usize] = beq & (&acc0)[0usize] | !beq & blt + } + let res: u64 = (&acc0)[0usize]; + res + } else { + 0xFFFFFFFFFFFFFFFFu64 + }; + let mut acc0: [u64; 1] = [0u64; 1usize]; + for i in 0u32..len { + let beq: u64 = fstar::uint64::eq_mask(a[i as usize], n[i as usize]); + let blt: u64 = !fstar::uint64::gte_mask(a[i as usize], n[i as usize]); + (&mut acc0)[0usize] = beq & (&acc0)[0usize] | !beq & blt + } + let m2: u64 = (&acc0)[0usize]; + let m: u64 = m10 & m2; + m00 & m +} + +/** +ATTENTION: this function is public, but is intended for internal use within this workspace; callers should not rely on the availability of this function, or its behavior! 
+*/ +pub fn bn_mod_exp_vartime_precomp_u64( + len: u32, + n: &[u64], + mu: u64, + r2: &[u64], + a: &[u64], + bBits: u32, + b: &[u64], + res: &mut [u64], +) { + if bBits < 200u32 { + let mut aM: Box<[u64]> = vec![0u64; len as usize].into_boxed_slice(); + super::bignum::bn_to_mont_u64(len, n, mu, r2, a, &mut aM); + let mut resM: Box<[u64]> = vec![0u64; len as usize].into_boxed_slice(); + let mut ctx: Box<[u64]> = vec![0u64; len.wrapping_add(len) as usize].into_boxed_slice(); + ((&mut ctx)[0usize..len as usize]).copy_from_slice(&n[0usize..len as usize]); + ((&mut ctx)[len as usize..len as usize + len as usize]) + .copy_from_slice(&r2[0usize..len as usize]); + let ctx_n: (&[u64], &[u64]) = ctx.split_at(0usize); + let ctx_r2: (&[u64], &[u64]) = ctx_n.1.split_at(len as usize); + super::bignum::bn_from_mont_u64(len, ctx_r2.0, mu, ctx_r2.1, &mut resM); + lowstar::ignore::ignore::<&[u64]>(&ctx); + for i in 0u32..bBits { + let i1: u32 = i.wrapping_div(64u32); + let j: u32 = i.wrapping_rem(64u32); + let tmp: u64 = b[i1 as usize]; + let bit: u64 = tmp.wrapping_shr(j) & 1u64; + if bit != 0u64 { + let mut aM_copy: Box<[u64]> = vec![0u64; len as usize].into_boxed_slice(); + ((&mut aM_copy)[0usize..len as usize]) + .copy_from_slice(&(&resM)[0usize..len as usize]); + let ctx_n0: (&[u64], &[u64]) = ctx.split_at(0usize); + super::bignum::bn_almost_mont_mul_u64(len, ctx_n0.1, mu, &aM_copy, &aM, &mut resM); + lowstar::ignore::ignore::<&[u64]>(&ctx) + }; + let mut aM_copy: Box<[u64]> = vec![0u64; len as usize].into_boxed_slice(); + ((&mut aM_copy)[0usize..len as usize]).copy_from_slice(&(&aM)[0usize..len as usize]); + let ctx_n0: (&[u64], &[u64]) = ctx.split_at(0usize); + super::bignum::bn_almost_mont_sqr_u64(len, ctx_n0.1, mu, &aM_copy, &mut aM); + lowstar::ignore::ignore::<&[u64]>(&ctx) + } + super::bignum::bn_from_mont_u64(len, n, mu, &resM, res) + } else { + let mut aM: Box<[u64]> = vec![0u64; len as usize].into_boxed_slice(); + super::bignum::bn_to_mont_u64(len, n, mu, r2, a, &mut 
aM); + let mut resM: Box<[u64]> = vec![0u64; len as usize].into_boxed_slice(); + let bLen: u32 = if bBits == 0u32 { + 1u32 + } else { + bBits + .wrapping_sub(1u32) + .wrapping_div(64u32) + .wrapping_add(1u32) + }; + let mut ctx: Box<[u64]> = vec![0u64; len.wrapping_add(len) as usize].into_boxed_slice(); + ((&mut ctx)[0usize..len as usize]).copy_from_slice(&n[0usize..len as usize]); + ((&mut ctx)[len as usize..len as usize + len as usize]) + .copy_from_slice(&r2[0usize..len as usize]); + let mut table: Box<[u64]> = vec![0u64; 16u32.wrapping_mul(len) as usize].into_boxed_slice(); + let mut tmp: Box<[u64]> = vec![0u64; len as usize].into_boxed_slice(); + let t0: (&mut [u64], &mut [u64]) = table.split_at_mut(0usize); + let t1: (&mut [u64], &mut [u64]) = t0.1.split_at_mut(len as usize); + let ctx_n: (&[u64], &[u64]) = ctx.split_at(0usize); + let ctx_r2: (&[u64], &[u64]) = ctx_n.1.split_at(len as usize); + super::bignum::bn_from_mont_u64(len, ctx_r2.0, mu, ctx_r2.1, t1.0); + lowstar::ignore::ignore::<&[u64]>(&ctx); + (t1.1[0usize..len as usize]).copy_from_slice(&(&aM)[0usize..len as usize]); + lowstar::ignore::ignore::<&[u64]>(&table); + krml::unroll_for!(7, "i", 0u32, 1u32, { + let t11: (&[u64], &[u64]) = + table.split_at(i.wrapping_add(1u32).wrapping_mul(len) as usize); + let mut aM_copy: Box<[u64]> = vec![0u64; len as usize].into_boxed_slice(); + ((&mut aM_copy)[0usize..len as usize]).copy_from_slice(&t11.1[0usize..len as usize]); + let ctx_n0: (&[u64], &[u64]) = ctx.split_at(0usize); + super::bignum::bn_almost_mont_sqr_u64(len, ctx_n0.1, mu, &aM_copy, &mut tmp); + lowstar::ignore::ignore::<&[u64]>(&ctx); + ((&mut table)[2u32.wrapping_mul(i).wrapping_add(2u32).wrapping_mul(len) as usize + ..2u32.wrapping_mul(i).wrapping_add(2u32).wrapping_mul(len) as usize + + len as usize]) + .copy_from_slice(&(&tmp)[0usize..len as usize]); + let t2: (&[u64], &[u64]) = + table.split_at(2u32.wrapping_mul(i).wrapping_add(2u32).wrapping_mul(len) as usize); + let mut aM_copy0: Box<[u64]> 
= vec![0u64; len as usize].into_boxed_slice(); + ((&mut aM_copy0)[0usize..len as usize]).copy_from_slice(&(&aM)[0usize..len as usize]); + let ctx_n1: (&[u64], &[u64]) = ctx.split_at(0usize); + super::bignum::bn_almost_mont_mul_u64(len, ctx_n1.1, mu, &aM_copy0, t2.1, &mut tmp); + lowstar::ignore::ignore::<&[u64]>(&ctx); + ((&mut table)[2u32.wrapping_mul(i).wrapping_add(3u32).wrapping_mul(len) as usize + ..2u32.wrapping_mul(i).wrapping_add(3u32).wrapping_mul(len) as usize + + len as usize]) + .copy_from_slice(&(&tmp)[0usize..len as usize]) + }); + if bBits.wrapping_rem(4u32) != 0u32 { + let i: u32 = bBits.wrapping_div(4u32).wrapping_mul(4u32); + let bits_c: u64 = super::bignum_base::bn_get_bits_u64(bLen, b, i, 4u32); + let bits_l32: u32 = bits_c as u32; + let a_bits_l: (&[u64], &[u64]) = table.split_at(bits_l32.wrapping_mul(len) as usize); + ((&mut resM)[0usize..len as usize]).copy_from_slice(&a_bits_l.1[0usize..len as usize]) + } else { + let ctx_n0: (&[u64], &[u64]) = ctx.split_at(0usize); + let ctx_r20: (&[u64], &[u64]) = ctx_n0.1.split_at(len as usize); + super::bignum::bn_from_mont_u64(len, ctx_r20.0, mu, ctx_r20.1, &mut resM); + lowstar::ignore::ignore::<&[u64]>(&ctx) + }; + let mut tmp0: Box<[u64]> = vec![0u64; len as usize].into_boxed_slice(); + for i in 0u32..bBits.wrapping_div(4u32) { + krml::unroll_for!(4, "_i", 0u32, 1u32, { + let mut aM_copy: Box<[u64]> = vec![0u64; len as usize].into_boxed_slice(); + ((&mut aM_copy)[0usize..len as usize]) + .copy_from_slice(&(&resM)[0usize..len as usize]); + let ctx_n0: (&[u64], &[u64]) = ctx.split_at(0usize); + super::bignum::bn_almost_mont_sqr_u64(len, ctx_n0.1, mu, &aM_copy, &mut resM); + lowstar::ignore::ignore::<&[u64]>(&ctx) + }); + let k: u32 = bBits + .wrapping_sub(bBits.wrapping_rem(4u32)) + .wrapping_sub(4u32.wrapping_mul(i)) + .wrapping_sub(4u32); + let bits_l: u64 = super::bignum_base::bn_get_bits_u64(bLen, b, k, 4u32); + lowstar::ignore::ignore::<&[u64]>(&table); + let bits_l32: u32 = bits_l as u32; + let 
a_bits_l: (&[u64], &[u64]) = table.split_at(bits_l32.wrapping_mul(len) as usize); + ((&mut tmp0)[0usize..len as usize]).copy_from_slice(&a_bits_l.1[0usize..len as usize]); + let mut aM_copy: Box<[u64]> = vec![0u64; len as usize].into_boxed_slice(); + ((&mut aM_copy)[0usize..len as usize]).copy_from_slice(&(&resM)[0usize..len as usize]); + let ctx_n0: (&[u64], &[u64]) = ctx.split_at(0usize); + super::bignum::bn_almost_mont_mul_u64(len, ctx_n0.1, mu, &aM_copy, &tmp0, &mut resM); + lowstar::ignore::ignore::<&[u64]>(&ctx) + } + super::bignum::bn_from_mont_u64(len, n, mu, &resM, res) + } +} + +/** +ATTENTION: this function is public, but is intended for internal use within this workspace; callers should not rely on the availability of this function, or its behavior! +*/ +pub fn bn_mod_exp_consttime_precomp_u64( + len: u32, + n: &[u64], + mu: u64, + r2: &[u64], + a: &[u64], + bBits: u32, + b: &[u64], + res: &mut [u64], +) { + if bBits < 200u32 { + let mut aM: Box<[u64]> = vec![0u64; len as usize].into_boxed_slice(); + super::bignum::bn_to_mont_u64(len, n, mu, r2, a, &mut aM); + let mut resM: Box<[u64]> = vec![0u64; len as usize].into_boxed_slice(); + let mut ctx: Box<[u64]> = vec![0u64; len.wrapping_add(len) as usize].into_boxed_slice(); + ((&mut ctx)[0usize..len as usize]).copy_from_slice(&n[0usize..len as usize]); + ((&mut ctx)[len as usize..len as usize + len as usize]) + .copy_from_slice(&r2[0usize..len as usize]); + let mut sw: [u64; 1] = [0u64; 1usize]; + let ctx_n: (&[u64], &[u64]) = ctx.split_at(0usize); + let ctx_r2: (&[u64], &[u64]) = ctx_n.1.split_at(len as usize); + super::bignum::bn_from_mont_u64(len, ctx_r2.0, mu, ctx_r2.1, &mut resM); + lowstar::ignore::ignore::<&[u64]>(&ctx); + for i in 0u32..bBits { + let i1: u32 = bBits.wrapping_sub(i).wrapping_sub(1u32).wrapping_div(64u32); + let j: u32 = bBits.wrapping_sub(i).wrapping_sub(1u32).wrapping_rem(64u32); + let tmp: u64 = b[i1 as usize]; + let bit: u64 = tmp.wrapping_shr(j) & 1u64; + let sw1: u64 = bit ^ 
(&sw)[0usize]; + for i0 in 0u32..len { + let dummy: u64 = + 0u64.wrapping_sub(sw1) & ((&resM)[i0 as usize] ^ (&aM)[i0 as usize]); + (&mut resM)[i0 as usize] = (&resM)[i0 as usize] ^ dummy; + (&mut aM)[i0 as usize] = (&aM)[i0 as usize] ^ dummy + } + let mut aM_copy: Box<[u64]> = vec![0u64; len as usize].into_boxed_slice(); + ((&mut aM_copy)[0usize..len as usize]).copy_from_slice(&(&aM)[0usize..len as usize]); + let ctx_n0: (&[u64], &[u64]) = ctx.split_at(0usize); + super::bignum::bn_almost_mont_mul_u64(len, ctx_n0.1, mu, &aM_copy, &resM, &mut aM); + lowstar::ignore::ignore::<&[u64]>(&ctx); + let mut aM_copy0: Box<[u64]> = vec![0u64; len as usize].into_boxed_slice(); + ((&mut aM_copy0)[0usize..len as usize]).copy_from_slice(&(&resM)[0usize..len as usize]); + let ctx_n1: (&[u64], &[u64]) = ctx.split_at(0usize); + super::bignum::bn_almost_mont_sqr_u64(len, ctx_n1.1, mu, &aM_copy0, &mut resM); + lowstar::ignore::ignore::<&[u64]>(&ctx); + (&mut sw)[0usize] = bit + } + let sw0: u64 = (&sw)[0usize]; + for i in 0u32..len { + let dummy: u64 = 0u64.wrapping_sub(sw0) & ((&resM)[i as usize] ^ (&aM)[i as usize]); + (&mut resM)[i as usize] = (&resM)[i as usize] ^ dummy; + (&mut aM)[i as usize] = (&aM)[i as usize] ^ dummy + } + super::bignum::bn_from_mont_u64(len, n, mu, &resM, res) + } else { + let mut aM: Box<[u64]> = vec![0u64; len as usize].into_boxed_slice(); + super::bignum::bn_to_mont_u64(len, n, mu, r2, a, &mut aM); + let mut resM: Box<[u64]> = vec![0u64; len as usize].into_boxed_slice(); + let bLen: u32 = if bBits == 0u32 { + 1u32 + } else { + bBits + .wrapping_sub(1u32) + .wrapping_div(64u32) + .wrapping_add(1u32) + }; + let mut ctx: Box<[u64]> = vec![0u64; len.wrapping_add(len) as usize].into_boxed_slice(); + ((&mut ctx)[0usize..len as usize]).copy_from_slice(&n[0usize..len as usize]); + ((&mut ctx)[len as usize..len as usize + len as usize]) + .copy_from_slice(&r2[0usize..len as usize]); + let mut table: Box<[u64]> = vec![0u64; 16u32.wrapping_mul(len) as 
usize].into_boxed_slice(); + let mut tmp: Box<[u64]> = vec![0u64; len as usize].into_boxed_slice(); + let t0: (&mut [u64], &mut [u64]) = table.split_at_mut(0usize); + let t1: (&mut [u64], &mut [u64]) = t0.1.split_at_mut(len as usize); + let ctx_n: (&[u64], &[u64]) = ctx.split_at(0usize); + let ctx_r2: (&[u64], &[u64]) = ctx_n.1.split_at(len as usize); + super::bignum::bn_from_mont_u64(len, ctx_r2.0, mu, ctx_r2.1, t1.0); + lowstar::ignore::ignore::<&[u64]>(&ctx); + (t1.1[0usize..len as usize]).copy_from_slice(&(&aM)[0usize..len as usize]); + lowstar::ignore::ignore::<&[u64]>(&table); + krml::unroll_for!(7, "i", 0u32, 1u32, { + let t11: (&[u64], &[u64]) = + table.split_at(i.wrapping_add(1u32).wrapping_mul(len) as usize); + let mut aM_copy: Box<[u64]> = vec![0u64; len as usize].into_boxed_slice(); + ((&mut aM_copy)[0usize..len as usize]).copy_from_slice(&t11.1[0usize..len as usize]); + let ctx_n0: (&[u64], &[u64]) = ctx.split_at(0usize); + super::bignum::bn_almost_mont_sqr_u64(len, ctx_n0.1, mu, &aM_copy, &mut tmp); + lowstar::ignore::ignore::<&[u64]>(&ctx); + ((&mut table)[2u32.wrapping_mul(i).wrapping_add(2u32).wrapping_mul(len) as usize + ..2u32.wrapping_mul(i).wrapping_add(2u32).wrapping_mul(len) as usize + + len as usize]) + .copy_from_slice(&(&tmp)[0usize..len as usize]); + let t2: (&[u64], &[u64]) = + table.split_at(2u32.wrapping_mul(i).wrapping_add(2u32).wrapping_mul(len) as usize); + let mut aM_copy0: Box<[u64]> = vec![0u64; len as usize].into_boxed_slice(); + ((&mut aM_copy0)[0usize..len as usize]).copy_from_slice(&(&aM)[0usize..len as usize]); + let ctx_n1: (&[u64], &[u64]) = ctx.split_at(0usize); + super::bignum::bn_almost_mont_mul_u64(len, ctx_n1.1, mu, &aM_copy0, t2.1, &mut tmp); + lowstar::ignore::ignore::<&[u64]>(&ctx); + ((&mut table)[2u32.wrapping_mul(i).wrapping_add(3u32).wrapping_mul(len) as usize + ..2u32.wrapping_mul(i).wrapping_add(3u32).wrapping_mul(len) as usize + + len as usize]) + .copy_from_slice(&(&tmp)[0usize..len as usize]) + }); + if 
bBits.wrapping_rem(4u32) != 0u32 { + let i: u32 = bBits.wrapping_div(4u32).wrapping_mul(4u32); + let bits_c: u64 = super::bignum_base::bn_get_bits_u64(bLen, b, i, 4u32); + ((&mut resM)[0usize..len as usize]).copy_from_slice( + &(&(&table)[0u32.wrapping_mul(len) as usize..] as &[u64])[0usize..len as usize], + ); + krml::unroll_for!(15, "i0", 0u32, 1u32, { + let c: u64 = fstar::uint64::eq_mask(bits_c, i0.wrapping_add(1u32) as u64); + let res_j: (&[u64], &[u64]) = + table.split_at(i0.wrapping_add(1u32).wrapping_mul(len) as usize); + for i1 in 0u32..len { + let x: u64 = c & res_j.1[i1 as usize] | !c & (&resM)[i1 as usize]; + let os: (&mut [u64], &mut [u64]) = resM.split_at_mut(0usize); + os.1[i1 as usize] = x + } + }) + } else { + let ctx_n0: (&[u64], &[u64]) = ctx.split_at(0usize); + let ctx_r20: (&[u64], &[u64]) = ctx_n0.1.split_at(len as usize); + super::bignum::bn_from_mont_u64(len, ctx_r20.0, mu, ctx_r20.1, &mut resM); + lowstar::ignore::ignore::<&[u64]>(&ctx) + }; + let mut tmp0: Box<[u64]> = vec![0u64; len as usize].into_boxed_slice(); + for i in 0u32..bBits.wrapping_div(4u32) { + krml::unroll_for!(4, "_i", 0u32, 1u32, { + let mut aM_copy: Box<[u64]> = vec![0u64; len as usize].into_boxed_slice(); + ((&mut aM_copy)[0usize..len as usize]) + .copy_from_slice(&(&resM)[0usize..len as usize]); + let ctx_n0: (&[u64], &[u64]) = ctx.split_at(0usize); + super::bignum::bn_almost_mont_sqr_u64(len, ctx_n0.1, mu, &aM_copy, &mut resM); + lowstar::ignore::ignore::<&[u64]>(&ctx) + }); + let k: u32 = bBits + .wrapping_sub(bBits.wrapping_rem(4u32)) + .wrapping_sub(4u32.wrapping_mul(i)) + .wrapping_sub(4u32); + let bits_l: u64 = super::bignum_base::bn_get_bits_u64(bLen, b, k, 4u32); + lowstar::ignore::ignore::<&[u64]>(&table); + ((&mut tmp0)[0usize..len as usize]).copy_from_slice( + &(&(&table)[0u32.wrapping_mul(len) as usize..] 
as &[u64])[0usize..len as usize], + ); + krml::unroll_for!(15, "i0", 0u32, 1u32, { + let c: u64 = fstar::uint64::eq_mask(bits_l, i0.wrapping_add(1u32) as u64); + let res_j: (&[u64], &[u64]) = + table.split_at(i0.wrapping_add(1u32).wrapping_mul(len) as usize); + for i1 in 0u32..len { + let x: u64 = c & res_j.1[i1 as usize] | !c & (&tmp0)[i1 as usize]; + let os: (&mut [u64], &mut [u64]) = tmp0.split_at_mut(0usize); + os.1[i1 as usize] = x + } + }); + let mut aM_copy: Box<[u64]> = vec![0u64; len as usize].into_boxed_slice(); + ((&mut aM_copy)[0usize..len as usize]).copy_from_slice(&(&resM)[0usize..len as usize]); + let ctx_n0: (&[u64], &[u64]) = ctx.split_at(0usize); + super::bignum::bn_almost_mont_mul_u64(len, ctx_n0.1, mu, &aM_copy, &tmp0, &mut resM); + lowstar::ignore::ignore::<&[u64]>(&ctx) + } + super::bignum::bn_from_mont_u64(len, n, mu, &resM, res) + } +} + +pub(crate) fn bn_mod_exp_vartime_u64( + len: u32, + nBits: u32, + n: &[u64], + a: &[u64], + bBits: u32, + b: &[u64], + res: &mut [u64], +) { + let mut r2: Box<[u64]> = vec![0u64; len as usize].into_boxed_slice(); + super::bignum::bn_precomp_r2_mod_n_u64(len, nBits, n, &mut r2); + let mu: u64 = super::bignum::mod_inv_uint64(n[0usize]); + super::bignum::bn_mod_exp_vartime_precomp_u64(len, n, mu, &r2, a, bBits, b, res) +} + +pub(crate) fn bn_mod_exp_consttime_u64( + len: u32, + nBits: u32, + n: &[u64], + a: &[u64], + bBits: u32, + b: &[u64], + res: &mut [u64], +) { + let mut r2: Box<[u64]> = vec![0u64; len as usize].into_boxed_slice(); + super::bignum::bn_precomp_r2_mod_n_u64(len, nBits, n, &mut r2); + let mu: u64 = super::bignum::mod_inv_uint64(n[0usize]); + super::bignum::bn_mod_exp_consttime_precomp_u64(len, n, mu, &r2, a, bBits, b, res) +} + +#[derive(PartialEq, Clone)] +pub struct bn_mont_ctx_u32 { + pub len: u32, + pub n: Box<[u32]>, + pub mu: u32, + pub r2: Box<[u32]>, +} + +#[derive(PartialEq, Clone)] +pub struct bn_mont_ctx_u64 { + pub len: u32, + pub n: Box<[u64]>, + pub mu: u64, + pub r2: 
Box<[u64]>, +} diff --git a/libcrux-hacl-rs/src/bignum/bignum256.rs b/libcrux-hacl-rs/src/bignum/bignum256.rs new file mode 100644 index 000000000..14c1d5efd --- /dev/null +++ b/libcrux-hacl-rs/src/bignum/bignum256.rs @@ -0,0 +1,1295 @@ +#![allow(non_snake_case)] +#![allow(non_upper_case_globals)] +#![allow(non_camel_case_types)] +#![allow(unused_assignments)] +#![allow(unreachable_patterns)] + +use crate::fstar; +use crate::lowstar; +use crate::util as lib; + +/** +Write `a + b mod 2^256` in `res`. + + This functions returns the carry. + + The arguments a, b and res are meant to be 256-bit bignums, i.e. uint64_t[4] +*/ +pub fn add(a: &[u64], b: &[u64], res: &mut [u64]) -> u64 { + let mut c: [u64; 1] = [0u64; 1usize]; + { + let t1: u64 = a[4u32.wrapping_mul(0u32) as usize]; + let t2: u64 = b[4u32.wrapping_mul(0u32) as usize]; + let res_i: (&mut [u64], &mut [u64]) = res.split_at_mut(4u32.wrapping_mul(0u32) as usize); + (&mut c)[0usize] = lib::inttypes_intrinsics::add_carry_u64((&c)[0usize], t1, t2, res_i.1); + let t10: u64 = a[4u32.wrapping_mul(0u32).wrapping_add(1u32) as usize]; + let t20: u64 = b[4u32.wrapping_mul(0u32).wrapping_add(1u32) as usize]; + let res_i0: (&mut [u64], &mut [u64]) = res_i.1.split_at_mut(1usize); + (&mut c)[0usize] = + lib::inttypes_intrinsics::add_carry_u64((&c)[0usize], t10, t20, res_i0.1); + let t11: u64 = a[4u32.wrapping_mul(0u32).wrapping_add(2u32) as usize]; + let t21: u64 = b[4u32.wrapping_mul(0u32).wrapping_add(2u32) as usize]; + let res_i1: (&mut [u64], &mut [u64]) = res_i0.1.split_at_mut(1usize); + (&mut c)[0usize] = + lib::inttypes_intrinsics::add_carry_u64((&c)[0usize], t11, t21, res_i1.1); + let t12: u64 = a[4u32.wrapping_mul(0u32).wrapping_add(3u32) as usize]; + let t22: u64 = b[4u32.wrapping_mul(0u32).wrapping_add(3u32) as usize]; + let res_i2: (&mut [u64], &mut [u64]) = res_i1.1.split_at_mut(1usize); + (&mut c)[0usize] = lib::inttypes_intrinsics::add_carry_u64((&c)[0usize], t12, t22, res_i2.1) + }; + (&c)[0usize] +} + +/** 
+Write `a - b mod 2^256` in `res`. + + This functions returns the carry. + + The arguments a, b and res are meant to be 256-bit bignums, i.e. uint64_t[4] +*/ +pub fn sub(a: &[u64], b: &[u64], res: &mut [u64]) -> u64 { + let mut c: [u64; 1] = [0u64; 1usize]; + { + let t1: u64 = a[4u32.wrapping_mul(0u32) as usize]; + let t2: u64 = b[4u32.wrapping_mul(0u32) as usize]; + let res_i: (&mut [u64], &mut [u64]) = res.split_at_mut(4u32.wrapping_mul(0u32) as usize); + (&mut c)[0usize] = lib::inttypes_intrinsics::sub_borrow_u64((&c)[0usize], t1, t2, res_i.1); + let t10: u64 = a[4u32.wrapping_mul(0u32).wrapping_add(1u32) as usize]; + let t20: u64 = b[4u32.wrapping_mul(0u32).wrapping_add(1u32) as usize]; + let res_i0: (&mut [u64], &mut [u64]) = res_i.1.split_at_mut(1usize); + (&mut c)[0usize] = + lib::inttypes_intrinsics::sub_borrow_u64((&c)[0usize], t10, t20, res_i0.1); + let t11: u64 = a[4u32.wrapping_mul(0u32).wrapping_add(2u32) as usize]; + let t21: u64 = b[4u32.wrapping_mul(0u32).wrapping_add(2u32) as usize]; + let res_i1: (&mut [u64], &mut [u64]) = res_i0.1.split_at_mut(1usize); + (&mut c)[0usize] = + lib::inttypes_intrinsics::sub_borrow_u64((&c)[0usize], t11, t21, res_i1.1); + let t12: u64 = a[4u32.wrapping_mul(0u32).wrapping_add(3u32) as usize]; + let t22: u64 = b[4u32.wrapping_mul(0u32).wrapping_add(3u32) as usize]; + let res_i2: (&mut [u64], &mut [u64]) = res_i1.1.split_at_mut(1usize); + (&mut c)[0usize] = + lib::inttypes_intrinsics::sub_borrow_u64((&c)[0usize], t12, t22, res_i2.1) + }; + (&c)[0usize] +} + +/** +Write `(a + b) mod n` in `res`. + + The arguments a, b, n and the outparam res are meant to be 256-bit bignums, i.e. uint64_t[4]. + + Before calling this function, the caller will need to ensure that the following + preconditions are observed. 
+ ā€¢ a < n + ā€¢ b < n +*/ +pub fn add_mod(n: &[u64], a: &[u64], b: &[u64], res: &mut [u64]) { + let mut c: [u64; 1] = [0u64; 1usize]; + { + let t1: u64 = a[4u32.wrapping_mul(0u32) as usize]; + let t2: u64 = b[4u32.wrapping_mul(0u32) as usize]; + let res_i: (&mut [u64], &mut [u64]) = res.split_at_mut(4u32.wrapping_mul(0u32) as usize); + (&mut c)[0usize] = lib::inttypes_intrinsics::add_carry_u64((&c)[0usize], t1, t2, res_i.1); + let t10: u64 = a[4u32.wrapping_mul(0u32).wrapping_add(1u32) as usize]; + let t20: u64 = b[4u32.wrapping_mul(0u32).wrapping_add(1u32) as usize]; + let res_i0: (&mut [u64], &mut [u64]) = res_i.1.split_at_mut(1usize); + (&mut c)[0usize] = + lib::inttypes_intrinsics::add_carry_u64((&c)[0usize], t10, t20, res_i0.1); + let t11: u64 = a[4u32.wrapping_mul(0u32).wrapping_add(2u32) as usize]; + let t21: u64 = b[4u32.wrapping_mul(0u32).wrapping_add(2u32) as usize]; + let res_i1: (&mut [u64], &mut [u64]) = res_i0.1.split_at_mut(1usize); + (&mut c)[0usize] = + lib::inttypes_intrinsics::add_carry_u64((&c)[0usize], t11, t21, res_i1.1); + let t12: u64 = a[4u32.wrapping_mul(0u32).wrapping_add(3u32) as usize]; + let t22: u64 = b[4u32.wrapping_mul(0u32).wrapping_add(3u32) as usize]; + let res_i2: (&mut [u64], &mut [u64]) = res_i1.1.split_at_mut(1usize); + (&mut c)[0usize] = lib::inttypes_intrinsics::add_carry_u64((&c)[0usize], t12, t22, res_i2.1) + }; + let c0: u64 = (&c)[0usize]; + let mut tmp: [u64; 4] = [0u64; 4usize]; + let mut c1: [u64; 1] = [0u64; 1usize]; + { + let t1: u64 = res[4u32.wrapping_mul(0u32) as usize]; + let t2: u64 = n[4u32.wrapping_mul(0u32) as usize]; + let res_i: (&mut [u64], &mut [u64]) = tmp.split_at_mut(4u32.wrapping_mul(0u32) as usize); + (&mut c1)[0usize] = + lib::inttypes_intrinsics::sub_borrow_u64((&c1)[0usize], t1, t2, res_i.1); + let t10: u64 = res[4u32.wrapping_mul(0u32).wrapping_add(1u32) as usize]; + let t20: u64 = n[4u32.wrapping_mul(0u32).wrapping_add(1u32) as usize]; + let res_i0: (&mut [u64], &mut [u64]) = 
res_i.1.split_at_mut(1usize); + (&mut c1)[0usize] = + lib::inttypes_intrinsics::sub_borrow_u64((&c1)[0usize], t10, t20, res_i0.1); + let t11: u64 = res[4u32.wrapping_mul(0u32).wrapping_add(2u32) as usize]; + let t21: u64 = n[4u32.wrapping_mul(0u32).wrapping_add(2u32) as usize]; + let res_i1: (&mut [u64], &mut [u64]) = res_i0.1.split_at_mut(1usize); + (&mut c1)[0usize] = + lib::inttypes_intrinsics::sub_borrow_u64((&c1)[0usize], t11, t21, res_i1.1); + let t12: u64 = res[4u32.wrapping_mul(0u32).wrapping_add(3u32) as usize]; + let t22: u64 = n[4u32.wrapping_mul(0u32).wrapping_add(3u32) as usize]; + let res_i2: (&mut [u64], &mut [u64]) = res_i1.1.split_at_mut(1usize); + (&mut c1)[0usize] = + lib::inttypes_intrinsics::sub_borrow_u64((&c1)[0usize], t12, t22, res_i2.1) + }; + let c10: u64 = (&c1)[0usize]; + let c2: u64 = c0.wrapping_sub(c10); + krml::unroll_for!(4, "i", 0u32, 1u32, { + let x: u64 = c2 & res[i as usize] | !c2 & (&tmp)[i as usize]; + let os: (&mut [u64], &mut [u64]) = res.split_at_mut(0usize); + os.1[i as usize] = x + }) +} + +/** +Write `(a - b) mod n` in `res`. + + The arguments a, b, n and the outparam res are meant to be 256-bit bignums, i.e. uint64_t[4]. + + Before calling this function, the caller will need to ensure that the following + preconditions are observed. 
+ ā€¢ a < n + ā€¢ b < n +*/ +pub fn sub_mod(n: &[u64], a: &[u64], b: &[u64], res: &mut [u64]) { + let mut c: [u64; 1] = [0u64; 1usize]; + { + let t1: u64 = a[4u32.wrapping_mul(0u32) as usize]; + let t2: u64 = b[4u32.wrapping_mul(0u32) as usize]; + let res_i: (&mut [u64], &mut [u64]) = res.split_at_mut(4u32.wrapping_mul(0u32) as usize); + (&mut c)[0usize] = lib::inttypes_intrinsics::sub_borrow_u64((&c)[0usize], t1, t2, res_i.1); + let t10: u64 = a[4u32.wrapping_mul(0u32).wrapping_add(1u32) as usize]; + let t20: u64 = b[4u32.wrapping_mul(0u32).wrapping_add(1u32) as usize]; + let res_i0: (&mut [u64], &mut [u64]) = res_i.1.split_at_mut(1usize); + (&mut c)[0usize] = + lib::inttypes_intrinsics::sub_borrow_u64((&c)[0usize], t10, t20, res_i0.1); + let t11: u64 = a[4u32.wrapping_mul(0u32).wrapping_add(2u32) as usize]; + let t21: u64 = b[4u32.wrapping_mul(0u32).wrapping_add(2u32) as usize]; + let res_i1: (&mut [u64], &mut [u64]) = res_i0.1.split_at_mut(1usize); + (&mut c)[0usize] = + lib::inttypes_intrinsics::sub_borrow_u64((&c)[0usize], t11, t21, res_i1.1); + let t12: u64 = a[4u32.wrapping_mul(0u32).wrapping_add(3u32) as usize]; + let t22: u64 = b[4u32.wrapping_mul(0u32).wrapping_add(3u32) as usize]; + let res_i2: (&mut [u64], &mut [u64]) = res_i1.1.split_at_mut(1usize); + (&mut c)[0usize] = + lib::inttypes_intrinsics::sub_borrow_u64((&c)[0usize], t12, t22, res_i2.1) + }; + let c0: u64 = (&c)[0usize]; + let mut tmp: [u64; 4] = [0u64; 4usize]; + let mut c1: [u64; 1] = [0u64; 1usize]; + { + let t1: u64 = res[4u32.wrapping_mul(0u32) as usize]; + let t2: u64 = n[4u32.wrapping_mul(0u32) as usize]; + let res_i: (&mut [u64], &mut [u64]) = tmp.split_at_mut(4u32.wrapping_mul(0u32) as usize); + (&mut c1)[0usize] = lib::inttypes_intrinsics::add_carry_u64((&c1)[0usize], t1, t2, res_i.1); + let t10: u64 = res[4u32.wrapping_mul(0u32).wrapping_add(1u32) as usize]; + let t20: u64 = n[4u32.wrapping_mul(0u32).wrapping_add(1u32) as usize]; + let res_i0: (&mut [u64], &mut [u64]) = 
res_i.1.split_at_mut(1usize); + (&mut c1)[0usize] = + lib::inttypes_intrinsics::add_carry_u64((&c1)[0usize], t10, t20, res_i0.1); + let t11: u64 = res[4u32.wrapping_mul(0u32).wrapping_add(2u32) as usize]; + let t21: u64 = n[4u32.wrapping_mul(0u32).wrapping_add(2u32) as usize]; + let res_i1: (&mut [u64], &mut [u64]) = res_i0.1.split_at_mut(1usize); + (&mut c1)[0usize] = + lib::inttypes_intrinsics::add_carry_u64((&c1)[0usize], t11, t21, res_i1.1); + let t12: u64 = res[4u32.wrapping_mul(0u32).wrapping_add(3u32) as usize]; + let t22: u64 = n[4u32.wrapping_mul(0u32).wrapping_add(3u32) as usize]; + let res_i2: (&mut [u64], &mut [u64]) = res_i1.1.split_at_mut(1usize); + (&mut c1)[0usize] = + lib::inttypes_intrinsics::add_carry_u64((&c1)[0usize], t12, t22, res_i2.1) + }; + let c10: u64 = (&c1)[0usize]; + lowstar::ignore::ignore::(c10); + let c2: u64 = 0u64.wrapping_sub(c0); + krml::unroll_for!(4, "i", 0u32, 1u32, { + let x: u64 = c2 & (&tmp)[i as usize] | !c2 & res[i as usize]; + let os: (&mut [u64], &mut [u64]) = res.split_at_mut(0usize); + os.1[i as usize] = x + }) +} + +/** +Write `a * b` in `res`. + + The arguments a and b are meant to be 256-bit bignums, i.e. uint64_t[4]. + The outparam res is meant to be a 512-bit bignum, i.e. uint64_t[8]. 
+*/ +pub fn mul(a: &[u64], b: &[u64], res: &mut [u64]) { + (res[0usize..8usize]).copy_from_slice(&[0u64; 8usize]); + krml::unroll_for!(4, "i", 0u32, 1u32, { + let bj: u64 = b[i as usize]; + let res_j: (&mut [u64], &mut [u64]) = res.split_at_mut(i as usize); + let mut c: [u64; 1] = [0u64; 1usize]; + { + let a_i: u64 = a[4u32.wrapping_mul(0u32) as usize]; + let res_i: (&mut [u64], &mut [u64]) = + res_j.1.split_at_mut(4u32.wrapping_mul(0u32) as usize); + (&mut c)[0usize] = + super::bignum_base::mul_wide_add2_u64(a_i, bj, (&c)[0usize], res_i.1); + let a_i0: u64 = a[4u32.wrapping_mul(0u32).wrapping_add(1u32) as usize]; + let res_i0: (&mut [u64], &mut [u64]) = res_i.1.split_at_mut(1usize); + (&mut c)[0usize] = + super::bignum_base::mul_wide_add2_u64(a_i0, bj, (&c)[0usize], res_i0.1); + let a_i1: u64 = a[4u32.wrapping_mul(0u32).wrapping_add(2u32) as usize]; + let res_i1: (&mut [u64], &mut [u64]) = res_i0.1.split_at_mut(1usize); + (&mut c)[0usize] = + super::bignum_base::mul_wide_add2_u64(a_i1, bj, (&c)[0usize], res_i1.1); + let a_i2: u64 = a[4u32.wrapping_mul(0u32).wrapping_add(3u32) as usize]; + let res_i2: (&mut [u64], &mut [u64]) = res_i1.1.split_at_mut(1usize); + (&mut c)[0usize] = + super::bignum_base::mul_wide_add2_u64(a_i2, bj, (&c)[0usize], res_i2.1) + }; + let r: u64 = (&c)[0usize]; + res[4u32.wrapping_add(i) as usize] = r + }) +} + +/** +Write `a * a` in `res`. + + The argument a is meant to be a 256-bit bignum, i.e. uint64_t[4]. + The outparam res is meant to be a 512-bit bignum, i.e. uint64_t[8]. 
+*/ +pub fn sqr(a: &[u64], res: &mut [u64]) { + (res[0usize..8usize]).copy_from_slice(&[0u64; 8usize]); + krml::unroll_for!(4, "i", 0u32, 1u32, { + let a_j: u64 = a[i as usize]; + let ab: (&[u64], &[u64]) = a.split_at(0usize); + let res_j: (&mut [u64], &mut [u64]) = res.split_at_mut(i as usize); + let mut c: [u64; 1] = [0u64; 1usize]; + for i0 in 0u32..i.wrapping_div(4u32) { + let a_i: u64 = ab.1[4u32.wrapping_mul(i0) as usize]; + let res_i: (&mut [u64], &mut [u64]) = + res_j.1.split_at_mut(4u32.wrapping_mul(i0) as usize); + (&mut c)[0usize] = + super::bignum_base::mul_wide_add2_u64(a_i, a_j, (&c)[0usize], res_i.1); + let a_i0: u64 = ab.1[4u32.wrapping_mul(i0).wrapping_add(1u32) as usize]; + let res_i0: (&mut [u64], &mut [u64]) = res_i.1.split_at_mut(1usize); + (&mut c)[0usize] = + super::bignum_base::mul_wide_add2_u64(a_i0, a_j, (&c)[0usize], res_i0.1); + let a_i1: u64 = ab.1[4u32.wrapping_mul(i0).wrapping_add(2u32) as usize]; + let res_i1: (&mut [u64], &mut [u64]) = res_i0.1.split_at_mut(1usize); + (&mut c)[0usize] = + super::bignum_base::mul_wide_add2_u64(a_i1, a_j, (&c)[0usize], res_i1.1); + let a_i2: u64 = ab.1[4u32.wrapping_mul(i0).wrapping_add(3u32) as usize]; + let res_i2: (&mut [u64], &mut [u64]) = res_i1.1.split_at_mut(1usize); + (&mut c)[0usize] = + super::bignum_base::mul_wide_add2_u64(a_i2, a_j, (&c)[0usize], res_i2.1) + } + for i0 in i.wrapping_div(4u32).wrapping_mul(4u32)..i { + let a_i: u64 = ab.1[i0 as usize]; + let res_i: (&mut [u64], &mut [u64]) = res_j.1.split_at_mut(i0 as usize); + (&mut c)[0usize] = + super::bignum_base::mul_wide_add2_u64(a_i, a_j, (&c)[0usize], res_i.1) + } + let r: u64 = (&c)[0usize]; + res[i.wrapping_add(i) as usize] = r + }); + let mut a_copy: [u64; 8] = [0u64; 8usize]; + let mut b_copy: [u64; 8] = [0u64; 8usize]; + ((&mut a_copy)[0usize..8usize]).copy_from_slice(&res[0usize..8usize]); + ((&mut b_copy)[0usize..8usize]).copy_from_slice(&res[0usize..8usize]); + let r: u64 = super::bignum_base::bn_add_eq_len_u64(8u32, 
&a_copy, &b_copy, res); + let c0: u64 = r; + lowstar::ignore::ignore::(c0); + let mut tmp: [u64; 8] = [0u64; 8usize]; + krml::unroll_for!(4, "i", 0u32, 1u32, { + let res1: fstar::uint128::uint128 = fstar::uint128::mul_wide(a[i as usize], a[i as usize]); + let hi: u64 = fstar::uint128::uint128_to_uint64(fstar::uint128::shift_right(res1, 64u32)); + let lo: u64 = fstar::uint128::uint128_to_uint64(res1); + (&mut tmp)[2u32.wrapping_mul(i) as usize] = lo; + (&mut tmp)[2u32.wrapping_mul(i).wrapping_add(1u32) as usize] = hi + }); + let mut a_copy0: [u64; 8] = [0u64; 8usize]; + let mut b_copy0: [u64; 8] = [0u64; 8usize]; + ((&mut a_copy0)[0usize..8usize]).copy_from_slice(&res[0usize..8usize]); + ((&mut b_copy0)[0usize..8usize]).copy_from_slice(&(&tmp)[0usize..8usize]); + let r0: u64 = super::bignum_base::bn_add_eq_len_u64(8u32, &a_copy0, &b_copy0, res); + let c1: u64 = r0; + lowstar::ignore::ignore::(c1) +} + +#[inline] +fn precompr2(nBits: u32, n: &[u64], res: &mut [u64]) { + (res[0usize..4usize]).copy_from_slice(&[0u64; 4usize]); + let i: u32 = nBits.wrapping_div(64u32); + let j: u32 = nBits.wrapping_rem(64u32); + res[i as usize] |= 1u64.wrapping_shl(j); + for _i in 0u32..512u32.wrapping_sub(nBits) { + let mut a_copy: [u64; 4] = [0u64; 4usize]; + let mut b_copy: [u64; 4] = [0u64; 4usize]; + ((&mut a_copy)[0usize..4usize]).copy_from_slice(&res[0usize..4usize]); + ((&mut b_copy)[0usize..4usize]).copy_from_slice(&res[0usize..4usize]); + super::bignum256::add_mod(n, &a_copy, &b_copy, res) + } +} + +#[inline] +fn reduction(n: &[u64], nInv: u64, c: &mut [u64], res: &mut [u64]) { + let mut c0: [u64; 1] = [0u64; 1usize]; + krml::unroll_for!(4, "i", 0u32, 1u32, { + let qj: u64 = nInv.wrapping_mul(c[i as usize]); + let res_j: (&mut [u64], &mut [u64]) = c.split_at_mut(i as usize); + let mut c1: [u64; 1] = [0u64; 1usize]; + { + let a_i: u64 = n[4u32.wrapping_mul(0u32) as usize]; + let res_i: (&mut [u64], &mut [u64]) = + res_j.1.split_at_mut(4u32.wrapping_mul(0u32) as usize); + (&mut 
c1)[0usize] = + super::bignum_base::mul_wide_add2_u64(a_i, qj, (&c1)[0usize], res_i.1); + let a_i0: u64 = n[4u32.wrapping_mul(0u32).wrapping_add(1u32) as usize]; + let res_i0: (&mut [u64], &mut [u64]) = res_i.1.split_at_mut(1usize); + (&mut c1)[0usize] = + super::bignum_base::mul_wide_add2_u64(a_i0, qj, (&c1)[0usize], res_i0.1); + let a_i1: u64 = n[4u32.wrapping_mul(0u32).wrapping_add(2u32) as usize]; + let res_i1: (&mut [u64], &mut [u64]) = res_i0.1.split_at_mut(1usize); + (&mut c1)[0usize] = + super::bignum_base::mul_wide_add2_u64(a_i1, qj, (&c1)[0usize], res_i1.1); + let a_i2: u64 = n[4u32.wrapping_mul(0u32).wrapping_add(3u32) as usize]; + let res_i2: (&mut [u64], &mut [u64]) = res_i1.1.split_at_mut(1usize); + (&mut c1)[0usize] = + super::bignum_base::mul_wide_add2_u64(a_i2, qj, (&c1)[0usize], res_i2.1) + }; + let r: u64 = (&c1)[0usize]; + let c10: u64 = r; + let res_j0: u64 = c[4u32.wrapping_add(i) as usize]; + let resb: (&mut [u64], &mut [u64]) = c.split_at_mut(i as usize + 4usize); + (&mut c0)[0usize] = + lib::inttypes_intrinsics::add_carry_u64((&c0)[0usize], c10, res_j0, resb.1) + }); + (res[0usize..4usize]).copy_from_slice(&(&c[4usize..])[0usize..4usize]); + let c00: u64 = (&c0)[0usize]; + let mut tmp: [u64; 4] = [0u64; 4usize]; + let mut c1: [u64; 1] = [0u64; 1usize]; + { + let t1: u64 = res[4u32.wrapping_mul(0u32) as usize]; + let t2: u64 = n[4u32.wrapping_mul(0u32) as usize]; + let res_i: (&mut [u64], &mut [u64]) = tmp.split_at_mut(4u32.wrapping_mul(0u32) as usize); + (&mut c1)[0usize] = + lib::inttypes_intrinsics::sub_borrow_u64((&c1)[0usize], t1, t2, res_i.1); + let t10: u64 = res[4u32.wrapping_mul(0u32).wrapping_add(1u32) as usize]; + let t20: u64 = n[4u32.wrapping_mul(0u32).wrapping_add(1u32) as usize]; + let res_i0: (&mut [u64], &mut [u64]) = res_i.1.split_at_mut(1usize); + (&mut c1)[0usize] = + lib::inttypes_intrinsics::sub_borrow_u64((&c1)[0usize], t10, t20, res_i0.1); + let t11: u64 = res[4u32.wrapping_mul(0u32).wrapping_add(2u32) as usize]; + 
let t21: u64 = n[4u32.wrapping_mul(0u32).wrapping_add(2u32) as usize]; + let res_i1: (&mut [u64], &mut [u64]) = res_i0.1.split_at_mut(1usize); + (&mut c1)[0usize] = + lib::inttypes_intrinsics::sub_borrow_u64((&c1)[0usize], t11, t21, res_i1.1); + let t12: u64 = res[4u32.wrapping_mul(0u32).wrapping_add(3u32) as usize]; + let t22: u64 = n[4u32.wrapping_mul(0u32).wrapping_add(3u32) as usize]; + let res_i2: (&mut [u64], &mut [u64]) = res_i1.1.split_at_mut(1usize); + (&mut c1)[0usize] = + lib::inttypes_intrinsics::sub_borrow_u64((&c1)[0usize], t12, t22, res_i2.1) + }; + let c10: u64 = (&c1)[0usize]; + let c2: u64 = c00.wrapping_sub(c10); + krml::unroll_for!(4, "i", 0u32, 1u32, { + let x: u64 = c2 & res[i as usize] | !c2 & (&tmp)[i as usize]; + let os: (&mut [u64], &mut [u64]) = res.split_at_mut(0usize); + os.1[i as usize] = x + }) +} + +#[inline] +fn to(n: &[u64], nInv: u64, r2: &[u64], a: &[u64], aM: &mut [u64]) { + let mut c: [u64; 8] = [0u64; 8usize]; + super::bignum256::mul(a, r2, &mut c); + super::bignum256::reduction(n, nInv, &mut c, aM) +} + +#[inline] +fn from(n: &[u64], nInv_u64: u64, aM: &[u64], a: &mut [u64]) { + let mut tmp: [u64; 8] = [0u64; 8usize]; + ((&mut tmp)[0usize..4usize]).copy_from_slice(&aM[0usize..4usize]); + super::bignum256::reduction(n, nInv_u64, &mut tmp, a) +} + +#[inline] +fn areduction(n: &[u64], nInv: u64, c: &mut [u64], res: &mut [u64]) { + let mut c0: [u64; 1] = [0u64; 1usize]; + krml::unroll_for!(4, "i", 0u32, 1u32, { + let qj: u64 = nInv.wrapping_mul(c[i as usize]); + let res_j: (&mut [u64], &mut [u64]) = c.split_at_mut(i as usize); + let mut c1: [u64; 1] = [0u64; 1usize]; + { + let a_i: u64 = n[4u32.wrapping_mul(0u32) as usize]; + let res_i: (&mut [u64], &mut [u64]) = + res_j.1.split_at_mut(4u32.wrapping_mul(0u32) as usize); + (&mut c1)[0usize] = + super::bignum_base::mul_wide_add2_u64(a_i, qj, (&c1)[0usize], res_i.1); + let a_i0: u64 = n[4u32.wrapping_mul(0u32).wrapping_add(1u32) as usize]; + let res_i0: (&mut [u64], &mut [u64]) = 
res_i.1.split_at_mut(1usize); + (&mut c1)[0usize] = + super::bignum_base::mul_wide_add2_u64(a_i0, qj, (&c1)[0usize], res_i0.1); + let a_i1: u64 = n[4u32.wrapping_mul(0u32).wrapping_add(2u32) as usize]; + let res_i1: (&mut [u64], &mut [u64]) = res_i0.1.split_at_mut(1usize); + (&mut c1)[0usize] = + super::bignum_base::mul_wide_add2_u64(a_i1, qj, (&c1)[0usize], res_i1.1); + let a_i2: u64 = n[4u32.wrapping_mul(0u32).wrapping_add(3u32) as usize]; + let res_i2: (&mut [u64], &mut [u64]) = res_i1.1.split_at_mut(1usize); + (&mut c1)[0usize] = + super::bignum_base::mul_wide_add2_u64(a_i2, qj, (&c1)[0usize], res_i2.1) + }; + let r: u64 = (&c1)[0usize]; + let c10: u64 = r; + let res_j0: u64 = c[4u32.wrapping_add(i) as usize]; + let resb: (&mut [u64], &mut [u64]) = c.split_at_mut(i as usize + 4usize); + (&mut c0)[0usize] = + lib::inttypes_intrinsics::add_carry_u64((&c0)[0usize], c10, res_j0, resb.1) + }); + (res[0usize..4usize]).copy_from_slice(&(&c[4usize..])[0usize..4usize]); + let c00: u64 = (&c0)[0usize]; + let mut tmp: [u64; 4] = [0u64; 4usize]; + let c1: u64 = super::bignum256::sub(res, n, &mut tmp); + lowstar::ignore::ignore::(c1); + let m: u64 = 0u64.wrapping_sub(c00); + krml::unroll_for!(4, "i", 0u32, 1u32, { + let x: u64 = m & (&tmp)[i as usize] | !m & res[i as usize]; + let os: (&mut [u64], &mut [u64]) = res.split_at_mut(0usize); + os.1[i as usize] = x + }) +} + +#[inline] +fn amont_mul(n: &[u64], nInv_u64: u64, aM: &[u64], bM: &[u64], resM: &mut [u64]) { + let mut c: [u64; 8] = [0u64; 8usize]; + super::bignum256::mul(aM, bM, &mut c); + super::bignum256::areduction(n, nInv_u64, &mut c, resM) +} + +#[inline] +fn amont_sqr(n: &[u64], nInv_u64: u64, aM: &[u64], resM: &mut [u64]) { + let mut c: [u64; 8] = [0u64; 8usize]; + super::bignum256::sqr(aM, &mut c); + super::bignum256::areduction(n, nInv_u64, &mut c, resM) +} + +#[inline] +fn bn_slow_precomp(n: &[u64], mu: u64, r2: &[u64], a: &[u64], res: &mut [u64]) { + let mut a_mod: [u64; 4] = [0u64; 4usize]; + let mut a1: 
[u64; 8] = [0u64; 8usize]; + ((&mut a1)[0usize..8usize]).copy_from_slice(&a[0usize..8usize]); + super::bignum256::areduction(n, mu, &mut a1, &mut a_mod); + super::bignum256::to(n, mu, r2, &a_mod, res) +} + +/** +Write `a mod n` in `res`. + + The argument a is meant to be a 512-bit bignum, i.e. uint64_t[8]. + The argument n and the outparam res are meant to be 256-bit bignums, i.e. uint64_t[4]. + + The function returns false if any of the following preconditions are violated, + true otherwise. + ā€¢ 1 < n + ā€¢ n % 2 = 1 +*/ +pub fn r#mod(n: &[u64], a: &[u64], res: &mut [u64]) -> bool { + let mut one: [u64; 4] = [0u64; 4usize]; + ((&mut one)[0usize..4usize]).copy_from_slice(&[0u64; 4usize]); + (&mut one)[0usize] = 1u64; + let bit0: u64 = n[0usize] & 1u64; + let m0: u64 = 0u64.wrapping_sub(bit0); + let mut acc: [u64; 1] = [0u64; 1usize]; + krml::unroll_for!(4, "i", 0u32, 1u32, { + let beq: u64 = fstar::uint64::eq_mask((&one)[i as usize], n[i as usize]); + let blt: u64 = !fstar::uint64::gte_mask((&one)[i as usize], n[i as usize]); + (&mut acc)[0usize] = beq & (&acc)[0usize] | !beq & blt + }); + let m1: u64 = (&acc)[0usize]; + let is_valid_m: u64 = m0 & m1; + let nBits: u32 = 64u32.wrapping_mul(super::bignum_base::bn_get_top_index_u64(4u32, n) as u32); + if is_valid_m == 0xFFFFFFFFFFFFFFFFu64 { + let mut r2: [u64; 4] = [0u64; 4usize]; + super::bignum256::precompr2(nBits, n, &mut r2); + let mu: u64 = super::bignum::mod_inv_uint64(n[0usize]); + super::bignum256::bn_slow_precomp(n, mu, &r2, a, res) + } else { + (res[0usize..4usize]).copy_from_slice(&[0u64; 4usize]) + }; + is_valid_m == 0xFFFFFFFFFFFFFFFFu64 +} + +fn exp_check(n: &[u64], a: &[u64], bBits: u32, b: &[u64]) -> u64 { + let mut one: [u64; 4] = [0u64; 4usize]; + ((&mut one)[0usize..4usize]).copy_from_slice(&[0u64; 4usize]); + (&mut one)[0usize] = 1u64; + let bit0: u64 = n[0usize] & 1u64; + let m0: u64 = 0u64.wrapping_sub(bit0); + let mut acc: [u64; 1] = [0u64; 1usize]; + krml::unroll_for!(4, "i", 0u32, 1u32, { + 
let beq: u64 = fstar::uint64::eq_mask((&one)[i as usize], n[i as usize]); + let blt: u64 = !fstar::uint64::gte_mask((&one)[i as usize], n[i as usize]); + (&mut acc)[0usize] = beq & (&acc)[0usize] | !beq & blt + }); + let m1: u64 = (&acc)[0usize]; + let m00: u64 = m0 & m1; + let bLen: u32 = if bBits == 0u32 { + 1u32 + } else { + bBits + .wrapping_sub(1u32) + .wrapping_div(64u32) + .wrapping_add(1u32) + }; + let m10: u64 = if bBits < 64u32.wrapping_mul(bLen) { + let mut b2: Box<[u64]> = vec![0u64; bLen as usize].into_boxed_slice(); + let i: u32 = bBits.wrapping_div(64u32); + let j: u32 = bBits.wrapping_rem(64u32); + (&mut b2)[i as usize] = (&b2)[i as usize] | 1u64.wrapping_shl(j); + let mut acc0: [u64; 1] = [0u64; 1usize]; + for i0 in 0u32..bLen { + let beq: u64 = fstar::uint64::eq_mask(b[i0 as usize], (&b2)[i0 as usize]); + let blt: u64 = !fstar::uint64::gte_mask(b[i0 as usize], (&b2)[i0 as usize]); + (&mut acc0)[0usize] = beq & (&acc0)[0usize] | !beq & blt + } + let res: u64 = (&acc0)[0usize]; + res + } else { + 0xFFFFFFFFFFFFFFFFu64 + }; + let mut acc0: [u64; 1] = [0u64; 1usize]; + krml::unroll_for!(4, "i", 0u32, 1u32, { + let beq: u64 = fstar::uint64::eq_mask(a[i as usize], n[i as usize]); + let blt: u64 = !fstar::uint64::gte_mask(a[i as usize], n[i as usize]); + (&mut acc0)[0usize] = beq & (&acc0)[0usize] | !beq & blt + }); + let m2: u64 = (&acc0)[0usize]; + let m: u64 = m10 & m2; + m00 & m +} + +#[inline] +fn exp_vartime_precomp( + n: &[u64], + mu: u64, + r2: &[u64], + a: &[u64], + bBits: u32, + b: &[u64], + res: &mut [u64], +) { + if bBits < 200u32 { + let mut aM: [u64; 4] = [0u64; 4usize]; + super::bignum256::to(n, mu, r2, a, &mut aM); + let mut resM: [u64; 4] = [0u64; 4usize]; + let mut ctx: [u64; 8] = [0u64; 8usize]; + ((&mut ctx)[0usize..4usize]).copy_from_slice(&n[0usize..4usize]); + ((&mut ctx)[4usize..4usize + 4usize]).copy_from_slice(&r2[0usize..4usize]); + let ctx_n: (&[u64], &[u64]) = ctx.split_at(0usize); + let ctx_r2: (&[u64], &[u64]) = 
ctx_n.1.split_at(4usize); + super::bignum256::from(ctx_r2.0, mu, ctx_r2.1, &mut resM); + lowstar::ignore::ignore::<&[u64]>(&ctx); + for i in 0u32..bBits { + let i1: u32 = i.wrapping_div(64u32); + let j: u32 = i.wrapping_rem(64u32); + let tmp: u64 = b[i1 as usize]; + let bit: u64 = tmp.wrapping_shr(j) & 1u64; + if bit != 0u64 { + let mut aM_copy: [u64; 4] = [0u64; 4usize]; + ((&mut aM_copy)[0usize..4usize]).copy_from_slice(&(&resM)[0usize..4usize]); + let ctx_n0: (&[u64], &[u64]) = ctx.split_at(0usize); + super::bignum256::amont_mul(ctx_n0.1, mu, &aM_copy, &aM, &mut resM); + lowstar::ignore::ignore::<&[u64]>(&ctx) + }; + let mut aM_copy: [u64; 4] = [0u64; 4usize]; + ((&mut aM_copy)[0usize..4usize]).copy_from_slice(&(&aM)[0usize..4usize]); + let ctx_n0: (&[u64], &[u64]) = ctx.split_at(0usize); + super::bignum256::amont_sqr(ctx_n0.1, mu, &aM_copy, &mut aM); + lowstar::ignore::ignore::<&[u64]>(&ctx) + } + super::bignum256::from(n, mu, &resM, res) + } else { + let mut aM: [u64; 4] = [0u64; 4usize]; + super::bignum256::to(n, mu, r2, a, &mut aM); + let mut resM: [u64; 4] = [0u64; 4usize]; + let bLen: u32 = if bBits == 0u32 { + 1u32 + } else { + bBits + .wrapping_sub(1u32) + .wrapping_div(64u32) + .wrapping_add(1u32) + }; + let mut ctx: [u64; 8] = [0u64; 8usize]; + ((&mut ctx)[0usize..4usize]).copy_from_slice(&n[0usize..4usize]); + ((&mut ctx)[4usize..4usize + 4usize]).copy_from_slice(&r2[0usize..4usize]); + let mut table: [u64; 64] = [0u64; 64usize]; + let mut tmp: [u64; 4] = [0u64; 4usize]; + let t0: (&mut [u64], &mut [u64]) = table.split_at_mut(0usize); + let t1: (&mut [u64], &mut [u64]) = t0.1.split_at_mut(4usize); + let ctx_n: (&[u64], &[u64]) = ctx.split_at(0usize); + let ctx_r2: (&[u64], &[u64]) = ctx_n.1.split_at(4usize); + super::bignum256::from(ctx_r2.0, mu, ctx_r2.1, t1.0); + lowstar::ignore::ignore::<&[u64]>(&ctx); + (t1.1[0usize..4usize]).copy_from_slice(&(&aM)[0usize..4usize]); + lowstar::ignore::ignore::<&[u64]>(&table); + krml::unroll_for!(7, "i", 0u32, 
1u32, { + let t11: (&[u64], &[u64]) = + table.split_at(i.wrapping_add(1u32).wrapping_mul(4u32) as usize); + let mut aM_copy: [u64; 4] = [0u64; 4usize]; + ((&mut aM_copy)[0usize..4usize]).copy_from_slice(&t11.1[0usize..4usize]); + let ctx_n0: (&[u64], &[u64]) = ctx.split_at(0usize); + super::bignum256::amont_sqr(ctx_n0.1, mu, &aM_copy, &mut tmp); + lowstar::ignore::ignore::<&[u64]>(&ctx); + ((&mut table)[2u32.wrapping_mul(i).wrapping_add(2u32).wrapping_mul(4u32) as usize + ..2u32.wrapping_mul(i).wrapping_add(2u32).wrapping_mul(4u32) as usize + 4usize]) + .copy_from_slice(&(&tmp)[0usize..4usize]); + let t2: (&[u64], &[u64]) = + table.split_at(2u32.wrapping_mul(i).wrapping_add(2u32).wrapping_mul(4u32) as usize); + let mut aM_copy0: [u64; 4] = [0u64; 4usize]; + ((&mut aM_copy0)[0usize..4usize]).copy_from_slice(&(&aM)[0usize..4usize]); + let ctx_n1: (&[u64], &[u64]) = ctx.split_at(0usize); + super::bignum256::amont_mul(ctx_n1.1, mu, &aM_copy0, t2.1, &mut tmp); + lowstar::ignore::ignore::<&[u64]>(&ctx); + ((&mut table)[2u32.wrapping_mul(i).wrapping_add(3u32).wrapping_mul(4u32) as usize + ..2u32.wrapping_mul(i).wrapping_add(3u32).wrapping_mul(4u32) as usize + 4usize]) + .copy_from_slice(&(&tmp)[0usize..4usize]) + }); + if bBits.wrapping_rem(4u32) != 0u32 { + let i: u32 = bBits.wrapping_div(4u32).wrapping_mul(4u32); + let bits_c: u64 = super::bignum_base::bn_get_bits_u64(bLen, b, i, 4u32); + let bits_l32: u32 = bits_c as u32; + let a_bits_l: (&[u64], &[u64]) = table.split_at(bits_l32.wrapping_mul(4u32) as usize); + ((&mut resM)[0usize..4usize]).copy_from_slice(&a_bits_l.1[0usize..4usize]) + } else { + let ctx_n0: (&[u64], &[u64]) = ctx.split_at(0usize); + let ctx_r20: (&[u64], &[u64]) = ctx_n0.1.split_at(4usize); + super::bignum256::from(ctx_r20.0, mu, ctx_r20.1, &mut resM); + lowstar::ignore::ignore::<&[u64]>(&ctx) + }; + let mut tmp0: [u64; 4] = [0u64; 4usize]; + for i in 0u32..bBits.wrapping_div(4u32) { + krml::unroll_for!(4, "_i", 0u32, 1u32, { + let mut aM_copy: [u64; 
4] = [0u64; 4usize]; + ((&mut aM_copy)[0usize..4usize]).copy_from_slice(&(&resM)[0usize..4usize]); + let ctx_n0: (&[u64], &[u64]) = ctx.split_at(0usize); + super::bignum256::amont_sqr(ctx_n0.1, mu, &aM_copy, &mut resM); + lowstar::ignore::ignore::<&[u64]>(&ctx) + }); + let k: u32 = bBits + .wrapping_sub(bBits.wrapping_rem(4u32)) + .wrapping_sub(4u32.wrapping_mul(i)) + .wrapping_sub(4u32); + let bits_l: u64 = super::bignum_base::bn_get_bits_u64(bLen, b, k, 4u32); + lowstar::ignore::ignore::<&[u64]>(&table); + let bits_l32: u32 = bits_l as u32; + let a_bits_l: (&[u64], &[u64]) = table.split_at(bits_l32.wrapping_mul(4u32) as usize); + ((&mut tmp0)[0usize..4usize]).copy_from_slice(&a_bits_l.1[0usize..4usize]); + let mut aM_copy: [u64; 4] = [0u64; 4usize]; + ((&mut aM_copy)[0usize..4usize]).copy_from_slice(&(&resM)[0usize..4usize]); + let ctx_n0: (&[u64], &[u64]) = ctx.split_at(0usize); + super::bignum256::amont_mul(ctx_n0.1, mu, &aM_copy, &tmp0, &mut resM); + lowstar::ignore::ignore::<&[u64]>(&ctx) + } + super::bignum256::from(n, mu, &resM, res) + } +} + +#[inline] +fn exp_consttime_precomp( + n: &[u64], + mu: u64, + r2: &[u64], + a: &[u64], + bBits: u32, + b: &[u64], + res: &mut [u64], +) { + if bBits < 200u32 { + let mut aM: [u64; 4] = [0u64; 4usize]; + super::bignum256::to(n, mu, r2, a, &mut aM); + let mut resM: [u64; 4] = [0u64; 4usize]; + let mut ctx: [u64; 8] = [0u64; 8usize]; + ((&mut ctx)[0usize..4usize]).copy_from_slice(&n[0usize..4usize]); + ((&mut ctx)[4usize..4usize + 4usize]).copy_from_slice(&r2[0usize..4usize]); + let mut sw: [u64; 1] = [0u64; 1usize]; + let ctx_n: (&[u64], &[u64]) = ctx.split_at(0usize); + let ctx_r2: (&[u64], &[u64]) = ctx_n.1.split_at(4usize); + super::bignum256::from(ctx_r2.0, mu, ctx_r2.1, &mut resM); + lowstar::ignore::ignore::<&[u64]>(&ctx); + for i in 0u32..bBits { + let i1: u32 = bBits.wrapping_sub(i).wrapping_sub(1u32).wrapping_div(64u32); + let j: u32 = bBits.wrapping_sub(i).wrapping_sub(1u32).wrapping_rem(64u32); + let tmp: 
u64 = b[i1 as usize]; + let bit: u64 = tmp.wrapping_shr(j) & 1u64; + let sw1: u64 = bit ^ (&sw)[0usize]; + krml::unroll_for!(4, "i0", 0u32, 1u32, { + let dummy: u64 = + 0u64.wrapping_sub(sw1) & ((&resM)[i0 as usize] ^ (&aM)[i0 as usize]); + (&mut resM)[i0 as usize] = (&resM)[i0 as usize] ^ dummy; + (&mut aM)[i0 as usize] = (&aM)[i0 as usize] ^ dummy + }); + let mut aM_copy: [u64; 4] = [0u64; 4usize]; + ((&mut aM_copy)[0usize..4usize]).copy_from_slice(&(&aM)[0usize..4usize]); + let ctx_n0: (&[u64], &[u64]) = ctx.split_at(0usize); + super::bignum256::amont_mul(ctx_n0.1, mu, &aM_copy, &resM, &mut aM); + lowstar::ignore::ignore::<&[u64]>(&ctx); + let mut aM_copy0: [u64; 4] = [0u64; 4usize]; + ((&mut aM_copy0)[0usize..4usize]).copy_from_slice(&(&resM)[0usize..4usize]); + let ctx_n1: (&[u64], &[u64]) = ctx.split_at(0usize); + super::bignum256::amont_sqr(ctx_n1.1, mu, &aM_copy0, &mut resM); + lowstar::ignore::ignore::<&[u64]>(&ctx); + (&mut sw)[0usize] = bit + } + let sw0: u64 = (&sw)[0usize]; + krml::unroll_for!(4, "i", 0u32, 1u32, { + let dummy: u64 = 0u64.wrapping_sub(sw0) & ((&resM)[i as usize] ^ (&aM)[i as usize]); + (&mut resM)[i as usize] = (&resM)[i as usize] ^ dummy; + (&mut aM)[i as usize] = (&aM)[i as usize] ^ dummy + }); + super::bignum256::from(n, mu, &resM, res) + } else { + let mut aM: [u64; 4] = [0u64; 4usize]; + super::bignum256::to(n, mu, r2, a, &mut aM); + let mut resM: [u64; 4] = [0u64; 4usize]; + let bLen: u32 = if bBits == 0u32 { + 1u32 + } else { + bBits + .wrapping_sub(1u32) + .wrapping_div(64u32) + .wrapping_add(1u32) + }; + let mut ctx: [u64; 8] = [0u64; 8usize]; + ((&mut ctx)[0usize..4usize]).copy_from_slice(&n[0usize..4usize]); + ((&mut ctx)[4usize..4usize + 4usize]).copy_from_slice(&r2[0usize..4usize]); + let mut table: [u64; 64] = [0u64; 64usize]; + let mut tmp: [u64; 4] = [0u64; 4usize]; + let t0: (&mut [u64], &mut [u64]) = table.split_at_mut(0usize); + let t1: (&mut [u64], &mut [u64]) = t0.1.split_at_mut(4usize); + let ctx_n: (&[u64], 
&[u64]) = ctx.split_at(0usize); + let ctx_r2: (&[u64], &[u64]) = ctx_n.1.split_at(4usize); + super::bignum256::from(ctx_r2.0, mu, ctx_r2.1, t1.0); + lowstar::ignore::ignore::<&[u64]>(&ctx); + (t1.1[0usize..4usize]).copy_from_slice(&(&aM)[0usize..4usize]); + lowstar::ignore::ignore::<&[u64]>(&table); + krml::unroll_for!(7, "i", 0u32, 1u32, { + let t11: (&[u64], &[u64]) = + table.split_at(i.wrapping_add(1u32).wrapping_mul(4u32) as usize); + let mut aM_copy: [u64; 4] = [0u64; 4usize]; + ((&mut aM_copy)[0usize..4usize]).copy_from_slice(&t11.1[0usize..4usize]); + let ctx_n0: (&[u64], &[u64]) = ctx.split_at(0usize); + super::bignum256::amont_sqr(ctx_n0.1, mu, &aM_copy, &mut tmp); + lowstar::ignore::ignore::<&[u64]>(&ctx); + ((&mut table)[2u32.wrapping_mul(i).wrapping_add(2u32).wrapping_mul(4u32) as usize + ..2u32.wrapping_mul(i).wrapping_add(2u32).wrapping_mul(4u32) as usize + 4usize]) + .copy_from_slice(&(&tmp)[0usize..4usize]); + let t2: (&[u64], &[u64]) = + table.split_at(2u32.wrapping_mul(i).wrapping_add(2u32).wrapping_mul(4u32) as usize); + let mut aM_copy0: [u64; 4] = [0u64; 4usize]; + ((&mut aM_copy0)[0usize..4usize]).copy_from_slice(&(&aM)[0usize..4usize]); + let ctx_n1: (&[u64], &[u64]) = ctx.split_at(0usize); + super::bignum256::amont_mul(ctx_n1.1, mu, &aM_copy0, t2.1, &mut tmp); + lowstar::ignore::ignore::<&[u64]>(&ctx); + ((&mut table)[2u32.wrapping_mul(i).wrapping_add(3u32).wrapping_mul(4u32) as usize + ..2u32.wrapping_mul(i).wrapping_add(3u32).wrapping_mul(4u32) as usize + 4usize]) + .copy_from_slice(&(&tmp)[0usize..4usize]) + }); + if bBits.wrapping_rem(4u32) != 0u32 { + let i: u32 = bBits.wrapping_div(4u32).wrapping_mul(4u32); + let bits_c: u64 = super::bignum_base::bn_get_bits_u64(bLen, b, i, 4u32); + ((&mut resM)[0usize..4usize]) + .copy_from_slice(&(&(&table)[0usize..] 
as &[u64])[0usize..4usize]); + krml::unroll_for!(15, "i0", 0u32, 1u32, { + let c: u64 = fstar::uint64::eq_mask(bits_c, i0.wrapping_add(1u32) as u64); + let res_j: (&[u64], &[u64]) = + table.split_at(i0.wrapping_add(1u32).wrapping_mul(4u32) as usize); + krml::unroll_for!(4, "i1", 0u32, 1u32, { + let x: u64 = c & res_j.1[i1 as usize] | !c & (&resM)[i1 as usize]; + let os: (&mut [u64], &mut [u64]) = resM.split_at_mut(0usize); + os.1[i1 as usize] = x + }) + }) + } else { + let ctx_n0: (&[u64], &[u64]) = ctx.split_at(0usize); + let ctx_r20: (&[u64], &[u64]) = ctx_n0.1.split_at(4usize); + super::bignum256::from(ctx_r20.0, mu, ctx_r20.1, &mut resM); + lowstar::ignore::ignore::<&[u64]>(&ctx) + }; + let mut tmp0: [u64; 4] = [0u64; 4usize]; + for i in 0u32..bBits.wrapping_div(4u32) { + krml::unroll_for!(4, "_i", 0u32, 1u32, { + let mut aM_copy: [u64; 4] = [0u64; 4usize]; + ((&mut aM_copy)[0usize..4usize]).copy_from_slice(&(&resM)[0usize..4usize]); + let ctx_n0: (&[u64], &[u64]) = ctx.split_at(0usize); + super::bignum256::amont_sqr(ctx_n0.1, mu, &aM_copy, &mut resM); + lowstar::ignore::ignore::<&[u64]>(&ctx) + }); + let k: u32 = bBits + .wrapping_sub(bBits.wrapping_rem(4u32)) + .wrapping_sub(4u32.wrapping_mul(i)) + .wrapping_sub(4u32); + let bits_l: u64 = super::bignum_base::bn_get_bits_u64(bLen, b, k, 4u32); + lowstar::ignore::ignore::<&[u64]>(&table); + ((&mut tmp0)[0usize..4usize]) + .copy_from_slice(&(&(&table)[0usize..] 
as &[u64])[0usize..4usize]); + krml::unroll_for!(15, "i0", 0u32, 1u32, { + let c: u64 = fstar::uint64::eq_mask(bits_l, i0.wrapping_add(1u32) as u64); + let res_j: (&[u64], &[u64]) = + table.split_at(i0.wrapping_add(1u32).wrapping_mul(4u32) as usize); + krml::unroll_for!(4, "i1", 0u32, 1u32, { + let x: u64 = c & res_j.1[i1 as usize] | !c & (&tmp0)[i1 as usize]; + let os: (&mut [u64], &mut [u64]) = tmp0.split_at_mut(0usize); + os.1[i1 as usize] = x + }) + }); + let mut aM_copy: [u64; 4] = [0u64; 4usize]; + ((&mut aM_copy)[0usize..4usize]).copy_from_slice(&(&resM)[0usize..4usize]); + let ctx_n0: (&[u64], &[u64]) = ctx.split_at(0usize); + super::bignum256::amont_mul(ctx_n0.1, mu, &aM_copy, &tmp0, &mut resM); + lowstar::ignore::ignore::<&[u64]>(&ctx) + } + super::bignum256::from(n, mu, &resM, res) + } +} + +#[inline] +fn exp_vartime(nBits: u32, n: &[u64], a: &[u64], bBits: u32, b: &[u64], res: &mut [u64]) { + let mut r2: [u64; 4] = [0u64; 4usize]; + super::bignum256::precompr2(nBits, n, &mut r2); + let mu: u64 = super::bignum::mod_inv_uint64(n[0usize]); + super::bignum256::exp_vartime_precomp(n, mu, &r2, a, bBits, b, res) +} + +#[inline] +fn exp_consttime(nBits: u32, n: &[u64], a: &[u64], bBits: u32, b: &[u64], res: &mut [u64]) { + let mut r2: [u64; 4] = [0u64; 4usize]; + super::bignum256::precompr2(nBits, n, &mut r2); + let mu: u64 = super::bignum::mod_inv_uint64(n[0usize]); + super::bignum256::exp_consttime_precomp(n, mu, &r2, a, bBits, b, res) +} + +/** +Write `a ^ b mod n` in `res`. + + The arguments a, n and the outparam res are meant to be 256-bit bignums, i.e. uint64_t[4]. + + The argument b is a bignum of any size, and bBits is an upper bound on the + number of significant bits of b. A tighter bound results in faster execution + time. When in doubt, the number of bits for the bignum size is always a safe + default, e.g. if b is a 256-bit bignum, bBits should be 256. + + The function is *NOT* constant-time on the argument b. 
See the + mod_exp_consttime_* functions for constant-time variants. + + The function returns false if any of the following preconditions are violated, + true otherwise. + ā€¢ n % 2 = 1 + ā€¢ 1 < n + ā€¢ b < pow2 bBits + ā€¢ a < n +*/ +pub fn mod_exp_vartime(n: &[u64], a: &[u64], bBits: u32, b: &[u64], res: &mut [u64]) -> bool { + let is_valid_m: u64 = super::bignum256::exp_check(n, a, bBits, b); + let nBits: u32 = 64u32.wrapping_mul(super::bignum_base::bn_get_top_index_u64(4u32, n) as u32); + if is_valid_m == 0xFFFFFFFFFFFFFFFFu64 { + super::bignum256::exp_vartime(nBits, n, a, bBits, b, res) + } else { + (res[0usize..4usize]).copy_from_slice(&[0u64; 4usize]) + }; + is_valid_m == 0xFFFFFFFFFFFFFFFFu64 +} + +/** +Write `a ^ b mod n` in `res`. + + The arguments a, n and the outparam res are meant to be 256-bit bignums, i.e. uint64_t[4]. + + The argument b is a bignum of any size, and bBits is an upper bound on the + number of significant bits of b. A tighter bound results in faster execution + time. When in doubt, the number of bits for the bignum size is always a safe + default, e.g. if b is a 256-bit bignum, bBits should be 256. + + This function is constant-time over its argument b, at the cost of a slower + execution time than mod_exp_vartime. + + The function returns false if any of the following preconditions are violated, + true otherwise. + ā€¢ n % 2 = 1 + ā€¢ 1 < n + ā€¢ b < pow2 bBits + ā€¢ a < n +*/ +pub fn mod_exp_consttime(n: &[u64], a: &[u64], bBits: u32, b: &[u64], res: &mut [u64]) -> bool { + let is_valid_m: u64 = super::bignum256::exp_check(n, a, bBits, b); + let nBits: u32 = 64u32.wrapping_mul(super::bignum_base::bn_get_top_index_u64(4u32, n) as u32); + if is_valid_m == 0xFFFFFFFFFFFFFFFFu64 { + super::bignum256::exp_consttime(nBits, n, a, bBits, b, res) + } else { + (res[0usize..4usize]).copy_from_slice(&[0u64; 4usize]) + }; + is_valid_m == 0xFFFFFFFFFFFFFFFFu64 +} + +/** +Write `a ^ (-1) mod n` in `res`. 
+ + The arguments a, n and the outparam res are meant to be 256-bit bignums, i.e. uint64_t[4]. + + Before calling this function, the caller will need to ensure that the following + preconditions are observed. + ā€¢ n is a prime + + The function returns false if any of the following preconditions are violated, true otherwise. + ā€¢ n % 2 = 1 + ā€¢ 1 < n + ā€¢ 0 < a + ā€¢ a < n +*/ +pub fn mod_inv_prime_vartime(n: &[u64], a: &[u64], res: &mut [u64]) -> bool { + let mut one: [u64; 4] = [0u64; 4usize]; + ((&mut one)[0usize..4usize]).copy_from_slice(&[0u64; 4usize]); + (&mut one)[0usize] = 1u64; + let bit0: u64 = n[0usize] & 1u64; + let m0: u64 = 0u64.wrapping_sub(bit0); + let mut acc: [u64; 1] = [0u64; 1usize]; + krml::unroll_for!(4, "i", 0u32, 1u32, { + let beq: u64 = fstar::uint64::eq_mask((&one)[i as usize], n[i as usize]); + let blt: u64 = !fstar::uint64::gte_mask((&one)[i as usize], n[i as usize]); + (&mut acc)[0usize] = beq & (&acc)[0usize] | !beq & blt + }); + let m1: u64 = (&acc)[0usize]; + let m00: u64 = m0 & m1; + let bn_zero: [u64; 4] = [0u64; 4usize]; + let mut mask: [u64; 1] = [0xFFFFFFFFFFFFFFFFu64; 1usize]; + krml::unroll_for!(4, "i", 0u32, 1u32, { + let uu____0: u64 = fstar::uint64::eq_mask(a[i as usize], (&bn_zero)[i as usize]); + (&mut mask)[0usize] = uu____0 & (&mask)[0usize] + }); + let mask1: u64 = (&mask)[0usize]; + let res1: u64 = mask1; + let m10: u64 = res1; + let mut acc0: [u64; 1] = [0u64; 1usize]; + krml::unroll_for!(4, "i", 0u32, 1u32, { + let beq: u64 = fstar::uint64::eq_mask(a[i as usize], n[i as usize]); + let blt: u64 = !fstar::uint64::gte_mask(a[i as usize], n[i as usize]); + (&mut acc0)[0usize] = beq & (&acc0)[0usize] | !beq & blt + }); + let m2: u64 = (&acc0)[0usize]; + let is_valid_m: u64 = m00 & !m10 & m2; + let nBits: u32 = 64u32.wrapping_mul(super::bignum_base::bn_get_top_index_u64(4u32, n) as u32); + if is_valid_m == 0xFFFFFFFFFFFFFFFFu64 { + let mut n2: [u64; 4] = [0u64; 4usize]; + let c0: u64 = 
lib::inttypes_intrinsics::sub_borrow_u64( + 0u64, + n[0usize], + 2u64, + &mut (&mut n2)[0usize..], + ); + let a1: (&[u64], &[u64]) = n.split_at(1usize); + let res10: (&mut [u64], &mut [u64]) = n2.split_at_mut(1usize); + let mut c: [u64; 1] = [c0; 1usize]; + krml::unroll_for!(3, "i", 0u32, 1u32, { + let t1: u64 = a1.1[i as usize]; + let res_i: (&mut [u64], &mut [u64]) = res10.1.split_at_mut(i as usize); + (&mut c)[0usize] = + lib::inttypes_intrinsics::sub_borrow_u64((&c)[0usize], t1, 0u64, res_i.1) + }); + let c1: u64 = (&c)[0usize]; + let c2: u64 = c1; + lowstar::ignore::ignore::(c2); + super::bignum256::exp_vartime(nBits, n, a, 256u32, &n2, res) + } else { + (res[0usize..4usize]).copy_from_slice(&[0u64; 4usize]) + }; + is_valid_m == 0xFFFFFFFFFFFFFFFFu64 +} + +/** +Heap-allocate and initialize a montgomery context. + + The argument n is meant to be a 256-bit bignum, i.e. uint64_t[4]. + + Before calling this function, the caller will need to ensure that the following + preconditions are observed. + ā€¢ n % 2 = 1 + ā€¢ 1 < n + + The caller will need to call Hacl_Bignum256_mont_ctx_free on the return value + to avoid memory leaks. +*/ +pub fn mont_ctx_init(n: &[u64]) -> Box<[super::bignum::bn_mont_ctx_u64]> { + let mut r2: Box<[u64]> = vec![0u64; 4usize].into_boxed_slice(); + let mut n1: Box<[u64]> = vec![0u64; 4usize].into_boxed_slice(); + let r21: &mut [u64] = &mut r2; + let n11: &mut [u64] = &mut n1; + (n11[0usize..4usize]).copy_from_slice(&n[0usize..4usize]); + let nBits: u32 = 64u32.wrapping_mul(super::bignum_base::bn_get_top_index_u64(4u32, n) as u32); + super::bignum256::precompr2(nBits, n, r21); + let mu: u64 = super::bignum::mod_inv_uint64(n[0usize]); + let res: super::bignum::bn_mont_ctx_u64 = super::bignum::bn_mont_ctx_u64 { + len: 4u32, + n: (*n11).into(), + mu, + r2: (*r21).into(), + }; + let buf: Box<[super::bignum::bn_mont_ctx_u64]> = vec![res].into_boxed_slice(); + buf +} + +/** +Write `a mod n` in `res`. 
+ + The argument a is meant to be a 512-bit bignum, i.e. uint64_t[8]. + The outparam res is meant to be a 256-bit bignum, i.e. uint64_t[4]. + The argument k is a montgomery context obtained through Hacl_Bignum256_mont_ctx_init. +*/ +pub fn mod_precomp(k: &[super::bignum::bn_mont_ctx_u64], a: &[u64], res: &mut [u64]) { + let n: &[u64] = &(k[0usize]).n; + let mu: u64 = (k[0usize]).mu; + let r2: &[u64] = &(k[0usize]).r2; + super::bignum256::bn_slow_precomp(n, mu, r2, a, res) +} + +/** +Write `a ^ b mod n` in `res`. + + The arguments a and the outparam res are meant to be 256-bit bignums, i.e. uint64_t[4]. + The argument k is a montgomery context obtained through Hacl_Bignum256_mont_ctx_init. + + The argument b is a bignum of any size, and bBits is an upper bound on the + number of significant bits of b. A tighter bound results in faster execution + time. When in doubt, the number of bits for the bignum size is always a safe + default, e.g. if b is a 256-bit bignum, bBits should be 256. + + The function is *NOT* constant-time on the argument b. See the + mod_exp_consttime_* functions for constant-time variants. + + Before calling this function, the caller will need to ensure that the following + preconditions are observed. + ā€¢ b < pow2 bBits + ā€¢ a < n +*/ +pub fn mod_exp_vartime_precomp( + k: &[super::bignum::bn_mont_ctx_u64], + a: &[u64], + bBits: u32, + b: &[u64], + res: &mut [u64], +) { + let n: &[u64] = &(k[0usize]).n; + let mu: u64 = (k[0usize]).mu; + let r2: &[u64] = &(k[0usize]).r2; + super::bignum256::exp_vartime_precomp(n, mu, r2, a, bBits, b, res) +} + +/** +Write `a ^ b mod n` in `res`. + + The arguments a and the outparam res are meant to be 256-bit bignums, i.e. uint64_t[4]. + The argument k is a montgomery context obtained through Hacl_Bignum256_mont_ctx_init. + + The argument b is a bignum of any size, and bBits is an upper bound on the + number of significant bits of b. A tighter bound results in faster execution + time. 
When in doubt, the number of bits for the bignum size is always a safe + default, e.g. if b is a 256-bit bignum, bBits should be 256. + + This function is constant-time over its argument b, at the cost of a slower + execution time than mod_exp_vartime_*. + + Before calling this function, the caller will need to ensure that the following + preconditions are observed. + ā€¢ b < pow2 bBits + ā€¢ a < n +*/ +pub fn mod_exp_consttime_precomp( + k: &[super::bignum::bn_mont_ctx_u64], + a: &[u64], + bBits: u32, + b: &[u64], + res: &mut [u64], +) { + let n: &[u64] = &(k[0usize]).n; + let mu: u64 = (k[0usize]).mu; + let r2: &[u64] = &(k[0usize]).r2; + super::bignum256::exp_consttime_precomp(n, mu, r2, a, bBits, b, res) +} + +/** +Write `a ^ (-1) mod n` in `res`. + + The argument a and the outparam res are meant to be 256-bit bignums, i.e. uint64_t[4]. + The argument k is a montgomery context obtained through Hacl_Bignum256_mont_ctx_init. + + Before calling this function, the caller will need to ensure that the following + preconditions are observed. 
+ • n is a prime + • 0 < a + • a < n +*/ +pub fn mod_inv_prime_vartime_precomp( + k: &[super::bignum::bn_mont_ctx_u64], + a: &[u64], + res: &mut [u64], +) { + let n: &[u64] = &(k[0usize]).n; + let mu: u64 = (k[0usize]).mu; + let r2: &[u64] = &(k[0usize]).r2; + let mut n2: [u64; 4] = [0u64; 4usize]; + let c0: u64 = + lib::inttypes_intrinsics::sub_borrow_u64(0u64, n[0usize], 2u64, &mut (&mut n2)[0usize..]); + let a1: (&[u64], &[u64]) = n.split_at(1usize); + let res1: (&mut [u64], &mut [u64]) = n2.split_at_mut(1usize); + let mut c: [u64; 1] = [c0; 1usize]; + krml::unroll_for!(3, "i", 0u32, 1u32, { + let t1: u64 = a1.1[i as usize]; + let res_i: (&mut [u64], &mut [u64]) = res1.1.split_at_mut(i as usize); + (&mut c)[0usize] = lib::inttypes_intrinsics::sub_borrow_u64((&c)[0usize], t1, 0u64, res_i.1) + }); + let c1: u64 = (&c)[0usize]; + let c2: u64 = c1; + lowstar::ignore::ignore::<u64>(c2); + super::bignum256::exp_vartime_precomp(n, mu, r2, a, 256u32, &n2, res) +} + +/** +Load a big-endian bignum from memory. + + The argument b points to len bytes of valid memory. + The function returns a heap-allocated bignum of size sufficient to hold the + result of loading b, or NULL if either the allocation failed, or the amount of + required memory would exceed 4GB. + + If the return value is non-null, clients must eventually call free(3) on it to + avoid memory leaks. 
+*/ +pub fn new_bn_from_bytes_be(len: u32, b: &[u8]) -> Box<[u64]> { + if len == 0u32 || len.wrapping_sub(1u32).wrapping_div(8u32).wrapping_add(1u32) > 536870911u32 { + [].into() + } else { + let mut res: Box<[u64]> = + vec![0u64; len.wrapping_sub(1u32).wrapping_div(8u32).wrapping_add(1u32) as usize] + .into_boxed_slice(); + if false { + res + } else { + let res1: &mut [u64] = &mut res; + let res2: &mut [u64] = res1; + let bnLen: u32 = len.wrapping_sub(1u32).wrapping_div(8u32).wrapping_add(1u32); + let tmpLen: u32 = 8u32.wrapping_mul(bnLen); + let mut tmp: Box<[u8]> = vec![0u8; tmpLen as usize].into_boxed_slice(); + ((&mut tmp)[tmpLen.wrapping_sub(len) as usize + ..tmpLen.wrapping_sub(len) as usize + len as usize]) + .copy_from_slice(&b[0usize..len as usize]); + for i in 0u32..bnLen { + let u: u64 = lowstar::endianness::load64_be( + &(&tmp)[bnLen.wrapping_sub(i).wrapping_sub(1u32).wrapping_mul(8u32) as usize..], + ); + let x: u64 = u; + let os: (&mut [u64], &mut [u64]) = res2.split_at_mut(0usize); + os.1[i as usize] = x + } + (*res2).into() + } + } +} + +/** +Load a little-endian bignum from memory. + + The argument b points to len bytes of valid memory. + The function returns a heap-allocated bignum of size sufficient to hold the + result of loading b, or NULL if either the allocation failed, or the amount of + required memory would exceed 4GB. + + If the return value is non-null, clients must eventually call free(3) on it to + avoid memory leaks. 
+*/ +pub fn new_bn_from_bytes_le(len: u32, b: &[u8]) -> Box<[u64]> { + if len == 0u32 || len.wrapping_sub(1u32).wrapping_div(8u32).wrapping_add(1u32) > 536870911u32 { + [].into() + } else { + let mut res: Box<[u64]> = + vec![0u64; len.wrapping_sub(1u32).wrapping_div(8u32).wrapping_add(1u32) as usize] + .into_boxed_slice(); + if false { + res + } else { + let res1: &mut [u64] = &mut res; + let res2: &mut [u64] = res1; + let bnLen: u32 = len.wrapping_sub(1u32).wrapping_div(8u32).wrapping_add(1u32); + let tmpLen: u32 = 8u32.wrapping_mul(bnLen); + let mut tmp: Box<[u8]> = vec![0u8; tmpLen as usize].into_boxed_slice(); + ((&mut tmp)[0usize..len as usize]).copy_from_slice(&b[0usize..len as usize]); + for i in 0u32..len.wrapping_sub(1u32).wrapping_div(8u32).wrapping_add(1u32) { + let bj: (&[u8], &[u8]) = tmp.split_at(i.wrapping_mul(8u32) as usize); + let u: u64 = lowstar::endianness::load64_le(bj.1); + let r1: u64 = u; + let x: u64 = r1; + let os: (&mut [u64], &mut [u64]) = res2.split_at_mut(0usize); + os.1[i as usize] = x + } + (*res2).into() + } + } +} + +/** +Serialize a bignum into big-endian memory. + + The argument b points to a 256-bit bignum. + The outparam res points to 32 bytes of valid memory. +*/ +pub fn bn_to_bytes_be(b: &[u64], res: &mut [u8]) { + let tmp: [u8; 32] = [0u8; 32usize]; + lowstar::ignore::ignore::<&[u8]>(&tmp); + krml::unroll_for!( + 4, + "i", + 0u32, + 1u32, + lowstar::endianness::store64_be( + &mut res[i.wrapping_mul(8u32) as usize..], + b[4u32.wrapping_sub(i).wrapping_sub(1u32) as usize] + ) + ) +} + +/** +Serialize a bignum into little-endian memory. + + The argument b points to a 256-bit bignum. + The outparam res points to 32 bytes of valid memory. 
+*/ +pub fn bn_to_bytes_le(b: &[u64], res: &mut [u8]) { + let tmp: [u8; 32] = [0u8; 32usize]; + lowstar::ignore::ignore::<&[u8]>(&tmp); + krml::unroll_for!( + 4, + "i", + 0u32, + 1u32, + lowstar::endianness::store64_le(&mut res[i.wrapping_mul(8u32) as usize..], b[i as usize]) + ) +} + +/** +Returns 2^64 - 1 if a < b, otherwise returns 0. + + The arguments a and b are meant to be 256-bit bignums, i.e. uint64_t[4]. +*/ +pub fn lt_mask(a: &[u64], b: &[u64]) -> u64 { + let mut acc: [u64; 1] = [0u64; 1usize]; + krml::unroll_for!(4, "i", 0u32, 1u32, { + let beq: u64 = fstar::uint64::eq_mask(a[i as usize], b[i as usize]); + let blt: u64 = !fstar::uint64::gte_mask(a[i as usize], b[i as usize]); + (&mut acc)[0usize] = beq & (&acc)[0usize] | !beq & blt + }); + (&acc)[0usize] +} + +/** +Returns 2^64 - 1 if a = b, otherwise returns 0. + + The arguments a and b are meant to be 256-bit bignums, i.e. uint64_t[4]. +*/ +pub fn eq_mask(a: &[u64], b: &[u64]) -> u64 { + let mut mask: [u64; 1] = [0xFFFFFFFFFFFFFFFFu64; 1usize]; + krml::unroll_for!(4, "i", 0u32, 1u32, { + let uu____0: u64 = fstar::uint64::eq_mask(a[i as usize], b[i as usize]); + (&mut mask)[0usize] = uu____0 & (&mask)[0usize] + }); + let mask1: u64 = (&mask)[0usize]; + mask1 +} diff --git a/libcrux-hacl-rs/src/bignum/bignum256_32.rs b/libcrux-hacl-rs/src/bignum/bignum256_32.rs new file mode 100644 index 000000000..fc64ee8f3 --- /dev/null +++ b/libcrux-hacl-rs/src/bignum/bignum256_32.rs @@ -0,0 +1,1332 @@ +#![allow(non_snake_case)] +#![allow(non_upper_case_globals)] +#![allow(non_camel_case_types)] +#![allow(unused_assignments)] +#![allow(unreachable_patterns)] + +use crate::fstar; +use crate::lowstar; +use crate::util as lib; + +/** +Write `a + b mod 2^256` in `res`. + + This function returns the carry. + + The arguments a, b and res are meant to be 256-bit bignums, i.e. 
uint32_t[8] +*/ +pub fn add(a: &[u32], b: &[u32], res: &mut [u32]) -> u32 { + let mut c: [u32; 1] = [0u32; 1usize]; + krml::unroll_for!(2, "i", 0u32, 1u32, { + let t1: u32 = a[4u32.wrapping_mul(i) as usize]; + let t2: u32 = b[4u32.wrapping_mul(i) as usize]; + let res_i: (&mut [u32], &mut [u32]) = res.split_at_mut(4u32.wrapping_mul(i) as usize); + (&mut c)[0usize] = lib::inttypes_intrinsics::add_carry_u32((&c)[0usize], t1, t2, res_i.1); + let t10: u32 = a[4u32.wrapping_mul(i).wrapping_add(1u32) as usize]; + let t20: u32 = b[4u32.wrapping_mul(i).wrapping_add(1u32) as usize]; + let res_i0: (&mut [u32], &mut [u32]) = res_i.1.split_at_mut(1usize); + (&mut c)[0usize] = + lib::inttypes_intrinsics::add_carry_u32((&c)[0usize], t10, t20, res_i0.1); + let t11: u32 = a[4u32.wrapping_mul(i).wrapping_add(2u32) as usize]; + let t21: u32 = b[4u32.wrapping_mul(i).wrapping_add(2u32) as usize]; + let res_i1: (&mut [u32], &mut [u32]) = res_i0.1.split_at_mut(1usize); + (&mut c)[0usize] = + lib::inttypes_intrinsics::add_carry_u32((&c)[0usize], t11, t21, res_i1.1); + let t12: u32 = a[4u32.wrapping_mul(i).wrapping_add(3u32) as usize]; + let t22: u32 = b[4u32.wrapping_mul(i).wrapping_add(3u32) as usize]; + let res_i2: (&mut [u32], &mut [u32]) = res_i1.1.split_at_mut(1usize); + (&mut c)[0usize] = lib::inttypes_intrinsics::add_carry_u32((&c)[0usize], t12, t22, res_i2.1) + }); + (&c)[0usize] +} + +/** +Write `a - b mod 2^256` in `res`. + + This function returns the carry. + + The arguments a, b and res are meant to be 256-bit bignums, i.e. 
uint32_t[8] +*/ +pub fn sub(a: &[u32], b: &[u32], res: &mut [u32]) -> u32 { + let mut c: [u32; 1] = [0u32; 1usize]; + krml::unroll_for!(2, "i", 0u32, 1u32, { + let t1: u32 = a[4u32.wrapping_mul(i) as usize]; + let t2: u32 = b[4u32.wrapping_mul(i) as usize]; + let res_i: (&mut [u32], &mut [u32]) = res.split_at_mut(4u32.wrapping_mul(i) as usize); + (&mut c)[0usize] = lib::inttypes_intrinsics::sub_borrow_u32((&c)[0usize], t1, t2, res_i.1); + let t10: u32 = a[4u32.wrapping_mul(i).wrapping_add(1u32) as usize]; + let t20: u32 = b[4u32.wrapping_mul(i).wrapping_add(1u32) as usize]; + let res_i0: (&mut [u32], &mut [u32]) = res_i.1.split_at_mut(1usize); + (&mut c)[0usize] = + lib::inttypes_intrinsics::sub_borrow_u32((&c)[0usize], t10, t20, res_i0.1); + let t11: u32 = a[4u32.wrapping_mul(i).wrapping_add(2u32) as usize]; + let t21: u32 = b[4u32.wrapping_mul(i).wrapping_add(2u32) as usize]; + let res_i1: (&mut [u32], &mut [u32]) = res_i0.1.split_at_mut(1usize); + (&mut c)[0usize] = + lib::inttypes_intrinsics::sub_borrow_u32((&c)[0usize], t11, t21, res_i1.1); + let t12: u32 = a[4u32.wrapping_mul(i).wrapping_add(3u32) as usize]; + let t22: u32 = b[4u32.wrapping_mul(i).wrapping_add(3u32) as usize]; + let res_i2: (&mut [u32], &mut [u32]) = res_i1.1.split_at_mut(1usize); + (&mut c)[0usize] = + lib::inttypes_intrinsics::sub_borrow_u32((&c)[0usize], t12, t22, res_i2.1) + }); + (&c)[0usize] +} + +/** +Write `(a + b) mod n` in `res`. + + The arguments a, b, n and the outparam res are meant to be 256-bit bignums, i.e. uint32_t[8]. + + Before calling this function, the caller will need to ensure that the following + preconditions are observed. 
+ ā€¢ a < n + ā€¢ b < n +*/ +pub fn add_mod(n: &[u32], a: &[u32], b: &[u32], res: &mut [u32]) { + let mut c: [u32; 1] = [0u32; 1usize]; + krml::unroll_for!(2, "i", 0u32, 1u32, { + let t1: u32 = a[4u32.wrapping_mul(i) as usize]; + let t2: u32 = b[4u32.wrapping_mul(i) as usize]; + let res_i: (&mut [u32], &mut [u32]) = res.split_at_mut(4u32.wrapping_mul(i) as usize); + (&mut c)[0usize] = lib::inttypes_intrinsics::add_carry_u32((&c)[0usize], t1, t2, res_i.1); + let t10: u32 = a[4u32.wrapping_mul(i).wrapping_add(1u32) as usize]; + let t20: u32 = b[4u32.wrapping_mul(i).wrapping_add(1u32) as usize]; + let res_i0: (&mut [u32], &mut [u32]) = res_i.1.split_at_mut(1usize); + (&mut c)[0usize] = + lib::inttypes_intrinsics::add_carry_u32((&c)[0usize], t10, t20, res_i0.1); + let t11: u32 = a[4u32.wrapping_mul(i).wrapping_add(2u32) as usize]; + let t21: u32 = b[4u32.wrapping_mul(i).wrapping_add(2u32) as usize]; + let res_i1: (&mut [u32], &mut [u32]) = res_i0.1.split_at_mut(1usize); + (&mut c)[0usize] = + lib::inttypes_intrinsics::add_carry_u32((&c)[0usize], t11, t21, res_i1.1); + let t12: u32 = a[4u32.wrapping_mul(i).wrapping_add(3u32) as usize]; + let t22: u32 = b[4u32.wrapping_mul(i).wrapping_add(3u32) as usize]; + let res_i2: (&mut [u32], &mut [u32]) = res_i1.1.split_at_mut(1usize); + (&mut c)[0usize] = lib::inttypes_intrinsics::add_carry_u32((&c)[0usize], t12, t22, res_i2.1) + }); + let c0: u32 = (&c)[0usize]; + let mut tmp: [u32; 8] = [0u32; 8usize]; + let mut c1: [u32; 1] = [0u32; 1usize]; + krml::unroll_for!(2, "i", 0u32, 1u32, { + let t1: u32 = res[4u32.wrapping_mul(i) as usize]; + let t2: u32 = n[4u32.wrapping_mul(i) as usize]; + let res_i: (&mut [u32], &mut [u32]) = tmp.split_at_mut(4u32.wrapping_mul(i) as usize); + (&mut c1)[0usize] = + lib::inttypes_intrinsics::sub_borrow_u32((&c1)[0usize], t1, t2, res_i.1); + let t10: u32 = res[4u32.wrapping_mul(i).wrapping_add(1u32) as usize]; + let t20: u32 = n[4u32.wrapping_mul(i).wrapping_add(1u32) as usize]; + let res_i0: (&mut 
[u32], &mut [u32]) = res_i.1.split_at_mut(1usize); + (&mut c1)[0usize] = + lib::inttypes_intrinsics::sub_borrow_u32((&c1)[0usize], t10, t20, res_i0.1); + let t11: u32 = res[4u32.wrapping_mul(i).wrapping_add(2u32) as usize]; + let t21: u32 = n[4u32.wrapping_mul(i).wrapping_add(2u32) as usize]; + let res_i1: (&mut [u32], &mut [u32]) = res_i0.1.split_at_mut(1usize); + (&mut c1)[0usize] = + lib::inttypes_intrinsics::sub_borrow_u32((&c1)[0usize], t11, t21, res_i1.1); + let t12: u32 = res[4u32.wrapping_mul(i).wrapping_add(3u32) as usize]; + let t22: u32 = n[4u32.wrapping_mul(i).wrapping_add(3u32) as usize]; + let res_i2: (&mut [u32], &mut [u32]) = res_i1.1.split_at_mut(1usize); + (&mut c1)[0usize] = + lib::inttypes_intrinsics::sub_borrow_u32((&c1)[0usize], t12, t22, res_i2.1) + }); + let c10: u32 = (&c1)[0usize]; + let c2: u32 = c0.wrapping_sub(c10); + krml::unroll_for!(8, "i", 0u32, 1u32, { + let x: u32 = c2 & res[i as usize] | !c2 & (&tmp)[i as usize]; + let os: (&mut [u32], &mut [u32]) = res.split_at_mut(0usize); + os.1[i as usize] = x + }) +} + +/** +Write `(a - b) mod n` in `res`. + + The arguments a, b, n and the outparam res are meant to be 256-bit bignums, i.e. uint32_t[8]. + + Before calling this function, the caller will need to ensure that the following + preconditions are observed. 
+ ā€¢ a < n + ā€¢ b < n +*/ +pub fn sub_mod(n: &[u32], a: &[u32], b: &[u32], res: &mut [u32]) { + let mut c: [u32; 1] = [0u32; 1usize]; + krml::unroll_for!(2, "i", 0u32, 1u32, { + let t1: u32 = a[4u32.wrapping_mul(i) as usize]; + let t2: u32 = b[4u32.wrapping_mul(i) as usize]; + let res_i: (&mut [u32], &mut [u32]) = res.split_at_mut(4u32.wrapping_mul(i) as usize); + (&mut c)[0usize] = lib::inttypes_intrinsics::sub_borrow_u32((&c)[0usize], t1, t2, res_i.1); + let t10: u32 = a[4u32.wrapping_mul(i).wrapping_add(1u32) as usize]; + let t20: u32 = b[4u32.wrapping_mul(i).wrapping_add(1u32) as usize]; + let res_i0: (&mut [u32], &mut [u32]) = res_i.1.split_at_mut(1usize); + (&mut c)[0usize] = + lib::inttypes_intrinsics::sub_borrow_u32((&c)[0usize], t10, t20, res_i0.1); + let t11: u32 = a[4u32.wrapping_mul(i).wrapping_add(2u32) as usize]; + let t21: u32 = b[4u32.wrapping_mul(i).wrapping_add(2u32) as usize]; + let res_i1: (&mut [u32], &mut [u32]) = res_i0.1.split_at_mut(1usize); + (&mut c)[0usize] = + lib::inttypes_intrinsics::sub_borrow_u32((&c)[0usize], t11, t21, res_i1.1); + let t12: u32 = a[4u32.wrapping_mul(i).wrapping_add(3u32) as usize]; + let t22: u32 = b[4u32.wrapping_mul(i).wrapping_add(3u32) as usize]; + let res_i2: (&mut [u32], &mut [u32]) = res_i1.1.split_at_mut(1usize); + (&mut c)[0usize] = + lib::inttypes_intrinsics::sub_borrow_u32((&c)[0usize], t12, t22, res_i2.1) + }); + let c0: u32 = (&c)[0usize]; + let mut tmp: [u32; 8] = [0u32; 8usize]; + let mut c1: [u32; 1] = [0u32; 1usize]; + krml::unroll_for!(2, "i", 0u32, 1u32, { + let t1: u32 = res[4u32.wrapping_mul(i) as usize]; + let t2: u32 = n[4u32.wrapping_mul(i) as usize]; + let res_i: (&mut [u32], &mut [u32]) = tmp.split_at_mut(4u32.wrapping_mul(i) as usize); + (&mut c1)[0usize] = lib::inttypes_intrinsics::add_carry_u32((&c1)[0usize], t1, t2, res_i.1); + let t10: u32 = res[4u32.wrapping_mul(i).wrapping_add(1u32) as usize]; + let t20: u32 = n[4u32.wrapping_mul(i).wrapping_add(1u32) as usize]; + let res_i0: 
(&mut [u32], &mut [u32]) = res_i.1.split_at_mut(1usize); + (&mut c1)[0usize] = + lib::inttypes_intrinsics::add_carry_u32((&c1)[0usize], t10, t20, res_i0.1); + let t11: u32 = res[4u32.wrapping_mul(i).wrapping_add(2u32) as usize]; + let t21: u32 = n[4u32.wrapping_mul(i).wrapping_add(2u32) as usize]; + let res_i1: (&mut [u32], &mut [u32]) = res_i0.1.split_at_mut(1usize); + (&mut c1)[0usize] = + lib::inttypes_intrinsics::add_carry_u32((&c1)[0usize], t11, t21, res_i1.1); + let t12: u32 = res[4u32.wrapping_mul(i).wrapping_add(3u32) as usize]; + let t22: u32 = n[4u32.wrapping_mul(i).wrapping_add(3u32) as usize]; + let res_i2: (&mut [u32], &mut [u32]) = res_i1.1.split_at_mut(1usize); + (&mut c1)[0usize] = + lib::inttypes_intrinsics::add_carry_u32((&c1)[0usize], t12, t22, res_i2.1) + }); + let c10: u32 = (&c1)[0usize]; + lowstar::ignore::ignore::(c10); + let c2: u32 = 0u32.wrapping_sub(c0); + krml::unroll_for!(8, "i", 0u32, 1u32, { + let x: u32 = c2 & (&tmp)[i as usize] | !c2 & res[i as usize]; + let os: (&mut [u32], &mut [u32]) = res.split_at_mut(0usize); + os.1[i as usize] = x + }) +} + +/** +Write `a * b` in `res`. + + The arguments a and b are meant to be 256-bit bignums, i.e. uint32_t[8]. + The outparam res is meant to be a 512-bit bignum, i.e. uint32_t[16]. 
+*/ +pub fn mul(a: &[u32], b: &[u32], res: &mut [u32]) { + (res[0usize..16usize]).copy_from_slice(&[0u32; 16usize]); + krml::unroll_for!(8, "i", 0u32, 1u32, { + let bj: u32 = b[i as usize]; + let res_j: (&mut [u32], &mut [u32]) = res.split_at_mut(i as usize); + let mut c: [u32; 1] = [0u32; 1usize]; + krml::unroll_for!(2, "i0", 0u32, 1u32, { + let a_i: u32 = a[4u32.wrapping_mul(i0) as usize]; + let res_i: (&mut [u32], &mut [u32]) = + res_j.1.split_at_mut(4u32.wrapping_mul(i0) as usize); + (&mut c)[0usize] = + super::bignum_base::mul_wide_add2_u32(a_i, bj, (&c)[0usize], res_i.1); + let a_i0: u32 = a[4u32.wrapping_mul(i0).wrapping_add(1u32) as usize]; + let res_i0: (&mut [u32], &mut [u32]) = res_i.1.split_at_mut(1usize); + (&mut c)[0usize] = + super::bignum_base::mul_wide_add2_u32(a_i0, bj, (&c)[0usize], res_i0.1); + let a_i1: u32 = a[4u32.wrapping_mul(i0).wrapping_add(2u32) as usize]; + let res_i1: (&mut [u32], &mut [u32]) = res_i0.1.split_at_mut(1usize); + (&mut c)[0usize] = + super::bignum_base::mul_wide_add2_u32(a_i1, bj, (&c)[0usize], res_i1.1); + let a_i2: u32 = a[4u32.wrapping_mul(i0).wrapping_add(3u32) as usize]; + let res_i2: (&mut [u32], &mut [u32]) = res_i1.1.split_at_mut(1usize); + (&mut c)[0usize] = + super::bignum_base::mul_wide_add2_u32(a_i2, bj, (&c)[0usize], res_i2.1) + }); + let r: u32 = (&c)[0usize]; + res[8u32.wrapping_add(i) as usize] = r + }) +} + +/** +Write `a * a` in `res`. + + The argument a is meant to be a 256-bit bignum, i.e. uint32_t[8]. + The outparam res is meant to be a 512-bit bignum, i.e. uint32_t[16]. 
+*/ +pub fn sqr(a: &[u32], res: &mut [u32]) { + (res[0usize..16usize]).copy_from_slice(&[0u32; 16usize]); + krml::unroll_for!(8, "i", 0u32, 1u32, { + let a_j: u32 = a[i as usize]; + let ab: (&[u32], &[u32]) = a.split_at(0usize); + let res_j: (&mut [u32], &mut [u32]) = res.split_at_mut(i as usize); + let mut c: [u32; 1] = [0u32; 1usize]; + for i0 in 0u32..i.wrapping_div(4u32) { + let a_i: u32 = ab.1[4u32.wrapping_mul(i0) as usize]; + let res_i: (&mut [u32], &mut [u32]) = + res_j.1.split_at_mut(4u32.wrapping_mul(i0) as usize); + (&mut c)[0usize] = + super::bignum_base::mul_wide_add2_u32(a_i, a_j, (&c)[0usize], res_i.1); + let a_i0: u32 = ab.1[4u32.wrapping_mul(i0).wrapping_add(1u32) as usize]; + let res_i0: (&mut [u32], &mut [u32]) = res_i.1.split_at_mut(1usize); + (&mut c)[0usize] = + super::bignum_base::mul_wide_add2_u32(a_i0, a_j, (&c)[0usize], res_i0.1); + let a_i1: u32 = ab.1[4u32.wrapping_mul(i0).wrapping_add(2u32) as usize]; + let res_i1: (&mut [u32], &mut [u32]) = res_i0.1.split_at_mut(1usize); + (&mut c)[0usize] = + super::bignum_base::mul_wide_add2_u32(a_i1, a_j, (&c)[0usize], res_i1.1); + let a_i2: u32 = ab.1[4u32.wrapping_mul(i0).wrapping_add(3u32) as usize]; + let res_i2: (&mut [u32], &mut [u32]) = res_i1.1.split_at_mut(1usize); + (&mut c)[0usize] = + super::bignum_base::mul_wide_add2_u32(a_i2, a_j, (&c)[0usize], res_i2.1) + } + for i0 in i.wrapping_div(4u32).wrapping_mul(4u32)..i { + let a_i: u32 = ab.1[i0 as usize]; + let res_i: (&mut [u32], &mut [u32]) = res_j.1.split_at_mut(i0 as usize); + (&mut c)[0usize] = + super::bignum_base::mul_wide_add2_u32(a_i, a_j, (&c)[0usize], res_i.1) + } + let r: u32 = (&c)[0usize]; + res[i.wrapping_add(i) as usize] = r + }); + let mut a_copy: [u32; 16] = [0u32; 16usize]; + let mut b_copy: [u32; 16] = [0u32; 16usize]; + ((&mut a_copy)[0usize..16usize]).copy_from_slice(&res[0usize..16usize]); + ((&mut b_copy)[0usize..16usize]).copy_from_slice(&res[0usize..16usize]); + let r: u32 = 
super::bignum_base::bn_add_eq_len_u32(16u32, &a_copy, &b_copy, res); + let c0: u32 = r; + lowstar::ignore::ignore::(c0); + let mut tmp: [u32; 16] = [0u32; 16usize]; + krml::unroll_for!(8, "i", 0u32, 1u32, { + let res1: u64 = (a[i as usize] as u64).wrapping_mul(a[i as usize] as u64); + let hi: u32 = res1.wrapping_shr(32u32) as u32; + let lo: u32 = res1 as u32; + (&mut tmp)[2u32.wrapping_mul(i) as usize] = lo; + (&mut tmp)[2u32.wrapping_mul(i).wrapping_add(1u32) as usize] = hi + }); + let mut a_copy0: [u32; 16] = [0u32; 16usize]; + let mut b_copy0: [u32; 16] = [0u32; 16usize]; + ((&mut a_copy0)[0usize..16usize]).copy_from_slice(&res[0usize..16usize]); + ((&mut b_copy0)[0usize..16usize]).copy_from_slice(&(&tmp)[0usize..16usize]); + let r0: u32 = super::bignum_base::bn_add_eq_len_u32(16u32, &a_copy0, &b_copy0, res); + let c1: u32 = r0; + lowstar::ignore::ignore::(c1) +} + +#[inline] +fn precompr2(nBits: u32, n: &[u32], res: &mut [u32]) { + (res[0usize..8usize]).copy_from_slice(&[0u32; 8usize]); + let i: u32 = nBits.wrapping_div(32u32); + let j: u32 = nBits.wrapping_rem(32u32); + res[i as usize] |= 1u32.wrapping_shl(j); + for _i in 0u32..512u32.wrapping_sub(nBits) { + let mut a_copy: [u32; 8] = [0u32; 8usize]; + let mut b_copy: [u32; 8] = [0u32; 8usize]; + ((&mut a_copy)[0usize..8usize]).copy_from_slice(&res[0usize..8usize]); + ((&mut b_copy)[0usize..8usize]).copy_from_slice(&res[0usize..8usize]); + super::bignum256_32::add_mod(n, &a_copy, &b_copy, res) + } +} + +#[inline] +fn reduction(n: &[u32], nInv: u32, c: &mut [u32], res: &mut [u32]) { + let mut c0: [u32; 1] = [0u32; 1usize]; + krml::unroll_for!(8, "i", 0u32, 1u32, { + let qj: u32 = nInv.wrapping_mul(c[i as usize]); + let res_j: (&mut [u32], &mut [u32]) = c.split_at_mut(i as usize); + let mut c1: [u32; 1] = [0u32; 1usize]; + krml::unroll_for!(2, "i0", 0u32, 1u32, { + let a_i: u32 = n[4u32.wrapping_mul(i0) as usize]; + let res_i: (&mut [u32], &mut [u32]) = + res_j.1.split_at_mut(4u32.wrapping_mul(i0) as usize); + 
(&mut c1)[0usize] = + super::bignum_base::mul_wide_add2_u32(a_i, qj, (&c1)[0usize], res_i.1); + let a_i0: u32 = n[4u32.wrapping_mul(i0).wrapping_add(1u32) as usize]; + let res_i0: (&mut [u32], &mut [u32]) = res_i.1.split_at_mut(1usize); + (&mut c1)[0usize] = + super::bignum_base::mul_wide_add2_u32(a_i0, qj, (&c1)[0usize], res_i0.1); + let a_i1: u32 = n[4u32.wrapping_mul(i0).wrapping_add(2u32) as usize]; + let res_i1: (&mut [u32], &mut [u32]) = res_i0.1.split_at_mut(1usize); + (&mut c1)[0usize] = + super::bignum_base::mul_wide_add2_u32(a_i1, qj, (&c1)[0usize], res_i1.1); + let a_i2: u32 = n[4u32.wrapping_mul(i0).wrapping_add(3u32) as usize]; + let res_i2: (&mut [u32], &mut [u32]) = res_i1.1.split_at_mut(1usize); + (&mut c1)[0usize] = + super::bignum_base::mul_wide_add2_u32(a_i2, qj, (&c1)[0usize], res_i2.1) + }); + let r: u32 = (&c1)[0usize]; + let c10: u32 = r; + let res_j0: u32 = c[8u32.wrapping_add(i) as usize]; + let resb: (&mut [u32], &mut [u32]) = c.split_at_mut(i as usize + 8usize); + (&mut c0)[0usize] = + lib::inttypes_intrinsics::add_carry_u32((&c0)[0usize], c10, res_j0, resb.1) + }); + (res[0usize..8usize]).copy_from_slice(&(&c[8usize..])[0usize..8usize]); + let c00: u32 = (&c0)[0usize]; + let mut tmp: [u32; 8] = [0u32; 8usize]; + let mut c1: [u32; 1] = [0u32; 1usize]; + krml::unroll_for!(2, "i", 0u32, 1u32, { + let t1: u32 = res[4u32.wrapping_mul(i) as usize]; + let t2: u32 = n[4u32.wrapping_mul(i) as usize]; + let res_i: (&mut [u32], &mut [u32]) = tmp.split_at_mut(4u32.wrapping_mul(i) as usize); + (&mut c1)[0usize] = + lib::inttypes_intrinsics::sub_borrow_u32((&c1)[0usize], t1, t2, res_i.1); + let t10: u32 = res[4u32.wrapping_mul(i).wrapping_add(1u32) as usize]; + let t20: u32 = n[4u32.wrapping_mul(i).wrapping_add(1u32) as usize]; + let res_i0: (&mut [u32], &mut [u32]) = res_i.1.split_at_mut(1usize); + (&mut c1)[0usize] = + lib::inttypes_intrinsics::sub_borrow_u32((&c1)[0usize], t10, t20, res_i0.1); + let t11: u32 = 
res[4u32.wrapping_mul(i).wrapping_add(2u32) as usize]; + let t21: u32 = n[4u32.wrapping_mul(i).wrapping_add(2u32) as usize]; + let res_i1: (&mut [u32], &mut [u32]) = res_i0.1.split_at_mut(1usize); + (&mut c1)[0usize] = + lib::inttypes_intrinsics::sub_borrow_u32((&c1)[0usize], t11, t21, res_i1.1); + let t12: u32 = res[4u32.wrapping_mul(i).wrapping_add(3u32) as usize]; + let t22: u32 = n[4u32.wrapping_mul(i).wrapping_add(3u32) as usize]; + let res_i2: (&mut [u32], &mut [u32]) = res_i1.1.split_at_mut(1usize); + (&mut c1)[0usize] = + lib::inttypes_intrinsics::sub_borrow_u32((&c1)[0usize], t12, t22, res_i2.1) + }); + let c10: u32 = (&c1)[0usize]; + let c2: u32 = c00.wrapping_sub(c10); + krml::unroll_for!(8, "i", 0u32, 1u32, { + let x: u32 = c2 & res[i as usize] | !c2 & (&tmp)[i as usize]; + let os: (&mut [u32], &mut [u32]) = res.split_at_mut(0usize); + os.1[i as usize] = x + }) +} + +#[inline] +fn to(n: &[u32], nInv: u32, r2: &[u32], a: &[u32], aM: &mut [u32]) { + let mut c: [u32; 16] = [0u32; 16usize]; + super::bignum256_32::mul(a, r2, &mut c); + super::bignum256_32::reduction(n, nInv, &mut c, aM) +} + +#[inline] +fn from(n: &[u32], nInv_u64: u32, aM: &[u32], a: &mut [u32]) { + let mut tmp: [u32; 16] = [0u32; 16usize]; + ((&mut tmp)[0usize..8usize]).copy_from_slice(&aM[0usize..8usize]); + super::bignum256_32::reduction(n, nInv_u64, &mut tmp, a) +} + +#[inline] +fn areduction(n: &[u32], nInv: u32, c: &mut [u32], res: &mut [u32]) { + let mut c0: [u32; 1] = [0u32; 1usize]; + krml::unroll_for!(8, "i", 0u32, 1u32, { + let qj: u32 = nInv.wrapping_mul(c[i as usize]); + let res_j: (&mut [u32], &mut [u32]) = c.split_at_mut(i as usize); + let mut c1: [u32; 1] = [0u32; 1usize]; + krml::unroll_for!(2, "i0", 0u32, 1u32, { + let a_i: u32 = n[4u32.wrapping_mul(i0) as usize]; + let res_i: (&mut [u32], &mut [u32]) = + res_j.1.split_at_mut(4u32.wrapping_mul(i0) as usize); + (&mut c1)[0usize] = + super::bignum_base::mul_wide_add2_u32(a_i, qj, (&c1)[0usize], res_i.1); + let a_i0: u32 = 
n[4u32.wrapping_mul(i0).wrapping_add(1u32) as usize]; + let res_i0: (&mut [u32], &mut [u32]) = res_i.1.split_at_mut(1usize); + (&mut c1)[0usize] = + super::bignum_base::mul_wide_add2_u32(a_i0, qj, (&c1)[0usize], res_i0.1); + let a_i1: u32 = n[4u32.wrapping_mul(i0).wrapping_add(2u32) as usize]; + let res_i1: (&mut [u32], &mut [u32]) = res_i0.1.split_at_mut(1usize); + (&mut c1)[0usize] = + super::bignum_base::mul_wide_add2_u32(a_i1, qj, (&c1)[0usize], res_i1.1); + let a_i2: u32 = n[4u32.wrapping_mul(i0).wrapping_add(3u32) as usize]; + let res_i2: (&mut [u32], &mut [u32]) = res_i1.1.split_at_mut(1usize); + (&mut c1)[0usize] = + super::bignum_base::mul_wide_add2_u32(a_i2, qj, (&c1)[0usize], res_i2.1) + }); + let r: u32 = (&c1)[0usize]; + let c10: u32 = r; + let res_j0: u32 = c[8u32.wrapping_add(i) as usize]; + let resb: (&mut [u32], &mut [u32]) = c.split_at_mut(i as usize + 8usize); + (&mut c0)[0usize] = + lib::inttypes_intrinsics::add_carry_u32((&c0)[0usize], c10, res_j0, resb.1) + }); + (res[0usize..8usize]).copy_from_slice(&(&c[8usize..])[0usize..8usize]); + let c00: u32 = (&c0)[0usize]; + let mut tmp: [u32; 8] = [0u32; 8usize]; + let c1: u32 = super::bignum256_32::sub(res, n, &mut tmp); + lowstar::ignore::ignore::(c1); + let m: u32 = 0u32.wrapping_sub(c00); + krml::unroll_for!(8, "i", 0u32, 1u32, { + let x: u32 = m & (&tmp)[i as usize] | !m & res[i as usize]; + let os: (&mut [u32], &mut [u32]) = res.split_at_mut(0usize); + os.1[i as usize] = x + }) +} + +#[inline] +fn amont_mul(n: &[u32], nInv_u64: u32, aM: &[u32], bM: &[u32], resM: &mut [u32]) { + let mut c: [u32; 16] = [0u32; 16usize]; + super::bignum256_32::mul(aM, bM, &mut c); + super::bignum256_32::areduction(n, nInv_u64, &mut c, resM) +} + +#[inline] +fn amont_sqr(n: &[u32], nInv_u64: u32, aM: &[u32], resM: &mut [u32]) { + let mut c: [u32; 16] = [0u32; 16usize]; + super::bignum256_32::sqr(aM, &mut c); + super::bignum256_32::areduction(n, nInv_u64, &mut c, resM) +} + +#[inline] +fn bn_slow_precomp(n: &[u32], 
mu: u32, r2: &[u32], a: &[u32], res: &mut [u32]) { + let mut a_mod: [u32; 8] = [0u32; 8usize]; + let mut a1: [u32; 16] = [0u32; 16usize]; + ((&mut a1)[0usize..16usize]).copy_from_slice(&a[0usize..16usize]); + super::bignum256_32::areduction(n, mu, &mut a1, &mut a_mod); + super::bignum256_32::to(n, mu, r2, &a_mod, res) +} + +/** +Write `a mod n` in `res`. + + The argument a is meant to be a 512-bit bignum, i.e. uint32_t[16]. + The argument n and the outparam res are meant to be 256-bit bignums, i.e. uint32_t[8]. + + The function returns false if any of the following preconditions are violated, + true otherwise. + ā€¢ 1 < n + ā€¢ n % 2 = 1 +*/ +pub fn r#mod(n: &[u32], a: &[u32], res: &mut [u32]) -> bool { + let mut one: [u32; 8] = [0u32; 8usize]; + ((&mut one)[0usize..8usize]).copy_from_slice(&[0u32; 8usize]); + (&mut one)[0usize] = 1u32; + let bit0: u32 = n[0usize] & 1u32; + let m0: u32 = 0u32.wrapping_sub(bit0); + let mut acc: [u32; 1] = [0u32; 1usize]; + krml::unroll_for!(8, "i", 0u32, 1u32, { + let beq: u32 = fstar::uint32::eq_mask((&one)[i as usize], n[i as usize]); + let blt: u32 = !fstar::uint32::gte_mask((&one)[i as usize], n[i as usize]); + (&mut acc)[0usize] = beq & (&acc)[0usize] | !beq & blt + }); + let m1: u32 = (&acc)[0usize]; + let is_valid_m: u32 = m0 & m1; + let nBits: u32 = 32u32.wrapping_mul(super::bignum_base::bn_get_top_index_u32(8u32, n)); + if is_valid_m == 0xFFFFFFFFu32 { + let mut r2: [u32; 8] = [0u32; 8usize]; + super::bignum256_32::precompr2(nBits, n, &mut r2); + let mu: u32 = super::bignum::mod_inv_uint32(n[0usize]); + super::bignum256_32::bn_slow_precomp(n, mu, &r2, a, res) + } else { + (res[0usize..8usize]).copy_from_slice(&[0u32; 8usize]) + }; + is_valid_m == 0xFFFFFFFFu32 +} + +fn exp_check(n: &[u32], a: &[u32], bBits: u32, b: &[u32]) -> u32 { + let mut one: [u32; 8] = [0u32; 8usize]; + ((&mut one)[0usize..8usize]).copy_from_slice(&[0u32; 8usize]); + (&mut one)[0usize] = 1u32; + let bit0: u32 = n[0usize] & 1u32; + let m0: u32 = 
0u32.wrapping_sub(bit0); + let mut acc: [u32; 1] = [0u32; 1usize]; + krml::unroll_for!(8, "i", 0u32, 1u32, { + let beq: u32 = fstar::uint32::eq_mask((&one)[i as usize], n[i as usize]); + let blt: u32 = !fstar::uint32::gte_mask((&one)[i as usize], n[i as usize]); + (&mut acc)[0usize] = beq & (&acc)[0usize] | !beq & blt + }); + let m1: u32 = (&acc)[0usize]; + let m00: u32 = m0 & m1; + let bLen: u32 = if bBits == 0u32 { + 1u32 + } else { + bBits + .wrapping_sub(1u32) + .wrapping_div(32u32) + .wrapping_add(1u32) + }; + let m10: u32 = if bBits < 32u32.wrapping_mul(bLen) { + let mut b2: Box<[u32]> = vec![0u32; bLen as usize].into_boxed_slice(); + let i: u32 = bBits.wrapping_div(32u32); + let j: u32 = bBits.wrapping_rem(32u32); + (&mut b2)[i as usize] = (&b2)[i as usize] | 1u32.wrapping_shl(j); + let mut acc0: [u32; 1] = [0u32; 1usize]; + for i0 in 0u32..bLen { + let beq: u32 = fstar::uint32::eq_mask(b[i0 as usize], (&b2)[i0 as usize]); + let blt: u32 = !fstar::uint32::gte_mask(b[i0 as usize], (&b2)[i0 as usize]); + (&mut acc0)[0usize] = beq & (&acc0)[0usize] | !beq & blt + } + let res: u32 = (&acc0)[0usize]; + res + } else { + 0xFFFFFFFFu32 + }; + let mut acc0: [u32; 1] = [0u32; 1usize]; + krml::unroll_for!(8, "i", 0u32, 1u32, { + let beq: u32 = fstar::uint32::eq_mask(a[i as usize], n[i as usize]); + let blt: u32 = !fstar::uint32::gte_mask(a[i as usize], n[i as usize]); + (&mut acc0)[0usize] = beq & (&acc0)[0usize] | !beq & blt + }); + let m2: u32 = (&acc0)[0usize]; + let m: u32 = m10 & m2; + m00 & m +} + +#[inline] +fn exp_vartime_precomp( + n: &[u32], + mu: u32, + r2: &[u32], + a: &[u32], + bBits: u32, + b: &[u32], + res: &mut [u32], +) { + if bBits < 200u32 { + let mut aM: [u32; 8] = [0u32; 8usize]; + super::bignum256_32::to(n, mu, r2, a, &mut aM); + let mut resM: [u32; 8] = [0u32; 8usize]; + let mut ctx: [u32; 16] = [0u32; 16usize]; + ((&mut ctx)[0usize..8usize]).copy_from_slice(&n[0usize..8usize]); + ((&mut ctx)[8usize..8usize + 
8usize]).copy_from_slice(&r2[0usize..8usize]); + let ctx_n: (&[u32], &[u32]) = ctx.split_at(0usize); + let ctx_r2: (&[u32], &[u32]) = ctx_n.1.split_at(8usize); + super::bignum256_32::from(ctx_r2.0, mu, ctx_r2.1, &mut resM); + lowstar::ignore::ignore::<&[u32]>(&ctx); + for i in 0u32..bBits { + let i1: u32 = i.wrapping_div(32u32); + let j: u32 = i.wrapping_rem(32u32); + let tmp: u32 = b[i1 as usize]; + let bit: u32 = tmp.wrapping_shr(j) & 1u32; + if bit != 0u32 { + let mut aM_copy: [u32; 8] = [0u32; 8usize]; + ((&mut aM_copy)[0usize..8usize]).copy_from_slice(&(&resM)[0usize..8usize]); + let ctx_n0: (&[u32], &[u32]) = ctx.split_at(0usize); + super::bignum256_32::amont_mul(ctx_n0.1, mu, &aM_copy, &aM, &mut resM); + lowstar::ignore::ignore::<&[u32]>(&ctx) + }; + let mut aM_copy: [u32; 8] = [0u32; 8usize]; + ((&mut aM_copy)[0usize..8usize]).copy_from_slice(&(&aM)[0usize..8usize]); + let ctx_n0: (&[u32], &[u32]) = ctx.split_at(0usize); + super::bignum256_32::amont_sqr(ctx_n0.1, mu, &aM_copy, &mut aM); + lowstar::ignore::ignore::<&[u32]>(&ctx) + } + super::bignum256_32::from(n, mu, &resM, res) + } else { + let mut aM: [u32; 8] = [0u32; 8usize]; + super::bignum256_32::to(n, mu, r2, a, &mut aM); + let mut resM: [u32; 8] = [0u32; 8usize]; + let bLen: u32 = if bBits == 0u32 { + 1u32 + } else { + bBits + .wrapping_sub(1u32) + .wrapping_div(32u32) + .wrapping_add(1u32) + }; + let mut ctx: [u32; 16] = [0u32; 16usize]; + ((&mut ctx)[0usize..8usize]).copy_from_slice(&n[0usize..8usize]); + ((&mut ctx)[8usize..8usize + 8usize]).copy_from_slice(&r2[0usize..8usize]); + let mut table: [u32; 128] = [0u32; 128usize]; + let mut tmp: [u32; 8] = [0u32; 8usize]; + let t0: (&mut [u32], &mut [u32]) = table.split_at_mut(0usize); + let t1: (&mut [u32], &mut [u32]) = t0.1.split_at_mut(8usize); + let ctx_n: (&[u32], &[u32]) = ctx.split_at(0usize); + let ctx_r2: (&[u32], &[u32]) = ctx_n.1.split_at(8usize); + super::bignum256_32::from(ctx_r2.0, mu, ctx_r2.1, t1.0); + 
lowstar::ignore::ignore::<&[u32]>(&ctx); + (t1.1[0usize..8usize]).copy_from_slice(&(&aM)[0usize..8usize]); + lowstar::ignore::ignore::<&[u32]>(&table); + krml::unroll_for!(7, "i", 0u32, 1u32, { + let t11: (&[u32], &[u32]) = + table.split_at(i.wrapping_add(1u32).wrapping_mul(8u32) as usize); + let mut aM_copy: [u32; 8] = [0u32; 8usize]; + ((&mut aM_copy)[0usize..8usize]).copy_from_slice(&t11.1[0usize..8usize]); + let ctx_n0: (&[u32], &[u32]) = ctx.split_at(0usize); + super::bignum256_32::amont_sqr(ctx_n0.1, mu, &aM_copy, &mut tmp); + lowstar::ignore::ignore::<&[u32]>(&ctx); + ((&mut table)[2u32.wrapping_mul(i).wrapping_add(2u32).wrapping_mul(8u32) as usize + ..2u32.wrapping_mul(i).wrapping_add(2u32).wrapping_mul(8u32) as usize + 8usize]) + .copy_from_slice(&(&tmp)[0usize..8usize]); + let t2: (&[u32], &[u32]) = + table.split_at(2u32.wrapping_mul(i).wrapping_add(2u32).wrapping_mul(8u32) as usize); + let mut aM_copy0: [u32; 8] = [0u32; 8usize]; + ((&mut aM_copy0)[0usize..8usize]).copy_from_slice(&(&aM)[0usize..8usize]); + let ctx_n1: (&[u32], &[u32]) = ctx.split_at(0usize); + super::bignum256_32::amont_mul(ctx_n1.1, mu, &aM_copy0, t2.1, &mut tmp); + lowstar::ignore::ignore::<&[u32]>(&ctx); + ((&mut table)[2u32.wrapping_mul(i).wrapping_add(3u32).wrapping_mul(8u32) as usize + ..2u32.wrapping_mul(i).wrapping_add(3u32).wrapping_mul(8u32) as usize + 8usize]) + .copy_from_slice(&(&tmp)[0usize..8usize]) + }); + if bBits.wrapping_rem(4u32) != 0u32 { + let i: u32 = bBits.wrapping_div(4u32).wrapping_mul(4u32); + let bits_c: u32 = super::bignum_base::bn_get_bits_u32(bLen, b, i, 4u32); + let bits_l32: u32 = bits_c; + let a_bits_l: (&[u32], &[u32]) = table.split_at(bits_l32.wrapping_mul(8u32) as usize); + ((&mut resM)[0usize..8usize]).copy_from_slice(&a_bits_l.1[0usize..8usize]) + } else { + let ctx_n0: (&[u32], &[u32]) = ctx.split_at(0usize); + let ctx_r20: (&[u32], &[u32]) = ctx_n0.1.split_at(8usize); + super::bignum256_32::from(ctx_r20.0, mu, ctx_r20.1, &mut resM); + 
lowstar::ignore::ignore::<&[u32]>(&ctx) + }; + let mut tmp0: [u32; 8] = [0u32; 8usize]; + for i in 0u32..bBits.wrapping_div(4u32) { + krml::unroll_for!(4, "_i", 0u32, 1u32, { + let mut aM_copy: [u32; 8] = [0u32; 8usize]; + ((&mut aM_copy)[0usize..8usize]).copy_from_slice(&(&resM)[0usize..8usize]); + let ctx_n0: (&[u32], &[u32]) = ctx.split_at(0usize); + super::bignum256_32::amont_sqr(ctx_n0.1, mu, &aM_copy, &mut resM); + lowstar::ignore::ignore::<&[u32]>(&ctx) + }); + let k: u32 = bBits + .wrapping_sub(bBits.wrapping_rem(4u32)) + .wrapping_sub(4u32.wrapping_mul(i)) + .wrapping_sub(4u32); + let bits_l: u32 = super::bignum_base::bn_get_bits_u32(bLen, b, k, 4u32); + lowstar::ignore::ignore::<&[u32]>(&table); + let bits_l32: u32 = bits_l; + let a_bits_l: (&[u32], &[u32]) = table.split_at(bits_l32.wrapping_mul(8u32) as usize); + ((&mut tmp0)[0usize..8usize]).copy_from_slice(&a_bits_l.1[0usize..8usize]); + let mut aM_copy: [u32; 8] = [0u32; 8usize]; + ((&mut aM_copy)[0usize..8usize]).copy_from_slice(&(&resM)[0usize..8usize]); + let ctx_n0: (&[u32], &[u32]) = ctx.split_at(0usize); + super::bignum256_32::amont_mul(ctx_n0.1, mu, &aM_copy, &tmp0, &mut resM); + lowstar::ignore::ignore::<&[u32]>(&ctx) + } + super::bignum256_32::from(n, mu, &resM, res) + } +} + +#[inline] +fn exp_consttime_precomp( + n: &[u32], + mu: u32, + r2: &[u32], + a: &[u32], + bBits: u32, + b: &[u32], + res: &mut [u32], +) { + if bBits < 200u32 { + let mut aM: [u32; 8] = [0u32; 8usize]; + super::bignum256_32::to(n, mu, r2, a, &mut aM); + let mut resM: [u32; 8] = [0u32; 8usize]; + let mut ctx: [u32; 16] = [0u32; 16usize]; + ((&mut ctx)[0usize..8usize]).copy_from_slice(&n[0usize..8usize]); + ((&mut ctx)[8usize..8usize + 8usize]).copy_from_slice(&r2[0usize..8usize]); + let mut sw: [u32; 1] = [0u32; 1usize]; + let ctx_n: (&[u32], &[u32]) = ctx.split_at(0usize); + let ctx_r2: (&[u32], &[u32]) = ctx_n.1.split_at(8usize); + super::bignum256_32::from(ctx_r2.0, mu, ctx_r2.1, &mut resM); + 
lowstar::ignore::ignore::<&[u32]>(&ctx); + for i in 0u32..bBits { + let i1: u32 = bBits.wrapping_sub(i).wrapping_sub(1u32).wrapping_div(32u32); + let j: u32 = bBits.wrapping_sub(i).wrapping_sub(1u32).wrapping_rem(32u32); + let tmp: u32 = b[i1 as usize]; + let bit: u32 = tmp.wrapping_shr(j) & 1u32; + let sw1: u32 = bit ^ (&sw)[0usize]; + krml::unroll_for!(8, "i0", 0u32, 1u32, { + let dummy: u32 = + 0u32.wrapping_sub(sw1) & ((&resM)[i0 as usize] ^ (&aM)[i0 as usize]); + (&mut resM)[i0 as usize] = (&resM)[i0 as usize] ^ dummy; + (&mut aM)[i0 as usize] = (&aM)[i0 as usize] ^ dummy + }); + let mut aM_copy: [u32; 8] = [0u32; 8usize]; + ((&mut aM_copy)[0usize..8usize]).copy_from_slice(&(&aM)[0usize..8usize]); + let ctx_n0: (&[u32], &[u32]) = ctx.split_at(0usize); + super::bignum256_32::amont_mul(ctx_n0.1, mu, &aM_copy, &resM, &mut aM); + lowstar::ignore::ignore::<&[u32]>(&ctx); + let mut aM_copy0: [u32; 8] = [0u32; 8usize]; + ((&mut aM_copy0)[0usize..8usize]).copy_from_slice(&(&resM)[0usize..8usize]); + let ctx_n1: (&[u32], &[u32]) = ctx.split_at(0usize); + super::bignum256_32::amont_sqr(ctx_n1.1, mu, &aM_copy0, &mut resM); + lowstar::ignore::ignore::<&[u32]>(&ctx); + (&mut sw)[0usize] = bit + } + let sw0: u32 = (&sw)[0usize]; + krml::unroll_for!(8, "i", 0u32, 1u32, { + let dummy: u32 = 0u32.wrapping_sub(sw0) & ((&resM)[i as usize] ^ (&aM)[i as usize]); + (&mut resM)[i as usize] = (&resM)[i as usize] ^ dummy; + (&mut aM)[i as usize] = (&aM)[i as usize] ^ dummy + }); + super::bignum256_32::from(n, mu, &resM, res) + } else { + let mut aM: [u32; 8] = [0u32; 8usize]; + super::bignum256_32::to(n, mu, r2, a, &mut aM); + let mut resM: [u32; 8] = [0u32; 8usize]; + let bLen: u32 = if bBits == 0u32 { + 1u32 + } else { + bBits + .wrapping_sub(1u32) + .wrapping_div(32u32) + .wrapping_add(1u32) + }; + let mut ctx: [u32; 16] = [0u32; 16usize]; + ((&mut ctx)[0usize..8usize]).copy_from_slice(&n[0usize..8usize]); + ((&mut ctx)[8usize..8usize + 
8usize]).copy_from_slice(&r2[0usize..8usize]); + let mut table: [u32; 128] = [0u32; 128usize]; + let mut tmp: [u32; 8] = [0u32; 8usize]; + let t0: (&mut [u32], &mut [u32]) = table.split_at_mut(0usize); + let t1: (&mut [u32], &mut [u32]) = t0.1.split_at_mut(8usize); + let ctx_n: (&[u32], &[u32]) = ctx.split_at(0usize); + let ctx_r2: (&[u32], &[u32]) = ctx_n.1.split_at(8usize); + super::bignum256_32::from(ctx_r2.0, mu, ctx_r2.1, t1.0); + lowstar::ignore::ignore::<&[u32]>(&ctx); + (t1.1[0usize..8usize]).copy_from_slice(&(&aM)[0usize..8usize]); + lowstar::ignore::ignore::<&[u32]>(&table); + krml::unroll_for!(7, "i", 0u32, 1u32, { + let t11: (&[u32], &[u32]) = + table.split_at(i.wrapping_add(1u32).wrapping_mul(8u32) as usize); + let mut aM_copy: [u32; 8] = [0u32; 8usize]; + ((&mut aM_copy)[0usize..8usize]).copy_from_slice(&t11.1[0usize..8usize]); + let ctx_n0: (&[u32], &[u32]) = ctx.split_at(0usize); + super::bignum256_32::amont_sqr(ctx_n0.1, mu, &aM_copy, &mut tmp); + lowstar::ignore::ignore::<&[u32]>(&ctx); + ((&mut table)[2u32.wrapping_mul(i).wrapping_add(2u32).wrapping_mul(8u32) as usize + ..2u32.wrapping_mul(i).wrapping_add(2u32).wrapping_mul(8u32) as usize + 8usize]) + .copy_from_slice(&(&tmp)[0usize..8usize]); + let t2: (&[u32], &[u32]) = + table.split_at(2u32.wrapping_mul(i).wrapping_add(2u32).wrapping_mul(8u32) as usize); + let mut aM_copy0: [u32; 8] = [0u32; 8usize]; + ((&mut aM_copy0)[0usize..8usize]).copy_from_slice(&(&aM)[0usize..8usize]); + let ctx_n1: (&[u32], &[u32]) = ctx.split_at(0usize); + super::bignum256_32::amont_mul(ctx_n1.1, mu, &aM_copy0, t2.1, &mut tmp); + lowstar::ignore::ignore::<&[u32]>(&ctx); + ((&mut table)[2u32.wrapping_mul(i).wrapping_add(3u32).wrapping_mul(8u32) as usize + ..2u32.wrapping_mul(i).wrapping_add(3u32).wrapping_mul(8u32) as usize + 8usize]) + .copy_from_slice(&(&tmp)[0usize..8usize]) + }); + if bBits.wrapping_rem(4u32) != 0u32 { + let i: u32 = bBits.wrapping_div(4u32).wrapping_mul(4u32); + let bits_c: u32 = 
super::bignum_base::bn_get_bits_u32(bLen, b, i, 4u32); + ((&mut resM)[0usize..8usize]) + .copy_from_slice(&(&(&table)[0usize..] as &[u32])[0usize..8usize]); + krml::unroll_for!(15, "i0", 0u32, 1u32, { + let c: u32 = fstar::uint32::eq_mask(bits_c, i0.wrapping_add(1u32)); + let res_j: (&[u32], &[u32]) = + table.split_at(i0.wrapping_add(1u32).wrapping_mul(8u32) as usize); + krml::unroll_for!(8, "i1", 0u32, 1u32, { + let x: u32 = c & res_j.1[i1 as usize] | !c & (&resM)[i1 as usize]; + let os: (&mut [u32], &mut [u32]) = resM.split_at_mut(0usize); + os.1[i1 as usize] = x + }) + }) + } else { + let ctx_n0: (&[u32], &[u32]) = ctx.split_at(0usize); + let ctx_r20: (&[u32], &[u32]) = ctx_n0.1.split_at(8usize); + super::bignum256_32::from(ctx_r20.0, mu, ctx_r20.1, &mut resM); + lowstar::ignore::ignore::<&[u32]>(&ctx) + }; + let mut tmp0: [u32; 8] = [0u32; 8usize]; + for i in 0u32..bBits.wrapping_div(4u32) { + krml::unroll_for!(4, "_i", 0u32, 1u32, { + let mut aM_copy: [u32; 8] = [0u32; 8usize]; + ((&mut aM_copy)[0usize..8usize]).copy_from_slice(&(&resM)[0usize..8usize]); + let ctx_n0: (&[u32], &[u32]) = ctx.split_at(0usize); + super::bignum256_32::amont_sqr(ctx_n0.1, mu, &aM_copy, &mut resM); + lowstar::ignore::ignore::<&[u32]>(&ctx) + }); + let k: u32 = bBits + .wrapping_sub(bBits.wrapping_rem(4u32)) + .wrapping_sub(4u32.wrapping_mul(i)) + .wrapping_sub(4u32); + let bits_l: u32 = super::bignum_base::bn_get_bits_u32(bLen, b, k, 4u32); + lowstar::ignore::ignore::<&[u32]>(&table); + ((&mut tmp0)[0usize..8usize]) + .copy_from_slice(&(&(&table)[0usize..] 
as &[u32])[0usize..8usize]); + krml::unroll_for!(15, "i0", 0u32, 1u32, { + let c: u32 = fstar::uint32::eq_mask(bits_l, i0.wrapping_add(1u32)); + let res_j: (&[u32], &[u32]) = + table.split_at(i0.wrapping_add(1u32).wrapping_mul(8u32) as usize); + krml::unroll_for!(8, "i1", 0u32, 1u32, { + let x: u32 = c & res_j.1[i1 as usize] | !c & (&tmp0)[i1 as usize]; + let os: (&mut [u32], &mut [u32]) = tmp0.split_at_mut(0usize); + os.1[i1 as usize] = x + }) + }); + let mut aM_copy: [u32; 8] = [0u32; 8usize]; + ((&mut aM_copy)[0usize..8usize]).copy_from_slice(&(&resM)[0usize..8usize]); + let ctx_n0: (&[u32], &[u32]) = ctx.split_at(0usize); + super::bignum256_32::amont_mul(ctx_n0.1, mu, &aM_copy, &tmp0, &mut resM); + lowstar::ignore::ignore::<&[u32]>(&ctx) + } + super::bignum256_32::from(n, mu, &resM, res) + } +} + +#[inline] +fn exp_vartime(nBits: u32, n: &[u32], a: &[u32], bBits: u32, b: &[u32], res: &mut [u32]) { + let mut r2: [u32; 8] = [0u32; 8usize]; + super::bignum256_32::precompr2(nBits, n, &mut r2); + let mu: u32 = super::bignum::mod_inv_uint32(n[0usize]); + super::bignum256_32::exp_vartime_precomp(n, mu, &r2, a, bBits, b, res) +} + +#[inline] +fn exp_consttime(nBits: u32, n: &[u32], a: &[u32], bBits: u32, b: &[u32], res: &mut [u32]) { + let mut r2: [u32; 8] = [0u32; 8usize]; + super::bignum256_32::precompr2(nBits, n, &mut r2); + let mu: u32 = super::bignum::mod_inv_uint32(n[0usize]); + super::bignum256_32::exp_consttime_precomp(n, mu, &r2, a, bBits, b, res) +} + +/** +Write `a ^ b mod n` in `res`. + + The arguments a, n and the outparam res are meant to be 256-bit bignums, i.e. uint32_t[8]. + + The argument b is a bignum of any size, and bBits is an upper bound on the + number of significant bits of b. A tighter bound results in faster execution + time. When in doubt, the number of bits for the bignum size is always a safe + default, e.g. if b is a 256-bit bignum, bBits should be 256. + + The function is *NOT* constant-time on the argument b. 
See the
 mod_exp_consttime_* functions for constant-time variants.

 The function returns false if any of the following preconditions are violated,
 true otherwise.
 • n % 2 = 1
 • 1 < n
 • b < pow2 bBits
 • a < n
*/
pub fn mod_exp_vartime(n: &[u32], a: &[u32], bBits: u32, b: &[u32], res: &mut [u32]) -> bool {
    // NOTE(review): krml-extracted generated code (see krml::unroll_for! use
    // throughout this file); hand edits should be mirrored upstream — confirm.
    // exp_check validates the preconditions with branch-free masks; the result
    // is an all-ones mask (0xFFFFFFFF) when every precondition holds.
    let is_valid_m: u32 = super::bignum256_32::exp_check(n, a, bBits, b);
    let nBits: u32 = 32u32.wrapping_mul(super::bignum_base::bn_get_top_index_u32(8u32, n));
    if is_valid_m == 0xFFFFFFFFu32 {
        super::bignum256_32::exp_vartime(nBits, n, a, bBits, b, res)
    } else {
        // Invalid input: zero the output and report failure via the return value.
        (res[0usize..8usize]).copy_from_slice(&[0u32; 8usize])
    };
    is_valid_m == 0xFFFFFFFFu32
}

/**
Write `a ^ b mod n` in `res`.

 The arguments a, n and the outparam res are meant to be 256-bit bignums, i.e. uint32_t[8].

 The argument b is a bignum of any size, and bBits is an upper bound on the
 number of significant bits of b. A tighter bound results in faster execution
 time. When in doubt, the number of bits for the bignum size is always a safe
 default, e.g. if b is a 256-bit bignum, bBits should be 256.

 This function is constant-time over its argument b, at the cost of a slower
 execution time than mod_exp_vartime.

 The function returns false if any of the following preconditions are violated,
 true otherwise.
 • n % 2 = 1
 • 1 < n
 • b < pow2 bBits
 • a < n
*/
pub fn mod_exp_consttime(n: &[u32], a: &[u32], bBits: u32, b: &[u32], res: &mut [u32]) -> bool {
    // Same validation/dispatch shape as mod_exp_vartime, but the exponentiation
    // itself is the constant-time ladder (exp_consttime).
    let is_valid_m: u32 = super::bignum256_32::exp_check(n, a, bBits, b);
    let nBits: u32 = 32u32.wrapping_mul(super::bignum_base::bn_get_top_index_u32(8u32, n));
    if is_valid_m == 0xFFFFFFFFu32 {
        super::bignum256_32::exp_consttime(nBits, n, a, bBits, b, res)
    } else {
        (res[0usize..8usize]).copy_from_slice(&[0u32; 8usize])
    };
    is_valid_m == 0xFFFFFFFFu32
}

/**
Write `a ^ (-1) mod n` in `res`.

 The arguments a, n and the outparam res are meant to be 256-bit bignums, i.e. uint32_t[8].

 Before calling this function, the caller will need to ensure that the following
 preconditions are observed.
 • n is a prime

 The function returns false if any of the following preconditions are violated, true otherwise.
 • n % 2 = 1
 • 1 < n
 • 0 < a
 • a < n
*/
pub fn mod_inv_prime_vartime(n: &[u32], a: &[u32], res: &mut [u32]) -> bool {
    // Inverse computed via Fermat's little theorem: a^(n-2) mod n (n assumed prime).
    // All validity checks below are branch-free mask computations.
    let mut one: [u32; 8] = [0u32; 8usize];
    ((&mut one)[0usize..8usize]).copy_from_slice(&[0u32; 8usize]);
    (&mut one)[0usize] = 1u32;
    // m0: mask for "n is odd" (low bit of the low limb).
    let bit0: u32 = n[0usize] & 1u32;
    let m0: u32 = 0u32.wrapping_sub(bit0);
    // m1: mask for "1 < n", via a limb-wise constant-time comparison.
    let mut acc: [u32; 1] = [0u32; 1usize];
    krml::unroll_for!(8, "i", 0u32, 1u32, {
        let beq: u32 = fstar::uint32::eq_mask((&one)[i as usize], n[i as usize]);
        let blt: u32 = !fstar::uint32::gte_mask((&one)[i as usize], n[i as usize]);
        (&mut acc)[0usize] = beq & (&acc)[0usize] | !beq & blt
    });
    let m1: u32 = (&acc)[0usize];
    let m00: u32 = m0 & m1;
    // m10: mask for "a == 0" (rejected below via !m10, enforcing 0 < a).
    let bn_zero: [u32; 8] = [0u32; 8usize];
    let mut mask: [u32; 1] = [0xFFFFFFFFu32; 1usize];
    krml::unroll_for!(8, "i", 0u32, 1u32, {
        let uu____0: u32 = fstar::uint32::eq_mask(a[i as usize], (&bn_zero)[i as usize]);
        (&mut mask)[0usize] = uu____0 & (&mask)[0usize]
    });
    let mask1: u32 = (&mask)[0usize];
    let res1: u32 = mask1;
    let m10: u32 = res1;
    // m2: mask for "a < n".
    let mut acc0: [u32; 1] = [0u32; 1usize];
    krml::unroll_for!(8, "i", 0u32, 1u32, {
        let beq: u32 = fstar::uint32::eq_mask(a[i as usize], n[i as usize]);
        let blt: u32 = !fstar::uint32::gte_mask(a[i as usize], n[i as usize]);
        (&mut acc0)[0usize] = beq & (&acc0)[0usize] | !beq & blt
    });
    let m2: u32 = (&acc0)[0usize];
    let is_valid_m: u32 = m00 & !m10 & m2;
    let nBits: u32 = 32u32.wrapping_mul(super::bignum_base::bn_get_top_index_u32(8u32, n));
    if is_valid_m == 0xFFFFFFFFu32 {
        // Compute the exponent n2 = n - 2 with an unrolled borrow chain
        // (first limb handled by the initial sub_borrow_u32, then limbs 1..7).
        let mut n2: [u32; 8] = [0u32; 8usize];
        let c0: u32 = lib::inttypes_intrinsics::sub_borrow_u32(
            0u32,
            n[0usize],
            2u32,
            &mut (&mut n2)[0usize..],
        );
        let a1: (&[u32], &[u32]) = n.split_at(1usize);
        let res10: (&mut [u32], &mut [u32]) = n2.split_at_mut(1usize);
        let mut c: [u32; 1] = [c0; 1usize];
        {
            let t1: u32 = a1.1[4u32.wrapping_mul(0u32) as usize];
            let res_i: (&mut [u32], &mut [u32]) =
                res10.1.split_at_mut(4u32.wrapping_mul(0u32) as usize);
            (&mut c)[0usize] =
                lib::inttypes_intrinsics::sub_borrow_u32((&c)[0usize], t1, 0u32, res_i.1);
            let t10: u32 = a1.1[4u32.wrapping_mul(0u32).wrapping_add(1u32) as usize];
            let res_i0: (&mut [u32], &mut [u32]) = res_i.1.split_at_mut(1usize);
            (&mut c)[0usize] =
                lib::inttypes_intrinsics::sub_borrow_u32((&c)[0usize], t10, 0u32, res_i0.1);
            let t11: u32 = a1.1[4u32.wrapping_mul(0u32).wrapping_add(2u32) as usize];
            let res_i1: (&mut [u32], &mut [u32]) = res_i0.1.split_at_mut(1usize);
            (&mut c)[0usize] =
                lib::inttypes_intrinsics::sub_borrow_u32((&c)[0usize], t11, 0u32, res_i1.1);
            let t12: u32 = a1.1[4u32.wrapping_mul(0u32).wrapping_add(3u32) as usize];
            let res_i2: (&mut [u32], &mut [u32]) = res_i1.1.split_at_mut(1usize);
            (&mut c)[0usize] =
                lib::inttypes_intrinsics::sub_borrow_u32((&c)[0usize], t12, 0u32, res_i2.1)
        };
        krml::unroll_for!(3, "i", 4u32, 1u32, {
            let t1: u32 = a1.1[i as usize];
            let res_i: (&mut [u32], &mut [u32]) = res10.1.split_at_mut(i as usize);
            (&mut c)[0usize] =
                lib::inttypes_intrinsics::sub_borrow_u32((&c)[0usize], t1, 0u32, res_i.1)
        });
        let c1: u32 = (&c)[0usize];
        let c2: u32 = c1;
        // Final borrow is provably irrelevant for a valid odd n > 1 — discarded.
        lowstar::ignore::ignore::<u32>(c2);
        super::bignum256_32::exp_vartime(nBits, n, a, 256u32, &n2, res)
    } else {
        (res[0usize..8usize]).copy_from_slice(&[0u32; 8usize])
    };
    is_valid_m == 0xFFFFFFFFu32
}

/**
Heap-allocate and initialize a montgomery context.

 The argument n is meant to be a 256-bit bignum, i.e. uint32_t[8].

 Before calling this function, the caller will need to ensure that the following
 preconditions are observed.
 • n % 2 = 1
 • 1 < n

 The caller will need to call Hacl_Bignum256_mont_ctx_free on the return value
 to avoid memory leaks.
*/
pub fn mont_ctx_init(n: &[u32]) -> Box<[super::bignum::bn_mont_ctx_u32]> {
    // NOTE(review): the "mont_ctx_free" remark above is carried over from the C
    // header; in this Rust port the context is returned as a Box and is freed
    // on drop — confirm and consider updating the doc upstream.
    let mut r2: Box<[u32]> = vec![0u32; 8usize].into_boxed_slice();
    let mut n1: Box<[u32]> = vec![0u32; 8usize].into_boxed_slice();
    let r21: &mut [u32] = &mut r2;
    let n11: &mut [u32] = &mut n1;
    (n11[0usize..8usize]).copy_from_slice(&n[0usize..8usize]);
    // Precompute r2 = 2^(2 * 256) mod n and the Montgomery constant mu = -n^-1 mod 2^32.
    let nBits: u32 = 32u32.wrapping_mul(super::bignum_base::bn_get_top_index_u32(8u32, n));
    super::bignum256_32::precompr2(nBits, n, r21);
    let mu: u32 = super::bignum::mod_inv_uint32(n[0usize]);
    let res: super::bignum::bn_mont_ctx_u32 = super::bignum::bn_mont_ctx_u32 {
        len: 8u32,
        n: (*n11).into(),
        mu,
        r2: (*r21).into(),
    };
    let buf: Box<[super::bignum::bn_mont_ctx_u32]> = vec![res].into_boxed_slice();
    buf
}

/**
Write `a mod n` in `res`.

 The argument a is meant to be a 512-bit bignum, i.e. uint32_t[16].
 The outparam res is meant to be a 256-bit bignum, i.e. uint32_t[8].
 The argument k is a montgomery context obtained through Hacl_Bignum256_mont_ctx_init.
*/
pub fn mod_precomp(k: &[super::bignum::bn_mont_ctx_u32], a: &[u32], res: &mut [u32]) {
    // Unpack the precomputed Montgomery context (modulus, mu, r2) and reduce.
    let n: &[u32] = &(k[0usize]).n;
    let mu: u32 = (k[0usize]).mu;
    let r2: &[u32] = &(k[0usize]).r2;
    super::bignum256_32::bn_slow_precomp(n, mu, r2, a, res)
}

/**
Write `a ^ b mod n` in `res`.

 The arguments a and the outparam res are meant to be 256-bit bignums, i.e. uint32_t[8].
 The argument k is a montgomery context obtained through Hacl_Bignum256_mont_ctx_init.

 The argument b is a bignum of any size, and bBits is an upper bound on the
 number of significant bits of b. A tighter bound results in faster execution
 time. When in doubt, the number of bits for the bignum size is always a safe
 default, e.g. if b is a 256-bit bignum, bBits should be 256.

 The function is *NOT* constant-time on the argument b.
See the
 mod_exp_consttime_* functions for constant-time variants.

 Before calling this function, the caller will need to ensure that the following
 preconditions are observed.
 • b < pow2 bBits
 • a < n
*/
pub fn mod_exp_vartime_precomp(
    k: &[super::bignum::bn_mont_ctx_u32],
    a: &[u32],
    bBits: u32,
    b: &[u32],
    res: &mut [u32],
) {
    // Thin wrapper: unpack the Montgomery context and run the variable-time
    // exponentiation (no precondition re-checking here, unlike mod_exp_vartime).
    let n: &[u32] = &(k[0usize]).n;
    let mu: u32 = (k[0usize]).mu;
    let r2: &[u32] = &(k[0usize]).r2;
    super::bignum256_32::exp_vartime_precomp(n, mu, r2, a, bBits, b, res)
}

/**
Write `a ^ b mod n` in `res`.

 The arguments a and the outparam res are meant to be 256-bit bignums, i.e. uint32_t[8].
 The argument k is a montgomery context obtained through Hacl_Bignum256_mont_ctx_init.

 The argument b is a bignum of any size, and bBits is an upper bound on the
 number of significant bits of b. A tighter bound results in faster execution
 time. When in doubt, the number of bits for the bignum size is always a safe
 default, e.g. if b is a 256-bit bignum, bBits should be 256.

 This function is constant-time over its argument b, at the cost of a slower
 execution time than mod_exp_vartime_*.

 Before calling this function, the caller will need to ensure that the following
 preconditions are observed.
 • b < pow2 bBits
 • a < n
*/
pub fn mod_exp_consttime_precomp(
    k: &[super::bignum::bn_mont_ctx_u32],
    a: &[u32],
    bBits: u32,
    b: &[u32],
    res: &mut [u32],
) {
    // Constant-time counterpart of mod_exp_vartime_precomp; same context unpacking.
    let n: &[u32] = &(k[0usize]).n;
    let mu: u32 = (k[0usize]).mu;
    let r2: &[u32] = &(k[0usize]).r2;
    super::bignum256_32::exp_consttime_precomp(n, mu, r2, a, bBits, b, res)
}

/**
Write `a ^ (-1) mod n` in `res`.

 The argument a and the outparam res are meant to be 256-bit bignums, i.e. uint32_t[8].
 The argument k is a montgomery context obtained through Hacl_Bignum256_mont_ctx_init.

 Before calling this function, the caller will need to ensure that the following
 preconditions are observed.
 • n is a prime
 • 0 < a
 • a < n
*/
pub fn mod_inv_prime_vartime_precomp(
    k: &[super::bignum::bn_mont_ctx_u32],
    a: &[u32],
    res: &mut [u32],
) {
    // Fermat inverse using the precomputed context: res = a^(n-2) mod n.
    let n: &[u32] = &(k[0usize]).n;
    let mu: u32 = (k[0usize]).mu;
    let r2: &[u32] = &(k[0usize]).r2;
    // Compute the exponent n2 = n - 2 with an unrolled borrow chain:
    // the low limb first, then limbs 1..3 in the block below, then limbs 4..6.
    let mut n2: [u32; 8] = [0u32; 8usize];
    let c0: u32 =
        lib::inttypes_intrinsics::sub_borrow_u32(0u32, n[0usize], 2u32, &mut (&mut n2)[0usize..]);
    let a1: (&[u32], &[u32]) = n.split_at(1usize);
    let res1: (&mut [u32], &mut [u32]) = n2.split_at_mut(1usize);
    let mut c: [u32; 1] = [c0; 1usize];
    {
        let t1: u32 = a1.1[4u32.wrapping_mul(0u32) as usize];
        let res_i: (&mut [u32], &mut [u32]) = res1.1.split_at_mut(4u32.wrapping_mul(0u32) as usize);
        (&mut c)[0usize] =
            lib::inttypes_intrinsics::sub_borrow_u32((&c)[0usize], t1, 0u32, res_i.1);
        let t10: u32 = a1.1[4u32.wrapping_mul(0u32).wrapping_add(1u32) as usize];
        let res_i0: (&mut [u32], &mut [u32]) = res_i.1.split_at_mut(1usize);
        (&mut c)[0usize] =
            lib::inttypes_intrinsics::sub_borrow_u32((&c)[0usize], t10, 0u32, res_i0.1);
        let t11: u32 = a1.1[4u32.wrapping_mul(0u32).wrapping_add(2u32) as usize];
        let res_i1: (&mut [u32], &mut [u32]) = res_i0.1.split_at_mut(1usize);
        (&mut c)[0usize] =
            lib::inttypes_intrinsics::sub_borrow_u32((&c)[0usize], t11, 0u32, res_i1.1);
        let t12: u32 = a1.1[4u32.wrapping_mul(0u32).wrapping_add(3u32) as usize];
        let res_i2: (&mut [u32], &mut [u32]) = res_i1.1.split_at_mut(1usize);
        (&mut c)[0usize] =
            lib::inttypes_intrinsics::sub_borrow_u32((&c)[0usize], t12, 0u32, res_i2.1)
    };
    krml::unroll_for!(3, "i", 4u32, 1u32, {
        let t1: u32 = a1.1[i as usize];
        let res_i: (&mut [u32], &mut [u32]) = res1.1.split_at_mut(i as usize);
        (&mut c)[0usize] = lib::inttypes_intrinsics::sub_borrow_u32((&c)[0usize], t1, 0u32, res_i.1)
    });
    let c1: u32 = (&c)[0usize];
    let c2: u32 = c1;
    // Final borrow cannot matter for a valid odd prime n > 2 — discarded.
    lowstar::ignore::ignore::<u32>(c2);
    super::bignum256_32::exp_vartime_precomp(n, mu, r2, a, 256u32, &n2, res)
}

/**
Load a
big-endian bignum from memory.

 The argument b points to len bytes of valid memory.
 The function returns a heap-allocated bignum of size sufficient to hold the
 result of loading b, or NULL if either the allocation failed, or the amount of
 required memory would exceed 4GB.

 If the return value is non-null, clients must eventually call free(3) on it to
 avoid memory leaks.
*/
pub fn new_bn_from_bytes_be(len: u32, b: &[u8]) -> Box<[u32]> {
    // NOTE(review): the NULL/free(3) wording above is carried over from the C
    // header; in this Rust port the failure case returns an empty boxed slice
    // and the Box frees itself on drop — confirm and update doc upstream.
    if len == 0u32 || len.wrapping_sub(1u32).wrapping_div(4u32).wrapping_add(1u32) > 1073741823u32 {
        [].into()
    } else {
        let mut res: Box<[u32]> =
            vec![0u32; len.wrapping_sub(1u32).wrapping_div(4u32).wrapping_add(1u32) as usize]
                .into_boxed_slice();
        if false {
            res
        } else {
            let res1: &mut [u32] = &mut res;
            let res2: &mut [u32] = res1;
            // bnLen = ceil(len / 4); copy the input right-aligned into a
            // zero-padded temp buffer so partial leading limbs read as zero.
            let bnLen: u32 = len.wrapping_sub(1u32).wrapping_div(4u32).wrapping_add(1u32);
            let tmpLen: u32 = 4u32.wrapping_mul(bnLen);
            let mut tmp: Box<[u8]> = vec![0u8; tmpLen as usize].into_boxed_slice();
            ((&mut tmp)[tmpLen.wrapping_sub(len) as usize
                ..tmpLen.wrapping_sub(len) as usize + len as usize])
                .copy_from_slice(&b[0usize..len as usize]);
            // Limbs are little-endian in memory, so load 32-bit words from the
            // end of the big-endian buffer backwards.
            for i in 0u32..bnLen {
                let u: u32 = lowstar::endianness::load32_be(
                    &(&tmp)[bnLen.wrapping_sub(i).wrapping_sub(1u32).wrapping_mul(4u32) as usize..],
                );
                let x: u32 = u;
                let os: (&mut [u32], &mut [u32]) = res2.split_at_mut(0usize);
                os.1[i as usize] = x
            }
            (*res2).into()
        }
    }
}

/**
Load a little-endian bignum from memory.

 The argument b points to len bytes of valid memory.
 The function returns a heap-allocated bignum of size sufficient to hold the
 result of loading b, or NULL if either the allocation failed, or the amount of
 required memory would exceed 4GB.

 If the return value is non-null, clients must eventually call free(3) on it to
 avoid memory leaks.
*/
pub fn new_bn_from_bytes_le(len: u32, b: &[u8]) -> Box<[u32]> {
    // Little-endian variant: input is copied left-aligned (zero padding at the
    // top) and limbs are read in order.
    if len == 0u32 || len.wrapping_sub(1u32).wrapping_div(4u32).wrapping_add(1u32) > 1073741823u32 {
        [].into()
    } else {
        let mut res: Box<[u32]> =
            vec![0u32; len.wrapping_sub(1u32).wrapping_div(4u32).wrapping_add(1u32) as usize]
                .into_boxed_slice();
        if false {
            res
        } else {
            let res1: &mut [u32] = &mut res;
            let res2: &mut [u32] = res1;
            let bnLen: u32 = len.wrapping_sub(1u32).wrapping_div(4u32).wrapping_add(1u32);
            let tmpLen: u32 = 4u32.wrapping_mul(bnLen);
            let mut tmp: Box<[u8]> = vec![0u8; tmpLen as usize].into_boxed_slice();
            ((&mut tmp)[0usize..len as usize]).copy_from_slice(&b[0usize..len as usize]);
            for i in 0u32..len.wrapping_sub(1u32).wrapping_div(4u32).wrapping_add(1u32) {
                let bj: (&[u8], &[u8]) = tmp.split_at(i.wrapping_mul(4u32) as usize);
                let u: u32 = lowstar::endianness::load32_le(bj.1);
                let r1: u32 = u;
                let x: u32 = r1;
                let os: (&mut [u32], &mut [u32]) = res2.split_at_mut(0usize);
                os.1[i as usize] = x
            }
            (*res2).into()
        }
    }
}

/**
Serialize a bignum into big-endian memory.

 The argument b points to a 256-bit bignum.
 The outparam res points to 32 bytes of valid memory.
*/
pub fn bn_to_bytes_be(b: &[u32], res: &mut [u8]) {
    let tmp: [u8; 32] = [0u8; 32usize];
    lowstar::ignore::ignore::<&[u8]>(&tmp);
    // Emit limbs highest-first so the byte stream is big-endian overall.
    krml::unroll_for!(
        8,
        "i",
        0u32,
        1u32,
        lowstar::endianness::store32_be(
            &mut res[i.wrapping_mul(4u32) as usize..],
            b[8u32.wrapping_sub(i).wrapping_sub(1u32) as usize]
        )
    )
}

/**
Serialize a bignum into little-endian memory.

 The argument b points to a 256-bit bignum.
 The outparam res points to 32 bytes of valid memory.
+*/ +pub fn bn_to_bytes_le(b: &[u32], res: &mut [u8]) { + let tmp: [u8; 32] = [0u8; 32usize]; + lowstar::ignore::ignore::<&[u8]>(&tmp); + krml::unroll_for!( + 8, + "i", + 0u32, + 1u32, + lowstar::endianness::store32_le(&mut res[i.wrapping_mul(4u32) as usize..], b[i as usize]) + ) +} + +/** +Returns 2^32 - 1 if a < b, otherwise returns 0. + + The arguments a and b are meant to be 256-bit bignums, i.e. uint32_t[8]. +*/ +pub fn lt_mask(a: &[u32], b: &[u32]) -> u32 { + let mut acc: [u32; 1] = [0u32; 1usize]; + krml::unroll_for!(8, "i", 0u32, 1u32, { + let beq: u32 = fstar::uint32::eq_mask(a[i as usize], b[i as usize]); + let blt: u32 = !fstar::uint32::gte_mask(a[i as usize], b[i as usize]); + (&mut acc)[0usize] = beq & (&acc)[0usize] | !beq & blt + }); + (&acc)[0usize] +} + +/** +Returns 2^32 - 1 if a = b, otherwise returns 0. + + The arguments a and b are meant to be 256-bit bignums, i.e. uint32_t[8]. +*/ +pub fn eq_mask(a: &[u32], b: &[u32]) -> u32 { + let mut mask: [u32; 1] = [0xFFFFFFFFu32; 1usize]; + krml::unroll_for!(8, "i", 0u32, 1u32, { + let uu____0: u32 = fstar::uint32::eq_mask(a[i as usize], b[i as usize]); + (&mut mask)[0usize] = uu____0 & (&mask)[0usize] + }); + let mask1: u32 = (&mask)[0usize]; + mask1 +} diff --git a/libcrux-hacl-rs/src/bignum/bignum32.rs b/libcrux-hacl-rs/src/bignum/bignum32.rs new file mode 100644 index 000000000..39abec2ef --- /dev/null +++ b/libcrux-hacl-rs/src/bignum/bignum32.rs @@ -0,0 +1,753 @@ +#![allow(non_snake_case)] +#![allow(non_upper_case_globals)] +#![allow(non_camel_case_types)] +#![allow(unused_assignments)] +#![allow(unreachable_patterns)] + +use crate::fstar; +use crate::lowstar; +use crate::util as lib; + +pub type pbn_mont_ctx_u32<'a> = &'a [super::bignum::bn_mont_ctx_u32]; + +/** +Write `a + b mod 2 ^ (32 * len)` in `res`. + + This function returns the carry. + + @param[in] len Number of limbs. + @param[in] a Points to `len` number of limbs, i.e. `uint32_t[len]`. 
Must not
    partially overlap the memory locations of `b` or `res`. May have exactly equal memory
    location to `b` or `res`.
    @param[in] b Points to `len` number of limbs, i.e. `uint32_t[len]`. Must not
    partially overlap the memory locations of `a` or `res`. May have exactly
    equal memory location to `a` or `res`.
    @param[out] res Points to `len` number of limbs where the result is written, i.e. `uint32_t[len]`.
    Must not partially overlap the memory locations of `a` or `b`. May have
    exactly equal memory location to `a` or `b`.
*/
pub fn add(len: u32, a: &[u32], b: &[u32], res: &mut [u32]) -> u32 {
    // Thin wrapper: delegate to the shared equal-length addition kernel,
    // propagating its returned carry.
    super::bignum_base::bn_add_eq_len_u32(len, a, b, res)
}

/**
Write `a - b mod 2 ^ (32 * len)` in `res`.

    This function returns the carry.

    @param[in] len Number of limbs.
    @param[in] a Points to `len` number of limbs, i.e. `uint32_t[len]`. Must not
    partially overlap the memory locations of `b` or `res`. May have exactly
    equal memory location to `b` or `res`.
    @param[in] b Points to `len` number of limbs, i.e. `uint32_t[len]`. Must not
    partially overlap the memory locations of `a` or `res`. May have exactly
    equal memory location to `a` or `res`.
    @param[out] res Points to `len` number of limbs where the result is written, i.e. `uint32_t[len]`.
    Must not partially overlap the memory locations of `a` or `b`. May have
    exactly equal memory location to `a` or `b`.
*/
pub fn sub(len: u32, a: &[u32], b: &[u32], res: &mut [u32]) -> u32 {
    // Thin wrapper: delegate to the shared equal-length subtraction kernel,
    // propagating its returned carry (borrow).
    super::bignum_base::bn_sub_eq_len_u32(len, a, b, res)
}

/**
Write `(a + b) mod n` in `res`.

    @param[in] len Number of limbs.
    @param[in] a Points to `len` number of limbs, i.e. `uint32_t[len]`. Must not
    partially overlap the memory locations of `b` or `res`. May have exactly
    equal memory location to `b` or `res`.
    @param[in] b Points to `len` number of limbs, i.e. `uint32_t[len]`. Must not
    partially overlap the memory locations of `a` or `res`.
May have exactly
    equal memory location to `a` or `res`.
    @param[in] n Points to `len` number of limbs, i.e. `uint32_t[len]`. Must be
    disjoint from the memory locations of `a`, `b`, and `res`.
    @param[out] res Points to `len` number of limbs where the result is written, i.e. `uint32_t[len]`.
    Must not partially overlap the memory locations of `a` or `b`. May have
    exactly equal memory location to `a` or `b`.

    @pre Before calling this function, the caller will need to ensure that the following
    preconditions are observed:
    - `a < n`
    - `b < n`
*/
pub fn add_mod(len: u32, n: &[u32], a: &[u32], b: &[u32], res: &mut [u32]) {
    // Operate on private copies of both operands so that `a` or `b` may
    // legally alias `res`, as the contract above permits.
    let lhs: Box<[u32]> = a[0usize..len as usize].to_vec().into_boxed_slice();
    let rhs: Box<[u32]> = b[0usize..len as usize].to_vec().into_boxed_slice();
    super::bignum::bn_add_mod_n_u32(len, n, &lhs, &rhs, res)
}

/**
Write `(a - b) mod n` in `res`.

    @param[in] len Number of limbs.
    @param[in] a Points to `len` number of limbs, i.e. `uint32_t[len]`. Must not
    partially overlap the memory locations of `b` or `res`. May have exactly
    equal memory location to `b` or `res`.
    @param[in] b Points to `len` number of limbs, i.e. `uint32_t[len]`. Must not
    partially overlap the memory locations of `a` or `res`. May have exactly
    equal memory location to `a` or `res`.
    @param[in] n Points to `len` number of limbs, i.e. `uint32_t[len]`. Must be
    disjoint from the memory locations of `a`, `b`, and `res`.
    @param[out] res Points to `len` number of limbs where the result is written, i.e. `uint32_t[len]`.
    Must not partially overlap the memory locations of `a` or `b`. May have
    exactly equal memory location to `a` or `b`.

    @pre Before calling this function, the caller will need to ensure that the following
    preconditions are observed:
    - `a < n`
    - `b < n`
*/
pub fn sub_mod(len: u32, n: &[u32], a: &[u32], b: &[u32], res: &mut [u32]) {
    // NOTE(review): unlike `add_mod`, no defensive copies are made here —
    // presumably the kernel itself tolerates the documented aliasing;
    // confirm against `bn_sub_mod_n_u32`.
    super::bignum::bn_sub_mod_n_u32(len, n, a, b, res)
}

/**
Write `a * b` in `res`.

    @param[in] len Number of limbs.
    @param[in] a Points to `len` number of limbs, i.e. `uint32_t[len]`. Must be
    disjoint from the memory location of `b` and `res`.
    @param[in] b Points to `len` number of limbs, i.e. `uint32_t[len]`. Must be
    disjoint from the memory location of `a` and `res`.
    @param[out] res Points to `2*len` number of limbs where the result is written, i.e. `uint32_t[2*len]`.
    Must be disjoint from the memory locations of `a` and `b`.
*/
pub fn mul(len: u32, a: &[u32], b: &[u32], res: &mut [u32]) {
    // Scratch space of 4*len limbs required by the Karatsuba kernel.
    let mut tmp: Box<[u32]> = vec![0u32; 4u32.wrapping_mul(len) as usize].into_boxed_slice();
    super::bignum::bn_karatsuba_mul_uint32(len, a, b, &mut tmp, res)
}

/**
Write `a * a` in `res`.

    @param[in] a Points to `len` number of limbs, i.e. `uint32_t[len]`. Must be
    disjoint from the memory location of `res`.
    @param[out] res Points to `2*len` number of limbs where the result is written, i.e. `uint32_t[2*len]`.
    Must be disjoint from the memory location of `a`.
*/
pub fn sqr(len: u32, a: &[u32], res: &mut [u32]) {
    // Scratch space of 4*len limbs required by the Karatsuba squaring kernel.
    let mut tmp: Box<[u32]> = vec![0u32; 4u32.wrapping_mul(len) as usize].into_boxed_slice();
    super::bignum::bn_karatsuba_sqr_uint32(len, a, &mut tmp, res)
}

// Reduce a double-width value `a` (2*len limbs) mod `n`, given precomputed
// Montgomery constants. NOTE(review): `r2` and `mu` are presumably the usual
// Montgomery constants produced by `bn_precomp_r2_mod_n_u32` /
// `mod_inv_uint32` (see how `r#mod` below builds them) — confirm there.
#[inline]
fn bn_slow_precomp(len: u32, n: &[u32], mu: u32, r2: &[u32], a: &[u32], res: &mut [u32]) {
    let mut a_mod: Box<[u32]> = vec![0u32; len as usize].into_boxed_slice();
    // Work on a copy of `a` because the reduction kernel mutates its input.
    let mut a1: Box<[u32]> = vec![0u32; len.wrapping_add(len) as usize].into_boxed_slice();
    ((&mut a1)[0usize..len.wrapping_add(len) as usize])
        .copy_from_slice(&a[0usize..len.wrapping_add(len) as usize]);
    super::bignum::bn_almost_mont_reduction_u32(len, n, mu, &mut a1, &mut a_mod);
    super::bignum::bn_to_mont_u32(len, n, mu, r2, &a_mod, res)
}

/**
Write `a mod n` in `res`.

    @param[in] a Points to `2*len` number of limbs, i.e. `uint32_t[2*len]`. Must be
    disjoint from the memory location of `res`.
    @param[in] n Points to `len` number of limbs, i.e. `uint32_t[len]`. Must be
    disjoint from the memory location of `res`.
    @param[out] res Points to `len` number of limbs, i.e. `uint32_t[len]`. Must be
    disjoint from the memory locations of `a` and `n`.

    @return `false` if any precondition is violated, `true` otherwise.

    @pre Before calling this function, the caller will need to ensure that the following
    preconditions are observed:
    - `1 < n`
    - `n % 2 = 1`
*/
pub fn r#mod(len: u32, n: &[u32], a: &[u32], res: &mut [u32]) -> bool {
    // Build the constant 1 to check `1 < n` below.
    let mut one: Box<[u32]> = vec![0u32; len as usize].into_boxed_slice();
    ((&mut one)[0usize..len as usize])
        .copy_from_slice(&vec![0u32; len as usize].into_boxed_slice());
    (&mut one)[0usize] = 1u32;
    // m0: all-ones mask iff n is odd (required by Montgomery arithmetic).
    let bit0: u32 = n[0usize] & 1u32;
    let m0: u32 = 0u32.wrapping_sub(bit0);
    // m1: all-ones mask iff 1 < n (constant-time limb-wise comparison).
    let mut acc: [u32; 1] = [0u32; 1usize];
    for i in 0u32..len {
        let beq: u32 = fstar::uint32::eq_mask((&one)[i as usize], n[i as usize]);
        let blt: u32 = !fstar::uint32::gte_mask((&one)[i as usize], n[i as usize]);
        (&mut acc)[0usize] = beq & (&acc)[0usize] | !beq & blt
    }
    let m1: u32 = (&acc)[0usize];
    let is_valid_m: u32 = m0 & m1;
    let nBits: u32 = 32u32.wrapping_mul(super::bignum_base::bn_get_top_index_u32(len, n));
    if is_valid_m == 0xFFFFFFFFu32 {
        // Preconditions hold: compute the Montgomery constants and reduce.
        let mut r2: Box<[u32]> = vec![0u32; len as usize].into_boxed_slice();
        super::bignum::bn_precomp_r2_mod_n_u32(len, nBits, n, &mut r2);
        let mu: u32 = super::bignum::mod_inv_uint32(n[0usize]);
        super::bignum32::bn_slow_precomp(len, n, mu, &r2, a, res)
    } else {
        // Preconditions violated: zero the output and report failure.
        (res[0usize..len as usize]).copy_from_slice(&vec![0u32; len as usize].into_boxed_slice())
    };
    is_valid_m == 0xFFFFFFFFu32
}

/**
Write `a ^ b mod n` in `res`.

    This function is *NOT* constant-time on the argument `b`. See the
    `mod_exp_consttime_*` functions for constant-time variants.

    @param[in] a Points to `len` number of limbs, i.e. `uint32_t[len]`. Must be
    disjoint from the memory locations of `n` and `res`.
    @param[in] n Points to `len` number of limbs, i.e. `uint32_t[len]`. Must be
    disjoint from the memory locations of `a` and `res`.
    @param[in] b Points to a bignum of any size, with an upper bound of `bBits` number of
    significant bits. Must be disjoint from the memory location of `res`.
    @param[in] bBits An upper bound on the number of significant bits of `b`.
    A tighter bound results in faster execution time. When in doubt, the number
    of bits for the bignum size is always a safe default, e.g. if `b` is a 4096-bit
    bignum, `bBits` should be `4096`.
    @param[out] res Points to `len` number of limbs, i.e. `uint32_t[len]`. Must be
    disjoint from the memory locations of `a`, `b`, and `n`.

    @return `false` if any preconditions are violated, `true` otherwise.

    @pre Before calling this function, the caller will need to ensure that the following
    preconditions are observed:
    - `n % 2 = 1`
    - `1 < n`
    - `b < pow2 bBits`
    - `a < n`
*/
pub fn mod_exp_vartime(
    len: u32,
    n: &[u32],
    a: &[u32],
    bBits: u32,
    b: &[u32],
    res: &mut [u32],
) -> bool {
    // Constant-time validity check over all documented preconditions;
    // all-ones mask means valid.
    let is_valid_m: u32 = super::bignum::bn_check_mod_exp_u32(len, n, a, bBits, b);
    let nBits: u32 = 32u32.wrapping_mul(super::bignum_base::bn_get_top_index_u32(len, n));
    if is_valid_m == 0xFFFFFFFFu32 {
        super::bignum::bn_mod_exp_vartime_u32(len, nBits, n, a, bBits, b, res)
    } else {
        // Preconditions violated: zero the output.
        (res[0usize..len as usize]).copy_from_slice(&vec![0u32; len as usize].into_boxed_slice())
    };
    is_valid_m == 0xFFFFFFFFu32
}

/**
Write `a ^ b mod n` in `res`.

    This function is constant-time over its argument `b`, at the cost of a slower
    execution time than `mod_exp_vartime_*`.

    @param[in] a Points to `len` number of limbs, i.e. `uint32_t[len]`. Must be
    disjoint from the memory locations of `n` and `res`.
    @param[in] n Points to `len` number of limbs, i.e. `uint32_t[len]`. Must be
    disjoint from the memory locations of `a` and `res`.
    @param[in] b Points to a bignum of any size, with an upper bound of `bBits` number of
    significant bits. Must be disjoint from the memory location of `res`.
    @param[in] bBits An upper bound on the number of significant bits of `b`.
    A tighter bound results in faster execution time.
When in doubt, the number
    of bits for the bignum size is always a safe default, e.g. if `b` is a 4096-bit
    bignum, `bBits` should be `4096`.
    @param[out] res Points to `len` number of limbs, i.e. `uint32_t[len]`. Must be
    disjoint from the memory locations of `a`, `b`, and `n`.

    @return `false` if any preconditions are violated, `true` otherwise.

    @pre Before calling this function, the caller will need to ensure that the following
    preconditions are observed:
    - `n % 2 = 1`
    - `1 < n`
    - `b < pow2 bBits`
    - `a < n`
*/
pub fn mod_exp_consttime(
    len: u32,
    n: &[u32],
    a: &[u32],
    bBits: u32,
    b: &[u32],
    res: &mut [u32],
) -> bool {
    // Same validity check as the vartime variant; only the exponentiation
    // kernel differs (constant-time in `b`).
    let is_valid_m: u32 = super::bignum::bn_check_mod_exp_u32(len, n, a, bBits, b);
    let nBits: u32 = 32u32.wrapping_mul(super::bignum_base::bn_get_top_index_u32(len, n));
    if is_valid_m == 0xFFFFFFFFu32 {
        super::bignum::bn_mod_exp_consttime_u32(len, nBits, n, a, bBits, b, res)
    } else {
        // Preconditions violated: zero the output.
        (res[0usize..len as usize]).copy_from_slice(&vec![0u32; len as usize].into_boxed_slice())
    };
    is_valid_m == 0xFFFFFFFFu32
}

/**
Write `a ^ (-1) mod n` in `res`.

    @param[in] a Points to `len` number of limbs, i.e. `uint32_t[len]`. Must be
    disjoint from the memory locations of `n` and `res`.
    @param[in] n Points to `len` number of limbs, i.e. `uint32_t[len]`. Must be
    disjoint from the memory locations of `a` and `res`.
    @param[out] res Points to `len` number of limbs, i.e. `uint32_t[len]`. Must be
    disjoint from the memory location of `a` and `n`.

    @return `false` if any preconditions (except the precondition: `n` is a prime)
    are violated, `true` otherwise.

    @pre Before calling this function, the caller will need to ensure that the following
    preconditions are observed:
    - `n` is a prime
    - `n % 2 = 1`
    - `1 < n`
    - `0 < a`
    - `a < n`
*/
pub fn mod_inv_prime_vartime(len: u32, n: &[u32], a: &[u32], res: &mut [u32]) -> bool {
    // The inverse is computed as a^(n-2) mod n (Fermat's little theorem),
    // which is only correct when n is prime — that precondition is NOT checked.
    // Build the constant 1 to check `1 < n` below.
    let mut one: Box<[u32]> = vec![0u32; len as usize].into_boxed_slice();
    ((&mut one)[0usize..len as usize])
        .copy_from_slice(&vec![0u32; len as usize].into_boxed_slice());
    (&mut one)[0usize] = 1u32;
    // m0: all-ones iff n is odd.
    let bit0: u32 = n[0usize] & 1u32;
    let m0: u32 = 0u32.wrapping_sub(bit0);
    // m1: all-ones iff 1 < n.
    let mut acc: [u32; 1] = [0u32; 1usize];
    for i in 0u32..len {
        let beq: u32 = fstar::uint32::eq_mask((&one)[i as usize], n[i as usize]);
        let blt: u32 = !fstar::uint32::gte_mask((&one)[i as usize], n[i as usize]);
        (&mut acc)[0usize] = beq & (&acc)[0usize] | !beq & blt
    }
    let m1: u32 = (&acc)[0usize];
    let m00: u32 = m0 & m1;
    // m10: all-ones iff a == 0 (rejected — zero has no inverse).
    let bn_zero: Box<[u32]> = vec![0u32; len as usize].into_boxed_slice();
    let mut mask: [u32; 1] = [0xFFFFFFFFu32; 1usize];
    for i in 0u32..len {
        let uu____0: u32 = fstar::uint32::eq_mask(a[i as usize], (&bn_zero)[i as usize]);
        (&mut mask)[0usize] = uu____0 & (&mask)[0usize]
    }
    let mask1: u32 = (&mask)[0usize];
    let res1: u32 = mask1;
    let m10: u32 = res1;
    // m2: all-ones iff a < n.
    let mut acc0: [u32; 1] = [0u32; 1usize];
    for i in 0u32..len {
        let beq: u32 = fstar::uint32::eq_mask(a[i as usize], n[i as usize]);
        let blt: u32 = !fstar::uint32::gte_mask(a[i as usize], n[i as usize]);
        (&mut acc0)[0usize] = beq & (&acc0)[0usize] | !beq & blt
    }
    let m2: u32 = (&acc0)[0usize];
    let is_valid_m: u32 = m00 & !m10 & m2;
    let nBits: u32 = 32u32.wrapping_mul(super::bignum_base::bn_get_top_index_u32(len, n));
    if is_valid_m == 0xFFFFFFFFu32 {
        // n2 := n - 2, computed with a borrow chain unrolled four limbs at a
        // time (the exponent for Fermat inversion).
        let mut n2: Box<[u32]> = vec![0u32; len as usize].into_boxed_slice();
        let c0: u32 = lib::inttypes_intrinsics::sub_borrow_u32(
            0u32,
            n[0usize],
            2u32,
            &mut (&mut n2)[0usize..],
        );
        let c: u32 = if 1u32 < len {
            let a1: (&[u32], &[u32]) = n.split_at(1usize);
            let res10: (&mut [u32], &mut [u32]) = n2.split_at_mut(1usize);
            let mut c: [u32; 1] = [c0; 1usize];
            for i in 0u32..len.wrapping_sub(1u32).wrapping_div(4u32) {
                let t1: u32 = a1.1[4u32.wrapping_mul(i) as usize];
                let res_i: (&mut [u32], &mut [u32]) =
                    res10.1.split_at_mut(4u32.wrapping_mul(i) as usize);
                (&mut c)[0usize] =
                    lib::inttypes_intrinsics::sub_borrow_u32((&c)[0usize], t1, 0u32, res_i.1);
                let t10: u32 = a1.1[4u32.wrapping_mul(i).wrapping_add(1u32) as usize];
                let res_i0: (&mut [u32], &mut [u32]) = res_i.1.split_at_mut(1usize);
                (&mut c)[0usize] =
                    lib::inttypes_intrinsics::sub_borrow_u32((&c)[0usize], t10, 0u32, res_i0.1);
                let t11: u32 = a1.1[4u32.wrapping_mul(i).wrapping_add(2u32) as usize];
                let res_i1: (&mut [u32], &mut [u32]) = res_i0.1.split_at_mut(1usize);
                (&mut c)[0usize] =
                    lib::inttypes_intrinsics::sub_borrow_u32((&c)[0usize], t11, 0u32, res_i1.1);
                let t12: u32 = a1.1[4u32.wrapping_mul(i).wrapping_add(3u32) as usize];
                let res_i2: (&mut [u32], &mut [u32]) = res_i1.1.split_at_mut(1usize);
                (&mut c)[0usize] =
                    lib::inttypes_intrinsics::sub_borrow_u32((&c)[0usize], t12, 0u32, res_i2.1)
            }
            // Remainder loop for the limbs not covered by the 4-way unroll.
            for i in
                len.wrapping_sub(1u32).wrapping_div(4u32).wrapping_mul(4u32)..len.wrapping_sub(1u32)
            {
                let t1: u32 = a1.1[i as usize];
                let res_i: (&mut [u32], &mut [u32]) = res10.1.split_at_mut(i as usize);
                (&mut c)[0usize] =
                    lib::inttypes_intrinsics::sub_borrow_u32((&c)[0usize], t1, 0u32, res_i.1)
            }
            let c1: u32 = (&c)[0usize];
            c1
        } else {
            c0
        };
        // Final borrow is provably zero here (n >= 2); discard it.
        // NOTE(review): `<u32>` restored — the turbofish was lost in transit.
        lowstar::ignore::ignore::<u32>(c);
        super::bignum::bn_mod_exp_vartime_u32(len, nBits, n, a, 32u32.wrapping_mul(len), &n2, res)
    } else {
        // Preconditions violated: zero the output.
        (res[0usize..len as usize]).copy_from_slice(&vec![0u32; len as usize].into_boxed_slice())
    };
    is_valid_m == 0xFFFFFFFFu32
}

/**
Heap-allocate and initialize a montgomery context.

    @param n Points to `len` number of limbs, i.e. `uint32_t[len]`.

    @return A pointer to an allocated and initialized Montgomery context is returned.
    Clients will need to call `Hacl_Bignum32_mont_ctx_free` on the return value to
    avoid memory leaks.

    @pre Before calling this function, the caller will need to ensure that the following
    preconditions are observed:
    - `n % 2 = 1`
    - `1 < n`
*/
pub fn mont_ctx_init(len: u32, n: &[u32]) -> Box<[super::bignum::bn_mont_ctx_u32]> {
    let mut r2: Box<[u32]> = vec![0u32; len as usize].into_boxed_slice();
    let mut n1: Box<[u32]> = vec![0u32; len as usize].into_boxed_slice();
    let r21: &mut [u32] = &mut r2;
    let n11: &mut [u32] = &mut n1;
    // The context stores its own copy of the modulus.
    (n11[0usize..len as usize]).copy_from_slice(&n[0usize..len as usize]);
    let nBits: u32 = 32u32.wrapping_mul(super::bignum_base::bn_get_top_index_u32(len, n));
    // Precompute the Montgomery constants: r2 via the kernel, mu from the
    // low limb of n.
    super::bignum::bn_precomp_r2_mod_n_u32(len, nBits, n, r21);
    let mu: u32 = super::bignum::mod_inv_uint32(n[0usize]);
    let res: super::bignum::bn_mont_ctx_u32 = super::bignum::bn_mont_ctx_u32 {
        len,
        n: (*n11).into(),
        mu,
        r2: (*r21).into(),
    };
    // Boxed one-element slice, mirroring the C API's heap-allocated pointer.
    let buf: Box<[super::bignum::bn_mont_ctx_u32]> = vec![res].into_boxed_slice();
    buf
}

/**
Write `a mod n` in `res`.

    @param[in] k Points to a Montgomery context obtained from `Hacl_Bignum32_mont_ctx_init`.
    @param[in] a Points to `2*len` number of limbs, i.e. `uint32_t[2*len]`. Must be
    disjoint from the memory location of `res`.
    @param[out] res Points to `len` number of limbs, i.e. `uint32_t[len]`. Must be
    disjoint from the memory location of `a`.
*/
pub fn mod_precomp(k: &[super::bignum::bn_mont_ctx_u32], a: &[u32], res: &mut [u32]) {
    // Unpack the precomputed context and delegate to the shared reduction.
    let len1: u32 = (k[0usize]).len;
    let n: &[u32] = &(k[0usize]).n;
    let mu: u32 = (k[0usize]).mu;
    let r2: &[u32] = &(k[0usize]).r2;
    super::bignum32::bn_slow_precomp(len1, n, mu, r2, a, res)
}

/**
Write `a ^ b mod n` in `res`.

    This function is *NOT* constant-time on the argument `b`. See the
    `mod_exp_consttime_*` functions for constant-time variants.
+ + @param[in] k Points to a Montgomery context obtained from `Hacl_Bignum32_mont_ctx_init`. + @param[in] a Points to `len` number of limbs, i.e. `uint32_t[len]`. Must be + disjoint from the memory location of `res`. + @param[in] b Points to a bignum of any size, with an upper bound of `bBits` number of + significant bits. Must be disjoint from the memory location of `res`. + @param[in] bBits An upper bound on the number of significant bits of `b`. + A tighter bound results in faster execution time. When in doubt, the number + of bits for the bignum size is always a safe default, e.g. if `b` is a 4096-bit + bignum, `bBits` should be `4096`. + @param[out] res Points to `len` number of limbs, i.e. `uint32_t[len]`. Must be + disjoint from the memory locations of `a` and `b`. + + @pre Before calling this function, the caller will need to ensure that the following + preconditions are observed: + - `b < pow2 bBits` + - `a < n` +*/ +pub fn mod_exp_vartime_precomp( + k: &[super::bignum::bn_mont_ctx_u32], + a: &[u32], + bBits: u32, + b: &[u32], + res: &mut [u32], +) { + let len1: u32 = (k[0usize]).len; + let n: &[u32] = &(k[0usize]).n; + let mu: u32 = (k[0usize]).mu; + let r2: &[u32] = &(k[0usize]).r2; + super::bignum::bn_mod_exp_vartime_precomp_u32(len1, n, mu, r2, a, bBits, b, res) +} + +/** +Write `a ^ b mod n` in `res`. + + This function is constant-time over its argument b, at the cost of a slower + execution time than `mod_exp_vartime_*`. + + @param[in] k Points to a Montgomery context obtained from `Hacl_Bignum32_mont_ctx_init`. + @param[in] a Points to `len` number of limbs, i.e. `uint32_t[len]`. Must be + disjoint from the memory location of `res`. + @param[in] b Points to a bignum of any size, with an upper bound of `bBits` number of + significant bits. Must be disjoint from the memory location of `res`. + @param[in] bBits An upper bound on the number of significant bits of `b`. + A tighter bound results in faster execution time. 
When in doubt, the number + of bits for the bignum size is always a safe default, e.g. if `b` is a 4096-bit + bignum, `bBits` should be `4096`. + @param[out] res Points to `len` number of limbs, i.e. `uint32_t[len]`. Must be + disjoint from the memory locations of `a` and `b`. + + @pre Before calling this function, the caller will need to ensure that the following + preconditions are observed: + - `b < pow2 bBits` + - `a < n` +*/ +pub fn mod_exp_consttime_precomp( + k: &[super::bignum::bn_mont_ctx_u32], + a: &[u32], + bBits: u32, + b: &[u32], + res: &mut [u32], +) { + let len1: u32 = (k[0usize]).len; + let n: &[u32] = &(k[0usize]).n; + let mu: u32 = (k[0usize]).mu; + let r2: &[u32] = &(k[0usize]).r2; + super::bignum::bn_mod_exp_consttime_precomp_u32(len1, n, mu, r2, a, bBits, b, res) +} + +/** +Write `a ^ (-1) mod n` in `res`. + + @param[in] k Points to a Montgomery context obtained through `Hacl_Bignum32_mont_ctx_init`. + @param[in] a Points to `len` number of limbs, i.e. `uint32_t[len]`. Must be + disjoint from the memory location of `res`. + @param[out] res Points to `len` number of limbs, i.e. `uint32_t[len]`. Must be + disjoint from the memory location of `a`. 

    @pre Before calling this function, the caller will need to ensure that the following
    preconditions are observed:
    - `n` is a prime
    - `0 < a`
    - `a < n`
*/
pub fn mod_inv_prime_vartime_precomp(
    k: &[super::bignum::bn_mont_ctx_u32],
    a: &[u32],
    res: &mut [u32],
) {
    // Fermat inversion with a precomputed context: res = a^(n-2) mod n.
    // Note that unlike `mod_inv_prime_vartime`, no validity checks are
    // performed here — the @pre conditions are the caller's responsibility.
    let len1: u32 = (k[0usize]).len;
    let n: &[u32] = &(k[0usize]).n;
    let mu: u32 = (k[0usize]).mu;
    let r2: &[u32] = &(k[0usize]).r2;
    // n2 := n - 2, computed with a 4-way unrolled borrow chain.
    let mut n2: Box<[u32]> = vec![0u32; len1 as usize].into_boxed_slice();
    let c0: u32 =
        lib::inttypes_intrinsics::sub_borrow_u32(0u32, n[0usize], 2u32, &mut (&mut n2)[0usize..]);
    let c: u32 = if 1u32 < len1 {
        let a1: (&[u32], &[u32]) = n.split_at(1usize);
        let res1: (&mut [u32], &mut [u32]) = n2.split_at_mut(1usize);
        let mut c: [u32; 1] = [c0; 1usize];
        for i in 0u32..len1.wrapping_sub(1u32).wrapping_div(4u32) {
            let t1: u32 = a1.1[4u32.wrapping_mul(i) as usize];
            let res_i: (&mut [u32], &mut [u32]) =
                res1.1.split_at_mut(4u32.wrapping_mul(i) as usize);
            (&mut c)[0usize] =
                lib::inttypes_intrinsics::sub_borrow_u32((&c)[0usize], t1, 0u32, res_i.1);
            let t10: u32 = a1.1[4u32.wrapping_mul(i).wrapping_add(1u32) as usize];
            let res_i0: (&mut [u32], &mut [u32]) = res_i.1.split_at_mut(1usize);
            (&mut c)[0usize] =
                lib::inttypes_intrinsics::sub_borrow_u32((&c)[0usize], t10, 0u32, res_i0.1);
            let t11: u32 = a1.1[4u32.wrapping_mul(i).wrapping_add(2u32) as usize];
            let res_i1: (&mut [u32], &mut [u32]) = res_i0.1.split_at_mut(1usize);
            (&mut c)[0usize] =
                lib::inttypes_intrinsics::sub_borrow_u32((&c)[0usize], t11, 0u32, res_i1.1);
            let t12: u32 = a1.1[4u32.wrapping_mul(i).wrapping_add(3u32) as usize];
            let res_i2: (&mut [u32], &mut [u32]) = res_i1.1.split_at_mut(1usize);
            (&mut c)[0usize] =
                lib::inttypes_intrinsics::sub_borrow_u32((&c)[0usize], t12, 0u32, res_i2.1)
        }
        // Remainder loop for the limbs not covered by the 4-way unroll.
        for i in len1
            .wrapping_sub(1u32)
            .wrapping_div(4u32)
            .wrapping_mul(4u32)..len1.wrapping_sub(1u32)
        {
            let t1: u32 = a1.1[i as usize];
            let res_i: (&mut [u32], &mut [u32]) = res1.1.split_at_mut(i as usize);
            (&mut c)[0usize] =
                lib::inttypes_intrinsics::sub_borrow_u32((&c)[0usize], t1, 0u32, res_i.1)
        }
        let c1: u32 = (&c)[0usize];
        c1
    } else {
        c0
    };
    // Final borrow is provably zero (n >= 2, per preconditions); discard it.
    // NOTE(review): `<u32>` restored — the turbofish was lost in transit.
    lowstar::ignore::ignore::<u32>(c);
    super::bignum::bn_mod_exp_vartime_precomp_u32(
        len1,
        n,
        mu,
        r2,
        a,
        32u32.wrapping_mul(len1),
        &n2,
        res,
    )
}

/**
Load a big-endian bignum from memory.

    @param len Size of `b` as number of bytes.
    @param b Points to `len` number of bytes, i.e. `uint8_t[len]`.

    @return A heap-allocated bignum of size sufficient to hold the result of
    loading `b`. Otherwise, `NULL`, if either the allocation failed, or the amount
    of required memory would exceed 4GB. Clients must `free(3)` any non-null return
    value to avoid memory leaks.
*/
pub fn new_bn_from_bytes_be(len: u32, b: &[u8]) -> Box<[u32]> {
    // bnLen = ceil(len / 4); the second test rejects limb counts whose byte
    // size 4 * bnLen would exceed 4GB.
    if len == 0u32 || len.wrapping_sub(1u32).wrapping_div(4u32).wrapping_add(1u32) > 1073741823u32 {
        [].into()
    } else {
        let mut res: Box<[u32]> =
            vec![0u32; len.wrapping_sub(1u32).wrapping_div(4u32).wrapping_add(1u32) as usize]
                .into_boxed_slice();
        // NOTE(review): dead `if false` branch kept from the machine extraction.
        if false {
            res
        } else {
            let res1: &mut [u32] = &mut res;
            let res2: &mut [u32] = res1;
            let bnLen: u32 = len.wrapping_sub(1u32).wrapping_div(4u32).wrapping_add(1u32);
            let tmpLen: u32 = 4u32.wrapping_mul(bnLen);
            // Zero-padded scratch buffer; input is right-aligned so the most
            // significant (partial) limb reads zero padding.
            let mut tmp: Box<[u8]> = vec![0u8; tmpLen as usize].into_boxed_slice();
            ((&mut tmp)[tmpLen.wrapping_sub(len) as usize
                ..tmpLen.wrapping_sub(len) as usize + len as usize])
                .copy_from_slice(&b[0usize..len as usize]);
            for i in 0u32..bnLen {
                let u: u32 = lowstar::endianness::load32_be(
                    &(&tmp)[bnLen.wrapping_sub(i).wrapping_sub(1u32).wrapping_mul(4u32) as usize..],
                );
                let x: u32 = u;
                let os: (&mut [u32], &mut [u32]) = res2.split_at_mut(0usize);
                os.1[i as usize] = x
            }
            (*res2).into()
        }
    }
}

/**
Load a little-endian bignum from memory.

    @param len Size of `b` as number of bytes.
+ @param b Points to `len` number of bytes, i.e. `uint8_t[len]`. + + @return A heap-allocated bignum of size sufficient to hold the result of + loading `b`. Otherwise, `NULL`, if either the allocation failed, or the amount + of required memory would exceed 4GB. Clients must `free(3)` any non-null return + value to avoid memory leaks. +*/ +pub fn new_bn_from_bytes_le(len: u32, b: &[u8]) -> Box<[u32]> { + if len == 0u32 || len.wrapping_sub(1u32).wrapping_div(4u32).wrapping_add(1u32) > 1073741823u32 { + [].into() + } else { + let mut res: Box<[u32]> = + vec![0u32; len.wrapping_sub(1u32).wrapping_div(4u32).wrapping_add(1u32) as usize] + .into_boxed_slice(); + if false { + res + } else { + let res1: &mut [u32] = &mut res; + let res2: &mut [u32] = res1; + let bnLen: u32 = len.wrapping_sub(1u32).wrapping_div(4u32).wrapping_add(1u32); + let tmpLen: u32 = 4u32.wrapping_mul(bnLen); + let mut tmp: Box<[u8]> = vec![0u8; tmpLen as usize].into_boxed_slice(); + ((&mut tmp)[0usize..len as usize]).copy_from_slice(&b[0usize..len as usize]); + for i in 0u32..len.wrapping_sub(1u32).wrapping_div(4u32).wrapping_add(1u32) { + let bj: (&[u8], &[u8]) = tmp.split_at(i.wrapping_mul(4u32) as usize); + let u: u32 = lowstar::endianness::load32_le(bj.1); + let r1: u32 = u; + let x: u32 = r1; + let os: (&mut [u32], &mut [u32]) = res2.split_at_mut(0usize); + os.1[i as usize] = x + } + (*res2).into() + } + } +} + +/** +Serialize a bignum into big-endian memory. + + @param[in] len Size of `b` as number of bytes. + @param[in] b Points to a bignum of `ceil(len/4)` size. Must be disjoint from + the memory location of `res`. + @param[out] res Points to `len` number of bytes, i.e. `uint8_t[len]`. Must be + disjoint from the memory location of `b`. 
*/
pub fn bn_to_bytes_be(len: u32, b: &[u32], res: &mut [u8]) {
    // Serialize all bnLen limbs big-endian into a scratch buffer, then copy
    // out only the low `len` bytes (dropping the zero padding at the front).
    let bnLen: u32 = len.wrapping_sub(1u32).wrapping_div(4u32).wrapping_add(1u32);
    let tmpLen: u32 = 4u32.wrapping_mul(bnLen);
    let mut tmp: Box<[u8]> = vec![0u8; tmpLen as usize].into_boxed_slice();
    for i in 0u32..bnLen {
        lowstar::endianness::store32_be(
            &mut (&mut tmp)[i.wrapping_mul(4u32) as usize..],
            b[bnLen.wrapping_sub(i).wrapping_sub(1u32) as usize],
        )
    }
    (res[0usize..len as usize])
        .copy_from_slice(&(&(&tmp)[tmpLen.wrapping_sub(len) as usize..])[0usize..len as usize])
}

/**
Serialize a bignum into little-endian memory.

    @param[in] len Size of `b` as number of bytes.
    @param[in] b Points to a bignum of `ceil(len/4)` size. Must be disjoint from
    the memory location of `res`.
    @param[out] res Points to `len` number of bytes, i.e. `uint8_t[len]`. Must be
    disjoint from the memory location of `b`.
*/
pub fn bn_to_bytes_le(len: u32, b: &[u32], res: &mut [u8]) {
    // Little-endian: limbs serialize in order; the first `len` bytes of the
    // scratch buffer are exactly the output.
    let bnLen: u32 = len.wrapping_sub(1u32).wrapping_div(4u32).wrapping_add(1u32);
    let tmpLen: u32 = 4u32.wrapping_mul(bnLen);
    let mut tmp: Box<[u8]> = vec![0u8; tmpLen as usize].into_boxed_slice();
    for i in 0u32..bnLen {
        lowstar::endianness::store32_le(
            &mut (&mut tmp)[i.wrapping_mul(4u32) as usize..],
            b[i as usize],
        )
    }
    (res[0usize..len as usize]).copy_from_slice(&(&(&tmp)[0usize..])[0usize..len as usize])
}

/**
Returns 2^32 - 1 if a < b, otherwise returns 0.

    @param len Number of limbs.
    @param a Points to `len` number of limbs, i.e. `uint32_t[len]`.
    @param b Points to `len` number of limbs, i.e. `uint32_t[len]`.

    @return `2^32 - 1` if `a < b`, otherwise, `0`.
*/
pub fn lt_mask(len: u32, a: &[u32], b: &[u32]) -> u32 {
    // Constant-time lexicographic comparison over limbs: keep the previous
    // verdict when limbs are equal, otherwise take a[i] < b[i].
    let mut acc: [u32; 1] = [0u32; 1usize];
    for i in 0u32..len {
        let beq: u32 = fstar::uint32::eq_mask(a[i as usize], b[i as usize]);
        let blt: u32 = !fstar::uint32::gte_mask(a[i as usize], b[i as usize]);
        (&mut acc)[0usize] = beq & (&acc)[0usize] | !beq & blt
    }
    (&acc)[0usize]
}

/**
Returns 2^32 - 1 if a = b, otherwise returns 0.

    @param len Number of limbs.
    @param a Points to `len` number of limbs, i.e. `uint32_t[len]`.
    @param b Points to `len` number of limbs, i.e. `uint32_t[len]`.

    @return `2^32 - 1` if a = b, otherwise, `0`.
*/
pub fn eq_mask(len: u32, a: &[u32], b: &[u32]) -> u32 {
    // AND together per-limb equality masks (constant time, no early exit).
    let mut mask: [u32; 1] = [0xFFFFFFFFu32; 1usize];
    for i in 0u32..len {
        let uu____0: u32 = fstar::uint32::eq_mask(a[i as usize], b[i as usize]);
        (&mut mask)[0usize] = uu____0 & (&mask)[0usize]
    }
    let mask1: u32 = (&mask)[0usize];
    mask1
}
diff --git a/libcrux-hacl-rs/src/bignum/bignum4096.rs b/libcrux-hacl-rs/src/bignum/bignum4096.rs
new file mode 100644
index 000000000..5a63cf32d
--- /dev/null
+++ b/libcrux-hacl-rs/src/bignum/bignum4096.rs
@@ -0,0 +1,1245 @@
#![allow(non_snake_case)]
#![allow(non_upper_case_globals)]
#![allow(non_camel_case_types)]
#![allow(unused_assignments)]
#![allow(unreachable_patterns)]

use crate::fstar;
use crate::lowstar;
use crate::util as lib;

/**
Write `a + b mod 2^4096` in `res`.

    This function returns the carry.

    The arguments a, b and res are meant to be 4096-bit bignums, i.e.
uint64_t[64]
*/
pub fn add(a: &[u64], b: &[u64], res: &mut [u64]) -> u64 {
    // 64-limb addition with a carry chain, unrolled 16 x 4 limbs; the
    // `split_at_mut` calls advance a mutable cursor over `res` one limb at a
    // time. Returns the final carry.
    let mut c: [u64; 1] = [0u64; 1usize];
    krml::unroll_for!(16, "i", 0u32, 1u32, {
        let t1: u64 = a[4u32.wrapping_mul(i) as usize];
        let t2: u64 = b[4u32.wrapping_mul(i) as usize];
        let res_i: (&mut [u64], &mut [u64]) = res.split_at_mut(4u32.wrapping_mul(i) as usize);
        (&mut c)[0usize] = lib::inttypes_intrinsics::add_carry_u64((&c)[0usize], t1, t2, res_i.1);
        let t10: u64 = a[4u32.wrapping_mul(i).wrapping_add(1u32) as usize];
        let t20: u64 = b[4u32.wrapping_mul(i).wrapping_add(1u32) as usize];
        let res_i0: (&mut [u64], &mut [u64]) = res_i.1.split_at_mut(1usize);
        (&mut c)[0usize] =
            lib::inttypes_intrinsics::add_carry_u64((&c)[0usize], t10, t20, res_i0.1);
        let t11: u64 = a[4u32.wrapping_mul(i).wrapping_add(2u32) as usize];
        let t21: u64 = b[4u32.wrapping_mul(i).wrapping_add(2u32) as usize];
        let res_i1: (&mut [u64], &mut [u64]) = res_i0.1.split_at_mut(1usize);
        (&mut c)[0usize] =
            lib::inttypes_intrinsics::add_carry_u64((&c)[0usize], t11, t21, res_i1.1);
        let t12: u64 = a[4u32.wrapping_mul(i).wrapping_add(3u32) as usize];
        let t22: u64 = b[4u32.wrapping_mul(i).wrapping_add(3u32) as usize];
        let res_i2: (&mut [u64], &mut [u64]) = res_i1.1.split_at_mut(1usize);
        (&mut c)[0usize] = lib::inttypes_intrinsics::add_carry_u64((&c)[0usize], t12, t22, res_i2.1)
    });
    (&c)[0usize]
}

/**
Write `a - b mod 2^4096` in `res`.

    This function returns the carry.

    The arguments a, b and res are meant to be 4096-bit bignums, i.e.
uint64_t[64] +*/ +pub fn sub(a: &[u64], b: &[u64], res: &mut [u64]) -> u64 { + let mut c: [u64; 1] = [0u64; 1usize]; + krml::unroll_for!(16, "i", 0u32, 1u32, { + let t1: u64 = a[4u32.wrapping_mul(i) as usize]; + let t2: u64 = b[4u32.wrapping_mul(i) as usize]; + let res_i: (&mut [u64], &mut [u64]) = res.split_at_mut(4u32.wrapping_mul(i) as usize); + (&mut c)[0usize] = lib::inttypes_intrinsics::sub_borrow_u64((&c)[0usize], t1, t2, res_i.1); + let t10: u64 = a[4u32.wrapping_mul(i).wrapping_add(1u32) as usize]; + let t20: u64 = b[4u32.wrapping_mul(i).wrapping_add(1u32) as usize]; + let res_i0: (&mut [u64], &mut [u64]) = res_i.1.split_at_mut(1usize); + (&mut c)[0usize] = + lib::inttypes_intrinsics::sub_borrow_u64((&c)[0usize], t10, t20, res_i0.1); + let t11: u64 = a[4u32.wrapping_mul(i).wrapping_add(2u32) as usize]; + let t21: u64 = b[4u32.wrapping_mul(i).wrapping_add(2u32) as usize]; + let res_i1: (&mut [u64], &mut [u64]) = res_i0.1.split_at_mut(1usize); + (&mut c)[0usize] = + lib::inttypes_intrinsics::sub_borrow_u64((&c)[0usize], t11, t21, res_i1.1); + let t12: u64 = a[4u32.wrapping_mul(i).wrapping_add(3u32) as usize]; + let t22: u64 = b[4u32.wrapping_mul(i).wrapping_add(3u32) as usize]; + let res_i2: (&mut [u64], &mut [u64]) = res_i1.1.split_at_mut(1usize); + (&mut c)[0usize] = + lib::inttypes_intrinsics::sub_borrow_u64((&c)[0usize], t12, t22, res_i2.1) + }); + (&c)[0usize] +} + +/** +Write `(a + b) mod n` in `res`. + + The arguments a, b, n and the outparam res are meant to be 4096-bit bignums, i.e. uint64_t[64]. + + Before calling this function, the caller will need to ensure that the following + preconditions are observed. 
+ ā€¢ a < n + ā€¢ b < n +*/ +pub fn add_mod(n: &[u64], a: &[u64], b: &[u64], res: &mut [u64]) { + let mut c: [u64; 1] = [0u64; 1usize]; + krml::unroll_for!(16, "i", 0u32, 1u32, { + let t1: u64 = a[4u32.wrapping_mul(i) as usize]; + let t2: u64 = b[4u32.wrapping_mul(i) as usize]; + let res_i: (&mut [u64], &mut [u64]) = res.split_at_mut(4u32.wrapping_mul(i) as usize); + (&mut c)[0usize] = lib::inttypes_intrinsics::add_carry_u64((&c)[0usize], t1, t2, res_i.1); + let t10: u64 = a[4u32.wrapping_mul(i).wrapping_add(1u32) as usize]; + let t20: u64 = b[4u32.wrapping_mul(i).wrapping_add(1u32) as usize]; + let res_i0: (&mut [u64], &mut [u64]) = res_i.1.split_at_mut(1usize); + (&mut c)[0usize] = + lib::inttypes_intrinsics::add_carry_u64((&c)[0usize], t10, t20, res_i0.1); + let t11: u64 = a[4u32.wrapping_mul(i).wrapping_add(2u32) as usize]; + let t21: u64 = b[4u32.wrapping_mul(i).wrapping_add(2u32) as usize]; + let res_i1: (&mut [u64], &mut [u64]) = res_i0.1.split_at_mut(1usize); + (&mut c)[0usize] = + lib::inttypes_intrinsics::add_carry_u64((&c)[0usize], t11, t21, res_i1.1); + let t12: u64 = a[4u32.wrapping_mul(i).wrapping_add(3u32) as usize]; + let t22: u64 = b[4u32.wrapping_mul(i).wrapping_add(3u32) as usize]; + let res_i2: (&mut [u64], &mut [u64]) = res_i1.1.split_at_mut(1usize); + (&mut c)[0usize] = lib::inttypes_intrinsics::add_carry_u64((&c)[0usize], t12, t22, res_i2.1) + }); + let c0: u64 = (&c)[0usize]; + let mut tmp: [u64; 64] = [0u64; 64usize]; + let mut c1: [u64; 1] = [0u64; 1usize]; + krml::unroll_for!(16, "i", 0u32, 1u32, { + let t1: u64 = res[4u32.wrapping_mul(i) as usize]; + let t2: u64 = n[4u32.wrapping_mul(i) as usize]; + let res_i: (&mut [u64], &mut [u64]) = tmp.split_at_mut(4u32.wrapping_mul(i) as usize); + (&mut c1)[0usize] = + lib::inttypes_intrinsics::sub_borrow_u64((&c1)[0usize], t1, t2, res_i.1); + let t10: u64 = res[4u32.wrapping_mul(i).wrapping_add(1u32) as usize]; + let t20: u64 = n[4u32.wrapping_mul(i).wrapping_add(1u32) as usize]; + let res_i0: 
(&mut [u64], &mut [u64]) = res_i.1.split_at_mut(1usize); + (&mut c1)[0usize] = + lib::inttypes_intrinsics::sub_borrow_u64((&c1)[0usize], t10, t20, res_i0.1); + let t11: u64 = res[4u32.wrapping_mul(i).wrapping_add(2u32) as usize]; + let t21: u64 = n[4u32.wrapping_mul(i).wrapping_add(2u32) as usize]; + let res_i1: (&mut [u64], &mut [u64]) = res_i0.1.split_at_mut(1usize); + (&mut c1)[0usize] = + lib::inttypes_intrinsics::sub_borrow_u64((&c1)[0usize], t11, t21, res_i1.1); + let t12: u64 = res[4u32.wrapping_mul(i).wrapping_add(3u32) as usize]; + let t22: u64 = n[4u32.wrapping_mul(i).wrapping_add(3u32) as usize]; + let res_i2: (&mut [u64], &mut [u64]) = res_i1.1.split_at_mut(1usize); + (&mut c1)[0usize] = + lib::inttypes_intrinsics::sub_borrow_u64((&c1)[0usize], t12, t22, res_i2.1) + }); + let c10: u64 = (&c1)[0usize]; + let c2: u64 = c0.wrapping_sub(c10); + for i in 0u32..64u32 { + let x: u64 = c2 & res[i as usize] | !c2 & (&tmp)[i as usize]; + let os: (&mut [u64], &mut [u64]) = res.split_at_mut(0usize); + os.1[i as usize] = x + } +} + +/** +Write `(a - b) mod n` in `res`. + + The arguments a, b, n and the outparam res are meant to be 4096-bit bignums, i.e. uint64_t[64]. + + Before calling this function, the caller will need to ensure that the following + preconditions are observed. 
+ ā€¢ a < n + ā€¢ b < n +*/ +pub fn sub_mod(n: &[u64], a: &[u64], b: &[u64], res: &mut [u64]) { + let mut c: [u64; 1] = [0u64; 1usize]; + krml::unroll_for!(16, "i", 0u32, 1u32, { + let t1: u64 = a[4u32.wrapping_mul(i) as usize]; + let t2: u64 = b[4u32.wrapping_mul(i) as usize]; + let res_i: (&mut [u64], &mut [u64]) = res.split_at_mut(4u32.wrapping_mul(i) as usize); + (&mut c)[0usize] = lib::inttypes_intrinsics::sub_borrow_u64((&c)[0usize], t1, t2, res_i.1); + let t10: u64 = a[4u32.wrapping_mul(i).wrapping_add(1u32) as usize]; + let t20: u64 = b[4u32.wrapping_mul(i).wrapping_add(1u32) as usize]; + let res_i0: (&mut [u64], &mut [u64]) = res_i.1.split_at_mut(1usize); + (&mut c)[0usize] = + lib::inttypes_intrinsics::sub_borrow_u64((&c)[0usize], t10, t20, res_i0.1); + let t11: u64 = a[4u32.wrapping_mul(i).wrapping_add(2u32) as usize]; + let t21: u64 = b[4u32.wrapping_mul(i).wrapping_add(2u32) as usize]; + let res_i1: (&mut [u64], &mut [u64]) = res_i0.1.split_at_mut(1usize); + (&mut c)[0usize] = + lib::inttypes_intrinsics::sub_borrow_u64((&c)[0usize], t11, t21, res_i1.1); + let t12: u64 = a[4u32.wrapping_mul(i).wrapping_add(3u32) as usize]; + let t22: u64 = b[4u32.wrapping_mul(i).wrapping_add(3u32) as usize]; + let res_i2: (&mut [u64], &mut [u64]) = res_i1.1.split_at_mut(1usize); + (&mut c)[0usize] = + lib::inttypes_intrinsics::sub_borrow_u64((&c)[0usize], t12, t22, res_i2.1) + }); + let c0: u64 = (&c)[0usize]; + let mut tmp: [u64; 64] = [0u64; 64usize]; + let mut c1: [u64; 1] = [0u64; 1usize]; + krml::unroll_for!(16, "i", 0u32, 1u32, { + let t1: u64 = res[4u32.wrapping_mul(i) as usize]; + let t2: u64 = n[4u32.wrapping_mul(i) as usize]; + let res_i: (&mut [u64], &mut [u64]) = tmp.split_at_mut(4u32.wrapping_mul(i) as usize); + (&mut c1)[0usize] = lib::inttypes_intrinsics::add_carry_u64((&c1)[0usize], t1, t2, res_i.1); + let t10: u64 = res[4u32.wrapping_mul(i).wrapping_add(1u32) as usize]; + let t20: u64 = n[4u32.wrapping_mul(i).wrapping_add(1u32) as usize]; + let res_i0: 
(&mut [u64], &mut [u64]) = res_i.1.split_at_mut(1usize); + (&mut c1)[0usize] = + lib::inttypes_intrinsics::add_carry_u64((&c1)[0usize], t10, t20, res_i0.1); + let t11: u64 = res[4u32.wrapping_mul(i).wrapping_add(2u32) as usize]; + let t21: u64 = n[4u32.wrapping_mul(i).wrapping_add(2u32) as usize]; + let res_i1: (&mut [u64], &mut [u64]) = res_i0.1.split_at_mut(1usize); + (&mut c1)[0usize] = + lib::inttypes_intrinsics::add_carry_u64((&c1)[0usize], t11, t21, res_i1.1); + let t12: u64 = res[4u32.wrapping_mul(i).wrapping_add(3u32) as usize]; + let t22: u64 = n[4u32.wrapping_mul(i).wrapping_add(3u32) as usize]; + let res_i2: (&mut [u64], &mut [u64]) = res_i1.1.split_at_mut(1usize); + (&mut c1)[0usize] = + lib::inttypes_intrinsics::add_carry_u64((&c1)[0usize], t12, t22, res_i2.1) + }); + let c10: u64 = (&c1)[0usize]; + lowstar::ignore::ignore::(c10); + let c2: u64 = 0u64.wrapping_sub(c0); + for i in 0u32..64u32 { + let x: u64 = c2 & (&tmp)[i as usize] | !c2 & res[i as usize]; + let os: (&mut [u64], &mut [u64]) = res.split_at_mut(0usize); + os.1[i as usize] = x + } +} + +/** +Write `a * b` in `res`. + + The arguments a and b are meant to be 4096-bit bignums, i.e. uint64_t[64]. + The outparam res is meant to be a 8192-bit bignum, i.e. uint64_t[128]. +*/ +pub fn mul(a: &[u64], b: &[u64], res: &mut [u64]) { + let mut tmp: [u64; 256] = [0u64; 256usize]; + super::bignum::bn_karatsuba_mul_uint64(64u32, a, b, &mut tmp, res) +} + +/** +Write `a * a` in `res`. + + The argument a is meant to be a 4096-bit bignum, i.e. uint64_t[64]. + The outparam res is meant to be a 8192-bit bignum, i.e. uint64_t[128]. 
+*/ +pub fn sqr(a: &[u64], res: &mut [u64]) { + let mut tmp: [u64; 256] = [0u64; 256usize]; + super::bignum::bn_karatsuba_sqr_uint64(64u32, a, &mut tmp, res) +} + +#[inline] +fn precompr2(nBits: u32, n: &[u64], res: &mut [u64]) { + (res[0usize..64usize]).copy_from_slice(&[0u64; 64usize]); + let i: u32 = nBits.wrapping_div(64u32); + let j: u32 = nBits.wrapping_rem(64u32); + res[i as usize] |= 1u64.wrapping_shl(j); + for _i in 0u32..8192u32.wrapping_sub(nBits) { + let mut a_copy: [u64; 64] = [0u64; 64usize]; + let mut b_copy: [u64; 64] = [0u64; 64usize]; + ((&mut a_copy)[0usize..64usize]).copy_from_slice(&res[0usize..64usize]); + ((&mut b_copy)[0usize..64usize]).copy_from_slice(&res[0usize..64usize]); + super::bignum4096::add_mod(n, &a_copy, &b_copy, res) + } +} + +#[inline] +fn reduction(n: &[u64], nInv: u64, c: &mut [u64], res: &mut [u64]) { + let mut c0: [u64; 1] = [0u64; 1usize]; + for i in 0u32..64u32 { + let qj: u64 = nInv.wrapping_mul(c[i as usize]); + let res_j: (&mut [u64], &mut [u64]) = c.split_at_mut(i as usize); + let mut c1: [u64; 1] = [0u64; 1usize]; + krml::unroll_for!(16, "i0", 0u32, 1u32, { + let a_i: u64 = n[4u32.wrapping_mul(i0) as usize]; + let res_i: (&mut [u64], &mut [u64]) = + res_j.1.split_at_mut(4u32.wrapping_mul(i0) as usize); + (&mut c1)[0usize] = + super::bignum_base::mul_wide_add2_u64(a_i, qj, (&c1)[0usize], res_i.1); + let a_i0: u64 = n[4u32.wrapping_mul(i0).wrapping_add(1u32) as usize]; + let res_i0: (&mut [u64], &mut [u64]) = res_i.1.split_at_mut(1usize); + (&mut c1)[0usize] = + super::bignum_base::mul_wide_add2_u64(a_i0, qj, (&c1)[0usize], res_i0.1); + let a_i1: u64 = n[4u32.wrapping_mul(i0).wrapping_add(2u32) as usize]; + let res_i1: (&mut [u64], &mut [u64]) = res_i0.1.split_at_mut(1usize); + (&mut c1)[0usize] = + super::bignum_base::mul_wide_add2_u64(a_i1, qj, (&c1)[0usize], res_i1.1); + let a_i2: u64 = n[4u32.wrapping_mul(i0).wrapping_add(3u32) as usize]; + let res_i2: (&mut [u64], &mut [u64]) = res_i1.1.split_at_mut(1usize); + 
(&mut c1)[0usize] = + super::bignum_base::mul_wide_add2_u64(a_i2, qj, (&c1)[0usize], res_i2.1) + }); + let r: u64 = (&c1)[0usize]; + let c10: u64 = r; + let res_j0: u64 = c[64u32.wrapping_add(i) as usize]; + let resb: (&mut [u64], &mut [u64]) = c.split_at_mut(i as usize + 64usize); + (&mut c0)[0usize] = + lib::inttypes_intrinsics::add_carry_u64((&c0)[0usize], c10, res_j0, resb.1) + } + (res[0usize..64usize]).copy_from_slice(&(&c[64usize..])[0usize..64usize]); + let c00: u64 = (&c0)[0usize]; + let mut tmp: [u64; 64] = [0u64; 64usize]; + let mut c1: [u64; 1] = [0u64; 1usize]; + krml::unroll_for!(16, "i", 0u32, 1u32, { + let t1: u64 = res[4u32.wrapping_mul(i) as usize]; + let t2: u64 = n[4u32.wrapping_mul(i) as usize]; + let res_i: (&mut [u64], &mut [u64]) = tmp.split_at_mut(4u32.wrapping_mul(i) as usize); + (&mut c1)[0usize] = + lib::inttypes_intrinsics::sub_borrow_u64((&c1)[0usize], t1, t2, res_i.1); + let t10: u64 = res[4u32.wrapping_mul(i).wrapping_add(1u32) as usize]; + let t20: u64 = n[4u32.wrapping_mul(i).wrapping_add(1u32) as usize]; + let res_i0: (&mut [u64], &mut [u64]) = res_i.1.split_at_mut(1usize); + (&mut c1)[0usize] = + lib::inttypes_intrinsics::sub_borrow_u64((&c1)[0usize], t10, t20, res_i0.1); + let t11: u64 = res[4u32.wrapping_mul(i).wrapping_add(2u32) as usize]; + let t21: u64 = n[4u32.wrapping_mul(i).wrapping_add(2u32) as usize]; + let res_i1: (&mut [u64], &mut [u64]) = res_i0.1.split_at_mut(1usize); + (&mut c1)[0usize] = + lib::inttypes_intrinsics::sub_borrow_u64((&c1)[0usize], t11, t21, res_i1.1); + let t12: u64 = res[4u32.wrapping_mul(i).wrapping_add(3u32) as usize]; + let t22: u64 = n[4u32.wrapping_mul(i).wrapping_add(3u32) as usize]; + let res_i2: (&mut [u64], &mut [u64]) = res_i1.1.split_at_mut(1usize); + (&mut c1)[0usize] = + lib::inttypes_intrinsics::sub_borrow_u64((&c1)[0usize], t12, t22, res_i2.1) + }); + let c10: u64 = (&c1)[0usize]; + let c2: u64 = c00.wrapping_sub(c10); + for i in 0u32..64u32 { + let x: u64 = c2 & res[i as usize] | !c2 
& (&tmp)[i as usize]; + let os: (&mut [u64], &mut [u64]) = res.split_at_mut(0usize); + os.1[i as usize] = x + } +} + +#[inline] +fn to(n: &[u64], nInv: u64, r2: &[u64], a: &[u64], aM: &mut [u64]) { + let mut c: [u64; 128] = [0u64; 128usize]; + super::bignum4096::mul(a, r2, &mut c); + super::bignum4096::reduction(n, nInv, &mut c, aM) +} + +#[inline] +fn from(n: &[u64], nInv_u64: u64, aM: &[u64], a: &mut [u64]) { + let mut tmp: [u64; 128] = [0u64; 128usize]; + ((&mut tmp)[0usize..64usize]).copy_from_slice(&aM[0usize..64usize]); + super::bignum4096::reduction(n, nInv_u64, &mut tmp, a) +} + +#[inline] +fn areduction(n: &[u64], nInv: u64, c: &mut [u64], res: &mut [u64]) { + let mut c0: [u64; 1] = [0u64; 1usize]; + for i in 0u32..64u32 { + let qj: u64 = nInv.wrapping_mul(c[i as usize]); + let res_j: (&mut [u64], &mut [u64]) = c.split_at_mut(i as usize); + let mut c1: [u64; 1] = [0u64; 1usize]; + krml::unroll_for!(16, "i0", 0u32, 1u32, { + let a_i: u64 = n[4u32.wrapping_mul(i0) as usize]; + let res_i: (&mut [u64], &mut [u64]) = + res_j.1.split_at_mut(4u32.wrapping_mul(i0) as usize); + (&mut c1)[0usize] = + super::bignum_base::mul_wide_add2_u64(a_i, qj, (&c1)[0usize], res_i.1); + let a_i0: u64 = n[4u32.wrapping_mul(i0).wrapping_add(1u32) as usize]; + let res_i0: (&mut [u64], &mut [u64]) = res_i.1.split_at_mut(1usize); + (&mut c1)[0usize] = + super::bignum_base::mul_wide_add2_u64(a_i0, qj, (&c1)[0usize], res_i0.1); + let a_i1: u64 = n[4u32.wrapping_mul(i0).wrapping_add(2u32) as usize]; + let res_i1: (&mut [u64], &mut [u64]) = res_i0.1.split_at_mut(1usize); + (&mut c1)[0usize] = + super::bignum_base::mul_wide_add2_u64(a_i1, qj, (&c1)[0usize], res_i1.1); + let a_i2: u64 = n[4u32.wrapping_mul(i0).wrapping_add(3u32) as usize]; + let res_i2: (&mut [u64], &mut [u64]) = res_i1.1.split_at_mut(1usize); + (&mut c1)[0usize] = + super::bignum_base::mul_wide_add2_u64(a_i2, qj, (&c1)[0usize], res_i2.1) + }); + let r: u64 = (&c1)[0usize]; + let c10: u64 = r; + let res_j0: u64 = 
c[64u32.wrapping_add(i) as usize]; + let resb: (&mut [u64], &mut [u64]) = c.split_at_mut(i as usize + 64usize); + (&mut c0)[0usize] = + lib::inttypes_intrinsics::add_carry_u64((&c0)[0usize], c10, res_j0, resb.1) + } + (res[0usize..64usize]).copy_from_slice(&(&c[64usize..])[0usize..64usize]); + let c00: u64 = (&c0)[0usize]; + let mut tmp: [u64; 64] = [0u64; 64usize]; + let c1: u64 = super::bignum4096::sub(res, n, &mut tmp); + lowstar::ignore::ignore::(c1); + let m: u64 = 0u64.wrapping_sub(c00); + for i in 0u32..64u32 { + let x: u64 = m & (&tmp)[i as usize] | !m & res[i as usize]; + let os: (&mut [u64], &mut [u64]) = res.split_at_mut(0usize); + os.1[i as usize] = x + } +} + +#[inline] +fn amont_mul(n: &[u64], nInv_u64: u64, aM: &[u64], bM: &[u64], resM: &mut [u64]) { + let mut c: [u64; 128] = [0u64; 128usize]; + super::bignum4096::mul(aM, bM, &mut c); + super::bignum4096::areduction(n, nInv_u64, &mut c, resM) +} + +#[inline] +fn amont_sqr(n: &[u64], nInv_u64: u64, aM: &[u64], resM: &mut [u64]) { + let mut c: [u64; 128] = [0u64; 128usize]; + super::bignum4096::sqr(aM, &mut c); + super::bignum4096::areduction(n, nInv_u64, &mut c, resM) +} + +#[inline] +fn bn_slow_precomp(n: &[u64], mu: u64, r2: &[u64], a: &[u64], res: &mut [u64]) { + let mut a_mod: [u64; 64] = [0u64; 64usize]; + let mut a1: [u64; 128] = [0u64; 128usize]; + ((&mut a1)[0usize..128usize]).copy_from_slice(&a[0usize..128usize]); + super::bignum4096::areduction(n, mu, &mut a1, &mut a_mod); + super::bignum4096::to(n, mu, r2, &a_mod, res) +} + +/** +Write `a mod n` in `res`. + + The argument a is meant to be a 8192-bit bignum, i.e. uint64_t[128]. + The argument n and the outparam res are meant to be 4096-bit bignums, i.e. uint64_t[64]. + + The function returns false if any of the following preconditions are violated, + true otherwise. 
+ ā€¢ 1 < n + ā€¢ n % 2 = 1 +*/ +pub fn r#mod(n: &[u64], a: &[u64], res: &mut [u64]) -> bool { + let mut one: [u64; 64] = [0u64; 64usize]; + ((&mut one)[0usize..64usize]).copy_from_slice(&[0u64; 64usize]); + (&mut one)[0usize] = 1u64; + let bit0: u64 = n[0usize] & 1u64; + let m0: u64 = 0u64.wrapping_sub(bit0); + let mut acc: [u64; 1] = [0u64; 1usize]; + for i in 0u32..64u32 { + let beq: u64 = fstar::uint64::eq_mask((&one)[i as usize], n[i as usize]); + let blt: u64 = !fstar::uint64::gte_mask((&one)[i as usize], n[i as usize]); + (&mut acc)[0usize] = beq & (&acc)[0usize] | !beq & blt + } + let m1: u64 = (&acc)[0usize]; + let is_valid_m: u64 = m0 & m1; + let nBits: u32 = 64u32.wrapping_mul(super::bignum_base::bn_get_top_index_u64(64u32, n) as u32); + if is_valid_m == 0xFFFFFFFFFFFFFFFFu64 { + let mut r2: [u64; 64] = [0u64; 64usize]; + super::bignum4096::precompr2(nBits, n, &mut r2); + let mu: u64 = super::bignum::mod_inv_uint64(n[0usize]); + super::bignum4096::bn_slow_precomp(n, mu, &r2, a, res) + } else { + (res[0usize..64usize]).copy_from_slice(&[0u64; 64usize]) + }; + is_valid_m == 0xFFFFFFFFFFFFFFFFu64 +} + +fn exp_check(n: &[u64], a: &[u64], bBits: u32, b: &[u64]) -> u64 { + let mut one: [u64; 64] = [0u64; 64usize]; + ((&mut one)[0usize..64usize]).copy_from_slice(&[0u64; 64usize]); + (&mut one)[0usize] = 1u64; + let bit0: u64 = n[0usize] & 1u64; + let m0: u64 = 0u64.wrapping_sub(bit0); + let mut acc: [u64; 1] = [0u64; 1usize]; + for i in 0u32..64u32 { + let beq: u64 = fstar::uint64::eq_mask((&one)[i as usize], n[i as usize]); + let blt: u64 = !fstar::uint64::gte_mask((&one)[i as usize], n[i as usize]); + (&mut acc)[0usize] = beq & (&acc)[0usize] | !beq & blt + } + let m1: u64 = (&acc)[0usize]; + let m00: u64 = m0 & m1; + let bLen: u32 = if bBits == 0u32 { + 1u32 + } else { + bBits + .wrapping_sub(1u32) + .wrapping_div(64u32) + .wrapping_add(1u32) + }; + let m10: u64 = if bBits < 64u32.wrapping_mul(bLen) { + let mut b2: Box<[u64]> = vec![0u64; bLen as 
usize].into_boxed_slice(); + let i: u32 = bBits.wrapping_div(64u32); + let j: u32 = bBits.wrapping_rem(64u32); + (&mut b2)[i as usize] = (&b2)[i as usize] | 1u64.wrapping_shl(j); + let mut acc0: [u64; 1] = [0u64; 1usize]; + for i0 in 0u32..bLen { + let beq: u64 = fstar::uint64::eq_mask(b[i0 as usize], (&b2)[i0 as usize]); + let blt: u64 = !fstar::uint64::gte_mask(b[i0 as usize], (&b2)[i0 as usize]); + (&mut acc0)[0usize] = beq & (&acc0)[0usize] | !beq & blt + } + let res: u64 = (&acc0)[0usize]; + res + } else { + 0xFFFFFFFFFFFFFFFFu64 + }; + let mut acc0: [u64; 1] = [0u64; 1usize]; + for i in 0u32..64u32 { + let beq: u64 = fstar::uint64::eq_mask(a[i as usize], n[i as usize]); + let blt: u64 = !fstar::uint64::gte_mask(a[i as usize], n[i as usize]); + (&mut acc0)[0usize] = beq & (&acc0)[0usize] | !beq & blt + } + let m2: u64 = (&acc0)[0usize]; + let m: u64 = m10 & m2; + m00 & m +} + +#[inline] +fn exp_vartime_precomp( + n: &[u64], + mu: u64, + r2: &[u64], + a: &[u64], + bBits: u32, + b: &[u64], + res: &mut [u64], +) { + if bBits < 200u32 { + let mut aM: [u64; 64] = [0u64; 64usize]; + super::bignum4096::to(n, mu, r2, a, &mut aM); + let mut resM: [u64; 64] = [0u64; 64usize]; + let mut ctx: [u64; 128] = [0u64; 128usize]; + ((&mut ctx)[0usize..64usize]).copy_from_slice(&n[0usize..64usize]); + ((&mut ctx)[64usize..64usize + 64usize]).copy_from_slice(&r2[0usize..64usize]); + let ctx_n: (&[u64], &[u64]) = ctx.split_at(0usize); + let ctx_r2: (&[u64], &[u64]) = ctx_n.1.split_at(64usize); + super::bignum4096::from(ctx_r2.0, mu, ctx_r2.1, &mut resM); + lowstar::ignore::ignore::<&[u64]>(&ctx); + for i in 0u32..bBits { + let i1: u32 = i.wrapping_div(64u32); + let j: u32 = i.wrapping_rem(64u32); + let tmp: u64 = b[i1 as usize]; + let bit: u64 = tmp.wrapping_shr(j) & 1u64; + if bit != 0u64 { + let mut aM_copy: [u64; 64] = [0u64; 64usize]; + ((&mut aM_copy)[0usize..64usize]).copy_from_slice(&(&resM)[0usize..64usize]); + let ctx_n0: (&[u64], &[u64]) = ctx.split_at(0usize); + 
super::bignum4096::amont_mul(ctx_n0.1, mu, &aM_copy, &aM, &mut resM); + lowstar::ignore::ignore::<&[u64]>(&ctx) + }; + let mut aM_copy: [u64; 64] = [0u64; 64usize]; + ((&mut aM_copy)[0usize..64usize]).copy_from_slice(&(&aM)[0usize..64usize]); + let ctx_n0: (&[u64], &[u64]) = ctx.split_at(0usize); + super::bignum4096::amont_sqr(ctx_n0.1, mu, &aM_copy, &mut aM); + lowstar::ignore::ignore::<&[u64]>(&ctx) + } + super::bignum4096::from(n, mu, &resM, res) + } else { + let mut aM: [u64; 64] = [0u64; 64usize]; + super::bignum4096::to(n, mu, r2, a, &mut aM); + let mut resM: [u64; 64] = [0u64; 64usize]; + let bLen: u32 = if bBits == 0u32 { + 1u32 + } else { + bBits + .wrapping_sub(1u32) + .wrapping_div(64u32) + .wrapping_add(1u32) + }; + let mut ctx: [u64; 128] = [0u64; 128usize]; + ((&mut ctx)[0usize..64usize]).copy_from_slice(&n[0usize..64usize]); + ((&mut ctx)[64usize..64usize + 64usize]).copy_from_slice(&r2[0usize..64usize]); + let mut table: [u64; 1024] = [0u64; 1024usize]; + let mut tmp: [u64; 64] = [0u64; 64usize]; + let t0: (&mut [u64], &mut [u64]) = table.split_at_mut(0usize); + let t1: (&mut [u64], &mut [u64]) = t0.1.split_at_mut(64usize); + let ctx_n: (&[u64], &[u64]) = ctx.split_at(0usize); + let ctx_r2: (&[u64], &[u64]) = ctx_n.1.split_at(64usize); + super::bignum4096::from(ctx_r2.0, mu, ctx_r2.1, t1.0); + lowstar::ignore::ignore::<&[u64]>(&ctx); + (t1.1[0usize..64usize]).copy_from_slice(&(&aM)[0usize..64usize]); + lowstar::ignore::ignore::<&[u64]>(&table); + krml::unroll_for!(7, "i", 0u32, 1u32, { + let t11: (&[u64], &[u64]) = + table.split_at(i.wrapping_add(1u32).wrapping_mul(64u32) as usize); + let mut aM_copy: [u64; 64] = [0u64; 64usize]; + ((&mut aM_copy)[0usize..64usize]).copy_from_slice(&t11.1[0usize..64usize]); + let ctx_n0: (&[u64], &[u64]) = ctx.split_at(0usize); + super::bignum4096::amont_sqr(ctx_n0.1, mu, &aM_copy, &mut tmp); + lowstar::ignore::ignore::<&[u64]>(&ctx); + ((&mut table)[2u32.wrapping_mul(i).wrapping_add(2u32).wrapping_mul(64u32) as 
usize + ..2u32.wrapping_mul(i).wrapping_add(2u32).wrapping_mul(64u32) as usize + 64usize]) + .copy_from_slice(&(&tmp)[0usize..64usize]); + let t2: (&[u64], &[u64]) = table + .split_at(2u32.wrapping_mul(i).wrapping_add(2u32).wrapping_mul(64u32) as usize); + let mut aM_copy0: [u64; 64] = [0u64; 64usize]; + ((&mut aM_copy0)[0usize..64usize]).copy_from_slice(&(&aM)[0usize..64usize]); + let ctx_n1: (&[u64], &[u64]) = ctx.split_at(0usize); + super::bignum4096::amont_mul(ctx_n1.1, mu, &aM_copy0, t2.1, &mut tmp); + lowstar::ignore::ignore::<&[u64]>(&ctx); + ((&mut table)[2u32.wrapping_mul(i).wrapping_add(3u32).wrapping_mul(64u32) as usize + ..2u32.wrapping_mul(i).wrapping_add(3u32).wrapping_mul(64u32) as usize + 64usize]) + .copy_from_slice(&(&tmp)[0usize..64usize]) + }); + if bBits.wrapping_rem(4u32) != 0u32 { + let i: u32 = bBits.wrapping_div(4u32).wrapping_mul(4u32); + let bits_c: u64 = super::bignum_base::bn_get_bits_u64(bLen, b, i, 4u32); + let bits_l32: u32 = bits_c as u32; + let a_bits_l: (&[u64], &[u64]) = table.split_at(bits_l32.wrapping_mul(64u32) as usize); + ((&mut resM)[0usize..64usize]).copy_from_slice(&a_bits_l.1[0usize..64usize]) + } else { + let ctx_n0: (&[u64], &[u64]) = ctx.split_at(0usize); + let ctx_r20: (&[u64], &[u64]) = ctx_n0.1.split_at(64usize); + super::bignum4096::from(ctx_r20.0, mu, ctx_r20.1, &mut resM); + lowstar::ignore::ignore::<&[u64]>(&ctx) + }; + let mut tmp0: [u64; 64] = [0u64; 64usize]; + for i in 0u32..bBits.wrapping_div(4u32) { + krml::unroll_for!(4, "_i", 0u32, 1u32, { + let mut aM_copy: [u64; 64] = [0u64; 64usize]; + ((&mut aM_copy)[0usize..64usize]).copy_from_slice(&(&resM)[0usize..64usize]); + let ctx_n0: (&[u64], &[u64]) = ctx.split_at(0usize); + super::bignum4096::amont_sqr(ctx_n0.1, mu, &aM_copy, &mut resM); + lowstar::ignore::ignore::<&[u64]>(&ctx) + }); + let k: u32 = bBits + .wrapping_sub(bBits.wrapping_rem(4u32)) + .wrapping_sub(4u32.wrapping_mul(i)) + .wrapping_sub(4u32); + let bits_l: u64 = 
super::bignum_base::bn_get_bits_u64(bLen, b, k, 4u32); + lowstar::ignore::ignore::<&[u64]>(&table); + let bits_l32: u32 = bits_l as u32; + let a_bits_l: (&[u64], &[u64]) = table.split_at(bits_l32.wrapping_mul(64u32) as usize); + ((&mut tmp0)[0usize..64usize]).copy_from_slice(&a_bits_l.1[0usize..64usize]); + let mut aM_copy: [u64; 64] = [0u64; 64usize]; + ((&mut aM_copy)[0usize..64usize]).copy_from_slice(&(&resM)[0usize..64usize]); + let ctx_n0: (&[u64], &[u64]) = ctx.split_at(0usize); + super::bignum4096::amont_mul(ctx_n0.1, mu, &aM_copy, &tmp0, &mut resM); + lowstar::ignore::ignore::<&[u64]>(&ctx) + } + super::bignum4096::from(n, mu, &resM, res) + } +} + +#[inline] +fn exp_consttime_precomp( + n: &[u64], + mu: u64, + r2: &[u64], + a: &[u64], + bBits: u32, + b: &[u64], + res: &mut [u64], +) { + if bBits < 200u32 { + let mut aM: [u64; 64] = [0u64; 64usize]; + super::bignum4096::to(n, mu, r2, a, &mut aM); + let mut resM: [u64; 64] = [0u64; 64usize]; + let mut ctx: [u64; 128] = [0u64; 128usize]; + ((&mut ctx)[0usize..64usize]).copy_from_slice(&n[0usize..64usize]); + ((&mut ctx)[64usize..64usize + 64usize]).copy_from_slice(&r2[0usize..64usize]); + let mut sw: [u64; 1] = [0u64; 1usize]; + let ctx_n: (&[u64], &[u64]) = ctx.split_at(0usize); + let ctx_r2: (&[u64], &[u64]) = ctx_n.1.split_at(64usize); + super::bignum4096::from(ctx_r2.0, mu, ctx_r2.1, &mut resM); + lowstar::ignore::ignore::<&[u64]>(&ctx); + for i in 0u32..bBits { + let i1: u32 = bBits.wrapping_sub(i).wrapping_sub(1u32).wrapping_div(64u32); + let j: u32 = bBits.wrapping_sub(i).wrapping_sub(1u32).wrapping_rem(64u32); + let tmp: u64 = b[i1 as usize]; + let bit: u64 = tmp.wrapping_shr(j) & 1u64; + let sw1: u64 = bit ^ (&sw)[0usize]; + for i0 in 0u32..64u32 { + let dummy: u64 = + 0u64.wrapping_sub(sw1) & ((&resM)[i0 as usize] ^ (&aM)[i0 as usize]); + (&mut resM)[i0 as usize] = (&resM)[i0 as usize] ^ dummy; + (&mut aM)[i0 as usize] = (&aM)[i0 as usize] ^ dummy + } + let mut aM_copy: [u64; 64] = [0u64; 64usize]; + 
((&mut aM_copy)[0usize..64usize]).copy_from_slice(&(&aM)[0usize..64usize]); + let ctx_n0: (&[u64], &[u64]) = ctx.split_at(0usize); + super::bignum4096::amont_mul(ctx_n0.1, mu, &aM_copy, &resM, &mut aM); + lowstar::ignore::ignore::<&[u64]>(&ctx); + let mut aM_copy0: [u64; 64] = [0u64; 64usize]; + ((&mut aM_copy0)[0usize..64usize]).copy_from_slice(&(&resM)[0usize..64usize]); + let ctx_n1: (&[u64], &[u64]) = ctx.split_at(0usize); + super::bignum4096::amont_sqr(ctx_n1.1, mu, &aM_copy0, &mut resM); + lowstar::ignore::ignore::<&[u64]>(&ctx); + (&mut sw)[0usize] = bit + } + let sw0: u64 = (&sw)[0usize]; + for i in 0u32..64u32 { + let dummy: u64 = 0u64.wrapping_sub(sw0) & ((&resM)[i as usize] ^ (&aM)[i as usize]); + (&mut resM)[i as usize] = (&resM)[i as usize] ^ dummy; + (&mut aM)[i as usize] = (&aM)[i as usize] ^ dummy + } + super::bignum4096::from(n, mu, &resM, res) + } else { + let mut aM: [u64; 64] = [0u64; 64usize]; + super::bignum4096::to(n, mu, r2, a, &mut aM); + let mut resM: [u64; 64] = [0u64; 64usize]; + let bLen: u32 = if bBits == 0u32 { + 1u32 + } else { + bBits + .wrapping_sub(1u32) + .wrapping_div(64u32) + .wrapping_add(1u32) + }; + let mut ctx: [u64; 128] = [0u64; 128usize]; + ((&mut ctx)[0usize..64usize]).copy_from_slice(&n[0usize..64usize]); + ((&mut ctx)[64usize..64usize + 64usize]).copy_from_slice(&r2[0usize..64usize]); + let mut table: [u64; 1024] = [0u64; 1024usize]; + let mut tmp: [u64; 64] = [0u64; 64usize]; + let t0: (&mut [u64], &mut [u64]) = table.split_at_mut(0usize); + let t1: (&mut [u64], &mut [u64]) = t0.1.split_at_mut(64usize); + let ctx_n: (&[u64], &[u64]) = ctx.split_at(0usize); + let ctx_r2: (&[u64], &[u64]) = ctx_n.1.split_at(64usize); + super::bignum4096::from(ctx_r2.0, mu, ctx_r2.1, t1.0); + lowstar::ignore::ignore::<&[u64]>(&ctx); + (t1.1[0usize..64usize]).copy_from_slice(&(&aM)[0usize..64usize]); + lowstar::ignore::ignore::<&[u64]>(&table); + krml::unroll_for!(7, "i", 0u32, 1u32, { + let t11: (&[u64], &[u64]) = + 
table.split_at(i.wrapping_add(1u32).wrapping_mul(64u32) as usize); + let mut aM_copy: [u64; 64] = [0u64; 64usize]; + ((&mut aM_copy)[0usize..64usize]).copy_from_slice(&t11.1[0usize..64usize]); + let ctx_n0: (&[u64], &[u64]) = ctx.split_at(0usize); + super::bignum4096::amont_sqr(ctx_n0.1, mu, &aM_copy, &mut tmp); + lowstar::ignore::ignore::<&[u64]>(&ctx); + ((&mut table)[2u32.wrapping_mul(i).wrapping_add(2u32).wrapping_mul(64u32) as usize + ..2u32.wrapping_mul(i).wrapping_add(2u32).wrapping_mul(64u32) as usize + 64usize]) + .copy_from_slice(&(&tmp)[0usize..64usize]); + let t2: (&[u64], &[u64]) = table + .split_at(2u32.wrapping_mul(i).wrapping_add(2u32).wrapping_mul(64u32) as usize); + let mut aM_copy0: [u64; 64] = [0u64; 64usize]; + ((&mut aM_copy0)[0usize..64usize]).copy_from_slice(&(&aM)[0usize..64usize]); + let ctx_n1: (&[u64], &[u64]) = ctx.split_at(0usize); + super::bignum4096::amont_mul(ctx_n1.1, mu, &aM_copy0, t2.1, &mut tmp); + lowstar::ignore::ignore::<&[u64]>(&ctx); + ((&mut table)[2u32.wrapping_mul(i).wrapping_add(3u32).wrapping_mul(64u32) as usize + ..2u32.wrapping_mul(i).wrapping_add(3u32).wrapping_mul(64u32) as usize + 64usize]) + .copy_from_slice(&(&tmp)[0usize..64usize]) + }); + if bBits.wrapping_rem(4u32) != 0u32 { + let i: u32 = bBits.wrapping_div(4u32).wrapping_mul(4u32); + let bits_c: u64 = super::bignum_base::bn_get_bits_u64(bLen, b, i, 4u32); + ((&mut resM)[0usize..64usize]) + .copy_from_slice(&(&(&table)[0usize..] 
as &[u64])[0usize..64usize]); + krml::unroll_for!(15, "i0", 0u32, 1u32, { + let c: u64 = fstar::uint64::eq_mask(bits_c, i0.wrapping_add(1u32) as u64); + let res_j: (&[u64], &[u64]) = + table.split_at(i0.wrapping_add(1u32).wrapping_mul(64u32) as usize); + for i1 in 0u32..64u32 { + let x: u64 = c & res_j.1[i1 as usize] | !c & (&resM)[i1 as usize]; + let os: (&mut [u64], &mut [u64]) = resM.split_at_mut(0usize); + os.1[i1 as usize] = x + } + }) + } else { + let ctx_n0: (&[u64], &[u64]) = ctx.split_at(0usize); + let ctx_r20: (&[u64], &[u64]) = ctx_n0.1.split_at(64usize); + super::bignum4096::from(ctx_r20.0, mu, ctx_r20.1, &mut resM); + lowstar::ignore::ignore::<&[u64]>(&ctx) + }; + let mut tmp0: [u64; 64] = [0u64; 64usize]; + for i in 0u32..bBits.wrapping_div(4u32) { + krml::unroll_for!(4, "_i", 0u32, 1u32, { + let mut aM_copy: [u64; 64] = [0u64; 64usize]; + ((&mut aM_copy)[0usize..64usize]).copy_from_slice(&(&resM)[0usize..64usize]); + let ctx_n0: (&[u64], &[u64]) = ctx.split_at(0usize); + super::bignum4096::amont_sqr(ctx_n0.1, mu, &aM_copy, &mut resM); + lowstar::ignore::ignore::<&[u64]>(&ctx) + }); + let k: u32 = bBits + .wrapping_sub(bBits.wrapping_rem(4u32)) + .wrapping_sub(4u32.wrapping_mul(i)) + .wrapping_sub(4u32); + let bits_l: u64 = super::bignum_base::bn_get_bits_u64(bLen, b, k, 4u32); + lowstar::ignore::ignore::<&[u64]>(&table); + ((&mut tmp0)[0usize..64usize]) + .copy_from_slice(&(&(&table)[0usize..] 
as &[u64])[0usize..64usize]); + krml::unroll_for!(15, "i0", 0u32, 1u32, { + let c: u64 = fstar::uint64::eq_mask(bits_l, i0.wrapping_add(1u32) as u64); + let res_j: (&[u64], &[u64]) = + table.split_at(i0.wrapping_add(1u32).wrapping_mul(64u32) as usize); + for i1 in 0u32..64u32 { + let x: u64 = c & res_j.1[i1 as usize] | !c & (&tmp0)[i1 as usize]; + let os: (&mut [u64], &mut [u64]) = tmp0.split_at_mut(0usize); + os.1[i1 as usize] = x + } + }); + let mut aM_copy: [u64; 64] = [0u64; 64usize]; + ((&mut aM_copy)[0usize..64usize]).copy_from_slice(&(&resM)[0usize..64usize]); + let ctx_n0: (&[u64], &[u64]) = ctx.split_at(0usize); + super::bignum4096::amont_mul(ctx_n0.1, mu, &aM_copy, &tmp0, &mut resM); + lowstar::ignore::ignore::<&[u64]>(&ctx) + } + super::bignum4096::from(n, mu, &resM, res) + } +} + +#[inline] +fn exp_vartime(nBits: u32, n: &[u64], a: &[u64], bBits: u32, b: &[u64], res: &mut [u64]) { + let mut r2: [u64; 64] = [0u64; 64usize]; + super::bignum4096::precompr2(nBits, n, &mut r2); + let mu: u64 = super::bignum::mod_inv_uint64(n[0usize]); + super::bignum4096::exp_vartime_precomp(n, mu, &r2, a, bBits, b, res) +} + +#[inline] +fn exp_consttime(nBits: u32, n: &[u64], a: &[u64], bBits: u32, b: &[u64], res: &mut [u64]) { + let mut r2: [u64; 64] = [0u64; 64usize]; + super::bignum4096::precompr2(nBits, n, &mut r2); + let mu: u64 = super::bignum::mod_inv_uint64(n[0usize]); + super::bignum4096::exp_consttime_precomp(n, mu, &r2, a, bBits, b, res) +} + +/** +Write `a ^ b mod n` in `res`. + + The arguments a, n and the outparam res are meant to be 4096-bit bignums, i.e. uint64_t[64]. + + The argument b is a bignum of any size, and bBits is an upper bound on the + number of significant bits of b. A tighter bound results in faster execution + time. When in doubt, the number of bits for the bignum size is always a safe + default, e.g. if b is a 4096-bit bignum, bBits should be 4096. + + The function is *NOT* constant-time on the argument b. 
See the + mod_exp_consttime_* functions for constant-time variants. + + The function returns false if any of the following preconditions are violated, + true otherwise. + ā€¢ n % 2 = 1 + ā€¢ 1 < n + ā€¢ b < pow2 bBits + ā€¢ a < n +*/ +pub fn mod_exp_vartime(n: &[u64], a: &[u64], bBits: u32, b: &[u64], res: &mut [u64]) -> bool { + let is_valid_m: u64 = super::bignum4096::exp_check(n, a, bBits, b); + let nBits: u32 = 64u32.wrapping_mul(super::bignum_base::bn_get_top_index_u64(64u32, n) as u32); + if is_valid_m == 0xFFFFFFFFFFFFFFFFu64 { + super::bignum4096::exp_vartime(nBits, n, a, bBits, b, res) + } else { + (res[0usize..64usize]).copy_from_slice(&[0u64; 64usize]) + }; + is_valid_m == 0xFFFFFFFFFFFFFFFFu64 +} + +/** +Write `a ^ b mod n` in `res`. + + The arguments a, n and the outparam res are meant to be 4096-bit bignums, i.e. uint64_t[64]. + + The argument b is a bignum of any size, and bBits is an upper bound on the + number of significant bits of b. A tighter bound results in faster execution + time. When in doubt, the number of bits for the bignum size is always a safe + default, e.g. if b is a 4096-bit bignum, bBits should be 4096. + + This function is constant-time over its argument b, at the cost of a slower + execution time than mod_exp_vartime. + + The function returns false if any of the following preconditions are violated, + true otherwise. + ā€¢ n % 2 = 1 + ā€¢ 1 < n + ā€¢ b < pow2 bBits + ā€¢ a < n +*/ +pub fn mod_exp_consttime(n: &[u64], a: &[u64], bBits: u32, b: &[u64], res: &mut [u64]) -> bool { + let is_valid_m: u64 = super::bignum4096::exp_check(n, a, bBits, b); + let nBits: u32 = 64u32.wrapping_mul(super::bignum_base::bn_get_top_index_u64(64u32, n) as u32); + if is_valid_m == 0xFFFFFFFFFFFFFFFFu64 { + super::bignum4096::exp_consttime(nBits, n, a, bBits, b, res) + } else { + (res[0usize..64usize]).copy_from_slice(&[0u64; 64usize]) + }; + is_valid_m == 0xFFFFFFFFFFFFFFFFu64 +} + +/** +Write `a ^ (-1) mod n` in `res`. 
+ + The arguments a, n and the outparam res are meant to be 4096-bit bignums, i.e. uint64_t[64]. + + Before calling this function, the caller will need to ensure that the following + preconditions are observed. + ā€¢ n is a prime + + The function returns false if any of the following preconditions are violated, true otherwise. + ā€¢ n % 2 = 1 + ā€¢ 1 < n + ā€¢ 0 < a + ā€¢ a < n +*/ +pub fn mod_inv_prime_vartime(n: &[u64], a: &[u64], res: &mut [u64]) -> bool { + let mut one: [u64; 64] = [0u64; 64usize]; + ((&mut one)[0usize..64usize]).copy_from_slice(&[0u64; 64usize]); + (&mut one)[0usize] = 1u64; + let bit0: u64 = n[0usize] & 1u64; + let m0: u64 = 0u64.wrapping_sub(bit0); + let mut acc: [u64; 1] = [0u64; 1usize]; + for i in 0u32..64u32 { + let beq: u64 = fstar::uint64::eq_mask((&one)[i as usize], n[i as usize]); + let blt: u64 = !fstar::uint64::gte_mask((&one)[i as usize], n[i as usize]); + (&mut acc)[0usize] = beq & (&acc)[0usize] | !beq & blt + } + let m1: u64 = (&acc)[0usize]; + let m00: u64 = m0 & m1; + let bn_zero: [u64; 64] = [0u64; 64usize]; + let mut mask: [u64; 1] = [0xFFFFFFFFFFFFFFFFu64; 1usize]; + for i in 0u32..64u32 { + let uu____0: u64 = fstar::uint64::eq_mask(a[i as usize], (&bn_zero)[i as usize]); + (&mut mask)[0usize] = uu____0 & (&mask)[0usize] + } + let mask1: u64 = (&mask)[0usize]; + let res1: u64 = mask1; + let m10: u64 = res1; + let mut acc0: [u64; 1] = [0u64; 1usize]; + for i in 0u32..64u32 { + let beq: u64 = fstar::uint64::eq_mask(a[i as usize], n[i as usize]); + let blt: u64 = !fstar::uint64::gte_mask(a[i as usize], n[i as usize]); + (&mut acc0)[0usize] = beq & (&acc0)[0usize] | !beq & blt + } + let m2: u64 = (&acc0)[0usize]; + let is_valid_m: u64 = m00 & !m10 & m2; + let nBits: u32 = 64u32.wrapping_mul(super::bignum_base::bn_get_top_index_u64(64u32, n) as u32); + if is_valid_m == 0xFFFFFFFFFFFFFFFFu64 { + let mut n2: [u64; 64] = [0u64; 64usize]; + let c0: u64 = lib::inttypes_intrinsics::sub_borrow_u64( + 0u64, + n[0usize], + 2u64, + &mut 
(&mut n2)[0usize..], + ); + let a1: (&[u64], &[u64]) = n.split_at(1usize); + let res10: (&mut [u64], &mut [u64]) = n2.split_at_mut(1usize); + let mut c: [u64; 1] = [c0; 1usize]; + krml::unroll_for!(15, "i", 0u32, 1u32, { + let t1: u64 = a1.1[4u32.wrapping_mul(i) as usize]; + let res_i: (&mut [u64], &mut [u64]) = + res10.1.split_at_mut(4u32.wrapping_mul(i) as usize); + (&mut c)[0usize] = + lib::inttypes_intrinsics::sub_borrow_u64((&c)[0usize], t1, 0u64, res_i.1); + let t10: u64 = a1.1[4u32.wrapping_mul(i).wrapping_add(1u32) as usize]; + let res_i0: (&mut [u64], &mut [u64]) = res_i.1.split_at_mut(1usize); + (&mut c)[0usize] = + lib::inttypes_intrinsics::sub_borrow_u64((&c)[0usize], t10, 0u64, res_i0.1); + let t11: u64 = a1.1[4u32.wrapping_mul(i).wrapping_add(2u32) as usize]; + let res_i1: (&mut [u64], &mut [u64]) = res_i0.1.split_at_mut(1usize); + (&mut c)[0usize] = + lib::inttypes_intrinsics::sub_borrow_u64((&c)[0usize], t11, 0u64, res_i1.1); + let t12: u64 = a1.1[4u32.wrapping_mul(i).wrapping_add(3u32) as usize]; + let res_i2: (&mut [u64], &mut [u64]) = res_i1.1.split_at_mut(1usize); + (&mut c)[0usize] = + lib::inttypes_intrinsics::sub_borrow_u64((&c)[0usize], t12, 0u64, res_i2.1) + }); + krml::unroll_for!(3, "i", 60u32, 1u32, { + let t1: u64 = a1.1[i as usize]; + let res_i: (&mut [u64], &mut [u64]) = res10.1.split_at_mut(i as usize); + (&mut c)[0usize] = + lib::inttypes_intrinsics::sub_borrow_u64((&c)[0usize], t1, 0u64, res_i.1) + }); + let c1: u64 = (&c)[0usize]; + let c2: u64 = c1; + lowstar::ignore::ignore::(c2); + super::bignum4096::exp_vartime(nBits, n, a, 4096u32, &n2, res) + } else { + (res[0usize..64usize]).copy_from_slice(&[0u64; 64usize]) + }; + is_valid_m == 0xFFFFFFFFFFFFFFFFu64 +} + +/** +Heap-allocate and initialize a montgomery context. + + The argument n is meant to be a 4096-bit bignum, i.e. uint64_t[64]. + + Before calling this function, the caller will need to ensure that the following + preconditions are observed. 
+ ā€¢ n % 2 = 1 + ā€¢ 1 < n + + The caller will need to call Hacl_Bignum4096_mont_ctx_free on the return value + to avoid memory leaks. +*/ +pub fn mont_ctx_init(n: &[u64]) -> Box<[super::bignum::bn_mont_ctx_u64]> { + let mut r2: Box<[u64]> = vec![0u64; 64usize].into_boxed_slice(); + let mut n1: Box<[u64]> = vec![0u64; 64usize].into_boxed_slice(); + let r21: &mut [u64] = &mut r2; + let n11: &mut [u64] = &mut n1; + (n11[0usize..64usize]).copy_from_slice(&n[0usize..64usize]); + let nBits: u32 = 64u32.wrapping_mul(super::bignum_base::bn_get_top_index_u64(64u32, n) as u32); + super::bignum4096::precompr2(nBits, n, r21); + let mu: u64 = super::bignum::mod_inv_uint64(n[0usize]); + let res: super::bignum::bn_mont_ctx_u64 = super::bignum::bn_mont_ctx_u64 { + len: 64u32, + n: (*n11).into(), + mu, + r2: (*r21).into(), + }; + let buf: Box<[super::bignum::bn_mont_ctx_u64]> = vec![res].into_boxed_slice(); + buf +} + +/** +Write `a mod n` in `res`. + + The argument a is meant to be a 8192-bit bignum, i.e. uint64_t[128]. + The outparam res is meant to be a 4096-bit bignum, i.e. uint64_t[64]. + The argument k is a montgomery context obtained through Hacl_Bignum4096_mont_ctx_init. +*/ +pub fn mod_precomp(k: &[super::bignum::bn_mont_ctx_u64], a: &[u64], res: &mut [u64]) { + let n: &[u64] = &(k[0usize]).n; + let mu: u64 = (k[0usize]).mu; + let r2: &[u64] = &(k[0usize]).r2; + super::bignum4096::bn_slow_precomp(n, mu, r2, a, res) +} + +/** +Write `a ^ b mod n` in `res`. + + The arguments a and the outparam res are meant to be 4096-bit bignums, i.e. uint64_t[64]. + The argument k is a montgomery context obtained through Hacl_Bignum4096_mont_ctx_init. + + The argument b is a bignum of any size, and bBits is an upper bound on the + number of significant bits of b. A tighter bound results in faster execution + time. When in doubt, the number of bits for the bignum size is always a safe + default, e.g. if b is a 4096-bit bignum, bBits should be 4096. 
+ + The function is *NOT* constant-time on the argument b. See the + mod_exp_consttime_* functions for constant-time variants. + + Before calling this function, the caller will need to ensure that the following + preconditions are observed. + ā€¢ b < pow2 bBits + ā€¢ a < n +*/ +pub fn mod_exp_vartime_precomp( + k: &[super::bignum::bn_mont_ctx_u64], + a: &[u64], + bBits: u32, + b: &[u64], + res: &mut [u64], +) { + let n: &[u64] = &(k[0usize]).n; + let mu: u64 = (k[0usize]).mu; + let r2: &[u64] = &(k[0usize]).r2; + super::bignum4096::exp_vartime_precomp(n, mu, r2, a, bBits, b, res) +} + +/** +Write `a ^ b mod n` in `res`. + + The arguments a and the outparam res are meant to be 4096-bit bignums, i.e. uint64_t[64]. + The argument k is a montgomery context obtained through Hacl_Bignum4096_mont_ctx_init. + + The argument b is a bignum of any size, and bBits is an upper bound on the + number of significant bits of b. A tighter bound results in faster execution + time. When in doubt, the number of bits for the bignum size is always a safe + default, e.g. if b is a 4096-bit bignum, bBits should be 4096. + + This function is constant-time over its argument b, at the cost of a slower + execution time than mod_exp_vartime_*. + + Before calling this function, the caller will need to ensure that the following + preconditions are observed. + ā€¢ b < pow2 bBits + ā€¢ a < n +*/ +pub fn mod_exp_consttime_precomp( + k: &[super::bignum::bn_mont_ctx_u64], + a: &[u64], + bBits: u32, + b: &[u64], + res: &mut [u64], +) { + let n: &[u64] = &(k[0usize]).n; + let mu: u64 = (k[0usize]).mu; + let r2: &[u64] = &(k[0usize]).r2; + super::bignum4096::exp_consttime_precomp(n, mu, r2, a, bBits, b, res) +} + +/** +Write `a ^ (-1) mod n` in `res`. + + The argument a and the outparam res are meant to be 4096-bit bignums, i.e. uint64_t[64]. + The argument k is a montgomery context obtained through Hacl_Bignum4096_mont_ctx_init. 
+ + Before calling this function, the caller will need to ensure that the following + preconditions are observed. + ā€¢ n is a prime + ā€¢ 0 < a + ā€¢ a < n +*/ +pub fn mod_inv_prime_vartime_precomp( + k: &[super::bignum::bn_mont_ctx_u64], + a: &[u64], + res: &mut [u64], +) { + let n: &[u64] = &(k[0usize]).n; + let mu: u64 = (k[0usize]).mu; + let r2: &[u64] = &(k[0usize]).r2; + let mut n2: [u64; 64] = [0u64; 64usize]; + let c0: u64 = + lib::inttypes_intrinsics::sub_borrow_u64(0u64, n[0usize], 2u64, &mut (&mut n2)[0usize..]); + let a1: (&[u64], &[u64]) = n.split_at(1usize); + let res1: (&mut [u64], &mut [u64]) = n2.split_at_mut(1usize); + let mut c: [u64; 1] = [c0; 1usize]; + krml::unroll_for!(15, "i", 0u32, 1u32, { + let t1: u64 = a1.1[4u32.wrapping_mul(i) as usize]; + let res_i: (&mut [u64], &mut [u64]) = res1.1.split_at_mut(4u32.wrapping_mul(i) as usize); + (&mut c)[0usize] = + lib::inttypes_intrinsics::sub_borrow_u64((&c)[0usize], t1, 0u64, res_i.1); + let t10: u64 = a1.1[4u32.wrapping_mul(i).wrapping_add(1u32) as usize]; + let res_i0: (&mut [u64], &mut [u64]) = res_i.1.split_at_mut(1usize); + (&mut c)[0usize] = + lib::inttypes_intrinsics::sub_borrow_u64((&c)[0usize], t10, 0u64, res_i0.1); + let t11: u64 = a1.1[4u32.wrapping_mul(i).wrapping_add(2u32) as usize]; + let res_i1: (&mut [u64], &mut [u64]) = res_i0.1.split_at_mut(1usize); + (&mut c)[0usize] = + lib::inttypes_intrinsics::sub_borrow_u64((&c)[0usize], t11, 0u64, res_i1.1); + let t12: u64 = a1.1[4u32.wrapping_mul(i).wrapping_add(3u32) as usize]; + let res_i2: (&mut [u64], &mut [u64]) = res_i1.1.split_at_mut(1usize); + (&mut c)[0usize] = + lib::inttypes_intrinsics::sub_borrow_u64((&c)[0usize], t12, 0u64, res_i2.1) + }); + krml::unroll_for!(3, "i", 60u32, 1u32, { + let t1: u64 = a1.1[i as usize]; + let res_i: (&mut [u64], &mut [u64]) = res1.1.split_at_mut(i as usize); + (&mut c)[0usize] = lib::inttypes_intrinsics::sub_borrow_u64((&c)[0usize], t1, 0u64, res_i.1) + }); + let c1: u64 = (&c)[0usize]; + let c2: 
u64 = c1; + lowstar::ignore::ignore::(c2); + super::bignum4096::exp_vartime_precomp(n, mu, r2, a, 4096u32, &n2, res) +} + +/** +Load a bid-endian bignum from memory. + + The argument b points to len bytes of valid memory. + The function returns a heap-allocated bignum of size sufficient to hold the + result of loading b, or NULL if either the allocation failed, or the amount of + required memory would exceed 4GB. + + If the return value is non-null, clients must eventually call free(3) on it to + avoid memory leaks. +*/ +pub fn new_bn_from_bytes_be(len: u32, b: &[u8]) -> Box<[u64]> { + if len == 0u32 || len.wrapping_sub(1u32).wrapping_div(8u32).wrapping_add(1u32) > 536870911u32 { + [].into() + } else { + let mut res: Box<[u64]> = + vec![0u64; len.wrapping_sub(1u32).wrapping_div(8u32).wrapping_add(1u32) as usize] + .into_boxed_slice(); + if false { + res + } else { + let res1: &mut [u64] = &mut res; + let res2: &mut [u64] = res1; + let bnLen: u32 = len.wrapping_sub(1u32).wrapping_div(8u32).wrapping_add(1u32); + let tmpLen: u32 = 8u32.wrapping_mul(bnLen); + let mut tmp: Box<[u8]> = vec![0u8; tmpLen as usize].into_boxed_slice(); + ((&mut tmp)[tmpLen.wrapping_sub(len) as usize + ..tmpLen.wrapping_sub(len) as usize + len as usize]) + .copy_from_slice(&b[0usize..len as usize]); + for i in 0u32..bnLen { + let u: u64 = lowstar::endianness::load64_be( + &(&tmp)[bnLen.wrapping_sub(i).wrapping_sub(1u32).wrapping_mul(8u32) as usize..], + ); + let x: u64 = u; + let os: (&mut [u64], &mut [u64]) = res2.split_at_mut(0usize); + os.1[i as usize] = x + } + (*res2).into() + } + } +} + +/** +Load a little-endian bignum from memory. + + The argument b points to len bytes of valid memory. + The function returns a heap-allocated bignum of size sufficient to hold the + result of loading b, or NULL if either the allocation failed, or the amount of + required memory would exceed 4GB. + + If the return value is non-null, clients must eventually call free(3) on it to + avoid memory leaks. 
+*/ +pub fn new_bn_from_bytes_le(len: u32, b: &[u8]) -> Box<[u64]> { + if len == 0u32 || len.wrapping_sub(1u32).wrapping_div(8u32).wrapping_add(1u32) > 536870911u32 { + [].into() + } else { + let mut res: Box<[u64]> = + vec![0u64; len.wrapping_sub(1u32).wrapping_div(8u32).wrapping_add(1u32) as usize] + .into_boxed_slice(); + if false { + res + } else { + let res1: &mut [u64] = &mut res; + let res2: &mut [u64] = res1; + let bnLen: u32 = len.wrapping_sub(1u32).wrapping_div(8u32).wrapping_add(1u32); + let tmpLen: u32 = 8u32.wrapping_mul(bnLen); + let mut tmp: Box<[u8]> = vec![0u8; tmpLen as usize].into_boxed_slice(); + ((&mut tmp)[0usize..len as usize]).copy_from_slice(&b[0usize..len as usize]); + for i in 0u32..len.wrapping_sub(1u32).wrapping_div(8u32).wrapping_add(1u32) { + let bj: (&[u8], &[u8]) = tmp.split_at(i.wrapping_mul(8u32) as usize); + let u: u64 = lowstar::endianness::load64_le(bj.1); + let r1: u64 = u; + let x: u64 = r1; + let os: (&mut [u64], &mut [u64]) = res2.split_at_mut(0usize); + os.1[i as usize] = x + } + (*res2).into() + } + } +} + +/** +Serialize a bignum into big-endian memory. + + The argument b points to a 4096-bit bignum. + The outparam res points to 512 bytes of valid memory. +*/ +pub fn bn_to_bytes_be(b: &[u64], res: &mut [u8]) { + let tmp: [u8; 512] = [0u8; 512usize]; + lowstar::ignore::ignore::<&[u8]>(&tmp); + for i in 0u32..64u32 { + lowstar::endianness::store64_be( + &mut res[i.wrapping_mul(8u32) as usize..], + b[64u32.wrapping_sub(i).wrapping_sub(1u32) as usize], + ) + } +} + +/** +Serialize a bignum into little-endian memory. + + The argument b points to a 4096-bit bignum. + The outparam res points to 512 bytes of valid memory. 
+*/ +pub fn bn_to_bytes_le(b: &[u64], res: &mut [u8]) { + let tmp: [u8; 512] = [0u8; 512usize]; + lowstar::ignore::ignore::<&[u8]>(&tmp); + for i in 0u32..64u32 { + lowstar::endianness::store64_le(&mut res[i.wrapping_mul(8u32) as usize..], b[i as usize]) + } +} + +/** +Returns 2^64 - 1 if a < b, otherwise returns 0. + + The arguments a and b are meant to be 4096-bit bignums, i.e. uint64_t[64]. +*/ +pub fn lt_mask(a: &[u64], b: &[u64]) -> u64 { + let mut acc: [u64; 1] = [0u64; 1usize]; + for i in 0u32..64u32 { + let beq: u64 = fstar::uint64::eq_mask(a[i as usize], b[i as usize]); + let blt: u64 = !fstar::uint64::gte_mask(a[i as usize], b[i as usize]); + (&mut acc)[0usize] = beq & (&acc)[0usize] | !beq & blt + } + (&acc)[0usize] +} + +/** +Returns 2^64 - 1 if a = b, otherwise returns 0. + + The arguments a and b are meant to be 4096-bit bignums, i.e. uint64_t[64]. +*/ +pub fn eq_mask(a: &[u64], b: &[u64]) -> u64 { + let mut mask: [u64; 1] = [0xFFFFFFFFFFFFFFFFu64; 1usize]; + for i in 0u32..64u32 { + let uu____0: u64 = fstar::uint64::eq_mask(a[i as usize], b[i as usize]); + (&mut mask)[0usize] = uu____0 & (&mask)[0usize] + } + let mask1: u64 = (&mask)[0usize]; + mask1 +} diff --git a/libcrux-hacl-rs/src/bignum/bignum4096_32.rs b/libcrux-hacl-rs/src/bignum/bignum4096_32.rs new file mode 100644 index 000000000..e384b25dc --- /dev/null +++ b/libcrux-hacl-rs/src/bignum/bignum4096_32.rs @@ -0,0 +1,1247 @@ +#![allow(non_snake_case)] +#![allow(non_upper_case_globals)] +#![allow(non_camel_case_types)] +#![allow(unused_assignments)] +#![allow(unreachable_patterns)] + +use crate::fstar; +use crate::lowstar; +use crate::util as lib; + +/** +Write `a + b mod 2^4096` in `res`. + + This functions returns the carry. + + The arguments a, b and res are meant to be 4096-bit bignums, i.e. 
uint32_t[128] +*/ +pub fn add(a: &[u32], b: &[u32], res: &mut [u32]) -> u32 { + let mut c: [u32; 1] = [0u32; 1usize]; + krml::unroll_for!(32, "i", 0u32, 1u32, { + let t1: u32 = a[4u32.wrapping_mul(i) as usize]; + let t2: u32 = b[4u32.wrapping_mul(i) as usize]; + let res_i: (&mut [u32], &mut [u32]) = res.split_at_mut(4u32.wrapping_mul(i) as usize); + (&mut c)[0usize] = lib::inttypes_intrinsics::add_carry_u32((&c)[0usize], t1, t2, res_i.1); + let t10: u32 = a[4u32.wrapping_mul(i).wrapping_add(1u32) as usize]; + let t20: u32 = b[4u32.wrapping_mul(i).wrapping_add(1u32) as usize]; + let res_i0: (&mut [u32], &mut [u32]) = res_i.1.split_at_mut(1usize); + (&mut c)[0usize] = + lib::inttypes_intrinsics::add_carry_u32((&c)[0usize], t10, t20, res_i0.1); + let t11: u32 = a[4u32.wrapping_mul(i).wrapping_add(2u32) as usize]; + let t21: u32 = b[4u32.wrapping_mul(i).wrapping_add(2u32) as usize]; + let res_i1: (&mut [u32], &mut [u32]) = res_i0.1.split_at_mut(1usize); + (&mut c)[0usize] = + lib::inttypes_intrinsics::add_carry_u32((&c)[0usize], t11, t21, res_i1.1); + let t12: u32 = a[4u32.wrapping_mul(i).wrapping_add(3u32) as usize]; + let t22: u32 = b[4u32.wrapping_mul(i).wrapping_add(3u32) as usize]; + let res_i2: (&mut [u32], &mut [u32]) = res_i1.1.split_at_mut(1usize); + (&mut c)[0usize] = lib::inttypes_intrinsics::add_carry_u32((&c)[0usize], t12, t22, res_i2.1) + }); + (&c)[0usize] +} + +/** +Write `a - b mod 2^4096` in `res`. + + This functions returns the carry. + + The arguments a, b and res are meant to be 4096-bit bignums, i.e. 
uint32_t[128] +*/ +pub fn sub(a: &[u32], b: &[u32], res: &mut [u32]) -> u32 { + let mut c: [u32; 1] = [0u32; 1usize]; + krml::unroll_for!(32, "i", 0u32, 1u32, { + let t1: u32 = a[4u32.wrapping_mul(i) as usize]; + let t2: u32 = b[4u32.wrapping_mul(i) as usize]; + let res_i: (&mut [u32], &mut [u32]) = res.split_at_mut(4u32.wrapping_mul(i) as usize); + (&mut c)[0usize] = lib::inttypes_intrinsics::sub_borrow_u32((&c)[0usize], t1, t2, res_i.1); + let t10: u32 = a[4u32.wrapping_mul(i).wrapping_add(1u32) as usize]; + let t20: u32 = b[4u32.wrapping_mul(i).wrapping_add(1u32) as usize]; + let res_i0: (&mut [u32], &mut [u32]) = res_i.1.split_at_mut(1usize); + (&mut c)[0usize] = + lib::inttypes_intrinsics::sub_borrow_u32((&c)[0usize], t10, t20, res_i0.1); + let t11: u32 = a[4u32.wrapping_mul(i).wrapping_add(2u32) as usize]; + let t21: u32 = b[4u32.wrapping_mul(i).wrapping_add(2u32) as usize]; + let res_i1: (&mut [u32], &mut [u32]) = res_i0.1.split_at_mut(1usize); + (&mut c)[0usize] = + lib::inttypes_intrinsics::sub_borrow_u32((&c)[0usize], t11, t21, res_i1.1); + let t12: u32 = a[4u32.wrapping_mul(i).wrapping_add(3u32) as usize]; + let t22: u32 = b[4u32.wrapping_mul(i).wrapping_add(3u32) as usize]; + let res_i2: (&mut [u32], &mut [u32]) = res_i1.1.split_at_mut(1usize); + (&mut c)[0usize] = + lib::inttypes_intrinsics::sub_borrow_u32((&c)[0usize], t12, t22, res_i2.1) + }); + (&c)[0usize] +} + +/** +Write `(a + b) mod n` in `res`. + + The arguments a, b, n and the outparam res are meant to be 4096-bit bignums, i.e. uint32_t[128]. + + Before calling this function, the caller will need to ensure that the following + preconditions are observed. 
+ ā€¢ a < n + ā€¢ b < n +*/ +pub fn add_mod(n: &[u32], a: &[u32], b: &[u32], res: &mut [u32]) { + let mut c: [u32; 1] = [0u32; 1usize]; + krml::unroll_for!(32, "i", 0u32, 1u32, { + let t1: u32 = a[4u32.wrapping_mul(i) as usize]; + let t2: u32 = b[4u32.wrapping_mul(i) as usize]; + let res_i: (&mut [u32], &mut [u32]) = res.split_at_mut(4u32.wrapping_mul(i) as usize); + (&mut c)[0usize] = lib::inttypes_intrinsics::add_carry_u32((&c)[0usize], t1, t2, res_i.1); + let t10: u32 = a[4u32.wrapping_mul(i).wrapping_add(1u32) as usize]; + let t20: u32 = b[4u32.wrapping_mul(i).wrapping_add(1u32) as usize]; + let res_i0: (&mut [u32], &mut [u32]) = res_i.1.split_at_mut(1usize); + (&mut c)[0usize] = + lib::inttypes_intrinsics::add_carry_u32((&c)[0usize], t10, t20, res_i0.1); + let t11: u32 = a[4u32.wrapping_mul(i).wrapping_add(2u32) as usize]; + let t21: u32 = b[4u32.wrapping_mul(i).wrapping_add(2u32) as usize]; + let res_i1: (&mut [u32], &mut [u32]) = res_i0.1.split_at_mut(1usize); + (&mut c)[0usize] = + lib::inttypes_intrinsics::add_carry_u32((&c)[0usize], t11, t21, res_i1.1); + let t12: u32 = a[4u32.wrapping_mul(i).wrapping_add(3u32) as usize]; + let t22: u32 = b[4u32.wrapping_mul(i).wrapping_add(3u32) as usize]; + let res_i2: (&mut [u32], &mut [u32]) = res_i1.1.split_at_mut(1usize); + (&mut c)[0usize] = lib::inttypes_intrinsics::add_carry_u32((&c)[0usize], t12, t22, res_i2.1) + }); + let c0: u32 = (&c)[0usize]; + let mut tmp: [u32; 128] = [0u32; 128usize]; + let mut c1: [u32; 1] = [0u32; 1usize]; + krml::unroll_for!(32, "i", 0u32, 1u32, { + let t1: u32 = res[4u32.wrapping_mul(i) as usize]; + let t2: u32 = n[4u32.wrapping_mul(i) as usize]; + let res_i: (&mut [u32], &mut [u32]) = tmp.split_at_mut(4u32.wrapping_mul(i) as usize); + (&mut c1)[0usize] = + lib::inttypes_intrinsics::sub_borrow_u32((&c1)[0usize], t1, t2, res_i.1); + let t10: u32 = res[4u32.wrapping_mul(i).wrapping_add(1u32) as usize]; + let t20: u32 = n[4u32.wrapping_mul(i).wrapping_add(1u32) as usize]; + let res_i0: 
(&mut [u32], &mut [u32]) = res_i.1.split_at_mut(1usize); + (&mut c1)[0usize] = + lib::inttypes_intrinsics::sub_borrow_u32((&c1)[0usize], t10, t20, res_i0.1); + let t11: u32 = res[4u32.wrapping_mul(i).wrapping_add(2u32) as usize]; + let t21: u32 = n[4u32.wrapping_mul(i).wrapping_add(2u32) as usize]; + let res_i1: (&mut [u32], &mut [u32]) = res_i0.1.split_at_mut(1usize); + (&mut c1)[0usize] = + lib::inttypes_intrinsics::sub_borrow_u32((&c1)[0usize], t11, t21, res_i1.1); + let t12: u32 = res[4u32.wrapping_mul(i).wrapping_add(3u32) as usize]; + let t22: u32 = n[4u32.wrapping_mul(i).wrapping_add(3u32) as usize]; + let res_i2: (&mut [u32], &mut [u32]) = res_i1.1.split_at_mut(1usize); + (&mut c1)[0usize] = + lib::inttypes_intrinsics::sub_borrow_u32((&c1)[0usize], t12, t22, res_i2.1) + }); + let c10: u32 = (&c1)[0usize]; + let c2: u32 = c0.wrapping_sub(c10); + for i in 0u32..128u32 { + let x: u32 = c2 & res[i as usize] | !c2 & (&tmp)[i as usize]; + let os: (&mut [u32], &mut [u32]) = res.split_at_mut(0usize); + os.1[i as usize] = x + } +} + +/** +Write `(a - b) mod n` in `res`. + + The arguments a, b, n and the outparam res are meant to be 4096-bit bignums, i.e. uint32_t[128]. + + Before calling this function, the caller will need to ensure that the following + preconditions are observed. 
+ ā€¢ a < n + ā€¢ b < n +*/ +pub fn sub_mod(n: &[u32], a: &[u32], b: &[u32], res: &mut [u32]) { + let mut c: [u32; 1] = [0u32; 1usize]; + krml::unroll_for!(32, "i", 0u32, 1u32, { + let t1: u32 = a[4u32.wrapping_mul(i) as usize]; + let t2: u32 = b[4u32.wrapping_mul(i) as usize]; + let res_i: (&mut [u32], &mut [u32]) = res.split_at_mut(4u32.wrapping_mul(i) as usize); + (&mut c)[0usize] = lib::inttypes_intrinsics::sub_borrow_u32((&c)[0usize], t1, t2, res_i.1); + let t10: u32 = a[4u32.wrapping_mul(i).wrapping_add(1u32) as usize]; + let t20: u32 = b[4u32.wrapping_mul(i).wrapping_add(1u32) as usize]; + let res_i0: (&mut [u32], &mut [u32]) = res_i.1.split_at_mut(1usize); + (&mut c)[0usize] = + lib::inttypes_intrinsics::sub_borrow_u32((&c)[0usize], t10, t20, res_i0.1); + let t11: u32 = a[4u32.wrapping_mul(i).wrapping_add(2u32) as usize]; + let t21: u32 = b[4u32.wrapping_mul(i).wrapping_add(2u32) as usize]; + let res_i1: (&mut [u32], &mut [u32]) = res_i0.1.split_at_mut(1usize); + (&mut c)[0usize] = + lib::inttypes_intrinsics::sub_borrow_u32((&c)[0usize], t11, t21, res_i1.1); + let t12: u32 = a[4u32.wrapping_mul(i).wrapping_add(3u32) as usize]; + let t22: u32 = b[4u32.wrapping_mul(i).wrapping_add(3u32) as usize]; + let res_i2: (&mut [u32], &mut [u32]) = res_i1.1.split_at_mut(1usize); + (&mut c)[0usize] = + lib::inttypes_intrinsics::sub_borrow_u32((&c)[0usize], t12, t22, res_i2.1) + }); + let c0: u32 = (&c)[0usize]; + let mut tmp: [u32; 128] = [0u32; 128usize]; + let mut c1: [u32; 1] = [0u32; 1usize]; + krml::unroll_for!(32, "i", 0u32, 1u32, { + let t1: u32 = res[4u32.wrapping_mul(i) as usize]; + let t2: u32 = n[4u32.wrapping_mul(i) as usize]; + let res_i: (&mut [u32], &mut [u32]) = tmp.split_at_mut(4u32.wrapping_mul(i) as usize); + (&mut c1)[0usize] = lib::inttypes_intrinsics::add_carry_u32((&c1)[0usize], t1, t2, res_i.1); + let t10: u32 = res[4u32.wrapping_mul(i).wrapping_add(1u32) as usize]; + let t20: u32 = n[4u32.wrapping_mul(i).wrapping_add(1u32) as usize]; + let 
res_i0: (&mut [u32], &mut [u32]) = res_i.1.split_at_mut(1usize); + (&mut c1)[0usize] = + lib::inttypes_intrinsics::add_carry_u32((&c1)[0usize], t10, t20, res_i0.1); + let t11: u32 = res[4u32.wrapping_mul(i).wrapping_add(2u32) as usize]; + let t21: u32 = n[4u32.wrapping_mul(i).wrapping_add(2u32) as usize]; + let res_i1: (&mut [u32], &mut [u32]) = res_i0.1.split_at_mut(1usize); + (&mut c1)[0usize] = + lib::inttypes_intrinsics::add_carry_u32((&c1)[0usize], t11, t21, res_i1.1); + let t12: u32 = res[4u32.wrapping_mul(i).wrapping_add(3u32) as usize]; + let t22: u32 = n[4u32.wrapping_mul(i).wrapping_add(3u32) as usize]; + let res_i2: (&mut [u32], &mut [u32]) = res_i1.1.split_at_mut(1usize); + (&mut c1)[0usize] = + lib::inttypes_intrinsics::add_carry_u32((&c1)[0usize], t12, t22, res_i2.1) + }); + let c10: u32 = (&c1)[0usize]; + lowstar::ignore::ignore::(c10); + let c2: u32 = 0u32.wrapping_sub(c0); + for i in 0u32..128u32 { + let x: u32 = c2 & (&tmp)[i as usize] | !c2 & res[i as usize]; + let os: (&mut [u32], &mut [u32]) = res.split_at_mut(0usize); + os.1[i as usize] = x + } +} + +/** +Write `a * b` in `res`. + + The arguments a and b are meant to be 4096-bit bignums, i.e. uint32_t[128]. + The outparam res is meant to be a 8192-bit bignum, i.e. uint32_t[256]. +*/ +pub fn mul(a: &[u32], b: &[u32], res: &mut [u32]) { + let mut tmp: [u32; 512] = [0u32; 512usize]; + super::bignum::bn_karatsuba_mul_uint32(128u32, a, b, &mut tmp, res) +} + +/** +Write `a * a` in `res`. + + The argument a is meant to be a 4096-bit bignum, i.e. uint32_t[128]. + The outparam res is meant to be a 8192-bit bignum, i.e. uint32_t[256]. 
+*/ +pub fn sqr(a: &[u32], res: &mut [u32]) { + let mut tmp: [u32; 512] = [0u32; 512usize]; + super::bignum::bn_karatsuba_sqr_uint32(128u32, a, &mut tmp, res) +} + +#[inline] +fn precompr2(nBits: u32, n: &[u32], res: &mut [u32]) { + (res[0usize..128usize]).copy_from_slice(&[0u32; 128usize]); + let i: u32 = nBits.wrapping_div(32u32); + let j: u32 = nBits.wrapping_rem(32u32); + res[i as usize] |= 1u32.wrapping_shl(j); + for _i in 0u32..8192u32.wrapping_sub(nBits) { + let mut a_copy: [u32; 128] = [0u32; 128usize]; + let mut b_copy: [u32; 128] = [0u32; 128usize]; + ((&mut a_copy)[0usize..128usize]).copy_from_slice(&res[0usize..128usize]); + ((&mut b_copy)[0usize..128usize]).copy_from_slice(&res[0usize..128usize]); + super::bignum4096_32::add_mod(n, &a_copy, &b_copy, res) + } +} + +#[inline] +fn reduction(n: &[u32], nInv: u32, c: &mut [u32], res: &mut [u32]) { + let mut c0: [u32; 1] = [0u32; 1usize]; + for i in 0u32..128u32 { + let qj: u32 = nInv.wrapping_mul(c[i as usize]); + let res_j: (&mut [u32], &mut [u32]) = c.split_at_mut(i as usize); + let mut c1: [u32; 1] = [0u32; 1usize]; + krml::unroll_for!(32, "i0", 0u32, 1u32, { + let a_i: u32 = n[4u32.wrapping_mul(i0) as usize]; + let res_i: (&mut [u32], &mut [u32]) = + res_j.1.split_at_mut(4u32.wrapping_mul(i0) as usize); + (&mut c1)[0usize] = + super::bignum_base::mul_wide_add2_u32(a_i, qj, (&c1)[0usize], res_i.1); + let a_i0: u32 = n[4u32.wrapping_mul(i0).wrapping_add(1u32) as usize]; + let res_i0: (&mut [u32], &mut [u32]) = res_i.1.split_at_mut(1usize); + (&mut c1)[0usize] = + super::bignum_base::mul_wide_add2_u32(a_i0, qj, (&c1)[0usize], res_i0.1); + let a_i1: u32 = n[4u32.wrapping_mul(i0).wrapping_add(2u32) as usize]; + let res_i1: (&mut [u32], &mut [u32]) = res_i0.1.split_at_mut(1usize); + (&mut c1)[0usize] = + super::bignum_base::mul_wide_add2_u32(a_i1, qj, (&c1)[0usize], res_i1.1); + let a_i2: u32 = n[4u32.wrapping_mul(i0).wrapping_add(3u32) as usize]; + let res_i2: (&mut [u32], &mut [u32]) = 
res_i1.1.split_at_mut(1usize); + (&mut c1)[0usize] = + super::bignum_base::mul_wide_add2_u32(a_i2, qj, (&c1)[0usize], res_i2.1) + }); + let r: u32 = (&c1)[0usize]; + let c10: u32 = r; + let res_j0: u32 = c[128u32.wrapping_add(i) as usize]; + let resb: (&mut [u32], &mut [u32]) = c.split_at_mut(i as usize + 128usize); + (&mut c0)[0usize] = + lib::inttypes_intrinsics::add_carry_u32((&c0)[0usize], c10, res_j0, resb.1) + } + (res[0usize..128usize]).copy_from_slice(&(&c[128usize..])[0usize..128usize]); + let c00: u32 = (&c0)[0usize]; + let mut tmp: [u32; 128] = [0u32; 128usize]; + let mut c1: [u32; 1] = [0u32; 1usize]; + krml::unroll_for!(32, "i", 0u32, 1u32, { + let t1: u32 = res[4u32.wrapping_mul(i) as usize]; + let t2: u32 = n[4u32.wrapping_mul(i) as usize]; + let res_i: (&mut [u32], &mut [u32]) = tmp.split_at_mut(4u32.wrapping_mul(i) as usize); + (&mut c1)[0usize] = + lib::inttypes_intrinsics::sub_borrow_u32((&c1)[0usize], t1, t2, res_i.1); + let t10: u32 = res[4u32.wrapping_mul(i).wrapping_add(1u32) as usize]; + let t20: u32 = n[4u32.wrapping_mul(i).wrapping_add(1u32) as usize]; + let res_i0: (&mut [u32], &mut [u32]) = res_i.1.split_at_mut(1usize); + (&mut c1)[0usize] = + lib::inttypes_intrinsics::sub_borrow_u32((&c1)[0usize], t10, t20, res_i0.1); + let t11: u32 = res[4u32.wrapping_mul(i).wrapping_add(2u32) as usize]; + let t21: u32 = n[4u32.wrapping_mul(i).wrapping_add(2u32) as usize]; + let res_i1: (&mut [u32], &mut [u32]) = res_i0.1.split_at_mut(1usize); + (&mut c1)[0usize] = + lib::inttypes_intrinsics::sub_borrow_u32((&c1)[0usize], t11, t21, res_i1.1); + let t12: u32 = res[4u32.wrapping_mul(i).wrapping_add(3u32) as usize]; + let t22: u32 = n[4u32.wrapping_mul(i).wrapping_add(3u32) as usize]; + let res_i2: (&mut [u32], &mut [u32]) = res_i1.1.split_at_mut(1usize); + (&mut c1)[0usize] = + lib::inttypes_intrinsics::sub_borrow_u32((&c1)[0usize], t12, t22, res_i2.1) + }); + let c10: u32 = (&c1)[0usize]; + let c2: u32 = c00.wrapping_sub(c10); + for i in 0u32..128u32 { 
+ let x: u32 = c2 & res[i as usize] | !c2 & (&tmp)[i as usize]; + let os: (&mut [u32], &mut [u32]) = res.split_at_mut(0usize); + os.1[i as usize] = x + } +} + +#[inline] +fn to(n: &[u32], nInv: u32, r2: &[u32], a: &[u32], aM: &mut [u32]) { + let mut c: [u32; 256] = [0u32; 256usize]; + super::bignum4096_32::mul(a, r2, &mut c); + super::bignum4096_32::reduction(n, nInv, &mut c, aM) +} + +#[inline] +fn from(n: &[u32], nInv_u64: u32, aM: &[u32], a: &mut [u32]) { + let mut tmp: [u32; 256] = [0u32; 256usize]; + ((&mut tmp)[0usize..128usize]).copy_from_slice(&aM[0usize..128usize]); + super::bignum4096_32::reduction(n, nInv_u64, &mut tmp, a) +} + +#[inline] +fn areduction(n: &[u32], nInv: u32, c: &mut [u32], res: &mut [u32]) { + let mut c0: [u32; 1] = [0u32; 1usize]; + for i in 0u32..128u32 { + let qj: u32 = nInv.wrapping_mul(c[i as usize]); + let res_j: (&mut [u32], &mut [u32]) = c.split_at_mut(i as usize); + let mut c1: [u32; 1] = [0u32; 1usize]; + krml::unroll_for!(32, "i0", 0u32, 1u32, { + let a_i: u32 = n[4u32.wrapping_mul(i0) as usize]; + let res_i: (&mut [u32], &mut [u32]) = + res_j.1.split_at_mut(4u32.wrapping_mul(i0) as usize); + (&mut c1)[0usize] = + super::bignum_base::mul_wide_add2_u32(a_i, qj, (&c1)[0usize], res_i.1); + let a_i0: u32 = n[4u32.wrapping_mul(i0).wrapping_add(1u32) as usize]; + let res_i0: (&mut [u32], &mut [u32]) = res_i.1.split_at_mut(1usize); + (&mut c1)[0usize] = + super::bignum_base::mul_wide_add2_u32(a_i0, qj, (&c1)[0usize], res_i0.1); + let a_i1: u32 = n[4u32.wrapping_mul(i0).wrapping_add(2u32) as usize]; + let res_i1: (&mut [u32], &mut [u32]) = res_i0.1.split_at_mut(1usize); + (&mut c1)[0usize] = + super::bignum_base::mul_wide_add2_u32(a_i1, qj, (&c1)[0usize], res_i1.1); + let a_i2: u32 = n[4u32.wrapping_mul(i0).wrapping_add(3u32) as usize]; + let res_i2: (&mut [u32], &mut [u32]) = res_i1.1.split_at_mut(1usize); + (&mut c1)[0usize] = + super::bignum_base::mul_wide_add2_u32(a_i2, qj, (&c1)[0usize], res_i2.1) + }); + let r: u32 = 
(&c1)[0usize]; + let c10: u32 = r; + let res_j0: u32 = c[128u32.wrapping_add(i) as usize]; + let resb: (&mut [u32], &mut [u32]) = c.split_at_mut(i as usize + 128usize); + (&mut c0)[0usize] = + lib::inttypes_intrinsics::add_carry_u32((&c0)[0usize], c10, res_j0, resb.1) + } + (res[0usize..128usize]).copy_from_slice(&(&c[128usize..])[0usize..128usize]); + let c00: u32 = (&c0)[0usize]; + let mut tmp: [u32; 128] = [0u32; 128usize]; + let c1: u32 = super::bignum4096_32::sub(res, n, &mut tmp); + lowstar::ignore::ignore::(c1); + let m: u32 = 0u32.wrapping_sub(c00); + for i in 0u32..128u32 { + let x: u32 = m & (&tmp)[i as usize] | !m & res[i as usize]; + let os: (&mut [u32], &mut [u32]) = res.split_at_mut(0usize); + os.1[i as usize] = x + } +} + +#[inline] +fn amont_mul(n: &[u32], nInv_u64: u32, aM: &[u32], bM: &[u32], resM: &mut [u32]) { + let mut c: [u32; 256] = [0u32; 256usize]; + super::bignum4096_32::mul(aM, bM, &mut c); + super::bignum4096_32::areduction(n, nInv_u64, &mut c, resM) +} + +#[inline] +fn amont_sqr(n: &[u32], nInv_u64: u32, aM: &[u32], resM: &mut [u32]) { + let mut c: [u32; 256] = [0u32; 256usize]; + super::bignum4096_32::sqr(aM, &mut c); + super::bignum4096_32::areduction(n, nInv_u64, &mut c, resM) +} + +#[inline] +fn bn_slow_precomp(n: &[u32], mu: u32, r2: &[u32], a: &[u32], res: &mut [u32]) { + let mut a_mod: [u32; 128] = [0u32; 128usize]; + let mut a1: [u32; 256] = [0u32; 256usize]; + ((&mut a1)[0usize..256usize]).copy_from_slice(&a[0usize..256usize]); + super::bignum4096_32::areduction(n, mu, &mut a1, &mut a_mod); + super::bignum4096_32::to(n, mu, r2, &a_mod, res) +} + +/** +Write `a mod n` in `res`. + + The argument a is meant to be a 8192-bit bignum, i.e. uint32_t[256]. + The argument n and the outparam res are meant to be 4096-bit bignums, i.e. uint32_t[128]. + + The function returns false if any of the following preconditions are violated, + true otherwise. 
+ ā€¢ 1 < n + ā€¢ n % 2 = 1 +*/ +pub fn r#mod(n: &[u32], a: &[u32], res: &mut [u32]) -> bool { + let mut one: [u32; 128] = [0u32; 128usize]; + ((&mut one)[0usize..128usize]).copy_from_slice(&[0u32; 128usize]); + (&mut one)[0usize] = 1u32; + let bit0: u32 = n[0usize] & 1u32; + let m0: u32 = 0u32.wrapping_sub(bit0); + let mut acc: [u32; 1] = [0u32; 1usize]; + for i in 0u32..128u32 { + let beq: u32 = fstar::uint32::eq_mask((&one)[i as usize], n[i as usize]); + let blt: u32 = !fstar::uint32::gte_mask((&one)[i as usize], n[i as usize]); + (&mut acc)[0usize] = beq & (&acc)[0usize] | !beq & blt + } + let m1: u32 = (&acc)[0usize]; + let is_valid_m: u32 = m0 & m1; + let nBits: u32 = 32u32.wrapping_mul(super::bignum_base::bn_get_top_index_u32(128u32, n)); + if is_valid_m == 0xFFFFFFFFu32 { + let mut r2: [u32; 128] = [0u32; 128usize]; + super::bignum4096_32::precompr2(nBits, n, &mut r2); + let mu: u32 = super::bignum::mod_inv_uint32(n[0usize]); + super::bignum4096_32::bn_slow_precomp(n, mu, &r2, a, res) + } else { + (res[0usize..128usize]).copy_from_slice(&[0u32; 128usize]) + }; + is_valid_m == 0xFFFFFFFFu32 +} + +fn exp_check(n: &[u32], a: &[u32], bBits: u32, b: &[u32]) -> u32 { + let mut one: [u32; 128] = [0u32; 128usize]; + ((&mut one)[0usize..128usize]).copy_from_slice(&[0u32; 128usize]); + (&mut one)[0usize] = 1u32; + let bit0: u32 = n[0usize] & 1u32; + let m0: u32 = 0u32.wrapping_sub(bit0); + let mut acc: [u32; 1] = [0u32; 1usize]; + for i in 0u32..128u32 { + let beq: u32 = fstar::uint32::eq_mask((&one)[i as usize], n[i as usize]); + let blt: u32 = !fstar::uint32::gte_mask((&one)[i as usize], n[i as usize]); + (&mut acc)[0usize] = beq & (&acc)[0usize] | !beq & blt + } + let m1: u32 = (&acc)[0usize]; + let m00: u32 = m0 & m1; + let bLen: u32 = if bBits == 0u32 { + 1u32 + } else { + bBits + .wrapping_sub(1u32) + .wrapping_div(32u32) + .wrapping_add(1u32) + }; + let m10: u32 = if bBits < 32u32.wrapping_mul(bLen) { + let mut b2: Box<[u32]> = vec![0u32; bLen as 
usize].into_boxed_slice(); + let i: u32 = bBits.wrapping_div(32u32); + let j: u32 = bBits.wrapping_rem(32u32); + (&mut b2)[i as usize] = (&b2)[i as usize] | 1u32.wrapping_shl(j); + let mut acc0: [u32; 1] = [0u32; 1usize]; + for i0 in 0u32..bLen { + let beq: u32 = fstar::uint32::eq_mask(b[i0 as usize], (&b2)[i0 as usize]); + let blt: u32 = !fstar::uint32::gte_mask(b[i0 as usize], (&b2)[i0 as usize]); + (&mut acc0)[0usize] = beq & (&acc0)[0usize] | !beq & blt + } + let res: u32 = (&acc0)[0usize]; + res + } else { + 0xFFFFFFFFu32 + }; + let mut acc0: [u32; 1] = [0u32; 1usize]; + for i in 0u32..128u32 { + let beq: u32 = fstar::uint32::eq_mask(a[i as usize], n[i as usize]); + let blt: u32 = !fstar::uint32::gte_mask(a[i as usize], n[i as usize]); + (&mut acc0)[0usize] = beq & (&acc0)[0usize] | !beq & blt + } + let m2: u32 = (&acc0)[0usize]; + let m: u32 = m10 & m2; + m00 & m +} + +#[inline] +fn exp_vartime_precomp( + n: &[u32], + mu: u32, + r2: &[u32], + a: &[u32], + bBits: u32, + b: &[u32], + res: &mut [u32], +) { + if bBits < 200u32 { + let mut aM: [u32; 128] = [0u32; 128usize]; + super::bignum4096_32::to(n, mu, r2, a, &mut aM); + let mut resM: [u32; 128] = [0u32; 128usize]; + let mut ctx: [u32; 256] = [0u32; 256usize]; + ((&mut ctx)[0usize..128usize]).copy_from_slice(&n[0usize..128usize]); + ((&mut ctx)[128usize..128usize + 128usize]).copy_from_slice(&r2[0usize..128usize]); + let ctx_n: (&[u32], &[u32]) = ctx.split_at(0usize); + let ctx_r2: (&[u32], &[u32]) = ctx_n.1.split_at(128usize); + super::bignum4096_32::from(ctx_r2.0, mu, ctx_r2.1, &mut resM); + lowstar::ignore::ignore::<&[u32]>(&ctx); + for i in 0u32..bBits { + let i1: u32 = i.wrapping_div(32u32); + let j: u32 = i.wrapping_rem(32u32); + let tmp: u32 = b[i1 as usize]; + let bit: u32 = tmp.wrapping_shr(j) & 1u32; + if bit != 0u32 { + let mut aM_copy: [u32; 128] = [0u32; 128usize]; + ((&mut aM_copy)[0usize..128usize]).copy_from_slice(&(&resM)[0usize..128usize]); + let ctx_n0: (&[u32], &[u32]) = 
ctx.split_at(0usize); + super::bignum4096_32::amont_mul(ctx_n0.1, mu, &aM_copy, &aM, &mut resM); + lowstar::ignore::ignore::<&[u32]>(&ctx) + }; + let mut aM_copy: [u32; 128] = [0u32; 128usize]; + ((&mut aM_copy)[0usize..128usize]).copy_from_slice(&(&aM)[0usize..128usize]); + let ctx_n0: (&[u32], &[u32]) = ctx.split_at(0usize); + super::bignum4096_32::amont_sqr(ctx_n0.1, mu, &aM_copy, &mut aM); + lowstar::ignore::ignore::<&[u32]>(&ctx) + } + super::bignum4096_32::from(n, mu, &resM, res) + } else { + let mut aM: [u32; 128] = [0u32; 128usize]; + super::bignum4096_32::to(n, mu, r2, a, &mut aM); + let mut resM: [u32; 128] = [0u32; 128usize]; + let bLen: u32 = if bBits == 0u32 { + 1u32 + } else { + bBits + .wrapping_sub(1u32) + .wrapping_div(32u32) + .wrapping_add(1u32) + }; + let mut ctx: [u32; 256] = [0u32; 256usize]; + ((&mut ctx)[0usize..128usize]).copy_from_slice(&n[0usize..128usize]); + ((&mut ctx)[128usize..128usize + 128usize]).copy_from_slice(&r2[0usize..128usize]); + let mut table: [u32; 2048] = [0u32; 2048usize]; + let mut tmp: [u32; 128] = [0u32; 128usize]; + let t0: (&mut [u32], &mut [u32]) = table.split_at_mut(0usize); + let t1: (&mut [u32], &mut [u32]) = t0.1.split_at_mut(128usize); + let ctx_n: (&[u32], &[u32]) = ctx.split_at(0usize); + let ctx_r2: (&[u32], &[u32]) = ctx_n.1.split_at(128usize); + super::bignum4096_32::from(ctx_r2.0, mu, ctx_r2.1, t1.0); + lowstar::ignore::ignore::<&[u32]>(&ctx); + (t1.1[0usize..128usize]).copy_from_slice(&(&aM)[0usize..128usize]); + lowstar::ignore::ignore::<&[u32]>(&table); + krml::unroll_for!(7, "i", 0u32, 1u32, { + let t11: (&[u32], &[u32]) = + table.split_at(i.wrapping_add(1u32).wrapping_mul(128u32) as usize); + let mut aM_copy: [u32; 128] = [0u32; 128usize]; + ((&mut aM_copy)[0usize..128usize]).copy_from_slice(&t11.1[0usize..128usize]); + let ctx_n0: (&[u32], &[u32]) = ctx.split_at(0usize); + super::bignum4096_32::amont_sqr(ctx_n0.1, mu, &aM_copy, &mut tmp); + lowstar::ignore::ignore::<&[u32]>(&ctx); + ((&mut 
table)[2u32.wrapping_mul(i).wrapping_add(2u32).wrapping_mul(128u32) as usize + ..2u32.wrapping_mul(i).wrapping_add(2u32).wrapping_mul(128u32) as usize + + 128usize]) + .copy_from_slice(&(&tmp)[0usize..128usize]); + let t2: (&[u32], &[u32]) = table + .split_at(2u32.wrapping_mul(i).wrapping_add(2u32).wrapping_mul(128u32) as usize); + let mut aM_copy0: [u32; 128] = [0u32; 128usize]; + ((&mut aM_copy0)[0usize..128usize]).copy_from_slice(&(&aM)[0usize..128usize]); + let ctx_n1: (&[u32], &[u32]) = ctx.split_at(0usize); + super::bignum4096_32::amont_mul(ctx_n1.1, mu, &aM_copy0, t2.1, &mut tmp); + lowstar::ignore::ignore::<&[u32]>(&ctx); + ((&mut table)[2u32.wrapping_mul(i).wrapping_add(3u32).wrapping_mul(128u32) as usize + ..2u32.wrapping_mul(i).wrapping_add(3u32).wrapping_mul(128u32) as usize + 128usize]) + .copy_from_slice(&(&tmp)[0usize..128usize]) + }); + if bBits.wrapping_rem(4u32) != 0u32 { + let i: u32 = bBits.wrapping_div(4u32).wrapping_mul(4u32); + let bits_c: u32 = super::bignum_base::bn_get_bits_u32(bLen, b, i, 4u32); + let bits_l32: u32 = bits_c; + let a_bits_l: (&[u32], &[u32]) = table.split_at(bits_l32.wrapping_mul(128u32) as usize); + ((&mut resM)[0usize..128usize]).copy_from_slice(&a_bits_l.1[0usize..128usize]) + } else { + let ctx_n0: (&[u32], &[u32]) = ctx.split_at(0usize); + let ctx_r20: (&[u32], &[u32]) = ctx_n0.1.split_at(128usize); + super::bignum4096_32::from(ctx_r20.0, mu, ctx_r20.1, &mut resM); + lowstar::ignore::ignore::<&[u32]>(&ctx) + }; + let mut tmp0: [u32; 128] = [0u32; 128usize]; + for i in 0u32..bBits.wrapping_div(4u32) { + krml::unroll_for!(4, "_i", 0u32, 1u32, { + let mut aM_copy: [u32; 128] = [0u32; 128usize]; + ((&mut aM_copy)[0usize..128usize]).copy_from_slice(&(&resM)[0usize..128usize]); + let ctx_n0: (&[u32], &[u32]) = ctx.split_at(0usize); + super::bignum4096_32::amont_sqr(ctx_n0.1, mu, &aM_copy, &mut resM); + lowstar::ignore::ignore::<&[u32]>(&ctx) + }); + let k: u32 = bBits + .wrapping_sub(bBits.wrapping_rem(4u32)) + 
.wrapping_sub(4u32.wrapping_mul(i)) + .wrapping_sub(4u32); + let bits_l: u32 = super::bignum_base::bn_get_bits_u32(bLen, b, k, 4u32); + lowstar::ignore::ignore::<&[u32]>(&table); + let bits_l32: u32 = bits_l; + let a_bits_l: (&[u32], &[u32]) = table.split_at(bits_l32.wrapping_mul(128u32) as usize); + ((&mut tmp0)[0usize..128usize]).copy_from_slice(&a_bits_l.1[0usize..128usize]); + let mut aM_copy: [u32; 128] = [0u32; 128usize]; + ((&mut aM_copy)[0usize..128usize]).copy_from_slice(&(&resM)[0usize..128usize]); + let ctx_n0: (&[u32], &[u32]) = ctx.split_at(0usize); + super::bignum4096_32::amont_mul(ctx_n0.1, mu, &aM_copy, &tmp0, &mut resM); + lowstar::ignore::ignore::<&[u32]>(&ctx) + } + super::bignum4096_32::from(n, mu, &resM, res) + } +} + +#[inline] +fn exp_consttime_precomp( + n: &[u32], + mu: u32, + r2: &[u32], + a: &[u32], + bBits: u32, + b: &[u32], + res: &mut [u32], +) { + if bBits < 200u32 { + let mut aM: [u32; 128] = [0u32; 128usize]; + super::bignum4096_32::to(n, mu, r2, a, &mut aM); + let mut resM: [u32; 128] = [0u32; 128usize]; + let mut ctx: [u32; 256] = [0u32; 256usize]; + ((&mut ctx)[0usize..128usize]).copy_from_slice(&n[0usize..128usize]); + ((&mut ctx)[128usize..128usize + 128usize]).copy_from_slice(&r2[0usize..128usize]); + let mut sw: [u32; 1] = [0u32; 1usize]; + let ctx_n: (&[u32], &[u32]) = ctx.split_at(0usize); + let ctx_r2: (&[u32], &[u32]) = ctx_n.1.split_at(128usize); + super::bignum4096_32::from(ctx_r2.0, mu, ctx_r2.1, &mut resM); + lowstar::ignore::ignore::<&[u32]>(&ctx); + for i in 0u32..bBits { + let i1: u32 = bBits.wrapping_sub(i).wrapping_sub(1u32).wrapping_div(32u32); + let j: u32 = bBits.wrapping_sub(i).wrapping_sub(1u32).wrapping_rem(32u32); + let tmp: u32 = b[i1 as usize]; + let bit: u32 = tmp.wrapping_shr(j) & 1u32; + let sw1: u32 = bit ^ (&sw)[0usize]; + for i0 in 0u32..128u32 { + let dummy: u32 = + 0u32.wrapping_sub(sw1) & ((&resM)[i0 as usize] ^ (&aM)[i0 as usize]); + (&mut resM)[i0 as usize] = (&resM)[i0 as usize] ^ dummy; + 
(&mut aM)[i0 as usize] = (&aM)[i0 as usize] ^ dummy + } + let mut aM_copy: [u32; 128] = [0u32; 128usize]; + ((&mut aM_copy)[0usize..128usize]).copy_from_slice(&(&aM)[0usize..128usize]); + let ctx_n0: (&[u32], &[u32]) = ctx.split_at(0usize); + super::bignum4096_32::amont_mul(ctx_n0.1, mu, &aM_copy, &resM, &mut aM); + lowstar::ignore::ignore::<&[u32]>(&ctx); + let mut aM_copy0: [u32; 128] = [0u32; 128usize]; + ((&mut aM_copy0)[0usize..128usize]).copy_from_slice(&(&resM)[0usize..128usize]); + let ctx_n1: (&[u32], &[u32]) = ctx.split_at(0usize); + super::bignum4096_32::amont_sqr(ctx_n1.1, mu, &aM_copy0, &mut resM); + lowstar::ignore::ignore::<&[u32]>(&ctx); + (&mut sw)[0usize] = bit + } + let sw0: u32 = (&sw)[0usize]; + for i in 0u32..128u32 { + let dummy: u32 = 0u32.wrapping_sub(sw0) & ((&resM)[i as usize] ^ (&aM)[i as usize]); + (&mut resM)[i as usize] = (&resM)[i as usize] ^ dummy; + (&mut aM)[i as usize] = (&aM)[i as usize] ^ dummy + } + super::bignum4096_32::from(n, mu, &resM, res) + } else { + let mut aM: [u32; 128] = [0u32; 128usize]; + super::bignum4096_32::to(n, mu, r2, a, &mut aM); + let mut resM: [u32; 128] = [0u32; 128usize]; + let bLen: u32 = if bBits == 0u32 { + 1u32 + } else { + bBits + .wrapping_sub(1u32) + .wrapping_div(32u32) + .wrapping_add(1u32) + }; + let mut ctx: [u32; 256] = [0u32; 256usize]; + ((&mut ctx)[0usize..128usize]).copy_from_slice(&n[0usize..128usize]); + ((&mut ctx)[128usize..128usize + 128usize]).copy_from_slice(&r2[0usize..128usize]); + let mut table: [u32; 2048] = [0u32; 2048usize]; + let mut tmp: [u32; 128] = [0u32; 128usize]; + let t0: (&mut [u32], &mut [u32]) = table.split_at_mut(0usize); + let t1: (&mut [u32], &mut [u32]) = t0.1.split_at_mut(128usize); + let ctx_n: (&[u32], &[u32]) = ctx.split_at(0usize); + let ctx_r2: (&[u32], &[u32]) = ctx_n.1.split_at(128usize); + super::bignum4096_32::from(ctx_r2.0, mu, ctx_r2.1, t1.0); + lowstar::ignore::ignore::<&[u32]>(&ctx); + 
(t1.1[0usize..128usize]).copy_from_slice(&(&aM)[0usize..128usize]); + lowstar::ignore::ignore::<&[u32]>(&table); + krml::unroll_for!(7, "i", 0u32, 1u32, { + let t11: (&[u32], &[u32]) = + table.split_at(i.wrapping_add(1u32).wrapping_mul(128u32) as usize); + let mut aM_copy: [u32; 128] = [0u32; 128usize]; + ((&mut aM_copy)[0usize..128usize]).copy_from_slice(&t11.1[0usize..128usize]); + let ctx_n0: (&[u32], &[u32]) = ctx.split_at(0usize); + super::bignum4096_32::amont_sqr(ctx_n0.1, mu, &aM_copy, &mut tmp); + lowstar::ignore::ignore::<&[u32]>(&ctx); + ((&mut table)[2u32.wrapping_mul(i).wrapping_add(2u32).wrapping_mul(128u32) as usize + ..2u32.wrapping_mul(i).wrapping_add(2u32).wrapping_mul(128u32) as usize + + 128usize]) + .copy_from_slice(&(&tmp)[0usize..128usize]); + let t2: (&[u32], &[u32]) = table + .split_at(2u32.wrapping_mul(i).wrapping_add(2u32).wrapping_mul(128u32) as usize); + let mut aM_copy0: [u32; 128] = [0u32; 128usize]; + ((&mut aM_copy0)[0usize..128usize]).copy_from_slice(&(&aM)[0usize..128usize]); + let ctx_n1: (&[u32], &[u32]) = ctx.split_at(0usize); + super::bignum4096_32::amont_mul(ctx_n1.1, mu, &aM_copy0, t2.1, &mut tmp); + lowstar::ignore::ignore::<&[u32]>(&ctx); + ((&mut table)[2u32.wrapping_mul(i).wrapping_add(3u32).wrapping_mul(128u32) as usize + ..2u32.wrapping_mul(i).wrapping_add(3u32).wrapping_mul(128u32) as usize + 128usize]) + .copy_from_slice(&(&tmp)[0usize..128usize]) + }); + if bBits.wrapping_rem(4u32) != 0u32 { + let i: u32 = bBits.wrapping_div(4u32).wrapping_mul(4u32); + let bits_c: u32 = super::bignum_base::bn_get_bits_u32(bLen, b, i, 4u32); + ((&mut resM)[0usize..128usize]) + .copy_from_slice(&(&(&table)[0usize..] 
as &[u32])[0usize..128usize]); + krml::unroll_for!(15, "i0", 0u32, 1u32, { + let c: u32 = fstar::uint32::eq_mask(bits_c, i0.wrapping_add(1u32)); + let res_j: (&[u32], &[u32]) = + table.split_at(i0.wrapping_add(1u32).wrapping_mul(128u32) as usize); + for i1 in 0u32..128u32 { + let x: u32 = c & res_j.1[i1 as usize] | !c & (&resM)[i1 as usize]; + let os: (&mut [u32], &mut [u32]) = resM.split_at_mut(0usize); + os.1[i1 as usize] = x + } + }) + } else { + let ctx_n0: (&[u32], &[u32]) = ctx.split_at(0usize); + let ctx_r20: (&[u32], &[u32]) = ctx_n0.1.split_at(128usize); + super::bignum4096_32::from(ctx_r20.0, mu, ctx_r20.1, &mut resM); + lowstar::ignore::ignore::<&[u32]>(&ctx) + }; + let mut tmp0: [u32; 128] = [0u32; 128usize]; + for i in 0u32..bBits.wrapping_div(4u32) { + krml::unroll_for!(4, "_i", 0u32, 1u32, { + let mut aM_copy: [u32; 128] = [0u32; 128usize]; + ((&mut aM_copy)[0usize..128usize]).copy_from_slice(&(&resM)[0usize..128usize]); + let ctx_n0: (&[u32], &[u32]) = ctx.split_at(0usize); + super::bignum4096_32::amont_sqr(ctx_n0.1, mu, &aM_copy, &mut resM); + lowstar::ignore::ignore::<&[u32]>(&ctx) + }); + let k: u32 = bBits + .wrapping_sub(bBits.wrapping_rem(4u32)) + .wrapping_sub(4u32.wrapping_mul(i)) + .wrapping_sub(4u32); + let bits_l: u32 = super::bignum_base::bn_get_bits_u32(bLen, b, k, 4u32); + lowstar::ignore::ignore::<&[u32]>(&table); + ((&mut tmp0)[0usize..128usize]) + .copy_from_slice(&(&(&table)[0usize..] 
as &[u32])[0usize..128usize]); + krml::unroll_for!(15, "i0", 0u32, 1u32, { + let c: u32 = fstar::uint32::eq_mask(bits_l, i0.wrapping_add(1u32)); + let res_j: (&[u32], &[u32]) = + table.split_at(i0.wrapping_add(1u32).wrapping_mul(128u32) as usize); + for i1 in 0u32..128u32 { + let x: u32 = c & res_j.1[i1 as usize] | !c & (&tmp0)[i1 as usize]; + let os: (&mut [u32], &mut [u32]) = tmp0.split_at_mut(0usize); + os.1[i1 as usize] = x + } + }); + let mut aM_copy: [u32; 128] = [0u32; 128usize]; + ((&mut aM_copy)[0usize..128usize]).copy_from_slice(&(&resM)[0usize..128usize]); + let ctx_n0: (&[u32], &[u32]) = ctx.split_at(0usize); + super::bignum4096_32::amont_mul(ctx_n0.1, mu, &aM_copy, &tmp0, &mut resM); + lowstar::ignore::ignore::<&[u32]>(&ctx) + } + super::bignum4096_32::from(n, mu, &resM, res) + } +} + +#[inline] +fn exp_vartime(nBits: u32, n: &[u32], a: &[u32], bBits: u32, b: &[u32], res: &mut [u32]) { + let mut r2: [u32; 128] = [0u32; 128usize]; + super::bignum4096_32::precompr2(nBits, n, &mut r2); + let mu: u32 = super::bignum::mod_inv_uint32(n[0usize]); + super::bignum4096_32::exp_vartime_precomp(n, mu, &r2, a, bBits, b, res) +} + +#[inline] +fn exp_consttime(nBits: u32, n: &[u32], a: &[u32], bBits: u32, b: &[u32], res: &mut [u32]) { + let mut r2: [u32; 128] = [0u32; 128usize]; + super::bignum4096_32::precompr2(nBits, n, &mut r2); + let mu: u32 = super::bignum::mod_inv_uint32(n[0usize]); + super::bignum4096_32::exp_consttime_precomp(n, mu, &r2, a, bBits, b, res) +} + +/** +Write `a ^ b mod n` in `res`. + + The arguments a, n and the outparam res are meant to be 4096-bit bignums, i.e. uint32_t[128]. + + The argument b is a bignum of any size, and bBits is an upper bound on the + number of significant bits of b. A tighter bound results in faster execution + time. When in doubt, the number of bits for the bignum size is always a safe + default, e.g. if b is a 4096-bit bignum, bBits should be 4096. + + The function is *NOT* constant-time on the argument b. 
See the + mod_exp_consttime_* functions for constant-time variants. + + The function returns false if any of the following preconditions are violated, + true otherwise. + ā€¢ n % 2 = 1 + ā€¢ 1 < n + ā€¢ b < pow2 bBits + ā€¢ a < n +*/ +pub fn mod_exp_vartime(n: &[u32], a: &[u32], bBits: u32, b: &[u32], res: &mut [u32]) -> bool { + let is_valid_m: u32 = super::bignum4096_32::exp_check(n, a, bBits, b); + let nBits: u32 = 32u32.wrapping_mul(super::bignum_base::bn_get_top_index_u32(128u32, n)); + if is_valid_m == 0xFFFFFFFFu32 { + super::bignum4096_32::exp_vartime(nBits, n, a, bBits, b, res) + } else { + (res[0usize..128usize]).copy_from_slice(&[0u32; 128usize]) + }; + is_valid_m == 0xFFFFFFFFu32 +} + +/** +Write `a ^ b mod n` in `res`. + + The arguments a, n and the outparam res are meant to be 4096-bit bignums, i.e. uint32_t[128]. + + The argument b is a bignum of any size, and bBits is an upper bound on the + number of significant bits of b. A tighter bound results in faster execution + time. When in doubt, the number of bits for the bignum size is always a safe + default, e.g. if b is a 4096-bit bignum, bBits should be 4096. + + This function is constant-time over its argument b, at the cost of a slower + execution time than mod_exp_vartime. + + The function returns false if any of the following preconditions are violated, + true otherwise. + ā€¢ n % 2 = 1 + ā€¢ 1 < n + ā€¢ b < pow2 bBits + ā€¢ a < n +*/ +pub fn mod_exp_consttime(n: &[u32], a: &[u32], bBits: u32, b: &[u32], res: &mut [u32]) -> bool { + let is_valid_m: u32 = super::bignum4096_32::exp_check(n, a, bBits, b); + let nBits: u32 = 32u32.wrapping_mul(super::bignum_base::bn_get_top_index_u32(128u32, n)); + if is_valid_m == 0xFFFFFFFFu32 { + super::bignum4096_32::exp_consttime(nBits, n, a, bBits, b, res) + } else { + (res[0usize..128usize]).copy_from_slice(&[0u32; 128usize]) + }; + is_valid_m == 0xFFFFFFFFu32 +} + +/** +Write `a ^ (-1) mod n` in `res`. 
+ + The arguments a, n and the outparam res are meant to be 4096-bit bignums, i.e. uint32_t[128]. + + Before calling this function, the caller will need to ensure that the following + preconditions are observed. + ā€¢ n is a prime + + The function returns false if any of the following preconditions are violated, true otherwise. + ā€¢ n % 2 = 1 + ā€¢ 1 < n + ā€¢ 0 < a + ā€¢ a < n +*/ +pub fn mod_inv_prime_vartime(n: &[u32], a: &[u32], res: &mut [u32]) -> bool { + let mut one: [u32; 128] = [0u32; 128usize]; + ((&mut one)[0usize..128usize]).copy_from_slice(&[0u32; 128usize]); + (&mut one)[0usize] = 1u32; + let bit0: u32 = n[0usize] & 1u32; + let m0: u32 = 0u32.wrapping_sub(bit0); + let mut acc: [u32; 1] = [0u32; 1usize]; + for i in 0u32..128u32 { + let beq: u32 = fstar::uint32::eq_mask((&one)[i as usize], n[i as usize]); + let blt: u32 = !fstar::uint32::gte_mask((&one)[i as usize], n[i as usize]); + (&mut acc)[0usize] = beq & (&acc)[0usize] | !beq & blt + } + let m1: u32 = (&acc)[0usize]; + let m00: u32 = m0 & m1; + let bn_zero: [u32; 128] = [0u32; 128usize]; + let mut mask: [u32; 1] = [0xFFFFFFFFu32; 1usize]; + for i in 0u32..128u32 { + let uu____0: u32 = fstar::uint32::eq_mask(a[i as usize], (&bn_zero)[i as usize]); + (&mut mask)[0usize] = uu____0 & (&mask)[0usize] + } + let mask1: u32 = (&mask)[0usize]; + let res1: u32 = mask1; + let m10: u32 = res1; + let mut acc0: [u32; 1] = [0u32; 1usize]; + for i in 0u32..128u32 { + let beq: u32 = fstar::uint32::eq_mask(a[i as usize], n[i as usize]); + let blt: u32 = !fstar::uint32::gte_mask(a[i as usize], n[i as usize]); + (&mut acc0)[0usize] = beq & (&acc0)[0usize] | !beq & blt + } + let m2: u32 = (&acc0)[0usize]; + let is_valid_m: u32 = m00 & !m10 & m2; + let nBits: u32 = 32u32.wrapping_mul(super::bignum_base::bn_get_top_index_u32(128u32, n)); + if is_valid_m == 0xFFFFFFFFu32 { + let mut n2: [u32; 128] = [0u32; 128usize]; + let c0: u32 = lib::inttypes_intrinsics::sub_borrow_u32( + 0u32, + n[0usize], + 2u32, + &mut (&mut 
n2)[0usize..], + ); + let a1: (&[u32], &[u32]) = n.split_at(1usize); + let res10: (&mut [u32], &mut [u32]) = n2.split_at_mut(1usize); + let mut c: [u32; 1] = [c0; 1usize]; + krml::unroll_for!(31, "i", 0u32, 1u32, { + let t1: u32 = a1.1[4u32.wrapping_mul(i) as usize]; + let res_i: (&mut [u32], &mut [u32]) = + res10.1.split_at_mut(4u32.wrapping_mul(i) as usize); + (&mut c)[0usize] = + lib::inttypes_intrinsics::sub_borrow_u32((&c)[0usize], t1, 0u32, res_i.1); + let t10: u32 = a1.1[4u32.wrapping_mul(i).wrapping_add(1u32) as usize]; + let res_i0: (&mut [u32], &mut [u32]) = res_i.1.split_at_mut(1usize); + (&mut c)[0usize] = + lib::inttypes_intrinsics::sub_borrow_u32((&c)[0usize], t10, 0u32, res_i0.1); + let t11: u32 = a1.1[4u32.wrapping_mul(i).wrapping_add(2u32) as usize]; + let res_i1: (&mut [u32], &mut [u32]) = res_i0.1.split_at_mut(1usize); + (&mut c)[0usize] = + lib::inttypes_intrinsics::sub_borrow_u32((&c)[0usize], t11, 0u32, res_i1.1); + let t12: u32 = a1.1[4u32.wrapping_mul(i).wrapping_add(3u32) as usize]; + let res_i2: (&mut [u32], &mut [u32]) = res_i1.1.split_at_mut(1usize); + (&mut c)[0usize] = + lib::inttypes_intrinsics::sub_borrow_u32((&c)[0usize], t12, 0u32, res_i2.1) + }); + krml::unroll_for!(3, "i", 124u32, 1u32, { + let t1: u32 = a1.1[i as usize]; + let res_i: (&mut [u32], &mut [u32]) = res10.1.split_at_mut(i as usize); + (&mut c)[0usize] = + lib::inttypes_intrinsics::sub_borrow_u32((&c)[0usize], t1, 0u32, res_i.1) + }); + let c1: u32 = (&c)[0usize]; + let c2: u32 = c1; + lowstar::ignore::ignore::(c2); + super::bignum4096_32::exp_vartime(nBits, n, a, 4096u32, &n2, res) + } else { + (res[0usize..128usize]).copy_from_slice(&[0u32; 128usize]) + }; + is_valid_m == 0xFFFFFFFFu32 +} + +/** +Heap-allocate and initialize a montgomery context. + + The argument n is meant to be a 4096-bit bignum, i.e. uint32_t[128]. + + Before calling this function, the caller will need to ensure that the following + preconditions are observed. 
+ ā€¢ n % 2 = 1 + ā€¢ 1 < n + + The caller will need to call Hacl_Bignum4096_mont_ctx_free on the return value + to avoid memory leaks. +*/ +pub fn mont_ctx_init(n: &[u32]) -> Box<[super::bignum::bn_mont_ctx_u32]> { + let mut r2: Box<[u32]> = vec![0u32; 128usize].into_boxed_slice(); + let mut n1: Box<[u32]> = vec![0u32; 128usize].into_boxed_slice(); + let r21: &mut [u32] = &mut r2; + let n11: &mut [u32] = &mut n1; + (n11[0usize..128usize]).copy_from_slice(&n[0usize..128usize]); + let nBits: u32 = 32u32.wrapping_mul(super::bignum_base::bn_get_top_index_u32(128u32, n)); + super::bignum4096_32::precompr2(nBits, n, r21); + let mu: u32 = super::bignum::mod_inv_uint32(n[0usize]); + let res: super::bignum::bn_mont_ctx_u32 = super::bignum::bn_mont_ctx_u32 { + len: 128u32, + n: (*n11).into(), + mu, + r2: (*r21).into(), + }; + let buf: Box<[super::bignum::bn_mont_ctx_u32]> = vec![res].into_boxed_slice(); + buf +} + +/** +Write `a mod n` in `res`. + + The argument a is meant to be a 8192-bit bignum, i.e. uint32_t[256]. + The outparam res is meant to be a 4096-bit bignum, i.e. uint32_t[128]. + The argument k is a montgomery context obtained through Hacl_Bignum4096_mont_ctx_init. +*/ +pub fn mod_precomp(k: &[super::bignum::bn_mont_ctx_u32], a: &[u32], res: &mut [u32]) { + let n: &[u32] = &(k[0usize]).n; + let mu: u32 = (k[0usize]).mu; + let r2: &[u32] = &(k[0usize]).r2; + super::bignum4096_32::bn_slow_precomp(n, mu, r2, a, res) +} + +/** +Write `a ^ b mod n` in `res`. + + The arguments a and the outparam res are meant to be 4096-bit bignums, i.e. uint32_t[128]. + The argument k is a montgomery context obtained through Hacl_Bignum4096_mont_ctx_init. + + The argument b is a bignum of any size, and bBits is an upper bound on the + number of significant bits of b. A tighter bound results in faster execution + time. When in doubt, the number of bits for the bignum size is always a safe + default, e.g. if b is a 4096-bit bignum, bBits should be 4096. 
+ + The function is *NOT* constant-time on the argument b. See the + mod_exp_consttime_* functions for constant-time variants. + + Before calling this function, the caller will need to ensure that the following + preconditions are observed. + ā€¢ b < pow2 bBits + ā€¢ a < n +*/ +pub fn mod_exp_vartime_precomp( + k: &[super::bignum::bn_mont_ctx_u32], + a: &[u32], + bBits: u32, + b: &[u32], + res: &mut [u32], +) { + let n: &[u32] = &(k[0usize]).n; + let mu: u32 = (k[0usize]).mu; + let r2: &[u32] = &(k[0usize]).r2; + super::bignum4096_32::exp_vartime_precomp(n, mu, r2, a, bBits, b, res) +} + +/** +Write `a ^ b mod n` in `res`. + + The arguments a and the outparam res are meant to be 4096-bit bignums, i.e. uint32_t[128]. + The argument k is a montgomery context obtained through Hacl_Bignum4096_mont_ctx_init. + + The argument b is a bignum of any size, and bBits is an upper bound on the + number of significant bits of b. A tighter bound results in faster execution + time. When in doubt, the number of bits for the bignum size is always a safe + default, e.g. if b is a 4096-bit bignum, bBits should be 4096. + + This function is constant-time over its argument b, at the cost of a slower + execution time than mod_exp_vartime_*. + + Before calling this function, the caller will need to ensure that the following + preconditions are observed. + ā€¢ b < pow2 bBits + ā€¢ a < n +*/ +pub fn mod_exp_consttime_precomp( + k: &[super::bignum::bn_mont_ctx_u32], + a: &[u32], + bBits: u32, + b: &[u32], + res: &mut [u32], +) { + let n: &[u32] = &(k[0usize]).n; + let mu: u32 = (k[0usize]).mu; + let r2: &[u32] = &(k[0usize]).r2; + super::bignum4096_32::exp_consttime_precomp(n, mu, r2, a, bBits, b, res) +} + +/** +Write `a ^ (-1) mod n` in `res`. + + The argument a and the outparam res are meant to be 4096-bit bignums, i.e. uint32_t[128]. + The argument k is a montgomery context obtained through Hacl_Bignum4096_mont_ctx_init. 
+ + Before calling this function, the caller will need to ensure that the following + preconditions are observed. + ā€¢ n is a prime + ā€¢ 0 < a + ā€¢ a < n +*/ +pub fn mod_inv_prime_vartime_precomp( + k: &[super::bignum::bn_mont_ctx_u32], + a: &[u32], + res: &mut [u32], +) { + let n: &[u32] = &(k[0usize]).n; + let mu: u32 = (k[0usize]).mu; + let r2: &[u32] = &(k[0usize]).r2; + let mut n2: [u32; 128] = [0u32; 128usize]; + let c0: u32 = + lib::inttypes_intrinsics::sub_borrow_u32(0u32, n[0usize], 2u32, &mut (&mut n2)[0usize..]); + let a1: (&[u32], &[u32]) = n.split_at(1usize); + let res1: (&mut [u32], &mut [u32]) = n2.split_at_mut(1usize); + let mut c: [u32; 1] = [c0; 1usize]; + krml::unroll_for!(31, "i", 0u32, 1u32, { + let t1: u32 = a1.1[4u32.wrapping_mul(i) as usize]; + let res_i: (&mut [u32], &mut [u32]) = res1.1.split_at_mut(4u32.wrapping_mul(i) as usize); + (&mut c)[0usize] = + lib::inttypes_intrinsics::sub_borrow_u32((&c)[0usize], t1, 0u32, res_i.1); + let t10: u32 = a1.1[4u32.wrapping_mul(i).wrapping_add(1u32) as usize]; + let res_i0: (&mut [u32], &mut [u32]) = res_i.1.split_at_mut(1usize); + (&mut c)[0usize] = + lib::inttypes_intrinsics::sub_borrow_u32((&c)[0usize], t10, 0u32, res_i0.1); + let t11: u32 = a1.1[4u32.wrapping_mul(i).wrapping_add(2u32) as usize]; + let res_i1: (&mut [u32], &mut [u32]) = res_i0.1.split_at_mut(1usize); + (&mut c)[0usize] = + lib::inttypes_intrinsics::sub_borrow_u32((&c)[0usize], t11, 0u32, res_i1.1); + let t12: u32 = a1.1[4u32.wrapping_mul(i).wrapping_add(3u32) as usize]; + let res_i2: (&mut [u32], &mut [u32]) = res_i1.1.split_at_mut(1usize); + (&mut c)[0usize] = + lib::inttypes_intrinsics::sub_borrow_u32((&c)[0usize], t12, 0u32, res_i2.1) + }); + krml::unroll_for!(3, "i", 124u32, 1u32, { + let t1: u32 = a1.1[i as usize]; + let res_i: (&mut [u32], &mut [u32]) = res1.1.split_at_mut(i as usize); + (&mut c)[0usize] = lib::inttypes_intrinsics::sub_borrow_u32((&c)[0usize], t1, 0u32, res_i.1) + }); + let c1: u32 = (&c)[0usize]; + let 
c2: u32 = c1; + lowstar::ignore::ignore::(c2); + super::bignum4096_32::exp_vartime_precomp(n, mu, r2, a, 4096u32, &n2, res) +} + +/** +Load a bid-endian bignum from memory. + + The argument b points to len bytes of valid memory. + The function returns a heap-allocated bignum of size sufficient to hold the + result of loading b, or NULL if either the allocation failed, or the amount of + required memory would exceed 4GB. + + If the return value is non-null, clients must eventually call free(3) on it to + avoid memory leaks. +*/ +pub fn new_bn_from_bytes_be(len: u32, b: &[u8]) -> Box<[u32]> { + if len == 0u32 || len.wrapping_sub(1u32).wrapping_div(4u32).wrapping_add(1u32) > 1073741823u32 { + [].into() + } else { + let mut res: Box<[u32]> = + vec![0u32; len.wrapping_sub(1u32).wrapping_div(4u32).wrapping_add(1u32) as usize] + .into_boxed_slice(); + if false { + res + } else { + let res1: &mut [u32] = &mut res; + let res2: &mut [u32] = res1; + let bnLen: u32 = len.wrapping_sub(1u32).wrapping_div(4u32).wrapping_add(1u32); + let tmpLen: u32 = 4u32.wrapping_mul(bnLen); + let mut tmp: Box<[u8]> = vec![0u8; tmpLen as usize].into_boxed_slice(); + ((&mut tmp)[tmpLen.wrapping_sub(len) as usize + ..tmpLen.wrapping_sub(len) as usize + len as usize]) + .copy_from_slice(&b[0usize..len as usize]); + for i in 0u32..bnLen { + let u: u32 = lowstar::endianness::load32_be( + &(&tmp)[bnLen.wrapping_sub(i).wrapping_sub(1u32).wrapping_mul(4u32) as usize..], + ); + let x: u32 = u; + let os: (&mut [u32], &mut [u32]) = res2.split_at_mut(0usize); + os.1[i as usize] = x + } + (*res2).into() + } + } +} + +/** +Load a little-endian bignum from memory. + + The argument b points to len bytes of valid memory. + The function returns a heap-allocated bignum of size sufficient to hold the + result of loading b, or NULL if either the allocation failed, or the amount of + required memory would exceed 4GB. 
+ + If the return value is non-null, clients must eventually call free(3) on it to + avoid memory leaks. +*/ +pub fn new_bn_from_bytes_le(len: u32, b: &[u8]) -> Box<[u32]> { + if len == 0u32 || len.wrapping_sub(1u32).wrapping_div(4u32).wrapping_add(1u32) > 1073741823u32 { + [].into() + } else { + let mut res: Box<[u32]> = + vec![0u32; len.wrapping_sub(1u32).wrapping_div(4u32).wrapping_add(1u32) as usize] + .into_boxed_slice(); + if false { + res + } else { + let res1: &mut [u32] = &mut res; + let res2: &mut [u32] = res1; + let bnLen: u32 = len.wrapping_sub(1u32).wrapping_div(4u32).wrapping_add(1u32); + let tmpLen: u32 = 4u32.wrapping_mul(bnLen); + let mut tmp: Box<[u8]> = vec![0u8; tmpLen as usize].into_boxed_slice(); + ((&mut tmp)[0usize..len as usize]).copy_from_slice(&b[0usize..len as usize]); + for i in 0u32..len.wrapping_sub(1u32).wrapping_div(4u32).wrapping_add(1u32) { + let bj: (&[u8], &[u8]) = tmp.split_at(i.wrapping_mul(4u32) as usize); + let u: u32 = lowstar::endianness::load32_le(bj.1); + let r1: u32 = u; + let x: u32 = r1; + let os: (&mut [u32], &mut [u32]) = res2.split_at_mut(0usize); + os.1[i as usize] = x + } + (*res2).into() + } + } +} + +/** +Serialize a bignum into big-endian memory. + + The argument b points to a 4096-bit bignum. + The outparam res points to 512 bytes of valid memory. +*/ +pub fn bn_to_bytes_be(b: &[u32], res: &mut [u8]) { + let tmp: [u8; 512] = [0u8; 512usize]; + lowstar::ignore::ignore::<&[u8]>(&tmp); + for i in 0u32..128u32 { + lowstar::endianness::store32_be( + &mut res[i.wrapping_mul(4u32) as usize..], + b[128u32.wrapping_sub(i).wrapping_sub(1u32) as usize], + ) + } +} + +/** +Serialize a bignum into little-endian memory. + + The argument b points to a 4096-bit bignum. + The outparam res points to 512 bytes of valid memory. 
+*/
+pub fn bn_to_bytes_le(b: &[u32], res: &mut [u8]) {
+    let tmp: [u8; 512] = [0u8; 512usize];
+    lowstar::ignore::ignore::<&[u8]>(&tmp);
+    for i in 0u32..128u32 {
+        lowstar::endianness::store32_le(&mut res[i.wrapping_mul(4u32) as usize..], b[i as usize])
+    }
+}
+
+/**
+Returns 2^32 - 1 if a < b, otherwise returns 0.
+
+ The arguments a and b are meant to be 4096-bit bignums, i.e. uint32_t[128].
+*/
+pub fn lt_mask(a: &[u32], b: &[u32]) -> u32 {
+    let mut acc: [u32; 1] = [0u32; 1usize];
+    for i in 0u32..128u32 {
+        let beq: u32 = fstar::uint32::eq_mask(a[i as usize], b[i as usize]);
+        let blt: u32 = !fstar::uint32::gte_mask(a[i as usize], b[i as usize]);
+        (&mut acc)[0usize] = beq & (&acc)[0usize] | !beq & blt
+    }
+    (&acc)[0usize]
+}
+
+/**
+Returns 2^32 - 1 if a = b, otherwise returns 0.
+
+ The arguments a and b are meant to be 4096-bit bignums, i.e. uint32_t[128].
+*/
+pub fn eq_mask(a: &[u32], b: &[u32]) -> u32 {
+    let mut mask: [u32; 1] = [0xFFFFFFFFu32; 1usize];
+    for i in 0u32..128u32 {
+        let uu____0: u32 = fstar::uint32::eq_mask(a[i as usize], b[i as usize]);
+        (&mut mask)[0usize] = uu____0 & (&mask)[0usize]
+    }
+    let mask1: u32 = (&mask)[0usize];
+    mask1
+}
diff --git a/libcrux-hacl-rs/src/bignum/bignum64.rs b/libcrux-hacl-rs/src/bignum/bignum64.rs
new file mode 100644
index 000000000..9cb581aa9
--- /dev/null
+++ b/libcrux-hacl-rs/src/bignum/bignum64.rs
@@ -0,0 +1,651 @@
+#![allow(non_snake_case)]
+#![allow(non_upper_case_globals)]
+#![allow(non_camel_case_types)]
+#![allow(unused_assignments)]
+#![allow(unreachable_patterns)]
+
+use crate::fstar;
+use crate::lowstar;
+use crate::util as lib;
+
+pub type pbn_mont_ctx_u64<'a> = &'a [super::bignum::bn_mont_ctx_u64];
+
+/**
+Write `a + b mod 2 ^ (64 * len)` in `res`.
+
+ This function returns the carry.
+
+ The arguments a, b and the outparam res are meant to be `len` limbs in size, i.e.
uint64_t[len]
+*/
+pub fn add(len: u32, a: &[u64], b: &[u64], res: &mut [u64]) -> u64 {
+    super::bignum_base::bn_add_eq_len_u64(len, a, b, res)
+}
+
+/**
+Write `a - b mod 2 ^ (64 * len)` in `res`.
+
+ This function returns the carry.
+
+ The arguments a, b and the outparam res are meant to be `len` limbs in size, i.e. uint64_t[len]
+*/
+pub fn sub(len: u32, a: &[u64], b: &[u64], res: &mut [u64]) -> u64 {
+    super::bignum_base::bn_sub_eq_len_u64(len, a, b, res)
+}
+
+/**
+Write `(a + b) mod n` in `res`.
+
+ The arguments a, b, n and the outparam res are meant to be `len` limbs in size, i.e. uint64_t[len].
+
+ Before calling this function, the caller will need to ensure that the following
+ preconditions are observed.
+ • a < n
+ • b < n
+*/
+pub fn add_mod(len: u32, n: &[u64], a: &[u64], b: &[u64], res: &mut [u64]) {
+    let mut a_copy: Box<[u64]> = vec![0u64; len as usize].into_boxed_slice();
+    let mut b_copy: Box<[u64]> = vec![0u64; len as usize].into_boxed_slice();
+    ((&mut a_copy)[0usize..len as usize]).copy_from_slice(&a[0usize..len as usize]);
+    ((&mut b_copy)[0usize..len as usize]).copy_from_slice(&b[0usize..len as usize]);
+    super::bignum::bn_add_mod_n_u64(len, n, &a_copy, &b_copy, res)
+}
+
+/**
+Write `(a - b) mod n` in `res`.
+
+ The arguments a, b, n and the outparam res are meant to be `len` limbs in size, i.e. uint64_t[len].
+
+ Before calling this function, the caller will need to ensure that the following
+ preconditions are observed.
+ • a < n
+ • b < n
+*/
+pub fn sub_mod(len: u32, n: &[u64], a: &[u64], b: &[u64], res: &mut [u64]) {
+    super::bignum::bn_sub_mod_n_u64(len, n, a, b, res)
+}
+
+/**
+Write `a * b` in `res`.
+
+ The arguments a and b are meant to be `len` limbs in size, i.e. uint64_t[len].
+ The outparam res is meant to be `2*len` limbs in size, i.e. uint64_t[2*len].
+*/ +pub fn mul(len: u32, a: &[u64], b: &[u64], res: &mut [u64]) { + let mut tmp: Box<[u64]> = vec![0u64; 4u32.wrapping_mul(len) as usize].into_boxed_slice(); + super::bignum::bn_karatsuba_mul_uint64(len, a, b, &mut tmp, res) +} + +/** +Write `a * a` in `res`. + + The argument a is meant to be `len` limbs in size, i.e. uint64_t[len]. + The outparam res is meant to be `2*len` limbs in size, i.e. uint64_t[2*len]. +*/ +pub fn sqr(len: u32, a: &[u64], res: &mut [u64]) { + let mut tmp: Box<[u64]> = vec![0u64; 4u32.wrapping_mul(len) as usize].into_boxed_slice(); + super::bignum::bn_karatsuba_sqr_uint64(len, a, &mut tmp, res) +} + +#[inline] +fn bn_slow_precomp(len: u32, n: &[u64], mu: u64, r2: &[u64], a: &[u64], res: &mut [u64]) { + let mut a_mod: Box<[u64]> = vec![0u64; len as usize].into_boxed_slice(); + let mut a1: Box<[u64]> = vec![0u64; len.wrapping_add(len) as usize].into_boxed_slice(); + ((&mut a1)[0usize..len.wrapping_add(len) as usize]) + .copy_from_slice(&a[0usize..len.wrapping_add(len) as usize]); + super::bignum::bn_almost_mont_reduction_u64(len, n, mu, &mut a1, &mut a_mod); + super::bignum::bn_to_mont_u64(len, n, mu, r2, &a_mod, res) +} + +/** +Write `a mod n` in `res`. + + The argument a is meant to be `2*len` limbs in size, i.e. uint64_t[2*len]. + The argument n and the outparam res are meant to be `len` limbs in size, i.e. uint64_t[len]. + + The function returns false if any of the following preconditions are violated, + true otherwise. 
+ ā€¢ 1 < n + ā€¢ n % 2 = 1 +*/ +pub fn r#mod(len: u32, n: &[u64], a: &[u64], res: &mut [u64]) -> bool { + let mut one: Box<[u64]> = vec![0u64; len as usize].into_boxed_slice(); + ((&mut one)[0usize..len as usize]) + .copy_from_slice(&vec![0u64; len as usize].into_boxed_slice()); + (&mut one)[0usize] = 1u64; + let bit0: u64 = n[0usize] & 1u64; + let m0: u64 = 0u64.wrapping_sub(bit0); + let mut acc: [u64; 1] = [0u64; 1usize]; + for i in 0u32..len { + let beq: u64 = fstar::uint64::eq_mask((&one)[i as usize], n[i as usize]); + let blt: u64 = !fstar::uint64::gte_mask((&one)[i as usize], n[i as usize]); + (&mut acc)[0usize] = beq & (&acc)[0usize] | !beq & blt + } + let m1: u64 = (&acc)[0usize]; + let is_valid_m: u64 = m0 & m1; + let nBits: u32 = 64u32.wrapping_mul(super::bignum_base::bn_get_top_index_u64(len, n) as u32); + if is_valid_m == 0xFFFFFFFFFFFFFFFFu64 { + let mut r2: Box<[u64]> = vec![0u64; len as usize].into_boxed_slice(); + super::bignum::bn_precomp_r2_mod_n_u64(len, nBits, n, &mut r2); + let mu: u64 = super::bignum::mod_inv_uint64(n[0usize]); + super::bignum64::bn_slow_precomp(len, n, mu, &r2, a, res) + } else { + (res[0usize..len as usize]).copy_from_slice(&vec![0u64; len as usize].into_boxed_slice()) + }; + is_valid_m == 0xFFFFFFFFFFFFFFFFu64 +} + +/** +Write `a ^ b mod n` in `res`. + + The arguments a, n and the outparam res are meant to be `len` limbs in size, i.e. uint64_t[len]. + + The argument b is a bignum of any size, and bBits is an upper bound on the + number of significant bits of b. A tighter bound results in faster execution + time. When in doubt, the number of bits for the bignum size is always a safe + default, e.g. if b is a 4096-bit bignum, bBits should be 4096. + + The function is *NOT* constant-time on the argument b. See the + mod_exp_consttime_* functions for constant-time variants. + + The function returns false if any of the following preconditions are violated, + true otherwise. 
+ ā€¢ n % 2 = 1 + ā€¢ 1 < n + ā€¢ b < pow2 bBits + ā€¢ a < n +*/ +pub fn mod_exp_vartime( + len: u32, + n: &[u64], + a: &[u64], + bBits: u32, + b: &[u64], + res: &mut [u64], +) -> bool { + let is_valid_m: u64 = super::bignum::bn_check_mod_exp_u64(len, n, a, bBits, b); + let nBits: u32 = 64u32.wrapping_mul(super::bignum_base::bn_get_top_index_u64(len, n) as u32); + if is_valid_m == 0xFFFFFFFFFFFFFFFFu64 { + super::bignum::bn_mod_exp_vartime_u64(len, nBits, n, a, bBits, b, res) + } else { + (res[0usize..len as usize]).copy_from_slice(&vec![0u64; len as usize].into_boxed_slice()) + }; + is_valid_m == 0xFFFFFFFFFFFFFFFFu64 +} + +/** +Write `a ^ b mod n` in `res`. + + The arguments a, n and the outparam res are meant to be `len` limbs in size, i.e. uint64_t[len]. + + The argument b is a bignum of any size, and bBits is an upper bound on the + number of significant bits of b. A tighter bound results in faster execution + time. When in doubt, the number of bits for the bignum size is always a safe + default, e.g. if b is a 4096-bit bignum, bBits should be 4096. + + This function is constant-time over its argument b, at the cost of a slower + execution time than mod_exp_vartime. + + The function returns false if any of the following preconditions are violated, + true otherwise. + ā€¢ n % 2 = 1 + ā€¢ 1 < n + ā€¢ b < pow2 bBits + ā€¢ a < n +*/ +pub fn mod_exp_consttime( + len: u32, + n: &[u64], + a: &[u64], + bBits: u32, + b: &[u64], + res: &mut [u64], +) -> bool { + let is_valid_m: u64 = super::bignum::bn_check_mod_exp_u64(len, n, a, bBits, b); + let nBits: u32 = 64u32.wrapping_mul(super::bignum_base::bn_get_top_index_u64(len, n) as u32); + if is_valid_m == 0xFFFFFFFFFFFFFFFFu64 { + super::bignum::bn_mod_exp_consttime_u64(len, nBits, n, a, bBits, b, res) + } else { + (res[0usize..len as usize]).copy_from_slice(&vec![0u64; len as usize].into_boxed_slice()) + }; + is_valid_m == 0xFFFFFFFFFFFFFFFFu64 +} + +/** +Write `a ^ (-1) mod n` in `res`. 
+ + The arguments a, n and the outparam res are meant to be `len` limbs in size, i.e. uint64_t[len]. + + Before calling this function, the caller will need to ensure that the following + preconditions are observed. + ā€¢ n is a prime + + The function returns false if any of the following preconditions are violated, + true otherwise. + ā€¢ n % 2 = 1 + ā€¢ 1 < n + ā€¢ 0 < a + ā€¢ a < n +*/ +pub fn mod_inv_prime_vartime(len: u32, n: &[u64], a: &[u64], res: &mut [u64]) -> bool { + let mut one: Box<[u64]> = vec![0u64; len as usize].into_boxed_slice(); + ((&mut one)[0usize..len as usize]) + .copy_from_slice(&vec![0u64; len as usize].into_boxed_slice()); + (&mut one)[0usize] = 1u64; + let bit0: u64 = n[0usize] & 1u64; + let m0: u64 = 0u64.wrapping_sub(bit0); + let mut acc: [u64; 1] = [0u64; 1usize]; + for i in 0u32..len { + let beq: u64 = fstar::uint64::eq_mask((&one)[i as usize], n[i as usize]); + let blt: u64 = !fstar::uint64::gte_mask((&one)[i as usize], n[i as usize]); + (&mut acc)[0usize] = beq & (&acc)[0usize] | !beq & blt + } + let m1: u64 = (&acc)[0usize]; + let m00: u64 = m0 & m1; + let bn_zero: Box<[u64]> = vec![0u64; len as usize].into_boxed_slice(); + let mut mask: [u64; 1] = [0xFFFFFFFFFFFFFFFFu64; 1usize]; + for i in 0u32..len { + let uu____0: u64 = fstar::uint64::eq_mask(a[i as usize], (&bn_zero)[i as usize]); + (&mut mask)[0usize] = uu____0 & (&mask)[0usize] + } + let mask1: u64 = (&mask)[0usize]; + let res1: u64 = mask1; + let m10: u64 = res1; + let mut acc0: [u64; 1] = [0u64; 1usize]; + for i in 0u32..len { + let beq: u64 = fstar::uint64::eq_mask(a[i as usize], n[i as usize]); + let blt: u64 = !fstar::uint64::gte_mask(a[i as usize], n[i as usize]); + (&mut acc0)[0usize] = beq & (&acc0)[0usize] | !beq & blt + } + let m2: u64 = (&acc0)[0usize]; + let is_valid_m: u64 = m00 & !m10 & m2; + let nBits: u32 = 64u32.wrapping_mul(super::bignum_base::bn_get_top_index_u64(len, n) as u32); + if is_valid_m == 0xFFFFFFFFFFFFFFFFu64 { + let mut n2: Box<[u64]> = 
vec![0u64; len as usize].into_boxed_slice();
+        let c0: u64 = lib::inttypes_intrinsics::sub_borrow_u64(
+            0u64,
+            n[0usize],
+            2u64,
+            &mut (&mut n2)[0usize..],
+        );
+        let c: u64 = if 1u32 < len {
+            let a1: (&[u64], &[u64]) = n.split_at(1usize);
+            let res10: (&mut [u64], &mut [u64]) = n2.split_at_mut(1usize);
+            let mut c: [u64; 1] = [c0; 1usize];
+            for i in 0u32..len.wrapping_sub(1u32).wrapping_div(4u32) {
+                let t1: u64 = a1.1[4u32.wrapping_mul(i) as usize];
+                let res_i: (&mut [u64], &mut [u64]) =
+                    res10.1.split_at_mut(4u32.wrapping_mul(i) as usize);
+                (&mut c)[0usize] =
+                    lib::inttypes_intrinsics::sub_borrow_u64((&c)[0usize], t1, 0u64, res_i.1);
+                let t10: u64 = a1.1[4u32.wrapping_mul(i).wrapping_add(1u32) as usize];
+                let res_i0: (&mut [u64], &mut [u64]) = res_i.1.split_at_mut(1usize);
+                (&mut c)[0usize] =
+                    lib::inttypes_intrinsics::sub_borrow_u64((&c)[0usize], t10, 0u64, res_i0.1);
+                let t11: u64 = a1.1[4u32.wrapping_mul(i).wrapping_add(2u32) as usize];
+                let res_i1: (&mut [u64], &mut [u64]) = res_i0.1.split_at_mut(1usize);
+                (&mut c)[0usize] =
+                    lib::inttypes_intrinsics::sub_borrow_u64((&c)[0usize], t11, 0u64, res_i1.1);
+                let t12: u64 = a1.1[4u32.wrapping_mul(i).wrapping_add(3u32) as usize];
+                let res_i2: (&mut [u64], &mut [u64]) = res_i1.1.split_at_mut(1usize);
+                (&mut c)[0usize] =
+                    lib::inttypes_intrinsics::sub_borrow_u64((&c)[0usize], t12, 0u64, res_i2.1)
+            }
+            for i in
+                len.wrapping_sub(1u32).wrapping_div(4u32).wrapping_mul(4u32)..len.wrapping_sub(1u32)
+            {
+                let t1: u64 = a1.1[i as usize];
+                let res_i: (&mut [u64], &mut [u64]) = res10.1.split_at_mut(i as usize);
+                (&mut c)[0usize] =
+                    lib::inttypes_intrinsics::sub_borrow_u64((&c)[0usize], t1, 0u64, res_i.1)
+            }
+            let c1: u64 = (&c)[0usize];
+            c1
+        } else {
+            c0
+        };
+        lowstar::ignore::ignore::<u64>(c);
+        super::bignum::bn_mod_exp_vartime_u64(len, nBits, n, a, 64u32.wrapping_mul(len), &n2, res)
+    } else {
+        (res[0usize..len as usize]).copy_from_slice(&vec![0u64; len as usize].into_boxed_slice())
+    };
+
is_valid_m == 0xFFFFFFFFFFFFFFFFu64 +} + +/** +Heap-allocate and initialize a montgomery context. + + The argument n is meant to be `len` limbs in size, i.e. uint64_t[len]. + + Before calling this function, the caller will need to ensure that the following + preconditions are observed. + ā€¢ n % 2 = 1 + ā€¢ 1 < n + + The caller will need to call Hacl_Bignum64_mont_ctx_free on the return value + to avoid memory leaks. +*/ +pub fn mont_ctx_init(len: u32, n: &[u64]) -> Box<[super::bignum::bn_mont_ctx_u64]> { + let mut r2: Box<[u64]> = vec![0u64; len as usize].into_boxed_slice(); + let mut n1: Box<[u64]> = vec![0u64; len as usize].into_boxed_slice(); + let r21: &mut [u64] = &mut r2; + let n11: &mut [u64] = &mut n1; + (n11[0usize..len as usize]).copy_from_slice(&n[0usize..len as usize]); + let nBits: u32 = 64u32.wrapping_mul(super::bignum_base::bn_get_top_index_u64(len, n) as u32); + super::bignum::bn_precomp_r2_mod_n_u64(len, nBits, n, r21); + let mu: u64 = super::bignum::mod_inv_uint64(n[0usize]); + let res: super::bignum::bn_mont_ctx_u64 = super::bignum::bn_mont_ctx_u64 { + len, + n: (*n11).into(), + mu, + r2: (*r21).into(), + }; + let buf: Box<[super::bignum::bn_mont_ctx_u64]> = vec![res].into_boxed_slice(); + buf +} + +/** +Write `a mod n` in `res`. + + The argument a is meant to be `2*len` limbs in size, i.e. uint64_t[2*len]. + The outparam res is meant to be `len` limbs in size, i.e. uint64_t[len]. + The argument k is a montgomery context obtained through Hacl_Bignum64_mont_ctx_init. +*/ +pub fn mod_precomp(k: &[super::bignum::bn_mont_ctx_u64], a: &[u64], res: &mut [u64]) { + let len1: u32 = (k[0usize]).len; + let n: &[u64] = &(k[0usize]).n; + let mu: u64 = (k[0usize]).mu; + let r2: &[u64] = &(k[0usize]).r2; + super::bignum64::bn_slow_precomp(len1, n, mu, r2, a, res) +} + +/** +Write `a ^ b mod n` in `res`. + + The arguments a and the outparam res are meant to be `len` limbs in size, i.e. uint64_t[len]. 
+ The argument k is a montgomery context obtained through Hacl_Bignum64_mont_ctx_init. + + The argument b is a bignum of any size, and bBits is an upper bound on the + number of significant bits of b. A tighter bound results in faster execution + time. When in doubt, the number of bits for the bignum size is always a safe + default, e.g. if b is a 4096-bit bignum, bBits should be 4096. + + The function is *NOT* constant-time on the argument b. See the + mod_exp_consttime_* functions for constant-time variants. + + Before calling this function, the caller will need to ensure that the following + preconditions are observed. + ā€¢ b < pow2 bBits + ā€¢ a < n +*/ +pub fn mod_exp_vartime_precomp( + k: &[super::bignum::bn_mont_ctx_u64], + a: &[u64], + bBits: u32, + b: &[u64], + res: &mut [u64], +) { + let len1: u32 = (k[0usize]).len; + let n: &[u64] = &(k[0usize]).n; + let mu: u64 = (k[0usize]).mu; + let r2: &[u64] = &(k[0usize]).r2; + super::bignum::bn_mod_exp_vartime_precomp_u64(len1, n, mu, r2, a, bBits, b, res) +} + +/** +Write `a ^ b mod n` in `res`. + + The arguments a and the outparam res are meant to be `len` limbs in size, i.e. uint64_t[len]. + The argument k is a montgomery context obtained through Hacl_Bignum64_mont_ctx_init. + + The argument b is a bignum of any size, and bBits is an upper bound on the + number of significant bits of b. A tighter bound results in faster execution + time. When in doubt, the number of bits for the bignum size is always a safe + default, e.g. if b is a 4096-bit bignum, bBits should be 4096. + + This function is constant-time over its argument b, at the cost of a slower + execution time than mod_exp_vartime_*. + + Before calling this function, the caller will need to ensure that the following + preconditions are observed. 
+ ā€¢ b < pow2 bBits + ā€¢ a < n +*/ +pub fn mod_exp_consttime_precomp( + k: &[super::bignum::bn_mont_ctx_u64], + a: &[u64], + bBits: u32, + b: &[u64], + res: &mut [u64], +) { + let len1: u32 = (k[0usize]).len; + let n: &[u64] = &(k[0usize]).n; + let mu: u64 = (k[0usize]).mu; + let r2: &[u64] = &(k[0usize]).r2; + super::bignum::bn_mod_exp_consttime_precomp_u64(len1, n, mu, r2, a, bBits, b, res) +} + +/** +Write `a ^ (-1) mod n` in `res`. + + The argument a and the outparam res are meant to be `len` limbs in size, i.e. uint64_t[len]. + The argument k is a montgomery context obtained through Hacl_Bignum64_mont_ctx_init. + + Before calling this function, the caller will need to ensure that the following + preconditions are observed. + ā€¢ n is a prime + ā€¢ 0 < a + ā€¢ a < n +*/ +pub fn mod_inv_prime_vartime_precomp( + k: &[super::bignum::bn_mont_ctx_u64], + a: &[u64], + res: &mut [u64], +) { + let len1: u32 = (k[0usize]).len; + let n: &[u64] = &(k[0usize]).n; + let mu: u64 = (k[0usize]).mu; + let r2: &[u64] = &(k[0usize]).r2; + let mut n2: Box<[u64]> = vec![0u64; len1 as usize].into_boxed_slice(); + let c0: u64 = + lib::inttypes_intrinsics::sub_borrow_u64(0u64, n[0usize], 2u64, &mut (&mut n2)[0usize..]); + let c: u64 = if 1u32 < len1 { + let a1: (&[u64], &[u64]) = n.split_at(1usize); + let res1: (&mut [u64], &mut [u64]) = n2.split_at_mut(1usize); + let mut c: [u64; 1] = [c0; 1usize]; + for i in 0u32..len1.wrapping_sub(1u32).wrapping_div(4u32) { + let t1: u64 = a1.1[4u32.wrapping_mul(i) as usize]; + let res_i: (&mut [u64], &mut [u64]) = + res1.1.split_at_mut(4u32.wrapping_mul(i) as usize); + (&mut c)[0usize] = + lib::inttypes_intrinsics::sub_borrow_u64((&c)[0usize], t1, 0u64, res_i.1); + let t10: u64 = a1.1[4u32.wrapping_mul(i).wrapping_add(1u32) as usize]; + let res_i0: (&mut [u64], &mut [u64]) = res_i.1.split_at_mut(1usize); + (&mut c)[0usize] = + lib::inttypes_intrinsics::sub_borrow_u64((&c)[0usize], t10, 0u64, res_i0.1); + let t11: u64 = 
a1.1[4u32.wrapping_mul(i).wrapping_add(2u32) as usize];
+            let res_i1: (&mut [u64], &mut [u64]) = res_i0.1.split_at_mut(1usize);
+            (&mut c)[0usize] =
+                lib::inttypes_intrinsics::sub_borrow_u64((&c)[0usize], t11, 0u64, res_i1.1);
+            let t12: u64 = a1.1[4u32.wrapping_mul(i).wrapping_add(3u32) as usize];
+            let res_i2: (&mut [u64], &mut [u64]) = res_i1.1.split_at_mut(1usize);
+            (&mut c)[0usize] =
+                lib::inttypes_intrinsics::sub_borrow_u64((&c)[0usize], t12, 0u64, res_i2.1)
+        }
+        for i in len1
+            .wrapping_sub(1u32)
+            .wrapping_div(4u32)
+            .wrapping_mul(4u32)..len1.wrapping_sub(1u32)
+        {
+            let t1: u64 = a1.1[i as usize];
+            let res_i: (&mut [u64], &mut [u64]) = res1.1.split_at_mut(i as usize);
+            (&mut c)[0usize] =
+                lib::inttypes_intrinsics::sub_borrow_u64((&c)[0usize], t1, 0u64, res_i.1)
+        }
+        let c1: u64 = (&c)[0usize];
+        c1
+    } else {
+        c0
+    };
+    lowstar::ignore::ignore::<u64>(c);
+    super::bignum::bn_mod_exp_vartime_precomp_u64(
+        len1,
+        n,
+        mu,
+        r2,
+        a,
+        64u32.wrapping_mul(len1),
+        &n2,
+        res,
+    )
+}
+
+/**
+Load a big-endian bignum from memory.
+
+ The argument b points to `len` bytes of valid memory.
+ The function returns a heap-allocated bignum of size sufficient to hold the
+ result of loading b, or NULL if either the allocation failed, or the amount of
+ required memory would exceed 4GB.
+
+ If the return value is non-null, clients must eventually call free(3) on it to
+ avoid memory leaks.
+*/ +pub fn new_bn_from_bytes_be(len: u32, b: &[u8]) -> Box<[u64]> { + if len == 0u32 || len.wrapping_sub(1u32).wrapping_div(8u32).wrapping_add(1u32) > 536870911u32 { + [].into() + } else { + let mut res: Box<[u64]> = + vec![0u64; len.wrapping_sub(1u32).wrapping_div(8u32).wrapping_add(1u32) as usize] + .into_boxed_slice(); + if false { + res + } else { + let res1: &mut [u64] = &mut res; + let res2: &mut [u64] = res1; + let bnLen: u32 = len.wrapping_sub(1u32).wrapping_div(8u32).wrapping_add(1u32); + let tmpLen: u32 = 8u32.wrapping_mul(bnLen); + let mut tmp: Box<[u8]> = vec![0u8; tmpLen as usize].into_boxed_slice(); + ((&mut tmp)[tmpLen.wrapping_sub(len) as usize + ..tmpLen.wrapping_sub(len) as usize + len as usize]) + .copy_from_slice(&b[0usize..len as usize]); + for i in 0u32..bnLen { + let u: u64 = lowstar::endianness::load64_be( + &(&tmp)[bnLen.wrapping_sub(i).wrapping_sub(1u32).wrapping_mul(8u32) as usize..], + ); + let x: u64 = u; + let os: (&mut [u64], &mut [u64]) = res2.split_at_mut(0usize); + os.1[i as usize] = x + } + (*res2).into() + } + } +} + +/** +Load a little-endian bignum from memory. + + The argument b points to `len` bytes of valid memory. + The function returns a heap-allocated bignum of size sufficient to hold the + result of loading b, or NULL if either the allocation failed, or the amount of + required memory would exceed 4GB. + + If the return value is non-null, clients must eventually call free(3) on it to + avoid memory leaks. 
+*/ +pub fn new_bn_from_bytes_le(len: u32, b: &[u8]) -> Box<[u64]> { + if len == 0u32 || len.wrapping_sub(1u32).wrapping_div(8u32).wrapping_add(1u32) > 536870911u32 { + [].into() + } else { + let mut res: Box<[u64]> = + vec![0u64; len.wrapping_sub(1u32).wrapping_div(8u32).wrapping_add(1u32) as usize] + .into_boxed_slice(); + if false { + res + } else { + let res1: &mut [u64] = &mut res; + let res2: &mut [u64] = res1; + let bnLen: u32 = len.wrapping_sub(1u32).wrapping_div(8u32).wrapping_add(1u32); + let tmpLen: u32 = 8u32.wrapping_mul(bnLen); + let mut tmp: Box<[u8]> = vec![0u8; tmpLen as usize].into_boxed_slice(); + ((&mut tmp)[0usize..len as usize]).copy_from_slice(&b[0usize..len as usize]); + for i in 0u32..len.wrapping_sub(1u32).wrapping_div(8u32).wrapping_add(1u32) { + let bj: (&[u8], &[u8]) = tmp.split_at(i.wrapping_mul(8u32) as usize); + let u: u64 = lowstar::endianness::load64_le(bj.1); + let r1: u64 = u; + let x: u64 = r1; + let os: (&mut [u64], &mut [u64]) = res2.split_at_mut(0usize); + os.1[i as usize] = x + } + (*res2).into() + } + } +} + +/** +Serialize a bignum into big-endian memory. + + The argument b points to a bignum of āŒˆlen / 8āŒ‰ size. + The outparam res points to `len` bytes of valid memory. +*/ +pub fn bn_to_bytes_be(len: u32, b: &[u64], res: &mut [u8]) { + let bnLen: u32 = len.wrapping_sub(1u32).wrapping_div(8u32).wrapping_add(1u32); + let tmpLen: u32 = 8u32.wrapping_mul(bnLen); + let mut tmp: Box<[u8]> = vec![0u8; tmpLen as usize].into_boxed_slice(); + for i in 0u32..bnLen { + lowstar::endianness::store64_be( + &mut (&mut tmp)[i.wrapping_mul(8u32) as usize..], + b[bnLen.wrapping_sub(i).wrapping_sub(1u32) as usize], + ) + } + (res[0usize..len as usize]) + .copy_from_slice(&(&(&tmp)[tmpLen.wrapping_sub(len) as usize..])[0usize..len as usize]) +} + +/** +Serialize a bignum into little-endian memory. + + The argument b points to a bignum of āŒˆlen / 8āŒ‰ size. + The outparam res points to `len` bytes of valid memory. 
+*/ +pub fn bn_to_bytes_le(len: u32, b: &[u64], res: &mut [u8]) { + let bnLen: u32 = len.wrapping_sub(1u32).wrapping_div(8u32).wrapping_add(1u32); + let tmpLen: u32 = 8u32.wrapping_mul(bnLen); + let mut tmp: Box<[u8]> = vec![0u8; tmpLen as usize].into_boxed_slice(); + for i in 0u32..bnLen { + lowstar::endianness::store64_le( + &mut (&mut tmp)[i.wrapping_mul(8u32) as usize..], + b[i as usize], + ) + } + (res[0usize..len as usize]).copy_from_slice(&(&(&tmp)[0usize..])[0usize..len as usize]) +} + +/** +Returns 2^64 - 1 if a < b, otherwise returns 0. + + The arguments a and b are meant to be `len` limbs in size, i.e. uint64_t[len]. +*/ +pub fn lt_mask(len: u32, a: &[u64], b: &[u64]) -> u64 { + let mut acc: [u64; 1] = [0u64; 1usize]; + for i in 0u32..len { + let beq: u64 = fstar::uint64::eq_mask(a[i as usize], b[i as usize]); + let blt: u64 = !fstar::uint64::gte_mask(a[i as usize], b[i as usize]); + (&mut acc)[0usize] = beq & (&acc)[0usize] | !beq & blt + } + (&acc)[0usize] +} + +/** +Returns 2^64 - 1 if a = b, otherwise returns 0. + + The arguments a and b are meant to be `len` limbs in size, i.e. uint64_t[len]. 
+*/ +pub fn eq_mask(len: u32, a: &[u64], b: &[u64]) -> u64 { + let mut mask: [u64; 1] = [0xFFFFFFFFFFFFFFFFu64; 1usize]; + for i in 0u32..len { + let uu____0: u64 = fstar::uint64::eq_mask(a[i as usize], b[i as usize]); + (&mut mask)[0usize] = uu____0 & (&mask)[0usize] + } + let mask1: u64 = (&mask)[0usize]; + mask1 +} diff --git a/libcrux-hacl-rs/src/bignum/bignum_base.rs b/libcrux-hacl-rs/src/bignum/bignum_base.rs new file mode 100644 index 000000000..5760b4fbd --- /dev/null +++ b/libcrux-hacl-rs/src/bignum/bignum_base.rs @@ -0,0 +1,467 @@ +#![allow(non_snake_case)] +#![allow(non_upper_case_globals)] +#![allow(non_camel_case_types)] +#![allow(unused_assignments)] +#![allow(unreachable_patterns)] + +use crate::fstar; +use crate::lowstar; +use crate::util as lib; + +#[inline] +pub(crate) fn mul_wide_add2_u32(a: u32, b: u32, c_in: u32, out: &mut [u32]) -> u32 { + let out0: u32 = out[0usize]; + let res: u64 = (a as u64) + .wrapping_mul(b as u64) + .wrapping_add(c_in as u64) + .wrapping_add(out0 as u64); + out[0usize] = res as u32; + res.wrapping_shr(32u32) as u32 +} + +/** +ATTENTION: this function is public, but is intended for internal use within this workspace; callers should not rely on the availability of this function, or its behavior! +*/ +#[inline] +pub fn mul_wide_add2_u64(a: u64, b: u64, c_in: u64, out: &mut [u64]) -> u64 { + let out0: u64 = out[0usize]; + let res: fstar::uint128::uint128 = fstar::uint128::add( + fstar::uint128::add( + fstar::uint128::mul_wide(a, b), + fstar::uint128::uint64_to_uint128(c_in), + ), + fstar::uint128::uint64_to_uint128(out0), + ); + out[0usize] = fstar::uint128::uint128_to_uint64(res); + fstar::uint128::uint128_to_uint64(fstar::uint128::shift_right(res, 64u32)) +} + +/** +ATTENTION: this function is public, but is intended for internal use within this workspace; callers should not rely on the availability of this function, or its behavior! 
+*/ +pub fn bn_from_bytes_be_uint64(len: u32, b: &[u8], res: &mut [u64]) { + let bnLen: u32 = len.wrapping_sub(1u32).wrapping_div(8u32).wrapping_add(1u32); + let tmpLen: u32 = 8u32.wrapping_mul(bnLen); + let mut tmp: Box<[u8]> = vec![0u8; tmpLen as usize].into_boxed_slice(); + ((&mut tmp) + [tmpLen.wrapping_sub(len) as usize..tmpLen.wrapping_sub(len) as usize + len as usize]) + .copy_from_slice(&b[0usize..len as usize]); + for i in 0u32..bnLen { + let u: u64 = lowstar::endianness::load64_be( + &(&tmp)[bnLen.wrapping_sub(i).wrapping_sub(1u32).wrapping_mul(8u32) as usize..], + ); + let x: u64 = u; + let os: (&mut [u64], &mut [u64]) = res.split_at_mut(0usize); + os.1[i as usize] = x + } +} + +/** +ATTENTION: this function is public, but is intended for internal use within this workspace; callers should not rely on the availability of this function, or its behavior! +*/ +pub fn bn_to_bytes_be_uint64(len: u32, b: &[u64], res: &mut [u8]) { + let bnLen: u32 = len.wrapping_sub(1u32).wrapping_div(8u32).wrapping_add(1u32); + let tmpLen: u32 = 8u32.wrapping_mul(bnLen); + let mut tmp: Box<[u8]> = vec![0u8; tmpLen as usize].into_boxed_slice(); + for i in 0u32..bnLen { + lowstar::endianness::store64_be( + &mut (&mut tmp)[i.wrapping_mul(8u32) as usize..], + b[bnLen.wrapping_sub(i).wrapping_sub(1u32) as usize], + ) + } + (res[0usize..len as usize]) + .copy_from_slice(&(&(&tmp)[tmpLen.wrapping_sub(len) as usize..])[0usize..len as usize]) +} + +/** +ATTENTION: this function is public, but is intended for internal use within this workspace; callers should not rely on the availability of this function, or its behavior! 
+*/ +pub fn bn_get_top_index_u32(len: u32, b: &[u32]) -> u32 { + let mut r#priv: [u32; 1] = [0u32; 1usize]; + for i in 0u32..len { + let mask: u32 = fstar::uint32::eq_mask(b[i as usize], 0u32); + (&mut r#priv)[0usize] = mask & (&r#priv)[0usize] | !mask & i + } + (&r#priv)[0usize] +} + +/** +ATTENTION: this function is public, but is intended for internal use within this workspace; callers should not rely on the availability of this function, or its behavior! +*/ +pub fn bn_get_top_index_u64(len: u32, b: &[u64]) -> u64 { + let mut r#priv: [u64; 1] = [0u64; 1usize]; + for i in 0u32..len { + let mask: u64 = fstar::uint64::eq_mask(b[i as usize], 0u64); + (&mut r#priv)[0usize] = mask & (&r#priv)[0usize] | !mask & i as u64 + } + (&r#priv)[0usize] +} + +/** +ATTENTION: this function is public, but is intended for internal use within this workspace; callers should not rely on the availability of this function, or its behavior! +*/ +#[inline] +pub fn bn_get_bits_u32(len: u32, b: &[u32], i: u32, l: u32) -> u32 { + let i1: u32 = i.wrapping_div(32u32); + let j: u32 = i.wrapping_rem(32u32); + let p1: u32 = (b[i1 as usize]).wrapping_shr(j); + let ite: u32 = if i1.wrapping_add(1u32) < len && 0u32 < j { + p1 | (b[i1.wrapping_add(1u32) as usize]).wrapping_shl(32u32.wrapping_sub(j)) + } else { + p1 + }; + ite & 1u32.wrapping_shl(l).wrapping_sub(1u32) +} + +/** +ATTENTION: this function is public, but is intended for internal use within this workspace; callers should not rely on the availability of this function, or its behavior! 
+*/ +#[inline] +pub fn bn_get_bits_u64(len: u32, b: &[u64], i: u32, l: u32) -> u64 { + let i1: u32 = i.wrapping_div(64u32); + let j: u32 = i.wrapping_rem(64u32); + let p1: u64 = (b[i1 as usize]).wrapping_shr(j); + let ite: u64 = if i1.wrapping_add(1u32) < len && 0u32 < j { + p1 | (b[i1.wrapping_add(1u32) as usize]).wrapping_shl(64u32.wrapping_sub(j)) + } else { + p1 + }; + ite & 1u64.wrapping_shl(l).wrapping_sub(1u64) +} + +pub(crate) fn bn_sub_eq_len_u32(aLen: u32, a: &[u32], b: &[u32], res: &mut [u32]) -> u32 { + let mut c: [u32; 1] = [0u32; 1usize]; + for i in 0u32..aLen.wrapping_div(4u32) { + let t1: u32 = a[4u32.wrapping_mul(i) as usize]; + let t2: u32 = b[4u32.wrapping_mul(i) as usize]; + let res_i: (&mut [u32], &mut [u32]) = res.split_at_mut(4u32.wrapping_mul(i) as usize); + (&mut c)[0usize] = lib::inttypes_intrinsics::sub_borrow_u32((&c)[0usize], t1, t2, res_i.1); + let t10: u32 = a[4u32.wrapping_mul(i).wrapping_add(1u32) as usize]; + let t20: u32 = b[4u32.wrapping_mul(i).wrapping_add(1u32) as usize]; + let res_i0: (&mut [u32], &mut [u32]) = res_i.1.split_at_mut(1usize); + (&mut c)[0usize] = + lib::inttypes_intrinsics::sub_borrow_u32((&c)[0usize], t10, t20, res_i0.1); + let t11: u32 = a[4u32.wrapping_mul(i).wrapping_add(2u32) as usize]; + let t21: u32 = b[4u32.wrapping_mul(i).wrapping_add(2u32) as usize]; + let res_i1: (&mut [u32], &mut [u32]) = res_i0.1.split_at_mut(1usize); + (&mut c)[0usize] = + lib::inttypes_intrinsics::sub_borrow_u32((&c)[0usize], t11, t21, res_i1.1); + let t12: u32 = a[4u32.wrapping_mul(i).wrapping_add(3u32) as usize]; + let t22: u32 = b[4u32.wrapping_mul(i).wrapping_add(3u32) as usize]; + let res_i2: (&mut [u32], &mut [u32]) = res_i1.1.split_at_mut(1usize); + (&mut c)[0usize] = + lib::inttypes_intrinsics::sub_borrow_u32((&c)[0usize], t12, t22, res_i2.1) + } + for i in aLen.wrapping_div(4u32).wrapping_mul(4u32)..aLen { + let t1: u32 = a[i as usize]; + let t2: u32 = b[i as usize]; + let res_i: (&mut [u32], &mut [u32]) = 
res.split_at_mut(i as usize); + (&mut c)[0usize] = lib::inttypes_intrinsics::sub_borrow_u32((&c)[0usize], t1, t2, res_i.1) + } + (&c)[0usize] +} + +pub(crate) fn bn_sub_eq_len_u64(aLen: u32, a: &[u64], b: &[u64], res: &mut [u64]) -> u64 { + let mut c: [u64; 1] = [0u64; 1usize]; + for i in 0u32..aLen.wrapping_div(4u32) { + let t1: u64 = a[4u32.wrapping_mul(i) as usize]; + let t2: u64 = b[4u32.wrapping_mul(i) as usize]; + let res_i: (&mut [u64], &mut [u64]) = res.split_at_mut(4u32.wrapping_mul(i) as usize); + (&mut c)[0usize] = lib::inttypes_intrinsics::sub_borrow_u64((&c)[0usize], t1, t2, res_i.1); + let t10: u64 = a[4u32.wrapping_mul(i).wrapping_add(1u32) as usize]; + let t20: u64 = b[4u32.wrapping_mul(i).wrapping_add(1u32) as usize]; + let res_i0: (&mut [u64], &mut [u64]) = res_i.1.split_at_mut(1usize); + (&mut c)[0usize] = + lib::inttypes_intrinsics::sub_borrow_u64((&c)[0usize], t10, t20, res_i0.1); + let t11: u64 = a[4u32.wrapping_mul(i).wrapping_add(2u32) as usize]; + let t21: u64 = b[4u32.wrapping_mul(i).wrapping_add(2u32) as usize]; + let res_i1: (&mut [u64], &mut [u64]) = res_i0.1.split_at_mut(1usize); + (&mut c)[0usize] = + lib::inttypes_intrinsics::sub_borrow_u64((&c)[0usize], t11, t21, res_i1.1); + let t12: u64 = a[4u32.wrapping_mul(i).wrapping_add(3u32) as usize]; + let t22: u64 = b[4u32.wrapping_mul(i).wrapping_add(3u32) as usize]; + let res_i2: (&mut [u64], &mut [u64]) = res_i1.1.split_at_mut(1usize); + (&mut c)[0usize] = + lib::inttypes_intrinsics::sub_borrow_u64((&c)[0usize], t12, t22, res_i2.1) + } + for i in aLen.wrapping_div(4u32).wrapping_mul(4u32)..aLen { + let t1: u64 = a[i as usize]; + let t2: u64 = b[i as usize]; + let res_i: (&mut [u64], &mut [u64]) = res.split_at_mut(i as usize); + (&mut c)[0usize] = lib::inttypes_intrinsics::sub_borrow_u64((&c)[0usize], t1, t2, res_i.1) + } + (&c)[0usize] +} + +pub(crate) fn bn_add_eq_len_u32(aLen: u32, a: &[u32], b: &[u32], res: &mut [u32]) -> u32 { + let mut c: [u32; 1] = [0u32; 1usize]; + for i in 
0u32..aLen.wrapping_div(4u32) { + let t1: u32 = a[4u32.wrapping_mul(i) as usize]; + let t2: u32 = b[4u32.wrapping_mul(i) as usize]; + let res_i: (&mut [u32], &mut [u32]) = res.split_at_mut(4u32.wrapping_mul(i) as usize); + (&mut c)[0usize] = lib::inttypes_intrinsics::add_carry_u32((&c)[0usize], t1, t2, res_i.1); + let t10: u32 = a[4u32.wrapping_mul(i).wrapping_add(1u32) as usize]; + let t20: u32 = b[4u32.wrapping_mul(i).wrapping_add(1u32) as usize]; + let res_i0: (&mut [u32], &mut [u32]) = res_i.1.split_at_mut(1usize); + (&mut c)[0usize] = + lib::inttypes_intrinsics::add_carry_u32((&c)[0usize], t10, t20, res_i0.1); + let t11: u32 = a[4u32.wrapping_mul(i).wrapping_add(2u32) as usize]; + let t21: u32 = b[4u32.wrapping_mul(i).wrapping_add(2u32) as usize]; + let res_i1: (&mut [u32], &mut [u32]) = res_i0.1.split_at_mut(1usize); + (&mut c)[0usize] = + lib::inttypes_intrinsics::add_carry_u32((&c)[0usize], t11, t21, res_i1.1); + let t12: u32 = a[4u32.wrapping_mul(i).wrapping_add(3u32) as usize]; + let t22: u32 = b[4u32.wrapping_mul(i).wrapping_add(3u32) as usize]; + let res_i2: (&mut [u32], &mut [u32]) = res_i1.1.split_at_mut(1usize); + (&mut c)[0usize] = lib::inttypes_intrinsics::add_carry_u32((&c)[0usize], t12, t22, res_i2.1) + } + for i in aLen.wrapping_div(4u32).wrapping_mul(4u32)..aLen { + let t1: u32 = a[i as usize]; + let t2: u32 = b[i as usize]; + let res_i: (&mut [u32], &mut [u32]) = res.split_at_mut(i as usize); + (&mut c)[0usize] = lib::inttypes_intrinsics::add_carry_u32((&c)[0usize], t1, t2, res_i.1) + } + (&c)[0usize] +} + +/** +ATTENTION: this function is public, but is intended for internal use within this workspace; callers should not rely on the availability of this function, or its behavior! 
+*/ +pub fn bn_add_eq_len_u64(aLen: u32, a: &[u64], b: &[u64], res: &mut [u64]) -> u64 { + let mut c: [u64; 1] = [0u64; 1usize]; + for i in 0u32..aLen.wrapping_div(4u32) { + let t1: u64 = a[4u32.wrapping_mul(i) as usize]; + let t2: u64 = b[4u32.wrapping_mul(i) as usize]; + let res_i: (&mut [u64], &mut [u64]) = res.split_at_mut(4u32.wrapping_mul(i) as usize); + (&mut c)[0usize] = lib::inttypes_intrinsics::add_carry_u64((&c)[0usize], t1, t2, res_i.1); + let t10: u64 = a[4u32.wrapping_mul(i).wrapping_add(1u32) as usize]; + let t20: u64 = b[4u32.wrapping_mul(i).wrapping_add(1u32) as usize]; + let res_i0: (&mut [u64], &mut [u64]) = res_i.1.split_at_mut(1usize); + (&mut c)[0usize] = + lib::inttypes_intrinsics::add_carry_u64((&c)[0usize], t10, t20, res_i0.1); + let t11: u64 = a[4u32.wrapping_mul(i).wrapping_add(2u32) as usize]; + let t21: u64 = b[4u32.wrapping_mul(i).wrapping_add(2u32) as usize]; + let res_i1: (&mut [u64], &mut [u64]) = res_i0.1.split_at_mut(1usize); + (&mut c)[0usize] = + lib::inttypes_intrinsics::add_carry_u64((&c)[0usize], t11, t21, res_i1.1); + let t12: u64 = a[4u32.wrapping_mul(i).wrapping_add(3u32) as usize]; + let t22: u64 = b[4u32.wrapping_mul(i).wrapping_add(3u32) as usize]; + let res_i2: (&mut [u64], &mut [u64]) = res_i1.1.split_at_mut(1usize); + (&mut c)[0usize] = lib::inttypes_intrinsics::add_carry_u64((&c)[0usize], t12, t22, res_i2.1) + } + for i in aLen.wrapping_div(4u32).wrapping_mul(4u32)..aLen { + let t1: u64 = a[i as usize]; + let t2: u64 = b[i as usize]; + let res_i: (&mut [u64], &mut [u64]) = res.split_at_mut(i as usize); + (&mut c)[0usize] = lib::inttypes_intrinsics::add_carry_u64((&c)[0usize], t1, t2, res_i.1) + } + (&c)[0usize] +} + +#[inline] +pub(crate) fn bn_mul_u32(aLen: u32, a: &[u32], bLen: u32, b: &[u32], res: &mut [u32]) { + (res[0usize..aLen.wrapping_add(bLen) as usize]) + .copy_from_slice(&vec![0u32; aLen.wrapping_add(bLen) as usize].into_boxed_slice()); + for i in 0u32..bLen { + let bj: u32 = b[i as usize]; + let res_j: 
(&mut [u32], &mut [u32]) = res.split_at_mut(i as usize); + let mut c: [u32; 1] = [0u32; 1usize]; + for i0 in 0u32..aLen.wrapping_div(4u32) { + let a_i: u32 = a[4u32.wrapping_mul(i0) as usize]; + let res_i: (&mut [u32], &mut [u32]) = + res_j.1.split_at_mut(4u32.wrapping_mul(i0) as usize); + (&mut c)[0usize] = + super::bignum_base::mul_wide_add2_u32(a_i, bj, (&c)[0usize], res_i.1); + let a_i0: u32 = a[4u32.wrapping_mul(i0).wrapping_add(1u32) as usize]; + let res_i0: (&mut [u32], &mut [u32]) = res_i.1.split_at_mut(1usize); + (&mut c)[0usize] = + super::bignum_base::mul_wide_add2_u32(a_i0, bj, (&c)[0usize], res_i0.1); + let a_i1: u32 = a[4u32.wrapping_mul(i0).wrapping_add(2u32) as usize]; + let res_i1: (&mut [u32], &mut [u32]) = res_i0.1.split_at_mut(1usize); + (&mut c)[0usize] = + super::bignum_base::mul_wide_add2_u32(a_i1, bj, (&c)[0usize], res_i1.1); + let a_i2: u32 = a[4u32.wrapping_mul(i0).wrapping_add(3u32) as usize]; + let res_i2: (&mut [u32], &mut [u32]) = res_i1.1.split_at_mut(1usize); + (&mut c)[0usize] = + super::bignum_base::mul_wide_add2_u32(a_i2, bj, (&c)[0usize], res_i2.1) + } + for i0 in aLen.wrapping_div(4u32).wrapping_mul(4u32)..aLen { + let a_i: u32 = a[i0 as usize]; + let res_i: (&mut [u32], &mut [u32]) = res_j.1.split_at_mut(i0 as usize); + (&mut c)[0usize] = super::bignum_base::mul_wide_add2_u32(a_i, bj, (&c)[0usize], res_i.1) + } + let r: u32 = (&c)[0usize]; + res[aLen.wrapping_add(i) as usize] = r + } +} + +#[inline] +pub(crate) fn bn_mul_u64(aLen: u32, a: &[u64], bLen: u32, b: &[u64], res: &mut [u64]) { + (res[0usize..aLen.wrapping_add(bLen) as usize]) + .copy_from_slice(&vec![0u64; aLen.wrapping_add(bLen) as usize].into_boxed_slice()); + for i in 0u32..bLen { + let bj: u64 = b[i as usize]; + let res_j: (&mut [u64], &mut [u64]) = res.split_at_mut(i as usize); + let mut c: [u64; 1] = [0u64; 1usize]; + for i0 in 0u32..aLen.wrapping_div(4u32) { + let a_i: u64 = a[4u32.wrapping_mul(i0) as usize]; + let res_i: (&mut [u64], &mut [u64]) = + 
res_j.1.split_at_mut(4u32.wrapping_mul(i0) as usize); + (&mut c)[0usize] = + super::bignum_base::mul_wide_add2_u64(a_i, bj, (&c)[0usize], res_i.1); + let a_i0: u64 = a[4u32.wrapping_mul(i0).wrapping_add(1u32) as usize]; + let res_i0: (&mut [u64], &mut [u64]) = res_i.1.split_at_mut(1usize); + (&mut c)[0usize] = + super::bignum_base::mul_wide_add2_u64(a_i0, bj, (&c)[0usize], res_i0.1); + let a_i1: u64 = a[4u32.wrapping_mul(i0).wrapping_add(2u32) as usize]; + let res_i1: (&mut [u64], &mut [u64]) = res_i0.1.split_at_mut(1usize); + (&mut c)[0usize] = + super::bignum_base::mul_wide_add2_u64(a_i1, bj, (&c)[0usize], res_i1.1); + let a_i2: u64 = a[4u32.wrapping_mul(i0).wrapping_add(3u32) as usize]; + let res_i2: (&mut [u64], &mut [u64]) = res_i1.1.split_at_mut(1usize); + (&mut c)[0usize] = + super::bignum_base::mul_wide_add2_u64(a_i2, bj, (&c)[0usize], res_i2.1) + } + for i0 in aLen.wrapping_div(4u32).wrapping_mul(4u32)..aLen { + let a_i: u64 = a[i0 as usize]; + let res_i: (&mut [u64], &mut [u64]) = res_j.1.split_at_mut(i0 as usize); + (&mut c)[0usize] = super::bignum_base::mul_wide_add2_u64(a_i, bj, (&c)[0usize], res_i.1) + } + let r: u64 = (&c)[0usize]; + res[aLen.wrapping_add(i) as usize] = r + } +} + +#[inline] +pub(crate) fn bn_sqr_u32(aLen: u32, a: &[u32], res: &mut [u32]) { + (res[0usize..aLen.wrapping_add(aLen) as usize]) + .copy_from_slice(&vec![0u32; aLen.wrapping_add(aLen) as usize].into_boxed_slice()); + for i in 0u32..aLen { + let a_j: u32 = a[i as usize]; + let ab: (&[u32], &[u32]) = a.split_at(0usize); + let res_j: (&mut [u32], &mut [u32]) = res.split_at_mut(i as usize); + let mut c: [u32; 1] = [0u32; 1usize]; + for i0 in 0u32..i.wrapping_div(4u32) { + let a_i: u32 = ab.1[4u32.wrapping_mul(i0) as usize]; + let res_i: (&mut [u32], &mut [u32]) = + res_j.1.split_at_mut(4u32.wrapping_mul(i0) as usize); + (&mut c)[0usize] = + super::bignum_base::mul_wide_add2_u32(a_i, a_j, (&c)[0usize], res_i.1); + let a_i0: u32 = ab.1[4u32.wrapping_mul(i0).wrapping_add(1u32) as 
usize]; + let res_i0: (&mut [u32], &mut [u32]) = res_i.1.split_at_mut(1usize); + (&mut c)[0usize] = + super::bignum_base::mul_wide_add2_u32(a_i0, a_j, (&c)[0usize], res_i0.1); + let a_i1: u32 = ab.1[4u32.wrapping_mul(i0).wrapping_add(2u32) as usize]; + let res_i1: (&mut [u32], &mut [u32]) = res_i0.1.split_at_mut(1usize); + (&mut c)[0usize] = + super::bignum_base::mul_wide_add2_u32(a_i1, a_j, (&c)[0usize], res_i1.1); + let a_i2: u32 = ab.1[4u32.wrapping_mul(i0).wrapping_add(3u32) as usize]; + let res_i2: (&mut [u32], &mut [u32]) = res_i1.1.split_at_mut(1usize); + (&mut c)[0usize] = + super::bignum_base::mul_wide_add2_u32(a_i2, a_j, (&c)[0usize], res_i2.1) + } + for i0 in i.wrapping_div(4u32).wrapping_mul(4u32)..i { + let a_i: u32 = ab.1[i0 as usize]; + let res_i: (&mut [u32], &mut [u32]) = res_j.1.split_at_mut(i0 as usize); + (&mut c)[0usize] = + super::bignum_base::mul_wide_add2_u32(a_i, a_j, (&c)[0usize], res_i.1) + } + let r: u32 = (&c)[0usize]; + res[i.wrapping_add(i) as usize] = r + } + let mut a_copy: Box<[u32]> = vec![0u32; aLen.wrapping_add(aLen) as usize].into_boxed_slice(); + let mut b_copy: Box<[u32]> = vec![0u32; aLen.wrapping_add(aLen) as usize].into_boxed_slice(); + ((&mut a_copy)[0usize..aLen.wrapping_add(aLen) as usize]) + .copy_from_slice(&res[0usize..aLen.wrapping_add(aLen) as usize]); + ((&mut b_copy)[0usize..aLen.wrapping_add(aLen) as usize]) + .copy_from_slice(&res[0usize..aLen.wrapping_add(aLen) as usize]); + let r: u32 = + super::bignum_base::bn_add_eq_len_u32(aLen.wrapping_add(aLen), &a_copy, &b_copy, res); + let c0: u32 = r; + lowstar::ignore::ignore::(c0); + let mut tmp: Box<[u32]> = vec![0u32; aLen.wrapping_add(aLen) as usize].into_boxed_slice(); + for i in 0u32..aLen { + let res1: u64 = (a[i as usize] as u64).wrapping_mul(a[i as usize] as u64); + let hi: u32 = res1.wrapping_shr(32u32) as u32; + let lo: u32 = res1 as u32; + (&mut tmp)[2u32.wrapping_mul(i) as usize] = lo; + (&mut tmp)[2u32.wrapping_mul(i).wrapping_add(1u32) as usize] = hi + 
} + let mut a_copy0: Box<[u32]> = vec![0u32; aLen.wrapping_add(aLen) as usize].into_boxed_slice(); + let mut b_copy0: Box<[u32]> = vec![0u32; aLen.wrapping_add(aLen) as usize].into_boxed_slice(); + ((&mut a_copy0)[0usize..aLen.wrapping_add(aLen) as usize]) + .copy_from_slice(&res[0usize..aLen.wrapping_add(aLen) as usize]); + ((&mut b_copy0)[0usize..aLen.wrapping_add(aLen) as usize]) + .copy_from_slice(&(&tmp)[0usize..aLen.wrapping_add(aLen) as usize]); + let r0: u32 = + super::bignum_base::bn_add_eq_len_u32(aLen.wrapping_add(aLen), &a_copy0, &b_copy0, res); + let c1: u32 = r0; + lowstar::ignore::ignore::(c1) +} + +#[inline] +pub(crate) fn bn_sqr_u64(aLen: u32, a: &[u64], res: &mut [u64]) { + (res[0usize..aLen.wrapping_add(aLen) as usize]) + .copy_from_slice(&vec![0u64; aLen.wrapping_add(aLen) as usize].into_boxed_slice()); + for i in 0u32..aLen { + let a_j: u64 = a[i as usize]; + let ab: (&[u64], &[u64]) = a.split_at(0usize); + let res_j: (&mut [u64], &mut [u64]) = res.split_at_mut(i as usize); + let mut c: [u64; 1] = [0u64; 1usize]; + for i0 in 0u32..i.wrapping_div(4u32) { + let a_i: u64 = ab.1[4u32.wrapping_mul(i0) as usize]; + let res_i: (&mut [u64], &mut [u64]) = + res_j.1.split_at_mut(4u32.wrapping_mul(i0) as usize); + (&mut c)[0usize] = + super::bignum_base::mul_wide_add2_u64(a_i, a_j, (&c)[0usize], res_i.1); + let a_i0: u64 = ab.1[4u32.wrapping_mul(i0).wrapping_add(1u32) as usize]; + let res_i0: (&mut [u64], &mut [u64]) = res_i.1.split_at_mut(1usize); + (&mut c)[0usize] = + super::bignum_base::mul_wide_add2_u64(a_i0, a_j, (&c)[0usize], res_i0.1); + let a_i1: u64 = ab.1[4u32.wrapping_mul(i0).wrapping_add(2u32) as usize]; + let res_i1: (&mut [u64], &mut [u64]) = res_i0.1.split_at_mut(1usize); + (&mut c)[0usize] = + super::bignum_base::mul_wide_add2_u64(a_i1, a_j, (&c)[0usize], res_i1.1); + let a_i2: u64 = ab.1[4u32.wrapping_mul(i0).wrapping_add(3u32) as usize]; + let res_i2: (&mut [u64], &mut [u64]) = res_i1.1.split_at_mut(1usize); + (&mut c)[0usize] = + 
super::bignum_base::mul_wide_add2_u64(a_i2, a_j, (&c)[0usize], res_i2.1) + } + for i0 in i.wrapping_div(4u32).wrapping_mul(4u32)..i { + let a_i: u64 = ab.1[i0 as usize]; + let res_i: (&mut [u64], &mut [u64]) = res_j.1.split_at_mut(i0 as usize); + (&mut c)[0usize] = + super::bignum_base::mul_wide_add2_u64(a_i, a_j, (&c)[0usize], res_i.1) + } + let r: u64 = (&c)[0usize]; + res[i.wrapping_add(i) as usize] = r + } + let mut a_copy: Box<[u64]> = vec![0u64; aLen.wrapping_add(aLen) as usize].into_boxed_slice(); + let mut b_copy: Box<[u64]> = vec![0u64; aLen.wrapping_add(aLen) as usize].into_boxed_slice(); + ((&mut a_copy)[0usize..aLen.wrapping_add(aLen) as usize]) + .copy_from_slice(&res[0usize..aLen.wrapping_add(aLen) as usize]); + ((&mut b_copy)[0usize..aLen.wrapping_add(aLen) as usize]) + .copy_from_slice(&res[0usize..aLen.wrapping_add(aLen) as usize]); + let r: u64 = + super::bignum_base::bn_add_eq_len_u64(aLen.wrapping_add(aLen), &a_copy, &b_copy, res); + let c0: u64 = r; + lowstar::ignore::ignore::(c0); + let mut tmp: Box<[u64]> = vec![0u64; aLen.wrapping_add(aLen) as usize].into_boxed_slice(); + for i in 0u32..aLen { + let res1: fstar::uint128::uint128 = fstar::uint128::mul_wide(a[i as usize], a[i as usize]); + let hi: u64 = fstar::uint128::uint128_to_uint64(fstar::uint128::shift_right(res1, 64u32)); + let lo: u64 = fstar::uint128::uint128_to_uint64(res1); + (&mut tmp)[2u32.wrapping_mul(i) as usize] = lo; + (&mut tmp)[2u32.wrapping_mul(i).wrapping_add(1u32) as usize] = hi + } + let mut a_copy0: Box<[u64]> = vec![0u64; aLen.wrapping_add(aLen) as usize].into_boxed_slice(); + let mut b_copy0: Box<[u64]> = vec![0u64; aLen.wrapping_add(aLen) as usize].into_boxed_slice(); + ((&mut a_copy0)[0usize..aLen.wrapping_add(aLen) as usize]) + .copy_from_slice(&res[0usize..aLen.wrapping_add(aLen) as usize]); + ((&mut b_copy0)[0usize..aLen.wrapping_add(aLen) as usize]) + .copy_from_slice(&(&tmp)[0usize..aLen.wrapping_add(aLen) as usize]); + let r0: u64 = + 
super::bignum_base::bn_add_eq_len_u64(aLen.wrapping_add(aLen), &a_copy0, &b_copy0, res); + let c1: u64 = r0; + lowstar::ignore::ignore::(c1) +} diff --git a/libcrux-hacl-rs/src/bignum25519_51.rs b/libcrux-hacl-rs/src/bignum25519_51.rs new file mode 100644 index 000000000..bc9867e04 --- /dev/null +++ b/libcrux-hacl-rs/src/bignum25519_51.rs @@ -0,0 +1,724 @@ +#![allow(non_snake_case)] +#![allow(non_upper_case_globals)] +#![allow(non_camel_case_types)] +#![allow(unused_assignments)] +#![allow(unreachable_patterns)] + +use crate::fstar; +use crate::lowstar; + +#[inline] +pub(crate) fn fadd(out: &mut [u64], f1: &[u64], f2: &[u64]) { + let f10: u64 = f1[0usize]; + let f20: u64 = f2[0usize]; + let f11: u64 = f1[1usize]; + let f21: u64 = f2[1usize]; + let f12: u64 = f1[2usize]; + let f22: u64 = f2[2usize]; + let f13: u64 = f1[3usize]; + let f23: u64 = f2[3usize]; + let f14: u64 = f1[4usize]; + let f24: u64 = f2[4usize]; + out[0usize] = f10.wrapping_add(f20); + out[1usize] = f11.wrapping_add(f21); + out[2usize] = f12.wrapping_add(f22); + out[3usize] = f13.wrapping_add(f23); + out[4usize] = f14.wrapping_add(f24) +} + +#[inline] +pub(crate) fn fsub(out: &mut [u64], f1: &[u64], f2: &[u64]) { + let f10: u64 = f1[0usize]; + let f20: u64 = f2[0usize]; + let f11: u64 = f1[1usize]; + let f21: u64 = f2[1usize]; + let f12: u64 = f1[2usize]; + let f22: u64 = f2[2usize]; + let f13: u64 = f1[3usize]; + let f23: u64 = f2[3usize]; + let f14: u64 = f1[4usize]; + let f24: u64 = f2[4usize]; + out[0usize] = f10.wrapping_add(0x3fffffffffff68u64).wrapping_sub(f20); + out[1usize] = f11.wrapping_add(0x3ffffffffffff8u64).wrapping_sub(f21); + out[2usize] = f12.wrapping_add(0x3ffffffffffff8u64).wrapping_sub(f22); + out[3usize] = f13.wrapping_add(0x3ffffffffffff8u64).wrapping_sub(f23); + out[4usize] = f14.wrapping_add(0x3ffffffffffff8u64).wrapping_sub(f24) +} + +#[inline] +pub(crate) fn fmul(out: &mut [u64], f1: &[u64], f2: &[u64], uu___: &[fstar::uint128::uint128]) { + 
lowstar::ignore::ignore::<&[fstar::uint128::uint128]>(uu___); + let f10: u64 = f1[0usize]; + let f11: u64 = f1[1usize]; + let f12: u64 = f1[2usize]; + let f13: u64 = f1[3usize]; + let f14: u64 = f1[4usize]; + let f20: u64 = f2[0usize]; + let f21: u64 = f2[1usize]; + let f22: u64 = f2[2usize]; + let f23: u64 = f2[3usize]; + let f24: u64 = f2[4usize]; + let tmp1: u64 = f21.wrapping_mul(19u64); + let tmp2: u64 = f22.wrapping_mul(19u64); + let tmp3: u64 = f23.wrapping_mul(19u64); + let tmp4: u64 = f24.wrapping_mul(19u64); + let o0: fstar::uint128::uint128 = fstar::uint128::mul_wide(f10, f20); + let o1: fstar::uint128::uint128 = fstar::uint128::mul_wide(f10, f21); + let o2: fstar::uint128::uint128 = fstar::uint128::mul_wide(f10, f22); + let o3: fstar::uint128::uint128 = fstar::uint128::mul_wide(f10, f23); + let o4: fstar::uint128::uint128 = fstar::uint128::mul_wide(f10, f24); + let o01: fstar::uint128::uint128 = fstar::uint128::add(o0, fstar::uint128::mul_wide(f11, tmp4)); + let o11: fstar::uint128::uint128 = fstar::uint128::add(o1, fstar::uint128::mul_wide(f11, f20)); + let o21: fstar::uint128::uint128 = fstar::uint128::add(o2, fstar::uint128::mul_wide(f11, f21)); + let o31: fstar::uint128::uint128 = fstar::uint128::add(o3, fstar::uint128::mul_wide(f11, f22)); + let o41: fstar::uint128::uint128 = fstar::uint128::add(o4, fstar::uint128::mul_wide(f11, f23)); + let o02: fstar::uint128::uint128 = + fstar::uint128::add(o01, fstar::uint128::mul_wide(f12, tmp3)); + let o12: fstar::uint128::uint128 = + fstar::uint128::add(o11, fstar::uint128::mul_wide(f12, tmp4)); + let o22: fstar::uint128::uint128 = fstar::uint128::add(o21, fstar::uint128::mul_wide(f12, f20)); + let o32: fstar::uint128::uint128 = fstar::uint128::add(o31, fstar::uint128::mul_wide(f12, f21)); + let o42: fstar::uint128::uint128 = fstar::uint128::add(o41, fstar::uint128::mul_wide(f12, f22)); + let o03: fstar::uint128::uint128 = + fstar::uint128::add(o02, fstar::uint128::mul_wide(f13, tmp2)); + let o13: 
fstar::uint128::uint128 = + fstar::uint128::add(o12, fstar::uint128::mul_wide(f13, tmp3)); + let o23: fstar::uint128::uint128 = + fstar::uint128::add(o22, fstar::uint128::mul_wide(f13, tmp4)); + let o33: fstar::uint128::uint128 = fstar::uint128::add(o32, fstar::uint128::mul_wide(f13, f20)); + let o43: fstar::uint128::uint128 = fstar::uint128::add(o42, fstar::uint128::mul_wide(f13, f21)); + let o04: fstar::uint128::uint128 = + fstar::uint128::add(o03, fstar::uint128::mul_wide(f14, tmp1)); + let o14: fstar::uint128::uint128 = + fstar::uint128::add(o13, fstar::uint128::mul_wide(f14, tmp2)); + let o24: fstar::uint128::uint128 = + fstar::uint128::add(o23, fstar::uint128::mul_wide(f14, tmp3)); + let o34: fstar::uint128::uint128 = + fstar::uint128::add(o33, fstar::uint128::mul_wide(f14, tmp4)); + let o44: fstar::uint128::uint128 = fstar::uint128::add(o43, fstar::uint128::mul_wide(f14, f20)); + let tmp_w0: fstar::uint128::uint128 = o04; + let tmp_w1: fstar::uint128::uint128 = o14; + let tmp_w2: fstar::uint128::uint128 = o24; + let tmp_w3: fstar::uint128::uint128 = o34; + let tmp_w4: fstar::uint128::uint128 = o44; + let lĀ·: fstar::uint128::uint128 = + fstar::uint128::add(tmp_w0, fstar::uint128::uint64_to_uint128(0u64)); + let tmp01: u64 = fstar::uint128::uint128_to_uint64(lĀ·) & 0x7ffffffffffffu64; + let c0: u64 = fstar::uint128::uint128_to_uint64(fstar::uint128::shift_right(lĀ·, 51u32)); + let lĀ·0: fstar::uint128::uint128 = + fstar::uint128::add(tmp_w1, fstar::uint128::uint64_to_uint128(c0)); + let tmp11: u64 = fstar::uint128::uint128_to_uint64(lĀ·0) & 0x7ffffffffffffu64; + let c1: u64 = fstar::uint128::uint128_to_uint64(fstar::uint128::shift_right(lĀ·0, 51u32)); + let lĀ·1: fstar::uint128::uint128 = + fstar::uint128::add(tmp_w2, fstar::uint128::uint64_to_uint128(c1)); + let tmp21: u64 = fstar::uint128::uint128_to_uint64(lĀ·1) & 0x7ffffffffffffu64; + let c2: u64 = fstar::uint128::uint128_to_uint64(fstar::uint128::shift_right(lĀ·1, 51u32)); + let lĀ·2: 
fstar::uint128::uint128 = + fstar::uint128::add(tmp_w3, fstar::uint128::uint64_to_uint128(c2)); + let tmp31: u64 = fstar::uint128::uint128_to_uint64(lĀ·2) & 0x7ffffffffffffu64; + let c3: u64 = fstar::uint128::uint128_to_uint64(fstar::uint128::shift_right(lĀ·2, 51u32)); + let lĀ·3: fstar::uint128::uint128 = + fstar::uint128::add(tmp_w4, fstar::uint128::uint64_to_uint128(c3)); + let tmp41: u64 = fstar::uint128::uint128_to_uint64(lĀ·3) & 0x7ffffffffffffu64; + let c4: u64 = fstar::uint128::uint128_to_uint64(fstar::uint128::shift_right(lĀ·3, 51u32)); + let lĀ·4: u64 = tmp01.wrapping_add(c4.wrapping_mul(19u64)); + let tmp0Ā·: u64 = lĀ·4 & 0x7ffffffffffffu64; + let c5: u64 = lĀ·4.wrapping_shr(51u32); + let o00: u64 = tmp0Ā·; + let o10: u64 = tmp11.wrapping_add(c5); + let o20: u64 = tmp21; + let o30: u64 = tmp31; + let o40: u64 = tmp41; + out[0usize] = o00; + out[1usize] = o10; + out[2usize] = o20; + out[3usize] = o30; + out[4usize] = o40 +} + +#[inline] +pub(crate) fn fmul2(out: &mut [u64], f1: &[u64], f2: &[u64], uu___: &[fstar::uint128::uint128]) { + lowstar::ignore::ignore::<&[fstar::uint128::uint128]>(uu___); + let f10: u64 = f1[0usize]; + let f11: u64 = f1[1usize]; + let f12: u64 = f1[2usize]; + let f13: u64 = f1[3usize]; + let f14: u64 = f1[4usize]; + let f20: u64 = f2[0usize]; + let f21: u64 = f2[1usize]; + let f22: u64 = f2[2usize]; + let f23: u64 = f2[3usize]; + let f24: u64 = f2[4usize]; + let f30: u64 = f1[5usize]; + let f31: u64 = f1[6usize]; + let f32: u64 = f1[7usize]; + let f33: u64 = f1[8usize]; + let f34: u64 = f1[9usize]; + let f40: u64 = f2[5usize]; + let f41: u64 = f2[6usize]; + let f42: u64 = f2[7usize]; + let f43: u64 = f2[8usize]; + let f44: u64 = f2[9usize]; + let tmp11: u64 = f21.wrapping_mul(19u64); + let tmp12: u64 = f22.wrapping_mul(19u64); + let tmp13: u64 = f23.wrapping_mul(19u64); + let tmp14: u64 = f24.wrapping_mul(19u64); + let tmp21: u64 = f41.wrapping_mul(19u64); + let tmp22: u64 = f42.wrapping_mul(19u64); + let tmp23: u64 = 
f43.wrapping_mul(19u64); + let tmp24: u64 = f44.wrapping_mul(19u64); + let o0: fstar::uint128::uint128 = fstar::uint128::mul_wide(f10, f20); + let o1: fstar::uint128::uint128 = fstar::uint128::mul_wide(f10, f21); + let o2: fstar::uint128::uint128 = fstar::uint128::mul_wide(f10, f22); + let o3: fstar::uint128::uint128 = fstar::uint128::mul_wide(f10, f23); + let o4: fstar::uint128::uint128 = fstar::uint128::mul_wide(f10, f24); + let o01: fstar::uint128::uint128 = + fstar::uint128::add(o0, fstar::uint128::mul_wide(f11, tmp14)); + let o11: fstar::uint128::uint128 = fstar::uint128::add(o1, fstar::uint128::mul_wide(f11, f20)); + let o21: fstar::uint128::uint128 = fstar::uint128::add(o2, fstar::uint128::mul_wide(f11, f21)); + let o31: fstar::uint128::uint128 = fstar::uint128::add(o3, fstar::uint128::mul_wide(f11, f22)); + let o41: fstar::uint128::uint128 = fstar::uint128::add(o4, fstar::uint128::mul_wide(f11, f23)); + let o02: fstar::uint128::uint128 = + fstar::uint128::add(o01, fstar::uint128::mul_wide(f12, tmp13)); + let o12: fstar::uint128::uint128 = + fstar::uint128::add(o11, fstar::uint128::mul_wide(f12, tmp14)); + let o22: fstar::uint128::uint128 = fstar::uint128::add(o21, fstar::uint128::mul_wide(f12, f20)); + let o32: fstar::uint128::uint128 = fstar::uint128::add(o31, fstar::uint128::mul_wide(f12, f21)); + let o42: fstar::uint128::uint128 = fstar::uint128::add(o41, fstar::uint128::mul_wide(f12, f22)); + let o03: fstar::uint128::uint128 = + fstar::uint128::add(o02, fstar::uint128::mul_wide(f13, tmp12)); + let o13: fstar::uint128::uint128 = + fstar::uint128::add(o12, fstar::uint128::mul_wide(f13, tmp13)); + let o23: fstar::uint128::uint128 = + fstar::uint128::add(o22, fstar::uint128::mul_wide(f13, tmp14)); + let o33: fstar::uint128::uint128 = fstar::uint128::add(o32, fstar::uint128::mul_wide(f13, f20)); + let o43: fstar::uint128::uint128 = fstar::uint128::add(o42, fstar::uint128::mul_wide(f13, f21)); + let o04: fstar::uint128::uint128 = + fstar::uint128::add(o03, 
fstar::uint128::mul_wide(f14, tmp11)); + let o14: fstar::uint128::uint128 = + fstar::uint128::add(o13, fstar::uint128::mul_wide(f14, tmp12)); + let o24: fstar::uint128::uint128 = + fstar::uint128::add(o23, fstar::uint128::mul_wide(f14, tmp13)); + let o34: fstar::uint128::uint128 = + fstar::uint128::add(o33, fstar::uint128::mul_wide(f14, tmp14)); + let o44: fstar::uint128::uint128 = fstar::uint128::add(o43, fstar::uint128::mul_wide(f14, f20)); + let tmp_w10: fstar::uint128::uint128 = o04; + let tmp_w11: fstar::uint128::uint128 = o14; + let tmp_w12: fstar::uint128::uint128 = o24; + let tmp_w13: fstar::uint128::uint128 = o34; + let tmp_w14: fstar::uint128::uint128 = o44; + let o00: fstar::uint128::uint128 = fstar::uint128::mul_wide(f30, f40); + let o10: fstar::uint128::uint128 = fstar::uint128::mul_wide(f30, f41); + let o20: fstar::uint128::uint128 = fstar::uint128::mul_wide(f30, f42); + let o30: fstar::uint128::uint128 = fstar::uint128::mul_wide(f30, f43); + let o40: fstar::uint128::uint128 = fstar::uint128::mul_wide(f30, f44); + let o010: fstar::uint128::uint128 = + fstar::uint128::add(o00, fstar::uint128::mul_wide(f31, tmp24)); + let o110: fstar::uint128::uint128 = + fstar::uint128::add(o10, fstar::uint128::mul_wide(f31, f40)); + let o210: fstar::uint128::uint128 = + fstar::uint128::add(o20, fstar::uint128::mul_wide(f31, f41)); + let o310: fstar::uint128::uint128 = + fstar::uint128::add(o30, fstar::uint128::mul_wide(f31, f42)); + let o410: fstar::uint128::uint128 = + fstar::uint128::add(o40, fstar::uint128::mul_wide(f31, f43)); + let o020: fstar::uint128::uint128 = + fstar::uint128::add(o010, fstar::uint128::mul_wide(f32, tmp23)); + let o120: fstar::uint128::uint128 = + fstar::uint128::add(o110, fstar::uint128::mul_wide(f32, tmp24)); + let o220: fstar::uint128::uint128 = + fstar::uint128::add(o210, fstar::uint128::mul_wide(f32, f40)); + let o320: fstar::uint128::uint128 = + fstar::uint128::add(o310, fstar::uint128::mul_wide(f32, f41)); + let o420: 
fstar::uint128::uint128 = + fstar::uint128::add(o410, fstar::uint128::mul_wide(f32, f42)); + let o030: fstar::uint128::uint128 = + fstar::uint128::add(o020, fstar::uint128::mul_wide(f33, tmp22)); + let o130: fstar::uint128::uint128 = + fstar::uint128::add(o120, fstar::uint128::mul_wide(f33, tmp23)); + let o230: fstar::uint128::uint128 = + fstar::uint128::add(o220, fstar::uint128::mul_wide(f33, tmp24)); + let o330: fstar::uint128::uint128 = + fstar::uint128::add(o320, fstar::uint128::mul_wide(f33, f40)); + let o430: fstar::uint128::uint128 = + fstar::uint128::add(o420, fstar::uint128::mul_wide(f33, f41)); + let o040: fstar::uint128::uint128 = + fstar::uint128::add(o030, fstar::uint128::mul_wide(f34, tmp21)); + let o140: fstar::uint128::uint128 = + fstar::uint128::add(o130, fstar::uint128::mul_wide(f34, tmp22)); + let o240: fstar::uint128::uint128 = + fstar::uint128::add(o230, fstar::uint128::mul_wide(f34, tmp23)); + let o340: fstar::uint128::uint128 = + fstar::uint128::add(o330, fstar::uint128::mul_wide(f34, tmp24)); + let o440: fstar::uint128::uint128 = + fstar::uint128::add(o430, fstar::uint128::mul_wide(f34, f40)); + let tmp_w20: fstar::uint128::uint128 = o040; + let tmp_w21: fstar::uint128::uint128 = o140; + let tmp_w22: fstar::uint128::uint128 = o240; + let tmp_w23: fstar::uint128::uint128 = o340; + let tmp_w24: fstar::uint128::uint128 = o440; + let lĀ·: fstar::uint128::uint128 = + fstar::uint128::add(tmp_w10, fstar::uint128::uint64_to_uint128(0u64)); + let tmp0: u64 = fstar::uint128::uint128_to_uint64(lĀ·) & 0x7ffffffffffffu64; + let c0: u64 = fstar::uint128::uint128_to_uint64(fstar::uint128::shift_right(lĀ·, 51u32)); + let lĀ·0: fstar::uint128::uint128 = + fstar::uint128::add(tmp_w11, fstar::uint128::uint64_to_uint128(c0)); + let tmp1: u64 = fstar::uint128::uint128_to_uint64(lĀ·0) & 0x7ffffffffffffu64; + let c1: u64 = fstar::uint128::uint128_to_uint64(fstar::uint128::shift_right(lĀ·0, 51u32)); + let lĀ·1: fstar::uint128::uint128 = + 
fstar::uint128::add(tmp_w12, fstar::uint128::uint64_to_uint128(c1)); + let tmp2: u64 = fstar::uint128::uint128_to_uint64(lĀ·1) & 0x7ffffffffffffu64; + let c2: u64 = fstar::uint128::uint128_to_uint64(fstar::uint128::shift_right(lĀ·1, 51u32)); + let lĀ·2: fstar::uint128::uint128 = + fstar::uint128::add(tmp_w13, fstar::uint128::uint64_to_uint128(c2)); + let tmp3: u64 = fstar::uint128::uint128_to_uint64(lĀ·2) & 0x7ffffffffffffu64; + let c3: u64 = fstar::uint128::uint128_to_uint64(fstar::uint128::shift_right(lĀ·2, 51u32)); + let lĀ·3: fstar::uint128::uint128 = + fstar::uint128::add(tmp_w14, fstar::uint128::uint64_to_uint128(c3)); + let tmp4: u64 = fstar::uint128::uint128_to_uint64(lĀ·3) & 0x7ffffffffffffu64; + let c4: u64 = fstar::uint128::uint128_to_uint64(fstar::uint128::shift_right(lĀ·3, 51u32)); + let lĀ·4: u64 = tmp0.wrapping_add(c4.wrapping_mul(19u64)); + let tmp0Ā·: u64 = lĀ·4 & 0x7ffffffffffffu64; + let c5: u64 = lĀ·4.wrapping_shr(51u32); + let o100: u64 = tmp0Ā·; + let o111: u64 = tmp1.wrapping_add(c5); + let o121: u64 = tmp2; + let o131: u64 = tmp3; + let o141: u64 = tmp4; + let lĀ·5: fstar::uint128::uint128 = + fstar::uint128::add(tmp_w20, fstar::uint128::uint64_to_uint128(0u64)); + let tmp00: u64 = fstar::uint128::uint128_to_uint64(lĀ·5) & 0x7ffffffffffffu64; + let c00: u64 = fstar::uint128::uint128_to_uint64(fstar::uint128::shift_right(lĀ·5, 51u32)); + let lĀ·6: fstar::uint128::uint128 = + fstar::uint128::add(tmp_w21, fstar::uint128::uint64_to_uint128(c00)); + let tmp10: u64 = fstar::uint128::uint128_to_uint64(lĀ·6) & 0x7ffffffffffffu64; + let c10: u64 = fstar::uint128::uint128_to_uint64(fstar::uint128::shift_right(lĀ·6, 51u32)); + let lĀ·7: fstar::uint128::uint128 = + fstar::uint128::add(tmp_w22, fstar::uint128::uint64_to_uint128(c10)); + let tmp20: u64 = fstar::uint128::uint128_to_uint64(lĀ·7) & 0x7ffffffffffffu64; + let c20: u64 = fstar::uint128::uint128_to_uint64(fstar::uint128::shift_right(lĀ·7, 51u32)); + let lĀ·8: fstar::uint128::uint128 = + 
fstar::uint128::add(tmp_w23, fstar::uint128::uint64_to_uint128(c20)); + let tmp30: u64 = fstar::uint128::uint128_to_uint64(lĀ·8) & 0x7ffffffffffffu64; + let c30: u64 = fstar::uint128::uint128_to_uint64(fstar::uint128::shift_right(lĀ·8, 51u32)); + let lĀ·9: fstar::uint128::uint128 = + fstar::uint128::add(tmp_w24, fstar::uint128::uint64_to_uint128(c30)); + let tmp40: u64 = fstar::uint128::uint128_to_uint64(lĀ·9) & 0x7ffffffffffffu64; + let c40: u64 = fstar::uint128::uint128_to_uint64(fstar::uint128::shift_right(lĀ·9, 51u32)); + let lĀ·10: u64 = tmp00.wrapping_add(c40.wrapping_mul(19u64)); + let tmp0Ā·0: u64 = lĀ·10 & 0x7ffffffffffffu64; + let c50: u64 = lĀ·10.wrapping_shr(51u32); + let o200: u64 = tmp0Ā·0; + let o211: u64 = tmp10.wrapping_add(c50); + let o221: u64 = tmp20; + let o231: u64 = tmp30; + let o241: u64 = tmp40; + let o101: u64 = o100; + let o112: u64 = o111; + let o122: u64 = o121; + let o132: u64 = o131; + let o142: u64 = o141; + let o201: u64 = o200; + let o212: u64 = o211; + let o222: u64 = o221; + let o232: u64 = o231; + let o242: u64 = o241; + out[0usize] = o101; + out[1usize] = o112; + out[2usize] = o122; + out[3usize] = o132; + out[4usize] = o142; + out[5usize] = o201; + out[6usize] = o212; + out[7usize] = o222; + out[8usize] = o232; + out[9usize] = o242 +} + +#[inline] +pub(crate) fn fmul1(out: &mut [u64], f1: &[u64], f2: u64) { + let f10: u64 = f1[0usize]; + let f11: u64 = f1[1usize]; + let f12: u64 = f1[2usize]; + let f13: u64 = f1[3usize]; + let f14: u64 = f1[4usize]; + let tmp_w0: fstar::uint128::uint128 = fstar::uint128::mul_wide(f2, f10); + let tmp_w1: fstar::uint128::uint128 = fstar::uint128::mul_wide(f2, f11); + let tmp_w2: fstar::uint128::uint128 = fstar::uint128::mul_wide(f2, f12); + let tmp_w3: fstar::uint128::uint128 = fstar::uint128::mul_wide(f2, f13); + let tmp_w4: fstar::uint128::uint128 = fstar::uint128::mul_wide(f2, f14); + let lĀ·: fstar::uint128::uint128 = + fstar::uint128::add(tmp_w0, fstar::uint128::uint64_to_uint128(0u64)); + 
let tmp0: u64 = fstar::uint128::uint128_to_uint64(lĀ·) & 0x7ffffffffffffu64; + let c0: u64 = fstar::uint128::uint128_to_uint64(fstar::uint128::shift_right(lĀ·, 51u32)); + let lĀ·0: fstar::uint128::uint128 = + fstar::uint128::add(tmp_w1, fstar::uint128::uint64_to_uint128(c0)); + let tmp1: u64 = fstar::uint128::uint128_to_uint64(lĀ·0) & 0x7ffffffffffffu64; + let c1: u64 = fstar::uint128::uint128_to_uint64(fstar::uint128::shift_right(lĀ·0, 51u32)); + let lĀ·1: fstar::uint128::uint128 = + fstar::uint128::add(tmp_w2, fstar::uint128::uint64_to_uint128(c1)); + let tmp2: u64 = fstar::uint128::uint128_to_uint64(lĀ·1) & 0x7ffffffffffffu64; + let c2: u64 = fstar::uint128::uint128_to_uint64(fstar::uint128::shift_right(lĀ·1, 51u32)); + let lĀ·2: fstar::uint128::uint128 = + fstar::uint128::add(tmp_w3, fstar::uint128::uint64_to_uint128(c2)); + let tmp3: u64 = fstar::uint128::uint128_to_uint64(lĀ·2) & 0x7ffffffffffffu64; + let c3: u64 = fstar::uint128::uint128_to_uint64(fstar::uint128::shift_right(lĀ·2, 51u32)); + let lĀ·3: fstar::uint128::uint128 = + fstar::uint128::add(tmp_w4, fstar::uint128::uint64_to_uint128(c3)); + let tmp4: u64 = fstar::uint128::uint128_to_uint64(lĀ·3) & 0x7ffffffffffffu64; + let c4: u64 = fstar::uint128::uint128_to_uint64(fstar::uint128::shift_right(lĀ·3, 51u32)); + let lĀ·4: u64 = tmp0.wrapping_add(c4.wrapping_mul(19u64)); + let tmp0Ā·: u64 = lĀ·4 & 0x7ffffffffffffu64; + let c5: u64 = lĀ·4.wrapping_shr(51u32); + let o0: u64 = tmp0Ā·; + let o1: u64 = tmp1.wrapping_add(c5); + let o2: u64 = tmp2; + let o3: u64 = tmp3; + let o4: u64 = tmp4; + out[0usize] = o0; + out[1usize] = o1; + out[2usize] = o2; + out[3usize] = o3; + out[4usize] = o4 +} + +#[inline] +pub(crate) fn fsqr(out: &mut [u64], f: &[u64], uu___: &[fstar::uint128::uint128]) { + lowstar::ignore::ignore::<&[fstar::uint128::uint128]>(uu___); + let f0: u64 = f[0usize]; + let f1: u64 = f[1usize]; + let f2: u64 = f[2usize]; + let f3: u64 = f[3usize]; + let f4: u64 = f[4usize]; + let d0: u64 = 
2u64.wrapping_mul(f0); + let d1: u64 = 2u64.wrapping_mul(f1); + let d2: u64 = 38u64.wrapping_mul(f2); + let d3: u64 = 19u64.wrapping_mul(f3); + let d419: u64 = 19u64.wrapping_mul(f4); + let d4: u64 = 2u64.wrapping_mul(d419); + let s0: fstar::uint128::uint128 = fstar::uint128::add( + fstar::uint128::add( + fstar::uint128::mul_wide(f0, f0), + fstar::uint128::mul_wide(d4, f1), + ), + fstar::uint128::mul_wide(d2, f3), + ); + let s1: fstar::uint128::uint128 = fstar::uint128::add( + fstar::uint128::add( + fstar::uint128::mul_wide(d0, f1), + fstar::uint128::mul_wide(d4, f2), + ), + fstar::uint128::mul_wide(d3, f3), + ); + let s2: fstar::uint128::uint128 = fstar::uint128::add( + fstar::uint128::add( + fstar::uint128::mul_wide(d0, f2), + fstar::uint128::mul_wide(f1, f1), + ), + fstar::uint128::mul_wide(d4, f3), + ); + let s3: fstar::uint128::uint128 = fstar::uint128::add( + fstar::uint128::add( + fstar::uint128::mul_wide(d0, f3), + fstar::uint128::mul_wide(d1, f2), + ), + fstar::uint128::mul_wide(f4, d419), + ); + let s4: fstar::uint128::uint128 = fstar::uint128::add( + fstar::uint128::add( + fstar::uint128::mul_wide(d0, f4), + fstar::uint128::mul_wide(d1, f3), + ), + fstar::uint128::mul_wide(f2, f2), + ); + let o0: fstar::uint128::uint128 = s0; + let o1: fstar::uint128::uint128 = s1; + let o2: fstar::uint128::uint128 = s2; + let o3: fstar::uint128::uint128 = s3; + let o4: fstar::uint128::uint128 = s4; + let lĀ·: fstar::uint128::uint128 = + fstar::uint128::add(o0, fstar::uint128::uint64_to_uint128(0u64)); + let tmp0: u64 = fstar::uint128::uint128_to_uint64(lĀ·) & 0x7ffffffffffffu64; + let c0: u64 = fstar::uint128::uint128_to_uint64(fstar::uint128::shift_right(lĀ·, 51u32)); + let lĀ·0: fstar::uint128::uint128 = + fstar::uint128::add(o1, fstar::uint128::uint64_to_uint128(c0)); + let tmp1: u64 = fstar::uint128::uint128_to_uint64(lĀ·0) & 0x7ffffffffffffu64; + let c1: u64 = fstar::uint128::uint128_to_uint64(fstar::uint128::shift_right(lĀ·0, 51u32)); + let lĀ·1: 
fstar::uint128::uint128 = + fstar::uint128::add(o2, fstar::uint128::uint64_to_uint128(c1)); + let tmp2: u64 = fstar::uint128::uint128_to_uint64(lĀ·1) & 0x7ffffffffffffu64; + let c2: u64 = fstar::uint128::uint128_to_uint64(fstar::uint128::shift_right(lĀ·1, 51u32)); + let lĀ·2: fstar::uint128::uint128 = + fstar::uint128::add(o3, fstar::uint128::uint64_to_uint128(c2)); + let tmp3: u64 = fstar::uint128::uint128_to_uint64(lĀ·2) & 0x7ffffffffffffu64; + let c3: u64 = fstar::uint128::uint128_to_uint64(fstar::uint128::shift_right(lĀ·2, 51u32)); + let lĀ·3: fstar::uint128::uint128 = + fstar::uint128::add(o4, fstar::uint128::uint64_to_uint128(c3)); + let tmp4: u64 = fstar::uint128::uint128_to_uint64(lĀ·3) & 0x7ffffffffffffu64; + let c4: u64 = fstar::uint128::uint128_to_uint64(fstar::uint128::shift_right(lĀ·3, 51u32)); + let lĀ·4: u64 = tmp0.wrapping_add(c4.wrapping_mul(19u64)); + let tmp0Ā·: u64 = lĀ·4 & 0x7ffffffffffffu64; + let c5: u64 = lĀ·4.wrapping_shr(51u32); + let o00: u64 = tmp0Ā·; + let o10: u64 = tmp1.wrapping_add(c5); + let o20: u64 = tmp2; + let o30: u64 = tmp3; + let o40: u64 = tmp4; + out[0usize] = o00; + out[1usize] = o10; + out[2usize] = o20; + out[3usize] = o30; + out[4usize] = o40 +} + +#[inline] +pub(crate) fn fsqr2(out: &mut [u64], f: &[u64], uu___: &[fstar::uint128::uint128]) { + lowstar::ignore::ignore::<&[fstar::uint128::uint128]>(uu___); + let f10: u64 = f[0usize]; + let f11: u64 = f[1usize]; + let f12: u64 = f[2usize]; + let f13: u64 = f[3usize]; + let f14: u64 = f[4usize]; + let f20: u64 = f[5usize]; + let f21: u64 = f[6usize]; + let f22: u64 = f[7usize]; + let f23: u64 = f[8usize]; + let f24: u64 = f[9usize]; + let d0: u64 = 2u64.wrapping_mul(f10); + let d1: u64 = 2u64.wrapping_mul(f11); + let d2: u64 = 38u64.wrapping_mul(f12); + let d3: u64 = 19u64.wrapping_mul(f13); + let d419: u64 = 19u64.wrapping_mul(f14); + let d4: u64 = 2u64.wrapping_mul(d419); + let s0: fstar::uint128::uint128 = fstar::uint128::add( + fstar::uint128::add( + 
fstar::uint128::mul_wide(f10, f10), + fstar::uint128::mul_wide(d4, f11), + ), + fstar::uint128::mul_wide(d2, f13), + ); + let s1: fstar::uint128::uint128 = fstar::uint128::add( + fstar::uint128::add( + fstar::uint128::mul_wide(d0, f11), + fstar::uint128::mul_wide(d4, f12), + ), + fstar::uint128::mul_wide(d3, f13), + ); + let s2: fstar::uint128::uint128 = fstar::uint128::add( + fstar::uint128::add( + fstar::uint128::mul_wide(d0, f12), + fstar::uint128::mul_wide(f11, f11), + ), + fstar::uint128::mul_wide(d4, f13), + ); + let s3: fstar::uint128::uint128 = fstar::uint128::add( + fstar::uint128::add( + fstar::uint128::mul_wide(d0, f13), + fstar::uint128::mul_wide(d1, f12), + ), + fstar::uint128::mul_wide(f14, d419), + ); + let s4: fstar::uint128::uint128 = fstar::uint128::add( + fstar::uint128::add( + fstar::uint128::mul_wide(d0, f14), + fstar::uint128::mul_wide(d1, f13), + ), + fstar::uint128::mul_wide(f12, f12), + ); + let o10: fstar::uint128::uint128 = s0; + let o11: fstar::uint128::uint128 = s1; + let o12: fstar::uint128::uint128 = s2; + let o13: fstar::uint128::uint128 = s3; + let o14: fstar::uint128::uint128 = s4; + let d00: u64 = 2u64.wrapping_mul(f20); + let d10: u64 = 2u64.wrapping_mul(f21); + let d20: u64 = 38u64.wrapping_mul(f22); + let d30: u64 = 19u64.wrapping_mul(f23); + let d4190: u64 = 19u64.wrapping_mul(f24); + let d40: u64 = 2u64.wrapping_mul(d4190); + let s00: fstar::uint128::uint128 = fstar::uint128::add( + fstar::uint128::add( + fstar::uint128::mul_wide(f20, f20), + fstar::uint128::mul_wide(d40, f21), + ), + fstar::uint128::mul_wide(d20, f23), + ); + let s10: fstar::uint128::uint128 = fstar::uint128::add( + fstar::uint128::add( + fstar::uint128::mul_wide(d00, f21), + fstar::uint128::mul_wide(d40, f22), + ), + fstar::uint128::mul_wide(d30, f23), + ); + let s20: fstar::uint128::uint128 = fstar::uint128::add( + fstar::uint128::add( + fstar::uint128::mul_wide(d00, f22), + fstar::uint128::mul_wide(f21, f21), + ), + fstar::uint128::mul_wide(d40, f23), + 
); + let s30: fstar::uint128::uint128 = fstar::uint128::add( + fstar::uint128::add( + fstar::uint128::mul_wide(d00, f23), + fstar::uint128::mul_wide(d10, f22), + ), + fstar::uint128::mul_wide(f24, d4190), + ); + let s40: fstar::uint128::uint128 = fstar::uint128::add( + fstar::uint128::add( + fstar::uint128::mul_wide(d00, f24), + fstar::uint128::mul_wide(d10, f23), + ), + fstar::uint128::mul_wide(f22, f22), + ); + let o20: fstar::uint128::uint128 = s00; + let o21: fstar::uint128::uint128 = s10; + let o22: fstar::uint128::uint128 = s20; + let o23: fstar::uint128::uint128 = s30; + let o24: fstar::uint128::uint128 = s40; + let lĀ·: fstar::uint128::uint128 = + fstar::uint128::add(o10, fstar::uint128::uint64_to_uint128(0u64)); + let tmp0: u64 = fstar::uint128::uint128_to_uint64(lĀ·) & 0x7ffffffffffffu64; + let c0: u64 = fstar::uint128::uint128_to_uint64(fstar::uint128::shift_right(lĀ·, 51u32)); + let lĀ·0: fstar::uint128::uint128 = + fstar::uint128::add(o11, fstar::uint128::uint64_to_uint128(c0)); + let tmp1: u64 = fstar::uint128::uint128_to_uint64(lĀ·0) & 0x7ffffffffffffu64; + let c1: u64 = fstar::uint128::uint128_to_uint64(fstar::uint128::shift_right(lĀ·0, 51u32)); + let lĀ·1: fstar::uint128::uint128 = + fstar::uint128::add(o12, fstar::uint128::uint64_to_uint128(c1)); + let tmp2: u64 = fstar::uint128::uint128_to_uint64(lĀ·1) & 0x7ffffffffffffu64; + let c2: u64 = fstar::uint128::uint128_to_uint64(fstar::uint128::shift_right(lĀ·1, 51u32)); + let lĀ·2: fstar::uint128::uint128 = + fstar::uint128::add(o13, fstar::uint128::uint64_to_uint128(c2)); + let tmp3: u64 = fstar::uint128::uint128_to_uint64(lĀ·2) & 0x7ffffffffffffu64; + let c3: u64 = fstar::uint128::uint128_to_uint64(fstar::uint128::shift_right(lĀ·2, 51u32)); + let lĀ·3: fstar::uint128::uint128 = + fstar::uint128::add(o14, fstar::uint128::uint64_to_uint128(c3)); + let tmp4: u64 = fstar::uint128::uint128_to_uint64(lĀ·3) & 0x7ffffffffffffu64; + let c4: u64 = 
fstar::uint128::uint128_to_uint64(fstar::uint128::shift_right(lĀ·3, 51u32)); + let lĀ·4: u64 = tmp0.wrapping_add(c4.wrapping_mul(19u64)); + let tmp0Ā·: u64 = lĀ·4 & 0x7ffffffffffffu64; + let c5: u64 = lĀ·4.wrapping_shr(51u32); + let o101: u64 = tmp0Ā·; + let o111: u64 = tmp1.wrapping_add(c5); + let o121: u64 = tmp2; + let o131: u64 = tmp3; + let o141: u64 = tmp4; + let lĀ·5: fstar::uint128::uint128 = + fstar::uint128::add(o20, fstar::uint128::uint64_to_uint128(0u64)); + let tmp00: u64 = fstar::uint128::uint128_to_uint64(lĀ·5) & 0x7ffffffffffffu64; + let c00: u64 = fstar::uint128::uint128_to_uint64(fstar::uint128::shift_right(lĀ·5, 51u32)); + let lĀ·6: fstar::uint128::uint128 = + fstar::uint128::add(o21, fstar::uint128::uint64_to_uint128(c00)); + let tmp10: u64 = fstar::uint128::uint128_to_uint64(lĀ·6) & 0x7ffffffffffffu64; + let c10: u64 = fstar::uint128::uint128_to_uint64(fstar::uint128::shift_right(lĀ·6, 51u32)); + let lĀ·7: fstar::uint128::uint128 = + fstar::uint128::add(o22, fstar::uint128::uint64_to_uint128(c10)); + let tmp20: u64 = fstar::uint128::uint128_to_uint64(lĀ·7) & 0x7ffffffffffffu64; + let c20: u64 = fstar::uint128::uint128_to_uint64(fstar::uint128::shift_right(lĀ·7, 51u32)); + let lĀ·8: fstar::uint128::uint128 = + fstar::uint128::add(o23, fstar::uint128::uint64_to_uint128(c20)); + let tmp30: u64 = fstar::uint128::uint128_to_uint64(lĀ·8) & 0x7ffffffffffffu64; + let c30: u64 = fstar::uint128::uint128_to_uint64(fstar::uint128::shift_right(lĀ·8, 51u32)); + let lĀ·9: fstar::uint128::uint128 = + fstar::uint128::add(o24, fstar::uint128::uint64_to_uint128(c30)); + let tmp40: u64 = fstar::uint128::uint128_to_uint64(lĀ·9) & 0x7ffffffffffffu64; + let c40: u64 = fstar::uint128::uint128_to_uint64(fstar::uint128::shift_right(lĀ·9, 51u32)); + let lĀ·10: u64 = tmp00.wrapping_add(c40.wrapping_mul(19u64)); + let tmp0Ā·0: u64 = lĀ·10 & 0x7ffffffffffffu64; + let c50: u64 = lĀ·10.wrapping_shr(51u32); + let o201: u64 = tmp0Ā·0; + let o211: u64 = tmp10.wrapping_add(c50); 
+ let o221: u64 = tmp20; + let o231: u64 = tmp30; + let o241: u64 = tmp40; + let o100: u64 = o101; + let o110: u64 = o111; + let o120: u64 = o121; + let o130: u64 = o131; + let o140: u64 = o141; + let o200: u64 = o201; + let o210: u64 = o211; + let o220: u64 = o221; + let o230: u64 = o231; + let o240: u64 = o241; + out[0usize] = o100; + out[1usize] = o110; + out[2usize] = o120; + out[3usize] = o130; + out[4usize] = o140; + out[5usize] = o200; + out[6usize] = o210; + out[7usize] = o220; + out[8usize] = o230; + out[9usize] = o240 +} + +pub(crate) fn store_felem(u64s: &mut [u64], f: &[u64]) { + let f0: u64 = f[0usize]; + let f1: u64 = f[1usize]; + let f2: u64 = f[2usize]; + let f3: u64 = f[3usize]; + let f4: u64 = f[4usize]; + let lĀ·: u64 = f0.wrapping_add(0u64); + let tmp0: u64 = lĀ· & 0x7ffffffffffffu64; + let c0: u64 = lĀ·.wrapping_shr(51u32); + let lĀ·0: u64 = f1.wrapping_add(c0); + let tmp1: u64 = lĀ·0 & 0x7ffffffffffffu64; + let c1: u64 = lĀ·0.wrapping_shr(51u32); + let lĀ·1: u64 = f2.wrapping_add(c1); + let tmp2: u64 = lĀ·1 & 0x7ffffffffffffu64; + let c2: u64 = lĀ·1.wrapping_shr(51u32); + let lĀ·2: u64 = f3.wrapping_add(c2); + let tmp3: u64 = lĀ·2 & 0x7ffffffffffffu64; + let c3: u64 = lĀ·2.wrapping_shr(51u32); + let lĀ·3: u64 = f4.wrapping_add(c3); + let tmp4: u64 = lĀ·3 & 0x7ffffffffffffu64; + let c4: u64 = lĀ·3.wrapping_shr(51u32); + let lĀ·4: u64 = tmp0.wrapping_add(c4.wrapping_mul(19u64)); + let tmp0Ā·: u64 = lĀ·4 & 0x7ffffffffffffu64; + let c5: u64 = lĀ·4.wrapping_shr(51u32); + let f01: u64 = tmp0Ā·; + let f11: u64 = tmp1.wrapping_add(c5); + let f21: u64 = tmp2; + let f31: u64 = tmp3; + let f41: u64 = tmp4; + let m0: u64 = fstar::uint64::gte_mask(f01, 0x7ffffffffffedu64); + let m1: u64 = fstar::uint64::eq_mask(f11, 0x7ffffffffffffu64); + let m2: u64 = fstar::uint64::eq_mask(f21, 0x7ffffffffffffu64); + let m3: u64 = fstar::uint64::eq_mask(f31, 0x7ffffffffffffu64); + let m4: u64 = fstar::uint64::eq_mask(f41, 0x7ffffffffffffu64); + let mask: u64 = m0 & m1 & 
m2 & m3 & m4; + let f0Ā·: u64 = f01.wrapping_sub(mask & 0x7ffffffffffedu64); + let f1Ā·: u64 = f11.wrapping_sub(mask & 0x7ffffffffffffu64); + let f2Ā·: u64 = f21.wrapping_sub(mask & 0x7ffffffffffffu64); + let f3Ā·: u64 = f31.wrapping_sub(mask & 0x7ffffffffffffu64); + let f4Ā·: u64 = f41.wrapping_sub(mask & 0x7ffffffffffffu64); + let f02: u64 = f0Ā·; + let f12: u64 = f1Ā·; + let f22: u64 = f2Ā·; + let f32: u64 = f3Ā·; + let f42: u64 = f4Ā·; + let o0: u64 = f02 | f12.wrapping_shl(51u32); + let o1: u64 = f12.wrapping_shr(13u32) | f22.wrapping_shl(38u32); + let o2: u64 = f22.wrapping_shr(26u32) | f32.wrapping_shl(25u32); + let o3: u64 = f32.wrapping_shr(39u32) | f42.wrapping_shl(12u32); + let o00: u64 = o0; + let o10: u64 = o1; + let o20: u64 = o2; + let o30: u64 = o3; + u64s[0usize] = o00; + u64s[1usize] = o10; + u64s[2usize] = o20; + u64s[3usize] = o30 +} + +#[inline] +pub(crate) fn cswap2(bit: u64, p1: &mut [u64], p2: &mut [u64]) { + let mask: u64 = 0u64.wrapping_sub(bit); + krml::unroll_for!(10, "i", 0u32, 1u32, { + let dummy: u64 = mask & (p1[i as usize] ^ p2[i as usize]); + p1[i as usize] ^= dummy; + p2[i as usize] ^= dummy + }) +} diff --git a/libcrux-hacl-rs/src/curve25519_51.rs b/libcrux-hacl-rs/src/curve25519_51.rs new file mode 100644 index 000000000..61928be92 --- /dev/null +++ b/libcrux-hacl-rs/src/curve25519_51.rs @@ -0,0 +1,340 @@ +#![allow(non_snake_case)] +#![allow(non_upper_case_globals)] +#![allow(non_camel_case_types)] +#![allow(unused_assignments)] +#![allow(unreachable_patterns)] + +use crate::fstar; +use crate::lowstar; + +const g25519: [u8; 32] = [ + 9u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, + 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, +]; + +fn point_add_and_double(q: &[u64], p01_tmp1: &mut [u64], tmp2: &[fstar::uint128::uint128]) { + let nq: (&mut [u64], &mut [u64]) = p01_tmp1.split_at_mut(0usize); + let nq_p1: (&mut [u64], &mut [u64]) = nq.1.split_at_mut(10usize); + 
let tmp1: (&mut [u64], &mut [u64]) = nq_p1.1.split_at_mut(10usize); + let x1: (&[u64], &[u64]) = q.split_at(0usize); + let x2: (&[u64], &[u64]) = nq_p1.0.split_at(0usize); + let z2: (&[u64], &[u64]) = x2.1.split_at(5usize); + let dc: (&mut [u64], &mut [u64]) = tmp1.1.split_at_mut(10usize); + let ab: (&mut [u64], &mut [u64]) = dc.0.split_at_mut(0usize); + let a: (&mut [u64], &mut [u64]) = ab.1.split_at_mut(0usize); + let b: (&mut [u64], &mut [u64]) = a.1.split_at_mut(5usize); + crate::bignum25519_51::fadd(b.0, z2.0, z2.1); + crate::bignum25519_51::fsub(b.1, z2.0, z2.1); + let ab1: (&mut [u64], &mut [u64]) = ab.1.split_at_mut(0usize); + let x3: (&mut [u64], &mut [u64]) = tmp1.0.split_at_mut(0usize); + let z31: (&mut [u64], &mut [u64]) = x3.1.split_at_mut(5usize); + let d: (&mut [u64], &mut [u64]) = dc.1.split_at_mut(0usize); + let c: (&mut [u64], &mut [u64]) = d.1.split_at_mut(5usize); + crate::bignum25519_51::fadd(c.1, z31.0, z31.1); + crate::bignum25519_51::fsub(c.0, z31.0, z31.1); + let mut f1_copy: [u64; 10] = [0u64; 10usize]; + ((&mut f1_copy)[0usize..10usize]).copy_from_slice(&dc.1[0usize..10usize]); + crate::bignum25519_51::fmul2(dc.1, &f1_copy, ab1.1, tmp2); + let d1: (&[u64], &[u64]) = dc.1.split_at(0usize); + let c1: (&[u64], &[u64]) = d1.1.split_at(5usize); + crate::bignum25519_51::fadd(z31.0, c1.0, c1.1); + crate::bignum25519_51::fsub(z31.1, c1.0, c1.1); + let ab2: (&mut [u64], &mut [u64]) = ab1.1.split_at_mut(0usize); + let dc1: (&mut [u64], &mut [u64]) = dc.1.split_at_mut(0usize); + crate::bignum25519_51::fsqr2(dc1.1, ab2.1, tmp2); + let mut f1_copy0: [u64; 10] = [0u64; 10usize]; + ((&mut f1_copy0)[0usize..10usize]).copy_from_slice(&tmp1.0[0usize..10usize]); + crate::bignum25519_51::fsqr2(tmp1.0, &f1_copy0, tmp2); + let a1: (&mut [u64], &mut [u64]) = ab2.1.split_at_mut(0usize); + let b1: (&mut [u64], &mut [u64]) = a1.1.split_at_mut(5usize); + let d0: (&mut [u64], &mut [u64]) = dc1.1.split_at_mut(0usize); + let c0: (&mut [u64], &mut [u64]) = 
d0.1.split_at_mut(5usize); + b1.0[0usize] = c0.1[0usize]; + b1.0[1usize] = c0.1[1usize]; + b1.0[2usize] = c0.1[2usize]; + b1.0[3usize] = c0.1[3usize]; + b1.0[4usize] = c0.1[4usize]; + let mut f2_copy: [u64; 5] = [0u64; 5usize]; + ((&mut f2_copy)[0usize..5usize]).copy_from_slice(&c0.1[0usize..5usize]); + crate::bignum25519_51::fsub(c0.1, c0.0, &f2_copy); + crate::bignum25519_51::fmul1(b1.1, c0.1, 121665u64); + let mut f1_copy1: [u64; 5] = [0u64; 5usize]; + ((&mut f1_copy1)[0usize..5usize]).copy_from_slice(&b1.1[0usize..5usize]); + crate::bignum25519_51::fadd(b1.1, &f1_copy1, c0.0); + let ab3: (&[u64], &[u64]) = ab2.1.split_at(0usize); + let dc2: (&[u64], &[u64]) = dc1.1.split_at(0usize); + crate::bignum25519_51::fmul2(nq_p1.0, dc2.1, ab3.1, tmp2); + let z310: (&mut [u64], &mut [u64]) = tmp1.0.split_at_mut(5usize); + let mut f1_copy2: [u64; 5] = [0u64; 5usize]; + ((&mut f1_copy2)[0usize..5usize]).copy_from_slice(&z310.1[0usize..5usize]); + crate::bignum25519_51::fmul(z310.1, &f1_copy2, x1.1, tmp2) +} + +fn point_double(nq: &mut [u64], tmp1: &mut [u64], tmp2: &[fstar::uint128::uint128]) { + let x2: (&[u64], &[u64]) = nq.split_at(0usize); + let z2: (&[u64], &[u64]) = x2.1.split_at(5usize); + let ab: (&mut [u64], &mut [u64]) = tmp1.split_at_mut(0usize); + let dc: (&mut [u64], &mut [u64]) = ab.1.split_at_mut(10usize); + let a: (&mut [u64], &mut [u64]) = dc.0.split_at_mut(0usize); + let b: (&mut [u64], &mut [u64]) = a.1.split_at_mut(5usize); + crate::bignum25519_51::fadd(b.0, z2.0, z2.1); + crate::bignum25519_51::fsub(b.1, z2.0, z2.1); + crate::bignum25519_51::fsqr2(dc.1, dc.0, tmp2); + let d: (&mut [u64], &mut [u64]) = dc.1.split_at_mut(0usize); + let c: (&mut [u64], &mut [u64]) = d.1.split_at_mut(5usize); + let a1: (&mut [u64], &mut [u64]) = dc.0.split_at_mut(0usize); + let b1: (&mut [u64], &mut [u64]) = a1.1.split_at_mut(5usize); + b1.0[0usize] = c.1[0usize]; + b1.0[1usize] = c.1[1usize]; + b1.0[2usize] = c.1[2usize]; + b1.0[3usize] = c.1[3usize]; + b1.0[4usize] = 
c.1[4usize]; + let mut f2_copy: [u64; 5] = [0u64; 5usize]; + ((&mut f2_copy)[0usize..5usize]).copy_from_slice(&c.1[0usize..5usize]); + crate::bignum25519_51::fsub(c.1, c.0, &f2_copy); + crate::bignum25519_51::fmul1(b1.1, c.1, 121665u64); + let mut f1_copy: [u64; 5] = [0u64; 5usize]; + ((&mut f1_copy)[0usize..5usize]).copy_from_slice(&b1.1[0usize..5usize]); + crate::bignum25519_51::fadd(b1.1, &f1_copy, c.0); + let ab1: (&[u64], &[u64]) = dc.0.split_at(0usize); + let dc1: (&[u64], &[u64]) = dc.1.split_at(0usize); + crate::bignum25519_51::fmul2(nq, dc1.1, ab1.1, tmp2) +} + +fn montgomery_ladder(out: &mut [u64], key: &[u8], init: &[u64]) { + let tmp2: [fstar::uint128::uint128; 10] = [fstar::uint128::uint64_to_uint128(0u64); 10usize]; + let mut p01_tmp1_swap: [u64; 41] = [0u64; 41usize]; + let p01: (&mut [u64], &mut [u64]) = p01_tmp1_swap.split_at_mut(0usize); + let p03: (&mut [u64], &mut [u64]) = p01.1.split_at_mut(0usize); + let p11: (&mut [u64], &mut [u64]) = p03.1.split_at_mut(10usize); + (p11.1[0usize..10usize]).copy_from_slice(&init[0usize..10usize]); + let x0: (&mut [u64], &mut [u64]) = p11.0.split_at_mut(0usize); + let z0: (&mut [u64], &mut [u64]) = x0.1.split_at_mut(5usize); + z0.0[0usize] = 1u64; + z0.0[1usize] = 0u64; + z0.0[2usize] = 0u64; + z0.0[3usize] = 0u64; + z0.0[4usize] = 0u64; + z0.1[0usize] = 0u64; + z0.1[1usize] = 0u64; + z0.1[2usize] = 0u64; + z0.1[3usize] = 0u64; + z0.1[4usize] = 0u64; + let swap: (&mut [u64], &mut [u64]) = p01.1.split_at_mut(40usize); + let p01_tmp1: (&mut [u64], &mut [u64]) = swap.0.split_at_mut(0usize); + let nq: (&mut [u64], &mut [u64]) = p01_tmp1.1.split_at_mut(0usize); + let nq_p1: (&mut [u64], &mut [u64]) = nq.1.split_at_mut(10usize); + crate::bignum25519_51::cswap2(1u64, nq_p1.0, nq_p1.1); + let p01_tmp11: (&mut [u64], &mut [u64]) = p01_tmp1.1.split_at_mut(0usize); + crate::curve25519_51::point_add_and_double(init, p01_tmp11.1, &tmp2); + swap.1[0usize] = 1u64; + for i in 0u32..251u32 { + let p01_tmp12: (&mut [u64], &mut 
[u64]) = p01_tmp11.1.split_at_mut(0usize); + let swap1: (&mut [u64], &mut [u64]) = swap.1.split_at_mut(0usize); + let nq1: (&mut [u64], &mut [u64]) = p01_tmp12.1.split_at_mut(0usize); + let nq_p11: (&mut [u64], &mut [u64]) = nq1.1.split_at_mut(10usize); + let bit: u64 = ((key[253u32.wrapping_sub(i).wrapping_div(8u32) as usize]) + .wrapping_shr(253u32.wrapping_sub(i).wrapping_rem(8u32)) + & 1u8) as u64; + let sw: u64 = swap1.1[0usize] ^ bit; + crate::bignum25519_51::cswap2(sw, nq_p11.0, nq_p11.1); + crate::curve25519_51::point_add_and_double(init, p01_tmp12.1, &tmp2); + swap1.1[0usize] = bit + } + let sw: u64 = swap.1[0usize]; + let p01_tmp12: (&mut [u64], &mut [u64]) = p01_tmp11.1.split_at_mut(0usize); + let nq1: (&mut [u64], &mut [u64]) = p01_tmp12.1.split_at_mut(0usize); + let nq_p11: (&mut [u64], &mut [u64]) = nq1.1.split_at_mut(10usize); + crate::bignum25519_51::cswap2(sw, nq_p11.0, nq_p11.1); + let p01_tmp10: (&mut [u64], &mut [u64]) = p01_tmp12.1.split_at_mut(0usize); + let nq0: (&mut [u64], &mut [u64]) = p01_tmp10.1.split_at_mut(0usize); + let tmp1: (&mut [u64], &mut [u64]) = nq0.1.split_at_mut(20usize); + crate::curve25519_51::point_double(tmp1.0, tmp1.1, &tmp2); + crate::curve25519_51::point_double(tmp1.0, tmp1.1, &tmp2); + crate::curve25519_51::point_double(tmp1.0, tmp1.1, &tmp2); + let p010: (&[u64], &[u64]) = p01_tmp10.1.split_at(0usize); + (out[0usize..10usize]).copy_from_slice(&p010.1[0usize..10usize]) +} + +pub(crate) fn fsquare_times(o: &mut [u64], inp: &[u64], tmp: &[fstar::uint128::uint128], n: u32) { + crate::bignum25519_51::fsqr(o, inp, tmp); + for _i in 0u32..n.wrapping_sub(1u32) { + let mut f1_copy: [u64; 5] = [0u64; 5usize]; + ((&mut f1_copy)[0usize..5usize]).copy_from_slice(&o[0usize..5usize]); + crate::bignum25519_51::fsqr(o, &f1_copy, tmp) + } +} + +pub(crate) fn finv(o: &mut [u64], i: &[u64], tmp: &[fstar::uint128::uint128]) { + let mut t1: [u64; 20] = [0u64; 20usize]; + let a1: (&mut [u64], &mut [u64]) = t1.split_at_mut(0usize); + let 
b1: (&mut [u64], &mut [u64]) = a1.1.split_at_mut(5usize); + let t01: (&mut [u64], &mut [u64]) = b1.1.split_at_mut(10usize); + let tmp1: (&[fstar::uint128::uint128], &[fstar::uint128::uint128]) = tmp.split_at(0usize); + crate::curve25519_51::fsquare_times(b1.0, i, tmp1.1, 1u32); + crate::curve25519_51::fsquare_times(t01.1, b1.0, tmp1.1, 2u32); + crate::bignum25519_51::fmul(t01.0, t01.1, i, tmp); + let mut f2_copy: [u64; 5] = [0u64; 5usize]; + ((&mut f2_copy)[0usize..5usize]).copy_from_slice(&b1.0[0usize..5usize]); + crate::bignum25519_51::fmul(b1.0, t01.0, &f2_copy, tmp); + let tmp11: (&[fstar::uint128::uint128], &[fstar::uint128::uint128]) = tmp.split_at(0usize); + crate::curve25519_51::fsquare_times(t01.1, b1.0, tmp11.1, 1u32); + let mut f2_copy0: [u64; 5] = [0u64; 5usize]; + ((&mut f2_copy0)[0usize..5usize]).copy_from_slice(&t01.0[0usize..5usize]); + crate::bignum25519_51::fmul(t01.0, t01.1, &f2_copy0, tmp); + let tmp12: (&[fstar::uint128::uint128], &[fstar::uint128::uint128]) = tmp.split_at(0usize); + crate::curve25519_51::fsquare_times(t01.1, t01.0, tmp12.1, 5u32); + let mut f2_copy1: [u64; 5] = [0u64; 5usize]; + ((&mut f2_copy1)[0usize..5usize]).copy_from_slice(&t01.0[0usize..5usize]); + crate::bignum25519_51::fmul(t01.0, t01.1, &f2_copy1, tmp); + let b10: (&mut [u64], &mut [u64]) = t01.0.split_at_mut(0usize); + let c1: (&mut [u64], &mut [u64]) = b10.1.split_at_mut(5usize); + let t010: (&mut [u64], &mut [u64]) = t01.1.split_at_mut(0usize); + let tmp10: (&[fstar::uint128::uint128], &[fstar::uint128::uint128]) = tmp.split_at(0usize); + crate::curve25519_51::fsquare_times(t010.1, c1.0, tmp10.1, 10u32); + crate::bignum25519_51::fmul(c1.1, t010.1, c1.0, tmp); + let tmp110: (&[fstar::uint128::uint128], &[fstar::uint128::uint128]) = tmp.split_at(0usize); + crate::curve25519_51::fsquare_times(t010.1, c1.1, tmp110.1, 20u32); + let mut f1_copy: [u64; 5] = [0u64; 5usize]; + ((&mut f1_copy)[0usize..5usize]).copy_from_slice(&t010.1[0usize..5usize]); + 
crate::bignum25519_51::fmul(t010.1, &f1_copy, c1.1, tmp); + let tmp120: (&[fstar::uint128::uint128], &[fstar::uint128::uint128]) = tmp.split_at(0usize); + let mut i_copy: [u64; 5] = [0u64; 5usize]; + ((&mut i_copy)[0usize..5usize]).copy_from_slice(&t010.1[0usize..5usize]); + crate::curve25519_51::fsquare_times(t010.1, &i_copy, tmp120.1, 10u32); + let mut f2_copy2: [u64; 5] = [0u64; 5usize]; + ((&mut f2_copy2)[0usize..5usize]).copy_from_slice(&c1.0[0usize..5usize]); + crate::bignum25519_51::fmul(c1.0, t010.1, &f2_copy2, tmp); + let tmp13: (&[fstar::uint128::uint128], &[fstar::uint128::uint128]) = tmp.split_at(0usize); + crate::curve25519_51::fsquare_times(t010.1, c1.0, tmp13.1, 50u32); + crate::bignum25519_51::fmul(c1.1, t010.1, c1.0, tmp); + let b11: (&[u64], &[u64]) = c1.0.split_at(0usize); + let c10: (&[u64], &[u64]) = c1.1.split_at(0usize); + let t011: (&mut [u64], &mut [u64]) = t010.1.split_at_mut(0usize); + let tmp14: (&[fstar::uint128::uint128], &[fstar::uint128::uint128]) = tmp.split_at(0usize); + crate::curve25519_51::fsquare_times(t011.1, c10.1, tmp14.1, 100u32); + let mut f1_copy0: [u64; 5] = [0u64; 5usize]; + ((&mut f1_copy0)[0usize..5usize]).copy_from_slice(&t011.1[0usize..5usize]); + crate::bignum25519_51::fmul(t011.1, &f1_copy0, c10.1, tmp); + let tmp111: (&[fstar::uint128::uint128], &[fstar::uint128::uint128]) = tmp.split_at(0usize); + let mut i_copy0: [u64; 5] = [0u64; 5usize]; + ((&mut i_copy0)[0usize..5usize]).copy_from_slice(&t011.1[0usize..5usize]); + crate::curve25519_51::fsquare_times(t011.1, &i_copy0, tmp111.1, 50u32); + let mut f1_copy1: [u64; 5] = [0u64; 5usize]; + ((&mut f1_copy1)[0usize..5usize]).copy_from_slice(&t011.1[0usize..5usize]); + crate::bignum25519_51::fmul(t011.1, &f1_copy1, b11.1, tmp); + let tmp121: (&[fstar::uint128::uint128], &[fstar::uint128::uint128]) = tmp.split_at(0usize); + let mut i_copy1: [u64; 5] = [0u64; 5usize]; + ((&mut i_copy1)[0usize..5usize]).copy_from_slice(&t011.1[0usize..5usize]); + 
crate::curve25519_51::fsquare_times(t011.1, &i_copy1, tmp121.1, 5u32); + let a: (&[u64], &[u64]) = b1.0.split_at(0usize); + let t0: (&[u64], &[u64]) = t011.1.split_at(0usize); + crate::bignum25519_51::fmul(o, t0.1, a.1, tmp) +} + +fn encode_point(o: &mut [u8], i: &[u64]) { + let x: (&[u64], &[u64]) = i.split_at(0usize); + let z: (&[u64], &[u64]) = x.1.split_at(5usize); + let mut tmp: [u64; 5] = [0u64; 5usize]; + let mut u64s: [u64; 4] = [0u64; 4usize]; + let tmp_w: [fstar::uint128::uint128; 10] = [fstar::uint128::uint64_to_uint128(0u64); 10usize]; + crate::curve25519_51::finv(&mut tmp, z.1, &tmp_w); + let mut f1_copy: [u64; 5] = [0u64; 5usize]; + ((&mut f1_copy)[0usize..5usize]).copy_from_slice(&(&tmp)[0usize..5usize]); + crate::bignum25519_51::fmul(&mut tmp, &f1_copy, z.0, &tmp_w); + crate::bignum25519_51::store_felem(&mut u64s, &tmp); + krml::unroll_for!( + 4, + "i0", + 0u32, + 1u32, + lowstar::endianness::store64_le( + &mut o[i0.wrapping_mul(8u32) as usize..], + (&u64s)[i0 as usize] + ) + ) +} + +/** +Compute the scalar multiple of a point. + +@param out Pointer to 32 bytes of memory, allocated by the caller, where the resulting point is written to. +@param priv Pointer to 32 bytes of memory where the secret/private key is read from. +@param pub Pointer to 32 bytes of memory where the public point is read from. 
+*/ +pub fn scalarmult(out: &mut [u8], r#priv: &[u8], r#pub: &[u8]) { + let mut init: [u64; 10] = [0u64; 10usize]; + let mut init_copy: [u64; 10] = [0u64; 10usize]; + let mut tmp: [u64; 4] = [0u64; 4usize]; + krml::unroll_for!(4, "i", 0u32, 1u32, { + let bj: (&[u8], &[u8]) = r#pub.split_at(i.wrapping_mul(8u32) as usize); + let u: u64 = lowstar::endianness::load64_le(bj.1); + let r: u64 = u; + let x: u64 = r; + let os: (&mut [u64], &mut [u64]) = tmp.split_at_mut(0usize); + os.1[i as usize] = x + }); + let tmp3: u64 = (&tmp)[3usize]; + (&mut tmp)[3usize] = tmp3 & 0x7fffffffffffffffu64; + let x: (&mut [u64], &mut [u64]) = init.split_at_mut(0usize); + let z: (&mut [u64], &mut [u64]) = x.1.split_at_mut(5usize); + z.1[0usize] = 1u64; + z.1[1usize] = 0u64; + z.1[2usize] = 0u64; + z.1[3usize] = 0u64; + z.1[4usize] = 0u64; + let f0l: u64 = (&tmp)[0usize] & 0x7ffffffffffffu64; + let f0h: u64 = ((&tmp)[0usize]).wrapping_shr(51u32); + let f1l: u64 = ((&tmp)[1usize] & 0x3fffffffffu64).wrapping_shl(13u32); + let f1h: u64 = ((&tmp)[1usize]).wrapping_shr(38u32); + let f2l: u64 = ((&tmp)[2usize] & 0x1ffffffu64).wrapping_shl(26u32); + let f2h: u64 = ((&tmp)[2usize]).wrapping_shr(25u32); + let f3l: u64 = ((&tmp)[3usize] & 0xfffu64).wrapping_shl(39u32); + let f3h: u64 = ((&tmp)[3usize]).wrapping_shr(12u32); + z.0[0usize] = f0l; + z.0[1usize] = f0h | f1l; + z.0[2usize] = f1h | f2l; + z.0[3usize] = f2h | f3l; + z.0[4usize] = f3h; + ((&mut init_copy)[0usize..10usize]).copy_from_slice(&(&init)[0usize..10usize]); + crate::curve25519_51::montgomery_ladder(&mut init, r#priv, &init_copy); + crate::curve25519_51::encode_point(out, &init) +} + +/** +Calculate a public point from a secret/private key. + +This computes a scalar multiplication of the secret/private key with the curve's basepoint. + +@param pub Pointer to 32 bytes of memory, allocated by the caller, where the resulting point is written to. +@param priv Pointer to 32 bytes of memory where the secret/private key is read from. 
+*/ +pub fn secret_to_public(r#pub: &mut [u8], r#priv: &[u8]) { + let mut basepoint: [u8; 32] = [0u8; 32usize]; + krml::unroll_for!(32, "i", 0u32, 1u32, { + let x: u8 = (&crate::curve25519_51::g25519)[i as usize]; + let os: (&mut [u8], &mut [u8]) = basepoint.split_at_mut(0usize); + os.1[i as usize] = x + }); + crate::curve25519_51::scalarmult(r#pub, r#priv, &basepoint) +} + +/** +Execute the diffie-hellmann key exchange. + +@param out Pointer to 32 bytes of memory, allocated by the caller, where the resulting point is written to. +@param priv Pointer to 32 bytes of memory where **our** secret/private key is read from. +@param pub Pointer to 32 bytes of memory where **their** public point is read from. +*/ +pub fn ecdh(out: &mut [u8], r#priv: &[u8], r#pub: &[u8]) -> bool { + let zeros: [u8; 32] = [0u8; 32usize]; + crate::curve25519_51::scalarmult(out, r#priv, r#pub); + let mut res: [u8; 1] = [255u8; 1usize]; + krml::unroll_for!(32, "i", 0u32, 1u32, { + let uu____0: u8 = fstar::uint8::eq_mask(out[i as usize], (&zeros)[i as usize]); + (&mut res)[0usize] = uu____0 & (&res)[0usize] + }); + let z: u8 = (&res)[0usize]; + let r: bool = z == 255u8; + !r +} diff --git a/libcrux-hacl-rs/src/ed25519.rs b/libcrux-hacl-rs/src/ed25519.rs new file mode 100644 index 000000000..bf1388a1a --- /dev/null +++ b/libcrux-hacl-rs/src/ed25519.rs @@ -0,0 +1,1931 @@ +#![allow(non_snake_case)] +#![allow(non_upper_case_globals)] +#![allow(non_camel_case_types)] +#![allow(unused_assignments)] +#![allow(unreachable_patterns)] + +use crate::bignum; +use crate::fstar; +use crate::lowstar; + +#[inline] +fn fsum(out: &mut [u64], a: &[u64], b: &[u64]) { + crate::bignum25519_51::fadd(out, a, b) +} + +#[inline] +fn fdifference(out: &mut [u64], a: &[u64], b: &[u64]) { + crate::bignum25519_51::fsub(out, a, b) +} + +pub(crate) fn reduce_513(a: &mut [u64]) { + let f0: u64 = a[0usize]; + let f1: u64 = a[1usize]; + let f2: u64 = a[2usize]; + let f3: u64 = a[3usize]; + let f4: u64 = a[4usize]; + let lĀ·: u64 = 
f0.wrapping_add(0u64); + let tmp0: u64 = lĀ· & 0x7ffffffffffffu64; + let c0: u64 = lĀ·.wrapping_shr(51u32); + let lĀ·0: u64 = f1.wrapping_add(c0); + let tmp1: u64 = lĀ·0 & 0x7ffffffffffffu64; + let c1: u64 = lĀ·0.wrapping_shr(51u32); + let lĀ·1: u64 = f2.wrapping_add(c1); + let tmp2: u64 = lĀ·1 & 0x7ffffffffffffu64; + let c2: u64 = lĀ·1.wrapping_shr(51u32); + let lĀ·2: u64 = f3.wrapping_add(c2); + let tmp3: u64 = lĀ·2 & 0x7ffffffffffffu64; + let c3: u64 = lĀ·2.wrapping_shr(51u32); + let lĀ·3: u64 = f4.wrapping_add(c3); + let tmp4: u64 = lĀ·3 & 0x7ffffffffffffu64; + let c4: u64 = lĀ·3.wrapping_shr(51u32); + let lĀ·4: u64 = tmp0.wrapping_add(c4.wrapping_mul(19u64)); + let tmp0Ā·: u64 = lĀ·4 & 0x7ffffffffffffu64; + let c5: u64 = lĀ·4.wrapping_shr(51u32); + a[0usize] = tmp0Ā·; + a[1usize] = tmp1.wrapping_add(c5); + a[2usize] = tmp2; + a[3usize] = tmp3; + a[4usize] = tmp4 +} + +#[inline] +fn fmul(output: &mut [u64], input: &[u64], input2: &[u64]) { + let tmp: [fstar::uint128::uint128; 10] = [fstar::uint128::uint64_to_uint128(0u64); 10usize]; + crate::bignum25519_51::fmul(output, input, input2, &tmp) +} + +#[inline] +fn times_2(out: &mut [u64], a: &[u64]) { + let a0: u64 = a[0usize]; + let a1: u64 = a[1usize]; + let a2: u64 = a[2usize]; + let a3: u64 = a[3usize]; + let a4: u64 = a[4usize]; + let o0: u64 = 2u64.wrapping_mul(a0); + let o1: u64 = 2u64.wrapping_mul(a1); + let o2: u64 = 2u64.wrapping_mul(a2); + let o3: u64 = 2u64.wrapping_mul(a3); + let o4: u64 = 2u64.wrapping_mul(a4); + out[0usize] = o0; + out[1usize] = o1; + out[2usize] = o2; + out[3usize] = o3; + out[4usize] = o4 +} + +#[inline] +fn times_d(out: &mut [u64], a: &[u64]) { + let mut d: [u64; 5] = [0u64; 5usize]; + (&mut d)[0usize] = 0x00034dca135978a3u64; + (&mut d)[1usize] = 0x0001a8283b156ebdu64; + (&mut d)[2usize] = 0x0005e7a26001c029u64; + (&mut d)[3usize] = 0x000739c663a03cbbu64; + (&mut d)[4usize] = 0x00052036cee2b6ffu64; + crate::ed25519::fmul(out, &d, a) +} + +#[inline] +fn times_2d(out: &mut [u64], 
a: &[u64]) { + let mut d2: [u64; 5] = [0u64; 5usize]; + (&mut d2)[0usize] = 0x00069b9426b2f159u64; + (&mut d2)[1usize] = 0x00035050762add7au64; + (&mut d2)[2usize] = 0x0003cf44c0038052u64; + (&mut d2)[3usize] = 0x0006738cc7407977u64; + (&mut d2)[4usize] = 0x0002406d9dc56dffu64; + crate::ed25519::fmul(out, &d2, a) +} + +#[inline] +fn fsquare(out: &mut [u64], a: &[u64]) { + let tmp: [fstar::uint128::uint128; 5] = [fstar::uint128::uint64_to_uint128(0u64); 5usize]; + crate::bignum25519_51::fsqr(out, a, &tmp) +} + +#[inline] +fn fsquare_times(output: &mut [u64], input: &[u64], count: u32) { + let tmp: [fstar::uint128::uint128; 5] = [fstar::uint128::uint64_to_uint128(0u64); 5usize]; + crate::curve25519_51::fsquare_times(output, input, &tmp, count) +} + +#[inline] +fn fsquare_times_inplace(output: &mut [u64], count: u32) { + let tmp: [fstar::uint128::uint128; 5] = [fstar::uint128::uint64_to_uint128(0u64); 5usize]; + let mut input: [u64; 5] = [0u64; 5usize]; + ((&mut input)[0usize..5usize]).copy_from_slice(&output[0usize..5usize]); + crate::curve25519_51::fsquare_times(output, &input, &tmp, count) +} + +pub(crate) fn inverse(out: &mut [u64], a: &[u64]) { + let tmp: [fstar::uint128::uint128; 10] = [fstar::uint128::uint64_to_uint128(0u64); 10usize]; + crate::curve25519_51::finv(out, a, &tmp) +} + +#[inline] +fn reduce(out: &mut [u64]) { + let o0: u64 = out[0usize]; + let o1: u64 = out[1usize]; + let o2: u64 = out[2usize]; + let o3: u64 = out[3usize]; + let o4: u64 = out[4usize]; + let lĀ·: u64 = o0.wrapping_add(0u64); + let tmp0: u64 = lĀ· & 0x7ffffffffffffu64; + let c0: u64 = lĀ·.wrapping_shr(51u32); + let lĀ·0: u64 = o1.wrapping_add(c0); + let tmp1: u64 = lĀ·0 & 0x7ffffffffffffu64; + let c1: u64 = lĀ·0.wrapping_shr(51u32); + let lĀ·1: u64 = o2.wrapping_add(c1); + let tmp2: u64 = lĀ·1 & 0x7ffffffffffffu64; + let c2: u64 = lĀ·1.wrapping_shr(51u32); + let lĀ·2: u64 = o3.wrapping_add(c2); + let tmp3: u64 = lĀ·2 & 0x7ffffffffffffu64; + let c3: u64 = lĀ·2.wrapping_shr(51u32); + 
let lĀ·3: u64 = o4.wrapping_add(c3); + let tmp4: u64 = lĀ·3 & 0x7ffffffffffffu64; + let c4: u64 = lĀ·3.wrapping_shr(51u32); + let lĀ·4: u64 = tmp0.wrapping_add(c4.wrapping_mul(19u64)); + let tmp0Ā·: u64 = lĀ·4 & 0x7ffffffffffffu64; + let c5: u64 = lĀ·4.wrapping_shr(51u32); + let f0: u64 = tmp0Ā·; + let f1: u64 = tmp1.wrapping_add(c5); + let f2: u64 = tmp2; + let f3: u64 = tmp3; + let f4: u64 = tmp4; + let m0: u64 = fstar::uint64::gte_mask(f0, 0x7ffffffffffedu64); + let m1: u64 = fstar::uint64::eq_mask(f1, 0x7ffffffffffffu64); + let m2: u64 = fstar::uint64::eq_mask(f2, 0x7ffffffffffffu64); + let m3: u64 = fstar::uint64::eq_mask(f3, 0x7ffffffffffffu64); + let m4: u64 = fstar::uint64::eq_mask(f4, 0x7ffffffffffffu64); + let mask: u64 = m0 & m1 & m2 & m3 & m4; + let f0Ā·: u64 = f0.wrapping_sub(mask & 0x7ffffffffffedu64); + let f1Ā·: u64 = f1.wrapping_sub(mask & 0x7ffffffffffffu64); + let f2Ā·: u64 = f2.wrapping_sub(mask & 0x7ffffffffffffu64); + let f3Ā·: u64 = f3.wrapping_sub(mask & 0x7ffffffffffffu64); + let f4Ā·: u64 = f4.wrapping_sub(mask & 0x7ffffffffffffu64); + let f01: u64 = f0Ā·; + let f11: u64 = f1Ā·; + let f21: u64 = f2Ā·; + let f31: u64 = f3Ā·; + let f41: u64 = f4Ā·; + out[0usize] = f01; + out[1usize] = f11; + out[2usize] = f21; + out[3usize] = f31; + out[4usize] = f41 +} + +pub(crate) fn load_51(output: &mut [u64], input: &[u8]) { + let mut u64s: [u64; 4] = [0u64; 4usize]; + krml::unroll_for!(4, "i", 0u32, 1u32, { + let bj: (&[u8], &[u8]) = input.split_at(i.wrapping_mul(8u32) as usize); + let u: u64 = lowstar::endianness::load64_le(bj.1); + let r: u64 = u; + let x: u64 = r; + let os: (&mut [u64], &mut [u64]) = u64s.split_at_mut(0usize); + os.1[i as usize] = x + }); + let u64s3: u64 = (&u64s)[3usize]; + (&mut u64s)[3usize] = u64s3 & 0x7fffffffffffffffu64; + output[0usize] = (&u64s)[0usize] & 0x7ffffffffffffu64; + output[1usize] = ((&u64s)[0usize]).wrapping_shr(51u32) + | ((&u64s)[1usize] & 0x3fffffffffu64).wrapping_shl(13u32); + output[2usize] = 
((&u64s)[1usize]).wrapping_shr(38u32) + | ((&u64s)[2usize] & 0x1ffffffu64).wrapping_shl(26u32); + output[3usize] = + ((&u64s)[2usize]).wrapping_shr(25u32) | ((&u64s)[3usize] & 0xfffu64).wrapping_shl(39u32); + output[4usize] = ((&u64s)[3usize]).wrapping_shr(12u32) +} + +pub(crate) fn store_51(output: &mut [u8], input: &[u64]) { + let mut u64s: [u64; 4] = [0u64; 4usize]; + crate::bignum25519_51::store_felem(&mut u64s, input); + krml::unroll_for!( + 4, + "i", + 0u32, + 1u32, + lowstar::endianness::store64_le( + &mut output[i.wrapping_mul(8u32) as usize..], + (&u64s)[i as usize] + ) + ) +} + +pub(crate) fn point_double(out: &mut [u64], p: &[u64]) { + let mut tmp: [u64; 20] = [0u64; 20usize]; + let tmp1: (&mut [u64], &mut [u64]) = tmp.split_at_mut(0usize); + let tmp2: (&mut [u64], &mut [u64]) = tmp1.1.split_at_mut(5usize); + let tmp3: (&mut [u64], &mut [u64]) = tmp2.1.split_at_mut(5usize); + let tmp4: (&mut [u64], &mut [u64]) = tmp3.1.split_at_mut(5usize); + let x1: (&[u64], &[u64]) = p.split_at(0usize); + let y1: (&[u64], &[u64]) = x1.1.split_at(5usize); + let z1: (&[u64], &[u64]) = y1.1.split_at(5usize); + crate::ed25519::fsquare(tmp2.0, y1.0); + crate::ed25519::fsquare(tmp3.0, z1.0); + crate::ed25519::fsum(tmp4.0, tmp2.0, tmp3.0); + crate::ed25519::fdifference(tmp4.1, tmp2.0, tmp3.0); + crate::ed25519::fsquare(tmp2.0, z1.1); + let mut a_copy: [u64; 5] = [0u64; 5usize]; + ((&mut a_copy)[0usize..5usize]).copy_from_slice(&tmp2.0[0usize..5usize]); + crate::ed25519::times_2(tmp2.0, &a_copy); + let tmp10: (&mut [u64], &mut [u64]) = tmp2.0.split_at_mut(0usize); + let tmp20: (&mut [u64], &mut [u64]) = tmp3.0.split_at_mut(0usize); + let tmp30: (&mut [u64], &mut [u64]) = tmp4.0.split_at_mut(0usize); + let tmp40: (&mut [u64], &mut [u64]) = tmp4.1.split_at_mut(0usize); + let x10: (&[u64], &[u64]) = y1.0.split_at(0usize); + let y10: (&[u64], &[u64]) = z1.0.split_at(0usize); + crate::ed25519::fsum(tmp20.1, x10.1, y10.1); + let mut a_copy0: [u64; 5] = [0u64; 5usize]; + ((&mut 
a_copy0)[0usize..5usize]).copy_from_slice(&tmp20.1[0usize..5usize]); + crate::ed25519::fsquare(tmp20.1, &a_copy0); + crate::ed25519::reduce_513(tmp30.1); + let mut b_copy: [u64; 5] = [0u64; 5usize]; + ((&mut b_copy)[0usize..5usize]).copy_from_slice(&tmp20.1[0usize..5usize]); + crate::ed25519::fdifference(tmp20.1, tmp30.1, &b_copy); + crate::ed25519::reduce_513(tmp10.1); + crate::ed25519::reduce_513(tmp40.1); + let mut a_copy1: [u64; 5] = [0u64; 5usize]; + ((&mut a_copy1)[0usize..5usize]).copy_from_slice(&tmp10.1[0usize..5usize]); + crate::ed25519::fsum(tmp10.1, &a_copy1, tmp40.1); + let tmp_f: (&[u64], &[u64]) = tmp10.1.split_at(0usize); + let tmp_e: (&[u64], &[u64]) = tmp20.1.split_at(0usize); + let tmp_h: (&[u64], &[u64]) = tmp30.1.split_at(0usize); + let tmp_g: (&[u64], &[u64]) = tmp40.1.split_at(0usize); + let x3: (&mut [u64], &mut [u64]) = out.split_at_mut(0usize); + let y3: (&mut [u64], &mut [u64]) = x3.1.split_at_mut(5usize); + let z3: (&mut [u64], &mut [u64]) = y3.1.split_at_mut(5usize); + let t3: (&mut [u64], &mut [u64]) = z3.1.split_at_mut(5usize); + crate::ed25519::fmul(y3.0, tmp_e.1, tmp_f.1); + crate::ed25519::fmul(z3.0, tmp_g.1, tmp_h.1); + crate::ed25519::fmul(t3.1, tmp_e.1, tmp_h.1); + crate::ed25519::fmul(t3.0, tmp_f.1, tmp_g.1) +} + +pub(crate) fn point_add(out: &mut [u64], p: &[u64], q: &[u64]) { + let mut tmp: [u64; 30] = [0u64; 30usize]; + let tmp1: (&mut [u64], &mut [u64]) = tmp.split_at_mut(0usize); + let tmp2: (&mut [u64], &mut [u64]) = tmp1.1.split_at_mut(5usize); + let tmp3: (&mut [u64], &mut [u64]) = tmp2.1.split_at_mut(5usize); + let tmp4: (&mut [u64], &mut [u64]) = tmp3.1.split_at_mut(5usize); + let x1: (&[u64], &[u64]) = p.split_at(0usize); + let y1: (&[u64], &[u64]) = x1.1.split_at(5usize); + let x2: (&[u64], &[u64]) = q.split_at(0usize); + let y2: (&[u64], &[u64]) = x2.1.split_at(5usize); + crate::ed25519::fdifference(tmp2.0, y1.1, y1.0); + crate::ed25519::fdifference(tmp3.0, y2.1, y2.0); + crate::ed25519::fmul(tmp4.0, tmp2.0, 
tmp3.0); + crate::ed25519::fsum(tmp2.0, y1.1, y1.0); + crate::ed25519::fsum(tmp3.0, y2.1, y2.0); + crate::ed25519::fmul(tmp4.1, tmp2.0, tmp3.0); + let tmp10: (&mut [u64], &mut [u64]) = tmp2.0.split_at_mut(0usize); + let tmp20: (&mut [u64], &mut [u64]) = tmp3.0.split_at_mut(0usize); + let tmp30: (&[u64], &[u64]) = tmp4.0.split_at(0usize); + let tmp40: (&mut [u64], &mut [u64]) = tmp4.1.split_at_mut(0usize); + let tmp5: (&mut [u64], &mut [u64]) = tmp40.1.split_at_mut(5usize); + let tmp6: (&mut [u64], &mut [u64]) = tmp5.1.split_at_mut(5usize); + let z1: (&[u64], &[u64]) = y1.1.split_at(5usize); + let t1: (&[u64], &[u64]) = z1.1.split_at(5usize); + let z2: (&[u64], &[u64]) = y2.1.split_at(5usize); + let t2: (&[u64], &[u64]) = z2.1.split_at(5usize); + crate::ed25519::times_2d(tmp10.1, t1.1); + let mut inp_copy: [u64; 5] = [0u64; 5usize]; + ((&mut inp_copy)[0usize..5usize]).copy_from_slice(&tmp10.1[0usize..5usize]); + crate::ed25519::fmul(tmp10.1, &inp_copy, t2.1); + crate::ed25519::times_2(tmp20.1, t1.0); + let mut inp_copy0: [u64; 5] = [0u64; 5usize]; + ((&mut inp_copy0)[0usize..5usize]).copy_from_slice(&tmp20.1[0usize..5usize]); + crate::ed25519::fmul(tmp20.1, &inp_copy0, t2.0); + crate::ed25519::fdifference(tmp6.0, tmp5.0, tmp30.1); + crate::ed25519::fdifference(tmp6.1, tmp20.1, tmp10.1); + let mut a_copy: [u64; 5] = [0u64; 5usize]; + ((&mut a_copy)[0usize..5usize]).copy_from_slice(&tmp10.1[0usize..5usize]); + crate::ed25519::fsum(tmp10.1, &a_copy, tmp20.1); + crate::ed25519::fsum(tmp20.1, tmp5.0, tmp30.1); + let tmp_g: (&[u64], &[u64]) = tmp10.1.split_at(0usize); + let tmp_h: (&[u64], &[u64]) = tmp20.1.split_at(0usize); + let tmp_e: (&[u64], &[u64]) = tmp6.0.split_at(0usize); + let tmp_f: (&[u64], &[u64]) = tmp6.1.split_at(0usize); + let x3: (&mut [u64], &mut [u64]) = out.split_at_mut(0usize); + let y3: (&mut [u64], &mut [u64]) = x3.1.split_at_mut(5usize); + let z3: (&mut [u64], &mut [u64]) = y3.1.split_at_mut(5usize); + let t3: (&mut [u64], &mut [u64]) = 
z3.1.split_at_mut(5usize); + crate::ed25519::fmul(y3.0, tmp_e.1, tmp_f.1); + crate::ed25519::fmul(z3.0, tmp_g.1, tmp_h.1); + crate::ed25519::fmul(t3.1, tmp_e.1, tmp_h.1); + crate::ed25519::fmul(t3.0, tmp_f.1, tmp_g.1) +} + +pub(crate) fn make_point_inf(b: &mut [u64]) { + let x: (&mut [u64], &mut [u64]) = b.split_at_mut(0usize); + let y: (&mut [u64], &mut [u64]) = x.1.split_at_mut(5usize); + let z: (&mut [u64], &mut [u64]) = y.1.split_at_mut(5usize); + let t: (&mut [u64], &mut [u64]) = z.1.split_at_mut(5usize); + y.0[0usize] = 0u64; + y.0[1usize] = 0u64; + y.0[2usize] = 0u64; + y.0[3usize] = 0u64; + y.0[4usize] = 0u64; + z.0[0usize] = 1u64; + z.0[1usize] = 0u64; + z.0[2usize] = 0u64; + z.0[3usize] = 0u64; + z.0[4usize] = 0u64; + t.0[0usize] = 1u64; + t.0[1usize] = 0u64; + t.0[2usize] = 0u64; + t.0[3usize] = 0u64; + t.0[4usize] = 0u64; + t.1[0usize] = 0u64; + t.1[1usize] = 0u64; + t.1[2usize] = 0u64; + t.1[3usize] = 0u64; + t.1[4usize] = 0u64 +} + +#[inline] +fn pow2_252m2(out: &mut [u64], z: &[u64]) { + let mut buf: [u64; 20] = [0u64; 20usize]; + let a: (&mut [u64], &mut [u64]) = buf.split_at_mut(0usize); + let t0: (&mut [u64], &mut [u64]) = a.1.split_at_mut(5usize); + let b: (&mut [u64], &mut [u64]) = t0.1.split_at_mut(5usize); + let c: (&mut [u64], &mut [u64]) = b.1.split_at_mut(5usize); + crate::ed25519::fsquare_times(t0.0, z, 1u32); + crate::ed25519::fsquare_times(b.0, t0.0, 2u32); + crate::ed25519::fmul(c.0, b.0, z); + let mut inp_copy: [u64; 5] = [0u64; 5usize]; + ((&mut inp_copy)[0usize..5usize]).copy_from_slice(&t0.0[0usize..5usize]); + crate::ed25519::fmul(t0.0, &inp_copy, c.0); + crate::ed25519::fsquare_times(b.0, t0.0, 1u32); + let mut inp_copy0: [u64; 5] = [0u64; 5usize]; + ((&mut inp_copy0)[0usize..5usize]).copy_from_slice(&c.0[0usize..5usize]); + crate::ed25519::fmul(c.0, &inp_copy0, b.0); + crate::ed25519::fsquare_times(b.0, c.0, 5u32); + let mut inp_copy1: [u64; 5] = [0u64; 5usize]; + ((&mut 
inp_copy1)[0usize..5usize]).copy_from_slice(&c.0[0usize..5usize]); + crate::ed25519::fmul(c.0, &inp_copy1, b.0); + crate::ed25519::fsquare_times(b.0, c.0, 10u32); + crate::ed25519::fmul(c.1, b.0, c.0); + crate::ed25519::fsquare_times(b.0, c.1, 20u32); + let mut inp_copy2: [u64; 5] = [0u64; 5usize]; + ((&mut inp_copy2)[0usize..5usize]).copy_from_slice(&b.0[0usize..5usize]); + crate::ed25519::fmul(b.0, &inp_copy2, c.1); + crate::ed25519::fsquare_times_inplace(b.0, 10u32); + let mut inp_copy3: [u64; 5] = [0u64; 5usize]; + ((&mut inp_copy3)[0usize..5usize]).copy_from_slice(&c.0[0usize..5usize]); + crate::ed25519::fmul(c.0, &inp_copy3, b.0); + crate::ed25519::fsquare_times(b.0, c.0, 50u32); + let a0: (&mut [u64], &mut [u64]) = t0.0.split_at_mut(0usize); + let t00: (&mut [u64], &mut [u64]) = b.0.split_at_mut(0usize); + let b0: (&[u64], &[u64]) = c.0.split_at(0usize); + let c0: (&mut [u64], &mut [u64]) = c.1.split_at_mut(0usize); + crate::ed25519::fsquare_times(a0.1, z, 1u32); + crate::ed25519::fmul(c0.1, t00.1, b0.1); + crate::ed25519::fsquare_times(t00.1, c0.1, 100u32); + let mut inp_copy4: [u64; 5] = [0u64; 5usize]; + ((&mut inp_copy4)[0usize..5usize]).copy_from_slice(&t00.1[0usize..5usize]); + crate::ed25519::fmul(t00.1, &inp_copy4, c0.1); + crate::ed25519::fsquare_times_inplace(t00.1, 50u32); + let mut inp_copy5: [u64; 5] = [0u64; 5usize]; + ((&mut inp_copy5)[0usize..5usize]).copy_from_slice(&t00.1[0usize..5usize]); + crate::ed25519::fmul(t00.1, &inp_copy5, b0.1); + crate::ed25519::fsquare_times_inplace(t00.1, 2u32); + crate::ed25519::fmul(out, t00.1, a0.1) +} + +#[inline] +fn is_0(x: &[u64]) -> bool { + let x0: u64 = x[0usize]; + let x1: u64 = x[1usize]; + let x2: u64 = x[2usize]; + let x3: u64 = x[3usize]; + let x4: u64 = x[4usize]; + x0 == 0u64 && x1 == 0u64 && x2 == 0u64 && x3 == 0u64 && x4 == 0u64 +} + +#[inline] +fn mul_modp_sqrt_m1(x: &mut [u64]) { + let mut sqrt_m1: [u64; 5] = [0u64; 5usize]; + (&mut sqrt_m1)[0usize] = 0x00061b274a0ea0b0u64; + (&mut 
sqrt_m1)[1usize] = 0x0000d5a5fc8f189du64; + (&mut sqrt_m1)[2usize] = 0x0007ef5e9cbd0c60u64; + (&mut sqrt_m1)[3usize] = 0x00078595a6804c9eu64; + (&mut sqrt_m1)[4usize] = 0x0002b8324804fc1du64; + let mut inp_copy: [u64; 5] = [0u64; 5usize]; + ((&mut inp_copy)[0usize..5usize]).copy_from_slice(&x[0usize..5usize]); + crate::ed25519::fmul(x, &inp_copy, &sqrt_m1) +} + +#[inline] +fn recover_x(x: &mut [u64], y: &[u64], sign: u64) -> bool { + let mut tmp: [u64; 15] = [0u64; 15usize]; + let x2: (&mut [u64], &mut [u64]) = tmp.split_at_mut(0usize); + let x0: u64 = y[0usize]; + let x1: u64 = y[1usize]; + let x21: u64 = y[2usize]; + let x3: u64 = y[3usize]; + let x4: u64 = y[4usize]; + let b: bool = x0 >= 0x7ffffffffffedu64 + && x1 == 0x7ffffffffffffu64 + && x21 == 0x7ffffffffffffu64 + && x3 == 0x7ffffffffffffu64 + && x4 == 0x7ffffffffffffu64; + let res: bool = if b { + false + } else { + let mut tmp1: [u64; 20] = [0u64; 20usize]; + let one: (&mut [u64], &mut [u64]) = tmp1.split_at_mut(0usize); + let y2: (&mut [u64], &mut [u64]) = one.1.split_at_mut(5usize); + let dyyi: (&mut [u64], &mut [u64]) = y2.1.split_at_mut(5usize); + let dyy: (&mut [u64], &mut [u64]) = dyyi.1.split_at_mut(5usize); + y2.0[0usize] = 1u64; + y2.0[1usize] = 0u64; + y2.0[2usize] = 0u64; + y2.0[3usize] = 0u64; + y2.0[4usize] = 0u64; + crate::ed25519::fsquare(dyyi.0, y); + crate::ed25519::times_d(dyy.1, dyyi.0); + let mut a_copy: [u64; 5] = [0u64; 5usize]; + ((&mut a_copy)[0usize..5usize]).copy_from_slice(&dyy.1[0usize..5usize]); + crate::ed25519::fsum(dyy.1, &a_copy, y2.0); + crate::ed25519::reduce_513(dyy.1); + crate::ed25519::inverse(dyy.0, dyy.1); + crate::ed25519::fdifference(x2.1, dyyi.0, y2.0); + let mut inp_copy: [u64; 5] = [0u64; 5usize]; + ((&mut inp_copy)[0usize..5usize]).copy_from_slice(&x2.1[0usize..5usize]); + crate::ed25519::fmul(x2.1, &inp_copy, dyy.0); + crate::ed25519::reduce(x2.1); + let x2_is_0: bool = crate::ed25519::is_0(x2.1); + let z: u8 = if x2_is_0 { + if sign == 0u64 { + x[0usize] = 
0u64; + x[1usize] = 0u64; + x[2usize] = 0u64; + x[3usize] = 0u64; + x[4usize] = 0u64; + 1u8 + } else { + 0u8 + } + } else { + 2u8 + }; + if z == 0u8 { + false + } else if z == 1u8 { + true + } else { + let x210: (&mut [u64], &mut [u64]) = x2.1.split_at_mut(0usize); + let x30: (&mut [u64], &mut [u64]) = x210.1.split_at_mut(5usize); + let t0: (&mut [u64], &mut [u64]) = x30.1.split_at_mut(5usize); + crate::ed25519::pow2_252m2(t0.0, x30.0); + crate::ed25519::fsquare(t0.1, t0.0); + let mut a_copy0: [u64; 5] = [0u64; 5usize]; + ((&mut a_copy0)[0usize..5usize]).copy_from_slice(&t0.1[0usize..5usize]); + crate::ed25519::fdifference(t0.1, &a_copy0, x30.0); + crate::ed25519::reduce_513(t0.1); + crate::ed25519::reduce(t0.1); + let t0_is_0: bool = crate::ed25519::is_0(t0.1); + if !t0_is_0 { + crate::ed25519::mul_modp_sqrt_m1(t0.0) + }; + let x211: (&[u64], &[u64]) = x30.0.split_at(0usize); + let x31: (&mut [u64], &mut [u64]) = t0.0.split_at_mut(0usize); + let t00: (&mut [u64], &mut [u64]) = t0.1.split_at_mut(0usize); + crate::ed25519::fsquare(t00.1, x31.1); + let mut a_copy1: [u64; 5] = [0u64; 5usize]; + ((&mut a_copy1)[0usize..5usize]).copy_from_slice(&t00.1[0usize..5usize]); + crate::ed25519::fdifference(t00.1, &a_copy1, x211.1); + crate::ed25519::reduce_513(t00.1); + crate::ed25519::reduce(t00.1); + let z1: bool = crate::ed25519::is_0(t00.1); + if z1 { + let x32: (&mut [u64], &mut [u64]) = x31.1.split_at_mut(0usize); + let t01: (&mut [u64], &mut [u64]) = t00.1.split_at_mut(0usize); + crate::ed25519::reduce(x32.1); + let x00: u64 = x32.1[0usize]; + let x01: u64 = x00 & 1u64; + if x01 != sign { + t01.1[0usize] = 0u64; + t01.1[1usize] = 0u64; + t01.1[2usize] = 0u64; + t01.1[3usize] = 0u64; + t01.1[4usize] = 0u64; + let mut b_copy: [u64; 5] = [0u64; 5usize]; + ((&mut b_copy)[0usize..5usize]).copy_from_slice(&x32.1[0usize..5usize]); + crate::ed25519::fdifference(x32.1, t01.1, &b_copy); + crate::ed25519::reduce_513(x32.1); + crate::ed25519::reduce(x32.1) + }; + 
(x[0usize..5usize]).copy_from_slice(&x32.1[0usize..5usize]); + true + } else { + false + } + } + }; + let res0: bool = res; + res0 +} + +pub(crate) fn point_decompress(out: &mut [u64], s: &[u8]) -> bool { + let mut tmp: [u64; 10] = [0u64; 10usize]; + let y: (&mut [u64], &mut [u64]) = tmp.split_at_mut(0usize); + let x: (&mut [u64], &mut [u64]) = y.1.split_at_mut(5usize); + let s31: u8 = s[31usize]; + let z: u8 = s31.wrapping_shr(7u32); + let sign: u64 = z as u64; + crate::ed25519::load_51(x.0, s); + let z0: bool = crate::ed25519::recover_x(x.1, x.0, sign); + let res: bool = if z0 { + let outx: (&mut [u64], &mut [u64]) = out.split_at_mut(0usize); + let outy: (&mut [u64], &mut [u64]) = outx.1.split_at_mut(5usize); + let outz: (&mut [u64], &mut [u64]) = outy.1.split_at_mut(5usize); + let outt: (&mut [u64], &mut [u64]) = outz.1.split_at_mut(5usize); + (outy.0[0usize..5usize]).copy_from_slice(&x.1[0usize..5usize]); + (outz.0[0usize..5usize]).copy_from_slice(&x.0[0usize..5usize]); + outt.0[0usize] = 1u64; + outt.0[1usize] = 0u64; + outt.0[2usize] = 0u64; + outt.0[3usize] = 0u64; + outt.0[4usize] = 0u64; + crate::ed25519::fmul(outt.1, x.1, x.0); + true + } else { + false + }; + let res0: bool = res; + res0 +} + +pub(crate) fn point_compress(z: &mut [u8], p: &[u64]) { + let mut tmp: [u64; 15] = [0u64; 15usize]; + let zinv: (&mut [u64], &mut [u64]) = tmp.split_at_mut(0usize); + let x: (&mut [u64], &mut [u64]) = zinv.1.split_at_mut(5usize); + let out: (&mut [u64], &mut [u64]) = x.1.split_at_mut(5usize); + let px: (&[u64], &[u64]) = p.split_at(0usize); + let py: (&[u64], &[u64]) = px.1.split_at(5usize); + let pz: (&[u64], &[u64]) = py.1.split_at(5usize); + crate::ed25519::inverse(x.0, pz.1); + crate::ed25519::fmul(out.0, py.0, x.0); + crate::ed25519::reduce(out.0); + crate::ed25519::fmul(out.1, pz.0, x.0); + crate::ed25519::reduce_513(out.1); + let x0: (&[u64], &[u64]) = out.0.split_at(0usize); + let out0: (&[u64], &[u64]) = out.1.split_at(0usize); + let x00: u64 = 
x0.1[0usize]; + let b: u64 = x00 & 1u64; + crate::ed25519::store_51(z, out0.1); + let xbyte: u8 = b as u8; + let o31: u8 = z[31usize]; + z[31usize] = o31.wrapping_add(xbyte.wrapping_shl(7u32)) +} + +#[inline] +fn barrett_reduction(z: &mut [u64], t: &[u64]) { + let t0: u64 = t[0usize]; + let t1: u64 = t[1usize]; + let t2: u64 = t[2usize]; + let t3: u64 = t[3usize]; + let t4: u64 = t[4usize]; + let t5: u64 = t[5usize]; + let t6: u64 = t[6usize]; + let t7: u64 = t[7usize]; + let t8: u64 = t[8usize]; + let t9: u64 = t[9usize]; + let m0: u64 = 0x12631a5cf5d3edu64; + let m1: u64 = 0xf9dea2f79cd658u64; + let m2: u64 = 0x000000000014deu64; + let m3: u64 = 0x00000000000000u64; + let m4: u64 = 0x00000010000000u64; + let m00: u64 = m0; + let m10: u64 = m1; + let m20: u64 = m2; + let m30: u64 = m3; + let m40: u64 = m4; + let m01: u64 = 0x9ce5a30a2c131bu64; + let m11: u64 = 0x215d086329a7edu64; + let m21: u64 = 0xffffffffeb2106u64; + let m31: u64 = 0xffffffffffffffu64; + let m41: u64 = 0x00000fffffffffu64; + let mu0: u64 = m01; + let mu1: u64 = m11; + let mu2: u64 = m21; + let mu3: u64 = m31; + let mu4: u64 = m41; + let yĀ·: u64 = (t5 & 0xffffffu64).wrapping_shl(32u32); + let xĀ·: u64 = t4.wrapping_shr(24u32); + let z0: u64 = xĀ· | yĀ·; + let yĀ·0: u64 = (t6 & 0xffffffu64).wrapping_shl(32u32); + let xĀ·0: u64 = t5.wrapping_shr(24u32); + let z1: u64 = xĀ·0 | yĀ·0; + let yĀ·1: u64 = (t7 & 0xffffffu64).wrapping_shl(32u32); + let xĀ·1: u64 = t6.wrapping_shr(24u32); + let z2: u64 = xĀ·1 | yĀ·1; + let yĀ·2: u64 = (t8 & 0xffffffu64).wrapping_shl(32u32); + let xĀ·2: u64 = t7.wrapping_shr(24u32); + let z3: u64 = xĀ·2 | yĀ·2; + let yĀ·3: u64 = (t9 & 0xffffffu64).wrapping_shl(32u32); + let xĀ·3: u64 = t8.wrapping_shr(24u32); + let z4: u64 = xĀ·3 | yĀ·3; + let q0: u64 = z0; + let q1: u64 = z1; + let q2: u64 = z2; + let q3: u64 = z3; + let q4: u64 = z4; + let xy00: fstar::uint128::uint128 = fstar::uint128::mul_wide(q0, mu0); + let xy01: fstar::uint128::uint128 = fstar::uint128::mul_wide(q0, 
mu1); + let xy02: fstar::uint128::uint128 = fstar::uint128::mul_wide(q0, mu2); + let xy03: fstar::uint128::uint128 = fstar::uint128::mul_wide(q0, mu3); + let xy04: fstar::uint128::uint128 = fstar::uint128::mul_wide(q0, mu4); + let xy10: fstar::uint128::uint128 = fstar::uint128::mul_wide(q1, mu0); + let xy11: fstar::uint128::uint128 = fstar::uint128::mul_wide(q1, mu1); + let xy12: fstar::uint128::uint128 = fstar::uint128::mul_wide(q1, mu2); + let xy13: fstar::uint128::uint128 = fstar::uint128::mul_wide(q1, mu3); + let xy14: fstar::uint128::uint128 = fstar::uint128::mul_wide(q1, mu4); + let xy20: fstar::uint128::uint128 = fstar::uint128::mul_wide(q2, mu0); + let xy21: fstar::uint128::uint128 = fstar::uint128::mul_wide(q2, mu1); + let xy22: fstar::uint128::uint128 = fstar::uint128::mul_wide(q2, mu2); + let xy23: fstar::uint128::uint128 = fstar::uint128::mul_wide(q2, mu3); + let xy24: fstar::uint128::uint128 = fstar::uint128::mul_wide(q2, mu4); + let xy30: fstar::uint128::uint128 = fstar::uint128::mul_wide(q3, mu0); + let xy31: fstar::uint128::uint128 = fstar::uint128::mul_wide(q3, mu1); + let xy32: fstar::uint128::uint128 = fstar::uint128::mul_wide(q3, mu2); + let xy33: fstar::uint128::uint128 = fstar::uint128::mul_wide(q3, mu3); + let xy34: fstar::uint128::uint128 = fstar::uint128::mul_wide(q3, mu4); + let xy40: fstar::uint128::uint128 = fstar::uint128::mul_wide(q4, mu0); + let xy41: fstar::uint128::uint128 = fstar::uint128::mul_wide(q4, mu1); + let xy42: fstar::uint128::uint128 = fstar::uint128::mul_wide(q4, mu2); + let xy43: fstar::uint128::uint128 = fstar::uint128::mul_wide(q4, mu3); + let xy44: fstar::uint128::uint128 = fstar::uint128::mul_wide(q4, mu4); + let z00: fstar::uint128::uint128 = xy00; + let z10: fstar::uint128::uint128 = fstar::uint128::add_mod(xy01, xy10); + let z20: fstar::uint128::uint128 = + fstar::uint128::add_mod(fstar::uint128::add_mod(xy02, xy11), xy20); + let z30: fstar::uint128::uint128 = fstar::uint128::add_mod( + 
fstar::uint128::add_mod(fstar::uint128::add_mod(xy03, xy12), xy21), + xy30, + ); + let z40: fstar::uint128::uint128 = fstar::uint128::add_mod( + fstar::uint128::add_mod( + fstar::uint128::add_mod(fstar::uint128::add_mod(xy04, xy13), xy22), + xy31, + ), + xy40, + ); + let z5: fstar::uint128::uint128 = fstar::uint128::add_mod( + fstar::uint128::add_mod(fstar::uint128::add_mod(xy14, xy23), xy32), + xy41, + ); + let z6: fstar::uint128::uint128 = + fstar::uint128::add_mod(fstar::uint128::add_mod(xy24, xy33), xy42); + let z7: fstar::uint128::uint128 = fstar::uint128::add_mod(xy34, xy43); + let z8: fstar::uint128::uint128 = xy44; + let carry: fstar::uint128::uint128 = fstar::uint128::shift_right(z00, 56u32); + let c0: fstar::uint128::uint128 = carry; + let carry0: fstar::uint128::uint128 = + fstar::uint128::shift_right(fstar::uint128::add_mod(z10, c0), 56u32); + let c1: fstar::uint128::uint128 = carry0; + let carry1: fstar::uint128::uint128 = + fstar::uint128::shift_right(fstar::uint128::add_mod(z20, c1), 56u32); + let c2: fstar::uint128::uint128 = carry1; + let carry2: fstar::uint128::uint128 = + fstar::uint128::shift_right(fstar::uint128::add_mod(z30, c2), 56u32); + let c3: fstar::uint128::uint128 = carry2; + let carry3: fstar::uint128::uint128 = + fstar::uint128::shift_right(fstar::uint128::add_mod(z40, c3), 56u32); + let t10: u64 = + fstar::uint128::uint128_to_uint64(fstar::uint128::add_mod(z40, c3)) & 0xffffffffffffffu64; + let c4: fstar::uint128::uint128 = carry3; + let t41: u64 = t10; + let carry4: fstar::uint128::uint128 = + fstar::uint128::shift_right(fstar::uint128::add_mod(z5, c4), 56u32); + let t100: u64 = + fstar::uint128::uint128_to_uint64(fstar::uint128::add_mod(z5, c4)) & 0xffffffffffffffu64; + let c5: fstar::uint128::uint128 = carry4; + let t51: u64 = t100; + let carry5: fstar::uint128::uint128 = + fstar::uint128::shift_right(fstar::uint128::add_mod(z6, c5), 56u32); + let t101: u64 = + fstar::uint128::uint128_to_uint64(fstar::uint128::add_mod(z6, c5)) & 
0xffffffffffffffu64; + let c6: fstar::uint128::uint128 = carry5; + let t61: u64 = t101; + let carry6: fstar::uint128::uint128 = + fstar::uint128::shift_right(fstar::uint128::add_mod(z7, c6), 56u32); + let t102: u64 = + fstar::uint128::uint128_to_uint64(fstar::uint128::add_mod(z7, c6)) & 0xffffffffffffffu64; + let c7: fstar::uint128::uint128 = carry6; + let t71: u64 = t102; + let carry7: fstar::uint128::uint128 = + fstar::uint128::shift_right(fstar::uint128::add_mod(z8, c7), 56u32); + let t103: u64 = + fstar::uint128::uint128_to_uint64(fstar::uint128::add_mod(z8, c7)) & 0xffffffffffffffu64; + let c8: fstar::uint128::uint128 = carry7; + let t81: u64 = t103; + let t91: u64 = fstar::uint128::uint128_to_uint64(c8); + let qmu4Ā·: u64 = t41; + let qmu5Ā·: u64 = t51; + let qmu6Ā·: u64 = t61; + let qmu7Ā·: u64 = t71; + let qmu8Ā·: u64 = t81; + let qmu9Ā·: u64 = t91; + let yĀ·4: u64 = (qmu5Ā· & 0xffffffffffu64).wrapping_shl(16u32); + let xĀ·4: u64 = qmu4Ā·.wrapping_shr(40u32); + let z01: u64 = xĀ·4 | yĀ·4; + let yĀ·5: u64 = (qmu6Ā· & 0xffffffffffu64).wrapping_shl(16u32); + let xĀ·5: u64 = qmu5Ā·.wrapping_shr(40u32); + let z11: u64 = xĀ·5 | yĀ·5; + let yĀ·6: u64 = (qmu7Ā· & 0xffffffffffu64).wrapping_shl(16u32); + let xĀ·6: u64 = qmu6Ā·.wrapping_shr(40u32); + let z21: u64 = xĀ·6 | yĀ·6; + let yĀ·7: u64 = (qmu8Ā· & 0xffffffffffu64).wrapping_shl(16u32); + let xĀ·7: u64 = qmu7Ā·.wrapping_shr(40u32); + let z31: u64 = xĀ·7 | yĀ·7; + let yĀ·8: u64 = (qmu9Ā· & 0xffffffffffu64).wrapping_shl(16u32); + let xĀ·8: u64 = qmu8Ā·.wrapping_shr(40u32); + let z41: u64 = xĀ·8 | yĀ·8; + let qdiv0: u64 = z01; + let qdiv1: u64 = z11; + let qdiv2: u64 = z21; + let qdiv3: u64 = z31; + let qdiv4: u64 = z41; + let r0: u64 = t0; + let r1: u64 = t1; + let r2: u64 = t2; + let r3: u64 = t3; + let r4: u64 = t4 & 0xffffffffffu64; + let xy000: fstar::uint128::uint128 = fstar::uint128::mul_wide(qdiv0, m00); + let xy010: fstar::uint128::uint128 = fstar::uint128::mul_wide(qdiv0, m10); + let xy020: 
fstar::uint128::uint128 = fstar::uint128::mul_wide(qdiv0, m20); + let xy030: fstar::uint128::uint128 = fstar::uint128::mul_wide(qdiv0, m30); + let xy040: fstar::uint128::uint128 = fstar::uint128::mul_wide(qdiv0, m40); + let xy100: fstar::uint128::uint128 = fstar::uint128::mul_wide(qdiv1, m00); + let xy110: fstar::uint128::uint128 = fstar::uint128::mul_wide(qdiv1, m10); + let xy120: fstar::uint128::uint128 = fstar::uint128::mul_wide(qdiv1, m20); + let xy130: fstar::uint128::uint128 = fstar::uint128::mul_wide(qdiv1, m30); + let xy200: fstar::uint128::uint128 = fstar::uint128::mul_wide(qdiv2, m00); + let xy210: fstar::uint128::uint128 = fstar::uint128::mul_wide(qdiv2, m10); + let xy220: fstar::uint128::uint128 = fstar::uint128::mul_wide(qdiv2, m20); + let xy300: fstar::uint128::uint128 = fstar::uint128::mul_wide(qdiv3, m00); + let xy310: fstar::uint128::uint128 = fstar::uint128::mul_wide(qdiv3, m10); + let xy400: fstar::uint128::uint128 = fstar::uint128::mul_wide(qdiv4, m00); + let carry8: fstar::uint128::uint128 = fstar::uint128::shift_right(xy000, 56u32); + let t104: u64 = fstar::uint128::uint128_to_uint64(xy000) & 0xffffffffffffffu64; + let c00: fstar::uint128::uint128 = carry8; + let t01: u64 = t104; + let carry9: fstar::uint128::uint128 = fstar::uint128::shift_right( + fstar::uint128::add_mod(fstar::uint128::add_mod(xy010, xy100), c00), + 56u32, + ); + let t105: u64 = fstar::uint128::uint128_to_uint64(fstar::uint128::add_mod( + fstar::uint128::add_mod(xy010, xy100), + c00, + )) & 0xffffffffffffffu64; + let c10: fstar::uint128::uint128 = carry9; + let t11: u64 = t105; + let carry10: fstar::uint128::uint128 = fstar::uint128::shift_right( + fstar::uint128::add_mod( + fstar::uint128::add_mod(fstar::uint128::add_mod(xy020, xy110), xy200), + c10, + ), + 56u32, + ); + let t106: u64 = fstar::uint128::uint128_to_uint64(fstar::uint128::add_mod( + fstar::uint128::add_mod(fstar::uint128::add_mod(xy020, xy110), xy200), + c10, + )) & 0xffffffffffffffu64; + let c20: 
fstar::uint128::uint128 = carry10; + let t21: u64 = t106; + let carry11: fstar::uint128::uint128 = fstar::uint128::shift_right( + fstar::uint128::add_mod( + fstar::uint128::add_mod( + fstar::uint128::add_mod(fstar::uint128::add_mod(xy030, xy120), xy210), + xy300, + ), + c20, + ), + 56u32, + ); + let t107: u64 = fstar::uint128::uint128_to_uint64(fstar::uint128::add_mod( + fstar::uint128::add_mod( + fstar::uint128::add_mod(fstar::uint128::add_mod(xy030, xy120), xy210), + xy300, + ), + c20, + )) & 0xffffffffffffffu64; + let c30: fstar::uint128::uint128 = carry11; + let t31: u64 = t107; + let t410: u64 = fstar::uint128::uint128_to_uint64(fstar::uint128::add_mod( + fstar::uint128::add_mod( + fstar::uint128::add_mod( + fstar::uint128::add_mod(fstar::uint128::add_mod(xy040, xy130), xy220), + xy310, + ), + xy400, + ), + c30, + )) & 0xffffffffffu64; + let qmul0: u64 = t01; + let qmul1: u64 = t11; + let qmul2: u64 = t21; + let qmul3: u64 = t31; + let qmul4: u64 = t410; + let b: u64 = r0.wrapping_sub(qmul0).wrapping_shr(63u32); + let t108: u64 = b.wrapping_shl(56u32).wrapping_add(r0).wrapping_sub(qmul0); + let c11: u64 = b; + let t010: u64 = t108; + let b0: u64 = r1.wrapping_sub(qmul1.wrapping_add(c11)).wrapping_shr(63u32); + let t109: u64 = b0 + .wrapping_shl(56u32) + .wrapping_add(r1) + .wrapping_sub(qmul1.wrapping_add(c11)); + let c21: u64 = b0; + let t110: u64 = t109; + let b1: u64 = r2.wrapping_sub(qmul2.wrapping_add(c21)).wrapping_shr(63u32); + let t1010: u64 = b1 + .wrapping_shl(56u32) + .wrapping_add(r2) + .wrapping_sub(qmul2.wrapping_add(c21)); + let c31: u64 = b1; + let t210: u64 = t1010; + let b2: u64 = r3.wrapping_sub(qmul3.wrapping_add(c31)).wrapping_shr(63u32); + let t1011: u64 = b2 + .wrapping_shl(56u32) + .wrapping_add(r3) + .wrapping_sub(qmul3.wrapping_add(c31)); + let c40: u64 = b2; + let t310: u64 = t1011; + let b3: u64 = r4.wrapping_sub(qmul4.wrapping_add(c40)).wrapping_shr(63u32); + let t1012: u64 = b3 + .wrapping_shl(40u32) + .wrapping_add(r4) + 
.wrapping_sub(qmul4.wrapping_add(c40)); + let t411: u64 = t1012; + let s0: u64 = t010; + let s1: u64 = t110; + let s2: u64 = t210; + let s3: u64 = t310; + let s4: u64 = t411; + let m010: u64 = 0x12631a5cf5d3edu64; + let m110: u64 = 0xf9dea2f79cd658u64; + let m210: u64 = 0x000000000014deu64; + let m310: u64 = 0x00000000000000u64; + let m410: u64 = 0x00000010000000u64; + let y0: u64 = m010; + let y1: u64 = m110; + let y2: u64 = m210; + let y3: u64 = m310; + let y4: u64 = m410; + let b4: u64 = s0.wrapping_sub(y0).wrapping_shr(63u32); + let t1013: u64 = b4.wrapping_shl(56u32).wrapping_add(s0).wrapping_sub(y0); + let b00: u64 = b4; + let t011: u64 = t1013; + let b5: u64 = s1.wrapping_sub(y1.wrapping_add(b00)).wrapping_shr(63u32); + let t1014: u64 = b5 + .wrapping_shl(56u32) + .wrapping_add(s1) + .wrapping_sub(y1.wrapping_add(b00)); + let b10: u64 = b5; + let t111: u64 = t1014; + let b6: u64 = s2.wrapping_sub(y2.wrapping_add(b10)).wrapping_shr(63u32); + let t1015: u64 = b6 + .wrapping_shl(56u32) + .wrapping_add(s2) + .wrapping_sub(y2.wrapping_add(b10)); + let b20: u64 = b6; + let t211: u64 = t1015; + let b7: u64 = s3.wrapping_sub(y3.wrapping_add(b20)).wrapping_shr(63u32); + let t1016: u64 = b7 + .wrapping_shl(56u32) + .wrapping_add(s3) + .wrapping_sub(y3.wrapping_add(b20)); + let b30: u64 = b7; + let t311: u64 = t1016; + let b8: u64 = s4.wrapping_sub(y4.wrapping_add(b30)).wrapping_shr(63u32); + let t1017: u64 = b8 + .wrapping_shl(56u32) + .wrapping_add(s4) + .wrapping_sub(y4.wrapping_add(b30)); + let b40: u64 = b8; + let t412: u64 = t1017; + let mask: u64 = b40.wrapping_sub(1u64); + let z02: u64 = s0 ^ mask & (s0 ^ t011); + let z12: u64 = s1 ^ mask & (s1 ^ t111); + let z22: u64 = s2 ^ mask & (s2 ^ t211); + let z32: u64 = s3 ^ mask & (s3 ^ t311); + let z42: u64 = s4 ^ mask & (s4 ^ t412); + let z03: u64 = z02; + let z13: u64 = z12; + let z23: u64 = z22; + let z33: u64 = z32; + let z43: u64 = z42; + let o0: u64 = z03; + let o1: u64 = z13; + let o2: u64 = z23; + let o3: u64 
= z33; + let o4: u64 = z43; + let z04: u64 = o0; + let z14: u64 = o1; + let z24: u64 = o2; + let z34: u64 = o3; + let z44: u64 = o4; + z[0usize] = z04; + z[1usize] = z14; + z[2usize] = z24; + z[3usize] = z34; + z[4usize] = z44 +} + +#[inline] +fn mul_modq(out: &mut [u64], x: &[u64], y: &[u64]) { + let mut tmp: [u64; 10] = [0u64; 10usize]; + let x0: u64 = x[0usize]; + let x1: u64 = x[1usize]; + let x2: u64 = x[2usize]; + let x3: u64 = x[3usize]; + let x4: u64 = x[4usize]; + let y0: u64 = y[0usize]; + let y1: u64 = y[1usize]; + let y2: u64 = y[2usize]; + let y3: u64 = y[3usize]; + let y4: u64 = y[4usize]; + let xy00: fstar::uint128::uint128 = fstar::uint128::mul_wide(x0, y0); + let xy01: fstar::uint128::uint128 = fstar::uint128::mul_wide(x0, y1); + let xy02: fstar::uint128::uint128 = fstar::uint128::mul_wide(x0, y2); + let xy03: fstar::uint128::uint128 = fstar::uint128::mul_wide(x0, y3); + let xy04: fstar::uint128::uint128 = fstar::uint128::mul_wide(x0, y4); + let xy10: fstar::uint128::uint128 = fstar::uint128::mul_wide(x1, y0); + let xy11: fstar::uint128::uint128 = fstar::uint128::mul_wide(x1, y1); + let xy12: fstar::uint128::uint128 = fstar::uint128::mul_wide(x1, y2); + let xy13: fstar::uint128::uint128 = fstar::uint128::mul_wide(x1, y3); + let xy14: fstar::uint128::uint128 = fstar::uint128::mul_wide(x1, y4); + let xy20: fstar::uint128::uint128 = fstar::uint128::mul_wide(x2, y0); + let xy21: fstar::uint128::uint128 = fstar::uint128::mul_wide(x2, y1); + let xy22: fstar::uint128::uint128 = fstar::uint128::mul_wide(x2, y2); + let xy23: fstar::uint128::uint128 = fstar::uint128::mul_wide(x2, y3); + let xy24: fstar::uint128::uint128 = fstar::uint128::mul_wide(x2, y4); + let xy30: fstar::uint128::uint128 = fstar::uint128::mul_wide(x3, y0); + let xy31: fstar::uint128::uint128 = fstar::uint128::mul_wide(x3, y1); + let xy32: fstar::uint128::uint128 = fstar::uint128::mul_wide(x3, y2); + let xy33: fstar::uint128::uint128 = fstar::uint128::mul_wide(x3, y3); + let xy34: 
fstar::uint128::uint128 = fstar::uint128::mul_wide(x3, y4); + let xy40: fstar::uint128::uint128 = fstar::uint128::mul_wide(x4, y0); + let xy41: fstar::uint128::uint128 = fstar::uint128::mul_wide(x4, y1); + let xy42: fstar::uint128::uint128 = fstar::uint128::mul_wide(x4, y2); + let xy43: fstar::uint128::uint128 = fstar::uint128::mul_wide(x4, y3); + let xy44: fstar::uint128::uint128 = fstar::uint128::mul_wide(x4, y4); + let z0: fstar::uint128::uint128 = xy00; + let z1: fstar::uint128::uint128 = fstar::uint128::add_mod(xy01, xy10); + let z2: fstar::uint128::uint128 = + fstar::uint128::add_mod(fstar::uint128::add_mod(xy02, xy11), xy20); + let z3: fstar::uint128::uint128 = fstar::uint128::add_mod( + fstar::uint128::add_mod(fstar::uint128::add_mod(xy03, xy12), xy21), + xy30, + ); + let z4: fstar::uint128::uint128 = fstar::uint128::add_mod( + fstar::uint128::add_mod( + fstar::uint128::add_mod(fstar::uint128::add_mod(xy04, xy13), xy22), + xy31, + ), + xy40, + ); + let z5: fstar::uint128::uint128 = fstar::uint128::add_mod( + fstar::uint128::add_mod(fstar::uint128::add_mod(xy14, xy23), xy32), + xy41, + ); + let z6: fstar::uint128::uint128 = + fstar::uint128::add_mod(fstar::uint128::add_mod(xy24, xy33), xy42); + let z7: fstar::uint128::uint128 = fstar::uint128::add_mod(xy34, xy43); + let z8: fstar::uint128::uint128 = xy44; + let carry: fstar::uint128::uint128 = fstar::uint128::shift_right(z0, 56u32); + let t: u64 = fstar::uint128::uint128_to_uint64(z0) & 0xffffffffffffffu64; + let c0: fstar::uint128::uint128 = carry; + let t0: u64 = t; + let carry0: fstar::uint128::uint128 = + fstar::uint128::shift_right(fstar::uint128::add_mod(z1, c0), 56u32); + let t1: u64 = + fstar::uint128::uint128_to_uint64(fstar::uint128::add_mod(z1, c0)) & 0xffffffffffffffu64; + let c1: fstar::uint128::uint128 = carry0; + let t10: u64 = t1; + let carry1: fstar::uint128::uint128 = + fstar::uint128::shift_right(fstar::uint128::add_mod(z2, c1), 56u32); + let t2: u64 = + 
fstar::uint128::uint128_to_uint64(fstar::uint128::add_mod(z2, c1)) & 0xffffffffffffffu64; + let c2: fstar::uint128::uint128 = carry1; + let t20: u64 = t2; + let carry2: fstar::uint128::uint128 = + fstar::uint128::shift_right(fstar::uint128::add_mod(z3, c2), 56u32); + let t3: u64 = + fstar::uint128::uint128_to_uint64(fstar::uint128::add_mod(z3, c2)) & 0xffffffffffffffu64; + let c3: fstar::uint128::uint128 = carry2; + let t30: u64 = t3; + let carry3: fstar::uint128::uint128 = + fstar::uint128::shift_right(fstar::uint128::add_mod(z4, c3), 56u32); + let t4: u64 = + fstar::uint128::uint128_to_uint64(fstar::uint128::add_mod(z4, c3)) & 0xffffffffffffffu64; + let c4: fstar::uint128::uint128 = carry3; + let t40: u64 = t4; + let carry4: fstar::uint128::uint128 = + fstar::uint128::shift_right(fstar::uint128::add_mod(z5, c4), 56u32); + let t5: u64 = + fstar::uint128::uint128_to_uint64(fstar::uint128::add_mod(z5, c4)) & 0xffffffffffffffu64; + let c5: fstar::uint128::uint128 = carry4; + let t50: u64 = t5; + let carry5: fstar::uint128::uint128 = + fstar::uint128::shift_right(fstar::uint128::add_mod(z6, c5), 56u32); + let t6: u64 = + fstar::uint128::uint128_to_uint64(fstar::uint128::add_mod(z6, c5)) & 0xffffffffffffffu64; + let c6: fstar::uint128::uint128 = carry5; + let t60: u64 = t6; + let carry6: fstar::uint128::uint128 = + fstar::uint128::shift_right(fstar::uint128::add_mod(z7, c6), 56u32); + let t7: u64 = + fstar::uint128::uint128_to_uint64(fstar::uint128::add_mod(z7, c6)) & 0xffffffffffffffu64; + let c7: fstar::uint128::uint128 = carry6; + let t70: u64 = t7; + let carry7: fstar::uint128::uint128 = + fstar::uint128::shift_right(fstar::uint128::add_mod(z8, c7), 56u32); + let t8: u64 = + fstar::uint128::uint128_to_uint64(fstar::uint128::add_mod(z8, c7)) & 0xffffffffffffffu64; + let c8: fstar::uint128::uint128 = carry7; + let t80: u64 = t8; + let t9: u64 = fstar::uint128::uint128_to_uint64(c8); + let z00: u64 = t0; + let z10: u64 = t10; + let z20: u64 = t20; + let z30: u64 = 
t30; + let z40: u64 = t40; + let z50: u64 = t50; + let z60: u64 = t60; + let z70: u64 = t70; + let z80: u64 = t80; + let z9: u64 = t9; + (&mut tmp)[0usize] = z00; + (&mut tmp)[1usize] = z10; + (&mut tmp)[2usize] = z20; + (&mut tmp)[3usize] = z30; + (&mut tmp)[4usize] = z40; + (&mut tmp)[5usize] = z50; + (&mut tmp)[6usize] = z60; + (&mut tmp)[7usize] = z70; + (&mut tmp)[8usize] = z80; + (&mut tmp)[9usize] = z9; + crate::ed25519::barrett_reduction(out, &tmp) +} + +#[inline] +fn add_modq(out: &mut [u64], x: &[u64], y: &[u64]) { + let x0: u64 = x[0usize]; + let x1: u64 = x[1usize]; + let x2: u64 = x[2usize]; + let x3: u64 = x[3usize]; + let x4: u64 = x[4usize]; + let y0: u64 = y[0usize]; + let y1: u64 = y[1usize]; + let y2: u64 = y[2usize]; + let y3: u64 = y[3usize]; + let y4: u64 = y[4usize]; + let carry: u64 = x0.wrapping_add(y0).wrapping_shr(56u32); + let t: u64 = x0.wrapping_add(y0) & 0xffffffffffffffu64; + let t0: u64 = t; + let c0: u64 = carry; + let carry0: u64 = x1.wrapping_add(y1).wrapping_add(c0).wrapping_shr(56u32); + let t1: u64 = x1.wrapping_add(y1).wrapping_add(c0) & 0xffffffffffffffu64; + let t10: u64 = t1; + let c1: u64 = carry0; + let carry1: u64 = x2.wrapping_add(y2).wrapping_add(c1).wrapping_shr(56u32); + let t2: u64 = x2.wrapping_add(y2).wrapping_add(c1) & 0xffffffffffffffu64; + let t20: u64 = t2; + let c2: u64 = carry1; + let carry2: u64 = x3.wrapping_add(y3).wrapping_add(c2).wrapping_shr(56u32); + let t3: u64 = x3.wrapping_add(y3).wrapping_add(c2) & 0xffffffffffffffu64; + let t30: u64 = t3; + let c3: u64 = carry2; + let t4: u64 = x4.wrapping_add(y4).wrapping_add(c3); + let m0: u64 = 0x12631a5cf5d3edu64; + let m1: u64 = 0xf9dea2f79cd658u64; + let m2: u64 = 0x000000000014deu64; + let m3: u64 = 0x00000000000000u64; + let m4: u64 = 0x00000010000000u64; + let y01: u64 = m0; + let y11: u64 = m1; + let y21: u64 = m2; + let y31: u64 = m3; + let y41: u64 = m4; + let b: u64 = t0.wrapping_sub(y01).wrapping_shr(63u32); + let t5: u64 = 
b.wrapping_shl(56u32).wrapping_add(t0).wrapping_sub(y01); + let b0: u64 = b; + let t01: u64 = t5; + let b1: u64 = t10.wrapping_sub(y11.wrapping_add(b0)).wrapping_shr(63u32); + let t6: u64 = b1 + .wrapping_shl(56u32) + .wrapping_add(t10) + .wrapping_sub(y11.wrapping_add(b0)); + let b10: u64 = b1; + let t11: u64 = t6; + let b2: u64 = t20.wrapping_sub(y21.wrapping_add(b10)).wrapping_shr(63u32); + let t7: u64 = b2 + .wrapping_shl(56u32) + .wrapping_add(t20) + .wrapping_sub(y21.wrapping_add(b10)); + let b20: u64 = b2; + let t21: u64 = t7; + let b3: u64 = t30.wrapping_sub(y31.wrapping_add(b20)).wrapping_shr(63u32); + let t8: u64 = b3 + .wrapping_shl(56u32) + .wrapping_add(t30) + .wrapping_sub(y31.wrapping_add(b20)); + let b30: u64 = b3; + let t31: u64 = t8; + let b4: u64 = t4.wrapping_sub(y41.wrapping_add(b30)).wrapping_shr(63u32); + let t9: u64 = b4 + .wrapping_shl(56u32) + .wrapping_add(t4) + .wrapping_sub(y41.wrapping_add(b30)); + let b40: u64 = b4; + let t41: u64 = t9; + let mask: u64 = b40.wrapping_sub(1u64); + let z0: u64 = t0 ^ mask & (t0 ^ t01); + let z1: u64 = t10 ^ mask & (t10 ^ t11); + let z2: u64 = t20 ^ mask & (t20 ^ t21); + let z3: u64 = t30 ^ mask & (t30 ^ t31); + let z4: u64 = t4 ^ mask & (t4 ^ t41); + let z00: u64 = z0; + let z10: u64 = z1; + let z20: u64 = z2; + let z30: u64 = z3; + let z40: u64 = z4; + let o0: u64 = z00; + let o1: u64 = z10; + let o2: u64 = z20; + let o3: u64 = z30; + let o4: u64 = z40; + let z01: u64 = o0; + let z11: u64 = o1; + let z21: u64 = o2; + let z31: u64 = o3; + let z41: u64 = o4; + out[0usize] = z01; + out[1usize] = z11; + out[2usize] = z21; + out[3usize] = z31; + out[4usize] = z41 +} + +#[inline] +fn gte_q(s: &[u64]) -> bool { + let s0: u64 = s[0usize]; + let s1: u64 = s[1usize]; + let s2: u64 = s[2usize]; + let s3: u64 = s[3usize]; + let s4: u64 = s[4usize]; + if s4 > 0x00000010000000u64 { + true + } else if s4 < 0x00000010000000u64 { + false + } else if s3 > 0x00000000000000u64 || s2 > 0x000000000014deu64 { + true + } else 
if s2 < 0x000000000014deu64 { + false + } else if s1 > 0xf9dea2f79cd658u64 { + true + } else if s1 < 0xf9dea2f79cd658u64 { + false + } else { + s0 >= 0x12631a5cf5d3edu64 + } +} + +#[inline] +fn eq(a: &[u64], b: &[u64]) -> bool { + let a0: u64 = a[0usize]; + let a1: u64 = a[1usize]; + let a2: u64 = a[2usize]; + let a3: u64 = a[3usize]; + let a4: u64 = a[4usize]; + let b0: u64 = b[0usize]; + let b1: u64 = b[1usize]; + let b2: u64 = b[2usize]; + let b3: u64 = b[3usize]; + let b4: u64 = b[4usize]; + a0 == b0 && a1 == b1 && a2 == b2 && a3 == b3 && a4 == b4 +} + +pub(crate) fn point_equal(p: &[u64], q: &[u64]) -> bool { + let mut tmp: [u64; 20] = [0u64; 20usize]; + let pxqz: (&mut [u64], &mut [u64]) = tmp.split_at_mut(0usize); + let qxpz: (&mut [u64], &mut [u64]) = pxqz.1.split_at_mut(5usize); + crate::ed25519::fmul(qxpz.0, &p[0usize..], &q[10usize..]); + crate::ed25519::reduce(qxpz.0); + crate::ed25519::fmul(qxpz.1, &q[0usize..], &p[10usize..]); + crate::ed25519::reduce(qxpz.1); + let b: bool = crate::ed25519::eq(qxpz.0, qxpz.1); + if b { + let pyqz: (&mut [u64], &mut [u64]) = qxpz.1.split_at_mut(5usize); + let qypz: (&mut [u64], &mut [u64]) = pyqz.1.split_at_mut(5usize); + crate::ed25519::fmul(qypz.0, &p[5usize..], &q[10usize..]); + crate::ed25519::reduce(qypz.0); + crate::ed25519::fmul(qypz.1, &q[5usize..], &p[10usize..]); + crate::ed25519::reduce(qypz.1); + crate::ed25519::eq(qypz.0, qypz.1) + } else { + false + } +} + +pub(crate) fn point_negate(p: &[u64], out: &mut [u64]) { + let mut zero: [u64; 5] = [0u64; 5usize]; + (&mut zero)[0usize] = 0u64; + (&mut zero)[1usize] = 0u64; + (&mut zero)[2usize] = 0u64; + (&mut zero)[3usize] = 0u64; + (&mut zero)[4usize] = 0u64; + let x: (&[u64], &[u64]) = p.split_at(0usize); + let y: (&[u64], &[u64]) = x.1.split_at(5usize); + let z: (&[u64], &[u64]) = y.1.split_at(5usize); + let t: (&[u64], &[u64]) = z.1.split_at(5usize); + let x1: (&mut [u64], &mut [u64]) = out.split_at_mut(0usize); + let y1: (&mut [u64], &mut [u64]) = 
x1.1.split_at_mut(5usize); + let z1: (&mut [u64], &mut [u64]) = y1.1.split_at_mut(5usize); + let t1: (&mut [u64], &mut [u64]) = z1.1.split_at_mut(5usize); + crate::ed25519::fdifference(y1.0, &zero, y.0); + crate::ed25519::reduce_513(y1.0); + (z1.0[0usize..5usize]).copy_from_slice(&z.0[0usize..5usize]); + (t1.0[0usize..5usize]).copy_from_slice(&t.0[0usize..5usize]); + crate::ed25519::fdifference(t1.1, &zero, t.1); + crate::ed25519::reduce_513(t1.1) +} + +pub(crate) fn point_mul(out: &mut [u64], scalar: &[u8], q: &[u64]) { + let mut bscalar: [u64; 4] = [0u64; 4usize]; + krml::unroll_for!(4, "i", 0u32, 1u32, { + let bj: (&[u8], &[u8]) = scalar.split_at(i.wrapping_mul(8u32) as usize); + let u: u64 = lowstar::endianness::load64_le(bj.1); + let r: u64 = u; + let x: u64 = r; + let os: (&mut [u64], &mut [u64]) = bscalar.split_at_mut(0usize); + os.1[i as usize] = x + }); + let mut table: [u64; 320] = [0u64; 320usize]; + let mut tmp: [u64; 20] = [0u64; 20usize]; + let t0: (&mut [u64], &mut [u64]) = table.split_at_mut(0usize); + let t1: (&mut [u64], &mut [u64]) = t0.1.split_at_mut(20usize); + crate::ed25519::make_point_inf(t1.0); + (t1.1[0usize..20usize]).copy_from_slice(&q[0usize..20usize]); + lowstar::ignore::ignore::<&[u64]>(&table); + krml::unroll_for!(7, "i", 0u32, 1u32, { + let t11: (&[u64], &[u64]) = + table.split_at(i.wrapping_add(1u32).wrapping_mul(20u32) as usize); + let mut p_copy: [u64; 20] = [0u64; 20usize]; + ((&mut p_copy)[0usize..20usize]).copy_from_slice(&t11.1[0usize..20usize]); + crate::ed25519::point_double(&mut tmp, &p_copy); + ((&mut table)[2u32.wrapping_mul(i).wrapping_add(2u32).wrapping_mul(20u32) as usize + ..2u32.wrapping_mul(i).wrapping_add(2u32).wrapping_mul(20u32) as usize + 20usize]) + .copy_from_slice(&(&tmp)[0usize..20usize]); + let t2: (&[u64], &[u64]) = + table.split_at(2u32.wrapping_mul(i).wrapping_add(2u32).wrapping_mul(20u32) as usize); + let mut p_copy0: [u64; 20] = [0u64; 20usize]; + ((&mut 
p_copy0)[0usize..20usize]).copy_from_slice(&q[0usize..20usize]); + crate::ed25519::point_add(&mut tmp, &p_copy0, t2.1); + ((&mut table)[2u32.wrapping_mul(i).wrapping_add(3u32).wrapping_mul(20u32) as usize + ..2u32.wrapping_mul(i).wrapping_add(3u32).wrapping_mul(20u32) as usize + 20usize]) + .copy_from_slice(&(&tmp)[0usize..20usize]) + }); + crate::ed25519::make_point_inf(out); + let mut tmp0: [u64; 20] = [0u64; 20usize]; + for i in 0u32..64u32 { + krml::unroll_for!(4, "_i", 0u32, 1u32, { + let mut p_copy: [u64; 20] = [0u64; 20usize]; + ((&mut p_copy)[0usize..20usize]).copy_from_slice(&out[0usize..20usize]); + crate::ed25519::point_double(out, &p_copy) + }); + let k: u32 = 256u32.wrapping_sub(4u32.wrapping_mul(i)).wrapping_sub(4u32); + let bits_l: u64 = bignum::bignum_base::bn_get_bits_u64(4u32, &bscalar, k, 4u32); + lowstar::ignore::ignore::<&[u64]>(&table); + ((&mut tmp0)[0usize..20usize]) + .copy_from_slice(&(&(&table)[0usize..] as &[u64])[0usize..20usize]); + krml::unroll_for!(15, "i0", 0u32, 1u32, { + let c: u64 = fstar::uint64::eq_mask(bits_l, i0.wrapping_add(1u32) as u64); + let res_j: (&[u64], &[u64]) = + table.split_at(i0.wrapping_add(1u32).wrapping_mul(20u32) as usize); + krml::unroll_for!(20, "i1", 0u32, 1u32, { + let x: u64 = c & res_j.1[i1 as usize] | !c & (&tmp0)[i1 as usize]; + let os: (&mut [u64], &mut [u64]) = tmp0.split_at_mut(0usize); + os.1[i1 as usize] = x + }) + }); + let mut p_copy: [u64; 20] = [0u64; 20usize]; + ((&mut p_copy)[0usize..20usize]).copy_from_slice(&out[0usize..20usize]); + crate::ed25519::point_add(out, &p_copy, &tmp0) + } +} + +#[inline] +fn precomp_get_consttime(table: &[u64], bits_l: u64, tmp: &mut [u64]) { + (tmp[0usize..20usize]).copy_from_slice(&(&table[0usize..])[0usize..20usize]); + krml::unroll_for!(15, "i", 0u32, 1u32, { + let c: u64 = fstar::uint64::eq_mask(bits_l, i.wrapping_add(1u32) as u64); + let res_j: (&[u64], &[u64]) = + table.split_at(i.wrapping_add(1u32).wrapping_mul(20u32) as usize); + krml::unroll_for!(20, 
"i0", 0u32, 1u32, { + let x: u64 = c & res_j.1[i0 as usize] | !c & tmp[i0 as usize]; + let os: (&mut [u64], &mut [u64]) = tmp.split_at_mut(0usize); + os.1[i0 as usize] = x + }) + }) +} + +#[inline] +fn point_mul_g(out: &mut [u64], scalar: &[u8]) { + let mut bscalar: [u64; 4] = [0u64; 4usize]; + krml::unroll_for!(4, "i", 0u32, 1u32, { + let bj: (&[u8], &[u8]) = scalar.split_at(i.wrapping_mul(8u32) as usize); + let u: u64 = lowstar::endianness::load64_le(bj.1); + let r: u64 = u; + let x: u64 = r; + let os: (&mut [u64], &mut [u64]) = bscalar.split_at_mut(0usize); + os.1[i as usize] = x + }); + let mut q1: [u64; 20] = [0u64; 20usize]; + let gx: (&mut [u64], &mut [u64]) = q1.split_at_mut(0usize); + let gy: (&mut [u64], &mut [u64]) = gx.1.split_at_mut(5usize); + let gz: (&mut [u64], &mut [u64]) = gy.1.split_at_mut(5usize); + let gt: (&mut [u64], &mut [u64]) = gz.1.split_at_mut(5usize); + gy.0[0usize] = 0x00062d608f25d51au64; + gy.0[1usize] = 0x000412a4b4f6592au64; + gy.0[2usize] = 0x00075b7171a4b31du64; + gy.0[3usize] = 0x0001ff60527118feu64; + gy.0[4usize] = 0x000216936d3cd6e5u64; + gz.0[0usize] = 0x0006666666666658u64; + gz.0[1usize] = 0x0004ccccccccccccu64; + gz.0[2usize] = 0x0001999999999999u64; + gz.0[3usize] = 0x0003333333333333u64; + gz.0[4usize] = 0x0006666666666666u64; + gt.0[0usize] = 1u64; + gt.0[1usize] = 0u64; + gt.0[2usize] = 0u64; + gt.0[3usize] = 0u64; + gt.0[4usize] = 0u64; + gt.1[0usize] = 0x00068ab3a5b7dda3u64; + gt.1[1usize] = 0x00000eea2a5eadbbu64; + gt.1[2usize] = 0x0002af8df483c27eu64; + gt.1[3usize] = 0x000332b375274732u64; + gt.1[4usize] = 0x00067875f0fd78b7u64; + let q2: [u64; 20] = [ + 13559344787725u64, + 2051621493703448u64, + 1947659315640708u64, + 626856790370168u64, + 1592804284034836u64, + 1781728767459187u64, + 278818420518009u64, + 2038030359908351u64, + 910625973862690u64, + 471887343142239u64, + 1298543306606048u64, + 794147365642417u64, + 129968992326749u64, + 523140861678572u64, + 1166419653909231u64, + 2009637196928390u64, + 
1288020222395193u64, + 1007046974985829u64, + 208981102651386u64, + 2074009315253380u64, + ]; + let q3: [u64; 20] = [ + 557549315715710u64, + 196756086293855u64, + 846062225082495u64, + 1865068224838092u64, + 991112090754908u64, + 522916421512828u64, + 2098523346722375u64, + 1135633221747012u64, + 858420432114866u64, + 186358544306082u64, + 1044420411868480u64, + 2080052304349321u64, + 557301814716724u64, + 1305130257814057u64, + 2126012765451197u64, + 1441004402875101u64, + 353948968859203u64, + 470765987164835u64, + 1507675957683570u64, + 1086650358745097u64, + ]; + let q4: [u64; 20] = [ + 1129953239743101u64, + 1240339163956160u64, + 61002583352401u64, + 2017604552196030u64, + 1576867829229863u64, + 1508654942849389u64, + 270111619664077u64, + 1253097517254054u64, + 721798270973250u64, + 161923365415298u64, + 828530877526011u64, + 1494851059386763u64, + 662034171193976u64, + 1315349646974670u64, + 2199229517308806u64, + 497078277852673u64, + 1310507715989956u64, + 1881315714002105u64, + 2214039404983803u64, + 1331036420272667u64, + ]; + let r1: (&[u64], &[u64]) = bscalar.split_at(0usize); + let r2: (&[u64], &[u64]) = r1.1.split_at(1usize); + let r3: (&[u64], &[u64]) = r2.1.split_at(1usize); + let r4: (&[u64], &[u64]) = r3.1.split_at(1usize); + crate::ed25519::make_point_inf(out); + let mut tmp: [u64; 20] = [0u64; 20usize]; + krml::unroll_for!(16, "i", 0u32, 1u32, { + krml::unroll_for!(4, "_i", 0u32, 1u32, { + let mut p_copy: [u64; 20] = [0u64; 20usize]; + ((&mut p_copy)[0usize..20usize]).copy_from_slice(&out[0usize..20usize]); + crate::ed25519::point_double(out, &p_copy) + }); + let k: u32 = 64u32.wrapping_sub(4u32.wrapping_mul(i)).wrapping_sub(4u32); + let bits_l: u64 = bignum::bignum_base::bn_get_bits_u64(1u32, r4.1, k, 4u32); + lowstar::ignore::ignore::<&[u64]>( + &crate::ed25519_precomptable::precomp_g_pow2_192_table_w4, + ); + crate::ed25519::precomp_get_consttime( + &crate::ed25519_precomptable::precomp_g_pow2_192_table_w4, + bits_l, + &mut tmp, + ); + let 
mut p_copy: [u64; 20] = [0u64; 20usize]; + ((&mut p_copy)[0usize..20usize]).copy_from_slice(&out[0usize..20usize]); + crate::ed25519::point_add(out, &p_copy, &tmp); + let k0: u32 = 64u32.wrapping_sub(4u32.wrapping_mul(i)).wrapping_sub(4u32); + let bits_l0: u64 = bignum::bignum_base::bn_get_bits_u64(1u32, r4.0, k0, 4u32); + lowstar::ignore::ignore::<&[u64]>( + &crate::ed25519_precomptable::precomp_g_pow2_128_table_w4, + ); + crate::ed25519::precomp_get_consttime( + &crate::ed25519_precomptable::precomp_g_pow2_128_table_w4, + bits_l0, + &mut tmp, + ); + let mut p_copy0: [u64; 20] = [0u64; 20usize]; + ((&mut p_copy0)[0usize..20usize]).copy_from_slice(&out[0usize..20usize]); + crate::ed25519::point_add(out, &p_copy0, &tmp); + let k1: u32 = 64u32.wrapping_sub(4u32.wrapping_mul(i)).wrapping_sub(4u32); + let bits_l1: u64 = bignum::bignum_base::bn_get_bits_u64(1u32, r3.0, k1, 4u32); + lowstar::ignore::ignore::<&[u64]>(&crate::ed25519_precomptable::precomp_g_pow2_64_table_w4); + crate::ed25519::precomp_get_consttime( + &crate::ed25519_precomptable::precomp_g_pow2_64_table_w4, + bits_l1, + &mut tmp, + ); + let mut p_copy1: [u64; 20] = [0u64; 20usize]; + ((&mut p_copy1)[0usize..20usize]).copy_from_slice(&out[0usize..20usize]); + crate::ed25519::point_add(out, &p_copy1, &tmp); + let k2: u32 = 64u32.wrapping_sub(4u32.wrapping_mul(i)).wrapping_sub(4u32); + let bits_l2: u64 = bignum::bignum_base::bn_get_bits_u64(1u32, r2.0, k2, 4u32); + lowstar::ignore::ignore::<&[u64]>(&crate::ed25519_precomptable::precomp_basepoint_table_w4); + crate::ed25519::precomp_get_consttime( + &crate::ed25519_precomptable::precomp_basepoint_table_w4, + bits_l2, + &mut tmp, + ); + let mut p_copy2: [u64; 20] = [0u64; 20usize]; + ((&mut p_copy2)[0usize..20usize]).copy_from_slice(&out[0usize..20usize]); + crate::ed25519::point_add(out, &p_copy2, &tmp) + }); + lowstar::ignore::ignore::<&[u64]>(&q2); + lowstar::ignore::ignore::<&[u64]>(&q3); + lowstar::ignore::ignore::<&[u64]>(&q4) +} + +#[inline] +fn 
point_mul_g_double_vartime(out: &mut [u64], scalar1: &[u8], scalar2: &[u8], q2: &[u64]) { + let mut tmp: [u64; 28] = [0u64; 28usize]; + let g: (&mut [u64], &mut [u64]) = tmp.split_at_mut(0usize); + let bscalar1: (&mut [u64], &mut [u64]) = g.1.split_at_mut(20usize); + let bscalar2: (&mut [u64], &mut [u64]) = bscalar1.1.split_at_mut(4usize); + let gx: (&mut [u64], &mut [u64]) = bscalar1.0.split_at_mut(0usize); + let gy: (&mut [u64], &mut [u64]) = gx.1.split_at_mut(5usize); + let gz: (&mut [u64], &mut [u64]) = gy.1.split_at_mut(5usize); + let gt: (&mut [u64], &mut [u64]) = gz.1.split_at_mut(5usize); + gy.0[0usize] = 0x00062d608f25d51au64; + gy.0[1usize] = 0x000412a4b4f6592au64; + gy.0[2usize] = 0x00075b7171a4b31du64; + gy.0[3usize] = 0x0001ff60527118feu64; + gy.0[4usize] = 0x000216936d3cd6e5u64; + gz.0[0usize] = 0x0006666666666658u64; + gz.0[1usize] = 0x0004ccccccccccccu64; + gz.0[2usize] = 0x0001999999999999u64; + gz.0[3usize] = 0x0003333333333333u64; + gz.0[4usize] = 0x0006666666666666u64; + gt.0[0usize] = 1u64; + gt.0[1usize] = 0u64; + gt.0[2usize] = 0u64; + gt.0[3usize] = 0u64; + gt.0[4usize] = 0u64; + gt.1[0usize] = 0x00068ab3a5b7dda3u64; + gt.1[1usize] = 0x00000eea2a5eadbbu64; + gt.1[2usize] = 0x0002af8df483c27eu64; + gt.1[3usize] = 0x000332b375274732u64; + gt.1[4usize] = 0x00067875f0fd78b7u64; + krml::unroll_for!(4, "i", 0u32, 1u32, { + let bj: (&[u8], &[u8]) = scalar1.split_at(i.wrapping_mul(8u32) as usize); + let u: u64 = lowstar::endianness::load64_le(bj.1); + let r: u64 = u; + let x: u64 = r; + let os: (&mut [u64], &mut [u64]) = bscalar2.0.split_at_mut(0usize); + os.1[i as usize] = x + }); + krml::unroll_for!(4, "i", 0u32, 1u32, { + let bj: (&[u8], &[u8]) = scalar2.split_at(i.wrapping_mul(8u32) as usize); + let u: u64 = lowstar::endianness::load64_le(bj.1); + let r: u64 = u; + let x: u64 = r; + let os: (&mut [u64], &mut [u64]) = bscalar2.1.split_at_mut(0usize); + os.1[i as usize] = x + }); + let mut table2: [u64; 640] = [0u64; 640usize]; + let mut tmp1: 
[u64; 20] = [0u64; 20usize]; + let t0: (&mut [u64], &mut [u64]) = table2.split_at_mut(0usize); + let t1: (&mut [u64], &mut [u64]) = t0.1.split_at_mut(20usize); + crate::ed25519::make_point_inf(t1.0); + (t1.1[0usize..20usize]).copy_from_slice(&q2[0usize..20usize]); + lowstar::ignore::ignore::<&[u64]>(&table2); + krml::unroll_for!(15, "i", 0u32, 1u32, { + let t11: (&[u64], &[u64]) = + table2.split_at(i.wrapping_add(1u32).wrapping_mul(20u32) as usize); + let mut p_copy: [u64; 20] = [0u64; 20usize]; + ((&mut p_copy)[0usize..20usize]).copy_from_slice(&t11.1[0usize..20usize]); + crate::ed25519::point_double(&mut tmp1, &p_copy); + ((&mut table2)[2u32.wrapping_mul(i).wrapping_add(2u32).wrapping_mul(20u32) as usize + ..2u32.wrapping_mul(i).wrapping_add(2u32).wrapping_mul(20u32) as usize + 20usize]) + .copy_from_slice(&(&tmp1)[0usize..20usize]); + let t2: (&[u64], &[u64]) = + table2.split_at(2u32.wrapping_mul(i).wrapping_add(2u32).wrapping_mul(20u32) as usize); + let mut p_copy0: [u64; 20] = [0u64; 20usize]; + ((&mut p_copy0)[0usize..20usize]).copy_from_slice(&q2[0usize..20usize]); + crate::ed25519::point_add(&mut tmp1, &p_copy0, t2.1); + ((&mut table2)[2u32.wrapping_mul(i).wrapping_add(3u32).wrapping_mul(20u32) as usize + ..2u32.wrapping_mul(i).wrapping_add(3u32).wrapping_mul(20u32) as usize + 20usize]) + .copy_from_slice(&(&tmp1)[0usize..20usize]) + }); + let mut tmp10: [u64; 20] = [0u64; 20usize]; + let i: u32 = 255u32; + let bits_c: u64 = bignum::bignum_base::bn_get_bits_u64(4u32, bscalar2.0, i, 5u32); + let bits_l32: u32 = bits_c as u32; + let a_bits_l: &[u64] = &(&crate::ed25519_precomptable::precomp_basepoint_table_w5) + [bits_l32.wrapping_mul(20u32) as usize..]; + (out[0usize..20usize]).copy_from_slice(&a_bits_l[0usize..20usize]); + let i0: u32 = 255u32; + let bits_c0: u64 = bignum::bignum_base::bn_get_bits_u64(4u32, bscalar2.1, i0, 5u32); + let bits_l320: u32 = bits_c0 as u32; + let a_bits_l0: (&[u64], &[u64]) = table2.split_at(bits_l320.wrapping_mul(20u32) as 
usize); + ((&mut tmp10)[0usize..20usize]).copy_from_slice(&a_bits_l0.1[0usize..20usize]); + let mut p_copy: [u64; 20] = [0u64; 20usize]; + ((&mut p_copy)[0usize..20usize]).copy_from_slice(&out[0usize..20usize]); + crate::ed25519::point_add(out, &p_copy, &tmp10); + let mut tmp11: [u64; 20] = [0u64; 20usize]; + for i1 in 0u32..51u32 { + krml::unroll_for!(5, "_i", 0u32, 1u32, { + let mut p_copy0: [u64; 20] = [0u64; 20usize]; + ((&mut p_copy0)[0usize..20usize]).copy_from_slice(&out[0usize..20usize]); + crate::ed25519::point_double(out, &p_copy0) + }); + let k: u32 = 255u32 + .wrapping_sub(5u32.wrapping_mul(i1)) + .wrapping_sub(5u32); + let bits_l: u64 = bignum::bignum_base::bn_get_bits_u64(4u32, bscalar2.1, k, 5u32); + lowstar::ignore::ignore::<&[u64]>(&table2); + let bits_l321: u32 = bits_l as u32; + let a_bits_l1: (&[u64], &[u64]) = table2.split_at(bits_l321.wrapping_mul(20u32) as usize); + ((&mut tmp11)[0usize..20usize]).copy_from_slice(&a_bits_l1.1[0usize..20usize]); + let mut p_copy0: [u64; 20] = [0u64; 20usize]; + ((&mut p_copy0)[0usize..20usize]).copy_from_slice(&out[0usize..20usize]); + crate::ed25519::point_add(out, &p_copy0, &tmp11); + let k0: u32 = 255u32 + .wrapping_sub(5u32.wrapping_mul(i1)) + .wrapping_sub(5u32); + let bits_l0: u64 = bignum::bignum_base::bn_get_bits_u64(4u32, bscalar2.0, k0, 5u32); + lowstar::ignore::ignore::<&[u64]>(&crate::ed25519_precomptable::precomp_basepoint_table_w5); + let bits_l322: u32 = bits_l0 as u32; + let a_bits_l2: &[u64] = &(&crate::ed25519_precomptable::precomp_basepoint_table_w5) + [bits_l322.wrapping_mul(20u32) as usize..]; + ((&mut tmp11)[0usize..20usize]).copy_from_slice(&a_bits_l2[0usize..20usize]); + let mut p_copy1: [u64; 20] = [0u64; 20usize]; + ((&mut p_copy1)[0usize..20usize]).copy_from_slice(&out[0usize..20usize]); + crate::ed25519::point_add(out, &p_copy1, &tmp11) + } +} + +#[inline] +fn point_negate_mul_double_g_vartime(out: &mut [u64], scalar1: &[u8], scalar2: &[u8], q2: &[u64]) { + let mut q2_neg: [u64; 20] 
= [0u64; 20usize]; + crate::ed25519::point_negate(q2, &mut q2_neg); + crate::ed25519::point_mul_g_double_vartime(out, scalar1, scalar2, &q2_neg) +} + +#[inline] +fn store_56(out: &mut [u8], b: &[u64]) { + let b0: u64 = b[0usize]; + let b1: u64 = b[1usize]; + let b2: u64 = b[2usize]; + let b3: u64 = b[3usize]; + let b4: u64 = b[4usize]; + let b4·: u32 = b4 as u32; + let b8: (&mut [u8], &mut [u8]) = out.split_at_mut(0usize); + lowstar::endianness::store64_le(b8.1, b0); + let b80: (&mut [u8], &mut [u8]) = b8.1.split_at_mut(7usize); + lowstar::endianness::store64_le(b80.1, b1); + let b81: (&mut [u8], &mut [u8]) = b80.1.split_at_mut(7usize); + lowstar::endianness::store64_le(b81.1, b2); + let b82: (&mut [u8], &mut [u8]) = b81.1.split_at_mut(7usize); + lowstar::endianness::store64_le(b82.1, b3); + lowstar::endianness::store32_le(&mut out[28usize..], b4·) +} + +#[inline] +fn load_64_bytes(out: &mut [u64], b: &[u8]) { + let b8: (&[u8], &[u8]) = b.split_at(0usize); + let u: u64 = lowstar::endianness::load64_le(b8.1); + let z: u64 = u; + let b0: u64 = z & 0xffffffffffffffu64; + let b80: (&[u8], &[u8]) = b8.1.split_at(7usize); + let u0: u64 = lowstar::endianness::load64_le(b80.1); + let z0: u64 = u0; + let b1: u64 = z0 & 0xffffffffffffffu64; + let b81: (&[u8], &[u8]) = b80.1.split_at(7usize); + let u1: u64 = lowstar::endianness::load64_le(b81.1); + let z1: u64 = u1; + let b2: u64 = z1 & 0xffffffffffffffu64; + let b82: (&[u8], &[u8]) = b81.1.split_at(7usize); + let u2: u64 = lowstar::endianness::load64_le(b82.1); + let z2: u64 = u2; + let b3: u64 = z2 & 0xffffffffffffffu64; + let b83: (&[u8], &[u8]) = b82.1.split_at(7usize); + let u3: u64 = lowstar::endianness::load64_le(b83.1); + let z3: u64 = u3; + let b4: u64 = z3 & 0xffffffffffffffu64; + let b84: (&[u8], &[u8]) = b83.1.split_at(7usize); + let u4: u64 = lowstar::endianness::load64_le(b84.1); + let z4: u64 = u4; + let b5: u64 = z4 & 0xffffffffffffffu64; + let b85: (&[u8], &[u8]) = b84.1.split_at(7usize); + let u5: u64 =
lowstar::endianness::load64_le(b85.1); + let z5: u64 = u5; + let b6: u64 = z5 & 0xffffffffffffffu64; + let b86: (&[u8], &[u8]) = b85.1.split_at(7usize); + let u6: u64 = lowstar::endianness::load64_le(b86.1); + let z6: u64 = u6; + let b7: u64 = z6 & 0xffffffffffffffu64; + let b87: (&[u8], &[u8]) = b86.1.split_at(7usize); + let u7: u64 = lowstar::endianness::load64_le(b87.1); + let z7: u64 = u7; + let b88: u64 = z7 & 0xffffffffffffffu64; + let b63: u8 = b[63usize]; + let b9: u64 = b63 as u64; + out[0usize] = b0; + out[1usize] = b1; + out[2usize] = b2; + out[3usize] = b3; + out[4usize] = b4; + out[5usize] = b5; + out[6usize] = b6; + out[7usize] = b7; + out[8usize] = b88; + out[9usize] = b9 +} + +#[inline] +fn load_32_bytes(out: &mut [u64], b: &[u8]) { + let b8: (&[u8], &[u8]) = b.split_at(0usize); + let u: u64 = lowstar::endianness::load64_le(b8.1); + let z: u64 = u; + let b0: u64 = z & 0xffffffffffffffu64; + let b80: (&[u8], &[u8]) = b8.1.split_at(7usize); + let u0: u64 = lowstar::endianness::load64_le(b80.1); + let z0: u64 = u0; + let b1: u64 = z0 & 0xffffffffffffffu64; + let b81: (&[u8], &[u8]) = b80.1.split_at(7usize); + let u1: u64 = lowstar::endianness::load64_le(b81.1); + let z1: u64 = u1; + let b2: u64 = z1 & 0xffffffffffffffu64; + let b82: (&[u8], &[u8]) = b81.1.split_at(7usize); + let u2: u64 = lowstar::endianness::load64_le(b82.1); + let z2: u64 = u2; + let b3: u64 = z2 & 0xffffffffffffffu64; + let u3: u32 = lowstar::endianness::load32_le(&b[28usize..]); + let b4: u32 = u3; + let b41: u64 = b4 as u64; + out[0usize] = b0; + out[1usize] = b1; + out[2usize] = b2; + out[3usize] = b3; + out[4usize] = b41 +} + +#[inline] +fn sha512_pre_msg(hash: &mut [u8], prefix: &[u8], len: u32, input: &[u8]) { + let buf: [u8; 128] = [0u8; 128usize]; + let mut block_state: [u64; 8] = [0u64; 8usize]; + crate::hash_sha2::sha512_init(&mut block_state); + let s: crate::streaming_types::state_64 = crate::streaming_types::state_64 { + block_state: Box::new(block_state), + buf: 
Box::new(buf), + total_len: 0u32 as u64, + }; + let mut p: [crate::streaming_types::state_64; 1] = [s; 1usize]; + let st: &mut [crate::streaming_types::state_64] = &mut p; + let err0: crate::streaming_types::error_code = crate::hash_sha2::update_512(st, prefix, 32u32); + let err1: crate::streaming_types::error_code = crate::hash_sha2::update_512(st, input, len); + lowstar::ignore::ignore::(err0); + lowstar::ignore::ignore::(err1); + crate::hash_sha2::digest_512(st, hash) +} + +#[inline] +fn sha512_pre_pre2_msg(hash: &mut [u8], prefix: &[u8], prefix2: &[u8], len: u32, input: &[u8]) { + let buf: [u8; 128] = [0u8; 128usize]; + let mut block_state: [u64; 8] = [0u64; 8usize]; + crate::hash_sha2::sha512_init(&mut block_state); + let s: crate::streaming_types::state_64 = crate::streaming_types::state_64 { + block_state: Box::new(block_state), + buf: Box::new(buf), + total_len: 0u32 as u64, + }; + let mut p: [crate::streaming_types::state_64; 1] = [s; 1usize]; + let st: &mut [crate::streaming_types::state_64] = &mut p; + let err0: crate::streaming_types::error_code = crate::hash_sha2::update_512(st, prefix, 32u32); + let err1: crate::streaming_types::error_code = crate::hash_sha2::update_512(st, prefix2, 32u32); + let err2: crate::streaming_types::error_code = crate::hash_sha2::update_512(st, input, len); + lowstar::ignore::ignore::(err0); + lowstar::ignore::ignore::(err1); + lowstar::ignore::ignore::(err2); + crate::hash_sha2::digest_512(st, hash) +} + +#[inline] +fn sha512_modq_pre(out: &mut [u64], prefix: &[u8], len: u32, input: &[u8]) { + let mut tmp: [u64; 10] = [0u64; 10usize]; + let mut hash: [u8; 64] = [0u8; 64usize]; + crate::ed25519::sha512_pre_msg(&mut hash, prefix, len, input); + crate::ed25519::load_64_bytes(&mut tmp, &hash); + crate::ed25519::barrett_reduction(out, &tmp) +} + +#[inline] +fn sha512_modq_pre_pre2(out: &mut [u64], prefix: &[u8], prefix2: &[u8], len: u32, input: &[u8]) { + let mut tmp: [u64; 10] = [0u64; 10usize]; + let mut hash: [u8; 64] = [0u8; 
64usize]; + crate::ed25519::sha512_pre_pre2_msg(&mut hash, prefix, prefix2, len, input); + crate::ed25519::load_64_bytes(&mut tmp, &hash); + crate::ed25519::barrett_reduction(out, &tmp) +} + +#[inline] +fn point_mul_g_compress(out: &mut [u8], s: &[u8]) { + let mut tmp: [u64; 20] = [0u64; 20usize]; + crate::ed25519::point_mul_g(&mut tmp, s); + crate::ed25519::point_compress(out, &tmp) +} + +#[inline] +fn secret_expand(expanded: &mut [u8], secret: &[u8]) { + crate::hash_sha2::hash_512(expanded, secret, 32u32); + let h_low: (&mut [u8], &mut [u8]) = expanded.split_at_mut(0usize); + let h_low0: u8 = h_low.1[0usize]; + let h_low31: u8 = h_low.1[31usize]; + h_low.1[0usize] = h_low0 & 0xf8u8; + h_low.1[31usize] = h_low31 & 127u8 | 64u8 +} + +/** +Compute the public key from the private key. + + @param[out] public_key Points to 32 bytes of valid memory, i.e., `uint8_t[32]`. Must not overlap the memory location of `private_key`. + @param[in] private_key Points to 32 bytes of valid memory containing the private key, i.e., `uint8_t[32]`. +*/ +pub fn secret_to_public(public_key: &mut [u8], private_key: &[u8]) { + let mut expanded_secret: [u8; 64] = [0u8; 64usize]; + crate::ed25519::secret_expand(&mut expanded_secret, private_key); + let a: (&[u8], &[u8]) = expanded_secret.split_at(0usize); + crate::ed25519::point_mul_g_compress(public_key, a.1) +} + +/** +Compute the expanded keys for an Ed25519 signature. + + @param[out] expanded_keys Points to 96 bytes of valid memory, i.e., `uint8_t[96]`. Must not overlap the memory location of `private_key`. + @param[in] private_key Points to 32 bytes of valid memory containing the private key, i.e., `uint8_t[32]`. + + If one needs to sign several messages under the same private key, it is more efficient + to call `expand_keys` only once and `sign_expanded` multiple times, for each message. 
+*/ +pub fn expand_keys(expanded_keys: &mut [u8], private_key: &[u8]) { + let s_prefix: (&mut [u8], &mut [u8]) = expanded_keys.split_at_mut(32usize); + crate::ed25519::secret_expand(s_prefix.1, private_key); + let public_key: (&mut [u8], &mut [u8]) = s_prefix.0.split_at_mut(0usize); + let s: (&[u8], &[u8]) = s_prefix.1.split_at(0usize); + crate::ed25519::point_mul_g_compress(public_key.1, s.1) +} + +/** +Create an Ed25519 signature with the (precomputed) expanded keys. + + @param[out] signature Points to 64 bytes of valid memory, i.e., `uint8_t[64]`. Must not overlap the memory locations of `expanded_keys` nor `msg`. + @param[in] expanded_keys Points to 96 bytes of valid memory, i.e., `uint8_t[96]`, containing the expanded keys obtained by invoking `expand_keys`. + @param[in] msg_len Length of `msg`. + @param[in] msg Points to `msg_len` bytes of valid memory containing the message, i.e., `uint8_t[msg_len]`. + + If one needs to sign several messages under the same private key, it is more efficient + to call `expand_keys` only once and `sign_expanded` multiple times, for each message. 
+*/ +pub fn sign_expanded(signature: &mut [u8], expanded_keys: &[u8], msg_len: u32, msg: &[u8]) { + let rs: (&mut [u8], &mut [u8]) = signature.split_at_mut(0usize); + let ss: (&mut [u8], &mut [u8]) = rs.1.split_at_mut(32usize); + let mut rq: [u64; 5] = [0u64; 5usize]; + let mut hq: [u64; 5] = [0u64; 5usize]; + let mut rb: [u8; 32] = [0u8; 32usize]; + let public_key: (&[u8], &[u8]) = expanded_keys.split_at(0usize); + let s: (&[u8], &[u8]) = public_key.1.split_at(32usize); + let prefix: (&[u8], &[u8]) = s.1.split_at(32usize); + crate::ed25519::sha512_modq_pre(&mut rq, prefix.1, msg_len, msg); + crate::ed25519::store_56(&mut rb, &rq); + crate::ed25519::point_mul_g_compress(ss.0, &rb); + crate::ed25519::sha512_modq_pre_pre2(&mut hq, ss.0, s.0, msg_len, msg); + let mut aq: [u64; 5] = [0u64; 5usize]; + crate::ed25519::load_32_bytes(&mut aq, prefix.0); + let mut y_copy: [u64; 5] = [0u64; 5usize]; + ((&mut y_copy)[0usize..5usize]).copy_from_slice(&(&aq)[0usize..5usize]); + crate::ed25519::mul_modq(&mut aq, &hq, &y_copy); + let mut y_copy0: [u64; 5] = [0u64; 5usize]; + ((&mut y_copy0)[0usize..5usize]).copy_from_slice(&(&aq)[0usize..5usize]); + crate::ed25519::add_modq(&mut aq, &rq, &y_copy0); + crate::ed25519::store_56(ss.1, &aq) +} + +/** +Create an Ed25519 signature. + + @param[out] signature Points to 64 bytes of valid memory, i.e., `uint8_t[64]`. Must not overlap the memory locations of `private_key` nor `msg`. + @param[in] private_key Points to 32 bytes of valid memory containing the private key, i.e., `uint8_t[32]`. + @param[in] msg_len Length of `msg`. + @param[in] msg Points to `msg_len` bytes of valid memory containing the message, i.e., `uint8_t[msg_len]`. + + The function first calls `expand_keys` and then invokes `sign_expanded`. + + If one needs to sign several messages under the same private key, it is more efficient + to call `expand_keys` only once and `sign_expanded` multiple times, for each message. 
+*/ +pub fn sign(signature: &mut [u8], private_key: &[u8], msg_len: u32, msg: &[u8]) { + let mut expanded_keys: [u8; 96] = [0u8; 96usize]; + crate::ed25519::expand_keys(&mut expanded_keys, private_key); + crate::ed25519::sign_expanded(signature, &expanded_keys, msg_len, msg) +} + +/** +Verify an Ed25519 signature. + + @param public_key Points to 32 bytes of valid memory containing the public key, i.e., `uint8_t[32]`. + @param msg_len Length of `msg`. + @param msg Points to `msg_len` bytes of valid memory containing the message, i.e., `uint8_t[msg_len]`. + @param signature Points to 64 bytes of valid memory containing the signature, i.e., `uint8_t[64]`. + + @return Returns `true` if the signature is valid and `false` otherwise. +*/ +pub fn verify(public_key: &[u8], msg_len: u32, msg: &[u8], signature: &[u8]) -> bool { + let mut aĀ·: [u64; 20] = [0u64; 20usize]; + let b: bool = crate::ed25519::point_decompress(&mut aĀ·, public_key); + if b { + let mut rĀ·: [u64; 20] = [0u64; 20usize]; + let rs: (&[u8], &[u8]) = signature.split_at(0usize); + let bĀ·: bool = crate::ed25519::point_decompress(&mut rĀ·, rs.1); + if bĀ· { + let mut hb: [u8; 32] = [0u8; 32usize]; + let rs1: (&[u8], &[u8]) = rs.1.split_at(0usize); + let sb: (&[u8], &[u8]) = rs1.1.split_at(32usize); + let mut tmp: [u64; 5] = [0u64; 5usize]; + crate::ed25519::load_32_bytes(&mut tmp, sb.1); + let b1: bool = crate::ed25519::gte_q(&tmp); + let b10: bool = b1; + if b10 { + false + } else { + let mut tmp0: [u64; 5] = [0u64; 5usize]; + crate::ed25519::sha512_modq_pre_pre2(&mut tmp0, sb.0, public_key, msg_len, msg); + crate::ed25519::store_56(&mut hb, &tmp0); + let mut exp_d: [u64; 20] = [0u64; 20usize]; + crate::ed25519::point_negate_mul_double_g_vartime(&mut exp_d, sb.1, &hb, &aĀ·); + let b2: bool = crate::ed25519::point_equal(&exp_d, &rĀ·); + b2 + } + } else { + false + } + } else { + false + } +} diff --git a/libcrux-hacl-rs/src/ed25519_precomptable.rs b/libcrux-hacl-rs/src/ed25519_precomptable.rs new file mode 
100644 index 000000000..ac344ed73 --- /dev/null +++ b/libcrux-hacl-rs/src/ed25519_precomptable.rs @@ -0,0 +1,478 @@ +#![allow(non_snake_case)] +#![allow(non_upper_case_globals)] +#![allow(non_camel_case_types)] +#![allow(unused_assignments)] +#![allow(unreachable_patterns)] + +pub(crate) const precomp_basepoint_table_w4: [u64; 320] = + [0u64, 0u64, 0u64, 0u64, 0u64, 1u64, 0u64, 0u64, 0u64, 0u64, 1u64, 0u64, 0u64, 0u64, 0u64, 0u64, + 0u64, 0u64, 0u64, 0u64, 1738742601995546u64, 1146398526822698u64, 2070867633025821u64, + 562264141797630u64, 587772402128613u64, 1801439850948184u64, 1351079888211148u64, + 450359962737049u64, 900719925474099u64, 1801439850948198u64, 1u64, 0u64, 0u64, 0u64, 0u64, + 1841354044333475u64, 16398895984059u64, 755974180946558u64, 900171276175154u64, + 1821297809914039u64, 1661154287933054u64, 284530020860578u64, 1390261174866914u64, + 1524110943907984u64, 1045603498418422u64, 928651508580478u64, 1383326941296346u64, + 961937908925785u64, 80455759693706u64, 904734540352947u64, 1507481815385608u64, + 2223447444246085u64, 1083941587175919u64, 2059929906842505u64, 1581435440146976u64, + 782730187692425u64, 9928394897574u64, 1539449519985236u64, 1923587931078510u64, + 552919286076056u64, 376925408065760u64, 447320488831784u64, 1362918338468019u64, + 1470031896696846u64, 2189796996539902u64, 1337552949959847u64, 1762287177775726u64, + 237994495816815u64, 1277840395970544u64, 543972849007241u64, 1224692671618814u64, + 162359533289271u64, 282240927125249u64, 586909166382289u64, 17726488197838u64, + 377014554985659u64, 1433835303052512u64, 702061469493692u64, 1142253108318154u64, + 318297794307551u64, 954362646308543u64, 517363881452320u64, 1868013482130416u64, + 262562472373260u64, 902232853249919u64, 2107343057055746u64, 462368348619024u64, + 1893758677092974u64, 2177729767846389u64, 2168532543559143u64, 443867094639821u64, + 730169342581022u64, 1564589016879755u64, 51218195700649u64, 76684578423745u64, + 560266272480743u64, 922517457707697u64, 
2066645939860874u64, 1318277348414638u64, + 1576726809084003u64, 1817337608563665u64, 1874240939237666u64, 754733726333910u64, + 97085310406474u64, 751148364309235u64, 1622159695715187u64, 1444098819684916u64, + 130920805558089u64, 1260449179085308u64, 1860021740768461u64, 110052860348509u64, + 193830891643810u64, 164148413933881u64, 180017794795332u64, 1523506525254651u64, + 465981629225956u64, 559733514964572u64, 1279624874416974u64, 2026642326892306u64, + 1425156829982409u64, 2160936383793147u64, 1061870624975247u64, 2023497043036941u64, + 117942212883190u64, 490339622800774u64, 1729931303146295u64, 422305932971074u64, + 529103152793096u64, 1211973233775992u64, 721364955929681u64, 1497674430438813u64, + 342545521275073u64, 2102107575279372u64, 2108462244669966u64, 1382582406064082u64, + 2206396818383323u64, 2109093268641147u64, 10809845110983u64, 1605176920880099u64, + 744640650753946u64, 1712758897518129u64, 373410811281809u64, 648838265800209u64, + 813058095530999u64, 513987632620169u64, 465516160703329u64, 2136322186126330u64, + 1979645899422932u64, 1197131006470786u64, 1467836664863979u64, 1340751381374628u64, + 1810066212667962u64, 1009933588225499u64, 1106129188080873u64, 1388980405213901u64, + 533719246598044u64, 1169435803073277u64, 198920999285821u64, 487492330629854u64, + 1807093008537778u64, 1540899012923865u64, 2075080271659867u64, 1527990806921523u64, + 1323728742908002u64, 1568595959608205u64, 1388032187497212u64, 2026968840050568u64, + 1396591153295755u64, 820416950170901u64, 520060313205582u64, 2016404325094901u64, + 1584709677868520u64, 272161374469956u64, 1567188603996816u64, 1986160530078221u64, + 553930264324589u64, 1058426729027503u64, 8762762886675u64, 2216098143382988u64, + 1835145266889223u64, 1712936431558441u64, 1017009937844974u64, 585361667812740u64, + 2114711541628181u64, 2238729632971439u64, 121257546253072u64, 847154149018345u64, + 211972965476684u64, 287499084460129u64, 2098247259180197u64, 839070411583329u64, + 
339551619574372u64, 1432951287640743u64, 526481249498942u64, 931991661905195u64, + 1884279965674487u64, 200486405604411u64, 364173020594788u64, 518034455936955u64, + 1085564703965501u64, 16030410467927u64, 604865933167613u64, 1695298441093964u64, + 498856548116159u64, 2193030062787034u64, 1706339802964179u64, 1721199073493888u64, + 820740951039755u64, 1216053436896834u64, 23954895815139u64, 1662515208920491u64, + 1705443427511899u64, 1957928899570365u64, 1189636258255725u64, 1795695471103809u64, + 1691191297654118u64, 282402585374360u64, 460405330264832u64, 63765529445733u64, + 469763447404473u64, 733607089694996u64, 685410420186959u64, 1096682630419738u64, + 1162548510542362u64, 1020949526456676u64, 1211660396870573u64, 613126398222696u64, + 1117829165843251u64, 742432540886650u64, 1483755088010658u64, 942392007134474u64, + 1447834130944107u64, 489368274863410u64, 23192985544898u64, 648442406146160u64, + 785438843373876u64, 249464684645238u64, 170494608205618u64, 335112827260550u64, + 1462050123162735u64, 1084803668439016u64, 853459233600325u64, 215777728187495u64, + 1965759433526974u64, 1349482894446537u64, 694163317612871u64, 860536766165036u64, + 1178788094084321u64, 1652739626626996u64, 2115723946388185u64, 1577204379094664u64, + 1083882859023240u64, 1768759143381635u64, 1737180992507258u64, 246054513922239u64, + 577253134087234u64, 356340280578042u64, 1638917769925142u64, 223550348130103u64, + 470592666638765u64, 22663573966996u64, 596552461152400u64, 364143537069499u64, + 3942119457699u64, 107951982889287u64, 1843471406713209u64, 1625773041610986u64, + 1466141092501702u64, 1043024095021271u64, 310429964047508u64, 98559121500372u64, + 152746933782868u64, 259407205078261u64, 828123093322585u64, 1576847274280091u64, + 1170871375757302u64, 1588856194642775u64, 984767822341977u64, 1141497997993760u64, + 809325345150796u64, 1879837728202511u64, 201340910657893u64, 1079157558888483u64, + 1052373448588065u64, 1732036202501778u64, 2105292670328445u64, 
679751387312402u64, + 1679682144926229u64, 1695823455818780u64, 498852317075849u64, 1786555067788433u64, + 1670727545779425u64, 117945875433544u64, 407939139781844u64, 854632120023778u64, + 1413383148360437u64, 286030901733673u64, 1207361858071196u64, 461340408181417u64, + 1096919590360164u64, 1837594897475685u64, 533755561544165u64, 1638688042247712u64, + 1431653684793005u64, 1036458538873559u64, 390822120341779u64, 1920929837111618u64, + 543426740024168u64, 645751357799929u64, 2245025632994463u64, 1550778638076452u64, + 223738153459949u64, 1337209385492033u64, 1276967236456531u64, 1463815821063071u64, + 2070620870191473u64, 1199170709413753u64, 273230877394166u64, 1873264887608046u64, + 890877152910775u64]; + +pub(crate) const precomp_g_pow2_64_table_w4: [u64; 320] = + [0u64, 0u64, 0u64, 0u64, 0u64, 1u64, 0u64, 0u64, 0u64, 0u64, 1u64, 0u64, 0u64, 0u64, 0u64, 0u64, + 0u64, 0u64, 0u64, 0u64, 13559344787725u64, 2051621493703448u64, 1947659315640708u64, + 626856790370168u64, 1592804284034836u64, 1781728767459187u64, 278818420518009u64, + 2038030359908351u64, 910625973862690u64, 471887343142239u64, 1298543306606048u64, + 794147365642417u64, 129968992326749u64, 523140861678572u64, 1166419653909231u64, + 2009637196928390u64, 1288020222395193u64, 1007046974985829u64, 208981102651386u64, + 2074009315253380u64, 1564056062071967u64, 276822668750618u64, 206621292512572u64, + 470304361809269u64, 895215438398493u64, 1527859053868686u64, 1624967223409369u64, + 811821865979736u64, 350450534838340u64, 219143807921807u64, 507994540371254u64, + 986513794574720u64, 1142661369967121u64, 621278293399257u64, 556189161519781u64, + 351964007865066u64, 2011573453777822u64, 1367125527151537u64, 1691316722438196u64, + 731328817345164u64, 1284781192709232u64, 478439299539269u64, 204842178076429u64, + 2085125369913651u64, 1980773492792985u64, 1480264409524940u64, 688389585376233u64, + 612962643526972u64, 165595382536676u64, 1850300069212263u64, 1176357203491551u64, + 1880164984292321u64, 
10786153104736u64, 1242293560510203u64, 1358399951884084u64, + 1901358796610357u64, 1385092558795806u64, 1734893785311348u64, 2046201851951191u64, + 1233811309557352u64, 1531160168656129u64, 1543287181303358u64, 516121446374119u64, + 723422668089935u64, 1228176774959679u64, 1598014722726267u64, 1630810326658412u64, + 1343833067463760u64, 1024397964362099u64, 1157142161346781u64, 56422174971792u64, + 544901687297092u64, 1291559028869009u64, 1336918672345120u64, 1390874603281353u64, + 1127199512010904u64, 992644979940964u64, 1035213479783573u64, 36043651196100u64, + 1220961519321221u64, 1348190007756977u64, 579420200329088u64, 1703819961008985u64, + 1993919213460047u64, 2225080008232251u64, 392785893702372u64, 464312521482632u64, + 1224525362116057u64, 810394248933036u64, 932513521649107u64, 592314953488703u64, + 586334603791548u64, 1310888126096549u64, 650842674074281u64, 1596447001791059u64, + 2086767406328284u64, 1866377645879940u64, 1721604362642743u64, 738502322566890u64, + 1851901097729689u64, 1158347571686914u64, 2023626733470827u64, 329625404653699u64, + 563555875598551u64, 516554588079177u64, 1134688306104598u64, 186301198420809u64, + 1339952213563300u64, 643605614625891u64, 1947505332718043u64, 1722071694852824u64, + 601679570440694u64, 1821275721236351u64, 1808307842870389u64, 1654165204015635u64, + 1457334100715245u64, 217784948678349u64, 1820622417674817u64, 1946121178444661u64, + 597980757799332u64, 1745271227710764u64, 2010952890941980u64, 339811849696648u64, + 1066120666993872u64, 261276166508990u64, 323098645774553u64, 207454744271283u64, + 941448672977675u64, 71890920544375u64, 840849789313357u64, 1223996070717926u64, + 196832550853408u64, 115986818309231u64, 1586171527267675u64, 1666169080973450u64, + 1456454731176365u64, 44467854369003u64, 2149656190691480u64, 283446383597589u64, + 2040542647729974u64, 305705593840224u64, 475315822269791u64, 648133452550632u64, + 169218658835720u64, 24960052338251u64, 938907951346766u64, 425970950490510u64, + 
1037622011013183u64, 1026882082708180u64, 1635699409504916u64, 1644776942870488u64, + 2151820331175914u64, 824120674069819u64, 835744976610113u64, 1991271032313190u64, + 96507354724855u64, 400645405133260u64, 343728076650825u64, 1151585441385566u64, + 1403339955333520u64, 230186314139774u64, 1736248861506714u64, 1010804378904572u64, + 1394932289845636u64, 1901351256960852u64, 2187471430089807u64, 1003853262342670u64, + 1327743396767461u64, 1465160415991740u64, 366625359144534u64, 1534791405247604u64, + 1790905930250187u64, 1255484115292738u64, 2223291365520443u64, 210967717407408u64, + 26722916813442u64, 1919574361907910u64, 468825088280256u64, 2230011775946070u64, + 1628365642214479u64, 568871869234932u64, 1066987968780488u64, 1692242903745558u64, + 1678903997328589u64, 214262165888021u64, 1929686748607204u64, 1790138967989670u64, + 1790261616022076u64, 1559824537553112u64, 1230364591311358u64, 147531939886346u64, + 1528207085815487u64, 477957922927292u64, 285670243881618u64, 264430080123332u64, + 1163108160028611u64, 373201522147371u64, 34903775270979u64, 1750870048600662u64, + 1319328308741084u64, 1547548634278984u64, 1691259592202927u64, 2247758037259814u64, + 329611399953677u64, 1385555496268877u64, 2242438354031066u64, 1329523854843632u64, + 399895373846055u64, 678005703193452u64, 1496357700997771u64, 71909969781942u64, + 1515391418612349u64, 470110837888178u64, 1981307309417466u64, 1259888737412276u64, + 669991710228712u64, 1048546834514303u64, 1678323291295512u64, 2172033978088071u64, + 1529278455500556u64, 901984601941894u64, 780867622403807u64, 550105677282793u64, + 975860231176136u64, 525188281689178u64, 49966114807992u64, 1776449263836645u64, + 267851776380338u64, 2225969494054620u64, 2016794225789822u64, 1186108678266608u64, + 1023083271408882u64, 1119289418565906u64, 1248185897348801u64, 1846081539082697u64, + 23756429626075u64, 1441999021105403u64, 724497586552825u64, 1287761623605379u64, + 685303359654224u64, 2217156930690570u64, 163769288918347u64, 
1098423278284094u64, + 1391470723006008u64, 570700152353516u64, 744804507262556u64, 2200464788609495u64, + 624141899161992u64, 2249570166275684u64, 378706441983561u64, 122486379999375u64, + 430741162798924u64, 113847463452574u64, 266250457840685u64, 2120743625072743u64, + 222186221043927u64, 1964290018305582u64, 1435278008132477u64, 1670867456663734u64, + 2009989552599079u64, 1348024113448744u64, 1158423886300455u64, 1356467152691569u64, + 306943042363674u64, 926879628664255u64, 1349295689598324u64, 725558330071205u64, + 536569987519948u64, 116436990335366u64, 1551888573800376u64, 2044698345945451u64, + 104279940291311u64, 251526570943220u64, 754735828122925u64, 33448073576361u64, + 994605876754543u64, 546007584022006u64, 2217332798409487u64, 706477052561591u64, + 131174619428653u64, 2148698284087243u64, 239290486205186u64, 2161325796952184u64, + 1713452845607994u64, 1297861562938913u64, 1779539876828514u64, 1926559018603871u64, + 296485747893968u64, 1859208206640686u64, 538513979002718u64, 103998826506137u64, + 2025375396538469u64, 1370680785701206u64, 1698557311253840u64, 1411096399076595u64, + 2132580530813677u64, 2071564345845035u64, 498581428556735u64, 1136010486691371u64, + 1927619356993146u64]; + +pub(crate) const precomp_g_pow2_128_table_w4: [u64; 320] = + [0u64, 0u64, 0u64, 0u64, 0u64, 1u64, 0u64, 0u64, 0u64, 0u64, 1u64, 0u64, 0u64, 0u64, 0u64, 0u64, + 0u64, 0u64, 0u64, 0u64, 557549315715710u64, 196756086293855u64, 846062225082495u64, + 1865068224838092u64, 991112090754908u64, 522916421512828u64, 2098523346722375u64, + 1135633221747012u64, 858420432114866u64, 186358544306082u64, 1044420411868480u64, + 2080052304349321u64, 557301814716724u64, 1305130257814057u64, 2126012765451197u64, + 1441004402875101u64, 353948968859203u64, 470765987164835u64, 1507675957683570u64, + 1086650358745097u64, 1911913434398388u64, 66086091117182u64, 1137511952425971u64, + 36958263512141u64, 2193310025325256u64, 1085191426269045u64, 1232148267909446u64, + 1449894406170117u64, 
1241416717139557u64, 1940876999212868u64, 829758415918121u64, + 309608450373449u64, 2228398547683851u64, 1580623271960188u64, 1675601502456740u64, + 1360363115493548u64, 1098397313096815u64, 1809255384359797u64, 1458261916834384u64, + 210682545649705u64, 1606836641068115u64, 1230478270405318u64, 1843192771547802u64, + 1794596343564051u64, 229060710252162u64, 2169742775467181u64, 701467067318072u64, + 696018499035555u64, 521051885339807u64, 158329567901874u64, 740426481832143u64, + 1369811177301441u64, 503351589084015u64, 1781114827942261u64, 1650493549693035u64, + 2174562418345156u64, 456517194809244u64, 2052761522121179u64, 2233342271123682u64, + 1445872925177435u64, 1131882576902813u64, 220765848055241u64, 1280259961403769u64, + 1581497080160712u64, 1477441080108824u64, 218428165202767u64, 1970598141278907u64, + 643366736173069u64, 2167909426804014u64, 834993711408259u64, 1922437166463212u64, + 1900036281472252u64, 513794844386304u64, 1297904164900114u64, 1147626295373268u64, + 1910101606251299u64, 182933838633381u64, 806229530787362u64, 155511666433200u64, + 290522463375462u64, 534373523491751u64, 1302938814480515u64, 1664979184120445u64, + 304235649499423u64, 339284524318609u64, 1881717946973483u64, 1670802286833842u64, + 2223637120675737u64, 135818919485814u64, 1144856572842792u64, 2234981613434386u64, + 963917024969826u64, 402275378284993u64, 141532417412170u64, 921537468739387u64, + 963905069722607u64, 1405442890733358u64, 1567763927164655u64, 1664776329195930u64, + 2095924165508507u64, 994243110271379u64, 1243925610609353u64, 1029845815569727u64, + 1001968867985629u64, 170368934002484u64, 1100906131583801u64, 1825190326449569u64, + 1462285121182096u64, 1545240767016377u64, 797859025652273u64, 1062758326657530u64, + 1125600735118266u64, 739325756774527u64, 1420144485966996u64, 1915492743426702u64, + 752968196344993u64, 882156396938351u64, 1909097048763227u64, 849058590685611u64, + 840754951388500u64, 1832926948808323u64, 2023317100075297u64, 
322382745442827u64, + 1569741341737601u64, 1678986113194987u64, 757598994581938u64, 29678659580705u64, + 1239680935977986u64, 1509239427168474u64, 1055981929287006u64, 1894085471158693u64, + 916486225488490u64, 642168890366120u64, 300453362620010u64, 1858797242721481u64, + 2077989823177130u64, 510228455273334u64, 1473284798689270u64, 5173934574301u64, + 765285232030050u64, 1007154707631065u64, 1862128712885972u64, 168873464821340u64, + 1967853269759318u64, 1489896018263031u64, 592451806166369u64, 1242298565603883u64, + 1838918921339058u64, 697532763910695u64, 294335466239059u64, 135687058387449u64, + 2133734403874176u64, 2121911143127699u64, 20222476737364u64, 1200824626476747u64, + 1397731736540791u64, 702378430231418u64, 59059527640068u64, 460992547183981u64, + 1016125857842765u64, 1273530839608957u64, 96724128829301u64, 1313433042425233u64, + 3543822857227u64, 761975685357118u64, 110417360745248u64, 1079634164577663u64, + 2044574510020457u64, 338709058603120u64, 94541336042799u64, 127963233585039u64, + 94427896272258u64, 1143501979342182u64, 1217958006212230u64, 2153887831492134u64, + 1519219513255575u64, 251793195454181u64, 392517349345200u64, 1507033011868881u64, + 2208494254670752u64, 1364389582694359u64, 2214069430728063u64, 1272814257105752u64, + 741450148906352u64, 1105776675555685u64, 824447222014984u64, 528745219306376u64, + 589427609121575u64, 1501786838809155u64, 379067373073147u64, 184909476589356u64, + 1346887560616185u64, 1932023742314082u64, 1633302311869264u64, 1685314821133069u64, + 1836610282047884u64, 1595571594397150u64, 615441688872198u64, 1926435616702564u64, + 235632180396480u64, 1051918343571810u64, 2150570051687050u64, 879198845408738u64, + 1443966275205464u64, 481362545245088u64, 512807443532642u64, 641147578283480u64, + 1594276116945596u64, 1844812743300602u64, 2044559316019485u64, 202620777969020u64, + 852992984136302u64, 1500869642692910u64, 1085216217052457u64, 1736294372259758u64, + 2009666354486552u64, 1262389020715248u64, 
1166527705256867u64, 1409917450806036u64, + 1705819160057637u64, 1116901782584378u64, 1278460472285473u64, 257879811360157u64, + 40314007176886u64, 701309846749639u64, 1380457676672777u64, 631519782380272u64, + 1196339573466793u64, 955537708940017u64, 532725633381530u64, 641190593731833u64, + 7214357153807u64, 481922072107983u64, 1634886189207352u64, 1247659758261633u64, + 1655809614786430u64, 43105797900223u64, 76205809912607u64, 1936575107455823u64, + 1107927314642236u64, 2199986333469333u64, 802974829322510u64, 718173128143482u64, + 539385184235615u64, 2075693785611221u64, 953281147333690u64, 1623571637172587u64, + 655274535022250u64, 1568078078819021u64, 101142125049712u64, 1488441673350881u64, + 1457969561944515u64, 1492622544287712u64, 2041460689280803u64, 1961848091392887u64, + 461003520846938u64, 934728060399807u64, 117723291519705u64, 1027773762863526u64, + 56765304991567u64, 2184028379550479u64, 1768767711894030u64, 1304432068983172u64, + 498080974452325u64, 2134905654858163u64, 1446137427202647u64, 551613831549590u64, + 680288767054205u64, 1278113339140386u64, 378149431842614u64, 80520494426960u64, + 2080985256348782u64, 673432591799820u64, 739189463724560u64, 1847191452197509u64, + 527737312871602u64, 477609358840073u64, 1891633072677946u64, 1841456828278466u64, + 2242502936489002u64, 524791829362709u64, 276648168514036u64, 991706903257619u64, + 512580228297906u64, 1216855104975946u64, 67030930303149u64, 769593945208213u64, + 2048873385103577u64, 455635274123107u64, 2077404927176696u64, 1803539634652306u64, + 1837579953843417u64, 1564240068662828u64, 1964310918970435u64, 832822906252492u64, + 1516044634195010u64, 770571447506889u64, 602215152486818u64, 1760828333136947u64, + 730156776030376u64]; + +pub(crate) const precomp_g_pow2_192_table_w4: [u64; 320] = + [0u64, 0u64, 0u64, 0u64, 0u64, 1u64, 0u64, 0u64, 0u64, 0u64, 1u64, 0u64, 0u64, 0u64, 0u64, 0u64, + 0u64, 0u64, 0u64, 0u64, 1129953239743101u64, 1240339163956160u64, 61002583352401u64, + 
2017604552196030u64, 1576867829229863u64, 1508654942849389u64, 270111619664077u64, + 1253097517254054u64, 721798270973250u64, 161923365415298u64, 828530877526011u64, + 1494851059386763u64, 662034171193976u64, 1315349646974670u64, 2199229517308806u64, + 497078277852673u64, 1310507715989956u64, 1881315714002105u64, 2214039404983803u64, + 1331036420272667u64, 296286697520787u64, 1179367922639127u64, 25348441419697u64, + 2200984961703188u64, 150893128908291u64, 1978614888570852u64, 1539657347172046u64, + 553810196523619u64, 246017573977646u64, 1440448985385485u64, 346049108099981u64, + 601166606218546u64, 855822004151713u64, 1957521326383188u64, 1114240380430887u64, + 1349639675122048u64, 957375954499040u64, 111551795360136u64, 618586733648988u64, + 490708840688866u64, 1267002049697314u64, 1130723224930028u64, 215603029480828u64, + 1277138555414710u64, 1556750324971322u64, 1407903521793741u64, 1836836546590749u64, + 576500297444199u64, 2074707599091135u64, 1826239864380012u64, 1935365705983312u64, + 239501825683682u64, 1594236669034980u64, 1283078975055301u64, 856745636255925u64, + 1342128647959981u64, 945216428379689u64, 938746202496410u64, 105775123333919u64, + 1379852610117266u64, 1770216827500275u64, 1016017267535704u64, 1902885522469532u64, + 994184703730489u64, 2227487538793763u64, 53155967096055u64, 1264120808114350u64, + 1334928769376729u64, 393911808079997u64, 826229239481845u64, 1827903006733192u64, + 1449283706008465u64, 1258040415217849u64, 1641484112868370u64, 1140150841968176u64, + 391113338021313u64, 162138667815833u64, 742204396566060u64, 110709233440557u64, + 90179377432917u64, 530511949644489u64, 911568635552279u64, 135869304780166u64, + 617719999563692u64, 1802525001631319u64, 1836394639510490u64, 1862739456475085u64, + 1378284444664288u64, 1617882529391756u64, 876124429891172u64, 1147654641445091u64, + 1476943370400542u64, 688601222759067u64, 2120281968990205u64, 1387113236912611u64, + 2125245820685788u64, 1030674016350092u64, 1594684598654247u64, 
1165939511879820u64, + 271499323244173u64, 546587254515484u64, 945603425742936u64, 1242252568170226u64, + 561598728058142u64, 604827091794712u64, 19869753585186u64, 565367744708915u64, + 536755754533603u64, 1767258313589487u64, 907952975936127u64, 292851652613937u64, + 163573546237963u64, 837601408384564u64, 591996990118301u64, 2126051747693057u64, + 182247548824566u64, 908369044122868u64, 1335442699947273u64, 2234292296528612u64, + 689537529333034u64, 2174778663790714u64, 1011407643592667u64, 1856130618715473u64, + 1557437221651741u64, 2250285407006102u64, 1412384213410827u64, 1428042038612456u64, + 962709733973660u64, 313995703125919u64, 1844969155869325u64, 787716782673657u64, + 622504542173478u64, 930119043384654u64, 2128870043952488u64, 537781531479523u64, + 1556666269904940u64, 417333635741346u64, 1986743846438415u64, 877620478041197u64, + 2205624582983829u64, 595260668884488u64, 2025159350373157u64, 2091659716088235u64, + 1423634716596391u64, 653686638634080u64, 1972388399989956u64, 795575741798014u64, + 889240107997846u64, 1446156876910732u64, 1028507012221776u64, 1071697574586478u64, + 1689630411899691u64, 604092816502174u64, 1909917373896122u64, 1602544877643837u64, + 1227177032923867u64, 62684197535630u64, 186146290753883u64, 414449055316766u64, + 1560555880866750u64, 157579947096755u64, 230526795502384u64, 1197673369665894u64, + 593779215869037u64, 214638834474097u64, 1796344443484478u64, 493550548257317u64, + 1628442824033694u64, 1410811655893495u64, 1009361960995171u64, 604736219740352u64, + 392445928555351u64, 1254295770295706u64, 1958074535046128u64, 508699942241019u64, + 739405911261325u64, 1678760393882409u64, 517763708545996u64, 640040257898722u64, + 384966810872913u64, 407454748380128u64, 152604679407451u64, 185102854927662u64, + 1448175503649595u64, 100328519208674u64, 1153263667012830u64, 1643926437586490u64, + 609632142834154u64, 980984004749261u64, 855290732258779u64, 2186022163021506u64, + 1254052618626070u64, 1850030517182611u64, 
162348933090207u64, 1948712273679932u64, + 1331832516262191u64, 1219400369175863u64, 89689036937483u64, 1554886057235815u64, + 1520047528432789u64, 81263957652811u64, 146612464257008u64, 2207945627164163u64, + 919846660682546u64, 1925694087906686u64, 2102027292388012u64, 887992003198635u64, + 1817924871537027u64, 746660005584342u64, 753757153275525u64, 91394270908699u64, + 511837226544151u64, 736341543649373u64, 1256371121466367u64, 1977778299551813u64, + 817915174462263u64, 1602323381418035u64, 190035164572930u64, 603796401391181u64, + 2152666873671669u64, 1813900316324112u64, 1292622433358041u64, 888439870199892u64, + 978918155071994u64, 534184417909805u64, 466460084317313u64, 1275223140288685u64, + 786407043883517u64, 1620520623925754u64, 1753625021290269u64, 751937175104525u64, + 905301961820613u64, 697059847245437u64, 584919033981144u64, 1272165506533156u64, + 1532180021450866u64, 1901407354005301u64, 1421319720492586u64, 2179081609765456u64, + 2193253156667632u64, 1080248329608584u64, 2158422436462066u64, 759167597017850u64, + 545759071151285u64, 641600428493698u64, 943791424499848u64, 469571542427864u64, + 951117845222467u64, 1780538594373407u64, 614611122040309u64, 1354826131886963u64, + 221898131992340u64, 1145699723916219u64, 798735379961769u64, 1843560518208287u64, + 1424523160161545u64, 205549016574779u64, 2239491587362749u64, 1918363582399888u64, + 1292183072788455u64, 1783513123192567u64, 1584027954317205u64, 1890421443925740u64, + 1718459319874929u64, 1522091040748809u64, 399467600667219u64, 1870973059066576u64, + 287514433150348u64, 1397845311152885u64, 1880440629872863u64, 709302939340341u64, + 1813571361109209u64, 86598795876860u64, 1146964554310612u64, 1590956584862432u64, + 2097004628155559u64, 656227622102390u64, 1808500445541891u64, 958336726523135u64, + 2007604569465975u64, 313504950390997u64, 1399686004953620u64, 1759732788465234u64, + 1562539721055836u64, 1575722765016293u64, 793318366641259u64, 443876859384887u64, + 547308921989704u64, 
636698687503328u64, 2179175835287340u64, 498333551718258u64, + 932248760026176u64, 1612395686304653u64, 2179774103745626u64, 1359658123541018u64, + 171488501802442u64, 1625034951791350u64, 520196922773633u64, 1873787546341877u64, + 303457823885368u64]; + +pub(crate) const precomp_basepoint_table_w5: [u64; 640] = + [0u64, 0u64, 0u64, 0u64, 0u64, 1u64, 0u64, 0u64, 0u64, 0u64, 1u64, 0u64, 0u64, 0u64, 0u64, 0u64, + 0u64, 0u64, 0u64, 0u64, 1738742601995546u64, 1146398526822698u64, 2070867633025821u64, + 562264141797630u64, 587772402128613u64, 1801439850948184u64, 1351079888211148u64, + 450359962737049u64, 900719925474099u64, 1801439850948198u64, 1u64, 0u64, 0u64, 0u64, 0u64, + 1841354044333475u64, 16398895984059u64, 755974180946558u64, 900171276175154u64, + 1821297809914039u64, 1661154287933054u64, 284530020860578u64, 1390261174866914u64, + 1524110943907984u64, 1045603498418422u64, 928651508580478u64, 1383326941296346u64, + 961937908925785u64, 80455759693706u64, 904734540352947u64, 1507481815385608u64, + 2223447444246085u64, 1083941587175919u64, 2059929906842505u64, 1581435440146976u64, + 782730187692425u64, 9928394897574u64, 1539449519985236u64, 1923587931078510u64, + 552919286076056u64, 376925408065760u64, 447320488831784u64, 1362918338468019u64, + 1470031896696846u64, 2189796996539902u64, 1337552949959847u64, 1762287177775726u64, + 237994495816815u64, 1277840395970544u64, 543972849007241u64, 1224692671618814u64, + 162359533289271u64, 282240927125249u64, 586909166382289u64, 17726488197838u64, + 377014554985659u64, 1433835303052512u64, 702061469493692u64, 1142253108318154u64, + 318297794307551u64, 954362646308543u64, 517363881452320u64, 1868013482130416u64, + 262562472373260u64, 902232853249919u64, 2107343057055746u64, 462368348619024u64, + 1893758677092974u64, 2177729767846389u64, 2168532543559143u64, 443867094639821u64, + 730169342581022u64, 1564589016879755u64, 51218195700649u64, 76684578423745u64, + 560266272480743u64, 922517457707697u64, 2066645939860874u64, 
1318277348414638u64, + 1576726809084003u64, 1817337608563665u64, 1874240939237666u64, 754733726333910u64, + 97085310406474u64, 751148364309235u64, 1622159695715187u64, 1444098819684916u64, + 130920805558089u64, 1260449179085308u64, 1860021740768461u64, 110052860348509u64, + 193830891643810u64, 164148413933881u64, 180017794795332u64, 1523506525254651u64, + 465981629225956u64, 559733514964572u64, 1279624874416974u64, 2026642326892306u64, + 1425156829982409u64, 2160936383793147u64, 1061870624975247u64, 2023497043036941u64, + 117942212883190u64, 490339622800774u64, 1729931303146295u64, 422305932971074u64, + 529103152793096u64, 1211973233775992u64, 721364955929681u64, 1497674430438813u64, + 342545521275073u64, 2102107575279372u64, 2108462244669966u64, 1382582406064082u64, + 2206396818383323u64, 2109093268641147u64, 10809845110983u64, 1605176920880099u64, + 744640650753946u64, 1712758897518129u64, 373410811281809u64, 648838265800209u64, + 813058095530999u64, 513987632620169u64, 465516160703329u64, 2136322186126330u64, + 1979645899422932u64, 1197131006470786u64, 1467836664863979u64, 1340751381374628u64, + 1810066212667962u64, 1009933588225499u64, 1106129188080873u64, 1388980405213901u64, + 533719246598044u64, 1169435803073277u64, 198920999285821u64, 487492330629854u64, + 1807093008537778u64, 1540899012923865u64, 2075080271659867u64, 1527990806921523u64, + 1323728742908002u64, 1568595959608205u64, 1388032187497212u64, 2026968840050568u64, + 1396591153295755u64, 820416950170901u64, 520060313205582u64, 2016404325094901u64, + 1584709677868520u64, 272161374469956u64, 1567188603996816u64, 1986160530078221u64, + 553930264324589u64, 1058426729027503u64, 8762762886675u64, 2216098143382988u64, + 1835145266889223u64, 1712936431558441u64, 1017009937844974u64, 585361667812740u64, + 2114711541628181u64, 2238729632971439u64, 121257546253072u64, 847154149018345u64, + 211972965476684u64, 287499084460129u64, 2098247259180197u64, 839070411583329u64, + 339551619574372u64, 
1432951287640743u64, 526481249498942u64, 931991661905195u64, + 1884279965674487u64, 200486405604411u64, 364173020594788u64, 518034455936955u64, + 1085564703965501u64, 16030410467927u64, 604865933167613u64, 1695298441093964u64, + 498856548116159u64, 2193030062787034u64, 1706339802964179u64, 1721199073493888u64, + 820740951039755u64, 1216053436896834u64, 23954895815139u64, 1662515208920491u64, + 1705443427511899u64, 1957928899570365u64, 1189636258255725u64, 1795695471103809u64, + 1691191297654118u64, 282402585374360u64, 460405330264832u64, 63765529445733u64, + 469763447404473u64, 733607089694996u64, 685410420186959u64, 1096682630419738u64, + 1162548510542362u64, 1020949526456676u64, 1211660396870573u64, 613126398222696u64, + 1117829165843251u64, 742432540886650u64, 1483755088010658u64, 942392007134474u64, + 1447834130944107u64, 489368274863410u64, 23192985544898u64, 648442406146160u64, + 785438843373876u64, 249464684645238u64, 170494608205618u64, 335112827260550u64, + 1462050123162735u64, 1084803668439016u64, 853459233600325u64, 215777728187495u64, + 1965759433526974u64, 1349482894446537u64, 694163317612871u64, 860536766165036u64, + 1178788094084321u64, 1652739626626996u64, 2115723946388185u64, 1577204379094664u64, + 1083882859023240u64, 1768759143381635u64, 1737180992507258u64, 246054513922239u64, + 577253134087234u64, 356340280578042u64, 1638917769925142u64, 223550348130103u64, + 470592666638765u64, 22663573966996u64, 596552461152400u64, 364143537069499u64, + 3942119457699u64, 107951982889287u64, 1843471406713209u64, 1625773041610986u64, + 1466141092501702u64, 1043024095021271u64, 310429964047508u64, 98559121500372u64, + 152746933782868u64, 259407205078261u64, 828123093322585u64, 1576847274280091u64, + 1170871375757302u64, 1588856194642775u64, 984767822341977u64, 1141497997993760u64, + 809325345150796u64, 1879837728202511u64, 201340910657893u64, 1079157558888483u64, + 1052373448588065u64, 1732036202501778u64, 2105292670328445u64, 679751387312402u64, + 
1679682144926229u64, 1695823455818780u64, 498852317075849u64, 1786555067788433u64, + 1670727545779425u64, 117945875433544u64, 407939139781844u64, 854632120023778u64, + 1413383148360437u64, 286030901733673u64, 1207361858071196u64, 461340408181417u64, + 1096919590360164u64, 1837594897475685u64, 533755561544165u64, 1638688042247712u64, + 1431653684793005u64, 1036458538873559u64, 390822120341779u64, 1920929837111618u64, + 543426740024168u64, 645751357799929u64, 2245025632994463u64, 1550778638076452u64, + 223738153459949u64, 1337209385492033u64, 1276967236456531u64, 1463815821063071u64, + 2070620870191473u64, 1199170709413753u64, 273230877394166u64, 1873264887608046u64, + 890877152910775u64, 983226445635730u64, 44873798519521u64, 697147127512130u64, + 961631038239304u64, 709966160696826u64, 1706677689540366u64, 502782733796035u64, + 812545535346033u64, 1693622521296452u64, 1955813093002510u64, 1259937612881362u64, + 1873032503803559u64, 1140330566016428u64, 1675726082440190u64, 60029928909786u64, + 170335608866763u64, 766444312315022u64, 2025049511434113u64, 2200845622430647u64, + 1201269851450408u64, 590071752404907u64, 1400995030286946u64, 2152637413853822u64, + 2108495473841983u64, 3855406710349u64, 1726137673168580u64, 51004317200100u64, + 1749082328586939u64, 1704088976144558u64, 1977318954775118u64, 2062602253162400u64, + 948062503217479u64, 361953965048030u64, 1528264887238440u64, 62582552172290u64, + 2241602163389280u64, 156385388121765u64, 2124100319761492u64, 388928050571382u64, + 1556123596922727u64, 979310669812384u64, 113043855206104u64, 2023223924825469u64, + 643651703263034u64, 2234446903655540u64, 1577241261424997u64, 860253174523845u64, + 1691026473082448u64, 1091672764933872u64, 1957463109756365u64, 530699502660193u64, + 349587141723569u64, 674661681919563u64, 1633727303856240u64, 708909037922144u64, + 2160722508518119u64, 1302188051602540u64, 976114603845777u64, 120004758721939u64, + 1681630708873780u64, 622274095069244u64, 1822346309016698u64, 
1100921177951904u64, + 2216952659181677u64, 1844020550362490u64, 1976451368365774u64, 1321101422068822u64, + 1189859436282668u64, 2008801879735257u64, 2219413454333565u64, 424288774231098u64, + 359793146977912u64, 270293357948703u64, 587226003677000u64, 1482071926139945u64, + 1419630774650359u64, 1104739070570175u64, 1662129023224130u64, 1609203612533411u64, + 1250932720691980u64, 95215711818495u64, 498746909028150u64, 158151296991874u64, + 1201379988527734u64, 561599945143989u64, 2211577425617888u64, 2166577612206324u64, + 1057590354233512u64, 1968123280416769u64, 1316586165401313u64, 762728164447634u64, + 2045395244316047u64, 1531796898725716u64, 315385971670425u64, 1109421039396756u64, + 2183635256408562u64, 1896751252659461u64, 840236037179080u64, 796245792277211u64, + 508345890111193u64, 1275386465287222u64, 513560822858784u64, 1784735733120313u64, + 1346467478899695u64, 601125231208417u64, 701076661112726u64, 1841998436455089u64, + 1156768600940434u64, 1967853462343221u64, 2178318463061452u64, 481885520752741u64, + 675262828640945u64, 1033539418596582u64, 1743329872635846u64, 159322641251283u64, + 1573076470127113u64, 954827619308195u64, 778834750662635u64, 619912782122617u64, + 515681498488209u64, 1675866144246843u64, 811716020969981u64, 1125515272217398u64, + 1398917918287342u64, 1301680949183175u64, 726474739583734u64, 587246193475200u64, + 1096581582611864u64, 1469911826213486u64, 1990099711206364u64, 1256496099816508u64, + 2019924615195672u64, 1251232456707555u64, 2042971196009755u64, 214061878479265u64, + 115385726395472u64, 1677875239524132u64, 756888883383540u64, 1153862117756233u64, + 503391530851096u64, 946070017477513u64, 1878319040542579u64, 1101349418586920u64, + 793245696431613u64, 397920495357645u64, 2174023872951112u64, 1517867915189593u64, + 1829855041462995u64, 1046709983503619u64, 424081940711857u64, 2112438073094647u64, + 1504338467349861u64, 2244574127374532u64, 2136937537441911u64, 1741150838990304u64, + 25894628400571u64, 
512213526781178u64, 1168384260796379u64, 1424607682379833u64, + 938677789731564u64, 872882241891896u64, 1713199397007700u64, 1410496326218359u64, + 854379752407031u64, 465141611727634u64, 315176937037857u64, 1020115054571233u64, + 1856290111077229u64, 2028366269898204u64, 1432980880307543u64, 469932710425448u64, + 581165267592247u64, 496399148156603u64, 2063435226705903u64, 2116841086237705u64, + 498272567217048u64, 1829438076967906u64, 1573925801278491u64, 460763576329867u64, + 1705264723728225u64, 999514866082412u64, 29635061779362u64, 1884233592281020u64, + 1449755591461338u64, 42579292783222u64, 1869504355369200u64, 495506004805251u64, + 264073104888427u64, 2088880861028612u64, 104646456386576u64, 1258445191399967u64, + 1348736801545799u64, 2068276361286613u64, 884897216646374u64, 922387476801376u64, + 1043886580402805u64, 1240883498470831u64, 1601554651937110u64, 804382935289482u64, + 512379564477239u64, 1466384519077032u64, 1280698500238386u64, 211303836685749u64, + 2081725624793803u64, 545247644516879u64, 215313359330384u64, 286479751145614u64, + 2213650281751636u64, 2164927945999874u64, 2072162991540882u64, 1443769115444779u64, + 1581473274363095u64, 434633875922699u64, 340456055781599u64, 373043091080189u64, + 839476566531776u64, 1856706858509978u64, 931616224909153u64, 1888181317414065u64, + 213654322650262u64, 1161078103416244u64, 1822042328851513u64, 915817709028812u64, + 1828297056698188u64, 1212017130909403u64, 60258343247333u64, 342085800008230u64, + 930240559508270u64, 1549884999174952u64, 809895264249462u64, 184726257947682u64, + 1157065433504828u64, 1209999630381477u64, 999920399374391u64, 1714770150788163u64, + 2026130985413228u64, 506776632883140u64, 1349042668246528u64, 1937232292976967u64, + 942302637530730u64, 160211904766226u64, 1042724500438571u64, 212454865139142u64, + 244104425172642u64, 1376990622387496u64, 76126752421227u64, 1027540886376422u64, + 1912210655133026u64, 13410411589575u64, 1475856708587773u64, 615563352691682u64, + 
1446629324872644u64, 1683670301784014u64, 1049873327197127u64, 1826401704084838u64, + 2032577048760775u64, 1922203607878853u64, 836708788764806u64, 2193084654695012u64, + 1342923183256659u64, 849356986294271u64, 1228863973965618u64, 94886161081867u64, + 1423288430204892u64, 2016167528707016u64, 1633187660972877u64, 1550621242301752u64, + 340630244512994u64, 2103577710806901u64, 221625016538931u64, 421544147350960u64, + 580428704555156u64, 1479831381265617u64, 518057926544698u64, 955027348790630u64, + 1326749172561598u64, 1118304625755967u64, 1994005916095176u64, 1799757332780663u64, + 751343129396941u64, 1468672898746144u64, 1451689964451386u64, 755070293921171u64, + 904857405877052u64, 1276087530766984u64, 403986562858511u64, 1530661255035337u64, + 1644972908910502u64, 1370170080438957u64, 139839536695744u64, 909930462436512u64, + 1899999215356933u64, 635992381064566u64, 788740975837654u64, 224241231493695u64, + 1267090030199302u64, 998908061660139u64, 1784537499699278u64, 859195370018706u64, + 1953966091439379u64, 2189271820076010u64, 2039067059943978u64, 1526694380855202u64, + 2040321513194941u64, 329922071218689u64, 1953032256401326u64, 989631424403521u64, + 328825014934242u64, 9407151397696u64, 63551373671268u64, 1624728632895792u64, + 1608324920739262u64, 1178239350351945u64, 1198077399579702u64, 277620088676229u64, + 1775359437312528u64, 1653558177737477u64, 1652066043408850u64, 1063359889686622u64, + 1975063804860653u64]; diff --git a/libcrux-hacl-rs/src/lib.rs b/libcrux-hacl-rs/src/lib.rs index 59fc9a8e6..9ec25d1fe 100644 --- a/libcrux-hacl-rs/src/lib.rs +++ b/libcrux-hacl-rs/src/lib.rs @@ -1,9 +1,19 @@ // Utility modules. In the generated hacl-rs, these are individual crates. 
+mod bignum; mod fstar; mod lowstar; +mod util; pub mod hash_sha1; pub mod hash_sha2; -//pub(crate) mod hkdf; pub mod hmac; pub mod streaming_types; + +pub mod bignum25519_51; +pub mod curve25519_51; +pub mod ed25519; +pub mod ed25519_precomptable; + +// things that still are broken: +// +//pub(crate) mod hkdf; diff --git a/libcrux-hacl-rs/src/util.rs b/libcrux-hacl-rs/src/util.rs new file mode 100644 index 000000000..71efc9ccd --- /dev/null +++ b/libcrux-hacl-rs/src/util.rs @@ -0,0 +1,2 @@ +pub mod inttypes_intrinsics; +pub mod memzero0; diff --git a/libcrux-hacl-rs/src/util/inttypes_intrinsics.rs b/libcrux-hacl-rs/src/util/inttypes_intrinsics.rs new file mode 100644 index 000000000..daa304a6f --- /dev/null +++ b/libcrux-hacl-rs/src/util/inttypes_intrinsics.rs @@ -0,0 +1,37 @@ +#![allow(non_snake_case)] +#![allow(non_upper_case_globals)] +#![allow(non_camel_case_types)] +#![allow(unused_assignments)] +#![allow(unreachable_patterns)] + +use crate::fstar; + +pub fn add_carry_u32(cin: u32, x: u32, y: u32, r: &mut [u32]) -> u32 { + let res: u64 = (x as u64).wrapping_add(cin as u64).wrapping_add(y as u64); + let c: u32 = res.wrapping_shr(32u32) as u32; + r[0usize] = res as u32; + c +} + +pub fn sub_borrow_u32(cin: u32, x: u32, y: u32, r: &mut [u32]) -> u32 { + let res: u64 = (x as u64).wrapping_sub(y as u64).wrapping_sub(cin as u64); + let c: u32 = res.wrapping_shr(32u32) as u32 & 1u32; + r[0usize] = res as u32; + c +} + +pub fn add_carry_u64(cin: u64, x: u64, y: u64, r: &mut [u64]) -> u64 { + let res: u64 = x.wrapping_add(cin).wrapping_add(y); + let c: u64 = (!fstar::uint64::gte_mask(res, x) | fstar::uint64::eq_mask(res, x) & cin) & 1u64; + r[0usize] = res; + c +} + +pub fn sub_borrow_u64(cin: u64, x: u64, y: u64, r: &mut [u64]) -> u64 { + let res: u64 = x.wrapping_sub(y).wrapping_sub(cin); + let c: u64 = (fstar::uint64::gte_mask(res, x) & !fstar::uint64::eq_mask(res, x) + | fstar::uint64::eq_mask(res, x) & cin) + & 1u64; + r[0usize] = res; + c +} diff --git 
a/libcrux-hacl-rs/src/util/memzero0.rs b/libcrux-hacl-rs/src/util/memzero0.rs new file mode 100644 index 000000000..180827a53 --- /dev/null +++ b/libcrux-hacl-rs/src/util/memzero0.rs @@ -0,0 +1,6 @@ +pub fn memzero(x: &mut [T], len: u32) { + let zero: T = unsafe { std::mem::zeroed() }; + for i in 0..len { + x[i as usize] = zero; + } +} diff --git a/src/signature.rs b/src/signature.rs index 4a140cccc..29380e952 100644 --- a/src/signature.rs +++ b/src/signature.rs @@ -4,10 +4,8 @@ //! * EdDSA 25519 //! * RSA PSS -use crate::{ - ecdh, - hacl::{self, ed25519}, -}; +use crate::{ecdh, hacl}; +use libcrux_hacl_rs::ed25519; use rand::{CryptoRng, Rng, RngCore}; use self::rsa_pss::RsaPssSignature; @@ -395,11 +393,13 @@ pub fn sign( )? } Algorithm::Ed25519 => { - let signature = ed25519::sign( - payload, + let mut signature = [0u8; 64]; + ed25519::sign( + &mut signature, private_key.try_into().map_err(|_| Error::SigningError)?, - ) - .map_err(into_signing_error)?; + payload.len() as u32, + payload, + ); Signature::Ed25519(Ed25519Signature { signature }) } Algorithm::RsaPss(_) => { @@ -461,7 +461,16 @@ pub fn verify(payload: &[u8], signature: &Signature, public_key: &[u8]) -> Resul .map_err(into_verify_error), Signature::Ed25519(signature) => { let public_key = public_key.try_into().map_err(|_| Error::InvalidSignature)?; - ed25519::verify(payload, public_key, &signature.signature).map_err(into_verify_error) + if ed25519::verify( + public_key, + payload.len() as u32, + payload, + &signature.signature, + ) { + Ok(()) + } else { + Err(Error::InvalidSignature) + } } Signature::RsaPss(_) => todo!(), } @@ -498,7 +507,8 @@ pub fn key_gen( break; } - let pk = ed25519::secret_to_public(&sk); + let mut pk = [0u8; 32]; + ed25519::secret_to_public(&mut pk, &sk); Ok((sk.to_vec(), pk.to_vec())) } From 10bed788587fa16ee364a312f43b4503009fbe36 Mon Sep 17 00:00:00 2001 From: "Jan Winkelmann (keks)" Date: Tue, 5 Nov 2024 18:30:07 +0100 Subject: [PATCH 09/18] use ed25519 better --- 
src/hacl/ed25519.rs | 34 ++++++++++++++-------------------- src/signature.rs | 30 ++++++++++-------------------- 2 files changed, 24 insertions(+), 40 deletions(-) diff --git a/src/hacl/ed25519.rs b/src/hacl/ed25519.rs index 1df534b6a..048241386 100644 --- a/src/hacl/ed25519.rs +++ b/src/hacl/ed25519.rs @@ -1,4 +1,4 @@ -use libcrux_hacl::{Hacl_Ed25519_secret_to_public, Hacl_Ed25519_sign, Hacl_Ed25519_verify}; +use libcrux_hacl_rs::ed25519; #[derive(Debug, Clone, Copy, PartialEq, Eq)] pub enum Error { @@ -8,27 +8,23 @@ pub enum Error { pub fn sign(payload: &[u8], private_key: &[u8; 32]) -> Result<[u8; 64], Error> { let mut signature = [0u8; 64]; - unsafe { - Hacl_Ed25519_sign( - signature.as_mut_ptr(), - private_key.as_ptr() as _, - payload.len().try_into().map_err(|_| Error::SigningError)?, - payload.as_ptr() as _, - ); - } + ed25519::sign( + &mut signature, + private_key, + payload.len().try_into().map_err(|_| Error::SigningError)?, + payload, + ); Ok(signature) } pub fn verify(payload: &[u8], public_key: &[u8; 32], signature: &[u8; 64]) -> Result<(), Error> { - if unsafe { - Hacl_Ed25519_verify( - public_key.as_ptr() as _, - payload.len().try_into().map_err(|_| Error::SigningError)?, - payload.as_ptr() as _, - signature.as_ptr() as _, - ) - } { + if ed25519::verify( + public_key, + payload.len().try_into().map_err(|_| Error::SigningError)?, + payload, + signature, + ) { Ok(()) } else { Err(Error::InvalidSignature) @@ -38,8 +34,6 @@ pub fn verify(payload: &[u8], public_key: &[u8; 32], signature: &[u8; 64]) -> Re /// Compute the public point for the given secret key `sk`. pub fn secret_to_public(sk: &[u8; 32]) -> [u8; 32] { let mut out = [0u8; 32]; - unsafe { - Hacl_Ed25519_secret_to_public(out.as_mut_ptr(), sk.as_ptr() as _); - } + ed25519::secret_to_public(&mut out, sk); out } diff --git a/src/signature.rs b/src/signature.rs index 29380e952..4a140cccc 100644 --- a/src/signature.rs +++ b/src/signature.rs @@ -4,8 +4,10 @@ //! * EdDSA 25519 //! 
* RSA PSS -use crate::{ecdh, hacl}; -use libcrux_hacl_rs::ed25519; +use crate::{ + ecdh, + hacl::{self, ed25519}, +}; use rand::{CryptoRng, Rng, RngCore}; use self::rsa_pss::RsaPssSignature; @@ -393,13 +395,11 @@ pub fn sign( )? } Algorithm::Ed25519 => { - let mut signature = [0u8; 64]; - ed25519::sign( - &mut signature, - private_key.try_into().map_err(|_| Error::SigningError)?, - payload.len() as u32, + let signature = ed25519::sign( payload, - ); + private_key.try_into().map_err(|_| Error::SigningError)?, + ) + .map_err(into_signing_error)?; Signature::Ed25519(Ed25519Signature { signature }) } Algorithm::RsaPss(_) => { @@ -461,16 +461,7 @@ pub fn verify(payload: &[u8], signature: &Signature, public_key: &[u8]) -> Resul .map_err(into_verify_error), Signature::Ed25519(signature) => { let public_key = public_key.try_into().map_err(|_| Error::InvalidSignature)?; - if ed25519::verify( - public_key, - payload.len() as u32, - payload, - &signature.signature, - ) { - Ok(()) - } else { - Err(Error::InvalidSignature) - } + ed25519::verify(payload, public_key, &signature.signature).map_err(into_verify_error) } Signature::RsaPss(_) => todo!(), } @@ -507,8 +498,7 @@ pub fn key_gen( break; } - let mut pk = [0u8; 32]; - ed25519::secret_to_public(&mut pk, &sk); + let pk = ed25519::secret_to_public(&sk); Ok((sk.to_vec(), pk.to_vec())) } From 309be7aadaad28e96540488bc80a27fe4b56efc9 Mon Sep 17 00:00:00 2001 From: "Jan Winkelmann (keks)" Date: Wed, 6 Nov 2024 11:41:46 +0100 Subject: [PATCH 10/18] fix length checks in hmac and sha2 --- libcrux-hmac/src/hmac.rs | 45 ++++++++++++++++++++++++++++++---------- src/digest.rs | 8 +++++-- 2 files changed, 40 insertions(+), 13 deletions(-) diff --git a/libcrux-hmac/src/hmac.rs b/libcrux-hmac/src/hmac.rs index dab739650..10b22e530 100644 --- a/libcrux-hmac/src/hmac.rs +++ b/libcrux-hmac/src/hmac.rs @@ -4,11 +4,6 @@ use libcrux_hkdf as hkdf; -use libcrux_hacl_rs::hmac::compute_sha1 as hmac_sha1; -use libcrux_hacl_rs::hmac::compute_sha2_256 as 
hmac_sha256; -use libcrux_hacl_rs::hmac::compute_sha2_384 as hmac_sha384; -use libcrux_hacl_rs::hmac::compute_sha2_512 as hmac_sha512; - /// The HMAC algorithm defining the used hash function. #[derive(Copy, Clone, Debug, PartialEq)] pub enum Algorithm { @@ -43,19 +38,47 @@ pub const fn tag_size(alg: Algorithm) -> usize { /// Compute the HMAC value with the given `alg` and `key` on `data` with an /// output tag length of `tag_length`. /// Returns a vector of length `tag_length`. +/// Panics if either `key` or `data` are longer than `u32::MAX`. pub fn hmac(alg: Algorithm, key: &[u8], data: &[u8], tag_length: Option) -> Vec { let native_tag_length = tag_size(alg); let tag_length = match tag_length { Some(v) => v, None => native_tag_length, }; - let mut dst = vec![0u8; native_tag_length]; - match alg { - Algorithm::Sha1 => hmac_sha1(&mut dst, key, key.len() as u32, data, data.len() as u32), - Algorithm::Sha256 => hmac_sha256(&mut dst, key, key.len() as u32, data, data.len() as u32), - Algorithm::Sha384 => hmac_sha384(&mut dst, key, key.len() as u32, data, data.len() as u32), - Algorithm::Sha512 => hmac_sha512(&mut dst, key, key.len() as u32, data, data.len() as u32), + let mut dst: Vec<_> = match alg { + Algorithm::Sha1 => wrap_bufalloc(|buf| hmac_sha1(buf, key, data)).into(), + Algorithm::Sha256 => wrap_bufalloc(|buf| hmac_sha256(buf, key, data)).into(), + Algorithm::Sha384 => wrap_bufalloc(|buf| hmac_sha384(buf, key, data)).into(), + Algorithm::Sha512 => wrap_bufalloc(|buf| hmac_sha512(buf, key, data)).into(), }; dst.truncate(tag_length); dst } + +fn wrap_bufalloc(f: F) -> [u8; N] { + let mut buf = [0u8; N]; + f(&mut buf); + buf +} + +macro_rules! impl_hmac { + ($name:ident,$fun:path,$tag_len:literal) => { + /// Compute HMAC. + /// + /// Note that this function panics if `key` or `data` is larger than 2**32 bytes. 
+ pub fn $name(dst: &mut [u8; $tag_len], key: &[u8], data: &[u8]) { + $fun( + dst, + key, + key.len().try_into().unwrap(), + data, + data.len().try_into().unwrap(), + ) + } + }; +} + +impl_hmac!(hmac_sha1, libcrux_hacl_rs::hmac::compute_sha1, 20); +impl_hmac!(hmac_sha256, libcrux_hacl_rs::hmac::compute_sha2_256, 32); +impl_hmac!(hmac_sha384, libcrux_hacl_rs::hmac::compute_sha2_384, 32); +impl_hmac!(hmac_sha512, libcrux_hacl_rs::hmac::compute_sha2_512, 32); diff --git a/src/digest.rs b/src/digest.rs index a45a0e3b3..5384cab19 100644 --- a/src/digest.rs +++ b/src/digest.rs @@ -268,8 +268,10 @@ macro_rules! impl_hash { impl $name { /// Return the digest for the given input byte slice, in immediate mode. + /// Will panic if `payload` is longer than `u32::MAX`. pub fn hash(digest: &mut [u8; $digest_size], payload: &[u8]) { - $hash(digest, payload, payload.len() as u32) + let payload_len = payload.len().try_into().unwrap(); + $hash(digest, payload, payload_len) } /// Initialize a new digest state for streaming use. @@ -278,8 +280,10 @@ macro_rules! impl_hash { } /// Add the `payload` to the digest. + /// Will panic if `payload` is longer than `u32::MAX`. pub fn update(&mut self, payload: &[u8]) { - $update(self.state.as_mut(), payload, payload.len() as u32); + let payload_len = payload.len().try_into().unwrap(); + $update(self.state.as_mut(), payload, payload_len); } /// Get the digest. 
From 757b0fe05007457bc7046862414e2392669e5de7 Mon Sep 17 00:00:00 2001 From: "Jan Winkelmann (keks)" Date: Wed, 6 Nov 2024 11:41:59 +0100 Subject: [PATCH 11/18] add hacl-rs hkdf --- Cargo.lock | 2 +- libcrux-hacl-rs/src/hkdf.rs | 178 +++++++++++++++++++++--------- libcrux-hacl-rs/src/lib.rs | 5 +- libcrux-hkdf/Cargo.toml | 3 +- libcrux-hkdf/src/hacl_hkdf.rs | 202 +++++++++++++++++++++++----------- libcrux-hkdf/src/hkdf.rs | 72 +++++++----- 6 files changed, 313 insertions(+), 149 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 59cbb1e11..821ca500e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1006,7 +1006,7 @@ version = "0.1.0" name = "libcrux-hkdf" version = "0.0.2-beta.2" dependencies = [ - "libcrux-hacl", + "libcrux-hacl-rs", ] [[package]] diff --git a/libcrux-hacl-rs/src/hkdf.rs b/libcrux-hacl-rs/src/hkdf.rs index d05f39fe6..242cb8f6b 100644 --- a/libcrux-hacl-rs/src/hkdf.rs +++ b/libcrux-hacl-rs/src/hkdf.rs @@ -4,6 +4,8 @@ #![allow(unused_assignments)] #![allow(unreachable_patterns)] +use crate::lowstar; + /** Expand pseudorandom key to desired length. 
@@ -27,18 +29,27 @@ pub fn expand_sha2_256( let output: (&mut [u8], &mut [u8]) = okm.split_at_mut(0usize); let mut text: Box<[u8]> = vec![0u8; tlen.wrapping_add(infolen).wrapping_add(1u32) as usize].into_boxed_slice(); - let text0: (&mut [u8], &mut [u8]) = text.split_at_mut(tlen as usize); - let tag: (&mut [u8], &mut [u8]) = text0.1.split_at_mut(0usize - tlen as usize); - let ctr: (&mut [u8], &mut [u8]) = tag.1.split_at_mut(tlen.wrapping_add(infolen) as usize); ((&mut (&mut text)[tlen as usize..])[0usize..infolen as usize]) .copy_from_slice(&info[0usize..infolen as usize]); + let mut tag: Box<[u8]> = vec![0u8; tlen as usize].into_boxed_slice(); for i in 0u32..n { + let ctr: (&mut [u8], &mut [u8]) = text.split_at_mut(tlen.wrapping_add(infolen) as usize); ctr.1[0usize] = i.wrapping_add(1u32) as u8; + lowstar::ignore::ignore::<&[u8]>(&text); + let text0: (&[u8], &[u8]) = text.split_at(tlen as usize); if i == 0u32 { - crate::hmac::compute_sha2_256(ctr.0, prk, prklen, tag.0, infolen.wrapping_add(1u32)) + crate::hmac::compute_sha2_256( + &mut tag, + prk, + prklen, + text0.1, + infolen.wrapping_add(1u32), + ) } else { + ((&mut (&mut text)[0usize..])[0usize..tlen as usize]) + .copy_from_slice(&(&tag)[0usize..tlen as usize]); crate::hmac::compute_sha2_256( - ctr.0, + &mut tag, prk, prklen, &text, @@ -46,15 +57,26 @@ pub fn expand_sha2_256( ) }; ((&mut output.1[i.wrapping_mul(tlen) as usize..])[0usize..tlen as usize]) - .copy_from_slice(&ctr.0[0usize..tlen as usize]) + .copy_from_slice(&(&tag)[0usize..tlen as usize]) } if n.wrapping_mul(tlen) < len { + let ctr: (&mut [u8], &mut [u8]) = text.split_at_mut(tlen.wrapping_add(infolen) as usize); ctr.1[0usize] = n.wrapping_add(1u32) as u8; + lowstar::ignore::ignore::<&[u8]>(&text); + let text0: (&[u8], &[u8]) = text.split_at(tlen as usize); if n == 0u32 { - crate::hmac::compute_sha2_256(ctr.0, prk, prklen, tag.0, infolen.wrapping_add(1u32)) + crate::hmac::compute_sha2_256( + &mut tag, + prk, + prklen, + text0.1, + 
infolen.wrapping_add(1u32), + ) } else { + ((&mut (&mut text)[0usize..])[0usize..tlen as usize]) + .copy_from_slice(&(&tag)[0usize..tlen as usize]); crate::hmac::compute_sha2_256( - ctr.0, + &mut tag, prk, prklen, &text, @@ -63,7 +85,7 @@ pub fn expand_sha2_256( }; let block: (&mut [u8], &mut [u8]) = output.1.split_at_mut(n.wrapping_mul(tlen) as usize); (block.1[0usize..len.wrapping_sub(n.wrapping_mul(tlen)) as usize]).copy_from_slice( - &(&ctr.0[0usize..])[0usize..len.wrapping_sub(n.wrapping_mul(tlen)) as usize], + &(&(&tag)[0usize..])[0usize..len.wrapping_sub(n.wrapping_mul(tlen)) as usize], ) } } @@ -104,18 +126,27 @@ pub fn expand_sha2_384( let output: (&mut [u8], &mut [u8]) = okm.split_at_mut(0usize); let mut text: Box<[u8]> = vec![0u8; tlen.wrapping_add(infolen).wrapping_add(1u32) as usize].into_boxed_slice(); - let text0: (&mut [u8], &mut [u8]) = text.split_at_mut(tlen as usize); - let tag: (&mut [u8], &mut [u8]) = text0.1.split_at_mut(0usize - tlen as usize); - let ctr: (&mut [u8], &mut [u8]) = tag.1.split_at_mut(tlen.wrapping_add(infolen) as usize); ((&mut (&mut text)[tlen as usize..])[0usize..infolen as usize]) .copy_from_slice(&info[0usize..infolen as usize]); + let mut tag: Box<[u8]> = vec![0u8; tlen as usize].into_boxed_slice(); for i in 0u32..n { + let ctr: (&mut [u8], &mut [u8]) = text.split_at_mut(tlen.wrapping_add(infolen) as usize); ctr.1[0usize] = i.wrapping_add(1u32) as u8; + lowstar::ignore::ignore::<&[u8]>(&text); + let text0: (&[u8], &[u8]) = text.split_at(tlen as usize); if i == 0u32 { - crate::hmac::compute_sha2_384(ctr.0, prk, prklen, tag.0, infolen.wrapping_add(1u32)) + crate::hmac::compute_sha2_384( + &mut tag, + prk, + prklen, + text0.1, + infolen.wrapping_add(1u32), + ) } else { + ((&mut (&mut text)[0usize..])[0usize..tlen as usize]) + .copy_from_slice(&(&tag)[0usize..tlen as usize]); crate::hmac::compute_sha2_384( - ctr.0, + &mut tag, prk, prklen, &text, @@ -123,15 +154,26 @@ pub fn expand_sha2_384( ) }; ((&mut 
output.1[i.wrapping_mul(tlen) as usize..])[0usize..tlen as usize]) - .copy_from_slice(&ctr.0[0usize..tlen as usize]) + .copy_from_slice(&(&tag)[0usize..tlen as usize]) } if n.wrapping_mul(tlen) < len { + let ctr: (&mut [u8], &mut [u8]) = text.split_at_mut(tlen.wrapping_add(infolen) as usize); ctr.1[0usize] = n.wrapping_add(1u32) as u8; + lowstar::ignore::ignore::<&[u8]>(&text); + let text0: (&[u8], &[u8]) = text.split_at(tlen as usize); if n == 0u32 { - crate::hmac::compute_sha2_384(ctr.0, prk, prklen, tag.0, infolen.wrapping_add(1u32)) + crate::hmac::compute_sha2_384( + &mut tag, + prk, + prklen, + text0.1, + infolen.wrapping_add(1u32), + ) } else { + ((&mut (&mut text)[0usize..])[0usize..tlen as usize]) + .copy_from_slice(&(&tag)[0usize..tlen as usize]); crate::hmac::compute_sha2_384( - ctr.0, + &mut tag, prk, prklen, &text, @@ -140,7 +182,7 @@ pub fn expand_sha2_384( }; let block: (&mut [u8], &mut [u8]) = output.1.split_at_mut(n.wrapping_mul(tlen) as usize); (block.1[0usize..len.wrapping_sub(n.wrapping_mul(tlen)) as usize]).copy_from_slice( - &(&ctr.0[0usize..])[0usize..len.wrapping_sub(n.wrapping_mul(tlen)) as usize], + &(&(&tag)[0usize..])[0usize..len.wrapping_sub(n.wrapping_mul(tlen)) as usize], ) } } @@ -181,18 +223,27 @@ pub fn expand_sha2_512( let output: (&mut [u8], &mut [u8]) = okm.split_at_mut(0usize); let mut text: Box<[u8]> = vec![0u8; tlen.wrapping_add(infolen).wrapping_add(1u32) as usize].into_boxed_slice(); - let text0: (&mut [u8], &mut [u8]) = text.split_at_mut(tlen as usize); - let tag: (&mut [u8], &mut [u8]) = text0.1.split_at_mut(0usize - tlen as usize); - let ctr: (&mut [u8], &mut [u8]) = tag.1.split_at_mut(tlen.wrapping_add(infolen) as usize); ((&mut (&mut text)[tlen as usize..])[0usize..infolen as usize]) .copy_from_slice(&info[0usize..infolen as usize]); + let mut tag: Box<[u8]> = vec![0u8; tlen as usize].into_boxed_slice(); for i in 0u32..n { + let ctr: (&mut [u8], &mut [u8]) = text.split_at_mut(tlen.wrapping_add(infolen) as usize); 
ctr.1[0usize] = i.wrapping_add(1u32) as u8; + lowstar::ignore::ignore::<&[u8]>(&text); + let text0: (&[u8], &[u8]) = text.split_at(tlen as usize); if i == 0u32 { - crate::hmac::compute_sha2_512(ctr.0, prk, prklen, tag.0, infolen.wrapping_add(1u32)) + crate::hmac::compute_sha2_512( + &mut tag, + prk, + prklen, + text0.1, + infolen.wrapping_add(1u32), + ) } else { + ((&mut (&mut text)[0usize..])[0usize..tlen as usize]) + .copy_from_slice(&(&tag)[0usize..tlen as usize]); crate::hmac::compute_sha2_512( - ctr.0, + &mut tag, prk, prklen, &text, @@ -200,15 +251,26 @@ pub fn expand_sha2_512( ) }; ((&mut output.1[i.wrapping_mul(tlen) as usize..])[0usize..tlen as usize]) - .copy_from_slice(&ctr.0[0usize..tlen as usize]) + .copy_from_slice(&(&tag)[0usize..tlen as usize]) } if n.wrapping_mul(tlen) < len { + let ctr: (&mut [u8], &mut [u8]) = text.split_at_mut(tlen.wrapping_add(infolen) as usize); ctr.1[0usize] = n.wrapping_add(1u32) as u8; + lowstar::ignore::ignore::<&[u8]>(&text); + let text0: (&[u8], &[u8]) = text.split_at(tlen as usize); if n == 0u32 { - crate::hmac::compute_sha2_512(ctr.0, prk, prklen, tag.0, infolen.wrapping_add(1u32)) + crate::hmac::compute_sha2_512( + &mut tag, + prk, + prklen, + text0.1, + infolen.wrapping_add(1u32), + ) } else { + ((&mut (&mut text)[0usize..])[0usize..tlen as usize]) + .copy_from_slice(&(&tag)[0usize..tlen as usize]); crate::hmac::compute_sha2_512( - ctr.0, + &mut tag, prk, prklen, &text, @@ -217,7 +279,7 @@ pub fn expand_sha2_512( }; let block: (&mut [u8], &mut [u8]) = output.1.split_at_mut(n.wrapping_mul(tlen) as usize); (block.1[0usize..len.wrapping_sub(n.wrapping_mul(tlen)) as usize]).copy_from_slice( - &(&ctr.0[0usize..])[0usize..len.wrapping_sub(n.wrapping_mul(tlen)) as usize], + &(&(&tag)[0usize..])[0usize..len.wrapping_sub(n.wrapping_mul(tlen)) as usize], ) } } @@ -235,7 +297,7 @@ pub fn extract_sha2_512(prk: &mut [u8], salt: &[u8], saltlen: u32, ikm: &[u8], i crate::hmac::compute_sha2_512(prk, salt, saltlen, ikm, ikmlen) } -/* 
no blake2 for now +/* /** Expand pseudorandom key to desired length. @@ -259,24 +321,27 @@ pub fn expand_blake2s_32( let output: (&mut [u8], &mut [u8]) = okm.split_at_mut(0usize); let mut text: Box<[u8]> = vec![0u8; tlen.wrapping_add(infolen).wrapping_add(1u32) as usize].into_boxed_slice(); - let text0: (&mut [u8], &mut [u8]) = text.split_at_mut(tlen as usize); - let tag: (&mut [u8], &mut [u8]) = text0.1.split_at_mut(0usize - tlen as usize); - let ctr: (&mut [u8], &mut [u8]) = tag.1.split_at_mut(tlen.wrapping_add(infolen) as usize); ((&mut (&mut text)[tlen as usize..])[0usize..infolen as usize]) .copy_from_slice(&info[0usize..infolen as usize]); + let mut tag: Box<[u8]> = vec![0u8; tlen as usize].into_boxed_slice(); for i in 0u32..n { + let ctr: (&mut [u8], &mut [u8]) = text.split_at_mut(tlen.wrapping_add(infolen) as usize); ctr.1[0usize] = i.wrapping_add(1u32) as u8; + lowstar::ignore::ignore::<&[u8]>(&text); + let text0: (&[u8], &[u8]) = text.split_at(tlen as usize); if i == 0u32 { crate::hmac::compute_blake2s_32( - ctr.0, + &mut tag, prk, prklen, - tag.0, + text0.1, infolen.wrapping_add(1u32), ) } else { + ((&mut (&mut text)[0usize..])[0usize..tlen as usize]) + .copy_from_slice(&(&tag)[0usize..tlen as usize]); crate::hmac::compute_blake2s_32( - ctr.0, + &mut tag, prk, prklen, &text, @@ -284,21 +349,26 @@ pub fn expand_blake2s_32( ) }; ((&mut output.1[i.wrapping_mul(tlen) as usize..])[0usize..tlen as usize]) - .copy_from_slice(&ctr.0[0usize..tlen as usize]) + .copy_from_slice(&(&tag)[0usize..tlen as usize]) } if n.wrapping_mul(tlen) < len { + let ctr: (&mut [u8], &mut [u8]) = text.split_at_mut(tlen.wrapping_add(infolen) as usize); ctr.1[0usize] = n.wrapping_add(1u32) as u8; + lowstar::ignore::ignore::<&[u8]>(&text); + let text0: (&[u8], &[u8]) = text.split_at(tlen as usize); if n == 0u32 { crate::hmac::compute_blake2s_32( - ctr.0, + &mut tag, prk, prklen, - tag.0, + text0.1, infolen.wrapping_add(1u32), ) } else { + ((&mut (&mut text)[0usize..])[0usize..tlen as 
usize]) + .copy_from_slice(&(&tag)[0usize..tlen as usize]); crate::hmac::compute_blake2s_32( - ctr.0, + &mut tag, prk, prklen, &text, @@ -307,7 +377,7 @@ pub fn expand_blake2s_32( }; let block: (&mut [u8], &mut [u8]) = output.1.split_at_mut(n.wrapping_mul(tlen) as usize); (block.1[0usize..len.wrapping_sub(n.wrapping_mul(tlen)) as usize]).copy_from_slice( - &(&ctr.0[0usize..])[0usize..len.wrapping_sub(n.wrapping_mul(tlen)) as usize], + &(&(&tag)[0usize..])[0usize..len.wrapping_sub(n.wrapping_mul(tlen)) as usize], ) } } @@ -348,24 +418,27 @@ pub fn expand_blake2b_32( let output: (&mut [u8], &mut [u8]) = okm.split_at_mut(0usize); let mut text: Box<[u8]> = vec![0u8; tlen.wrapping_add(infolen).wrapping_add(1u32) as usize].into_boxed_slice(); - let text0: (&mut [u8], &mut [u8]) = text.split_at_mut(tlen as usize); - let tag: (&mut [u8], &mut [u8]) = text0.1.split_at_mut(0usize - tlen as usize); - let ctr: (&mut [u8], &mut [u8]) = tag.1.split_at_mut(tlen.wrapping_add(infolen) as usize); ((&mut (&mut text)[tlen as usize..])[0usize..infolen as usize]) .copy_from_slice(&info[0usize..infolen as usize]); + let mut tag: Box<[u8]> = vec![0u8; tlen as usize].into_boxed_slice(); for i in 0u32..n { + let ctr: (&mut [u8], &mut [u8]) = text.split_at_mut(tlen.wrapping_add(infolen) as usize); ctr.1[0usize] = i.wrapping_add(1u32) as u8; + lowstar::ignore::ignore::<&[u8]>(&text); + let text0: (&[u8], &[u8]) = text.split_at(tlen as usize); if i == 0u32 { crate::hmac::compute_blake2b_32( - ctr.0, + &mut tag, prk, prklen, - tag.0, + text0.1, infolen.wrapping_add(1u32), ) } else { + ((&mut (&mut text)[0usize..])[0usize..tlen as usize]) + .copy_from_slice(&(&tag)[0usize..tlen as usize]); crate::hmac::compute_blake2b_32( - ctr.0, + &mut tag, prk, prklen, &text, @@ -373,21 +446,26 @@ pub fn expand_blake2b_32( ) }; ((&mut output.1[i.wrapping_mul(tlen) as usize..])[0usize..tlen as usize]) - .copy_from_slice(&ctr.0[0usize..tlen as usize]) + .copy_from_slice(&(&tag)[0usize..tlen as usize]) } if 
n.wrapping_mul(tlen) < len { + let ctr: (&mut [u8], &mut [u8]) = text.split_at_mut(tlen.wrapping_add(infolen) as usize); ctr.1[0usize] = n.wrapping_add(1u32) as u8; + lowstar::ignore::ignore::<&[u8]>(&text); + let text0: (&[u8], &[u8]) = text.split_at(tlen as usize); if n == 0u32 { crate::hmac::compute_blake2b_32( - ctr.0, + &mut tag, prk, prklen, - tag.0, + text0.1, infolen.wrapping_add(1u32), ) } else { + ((&mut (&mut text)[0usize..])[0usize..tlen as usize]) + .copy_from_slice(&(&tag)[0usize..tlen as usize]); crate::hmac::compute_blake2b_32( - ctr.0, + &mut tag, prk, prklen, &text, @@ -396,7 +474,7 @@ pub fn expand_blake2b_32( }; let block: (&mut [u8], &mut [u8]) = output.1.split_at_mut(n.wrapping_mul(tlen) as usize); (block.1[0usize..len.wrapping_sub(n.wrapping_mul(tlen)) as usize]).copy_from_slice( - &(&ctr.0[0usize..])[0usize..len.wrapping_sub(n.wrapping_mul(tlen)) as usize], + &(&(&tag)[0usize..])[0usize..len.wrapping_sub(n.wrapping_mul(tlen)) as usize], ) } } diff --git a/libcrux-hacl-rs/src/lib.rs b/libcrux-hacl-rs/src/lib.rs index 9ec25d1fe..589739883 100644 --- a/libcrux-hacl-rs/src/lib.rs +++ b/libcrux-hacl-rs/src/lib.rs @@ -6,6 +6,7 @@ mod util; pub mod hash_sha1; pub mod hash_sha2; +pub mod hkdf; pub mod hmac; pub mod streaming_types; @@ -13,7 +14,3 @@ pub mod bignum25519_51; pub mod curve25519_51; pub mod ed25519; pub mod ed25519_precomptable; - -// things that still are broken: -// -//pub(crate) mod hkdf; diff --git a/libcrux-hkdf/Cargo.toml b/libcrux-hkdf/Cargo.toml index af1715038..c3593e3ba 100644 --- a/libcrux-hkdf/Cargo.toml +++ b/libcrux-hkdf/Cargo.toml @@ -13,5 +13,4 @@ description = "Libcrux HKDF implementation" path = "src/hkdf.rs" [dependencies] -libcrux-hacl = { version = "=0.0.2-beta.2", path = "../sys/hacl" } - +libcrux-hacl-rs = { path = "../libcrux-hacl-rs/" } diff --git a/libcrux-hkdf/src/hacl_hkdf.rs b/libcrux-hkdf/src/hacl_hkdf.rs index 346060e04..5b4a06dfd 100644 --- a/libcrux-hkdf/src/hacl_hkdf.rs +++ 
b/libcrux-hkdf/src/hacl_hkdf.rs @@ -1,36 +1,101 @@ #![allow(dead_code)] -/// HKDF Errors. -#[derive(Debug, Clone, Copy, PartialEq, Eq)] -pub enum Error { - /// The requested output key material in expand was too large for the used - /// hash function. - OkmTooLarge, - /// At least one function argument has been too large to process. - ArgumentsTooLarge, +use crate::{Algorithm, Error}; + +pub trait HkdfMode { + /// The hash algorithm used in this HKDF mode. + const MODE: Algorithm; + + /// HKDF extract using the `salt` and the input key material `ikm`. + /// The result is written to `prk`. + /// + /// Note that this function panics if `salt` or `ikm` is longer than (2**32 - 1) bytes. + fn extract(prk: &mut [u8; HASH_LEN], salt: &[u8], ikm: &[u8]); + + /// HKDF expand using the pre-key material `prk` and `info`. The output length + /// is defined through the type of the `okm` parameter, that the output is written to. + /// + /// Returns nothing on success. + /// Returns [`Error::OkmTooLarge`] if the requested `okm_len` is too large. + /// Returns [`Error::ArgumentsTooLarge`] if one of `salt` or `ikm` are longer than, or + /// `OKM_LEN` is larger than (2**32 - 1) bytes. + fn expand( + okm: &mut [u8; OKM_LEN], + prk: &[u8], + info: &[u8], + ) -> Result<(), Error>; + + /// HKDF expand using the pre-key material `prk` and `info`. The output length + /// is defined by the parameter `okm_len`. + /// + /// Returns the key material in an array of length `okm_len` on success. + /// Returns [`Error::OkmTooLarge`] if the requested `okm_len` is too large. + /// Returns [`Error::ArgumentsTooLarge`] if `salt` or `ikm` is longer than + /// (2**32 - 1) bytes. + fn expand_vec(prk: &[u8], info: &[u8], okm_len: usize) -> Result, Error>; + + /// HKDF using the `salt`, input key material `ikm`, `info`. The output length + /// is defined through the result type. + /// Calls `extract` and `expand` with the given input. + /// + /// Returns the key material in an array of length `okm_len`. 
+ /// Note that this function panics if `salt` or `ikm` is longer than (2**32 - 1) bytes. + fn hkdf( + okm: &mut [u8; OKM_LEN], + salt: &[u8], + ikm: &[u8], + info: &[u8], + ) -> Result<(), Error> { + let mut prk = [0u8; HASH_LEN]; + Self::extract(&mut prk, salt, ikm); + Self::expand(okm, &prk, info) + } + /// HKDF using the `salt`, input key material `ikm`, `info`. The output length + /// is defined by the parameter `okm_len`. + /// Calls `extract` and `expand` with the given input. + /// + /// Returns the key material in an array of length `okm_len`. + /// Note that this function panics if `salt` or `ikm` is longer than (2**32 - 1) bytes. + fn hkdf_vec(salt: &[u8], ikm: &[u8], info: &[u8], okm_len: usize) -> Result, Error> { + let mut prk = [0u8; HASH_LEN]; + Self::extract(&mut prk, salt, ikm); + Self::expand_vec(&prk, info, okm_len) + } } macro_rules! impl_hkdf { - ($name:ident,$extract:ident,$expand:ident,$tag_len:literal) => { + ($sname:ident,$name:ident, $mode:path, $extract:ident, $expand:ident,$hash_len:literal) => { + pub struct $sname; + pub mod $name { - use super::Error; + use super::{checked_u32, $sname, Algorithm, Error, HkdfMode}; + + impl HkdfMode<$hash_len> for $sname { + const MODE: Algorithm = $mode; + + fn extract(prk: &mut [u8; $hash_len], salt: &[u8], ikm: &[u8]) { + extract(prk, salt, ikm) + } + + fn expand( + okm: &mut [u8; OKM_LEN], + prk: &[u8], + info: &[u8], + ) -> Result<(), Error> { + expand(okm, prk, info) + } + + fn expand_vec(prk: &[u8], info: &[u8], okm_len: usize) -> Result, Error> { + vec::expand(prk, info, okm_len) + } + } /// HKDF extract using the `salt`, and the input key material `ikm`. - /// Returns the pre-key material in an array of tag length. + /// Returns the pre-key material in an array of hash length. /// - /// Note that this function panics if `salt` or `ikm` is larger than 2**32 bytes. 
- pub fn extract(salt: &[u8], ikm: &[u8]) -> [u8; $tag_len] { - let mut prk = [0u8; $tag_len]; - unsafe { - libcrux_hacl::$extract( - prk.as_mut_ptr(), - salt.as_ptr() as _, - salt.len().try_into().unwrap(), - ikm.as_ptr() as _, - ikm.len().try_into().unwrap(), - ); - } - prk + /// Note that this function panics if `salt` or `ikm` is longer than (2**32 - 1) bytes. + pub fn extract(prk: &mut [u8; $hash_len], salt: &[u8], ikm: &[u8]) { + $sname::extract(prk, salt, ikm) } /// HKDF expand using the pre-key material `prk` and `info`. The output length @@ -41,27 +106,24 @@ macro_rules! impl_hkdf { /// Note that this function returns an [`Error::ArgumentsTooLarge`] /// if `salt`, `ikm`, or `OKM_LEN` is larger than 2**32 bytes. pub fn expand( + okm: &mut [u8; OKM_LEN], prk: &[u8], info: &[u8], - ) -> Result<[u8; OKM_LEN], Error> { - if OKM_LEN > 255 * $tag_len { + ) -> Result<(), Error> { + if OKM_LEN > 255 * $hash_len { // Output size is too large. HACL doesn't catch this. return Err(Error::OkmTooLarge); } - let mut okm = [0u8; OKM_LEN]; - unsafe { - libcrux_hacl::$expand( - okm.as_mut_ptr(), - prk.as_ptr() as _, - prk.len().try_into().map_err(|_| Error::ArgumentsTooLarge)?, - info.as_ptr() as _, - info.len() - .try_into() - .map_err(|_| Error::ArgumentsTooLarge)?, - OKM_LEN.try_into().map_err(|_| Error::ArgumentsTooLarge)?, - ); - } - Ok(okm) + + libcrux_hacl_rs::hkdf::$expand( + okm, + prk, + checked_u32(prk.len())?, + info, + checked_u32(info.len())?, + checked_u32(OKM_LEN)?, + ); + Ok(()) } /// HKDF using the `salt`, input key material `ikm`, `info`. The output length @@ -69,45 +131,45 @@ macro_rules! impl_hkdf { /// Calls `extract` and `expand` with the given input. /// /// Returns the key material in an array of length `okm_len`. + /// Note that this function panics if `salt` or `ikm` is longer than (2**32 - 1) bytes. 
pub fn hkdf( + okm: &mut [u8; OKM_LEN], salt: &[u8], ikm: &[u8], info: &[u8], - ) -> Result<[u8; OKM_LEN], Error> { - let prk = extract(salt, ikm); - expand(&prk, info) + ) -> Result<(), Error> { + let mut prk = [0u8; $hash_len]; + extract(&mut prk, salt, ikm); + expand(okm, &prk, info) } /// This module uses heap allocated vectors for cases where the output /// length is not const. pub mod vec { - use super::super::Error; + use super::{checked_u32, Error}; /// HKDF expand using the pre-key material `prk` and `info`. The output length - /// is defined through the result type. + /// is defined by the parameter `okm_len`. /// Returns the key material in an array of length `okm_len` or /// [`Error::OkmTooLarge`] if the requested `okm_len` is too large. /// /// Note that this function returns an [`Error::ArgumentsTooLarge`] - /// if `salt`, `ikm`, or `OKM_LEN` is larger than 2**32 bytes. + /// if `salt`, `ikm`, or `OKM_LEN` is longer than (2**32 - 1) bytes. pub fn expand(prk: &[u8], info: &[u8], okm_len: usize) -> Result, Error> { - if okm_len > 255 * $tag_len { + if okm_len > 255 * $hash_len { // Output size is too large. HACL doesn't catch this. return Err(Error::OkmTooLarge); } + let mut okm = vec![0u8; okm_len]; - unsafe { - libcrux_hacl::$expand( - okm.as_mut_ptr(), - prk.as_ptr() as _, - prk.len().try_into().map_err(|_| Error::ArgumentsTooLarge)?, - info.as_ptr() as _, - info.len() - .try_into() - .map_err(|_| Error::ArgumentsTooLarge)?, - okm_len.try_into().map_err(|_| Error::ArgumentsTooLarge)?, - ); - } + libcrux_hacl_rs::hkdf::$expand( + &mut okm, + prk, + checked_u32(prk.len())?, + info, + checked_u32(info.len())?, + checked_u32(okm_len)?, + ); Ok(okm) } } @@ -116,22 +178,32 @@ macro_rules! 
impl_hkdf { } impl_hkdf!( + HkdfSha2_256, sha2_256, - Hacl_HKDF_extract_sha2_256, - Hacl_HKDF_expand_sha2_256, + Algorithm::Sha256, + extract_sha2_256, + expand_sha2_256, 32 ); impl_hkdf!( + HkdfSha2_384, sha2_384, - Hacl_HKDF_extract_sha2_384, - Hacl_HKDF_expand_sha2_384, + Algorithm::Sha384, + extract_sha2_384, + expand_sha2_384, 48 ); impl_hkdf!( + HkdfSha2_512, sha2_512, - Hacl_HKDF_extract_sha2_512, - Hacl_HKDF_expand_sha2_512, + Algorithm::Sha512, + extract_sha2_512, + expand_sha2_512, 64 ); + +fn checked_u32(num: usize) -> Result { + num.try_into().map_err(|_| Error::ArgumentsTooLarge) +} diff --git a/libcrux-hkdf/src/hkdf.rs b/libcrux-hkdf/src/hkdf.rs index c3a9036fe..d802b8cd3 100644 --- a/libcrux-hkdf/src/hkdf.rs +++ b/libcrux-hkdf/src/hkdf.rs @@ -4,6 +4,8 @@ pub(crate) mod hacl_hkdf; +use hacl_hkdf::{HkdfMode, HkdfSha2_256, HkdfSha2_384, HkdfSha2_512}; + /// The HKDF algorithm defining the used hash function. #[derive(Copy, Clone, Debug, PartialEq)] pub enum Algorithm { @@ -12,25 +14,35 @@ pub enum Algorithm { Sha512, } +impl Algorithm { + pub const fn tag_len(self) -> usize { + match self { + Algorithm::Sha256 => 32, + Algorithm::Sha384 => 48, + Algorithm::Sha512 => 64, + } + } +} + /// HKDF Errors #[derive(Debug, Clone, Copy, PartialEq, Eq)] pub enum Error { - OkmLengthTooLarge, + /// The requested output key material in expand was too large for the used + /// hash function. + OkmTooLarge, + /// At least one function argument has been too large to process. + ArgumentsTooLarge, } /// HKDF extract using hash function `mode`, `salt`, and the input key material `ikm`. /// Returns the pre-key material in a vector of tag length. 
pub fn extract(alg: Algorithm, salt: impl AsRef<[u8]>, ikm: impl AsRef<[u8]>) -> Vec { + let salt = salt.as_ref(); + let ikm = ikm.as_ref(); match alg { - Algorithm::Sha256 => { - crate::hacl_hkdf::sha2_256::extract(salt.as_ref(), ikm.as_ref()).into() - } - Algorithm::Sha384 => { - crate::hacl_hkdf::sha2_384::extract(salt.as_ref(), ikm.as_ref()).into() - } - Algorithm::Sha512 => { - crate::hacl_hkdf::sha2_512::extract(salt.as_ref(), ikm.as_ref()).into() - } + Algorithm::Sha256 => allocbuf(|prk| HkdfSha2_256::extract(prk, salt, ikm)), + Algorithm::Sha384 => allocbuf(|prk| HkdfSha2_384::extract(prk, salt, ikm)), + Algorithm::Sha512 => allocbuf(|prk| HkdfSha2_512::extract(prk, salt, ikm)), } } @@ -43,19 +55,12 @@ pub fn expand( info: impl AsRef<[u8]>, okm_len: usize, ) -> Result, Error> { + let prk = prk.as_ref(); + let info = info.as_ref(); match alg { - Algorithm::Sha256 => { - crate::hacl_hkdf::sha2_256::vec::expand(prk.as_ref(), info.as_ref(), okm_len) - .map_err(|_| Error::OkmLengthTooLarge) - } - Algorithm::Sha384 => { - crate::hacl_hkdf::sha2_384::vec::expand(prk.as_ref(), info.as_ref(), okm_len) - .map_err(|_| Error::OkmLengthTooLarge) - } - Algorithm::Sha512 => { - crate::hacl_hkdf::sha2_512::vec::expand(prk.as_ref(), info.as_ref(), okm_len) - .map_err(|_| Error::OkmLengthTooLarge) - } + Algorithm::Sha256 => HkdfSha2_256::expand_vec(prk, info, okm_len), + Algorithm::Sha384 => HkdfSha2_384::expand_vec(prk, info, okm_len), + Algorithm::Sha512 => HkdfSha2_512::expand_vec(prk, info, okm_len), } } @@ -65,11 +70,24 @@ pub fn expand( /// if the requested output length is too large. 
pub fn hkdf( mode: Algorithm, - salt: &[u8], - ikm: &[u8], - info: &[u8], + salt: impl AsRef<[u8]>, + ikm: impl AsRef<[u8]>, + info: impl AsRef<[u8]>, okm_len: usize, ) -> Result, Error> { - let prk = extract(mode, salt, ikm); - expand(mode, prk, info, okm_len) + let salt = salt.as_ref(); + let ikm = ikm.as_ref(); + let info = info.as_ref(); + + match mode { + Algorithm::Sha256 => HkdfSha2_256::hkdf_vec(salt, ikm, info, okm_len), + Algorithm::Sha384 => HkdfSha2_384::hkdf_vec(salt, ikm, info, okm_len), + Algorithm::Sha512 => HkdfSha2_512::hkdf_vec(salt, ikm, info, okm_len), + } +} + +fn allocbuf(f: F) -> Vec { + let mut buf = [0u8; N]; + f(&mut buf); + buf.into() } From 9fd9278a0a6b28731be145e0817fba4eeb95717b Mon Sep 17 00:00:00 2001 From: "Jan Winkelmann (keks)" Date: Wed, 6 Nov 2024 11:58:56 +0100 Subject: [PATCH 12/18] fix cargo toml indent depth --- Cargo.toml | 68 +++++++++++++++++++++++++++--------------------------- 1 file changed, 34 insertions(+), 34 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 029c434c4..e5b53373e 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,23 +1,23 @@ [workspace] members = [ - "sys/hacl", - "sys/libjade", - "sys/platform", - "sys/pqclean", - "sys/lib25519", - "benchmarks", - "fuzz", - "libcrux-ml-kem", - "libcrux-sha3", - "libcrux-ml-dsa", - "libcrux-intrinsics", - "libcrux-kem", - "libcrux-hmac", - "libcrux-hkdf", - "libcrux-ecdh", - "libcrux-psq", - "libcrux-hacl-rs-krml", - "cavp", + "sys/hacl", + "sys/libjade", + "sys/platform", + "sys/pqclean", + "sys/lib25519", + "benchmarks", + "fuzz", + "libcrux-ml-kem", + "libcrux-sha3", + "libcrux-ml-dsa", + "libcrux-intrinsics", + "libcrux-kem", + "libcrux-hmac", + "libcrux-hkdf", + "libcrux-ecdh", + "libcrux-psq", + "libcrux-hacl-rs-krml", + "cavp", ] [workspace.package] @@ -44,15 +44,15 @@ readme.workspace = true documentation = "https://docs.rs/libcrux/" description = "The Formally Verified Cryptography Library" exclude = [ - "/tests", - "/specs", - "/proofs", - "/*.py", - 
"/wasm-demo", - "/fuzz", - "/git-hooks", - "/architecture", - "/libcrux.fst.config.json", + "/tests", + "/specs", + "/proofs", + "/*.py", + "/wasm-demo", + "/fuzz", + "/git-hooks", + "/architecture", + "/libcrux.fst.config.json", ] [lib] @@ -114,11 +114,11 @@ panic = "abort" [lints.rust] unexpected_cfgs = { level = "warn", check-cfg = [ - 'cfg(hax)', - 'cfg(eurydice)', - 'cfg(doc_cfg)', - 'cfg(libjade)', - 'cfg(simd128)', - 'cfg(simd256)', - 'cfg(aes_ni)', + 'cfg(hax)', + 'cfg(eurydice)', + 'cfg(doc_cfg)', + 'cfg(libjade)', + 'cfg(simd128)', + 'cfg(simd256)', + 'cfg(aes_ni)', ] } From 98f8eedc4f0922f9ecb4f0212eb2380c45afcbf5 Mon Sep 17 00:00:00 2001 From: "Jan Winkelmann (keks)" Date: Wed, 6 Nov 2024 12:07:59 +0100 Subject: [PATCH 13/18] fix hmac lengths --- libcrux-hmac/src/hmac.rs | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/libcrux-hmac/src/hmac.rs b/libcrux-hmac/src/hmac.rs index 10b22e530..5383f999f 100644 --- a/libcrux-hmac/src/hmac.rs +++ b/libcrux-hmac/src/hmac.rs @@ -46,19 +46,19 @@ pub fn hmac(alg: Algorithm, key: &[u8], data: &[u8], tag_length: Option) None => native_tag_length, }; let mut dst: Vec<_> = match alg { - Algorithm::Sha1 => wrap_bufalloc(|buf| hmac_sha1(buf, key, data)).into(), - Algorithm::Sha256 => wrap_bufalloc(|buf| hmac_sha256(buf, key, data)).into(), - Algorithm::Sha384 => wrap_bufalloc(|buf| hmac_sha384(buf, key, data)).into(), - Algorithm::Sha512 => wrap_bufalloc(|buf| hmac_sha512(buf, key, data)).into(), + Algorithm::Sha1 => wrap_bufalloc(|buf| hmac_sha1(buf, key, data)), + Algorithm::Sha256 => wrap_bufalloc(|buf| hmac_sha256(buf, key, data)), + Algorithm::Sha384 => wrap_bufalloc(|buf| hmac_sha384(buf, key, data)), + Algorithm::Sha512 => wrap_bufalloc(|buf| hmac_sha512(buf, key, data)), }; dst.truncate(tag_length); dst } -fn wrap_bufalloc(f: F) -> [u8; N] { +fn wrap_bufalloc(f: F) -> Vec { let mut buf = [0u8; N]; f(&mut buf); - buf + buf.to_vec() } macro_rules! 
impl_hmac { @@ -80,5 +80,5 @@ macro_rules! impl_hmac { impl_hmac!(hmac_sha1, libcrux_hacl_rs::hmac::compute_sha1, 20); impl_hmac!(hmac_sha256, libcrux_hacl_rs::hmac::compute_sha2_256, 32); -impl_hmac!(hmac_sha384, libcrux_hacl_rs::hmac::compute_sha2_384, 32); -impl_hmac!(hmac_sha512, libcrux_hacl_rs::hmac::compute_sha2_512, 32); +impl_hmac!(hmac_sha384, libcrux_hacl_rs::hmac::compute_sha2_384, 48); +impl_hmac!(hmac_sha512, libcrux_hacl_rs::hmac::compute_sha2_512, 64); From 5520a0d595001742678ca940257b7ac2b207aec7 Mon Sep 17 00:00:00 2001 From: "Jan Winkelmann (keks)" Date: Wed, 6 Nov 2024 12:15:41 +0100 Subject: [PATCH 14/18] fix name and add comment --- libcrux-hkdf/src/hkdf.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/libcrux-hkdf/src/hkdf.rs b/libcrux-hkdf/src/hkdf.rs index d802b8cd3..4218a0db0 100644 --- a/libcrux-hkdf/src/hkdf.rs +++ b/libcrux-hkdf/src/hkdf.rs @@ -15,7 +15,8 @@ pub enum Algorithm { } impl Algorithm { - pub const fn tag_len(self) -> usize { + /// Returns the length of the underlying hash function. + pub const fn hash_len(self) -> usize { match self { Algorithm::Sha256 => 32, Algorithm::Sha384 => 48, From 4d3d2f0a52276586f019d920db9ac5dcb86647b6 Mon Sep 17 00:00:00 2001 From: "Jan Winkelmann (keks)" Date: Wed, 6 Nov 2024 12:21:37 +0100 Subject: [PATCH 15/18] impl Default for Sha2 hash states --- src/digest.rs | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/src/digest.rs b/src/digest.rs index 5384cab19..e4d7243fc 100644 --- a/src/digest.rs +++ b/src/digest.rs @@ -300,6 +300,12 @@ macro_rules! 
impl_hash { } } + impl Default for $name { + fn default() -> Self { + Self::new() + } + } + impl Clone for $name { fn clone(&self) -> Self { Self { From 112ccc2cd6dec21fd66e8b7f0878f95005628314 Mon Sep 17 00:00:00 2001 From: "Jan Winkelmann (keks)" Date: Wed, 6 Nov 2024 12:32:44 +0100 Subject: [PATCH 16/18] fmt rest of generated code --- libcrux-hacl-rs-krml/src/lib.rs | 36 +- libcrux-hacl-rs/src/bignum.rs | 8 +- libcrux-hacl-rs/src/ed25519_precomptable.rs | 2398 +++++++++++++++---- libcrux-hacl-rs/src/fstar/uint16.rs | 34 +- libcrux-hacl-rs/src/fstar/uint32.rs | 34 +- libcrux-hacl-rs/src/fstar/uint64.rs | 34 +- libcrux-hacl-rs/src/fstar/uint8.rs | 35 +- libcrux-hacl-rs/src/lowstar/endianness.rs | 12 +- 8 files changed, 2022 insertions(+), 569 deletions(-) diff --git a/libcrux-hacl-rs-krml/src/lib.rs b/libcrux-hacl-rs-krml/src/lib.rs index 18f6fe094..30896bf7f 100644 --- a/libcrux-hacl-rs-krml/src/lib.rs +++ b/libcrux-hacl-rs-krml/src/lib.rs @@ -1,16 +1,16 @@ -use proc_macro::{TokenStream,TokenTree,Delimiter}; +use proc_macro::{Delimiter, TokenStream, TokenTree}; -fn skip_comma>(ts: &mut T) { +fn skip_comma>(ts: &mut T) { match ts.next() { - | Some (TokenTree::Punct(p)) => assert_eq!(p.as_char(), ','), - | _ => panic!("Expected comma") + Some(TokenTree::Punct(p)) => assert_eq!(p.as_char(), ','), + _ => panic!("Expected comma"), } } -fn accept_token>(ts: &mut T) -> TokenTree { +fn accept_token>(ts: &mut T) -> TokenTree { match ts.next() { - | Some(t) => t, - | _ => panic!("early end") + Some(t) => t, + _ => panic!("early end"), } } @@ -24,23 +24,23 @@ pub fn unroll_for(ts: TokenStream) -> TokenStream { let n_loops = accept_token(&mut i).to_string().parse::().unwrap(); skip_comma(&mut i); let var = accept_token(&mut i).to_string(); - let var = &var[1..var.len()-1]; + let var = &var[1..var.len() - 1]; skip_comma(&mut i); let start = accept_token(&mut i).to_string(); skip_comma(&mut i); let increment = accept_token(&mut i).to_string(); skip_comma(&mut i); let 
grouped_body = brace(TokenStream::from_iter(i)); - let chunks = - (0..n_loops).map(|i| { - let chunks = [ - format!("const {}: u32 = {} + {} * {};", var, start, i, increment).parse().unwrap(), - TokenStream::from(grouped_body.clone()), - ";".parse().unwrap() - ]; - TokenStream::from(brace(TokenStream::from_iter(chunks))) - }) - ; + let chunks = (0..n_loops).map(|i| { + let chunks = [ + format!("const {}: u32 = {} + {} * {};", var, start, i, increment) + .parse() + .unwrap(), + TokenStream::from(grouped_body.clone()), + ";".parse().unwrap(), + ]; + TokenStream::from(brace(TokenStream::from_iter(chunks))) + }); TokenStream::from(brace(TokenStream::from_iter(chunks.into_iter().flatten()))) // "{ let i = 0; println!(\"FROM MACRO{}\", i); }".parse().unwrap() } diff --git a/libcrux-hacl-rs/src/bignum.rs b/libcrux-hacl-rs/src/bignum.rs index 2d4a00fb1..18f0a69d6 100644 --- a/libcrux-hacl-rs/src/bignum.rs +++ b/libcrux-hacl-rs/src/bignum.rs @@ -1,12 +1,12 @@ -pub mod bignum_base; pub mod bignum; -pub mod bignum32; -pub mod bignum64; pub mod bignum256; pub mod bignum256_32; +pub mod bignum32; pub mod bignum4096; pub mod bignum4096_32; +pub mod bignum64; +pub mod bignum_base; pub mod test { - // pub mod bignum4096; + // pub mod bignum4096; } diff --git a/libcrux-hacl-rs/src/ed25519_precomptable.rs b/libcrux-hacl-rs/src/ed25519_precomptable.rs index ac344ed73..f0e8def64 100644 --- a/libcrux-hacl-rs/src/ed25519_precomptable.rs +++ b/libcrux-hacl-rs/src/ed25519_precomptable.rs @@ -4,475 +4,1937 @@ #![allow(unused_assignments)] #![allow(unreachable_patterns)] -pub(crate) const precomp_basepoint_table_w4: [u64; 320] = - [0u64, 0u64, 0u64, 0u64, 0u64, 1u64, 0u64, 0u64, 0u64, 0u64, 1u64, 0u64, 0u64, 0u64, 0u64, 0u64, - 0u64, 0u64, 0u64, 0u64, 1738742601995546u64, 1146398526822698u64, 2070867633025821u64, - 562264141797630u64, 587772402128613u64, 1801439850948184u64, 1351079888211148u64, - 450359962737049u64, 900719925474099u64, 1801439850948198u64, 1u64, 0u64, 0u64, 0u64, 0u64, - 
1841354044333475u64, 16398895984059u64, 755974180946558u64, 900171276175154u64, - 1821297809914039u64, 1661154287933054u64, 284530020860578u64, 1390261174866914u64, - 1524110943907984u64, 1045603498418422u64, 928651508580478u64, 1383326941296346u64, - 961937908925785u64, 80455759693706u64, 904734540352947u64, 1507481815385608u64, - 2223447444246085u64, 1083941587175919u64, 2059929906842505u64, 1581435440146976u64, - 782730187692425u64, 9928394897574u64, 1539449519985236u64, 1923587931078510u64, - 552919286076056u64, 376925408065760u64, 447320488831784u64, 1362918338468019u64, - 1470031896696846u64, 2189796996539902u64, 1337552949959847u64, 1762287177775726u64, - 237994495816815u64, 1277840395970544u64, 543972849007241u64, 1224692671618814u64, - 162359533289271u64, 282240927125249u64, 586909166382289u64, 17726488197838u64, - 377014554985659u64, 1433835303052512u64, 702061469493692u64, 1142253108318154u64, - 318297794307551u64, 954362646308543u64, 517363881452320u64, 1868013482130416u64, - 262562472373260u64, 902232853249919u64, 2107343057055746u64, 462368348619024u64, - 1893758677092974u64, 2177729767846389u64, 2168532543559143u64, 443867094639821u64, - 730169342581022u64, 1564589016879755u64, 51218195700649u64, 76684578423745u64, - 560266272480743u64, 922517457707697u64, 2066645939860874u64, 1318277348414638u64, - 1576726809084003u64, 1817337608563665u64, 1874240939237666u64, 754733726333910u64, - 97085310406474u64, 751148364309235u64, 1622159695715187u64, 1444098819684916u64, - 130920805558089u64, 1260449179085308u64, 1860021740768461u64, 110052860348509u64, - 193830891643810u64, 164148413933881u64, 180017794795332u64, 1523506525254651u64, - 465981629225956u64, 559733514964572u64, 1279624874416974u64, 2026642326892306u64, - 1425156829982409u64, 2160936383793147u64, 1061870624975247u64, 2023497043036941u64, - 117942212883190u64, 490339622800774u64, 1729931303146295u64, 422305932971074u64, - 529103152793096u64, 1211973233775992u64, 721364955929681u64, 
1497674430438813u64, - 342545521275073u64, 2102107575279372u64, 2108462244669966u64, 1382582406064082u64, - 2206396818383323u64, 2109093268641147u64, 10809845110983u64, 1605176920880099u64, - 744640650753946u64, 1712758897518129u64, 373410811281809u64, 648838265800209u64, - 813058095530999u64, 513987632620169u64, 465516160703329u64, 2136322186126330u64, - 1979645899422932u64, 1197131006470786u64, 1467836664863979u64, 1340751381374628u64, - 1810066212667962u64, 1009933588225499u64, 1106129188080873u64, 1388980405213901u64, - 533719246598044u64, 1169435803073277u64, 198920999285821u64, 487492330629854u64, - 1807093008537778u64, 1540899012923865u64, 2075080271659867u64, 1527990806921523u64, - 1323728742908002u64, 1568595959608205u64, 1388032187497212u64, 2026968840050568u64, - 1396591153295755u64, 820416950170901u64, 520060313205582u64, 2016404325094901u64, - 1584709677868520u64, 272161374469956u64, 1567188603996816u64, 1986160530078221u64, - 553930264324589u64, 1058426729027503u64, 8762762886675u64, 2216098143382988u64, - 1835145266889223u64, 1712936431558441u64, 1017009937844974u64, 585361667812740u64, - 2114711541628181u64, 2238729632971439u64, 121257546253072u64, 847154149018345u64, - 211972965476684u64, 287499084460129u64, 2098247259180197u64, 839070411583329u64, - 339551619574372u64, 1432951287640743u64, 526481249498942u64, 931991661905195u64, - 1884279965674487u64, 200486405604411u64, 364173020594788u64, 518034455936955u64, - 1085564703965501u64, 16030410467927u64, 604865933167613u64, 1695298441093964u64, - 498856548116159u64, 2193030062787034u64, 1706339802964179u64, 1721199073493888u64, - 820740951039755u64, 1216053436896834u64, 23954895815139u64, 1662515208920491u64, - 1705443427511899u64, 1957928899570365u64, 1189636258255725u64, 1795695471103809u64, - 1691191297654118u64, 282402585374360u64, 460405330264832u64, 63765529445733u64, - 469763447404473u64, 733607089694996u64, 685410420186959u64, 1096682630419738u64, - 1162548510542362u64, 1020949526456676u64, 
1211660396870573u64, 613126398222696u64, - 1117829165843251u64, 742432540886650u64, 1483755088010658u64, 942392007134474u64, - 1447834130944107u64, 489368274863410u64, 23192985544898u64, 648442406146160u64, - 785438843373876u64, 249464684645238u64, 170494608205618u64, 335112827260550u64, - 1462050123162735u64, 1084803668439016u64, 853459233600325u64, 215777728187495u64, - 1965759433526974u64, 1349482894446537u64, 694163317612871u64, 860536766165036u64, - 1178788094084321u64, 1652739626626996u64, 2115723946388185u64, 1577204379094664u64, - 1083882859023240u64, 1768759143381635u64, 1737180992507258u64, 246054513922239u64, - 577253134087234u64, 356340280578042u64, 1638917769925142u64, 223550348130103u64, - 470592666638765u64, 22663573966996u64, 596552461152400u64, 364143537069499u64, - 3942119457699u64, 107951982889287u64, 1843471406713209u64, 1625773041610986u64, - 1466141092501702u64, 1043024095021271u64, 310429964047508u64, 98559121500372u64, - 152746933782868u64, 259407205078261u64, 828123093322585u64, 1576847274280091u64, - 1170871375757302u64, 1588856194642775u64, 984767822341977u64, 1141497997993760u64, - 809325345150796u64, 1879837728202511u64, 201340910657893u64, 1079157558888483u64, - 1052373448588065u64, 1732036202501778u64, 2105292670328445u64, 679751387312402u64, - 1679682144926229u64, 1695823455818780u64, 498852317075849u64, 1786555067788433u64, - 1670727545779425u64, 117945875433544u64, 407939139781844u64, 854632120023778u64, - 1413383148360437u64, 286030901733673u64, 1207361858071196u64, 461340408181417u64, - 1096919590360164u64, 1837594897475685u64, 533755561544165u64, 1638688042247712u64, - 1431653684793005u64, 1036458538873559u64, 390822120341779u64, 1920929837111618u64, - 543426740024168u64, 645751357799929u64, 2245025632994463u64, 1550778638076452u64, - 223738153459949u64, 1337209385492033u64, 1276967236456531u64, 1463815821063071u64, - 2070620870191473u64, 1199170709413753u64, 273230877394166u64, 1873264887608046u64, - 890877152910775u64]; 
+pub(crate) const precomp_basepoint_table_w4: [u64; 320] = [ + 0u64, + 0u64, + 0u64, + 0u64, + 0u64, + 1u64, + 0u64, + 0u64, + 0u64, + 0u64, + 1u64, + 0u64, + 0u64, + 0u64, + 0u64, + 0u64, + 0u64, + 0u64, + 0u64, + 0u64, + 1738742601995546u64, + 1146398526822698u64, + 2070867633025821u64, + 562264141797630u64, + 587772402128613u64, + 1801439850948184u64, + 1351079888211148u64, + 450359962737049u64, + 900719925474099u64, + 1801439850948198u64, + 1u64, + 0u64, + 0u64, + 0u64, + 0u64, + 1841354044333475u64, + 16398895984059u64, + 755974180946558u64, + 900171276175154u64, + 1821297809914039u64, + 1661154287933054u64, + 284530020860578u64, + 1390261174866914u64, + 1524110943907984u64, + 1045603498418422u64, + 928651508580478u64, + 1383326941296346u64, + 961937908925785u64, + 80455759693706u64, + 904734540352947u64, + 1507481815385608u64, + 2223447444246085u64, + 1083941587175919u64, + 2059929906842505u64, + 1581435440146976u64, + 782730187692425u64, + 9928394897574u64, + 1539449519985236u64, + 1923587931078510u64, + 552919286076056u64, + 376925408065760u64, + 447320488831784u64, + 1362918338468019u64, + 1470031896696846u64, + 2189796996539902u64, + 1337552949959847u64, + 1762287177775726u64, + 237994495816815u64, + 1277840395970544u64, + 543972849007241u64, + 1224692671618814u64, + 162359533289271u64, + 282240927125249u64, + 586909166382289u64, + 17726488197838u64, + 377014554985659u64, + 1433835303052512u64, + 702061469493692u64, + 1142253108318154u64, + 318297794307551u64, + 954362646308543u64, + 517363881452320u64, + 1868013482130416u64, + 262562472373260u64, + 902232853249919u64, + 2107343057055746u64, + 462368348619024u64, + 1893758677092974u64, + 2177729767846389u64, + 2168532543559143u64, + 443867094639821u64, + 730169342581022u64, + 1564589016879755u64, + 51218195700649u64, + 76684578423745u64, + 560266272480743u64, + 922517457707697u64, + 2066645939860874u64, + 1318277348414638u64, + 1576726809084003u64, + 1817337608563665u64, + 1874240939237666u64, + 
754733726333910u64, + 97085310406474u64, + 751148364309235u64, + 1622159695715187u64, + 1444098819684916u64, + 130920805558089u64, + 1260449179085308u64, + 1860021740768461u64, + 110052860348509u64, + 193830891643810u64, + 164148413933881u64, + 180017794795332u64, + 1523506525254651u64, + 465981629225956u64, + 559733514964572u64, + 1279624874416974u64, + 2026642326892306u64, + 1425156829982409u64, + 2160936383793147u64, + 1061870624975247u64, + 2023497043036941u64, + 117942212883190u64, + 490339622800774u64, + 1729931303146295u64, + 422305932971074u64, + 529103152793096u64, + 1211973233775992u64, + 721364955929681u64, + 1497674430438813u64, + 342545521275073u64, + 2102107575279372u64, + 2108462244669966u64, + 1382582406064082u64, + 2206396818383323u64, + 2109093268641147u64, + 10809845110983u64, + 1605176920880099u64, + 744640650753946u64, + 1712758897518129u64, + 373410811281809u64, + 648838265800209u64, + 813058095530999u64, + 513987632620169u64, + 465516160703329u64, + 2136322186126330u64, + 1979645899422932u64, + 1197131006470786u64, + 1467836664863979u64, + 1340751381374628u64, + 1810066212667962u64, + 1009933588225499u64, + 1106129188080873u64, + 1388980405213901u64, + 533719246598044u64, + 1169435803073277u64, + 198920999285821u64, + 487492330629854u64, + 1807093008537778u64, + 1540899012923865u64, + 2075080271659867u64, + 1527990806921523u64, + 1323728742908002u64, + 1568595959608205u64, + 1388032187497212u64, + 2026968840050568u64, + 1396591153295755u64, + 820416950170901u64, + 520060313205582u64, + 2016404325094901u64, + 1584709677868520u64, + 272161374469956u64, + 1567188603996816u64, + 1986160530078221u64, + 553930264324589u64, + 1058426729027503u64, + 8762762886675u64, + 2216098143382988u64, + 1835145266889223u64, + 1712936431558441u64, + 1017009937844974u64, + 585361667812740u64, + 2114711541628181u64, + 2238729632971439u64, + 121257546253072u64, + 847154149018345u64, + 211972965476684u64, + 287499084460129u64, + 2098247259180197u64, + 
839070411583329u64, + 339551619574372u64, + 1432951287640743u64, + 526481249498942u64, + 931991661905195u64, + 1884279965674487u64, + 200486405604411u64, + 364173020594788u64, + 518034455936955u64, + 1085564703965501u64, + 16030410467927u64, + 604865933167613u64, + 1695298441093964u64, + 498856548116159u64, + 2193030062787034u64, + 1706339802964179u64, + 1721199073493888u64, + 820740951039755u64, + 1216053436896834u64, + 23954895815139u64, + 1662515208920491u64, + 1705443427511899u64, + 1957928899570365u64, + 1189636258255725u64, + 1795695471103809u64, + 1691191297654118u64, + 282402585374360u64, + 460405330264832u64, + 63765529445733u64, + 469763447404473u64, + 733607089694996u64, + 685410420186959u64, + 1096682630419738u64, + 1162548510542362u64, + 1020949526456676u64, + 1211660396870573u64, + 613126398222696u64, + 1117829165843251u64, + 742432540886650u64, + 1483755088010658u64, + 942392007134474u64, + 1447834130944107u64, + 489368274863410u64, + 23192985544898u64, + 648442406146160u64, + 785438843373876u64, + 249464684645238u64, + 170494608205618u64, + 335112827260550u64, + 1462050123162735u64, + 1084803668439016u64, + 853459233600325u64, + 215777728187495u64, + 1965759433526974u64, + 1349482894446537u64, + 694163317612871u64, + 860536766165036u64, + 1178788094084321u64, + 1652739626626996u64, + 2115723946388185u64, + 1577204379094664u64, + 1083882859023240u64, + 1768759143381635u64, + 1737180992507258u64, + 246054513922239u64, + 577253134087234u64, + 356340280578042u64, + 1638917769925142u64, + 223550348130103u64, + 470592666638765u64, + 22663573966996u64, + 596552461152400u64, + 364143537069499u64, + 3942119457699u64, + 107951982889287u64, + 1843471406713209u64, + 1625773041610986u64, + 1466141092501702u64, + 1043024095021271u64, + 310429964047508u64, + 98559121500372u64, + 152746933782868u64, + 259407205078261u64, + 828123093322585u64, + 1576847274280091u64, + 1170871375757302u64, + 1588856194642775u64, + 984767822341977u64, + 1141497997993760u64, + 
809325345150796u64, + 1879837728202511u64, + 201340910657893u64, + 1079157558888483u64, + 1052373448588065u64, + 1732036202501778u64, + 2105292670328445u64, + 679751387312402u64, + 1679682144926229u64, + 1695823455818780u64, + 498852317075849u64, + 1786555067788433u64, + 1670727545779425u64, + 117945875433544u64, + 407939139781844u64, + 854632120023778u64, + 1413383148360437u64, + 286030901733673u64, + 1207361858071196u64, + 461340408181417u64, + 1096919590360164u64, + 1837594897475685u64, + 533755561544165u64, + 1638688042247712u64, + 1431653684793005u64, + 1036458538873559u64, + 390822120341779u64, + 1920929837111618u64, + 543426740024168u64, + 645751357799929u64, + 2245025632994463u64, + 1550778638076452u64, + 223738153459949u64, + 1337209385492033u64, + 1276967236456531u64, + 1463815821063071u64, + 2070620870191473u64, + 1199170709413753u64, + 273230877394166u64, + 1873264887608046u64, + 890877152910775u64, +]; -pub(crate) const precomp_g_pow2_64_table_w4: [u64; 320] = - [0u64, 0u64, 0u64, 0u64, 0u64, 1u64, 0u64, 0u64, 0u64, 0u64, 1u64, 0u64, 0u64, 0u64, 0u64, 0u64, - 0u64, 0u64, 0u64, 0u64, 13559344787725u64, 2051621493703448u64, 1947659315640708u64, - 626856790370168u64, 1592804284034836u64, 1781728767459187u64, 278818420518009u64, - 2038030359908351u64, 910625973862690u64, 471887343142239u64, 1298543306606048u64, - 794147365642417u64, 129968992326749u64, 523140861678572u64, 1166419653909231u64, - 2009637196928390u64, 1288020222395193u64, 1007046974985829u64, 208981102651386u64, - 2074009315253380u64, 1564056062071967u64, 276822668750618u64, 206621292512572u64, - 470304361809269u64, 895215438398493u64, 1527859053868686u64, 1624967223409369u64, - 811821865979736u64, 350450534838340u64, 219143807921807u64, 507994540371254u64, - 986513794574720u64, 1142661369967121u64, 621278293399257u64, 556189161519781u64, - 351964007865066u64, 2011573453777822u64, 1367125527151537u64, 1691316722438196u64, - 731328817345164u64, 1284781192709232u64, 478439299539269u64, 
204842178076429u64, - 2085125369913651u64, 1980773492792985u64, 1480264409524940u64, 688389585376233u64, - 612962643526972u64, 165595382536676u64, 1850300069212263u64, 1176357203491551u64, - 1880164984292321u64, 10786153104736u64, 1242293560510203u64, 1358399951884084u64, - 1901358796610357u64, 1385092558795806u64, 1734893785311348u64, 2046201851951191u64, - 1233811309557352u64, 1531160168656129u64, 1543287181303358u64, 516121446374119u64, - 723422668089935u64, 1228176774959679u64, 1598014722726267u64, 1630810326658412u64, - 1343833067463760u64, 1024397964362099u64, 1157142161346781u64, 56422174971792u64, - 544901687297092u64, 1291559028869009u64, 1336918672345120u64, 1390874603281353u64, - 1127199512010904u64, 992644979940964u64, 1035213479783573u64, 36043651196100u64, - 1220961519321221u64, 1348190007756977u64, 579420200329088u64, 1703819961008985u64, - 1993919213460047u64, 2225080008232251u64, 392785893702372u64, 464312521482632u64, - 1224525362116057u64, 810394248933036u64, 932513521649107u64, 592314953488703u64, - 586334603791548u64, 1310888126096549u64, 650842674074281u64, 1596447001791059u64, - 2086767406328284u64, 1866377645879940u64, 1721604362642743u64, 738502322566890u64, - 1851901097729689u64, 1158347571686914u64, 2023626733470827u64, 329625404653699u64, - 563555875598551u64, 516554588079177u64, 1134688306104598u64, 186301198420809u64, - 1339952213563300u64, 643605614625891u64, 1947505332718043u64, 1722071694852824u64, - 601679570440694u64, 1821275721236351u64, 1808307842870389u64, 1654165204015635u64, - 1457334100715245u64, 217784948678349u64, 1820622417674817u64, 1946121178444661u64, - 597980757799332u64, 1745271227710764u64, 2010952890941980u64, 339811849696648u64, - 1066120666993872u64, 261276166508990u64, 323098645774553u64, 207454744271283u64, - 941448672977675u64, 71890920544375u64, 840849789313357u64, 1223996070717926u64, - 196832550853408u64, 115986818309231u64, 1586171527267675u64, 1666169080973450u64, - 1456454731176365u64, 44467854369003u64, 
2149656190691480u64, 283446383597589u64, - 2040542647729974u64, 305705593840224u64, 475315822269791u64, 648133452550632u64, - 169218658835720u64, 24960052338251u64, 938907951346766u64, 425970950490510u64, - 1037622011013183u64, 1026882082708180u64, 1635699409504916u64, 1644776942870488u64, - 2151820331175914u64, 824120674069819u64, 835744976610113u64, 1991271032313190u64, - 96507354724855u64, 400645405133260u64, 343728076650825u64, 1151585441385566u64, - 1403339955333520u64, 230186314139774u64, 1736248861506714u64, 1010804378904572u64, - 1394932289845636u64, 1901351256960852u64, 2187471430089807u64, 1003853262342670u64, - 1327743396767461u64, 1465160415991740u64, 366625359144534u64, 1534791405247604u64, - 1790905930250187u64, 1255484115292738u64, 2223291365520443u64, 210967717407408u64, - 26722916813442u64, 1919574361907910u64, 468825088280256u64, 2230011775946070u64, - 1628365642214479u64, 568871869234932u64, 1066987968780488u64, 1692242903745558u64, - 1678903997328589u64, 214262165888021u64, 1929686748607204u64, 1790138967989670u64, - 1790261616022076u64, 1559824537553112u64, 1230364591311358u64, 147531939886346u64, - 1528207085815487u64, 477957922927292u64, 285670243881618u64, 264430080123332u64, - 1163108160028611u64, 373201522147371u64, 34903775270979u64, 1750870048600662u64, - 1319328308741084u64, 1547548634278984u64, 1691259592202927u64, 2247758037259814u64, - 329611399953677u64, 1385555496268877u64, 2242438354031066u64, 1329523854843632u64, - 399895373846055u64, 678005703193452u64, 1496357700997771u64, 71909969781942u64, - 1515391418612349u64, 470110837888178u64, 1981307309417466u64, 1259888737412276u64, - 669991710228712u64, 1048546834514303u64, 1678323291295512u64, 2172033978088071u64, - 1529278455500556u64, 901984601941894u64, 780867622403807u64, 550105677282793u64, - 975860231176136u64, 525188281689178u64, 49966114807992u64, 1776449263836645u64, - 267851776380338u64, 2225969494054620u64, 2016794225789822u64, 1186108678266608u64, - 1023083271408882u64, 
1119289418565906u64, 1248185897348801u64, 1846081539082697u64, - 23756429626075u64, 1441999021105403u64, 724497586552825u64, 1287761623605379u64, - 685303359654224u64, 2217156930690570u64, 163769288918347u64, 1098423278284094u64, - 1391470723006008u64, 570700152353516u64, 744804507262556u64, 2200464788609495u64, - 624141899161992u64, 2249570166275684u64, 378706441983561u64, 122486379999375u64, - 430741162798924u64, 113847463452574u64, 266250457840685u64, 2120743625072743u64, - 222186221043927u64, 1964290018305582u64, 1435278008132477u64, 1670867456663734u64, - 2009989552599079u64, 1348024113448744u64, 1158423886300455u64, 1356467152691569u64, - 306943042363674u64, 926879628664255u64, 1349295689598324u64, 725558330071205u64, - 536569987519948u64, 116436990335366u64, 1551888573800376u64, 2044698345945451u64, - 104279940291311u64, 251526570943220u64, 754735828122925u64, 33448073576361u64, - 994605876754543u64, 546007584022006u64, 2217332798409487u64, 706477052561591u64, - 131174619428653u64, 2148698284087243u64, 239290486205186u64, 2161325796952184u64, - 1713452845607994u64, 1297861562938913u64, 1779539876828514u64, 1926559018603871u64, - 296485747893968u64, 1859208206640686u64, 538513979002718u64, 103998826506137u64, - 2025375396538469u64, 1370680785701206u64, 1698557311253840u64, 1411096399076595u64, - 2132580530813677u64, 2071564345845035u64, 498581428556735u64, 1136010486691371u64, - 1927619356993146u64]; +pub(crate) const precomp_g_pow2_64_table_w4: [u64; 320] = [ + 0u64, + 0u64, + 0u64, + 0u64, + 0u64, + 1u64, + 0u64, + 0u64, + 0u64, + 0u64, + 1u64, + 0u64, + 0u64, + 0u64, + 0u64, + 0u64, + 0u64, + 0u64, + 0u64, + 0u64, + 13559344787725u64, + 2051621493703448u64, + 1947659315640708u64, + 626856790370168u64, + 1592804284034836u64, + 1781728767459187u64, + 278818420518009u64, + 2038030359908351u64, + 910625973862690u64, + 471887343142239u64, + 1298543306606048u64, + 794147365642417u64, + 129968992326749u64, + 523140861678572u64, + 1166419653909231u64, + 
2009637196928390u64, + 1288020222395193u64, + 1007046974985829u64, + 208981102651386u64, + 2074009315253380u64, + 1564056062071967u64, + 276822668750618u64, + 206621292512572u64, + 470304361809269u64, + 895215438398493u64, + 1527859053868686u64, + 1624967223409369u64, + 811821865979736u64, + 350450534838340u64, + 219143807921807u64, + 507994540371254u64, + 986513794574720u64, + 1142661369967121u64, + 621278293399257u64, + 556189161519781u64, + 351964007865066u64, + 2011573453777822u64, + 1367125527151537u64, + 1691316722438196u64, + 731328817345164u64, + 1284781192709232u64, + 478439299539269u64, + 204842178076429u64, + 2085125369913651u64, + 1980773492792985u64, + 1480264409524940u64, + 688389585376233u64, + 612962643526972u64, + 165595382536676u64, + 1850300069212263u64, + 1176357203491551u64, + 1880164984292321u64, + 10786153104736u64, + 1242293560510203u64, + 1358399951884084u64, + 1901358796610357u64, + 1385092558795806u64, + 1734893785311348u64, + 2046201851951191u64, + 1233811309557352u64, + 1531160168656129u64, + 1543287181303358u64, + 516121446374119u64, + 723422668089935u64, + 1228176774959679u64, + 1598014722726267u64, + 1630810326658412u64, + 1343833067463760u64, + 1024397964362099u64, + 1157142161346781u64, + 56422174971792u64, + 544901687297092u64, + 1291559028869009u64, + 1336918672345120u64, + 1390874603281353u64, + 1127199512010904u64, + 992644979940964u64, + 1035213479783573u64, + 36043651196100u64, + 1220961519321221u64, + 1348190007756977u64, + 579420200329088u64, + 1703819961008985u64, + 1993919213460047u64, + 2225080008232251u64, + 392785893702372u64, + 464312521482632u64, + 1224525362116057u64, + 810394248933036u64, + 932513521649107u64, + 592314953488703u64, + 586334603791548u64, + 1310888126096549u64, + 650842674074281u64, + 1596447001791059u64, + 2086767406328284u64, + 1866377645879940u64, + 1721604362642743u64, + 738502322566890u64, + 1851901097729689u64, + 1158347571686914u64, + 2023626733470827u64, + 329625404653699u64, + 
563555875598551u64, + 516554588079177u64, + 1134688306104598u64, + 186301198420809u64, + 1339952213563300u64, + 643605614625891u64, + 1947505332718043u64, + 1722071694852824u64, + 601679570440694u64, + 1821275721236351u64, + 1808307842870389u64, + 1654165204015635u64, + 1457334100715245u64, + 217784948678349u64, + 1820622417674817u64, + 1946121178444661u64, + 597980757799332u64, + 1745271227710764u64, + 2010952890941980u64, + 339811849696648u64, + 1066120666993872u64, + 261276166508990u64, + 323098645774553u64, + 207454744271283u64, + 941448672977675u64, + 71890920544375u64, + 840849789313357u64, + 1223996070717926u64, + 196832550853408u64, + 115986818309231u64, + 1586171527267675u64, + 1666169080973450u64, + 1456454731176365u64, + 44467854369003u64, + 2149656190691480u64, + 283446383597589u64, + 2040542647729974u64, + 305705593840224u64, + 475315822269791u64, + 648133452550632u64, + 169218658835720u64, + 24960052338251u64, + 938907951346766u64, + 425970950490510u64, + 1037622011013183u64, + 1026882082708180u64, + 1635699409504916u64, + 1644776942870488u64, + 2151820331175914u64, + 824120674069819u64, + 835744976610113u64, + 1991271032313190u64, + 96507354724855u64, + 400645405133260u64, + 343728076650825u64, + 1151585441385566u64, + 1403339955333520u64, + 230186314139774u64, + 1736248861506714u64, + 1010804378904572u64, + 1394932289845636u64, + 1901351256960852u64, + 2187471430089807u64, + 1003853262342670u64, + 1327743396767461u64, + 1465160415991740u64, + 366625359144534u64, + 1534791405247604u64, + 1790905930250187u64, + 1255484115292738u64, + 2223291365520443u64, + 210967717407408u64, + 26722916813442u64, + 1919574361907910u64, + 468825088280256u64, + 2230011775946070u64, + 1628365642214479u64, + 568871869234932u64, + 1066987968780488u64, + 1692242903745558u64, + 1678903997328589u64, + 214262165888021u64, + 1929686748607204u64, + 1790138967989670u64, + 1790261616022076u64, + 1559824537553112u64, + 1230364591311358u64, + 147531939886346u64, + 
1528207085815487u64, + 477957922927292u64, + 285670243881618u64, + 264430080123332u64, + 1163108160028611u64, + 373201522147371u64, + 34903775270979u64, + 1750870048600662u64, + 1319328308741084u64, + 1547548634278984u64, + 1691259592202927u64, + 2247758037259814u64, + 329611399953677u64, + 1385555496268877u64, + 2242438354031066u64, + 1329523854843632u64, + 399895373846055u64, + 678005703193452u64, + 1496357700997771u64, + 71909969781942u64, + 1515391418612349u64, + 470110837888178u64, + 1981307309417466u64, + 1259888737412276u64, + 669991710228712u64, + 1048546834514303u64, + 1678323291295512u64, + 2172033978088071u64, + 1529278455500556u64, + 901984601941894u64, + 780867622403807u64, + 550105677282793u64, + 975860231176136u64, + 525188281689178u64, + 49966114807992u64, + 1776449263836645u64, + 267851776380338u64, + 2225969494054620u64, + 2016794225789822u64, + 1186108678266608u64, + 1023083271408882u64, + 1119289418565906u64, + 1248185897348801u64, + 1846081539082697u64, + 23756429626075u64, + 1441999021105403u64, + 724497586552825u64, + 1287761623605379u64, + 685303359654224u64, + 2217156930690570u64, + 163769288918347u64, + 1098423278284094u64, + 1391470723006008u64, + 570700152353516u64, + 744804507262556u64, + 2200464788609495u64, + 624141899161992u64, + 2249570166275684u64, + 378706441983561u64, + 122486379999375u64, + 430741162798924u64, + 113847463452574u64, + 266250457840685u64, + 2120743625072743u64, + 222186221043927u64, + 1964290018305582u64, + 1435278008132477u64, + 1670867456663734u64, + 2009989552599079u64, + 1348024113448744u64, + 1158423886300455u64, + 1356467152691569u64, + 306943042363674u64, + 926879628664255u64, + 1349295689598324u64, + 725558330071205u64, + 536569987519948u64, + 116436990335366u64, + 1551888573800376u64, + 2044698345945451u64, + 104279940291311u64, + 251526570943220u64, + 754735828122925u64, + 33448073576361u64, + 994605876754543u64, + 546007584022006u64, + 2217332798409487u64, + 706477052561591u64, + 131174619428653u64, + 
2148698284087243u64, + 239290486205186u64, + 2161325796952184u64, + 1713452845607994u64, + 1297861562938913u64, + 1779539876828514u64, + 1926559018603871u64, + 296485747893968u64, + 1859208206640686u64, + 538513979002718u64, + 103998826506137u64, + 2025375396538469u64, + 1370680785701206u64, + 1698557311253840u64, + 1411096399076595u64, + 2132580530813677u64, + 2071564345845035u64, + 498581428556735u64, + 1136010486691371u64, + 1927619356993146u64, +]; -pub(crate) const precomp_g_pow2_128_table_w4: [u64; 320] = - [0u64, 0u64, 0u64, 0u64, 0u64, 1u64, 0u64, 0u64, 0u64, 0u64, 1u64, 0u64, 0u64, 0u64, 0u64, 0u64, - 0u64, 0u64, 0u64, 0u64, 557549315715710u64, 196756086293855u64, 846062225082495u64, - 1865068224838092u64, 991112090754908u64, 522916421512828u64, 2098523346722375u64, - 1135633221747012u64, 858420432114866u64, 186358544306082u64, 1044420411868480u64, - 2080052304349321u64, 557301814716724u64, 1305130257814057u64, 2126012765451197u64, - 1441004402875101u64, 353948968859203u64, 470765987164835u64, 1507675957683570u64, - 1086650358745097u64, 1911913434398388u64, 66086091117182u64, 1137511952425971u64, - 36958263512141u64, 2193310025325256u64, 1085191426269045u64, 1232148267909446u64, - 1449894406170117u64, 1241416717139557u64, 1940876999212868u64, 829758415918121u64, - 309608450373449u64, 2228398547683851u64, 1580623271960188u64, 1675601502456740u64, - 1360363115493548u64, 1098397313096815u64, 1809255384359797u64, 1458261916834384u64, - 210682545649705u64, 1606836641068115u64, 1230478270405318u64, 1843192771547802u64, - 1794596343564051u64, 229060710252162u64, 2169742775467181u64, 701467067318072u64, - 696018499035555u64, 521051885339807u64, 158329567901874u64, 740426481832143u64, - 1369811177301441u64, 503351589084015u64, 1781114827942261u64, 1650493549693035u64, - 2174562418345156u64, 456517194809244u64, 2052761522121179u64, 2233342271123682u64, - 1445872925177435u64, 1131882576902813u64, 220765848055241u64, 1280259961403769u64, - 1581497080160712u64, 
1477441080108824u64, 218428165202767u64, 1970598141278907u64, - 643366736173069u64, 2167909426804014u64, 834993711408259u64, 1922437166463212u64, - 1900036281472252u64, 513794844386304u64, 1297904164900114u64, 1147626295373268u64, - 1910101606251299u64, 182933838633381u64, 806229530787362u64, 155511666433200u64, - 290522463375462u64, 534373523491751u64, 1302938814480515u64, 1664979184120445u64, - 304235649499423u64, 339284524318609u64, 1881717946973483u64, 1670802286833842u64, - 2223637120675737u64, 135818919485814u64, 1144856572842792u64, 2234981613434386u64, - 963917024969826u64, 402275378284993u64, 141532417412170u64, 921537468739387u64, - 963905069722607u64, 1405442890733358u64, 1567763927164655u64, 1664776329195930u64, - 2095924165508507u64, 994243110271379u64, 1243925610609353u64, 1029845815569727u64, - 1001968867985629u64, 170368934002484u64, 1100906131583801u64, 1825190326449569u64, - 1462285121182096u64, 1545240767016377u64, 797859025652273u64, 1062758326657530u64, - 1125600735118266u64, 739325756774527u64, 1420144485966996u64, 1915492743426702u64, - 752968196344993u64, 882156396938351u64, 1909097048763227u64, 849058590685611u64, - 840754951388500u64, 1832926948808323u64, 2023317100075297u64, 322382745442827u64, - 1569741341737601u64, 1678986113194987u64, 757598994581938u64, 29678659580705u64, - 1239680935977986u64, 1509239427168474u64, 1055981929287006u64, 1894085471158693u64, - 916486225488490u64, 642168890366120u64, 300453362620010u64, 1858797242721481u64, - 2077989823177130u64, 510228455273334u64, 1473284798689270u64, 5173934574301u64, - 765285232030050u64, 1007154707631065u64, 1862128712885972u64, 168873464821340u64, - 1967853269759318u64, 1489896018263031u64, 592451806166369u64, 1242298565603883u64, - 1838918921339058u64, 697532763910695u64, 294335466239059u64, 135687058387449u64, - 2133734403874176u64, 2121911143127699u64, 20222476737364u64, 1200824626476747u64, - 1397731736540791u64, 702378430231418u64, 59059527640068u64, 460992547183981u64, - 
1016125857842765u64, 1273530839608957u64, 96724128829301u64, 1313433042425233u64, - 3543822857227u64, 761975685357118u64, 110417360745248u64, 1079634164577663u64, - 2044574510020457u64, 338709058603120u64, 94541336042799u64, 127963233585039u64, - 94427896272258u64, 1143501979342182u64, 1217958006212230u64, 2153887831492134u64, - 1519219513255575u64, 251793195454181u64, 392517349345200u64, 1507033011868881u64, - 2208494254670752u64, 1364389582694359u64, 2214069430728063u64, 1272814257105752u64, - 741450148906352u64, 1105776675555685u64, 824447222014984u64, 528745219306376u64, - 589427609121575u64, 1501786838809155u64, 379067373073147u64, 184909476589356u64, - 1346887560616185u64, 1932023742314082u64, 1633302311869264u64, 1685314821133069u64, - 1836610282047884u64, 1595571594397150u64, 615441688872198u64, 1926435616702564u64, - 235632180396480u64, 1051918343571810u64, 2150570051687050u64, 879198845408738u64, - 1443966275205464u64, 481362545245088u64, 512807443532642u64, 641147578283480u64, - 1594276116945596u64, 1844812743300602u64, 2044559316019485u64, 202620777969020u64, - 852992984136302u64, 1500869642692910u64, 1085216217052457u64, 1736294372259758u64, - 2009666354486552u64, 1262389020715248u64, 1166527705256867u64, 1409917450806036u64, - 1705819160057637u64, 1116901782584378u64, 1278460472285473u64, 257879811360157u64, - 40314007176886u64, 701309846749639u64, 1380457676672777u64, 631519782380272u64, - 1196339573466793u64, 955537708940017u64, 532725633381530u64, 641190593731833u64, - 7214357153807u64, 481922072107983u64, 1634886189207352u64, 1247659758261633u64, - 1655809614786430u64, 43105797900223u64, 76205809912607u64, 1936575107455823u64, - 1107927314642236u64, 2199986333469333u64, 802974829322510u64, 718173128143482u64, - 539385184235615u64, 2075693785611221u64, 953281147333690u64, 1623571637172587u64, - 655274535022250u64, 1568078078819021u64, 101142125049712u64, 1488441673350881u64, - 1457969561944515u64, 1492622544287712u64, 2041460689280803u64, 
1961848091392887u64, - 461003520846938u64, 934728060399807u64, 117723291519705u64, 1027773762863526u64, - 56765304991567u64, 2184028379550479u64, 1768767711894030u64, 1304432068983172u64, - 498080974452325u64, 2134905654858163u64, 1446137427202647u64, 551613831549590u64, - 680288767054205u64, 1278113339140386u64, 378149431842614u64, 80520494426960u64, - 2080985256348782u64, 673432591799820u64, 739189463724560u64, 1847191452197509u64, - 527737312871602u64, 477609358840073u64, 1891633072677946u64, 1841456828278466u64, - 2242502936489002u64, 524791829362709u64, 276648168514036u64, 991706903257619u64, - 512580228297906u64, 1216855104975946u64, 67030930303149u64, 769593945208213u64, - 2048873385103577u64, 455635274123107u64, 2077404927176696u64, 1803539634652306u64, - 1837579953843417u64, 1564240068662828u64, 1964310918970435u64, 832822906252492u64, - 1516044634195010u64, 770571447506889u64, 602215152486818u64, 1760828333136947u64, - 730156776030376u64]; +pub(crate) const precomp_g_pow2_128_table_w4: [u64; 320] = [ + 0u64, + 0u64, + 0u64, + 0u64, + 0u64, + 1u64, + 0u64, + 0u64, + 0u64, + 0u64, + 1u64, + 0u64, + 0u64, + 0u64, + 0u64, + 0u64, + 0u64, + 0u64, + 0u64, + 0u64, + 557549315715710u64, + 196756086293855u64, + 846062225082495u64, + 1865068224838092u64, + 991112090754908u64, + 522916421512828u64, + 2098523346722375u64, + 1135633221747012u64, + 858420432114866u64, + 186358544306082u64, + 1044420411868480u64, + 2080052304349321u64, + 557301814716724u64, + 1305130257814057u64, + 2126012765451197u64, + 1441004402875101u64, + 353948968859203u64, + 470765987164835u64, + 1507675957683570u64, + 1086650358745097u64, + 1911913434398388u64, + 66086091117182u64, + 1137511952425971u64, + 36958263512141u64, + 2193310025325256u64, + 1085191426269045u64, + 1232148267909446u64, + 1449894406170117u64, + 1241416717139557u64, + 1940876999212868u64, + 829758415918121u64, + 309608450373449u64, + 2228398547683851u64, + 1580623271960188u64, + 1675601502456740u64, + 1360363115493548u64, + 
1098397313096815u64, + 1809255384359797u64, + 1458261916834384u64, + 210682545649705u64, + 1606836641068115u64, + 1230478270405318u64, + 1843192771547802u64, + 1794596343564051u64, + 229060710252162u64, + 2169742775467181u64, + 701467067318072u64, + 696018499035555u64, + 521051885339807u64, + 158329567901874u64, + 740426481832143u64, + 1369811177301441u64, + 503351589084015u64, + 1781114827942261u64, + 1650493549693035u64, + 2174562418345156u64, + 456517194809244u64, + 2052761522121179u64, + 2233342271123682u64, + 1445872925177435u64, + 1131882576902813u64, + 220765848055241u64, + 1280259961403769u64, + 1581497080160712u64, + 1477441080108824u64, + 218428165202767u64, + 1970598141278907u64, + 643366736173069u64, + 2167909426804014u64, + 834993711408259u64, + 1922437166463212u64, + 1900036281472252u64, + 513794844386304u64, + 1297904164900114u64, + 1147626295373268u64, + 1910101606251299u64, + 182933838633381u64, + 806229530787362u64, + 155511666433200u64, + 290522463375462u64, + 534373523491751u64, + 1302938814480515u64, + 1664979184120445u64, + 304235649499423u64, + 339284524318609u64, + 1881717946973483u64, + 1670802286833842u64, + 2223637120675737u64, + 135818919485814u64, + 1144856572842792u64, + 2234981613434386u64, + 963917024969826u64, + 402275378284993u64, + 141532417412170u64, + 921537468739387u64, + 963905069722607u64, + 1405442890733358u64, + 1567763927164655u64, + 1664776329195930u64, + 2095924165508507u64, + 994243110271379u64, + 1243925610609353u64, + 1029845815569727u64, + 1001968867985629u64, + 170368934002484u64, + 1100906131583801u64, + 1825190326449569u64, + 1462285121182096u64, + 1545240767016377u64, + 797859025652273u64, + 1062758326657530u64, + 1125600735118266u64, + 739325756774527u64, + 1420144485966996u64, + 1915492743426702u64, + 752968196344993u64, + 882156396938351u64, + 1909097048763227u64, + 849058590685611u64, + 840754951388500u64, + 1832926948808323u64, + 2023317100075297u64, + 322382745442827u64, + 1569741341737601u64, + 
1678986113194987u64, + 757598994581938u64, + 29678659580705u64, + 1239680935977986u64, + 1509239427168474u64, + 1055981929287006u64, + 1894085471158693u64, + 916486225488490u64, + 642168890366120u64, + 300453362620010u64, + 1858797242721481u64, + 2077989823177130u64, + 510228455273334u64, + 1473284798689270u64, + 5173934574301u64, + 765285232030050u64, + 1007154707631065u64, + 1862128712885972u64, + 168873464821340u64, + 1967853269759318u64, + 1489896018263031u64, + 592451806166369u64, + 1242298565603883u64, + 1838918921339058u64, + 697532763910695u64, + 294335466239059u64, + 135687058387449u64, + 2133734403874176u64, + 2121911143127699u64, + 20222476737364u64, + 1200824626476747u64, + 1397731736540791u64, + 702378430231418u64, + 59059527640068u64, + 460992547183981u64, + 1016125857842765u64, + 1273530839608957u64, + 96724128829301u64, + 1313433042425233u64, + 3543822857227u64, + 761975685357118u64, + 110417360745248u64, + 1079634164577663u64, + 2044574510020457u64, + 338709058603120u64, + 94541336042799u64, + 127963233585039u64, + 94427896272258u64, + 1143501979342182u64, + 1217958006212230u64, + 2153887831492134u64, + 1519219513255575u64, + 251793195454181u64, + 392517349345200u64, + 1507033011868881u64, + 2208494254670752u64, + 1364389582694359u64, + 2214069430728063u64, + 1272814257105752u64, + 741450148906352u64, + 1105776675555685u64, + 824447222014984u64, + 528745219306376u64, + 589427609121575u64, + 1501786838809155u64, + 379067373073147u64, + 184909476589356u64, + 1346887560616185u64, + 1932023742314082u64, + 1633302311869264u64, + 1685314821133069u64, + 1836610282047884u64, + 1595571594397150u64, + 615441688872198u64, + 1926435616702564u64, + 235632180396480u64, + 1051918343571810u64, + 2150570051687050u64, + 879198845408738u64, + 1443966275205464u64, + 481362545245088u64, + 512807443532642u64, + 641147578283480u64, + 1594276116945596u64, + 1844812743300602u64, + 2044559316019485u64, + 202620777969020u64, + 852992984136302u64, + 1500869642692910u64, + 
1085216217052457u64, + 1736294372259758u64, + 2009666354486552u64, + 1262389020715248u64, + 1166527705256867u64, + 1409917450806036u64, + 1705819160057637u64, + 1116901782584378u64, + 1278460472285473u64, + 257879811360157u64, + 40314007176886u64, + 701309846749639u64, + 1380457676672777u64, + 631519782380272u64, + 1196339573466793u64, + 955537708940017u64, + 532725633381530u64, + 641190593731833u64, + 7214357153807u64, + 481922072107983u64, + 1634886189207352u64, + 1247659758261633u64, + 1655809614786430u64, + 43105797900223u64, + 76205809912607u64, + 1936575107455823u64, + 1107927314642236u64, + 2199986333469333u64, + 802974829322510u64, + 718173128143482u64, + 539385184235615u64, + 2075693785611221u64, + 953281147333690u64, + 1623571637172587u64, + 655274535022250u64, + 1568078078819021u64, + 101142125049712u64, + 1488441673350881u64, + 1457969561944515u64, + 1492622544287712u64, + 2041460689280803u64, + 1961848091392887u64, + 461003520846938u64, + 934728060399807u64, + 117723291519705u64, + 1027773762863526u64, + 56765304991567u64, + 2184028379550479u64, + 1768767711894030u64, + 1304432068983172u64, + 498080974452325u64, + 2134905654858163u64, + 1446137427202647u64, + 551613831549590u64, + 680288767054205u64, + 1278113339140386u64, + 378149431842614u64, + 80520494426960u64, + 2080985256348782u64, + 673432591799820u64, + 739189463724560u64, + 1847191452197509u64, + 527737312871602u64, + 477609358840073u64, + 1891633072677946u64, + 1841456828278466u64, + 2242502936489002u64, + 524791829362709u64, + 276648168514036u64, + 991706903257619u64, + 512580228297906u64, + 1216855104975946u64, + 67030930303149u64, + 769593945208213u64, + 2048873385103577u64, + 455635274123107u64, + 2077404927176696u64, + 1803539634652306u64, + 1837579953843417u64, + 1564240068662828u64, + 1964310918970435u64, + 832822906252492u64, + 1516044634195010u64, + 770571447506889u64, + 602215152486818u64, + 1760828333136947u64, + 730156776030376u64, +]; -pub(crate) const 
precomp_g_pow2_192_table_w4: [u64; 320] = - [0u64, 0u64, 0u64, 0u64, 0u64, 1u64, 0u64, 0u64, 0u64, 0u64, 1u64, 0u64, 0u64, 0u64, 0u64, 0u64, - 0u64, 0u64, 0u64, 0u64, 1129953239743101u64, 1240339163956160u64, 61002583352401u64, - 2017604552196030u64, 1576867829229863u64, 1508654942849389u64, 270111619664077u64, - 1253097517254054u64, 721798270973250u64, 161923365415298u64, 828530877526011u64, - 1494851059386763u64, 662034171193976u64, 1315349646974670u64, 2199229517308806u64, - 497078277852673u64, 1310507715989956u64, 1881315714002105u64, 2214039404983803u64, - 1331036420272667u64, 296286697520787u64, 1179367922639127u64, 25348441419697u64, - 2200984961703188u64, 150893128908291u64, 1978614888570852u64, 1539657347172046u64, - 553810196523619u64, 246017573977646u64, 1440448985385485u64, 346049108099981u64, - 601166606218546u64, 855822004151713u64, 1957521326383188u64, 1114240380430887u64, - 1349639675122048u64, 957375954499040u64, 111551795360136u64, 618586733648988u64, - 490708840688866u64, 1267002049697314u64, 1130723224930028u64, 215603029480828u64, - 1277138555414710u64, 1556750324971322u64, 1407903521793741u64, 1836836546590749u64, - 576500297444199u64, 2074707599091135u64, 1826239864380012u64, 1935365705983312u64, - 239501825683682u64, 1594236669034980u64, 1283078975055301u64, 856745636255925u64, - 1342128647959981u64, 945216428379689u64, 938746202496410u64, 105775123333919u64, - 1379852610117266u64, 1770216827500275u64, 1016017267535704u64, 1902885522469532u64, - 994184703730489u64, 2227487538793763u64, 53155967096055u64, 1264120808114350u64, - 1334928769376729u64, 393911808079997u64, 826229239481845u64, 1827903006733192u64, - 1449283706008465u64, 1258040415217849u64, 1641484112868370u64, 1140150841968176u64, - 391113338021313u64, 162138667815833u64, 742204396566060u64, 110709233440557u64, - 90179377432917u64, 530511949644489u64, 911568635552279u64, 135869304780166u64, - 617719999563692u64, 1802525001631319u64, 1836394639510490u64, 1862739456475085u64, - 
1378284444664288u64, 1617882529391756u64, 876124429891172u64, 1147654641445091u64, - 1476943370400542u64, 688601222759067u64, 2120281968990205u64, 1387113236912611u64, - 2125245820685788u64, 1030674016350092u64, 1594684598654247u64, 1165939511879820u64, - 271499323244173u64, 546587254515484u64, 945603425742936u64, 1242252568170226u64, - 561598728058142u64, 604827091794712u64, 19869753585186u64, 565367744708915u64, - 536755754533603u64, 1767258313589487u64, 907952975936127u64, 292851652613937u64, - 163573546237963u64, 837601408384564u64, 591996990118301u64, 2126051747693057u64, - 182247548824566u64, 908369044122868u64, 1335442699947273u64, 2234292296528612u64, - 689537529333034u64, 2174778663790714u64, 1011407643592667u64, 1856130618715473u64, - 1557437221651741u64, 2250285407006102u64, 1412384213410827u64, 1428042038612456u64, - 962709733973660u64, 313995703125919u64, 1844969155869325u64, 787716782673657u64, - 622504542173478u64, 930119043384654u64, 2128870043952488u64, 537781531479523u64, - 1556666269904940u64, 417333635741346u64, 1986743846438415u64, 877620478041197u64, - 2205624582983829u64, 595260668884488u64, 2025159350373157u64, 2091659716088235u64, - 1423634716596391u64, 653686638634080u64, 1972388399989956u64, 795575741798014u64, - 889240107997846u64, 1446156876910732u64, 1028507012221776u64, 1071697574586478u64, - 1689630411899691u64, 604092816502174u64, 1909917373896122u64, 1602544877643837u64, - 1227177032923867u64, 62684197535630u64, 186146290753883u64, 414449055316766u64, - 1560555880866750u64, 157579947096755u64, 230526795502384u64, 1197673369665894u64, - 593779215869037u64, 214638834474097u64, 1796344443484478u64, 493550548257317u64, - 1628442824033694u64, 1410811655893495u64, 1009361960995171u64, 604736219740352u64, - 392445928555351u64, 1254295770295706u64, 1958074535046128u64, 508699942241019u64, - 739405911261325u64, 1678760393882409u64, 517763708545996u64, 640040257898722u64, - 384966810872913u64, 407454748380128u64, 152604679407451u64, 
185102854927662u64, - 1448175503649595u64, 100328519208674u64, 1153263667012830u64, 1643926437586490u64, - 609632142834154u64, 980984004749261u64, 855290732258779u64, 2186022163021506u64, - 1254052618626070u64, 1850030517182611u64, 162348933090207u64, 1948712273679932u64, - 1331832516262191u64, 1219400369175863u64, 89689036937483u64, 1554886057235815u64, - 1520047528432789u64, 81263957652811u64, 146612464257008u64, 2207945627164163u64, - 919846660682546u64, 1925694087906686u64, 2102027292388012u64, 887992003198635u64, - 1817924871537027u64, 746660005584342u64, 753757153275525u64, 91394270908699u64, - 511837226544151u64, 736341543649373u64, 1256371121466367u64, 1977778299551813u64, - 817915174462263u64, 1602323381418035u64, 190035164572930u64, 603796401391181u64, - 2152666873671669u64, 1813900316324112u64, 1292622433358041u64, 888439870199892u64, - 978918155071994u64, 534184417909805u64, 466460084317313u64, 1275223140288685u64, - 786407043883517u64, 1620520623925754u64, 1753625021290269u64, 751937175104525u64, - 905301961820613u64, 697059847245437u64, 584919033981144u64, 1272165506533156u64, - 1532180021450866u64, 1901407354005301u64, 1421319720492586u64, 2179081609765456u64, - 2193253156667632u64, 1080248329608584u64, 2158422436462066u64, 759167597017850u64, - 545759071151285u64, 641600428493698u64, 943791424499848u64, 469571542427864u64, - 951117845222467u64, 1780538594373407u64, 614611122040309u64, 1354826131886963u64, - 221898131992340u64, 1145699723916219u64, 798735379961769u64, 1843560518208287u64, - 1424523160161545u64, 205549016574779u64, 2239491587362749u64, 1918363582399888u64, - 1292183072788455u64, 1783513123192567u64, 1584027954317205u64, 1890421443925740u64, - 1718459319874929u64, 1522091040748809u64, 399467600667219u64, 1870973059066576u64, - 287514433150348u64, 1397845311152885u64, 1880440629872863u64, 709302939340341u64, - 1813571361109209u64, 86598795876860u64, 1146964554310612u64, 1590956584862432u64, - 2097004628155559u64, 656227622102390u64, 
1808500445541891u64, 958336726523135u64, - 2007604569465975u64, 313504950390997u64, 1399686004953620u64, 1759732788465234u64, - 1562539721055836u64, 1575722765016293u64, 793318366641259u64, 443876859384887u64, - 547308921989704u64, 636698687503328u64, 2179175835287340u64, 498333551718258u64, - 932248760026176u64, 1612395686304653u64, 2179774103745626u64, 1359658123541018u64, - 171488501802442u64, 1625034951791350u64, 520196922773633u64, 1873787546341877u64, - 303457823885368u64]; +pub(crate) const precomp_g_pow2_192_table_w4: [u64; 320] = [ + 0u64, + 0u64, + 0u64, + 0u64, + 0u64, + 1u64, + 0u64, + 0u64, + 0u64, + 0u64, + 1u64, + 0u64, + 0u64, + 0u64, + 0u64, + 0u64, + 0u64, + 0u64, + 0u64, + 0u64, + 1129953239743101u64, + 1240339163956160u64, + 61002583352401u64, + 2017604552196030u64, + 1576867829229863u64, + 1508654942849389u64, + 270111619664077u64, + 1253097517254054u64, + 721798270973250u64, + 161923365415298u64, + 828530877526011u64, + 1494851059386763u64, + 662034171193976u64, + 1315349646974670u64, + 2199229517308806u64, + 497078277852673u64, + 1310507715989956u64, + 1881315714002105u64, + 2214039404983803u64, + 1331036420272667u64, + 296286697520787u64, + 1179367922639127u64, + 25348441419697u64, + 2200984961703188u64, + 150893128908291u64, + 1978614888570852u64, + 1539657347172046u64, + 553810196523619u64, + 246017573977646u64, + 1440448985385485u64, + 346049108099981u64, + 601166606218546u64, + 855822004151713u64, + 1957521326383188u64, + 1114240380430887u64, + 1349639675122048u64, + 957375954499040u64, + 111551795360136u64, + 618586733648988u64, + 490708840688866u64, + 1267002049697314u64, + 1130723224930028u64, + 215603029480828u64, + 1277138555414710u64, + 1556750324971322u64, + 1407903521793741u64, + 1836836546590749u64, + 576500297444199u64, + 2074707599091135u64, + 1826239864380012u64, + 1935365705983312u64, + 239501825683682u64, + 1594236669034980u64, + 1283078975055301u64, + 856745636255925u64, + 1342128647959981u64, + 945216428379689u64, + 
938746202496410u64, + 105775123333919u64, + 1379852610117266u64, + 1770216827500275u64, + 1016017267535704u64, + 1902885522469532u64, + 994184703730489u64, + 2227487538793763u64, + 53155967096055u64, + 1264120808114350u64, + 1334928769376729u64, + 393911808079997u64, + 826229239481845u64, + 1827903006733192u64, + 1449283706008465u64, + 1258040415217849u64, + 1641484112868370u64, + 1140150841968176u64, + 391113338021313u64, + 162138667815833u64, + 742204396566060u64, + 110709233440557u64, + 90179377432917u64, + 530511949644489u64, + 911568635552279u64, + 135869304780166u64, + 617719999563692u64, + 1802525001631319u64, + 1836394639510490u64, + 1862739456475085u64, + 1378284444664288u64, + 1617882529391756u64, + 876124429891172u64, + 1147654641445091u64, + 1476943370400542u64, + 688601222759067u64, + 2120281968990205u64, + 1387113236912611u64, + 2125245820685788u64, + 1030674016350092u64, + 1594684598654247u64, + 1165939511879820u64, + 271499323244173u64, + 546587254515484u64, + 945603425742936u64, + 1242252568170226u64, + 561598728058142u64, + 604827091794712u64, + 19869753585186u64, + 565367744708915u64, + 536755754533603u64, + 1767258313589487u64, + 907952975936127u64, + 292851652613937u64, + 163573546237963u64, + 837601408384564u64, + 591996990118301u64, + 2126051747693057u64, + 182247548824566u64, + 908369044122868u64, + 1335442699947273u64, + 2234292296528612u64, + 689537529333034u64, + 2174778663790714u64, + 1011407643592667u64, + 1856130618715473u64, + 1557437221651741u64, + 2250285407006102u64, + 1412384213410827u64, + 1428042038612456u64, + 962709733973660u64, + 313995703125919u64, + 1844969155869325u64, + 787716782673657u64, + 622504542173478u64, + 930119043384654u64, + 2128870043952488u64, + 537781531479523u64, + 1556666269904940u64, + 417333635741346u64, + 1986743846438415u64, + 877620478041197u64, + 2205624582983829u64, + 595260668884488u64, + 2025159350373157u64, + 2091659716088235u64, + 1423634716596391u64, + 653686638634080u64, + 1972388399989956u64, 
+ 795575741798014u64, + 889240107997846u64, + 1446156876910732u64, + 1028507012221776u64, + 1071697574586478u64, + 1689630411899691u64, + 604092816502174u64, + 1909917373896122u64, + 1602544877643837u64, + 1227177032923867u64, + 62684197535630u64, + 186146290753883u64, + 414449055316766u64, + 1560555880866750u64, + 157579947096755u64, + 230526795502384u64, + 1197673369665894u64, + 593779215869037u64, + 214638834474097u64, + 1796344443484478u64, + 493550548257317u64, + 1628442824033694u64, + 1410811655893495u64, + 1009361960995171u64, + 604736219740352u64, + 392445928555351u64, + 1254295770295706u64, + 1958074535046128u64, + 508699942241019u64, + 739405911261325u64, + 1678760393882409u64, + 517763708545996u64, + 640040257898722u64, + 384966810872913u64, + 407454748380128u64, + 152604679407451u64, + 185102854927662u64, + 1448175503649595u64, + 100328519208674u64, + 1153263667012830u64, + 1643926437586490u64, + 609632142834154u64, + 980984004749261u64, + 855290732258779u64, + 2186022163021506u64, + 1254052618626070u64, + 1850030517182611u64, + 162348933090207u64, + 1948712273679932u64, + 1331832516262191u64, + 1219400369175863u64, + 89689036937483u64, + 1554886057235815u64, + 1520047528432789u64, + 81263957652811u64, + 146612464257008u64, + 2207945627164163u64, + 919846660682546u64, + 1925694087906686u64, + 2102027292388012u64, + 887992003198635u64, + 1817924871537027u64, + 746660005584342u64, + 753757153275525u64, + 91394270908699u64, + 511837226544151u64, + 736341543649373u64, + 1256371121466367u64, + 1977778299551813u64, + 817915174462263u64, + 1602323381418035u64, + 190035164572930u64, + 603796401391181u64, + 2152666873671669u64, + 1813900316324112u64, + 1292622433358041u64, + 888439870199892u64, + 978918155071994u64, + 534184417909805u64, + 466460084317313u64, + 1275223140288685u64, + 786407043883517u64, + 1620520623925754u64, + 1753625021290269u64, + 751937175104525u64, + 905301961820613u64, + 697059847245437u64, + 584919033981144u64, + 1272165506533156u64, + 
1532180021450866u64, + 1901407354005301u64, + 1421319720492586u64, + 2179081609765456u64, + 2193253156667632u64, + 1080248329608584u64, + 2158422436462066u64, + 759167597017850u64, + 545759071151285u64, + 641600428493698u64, + 943791424499848u64, + 469571542427864u64, + 951117845222467u64, + 1780538594373407u64, + 614611122040309u64, + 1354826131886963u64, + 221898131992340u64, + 1145699723916219u64, + 798735379961769u64, + 1843560518208287u64, + 1424523160161545u64, + 205549016574779u64, + 2239491587362749u64, + 1918363582399888u64, + 1292183072788455u64, + 1783513123192567u64, + 1584027954317205u64, + 1890421443925740u64, + 1718459319874929u64, + 1522091040748809u64, + 399467600667219u64, + 1870973059066576u64, + 287514433150348u64, + 1397845311152885u64, + 1880440629872863u64, + 709302939340341u64, + 1813571361109209u64, + 86598795876860u64, + 1146964554310612u64, + 1590956584862432u64, + 2097004628155559u64, + 656227622102390u64, + 1808500445541891u64, + 958336726523135u64, + 2007604569465975u64, + 313504950390997u64, + 1399686004953620u64, + 1759732788465234u64, + 1562539721055836u64, + 1575722765016293u64, + 793318366641259u64, + 443876859384887u64, + 547308921989704u64, + 636698687503328u64, + 2179175835287340u64, + 498333551718258u64, + 932248760026176u64, + 1612395686304653u64, + 2179774103745626u64, + 1359658123541018u64, + 171488501802442u64, + 1625034951791350u64, + 520196922773633u64, + 1873787546341877u64, + 303457823885368u64, +]; -pub(crate) const precomp_basepoint_table_w5: [u64; 640] = - [0u64, 0u64, 0u64, 0u64, 0u64, 1u64, 0u64, 0u64, 0u64, 0u64, 1u64, 0u64, 0u64, 0u64, 0u64, 0u64, - 0u64, 0u64, 0u64, 0u64, 1738742601995546u64, 1146398526822698u64, 2070867633025821u64, - 562264141797630u64, 587772402128613u64, 1801439850948184u64, 1351079888211148u64, - 450359962737049u64, 900719925474099u64, 1801439850948198u64, 1u64, 0u64, 0u64, 0u64, 0u64, - 1841354044333475u64, 16398895984059u64, 755974180946558u64, 900171276175154u64, - 1821297809914039u64, 
1661154287933054u64, 284530020860578u64, 1390261174866914u64, - 1524110943907984u64, 1045603498418422u64, 928651508580478u64, 1383326941296346u64, - 961937908925785u64, 80455759693706u64, 904734540352947u64, 1507481815385608u64, - 2223447444246085u64, 1083941587175919u64, 2059929906842505u64, 1581435440146976u64, - 782730187692425u64, 9928394897574u64, 1539449519985236u64, 1923587931078510u64, - 552919286076056u64, 376925408065760u64, 447320488831784u64, 1362918338468019u64, - 1470031896696846u64, 2189796996539902u64, 1337552949959847u64, 1762287177775726u64, - 237994495816815u64, 1277840395970544u64, 543972849007241u64, 1224692671618814u64, - 162359533289271u64, 282240927125249u64, 586909166382289u64, 17726488197838u64, - 377014554985659u64, 1433835303052512u64, 702061469493692u64, 1142253108318154u64, - 318297794307551u64, 954362646308543u64, 517363881452320u64, 1868013482130416u64, - 262562472373260u64, 902232853249919u64, 2107343057055746u64, 462368348619024u64, - 1893758677092974u64, 2177729767846389u64, 2168532543559143u64, 443867094639821u64, - 730169342581022u64, 1564589016879755u64, 51218195700649u64, 76684578423745u64, - 560266272480743u64, 922517457707697u64, 2066645939860874u64, 1318277348414638u64, - 1576726809084003u64, 1817337608563665u64, 1874240939237666u64, 754733726333910u64, - 97085310406474u64, 751148364309235u64, 1622159695715187u64, 1444098819684916u64, - 130920805558089u64, 1260449179085308u64, 1860021740768461u64, 110052860348509u64, - 193830891643810u64, 164148413933881u64, 180017794795332u64, 1523506525254651u64, - 465981629225956u64, 559733514964572u64, 1279624874416974u64, 2026642326892306u64, - 1425156829982409u64, 2160936383793147u64, 1061870624975247u64, 2023497043036941u64, - 117942212883190u64, 490339622800774u64, 1729931303146295u64, 422305932971074u64, - 529103152793096u64, 1211973233775992u64, 721364955929681u64, 1497674430438813u64, - 342545521275073u64, 2102107575279372u64, 2108462244669966u64, 1382582406064082u64, - 
2206396818383323u64, 2109093268641147u64, 10809845110983u64, 1605176920880099u64, - 744640650753946u64, 1712758897518129u64, 373410811281809u64, 648838265800209u64, - 813058095530999u64, 513987632620169u64, 465516160703329u64, 2136322186126330u64, - 1979645899422932u64, 1197131006470786u64, 1467836664863979u64, 1340751381374628u64, - 1810066212667962u64, 1009933588225499u64, 1106129188080873u64, 1388980405213901u64, - 533719246598044u64, 1169435803073277u64, 198920999285821u64, 487492330629854u64, - 1807093008537778u64, 1540899012923865u64, 2075080271659867u64, 1527990806921523u64, - 1323728742908002u64, 1568595959608205u64, 1388032187497212u64, 2026968840050568u64, - 1396591153295755u64, 820416950170901u64, 520060313205582u64, 2016404325094901u64, - 1584709677868520u64, 272161374469956u64, 1567188603996816u64, 1986160530078221u64, - 553930264324589u64, 1058426729027503u64, 8762762886675u64, 2216098143382988u64, - 1835145266889223u64, 1712936431558441u64, 1017009937844974u64, 585361667812740u64, - 2114711541628181u64, 2238729632971439u64, 121257546253072u64, 847154149018345u64, - 211972965476684u64, 287499084460129u64, 2098247259180197u64, 839070411583329u64, - 339551619574372u64, 1432951287640743u64, 526481249498942u64, 931991661905195u64, - 1884279965674487u64, 200486405604411u64, 364173020594788u64, 518034455936955u64, - 1085564703965501u64, 16030410467927u64, 604865933167613u64, 1695298441093964u64, - 498856548116159u64, 2193030062787034u64, 1706339802964179u64, 1721199073493888u64, - 820740951039755u64, 1216053436896834u64, 23954895815139u64, 1662515208920491u64, - 1705443427511899u64, 1957928899570365u64, 1189636258255725u64, 1795695471103809u64, - 1691191297654118u64, 282402585374360u64, 460405330264832u64, 63765529445733u64, - 469763447404473u64, 733607089694996u64, 685410420186959u64, 1096682630419738u64, - 1162548510542362u64, 1020949526456676u64, 1211660396870573u64, 613126398222696u64, - 1117829165843251u64, 742432540886650u64, 1483755088010658u64, 
942392007134474u64, - 1447834130944107u64, 489368274863410u64, 23192985544898u64, 648442406146160u64, - 785438843373876u64, 249464684645238u64, 170494608205618u64, 335112827260550u64, - 1462050123162735u64, 1084803668439016u64, 853459233600325u64, 215777728187495u64, - 1965759433526974u64, 1349482894446537u64, 694163317612871u64, 860536766165036u64, - 1178788094084321u64, 1652739626626996u64, 2115723946388185u64, 1577204379094664u64, - 1083882859023240u64, 1768759143381635u64, 1737180992507258u64, 246054513922239u64, - 577253134087234u64, 356340280578042u64, 1638917769925142u64, 223550348130103u64, - 470592666638765u64, 22663573966996u64, 596552461152400u64, 364143537069499u64, - 3942119457699u64, 107951982889287u64, 1843471406713209u64, 1625773041610986u64, - 1466141092501702u64, 1043024095021271u64, 310429964047508u64, 98559121500372u64, - 152746933782868u64, 259407205078261u64, 828123093322585u64, 1576847274280091u64, - 1170871375757302u64, 1588856194642775u64, 984767822341977u64, 1141497997993760u64, - 809325345150796u64, 1879837728202511u64, 201340910657893u64, 1079157558888483u64, - 1052373448588065u64, 1732036202501778u64, 2105292670328445u64, 679751387312402u64, - 1679682144926229u64, 1695823455818780u64, 498852317075849u64, 1786555067788433u64, - 1670727545779425u64, 117945875433544u64, 407939139781844u64, 854632120023778u64, - 1413383148360437u64, 286030901733673u64, 1207361858071196u64, 461340408181417u64, - 1096919590360164u64, 1837594897475685u64, 533755561544165u64, 1638688042247712u64, - 1431653684793005u64, 1036458538873559u64, 390822120341779u64, 1920929837111618u64, - 543426740024168u64, 645751357799929u64, 2245025632994463u64, 1550778638076452u64, - 223738153459949u64, 1337209385492033u64, 1276967236456531u64, 1463815821063071u64, - 2070620870191473u64, 1199170709413753u64, 273230877394166u64, 1873264887608046u64, - 890877152910775u64, 983226445635730u64, 44873798519521u64, 697147127512130u64, - 961631038239304u64, 709966160696826u64, 
1706677689540366u64, 502782733796035u64, - 812545535346033u64, 1693622521296452u64, 1955813093002510u64, 1259937612881362u64, - 1873032503803559u64, 1140330566016428u64, 1675726082440190u64, 60029928909786u64, - 170335608866763u64, 766444312315022u64, 2025049511434113u64, 2200845622430647u64, - 1201269851450408u64, 590071752404907u64, 1400995030286946u64, 2152637413853822u64, - 2108495473841983u64, 3855406710349u64, 1726137673168580u64, 51004317200100u64, - 1749082328586939u64, 1704088976144558u64, 1977318954775118u64, 2062602253162400u64, - 948062503217479u64, 361953965048030u64, 1528264887238440u64, 62582552172290u64, - 2241602163389280u64, 156385388121765u64, 2124100319761492u64, 388928050571382u64, - 1556123596922727u64, 979310669812384u64, 113043855206104u64, 2023223924825469u64, - 643651703263034u64, 2234446903655540u64, 1577241261424997u64, 860253174523845u64, - 1691026473082448u64, 1091672764933872u64, 1957463109756365u64, 530699502660193u64, - 349587141723569u64, 674661681919563u64, 1633727303856240u64, 708909037922144u64, - 2160722508518119u64, 1302188051602540u64, 976114603845777u64, 120004758721939u64, - 1681630708873780u64, 622274095069244u64, 1822346309016698u64, 1100921177951904u64, - 2216952659181677u64, 1844020550362490u64, 1976451368365774u64, 1321101422068822u64, - 1189859436282668u64, 2008801879735257u64, 2219413454333565u64, 424288774231098u64, - 359793146977912u64, 270293357948703u64, 587226003677000u64, 1482071926139945u64, - 1419630774650359u64, 1104739070570175u64, 1662129023224130u64, 1609203612533411u64, - 1250932720691980u64, 95215711818495u64, 498746909028150u64, 158151296991874u64, - 1201379988527734u64, 561599945143989u64, 2211577425617888u64, 2166577612206324u64, - 1057590354233512u64, 1968123280416769u64, 1316586165401313u64, 762728164447634u64, - 2045395244316047u64, 1531796898725716u64, 315385971670425u64, 1109421039396756u64, - 2183635256408562u64, 1896751252659461u64, 840236037179080u64, 796245792277211u64, - 508345890111193u64, 
1275386465287222u64, 513560822858784u64, 1784735733120313u64, - 1346467478899695u64, 601125231208417u64, 701076661112726u64, 1841998436455089u64, - 1156768600940434u64, 1967853462343221u64, 2178318463061452u64, 481885520752741u64, - 675262828640945u64, 1033539418596582u64, 1743329872635846u64, 159322641251283u64, - 1573076470127113u64, 954827619308195u64, 778834750662635u64, 619912782122617u64, - 515681498488209u64, 1675866144246843u64, 811716020969981u64, 1125515272217398u64, - 1398917918287342u64, 1301680949183175u64, 726474739583734u64, 587246193475200u64, - 1096581582611864u64, 1469911826213486u64, 1990099711206364u64, 1256496099816508u64, - 2019924615195672u64, 1251232456707555u64, 2042971196009755u64, 214061878479265u64, - 115385726395472u64, 1677875239524132u64, 756888883383540u64, 1153862117756233u64, - 503391530851096u64, 946070017477513u64, 1878319040542579u64, 1101349418586920u64, - 793245696431613u64, 397920495357645u64, 2174023872951112u64, 1517867915189593u64, - 1829855041462995u64, 1046709983503619u64, 424081940711857u64, 2112438073094647u64, - 1504338467349861u64, 2244574127374532u64, 2136937537441911u64, 1741150838990304u64, - 25894628400571u64, 512213526781178u64, 1168384260796379u64, 1424607682379833u64, - 938677789731564u64, 872882241891896u64, 1713199397007700u64, 1410496326218359u64, - 854379752407031u64, 465141611727634u64, 315176937037857u64, 1020115054571233u64, - 1856290111077229u64, 2028366269898204u64, 1432980880307543u64, 469932710425448u64, - 581165267592247u64, 496399148156603u64, 2063435226705903u64, 2116841086237705u64, - 498272567217048u64, 1829438076967906u64, 1573925801278491u64, 460763576329867u64, - 1705264723728225u64, 999514866082412u64, 29635061779362u64, 1884233592281020u64, - 1449755591461338u64, 42579292783222u64, 1869504355369200u64, 495506004805251u64, - 264073104888427u64, 2088880861028612u64, 104646456386576u64, 1258445191399967u64, - 1348736801545799u64, 2068276361286613u64, 884897216646374u64, 922387476801376u64, - 
1043886580402805u64, 1240883498470831u64, 1601554651937110u64, 804382935289482u64, - 512379564477239u64, 1466384519077032u64, 1280698500238386u64, 211303836685749u64, - 2081725624793803u64, 545247644516879u64, 215313359330384u64, 286479751145614u64, - 2213650281751636u64, 2164927945999874u64, 2072162991540882u64, 1443769115444779u64, - 1581473274363095u64, 434633875922699u64, 340456055781599u64, 373043091080189u64, - 839476566531776u64, 1856706858509978u64, 931616224909153u64, 1888181317414065u64, - 213654322650262u64, 1161078103416244u64, 1822042328851513u64, 915817709028812u64, - 1828297056698188u64, 1212017130909403u64, 60258343247333u64, 342085800008230u64, - 930240559508270u64, 1549884999174952u64, 809895264249462u64, 184726257947682u64, - 1157065433504828u64, 1209999630381477u64, 999920399374391u64, 1714770150788163u64, - 2026130985413228u64, 506776632883140u64, 1349042668246528u64, 1937232292976967u64, - 942302637530730u64, 160211904766226u64, 1042724500438571u64, 212454865139142u64, - 244104425172642u64, 1376990622387496u64, 76126752421227u64, 1027540886376422u64, - 1912210655133026u64, 13410411589575u64, 1475856708587773u64, 615563352691682u64, - 1446629324872644u64, 1683670301784014u64, 1049873327197127u64, 1826401704084838u64, - 2032577048760775u64, 1922203607878853u64, 836708788764806u64, 2193084654695012u64, - 1342923183256659u64, 849356986294271u64, 1228863973965618u64, 94886161081867u64, - 1423288430204892u64, 2016167528707016u64, 1633187660972877u64, 1550621242301752u64, - 340630244512994u64, 2103577710806901u64, 221625016538931u64, 421544147350960u64, - 580428704555156u64, 1479831381265617u64, 518057926544698u64, 955027348790630u64, - 1326749172561598u64, 1118304625755967u64, 1994005916095176u64, 1799757332780663u64, - 751343129396941u64, 1468672898746144u64, 1451689964451386u64, 755070293921171u64, - 904857405877052u64, 1276087530766984u64, 403986562858511u64, 1530661255035337u64, - 1644972908910502u64, 1370170080438957u64, 139839536695744u64, 
909930462436512u64, - 1899999215356933u64, 635992381064566u64, 788740975837654u64, 224241231493695u64, - 1267090030199302u64, 998908061660139u64, 1784537499699278u64, 859195370018706u64, - 1953966091439379u64, 2189271820076010u64, 2039067059943978u64, 1526694380855202u64, - 2040321513194941u64, 329922071218689u64, 1953032256401326u64, 989631424403521u64, - 328825014934242u64, 9407151397696u64, 63551373671268u64, 1624728632895792u64, - 1608324920739262u64, 1178239350351945u64, 1198077399579702u64, 277620088676229u64, - 1775359437312528u64, 1653558177737477u64, 1652066043408850u64, 1063359889686622u64, - 1975063804860653u64]; +pub(crate) const precomp_basepoint_table_w5: [u64; 640] = [ + 0u64, + 0u64, + 0u64, + 0u64, + 0u64, + 1u64, + 0u64, + 0u64, + 0u64, + 0u64, + 1u64, + 0u64, + 0u64, + 0u64, + 0u64, + 0u64, + 0u64, + 0u64, + 0u64, + 0u64, + 1738742601995546u64, + 1146398526822698u64, + 2070867633025821u64, + 562264141797630u64, + 587772402128613u64, + 1801439850948184u64, + 1351079888211148u64, + 450359962737049u64, + 900719925474099u64, + 1801439850948198u64, + 1u64, + 0u64, + 0u64, + 0u64, + 0u64, + 1841354044333475u64, + 16398895984059u64, + 755974180946558u64, + 900171276175154u64, + 1821297809914039u64, + 1661154287933054u64, + 284530020860578u64, + 1390261174866914u64, + 1524110943907984u64, + 1045603498418422u64, + 928651508580478u64, + 1383326941296346u64, + 961937908925785u64, + 80455759693706u64, + 904734540352947u64, + 1507481815385608u64, + 2223447444246085u64, + 1083941587175919u64, + 2059929906842505u64, + 1581435440146976u64, + 782730187692425u64, + 9928394897574u64, + 1539449519985236u64, + 1923587931078510u64, + 552919286076056u64, + 376925408065760u64, + 447320488831784u64, + 1362918338468019u64, + 1470031896696846u64, + 2189796996539902u64, + 1337552949959847u64, + 1762287177775726u64, + 237994495816815u64, + 1277840395970544u64, + 543972849007241u64, + 1224692671618814u64, + 162359533289271u64, + 282240927125249u64, + 586909166382289u64, + 
17726488197838u64, + 377014554985659u64, + 1433835303052512u64, + 702061469493692u64, + 1142253108318154u64, + 318297794307551u64, + 954362646308543u64, + 517363881452320u64, + 1868013482130416u64, + 262562472373260u64, + 902232853249919u64, + 2107343057055746u64, + 462368348619024u64, + 1893758677092974u64, + 2177729767846389u64, + 2168532543559143u64, + 443867094639821u64, + 730169342581022u64, + 1564589016879755u64, + 51218195700649u64, + 76684578423745u64, + 560266272480743u64, + 922517457707697u64, + 2066645939860874u64, + 1318277348414638u64, + 1576726809084003u64, + 1817337608563665u64, + 1874240939237666u64, + 754733726333910u64, + 97085310406474u64, + 751148364309235u64, + 1622159695715187u64, + 1444098819684916u64, + 130920805558089u64, + 1260449179085308u64, + 1860021740768461u64, + 110052860348509u64, + 193830891643810u64, + 164148413933881u64, + 180017794795332u64, + 1523506525254651u64, + 465981629225956u64, + 559733514964572u64, + 1279624874416974u64, + 2026642326892306u64, + 1425156829982409u64, + 2160936383793147u64, + 1061870624975247u64, + 2023497043036941u64, + 117942212883190u64, + 490339622800774u64, + 1729931303146295u64, + 422305932971074u64, + 529103152793096u64, + 1211973233775992u64, + 721364955929681u64, + 1497674430438813u64, + 342545521275073u64, + 2102107575279372u64, + 2108462244669966u64, + 1382582406064082u64, + 2206396818383323u64, + 2109093268641147u64, + 10809845110983u64, + 1605176920880099u64, + 744640650753946u64, + 1712758897518129u64, + 373410811281809u64, + 648838265800209u64, + 813058095530999u64, + 513987632620169u64, + 465516160703329u64, + 2136322186126330u64, + 1979645899422932u64, + 1197131006470786u64, + 1467836664863979u64, + 1340751381374628u64, + 1810066212667962u64, + 1009933588225499u64, + 1106129188080873u64, + 1388980405213901u64, + 533719246598044u64, + 1169435803073277u64, + 198920999285821u64, + 487492330629854u64, + 1807093008537778u64, + 1540899012923865u64, + 2075080271659867u64, + 1527990806921523u64, 
+ 1323728742908002u64, + 1568595959608205u64, + 1388032187497212u64, + 2026968840050568u64, + 1396591153295755u64, + 820416950170901u64, + 520060313205582u64, + 2016404325094901u64, + 1584709677868520u64, + 272161374469956u64, + 1567188603996816u64, + 1986160530078221u64, + 553930264324589u64, + 1058426729027503u64, + 8762762886675u64, + 2216098143382988u64, + 1835145266889223u64, + 1712936431558441u64, + 1017009937844974u64, + 585361667812740u64, + 2114711541628181u64, + 2238729632971439u64, + 121257546253072u64, + 847154149018345u64, + 211972965476684u64, + 287499084460129u64, + 2098247259180197u64, + 839070411583329u64, + 339551619574372u64, + 1432951287640743u64, + 526481249498942u64, + 931991661905195u64, + 1884279965674487u64, + 200486405604411u64, + 364173020594788u64, + 518034455936955u64, + 1085564703965501u64, + 16030410467927u64, + 604865933167613u64, + 1695298441093964u64, + 498856548116159u64, + 2193030062787034u64, + 1706339802964179u64, + 1721199073493888u64, + 820740951039755u64, + 1216053436896834u64, + 23954895815139u64, + 1662515208920491u64, + 1705443427511899u64, + 1957928899570365u64, + 1189636258255725u64, + 1795695471103809u64, + 1691191297654118u64, + 282402585374360u64, + 460405330264832u64, + 63765529445733u64, + 469763447404473u64, + 733607089694996u64, + 685410420186959u64, + 1096682630419738u64, + 1162548510542362u64, + 1020949526456676u64, + 1211660396870573u64, + 613126398222696u64, + 1117829165843251u64, + 742432540886650u64, + 1483755088010658u64, + 942392007134474u64, + 1447834130944107u64, + 489368274863410u64, + 23192985544898u64, + 648442406146160u64, + 785438843373876u64, + 249464684645238u64, + 170494608205618u64, + 335112827260550u64, + 1462050123162735u64, + 1084803668439016u64, + 853459233600325u64, + 215777728187495u64, + 1965759433526974u64, + 1349482894446537u64, + 694163317612871u64, + 860536766165036u64, + 1178788094084321u64, + 1652739626626996u64, + 2115723946388185u64, + 1577204379094664u64, + 1083882859023240u64, 
+ 1768759143381635u64, + 1737180992507258u64, + 246054513922239u64, + 577253134087234u64, + 356340280578042u64, + 1638917769925142u64, + 223550348130103u64, + 470592666638765u64, + 22663573966996u64, + 596552461152400u64, + 364143537069499u64, + 3942119457699u64, + 107951982889287u64, + 1843471406713209u64, + 1625773041610986u64, + 1466141092501702u64, + 1043024095021271u64, + 310429964047508u64, + 98559121500372u64, + 152746933782868u64, + 259407205078261u64, + 828123093322585u64, + 1576847274280091u64, + 1170871375757302u64, + 1588856194642775u64, + 984767822341977u64, + 1141497997993760u64, + 809325345150796u64, + 1879837728202511u64, + 201340910657893u64, + 1079157558888483u64, + 1052373448588065u64, + 1732036202501778u64, + 2105292670328445u64, + 679751387312402u64, + 1679682144926229u64, + 1695823455818780u64, + 498852317075849u64, + 1786555067788433u64, + 1670727545779425u64, + 117945875433544u64, + 407939139781844u64, + 854632120023778u64, + 1413383148360437u64, + 286030901733673u64, + 1207361858071196u64, + 461340408181417u64, + 1096919590360164u64, + 1837594897475685u64, + 533755561544165u64, + 1638688042247712u64, + 1431653684793005u64, + 1036458538873559u64, + 390822120341779u64, + 1920929837111618u64, + 543426740024168u64, + 645751357799929u64, + 2245025632994463u64, + 1550778638076452u64, + 223738153459949u64, + 1337209385492033u64, + 1276967236456531u64, + 1463815821063071u64, + 2070620870191473u64, + 1199170709413753u64, + 273230877394166u64, + 1873264887608046u64, + 890877152910775u64, + 983226445635730u64, + 44873798519521u64, + 697147127512130u64, + 961631038239304u64, + 709966160696826u64, + 1706677689540366u64, + 502782733796035u64, + 812545535346033u64, + 1693622521296452u64, + 1955813093002510u64, + 1259937612881362u64, + 1873032503803559u64, + 1140330566016428u64, + 1675726082440190u64, + 60029928909786u64, + 170335608866763u64, + 766444312315022u64, + 2025049511434113u64, + 2200845622430647u64, + 1201269851450408u64, + 590071752404907u64, + 
1400995030286946u64, + 2152637413853822u64, + 2108495473841983u64, + 3855406710349u64, + 1726137673168580u64, + 51004317200100u64, + 1749082328586939u64, + 1704088976144558u64, + 1977318954775118u64, + 2062602253162400u64, + 948062503217479u64, + 361953965048030u64, + 1528264887238440u64, + 62582552172290u64, + 2241602163389280u64, + 156385388121765u64, + 2124100319761492u64, + 388928050571382u64, + 1556123596922727u64, + 979310669812384u64, + 113043855206104u64, + 2023223924825469u64, + 643651703263034u64, + 2234446903655540u64, + 1577241261424997u64, + 860253174523845u64, + 1691026473082448u64, + 1091672764933872u64, + 1957463109756365u64, + 530699502660193u64, + 349587141723569u64, + 674661681919563u64, + 1633727303856240u64, + 708909037922144u64, + 2160722508518119u64, + 1302188051602540u64, + 976114603845777u64, + 120004758721939u64, + 1681630708873780u64, + 622274095069244u64, + 1822346309016698u64, + 1100921177951904u64, + 2216952659181677u64, + 1844020550362490u64, + 1976451368365774u64, + 1321101422068822u64, + 1189859436282668u64, + 2008801879735257u64, + 2219413454333565u64, + 424288774231098u64, + 359793146977912u64, + 270293357948703u64, + 587226003677000u64, + 1482071926139945u64, + 1419630774650359u64, + 1104739070570175u64, + 1662129023224130u64, + 1609203612533411u64, + 1250932720691980u64, + 95215711818495u64, + 498746909028150u64, + 158151296991874u64, + 1201379988527734u64, + 561599945143989u64, + 2211577425617888u64, + 2166577612206324u64, + 1057590354233512u64, + 1968123280416769u64, + 1316586165401313u64, + 762728164447634u64, + 2045395244316047u64, + 1531796898725716u64, + 315385971670425u64, + 1109421039396756u64, + 2183635256408562u64, + 1896751252659461u64, + 840236037179080u64, + 796245792277211u64, + 508345890111193u64, + 1275386465287222u64, + 513560822858784u64, + 1784735733120313u64, + 1346467478899695u64, + 601125231208417u64, + 701076661112726u64, + 1841998436455089u64, + 1156768600940434u64, + 1967853462343221u64, + 
2178318463061452u64, + 481885520752741u64, + 675262828640945u64, + 1033539418596582u64, + 1743329872635846u64, + 159322641251283u64, + 1573076470127113u64, + 954827619308195u64, + 778834750662635u64, + 619912782122617u64, + 515681498488209u64, + 1675866144246843u64, + 811716020969981u64, + 1125515272217398u64, + 1398917918287342u64, + 1301680949183175u64, + 726474739583734u64, + 587246193475200u64, + 1096581582611864u64, + 1469911826213486u64, + 1990099711206364u64, + 1256496099816508u64, + 2019924615195672u64, + 1251232456707555u64, + 2042971196009755u64, + 214061878479265u64, + 115385726395472u64, + 1677875239524132u64, + 756888883383540u64, + 1153862117756233u64, + 503391530851096u64, + 946070017477513u64, + 1878319040542579u64, + 1101349418586920u64, + 793245696431613u64, + 397920495357645u64, + 2174023872951112u64, + 1517867915189593u64, + 1829855041462995u64, + 1046709983503619u64, + 424081940711857u64, + 2112438073094647u64, + 1504338467349861u64, + 2244574127374532u64, + 2136937537441911u64, + 1741150838990304u64, + 25894628400571u64, + 512213526781178u64, + 1168384260796379u64, + 1424607682379833u64, + 938677789731564u64, + 872882241891896u64, + 1713199397007700u64, + 1410496326218359u64, + 854379752407031u64, + 465141611727634u64, + 315176937037857u64, + 1020115054571233u64, + 1856290111077229u64, + 2028366269898204u64, + 1432980880307543u64, + 469932710425448u64, + 581165267592247u64, + 496399148156603u64, + 2063435226705903u64, + 2116841086237705u64, + 498272567217048u64, + 1829438076967906u64, + 1573925801278491u64, + 460763576329867u64, + 1705264723728225u64, + 999514866082412u64, + 29635061779362u64, + 1884233592281020u64, + 1449755591461338u64, + 42579292783222u64, + 1869504355369200u64, + 495506004805251u64, + 264073104888427u64, + 2088880861028612u64, + 104646456386576u64, + 1258445191399967u64, + 1348736801545799u64, + 2068276361286613u64, + 884897216646374u64, + 922387476801376u64, + 1043886580402805u64, + 1240883498470831u64, + 
1601554651937110u64, + 804382935289482u64, + 512379564477239u64, + 1466384519077032u64, + 1280698500238386u64, + 211303836685749u64, + 2081725624793803u64, + 545247644516879u64, + 215313359330384u64, + 286479751145614u64, + 2213650281751636u64, + 2164927945999874u64, + 2072162991540882u64, + 1443769115444779u64, + 1581473274363095u64, + 434633875922699u64, + 340456055781599u64, + 373043091080189u64, + 839476566531776u64, + 1856706858509978u64, + 931616224909153u64, + 1888181317414065u64, + 213654322650262u64, + 1161078103416244u64, + 1822042328851513u64, + 915817709028812u64, + 1828297056698188u64, + 1212017130909403u64, + 60258343247333u64, + 342085800008230u64, + 930240559508270u64, + 1549884999174952u64, + 809895264249462u64, + 184726257947682u64, + 1157065433504828u64, + 1209999630381477u64, + 999920399374391u64, + 1714770150788163u64, + 2026130985413228u64, + 506776632883140u64, + 1349042668246528u64, + 1937232292976967u64, + 942302637530730u64, + 160211904766226u64, + 1042724500438571u64, + 212454865139142u64, + 244104425172642u64, + 1376990622387496u64, + 76126752421227u64, + 1027540886376422u64, + 1912210655133026u64, + 13410411589575u64, + 1475856708587773u64, + 615563352691682u64, + 1446629324872644u64, + 1683670301784014u64, + 1049873327197127u64, + 1826401704084838u64, + 2032577048760775u64, + 1922203607878853u64, + 836708788764806u64, + 2193084654695012u64, + 1342923183256659u64, + 849356986294271u64, + 1228863973965618u64, + 94886161081867u64, + 1423288430204892u64, + 2016167528707016u64, + 1633187660972877u64, + 1550621242301752u64, + 340630244512994u64, + 2103577710806901u64, + 221625016538931u64, + 421544147350960u64, + 580428704555156u64, + 1479831381265617u64, + 518057926544698u64, + 955027348790630u64, + 1326749172561598u64, + 1118304625755967u64, + 1994005916095176u64, + 1799757332780663u64, + 751343129396941u64, + 1468672898746144u64, + 1451689964451386u64, + 755070293921171u64, + 904857405877052u64, + 1276087530766984u64, + 
403986562858511u64, + 1530661255035337u64, + 1644972908910502u64, + 1370170080438957u64, + 139839536695744u64, + 909930462436512u64, + 1899999215356933u64, + 635992381064566u64, + 788740975837654u64, + 224241231493695u64, + 1267090030199302u64, + 998908061660139u64, + 1784537499699278u64, + 859195370018706u64, + 1953966091439379u64, + 2189271820076010u64, + 2039067059943978u64, + 1526694380855202u64, + 2040321513194941u64, + 329922071218689u64, + 1953032256401326u64, + 989631424403521u64, + 328825014934242u64, + 9407151397696u64, + 63551373671268u64, + 1624728632895792u64, + 1608324920739262u64, + 1178239350351945u64, + 1198077399579702u64, + 277620088676229u64, + 1775359437312528u64, + 1653558177737477u64, + 1652066043408850u64, + 1063359889686622u64, + 1975063804860653u64, +]; diff --git a/libcrux-hacl-rs/src/fstar/uint16.rs b/libcrux-hacl-rs/src/fstar/uint16.rs index 138e938f7..42f429efc 100644 --- a/libcrux-hacl-rs/src/fstar/uint16.rs +++ b/libcrux-hacl-rs/src/fstar/uint16.rs @@ -1,21 +1,19 @@ -pub fn eq_mask(a: u16, b: u16) -> u16 -{ - let x = a ^ b; - let minus_x = (!x).wrapping_add(1u16); - let x_or_minus_x = x | minus_x; - let xnx = x_or_minus_x.wrapping_shr(15); - xnx.wrapping_sub(1u16) +pub fn eq_mask(a: u16, b: u16) -> u16 { + let x = a ^ b; + let minus_x = (!x).wrapping_add(1u16); + let x_or_minus_x = x | minus_x; + let xnx = x_or_minus_x.wrapping_shr(15); + xnx.wrapping_sub(1u16) } -pub fn gte_mask(a: u16, b: u16) -> u16 -{ - let x = a; - let y = b; - let x_xor_y = x ^ y; - let x_sub_y = x.wrapping_sub(y); - let x_sub_y_xor_y = x_sub_y ^ y; - let q = x_xor_y | x_sub_y_xor_y; - let x_xor_q = x ^ q; - let x_xor_q_ = x_xor_q.wrapping_shr(15); - x_xor_q_.wrapping_sub(1u16) +pub fn gte_mask(a: u16, b: u16) -> u16 { + let x = a; + let y = b; + let x_xor_y = x ^ y; + let x_sub_y = x.wrapping_sub(y); + let x_sub_y_xor_y = x_sub_y ^ y; + let q = x_xor_y | x_sub_y_xor_y; + let x_xor_q = x ^ q; + let x_xor_q_ = x_xor_q.wrapping_shr(15); + 
x_xor_q_.wrapping_sub(1u16) } diff --git a/libcrux-hacl-rs/src/fstar/uint32.rs b/libcrux-hacl-rs/src/fstar/uint32.rs index 95520a4fd..9ea3a652f 100644 --- a/libcrux-hacl-rs/src/fstar/uint32.rs +++ b/libcrux-hacl-rs/src/fstar/uint32.rs @@ -1,21 +1,19 @@ -pub fn eq_mask(a: u32, b: u32) -> u32 -{ - let x = a ^ b; - let minus_x = (!x).wrapping_add(1u32); - let x_or_minus_x = x | minus_x; - let xnx = x_or_minus_x.wrapping_shr(31); - xnx.wrapping_sub(1u32) +pub fn eq_mask(a: u32, b: u32) -> u32 { + let x = a ^ b; + let minus_x = (!x).wrapping_add(1u32); + let x_or_minus_x = x | minus_x; + let xnx = x_or_minus_x.wrapping_shr(31); + xnx.wrapping_sub(1u32) } -pub fn gte_mask(a: u32, b: u32) -> u32 -{ - let x = a; - let y = b; - let x_xor_y = x ^ y; - let x_sub_y = x.wrapping_sub(y); - let x_sub_y_xor_y = x_sub_y ^ y; - let q = x_xor_y | x_sub_y_xor_y; - let x_xor_q = x ^ q; - let x_xor_q_ = x_xor_q.wrapping_shr(31); - x_xor_q_.wrapping_sub(1u32) +pub fn gte_mask(a: u32, b: u32) -> u32 { + let x = a; + let y = b; + let x_xor_y = x ^ y; + let x_sub_y = x.wrapping_sub(y); + let x_sub_y_xor_y = x_sub_y ^ y; + let q = x_xor_y | x_sub_y_xor_y; + let x_xor_q = x ^ q; + let x_xor_q_ = x_xor_q.wrapping_shr(31); + x_xor_q_.wrapping_sub(1u32) } diff --git a/libcrux-hacl-rs/src/fstar/uint64.rs b/libcrux-hacl-rs/src/fstar/uint64.rs index 0c7d45896..4f48d9d4d 100644 --- a/libcrux-hacl-rs/src/fstar/uint64.rs +++ b/libcrux-hacl-rs/src/fstar/uint64.rs @@ -1,21 +1,19 @@ -pub fn eq_mask(a: u64, b: u64) -> u64 -{ - let x = a ^ b; - let minus_x = (!x).wrapping_add(1u64); - let x_or_minus_x = x | minus_x; - let xnx = x_or_minus_x.wrapping_shr(63); - xnx.wrapping_sub(1u64) +pub fn eq_mask(a: u64, b: u64) -> u64 { + let x = a ^ b; + let minus_x = (!x).wrapping_add(1u64); + let x_or_minus_x = x | minus_x; + let xnx = x_or_minus_x.wrapping_shr(63); + xnx.wrapping_sub(1u64) } -pub fn gte_mask(a: u64, b: u64) -> u64 -{ - let x = a; - let y = b; - let x_xor_y = x ^ y; - let x_sub_y = x.wrapping_sub(y); 
- let x_sub_y_xor_y = x_sub_y ^ y; - let q = x_xor_y | x_sub_y_xor_y; - let x_xor_q = x ^ q; - let x_xor_q_ = x_xor_q.wrapping_shr(63); - x_xor_q_.wrapping_sub(1u64) +pub fn gte_mask(a: u64, b: u64) -> u64 { + let x = a; + let y = b; + let x_xor_y = x ^ y; + let x_sub_y = x.wrapping_sub(y); + let x_sub_y_xor_y = x_sub_y ^ y; + let q = x_xor_y | x_sub_y_xor_y; + let x_xor_q = x ^ q; + let x_xor_q_ = x_xor_q.wrapping_shr(63); + x_xor_q_.wrapping_sub(1u64) } diff --git a/libcrux-hacl-rs/src/fstar/uint8.rs b/libcrux-hacl-rs/src/fstar/uint8.rs index 0c80314d8..196e153bc 100644 --- a/libcrux-hacl-rs/src/fstar/uint8.rs +++ b/libcrux-hacl-rs/src/fstar/uint8.rs @@ -1,22 +1,19 @@ -pub fn eq_mask(a: u8, b: u8) -> u8 -{ - let x = a ^ b; - let minus_x = (!x).wrapping_add(1u8); - let x_or_minus_x = x | minus_x; - let xnx = x_or_minus_x.wrapping_shr(7); - xnx.wrapping_sub(1u8) +pub fn eq_mask(a: u8, b: u8) -> u8 { + let x = a ^ b; + let minus_x = (!x).wrapping_add(1u8); + let x_or_minus_x = x | minus_x; + let xnx = x_or_minus_x.wrapping_shr(7); + xnx.wrapping_sub(1u8) } -pub fn gte_mask(a: u8, b: u8) -> u8 -{ - let x = a; - let y = b; - let x_xor_y = x ^ y; - let x_sub_y = x.wrapping_sub(y); - let x_sub_y_xor_y = x_sub_y ^ y; - let q = x_xor_y | x_sub_y_xor_y; - let x_xor_q = x ^ q; - let x_xor_q_ = x_xor_q.wrapping_shr(7); - x_xor_q_.wrapping_sub(1u8) +pub fn gte_mask(a: u8, b: u8) -> u8 { + let x = a; + let y = b; + let x_xor_y = x ^ y; + let x_sub_y = x.wrapping_sub(y); + let x_sub_y_xor_y = x_sub_y ^ y; + let q = x_xor_y | x_sub_y_xor_y; + let x_xor_q = x ^ q; + let x_xor_q_ = x_xor_q.wrapping_shr(7); + x_xor_q_.wrapping_sub(1u8) } - diff --git a/libcrux-hacl-rs/src/lowstar/endianness.rs b/libcrux-hacl-rs/src/lowstar/endianness.rs index 00d3ea9c5..27e227900 100644 --- a/libcrux-hacl-rs/src/lowstar/endianness.rs +++ b/libcrux-hacl-rs/src/lowstar/endianness.rs @@ -6,7 +6,7 @@ pub fn load16_le(bytes: &[u8]) -> u16 { u16::from_le_bytes(bytes[0..2].try_into().unwrap()) } -pub fn 
store16_le(bytes: &mut[u8], x: u16) { +pub fn store16_le(bytes: &mut [u8], x: u16) { bytes[0..2].copy_from_slice(&u16::to_le_bytes(x)) } @@ -14,7 +14,7 @@ pub fn load32_le(bytes: &[u8]) -> u32 { u32::from_le_bytes(bytes[0..4].try_into().unwrap()) } -pub fn store32_le(bytes: &mut[u8], x: u32) { +pub fn store32_le(bytes: &mut [u8], x: u32) { bytes[0..4].copy_from_slice(&u32::to_le_bytes(x)) } @@ -22,7 +22,7 @@ pub fn load64_le(bytes: &[u8]) -> u64 { u64::from_le_bytes(bytes[0..8].try_into().unwrap()) } -pub fn store64_le(bytes: &mut[u8], x: u64) { +pub fn store64_le(bytes: &mut [u8], x: u64) { bytes[0..8].copy_from_slice(&u64::to_le_bytes(x)) } @@ -32,7 +32,7 @@ pub fn load32_be(bytes: &[u8]) -> u32 { u32::from_be_bytes(bytes[0..4].try_into().unwrap()) } -pub fn store32_be(bytes: &mut[u8], x: u32) { +pub fn store32_be(bytes: &mut [u8], x: u32) { bytes[0..4].copy_from_slice(&u32::to_be_bytes(x)) } @@ -40,7 +40,7 @@ pub fn load64_be(bytes: &[u8]) -> u64 { u64::from_be_bytes(bytes[0..8].try_into().unwrap()) } -pub fn store64_be(bytes: &mut[u8], x: u64) { +pub fn store64_be(bytes: &mut [u8], x: u64) { bytes[0..8].copy_from_slice(&u64::to_be_bytes(x)) } @@ -48,6 +48,6 @@ pub fn load128_be(bytes: &[u8]) -> u128 { u128::from_be_bytes(bytes[0..16].try_into().unwrap()) } -pub fn store128_be(bytes: &mut[u8], x: u128) { +pub fn store128_be(bytes: &mut [u8], x: u128) { bytes[0..16].copy_from_slice(&u128::to_be_bytes(x)) } From 7cbcff156ce0eb3d3ab6a96a269529235f4e3eb5 Mon Sep 17 00:00:00 2001 From: "Jan Winkelmann (keks)" Date: Wed, 6 Nov 2024 13:36:36 +0100 Subject: [PATCH 17/18] fix mishap --- libcrux-hkdf/src/hacl_hkdf.rs | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/libcrux-hkdf/src/hacl_hkdf.rs b/libcrux-hkdf/src/hacl_hkdf.rs index 5b4a06dfd..75c3cc748 100644 --- a/libcrux-hkdf/src/hacl_hkdf.rs +++ b/libcrux-hkdf/src/hacl_hkdf.rs @@ -95,7 +95,13 @@ macro_rules! 
impl_hkdf { /// /// Note that this function panics if `salt` or `ikm` is longer than (2**32 - 1) bytes. pub fn extract(prk: &mut [u8; $hash_len], salt: &[u8], ikm: &[u8]) { - $sname::extract(prk, salt, ikm) + libcrux_hacl_rs::hkdf::$extract( + prk, + salt, + checked_u32(salt.len()).unwrap(), + ikm, + checked_u32(ikm.len()).unwrap(), + ); } /// HKDF expand using the pre-key material `prk` and `info`. The output length From a1363a895a4210beac8d7ce774a03286addd00ac Mon Sep 17 00:00:00 2001 From: "Jan Winkelmann (keks)" Date: Wed, 6 Nov 2024 13:37:03 +0100 Subject: [PATCH 18/18] add wycheproof test for ed25519, hkdf and hmac --- Cargo.lock | 18 ++++++++++++++++++ Cargo.toml | 1 + tests/ed25519.rs | 38 ++++++++++++++++++++++++++++++++++++++ tests/hkdf.rs | 35 +++++++++++++++++++++++++++++++++++ tests/hmac.rs | 34 ++++++++++++++++++++++++++++++++++ 5 files changed, 126 insertions(+) create mode 100644 tests/ed25519.rs create mode 100644 tests/hkdf.rs create mode 100644 tests/hmac.rs diff --git a/Cargo.lock b/Cargo.lock index 821ca500e..f3ce8a642 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -499,6 +499,12 @@ dependencies = [ "zeroize", ] +[[package]] +name = "data-encoding" +version = "2.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e8566979429cf69b49a5c740c60791108e86440e8be149bbea4fe54d2c32d6e2" + [[package]] name = "der" version = "0.7.9" @@ -956,6 +962,7 @@ dependencies = [ "serde_json", "wasm-bindgen", "wasm-bindgen-test", + "wycheproof", ] [[package]] @@ -2090,6 +2097,17 @@ version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec" +[[package]] +name = "wycheproof" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "efb3be19abfb206c6adcbdf2007b09b0e8ca1f6530db40c03b42ce8ed4719894" +dependencies = [ + "data-encoding", + "serde", + "serde_json", +] + [[package]] name = "x25519-dalek" 
version = "2.0.1" diff --git a/Cargo.toml b/Cargo.toml index e5b53373e..11e9ac75b 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -94,6 +94,7 @@ serde_json = { version = "1.0" } serde = { version = "1.0", features = ["derive"] } hex = { version = "0.4.3", features = ["serde"] } clap = { version = "4.5", features = ["derive"] } +wycheproof = "0.6.0" [target.'cfg(target_arch = "wasm32")'.dev-dependencies] wasm-bindgen-test = "0.3" diff --git a/tests/ed25519.rs b/tests/ed25519.rs new file mode 100644 index 000000000..224897143 --- /dev/null +++ b/tests/ed25519.rs @@ -0,0 +1,38 @@ +use libcrux::signature::{Algorithm, Ed25519Signature, Signature}; + +#[test] +fn run_wycheproof() { + for test_name in wycheproof::eddsa::TestName::all() { + let _ = match test_name { + wycheproof::eddsa::TestName::Ed25519 => Algorithm::Ed25519, + _ => continue, + }; + let test_set = wycheproof::eddsa::TestSet::load(test_name) + .expect("error loading wycheproof test for name {test_name}"); + + println!("Test Set {test_name:?}"); + for test_group in test_set.test_groups { + let pk = &test_group.key.pk; + + for (i, test) in test_group.tests.into_iter().enumerate() { + let comment = &test.comment; + println!("Test {i}: {comment}"); + + match test.result { + wycheproof::TestResult::Valid => { + let sig = + Signature::Ed25519(Ed25519Signature::from_slice(&test.sig).unwrap()); + libcrux::signature::verify(&test.msg, &sig, pk).unwrap(); + } + wycheproof::TestResult::Invalid => { + Ed25519Signature::from_slice(&test.sig) + .map(Signature::Ed25519) + .and_then(|sig| libcrux::signature::verify(&test.msg, &sig, pk)) + .expect_err("expected error"); + } + _ => unreachable!(), + } + } + } + } +} diff --git a/tests/hkdf.rs b/tests/hkdf.rs new file mode 100644 index 000000000..9d57cfd8b --- /dev/null +++ b/tests/hkdf.rs @@ -0,0 +1,35 @@ +use libcrux_hkdf::Algorithm; +use wycheproof::TestResult; + +#[test] +fn run_wycheproof() { + for test_name in wycheproof::hkdf::TestName::all() { + let alg = match test_name 
{ + wycheproof::hkdf::TestName::HkdfSha1 => continue, + wycheproof::hkdf::TestName::HkdfSha256 => Algorithm::Sha256, + wycheproof::hkdf::TestName::HkdfSha384 => Algorithm::Sha384, + wycheproof::hkdf::TestName::HkdfSha512 => Algorithm::Sha512, + }; + let test_set = wycheproof::hkdf::TestSet::load(test_name) + .expect("error loading wycheproof test for name {test_name}"); + + println!("Test Set {test_name:?}"); + for test_group in test_set.test_groups { + for (i, test) in test_group.tests.into_iter().enumerate() { + let comment = &test.comment; + println!("Test {i}: {comment}"); + + let result = libcrux_hkdf::hkdf(alg, &test.salt, &test.ikm, &test.info, test.size); + match (result, test.result) { + (Ok(okm), TestResult::Valid) => { + assert_eq!(okm.as_slice(), test.okm.as_ref()) + } + (Err(_), TestResult::Invalid) => {} + other => { + panic!("found failing test case: {test:?}, got {other:?}") + } + } + } + } + } +} diff --git a/tests/hmac.rs b/tests/hmac.rs new file mode 100644 index 000000000..930fae419 --- /dev/null +++ b/tests/hmac.rs @@ -0,0 +1,34 @@ +use libcrux_hmac::Algorithm; + +#[test] +fn run_wycheproof() { + for test_name in wycheproof::mac::TestName::all() { + let alg = match test_name { + wycheproof::mac::TestName::HmacSha1 => Algorithm::Sha1, + wycheproof::mac::TestName::HmacSha256 => Algorithm::Sha256, + wycheproof::mac::TestName::HmacSha384 => Algorithm::Sha384, + wycheproof::mac::TestName::HmacSha512 => Algorithm::Sha512, + _ => continue, + }; + let test_set = wycheproof::mac::TestSet::load(test_name) + .expect("error loading wycheproof test for name {test_name}"); + + println!("Test Set {test_name:?}"); + for test_group in test_set.test_groups { + for (i, test) in test_group.tests.into_iter().enumerate() { + let comment = &test.comment; + println!("Test {i}: {comment}"); + + let tag = libcrux_hmac::hmac(alg, &test.key, &test.msg, Some(test.tag.len())); + + match test.result { + wycheproof::TestResult::Valid => assert_eq!(tag.as_slice(), 
test.tag.as_ref()), + wycheproof::TestResult::Invalid => { + assert_ne!(tag.as_slice(), test.tag.as_ref()) + } + _ => unreachable!(), + } + } + } + } +}