From 8960e0648f574d74687b6ff129e48f6a3bbc82ef Mon Sep 17 00:00:00 2001 From: ltdk Date: Sat, 26 Oct 2024 14:02:50 -0400 Subject: [PATCH] Tidy up bigint mul implementations --- library/core/src/lib.rs | 1 + library/core/src/num/int_macros.rs | 111 ++++++++++ library/core/src/num/mod.rs | 290 ++++++++++++++++---------- library/core/src/num/uint_macros.rs | 160 ++++++++++++++ library/core/tests/lib.rs | 7 +- library/core/tests/num/i128.rs | 2 +- library/core/tests/num/i16.rs | 2 +- library/core/tests/num/i32.rs | 2 +- library/core/tests/num/i64.rs | 2 +- library/core/tests/num/i8.rs | 2 +- library/core/tests/num/int_macros.rs | 100 ++++++++- library/core/tests/num/uint_macros.rs | 15 ++ 12 files changed, 574 insertions(+), 120 deletions(-) diff --git a/library/core/src/lib.rs b/library/core/src/lib.rs index dfd9e722468dc..ff6f88edc669d 100644 --- a/library/core/src/lib.rs +++ b/library/core/src/lib.rs @@ -110,6 +110,7 @@ #![cfg_attr(bootstrap, feature(const_fmt_arguments_new))] #![feature(array_ptr_get)] #![feature(asm_experimental_arch)] +#![feature(bigint_helper_methods)] #![feature(const_align_of_val)] #![feature(const_align_of_val_raw)] #![feature(const_align_offset)] diff --git a/library/core/src/num/int_macros.rs b/library/core/src/num/int_macros.rs index 01ecaf2710ff6..a7769fe50394d 100644 --- a/library/core/src/num/int_macros.rs +++ b/library/core/src/num/int_macros.rs @@ -2540,6 +2540,117 @@ macro_rules! int_impl { (a as Self, b) } + /// Calculates the complete product `self * rhs` without the possibility to overflow. + /// + /// This returns the low-order (wrapping) bits and the high-order (overflow) bits + /// of the result as two separate values, in that order. + /// + /// If you also need to add a carry to the wide result, then you want + /// [`Self::carrying_mul`] instead. + /// + /// # Examples + /// + /// Basic usage: + /// + /// Please note that this example is shared between integer types. + /// Which explains why `i32` is used here. + /// + /// ``` + /// #![feature(bigint_helper_methods)] + /// assert_eq!(5i32.widening_mul(-2), (4294967286, -1)); + /// assert_eq!(1_000_000_000i32.widening_mul(-10), (2884901888, -3)); + /// ``` + #[unstable(feature = "bigint_helper_methods", issue = "85532")] + #[rustc_const_unstable(feature = "const_bigint_helper_methods", issue = "85532")] + #[must_use = "this returns the result of the operation, \ + without modifying the original"] + #[rustc_allow_const_fn_unstable(const_bigint_helper_methods)] + #[inline] + pub const fn widening_mul(self, rhs: Self) -> ($UnsignedT, Self) { + self.widening_mul_impl(rhs) + } + + /// Calculates the "full multiplication" `self * rhs + carry` + /// without the possibility to overflow. + /// + /// This returns the low-order (wrapping) bits and the high-order (overflow) bits + /// of the result as two separate values, in that order. + /// + /// Performs "long multiplication" which takes in an extra amount to add, and may return an + /// additional amount of overflow. This allows for chaining together multiple + /// multiplications to create "big integers" which represent larger values. + /// + /// If you don't need the `carry`, then you can use [`Self::widening_mul`] instead. + /// + /// # Examples + /// + /// Basic usage: + /// + /// Please note that this example is shared between integer types. + /// Which explains why `i32` is used here. 
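To make the (low, high) return convention of the signed `widening_mul` above concrete: the first element is the low half of the exact product reinterpreted as unsigned, the second is the sign-carrying high half. A minimal sketch, not part of the patch, that reproduces the split with plain `i64` arithmetic at `i32` width purely for illustration (the helper name `widened` is made up):

```rust
// Sketch only: reproduce the (unsigned low, signed high) split documented for the
// signed widening_mul above, using an i64 reference product at i32 width.
fn widened(a: i32, b: i32) -> (u32, i32) {
    let full = a as i64 * b as i64; // exact product, cannot overflow i64
    (full as u32, (full >> 32) as i32) // low half unsigned, high half keeps the sign
}

fn main() {
    // mirrors the doc examples above
    assert_eq!(widened(5, -2), (4294967286, -1));
    assert_eq!(widened(1_000_000_000, -10), (2884901888, -3));
    // the pair reassembles to the full product
    let (lo, hi) = widened(5, -2);
    assert_eq!(((hi as i64) << 32) + lo as i64, 5i64 * -2);
}
```

Keeping the low half unsigned matches the usual bignum layout, where only the topmost limb carries the sign and all lower limbs are plain unsigned digits.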
+ /// + /// ``` + /// #![feature(bigint_helper_methods)] + /// assert_eq!(5i32.carrying_mul(-2, 0), (4294967286, -1)); + /// assert_eq!(5i32.carrying_mul(-2, 10), (0, 0)); + /// assert_eq!(1_000_000_000i32.carrying_mul(-10, 0), (2884901888, -3)); + /// assert_eq!(1_000_000_000i32.carrying_mul(-10, 10), (2884901898, -3)); + #[doc = concat!("assert_eq!(", + stringify!($SelfT), "::MAX.carrying_mul(", stringify!($SelfT), "::MAX, ", stringify!($SelfT), "::MAX), ", + "(", stringify!($SelfT), "::MAX.unsigned_abs() + 1, ", stringify!($SelfT), "::MAX / 2));" + )] + /// ``` + #[unstable(feature = "bigint_helper_methods", issue = "85532")] + #[rustc_const_unstable(feature = "const_bigint_helper_methods", issue = "85532")] + #[must_use = "this returns the result of the operation, \ + without modifying the original"] + #[rustc_allow_const_fn_unstable(const_bigint_helper_methods)] + #[inline] + pub const fn carrying_mul(self, rhs: Self, carry: Self) -> ($UnsignedT, Self) { + self.carrying_mul_impl(rhs, carry) + } + + /// Calculates the "full multiplication" `self * rhs + carry1 + carry2` + /// without the possibility to overflow. + /// + /// This returns the low-order (wrapping) bits and the high-order (overflow) bits + /// of the result as two separate values, in that order. + /// + /// Performs "long multiplication" which takes in an extra amount to add, and may return an + /// additional amount of overflow. This allows for chaining together multiple + /// multiplications to create "big integers" which represent larger values. + /// + /// If you don't need either `carry`, then you can use [`Self::widening_mul`] instead, + /// and if you only need one `carry`, then you can use [`Self::carrying_mul`] instead. + /// + /// # Examples + /// + /// Basic usage: + /// + /// Please note that this example is shared between integer types. + /// Which explains why `i32` is used here. + /// + /// ``` + /// #![feature(bigint_helper_methods)] + /// assert_eq!(5i32.carrying2_mul(-2, 0, 0), (4294967286, -1)); + /// assert_eq!(5i32.carrying2_mul(-2, 10, 10), (10, 0)); + /// assert_eq!(1_000_000_000i32.carrying2_mul(-10, 0, 0), (2884901888, -3)); + /// assert_eq!(1_000_000_000i32.carrying2_mul(-10, 10, 10), (2884901908, -3)); + #[doc = concat!("assert_eq!(", + stringify!($SelfT), "::MAX.carrying2_mul(", stringify!($SelfT), "::MAX, ", stringify!($SelfT), "::MAX, ", stringify!($SelfT), "::MAX), ", + "(", stringify!($UnsignedT), "::MAX, ", stringify!($SelfT), "::MAX / 2));" + )] + /// ``` + #[unstable(feature = "bigint_helper_methods", issue = "85532")] + #[rustc_const_unstable(feature = "const_bigint_helper_methods", issue = "85532")] + #[must_use = "this returns the result of the operation, \ + without modifying the original"] + #[rustc_allow_const_fn_unstable(const_bigint_helper_methods)] + #[inline] + pub const fn carrying2_mul(self, rhs: Self, carry1: Self, carry2: Self) -> ($UnsignedT, Self) { + self.carrying2_mul_impl(rhs, carry1, carry2) + } + /// Calculates the divisor when `self` is divided by `rhs`. /// /// Returns a tuple of the divisor along with a boolean indicating whether an arithmetic overflow would diff --git a/library/core/src/num/mod.rs b/library/core/src/num/mod.rs index 9a5e211dd6087..b8208f3b413f3 100644 --- a/library/core/src/num/mod.rs +++ b/library/core/src/num/mod.rs @@ -208,131 +208,188 @@ macro_rules! midpoint_impl { macro_rules! widening_impl { ($SelfT:ty, $WideT:ty, $BITS:literal, unsigned) => { - /// Calculates the complete product `self * rhs` without the possibility to overflow. 
- /// - /// This returns the low-order (wrapping) bits and the high-order (overflow) bits - /// of the result as two separate values, in that order. - /// - /// If you also need to add a carry to the wide result, then you want - /// [`Self::carrying_mul`] instead. - /// - /// # Examples - /// - /// Basic usage: - /// - /// Please note that this example is shared between integer types. - /// Which explains why `u32` is used here. - /// - /// ``` - /// #![feature(bigint_helper_methods)] - /// assert_eq!(5u32.widening_mul(2), (10, 0)); - /// assert_eq!(1_000_000_000u32.widening_mul(10), (1410065408, 2)); - /// ``` - #[unstable(feature = "bigint_helper_methods", issue = "85532")] - #[rustc_const_unstable(feature = "const_bigint_helper_methods", issue = "85532")] - #[must_use = "this returns the result of the operation, \ - without modifying the original"] #[inline] - pub const fn widening_mul(self, rhs: Self) -> (Self, Self) { - // note: longer-term this should be done via an intrinsic, - // but for now we can deal without an impl for u128/i128 + const fn widening_mul_impl(self, rhs: $SelfT) -> ($SelfT, $SelfT) { + // note: longer-term this should be done via an intrinsic // SAFETY: overflow will be contained within the wider types let wide = unsafe { (self as $WideT).unchecked_mul(rhs as $WideT) }; (wide as $SelfT, (wide >> $BITS) as $SelfT) } - /// Calculates the "full multiplication" `self * rhs + carry` - /// without the possibility to overflow. - /// - /// This returns the low-order (wrapping) bits and the high-order (overflow) bits - /// of the result as two separate values, in that order. - /// - /// Performs "long multiplication" which takes in an extra amount to add, and may return an - /// additional amount of overflow. This allows for chaining together multiple - /// multiplications to create "big integers" which represent larger values. - /// - /// If you don't need the `carry`, then you can use [`Self::widening_mul`] instead. - /// - /// # Examples - /// - /// Basic usage: - /// - /// Please note that this example is shared between integer types. - /// Which explains why `u32` is used here. - /// - /// ``` - /// #![feature(bigint_helper_methods)] - /// assert_eq!(5u32.carrying_mul(2, 0), (10, 0)); - /// assert_eq!(5u32.carrying_mul(2, 10), (20, 0)); - /// assert_eq!(1_000_000_000u32.carrying_mul(10, 0), (1410065408, 2)); - /// assert_eq!(1_000_000_000u32.carrying_mul(10, 10), (1410065418, 2)); - #[doc = concat!("assert_eq!(", - stringify!($SelfT), "::MAX.carrying_mul(", stringify!($SelfT), "::MAX, ", stringify!($SelfT), "::MAX), ", - "(0, ", stringify!($SelfT), "::MAX));" - )] - /// ``` - /// - /// This is the core operation needed for scalar multiplication when - /// implementing it for wider-than-native types. 
- /// - /// ``` - /// #![feature(bigint_helper_methods)] - /// fn scalar_mul_eq(little_endian_digits: &mut Vec, multiplicand: u16) { - /// let mut carry = 0; - /// for d in little_endian_digits.iter_mut() { - /// (*d, carry) = d.carrying_mul(multiplicand, carry); - /// } - /// if carry != 0 { - /// little_endian_digits.push(carry); - /// } - /// } - /// - /// let mut v = vec![10, 20]; - /// scalar_mul_eq(&mut v, 3); - /// assert_eq!(v, [30, 60]); - /// - /// assert_eq!(0x87654321_u64 * 0xFEED, 0x86D3D159E38D); - /// let mut v = vec![0x4321, 0x8765]; - /// scalar_mul_eq(&mut v, 0xFEED); - /// assert_eq!(v, [0xE38D, 0xD159, 0x86D3]); - /// ``` - /// - /// If `carry` is zero, this is similar to [`overflowing_mul`](Self::overflowing_mul), - /// except that it gives the value of the overflow instead of just whether one happened: - /// - /// ``` - /// #![feature(bigint_helper_methods)] - /// let r = u8::carrying_mul(7, 13, 0); - /// assert_eq!((r.0, r.1 != 0), u8::overflowing_mul(7, 13)); - /// let r = u8::carrying_mul(13, 42, 0); - /// assert_eq!((r.0, r.1 != 0), u8::overflowing_mul(13, 42)); - /// ``` - /// - /// The value of the first field in the returned tuple matches what you'd get - /// by combining the [`wrapping_mul`](Self::wrapping_mul) and - /// [`wrapping_add`](Self::wrapping_add) methods: - /// - /// ``` - /// #![feature(bigint_helper_methods)] - /// assert_eq!( - /// 789_u16.carrying_mul(456, 123).0, - /// 789_u16.wrapping_mul(456).wrapping_add(123), - /// ); - /// ``` - #[unstable(feature = "bigint_helper_methods", issue = "85532")] - #[rustc_const_unstable(feature = "bigint_helper_methods", issue = "85532")] - #[must_use = "this returns the result of the operation, \ - without modifying the original"] #[inline] - pub const fn carrying_mul(self, rhs: Self, carry: Self) -> (Self, Self) { - // note: longer-term this should be done via an intrinsic, - // but for now we can deal without an impl for u128/i128 + const fn carrying_mul_impl(self, rhs: $SelfT, carry: $SelfT) -> ($SelfT, $SelfT) { + // note: longer-term this should be done via an intrinsic // SAFETY: overflow will be contained within the wider types let wide = unsafe { (self as $WideT).unchecked_mul(rhs as $WideT).unchecked_add(carry as $WideT) }; (wide as $SelfT, (wide >> $BITS) as $SelfT) } + + #[inline] + const fn carrying2_mul_impl( + self, + rhs: $SelfT, + carry1: $SelfT, + carry2: $SelfT, + ) -> ($SelfT, $SelfT) { + // note: longer-term this should be done via an intrinsic + // SAFETY: overflow will be contained within the wider types + let wide = unsafe { + (self as $WideT) + .unchecked_mul(rhs as $WideT) + .unchecked_add(carry1 as $WideT) + .unchecked_add(carry2 as $WideT) + }; + (wide as $SelfT, (wide >> $BITS) as $SelfT) + } + }; + ($SelfT:ty, $UnsignedT:ty, $WideT:ty, $BITS:literal, signed) => { + #[inline] + const fn widening_mul_impl(self, rhs: $SelfT) -> ($UnsignedT, $SelfT) { + // note: longer-term this should be done via an intrinsic + // SAFETY: overflow will be contained within the wider types + let wide = unsafe { (self as $WideT).unchecked_mul(rhs as $WideT) }; + (wide as $UnsignedT, (wide >> $BITS) as $SelfT) + } + + #[inline] + const fn carrying_mul_impl(self, rhs: $SelfT, carry: $SelfT) -> ($UnsignedT, $SelfT) { + // note: longer-term this should be done via an intrinsic + // SAFETY: overflow will be contained within the wider types + let wide = unsafe { + (self as $WideT).unchecked_mul(rhs as $WideT).unchecked_add(carry as $WideT) + }; + (wide as $UnsignedT, (wide >> $BITS) as $SelfT) + } + + 
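The SAFETY comments in the `_impl` functions above rest on a range argument: for an N-bit narrow type paired with a 2N-bit wide type, even the extreme product plus two extreme carries stays inside the wide type. A quick check, not part of the patch, instantiated at 8/16 bits only because the numbers are easy to verify by hand:

```rust
// Sketch only: the bound behind the SAFETY comments above, at 8-/16-bit width.
fn main() {
    // unsigned: (2^8 - 1)^2 + 2*(2^8 - 1) = (2^8 - 1)*(2^8 + 1) = 2^16 - 1 exactly
    let m = u8::MAX as u16;
    assert_eq!(m * m + m + m, u16::MAX);

    // signed: the largest value is MIN*MIN plus two MAX carries, and the most
    // negative is MIN*MAX plus two MIN carries; both stay inside i16
    let (min, max) = (i8::MIN as i16, i8::MAX as i16);
    assert!(min * min + max + max <= i16::MAX); // 16384 + 254 = 16638
    assert!(min * max + min + min >= i16::MIN); // -16256 - 256 = -16512
}
```

The same inequalities hold for every narrow/wide pairing that the `widening_impl!` invocations below use, which is why the `unchecked_mul`/`unchecked_add` calls there are sound.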
#[inline] + const fn carrying2_mul_impl( + self, + rhs: $SelfT, + carry1: $SelfT, + carry2: $SelfT, + ) -> ($UnsignedT, $SelfT) { + // note: longer-term this should be done via an intrinsic + // SAFETY: overflow will be contained within the wider types + let wide = unsafe { + (self as $WideT) + .unchecked_mul(rhs as $WideT) + .unchecked_add(carry1 as $WideT) + .unchecked_add(carry2 as $WideT) + }; + (wide as $UnsignedT, (wide >> $BITS) as $SelfT) + } + }; + ($SelfT:ty, $HalfT:ty, $HALF_BITS:literal, unsigned naive) => { + #[rustc_allow_const_fn_unstable(const_bigint_helper_methods)] + #[inline] + const fn widening_mul_impl(self, rhs: $SelfT) -> ($SelfT, $SelfT) { + // note: longer-term this should be done via an intrinsic + let a = (self >> $HALF_BITS) as $HalfT; + let b = self as $HalfT; + let c = (rhs >> $HALF_BITS) as $HalfT; + let d = rhs as $HalfT; + let (p1, p2) = b.widening_mul(d); + let (p2, p31) = b.carrying_mul(c, p2); + let (p2, p32) = a.carrying_mul(d, p2); + let (p3, p4) = a.carrying2_mul(c, p31, p32); + ( + (p1 as $SelfT) | ((p2 as $SelfT) << $HALF_BITS), + (p3 as $SelfT) | ((p4 as $SelfT) << $HALF_BITS), + ) + } + + #[rustc_allow_const_fn_unstable(const_bigint_helper_methods)] + #[inline] + const fn carrying_mul_impl(self, rhs: $SelfT, carry: $SelfT) -> ($SelfT, $SelfT) { + // note: longer-term this should be done via an intrinsic + let (lo, hi) = self.widening_mul(rhs); + let (lo, carry) = lo.overflowing_add(carry); + + // SAFETY: We know that the result cannot overflow, since multiplication has room to add + // a carry. + let hi = unsafe { hi.unchecked_add(carry as $SelfT) }; + + (lo, hi) + } + + #[rustc_allow_const_fn_unstable(const_bigint_helper_methods)] + #[inline] + const fn carrying2_mul_impl( + self, + rhs: $SelfT, + carry1: $SelfT, + carry2: $SelfT, + ) -> ($SelfT, $SelfT) { + // note: longer-term this should be done via an intrinsic + let (lo, hi) = self.carrying_mul(rhs, carry1); + let (lo, carry) = lo.overflowing_add(carry2); + + // SAFETY: We know that the result cannot overflow, since multiplication has room to add + // two carries. + let hi = unsafe { hi.unchecked_add(carry as $SelfT) }; + + (lo, hi) + } + }; + ($SelfT:ty, $UnsignedT:ty, signed naive) => { + #[rustc_allow_const_fn_unstable(const_bigint_helper_methods)] + #[inline] + const fn widening_mul_impl(self, rhs: $SelfT) -> ($UnsignedT, $SelfT) { + // note: longer-term this should be done via an intrinsic + let (lo, hi) = (self as $UnsignedT).widening_mul(rhs as $UnsignedT); + let mut hi = hi as $SelfT; + if self < 0 { + hi = hi.wrapping_sub(rhs); + } + if rhs < 0 { + hi = hi.wrapping_sub(self); + } + (lo, hi) + } + + #[rustc_allow_const_fn_unstable(const_bigint_helper_methods)] + #[inline] + const fn carrying_mul_impl(self, rhs: $SelfT, carry: $SelfT) -> ($UnsignedT, $SelfT) { + // note: longer-term this should be done via an intrinsic + let (lo, hi) = self.widening_mul(rhs); + let carry_sign = carry < 0; + let (lo, carry) = lo.overflowing_add(carry as $UnsignedT); + + // SAFETY: We know that the result cannot overflow, since multiplication has room to add + // a carry. 
+ let hi = unsafe { + // we need to add the "sign extended" version, which is equivalent to subtracting + // by the carry instead of adding + hi.unchecked_add(carry as $SelfT - carry_sign as $SelfT) + }; + + (lo, hi) + } + + #[rustc_allow_const_fn_unstable(const_bigint_helper_methods)] + #[inline] + const fn carrying2_mul_impl( + self, + rhs: $SelfT, + carry1: $SelfT, + carry2: $SelfT, + ) -> ($UnsignedT, $SelfT) { + // note: longer-term this should be done via an intrinsic + let (lo, hi) = self.carrying_mul(rhs, carry1); + let carry_sign = carry2 < 0; + let (lo, carry) = lo.overflowing_add(carry2 as $UnsignedT); + + // SAFETY: We know that the result cannot overflow, since multiplication has room to add + // a two carries. + let hi = unsafe { + // we need to add the "sign extended" version, which is equivalent to subtracting + // by the carry instead of adding + hi.unchecked_add(carry as $SelfT - carry_sign as $SelfT) + }; + (lo, hi) + } }; } @@ -358,6 +415,7 @@ impl i8 { bound_condition = "", } midpoint_impl! { i8, i16, signed } + widening_impl! { i8, u8, i16, 8, signed } } impl i16 { @@ -382,6 +440,7 @@ impl i16 { bound_condition = "", } midpoint_impl! { i16, i32, signed } + widening_impl! { i16, u16, i32, 16, signed } } impl i32 { @@ -406,6 +465,7 @@ impl i32 { bound_condition = "", } midpoint_impl! { i32, i64, signed } + widening_impl! { i32, u32, i64, 32, signed } } impl i64 { @@ -430,6 +490,7 @@ impl i64 { bound_condition = "", } midpoint_impl! { i64, i128, signed } + widening_impl! { i64, u64, i128, 64, signed } } impl i128 { @@ -456,6 +517,7 @@ impl i128 { bound_condition = "", } midpoint_impl! { i128, signed } + widening_impl! { i128, u128, signed naive } } #[cfg(target_pointer_width = "16")] @@ -481,6 +543,7 @@ impl isize { bound_condition = " on 16-bit targets", } midpoint_impl! { isize, i32, signed } + widening_impl! { isize, usize, i32, 16, signed } } #[cfg(target_pointer_width = "32")] @@ -506,6 +569,7 @@ impl isize { bound_condition = " on 32-bit targets", } midpoint_impl! { isize, i64, signed } + widening_impl! { isize, usize, i64, 32, signed } } #[cfg(target_pointer_width = "64")] @@ -531,6 +595,7 @@ impl isize { bound_condition = " on 64-bit targets", } midpoint_impl! { isize, i128, signed } + widening_impl! { isize, usize, i128, 64, signed } } /// If the 6th bit is set ascii is lower case. @@ -1272,6 +1337,7 @@ impl u128 { from_xe_bytes_doc = "", bound_condition = "", } + widening_impl! { u128, u64, 64, unsigned naive } midpoint_impl! { u128, unsigned } } diff --git a/library/core/src/num/uint_macros.rs b/library/core/src/num/uint_macros.rs index 9c5fe563d9306..defcf214b3bcc 100644 --- a/library/core/src/num/uint_macros.rs +++ b/library/core/src/num/uint_macros.rs @@ -2452,6 +2452,166 @@ macro_rules! uint_impl { (a as Self, b) } + + /// Calculates the complete product `self * rhs` without the possibility to overflow. + /// + /// This returns the low-order (wrapping) bits and the high-order (overflow) bits + /// of the result as two separate values, in that order. + /// + /// If you also need to add a carry to the wide result, then you want + /// [`Self::carrying_mul`] instead. + /// + /// # Examples + /// + /// Basic usage: + /// + /// Please note that this example is shared between integer types. + /// Which explains why `u32` is used here. 
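The `signed naive` path above obtains the signed high word from the unsigned product by subtracting each operand from the high half whenever the other operand is negative. That works because an N-bit signed value equals its unsigned reinterpretation minus 2^N when it is negative, so the correction terms land entirely in the high word and the 2^2N term vanishes modulo 2^2N. A scaled-down, exhaustive check of the same trick, not part of the patch, at i8/u8 width where an i16 reference product is available (the function name is made up):

```rust
// Sketch only: exhaustive check of the sign correction used by the `signed naive`
// widening_mul_impl above, scaled down to i8/u8 so an i16 reference exists.
fn widening_mul_i8(a: i8, b: i8) -> (u8, i8) {
    // unsigned product of the reinterpreted operands
    let wide = (a as u8 as u16) * (b as u8 as u16);
    let (lo, mut hi) = (wide as u8, (wide >> 8) as i8);
    // a*b = u(a)*u(b) - 2^8*(b*[a<0] + a*[b<0]) - 2^16*[a<0][b<0];
    // the last term vanishes mod 2^16, the middle one only touches the high byte
    if a < 0 { hi = hi.wrapping_sub(b); }
    if b < 0 { hi = hi.wrapping_sub(a); }
    (lo, hi)
}

fn main() {
    for a in i8::MIN..=i8::MAX {
        for b in i8::MIN..=i8::MAX {
            let full = a as i16 * b as i16;
            assert_eq!(widening_mul_i8(a, b), (full as u8, (full >> 8) as i8));
        }
    }
}
```

The unsigned `naive` arm is the same schoolbook idea unrolled for exactly two half-width limbs: `p1`..`p4` are the column results, and the `carrying`/`carrying2` calls fold the extra addends into the appropriate columns.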
+ /// + /// ``` + /// #![feature(bigint_helper_methods)] + /// assert_eq!(5u32.widening_mul(2), (10, 0)); + /// assert_eq!(1_000_000_000u32.widening_mul(10), (1410065408, 2)); + /// ``` + #[unstable(feature = "bigint_helper_methods", issue = "85532")] + #[rustc_const_unstable(feature = "const_bigint_helper_methods", issue = "85532")] + #[must_use = "this returns the result of the operation, \ + without modifying the original"] + #[rustc_allow_const_fn_unstable(const_bigint_helper_methods)] + #[inline] + pub const fn widening_mul(self, rhs: Self) -> (Self, Self) { + self.widening_mul_impl(rhs) + } + + /// Calculates the "full multiplication" `self * rhs + carry` + /// without the possibility to overflow. + /// + /// This returns the low-order (wrapping) bits and the high-order (overflow) bits + /// of the result as two separate values, in that order. + /// + /// Performs "long multiplication" which takes in an extra amount to add, and may return an + /// additional amount of overflow. This allows for chaining together multiple + /// multiplications to create "big integers" which represent larger values. + /// + /// If you don't need the `carry`, then you can use [`Self::widening_mul`] instead. + /// + /// # Examples + /// + /// Basic usage: + /// + /// Please note that this example is shared between integer types. + /// Which explains why `u32` is used here. + /// + /// ``` + /// #![feature(bigint_helper_methods)] + /// assert_eq!(5u32.carrying_mul(2, 0), (10, 0)); + /// assert_eq!(5u32.carrying_mul(2, 10), (20, 0)); + /// assert_eq!(1_000_000_000u32.carrying_mul(10, 0), (1410065408, 2)); + /// assert_eq!(1_000_000_000u32.carrying_mul(10, 10), (1410065418, 2)); + #[cfg_attr(not(bootstrap), doc = concat!("assert_eq!(", + stringify!($SelfT), "::MAX.carrying_mul(", stringify!($SelfT), "::MAX, ", stringify!($SelfT), "::MAX), ", + "(0, ", stringify!($SelfT), "::MAX));" + ))] + /// ``` + /// + /// This is a core operation needed for scalar multiplication when + /// implementing it for wider-than-native types. 
+ /// + /// ``` + /// #![feature(bigint_helper_methods)] + /// fn scalar_mul_eq(little_endian_digits: &mut Vec, multiplicand: u16) { + /// let mut carry = 0; + /// for d in little_endian_digits.iter_mut() { + /// (*d, carry) = d.carrying_mul(multiplicand, carry); + /// } + /// if carry != 0 { + /// little_endian_digits.push(carry); + /// } + /// } + /// + /// let mut v = vec![10, 20]; + /// scalar_mul_eq(&mut v, 3); + /// assert_eq!(v, [30, 60]); + /// + /// assert_eq!(0x87654321_u64 * 0xFEED, 0x86D3D159E38D); + /// let mut v = vec![0x4321, 0x8765]; + /// scalar_mul_eq(&mut v, 0xFEED); + /// assert_eq!(v, [0xE38D, 0xD159, 0x86D3]); + /// ``` + /// + /// If `carry` is zero, this is similar to [`overflowing_mul`](Self::overflowing_mul), + /// except that it gives the value of the overflow instead of just whether one happened: + /// + /// ``` + /// #![feature(bigint_helper_methods)] + /// let r = u8::carrying_mul(7, 13, 0); + /// assert_eq!((r.0, r.1 != 0), u8::overflowing_mul(7, 13)); + /// let r = u8::carrying_mul(13, 42, 0); + /// assert_eq!((r.0, r.1 != 0), u8::overflowing_mul(13, 42)); + /// ``` + /// + /// The value of the first field in the returned tuple matches what you'd get + /// by combining the [`wrapping_mul`](Self::wrapping_mul) and + /// [`wrapping_add`](Self::wrapping_add) methods: + /// + /// ``` + /// #![feature(bigint_helper_methods)] + /// assert_eq!( + /// 789_u16.carrying_mul(456, 123).0, + /// 789_u16.wrapping_mul(456).wrapping_add(123), + /// ); + /// ``` + #[unstable(feature = "bigint_helper_methods", issue = "85532")] + #[rustc_const_unstable(feature = "const_bigint_helper_methods", issue = "85532")] + #[must_use = "this returns the result of the operation, \ + without modifying the original"] + #[rustc_allow_const_fn_unstable(const_bigint_helper_methods)] + #[inline] + pub const fn carrying_mul(self, rhs: Self, carry: Self) -> (Self, Self) { + self.carrying_mul_impl(rhs, carry) + } + + /// Calculates the "full multiplication" `self * rhs + carry1 + carry2` + /// without the possibility to overflow. + /// + /// This returns the low-order (wrapping) bits and the high-order (overflow) bits + /// of the result as two separate values, in that order. + /// + /// Performs "long multiplication" which takes in an extra amount to add, and may return an + /// additional amount of overflow. This allows for chaining together multiple + /// multiplications to create "big integers" which represent larger values. + /// + /// If you don't need either `carry`, then you can use [`Self::widening_mul`] instead, + /// and if you only need one `carry`, then you can use [`Self::carrying_mul`] instead. + /// + /// # Examples + /// + /// Basic usage: + /// + /// Please note that this example is shared between integer types. + /// Which explains why `u32` is used here. 
+ /// + /// ``` + /// #![feature(bigint_helper_methods)] + /// assert_eq!(5u32.carrying2_mul(2, 0, 0), (10, 0)); + /// assert_eq!(5u32.carrying2_mul(2, 10, 10), (30, 0)); + /// assert_eq!(1_000_000_000u32.carrying2_mul(10, 0, 0), (1410065408, 2)); + /// assert_eq!(1_000_000_000u32.carrying2_mul(10, 10, 10), (1410065428, 2)); + #[doc = concat!("assert_eq!(", + stringify!($SelfT), "::MAX.carrying2_mul(", stringify!($SelfT), "::MAX, ", stringify!($SelfT), "::MAX, ", stringify!($SelfT), "::MAX), ", + "(", stringify!($SelfT), "::MAX, ", stringify!($SelfT), "::MAX));" + )] + /// ``` + #[unstable(feature = "bigint_helper_methods", issue = "85532")] + #[rustc_const_unstable(feature = "const_bigint_helper_methods", issue = "85532")] + #[must_use = "this returns the result of the operation, \ + without modifying the original"] + #[rustc_allow_const_fn_unstable(const_bigint_helper_methods)] + #[inline] + pub const fn carrying2_mul(self, rhs: Self, carry1: Self, carry2: Self) -> (Self, Self) { + self.carrying2_mul_impl(rhs, carry1, carry2) + } + /// Calculates the divisor when `self` is divided by `rhs`. /// /// Returns a tuple of the divisor along with a boolean indicating diff --git a/library/core/tests/lib.rs b/library/core/tests/lib.rs index 8c89871886557..d50b05f10acb4 100644 --- a/library/core/tests/lib.rs +++ b/library/core/tests/lib.rs @@ -109,13 +109,16 @@ /// Version of `assert_matches` that ignores fancy runtime printing in const context and uses structural equality. macro_rules! assert_eq_const_safe { + ($left:expr, $right:expr) => { + assert_eq_const_safe!($left, $right, concat!(stringify!($left), " == ", stringify!($right))); + }; ($left:expr, $right:expr$(, $($arg:tt)+)?) => { { fn runtime() { - assert_eq!($left, $right, $($arg)*); + assert_eq!($left, $right, $($($arg)*),*); } const fn compiletime() { - assert!(matches!($left, const { $right })); + // assert!(matches!($left, const { $right })); } core::intrinsics::const_eval_select((), compiletime, runtime) } diff --git a/library/core/tests/num/i128.rs b/library/core/tests/num/i128.rs index 1ddd20f33d0b1..745fee05164c9 100644 --- a/library/core/tests/num/i128.rs +++ b/library/core/tests/num/i128.rs @@ -1 +1 @@ -int_module!(i128); +int_module!(i128, u128); diff --git a/library/core/tests/num/i16.rs b/library/core/tests/num/i16.rs index c7aa9fff964ed..6acb8371b87d8 100644 --- a/library/core/tests/num/i16.rs +++ b/library/core/tests/num/i16.rs @@ -1 +1 @@ -int_module!(i16); +int_module!(i16, u16); diff --git a/library/core/tests/num/i32.rs b/library/core/tests/num/i32.rs index efd5b1596a80d..38d5071f71d6c 100644 --- a/library/core/tests/num/i32.rs +++ b/library/core/tests/num/i32.rs @@ -1,4 +1,4 @@ -int_module!(i32); +int_module!(i32, u32); #[test] fn test_arith_operation() { diff --git a/library/core/tests/num/i64.rs b/library/core/tests/num/i64.rs index 93d23c10adf7e..f8dd5f9be7fe2 100644 --- a/library/core/tests/num/i64.rs +++ b/library/core/tests/num/i64.rs @@ -1 +1 @@ -int_module!(i64); +int_module!(i64, u64); diff --git a/library/core/tests/num/i8.rs b/library/core/tests/num/i8.rs index 887d4f17d25ff..a10906618c937 100644 --- a/library/core/tests/num/i8.rs +++ b/library/core/tests/num/i8.rs @@ -1 +1 @@ -int_module!(i8); +int_module!(i8, u8); diff --git a/library/core/tests/num/int_macros.rs b/library/core/tests/num/int_macros.rs index 474d57049ab65..062f54ff50361 100644 --- a/library/core/tests/num/int_macros.rs +++ b/library/core/tests/num/int_macros.rs @@ -1,8 +1,10 @@ macro_rules! 
int_module { - ($T:ident) => { + ($T:ident, $U:ident) => { use core::ops::{BitAnd, BitOr, BitXor, Not, Shl, Shr}; use core::$T::*; + const UMAX: $U = $U::MAX; + use crate::num; #[test] @@ -355,6 +357,102 @@ macro_rules! int_module { assert_eq_const_safe!((0 as $T).borrowing_sub(MIN, true), (MAX, false)); } + fn test_widening_mul() { + assert_eq_const_safe!(MAX.widening_mul(MAX), (1, MAX / 2)); + assert_eq_const_safe!(MIN.widening_mul(MAX), (MIN as $U, MIN / 2)); + assert_eq_const_safe!(MIN.widening_mul(MIN), (0, MAX / 2 + 1)); + } + + fn test_carrying_mul() { + assert_eq_const_safe!(MAX.carrying_mul(MAX, 0), (1, MAX / 2)); + assert_eq_const_safe!( + MAX.carrying_mul(MAX, MAX), + (UMAX / 2 + 1, MAX / 2) + ); + assert_eq_const_safe!( + MAX.carrying_mul(MAX, MIN), + (UMAX / 2 + 2, MAX / 2 - 1) + ); + assert_eq_const_safe!(MIN.carrying_mul(MAX, 0), (MIN as $U, MIN / 2)); + assert_eq_const_safe!(MIN.carrying_mul(MAX, MAX), (UMAX, MIN / 2)); + assert_eq_const_safe!(MIN.carrying_mul(MAX, MIN), (0, MIN / 2)); + assert_eq_const_safe!(MIN.carrying_mul(MIN, 0), (0, MAX / 2 + 1)); + assert_eq_const_safe!( + MIN.carrying_mul(MIN, MAX), + (UMAX / 2, MAX / 2 + 1) + ); + assert_eq_const_safe!( + MIN.carrying_mul(MIN, MIN), + (UMAX / 2 + 1, MAX / 2) + ); + } + + fn test_carrying2_mul() { + assert_eq_const_safe!(MAX.carrying2_mul(MAX, 0, 0), (1, MAX / 2)); + assert_eq_const_safe!( + MAX.carrying2_mul(MAX, MAX, 0), + (UMAX / 2 + 1, MAX / 2) + ); + assert_eq_const_safe!( + MAX.carrying2_mul(MAX, MIN, 0), + (UMAX / 2 + 2, MAX / 2 - 1) + ); + assert_eq_const_safe!( + MAX.carrying2_mul(MAX, MAX, MAX), + (UMAX, MAX / 2) + ); + assert_eq_const_safe!( + MAX.carrying2_mul(MAX, MAX, MIN), + (0, MAX / 2) + ); + assert_eq_const_safe!( + MAX.carrying2_mul(MAX, MIN, MIN), + (1, MAX / 2 - 1) + ); + assert_eq_const_safe!( + MIN.carrying2_mul(MAX, 0, 0), + (MIN as $U, MIN / 2) + ); + assert_eq_const_safe!( + MIN.carrying2_mul(MAX, MAX, 0), + (UMAX, MIN / 2) + ); + assert_eq_const_safe!(MIN.carrying2_mul(MAX, MIN, 0), (0, MIN / 2)); + assert_eq_const_safe!( + MIN.carrying2_mul(MAX, MAX, MAX), + (UMAX / 2 - 1, MIN / 2 + 1) + ); + assert_eq_const_safe!( + MIN.carrying2_mul(MAX, MAX, MIN), + (UMAX / 2, MIN / 2) + ); + assert_eq_const_safe!( + MIN.carrying2_mul(MAX, MIN, MIN), + (UMAX / 2 + 1, MIN / 2 - 1) + ); + assert_eq_const_safe!(MIN.carrying2_mul(MIN, 0, 0), (0, MAX / 2 + 1)); + assert_eq_const_safe!( + MIN.carrying2_mul(MIN, MAX, 0), + (UMAX / 2, MAX / 2 + 1) + ); + assert_eq_const_safe!( + MIN.carrying2_mul(MIN, MIN, 0), + (UMAX / 2 + 1, MAX / 2) + ); + assert_eq_const_safe!( + MIN.carrying2_mul(MIN, MAX, MAX), + (UMAX - 1, MAX / 2 + 1) + ); + assert_eq_const_safe!( + MIN.carrying2_mul(MIN, MAX, MIN), + (UMAX, MAX / 2) + ); + assert_eq_const_safe!( + MIN.carrying2_mul(MIN, MIN, MIN), + (0, MAX / 2) + ); + } + fn test_midpoint() { assert_eq_const_safe!(<$T>::midpoint(1, 3), 2); assert_eq_const_safe!(<$T>::midpoint(3, 1), 2); diff --git a/library/core/tests/num/uint_macros.rs b/library/core/tests/num/uint_macros.rs index ad8e48491e829..8732be9c57bcd 100644 --- a/library/core/tests/num/uint_macros.rs +++ b/library/core/tests/num/uint_macros.rs @@ -277,6 +277,21 @@ macro_rules! 
uint_module { assert_eq_const_safe!($T::MAX.borrowing_sub($T::MAX, true), ($T::MAX, true)); } + fn test_widening_mul() { + assert_eq_const_safe!($T::MAX.widening_mul($T::MAX), (1, $T::MAX - 1)); + } + + fn test_carrying_mul() { + assert_eq_const_safe!($T::MAX.carrying_mul($T::MAX, 0), (1, $T::MAX - 1)); + assert_eq_const_safe!($T::MAX.carrying_mul($T::MAX, $T::MAX), (0, $T::MAX)); + } + + fn test_carrying2_mul() { + assert_eq_const_safe!($T::MAX.carrying2_mul($T::MAX, 0, 0), (1, $T::MAX - 1)); + assert_eq_const_safe!($T::MAX.carrying2_mul($T::MAX, $T::MAX, 0), (0, $T::MAX)); + assert_eq_const_safe!($T::MAX.carrying2_mul($T::MAX, $T::MAX, $T::MAX), ($T::MAX, $T::MAX)); + } + fn test_midpoint() { assert_eq_const_safe!(<$T>::midpoint(1, 3), 2); assert_eq_const_safe!(<$T>::midpoint(3, 1), 2);
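The unsigned expectations above fall out of the identity (2^N - 1)^2 = (2^N - 2)*2^N + 1, i.e. `MAX.widening_mul(MAX) == (1, MAX - 1)`, with each additional `MAX` carry adding 2^N - 1 to the low word (wrapping into the high word when needed). For a sense of how the new `carrying2_mul` is meant to be used, here is a hedged sketch (nightly-only, since `bigint_helper_methods` is unstable; the function name and limb layout are made up) of the schoolbook product loop it enables: each step accumulates `out[i + j] + a_i * b_j + carry`, which needs exactly the two extra addends the method accepts.

```rust
#![feature(bigint_helper_methods)]

/// Little-endian schoolbook product of two digit slices (illustrative sketch).
fn schoolbook_mul(a: &[u32], b: &[u32]) -> Vec<u32> {
    let mut out = vec![0u32; a.len() + b.len()];
    for (i, &ai) in a.iter().enumerate() {
        let mut carry = 0u32;
        for (j, &bj) in b.iter().enumerate() {
            // out[i + j] + ai * bj + carry always fits the (lo, hi) pair,
            // which is the no-overflow guarantee carrying2_mul documents above
            (out[i + j], carry) = ai.carrying2_mul(bj, out[i + j], carry);
        }
        out[i + b.len()] = carry; // this limb is still untouched during row i
    }
    out
}

fn main() {
    // (1 << 32) * 3, done in 32-bit limbs
    assert_eq!(schoolbook_mul(&[0, 1], &[3]), vec![0, 3, 0]);
    // (2^32 - 1)^2, matching the carrying2_mul expectation in the tests above
    assert_eq!(schoolbook_mul(&[u32::MAX], &[u32::MAX]), vec![1, u32::MAX - 1]);
}
```

With only `carrying_mul`, the `out[i + j]` addend would need a separate `overflowing_add` and its own carry bookkeeping; folding both addends into the multiply is what the second carry parameter is for.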