diff --git a/README.md b/README.md index 46983a28..06137f3c 100644 --- a/README.md +++ b/README.md @@ -222,7 +222,7 @@ of being added to Rust. - [x] addtf3.c - [x] comparetf2.c -- [ ] divtf3.c +- [x] divtf3.c - [x] extenddftf2.c - [x] extendhfsf2.c - [x] extendhftf2.c diff --git a/build.rs b/build.rs index 5ccff76e..3b2805f8 100644 --- a/build.rs +++ b/build.rs @@ -526,7 +526,6 @@ mod c { ("__floatsitf", "floatsitf.c"), ("__floatunditf", "floatunditf.c"), ("__floatunsitf", "floatunsitf.c"), - ("__divtf3", "divtf3.c"), ("__powitf2", "powitf2.c"), ("__fe_getround", "fp_mode.c"), ("__fe_raise_inexact", "fp_mode.c"), diff --git a/examples/intrinsics.rs b/examples/intrinsics.rs index 8bb70767..6dcd3820 100644 --- a/examples/intrinsics.rs +++ b/examples/intrinsics.rs @@ -256,6 +256,10 @@ mod intrinsics { a * b } + pub fn divtf(a: f128, b: f128) -> f128 { + a / b + } + pub fn subtf(a: f128, b: f128) -> f128 { a - b } @@ -440,6 +444,7 @@ fn run() { bb(aeabi_uldivmod(bb(2), bb(3))); bb(ashlti3(bb(2), bb(2))); bb(ashrti3(bb(2), bb(2))); + bb(divtf(bb(2.), bb(2.))); bb(divti3(bb(2), bb(2))); bb(eqtf(bb(2.), bb(2.))); bb(extendhfdf(bb(2.))); diff --git a/src/float/div.rs b/src/float/div.rs index 2a57ee1a..f085455f 100644 --- a/src/float/div.rs +++ b/src/float/div.rs @@ -1,518 +1,149 @@ -// The functions are complex with many branches, and explicit -// `return`s makes it clear where function exit points are -#![allow(clippy::needless_return)] - -use crate::float::Float; -use crate::int::{CastInto, DInt, HInt, Int, MinInt}; +//! Floating point division routines. +//! +//! This module documentation gives an overview of the method used. More documentation is inline. +//! +//! # Relevant notation +//! +//! - `m_a`: the mantissa of `a`, in base 2 +//! - `p_a`: the exponent of `a`, in base 2. I.e. `a = m_a * 2^p_a` +//! - `uqN` (e.g. `uq1`): this refers to Q notation for fixed-point numbers. UQ1.31 is an unsigned +//! fixed-point number with 1 integral bit, and 31 decimal bits. A `uqN` variable of type `uM` +//! will have N bits of integer and M-N bits of fraction. +//! - `hw`: half width, i.e. for `f64` this will be a `u32`. +//! - `x` is the best estimate of `1/m_b` +//! +//! # Method Overview +//! +//! Division routines must solve for `a / b`, which is `res = m_a*2^p_a / m_b*2^p_b`. The basic +//! process is as follows: +//! +//! - Rearange the exponent and significand to simplify the operations: +//! `res = (m_a / m_b) * 2^{p_a - p_b}`. +//! - Check for early exits (infinity, zero, etc). +//! - If `a` or `b` are subnormal, normalize by shifting the mantissa and adjusting the exponent. +//! - Set the implicit bit so math is correct. +//! - Shift mantissa significant digits (with implicit bit) fully left such that fixed-point UQ1 +//! or UQ0 numbers can be used for mantissa math. These will have greater precision than the +//! actual mantissa, which is important for correct rounding. +//! - Calculate the reciprocal of `m_b`, `x`. +//! - Use the reciprocal to multiply rather than divide: `res = m_a * x_b * 2^{p_a - p_b}`. +//! - Reapply rounding. +//! +//! # Reciprocal calculation +//! +//! Calculating the reciprocal is the most complicated part of this process. It uses the +//! [Newton-Raphson method], which picks an initial estimation (of the reciprocal) and performs +//! a number of iterations to increase its precision. +//! +//! In general, Newton's method takes the following form: +//! +//! ```text +//! `x_n` is a guess or the result of a previous iteration. Increasing `n` converges to the +//! 
desired result. +//! +//! The result approaches a zero of `f(x)` by applying a correction to the previous gues. +//! +//! x_{n+1} = x_n - f(x_n) / f'(x_n) +//! ``` +//! +//! Applying this to find the reciprocal: +//! +//! ```text +//! 1 / x = b +//! +//! Rearrange so we can solve by finding a zero +//! 0 = (1 / x) - b = f(x) +//! +//! f'(x) = -x^{-2} +//! +//! x_{n+1} = 2*x_n - b*x_n^2 +//! ``` +//! +//! This is a process that can be repeated to calculate the reciprocal with enough precision to +//! achieve a correctly rounded result for the overall division operation. The maximum required +//! number of iterations is known since precision doubles with each iteration. +//! +//! # Half-width operations +//! +//! Calculating the reciprocal requires widening multiplication and performing arithmetic on the +//! results, meaning that emulated integer arithmetic on `u128` (for `f64`) and `u256` (for `f128`) +//! gets used instead of native math. +//! +//! To make this more efficient, all but the final operation can be computed using half-width +//! integers. For example, rather than computing four iterations using 128-bit integers for `f64`, +//! we can instead perform three iterations using native 64-bit integers and only one final +//! iteration using the full 128 bits. +//! +//! This works because of precision doubling. Some leeway is allowed here because the fixed-point +//! number has more bits than the final mantissa will. +//! +//! [Newton-Raphson method]: https://en.wikipedia.org/wiki/Newton%27s_method use super::HalfRep; +use crate::float::Float; +use crate::int::{CastFrom, CastInto, DInt, HInt, Int, MinInt}; +use core::mem::size_of; +use core::ops; -fn div32(a: F, b: F) -> F -where - u32: CastInto, - F::Int: CastInto, - i32: CastInto, - F::Int: CastInto, - F::Int: HInt, - ::Int: core::ops::Mul, -{ - const NUMBER_OF_HALF_ITERATIONS: usize = 0; - const NUMBER_OF_FULL_ITERATIONS: usize = 3; - const USE_NATIVE_FULL_ITERATIONS: bool = true; - - let one = F::Int::ONE; - let zero = F::Int::ZERO; - let hw = F::BITS / 2; - let lo_mask = u32::MAX >> hw; - - let significand_bits = F::SIGNIFICAND_BITS; - let max_exponent = F::EXPONENT_MAX; - - let exponent_bias = F::EXPONENT_BIAS; - - let implicit_bit = F::IMPLICIT_BIT; - let significand_mask = F::SIGNIFICAND_MASK; - let sign_bit = F::SIGN_MASK as F::Int; - let abs_mask = sign_bit - one; - let exponent_mask = F::EXPONENT_MASK; - let inf_rep = exponent_mask; - let quiet_bit = implicit_bit >> 1; - let qnan_rep = exponent_mask | quiet_bit; - - #[inline(always)] - fn negate_u32(a: u32) -> u32 { - (::wrapping_neg(a as i32)) as u32 - } - - let a_rep = a.repr(); - let b_rep = b.repr(); - - let a_exponent = (a_rep >> significand_bits) & max_exponent.cast(); - let b_exponent = (b_rep >> significand_bits) & max_exponent.cast(); - let quotient_sign = (a_rep ^ b_rep) & sign_bit; - - let mut a_significand = a_rep & significand_mask; - let mut b_significand = b_rep & significand_mask; - let mut scale = 0; - - // Detect if a or b is zero, denormal, infinity, or NaN. 
- if a_exponent.wrapping_sub(one) >= (max_exponent - 1).cast() - || b_exponent.wrapping_sub(one) >= (max_exponent - 1).cast() - { - let a_abs = a_rep & abs_mask; - let b_abs = b_rep & abs_mask; - - // NaN / anything = qNaN - if a_abs > inf_rep { - return F::from_repr(a_rep | quiet_bit); - } - // anything / NaN = qNaN - if b_abs > inf_rep { - return F::from_repr(b_rep | quiet_bit); - } - - if a_abs == inf_rep { - if b_abs == inf_rep { - // infinity / infinity = NaN - return F::from_repr(qnan_rep); - } else { - // infinity / anything else = +/- infinity - return F::from_repr(a_abs | quotient_sign); - } - } - - // anything else / infinity = +/- 0 - if b_abs == inf_rep { - return F::from_repr(quotient_sign); - } - - if a_abs == zero { - if b_abs == zero { - // zero / zero = NaN - return F::from_repr(qnan_rep); - } else { - // zero / anything else = +/- zero - return F::from_repr(quotient_sign); - } - } - - // anything else / zero = +/- infinity - if b_abs == zero { - return F::from_repr(inf_rep | quotient_sign); - } - - // one or both of a or b is denormal, the other (if applicable) is a - // normal number. Renormalize one or both of a and b, and set scale to - // include the necessary exponent adjustment. - if a_abs < implicit_bit { - let (exponent, significand) = F::normalize(a_significand); - scale += exponent; - a_significand = significand; - } - - if b_abs < implicit_bit { - let (exponent, significand) = F::normalize(b_significand); - scale -= exponent; - b_significand = significand; - } - } - - // Set the implicit significand bit. If we fell through from the - // denormal path it was already set by normalize( ), but setting it twice - // won't hurt anything. - a_significand |= implicit_bit; - b_significand |= implicit_bit; - - let written_exponent: i32 = CastInto::::cast( - a_exponent - .wrapping_sub(b_exponent) - .wrapping_add(scale.cast()), - ) - .wrapping_add(exponent_bias) as i32; - let b_uq1 = b_significand << (F::BITS - significand_bits - 1); - - // Align the significand of b as a UQ1.(n-1) fixed-point number in the range - // [1.0, 2.0) and get a UQ0.n approximate reciprocal using a small minimax - // polynomial approximation: x0 = 3/4 + 1/sqrt(2) - b/2. - // The max error for this approximation is achieved at endpoints, so - // abs(x0(b) - 1/b) <= abs(x0(1) - 1/1) = 3/4 - 1/sqrt(2) = 0.04289..., - // which is about 4.5 bits. - // The initial approximation is between x0(1.0) = 0.9571... and x0(2.0) = 0.4571... - - // Then, refine the reciprocal estimate using a quadratically converging - // Newton-Raphson iteration: - // x_{n+1} = x_n * (2 - x_n * b) - // - // Let b be the original divisor considered "in infinite precision" and - // obtained from IEEE754 representation of function argument (with the - // implicit bit set). Corresponds to rep_t-sized b_UQ1 represented in - // UQ1.(W-1). - // - // Let b_hw be an infinitely precise number obtained from the highest (HW-1) - // bits of divisor significand (with the implicit bit set). Corresponds to - // half_rep_t-sized b_UQ1_hw represented in UQ1.(HW-1) that is a **truncated** - // version of b_UQ1. - // - // Let e_n := x_n - 1/b_hw - // E_n := x_n - 1/b - // abs(E_n) <= abs(e_n) + (1/b_hw - 1/b) - // = abs(e_n) + (b - b_hw) / (b*b_hw) - // <= abs(e_n) + 2 * 2^-HW - - // rep_t-sized iterations may be slower than the corresponding half-width - // variant depending on the handware and whether single/double/quad precision - // is selected. 
- // NB: Using half-width iterations increases computation errors due to - // rounding, so error estimations have to be computed taking the selected - // mode into account! - - #[allow(clippy::absurd_extreme_comparisons)] - let mut x_uq0 = if NUMBER_OF_HALF_ITERATIONS > 0 { - // Starting with (n-1) half-width iterations - let b_uq1_hw: u16 = - (CastInto::::cast(b_significand) >> (significand_bits + 1 - hw)) as u16; - - // C is (3/4 + 1/sqrt(2)) - 1 truncated to W0 fractional bits as UQ0.HW - // with W0 being either 16 or 32 and W0 <= HW. - // That is, C is the aforementioned 3/4 + 1/sqrt(2) constant (from which - // b/2 is subtracted to obtain x0) wrapped to [0, 1) range. - - // HW is at least 32. Shifting into the highest bits if needed. - let c_hw = (0x7504_u32 as u16).wrapping_shl(hw.wrapping_sub(32)); - - // b >= 1, thus an upper bound for 3/4 + 1/sqrt(2) - b/2 is about 0.9572, - // so x0 fits to UQ0.HW without wrapping. - let x_uq0_hw: u16 = { - let mut x_uq0_hw: u16 = c_hw.wrapping_sub(b_uq1_hw /* exact b_hw/2 as UQ0.HW */); - // An e_0 error is comprised of errors due to - // * x0 being an inherently imprecise first approximation of 1/b_hw - // * C_hw being some (irrational) number **truncated** to W0 bits - // Please note that e_0 is calculated against the infinitely precise - // reciprocal of b_hw (that is, **truncated** version of b). - // - // e_0 <= 3/4 - 1/sqrt(2) + 2^-W0 - - // By construction, 1 <= b < 2 - // f(x) = x * (2 - b*x) = 2*x - b*x^2 - // f'(x) = 2 * (1 - b*x) - // - // On the [0, 1] interval, f(0) = 0, - // then it increses until f(1/b) = 1 / b, maximum on (0, 1), - // then it decreses to f(1) = 2 - b - // - // Let g(x) = x - f(x) = b*x^2 - x. - // On (0, 1/b), g(x) < 0 <=> f(x) > x - // On (1/b, 1], g(x) > 0 <=> f(x) < x - // - // For half-width iterations, b_hw is used instead of b. - #[allow(clippy::reversed_empty_ranges)] - for _ in 0..NUMBER_OF_HALF_ITERATIONS { - // corr_UQ1_hw can be **larger** than 2 - b_hw*x by at most 1*Ulp - // of corr_UQ1_hw. - // "0.0 - (...)" is equivalent to "2.0 - (...)" in UQ1.(HW-1). - // On the other hand, corr_UQ1_hw should not overflow from 2.0 to 0.0 provided - // no overflow occurred earlier: ((rep_t)x_UQ0_hw * b_UQ1_hw >> HW) is - // expected to be strictly positive because b_UQ1_hw has its highest bit set - // and x_UQ0_hw should be rather large (it converges to 1/2 < 1/b_hw <= 1). - let corr_uq1_hw: u16 = - 0.wrapping_sub((x_uq0_hw as u32).wrapping_mul(b_uq1_hw.cast()) >> hw) as u16; - - // Now, we should multiply UQ0.HW and UQ1.(HW-1) numbers, naturally - // obtaining an UQ1.(HW-1) number and proving its highest bit could be - // considered to be 0 to be able to represent it in UQ0.HW. - // From the above analysis of f(x), if corr_UQ1_hw would be represented - // without any intermediate loss of precision (that is, in twice_rep_t) - // x_UQ0_hw could be at most [1.]000... if b_hw is exactly 1.0 and strictly - // less otherwise. On the other hand, to obtain [1.]000..., one have to pass - // 1/b_hw == 1.0 to f(x), so this cannot occur at all without overflow (due - // to 1.0 being not representable as UQ0.HW). - // The fact corr_UQ1_hw was virtually round up (due to result of - // multiplication being **first** truncated, then negated - to improve - // error estimations) can increase x_UQ0_hw by up to 2*Ulp of x_UQ0_hw. - x_uq0_hw = ((x_uq0_hw as u32).wrapping_mul(corr_uq1_hw as u32) >> (hw - 1)) as u16; - // Now, either no overflow occurred or x_UQ0_hw is 0 or 1 in its half_rep_t - // representation. 
In the latter case, x_UQ0_hw will be either 0 or 1 after - // any number of iterations, so just subtract 2 from the reciprocal - // approximation after last iteration. - - // In infinite precision, with 0 <= eps1, eps2 <= U = 2^-HW: - // corr_UQ1_hw = 2 - (1/b_hw + e_n) * b_hw + 2*eps1 - // = 1 - e_n * b_hw + 2*eps1 - // x_UQ0_hw = (1/b_hw + e_n) * (1 - e_n*b_hw + 2*eps1) - eps2 - // = 1/b_hw - e_n + 2*eps1/b_hw + e_n - e_n^2*b_hw + 2*e_n*eps1 - eps2 - // = 1/b_hw + 2*eps1/b_hw - e_n^2*b_hw + 2*e_n*eps1 - eps2 - // e_{n+1} = -e_n^2*b_hw + 2*eps1/b_hw + 2*e_n*eps1 - eps2 - // = 2*e_n*eps1 - (e_n^2*b_hw + eps2) + 2*eps1/b_hw - // \------ >0 -------/ \-- >0 ---/ - // abs(e_{n+1}) <= 2*abs(e_n)*U + max(2*e_n^2 + U, 2 * U) - } - // For initial half-width iterations, U = 2^-HW - // Let abs(e_n) <= u_n * U, - // then abs(e_{n+1}) <= 2 * u_n * U^2 + max(2 * u_n^2 * U^2 + U, 2 * U) - // u_{n+1} <= 2 * u_n * U + max(2 * u_n^2 * U + 1, 2) - - // Account for possible overflow (see above). For an overflow to occur for the - // first time, for "ideal" corr_UQ1_hw (that is, without intermediate - // truncation), the result of x_UQ0_hw * corr_UQ1_hw should be either maximum - // value representable in UQ0.HW or less by 1. This means that 1/b_hw have to - // be not below that value (see g(x) above), so it is safe to decrement just - // once after the final iteration. On the other hand, an effective value of - // divisor changes after this point (from b_hw to b), so adjust here. - x_uq0_hw.wrapping_sub(1_u16) - }; - - // Error estimations for full-precision iterations are calculated just - // as above, but with U := 2^-W and taking extra decrementing into account. - // We need at least one such iteration. - - // Simulating operations on a twice_rep_t to perform a single final full-width - // iteration. Using ad-hoc multiplication implementations to take advantage - // of particular structure of operands. - - let blo: u32 = (CastInto::::cast(b_uq1)) & lo_mask; - // x_UQ0 = x_UQ0_hw * 2^HW - 1 - // x_UQ0 * b_UQ1 = (x_UQ0_hw * 2^HW) * (b_UQ1_hw * 2^HW + blo) - b_UQ1 - // - // <--- higher half ---><--- lower half ---> - // [x_UQ0_hw * b_UQ1_hw] - // + [ x_UQ0_hw * blo ] - // - [ b_UQ1 ] - // = [ result ][.... discarded ...] - let corr_uq1 = negate_u32( - (x_uq0_hw as u32) * (b_uq1_hw as u32) + (((x_uq0_hw as u32) * (blo)) >> hw) - 1, - ); // account for *possible* carry - let lo_corr = corr_uq1 & lo_mask; - let hi_corr = corr_uq1 >> hw; - // x_UQ0 * corr_UQ1 = (x_UQ0_hw * 2^HW) * (hi_corr * 2^HW + lo_corr) - corr_UQ1 - let mut x_uq0: ::Int = ((((x_uq0_hw as u32) * hi_corr) << 1) - .wrapping_add(((x_uq0_hw as u32) * lo_corr) >> (hw - 1)) - .wrapping_sub(2)) - .cast(); // 1 to account for the highest bit of corr_UQ1 can be 1 - // 1 to account for possible carry - // Just like the case of half-width iterations but with possibility - // of overflowing by one extra Ulp of x_UQ0. - x_uq0 -= one; - // ... 
and then traditional fixup by 2 should work - - // On error estimation: - // abs(E_{N-1}) <= (u_{N-1} + 2 /* due to conversion e_n -> E_n */) * 2^-HW - // + (2^-HW + 2^-W)) - // abs(E_{N-1}) <= (u_{N-1} + 3.01) * 2^-HW - - // Then like for the half-width iterations: - // With 0 <= eps1, eps2 < 2^-W - // E_N = 4 * E_{N-1} * eps1 - (E_{N-1}^2 * b + 4 * eps2) + 4 * eps1 / b - // abs(E_N) <= 2^-W * [ 4 * abs(E_{N-1}) + max(2 * abs(E_{N-1})^2 * 2^W + 4, 8)) ] - // abs(E_N) <= 2^-W * [ 4 * (u_{N-1} + 3.01) * 2^-HW + max(4 + 2 * (u_{N-1} + 3.01)^2, 8) ] - x_uq0 - } else { - // C is (3/4 + 1/sqrt(2)) - 1 truncated to 32 fractional bits as UQ0.n - let c: ::Int = (0x7504F333 << (F::BITS - 32)).cast(); - let x_uq0: ::Int = c.wrapping_sub(b_uq1); - // E_0 <= 3/4 - 1/sqrt(2) + 2 * 2^-32 - x_uq0 - }; - - let mut x_uq0 = if USE_NATIVE_FULL_ITERATIONS { - for _ in 0..NUMBER_OF_FULL_ITERATIONS { - let corr_uq1: u32 = 0.wrapping_sub( - ((CastInto::::cast(x_uq0) as u64) * (CastInto::::cast(b_uq1) as u64)) - >> F::BITS, - ) as u32; - x_uq0 = ((((CastInto::::cast(x_uq0) as u64) * (corr_uq1 as u64)) >> (F::BITS - 1)) - as u32) - .cast(); - } - x_uq0 - } else { - // not using native full iterations - x_uq0 - }; - - // Finally, account for possible overflow, as explained above. - x_uq0 = x_uq0.wrapping_sub(2.cast()); - - // u_n for different precisions (with N-1 half-width iterations): - // W0 is the precision of C - // u_0 = (3/4 - 1/sqrt(2) + 2^-W0) * 2^HW - - // Estimated with bc: - // define half1(un) { return 2.0 * (un + un^2) / 2.0^hw + 1.0; } - // define half2(un) { return 2.0 * un / 2.0^hw + 2.0; } - // define full1(un) { return 4.0 * (un + 3.01) / 2.0^hw + 2.0 * (un + 3.01)^2 + 4.0; } - // define full2(un) { return 4.0 * (un + 3.01) / 2.0^hw + 8.0; } - - // | f32 (0 + 3) | f32 (2 + 1) | f64 (3 + 1) | f128 (4 + 1) - // u_0 | < 184224974 | < 2812.1 | < 184224974 | < 791240234244348797 - // u_1 | < 15804007 | < 242.7 | < 15804007 | < 67877681371350440 - // u_2 | < 116308 | < 2.81 | < 116308 | < 499533100252317 - // u_3 | < 7.31 | | < 7.31 | < 27054456580 - // u_4 | | | | < 80.4 - // Final (U_N) | same as u_3 | < 72 | < 218 | < 13920 - - // Add 2 to U_N due to final decrement. - - let reciprocal_precision: ::Int = 10.cast(); - - // Suppose 1/b - P * 2^-W < x < 1/b + P * 2^-W - let x_uq0 = x_uq0 - reciprocal_precision; - // Now 1/b - (2*P) * 2^-W < x < 1/b - // FIXME Is x_UQ0 still >= 0.5? - - let mut quotient: ::Int = x_uq0.widen_mul(a_significand << 1).hi(); - // Now, a/b - 4*P * 2^-W < q < a/b for q= in UQ1.(SB+1+W). - - // quotient_UQ1 is in [0.5, 2.0) as UQ1.(SB+1), - // adjust it to be in [1.0, 2.0) as UQ1.SB. - let (mut residual, written_exponent) = if quotient < (implicit_bit << 1) { - // Highest bit is 0, so just reinterpret quotient_UQ1 as UQ1.SB, - // effectively doubling its value as well as its error estimation. - let residual_lo = (a_significand << (significand_bits + 1)).wrapping_sub( - (CastInto::::cast(quotient).wrapping_mul(CastInto::::cast(b_significand))) - .cast(), - ); - a_significand <<= 1; - (residual_lo, written_exponent.wrapping_sub(1)) - } else { - // Highest bit is 1 (the UQ1.(SB+1) value is in [1, 2)), convert it - // to UQ1.SB by right shifting by 1. Least significant bit is omitted. 
- quotient >>= 1; - let residual_lo = (a_significand << significand_bits).wrapping_sub( - (CastInto::::cast(quotient).wrapping_mul(CastInto::::cast(b_significand))) - .cast(), - ); - (residual_lo, written_exponent) - }; - - //drop mutability - let quotient = quotient; - - // NB: residualLo is calculated above for the normal result case. - // It is re-computed on denormal path that is expected to be not so - // performance-sensitive. - - // Now, q cannot be greater than a/b and can differ by at most 8*P * 2^-W + 2^-SB - // Each NextAfter() increments the floating point value by at least 2^-SB - // (more, if exponent was incremented). - // Different cases (<---> is of 2^-SB length, * = a/b that is shown as a midpoint): - // q - // | | * | | | | | - // <---> 2^t - // | | | | | * | | - // q - // To require at most one NextAfter(), an error should be less than 1.5 * 2^-SB. - // (8*P) * 2^-W + 2^-SB < 1.5 * 2^-SB - // (8*P) * 2^-W < 0.5 * 2^-SB - // P < 2^(W-4-SB) - // Generally, for at most R NextAfter() to be enough, - // P < (2*R - 1) * 2^(W-4-SB) - // For f32 (0+3): 10 < 32 (OK) - // For f32 (2+1): 32 < 74 < 32 * 3, so two NextAfter() are required - // For f64: 220 < 256 (OK) - // For f128: 4096 * 3 < 13922 < 4096 * 5 (three NextAfter() are required) - - // If we have overflowed the exponent, return infinity - if written_exponent >= max_exponent as i32 { - return F::from_repr(inf_rep | quotient_sign); - } - - // Now, quotient <= the correctly-rounded result - // and may need taking NextAfter() up to 3 times (see error estimates above) - // r = a - b * q - let abs_result = if written_exponent > 0 { - let mut ret = quotient & significand_mask; - ret |= ((written_exponent as u32) << significand_bits).cast(); - residual <<= 1; - ret - } else { - if (significand_bits as i32 + written_exponent) < 0 { - return F::from_repr(quotient_sign); - } - let ret = quotient.wrapping_shr(negate_u32(CastInto::::cast(written_exponent)) + 1); - residual = (CastInto::::cast( - a_significand.wrapping_shl( - significand_bits.wrapping_add(CastInto::::cast(written_exponent)), - ), - ) - .wrapping_sub( - (CastInto::::cast(ret).wrapping_mul(CastInto::::cast(b_significand))) << 1, - )) - .cast(); - ret - }; - // Round - let abs_result = { - residual += abs_result & one; // tie to even - // The above line conditionally turns the below LT comparison into LTE - - if residual > b_significand { - abs_result + one - } else { - abs_result - } - }; - F::from_repr(abs_result | quotient_sign) -} - -fn div64(a: F, b: F) -> F +fn div(a: F, b: F) -> F where - F::Int: CastInto, F::Int: CastInto, - F::Int: CastInto>, F::Int: From>, F::Int: From, - F::Int: CastInto, - F::Int: CastInto, F::Int: HInt + DInt, + ::D: ops::Shr::D>, + F::Int: From, u16: CastInto, i32: CastInto, - i64: CastInto, u32: CastInto, - u64: CastInto, - u64: CastInto>, + u128: CastInto>, { - const NUMBER_OF_HALF_ITERATIONS: usize = 3; - const NUMBER_OF_FULL_ITERATIONS: usize = 1; - const USE_NATIVE_FULL_ITERATIONS: bool = false; - let one = F::Int::ONE; let zero = F::Int::ZERO; + let one_hw = HalfRep::::ONE; + let zero_hw = HalfRep::::ZERO; let hw = F::BITS / 2; let lo_mask = F::Int::MAX >> hw; let significand_bits = F::SIGNIFICAND_BITS; - let max_exponent = F::EXPONENT_MAX; + // Saturated exponent, representing infinity + let exponent_sat: F::Int = F::EXPONENT_MAX.cast(); let exponent_bias = F::EXPONENT_BIAS; - let implicit_bit = F::IMPLICIT_BIT; let significand_mask = F::SIGNIFICAND_MASK; - let sign_bit = F::SIGN_MASK as F::Int; + let sign_bit = F::SIGN_MASK; let 
abs_mask = sign_bit - one; let exponent_mask = F::EXPONENT_MASK; let inf_rep = exponent_mask; let quiet_bit = implicit_bit >> 1; let qnan_rep = exponent_mask | quiet_bit; + let (mut half_iterations, full_iterations) = get_iterations::(); + let recip_precision = reciprocal_precision::(); - #[inline(always)] - fn negate_u64(a: u64) -> u64 { - (::wrapping_neg(a as i64)) as u64 + if F::BITS == 128 { + // FIXME(tgross35): f128 seems to require one more half iteration than expected + half_iterations += 1; } let a_rep = a.repr(); let b_rep = b.repr(); - let a_exponent = (a_rep >> significand_bits) & max_exponent.cast(); - let b_exponent = (b_rep >> significand_bits) & max_exponent.cast(); + // Exponent numeric representationm not accounting for bias + let a_exponent = (a_rep >> significand_bits) & exponent_sat; + let b_exponent = (b_rep >> significand_bits) & exponent_sat; let quotient_sign = (a_rep ^ b_rep) & sign_bit; let mut a_significand = a_rep & significand_mask; let mut b_significand = b_rep & significand_mask; - let mut scale = 0; + + // The exponent of our final result in its encoded form + let mut res_exponent: i32 = + i32::cast_from(a_exponent) - i32::cast_from(b_exponent) + (exponent_bias as i32); // Detect if a or b is zero, denormal, infinity, or NaN. - if a_exponent.wrapping_sub(one) >= (max_exponent - 1).cast() - || b_exponent.wrapping_sub(one) >= (max_exponent - 1).cast() + if a_exponent.wrapping_sub(one) >= (exponent_sat - one) + || b_exponent.wrapping_sub(one) >= (exponent_sat - one) { let a_abs = a_rep & abs_mask; let b_abs = b_rep & abs_mask; @@ -521,6 +152,7 @@ where if a_abs > inf_rep { return F::from_repr(a_rep | quiet_bit); } + // anything / NaN = qNaN if b_abs > inf_rep { return F::from_repr(b_rep | quiet_bit); @@ -556,34 +188,31 @@ where return F::from_repr(inf_rep | quotient_sign); } - // one or both of a or b is denormal, the other (if applicable) is a - // normal number. Renormalize one or both of a and b, and set scale to - // include the necessary exponent adjustment. + // a is denormal. Renormalize it and set the scale to include the necessary exponent + // adjustment. if a_abs < implicit_bit { let (exponent, significand) = F::normalize(a_significand); - scale += exponent; + res_exponent += exponent; a_significand = significand; } + // b is denormal. Renormalize it and set the scale to include the necessary exponent + // adjustment. if b_abs < implicit_bit { let (exponent, significand) = F::normalize(b_significand); - scale -= exponent; + res_exponent -= exponent; b_significand = significand; } } - // Set the implicit significand bit. If we fell through from the + // Set the implicit significand bit. If we fell through from the // denormal path it was already set by normalize( ), but setting it twice // won't hurt anything. a_significand |= implicit_bit; b_significand |= implicit_bit; - let written_exponent: i64 = CastInto::::cast( - a_exponent - .wrapping_sub(b_exponent) - .wrapping_add(scale.cast()), - ) - .wrapping_add(exponent_bias as u64) as i64; + // Transform to a fixed-point representation by shifting the significand to the high bits. We + // know this is in the range [1.0, 2.0] since the implicit bit is set to 1 above. let b_uq1 = b_significand << (F::BITS - significand_bits - 1); // Align the significand of b as a UQ1.(n-1) fixed-point number in the range @@ -593,7 +222,7 @@ where // abs(x0(b) - 1/b) <= abs(x0(1) - 1/1) = 3/4 - 1/sqrt(2) = 0.04289..., // which is about 4.5 bits. // The initial approximation is between x0(1.0) = 0.9571... 
and x0(2.0) = 0.4571... - + // // Then, refine the reciprocal estimate using a quadratically converging // Newton-Raphson iteration: // x_{n+1} = x_n * (2 - x_n * b) @@ -613,123 +242,116 @@ where // abs(E_n) <= abs(e_n) + (1/b_hw - 1/b) // = abs(e_n) + (b - b_hw) / (b*b_hw) // <= abs(e_n) + 2 * 2^-HW - + // // rep_t-sized iterations may be slower than the corresponding half-width // variant depending on the handware and whether single/double/quad precision // is selected. + // // NB: Using half-width iterations increases computation errors due to // rounding, so error estimations have to be computed taking the selected // mode into account! - - let mut x_uq0 = if NUMBER_OF_HALF_ITERATIONS > 0 { + let mut x_uq0 = if half_iterations > 0 { // Starting with (n-1) half-width iterations - let b_uq1_hw: HalfRep = CastInto::>::cast( - CastInto::::cast(b_significand) >> (significand_bits + 1 - hw), - ); + let b_uq1_hw: HalfRep = b_uq1.hi(); // C is (3/4 + 1/sqrt(2)) - 1 truncated to W0 fractional bits as UQ0.HW // with W0 being either 16 or 32 and W0 <= HW. // That is, C is the aforementioned 3/4 + 1/sqrt(2) constant (from which // b/2 is subtracted to obtain x0) wrapped to [0, 1) range. + let c_hw = c_hw::(); - // HW is at least 32. Shifting into the highest bits if needed. - let c_hw = (CastInto::>::cast(0x7504F333_u64)).wrapping_shl(hw.wrapping_sub(32)); + // Check that the top bit is set, i.e. value is within `[1, 2)`. + debug_assert!(b_uq1_hw & one_hw << (HalfRep::::BITS - 1) > zero_hw); // b >= 1, thus an upper bound for 3/4 + 1/sqrt(2) - b/2 is about 0.9572, // so x0 fits to UQ0.HW without wrapping. - let x_uq0_hw: HalfRep = { - let mut x_uq0_hw: HalfRep = - c_hw.wrapping_sub(b_uq1_hw /* exact b_hw/2 as UQ0.HW */); - // dbg!(x_uq0_hw); - // An e_0 error is comprised of errors due to - // * x0 being an inherently imprecise first approximation of 1/b_hw - // * C_hw being some (irrational) number **truncated** to W0 bits - // Please note that e_0 is calculated against the infinitely precise - // reciprocal of b_hw (that is, **truncated** version of b). - // - // e_0 <= 3/4 - 1/sqrt(2) + 2^-W0 - - // By construction, 1 <= b < 2 - // f(x) = x * (2 - b*x) = 2*x - b*x^2 - // f'(x) = 2 * (1 - b*x) + let mut x_uq0_hw: HalfRep = + c_hw.wrapping_sub(b_uq1_hw /* exact b_hw/2 as UQ0.HW */); + + // An e_0 error is comprised of errors due to + // * x0 being an inherently imprecise first approximation of 1/b_hw + // * C_hw being some (irrational) number **truncated** to W0 bits + // Please note that e_0 is calculated against the infinitely precise + // reciprocal of b_hw (that is, **truncated** version of b). + // + // e_0 <= 3/4 - 1/sqrt(2) + 2^-W0 + // + // By construction, 1 <= b < 2 + // f(x) = x * (2 - b*x) = 2*x - b*x^2 + // f'(x) = 2 * (1 - b*x) + // + // On the [0, 1] interval, f(0) = 0, + // then it increses until f(1/b) = 1 / b, maximum on (0, 1), + // then it decreses to f(1) = 2 - b + // + // Let g(x) = x - f(x) = b*x^2 - x. + // On (0, 1/b), g(x) < 0 <=> f(x) > x + // On (1/b, 1], g(x) > 0 <=> f(x) < x + // + // For half-width iterations, b_hw is used instead of b. + for _ in 0..half_iterations { + // corr_UQ1_hw can be **larger** than 2 - b_hw*x by at most 1*Ulp + // of corr_UQ1_hw. + // "0.0 - (...)" is equivalent to "2.0 - (...)" in UQ1.(HW-1). 
+ // On the other hand, corr_UQ1_hw should not overflow from 2.0 to 0.0 provided + // no overflow occurred earlier: ((rep_t)x_UQ0_hw * b_UQ1_hw >> HW) is + // expected to be strictly positive because b_UQ1_hw has its highest bit set + // and x_UQ0_hw should be rather large (it converges to 1/2 < 1/b_hw <= 1). // - // On the [0, 1] interval, f(0) = 0, - // then it increses until f(1/b) = 1 / b, maximum on (0, 1), - // then it decreses to f(1) = 2 - b + // Now, we should multiply UQ0.HW and UQ1.(HW-1) numbers, naturally + // obtaining an UQ1.(HW-1) number and proving its highest bit could be + // considered to be 0 to be able to represent it in UQ0.HW. + // From the above analysis of f(x), if corr_UQ1_hw would be represented + // without any intermediate loss of precision (that is, in twice_rep_t) + // x_UQ0_hw could be at most [1.]000... if b_hw is exactly 1.0 and strictly + // less otherwise. On the other hand, to obtain [1.]000..., one have to pass + // 1/b_hw == 1.0 to f(x), so this cannot occur at all without overflow (due + // to 1.0 being not representable as UQ0.HW). + // The fact corr_UQ1_hw was virtually round up (due to result of + // multiplication being **first** truncated, then negated - to improve + // error estimations) can increase x_UQ0_hw by up to 2*Ulp of x_UQ0_hw. // - // Let g(x) = x - f(x) = b*x^2 - x. - // On (0, 1/b), g(x) < 0 <=> f(x) > x - // On (1/b, 1], g(x) > 0 <=> f(x) < x + // Now, either no overflow occurred or x_UQ0_hw is 0 or 1 in its half_rep_t + // representation. In the latter case, x_UQ0_hw will be either 0 or 1 after + // any number of iterations, so just subtract 2 from the reciprocal + // approximation after last iteration. // - // For half-width iterations, b_hw is used instead of b. - for _ in 0..NUMBER_OF_HALF_ITERATIONS { - // corr_UQ1_hw can be **larger** than 2 - b_hw*x by at most 1*Ulp - // of corr_UQ1_hw. - // "0.0 - (...)" is equivalent to "2.0 - (...)" in UQ1.(HW-1). - // On the other hand, corr_UQ1_hw should not overflow from 2.0 to 0.0 provided - // no overflow occurred earlier: ((rep_t)x_UQ0_hw * b_UQ1_hw >> HW) is - // expected to be strictly positive because b_UQ1_hw has its highest bit set - // and x_UQ0_hw should be rather large (it converges to 1/2 < 1/b_hw <= 1). - let corr_uq1_hw: HalfRep = CastInto::>::cast(zero.wrapping_sub( - ((F::Int::from(x_uq0_hw)).wrapping_mul(F::Int::from(b_uq1_hw))) >> hw, - )); - // dbg!(corr_uq1_hw); - - // Now, we should multiply UQ0.HW and UQ1.(HW-1) numbers, naturally - // obtaining an UQ1.(HW-1) number and proving its highest bit could be - // considered to be 0 to be able to represent it in UQ0.HW. - // From the above analysis of f(x), if corr_UQ1_hw would be represented - // without any intermediate loss of precision (that is, in twice_rep_t) - // x_UQ0_hw could be at most [1.]000... if b_hw is exactly 1.0 and strictly - // less otherwise. On the other hand, to obtain [1.]000..., one have to pass - // 1/b_hw == 1.0 to f(x), so this cannot occur at all without overflow (due - // to 1.0 being not representable as UQ0.HW). - // The fact corr_UQ1_hw was virtually round up (due to result of - // multiplication being **first** truncated, then negated - to improve - // error estimations) can increase x_UQ0_hw by up to 2*Ulp of x_UQ0_hw. - x_uq0_hw = ((F::Int::from(x_uq0_hw)).wrapping_mul(F::Int::from(corr_uq1_hw)) - >> (hw - 1)) - .cast(); - // dbg!(x_uq0_hw); - // Now, either no overflow occurred or x_UQ0_hw is 0 or 1 in its half_rep_t - // representation. 
In the latter case, x_UQ0_hw will be either 0 or 1 after - // any number of iterations, so just subtract 2 from the reciprocal - // approximation after last iteration. - - // In infinite precision, with 0 <= eps1, eps2 <= U = 2^-HW: - // corr_UQ1_hw = 2 - (1/b_hw + e_n) * b_hw + 2*eps1 - // = 1 - e_n * b_hw + 2*eps1 - // x_UQ0_hw = (1/b_hw + e_n) * (1 - e_n*b_hw + 2*eps1) - eps2 - // = 1/b_hw - e_n + 2*eps1/b_hw + e_n - e_n^2*b_hw + 2*e_n*eps1 - eps2 - // = 1/b_hw + 2*eps1/b_hw - e_n^2*b_hw + 2*e_n*eps1 - eps2 - // e_{n+1} = -e_n^2*b_hw + 2*eps1/b_hw + 2*e_n*eps1 - eps2 - // = 2*e_n*eps1 - (e_n^2*b_hw + eps2) + 2*eps1/b_hw - // \------ >0 -------/ \-- >0 ---/ - // abs(e_{n+1}) <= 2*abs(e_n)*U + max(2*e_n^2 + U, 2 * U) - } - // For initial half-width iterations, U = 2^-HW - // Let abs(e_n) <= u_n * U, - // then abs(e_{n+1}) <= 2 * u_n * U^2 + max(2 * u_n^2 * U^2 + U, 2 * U) - // u_{n+1} <= 2 * u_n * U + max(2 * u_n^2 * U + 1, 2) - - // Account for possible overflow (see above). For an overflow to occur for the - // first time, for "ideal" corr_UQ1_hw (that is, without intermediate - // truncation), the result of x_UQ0_hw * corr_UQ1_hw should be either maximum - // value representable in UQ0.HW or less by 1. This means that 1/b_hw have to - // be not below that value (see g(x) above), so it is safe to decrement just - // once after the final iteration. On the other hand, an effective value of - // divisor changes after this point (from b_hw to b), so adjust here. - x_uq0_hw.wrapping_sub(HalfRep::::ONE) - }; + // In infinite precision, with 0 <= eps1, eps2 <= U = 2^-HW: + // corr_UQ1_hw = 2 - (1/b_hw + e_n) * b_hw + 2*eps1 + // = 1 - e_n * b_hw + 2*eps1 + // x_UQ0_hw = (1/b_hw + e_n) * (1 - e_n*b_hw + 2*eps1) - eps2 + // = 1/b_hw - e_n + 2*eps1/b_hw + e_n - e_n^2*b_hw + 2*e_n*eps1 - eps2 + // = 1/b_hw + 2*eps1/b_hw - e_n^2*b_hw + 2*e_n*eps1 - eps2 + // e_{n+1} = -e_n^2*b_hw + 2*eps1/b_hw + 2*e_n*eps1 - eps2 + // = 2*e_n*eps1 - (e_n^2*b_hw + eps2) + 2*eps1/b_hw + // \------ >0 -------/ \-- >0 ---/ + // abs(e_{n+1}) <= 2*abs(e_n)*U + max(2*e_n^2 + U, 2 * U) + x_uq0_hw = next_guess(x_uq0_hw, b_uq1_hw); + } + + // For initial half-width iterations, U = 2^-HW + // Let abs(e_n) <= u_n * U, + // then abs(e_{n+1}) <= 2 * u_n * U^2 + max(2 * u_n^2 * U^2 + U, 2 * U) + // u_{n+1} <= 2 * u_n * U + max(2 * u_n^2 * U + 1, 2) + // + // Account for possible overflow (see above). For an overflow to occur for the + // first time, for "ideal" corr_UQ1_hw (that is, without intermediate + // truncation), the result of x_UQ0_hw * corr_UQ1_hw should be either maximum + // value representable in UQ0.HW or less by 1. This means that 1/b_hw have to + // be not below that value (see g(x) above), so it is safe to decrement just + // once after the final iteration. On the other hand, an effective value of + // divisor changes after this point (from b_hw to b), so adjust here. + x_uq0_hw = x_uq0_hw.wrapping_sub(one_hw); // Error estimations for full-precision iterations are calculated just // as above, but with U := 2^-W and taking extra decrementing into account. // We need at least one such iteration. - + // // Simulating operations on a twice_rep_t to perform a single final full-width // iteration. Using ad-hoc multiplication implementations to take advantage // of particular structure of operands. 
let blo: F::Int = b_uq1 & lo_mask; + // x_UQ0 = x_UQ0_hw * 2^HW - 1 // x_UQ0 * b_UQ1 = (x_UQ0_hw * 2^HW) * (b_UQ1_hw * 2^HW + blo) - b_UQ1 // @@ -742,16 +364,19 @@ where + ((F::Int::from(x_uq0_hw) * blo) >> hw)) .wrapping_sub(one) .wrapping_neg(); // account for *possible* carry + let lo_corr: F::Int = corr_uq1 & lo_mask; let hi_corr: F::Int = corr_uq1 >> hw; + // x_UQ0 * corr_UQ1 = (x_UQ0_hw * 2^HW) * (hi_corr * 2^HW + lo_corr) - corr_UQ1 let mut x_uq0: F::Int = ((F::Int::from(x_uq0_hw) * hi_corr) << 1) .wrapping_add((F::Int::from(x_uq0_hw) * lo_corr) >> (hw - 1)) + // 1 to account for the highest bit of corr_UQ1 can be 1 + // 1 to account for possible carry + // Just like the case of half-width iterations but with possibility + // of overflowing by one extra Ulp of x_UQ0. .wrapping_sub(F::Int::from(2u8)); - // 1 to account for the highest bit of corr_UQ1 can be 1 - // 1 to account for possible carry - // Just like the case of half-width iterations but with possibility - // of overflowing by one extra Ulp of x_UQ0. + x_uq0 -= one; // ... and then traditional fixup by 2 should work @@ -759,7 +384,7 @@ where // abs(E_{N-1}) <= (u_{N-1} + 2 /* due to conversion e_n -> E_n */) * 2^-HW // + (2^-HW + 2^-W)) // abs(E_{N-1}) <= (u_{N-1} + 3.01) * 2^-HW - + // // Then like for the half-width iterations: // With 0 <= eps1, eps2 < 2^-W // E_N = 4 * E_{N-1} * eps1 - (E_{N-1}^2 * b + 4 * eps2) + 4 * eps1 / b @@ -768,89 +393,54 @@ where x_uq0 } else { // C is (3/4 + 1/sqrt(2)) - 1 truncated to 64 fractional bits as UQ0.n - let c: F::Int = (0x7504F333 << (F::BITS - 32)).cast(); - let x_uq0: F::Int = c.wrapping_sub(b_uq1); - // E_0 <= 3/4 - 1/sqrt(2) + 2 * 2^-64 - x_uq0 - }; + let c: F::Int = F::Int::from(0x7504F333u32) << (F::BITS - 32); + let mut x_uq0: F::Int = c.wrapping_sub(b_uq1); - let mut x_uq0 = if USE_NATIVE_FULL_ITERATIONS { - for _ in 0..NUMBER_OF_FULL_ITERATIONS { - let corr_uq1: u64 = 0.wrapping_sub( - (CastInto::::cast(x_uq0) * (CastInto::::cast(b_uq1))) >> F::BITS, - ); - x_uq0 = ((((CastInto::::cast(x_uq0) as u128) * (corr_uq1 as u128)) - >> (F::BITS - 1)) as u64) - .cast(); + // E_0 <= 3/4 - 1/sqrt(2) + 2 * 2^-64 + // x_uq0 + for _ in 0..full_iterations { + x_uq0 = next_guess(x_uq0, b_uq1); } - x_uq0 - } else { - // not using native full iterations + x_uq0 }; // Finally, account for possible overflow, as explained above. x_uq0 = x_uq0.wrapping_sub(2.cast()); - // u_n for different precisions (with N-1 half-width iterations): - // W0 is the precision of C - // u_0 = (3/4 - 1/sqrt(2) + 2^-W0) * 2^HW - - // Estimated with bc: - // define half1(un) { return 2.0 * (un + un^2) / 2.0^hw + 1.0; } - // define half2(un) { return 2.0 * un / 2.0^hw + 2.0; } - // define full1(un) { return 4.0 * (un + 3.01) / 2.0^hw + 2.0 * (un + 3.01)^2 + 4.0; } - // define full2(un) { return 4.0 * (un + 3.01) / 2.0^hw + 8.0; } - - // | f32 (0 + 3) | f32 (2 + 1) | f64 (3 + 1) | f128 (4 + 1) - // u_0 | < 184224974 | < 2812.1 | < 184224974 | < 791240234244348797 - // u_1 | < 15804007 | < 242.7 | < 15804007 | < 67877681371350440 - // u_2 | < 116308 | < 2.81 | < 116308 | < 499533100252317 - // u_3 | < 7.31 | | < 7.31 | < 27054456580 - // u_4 | | | | < 80.4 - // Final (U_N) | same as u_3 | < 72 | < 218 | < 13920 - - // Add 2 to U_N due to final decrement. - - let reciprocal_precision: ::Int = 220.cast(); - // Suppose 1/b - P * 2^-W < x < 1/b + P * 2^-W - let x_uq0 = x_uq0 - reciprocal_precision; + x_uq0 -= recip_precision.cast(); + // Now 1/b - (2*P) * 2^-W < x < 1/b // FIXME Is x_UQ0 still >= 0.5? 
- let mut quotient: F::Int = x_uq0.widen_mul(a_significand << 1).hi(); + let mut quotient_uq1: F::Int = x_uq0.widen_mul(a_significand << 1).hi(); // Now, a/b - 4*P * 2^-W < q < a/b for q= in UQ1.(SB+1+W). // quotient_UQ1 is in [0.5, 2.0) as UQ1.(SB+1), // adjust it to be in [1.0, 2.0) as UQ1.SB. - let (mut residual, written_exponent) = if quotient < (implicit_bit << 1) { + let mut residual_lo = if quotient_uq1 < (implicit_bit << 1) { // Highest bit is 0, so just reinterpret quotient_UQ1 as UQ1.SB, // effectively doubling its value as well as its error estimation. - let residual_lo = (a_significand << (significand_bits + 1)).wrapping_sub( - (CastInto::::cast(quotient).wrapping_mul(CastInto::::cast(b_significand))) - .cast(), - ); + let residual_lo = (a_significand << (significand_bits + 1)) + .wrapping_sub(quotient_uq1.wrapping_mul(b_significand)); + res_exponent -= 1; a_significand <<= 1; - (residual_lo, written_exponent.wrapping_sub(1)) + residual_lo } else { // Highest bit is 1 (the UQ1.(SB+1) value is in [1, 2)), convert it // to UQ1.SB by right shifting by 1. Least significant bit is omitted. - quotient >>= 1; - let residual_lo = (a_significand << significand_bits).wrapping_sub( - (CastInto::::cast(quotient).wrapping_mul(CastInto::::cast(b_significand))) - .cast(), - ); - (residual_lo, written_exponent) + quotient_uq1 >>= 1; + (a_significand << significand_bits).wrapping_sub(quotient_uq1.wrapping_mul(b_significand)) }; - //drop mutability - let quotient = quotient; + // drop mutability + let quotient = quotient_uq1; // NB: residualLo is calculated above for the normal result case. // It is re-computed on denormal path that is expected to be not so // performance-sensitive. - + // // Now, q cannot be greater than a/b and can differ by at most 8*P * 2^-W + 2^-SB // Each NextAfter() increments the floating point value by at least 2^-SB // (more, if exponent was incremented). 
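> Reviewer note (not part of the patch): the reciprocal refinement that the next hunk factors into the generic `next_guess` helper is ordinary fixed-point Newton-Raphson, and it can be reproduced outside the crate. The sketch below mirrors the UQ0.32/UQ1.31 arithmetic for a 32-bit significand, using a plain `u64` widening multiply in place of `widen_mul`; the helper name `next_guess_u32` and the concrete divisor are illustrative only, while the `0x7504_F333` seed constant is the same `C` used by the patch.

```rust
/// One Newton-Raphson step `x_{n+1} = x_n * (2 - b*x_n)` on fixed-point values,
/// mirroring the `next_guess` helper introduced below (u32 case, illustrative).
fn next_guess_u32(x_uq0: u32, b_uq1: u32) -> u32 {
    // `corr = 2 - b*x` as UQ1.31; in wrapping arithmetic, `0 - t` equals `2.0 - t`.
    let corr_uq1 = 0u32.wrapping_sub(((x_uq0 as u64 * b_uq1 as u64) >> 32) as u32);
    // `x * corr`, shifted back so the result is again a UQ0.32 value.
    ((x_uq0 as u64 * corr_uq1 as u64) >> 31) as u32
}

fn main() {
    // Divisor b = 1.5 encoded as UQ1.31 (implicit bit set, value in [1, 2)).
    let b_uq1: u32 = 0b11 << 30;
    // Initial estimate x0 = (3/4 + 1/sqrt(2)) - b/2, wrapped into [0, 1) as UQ0.32.
    let mut x_uq0: u32 = 0x7504_F333u32.wrapping_sub(b_uq1);
    for _ in 0..3 {
        x_uq0 = next_guess_u32(x_uq0, b_uq1);
    }
    // Precision roughly doubles per step, so three iterations are plenty here:
    // x now approximates 1/1.5 = 0.666... far more closely than 1e-6.
    let approx = x_uq0 as f64 / 2f64.powi(32);
    assert!((approx - 2.0 / 3.0).abs() < 1e-6);
}
```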
@@ -870,60 +460,178 @@ where // For f32 (2+1): 32 < 74 < 32 * 3, so two NextAfter() are required // For f64: 220 < 256 (OK) // For f128: 4096 * 3 < 13922 < 4096 * 5 (three NextAfter() are required) - + // // If we have overflowed the exponent, return infinity - if written_exponent >= max_exponent as i64 { + if res_exponent >= i32::cast_from(exponent_sat) { return F::from_repr(inf_rep | quotient_sign); } // Now, quotient <= the correctly-rounded result // and may need taking NextAfter() up to 3 times (see error estimates above) // r = a - b * q - let abs_result = if written_exponent > 0 { + let mut abs_result = if res_exponent > 0 { let mut ret = quotient & significand_mask; - ret |= written_exponent.cast() << significand_bits; - residual <<= 1; + ret |= F::Int::from(res_exponent as u32) << significand_bits; + residual_lo <<= 1; ret } else { - if (significand_bits as i64 + written_exponent) < 0 { + if ((significand_bits as i32) + res_exponent) < 0 { return F::from_repr(quotient_sign); } - let ret = - quotient.wrapping_shr((negate_u64(CastInto::::cast(written_exponent)) + 1) as u32); - residual = (CastInto::::cast( - a_significand.wrapping_shl( - significand_bits.wrapping_add(CastInto::::cast(written_exponent)), - ), - ) - .wrapping_sub( - (CastInto::::cast(ret).wrapping_mul(CastInto::::cast(b_significand))) << 1, - )) - .cast(); + + let ret = quotient.wrapping_shr(u32::cast_from(res_exponent.wrapping_neg()) + 1); + residual_lo = a_significand + .wrapping_shl(significand_bits.wrapping_add(CastInto::::cast(res_exponent))) + .wrapping_sub(ret.wrapping_mul(b_significand) << 1); ret }; - // Round - let abs_result = { - residual += abs_result & one; // tie to even - // conditionally turns the below LT comparison into LTE - if residual > b_significand { - abs_result + one - } else { - abs_result - } - }; + + residual_lo += abs_result & one; // tie to even + // conditionally turns the below LT comparison into LTE + abs_result += u8::from(residual_lo > b_significand).into(); + + if F::BITS == 128 || (F::BITS == 32 && half_iterations > 0) { + // Do not round Infinity to NaN + abs_result += + u8::from(abs_result < inf_rep && residual_lo > (2 + 1).cast() * b_significand).into(); + } + + if F::BITS == 128 { + abs_result += + u8::from(abs_result < inf_rep && residual_lo > (4 + 1).cast() * b_significand).into(); + } + F::from_repr(abs_result | quotient_sign) } +/// Calculate the number of iterations required for a float type's precision. +/// +/// This returns `(h, f)` where `h` is the number of iterations to be done using integers at half +/// the float's bit width, and `f` is the number of iterations done using integers of the float's +/// full width. This is further explained in the module documentation. +/// +/// # Requirements +/// +/// The initial estimate should have at least 8 bits of precision. If this is not true, results +/// will be inaccurate. +const fn get_iterations() -> (usize, usize) { + // Precision doubles with each iteration. Assume we start with 8 bits of precision. + let total_iterations = F::BITS.ilog2() as usize - 2; + + if 2 * size_of::() <= size_of::<*const ()>() { + // If widening multiplication will be efficient (uses word-sized integers), there is no + // reason to use half-sized iterations. + (0, total_iterations) + } else { + // Otherwise, do as many iterations as possible at half width. + (total_iterations - 1, 1) + } +} + +/// `u_n` for different precisions (with N-1 half-width iterations). 
+/// +/// W0 is the precision of C +/// u_0 = (3/4 - 1/sqrt(2) + 2^-W0) * 2^HW +/// +/// Estimated with bc: +/// +/// ```text +/// define half1(un) { return 2.0 * (un + un^2) / 2.0^hw + 1.0; } +/// define half2(un) { return 2.0 * un / 2.0^hw + 2.0; } +/// define full1(un) { return 4.0 * (un + 3.01) / 2.0^hw + 2.0 * (un + 3.01)^2 + 4.0; } +/// define full2(un) { return 4.0 * (un + 3.01) / 2.0^hw + 8.0; } +/// +/// | f32 (0 + 3) | f32 (2 + 1) | f64 (3 + 1) | f128 (4 + 1) +/// u_0 | < 184224974 | < 2812.1 | < 184224974 | < 791240234244348797 +/// u_1 | < 15804007 | < 242.7 | < 15804007 | < 67877681371350440 +/// u_2 | < 116308 | < 2.81 | < 116308 | < 499533100252317 +/// u_3 | < 7.31 | | < 7.31 | < 27054456580 +/// u_4 | | | | < 80.4 +/// Final (U_N) | same as u_3 | < 72 | < 218 | < 13920 +/// ```` +/// +/// Add 2 to `U_N` due to final decrement. +const fn reciprocal_precision() -> u16 { + let (half_iterations, full_iterations) = get_iterations::(); + + if full_iterations < 1 { + panic!("Must have at least one full iteration"); + } + + // FIXME(tgross35): calculate this programmatically + if F::BITS == 32 && half_iterations == 2 && full_iterations == 1 { + 74u16 + } else if F::BITS == 32 && half_iterations == 0 && full_iterations == 3 { + 10 + } else if F::BITS == 64 && half_iterations == 3 && full_iterations == 1 { + 220 + } else if F::BITS == 128 && half_iterations == 4 && full_iterations == 1 { + 13922 + } else { + panic!("Invalid number of iterations") + } +} + +/// The value of `C` adjusted to half width. +/// +/// C is (3/4 + 1/sqrt(2)) - 1 truncated to W0 fractional bits as UQ0.HW with W0 being either +/// 16 or 32 and W0 <= HW. That is, C is the aforementioned 3/4 + 1/sqrt(2) constant (from +/// which b/2 is subtracted to obtain x0) wrapped to [0, 1) range. +fn c_hw() -> HalfRep +where + F::Int: DInt, + u128: CastInto>, +{ + const C_U128: u128 = 0x7504f333f9de6108b2fb1366eaa6a542; + const { C_U128 >> (u128::BITS - >::BITS) }.cast() +} + +/// Perform one iteration at any width to approach `1/b`, given previous guess `x`. Returns +/// the next `x` as a UQ0 number. +/// +/// This is the `x_{n+1} = 2*x_n - b*x_n^2` algorithm, implemented as `x_n * (2 - b*x_n)`. It +/// uses widening multiplication to calculate the result with necessary precision. +fn next_guess(x_uq0: I, b_uq1: I) -> I +where + I: Int + HInt, + ::D: ops::Shr::D>, +{ + // `corr = 2 - b*x_n` + // + // This looks like `0 - b*x_n`. However, this works - in `UQ1`, `0.0 - x = 2.0 - x`. + let corr_uq1: I = I::ZERO.wrapping_sub(x_uq0.widen_mul(b_uq1).hi()); + + // `x_n * corr = x_n * (2 - b*x_n)` + (x_uq0.widen_mul(corr_uq1) >> (I::BITS - 1)).lo() +} + intrinsics! 
{ #[avr_skip] #[arm_aeabi_alias = __aeabi_fdiv] pub extern "C" fn __divsf3(a: f32, b: f32) -> f32 { - div32(a, b) + div(a, b) } #[avr_skip] #[arm_aeabi_alias = __aeabi_ddiv] pub extern "C" fn __divdf3(a: f64, b: f64) -> f64 { - div64(a, b) + div(a, b) + } + + #[avr_skip] + #[ppc_alias = __divkf3] + #[cfg(not(feature = "no-f16-f128"))] + pub extern "C" fn __divtf3(a: f128, b: f128) -> f128 { + div(a, b) + } + + #[cfg(target_arch = "arm")] + pub extern "C" fn __divsf3vfp(a: f32, b: f32) -> f32 { + a / b + } + + #[cfg(target_arch = "arm")] + pub extern "C" fn __divdf3vfp(a: f64, b: f64) -> f64 { + a / b } } diff --git a/src/float/mod.rs b/src/float/mod.rs index 84737320..704bba0c 100644 --- a/src/float/mod.rs +++ b/src/float/mod.rs @@ -31,10 +31,10 @@ pub(crate) trait Float: + ops::Rem { /// A uint of the same width as the float - type Int: Int; + type Int: Int; /// A int of the same width as the float - type SignedInt: Int; + type SignedInt: Int + MinInt; /// An int capable of containing the exponent bits plus a sign bit. This is signed. type ExpInt: Int; @@ -51,7 +51,7 @@ pub(crate) trait Float: /// The bitwidth of the exponent const EXPONENT_BITS: u32 = Self::BITS - Self::SIGNIFICAND_BITS - 1; - /// The maximum value of the exponent + /// The saturated value of the exponent (infinite representation), in the rightmost postiion. const EXPONENT_MAX: u32 = (1 << Self::EXPONENT_BITS) - 1; /// The exponent bias value @@ -83,7 +83,7 @@ pub(crate) trait Float: /// Returns true if the sign is negative fn is_sign_negative(self) -> bool; - /// Returns the exponent with bias + /// Returns the exponent, not adjusting for bias. fn exp(self) -> Self::ExpInt; /// Returns the significand with no implicit bit (or the "fractional" part) @@ -175,7 +175,7 @@ macro_rules! float_impl { fn normalize(significand: Self::Int) -> (i32, Self::Int) { let shift = significand .leading_zeros() - .wrapping_sub((Self::Int::ONE << Self::SIGNIFICAND_BITS).leading_zeros()); + .wrapping_sub(Self::EXPONENT_BITS); ( 1i32.wrapping_sub(shift as i32), significand << shift as Self::Int, diff --git a/src/int/big.rs b/src/int/big.rs index 019dd728..e565da89 100644 --- a/src/int/big.rs +++ b/src/int/big.rs @@ -93,7 +93,7 @@ macro_rules! impl_common { type Output = Self; fn shl(self, rhs: u32) -> Self::Output { - todo!() + unimplemented!("only used to meet trait bounds") } } }; @@ -102,6 +102,41 @@ macro_rules! impl_common { impl_common!(i256); impl_common!(u256); +impl ops::Shr for u256 { + type Output = Self; + + fn shr(self, rhs: u32) -> Self::Output { + assert!(rhs < Self::BITS, "attempted to shift right with overflow"); + + if rhs == 0 { + return self; + } + + let mut ret = self; + let byte_shift = rhs / 64; + let bit_shift = rhs % 64; + + for idx in 0..4 { + let base_idx = idx + byte_shift as usize; + + let Some(base) = ret.0.get(base_idx) else { + ret.0[idx] = 0; + continue; + }; + + let mut new_val = base >> bit_shift; + + if let Some(new) = ret.0.get(base_idx + 1) { + new_val |= new.overflowing_shl(64 - bit_shift).0; + } + + ret.0[idx] = new_val; + } + + ret + } +} + macro_rules! 
word { (1, $val:expr) => { (($val >> (32 * 3)) & Self::from(WORD_LO_MASK)) as u64 diff --git a/testcrate/benches/float_div.rs b/testcrate/benches/float_div.rs index 6ba439b0..6a039a82 100644 --- a/testcrate/benches/float_div.rs +++ b/testcrate/benches/float_div.rs @@ -1,5 +1,7 @@ +#![cfg_attr(f128_enabled, feature(f128))] + use compiler_builtins::float::div; -use criterion::{criterion_group, criterion_main, Criterion}; +use criterion::{criterion_main, Criterion}; use testcrate::float_bench; float_bench! { @@ -64,5 +66,28 @@ float_bench! { ], } -criterion_group!(float_div, div_f32, div_f64); +#[cfg(f128_enabled)] +float_bench! { + name: div_f128, + sig: (a: f128, b: f128) -> f128, + crate_fn: div::__divtf3, + crate_fn_ppc: div::__divkf3, + sys_fn: __divtf3, + sys_fn_ppc: __divkf3, + sys_available: not(feature = "no-sys-f128"), + asm: [] +} + +pub fn float_div() { + let mut criterion = Criterion::default().configure_from_args(); + + div_f32(&mut criterion); + div_f64(&mut criterion); + + #[cfg(f128_enabled)] + { + div_f128(&mut criterion); + } +} + criterion_main!(float_div); diff --git a/testcrate/src/bench.rs b/testcrate/src/bench.rs index 1d571a6c..f831b5a6 100644 --- a/testcrate/src/bench.rs +++ b/testcrate/src/bench.rs @@ -30,13 +30,14 @@ pub fn skip_sys_checks(test_name: &str) -> bool { // FIXME(f16_f128): system symbols have incorrect results // - const X86_NO_SSE_SKIPPED: &[&str] = - &["add_f128", "sub_f128", "mul_f128", "powi_f32", "powi_f64"]; + const X86_NO_SSE_SKIPPED: &[&str] = &[ + "add_f128", "sub_f128", "mul_f128", "div_f128", "powi_f32", "powi_f64", + ]; // FIXME(f16_f128): Wide multiply carry bug in `compiler-rt`, re-enable when nightly no longer // uses `compiler-rt` version. // - const AARCH64_SKIPPED: &[&str] = &["mul_f128"]; + const AARCH64_SKIPPED: &[&str] = &["mul_f128", "div_f128"]; // FIXME(llvm): system symbols have incorrect results on Windows // diff --git a/testcrate/tests/big.rs b/testcrate/tests/big.rs index 128b5ddf..595f6225 100644 --- a/testcrate/tests/big.rs +++ b/testcrate/tests/big.rs @@ -59,3 +59,76 @@ fn widen_mul_u128() { } assert!(errors.is_empty()); } + +#[test] +fn not_u128() { + assert_eq!(!u256::ZERO, u256::MAX); +} + +#[test] +fn shr_u128() { + let only_low = [ + 1, + u16::MAX.into(), + u32::MAX.into(), + u64::MAX.into(), + u128::MAX, + ]; + + let mut errors = Vec::new(); + + for a in only_low { + for perturb in 0..10 { + let a = a.saturating_add(perturb); + for shift in 0..128 { + let res = a.widen() >> shift; + let expected = (a >> shift).widen(); + if res != expected { + errors.push((a.widen(), shift, res, expected)); + } + } + } + } + + let check = [ + ( + u256::MAX, + 1, + u256([u64::MAX, u64::MAX, u64::MAX, u64::MAX >> 1]), + ), + ( + u256::MAX, + 5, + u256([u64::MAX, u64::MAX, u64::MAX, u64::MAX >> 5]), + ), + (u256::MAX, 63, u256([u64::MAX, u64::MAX, u64::MAX, 1])), + (u256::MAX, 64, u256([u64::MAX, u64::MAX, u64::MAX, 0])), + (u256::MAX, 65, u256([u64::MAX, u64::MAX, u64::MAX >> 1, 0])), + (u256::MAX, 127, u256([u64::MAX, u64::MAX, 1, 0])), + (u256::MAX, 128, u256([u64::MAX, u64::MAX, 0, 0])), + (u256::MAX, 129, u256([u64::MAX, u64::MAX >> 1, 0, 0])), + (u256::MAX, 191, u256([u64::MAX, 1, 0, 0])), + (u256::MAX, 192, u256([u64::MAX, 0, 0, 0])), + (u256::MAX, 193, u256([u64::MAX >> 1, 0, 0, 0])), + (u256::MAX, 191, u256([u64::MAX, 1, 0, 0])), + (u256::MAX, 254, u256([0b11, 0, 0, 0])), + (u256::MAX, 255, u256([1, 0, 0, 0])), + ]; + + for (input, shift, expected) in check { + let res = input >> shift; + if res != expected { + 
errors.push((input, shift, res, expected)); + } + } + + for (a, b, res, expected) in &errors { + eprintln!( + "FAILURE: {} >> {b} = {} got {}", + hexu(*a), + hexu(*expected), + hexu(*res), + ); + } + assert!(errors.is_empty()); +} diff --git a/testcrate/tests/div_rem.rs b/testcrate/tests/div_rem.rs index 418e9c18..ac87eb63 100644 --- a/testcrate/tests/div_rem.rs +++ b/testcrate/tests/div_rem.rs @@ -1,3 +1,4 @@ +#![feature(f128)] #![allow(unused_macros)] use compiler_builtins::int::sdiv::{__divmoddi4, __divmodsi4, __divmodti4}; @@ -115,7 +116,13 @@ macro_rules! float { fuzz_float_2(N, |x: $f, y: $f| { let quo0: $f = apfloat_fallback!($f, $apfloat_ty, $sys_available, Div::div, x, y); let quo1: $f = $fn(x, y); - #[cfg(not(target_arch = "arm"))] + + // ARM SIMD instructions always flush subnormals to zero + if cfg!(target_arch = "arm") && + ((Float::is_subnormal(quo0)) || Float::is_subnormal(quo1)) { + return; + } + if !Float::eq_repr(quo0, quo1) { panic!( "{}({:?}, {:?}): std: {:?}, builtins: {:?}", @@ -126,21 +133,6 @@ macro_rules! float { quo1 ); } - - // ARM SIMD instructions always flush subnormals to zero - #[cfg(target_arch = "arm")] - if !(Float::is_subnormal(quo0) || Float::is_subnormal(quo1)) { - if !Float::eq_repr(quo0, quo1) { - panic!( - "{}({:?}, {:?}): std: {:?}, builtins: {:?}", - stringify!($fn), - x, - y, - quo0, - quo1 - ); - } - } }); } )* @@ -155,4 +147,19 @@ mod float_div { f32, __divsf3, Single, all(); f64, __divdf3, Double, all(); } + + #[cfg(not(feature = "no-f16-f128"))] + #[cfg(not(any(target_arch = "powerpc", target_arch = "powerpc64")))] + float! { + f128, __divtf3, Quad, + // FIXME(llvm): there is a bug in LLVM rt. + // See . + not(any(feature = "no-sys-f128", all(target_arch = "aarch64", target_os = "linux"))); + } + + #[cfg(not(feature = "no-f16-f128"))] + #[cfg(any(target_arch = "powerpc", target_arch = "powerpc64"))] + float! { + f128, __divkf3, Quad, not(feature = "no-sys-f128"); + } }
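> Usage note (not part of the patch): with `__divtf3` exported, quad-precision division can be exercised directly, the same way the new benchmark and fuzz tests above do. This is a minimal sketch only; it assumes a nightly toolchain with `#![feature(f128)]`, the `no-f16-f128` feature disabled, and `compiler_builtins::float::div` reachable as it is from `testcrate/benches/float_div.rs`.

```rust
#![feature(f128)]

use compiler_builtins::float::div::__divtf3;

fn main() {
    let a: f128 = 2.0;
    let b: f128 = 3.0;

    // On targets without native quad-precision hardware, the `/` operator
    // lowers to `__divtf3`, so calling the builtin directly should agree
    // with language-level division.
    let q = __divtf3(a, b);
    assert!(q == a / b);
}
```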