| field | value | date |
|---|---|---|
| author | Daniel Baumann <daniel.baumann@progress-linux.org> | 2024-05-30 03:57:31 +0000 |
| committer | Daniel Baumann <daniel.baumann@progress-linux.org> | 2024-05-30 03:57:31 +0000 |
| commit | dc0db358abe19481e475e10c32149b53370f1a1c (patch) | |
| tree | ab8ce99c4b255ce46f99ef402c27916055b899ee /vendor/compiler_builtins/src | |
| parent | Releasing progress-linux version 1.71.1+dfsg1-2~progress7.99u1. (diff) | |
| download | rustc-dc0db358abe19481e475e10c32149b53370f1a1c.tar.xz, rustc-dc0db358abe19481e475e10c32149b53370f1a1c.zip | |
Merging upstream version 1.72.1+dfsg1.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'vendor/compiler_builtins/src')
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | vendor/compiler_builtins/src/aarch64_linux.rs | 277 |
| -rw-r--r-- | vendor/compiler_builtins/src/float/cmp.rs | 14 |
| -rw-r--r-- | vendor/compiler_builtins/src/float/conv.rs | 2 |
| -rw-r--r-- | vendor/compiler_builtins/src/float/div.rs | 857 |
| -rw-r--r-- | vendor/compiler_builtins/src/lib.rs | 4 |
| -rw-r--r-- | vendor/compiler_builtins/src/macros.rs | 20 |
| -rw-r--r-- | vendor/compiler_builtins/src/math.rs | 5 |
7 files changed, 966 insertions, 213 deletions
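
Editor's note: the new `aarch64_linux.rs` added below builds every outlined-atomic helper from small macros that splice an LL/SC retry loop into inline assembly. As a reading aid, here is a minimal sketch of roughly what one generated symbol, the 4-byte relaxed compare-and-swap, looks like once those macros are expanded. The instruction sequence and register assignments follow the `compare_and_swap!` macro in the diff; the surrounding crate/feature scaffolding is assumed for the sketch and is not part of the vendored source, which wraps the function in its `intrinsics!` machinery instead.

```rust
// Sketch: approximate expansion of `compare_and_swap!(Relaxed, 4, __aarch64_cas4_relax)`.
// Requires nightly Rust for naked functions.
#![feature(naked_functions)]

#[naked]
pub unsafe extern "C" fn __aarch64_cas4_relax(
    _expected: i32, // arrives in w0
    _desired: i32,  // arrives in w1
    _ptr: *mut i32, // arrives in x2
) -> i32 {
    core::arch::asm!(
        "mov w16, w0",        // stash expected (uxt! is a plain mov at 4 bytes)
        "0:",
        "ldxr w0, [x2]",      // load-exclusive the current value into the return register
        "cmp w0, w16",
        "bne 1f",             // mismatch: return the observed value unchanged
        "stxr w17, w1, [x2]", // try to store desired; w17 == 0 on success
        "cbnz w17, 0b",       // exclusive store failed, retry the whole loop
        "1:",
        "ret",
        options(noreturn)
    )
}
```

Acquire and release variants differ only in the load/store mnemonics (`ldaxr`/`stlxr`), and other widths swap `w` registers for `x` or add `b`/`h` suffixes, which is exactly what the `acquire!`, `release!`, `size!`, and `reg!` macros below select.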
diff --git a/vendor/compiler_builtins/src/aarch64_linux.rs b/vendor/compiler_builtins/src/aarch64_linux.rs
new file mode 100644
index 000000000..62144e531
--- /dev/null
+++ b/vendor/compiler_builtins/src/aarch64_linux.rs
@@ -0,0 +1,277 @@
+//! Aarch64 targets have two possible implementations for atomics:
+//! 1. Load-Locked, Store-Conditional (LL/SC), older and slower.
+//! 2. Large System Extensions (LSE), newer and faster.
+//! To avoid breaking backwards compat, C toolchains introduced a concept of "outlined atomics",
+//! where atomic operations call into the compiler runtime to dispatch between the two depending on
+//! which is supported on the current CPU.
+//! See https://community.arm.com/arm-community-blogs/b/tools-software-ides-blog/posts/making-the-most-of-the-arm-architecture-in-gcc-10#:~:text=out%20of%20line%20atomics for more discussion.
+//!
+//! Currently we only support LL/SC, because LSE requires `getauxval` from libc in order to do runtime detection.
+//! Use the `compiler-rt` intrinsics if you want LSE support.
+//!
+//! Ported from `aarch64/lse.S` in LLVM's compiler-rt.
+//!
+//! Generate functions for each of the following symbols:
+//! __aarch64_casM_ORDER
+//! __aarch64_swpN_ORDER
+//! __aarch64_ldaddN_ORDER
+//! __aarch64_ldclrN_ORDER
+//! __aarch64_ldeorN_ORDER
+//! __aarch64_ldsetN_ORDER
+//! for N = {1, 2, 4, 8}, M = {1, 2, 4, 8, 16}, ORDER = { relax, acq, rel, acq_rel }
+//!
+//! The original `lse.S` has some truly horrifying code that expects to be compiled multiple times with different constants.
+//! We do something similar, but with macro arguments.
+#![cfg_attr(feature = "c", allow(unused_macros))] // avoid putting the macros into a submodule
+
+// We don't do runtime dispatch so we don't have to worry about the `__aarch64_have_lse_atomics` global ctor.
+
+/// Translate a byte size to a Rust type.
+#[rustfmt::skip]
+macro_rules! int_ty {
+    (1) => { i8 };
+    (2) => { i16 };
+    (4) => { i32 };
+    (8) => { i64 };
+    (16) => { i128 };
+}
+
+/// Given a byte size and a register number, return a register of the appropriate size.
+///
+/// See <https://developer.arm.com/documentation/102374/0101/Registers-in-AArch64---general-purpose-registers>.
+#[rustfmt::skip]
+macro_rules! reg {
+    (1, $num:literal) => { concat!("w", $num) };
+    (2, $num:literal) => { concat!("w", $num) };
+    (4, $num:literal) => { concat!("w", $num) };
+    (8, $num:literal) => { concat!("x", $num) };
+}
+
+/// Given an atomic ordering, translate it to the acquire suffix for the ldxr aarch64 ASM instruction.
+#[rustfmt::skip]
+macro_rules! acquire {
+    (Relaxed) => { "" };
+    (Acquire) => { "a" };
+    (Release) => { "" };
+    (AcqRel) => { "a" };
+}
+
+/// Given an atomic ordering, translate it to the release suffix for the stxr aarch64 ASM instruction.
+#[rustfmt::skip]
+macro_rules! release {
+    (Relaxed) => { "" };
+    (Acquire) => { "" };
+    (Release) => { "l" };
+    (AcqRel) => { "l" };
+}
+
+/// Given a size in bytes, translate it to the byte suffix for an aarch64 ASM instruction.
+#[rustfmt::skip]
+macro_rules! size {
+    (1) => { "b" };
+    (2) => { "h" };
+    (4) => { "" };
+    (8) => { "" };
+    (16) => { "" };
+}
+
+/// Given a byte size, translate it to an Unsigned eXTend instruction
+/// with the correct semantics.
+///
+/// See <https://developer.arm.com/documentation/ddi0596/2020-12/Base-Instructions/UXTB--Unsigned-Extend-Byte--an-alias-of-UBFM->
+#[rustfmt::skip]
+macro_rules!
uxt { + (1) => { "uxtb" }; + (2) => { "uxth" }; + ($_:tt) => { "mov" }; +} + +/// Given an atomic ordering and byte size, translate it to a LoaD eXclusive Register instruction +/// with the correct semantics. +/// +/// See <https://developer.arm.com/documentation/ddi0596/2020-12/Base-Instructions/LDXR--Load-Exclusive-Register->. +macro_rules! ldxr { + ($ordering:ident, $bytes:tt) => { + concat!("ld", acquire!($ordering), "xr", size!($bytes)) + }; +} + +/// Given an atomic ordering and byte size, translate it to a STore eXclusive Register instruction +/// with the correct semantics. +/// +/// See <https://developer.arm.com/documentation/ddi0596/2020-12/Base-Instructions/STXR--Store-Exclusive-Register->. +macro_rules! stxr { + ($ordering:ident, $bytes:tt) => { + concat!("st", release!($ordering), "xr", size!($bytes)) + }; +} + +/// Given an atomic ordering and byte size, translate it to a LoaD eXclusive Pair of registers instruction +/// with the correct semantics. +/// +/// See <https://developer.arm.com/documentation/ddi0596/2020-12/Base-Instructions/LDXP--Load-Exclusive-Pair-of-Registers-> +macro_rules! ldxp { + ($ordering:ident) => { + concat!("ld", acquire!($ordering), "xp") + }; +} + +/// Given an atomic ordering and byte size, translate it to a STore eXclusive Pair of registers instruction +/// with the correct semantics. +/// +/// See <https://developer.arm.com/documentation/ddi0596/2020-12/Base-Instructions/STXP--Store-Exclusive-Pair-of-registers->. +macro_rules! stxp { + ($ordering:ident) => { + concat!("st", release!($ordering), "xp") + }; +} + +/// See <https://doc.rust-lang.org/stable/std/sync/atomic/struct.AtomicI8.html#method.compare_and_swap>. +macro_rules! compare_and_swap { + ($ordering:ident, $bytes:tt, $name:ident) => { + intrinsics! { + #[maybe_use_optimized_c_shim] + #[naked] + pub unsafe extern "C" fn $name ( + expected: int_ty!($bytes), desired: int_ty!($bytes), ptr: *mut int_ty!($bytes) + ) -> int_ty!($bytes) { + // We can't use `AtomicI8::compare_and_swap`; we *are* compare_and_swap. + unsafe { core::arch::asm! { + // UXT s(tmp0), s(0) + concat!(uxt!($bytes), " ", reg!($bytes, 16), ", ", reg!($bytes, 0)), + "0:", + // LDXR s(0), [x2] + concat!(ldxr!($ordering, $bytes), " ", reg!($bytes, 0), ", [x2]"), + // cmp s(0), s(tmp0) + concat!("cmp ", reg!($bytes, 0), ", ", reg!($bytes, 16)), + "bne 1f", + // STXR w(tmp1), s(1), [x2] + concat!(stxr!($ordering, $bytes), " w17, ", reg!($bytes, 1), ", [x2]"), + "cbnz w17, 0b", + "1:", + "ret", + options(noreturn) + } } + } + } + }; +} + +// i128 uses a completely different impl, so it has its own macro. +macro_rules! compare_and_swap_i128 { + ($ordering:ident, $name:ident) => { + intrinsics! { + #[maybe_use_optimized_c_shim] + #[naked] + pub unsafe extern "C" fn $name ( + expected: i128, desired: i128, ptr: *mut i128 + ) -> i128 { + unsafe { core::arch::asm! { + "mov x16, x0", + "mov x17, x1", + "0:", + // LDXP x0, x1, [x4] + concat!(ldxp!($ordering), " x0, x1, [x4]"), + "cmp x0, x16", + "ccmp x1, x17, #0, eq", + "bne 1f", + // STXP w(tmp2), x2, x3, [x4] + concat!(stxp!($ordering), " w15, x2, x3, [x4]"), + "cbnz w15, 0b", + "1:", + "ret", + options(noreturn) + } } + } + } + }; +} + +/// See <https://doc.rust-lang.org/stable/std/sync/atomic/struct.AtomicI8.html#method.swap>. +macro_rules! swap { + ($ordering:ident, $bytes:tt, $name:ident) => { + intrinsics! 
{ + #[maybe_use_optimized_c_shim] + #[naked] + pub unsafe extern "C" fn $name ( + left: int_ty!($bytes), right_ptr: *mut int_ty!($bytes) + ) -> int_ty!($bytes) { + unsafe { core::arch::asm! { + // mov s(tmp0), s(0) + concat!("mov ", reg!($bytes, 16), ", ", reg!($bytes, 0)), + "0:", + // LDXR s(0), [x1] + concat!(ldxr!($ordering, $bytes), " ", reg!($bytes, 0), ", [x1]"), + // STXR w(tmp1), s(tmp0), [x1] + concat!(stxr!($ordering, $bytes), " w17, ", reg!($bytes, 16), ", [x1]"), + "cbnz w17, 0b", + "ret", + options(noreturn) + } } + } + } + }; +} + +/// See (e.g.) <https://doc.rust-lang.org/stable/std/sync/atomic/struct.AtomicI8.html#method.fetch_add>. +macro_rules! fetch_op { + ($ordering:ident, $bytes:tt, $name:ident, $op:literal) => { + intrinsics! { + #[maybe_use_optimized_c_shim] + #[naked] + pub unsafe extern "C" fn $name ( + val: int_ty!($bytes), ptr: *mut int_ty!($bytes) + ) -> int_ty!($bytes) { + unsafe { core::arch::asm! { + // mov s(tmp0), s(0) + concat!("mov ", reg!($bytes, 16), ", ", reg!($bytes, 0)), + "0:", + // LDXR s(0), [x1] + concat!(ldxr!($ordering, $bytes), " ", reg!($bytes, 0), ", [x1]"), + // OP s(tmp1), s(0), s(tmp0) + concat!($op, " ", reg!($bytes, 17), ", ", reg!($bytes, 0), ", ", reg!($bytes, 16)), + // STXR w(tmp2), s(tmp1), [x1] + concat!(stxr!($ordering, $bytes), " w15, ", reg!($bytes, 17), ", [x1]"), + "cbnz w15, 0b", + "ret", + options(noreturn) + } } + } + } + } +} + +// We need a single macro to pass to `foreach_ldadd`. +macro_rules! add { + ($ordering:ident, $bytes:tt, $name:ident) => { + fetch_op! { $ordering, $bytes, $name, "add" } + }; +} + +macro_rules! and { + ($ordering:ident, $bytes:tt, $name:ident) => { + fetch_op! { $ordering, $bytes, $name, "bic" } + }; +} + +macro_rules! xor { + ($ordering:ident, $bytes:tt, $name:ident) => { + fetch_op! { $ordering, $bytes, $name, "eor" } + }; +} + +macro_rules! or { + ($ordering:ident, $bytes:tt, $name:ident) => { + fetch_op! { $ordering, $bytes, $name, "orr" } + }; +} + +// See `generate_aarch64_outlined_atomics` in build.rs. +include!(concat!(env!("OUT_DIR"), "/outlined_atomics.rs")); +foreach_cas!(compare_and_swap); +foreach_cas16!(compare_and_swap_i128); +foreach_swp!(swap); +foreach_ldadd!(add); +foreach_ldclr!(and); +foreach_ldeor!(xor); +foreach_ldset!(or); diff --git a/vendor/compiler_builtins/src/float/cmp.rs b/vendor/compiler_builtins/src/float/cmp.rs index 1d4e38433..1bd7aa284 100644 --- a/vendor/compiler_builtins/src/float/cmp.rs +++ b/vendor/compiler_builtins/src/float/cmp.rs @@ -99,60 +99,74 @@ fn unord<F: Float>(a: F, b: F) -> bool { } intrinsics! 
{ + #[avr_skip] pub extern "C" fn __lesf2(a: f32, b: f32) -> i32 { cmp(a, b).to_le_abi() } + #[avr_skip] pub extern "C" fn __gesf2(a: f32, b: f32) -> i32 { cmp(a, b).to_ge_abi() } + #[avr_skip] #[arm_aeabi_alias = __aeabi_fcmpun] pub extern "C" fn __unordsf2(a: f32, b: f32) -> i32 { unord(a, b) as i32 } + #[avr_skip] pub extern "C" fn __eqsf2(a: f32, b: f32) -> i32 { cmp(a, b).to_le_abi() } + #[avr_skip] pub extern "C" fn __ltsf2(a: f32, b: f32) -> i32 { cmp(a, b).to_le_abi() } + #[avr_skip] pub extern "C" fn __nesf2(a: f32, b: f32) -> i32 { cmp(a, b).to_le_abi() } + #[avr_skip] pub extern "C" fn __gtsf2(a: f32, b: f32) -> i32 { cmp(a, b).to_ge_abi() } + #[avr_skip] pub extern "C" fn __ledf2(a: f64, b: f64) -> i32 { cmp(a, b).to_le_abi() } + #[avr_skip] pub extern "C" fn __gedf2(a: f64, b: f64) -> i32 { cmp(a, b).to_ge_abi() } + #[avr_skip] #[arm_aeabi_alias = __aeabi_dcmpun] pub extern "C" fn __unorddf2(a: f64, b: f64) -> i32 { unord(a, b) as i32 } + #[avr_skip] pub extern "C" fn __eqdf2(a: f64, b: f64) -> i32 { cmp(a, b).to_le_abi() } + #[avr_skip] pub extern "C" fn __ltdf2(a: f64, b: f64) -> i32 { cmp(a, b).to_le_abi() } + #[avr_skip] pub extern "C" fn __nedf2(a: f64, b: f64) -> i32 { cmp(a, b).to_le_abi() } + #[avr_skip] pub extern "C" fn __gtdf2(a: f64, b: f64) -> i32 { cmp(a, b).to_ge_abi() } diff --git a/vendor/compiler_builtins/src/float/conv.rs b/vendor/compiler_builtins/src/float/conv.rs index a27d542fa..790c0ab9f 100644 --- a/vendor/compiler_builtins/src/float/conv.rs +++ b/vendor/compiler_builtins/src/float/conv.rs @@ -3,7 +3,7 @@ /// These are hand-optimized bit twiddling code, /// which unfortunately isn't the easiest kind of code to read. /// -/// The algorithm is explained here: https://blog.m-ou.se/floats/ +/// The algorithm is explained here: <https://blog.m-ou.se/floats/> mod int_to_float { pub fn u32_to_f32_bits(i: u32) -> u32 { if i == 0 { diff --git a/vendor/compiler_builtins/src/float/div.rs b/vendor/compiler_builtins/src/float/div.rs index c2d6c07e7..c0aae34fb 100644 --- a/vendor/compiler_builtins/src/float/div.rs +++ b/vendor/compiler_builtins/src/float/div.rs @@ -12,11 +12,17 @@ where i32: CastInto<F::Int>, F::Int: CastInto<i32>, F::Int: HInt, + <F as Float>::Int: core::ops::Mul, { + const NUMBER_OF_HALF_ITERATIONS: usize = 0; + const NUMBER_OF_FULL_ITERATIONS: usize = 3; + const USE_NATIVE_FULL_ITERATIONS: bool = true; + let one = F::Int::ONE; let zero = F::Int::ZERO; + let hw = F::BITS / 2; + let lo_mask = u32::MAX >> hw; - // let bits = F::BITS; let significand_bits = F::SIGNIFICAND_BITS; let max_exponent = F::EXPONENT_MAX; @@ -109,101 +115,341 @@ where } } - // Or in the implicit significand bit. (If we fell through from the + // Set the implicit significand bit. If we fell through from the // denormal path it was already set by normalize( ), but setting it twice - // won't hurt anything.) + // won't hurt anything. a_significand |= implicit_bit; b_significand |= implicit_bit; - let mut quotient_exponent: i32 = CastInto::<i32>::cast(a_exponent) - .wrapping_sub(CastInto::<i32>::cast(b_exponent)) - .wrapping_add(scale); - - // Align the significand of b as a Q31 fixed-point number in the range - // [1, 2.0) and get a Q32 approximate reciprocal using a small minimax - // polynomial approximation: reciprocal = 3/4 + 1/sqrt(2) - b/2. This - // is accurate to about 3.5 binary digits. 
- let q31b = CastInto::<u32>::cast(b_significand << 8.cast()); - let mut reciprocal = (0x7504f333u32).wrapping_sub(q31b); - - // Now refine the reciprocal estimate using a Newton-Raphson iteration: - // - // x1 = x0 * (2 - x0 * b) - // - // This doubles the number of correct binary digits in the approximation - // with each iteration, so after three iterations, we have about 28 binary - // digits of accuracy. - - let mut correction: u32 = - negate_u32(((reciprocal as u64).wrapping_mul(q31b as u64) >> 32) as u32); - reciprocal = ((reciprocal as u64).wrapping_mul(correction as u64) >> 31) as u32; - correction = negate_u32(((reciprocal as u64).wrapping_mul(q31b as u64) >> 32) as u32); - reciprocal = ((reciprocal as u64).wrapping_mul(correction as u64) >> 31) as u32; - correction = negate_u32(((reciprocal as u64).wrapping_mul(q31b as u64) >> 32) as u32); - reciprocal = ((reciprocal as u64).wrapping_mul(correction as u64) >> 31) as u32; - - // Exhaustive testing shows that the error in reciprocal after three steps - // is in the interval [-0x1.f58108p-31, 0x1.d0e48cp-29], in line with our - // expectations. We bump the reciprocal by a tiny value to force the error - // to be strictly positive (in the range [0x1.4fdfp-37,0x1.287246p-29], to - // be specific). This also causes 1/1 to give a sensible approximation - // instead of zero (due to overflow). - reciprocal = reciprocal.wrapping_sub(2); - - // The numerical reciprocal is accurate to within 2^-28, lies in the - // interval [0x1.000000eep-1, 0x1.fffffffcp-1], and is strictly smaller - // than the true reciprocal of b. Multiplying a by this reciprocal thus - // gives a numerical q = a/b in Q24 with the following properties: - // - // 1. q < a/b - // 2. q is in the interval [0x1.000000eep-1, 0x1.fffffffcp0) - // 3. the error in q is at most 2^-24 + 2^-27 -- the 2^24 term comes - // from the fact that we truncate the product, and the 2^27 term - // is the error in the reciprocal of b scaled by the maximum - // possible value of a. As a consequence of this error bound, - // either q or nextafter(q) is the correctly rounded - let mut quotient = (a_significand << 1).widen_mul(reciprocal.cast()).hi(); - - // Two cases: quotient is in [0.5, 1.0) or quotient is in [1.0, 2.0). - // In either case, we are going to compute a residual of the form - // - // r = a - q*b + + let written_exponent: i32 = CastInto::<u32>::cast( + a_exponent + .wrapping_sub(b_exponent) + .wrapping_add(scale.cast()), + ) + .wrapping_add(exponent_bias) as i32; + let b_uq1 = b_significand << (F::BITS - significand_bits - 1); + + // Align the significand of b as a UQ1.(n-1) fixed-point number in the range + // [1.0, 2.0) and get a UQ0.n approximate reciprocal using a small minimax + // polynomial approximation: x0 = 3/4 + 1/sqrt(2) - b/2. + // The max error for this approximation is achieved at endpoints, so + // abs(x0(b) - 1/b) <= abs(x0(1) - 1/1) = 3/4 - 1/sqrt(2) = 0.04289..., + // which is about 4.5 bits. + // The initial approximation is between x0(1.0) = 0.9571... and x0(2.0) = 0.4571... + + // Then, refine the reciprocal estimate using a quadratically converging + // Newton-Raphson iteration: + // x_{n+1} = x_n * (2 - x_n * b) // - // We know from the construction of q that r satisfies: + // Let b be the original divisor considered "in infinite precision" and + // obtained from IEEE754 representation of function argument (with the + // implicit bit set). Corresponds to rep_t-sized b_UQ1 represented in + // UQ1.(W-1). 
// - // 0 <= r < ulp(q)*b + // Let b_hw be an infinitely precise number obtained from the highest (HW-1) + // bits of divisor significand (with the implicit bit set). Corresponds to + // half_rep_t-sized b_UQ1_hw represented in UQ1.(HW-1) that is a **truncated** + // version of b_UQ1. // - // if r is greater than 1/2 ulp(q)*b, then q rounds up. Otherwise, we - // already have the correct result. The exact halfway case cannot occur. - // We also take this time to right shift quotient if it falls in the [1,2) - // range and adjust the exponent accordingly. - let residual = if quotient < (implicit_bit << 1) { - quotient_exponent = quotient_exponent.wrapping_sub(1); - (a_significand << (significand_bits + 1)).wrapping_sub(quotient.wrapping_mul(b_significand)) + // Let e_n := x_n - 1/b_hw + // E_n := x_n - 1/b + // abs(E_n) <= abs(e_n) + (1/b_hw - 1/b) + // = abs(e_n) + (b - b_hw) / (b*b_hw) + // <= abs(e_n) + 2 * 2^-HW + + // rep_t-sized iterations may be slower than the corresponding half-width + // variant depending on the handware and whether single/double/quad precision + // is selected. + // NB: Using half-width iterations increases computation errors due to + // rounding, so error estimations have to be computed taking the selected + // mode into account! + + #[allow(clippy::absurd_extreme_comparisons)] + let mut x_uq0 = if NUMBER_OF_HALF_ITERATIONS > 0 { + // Starting with (n-1) half-width iterations + let b_uq1_hw: u16 = + (CastInto::<u32>::cast(b_significand) >> (significand_bits + 1 - hw)) as u16; + + // C is (3/4 + 1/sqrt(2)) - 1 truncated to W0 fractional bits as UQ0.HW + // with W0 being either 16 or 32 and W0 <= HW. + // That is, C is the aforementioned 3/4 + 1/sqrt(2) constant (from which + // b/2 is subtracted to obtain x0) wrapped to [0, 1) range. + + // HW is at least 32. Shifting into the highest bits if needed. + let c_hw = (0x7504_u32 as u16).wrapping_shl(hw.wrapping_sub(32)); + + // b >= 1, thus an upper bound for 3/4 + 1/sqrt(2) - b/2 is about 0.9572, + // so x0 fits to UQ0.HW without wrapping. + let x_uq0_hw: u16 = { + let mut x_uq0_hw: u16 = c_hw.wrapping_sub(b_uq1_hw /* exact b_hw/2 as UQ0.HW */); + // An e_0 error is comprised of errors due to + // * x0 being an inherently imprecise first approximation of 1/b_hw + // * C_hw being some (irrational) number **truncated** to W0 bits + // Please note that e_0 is calculated against the infinitely precise + // reciprocal of b_hw (that is, **truncated** version of b). + // + // e_0 <= 3/4 - 1/sqrt(2) + 2^-W0 + + // By construction, 1 <= b < 2 + // f(x) = x * (2 - b*x) = 2*x - b*x^2 + // f'(x) = 2 * (1 - b*x) + // + // On the [0, 1] interval, f(0) = 0, + // then it increses until f(1/b) = 1 / b, maximum on (0, 1), + // then it decreses to f(1) = 2 - b + // + // Let g(x) = x - f(x) = b*x^2 - x. + // On (0, 1/b), g(x) < 0 <=> f(x) > x + // On (1/b, 1], g(x) > 0 <=> f(x) < x + // + // For half-width iterations, b_hw is used instead of b. + #[allow(clippy::reversed_empty_ranges)] + for _ in 0..NUMBER_OF_HALF_ITERATIONS { + // corr_UQ1_hw can be **larger** than 2 - b_hw*x by at most 1*Ulp + // of corr_UQ1_hw. + // "0.0 - (...)" is equivalent to "2.0 - (...)" in UQ1.(HW-1). + // On the other hand, corr_UQ1_hw should not overflow from 2.0 to 0.0 provided + // no overflow occurred earlier: ((rep_t)x_UQ0_hw * b_UQ1_hw >> HW) is + // expected to be strictly positive because b_UQ1_hw has its highest bit set + // and x_UQ0_hw should be rather large (it converges to 1/2 < 1/b_hw <= 1). 
+ let corr_uq1_hw: u16 = + 0.wrapping_sub((x_uq0_hw as u32).wrapping_mul(b_uq1_hw.cast()) >> hw) as u16; + + // Now, we should multiply UQ0.HW and UQ1.(HW-1) numbers, naturally + // obtaining an UQ1.(HW-1) number and proving its highest bit could be + // considered to be 0 to be able to represent it in UQ0.HW. + // From the above analysis of f(x), if corr_UQ1_hw would be represented + // without any intermediate loss of precision (that is, in twice_rep_t) + // x_UQ0_hw could be at most [1.]000... if b_hw is exactly 1.0 and strictly + // less otherwise. On the other hand, to obtain [1.]000..., one have to pass + // 1/b_hw == 1.0 to f(x), so this cannot occur at all without overflow (due + // to 1.0 being not representable as UQ0.HW). + // The fact corr_UQ1_hw was virtually round up (due to result of + // multiplication being **first** truncated, then negated - to improve + // error estimations) can increase x_UQ0_hw by up to 2*Ulp of x_UQ0_hw. + x_uq0_hw = ((x_uq0_hw as u32).wrapping_mul(corr_uq1_hw as u32) >> (hw - 1)) as u16; + // Now, either no overflow occurred or x_UQ0_hw is 0 or 1 in its half_rep_t + // representation. In the latter case, x_UQ0_hw will be either 0 or 1 after + // any number of iterations, so just subtract 2 from the reciprocal + // approximation after last iteration. + + // In infinite precision, with 0 <= eps1, eps2 <= U = 2^-HW: + // corr_UQ1_hw = 2 - (1/b_hw + e_n) * b_hw + 2*eps1 + // = 1 - e_n * b_hw + 2*eps1 + // x_UQ0_hw = (1/b_hw + e_n) * (1 - e_n*b_hw + 2*eps1) - eps2 + // = 1/b_hw - e_n + 2*eps1/b_hw + e_n - e_n^2*b_hw + 2*e_n*eps1 - eps2 + // = 1/b_hw + 2*eps1/b_hw - e_n^2*b_hw + 2*e_n*eps1 - eps2 + // e_{n+1} = -e_n^2*b_hw + 2*eps1/b_hw + 2*e_n*eps1 - eps2 + // = 2*e_n*eps1 - (e_n^2*b_hw + eps2) + 2*eps1/b_hw + // \------ >0 -------/ \-- >0 ---/ + // abs(e_{n+1}) <= 2*abs(e_n)*U + max(2*e_n^2 + U, 2 * U) + } + // For initial half-width iterations, U = 2^-HW + // Let abs(e_n) <= u_n * U, + // then abs(e_{n+1}) <= 2 * u_n * U^2 + max(2 * u_n^2 * U^2 + U, 2 * U) + // u_{n+1} <= 2 * u_n * U + max(2 * u_n^2 * U + 1, 2) + + // Account for possible overflow (see above). For an overflow to occur for the + // first time, for "ideal" corr_UQ1_hw (that is, without intermediate + // truncation), the result of x_UQ0_hw * corr_UQ1_hw should be either maximum + // value representable in UQ0.HW or less by 1. This means that 1/b_hw have to + // be not below that value (see g(x) above), so it is safe to decrement just + // once after the final iteration. On the other hand, an effective value of + // divisor changes after this point (from b_hw to b), so adjust here. + x_uq0_hw.wrapping_sub(1_u16) + }; + + // Error estimations for full-precision iterations are calculated just + // as above, but with U := 2^-W and taking extra decrementing into account. + // We need at least one such iteration. + + // Simulating operations on a twice_rep_t to perform a single final full-width + // iteration. Using ad-hoc multiplication implementations to take advantage + // of particular structure of operands. + + let blo: u32 = (CastInto::<u32>::cast(b_uq1)) & lo_mask; + // x_UQ0 = x_UQ0_hw * 2^HW - 1 + // x_UQ0 * b_UQ1 = (x_UQ0_hw * 2^HW) * (b_UQ1_hw * 2^HW + blo) - b_UQ1 + // + // <--- higher half ---><--- lower half ---> + // [x_UQ0_hw * b_UQ1_hw] + // + [ x_UQ0_hw * blo ] + // - [ b_UQ1 ] + // = [ result ][.... discarded ...] 
+ let corr_uq1 = negate_u32( + (x_uq0_hw as u32) * (b_uq1_hw as u32) + (((x_uq0_hw as u32) * (blo)) >> hw) - 1, + ); // account for *possible* carry + let lo_corr = corr_uq1 & lo_mask; + let hi_corr = corr_uq1 >> hw; + // x_UQ0 * corr_UQ1 = (x_UQ0_hw * 2^HW) * (hi_corr * 2^HW + lo_corr) - corr_UQ1 + let mut x_uq0: <F as Float>::Int = ((((x_uq0_hw as u32) * hi_corr) << 1) + .wrapping_add(((x_uq0_hw as u32) * lo_corr) >> (hw - 1)) + .wrapping_sub(2)) + .cast(); // 1 to account for the highest bit of corr_UQ1 can be 1 + // 1 to account for possible carry + // Just like the case of half-width iterations but with possibility + // of overflowing by one extra Ulp of x_UQ0. + x_uq0 -= one; + // ... and then traditional fixup by 2 should work + + // On error estimation: + // abs(E_{N-1}) <= (u_{N-1} + 2 /* due to conversion e_n -> E_n */) * 2^-HW + // + (2^-HW + 2^-W)) + // abs(E_{N-1}) <= (u_{N-1} + 3.01) * 2^-HW + + // Then like for the half-width iterations: + // With 0 <= eps1, eps2 < 2^-W + // E_N = 4 * E_{N-1} * eps1 - (E_{N-1}^2 * b + 4 * eps2) + 4 * eps1 / b + // abs(E_N) <= 2^-W * [ 4 * abs(E_{N-1}) + max(2 * abs(E_{N-1})^2 * 2^W + 4, 8)) ] + // abs(E_N) <= 2^-W * [ 4 * (u_{N-1} + 3.01) * 2^-HW + max(4 + 2 * (u_{N-1} + 3.01)^2, 8) ] + x_uq0 } else { - quotient >>= 1; - (a_significand << significand_bits).wrapping_sub(quotient.wrapping_mul(b_significand)) + // C is (3/4 + 1/sqrt(2)) - 1 truncated to 32 fractional bits as UQ0.n + let c: <F as Float>::Int = (0x7504F333 << (F::BITS - 32)).cast(); + let x_uq0: <F as Float>::Int = c.wrapping_sub(b_uq1); + // E_0 <= 3/4 - 1/sqrt(2) + 2 * 2^-32 + x_uq0 + }; + + let mut x_uq0 = if USE_NATIVE_FULL_ITERATIONS { + for _ in 0..NUMBER_OF_FULL_ITERATIONS { + let corr_uq1: u32 = 0.wrapping_sub( + ((CastInto::<u32>::cast(x_uq0) as u64) * (CastInto::<u32>::cast(b_uq1) as u64)) + >> F::BITS, + ) as u32; + x_uq0 = ((((CastInto::<u32>::cast(x_uq0) as u64) * (corr_uq1 as u64)) >> (F::BITS - 1)) + as u32) + .cast(); + } + x_uq0 + } else { + // not using native full iterations + x_uq0 }; - let written_exponent = quotient_exponent.wrapping_add(exponent_bias as i32); + // Finally, account for possible overflow, as explained above. + x_uq0 = x_uq0.wrapping_sub(2.cast()); + + // u_n for different precisions (with N-1 half-width iterations): + // W0 is the precision of C + // u_0 = (3/4 - 1/sqrt(2) + 2^-W0) * 2^HW + + // Estimated with bc: + // define half1(un) { return 2.0 * (un + un^2) / 2.0^hw + 1.0; } + // define half2(un) { return 2.0 * un / 2.0^hw + 2.0; } + // define full1(un) { return 4.0 * (un + 3.01) / 2.0^hw + 2.0 * (un + 3.01)^2 + 4.0; } + // define full2(un) { return 4.0 * (un + 3.01) / 2.0^hw + 8.0; } + + // | f32 (0 + 3) | f32 (2 + 1) | f64 (3 + 1) | f128 (4 + 1) + // u_0 | < 184224974 | < 2812.1 | < 184224974 | < 791240234244348797 + // u_1 | < 15804007 | < 242.7 | < 15804007 | < 67877681371350440 + // u_2 | < 116308 | < 2.81 | < 116308 | < 499533100252317 + // u_3 | < 7.31 | | < 7.31 | < 27054456580 + // u_4 | | | | < 80.4 + // Final (U_N) | same as u_3 | < 72 | < 218 | < 13920 + + // Add 2 to U_N due to final decrement. + + let reciprocal_precision: <F as Float>::Int = 10.cast(); + + // Suppose 1/b - P * 2^-W < x < 1/b + P * 2^-W + let x_uq0 = x_uq0 - reciprocal_precision; + // Now 1/b - (2*P) * 2^-W < x < 1/b + // FIXME Is x_UQ0 still >= 0.5? + + let mut quotient: <F as Float>::Int = x_uq0.widen_mul(a_significand << 1).hi(); + // Now, a/b - 4*P * 2^-W < q < a/b for q=<quotient_UQ1:dummy> in UQ1.(SB+1+W). 
+ + // quotient_UQ1 is in [0.5, 2.0) as UQ1.(SB+1), + // adjust it to be in [1.0, 2.0) as UQ1.SB. + let (mut residual, written_exponent) = if quotient < (implicit_bit << 1) { + // Highest bit is 0, so just reinterpret quotient_UQ1 as UQ1.SB, + // effectively doubling its value as well as its error estimation. + let residual_lo = (a_significand << (significand_bits + 1)).wrapping_sub( + (CastInto::<u32>::cast(quotient).wrapping_mul(CastInto::<u32>::cast(b_significand))) + .cast(), + ); + a_significand <<= 1; + (residual_lo, written_exponent.wrapping_sub(1)) + } else { + // Highest bit is 1 (the UQ1.(SB+1) value is in [1, 2)), convert it + // to UQ1.SB by right shifting by 1. Least significant bit is omitted. + quotient >>= 1; + let residual_lo = (a_significand << significand_bits).wrapping_sub( + (CastInto::<u32>::cast(quotient).wrapping_mul(CastInto::<u32>::cast(b_significand))) + .cast(), + ); + (residual_lo, written_exponent) + }; + //drop mutability + let quotient = quotient; + + // NB: residualLo is calculated above for the normal result case. + // It is re-computed on denormal path that is expected to be not so + // performance-sensitive. + + // Now, q cannot be greater than a/b and can differ by at most 8*P * 2^-W + 2^-SB + // Each NextAfter() increments the floating point value by at least 2^-SB + // (more, if exponent was incremented). + // Different cases (<---> is of 2^-SB length, * = a/b that is shown as a midpoint): + // q + // | | * | | | | | + // <---> 2^t + // | | | | | * | | + // q + // To require at most one NextAfter(), an error should be less than 1.5 * 2^-SB. + // (8*P) * 2^-W + 2^-SB < 1.5 * 2^-SB + // (8*P) * 2^-W < 0.5 * 2^-SB + // P < 2^(W-4-SB) + // Generally, for at most R NextAfter() to be enough, + // P < (2*R - 1) * 2^(W-4-SB) + // For f32 (0+3): 10 < 32 (OK) + // For f32 (2+1): 32 < 74 < 32 * 3, so two NextAfter() are required + // For f64: 220 < 256 (OK) + // For f128: 4096 * 3 < 13922 < 4096 * 5 (three NextAfter() are required) + + // If we have overflowed the exponent, return infinity if written_exponent >= max_exponent as i32 { - // If we have overflowed the exponent, return infinity. return F::from_repr(inf_rep | quotient_sign); - } else if written_exponent < 1 { - // Flush denormals to zero. In the future, it would be nice to add - // code to round them correctly. 
- return F::from_repr(quotient_sign); - } else { - let round = ((residual << 1) > b_significand) as u32; - // Clear the implicit bits - let mut abs_result = quotient & significand_mask; - // Insert the exponent - abs_result |= written_exponent.cast() << significand_bits; - // Round - abs_result = abs_result.wrapping_add(round.cast()); - // Insert the sign and return - return F::from_repr(abs_result | quotient_sign); } + + // Now, quotient <= the correctly-rounded result + // and may need taking NextAfter() up to 3 times (see error estimates above) + // r = a - b * q + let abs_result = if written_exponent > 0 { + let mut ret = quotient & significand_mask; + ret |= ((written_exponent as u32) << significand_bits).cast(); + residual <<= 1; + ret + } else { + if (significand_bits as i32 + written_exponent) < 0 { + return F::from_repr(quotient_sign); + } + let ret = quotient.wrapping_shr(negate_u32(CastInto::<u32>::cast(written_exponent)) + 1); + residual = (CastInto::<u32>::cast( + a_significand.wrapping_shl( + significand_bits.wrapping_add(CastInto::<u32>::cast(written_exponent)), + ), + ) + .wrapping_sub( + (CastInto::<u32>::cast(ret).wrapping_mul(CastInto::<u32>::cast(b_significand))) << 1, + )) + .cast(); + ret + }; + // Round + let abs_result = { + residual += abs_result & one; // tie to even + // The above line conditionally turns the below LT comparison into LTE + + if residual > b_significand { + abs_result + one + } else { + abs_result + } + }; + F::from_repr(abs_result | quotient_sign) } fn div64<F: Float>(a: F, b: F) -> F @@ -218,10 +464,15 @@ where F::Int: CastInto<i64>, F::Int: HInt, { + const NUMBER_OF_HALF_ITERATIONS: usize = 3; + const NUMBER_OF_FULL_ITERATIONS: usize = 1; + const USE_NATIVE_FULL_ITERATIONS: bool = false; + let one = F::Int::ONE; let zero = F::Int::ZERO; + let hw = F::BITS / 2; + let lo_mask = u64::MAX >> hw; - // let bits = F::BITS; let significand_bits = F::SIGNIFICAND_BITS; let max_exponent = F::EXPONENT_MAX; @@ -235,12 +486,6 @@ where let inf_rep = exponent_mask; let quiet_bit = implicit_bit >> 1; let qnan_rep = exponent_mask | quiet_bit; - // let exponent_bits = F::EXPONENT_BITS; - - #[inline(always)] - fn negate_u32(a: u32) -> u32 { - (<i32>::wrapping_neg(a as i32)) as u32 - } #[inline(always)] fn negate_u64(a: u64) -> u64 { @@ -320,128 +565,340 @@ where } } - // Or in the implicit significand bit. (If we fell through from the + // Set the implicit significand bit. If we fell through from the // denormal path it was already set by normalize( ), but setting it twice - // won't hurt anything.) + // won't hurt anything. a_significand |= implicit_bit; b_significand |= implicit_bit; - let mut quotient_exponent: i32 = CastInto::<i32>::cast(a_exponent) - .wrapping_sub(CastInto::<i32>::cast(b_exponent)) - .wrapping_add(scale); - - // Align the significand of b as a Q31 fixed-point number in the range - // [1, 2.0) and get a Q32 approximate reciprocal using a small minimax - // polynomial approximation: reciprocal = 3/4 + 1/sqrt(2) - b/2. This - // is accurate to about 3.5 binary digits. - let q31b = CastInto::<u32>::cast(b_significand >> 21.cast()); - let mut recip32 = (0x7504f333u32).wrapping_sub(q31b); - - // Now refine the reciprocal estimate using a Newton-Raphson iteration: - // - // x1 = x0 * (2 - x0 * b) - // - // This doubles the number of correct binary digits in the approximation - // with each iteration, so after three iterations, we have about 28 binary - // digits of accuracy. 
- - let mut correction32: u32 = - negate_u32(((recip32 as u64).wrapping_mul(q31b as u64) >> 32) as u32); - recip32 = ((recip32 as u64).wrapping_mul(correction32 as u64) >> 31) as u32; - correction32 = negate_u32(((recip32 as u64).wrapping_mul(q31b as u64) >> 32) as u32); - recip32 = ((recip32 as u64).wrapping_mul(correction32 as u64) >> 31) as u32; - correction32 = negate_u32(((recip32 as u64).wrapping_mul(q31b as u64) >> 32) as u32); - recip32 = ((recip32 as u64).wrapping_mul(correction32 as u64) >> 31) as u32; - - // recip32 might have overflowed to exactly zero in the preceeding - // computation if the high word of b is exactly 1.0. This would sabotage - // the full-width final stage of the computation that follows, so we adjust - // recip32 downward by one bit. - recip32 = recip32.wrapping_sub(1); - - // We need to perform one more iteration to get us to 56 binary digits; - // The last iteration needs to happen with extra precision. - let q63blo = CastInto::<u32>::cast(b_significand << 11.cast()); - - let correction: u64 = negate_u64( - (recip32 as u64) - .wrapping_mul(q31b as u64) - .wrapping_add((recip32 as u64).wrapping_mul(q63blo as u64) >> 32), - ); - let c_hi = (correction >> 32) as u32; - let c_lo = correction as u32; - let mut reciprocal: u64 = (recip32 as u64) - .wrapping_mul(c_hi as u64) - .wrapping_add((recip32 as u64).wrapping_mul(c_lo as u64) >> 32); - - // We already adjusted the 32-bit estimate, now we need to adjust the final - // 64-bit reciprocal estimate downward to ensure that it is strictly smaller - // than the infinitely precise exact reciprocal. Because the computation - // of the Newton-Raphson step is truncating at every step, this adjustment - // is small; most of the work is already done. - reciprocal = reciprocal.wrapping_sub(2); - - // The numerical reciprocal is accurate to within 2^-56, lies in the - // interval [0.5, 1.0), and is strictly smaller than the true reciprocal - // of b. Multiplying a by this reciprocal thus gives a numerical q = a/b - // in Q53 with the following properties: - // - // 1. q < a/b - // 2. q is in the interval [0.5, 2.0) - // 3. the error in q is bounded away from 2^-53 (actually, we have a - // couple of bits to spare, but this is all we need). - - // We need a 64 x 64 multiply high to compute q, which isn't a basic - // operation in C, so we need to be a little bit fussy. - // let mut quotient: F::Int = ((((reciprocal as u64) - // .wrapping_mul(CastInto::<u32>::cast(a_significand << 1) as u64)) - // >> 32) as u32) - // .cast(); - - // We need a 64 x 64 multiply high to compute q, which isn't a basic - // operation in C, so we need to be a little bit fussy. - let mut quotient = (a_significand << 2).widen_mul(reciprocal.cast()).hi(); - - // Two cases: quotient is in [0.5, 1.0) or quotient is in [1.0, 2.0). - // In either case, we are going to compute a residual of the form - // - // r = a - q*b + + let written_exponent: i64 = CastInto::<u64>::cast( + a_exponent + .wrapping_sub(b_exponent) + .wrapping_add(scale.cast()), + ) + .wrapping_add(exponent_bias as u64) as i64; + let b_uq1 = b_significand << (F::BITS - significand_bits - 1); + + // Align the significand of b as a UQ1.(n-1) fixed-point number in the range + // [1.0, 2.0) and get a UQ0.n approximate reciprocal using a small minimax + // polynomial approximation: x0 = 3/4 + 1/sqrt(2) - b/2. + // The max error for this approximation is achieved at endpoints, so + // abs(x0(b) - 1/b) <= abs(x0(1) - 1/1) = 3/4 - 1/sqrt(2) = 0.04289..., + // which is about 4.5 bits. 
+ // The initial approximation is between x0(1.0) = 0.9571... and x0(2.0) = 0.4571... + + // Then, refine the reciprocal estimate using a quadratically converging + // Newton-Raphson iteration: + // x_{n+1} = x_n * (2 - x_n * b) // - // We know from the construction of q that r satisfies: + // Let b be the original divisor considered "in infinite precision" and + // obtained from IEEE754 representation of function argument (with the + // implicit bit set). Corresponds to rep_t-sized b_UQ1 represented in + // UQ1.(W-1). // - // 0 <= r < ulp(q)*b + // Let b_hw be an infinitely precise number obtained from the highest (HW-1) + // bits of divisor significand (with the implicit bit set). Corresponds to + // half_rep_t-sized b_UQ1_hw represented in UQ1.(HW-1) that is a **truncated** + // version of b_UQ1. // - // if r is greater than 1/2 ulp(q)*b, then q rounds up. Otherwise, we - // already have the correct result. The exact halfway case cannot occur. - // We also take this time to right shift quotient if it falls in the [1,2) - // range and adjust the exponent accordingly. - let residual = if quotient < (implicit_bit << 1) { - quotient_exponent = quotient_exponent.wrapping_sub(1); - (a_significand << (significand_bits + 1)).wrapping_sub(quotient.wrapping_mul(b_significand)) + // Let e_n := x_n - 1/b_hw + // E_n := x_n - 1/b + // abs(E_n) <= abs(e_n) + (1/b_hw - 1/b) + // = abs(e_n) + (b - b_hw) / (b*b_hw) + // <= abs(e_n) + 2 * 2^-HW + + // rep_t-sized iterations may be slower than the corresponding half-width + // variant depending on the handware and whether single/double/quad precision + // is selected. + // NB: Using half-width iterations increases computation errors due to + // rounding, so error estimations have to be computed taking the selected + // mode into account! + + let mut x_uq0 = if NUMBER_OF_HALF_ITERATIONS > 0 { + // Starting with (n-1) half-width iterations + let b_uq1_hw: u32 = + (CastInto::<u64>::cast(b_significand) >> (significand_bits + 1 - hw)) as u32; + + // C is (3/4 + 1/sqrt(2)) - 1 truncated to W0 fractional bits as UQ0.HW + // with W0 being either 16 or 32 and W0 <= HW. + // That is, C is the aforementioned 3/4 + 1/sqrt(2) constant (from which + // b/2 is subtracted to obtain x0) wrapped to [0, 1) range. + + // HW is at least 32. Shifting into the highest bits if needed. + let c_hw = (0x7504F333_u64 as u32).wrapping_shl(hw.wrapping_sub(32)); + + // b >= 1, thus an upper bound for 3/4 + 1/sqrt(2) - b/2 is about 0.9572, + // so x0 fits to UQ0.HW without wrapping. + let x_uq0_hw: u32 = { + let mut x_uq0_hw: u32 = c_hw.wrapping_sub(b_uq1_hw /* exact b_hw/2 as UQ0.HW */); + // dbg!(x_uq0_hw); + // An e_0 error is comprised of errors due to + // * x0 being an inherently imprecise first approximation of 1/b_hw + // * C_hw being some (irrational) number **truncated** to W0 bits + // Please note that e_0 is calculated against the infinitely precise + // reciprocal of b_hw (that is, **truncated** version of b). + // + // e_0 <= 3/4 - 1/sqrt(2) + 2^-W0 + + // By construction, 1 <= b < 2 + // f(x) = x * (2 - b*x) = 2*x - b*x^2 + // f'(x) = 2 * (1 - b*x) + // + // On the [0, 1] interval, f(0) = 0, + // then it increses until f(1/b) = 1 / b, maximum on (0, 1), + // then it decreses to f(1) = 2 - b + // + // Let g(x) = x - f(x) = b*x^2 - x. + // On (0, 1/b), g(x) < 0 <=> f(x) > x + // On (1/b, 1], g(x) > 0 <=> f(x) < x + // + // For half-width iterations, b_hw is used instead of b. 
+ for _ in 0..NUMBER_OF_HALF_ITERATIONS { + // corr_UQ1_hw can be **larger** than 2 - b_hw*x by at most 1*Ulp + // of corr_UQ1_hw. + // "0.0 - (...)" is equivalent to "2.0 - (...)" in UQ1.(HW-1). + // On the other hand, corr_UQ1_hw should not overflow from 2.0 to 0.0 provided + // no overflow occurred earlier: ((rep_t)x_UQ0_hw * b_UQ1_hw >> HW) is + // expected to be strictly positive because b_UQ1_hw has its highest bit set + // and x_UQ0_hw should be rather large (it converges to 1/2 < 1/b_hw <= 1). + let corr_uq1_hw: u32 = + 0.wrapping_sub(((x_uq0_hw as u64).wrapping_mul(b_uq1_hw as u64)) >> hw) as u32; + // dbg!(corr_uq1_hw); + + // Now, we should multiply UQ0.HW and UQ1.(HW-1) numbers, naturally + // obtaining an UQ1.(HW-1) number and proving its highest bit could be + // considered to be 0 to be able to represent it in UQ0.HW. + // From the above analysis of f(x), if corr_UQ1_hw would be represented + // without any intermediate loss of precision (that is, in twice_rep_t) + // x_UQ0_hw could be at most [1.]000... if b_hw is exactly 1.0 and strictly + // less otherwise. On the other hand, to obtain [1.]000..., one have to pass + // 1/b_hw == 1.0 to f(x), so this cannot occur at all without overflow (due + // to 1.0 being not representable as UQ0.HW). + // The fact corr_UQ1_hw was virtually round up (due to result of + // multiplication being **first** truncated, then negated - to improve + // error estimations) can increase x_UQ0_hw by up to 2*Ulp of x_UQ0_hw. + x_uq0_hw = ((x_uq0_hw as u64).wrapping_mul(corr_uq1_hw as u64) >> (hw - 1)) as u32; + // dbg!(x_uq0_hw); + // Now, either no overflow occurred or x_UQ0_hw is 0 or 1 in its half_rep_t + // representation. In the latter case, x_UQ0_hw will be either 0 or 1 after + // any number of iterations, so just subtract 2 from the reciprocal + // approximation after last iteration. + + // In infinite precision, with 0 <= eps1, eps2 <= U = 2^-HW: + // corr_UQ1_hw = 2 - (1/b_hw + e_n) * b_hw + 2*eps1 + // = 1 - e_n * b_hw + 2*eps1 + // x_UQ0_hw = (1/b_hw + e_n) * (1 - e_n*b_hw + 2*eps1) - eps2 + // = 1/b_hw - e_n + 2*eps1/b_hw + e_n - e_n^2*b_hw + 2*e_n*eps1 - eps2 + // = 1/b_hw + 2*eps1/b_hw - e_n^2*b_hw + 2*e_n*eps1 - eps2 + // e_{n+1} = -e_n^2*b_hw + 2*eps1/b_hw + 2*e_n*eps1 - eps2 + // = 2*e_n*eps1 - (e_n^2*b_hw + eps2) + 2*eps1/b_hw + // \------ >0 -------/ \-- >0 ---/ + // abs(e_{n+1}) <= 2*abs(e_n)*U + max(2*e_n^2 + U, 2 * U) + } + // For initial half-width iterations, U = 2^-HW + // Let abs(e_n) <= u_n * U, + // then abs(e_{n+1}) <= 2 * u_n * U^2 + max(2 * u_n^2 * U^2 + U, 2 * U) + // u_{n+1} <= 2 * u_n * U + max(2 * u_n^2 * U + 1, 2) + + // Account for possible overflow (see above). For an overflow to occur for the + // first time, for "ideal" corr_UQ1_hw (that is, without intermediate + // truncation), the result of x_UQ0_hw * corr_UQ1_hw should be either maximum + // value representable in UQ0.HW or less by 1. This means that 1/b_hw have to + // be not below that value (see g(x) above), so it is safe to decrement just + // once after the final iteration. On the other hand, an effective value of + // divisor changes after this point (from b_hw to b), so adjust here. + x_uq0_hw.wrapping_sub(1_u32) + }; + + // Error estimations for full-precision iterations are calculated just + // as above, but with U := 2^-W and taking extra decrementing into account. + // We need at least one such iteration. + + // Simulating operations on a twice_rep_t to perform a single final full-width + // iteration. 
Using ad-hoc multiplication implementations to take advantage + // of particular structure of operands. + let blo: u64 = (CastInto::<u64>::cast(b_uq1)) & lo_mask; + // x_UQ0 = x_UQ0_hw * 2^HW - 1 + // x_UQ0 * b_UQ1 = (x_UQ0_hw * 2^HW) * (b_UQ1_hw * 2^HW + blo) - b_UQ1 + // + // <--- higher half ---><--- lower half ---> + // [x_UQ0_hw * b_UQ1_hw] + // + [ x_UQ0_hw * blo ] + // - [ b_UQ1 ] + // = [ result ][.... discarded ...] + let corr_uq1 = negate_u64( + (x_uq0_hw as u64) * (b_uq1_hw as u64) + (((x_uq0_hw as u64) * (blo)) >> hw) - 1, + ); // account for *possible* carry + let lo_corr = corr_uq1 & lo_mask; + let hi_corr = corr_uq1 >> hw; + // x_UQ0 * corr_UQ1 = (x_UQ0_hw * 2^HW) * (hi_corr * 2^HW + lo_corr) - corr_UQ1 + let mut x_uq0: <F as Float>::Int = ((((x_uq0_hw as u64) * hi_corr) << 1) + .wrapping_add(((x_uq0_hw as u64) * lo_corr) >> (hw - 1)) + .wrapping_sub(2)) + .cast(); // 1 to account for the highest bit of corr_UQ1 can be 1 + // 1 to account for possible carry + // Just like the case of half-width iterations but with possibility + // of overflowing by one extra Ulp of x_UQ0. + x_uq0 -= one; + // ... and then traditional fixup by 2 should work + + // On error estimation: + // abs(E_{N-1}) <= (u_{N-1} + 2 /* due to conversion e_n -> E_n */) * 2^-HW + // + (2^-HW + 2^-W)) + // abs(E_{N-1}) <= (u_{N-1} + 3.01) * 2^-HW + + // Then like for the half-width iterations: + // With 0 <= eps1, eps2 < 2^-W + // E_N = 4 * E_{N-1} * eps1 - (E_{N-1}^2 * b + 4 * eps2) + 4 * eps1 / b + // abs(E_N) <= 2^-W * [ 4 * abs(E_{N-1}) + max(2 * abs(E_{N-1})^2 * 2^W + 4, 8)) ] + // abs(E_N) <= 2^-W * [ 4 * (u_{N-1} + 3.01) * 2^-HW + max(4 + 2 * (u_{N-1} + 3.01)^2, 8) ] + x_uq0 } else { - quotient >>= 1; - (a_significand << significand_bits).wrapping_sub(quotient.wrapping_mul(b_significand)) + // C is (3/4 + 1/sqrt(2)) - 1 truncated to 64 fractional bits as UQ0.n + let c: <F as Float>::Int = (0x7504F333 << (F::BITS - 32)).cast(); + let x_uq0: <F as Float>::Int = c.wrapping_sub(b_uq1); + // E_0 <= 3/4 - 1/sqrt(2) + 2 * 2^-64 + x_uq0 + }; + + let mut x_uq0 = if USE_NATIVE_FULL_ITERATIONS { + for _ in 0..NUMBER_OF_FULL_ITERATIONS { + let corr_uq1: u64 = 0.wrapping_sub( + (CastInto::<u64>::cast(x_uq0) * (CastInto::<u64>::cast(b_uq1))) >> F::BITS, + ); + x_uq0 = ((((CastInto::<u64>::cast(x_uq0) as u128) * (corr_uq1 as u128)) + >> (F::BITS - 1)) as u64) + .cast(); + } + x_uq0 + } else { + // not using native full iterations + x_uq0 }; - let written_exponent = quotient_exponent.wrapping_add(exponent_bias as i32); + // Finally, account for possible overflow, as explained above. + x_uq0 = x_uq0.wrapping_sub(2.cast()); + + // u_n for different precisions (with N-1 half-width iterations): + // W0 is the precision of C + // u_0 = (3/4 - 1/sqrt(2) + 2^-W0) * 2^HW + + // Estimated with bc: + // define half1(un) { return 2.0 * (un + un^2) / 2.0^hw + 1.0; } + // define half2(un) { return 2.0 * un / 2.0^hw + 2.0; } + // define full1(un) { return 4.0 * (un + 3.01) / 2.0^hw + 2.0 * (un + 3.01)^2 + 4.0; } + // define full2(un) { return 4.0 * (un + 3.01) / 2.0^hw + 8.0; } + + // | f32 (0 + 3) | f32 (2 + 1) | f64 (3 + 1) | f128 (4 + 1) + // u_0 | < 184224974 | < 2812.1 | < 184224974 | < 791240234244348797 + // u_1 | < 15804007 | < 242.7 | < 15804007 | < 67877681371350440 + // u_2 | < 116308 | < 2.81 | < 116308 | < 499533100252317 + // u_3 | < 7.31 | | < 7.31 | < 27054456580 + // u_4 | | | | < 80.4 + // Final (U_N) | same as u_3 | < 72 | < 218 | < 13920 + + // Add 2 to U_N due to final decrement. 
+ + let reciprocal_precision: <F as Float>::Int = 220.cast(); + + // Suppose 1/b - P * 2^-W < x < 1/b + P * 2^-W + let x_uq0 = x_uq0 - reciprocal_precision; + // Now 1/b - (2*P) * 2^-W < x < 1/b + // FIXME Is x_UQ0 still >= 0.5? + + let mut quotient: <F as Float>::Int = x_uq0.widen_mul(a_significand << 1).hi(); + // Now, a/b - 4*P * 2^-W < q < a/b for q=<quotient_UQ1:dummy> in UQ1.(SB+1+W). + + // quotient_UQ1 is in [0.5, 2.0) as UQ1.(SB+1), + // adjust it to be in [1.0, 2.0) as UQ1.SB. + let (mut residual, written_exponent) = if quotient < (implicit_bit << 1) { + // Highest bit is 0, so just reinterpret quotient_UQ1 as UQ1.SB, + // effectively doubling its value as well as its error estimation. + let residual_lo = (a_significand << (significand_bits + 1)).wrapping_sub( + (CastInto::<u64>::cast(quotient).wrapping_mul(CastInto::<u64>::cast(b_significand))) + .cast(), + ); + a_significand <<= 1; + (residual_lo, written_exponent.wrapping_sub(1)) + } else { + // Highest bit is 1 (the UQ1.(SB+1) value is in [1, 2)), convert it + // to UQ1.SB by right shifting by 1. Least significant bit is omitted. + quotient >>= 1; + let residual_lo = (a_significand << significand_bits).wrapping_sub( + (CastInto::<u64>::cast(quotient).wrapping_mul(CastInto::<u64>::cast(b_significand))) + .cast(), + ); + (residual_lo, written_exponent) + }; - if written_exponent >= max_exponent as i32 { - // If we have overflowed the exponent, return infinity. + //drop mutability + let quotient = quotient; + + // NB: residualLo is calculated above for the normal result case. + // It is re-computed on denormal path that is expected to be not so + // performance-sensitive. + + // Now, q cannot be greater than a/b and can differ by at most 8*P * 2^-W + 2^-SB + // Each NextAfter() increments the floating point value by at least 2^-SB + // (more, if exponent was incremented). + // Different cases (<---> is of 2^-SB length, * = a/b that is shown as a midpoint): + // q + // | | * | | | | | + // <---> 2^t + // | | | | | * | | + // q + // To require at most one NextAfter(), an error should be less than 1.5 * 2^-SB. + // (8*P) * 2^-W + 2^-SB < 1.5 * 2^-SB + // (8*P) * 2^-W < 0.5 * 2^-SB + // P < 2^(W-4-SB) + // Generally, for at most R NextAfter() to be enough, + // P < (2*R - 1) * 2^(W-4-SB) + // For f32 (0+3): 10 < 32 (OK) + // For f32 (2+1): 32 < 74 < 32 * 3, so two NextAfter() are required + // For f64: 220 < 256 (OK) + // For f128: 4096 * 3 < 13922 < 4096 * 5 (three NextAfter() are required) + + // If we have overflowed the exponent, return infinity + if written_exponent >= max_exponent as i64 { return F::from_repr(inf_rep | quotient_sign); - } else if written_exponent < 1 { - // Flush denormals to zero. In the future, it would be nice to add - // code to round them correctly. 
- return F::from_repr(quotient_sign); - } else { - let round = ((residual << 1) > b_significand) as u32; - // Clear the implicit bits - let mut abs_result = quotient & significand_mask; - // Insert the exponent - abs_result |= written_exponent.cast() << significand_bits; - // Round - abs_result = abs_result.wrapping_add(round.cast()); - // Insert the sign and return - return F::from_repr(abs_result | quotient_sign); } + + // Now, quotient <= the correctly-rounded result + // and may need taking NextAfter() up to 3 times (see error estimates above) + // r = a - b * q + let abs_result = if written_exponent > 0 { + let mut ret = quotient & significand_mask; + ret |= ((written_exponent as u64) << significand_bits).cast(); + residual <<= 1; + ret + } else { + if (significand_bits as i64 + written_exponent) < 0 { + return F::from_repr(quotient_sign); + } + let ret = + quotient.wrapping_shr((negate_u64(CastInto::<u64>::cast(written_exponent)) + 1) as u32); + residual = (CastInto::<u64>::cast( + a_significand.wrapping_shl( + significand_bits.wrapping_add(CastInto::<u32>::cast(written_exponent)), + ), + ) + .wrapping_sub( + (CastInto::<u64>::cast(ret).wrapping_mul(CastInto::<u64>::cast(b_significand))) << 1, + )) + .cast(); + ret + }; + // Round + let abs_result = { + residual += abs_result & one; // tie to even + // conditionally turns the below LT comparison into LTE + if residual > b_significand { + abs_result + one + } else { + abs_result + } + }; + F::from_repr(abs_result | quotient_sign) } intrinsics! { diff --git a/vendor/compiler_builtins/src/lib.rs b/vendor/compiler_builtins/src/lib.rs index 71f249c8e..a6b61bdf5 100644 --- a/vendor/compiler_builtins/src/lib.rs +++ b/vendor/compiler_builtins/src/lib.rs @@ -48,6 +48,7 @@ pub mod int; all(target_arch = "x86_64", target_os = "uefi"), all(target_arch = "arm", target_os = "none"), all(target_arch = "xtensa", target_os = "none"), + all(target_arch = "mips", target_os = "none"), target_os = "xous", all(target_vendor = "fortanix", target_env = "sgx") ))] @@ -57,6 +58,9 @@ pub mod mem; #[cfg(target_arch = "arm")] pub mod arm; +#[cfg(all(target_arch = "aarch64", target_os = "linux", not(feature = "no-asm"),))] +pub mod aarch64_linux; + #[cfg(all( kernel_user_helpers, any(target_os = "linux", target_os = "android"), diff --git a/vendor/compiler_builtins/src/macros.rs b/vendor/compiler_builtins/src/macros.rs index 59f25317e..b11114f12 100644 --- a/vendor/compiler_builtins/src/macros.rs +++ b/vendor/compiler_builtins/src/macros.rs @@ -33,7 +33,7 @@ macro_rules! public_test_dep { /// /// This macro is structured to be invoked with a bunch of functions that looks /// like: -/// +/// ```ignore /// intrinsics! { /// pub extern "C" fn foo(a: i32) -> u32 { /// // ... @@ -44,6 +44,7 @@ macro_rules! public_test_dep { /// // ... /// } /// } +/// ``` /// /// Each function is defined in a manner that looks like a normal Rust function. /// The macro then accepts a few nonstandard attributes that can decorate @@ -203,7 +204,7 @@ macro_rules! intrinsics { ( #[maybe_use_optimized_c_shim] $(#[$($attr:tt)*])* - pub extern $abi:tt fn $name:ident( $($argname:ident: $ty:ty),* ) $(-> $ret:ty)? { + pub $(unsafe $(@ $empty:tt)? )? extern $abi:tt fn $name:ident( $($argname:ident: $ty:ty),* ) $(-> $ret:ty)? { $($body:tt)* } @@ -211,7 +212,7 @@ macro_rules! intrinsics { ) => ( #[cfg($name = "optimized-c")] #[cfg_attr(feature = "weak-intrinsics", linkage = "weak")] - pub extern $abi fn $name( $($argname: $ty),* ) $(-> $ret)? { + pub $(unsafe $($empty)? )? 
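
Editor's note: a detail of the rewritten division above that is easy to miss is the final rounding step shared by `div32` and `div64`. Adding the quotient's low bit to the doubled residual (`residual += abs_result & one`) before the strict comparison against `b_significand` yields round-to-nearest with ties-to-even, with no separate tie branch. Below is a minimal standalone sketch of the trick on plain integers; the function and variable names are illustrative, not taken from the diff.

```rust
/// Round a truncated quotient to nearest-even, mirroring the tail of
/// `div32`/`div64` above. `residual2` is 2*r where r = a - q*b, so the exact
/// quotient is q + r/b and a tie is exactly residual2 == b.
fn round_to_nearest_even(q: u64, residual2: u64, b: u64) -> u64 {
    // Adding q's low bit turns the strict `>` below into `>=` precisely when
    // q is odd, so a tie rounds up only toward the even neighbour.
    let residual2 = residual2 + (q & 1);
    if residual2 > b {
        q + 1
    } else {
        q
    }
}

fn main() {
    // 7/2 = 3.5: q = 3 (odd), 2r = 2 == b, the tie rounds up to the even 4.
    assert_eq!(round_to_nearest_even(3, 2, 2), 4);
    // 5/2 = 2.5: q = 2 (even), the same tie stays at 2.
    assert_eq!(round_to_nearest_even(2, 2, 2), 2);
    // 7/4 = 1.75: 2r = 6 > 4, an ordinary round up to 2.
    assert_eq!(round_to_nearest_even(1, 6, 4), 2);
}
```

This is why the error analysis in the comments only needs `q` to be at most the correctly rounded result and within a few `NextAfter()` steps of `a/b`: the residual comparison then lands the result on the nearest (or even) neighbour without ever computing the exact quotient.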
extern $abi fn $name( $($argname: $ty),* ) $(-> $ret)? { extern $abi { fn $name($($argname: $ty),*) $(-> $ret)?; } @@ -223,7 +224,7 @@ macro_rules! intrinsics { #[cfg(not($name = "optimized-c"))] intrinsics! { $(#[$($attr)*])* - pub extern $abi fn $name( $($argname: $ty),* ) $(-> $ret)? { + pub $(unsafe $($empty)? )? extern $abi fn $name( $($argname: $ty),* ) $(-> $ret)? { $($body)* } } @@ -437,12 +438,11 @@ macro_rules! intrinsics { intrinsics!($($rest)*); ); - // For division and modulo, AVR uses a custom calling convention¹ that does - // not match our definitions here. Ideally we would just use hand-written - // naked functions, but that's quite a lot of code to port² - so for the - // time being we are just ignoring the problematic functions, letting - // avr-gcc (which is required to compile to AVR anyway) link them from - // libgcc. + // For some intrinsics, AVR uses a custom calling convention¹ that does not + // match our definitions here. Ideally we would just use hand-written naked + // functions, but that's quite a lot of code to port² - so for the time + // being we are just ignoring the problematic functions, letting avr-gcc + // (which is required to compile to AVR anyway) link them from libgcc. // // ¹ https://gcc.gnu.org/wiki/avr-gcc (see "Exceptions to the Calling // Convention") diff --git a/vendor/compiler_builtins/src/math.rs b/vendor/compiler_builtins/src/math.rs index 498e4d85f..b4e5fc113 100644 --- a/vendor/compiler_builtins/src/math.rs +++ b/vendor/compiler_builtins/src/math.rs @@ -136,11 +136,12 @@ no_mangle! { fn truncf(x: f32) -> f32; } -// only for the thumb*-none-eabi*, riscv32*-none-elf and x86_64-unknown-none targets that lack the floating point instruction set +// only for the thumb*-none-eabi*, riscv32*-none-elf, x86_64-unknown-none and mips*-unknown-none targets that lack the floating point instruction set #[cfg(any( all(target_arch = "arm", target_os = "none"), all(target_arch = "riscv32", not(target_feature = "f"), target_os = "none"), - all(target_arch = "x86_64", target_os = "none") + all(target_arch = "x86_64", target_os = "none"), + all(target_arch = "mips", target_os = "none"), ))] no_mangle! { fn fmin(x: f64, y: f64) -> f64; |