Diffstat (limited to 'vendor/compiler_builtins/src/float')
-rw-r--r--   vendor/compiler_builtins/src/float/add.rs      213
-rw-r--r--   vendor/compiler_builtins/src/float/cmp.rs      253
-rw-r--r--   vendor/compiler_builtins/src/float/conv.rs     351
-rw-r--r--   vendor/compiler_builtins/src/float/div.rs      467
-rw-r--r--   vendor/compiler_builtins/src/float/extend.rs    83
-rw-r--r--   vendor/compiler_builtins/src/float/mod.rs      175
-rw-r--r--   vendor/compiler_builtins/src/float/mul.rs      209
-rw-r--r--   vendor/compiler_builtins/src/float/pow.rs       36
-rw-r--r--   vendor/compiler_builtins/src/float/sub.rs       25
-rw-r--r--   vendor/compiler_builtins/src/float/trunc.rs    125
10 files changed, 1937 insertions, 0 deletions
diff --git a/vendor/compiler_builtins/src/float/add.rs b/vendor/compiler_builtins/src/float/add.rs
new file mode 100644
index 000000000..67f6c2c14
--- /dev/null
+++ b/vendor/compiler_builtins/src/float/add.rs
@@ -0,0 +1,213 @@
+use float::Float;
+use int::{CastInto, Int};
+
+/// Returns `a + b`
+fn add<F: Float>(a: F, b: F) -> F
+where
+ u32: CastInto<F::Int>,
+ F::Int: CastInto<u32>,
+ i32: CastInto<F::Int>,
+ F::Int: CastInto<i32>,
+{
+ let one = F::Int::ONE;
+ let zero = F::Int::ZERO;
+
+ let bits = F::BITS.cast();
+ let significand_bits = F::SIGNIFICAND_BITS;
+ let max_exponent = F::EXPONENT_MAX;
+
+ let implicit_bit = F::IMPLICIT_BIT;
+ let significand_mask = F::SIGNIFICAND_MASK;
+ let sign_bit = F::SIGN_MASK as F::Int;
+ let abs_mask = sign_bit - one;
+ let exponent_mask = F::EXPONENT_MASK;
+ let inf_rep = exponent_mask;
+ let quiet_bit = implicit_bit >> 1;
+ let qnan_rep = exponent_mask | quiet_bit;
+
+ let mut a_rep = a.repr();
+ let mut b_rep = b.repr();
+ let a_abs = a_rep & abs_mask;
+ let b_abs = b_rep & abs_mask;
+
+ // Detect if a or b is zero, infinity, or NaN.
+ if a_abs.wrapping_sub(one) >= inf_rep - one || b_abs.wrapping_sub(one) >= inf_rep - one {
+ // NaN + anything = qNaN
+ if a_abs > inf_rep {
+ return F::from_repr(a_abs | quiet_bit);
+ }
+ // anything + NaN = qNaN
+ if b_abs > inf_rep {
+ return F::from_repr(b_abs | quiet_bit);
+ }
+
+ if a_abs == inf_rep {
+ // +/-infinity + -/+infinity = qNaN
+ if (a.repr() ^ b.repr()) == sign_bit {
+ return F::from_repr(qnan_rep);
+ } else {
+ // +/-infinity + anything remaining = +/- infinity
+ return a;
+ }
+ }
+
+ // anything remaining + +/-infinity = +/-infinity
+ if b_abs == inf_rep {
+ return b;
+ }
+
+ // zero + anything = anything
+ if a_abs == Int::ZERO {
+ // but we need to get the sign right for zero + zero
+ if b_abs == Int::ZERO {
+ return F::from_repr(a.repr() & b.repr());
+ } else {
+ return b;
+ }
+ }
+
+ // anything + zero = anything
+ if b_abs == Int::ZERO {
+ return a;
+ }
+ }
+
+ // Swap a and b if necessary so that a has the larger absolute value.
+ if b_abs > a_abs {
+ // Don't use mem::swap because it may generate references to memcpy in unoptimized code.
+ let tmp = a_rep;
+ a_rep = b_rep;
+ b_rep = tmp;
+ }
+
+ // Extract the exponent and significand from the (possibly swapped) a and b.
+ let mut a_exponent: i32 = ((a_rep & exponent_mask) >> significand_bits).cast();
+ let mut b_exponent: i32 = ((b_rep & exponent_mask) >> significand_bits).cast();
+ let mut a_significand = a_rep & significand_mask;
+ let mut b_significand = b_rep & significand_mask;
+
+ // normalize any denormals, and adjust the exponent accordingly.
+ if a_exponent == 0 {
+ let (exponent, significand) = F::normalize(a_significand);
+ a_exponent = exponent;
+ a_significand = significand;
+ }
+ if b_exponent == 0 {
+ let (exponent, significand) = F::normalize(b_significand);
+ b_exponent = exponent;
+ b_significand = significand;
+ }
+
+ // The sign of the result is the sign of the larger operand, a. If they
+ // have opposite signs, we are performing a subtraction; otherwise addition.
+ let result_sign = a_rep & sign_bit;
+ let subtraction = ((a_rep ^ b_rep) & sign_bit) != zero;
+
+ // Shift the significands to give us round, guard and sticky, and or in the
+ // implicit significand bit. (If we fell through from the denormal path it
+ // was already set by normalize(), but setting it twice won't hurt
+ // anything.)
+ a_significand = (a_significand | implicit_bit) << 3;
+ b_significand = (b_significand | implicit_bit) << 3;
+
+ // Shift the significand of b by the difference in exponents, with a sticky
+ // bottom bit to get rounding correct.
+ let align = a_exponent.wrapping_sub(b_exponent).cast();
+ if align != Int::ZERO {
+ if align < bits {
+ let sticky =
+ F::Int::from_bool(b_significand << bits.wrapping_sub(align).cast() != Int::ZERO);
+ b_significand = (b_significand >> align.cast()) | sticky;
+ } else {
+ b_significand = one; // sticky; b is known to be non-zero.
+ }
+ }
+ if subtraction {
+ a_significand = a_significand.wrapping_sub(b_significand);
+ // If a == -b, return +zero.
+ if a_significand == Int::ZERO {
+ return F::from_repr(Int::ZERO);
+ }
+
+ // If partial cancellation occurred, we need to left-shift the result
+ // and adjust the exponent:
+ if a_significand < implicit_bit << 3 {
+ let shift =
+ a_significand.leading_zeros() as i32 - (implicit_bit << 3).leading_zeros() as i32;
+ a_significand <<= shift;
+ a_exponent -= shift;
+ }
+ } else {
+ // addition
+ a_significand += b_significand;
+
+ // If the addition carried up, we need to right-shift the result and
+ // adjust the exponent:
+ if a_significand & implicit_bit << 4 != Int::ZERO {
+ let sticky = F::Int::from_bool(a_significand & one != Int::ZERO);
+ a_significand = a_significand >> 1 | sticky;
+ a_exponent += 1;
+ }
+ }
+
+ // If we have overflowed the type, return +/- infinity:
+ if a_exponent >= max_exponent as i32 {
+ return F::from_repr(inf_rep | result_sign);
+ }
+
+ if a_exponent <= 0 {
+ // Result is denormal before rounding; the exponent is zero and we
+ // need to shift the significand.
+ let shift = (1 - a_exponent).cast();
+ let sticky =
+ F::Int::from_bool((a_significand << bits.wrapping_sub(shift).cast()) != Int::ZERO);
+ a_significand = a_significand >> shift.cast() | sticky;
+ a_exponent = 0;
+ }
+
+ // Low three bits are round, guard, and sticky.
+ let a_significand_i32: i32 = a_significand.cast();
+ let round_guard_sticky: i32 = a_significand_i32 & 0x7;
+
+ // Shift the significand into place, and mask off the implicit bit.
+ let mut result = a_significand >> 3 & significand_mask;
+
+ // Insert the exponent and sign.
+ result |= a_exponent.cast() << significand_bits;
+ result |= result_sign;
+
+ // Final rounding. The result may overflow to infinity, but that is the
+ // correct result in that case.
+ if round_guard_sticky > 0x4 {
+ result += one;
+ }
+ if round_guard_sticky == 0x4 {
+ result += result & one;
+ }
+
+ F::from_repr(result)
+}
+
+intrinsics! {
+ #[aapcs_on_arm]
+ #[arm_aeabi_alias = __aeabi_fadd]
+ pub extern "C" fn __addsf3(a: f32, b: f32) -> f32 {
+ add(a, b)
+ }
+
+ #[aapcs_on_arm]
+ #[arm_aeabi_alias = __aeabi_dadd]
+ pub extern "C" fn __adddf3(a: f64, b: f64) -> f64 {
+ add(a, b)
+ }
+
+ #[cfg(target_arch = "arm")]
+ pub extern "C" fn __addsf3vfp(a: f32, b: f32) -> f32 {
+ a + b
+ }
+
+ #[cfg(target_arch = "arm")]
+ pub extern "C" fn __adddf3vfp(a: f64, b: f64) -> f64 {
+ a + b
+ }
+}
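
The last step of `add` above keeps three extra low bits (round, guard and sticky) and rounds to nearest with ties broken to even. A minimal standalone sketch of just that rounding rule, not part of the diff; the bit patterns in `main` are chosen only for illustration:

fn round_rgs(x: u32) -> u32 {
    let rgs = x & 0x7;        // low three bits: round, guard, sticky
    let mut result = x >> 3;  // drop the extra precision
    if rgs > 0x4 {
        result += 1;          // more than halfway: round up
    }
    if rgs == 0x4 {
        result += result & 1; // exactly halfway: round to even
    }
    result
}

fn main() {
    assert_eq!(round_rgs(0b1010_011), 0b1010); // below halfway: truncate
    assert_eq!(round_rgs(0b1010_100), 0b1010); // tie, even result stays
    assert_eq!(round_rgs(0b1011_100), 0b1100); // tie, odd result rounds up
    assert_eq!(round_rgs(0b1010_101), 0b1011); // above halfway: round up
}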
diff --git a/vendor/compiler_builtins/src/float/cmp.rs b/vendor/compiler_builtins/src/float/cmp.rs
new file mode 100644
index 000000000..1d4e38433
--- /dev/null
+++ b/vendor/compiler_builtins/src/float/cmp.rs
@@ -0,0 +1,253 @@
+#![allow(unreachable_code)]
+
+use float::Float;
+use int::Int;
+
+#[derive(Clone, Copy)]
+enum Result {
+ Less,
+ Equal,
+ Greater,
+ Unordered,
+}
+
+impl Result {
+ fn to_le_abi(self) -> i32 {
+ match self {
+ Result::Less => -1,
+ Result::Equal => 0,
+ Result::Greater => 1,
+ Result::Unordered => 1,
+ }
+ }
+
+ fn to_ge_abi(self) -> i32 {
+ match self {
+ Result::Less => -1,
+ Result::Equal => 0,
+ Result::Greater => 1,
+ Result::Unordered => -1,
+ }
+ }
+}
+
+fn cmp<F: Float>(a: F, b: F) -> Result {
+ let one = F::Int::ONE;
+ let zero = F::Int::ZERO;
+ let szero = F::SignedInt::ZERO;
+
+ let sign_bit = F::SIGN_MASK as F::Int;
+ let abs_mask = sign_bit - one;
+ let exponent_mask = F::EXPONENT_MASK;
+ let inf_rep = exponent_mask;
+
+ let a_rep = a.repr();
+ let b_rep = b.repr();
+ let a_abs = a_rep & abs_mask;
+ let b_abs = b_rep & abs_mask;
+
+ // If either a or b is NaN, they are unordered.
+ if a_abs > inf_rep || b_abs > inf_rep {
+ return Result::Unordered;
+ }
+
+ // If a and b are both zeros, they are equal.
+ if a_abs | b_abs == zero {
+ return Result::Equal;
+ }
+
+ let a_srep = a.signed_repr();
+ let b_srep = b.signed_repr();
+
+ // If at least one of a and b is positive, we get the same result comparing
+ // a and b as signed integers as we would with a floating-point compare.
+ if a_srep & b_srep >= szero {
+ if a_srep < b_srep {
+ Result::Less
+ } else if a_srep == b_srep {
+ Result::Equal
+ } else {
+ Result::Greater
+ }
+ // Otherwise, both are negative, so we need to flip the sense of the
+ // comparison to get the correct result. (This assumes a twos- or ones-
+ // complement integer representation; if integers are represented in a
+ // sign-magnitude representation, then this flip is incorrect).
+ } else if a_srep > b_srep {
+ Result::Less
+ } else if a_srep == b_srep {
+ Result::Equal
+ } else {
+ Result::Greater
+ }
+}
+
+fn unord<F: Float>(a: F, b: F) -> bool {
+ let one = F::Int::ONE;
+
+ let sign_bit = F::SIGN_MASK as F::Int;
+ let abs_mask = sign_bit - one;
+ let exponent_mask = F::EXPONENT_MASK;
+ let inf_rep = exponent_mask;
+
+ let a_rep = a.repr();
+ let b_rep = b.repr();
+ let a_abs = a_rep & abs_mask;
+ let b_abs = b_rep & abs_mask;
+
+ a_abs > inf_rep || b_abs > inf_rep
+}
+
+intrinsics! {
+ pub extern "C" fn __lesf2(a: f32, b: f32) -> i32 {
+ cmp(a, b).to_le_abi()
+ }
+
+ pub extern "C" fn __gesf2(a: f32, b: f32) -> i32 {
+ cmp(a, b).to_ge_abi()
+ }
+
+ #[arm_aeabi_alias = __aeabi_fcmpun]
+ pub extern "C" fn __unordsf2(a: f32, b: f32) -> i32 {
+ unord(a, b) as i32
+ }
+
+ pub extern "C" fn __eqsf2(a: f32, b: f32) -> i32 {
+ cmp(a, b).to_le_abi()
+ }
+
+ pub extern "C" fn __ltsf2(a: f32, b: f32) -> i32 {
+ cmp(a, b).to_le_abi()
+ }
+
+ pub extern "C" fn __nesf2(a: f32, b: f32) -> i32 {
+ cmp(a, b).to_le_abi()
+ }
+
+ pub extern "C" fn __gtsf2(a: f32, b: f32) -> i32 {
+ cmp(a, b).to_ge_abi()
+ }
+
+ pub extern "C" fn __ledf2(a: f64, b: f64) -> i32 {
+ cmp(a, b).to_le_abi()
+ }
+
+ pub extern "C" fn __gedf2(a: f64, b: f64) -> i32 {
+ cmp(a, b).to_ge_abi()
+ }
+
+ #[arm_aeabi_alias = __aeabi_dcmpun]
+ pub extern "C" fn __unorddf2(a: f64, b: f64) -> i32 {
+ unord(a, b) as i32
+ }
+
+ pub extern "C" fn __eqdf2(a: f64, b: f64) -> i32 {
+ cmp(a, b).to_le_abi()
+ }
+
+ pub extern "C" fn __ltdf2(a: f64, b: f64) -> i32 {
+ cmp(a, b).to_le_abi()
+ }
+
+ pub extern "C" fn __nedf2(a: f64, b: f64) -> i32 {
+ cmp(a, b).to_le_abi()
+ }
+
+ pub extern "C" fn __gtdf2(a: f64, b: f64) -> i32 {
+ cmp(a, b).to_ge_abi()
+ }
+}
+
+#[cfg(target_arch = "arm")]
+intrinsics! {
+ pub extern "aapcs" fn __aeabi_fcmple(a: f32, b: f32) -> i32 {
+ (__lesf2(a, b) <= 0) as i32
+ }
+
+ pub extern "aapcs" fn __aeabi_fcmpge(a: f32, b: f32) -> i32 {
+ (__gesf2(a, b) >= 0) as i32
+ }
+
+ pub extern "aapcs" fn __aeabi_fcmpeq(a: f32, b: f32) -> i32 {
+ (__eqsf2(a, b) == 0) as i32
+ }
+
+ pub extern "aapcs" fn __aeabi_fcmplt(a: f32, b: f32) -> i32 {
+ (__ltsf2(a, b) < 0) as i32
+ }
+
+ pub extern "aapcs" fn __aeabi_fcmpgt(a: f32, b: f32) -> i32 {
+ (__gtsf2(a, b) > 0) as i32
+ }
+
+ pub extern "aapcs" fn __aeabi_dcmple(a: f64, b: f64) -> i32 {
+ (__ledf2(a, b) <= 0) as i32
+ }
+
+ pub extern "aapcs" fn __aeabi_dcmpge(a: f64, b: f64) -> i32 {
+ (__gedf2(a, b) >= 0) as i32
+ }
+
+ pub extern "aapcs" fn __aeabi_dcmpeq(a: f64, b: f64) -> i32 {
+ (__eqdf2(a, b) == 0) as i32
+ }
+
+ pub extern "aapcs" fn __aeabi_dcmplt(a: f64, b: f64) -> i32 {
+ (__ltdf2(a, b) < 0) as i32
+ }
+
+ pub extern "aapcs" fn __aeabi_dcmpgt(a: f64, b: f64) -> i32 {
+ (__gtdf2(a, b) > 0) as i32
+ }
+
+ // On hard-float targets LLVM will use native instructions
+ // for all VFP intrinsics below
+
+ pub extern "C" fn __gesf2vfp(a: f32, b: f32) -> i32 {
+ (a >= b) as i32
+ }
+
+ pub extern "C" fn __gedf2vfp(a: f64, b: f64) -> i32 {
+ (a >= b) as i32
+ }
+
+ pub extern "C" fn __gtsf2vfp(a: f32, b: f32) -> i32 {
+ (a > b) as i32
+ }
+
+ pub extern "C" fn __gtdf2vfp(a: f64, b: f64) -> i32 {
+ (a > b) as i32
+ }
+
+ pub extern "C" fn __ltsf2vfp(a: f32, b: f32) -> i32 {
+ (a < b) as i32
+ }
+
+ pub extern "C" fn __ltdf2vfp(a: f64, b: f64) -> i32 {
+ (a < b) as i32
+ }
+
+ pub extern "C" fn __lesf2vfp(a: f32, b: f32) -> i32 {
+ (a <= b) as i32
+ }
+
+ pub extern "C" fn __ledf2vfp(a: f64, b: f64) -> i32 {
+ (a <= b) as i32
+ }
+
+ pub extern "C" fn __nesf2vfp(a: f32, b: f32) -> i32 {
+ (a != b) as i32
+ }
+
+ pub extern "C" fn __nedf2vfp(a: f64, b: f64) -> i32 {
+ (a != b) as i32
+ }
+
+ pub extern "C" fn __eqsf2vfp(a: f32, b: f32) -> i32 {
+ (a == b) as i32
+ }
+
+ pub extern "C" fn __eqdf2vfp(a: f64, b: f64) -> i32 {
+ (a == b) as i32
+ }
+}
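
`cmp` above orders two floats by comparing their bit patterns as signed integers, flipping the sense of the comparison when both operands are negative. A minimal standalone sketch of that trick, not part of the diff; it deliberately skips the NaN and signed-zero special cases that `cmp` handles first:

fn lt_via_bits(a: f32, b: f32) -> bool {
    let (ia, ib) = (a.to_bits() as i32, b.to_bits() as i32);
    if ia & ib >= 0 {
        ia < ib // at least one operand is non-negative
    } else {
        ia > ib // both negative: the ordering of the bit patterns is reversed
    }
}

fn main() {
    let cases: [(f32, f32); 4] = [(1.0, 2.0), (-1.0, 2.0), (-2.0, -1.0), (0.5, 0.25)];
    for &(a, b) in &cases {
        assert_eq!(lt_via_bits(a, b), a < b);
    }
}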
diff --git a/vendor/compiler_builtins/src/float/conv.rs b/vendor/compiler_builtins/src/float/conv.rs
new file mode 100644
index 000000000..07b58f3d2
--- /dev/null
+++ b/vendor/compiler_builtins/src/float/conv.rs
@@ -0,0 +1,351 @@
+/// Conversions from integers to floats.
+///
+/// This is hand-optimized bit-twiddling code,
+/// which unfortunately isn't the easiest kind of code to read.
+///
+/// The algorithm is explained here: https://blog.m-ou.se/floats/
+mod int_to_float {
+ pub fn u32_to_f32_bits(i: u32) -> u32 {
+ if i == 0 {
+ return 0;
+ }
+ let n = i.leading_zeros();
+ let a = (i << n) >> 8; // Significant bits, with bit 24 still intact.
+ let b = (i << n) << 24; // Insignificant bits, only relevant for rounding.
+ let m = a + ((b - (b >> 31 & !a)) >> 31); // Add one when we need to round up. Break ties to even.
+ let e = 157 - n as u32; // Exponent plus 127, minus one.
+ (e << 23) + m // + not |, so the mantissa can overflow into the exponent.
+ }
+
+ pub fn u32_to_f64_bits(i: u32) -> u64 {
+ if i == 0 {
+ return 0;
+ }
+ let n = i.leading_zeros();
+ let m = (i as u64) << (21 + n); // Significant bits, with bit 53 still intact.
+ let e = 1053 - n as u64; // Exponent plus 1023, minus one.
+ (e << 52) + m // Bit 53 of m will overflow into e.
+ }
+
+ pub fn u64_to_f32_bits(i: u64) -> u32 {
+ let n = i.leading_zeros();
+ let y = i.wrapping_shl(n);
+ let a = (y >> 40) as u32; // Significant bits, with bit 24 still intact.
+ let b = (y >> 8 | y & 0xFFFF) as u32; // Insignificant bits, only relevant for rounding.
+ let m = a + ((b - (b >> 31 & !a)) >> 31); // Add one when we need to round up. Break ties to even.
+ let e = if i == 0 { 0 } else { 189 - n }; // Exponent plus 127, minus one, except for zero.
+ (e << 23) + m // + not |, so the mantissa can overflow into the exponent.
+ }
+
+ pub fn u64_to_f64_bits(i: u64) -> u64 {
+ if i == 0 {
+ return 0;
+ }
+ let n = i.leading_zeros();
+ let a = ((i << n) >> 11) as u64; // Significant bits, with bit 53 still intact.
+ let b = ((i << n) << 53) as u64; // Insignificant bits, only relevant for rounding.
+ let m = a + ((b - (b >> 63 & !a)) >> 63); // Add one when we need to round up. Break ties to even.
+ let e = 1085 - n as u64; // Exponent plus 1023, minus one.
+ (e << 52) + m // + not |, so the mantissa can overflow into the exponent.
+ }
+
+ pub fn u128_to_f32_bits(i: u128) -> u32 {
+ let n = i.leading_zeros();
+ let y = i.wrapping_shl(n);
+ let a = (y >> 104) as u32; // Significant bits, with bit 24 still intact.
+ let b = (y >> 72) as u32 | ((y << 32) >> 32 != 0) as u32; // Insignificant bits, only relevant for rounding.
+ let m = a + ((b - (b >> 31 & !a)) >> 31); // Add one when we need to round up. Break ties to even.
+ let e = if i == 0 { 0 } else { 253 - n }; // Exponent plus 127, minus one, except for zero.
+ (e << 23) + m // + not |, so the mantissa can overflow into the exponent.
+ }
+
+ pub fn u128_to_f64_bits(i: u128) -> u64 {
+ let n = i.leading_zeros();
+ let y = i.wrapping_shl(n);
+ let a = (y >> 75) as u64; // Significant bits, with bit 53 still intact.
+ let b = (y >> 11 | y & 0xFFFF_FFFF) as u64; // Insignificant bits, only relevant for rounding.
+ let m = a + ((b - (b >> 63 & !a)) >> 63); // Add one when we need to round up. Break ties to even.
+ let e = if i == 0 { 0 } else { 1149 - n as u64 }; // Exponent plus 1023, minus one, except for zero.
+ (e << 52) + m // + not |, so the mantissa can overflow into the exponent.
+ }
+}
+
+// Conversions from unsigned integers to floats.
+intrinsics! {
+ #[arm_aeabi_alias = __aeabi_ui2f]
+ pub extern "C" fn __floatunsisf(i: u32) -> f32 {
+ f32::from_bits(int_to_float::u32_to_f32_bits(i))
+ }
+
+ #[arm_aeabi_alias = __aeabi_ui2d]
+ pub extern "C" fn __floatunsidf(i: u32) -> f64 {
+ f64::from_bits(int_to_float::u32_to_f64_bits(i))
+ }
+
+ #[arm_aeabi_alias = __aeabi_ul2f]
+ pub extern "C" fn __floatundisf(i: u64) -> f32 {
+ f32::from_bits(int_to_float::u64_to_f32_bits(i))
+ }
+
+ #[arm_aeabi_alias = __aeabi_ul2d]
+ pub extern "C" fn __floatundidf(i: u64) -> f64 {
+ f64::from_bits(int_to_float::u64_to_f64_bits(i))
+ }
+
+ #[cfg_attr(not(target_feature = "llvm14-builtins-abi"), unadjusted_on_win64)]
+ pub extern "C" fn __floatuntisf(i: u128) -> f32 {
+ f32::from_bits(int_to_float::u128_to_f32_bits(i))
+ }
+
+ #[cfg_attr(not(target_feature = "llvm14-builtins-abi"), unadjusted_on_win64)]
+ pub extern "C" fn __floatuntidf(i: u128) -> f64 {
+ f64::from_bits(int_to_float::u128_to_f64_bits(i))
+ }
+}
+
+// Conversions from signed integers to floats.
+intrinsics! {
+ #[arm_aeabi_alias = __aeabi_i2f]
+ pub extern "C" fn __floatsisf(i: i32) -> f32 {
+ let sign_bit = ((i >> 31) as u32) << 31;
+ f32::from_bits(int_to_float::u32_to_f32_bits(i.unsigned_abs()) | sign_bit)
+ }
+
+ #[arm_aeabi_alias = __aeabi_i2d]
+ pub extern "C" fn __floatsidf(i: i32) -> f64 {
+ let sign_bit = ((i >> 31) as u64) << 63;
+ f64::from_bits(int_to_float::u32_to_f64_bits(i.unsigned_abs()) | sign_bit)
+ }
+
+ #[arm_aeabi_alias = __aeabi_l2f]
+ pub extern "C" fn __floatdisf(i: i64) -> f32 {
+ let sign_bit = ((i >> 63) as u32) << 31;
+ f32::from_bits(int_to_float::u64_to_f32_bits(i.unsigned_abs()) | sign_bit)
+ }
+
+ #[arm_aeabi_alias = __aeabi_l2d]
+ pub extern "C" fn __floatdidf(i: i64) -> f64 {
+ let sign_bit = ((i >> 63) as u64) << 63;
+ f64::from_bits(int_to_float::u64_to_f64_bits(i.unsigned_abs()) | sign_bit)
+ }
+
+ #[cfg_attr(not(target_feature = "llvm14-builtins-abi"), unadjusted_on_win64)]
+ pub extern "C" fn __floattisf(i: i128) -> f32 {
+ let sign_bit = ((i >> 127) as u32) << 31;
+ f32::from_bits(int_to_float::u128_to_f32_bits(i.unsigned_abs()) | sign_bit)
+ }
+
+ #[cfg_attr(not(target_feature = "llvm14-builtins-abi"), unadjusted_on_win64)]
+ pub extern "C" fn __floattidf(i: i128) -> f64 {
+ let sign_bit = ((i >> 127) as u64) << 63;
+ f64::from_bits(int_to_float::u128_to_f64_bits(i.unsigned_abs()) | sign_bit)
+ }
+}
+
+// Conversions from floats to unsigned integers.
+intrinsics! {
+ #[arm_aeabi_alias = __aeabi_f2uiz]
+ pub extern "C" fn __fixunssfsi(f: f32) -> u32 {
+ let fbits = f.to_bits();
+ if fbits < 127 << 23 { // >= 0, < 1
+ 0
+ } else if fbits < 159 << 23 { // >= 1, < max
+ let m = 1 << 31 | fbits << 8; // Mantissa and the implicit 1-bit.
+ let s = 158 - (fbits >> 23); // Shift based on the exponent and bias.
+ m >> s
+ } else if fbits <= 255 << 23 { // >= max (incl. inf)
+ u32::MAX
+ } else { // Negative or NaN
+ 0
+ }
+ }
+
+ #[arm_aeabi_alias = __aeabi_f2ulz]
+ pub extern "C" fn __fixunssfdi(f: f32) -> u64 {
+ let fbits = f.to_bits();
+ if fbits < 127 << 23 { // >= 0, < 1
+ 0
+ } else if fbits < 191 << 23 { // >= 1, < max
+ let m = 1 << 63 | (fbits as u64) << 40; // Mantissa and the implicit 1-bit.
+ let s = 190 - (fbits >> 23); // Shift based on the exponent and bias.
+ m >> s
+ } else if fbits <= 255 << 23 { // >= max (incl. inf)
+ u64::MAX
+ } else { // Negative or NaN
+ 0
+ }
+ }
+
+ #[cfg_attr(target_feature = "llvm14-builtins-abi", win64_128bit_abi_hack)]
+ #[cfg_attr(not(target_feature = "llvm14-builtins-abi"), unadjusted_on_win64)]
+ pub extern "C" fn __fixunssfti(f: f32) -> u128 {
+ let fbits = f.to_bits();
+ if fbits < 127 << 23 { // >= 0, < 1
+ 0
+ } else if fbits < 255 << 23 { // >= 1, < inf
+ let m = 1 << 127 | (fbits as u128) << 104; // Mantissa and the implicit 1-bit.
+ let s = 254 - (fbits >> 23); // Shift based on the exponent and bias.
+ m >> s
+ } else if fbits == 255 << 23 { // == inf
+ u128::MAX
+ } else { // Negative or NaN
+ 0
+ }
+ }
+
+ #[arm_aeabi_alias = __aeabi_d2uiz]
+ pub extern "C" fn __fixunsdfsi(f: f64) -> u32 {
+ let fbits = f.to_bits();
+ if fbits < 1023 << 52 { // >= 0, < 1
+ 0
+ } else if fbits < 1055 << 52 { // >= 1, < max
+ let m = 1 << 31 | (fbits >> 21) as u32; // Mantissa and the implicit 1-bit.
+ let s = 1054 - (fbits >> 52); // Shift based on the exponent and bias.
+ m >> s
+ } else if fbits <= 2047 << 52 { // >= max (incl. inf)
+ u32::MAX
+ } else { // Negative or NaN
+ 0
+ }
+ }
+
+ #[arm_aeabi_alias = __aeabi_d2ulz]
+ pub extern "C" fn __fixunsdfdi(f: f64) -> u64 {
+ let fbits = f.to_bits();
+ if fbits < 1023 << 52 { // >= 0, < 1
+ 0
+ } else if fbits < 1087 << 52 { // >= 1, < max
+ let m = 1 << 63 | fbits << 11; // Mantissa and the implicit 1-bit.
+ let s = 1086 - (fbits >> 52); // Shift based on the exponent and bias.
+ m >> s
+ } else if fbits <= 2047 << 52 { // >= max (incl. inf)
+ u64::MAX
+ } else { // Negative or NaN
+ 0
+ }
+ }
+
+ #[cfg_attr(target_feature = "llvm14-builtins-abi", win64_128bit_abi_hack)]
+ #[cfg_attr(not(target_feature = "llvm14-builtins-abi"), unadjusted_on_win64)]
+ pub extern "C" fn __fixunsdfti(f: f64) -> u128 {
+ let fbits = f.to_bits();
+ if fbits < 1023 << 52 { // >= 0, < 1
+ 0
+ } else if fbits < 1151 << 52 { // >= 1, < max
+ let m = 1 << 127 | (fbits as u128) << 75; // Mantissa and the implicit 1-bit.
+ let s = 1150 - (fbits >> 52); // Shift based on the exponent and bias.
+ m >> s
+ } else if fbits <= 2047 << 52 { // >= max (incl. inf)
+ u128::MAX
+ } else { // Negative or NaN
+ 0
+ }
+ }
+}
+
+// Conversions from floats to signed integers.
+intrinsics! {
+ #[arm_aeabi_alias = __aeabi_f2iz]
+ pub extern "C" fn __fixsfsi(f: f32) -> i32 {
+ let fbits = f.to_bits() & !0 >> 1; // Remove sign bit.
+ if fbits < 127 << 23 { // >= 0, < 1
+ 0
+ } else if fbits < 158 << 23 { // >= 1, < max
+ let m = 1 << 31 | fbits << 8; // Mantissa and the implicit 1-bit.
+ let s = 158 - (fbits >> 23); // Shift based on the exponent and bias.
+ let u = (m >> s) as i32; // Unsigned result.
+ if f.is_sign_negative() { -u } else { u }
+ } else if fbits <= 255 << 23 { // >= max (incl. inf)
+ if f.is_sign_negative() { i32::MIN } else { i32::MAX }
+ } else { // NaN
+ 0
+ }
+ }
+
+ #[arm_aeabi_alias = __aeabi_f2lz]
+ pub extern "C" fn __fixsfdi(f: f32) -> i64 {
+ let fbits = f.to_bits() & !0 >> 1; // Remove sign bit.
+ if fbits < 127 << 23 { // >= 0, < 1
+ 0
+ } else if fbits < 190 << 23 { // >= 1, < max
+ let m = 1 << 63 | (fbits as u64) << 40; // Mantissa and the implicit 1-bit.
+ let s = 190 - (fbits >> 23); // Shift based on the exponent and bias.
+ let u = (m >> s) as i64; // Unsigned result.
+ if f.is_sign_negative() { -u } else { u }
+ } else if fbits <= 255 << 23 { // >= max (incl. inf)
+ if f.is_sign_negative() { i64::MIN } else { i64::MAX }
+ } else { // NaN
+ 0
+ }
+ }
+
+ #[cfg_attr(target_feature = "llvm14-builtins-abi", win64_128bit_abi_hack)]
+ #[cfg_attr(not(target_feature = "llvm14-builtins-abi"), unadjusted_on_win64)]
+ pub extern "C" fn __fixsfti(f: f32) -> i128 {
+ let fbits = f.to_bits() & !0 >> 1; // Remove sign bit.
+ if fbits < 127 << 23 { // >= 0, < 1
+ 0
+ } else if fbits < 254 << 23 { // >= 1, < max
+ let m = 1 << 127 | (fbits as u128) << 104; // Mantissa and the implicit 1-bit.
+ let s = 254 - (fbits >> 23); // Shift based on the exponent and bias.
+ let u = (m >> s) as i128; // Unsigned result.
+ if f.is_sign_negative() { -u } else { u }
+ } else if fbits <= 255 << 23 { // >= max (incl. inf)
+ if f.is_sign_negative() { i128::MIN } else { i128::MAX }
+ } else { // NaN
+ 0
+ }
+ }
+
+ #[arm_aeabi_alias = __aeabi_d2iz]
+ pub extern "C" fn __fixdfsi(f: f64) -> i32 {
+ let fbits = f.to_bits() & !0 >> 1; // Remove sign bit.
+ if fbits < 1023 << 52 { // >= 0, < 1
+ 0
+ } else if fbits < 1054 << 52 { // >= 1, < max
+ let m = 1 << 31 | (fbits >> 21) as u32; // Mantissa and the implicit 1-bit.
+ let s = 1054 - (fbits >> 52); // Shift based on the exponent and bias.
+ let u = (m >> s) as i32; // Unsigned result.
+ if f.is_sign_negative() { -u } else { u }
+ } else if fbits <= 2047 << 52 { // >= max (incl. inf)
+ if f.is_sign_negative() { i32::MIN } else { i32::MAX }
+ } else { // NaN
+ 0
+ }
+ }
+
+ #[arm_aeabi_alias = __aeabi_d2lz]
+ pub extern "C" fn __fixdfdi(f: f64) -> i64 {
+ let fbits = f.to_bits() & !0 >> 1; // Remove sign bit.
+ if fbits < 1023 << 52 { // >= 0, < 1
+ 0
+ } else if fbits < 1086 << 52 { // >= 1, < max
+ let m = 1 << 63 | fbits << 11; // Mantissa and the implicit 1-bit.
+ let s = 1086 - (fbits >> 52); // Shift based on the exponent and bias.
+ let u = (m >> s) as i64; // Unsigned result.
+ if f.is_sign_negative() { -u } else { u }
+ } else if fbits <= 2047 << 52 { // >= max (incl. inf)
+ if f.is_sign_negative() { i64::MIN } else { i64::MAX }
+ } else { // NaN
+ 0
+ }
+ }
+
+ #[cfg_attr(target_feature = "llvm14-builtins-abi", win64_128bit_abi_hack)]
+ #[cfg_attr(not(target_feature = "llvm14-builtins-abi"), unadjusted_on_win64)]
+ pub extern "C" fn __fixdfti(f: f64) -> i128 {
+ let fbits = f.to_bits() & !0 >> 1; // Remove sign bit.
+ if fbits < 1023 << 52 { // >= 0, < 1
+ 0
+ } else if fbits < 1150 << 52 { // >= 1, < max
+ let m = 1 << 127 | (fbits as u128) << 75; // Mantissa and the implicit 1-bit.
+ let s = 1150 - (fbits >> 52); // Shift based on the exponent and bias.
+ let u = (m >> s) as i128; // Unsigned result.
+ if f.is_sign_negative() { -u } else { u }
+ } else if fbits <= 2047 << 52 { // >= max (incl. inf)
+ if f.is_sign_negative() { i128::MIN } else { i128::MAX }
+ } else { // NaN
+ 0
+ }
+ }
+}
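
The `int_to_float` helpers above build the result bits directly: normalize the input, split it into significant and discarded bits, round to nearest with ties to even, and let a carry out of the mantissa bump the exponent. A standalone copy of the u32 -> f32 case, not part of the diff, checked against the native cast:

fn u32_to_f32_bits(i: u32) -> u32 {
    if i == 0 {
        return 0;
    }
    let n = i.leading_zeros();
    let a = (i << n) >> 8;                    // significant bits, bit 24 still set
    let b = (i << n) << 24;                   // discarded bits, only used for rounding
    let m = a + ((b - (b >> 31 & !a)) >> 31); // round to nearest, ties to even
    let e = 157 - n;                          // biased exponent minus one
    (e << 23) + m                             // `+` lets the mantissa carry into the exponent
}

fn main() {
    for &i in &[1u32, 7, 100, 0x00FF_FFFF, 0x0100_0001, u32::MAX] {
        assert_eq!(u32_to_f32_bits(i), (i as f32).to_bits());
    }
}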
diff --git a/vendor/compiler_builtins/src/float/div.rs b/vendor/compiler_builtins/src/float/div.rs
new file mode 100644
index 000000000..528a8368d
--- /dev/null
+++ b/vendor/compiler_builtins/src/float/div.rs
@@ -0,0 +1,467 @@
+// The functions are complex with many branches, and explicit
+// `return`s make it clear where function exit points are
+#![allow(clippy::needless_return)]
+
+use float::Float;
+use int::{CastInto, DInt, HInt, Int};
+
+fn div32<F: Float>(a: F, b: F) -> F
+where
+ u32: CastInto<F::Int>,
+ F::Int: CastInto<u32>,
+ i32: CastInto<F::Int>,
+ F::Int: CastInto<i32>,
+ F::Int: HInt,
+{
+ let one = F::Int::ONE;
+ let zero = F::Int::ZERO;
+
+ // let bits = F::BITS;
+ let significand_bits = F::SIGNIFICAND_BITS;
+ let max_exponent = F::EXPONENT_MAX;
+
+ let exponent_bias = F::EXPONENT_BIAS;
+
+ let implicit_bit = F::IMPLICIT_BIT;
+ let significand_mask = F::SIGNIFICAND_MASK;
+ let sign_bit = F::SIGN_MASK as F::Int;
+ let abs_mask = sign_bit - one;
+ let exponent_mask = F::EXPONENT_MASK;
+ let inf_rep = exponent_mask;
+ let quiet_bit = implicit_bit >> 1;
+ let qnan_rep = exponent_mask | quiet_bit;
+
+ #[inline(always)]
+ fn negate_u32(a: u32) -> u32 {
+ (<i32>::wrapping_neg(a as i32)) as u32
+ }
+
+ let a_rep = a.repr();
+ let b_rep = b.repr();
+
+ let a_exponent = (a_rep >> significand_bits) & max_exponent.cast();
+ let b_exponent = (b_rep >> significand_bits) & max_exponent.cast();
+ let quotient_sign = (a_rep ^ b_rep) & sign_bit;
+
+ let mut a_significand = a_rep & significand_mask;
+ let mut b_significand = b_rep & significand_mask;
+ let mut scale = 0;
+
+ // Detect if a or b is zero, denormal, infinity, or NaN.
+ if a_exponent.wrapping_sub(one) >= (max_exponent - 1).cast()
+ || b_exponent.wrapping_sub(one) >= (max_exponent - 1).cast()
+ {
+ let a_abs = a_rep & abs_mask;
+ let b_abs = b_rep & abs_mask;
+
+ // NaN / anything = qNaN
+ if a_abs > inf_rep {
+ return F::from_repr(a_rep | quiet_bit);
+ }
+ // anything / NaN = qNaN
+ if b_abs > inf_rep {
+ return F::from_repr(b_rep | quiet_bit);
+ }
+
+ if a_abs == inf_rep {
+ if b_abs == inf_rep {
+ // infinity / infinity = NaN
+ return F::from_repr(qnan_rep);
+ } else {
+ // infinity / anything else = +/- infinity
+ return F::from_repr(a_abs | quotient_sign);
+ }
+ }
+
+ // anything else / infinity = +/- 0
+ if b_abs == inf_rep {
+ return F::from_repr(quotient_sign);
+ }
+
+ if a_abs == zero {
+ if b_abs == zero {
+ // zero / zero = NaN
+ return F::from_repr(qnan_rep);
+ } else {
+ // zero / anything else = +/- zero
+ return F::from_repr(quotient_sign);
+ }
+ }
+
+ // anything else / zero = +/- infinity
+ if b_abs == zero {
+ return F::from_repr(inf_rep | quotient_sign);
+ }
+
+ // one or both of a or b is denormal, the other (if applicable) is a
+ // normal number. Renormalize one or both of a and b, and set scale to
+ // include the necessary exponent adjustment.
+ if a_abs < implicit_bit {
+ let (exponent, significand) = F::normalize(a_significand);
+ scale += exponent;
+ a_significand = significand;
+ }
+
+ if b_abs < implicit_bit {
+ let (exponent, significand) = F::normalize(b_significand);
+ scale -= exponent;
+ b_significand = significand;
+ }
+ }
+
+ // Or in the implicit significand bit. (If we fell through from the
+ // denormal path it was already set by normalize( ), but setting it twice
+ // won't hurt anything.)
+ a_significand |= implicit_bit;
+ b_significand |= implicit_bit;
+ let mut quotient_exponent: i32 = CastInto::<i32>::cast(a_exponent)
+ .wrapping_sub(CastInto::<i32>::cast(b_exponent))
+ .wrapping_add(scale);
+
+ // Align the significand of b as a Q31 fixed-point number in the range
+ // [1, 2.0) and get a Q32 approximate reciprocal using a small minimax
+ // polynomial approximation: reciprocal = 3/4 + 1/sqrt(2) - b/2. This
+ // is accurate to about 3.5 binary digits.
+ let q31b = CastInto::<u32>::cast(b_significand << 8.cast());
+ let mut reciprocal = (0x7504f333u32).wrapping_sub(q31b);
+
+ // Now refine the reciprocal estimate using a Newton-Raphson iteration:
+ //
+ // x1 = x0 * (2 - x0 * b)
+ //
+ // This doubles the number of correct binary digits in the approximation
+ // with each iteration, so after three iterations, we have about 28 binary
+ // digits of accuracy.
+
+ let mut correction: u32 =
+ negate_u32(((reciprocal as u64).wrapping_mul(q31b as u64) >> 32) as u32);
+ reciprocal = ((reciprocal as u64).wrapping_mul(correction as u64) as u64 >> 31) as u32;
+ correction = negate_u32(((reciprocal as u64).wrapping_mul(q31b as u64) >> 32) as u32);
+ reciprocal = ((reciprocal as u64).wrapping_mul(correction as u64) as u64 >> 31) as u32;
+ correction = negate_u32(((reciprocal as u64).wrapping_mul(q31b as u64) >> 32) as u32);
+ reciprocal = ((reciprocal as u64).wrapping_mul(correction as u64) as u64 >> 31) as u32;
+
+ // Exhaustive testing shows that the error in reciprocal after three steps
+ // is in the interval [-0x1.f58108p-31, 0x1.d0e48cp-29], in line with our
+ // expectations. We bump the reciprocal by a tiny value to force the error
+ // to be strictly positive (in the range [0x1.4fdfp-37,0x1.287246p-29], to
+ // be specific). This also causes 1/1 to give a sensible approximation
+ // instead of zero (due to overflow).
+ reciprocal = reciprocal.wrapping_sub(2);
+
+ // The numerical reciprocal is accurate to within 2^-28, lies in the
+ // interval [0x1.000000eep-1, 0x1.fffffffcp-1], and is strictly smaller
+ // than the true reciprocal of b. Multiplying a by this reciprocal thus
+ // gives a numerical q = a/b in Q24 with the following properties:
+ //
+ // 1. q < a/b
+ // 2. q is in the interval [0x1.000000eep-1, 0x1.fffffffcp0)
+ // 3. the error in q is at most 2^-24 + 2^-27 -- the 2^-24 term comes
+ // from the fact that we truncate the product, and the 2^-27 term
+ // is the error in the reciprocal of b scaled by the maximum
+ // possible value of a. As a consequence of this error bound,
+ // either q or nextafter(q) is the correctly rounded result.
+ let mut quotient = (a_significand << 1).widen_mul(reciprocal.cast()).hi();
+
+ // Two cases: quotient is in [0.5, 1.0) or quotient is in [1.0, 2.0).
+ // In either case, we are going to compute a residual of the form
+ //
+ // r = a - q*b
+ //
+ // We know from the construction of q that r satisfies:
+ //
+ // 0 <= r < ulp(q)*b
+ //
+ // if r is greater than 1/2 ulp(q)*b, then q rounds up. Otherwise, we
+ // already have the correct result. The exact halfway case cannot occur.
+ // We also take this time to right shift quotient if it falls in the [1,2)
+ // range and adjust the exponent accordingly.
+ let residual = if quotient < (implicit_bit << 1) {
+ quotient_exponent = quotient_exponent.wrapping_sub(1);
+ (a_significand << (significand_bits + 1)).wrapping_sub(quotient.wrapping_mul(b_significand))
+ } else {
+ quotient >>= 1;
+ (a_significand << significand_bits).wrapping_sub(quotient.wrapping_mul(b_significand))
+ };
+
+ let written_exponent = quotient_exponent.wrapping_add(exponent_bias as i32);
+
+ if written_exponent >= max_exponent as i32 {
+ // If we have overflowed the exponent, return infinity.
+ return F::from_repr(inf_rep | quotient_sign);
+ } else if written_exponent < 1 {
+ // Flush denormals to zero. In the future, it would be nice to add
+ // code to round them correctly.
+ return F::from_repr(quotient_sign);
+ } else {
+ let round = ((residual << 1) > b_significand) as u32;
+ // Clear the implicit bits
+ let mut abs_result = quotient & significand_mask;
+ // Insert the exponent
+ abs_result |= written_exponent.cast() << significand_bits;
+ // Round
+ abs_result = abs_result.wrapping_add(round.cast());
+ // Insert the sign and return
+ return F::from_repr(abs_result | quotient_sign);
+ }
+}
+
+fn div64<F: Float>(a: F, b: F) -> F
+where
+ u32: CastInto<F::Int>,
+ F::Int: CastInto<u32>,
+ i32: CastInto<F::Int>,
+ F::Int: CastInto<i32>,
+ u64: CastInto<F::Int>,
+ F::Int: CastInto<u64>,
+ i64: CastInto<F::Int>,
+ F::Int: CastInto<i64>,
+ F::Int: HInt,
+{
+ let one = F::Int::ONE;
+ let zero = F::Int::ZERO;
+
+ // let bits = F::BITS;
+ let significand_bits = F::SIGNIFICAND_BITS;
+ let max_exponent = F::EXPONENT_MAX;
+
+ let exponent_bias = F::EXPONENT_BIAS;
+
+ let implicit_bit = F::IMPLICIT_BIT;
+ let significand_mask = F::SIGNIFICAND_MASK;
+ let sign_bit = F::SIGN_MASK as F::Int;
+ let abs_mask = sign_bit - one;
+ let exponent_mask = F::EXPONENT_MASK;
+ let inf_rep = exponent_mask;
+ let quiet_bit = implicit_bit >> 1;
+ let qnan_rep = exponent_mask | quiet_bit;
+ // let exponent_bits = F::EXPONENT_BITS;
+
+ #[inline(always)]
+ fn negate_u32(a: u32) -> u32 {
+ (<i32>::wrapping_neg(a as i32)) as u32
+ }
+
+ #[inline(always)]
+ fn negate_u64(a: u64) -> u64 {
+ (<i64>::wrapping_neg(a as i64)) as u64
+ }
+
+ let a_rep = a.repr();
+ let b_rep = b.repr();
+
+ let a_exponent = (a_rep >> significand_bits) & max_exponent.cast();
+ let b_exponent = (b_rep >> significand_bits) & max_exponent.cast();
+ let quotient_sign = (a_rep ^ b_rep) & sign_bit;
+
+ let mut a_significand = a_rep & significand_mask;
+ let mut b_significand = b_rep & significand_mask;
+ let mut scale = 0;
+
+ // Detect if a or b is zero, denormal, infinity, or NaN.
+ if a_exponent.wrapping_sub(one) >= (max_exponent - 1).cast()
+ || b_exponent.wrapping_sub(one) >= (max_exponent - 1).cast()
+ {
+ let a_abs = a_rep & abs_mask;
+ let b_abs = b_rep & abs_mask;
+
+ // NaN / anything = qNaN
+ if a_abs > inf_rep {
+ return F::from_repr(a_rep | quiet_bit);
+ }
+ // anything / NaN = qNaN
+ if b_abs > inf_rep {
+ return F::from_repr(b_rep | quiet_bit);
+ }
+
+ if a_abs == inf_rep {
+ if b_abs == inf_rep {
+ // infinity / infinity = NaN
+ return F::from_repr(qnan_rep);
+ } else {
+ // infinity / anything else = +/- infinity
+ return F::from_repr(a_abs | quotient_sign);
+ }
+ }
+
+ // anything else / infinity = +/- 0
+ if b_abs == inf_rep {
+ return F::from_repr(quotient_sign);
+ }
+
+ if a_abs == zero {
+ if b_abs == zero {
+ // zero / zero = NaN
+ return F::from_repr(qnan_rep);
+ } else {
+ // zero / anything else = +/- zero
+ return F::from_repr(quotient_sign);
+ }
+ }
+
+ // anything else / zero = +/- infinity
+ if b_abs == zero {
+ return F::from_repr(inf_rep | quotient_sign);
+ }
+
+ // one or both of a or b is denormal, the other (if applicable) is a
+ // normal number. Renormalize one or both of a and b, and set scale to
+ // include the necessary exponent adjustment.
+ if a_abs < implicit_bit {
+ let (exponent, significand) = F::normalize(a_significand);
+ scale += exponent;
+ a_significand = significand;
+ }
+
+ if b_abs < implicit_bit {
+ let (exponent, significand) = F::normalize(b_significand);
+ scale -= exponent;
+ b_significand = significand;
+ }
+ }
+
+ // Or in the implicit significand bit. (If we fell through from the
+ // denormal path it was already set by normalize( ), but setting it twice
+ // won't hurt anything.)
+ a_significand |= implicit_bit;
+ b_significand |= implicit_bit;
+ let mut quotient_exponent: i32 = CastInto::<i32>::cast(a_exponent)
+ .wrapping_sub(CastInto::<i32>::cast(b_exponent))
+ .wrapping_add(scale);
+
+ // Align the significand of b as a Q31 fixed-point number in the range
+ // [1, 2.0) and get a Q32 approximate reciprocal using a small minimax
+ // polynomial approximation: reciprocal = 3/4 + 1/sqrt(2) - b/2. This
+ // is accurate to about 3.5 binary digits.
+ let q31b = CastInto::<u32>::cast(b_significand >> 21.cast());
+ let mut recip32 = (0x7504f333u32).wrapping_sub(q31b);
+
+ // Now refine the reciprocal estimate using a Newton-Raphson iteration:
+ //
+ // x1 = x0 * (2 - x0 * b)
+ //
+ // This doubles the number of correct binary digits in the approximation
+ // with each iteration, so after three iterations, we have about 28 binary
+ // digits of accuracy.
+
+ let mut correction32: u32 =
+ negate_u32(((recip32 as u64).wrapping_mul(q31b as u64) >> 32) as u32);
+ recip32 = ((recip32 as u64).wrapping_mul(correction32 as u64) >> 31) as u32;
+ correction32 = negate_u32(((recip32 as u64).wrapping_mul(q31b as u64) >> 32) as u32);
+ recip32 = ((recip32 as u64).wrapping_mul(correction32 as u64) >> 31) as u32;
+ correction32 = negate_u32(((recip32 as u64).wrapping_mul(q31b as u64) >> 32) as u32);
+ recip32 = ((recip32 as u64).wrapping_mul(correction32 as u64) >> 31) as u32;
+
+ // recip32 might have overflowed to exactly zero in the preceding
+ // computation if the high word of b is exactly 1.0. This would sabotage
+ // the full-width final stage of the computation that follows, so we adjust
+ // recip32 downward by one bit.
+ recip32 = recip32.wrapping_sub(1);
+
+ // We need to perform one more iteration to get us to 56 binary digits;
+ // The last iteration needs to happen with extra precision.
+ let q63blo = CastInto::<u32>::cast(b_significand << 11.cast());
+
+ let correction: u64 = negate_u64(
+ (recip32 as u64)
+ .wrapping_mul(q31b as u64)
+ .wrapping_add((recip32 as u64).wrapping_mul(q63blo as u64) >> 32),
+ );
+ let c_hi = (correction >> 32) as u32;
+ let c_lo = correction as u32;
+ let mut reciprocal: u64 = (recip32 as u64)
+ .wrapping_mul(c_hi as u64)
+ .wrapping_add((recip32 as u64).wrapping_mul(c_lo as u64) >> 32);
+
+ // We already adjusted the 32-bit estimate, now we need to adjust the final
+ // 64-bit reciprocal estimate downward to ensure that it is strictly smaller
+ // than the infinitely precise exact reciprocal. Because the computation
+ // of the Newton-Raphson step is truncating at every step, this adjustment
+ // is small; most of the work is already done.
+ reciprocal = reciprocal.wrapping_sub(2);
+
+ // The numerical reciprocal is accurate to within 2^-56, lies in the
+ // interval [0.5, 1.0), and is strictly smaller than the true reciprocal
+ // of b. Multiplying a by this reciprocal thus gives a numerical q = a/b
+ // in Q53 with the following properties:
+ //
+ // 1. q < a/b
+ // 2. q is in the interval [0.5, 2.0)
+ // 3. the error in q is bounded away from 2^-53 (actually, we have a
+ // couple of bits to spare, but this is all we need).
+
+ // We need a 64 x 64 multiply high to compute q, which isn't a basic
+ // operation in C, so we need to be a little bit fussy.
+ // let mut quotient: F::Int = ((((reciprocal as u64)
+ // .wrapping_mul(CastInto::<u32>::cast(a_significand << 1) as u64))
+ // >> 32) as u32)
+ // .cast();
+
+ // We need a 64 x 64 multiply high to compute q, which isn't a basic
+ // operation in C, so we need to be a little bit fussy.
+ let mut quotient = (a_significand << 2).widen_mul(reciprocal.cast()).hi();
+
+ // Two cases: quotient is in [0.5, 1.0) or quotient is in [1.0, 2.0).
+ // In either case, we are going to compute a residual of the form
+ //
+ // r = a - q*b
+ //
+ // We know from the construction of q that r satisfies:
+ //
+ // 0 <= r < ulp(q)*b
+ //
+ // if r is greater than 1/2 ulp(q)*b, then q rounds up. Otherwise, we
+ // already have the correct result. The exact halfway case cannot occur.
+ // We also take this time to right shift quotient if it falls in the [1,2)
+ // range and adjust the exponent accordingly.
+ let residual = if quotient < (implicit_bit << 1) {
+ quotient_exponent = quotient_exponent.wrapping_sub(1);
+ (a_significand << (significand_bits + 1)).wrapping_sub(quotient.wrapping_mul(b_significand))
+ } else {
+ quotient >>= 1;
+ (a_significand << significand_bits).wrapping_sub(quotient.wrapping_mul(b_significand))
+ };
+
+ let written_exponent = quotient_exponent.wrapping_add(exponent_bias as i32);
+
+ if written_exponent >= max_exponent as i32 {
+ // If we have overflowed the exponent, return infinity.
+ return F::from_repr(inf_rep | quotient_sign);
+ } else if written_exponent < 1 {
+ // Flush denormals to zero. In the future, it would be nice to add
+ // code to round them correctly.
+ return F::from_repr(quotient_sign);
+ } else {
+ let round = ((residual << 1) > b_significand) as u32;
+ // Clear the implicit bits
+ let mut abs_result = quotient & significand_mask;
+ // Insert the exponent
+ abs_result |= written_exponent.cast() << significand_bits;
+ // Round
+ abs_result = abs_result.wrapping_add(round.cast());
+ // Insert the sign and return
+ return F::from_repr(abs_result | quotient_sign);
+ }
+}
+
+intrinsics! {
+ #[arm_aeabi_alias = __aeabi_fdiv]
+ pub extern "C" fn __divsf3(a: f32, b: f32) -> f32 {
+ div32(a, b)
+ }
+
+ #[arm_aeabi_alias = __aeabi_ddiv]
+ pub extern "C" fn __divdf3(a: f64, b: f64) -> f64 {
+ div64(a, b)
+ }
+
+ #[cfg(target_arch = "arm")]
+ pub extern "C" fn __divsf3vfp(a: f32, b: f32) -> f32 {
+ a / b
+ }
+
+ #[cfg(target_arch = "arm")]
+ pub extern "C" fn __divdf3vfp(a: f64, b: f64) -> f64 {
+ a / b
+ }
+}
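
`div32` and `div64` above start from the estimate reciprocal = 3/4 + 1/sqrt(2) - b/2 (for b in [1.0, 2.0)) and refine it with Newton-Raphson steps x1 = x0 * (2 - x0 * b), each of which roughly doubles the number of correct digits. A minimal sketch of the same iteration in plain f64 arithmetic, not part of the diff:

fn main() {
    let b = 1.7_f64; // some value in [1.0, 2.0)
    // Crude minimax estimate of 1/b, good to a few bits.
    let mut x = 0.75 + std::f64::consts::FRAC_1_SQRT_2 - b / 2.0;
    for step in 0..4 {
        x = x * (2.0 - x * b); // Newton-Raphson step for the reciprocal
        println!("step {}: error = {:e}", step, (x - 1.0 / b).abs());
    }
    assert!((x * b - 1.0).abs() < 1e-12);
}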
diff --git a/vendor/compiler_builtins/src/float/extend.rs b/vendor/compiler_builtins/src/float/extend.rs
new file mode 100644
index 000000000..39633773b
--- /dev/null
+++ b/vendor/compiler_builtins/src/float/extend.rs
@@ -0,0 +1,83 @@
+use float::Float;
+use int::{CastInto, Int};
+
+/// Generic conversion from a narrower to a wider IEEE-754 floating-point type
+fn extend<F: Float, R: Float>(a: F) -> R
+where
+ F::Int: CastInto<u64>,
+ u64: CastInto<F::Int>,
+ u32: CastInto<R::Int>,
+ R::Int: CastInto<u32>,
+ R::Int: CastInto<u64>,
+ u64: CastInto<R::Int>,
+ F::Int: CastInto<R::Int>,
+{
+ let src_zero = F::Int::ZERO;
+ let src_one = F::Int::ONE;
+ let src_bits = F::BITS;
+ let src_sign_bits = F::SIGNIFICAND_BITS;
+ let src_exp_bias = F::EXPONENT_BIAS;
+ let src_min_normal = F::IMPLICIT_BIT;
+ let src_infinity = F::EXPONENT_MASK;
+ let src_sign_mask = F::SIGN_MASK as F::Int;
+ let src_abs_mask = src_sign_mask - src_one;
+ let src_qnan = F::SIGNIFICAND_MASK;
+ let src_nan_code = src_qnan - src_one;
+
+ let dst_bits = R::BITS;
+ let dst_sign_bits = R::SIGNIFICAND_BITS;
+ let dst_inf_exp = R::EXPONENT_MAX;
+ let dst_exp_bias = R::EXPONENT_BIAS;
+ let dst_min_normal = R::IMPLICIT_BIT;
+
+ let sign_bits_delta = dst_sign_bits - src_sign_bits;
+ let exp_bias_delta = dst_exp_bias - src_exp_bias;
+ let a_abs = a.repr() & src_abs_mask;
+ let mut abs_result = R::Int::ZERO;
+
+ if a_abs.wrapping_sub(src_min_normal) < src_infinity.wrapping_sub(src_min_normal) {
+ // a is a normal number.
+ // Extend to the destination type by shifting the significand and
+ // exponent into the proper position and rebiasing the exponent.
+ let abs_dst: R::Int = a_abs.cast();
+ let bias_dst: R::Int = exp_bias_delta.cast();
+ abs_result = abs_dst.wrapping_shl(sign_bits_delta);
+ abs_result += bias_dst.wrapping_shl(dst_sign_bits);
+ } else if a_abs >= src_infinity {
+ // a is NaN or infinity.
+ // Conjure the result by beginning with infinity, then setting the qNaN
+ // bit (if needed) and right-aligning the rest of the trailing NaN
+ // payload field.
+ let qnan_dst: R::Int = (a_abs & src_qnan).cast();
+ let nan_code_dst: R::Int = (a_abs & src_nan_code).cast();
+ let inf_exp_dst: R::Int = dst_inf_exp.cast();
+ abs_result = inf_exp_dst.wrapping_shl(dst_sign_bits);
+ abs_result |= qnan_dst.wrapping_shl(sign_bits_delta);
+ abs_result |= nan_code_dst.wrapping_shl(sign_bits_delta);
+ } else if a_abs != src_zero {
+ // a is denormal.
+ // Renormalize the significand and clear the leading bit, then insert
+ // the correct adjusted exponent in the destination type.
+ let scale = a_abs.leading_zeros() - src_min_normal.leading_zeros();
+ let abs_dst: R::Int = a_abs.cast();
+ let bias_dst: R::Int = (exp_bias_delta - scale + 1).cast();
+ abs_result = abs_dst.wrapping_shl(sign_bits_delta + scale);
+ abs_result = (abs_result ^ dst_min_normal) | (bias_dst.wrapping_shl(dst_sign_bits));
+ }
+
+ let sign_result: R::Int = (a.repr() & src_sign_mask).cast();
+ R::from_repr(abs_result | (sign_result.wrapping_shl(dst_bits - src_bits)))
+}
+
+intrinsics! {
+ #[aapcs_on_arm]
+ #[arm_aeabi_alias = __aeabi_f2d]
+ pub extern "C" fn __extendsfdf2(a: f32) -> f64 {
+ extend(a)
+ }
+
+ #[cfg(target_arch = "arm")]
+ pub extern "C" fn __extendsfdf2vfp(a: f32) -> f64 {
+ a as f64 // LLVM generates 'fcvtds'
+ }
+}
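
For a normal input, `extend` above only has to shift the significand into place and rebias the exponent, and f32 -> f64 extension is always exact. A standalone sketch of the normal-number branch, not part of the diff (zeros, denormals, infinities and NaNs take the other branches shown above):

fn extend_normal_f32(x: f32) -> f64 {
    let bits = x.to_bits();
    let sign = (bits >> 31) as u64;
    let exp = ((bits >> 23) & 0xFF) as u64; // biased f32 exponent
    let frac = (bits & 0x7F_FFFF) as u64;   // 23-bit fraction
    // Rebias by 1023 - 127 = 896 and shift the fraction up by 52 - 23 = 29 bits.
    f64::from_bits((sign << 63) | ((exp + 896) << 52) | (frac << 29))
}

fn main() {
    for &x in &[1.5f32, -3.25, 1.0e-10, 123456.789] {
        assert_eq!(extend_normal_f32(x), x as f64);
    }
}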
diff --git a/vendor/compiler_builtins/src/float/mod.rs b/vendor/compiler_builtins/src/float/mod.rs
new file mode 100644
index 000000000..01a5504d5
--- /dev/null
+++ b/vendor/compiler_builtins/src/float/mod.rs
@@ -0,0 +1,175 @@
+use core::ops;
+
+use super::int::Int;
+
+pub mod add;
+pub mod cmp;
+pub mod conv;
+pub mod div;
+pub mod extend;
+pub mod mul;
+pub mod pow;
+pub mod sub;
+pub mod trunc;
+
+public_test_dep! {
+/// Trait for some basic operations on floats
+pub(crate) trait Float:
+ Copy
+ + core::fmt::Debug
+ + PartialEq
+ + PartialOrd
+ + ops::AddAssign
+ + ops::MulAssign
+ + ops::Add<Output = Self>
+ + ops::Sub<Output = Self>
+ + ops::Div<Output = Self>
+ + ops::Rem<Output = Self>
+{
+ /// A uint of the same width as the float
+ type Int: Int;
+
+ /// An int of the same width as the float
+ type SignedInt: Int;
+
+ /// An int capable of containing the exponent bits plus a sign bit. This is signed.
+ type ExpInt: Int;
+
+ const ZERO: Self;
+ const ONE: Self;
+
+ /// The bitwidth of the float type
+ const BITS: u32;
+
+ /// The bitwidth of the significand
+ const SIGNIFICAND_BITS: u32;
+
+ /// The bitwidth of the exponent
+ const EXPONENT_BITS: u32 = Self::BITS - Self::SIGNIFICAND_BITS - 1;
+
+ /// The maximum value of the exponent
+ const EXPONENT_MAX: u32 = (1 << Self::EXPONENT_BITS) - 1;
+
+ /// The exponent bias value
+ const EXPONENT_BIAS: u32 = Self::EXPONENT_MAX >> 1;
+
+ /// A mask for the sign bit
+ const SIGN_MASK: Self::Int;
+
+ /// A mask for the significand
+ const SIGNIFICAND_MASK: Self::Int;
+
+ /// The implicit bit of the float format
+ const IMPLICIT_BIT: Self::Int;
+
+ /// A mask for the exponent
+ const EXPONENT_MASK: Self::Int;
+
+ /// Returns `self` transmuted to `Self::Int`
+ fn repr(self) -> Self::Int;
+
+ /// Returns `self` transmuted to `Self::SignedInt`
+ fn signed_repr(self) -> Self::SignedInt;
+
+ /// Checks if two floats have the same bit representation. *Except* for NaNs! NaN can be
+ /// represented in multiple different ways. This method returns `true` if two NaNs are
+ /// compared.
+ fn eq_repr(self, rhs: Self) -> bool;
+
+ /// Returns the sign bit
+ fn sign(self) -> bool;
+
+ /// Returns the exponent with bias
+ fn exp(self) -> Self::ExpInt;
+
+ /// Returns the significand with no implicit bit (or the "fractional" part)
+ fn frac(self) -> Self::Int;
+
+ /// Returns the significand with implicit bit
+ fn imp_frac(self) -> Self::Int;
+
+ /// Returns a `Self::Int` transmuted back to `Self`
+ fn from_repr(a: Self::Int) -> Self;
+
+ /// Constructs a `Self` from its parts. Inputs are treated as bits and shifted into position.
+ fn from_parts(sign: bool, exponent: Self::Int, significand: Self::Int) -> Self;
+
+ /// Returns (normalized exponent, normalized significand)
+ fn normalize(significand: Self::Int) -> (i32, Self::Int);
+
+ /// Returns if `self` is subnormal
+ fn is_subnormal(self) -> bool;
+}
+}
+
+macro_rules! float_impl {
+ ($ty:ident, $ity:ident, $sity:ident, $expty:ident, $bits:expr, $significand_bits:expr) => {
+ impl Float for $ty {
+ type Int = $ity;
+ type SignedInt = $sity;
+ type ExpInt = $expty;
+
+ const ZERO: Self = 0.0;
+ const ONE: Self = 1.0;
+
+ const BITS: u32 = $bits;
+ const SIGNIFICAND_BITS: u32 = $significand_bits;
+
+ const SIGN_MASK: Self::Int = 1 << (Self::BITS - 1);
+ const SIGNIFICAND_MASK: Self::Int = (1 << Self::SIGNIFICAND_BITS) - 1;
+ const IMPLICIT_BIT: Self::Int = 1 << Self::SIGNIFICAND_BITS;
+ const EXPONENT_MASK: Self::Int = !(Self::SIGN_MASK | Self::SIGNIFICAND_MASK);
+
+ fn repr(self) -> Self::Int {
+ self.to_bits()
+ }
+ fn signed_repr(self) -> Self::SignedInt {
+ self.to_bits() as Self::SignedInt
+ }
+ fn eq_repr(self, rhs: Self) -> bool {
+ if self.is_nan() && rhs.is_nan() {
+ true
+ } else {
+ self.repr() == rhs.repr()
+ }
+ }
+ fn sign(self) -> bool {
+ self.signed_repr() < Self::SignedInt::ZERO
+ }
+ fn exp(self) -> Self::ExpInt {
+ ((self.to_bits() & Self::EXPONENT_MASK) >> Self::SIGNIFICAND_BITS) as Self::ExpInt
+ }
+ fn frac(self) -> Self::Int {
+ self.to_bits() & Self::SIGNIFICAND_MASK
+ }
+ fn imp_frac(self) -> Self::Int {
+ self.frac() | Self::IMPLICIT_BIT
+ }
+ fn from_repr(a: Self::Int) -> Self {
+ Self::from_bits(a)
+ }
+ fn from_parts(sign: bool, exponent: Self::Int, significand: Self::Int) -> Self {
+ Self::from_repr(
+ ((sign as Self::Int) << (Self::BITS - 1))
+ | ((exponent << Self::SIGNIFICAND_BITS) & Self::EXPONENT_MASK)
+ | (significand & Self::SIGNIFICAND_MASK),
+ )
+ }
+ fn normalize(significand: Self::Int) -> (i32, Self::Int) {
+ let shift = significand
+ .leading_zeros()
+ .wrapping_sub((Self::Int::ONE << Self::SIGNIFICAND_BITS).leading_zeros());
+ (
+ 1i32.wrapping_sub(shift as i32),
+ significand << shift as Self::Int,
+ )
+ }
+ fn is_subnormal(self) -> bool {
+ (self.repr() & Self::EXPONENT_MASK) == Self::Int::ZERO
+ }
+ }
+ };
+}
+
+float_impl!(f32, u32, i32, i16, 32, 23);
+float_impl!(f64, u64, i64, i16, 64, 52);
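
The `float_impl!` macro above derives every other constant from `BITS` and `SIGNIFICAND_BITS`. A standalone check of the derived f32 values, not part of the diff:

fn main() {
    const BITS: u32 = 32;
    const SIGNIFICAND_BITS: u32 = 23;
    const EXPONENT_BITS: u32 = BITS - SIGNIFICAND_BITS - 1;     // 8
    const EXPONENT_MAX: u32 = (1 << EXPONENT_BITS) - 1;         // 255
    const EXPONENT_BIAS: u32 = EXPONENT_MAX >> 1;               // 127
    const SIGN_MASK: u32 = 1 << (BITS - 1);                     // 0x8000_0000
    const SIGNIFICAND_MASK: u32 = (1 << SIGNIFICAND_BITS) - 1;  // 0x007F_FFFF
    const EXPONENT_MASK: u32 = !(SIGN_MASK | SIGNIFICAND_MASK); // 0x7F80_0000

    assert_eq!(EXPONENT_BIAS, 127);
    assert_eq!(EXPONENT_MASK, 0x7F80_0000);

    // Decompose 1.5f32 = (-1)^0 * 1.5 * 2^0: biased exponent 127, fraction 0.5.
    let bits = 1.5f32.to_bits();
    assert_eq!(bits & SIGN_MASK, 0);
    assert_eq!((bits & EXPONENT_MASK) >> SIGNIFICAND_BITS, EXPONENT_BIAS);
    assert_eq!(bits & SIGNIFICAND_MASK, 1 << (SIGNIFICAND_BITS - 1));
}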
diff --git a/vendor/compiler_builtins/src/float/mul.rs b/vendor/compiler_builtins/src/float/mul.rs
new file mode 100644
index 000000000..c89f22756
--- /dev/null
+++ b/vendor/compiler_builtins/src/float/mul.rs
@@ -0,0 +1,209 @@
+use float::Float;
+use int::{CastInto, DInt, HInt, Int};
+
+fn mul<F: Float>(a: F, b: F) -> F
+where
+ u32: CastInto<F::Int>,
+ F::Int: CastInto<u32>,
+ i32: CastInto<F::Int>,
+ F::Int: CastInto<i32>,
+ F::Int: HInt,
+{
+ let one = F::Int::ONE;
+ let zero = F::Int::ZERO;
+
+ let bits = F::BITS;
+ let significand_bits = F::SIGNIFICAND_BITS;
+ let max_exponent = F::EXPONENT_MAX;
+
+ let exponent_bias = F::EXPONENT_BIAS;
+
+ let implicit_bit = F::IMPLICIT_BIT;
+ let significand_mask = F::SIGNIFICAND_MASK;
+ let sign_bit = F::SIGN_MASK as F::Int;
+ let abs_mask = sign_bit - one;
+ let exponent_mask = F::EXPONENT_MASK;
+ let inf_rep = exponent_mask;
+ let quiet_bit = implicit_bit >> 1;
+ let qnan_rep = exponent_mask | quiet_bit;
+ let exponent_bits = F::EXPONENT_BITS;
+
+ let a_rep = a.repr();
+ let b_rep = b.repr();
+
+ let a_exponent = (a_rep >> significand_bits) & max_exponent.cast();
+ let b_exponent = (b_rep >> significand_bits) & max_exponent.cast();
+ let product_sign = (a_rep ^ b_rep) & sign_bit;
+
+ let mut a_significand = a_rep & significand_mask;
+ let mut b_significand = b_rep & significand_mask;
+ let mut scale = 0;
+
+ // Detect if a or b is zero, denormal, infinity, or NaN.
+ if a_exponent.wrapping_sub(one) >= (max_exponent - 1).cast()
+ || b_exponent.wrapping_sub(one) >= (max_exponent - 1).cast()
+ {
+ let a_abs = a_rep & abs_mask;
+ let b_abs = b_rep & abs_mask;
+
+ // NaN * anything = qNaN
+ if a_abs > inf_rep {
+ return F::from_repr(a_rep | quiet_bit);
+ }
+ // anything * NaN = qNaN
+ if b_abs > inf_rep {
+ return F::from_repr(b_rep | quiet_bit);
+ }
+
+ if a_abs == inf_rep {
+ if b_abs != zero {
+ // infinity * non-zero = +/- infinity
+ return F::from_repr(a_abs | product_sign);
+ } else {
+ // infinity * zero = NaN
+ return F::from_repr(qnan_rep);
+ }
+ }
+
+ if b_abs == inf_rep {
+ if a_abs != zero {
+ // infinity * non-zero = +/- infinity
+ return F::from_repr(b_abs | product_sign);
+ } else {
+ // infinity * zero = NaN
+ return F::from_repr(qnan_rep);
+ }
+ }
+
+ // zero * anything = +/- zero
+ if a_abs == zero {
+ return F::from_repr(product_sign);
+ }
+
+ // anything * zero = +/- zero
+ if b_abs == zero {
+ return F::from_repr(product_sign);
+ }
+
+ // one or both of a or b is denormal, the other (if applicable) is a
+ // normal number. Renormalize one or both of a and b, and set scale to
+ // include the necessary exponent adjustment.
+ if a_abs < implicit_bit {
+ let (exponent, significand) = F::normalize(a_significand);
+ scale += exponent;
+ a_significand = significand;
+ }
+
+ if b_abs < implicit_bit {
+ let (exponent, significand) = F::normalize(b_significand);
+ scale += exponent;
+ b_significand = significand;
+ }
+ }
+
+ // Or in the implicit significand bit. (If we fell through from the
+ // denormal path it was already set by normalize( ), but setting it twice
+ // won't hurt anything.)
+ a_significand |= implicit_bit;
+ b_significand |= implicit_bit;
+
+ // Get the significand of a*b. Before multiplying the significands, shift
+ // one of them left to left-align it in the field. Thus, the product will
+ // have (exponentBits + 2) integral digits, all but two of which must be
+ // zero. Normalizing this result is just a conditional left-shift by one
+ // and bumping the exponent accordingly.
+ let (mut product_low, mut product_high) = a_significand
+ .widen_mul(b_significand << exponent_bits)
+ .lo_hi();
+
+ let a_exponent_i32: i32 = a_exponent.cast();
+ let b_exponent_i32: i32 = b_exponent.cast();
+ let mut product_exponent: i32 = a_exponent_i32
+ .wrapping_add(b_exponent_i32)
+ .wrapping_add(scale)
+ .wrapping_sub(exponent_bias as i32);
+
+ // Normalize the significand, adjust exponent if needed.
+ if (product_high & implicit_bit) != zero {
+ product_exponent = product_exponent.wrapping_add(1);
+ } else {
+ product_high = (product_high << 1) | (product_low >> (bits - 1));
+ product_low <<= 1;
+ }
+
+ // If we have overflowed the type, return +/- infinity.
+ if product_exponent >= max_exponent as i32 {
+ return F::from_repr(inf_rep | product_sign);
+ }
+
+ if product_exponent <= 0 {
+ // Result is denormal before rounding
+ //
+ // If the result is so small that it just underflows to zero, return
+ // a zero of the appropriate sign. Mathematically there is no need to
+ // handle this case separately, but we make it a special case to
+ // simplify the shift logic.
+ let shift = one.wrapping_sub(product_exponent.cast()).cast();
+ if shift >= bits {
+ return F::from_repr(product_sign);
+ }
+
+ // Otherwise, shift the significand of the result so that the round
+ // bit is the high bit of productLo.
+ if shift < bits {
+ let sticky = product_low << (bits - shift);
+ product_low = product_high << (bits - shift) | product_low >> shift | sticky;
+ product_high >>= shift;
+ } else if shift < (2 * bits) {
+ let sticky = product_high << (2 * bits - shift) | product_low;
+ product_low = product_high >> (shift - bits) | sticky;
+ product_high = zero;
+ } else {
+ product_high = zero;
+ }
+ } else {
+ // Result is normal before rounding; insert the exponent.
+ product_high &= significand_mask;
+ product_high |= product_exponent.cast() << significand_bits;
+ }
+
+ // Insert the sign of the result:
+ product_high |= product_sign;
+
+ // Final rounding. The final result may overflow to infinity, or underflow
+ // to zero, but those are the correct results in those cases. We use the
+ // default IEEE-754 round-to-nearest, ties-to-even rounding mode.
+ if product_low > sign_bit {
+ product_high += one;
+ }
+
+ if product_low == sign_bit {
+ product_high += product_high & one;
+ }
+
+ F::from_repr(product_high)
+}
+
+intrinsics! {
+ #[aapcs_on_arm]
+ #[arm_aeabi_alias = __aeabi_fmul]
+ pub extern "C" fn __mulsf3(a: f32, b: f32) -> f32 {
+ mul(a, b)
+ }
+
+ #[aapcs_on_arm]
+ #[arm_aeabi_alias = __aeabi_dmul]
+ pub extern "C" fn __muldf3(a: f64, b: f64) -> f64 {
+ mul(a, b)
+ }
+
+ #[cfg(target_arch = "arm")]
+ pub extern "C" fn __mulsf3vfp(a: f32, b: f32) -> f32 {
+ a * b
+ }
+
+ #[cfg(target_arch = "arm")]
+ pub extern "C" fn __muldf3vfp(a: f64, b: f64) -> f64 {
+ a * b
+ }
+}
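
`mul` above multiplies the two 24-bit significands (each in [1.0, 2.0) once the implicit bit is or'd in), so the product lies in [1.0, 4.0) and needs at most one normalization shift. A minimal f32-only sketch of that step, not part of the diff:

fn mul_significands(a: f32, b: f32) -> (u64, i32) {
    let sig = |x: f32| ((x.to_bits() & 0x7F_FFFF) | 0x80_0000) as u64; // 1.xxx * 2^23
    let exp = |x: f32| ((x.to_bits() >> 23) & 0xFF) as i32 - 127;      // unbiased exponent
    let mut product = sig(a) * sig(b); // value in [2^46, 2^48)
    let mut e = exp(a) + exp(b);
    if product >= 1u64 << 47 {
        // Product is in [2.0, 4.0): shift right once and bump the exponent,
        // the same conditional normalization step `mul` performs.
        product >>= 1;
        e += 1;
    }
    (product, e)
}

fn main() {
    // 1.5 * 1.5 = 2.25 = 1.125 * 2^1
    let (p, e) = mul_significands(1.5, 1.5);
    assert_eq!(e, 1);
    assert_eq!(p, (1.125 * (1u64 << 46) as f64) as u64);
}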
diff --git a/vendor/compiler_builtins/src/float/pow.rs b/vendor/compiler_builtins/src/float/pow.rs
new file mode 100644
index 000000000..a75340c30
--- /dev/null
+++ b/vendor/compiler_builtins/src/float/pow.rs
@@ -0,0 +1,36 @@
+use float::Float;
+use int::Int;
+
+/// Returns `a` raised to the power `b`
+fn pow<F: Float>(a: F, b: i32) -> F {
+ let mut a = a;
+ let recip = b < 0;
+ let mut pow = Int::abs_diff(b, 0);
+ let mut mul = F::ONE;
+ loop {
+ if (pow & 1) != 0 {
+ mul *= a;
+ }
+ pow >>= 1;
+ if pow == 0 {
+ break;
+ }
+ a *= a;
+ }
+
+ if recip {
+ F::ONE / mul
+ } else {
+ mul
+ }
+}
+
+intrinsics! {
+ pub extern "C" fn __powisf2(a: f32, b: i32) -> f32 {
+ pow(a, b)
+ }
+
+ pub extern "C" fn __powidf2(a: f64, b: i32) -> f64 {
+ pow(a, b)
+ }
+}
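
`pow` above is plain square-and-multiply on the bits of the exponent. A standalone sketch of the same loop, not part of the diff and restructured slightly, checked against exactly representable cases:

fn powi(mut a: f64, b: i32) -> f64 {
    let recip = b < 0;
    let mut pow = b.unsigned_abs();
    let mut mul = 1.0;
    while pow != 0 {
        if (pow & 1) != 0 {
            mul *= a; // this bit of the exponent is set: fold the current power in
        }
        pow >>= 1;
        a *= a; // square the base for the next bit
    }
    if recip {
        1.0 / mul
    } else {
        mul
    }
}

fn main() {
    assert_eq!(powi(2.0, 10), 1024.0);
    assert_eq!(powi(3.0, 5), 243.0);
    assert_eq!(powi(2.0, -3), 0.125);
    assert_eq!(powi(1.5, 0), 1.0);
}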
diff --git a/vendor/compiler_builtins/src/float/sub.rs b/vendor/compiler_builtins/src/float/sub.rs
new file mode 100644
index 000000000..8d300e9d2
--- /dev/null
+++ b/vendor/compiler_builtins/src/float/sub.rs
@@ -0,0 +1,25 @@
+use float::add::__adddf3;
+use float::add::__addsf3;
+use float::Float;
+
+intrinsics! {
+ #[arm_aeabi_alias = __aeabi_fsub]
+ pub extern "C" fn __subsf3(a: f32, b: f32) -> f32 {
+ __addsf3(a, f32::from_repr(b.repr() ^ f32::SIGN_MASK))
+ }
+
+ #[arm_aeabi_alias = __aeabi_dsub]
+ pub extern "C" fn __subdf3(a: f64, b: f64) -> f64 {
+ __adddf3(a, f64::from_repr(b.repr() ^ f64::SIGN_MASK))
+ }
+
+ #[cfg(target_arch = "arm")]
+ pub extern "C" fn __subsf3vfp(a: f32, b: f32) -> f32 {
+ a - b
+ }
+
+ #[cfg(target_arch = "arm")]
+ pub extern "C" fn __subdf3vfp(a: f64, b: f64) -> f64 {
+ a - b
+ }
+}
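
`__subsf3` and `__subdf3` above reduce subtraction to addition by flipping the sign bit of `b`. A standalone check of that identity, not part of the diff:

fn neg_via_sign_bit(x: f32) -> f32 {
    const SIGN_MASK: u32 = 1 << 31; // same value as f32::SIGN_MASK in the trait above
    f32::from_bits(x.to_bits() ^ SIGN_MASK)
}

fn main() {
    let cases: [(f32, f32); 4] = [(3.5, 1.25), (-2.0, 7.0), (0.0, 0.0), (1.0e-3, 1.0e3)];
    for &(a, b) in &cases {
        assert_eq!(a + neg_via_sign_bit(b), a - b);
    }
}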
diff --git a/vendor/compiler_builtins/src/float/trunc.rs b/vendor/compiler_builtins/src/float/trunc.rs
new file mode 100644
index 000000000..d73713084
--- /dev/null
+++ b/vendor/compiler_builtins/src/float/trunc.rs
@@ -0,0 +1,125 @@
+use float::Float;
+use int::{CastInto, Int};
+
+fn trunc<F: Float, R: Float>(a: F) -> R
+where
+ F::Int: CastInto<u64>,
+ F::Int: CastInto<u32>,
+ u64: CastInto<F::Int>,
+ u32: CastInto<F::Int>,
+
+ R::Int: CastInto<u32>,
+ u32: CastInto<R::Int>,
+ F::Int: CastInto<R::Int>,
+{
+ let src_zero = F::Int::ZERO;
+ let src_one = F::Int::ONE;
+ let src_bits = F::BITS;
+ let src_exp_bias = F::EXPONENT_BIAS;
+
+ let src_min_normal = F::IMPLICIT_BIT;
+ let src_significand_mask = F::SIGNIFICAND_MASK;
+ let src_infinity = F::EXPONENT_MASK;
+ let src_sign_mask = F::SIGN_MASK;
+ let src_abs_mask = src_sign_mask - src_one;
+ let round_mask = (src_one << (F::SIGNIFICAND_BITS - R::SIGNIFICAND_BITS)) - src_one;
+ let halfway = src_one << (F::SIGNIFICAND_BITS - R::SIGNIFICAND_BITS - 1);
+ let src_qnan = src_one << (F::SIGNIFICAND_BITS - 1);
+ let src_nan_code = src_qnan - src_one;
+
+ let dst_zero = R::Int::ZERO;
+ let dst_one = R::Int::ONE;
+ let dst_bits = R::BITS;
+ let dst_inf_exp = R::EXPONENT_MAX;
+ let dst_exp_bias = R::EXPONENT_BIAS;
+
+ let underflow_exponent: F::Int = (src_exp_bias + 1 - dst_exp_bias).cast();
+ let overflow_exponent: F::Int = (src_exp_bias + dst_inf_exp - dst_exp_bias).cast();
+ let underflow: F::Int = underflow_exponent << F::SIGNIFICAND_BITS;
+ let overflow: F::Int = overflow_exponent << F::SIGNIFICAND_BITS;
+
+ let dst_qnan = R::Int::ONE << (R::SIGNIFICAND_BITS - 1);
+ let dst_nan_code = dst_qnan - dst_one;
+
+ let sign_bits_delta = F::SIGNIFICAND_BITS - R::SIGNIFICAND_BITS;
+ // Break a into a sign and representation of the absolute value.
+ let a_abs = a.repr() & src_abs_mask;
+ let sign = a.repr() & src_sign_mask;
+ let mut abs_result: R::Int;
+
+ if a_abs.wrapping_sub(underflow) < a_abs.wrapping_sub(overflow) {
+ // The exponent of a is within the range of normal numbers in the
+ // destination format. We can convert by simply right-shifting with
+ // rounding and adjusting the exponent.
+ abs_result = (a_abs >> sign_bits_delta).cast();
+ let tmp = src_exp_bias.wrapping_sub(dst_exp_bias) << R::SIGNIFICAND_BITS;
+ abs_result = abs_result.wrapping_sub(tmp.cast());
+
+ let round_bits = a_abs & round_mask;
+ if round_bits > halfway {
+ // Round to nearest.
+ abs_result += dst_one;
+ } else if round_bits == halfway {
+ // Tie to even.
+ abs_result += abs_result & dst_one;
+ };
+ } else if a_abs > src_infinity {
+ // a is NaN.
+ // Conjure the result by beginning with infinity, setting the qNaN
+ // bit and inserting the (truncated) trailing NaN field.
+ abs_result = (dst_inf_exp << R::SIGNIFICAND_BITS).cast();
+ abs_result |= dst_qnan;
+ abs_result |= dst_nan_code
+ & ((a_abs & src_nan_code) >> (F::SIGNIFICAND_BITS - R::SIGNIFICAND_BITS)).cast();
+ } else if a_abs >= overflow {
+ // a overflows to infinity.
+ abs_result = (dst_inf_exp << R::SIGNIFICAND_BITS).cast();
+ } else {
+ // a underflows on conversion to the destination type or is an exact
+ // zero. The result may be a denormal or zero. Extract the exponent
+ // to get the shift amount for the denormalization.
+ let a_exp: u32 = (a_abs >> F::SIGNIFICAND_BITS).cast();
+ let shift = src_exp_bias - dst_exp_bias - a_exp + 1;
+
+ let significand = (a.repr() & src_significand_mask) | src_min_normal;
+
+ // Right shift by the denormalization amount with sticky.
+ if shift > F::SIGNIFICAND_BITS {
+ abs_result = dst_zero;
+ } else {
+ let sticky = if (significand << (src_bits - shift)) != src_zero {
+ src_one
+ } else {
+ src_zero
+ };
+ let denormalized_significand: F::Int = significand >> shift | sticky;
+ abs_result =
+ (denormalized_significand >> (F::SIGNIFICAND_BITS - R::SIGNIFICAND_BITS)).cast();
+ let round_bits = denormalized_significand & round_mask;
+ // Round to nearest
+ if round_bits > halfway {
+ abs_result += dst_one;
+ }
+ // Ties to even
+ else if round_bits == halfway {
+ abs_result += abs_result & dst_one;
+ };
+ }
+ }
+
+ // Apply the signbit to the absolute value.
+ R::from_repr(abs_result | sign.wrapping_shr(src_bits - dst_bits).cast())
+}
+
+intrinsics! {
+ #[aapcs_on_arm]
+ #[arm_aeabi_alias = __aeabi_d2f]
+ pub extern "C" fn __truncdfsf2(a: f64) -> f32 {
+ trunc(a)
+ }
+
+ #[cfg(target_arch = "arm")]
+ pub extern "C" fn __truncdfsf2vfp(a: f64) -> f32 {
+ a as f32
+ }
+}
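
`trunc` above rounds the discarded low bits to nearest, with ties going to the result whose low significand bit is even; the native `as` cast rounds the same way. A standalone sketch of the halfway cases just above 1.0, not part of the diff:

fn main() {
    let ulp = 2f64.powi(-23); // spacing of f32 values just above 1.0

    // 1 + ulp/2 is exactly halfway between 1.0 (even significand) and 1.0 + ulp (odd):
    // the tie goes to the even neighbour, 1.0.
    assert_eq!((1.0 + ulp / 2.0) as f32, 1.0f32);

    // 1 + 3*ulp/2 is halfway between 1.0 + ulp (odd) and 1.0 + 2*ulp (even):
    // the tie goes to 1.0 + 2*ulp.
    assert_eq!((1.0 + 1.5 * ulp) as f32, 1.0f32 + 2.0 * ulp as f32);

    // Anything past the halfway point rounds up.
    assert!((1.0 + 0.6 * ulp) as f32 > 1.0f32);
}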