summaryrefslogtreecommitdiffstats
path: root/vendor/libm/src/math
diff options
context:
space:
mode:
Diffstat (limited to 'vendor/libm/src/math')
-rw-r--r--vendor/libm/src/math/acos.rs2
-rw-r--r--vendor/libm/src/math/acosf.rs2
-rw-r--r--vendor/libm/src/math/acosh.rs1
-rw-r--r--vendor/libm/src/math/acoshf.rs1
-rw-r--r--vendor/libm/src/math/asin.rs2
-rw-r--r--vendor/libm/src/math/asinf.rs2
-rw-r--r--vendor/libm/src/math/asinh.rs1
-rw-r--r--vendor/libm/src/math/asinhf.rs1
-rw-r--r--vendor/libm/src/math/atan.rs1
-rw-r--r--vendor/libm/src/math/atan2.rs1
-rw-r--r--vendor/libm/src/math/atan2f.rs1
-rw-r--r--vendor/libm/src/math/atanf.rs9
-rw-r--r--vendor/libm/src/math/atanh.rs1
-rw-r--r--vendor/libm/src/math/atanhf.rs1
-rw-r--r--vendor/libm/src/math/cbrt.rs1
-rw-r--r--vendor/libm/src/math/cbrtf.rs1
-rw-r--r--vendor/libm/src/math/ceil.rs37
-rw-r--r--vendor/libm/src/math/ceilf.rs25
-rw-r--r--vendor/libm/src/math/copysign.rs1
-rw-r--r--vendor/libm/src/math/copysignf.rs1
-rw-r--r--vendor/libm/src/math/cos.rs1
-rw-r--r--vendor/libm/src/math/cosf.rs1
-rw-r--r--vendor/libm/src/math/cosh.rs1
-rw-r--r--vendor/libm/src/math/coshf.rs1
-rw-r--r--vendor/libm/src/math/erf.rs1
-rw-r--r--vendor/libm/src/math/erff.rs1
-rw-r--r--vendor/libm/src/math/exp.rs3
-rw-r--r--vendor/libm/src/math/exp10.rs5
-rw-r--r--vendor/libm/src/math/exp10f.rs5
-rw-r--r--vendor/libm/src/math/exp2.rs7
-rw-r--r--vendor/libm/src/math/exp2f.rs3
-rw-r--r--vendor/libm/src/math/expf.rs3
-rw-r--r--vendor/libm/src/math/expm1.rs1
-rw-r--r--vendor/libm/src/math/expm1f.rs1
-rw-r--r--vendor/libm/src/math/expo2.rs1
-rw-r--r--vendor/libm/src/math/fabs.rs25
-rw-r--r--vendor/libm/src/math/fabsf.rs27
-rw-r--r--vendor/libm/src/math/fdim.rs1
-rw-r--r--vendor/libm/src/math/fdimf.rs1
-rw-r--r--vendor/libm/src/math/fenv.rs20
-rw-r--r--vendor/libm/src/math/floor.rs42
-rw-r--r--vendor/libm/src/math/floorf.rs24
-rw-r--r--vendor/libm/src/math/fma.rs52
-rw-r--r--vendor/libm/src/math/fmaf.rs36
-rw-r--r--vendor/libm/src/math/fmax.rs1
-rw-r--r--vendor/libm/src/math/fmaxf.rs1
-rw-r--r--vendor/libm/src/math/fmin.rs1
-rw-r--r--vendor/libm/src/math/fminf.rs1
-rw-r--r--vendor/libm/src/math/fmod.rs1
-rw-r--r--vendor/libm/src/math/fmodf.rs1
-rw-r--r--vendor/libm/src/math/hypot.rs2
-rw-r--r--vendor/libm/src/math/hypotf.rs1
-rw-r--r--vendor/libm/src/math/ilogb.rs1
-rw-r--r--vendor/libm/src/math/ilogbf.rs1
-rw-r--r--vendor/libm/src/math/j1f.rs24
-rw-r--r--vendor/libm/src/math/k_cos.rs1
-rw-r--r--vendor/libm/src/math/k_cosf.rs1
-rw-r--r--vendor/libm/src/math/k_expo2.rs1
-rw-r--r--vendor/libm/src/math/k_expo2f.rs1
-rw-r--r--vendor/libm/src/math/k_sin.rs1
-rw-r--r--vendor/libm/src/math/k_sinf.rs1
-rw-r--r--vendor/libm/src/math/k_tan.rs2
-rw-r--r--vendor/libm/src/math/k_tanf.rs1
-rw-r--r--vendor/libm/src/math/ldexp.rs1
-rw-r--r--vendor/libm/src/math/ldexpf.rs1
-rw-r--r--vendor/libm/src/math/lgamma.rs1
-rw-r--r--vendor/libm/src/math/lgamma_r.rs7
-rw-r--r--vendor/libm/src/math/lgammaf.rs1
-rw-r--r--vendor/libm/src/math/lgammaf_r.rs7
-rw-r--r--vendor/libm/src/math/log.rs1
-rw-r--r--vendor/libm/src/math/log10.rs1
-rw-r--r--vendor/libm/src/math/log10f.rs1
-rw-r--r--vendor/libm/src/math/log1p.rs1
-rw-r--r--vendor/libm/src/math/log1pf.rs1
-rw-r--r--vendor/libm/src/math/log2.rs1
-rw-r--r--vendor/libm/src/math/log2f.rs1
-rw-r--r--vendor/libm/src/math/logf.rs1
-rw-r--r--vendor/libm/src/math/mod.rs40
-rw-r--r--vendor/libm/src/math/nextafter.rs37
-rw-r--r--vendor/libm/src/math/nextafterf.rs37
-rw-r--r--vendor/libm/src/math/pow.rs23
-rw-r--r--vendor/libm/src/math/powf.rs13
-rw-r--r--vendor/libm/src/math/rem_pio2.rs82
-rw-r--r--vendor/libm/src/math/rem_pio2_large.rs13
-rw-r--r--vendor/libm/src/math/rem_pio2f.rs8
-rw-r--r--vendor/libm/src/math/remainder.rs5
-rw-r--r--vendor/libm/src/math/remainderf.rs5
-rw-r--r--vendor/libm/src/math/remquo.rs15
-rw-r--r--vendor/libm/src/math/remquof.rs1
-rw-r--r--vendor/libm/src/math/rint.rs48
-rw-r--r--vendor/libm/src/math/rintf.rs48
-rw-r--r--vendor/libm/src/math/round.rs49
-rw-r--r--vendor/libm/src/math/roundf.rs51
-rw-r--r--vendor/libm/src/math/scalbn.rs1
-rw-r--r--vendor/libm/src/math/scalbnf.rs1
-rw-r--r--vendor/libm/src/math/sin.rs6
-rw-r--r--vendor/libm/src/math/sincos.rs79
-rw-r--r--vendor/libm/src/math/sincosf.rs82
-rw-r--r--vendor/libm/src/math/sinf.rs1
-rw-r--r--vendor/libm/src/math/sinh.rs1
-rw-r--r--vendor/libm/src/math/sinhf.rs1
-rw-r--r--vendor/libm/src/math/sqrt.rs259
-rw-r--r--vendor/libm/src/math/sqrtf.rs176
-rw-r--r--vendor/libm/src/math/tan.rs1
-rw-r--r--vendor/libm/src/math/tanf.rs1
-rw-r--r--vendor/libm/src/math/tanh.rs1
-rw-r--r--vendor/libm/src/math/tanhf.rs1
-rw-r--r--vendor/libm/src/math/tgamma.rs13
-rw-r--r--vendor/libm/src/math/tgammaf.rs1
-rw-r--r--vendor/libm/src/math/trunc.rs1
-rw-r--r--vendor/libm/src/math/truncf.rs3
111 files changed, 1083 insertions, 446 deletions
diff --git a/vendor/libm/src/math/acos.rs b/vendor/libm/src/math/acos.rs
index d5e1f6865..23b13251e 100644
--- a/vendor/libm/src/math/acos.rs
+++ b/vendor/libm/src/math/acos.rs
@@ -48,7 +48,6 @@ const QS2: f64 = 2.02094576023350569471e+00; /* 0x40002AE5, 0x9C598AC8 */
const QS3: f64 = -6.88283971605453293030e-01; /* 0xBFE6066C, 0x1B8D0159 */
const QS4: f64 = 7.70381505559019352791e-02; /* 0x3FB3B8C5, 0xB12E9282 */
-#[inline]
fn r(z: f64) -> f64 {
let p: f64 = z * (PS0 + z * (PS1 + z * (PS2 + z * (PS3 + z * (PS4 + z * PS5)))));
let q: f64 = 1.0 + z * (QS1 + z * (QS2 + z * (QS3 + z * QS4)));
@@ -60,7 +59,6 @@ fn r(z: f64) -> f64 {
/// Computes the inverse cosine (arc cosine) of the input value.
/// Arguments must be in the range -1 to 1.
/// Returns values in radians, in the range of 0 to pi.
-#[inline]
#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
pub fn acos(x: f64) -> f64 {
let x1p_120f = f64::from_bits(0x3870000000000000); // 0x1p-120 === 2 ^ -120
diff --git a/vendor/libm/src/math/acosf.rs b/vendor/libm/src/math/acosf.rs
index d0598e811..1a60479e3 100644
--- a/vendor/libm/src/math/acosf.rs
+++ b/vendor/libm/src/math/acosf.rs
@@ -22,7 +22,6 @@ const P_S1: f32 = -4.2743422091e-02;
const P_S2: f32 = -8.6563630030e-03;
const Q_S1: f32 = -7.0662963390e-01;
-#[inline]
fn r(z: f32) -> f32 {
let p = z * (P_S0 + z * (P_S1 + z * P_S2));
let q = 1. + z * Q_S1;
@@ -34,7 +33,6 @@ fn r(z: f32) -> f32 {
/// Computes the inverse cosine (arc cosine) of the input value.
/// Arguments must be in the range -1 to 1.
/// Returns values in radians, in the range of 0 to pi.
-#[inline]
#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
pub fn acosf(x: f32) -> f32 {
let x1p_120 = f32::from_bits(0x03800000); // 0x1p-120 === 2 ^ (-120)
diff --git a/vendor/libm/src/math/acosh.rs b/vendor/libm/src/math/acosh.rs
index ac7a5f1c6..d1f5b9fa9 100644
--- a/vendor/libm/src/math/acosh.rs
+++ b/vendor/libm/src/math/acosh.rs
@@ -7,6 +7,7 @@ const LN2: f64 = 0.693147180559945309417232121458176568; /* 0x3fe62e42, 0xfefa3
/// Calculates the inverse hyperbolic cosine of `x`.
/// Is defined as `log(x + sqrt(x*x-1))`.
/// `x` must be a number greater than or equal to 1.
+#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
pub fn acosh(x: f64) -> f64 {
let u = x.to_bits();
let e = ((u >> 52) as usize) & 0x7ff;
diff --git a/vendor/libm/src/math/acoshf.rs b/vendor/libm/src/math/acoshf.rs
index 0879e1edb..ad3455fdd 100644
--- a/vendor/libm/src/math/acoshf.rs
+++ b/vendor/libm/src/math/acoshf.rs
@@ -7,6 +7,7 @@ const LN2: f32 = 0.693147180559945309417232121458176568;
/// Calculates the inverse hyperbolic cosine of `x`.
/// Is defined as `log(x + sqrt(x*x-1))`.
/// `x` must be a number greater than or equal to 1.
+#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
pub fn acoshf(x: f32) -> f32 {
let u = x.to_bits();
let a = u & 0x7fffffff;
diff --git a/vendor/libm/src/math/asin.rs b/vendor/libm/src/math/asin.rs
index 774475e51..3e4b7c56e 100644
--- a/vendor/libm/src/math/asin.rs
+++ b/vendor/libm/src/math/asin.rs
@@ -55,7 +55,6 @@ const Q_S2: f64 = 2.02094576023350569471e+00; /* 0x40002AE5, 0x9C598AC8 */
const Q_S3: f64 = -6.88283971605453293030e-01; /* 0xBFE6066C, 0x1B8D0159 */
const Q_S4: f64 = 7.70381505559019352791e-02; /* 0x3FB3B8C5, 0xB12E9282 */
-#[inline]
fn comp_r(z: f64) -> f64 {
let p = z * (P_S0 + z * (P_S1 + z * (P_S2 + z * (P_S3 + z * (P_S4 + z * P_S5)))));
let q = 1.0 + z * (Q_S1 + z * (Q_S2 + z * (Q_S3 + z * Q_S4)));
@@ -67,7 +66,6 @@ fn comp_r(z: f64) -> f64 {
/// Computes the inverse sine (arc sine) of the argument `x`.
/// Arguments to asin must be in the range -1 to 1.
/// Returns values in radians, in the range of -pi/2 to pi/2.
-#[inline]
#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
pub fn asin(mut x: f64) -> f64 {
let z: f64;
diff --git a/vendor/libm/src/math/asinf.rs b/vendor/libm/src/math/asinf.rs
index ce0f4a997..6ec61b629 100644
--- a/vendor/libm/src/math/asinf.rs
+++ b/vendor/libm/src/math/asinf.rs
@@ -24,7 +24,6 @@ const P_S1: f32 = -4.2743422091e-02;
const P_S2: f32 = -8.6563630030e-03;
const Q_S1: f32 = -7.0662963390e-01;
-#[inline]
fn r(z: f32) -> f32 {
let p = z * (P_S0 + z * (P_S1 + z * P_S2));
let q = 1. + z * Q_S1;
@@ -36,7 +35,6 @@ fn r(z: f32) -> f32 {
/// Computes the inverse sine (arc sine) of the argument `x`.
/// Arguments to asin must be in the range -1 to 1.
/// Returns values in radians, in the range of -pi/2 to pi/2.
-#[inline]
#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
pub fn asinf(mut x: f32) -> f32 {
let x1p_120 = f64::from_bits(0x3870000000000000); // 0x1p-120 === 2 ^ (-120)
diff --git a/vendor/libm/src/math/asinh.rs b/vendor/libm/src/math/asinh.rs
index 14295357a..0abd80c2f 100644
--- a/vendor/libm/src/math/asinh.rs
+++ b/vendor/libm/src/math/asinh.rs
@@ -7,6 +7,7 @@ const LN2: f64 = 0.693147180559945309417232121458176568; /* 0x3fe62e42, 0xfefa3
///
/// Calculates the inverse hyperbolic sine of `x`.
/// Is defined as `sgn(x)*log(|x|+sqrt(x*x+1))`.
+#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
pub fn asinh(mut x: f64) -> f64 {
let mut u = x.to_bits();
let e = ((u >> 52) as usize) & 0x7ff;
diff --git a/vendor/libm/src/math/asinhf.rs b/vendor/libm/src/math/asinhf.rs
index e22a29132..09c77823e 100644
--- a/vendor/libm/src/math/asinhf.rs
+++ b/vendor/libm/src/math/asinhf.rs
@@ -7,6 +7,7 @@ const LN2: f32 = 0.693147180559945309417232121458176568;
///
/// Calculates the inverse hyperbolic sine of `x`.
/// Is defined as `sgn(x)*log(|x|+sqrt(x*x+1))`.
+#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
pub fn asinhf(mut x: f32) -> f32 {
let u = x.to_bits();
let i = u & 0x7fffffff;
diff --git a/vendor/libm/src/math/atan.rs b/vendor/libm/src/math/atan.rs
index d2684ece8..4259dc71a 100644
--- a/vendor/libm/src/math/atan.rs
+++ b/vendor/libm/src/math/atan.rs
@@ -64,7 +64,6 @@ const AT: [f64; 11] = [
///
/// Computes the inverse tangent (arc tangent) of the input value.
/// Returns a value in radians, in the range of -pi/2 to pi/2.
-#[inline]
#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
pub fn atan(x: f64) -> f64 {
let mut x = x;
diff --git a/vendor/libm/src/math/atan2.rs b/vendor/libm/src/math/atan2.rs
index 08385cd10..fb2ea4eda 100644
--- a/vendor/libm/src/math/atan2.rs
+++ b/vendor/libm/src/math/atan2.rs
@@ -48,7 +48,6 @@ const PI_LO: f64 = 1.2246467991473531772E-16; /* 0x3CA1A626, 0x33145C07 */
/// Computes the inverse tangent (arc tangent) of `y/x`.
/// Produces the correct result even for angles near pi/2 or -pi/2 (that is, when `x` is near 0).
/// Returns a value in radians, in the range of -pi to pi.
-#[inline]
#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
pub fn atan2(y: f64, x: f64) -> f64 {
if x.is_nan() || y.is_nan() {
diff --git a/vendor/libm/src/math/atan2f.rs b/vendor/libm/src/math/atan2f.rs
index 7bbe5f1d4..eae3b002d 100644
--- a/vendor/libm/src/math/atan2f.rs
+++ b/vendor/libm/src/math/atan2f.rs
@@ -24,7 +24,6 @@ const PI_LO: f32 = -8.7422776573e-08; /* 0xb3bbbd2e */
/// Computes the inverse tangent (arc tangent) of `y/x`.
/// Produces the correct result even for angles near pi/2 or -pi/2 (that is, when `x` is near 0).
/// Returns a value in radians, in the range of -pi to pi.
-#[inline]
#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
pub fn atan2f(y: f32, x: f32) -> f32 {
if x.is_nan() || y.is_nan() {
diff --git a/vendor/libm/src/math/atanf.rs b/vendor/libm/src/math/atanf.rs
index 363e11d64..d042b3bc0 100644
--- a/vendor/libm/src/math/atanf.rs
+++ b/vendor/libm/src/math/atanf.rs
@@ -41,7 +41,6 @@ const A_T: [f32; 5] = [
///
/// Computes the inverse tangent (arc tangent) of the input value.
/// Returns a value in radians, in the range of -pi/2 to pi/2.
-#[inline]
#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
pub fn atanf(mut x: f32) -> f32 {
let x1p_120 = f32::from_bits(0x03800000); // 0x1p-120 === 2 ^ (-120)
@@ -57,7 +56,7 @@ pub fn atanf(mut x: f32) -> f32 {
if x.is_nan() {
return x;
}
- z = ATAN_HI[3] + x1p_120;
+ z = i!(ATAN_HI, 3) + x1p_120;
return if sign { -z } else { z };
}
let id = if ix < 0x3ee00000 {
@@ -98,13 +97,13 @@ pub fn atanf(mut x: f32) -> f32 {
z = x * x;
let w = z * z;
/* break sum from i=0 to 10 aT[i]z**(i+1) into odd and even poly */
- let s1 = z * (A_T[0] + w * (A_T[2] + w * A_T[4]));
- let s2 = w * (A_T[1] + w * A_T[3]);
+ let s1 = z * (i!(A_T, 0) + w * (i!(A_T, 2) + w * i!(A_T, 4)));
+ let s2 = w * (i!(A_T, 1) + w * i!(A_T, 3));
if id < 0 {
return x - x * (s1 + s2);
}
let id = id as usize;
- let z = ATAN_HI[id] - ((x * (s1 + s2) - ATAN_LO[id]) - x);
+ let z = i!(ATAN_HI, id) - ((x * (s1 + s2) - i!(ATAN_LO, id)) - x);
if sign {
-z
} else {
diff --git a/vendor/libm/src/math/atanh.rs b/vendor/libm/src/math/atanh.rs
index 79a989c42..b984c4ac6 100644
--- a/vendor/libm/src/math/atanh.rs
+++ b/vendor/libm/src/math/atanh.rs
@@ -5,6 +5,7 @@ use super::log1p;
///
/// Calculates the inverse hyperbolic tangent of `x`.
/// Is defined as `log((1+x)/(1-x))/2 = log1p(2x/(1-x))/2`.
+#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
pub fn atanh(x: f64) -> f64 {
let u = x.to_bits();
let e = ((u >> 52) as usize) & 0x7ff;
diff --git a/vendor/libm/src/math/atanhf.rs b/vendor/libm/src/math/atanhf.rs
index 7b2f34d97..a1aa314a5 100644
--- a/vendor/libm/src/math/atanhf.rs
+++ b/vendor/libm/src/math/atanhf.rs
@@ -5,6 +5,7 @@ use super::log1pf;
///
/// Calculates the inverse hyperbolic tangent of `x`.
/// Is defined as `log((1+x)/(1-x))/2 = log1p(2x/(1-x))/2`.
+#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
pub fn atanhf(mut x: f32) -> f32 {
let mut u = x.to_bits();
let sign = (u >> 31) != 0;
diff --git a/vendor/libm/src/math/cbrt.rs b/vendor/libm/src/math/cbrt.rs
index 04469b159..b4e77eaa2 100644
--- a/vendor/libm/src/math/cbrt.rs
+++ b/vendor/libm/src/math/cbrt.rs
@@ -30,7 +30,6 @@ const P4: f64 = 0.145996192886612446982; /* 0x3fc2b000, 0xd4e4edd7 */
// Cube root (f64)
///
/// Computes the cube root of the argument.
-#[inline]
#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
pub fn cbrt(x: f64) -> f64 {
let x1p54 = f64::from_bits(0x4350000000000000); // 0x1p54 === 2 ^ 54
diff --git a/vendor/libm/src/math/cbrtf.rs b/vendor/libm/src/math/cbrtf.rs
index 6e589c099..9d70305c6 100644
--- a/vendor/libm/src/math/cbrtf.rs
+++ b/vendor/libm/src/math/cbrtf.rs
@@ -25,7 +25,6 @@ const B2: u32 = 642849266; /* B2 = (127-127.0/3-24/3-0.03306235651)*2**23 */
/// Cube root (f32)
///
/// Computes the cube root of the argument.
-#[inline]
#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
pub fn cbrtf(x: f32) -> f32 {
let x1p24 = f32::from_bits(0x4b800000); // 0x1p24f === 2 ^ 24
diff --git a/vendor/libm/src/math/ceil.rs b/vendor/libm/src/math/ceil.rs
index 59883a8a7..22d892971 100644
--- a/vendor/libm/src/math/ceil.rs
+++ b/vendor/libm/src/math/ceil.rs
@@ -1,3 +1,4 @@
+#![allow(unreachable_code)]
use core::f64;
const TOINT: f64 = 1. / f64::EPSILON;
@@ -5,7 +6,6 @@ const TOINT: f64 = 1. / f64::EPSILON;
/// Ceil (f64)
///
/// Finds the nearest integer greater than or equal to `x`.
-#[inline]
#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
pub fn ceil(x: f64) -> f64 {
// On wasm32 we know that LLVM's intrinsic will compile to an optimized
@@ -16,6 +16,24 @@ pub fn ceil(x: f64) -> f64 {
return unsafe { ::core::intrinsics::ceilf64(x) }
}
}
+ #[cfg(all(target_arch = "x86", not(target_feature = "sse2")))]
+ {
+ //use an alternative implementation on x86, because the
+ //main implementation fails with the x87 FPU used by
+ //debian i386, probablly due to excess precision issues.
+ //basic implementation taken from https://github.com/rust-lang/libm/issues/219
+ use super::fabs;
+ if fabs(x).to_bits() < 4503599627370496.0_f64.to_bits() {
+ let truncated = x as i64 as f64;
+ if truncated < x {
+ return truncated + 1.0;
+ } else {
+ return truncated;
+ }
+ } else {
+ return x;
+ }
+ }
let u: u64 = x.to_bits();
let e: i64 = (u >> 52 & 0x7ff) as i64;
let y: f64;
@@ -43,9 +61,22 @@ pub fn ceil(x: f64) -> f64 {
#[cfg(test)]
mod tests {
+ use super::*;
+ use core::f64::*;
+
#[test]
fn sanity_check() {
- assert_eq!(super::ceil(1.1), 2.0);
- assert_eq!(super::ceil(2.9), 3.0);
+ assert_eq!(ceil(1.1), 2.0);
+ assert_eq!(ceil(2.9), 3.0);
+ }
+
+ /// The spec: https://en.cppreference.com/w/cpp/numeric/math/ceil
+ #[test]
+ fn spec_tests() {
+ // Not Asserted: that the current rounding mode has no effect.
+ assert!(ceil(NAN).is_nan());
+ for f in [0.0, -0.0, INFINITY, NEG_INFINITY].iter().copied() {
+ assert_eq!(ceil(f), f);
+ }
}
}
diff --git a/vendor/libm/src/math/ceilf.rs b/vendor/libm/src/math/ceilf.rs
index 151a4f210..7bcc647ca 100644
--- a/vendor/libm/src/math/ceilf.rs
+++ b/vendor/libm/src/math/ceilf.rs
@@ -3,7 +3,6 @@ use core::f32;
/// Ceil (f32)
///
/// Finds the nearest integer greater than or equal to `x`.
-#[inline]
#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
pub fn ceilf(x: f32) -> f32 {
// On wasm32 we know that LLVM's intrinsic will compile to an optimized
@@ -40,3 +39,27 @@ pub fn ceilf(x: f32) -> f32 {
}
f32::from_bits(ui)
}
+
+// PowerPC tests are failing on LLVM 13: https://github.com/rust-lang/rust/issues/88520
+#[cfg(not(target_arch = "powerpc64"))]
+#[cfg(test)]
+mod tests {
+ use super::*;
+ use core::f32::*;
+
+ #[test]
+ fn sanity_check() {
+ assert_eq!(ceilf(1.1), 2.0);
+ assert_eq!(ceilf(2.9), 3.0);
+ }
+
+ /// The spec: https://en.cppreference.com/w/cpp/numeric/math/ceil
+ #[test]
+ fn spec_tests() {
+ // Not Asserted: that the current rounding mode has no effect.
+ assert!(ceilf(NAN).is_nan());
+ for f in [0.0, -0.0, INFINITY, NEG_INFINITY].iter().copied() {
+ assert_eq!(ceilf(f), f);
+ }
+ }
+}
diff --git a/vendor/libm/src/math/copysign.rs b/vendor/libm/src/math/copysign.rs
index 1527fb6ea..1f4a35a33 100644
--- a/vendor/libm/src/math/copysign.rs
+++ b/vendor/libm/src/math/copysign.rs
@@ -2,6 +2,7 @@
///
/// Constructs a number with the magnitude (absolute value) of its
/// first argument, `x`, and the sign of its second argument, `y`.
+#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
pub fn copysign(x: f64, y: f64) -> f64 {
let mut ux = x.to_bits();
let uy = y.to_bits();
diff --git a/vendor/libm/src/math/copysignf.rs b/vendor/libm/src/math/copysignf.rs
index 35148561a..6c346e3a5 100644
--- a/vendor/libm/src/math/copysignf.rs
+++ b/vendor/libm/src/math/copysignf.rs
@@ -2,6 +2,7 @@
///
/// Constructs a number with the magnitude (absolute value) of its
/// first argument, `x`, and the sign of its second argument, `y`.
+#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
pub fn copysignf(x: f32, y: f32) -> f32 {
let mut ux = x.to_bits();
let uy = y.to_bits();
diff --git a/vendor/libm/src/math/cos.rs b/vendor/libm/src/math/cos.rs
index fe5a89919..db8bc4989 100644
--- a/vendor/libm/src/math/cos.rs
+++ b/vendor/libm/src/math/cos.rs
@@ -41,7 +41,6 @@ use super::{k_cos, k_sin, rem_pio2};
// Accuracy:
// TRIG(x) returns trig(x) nearly rounded
//
-#[inline]
#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
pub fn cos(x: f64) -> f64 {
let ix = (f64::to_bits(x) >> 32) as u32 & 0x7fffffff;
diff --git a/vendor/libm/src/math/cosf.rs b/vendor/libm/src/math/cosf.rs
index 48d76c8ee..424fa42ed 100644
--- a/vendor/libm/src/math/cosf.rs
+++ b/vendor/libm/src/math/cosf.rs
@@ -24,7 +24,6 @@ const C2_PIO2: f64 = 2. * FRAC_PI_2; /* 0x400921FB, 0x54442D18 */
const C3_PIO2: f64 = 3. * FRAC_PI_2; /* 0x4012D97C, 0x7F3321D2 */
const C4_PIO2: f64 = 4. * FRAC_PI_2; /* 0x401921FB, 0x54442D18 */
-#[inline]
#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
pub fn cosf(x: f32) -> f32 {
let x64 = x as f64;
diff --git a/vendor/libm/src/math/cosh.rs b/vendor/libm/src/math/cosh.rs
index bac875566..2fb568ab3 100644
--- a/vendor/libm/src/math/cosh.rs
+++ b/vendor/libm/src/math/cosh.rs
@@ -7,7 +7,6 @@ use super::k_expo2;
/// Computes the hyperbolic cosine of the argument x.
/// Is defined as `(exp(x) + exp(-x))/2`
/// Angles are specified in radians.
-#[inline]
#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
pub fn cosh(mut x: f64) -> f64 {
/* |x| */
diff --git a/vendor/libm/src/math/coshf.rs b/vendor/libm/src/math/coshf.rs
index bf99e42f0..e7b684587 100644
--- a/vendor/libm/src/math/coshf.rs
+++ b/vendor/libm/src/math/coshf.rs
@@ -7,7 +7,6 @@ use super::k_expo2f;
/// Computes the hyperbolic cosine of the argument x.
/// Is defined as `(exp(x) + exp(-x))/2`
/// Angles are specified in radians.
-#[inline]
#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
pub fn coshf(mut x: f32) -> f32 {
let x1p120 = f32::from_bits(0x7b800000); // 0x1p120f === 2 ^ 120
diff --git a/vendor/libm/src/math/erf.rs b/vendor/libm/src/math/erf.rs
index a2c617d34..5e21ba578 100644
--- a/vendor/libm/src/math/erf.rs
+++ b/vendor/libm/src/math/erf.rs
@@ -219,6 +219,7 @@ fn erfc2(ix: u32, mut x: f64) -> f64 {
/// Calculates an approximation to the “error function”, which estimates
/// the probability that an observation will fall within x standard
/// deviations of the mean (assuming a normal distribution).
+#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
pub fn erf(x: f64) -> f64 {
let r: f64;
let s: f64;
diff --git a/vendor/libm/src/math/erff.rs b/vendor/libm/src/math/erff.rs
index 384052293..f74d4b632 100644
--- a/vendor/libm/src/math/erff.rs
+++ b/vendor/libm/src/math/erff.rs
@@ -130,6 +130,7 @@ fn erfc2(mut ix: u32, mut x: f32) -> f32 {
/// Calculates an approximation to the “error function”, which estimates
/// the probability that an observation will fall within x standard
/// deviations of the mean (assuming a normal distribution).
+#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
pub fn erff(x: f32) -> f32 {
let r: f32;
let s: f32;
diff --git a/vendor/libm/src/math/exp.rs b/vendor/libm/src/math/exp.rs
index 5465b5693..d4994277f 100644
--- a/vendor/libm/src/math/exp.rs
+++ b/vendor/libm/src/math/exp.rs
@@ -81,7 +81,6 @@ const P5: f64 = 4.13813679705723846039e-08; /* 0x3E663769, 0x72BEA4D0 */
///
/// Calculate the exponential of `x`, that is, *e* raised to the power `x`
/// (where *e* is the base of the natural system of logarithms, approximately 2.71828).
-#[inline]
#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
pub fn exp(mut x: f64) -> f64 {
let x1p1023 = f64::from_bits(0x7fe0000000000000); // 0x1p1023 === 2 ^ 1023
@@ -125,7 +124,7 @@ pub fn exp(mut x: f64) -> f64 {
/* if |x| > 0.5 ln2 */
if hx >= 0x3ff0a2b2 {
/* if |x| >= 1.5 ln2 */
- k = (INVLN2 * x + HALF[sign as usize]) as i32;
+ k = (INVLN2 * x + i!(HALF, sign as usize)) as i32;
} else {
k = 1 - sign - sign;
}
diff --git a/vendor/libm/src/math/exp10.rs b/vendor/libm/src/math/exp10.rs
index 9537f76f1..559930e10 100644
--- a/vendor/libm/src/math/exp10.rs
+++ b/vendor/libm/src/math/exp10.rs
@@ -6,16 +6,17 @@ const P10: &[f64] = &[
1e0, 1e1, 1e2, 1e3, 1e4, 1e5, 1e6, 1e7, 1e8, 1e9, 1e10, 1e11, 1e12, 1e13, 1e14, 1e15,
];
+#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
pub fn exp10(x: f64) -> f64 {
let (mut y, n) = modf(x);
let u: u64 = n.to_bits();
/* fabs(n) < 16 without raising invalid on nan */
if (u >> 52 & 0x7ff) < 0x3ff + 4 {
if y == 0.0 {
- return P10[((n as isize) + 15) as usize];
+ return i!(P10, ((n as isize) + 15) as usize);
}
y = exp2(LN10 * y);
- return y * P10[((n as isize) + 15) as usize];
+ return y * i!(P10, ((n as isize) + 15) as usize);
}
return pow(10.0, x);
}
diff --git a/vendor/libm/src/math/exp10f.rs b/vendor/libm/src/math/exp10f.rs
index d45fff36e..1279bc6c5 100644
--- a/vendor/libm/src/math/exp10f.rs
+++ b/vendor/libm/src/math/exp10f.rs
@@ -6,16 +6,17 @@ const P10: &[f32] = &[
1e-7, 1e-6, 1e-5, 1e-4, 1e-3, 1e-2, 1e-1, 1e0, 1e1, 1e2, 1e3, 1e4, 1e5, 1e6, 1e7,
];
+#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
pub fn exp10f(x: f32) -> f32 {
let (mut y, n) = modff(x);
let u = n.to_bits();
/* fabsf(n) < 8 without raising invalid on nan */
if (u >> 23 & 0xff) < 0x7f + 3 {
if y == 0.0 {
- return P10[((n as isize) + 7) as usize];
+ return i!(P10, ((n as isize) + 7) as usize);
}
y = exp2f(LN10_F32 * y);
- return y * P10[((n as isize) + 7) as usize];
+ return y * i!(P10, ((n as isize) + 7) as usize);
}
return exp2(LN10_F64 * (x as f64)) as f32;
}
diff --git a/vendor/libm/src/math/exp2.rs b/vendor/libm/src/math/exp2.rs
index c2192fde5..e0e385df2 100644
--- a/vendor/libm/src/math/exp2.rs
+++ b/vendor/libm/src/math/exp2.rs
@@ -322,7 +322,6 @@ static TBL: [u64; TBLSIZE * 2] = [
/// Exponential, base 2 (f64)
///
/// Calculate `2^x`, that is, 2 raised to the power `x`.
-#[inline]
#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
pub fn exp2(mut x: f64) -> f64 {
let redux = f64::from_bits(0x4338000000000000) / TBLSIZE as f64;
@@ -375,14 +374,14 @@ pub fn exp2(mut x: f64) -> f64 {
let mut i0 = ui as u32;
i0 = i0.wrapping_add(TBLSIZE as u32 / 2);
let ku = i0 / TBLSIZE as u32 * TBLSIZE as u32;
- let ki = ku as i32 / TBLSIZE as i32;
+ let ki = div!(ku as i32, TBLSIZE as i32);
i0 %= TBLSIZE as u32;
let uf = f64::from_bits(ui) - redux;
let mut z = x - uf;
/* Compute r = exp2(y) = exp2t[i0] * p(z - eps[i]). */
- let t = f64::from_bits(TBL[2 * i0 as usize]); /* exp2t[i0] */
- z -= f64::from_bits(TBL[2 * i0 as usize + 1]); /* eps[i0] */
+ let t = f64::from_bits(i!(TBL, 2 * i0 as usize)); /* exp2t[i0] */
+ z -= f64::from_bits(i!(TBL, 2 * i0 as usize + 1)); /* eps[i0] */
let r = t + t * z * (p1 + z * (p2 + z * (p3 + z * (p4 + z * p5))));
scalbn(r, ki)
diff --git a/vendor/libm/src/math/exp2f.rs b/vendor/libm/src/math/exp2f.rs
index 12c9e76a4..f4867b80e 100644
--- a/vendor/libm/src/math/exp2f.rs
+++ b/vendor/libm/src/math/exp2f.rs
@@ -73,7 +73,6 @@ static EXP2FT: [u64; TBLSIZE] = [
/// Exponential, base 2 (f32)
///
/// Calculate `2^x`, that is, 2 raised to the power `x`.
-#[inline]
#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
pub fn exp2f(mut x: f32) -> f32 {
let redux = f32::from_bits(0x4b400000) / TBLSIZE as f32;
@@ -127,7 +126,7 @@ pub fn exp2f(mut x: f32) -> f32 {
uf -= redux;
let z: f64 = (x - uf) as f64;
/* Compute r = exp2(y) = exp2ft[i0] * p(z). */
- let r: f64 = f64::from_bits(EXP2FT[i0 as usize]);
+ let r: f64 = f64::from_bits(i!(EXP2FT, i0 as usize));
let t: f64 = r as f64 * z;
let r: f64 = r + t * (p1 as f64 + z * p2 as f64) + t * (z * z) * (p3 as f64 + z * p4 as f64);
diff --git a/vendor/libm/src/math/expf.rs b/vendor/libm/src/math/expf.rs
index 09323ec8d..a53aa90a6 100644
--- a/vendor/libm/src/math/expf.rs
+++ b/vendor/libm/src/math/expf.rs
@@ -30,7 +30,6 @@ const P2: f32 = -2.7667332906e-3; /* -0xb55215.0p-32 */
///
/// Calculate the exponential of `x`, that is, *e* raised to the power `x`
/// (where *e* is the base of the natural system of logarithms, approximately 2.71828).
-#[inline]
#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
pub fn expf(mut x: f32) -> f32 {
let x1p127 = f32::from_bits(0x7f000000); // 0x1p127f === 2 ^ 127
@@ -71,7 +70,7 @@ pub fn expf(mut x: f32) -> f32 {
/* if |x| > 0.5 ln2 */
if hx > 0x3f851592 {
/* if |x| > 1.5 ln2 */
- k = (INV_LN2 * x + HALF[sign as usize]) as i32;
+ k = (INV_LN2 * x + i!(HALF, sign as usize)) as i32;
} else {
k = 1 - sign - sign;
}
diff --git a/vendor/libm/src/math/expm1.rs b/vendor/libm/src/math/expm1.rs
index 0d43b4e10..42608509a 100644
--- a/vendor/libm/src/math/expm1.rs
+++ b/vendor/libm/src/math/expm1.rs
@@ -30,7 +30,6 @@ const Q5: f64 = -2.01099218183624371326e-07; /* BE8AFDB7 6E09C32D */
/// system of logarithms, approximately 2.71828).
/// The result is accurate even for small values of `x`,
/// where using `exp(x)-1` would lose many significant digits.
-#[inline]
#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
pub fn expm1(mut x: f64) -> f64 {
let hi: f64;
diff --git a/vendor/libm/src/math/expm1f.rs b/vendor/libm/src/math/expm1f.rs
index 9bb223448..3fc2a247b 100644
--- a/vendor/libm/src/math/expm1f.rs
+++ b/vendor/libm/src/math/expm1f.rs
@@ -32,7 +32,6 @@ const Q2: f32 = 1.5807170421e-3; /* 0xcf3010.0p-33 */
/// system of logarithms, approximately 2.71828).
/// The result is accurate even for small values of `x`,
/// where using `exp(x)-1` would lose many significant digits.
-#[inline]
#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
pub fn expm1f(mut x: f32) -> f32 {
let x1p127 = f32::from_bits(0x7f000000); // 0x1p127f === 2 ^ 127
diff --git a/vendor/libm/src/math/expo2.rs b/vendor/libm/src/math/expo2.rs
index ae6cc8121..82e9b360a 100644
--- a/vendor/libm/src/math/expo2.rs
+++ b/vendor/libm/src/math/expo2.rs
@@ -1,7 +1,6 @@
use super::{combine_words, exp};
/* exp(x)/2 for x >= log(DBL_MAX), slightly better than 0.5*exp(x/2)*exp(x/2) */
-#[inline]
#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
pub(crate) fn expo2(x: f64) -> f64 {
/* k is such that k*ln2 has minimal relative error and x - kln2 > log(DBL_MIN) */
diff --git a/vendor/libm/src/math/fabs.rs b/vendor/libm/src/math/fabs.rs
index 52a9adcbf..b2255ad32 100644
--- a/vendor/libm/src/math/fabs.rs
+++ b/vendor/libm/src/math/fabs.rs
@@ -3,7 +3,6 @@ use core::u64;
/// Absolute value (magnitude) (f64)
/// Calculates the absolute value (magnitude) of the argument `x`,
/// by direct manipulation of the bit representation of `x`.
-#[inline]
#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
pub fn fabs(x: f64) -> f64 {
// On wasm32 we know that LLVM's intrinsic will compile to an optimized
@@ -16,3 +15,27 @@ pub fn fabs(x: f64) -> f64 {
}
f64::from_bits(x.to_bits() & (u64::MAX / 2))
}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+ use core::f64::*;
+
+ #[test]
+ fn sanity_check() {
+ assert_eq!(fabs(-1.0), 1.0);
+ assert_eq!(fabs(2.8), 2.8);
+ }
+
+ /// The spec: https://en.cppreference.com/w/cpp/numeric/math/fabs
+ #[test]
+ fn spec_tests() {
+ assert!(fabs(NAN).is_nan());
+ for f in [0.0, -0.0].iter().copied() {
+ assert_eq!(fabs(f), 0.0);
+ }
+ for f in [INFINITY, NEG_INFINITY].iter().copied() {
+ assert_eq!(fabs(f), INFINITY);
+ }
+ }
+}
diff --git a/vendor/libm/src/math/fabsf.rs b/vendor/libm/src/math/fabsf.rs
index 5942d983a..23f3646dc 100644
--- a/vendor/libm/src/math/fabsf.rs
+++ b/vendor/libm/src/math/fabsf.rs
@@ -1,7 +1,6 @@
/// Absolute value (magnitude) (f32)
/// Calculates the absolute value (magnitude) of the argument `x`,
/// by direct manipulation of the bit representation of `x`.
-#[inline]
#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
pub fn fabsf(x: f32) -> f32 {
// On wasm32 we know that LLVM's intrinsic will compile to an optimized
@@ -14,3 +13,29 @@ pub fn fabsf(x: f32) -> f32 {
}
f32::from_bits(x.to_bits() & 0x7fffffff)
}
+
+// PowerPC tests are failing on LLVM 13: https://github.com/rust-lang/rust/issues/88520
+#[cfg(not(target_arch = "powerpc64"))]
+#[cfg(test)]
+mod tests {
+ use super::*;
+ use core::f32::*;
+
+ #[test]
+ fn sanity_check() {
+ assert_eq!(fabsf(-1.0), 1.0);
+ assert_eq!(fabsf(2.8), 2.8);
+ }
+
+ /// The spec: https://en.cppreference.com/w/cpp/numeric/math/fabs
+ #[test]
+ fn spec_tests() {
+ assert!(fabsf(NAN).is_nan());
+ for f in [0.0, -0.0].iter().copied() {
+ assert_eq!(fabsf(f), 0.0);
+ }
+ for f in [INFINITY, NEG_INFINITY].iter().copied() {
+ assert_eq!(fabsf(f), INFINITY);
+ }
+ }
+}
diff --git a/vendor/libm/src/math/fdim.rs b/vendor/libm/src/math/fdim.rs
index 06edc9960..014930097 100644
--- a/vendor/libm/src/math/fdim.rs
+++ b/vendor/libm/src/math/fdim.rs
@@ -8,7 +8,6 @@ use core::f64;
/// * NAN if either argument is NAN.
///
/// A range error may occur.
-#[inline]
#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
pub fn fdim(x: f64, y: f64) -> f64 {
if x.is_nan() {
diff --git a/vendor/libm/src/math/fdimf.rs b/vendor/libm/src/math/fdimf.rs
index f1ad5896b..ea0b592d7 100644
--- a/vendor/libm/src/math/fdimf.rs
+++ b/vendor/libm/src/math/fdimf.rs
@@ -8,7 +8,6 @@ use core::f32;
/// * NAN if either argument is NAN.
///
/// A range error may occur.
-#[inline]
#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
pub fn fdimf(x: f32, y: f32) -> f32 {
if x.is_nan() {
diff --git a/vendor/libm/src/math/fenv.rs b/vendor/libm/src/math/fenv.rs
index 63bb20368..c91272e82 100644
--- a/vendor/libm/src/math/fenv.rs
+++ b/vendor/libm/src/math/fenv.rs
@@ -1,33 +1,27 @@
// src: musl/src/fenv/fenv.c
/* Dummy functions for archs lacking fenv implementation */
-pub const FE_UNDERFLOW: i32 = 0;
-pub const FE_INEXACT: i32 = 0;
+pub(crate) const FE_UNDERFLOW: i32 = 0;
+pub(crate) const FE_INEXACT: i32 = 0;
-pub const FE_TONEAREST: i32 = 0;
-pub const FE_TOWARDZERO: i32 = 0;
+pub(crate) const FE_TONEAREST: i32 = 0;
#[inline]
-pub fn feclearexcept(_mask: i32) -> i32 {
+pub(crate) fn feclearexcept(_mask: i32) -> i32 {
0
}
#[inline]
-pub fn feraiseexcept(_mask: i32) -> i32 {
+pub(crate) fn feraiseexcept(_mask: i32) -> i32 {
0
}
#[inline]
-pub fn fetestexcept(_mask: i32) -> i32 {
+pub(crate) fn fetestexcept(_mask: i32) -> i32 {
0
}
#[inline]
-pub fn fegetround() -> i32 {
+pub(crate) fn fegetround() -> i32 {
FE_TONEAREST
}
-
-#[inline]
-pub fn fesetround(_r: i32) -> i32 {
- 0
-}
diff --git a/vendor/libm/src/math/floor.rs b/vendor/libm/src/math/floor.rs
index f6068c697..d09f9a1a1 100644
--- a/vendor/libm/src/math/floor.rs
+++ b/vendor/libm/src/math/floor.rs
@@ -1,3 +1,4 @@
+#![allow(unreachable_code)]
use core::f64;
const TOINT: f64 = 1. / f64::EPSILON;
@@ -5,7 +6,6 @@ const TOINT: f64 = 1. / f64::EPSILON;
/// Floor (f64)
///
/// Finds the nearest integer less than or equal to `x`.
-#[inline]
#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
pub fn floor(x: f64) -> f64 {
// On wasm32 we know that LLVM's intrinsic will compile to an optimized
@@ -16,6 +16,24 @@ pub fn floor(x: f64) -> f64 {
return unsafe { ::core::intrinsics::floorf64(x) }
}
}
+ #[cfg(all(target_arch = "x86", not(target_feature = "sse2")))]
+ {
+ //use an alternative implementation on x86, because the
+ //main implementation fails with the x87 FPU used by
+ //debian i386, probably due to excess precision issues.
+ //basic implementation taken from https://github.com/rust-lang/libm/issues/219
+ use super::fabs;
+ if fabs(x).to_bits() < 4503599627370496.0_f64.to_bits() {
+ let truncated = x as i64 as f64;
+ if truncated > x {
+ return truncated - 1.0;
+ } else {
+ return truncated;
+ }
+ } else {
+ return x;
+ }
+ }
let ui = x.to_bits();
let e = ((ui >> 52) & 0x7ff) as i32;
@@ -39,3 +57,25 @@ pub fn floor(x: f64) -> f64 {
x + y
}
}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+ use core::f64::*;
+
+ #[test]
+ fn sanity_check() {
+ assert_eq!(floor(1.1), 1.0);
+ assert_eq!(floor(2.9), 2.0);
+ }
+
+ /// The spec: https://en.cppreference.com/w/cpp/numeric/math/floor
+ #[test]
+ fn spec_tests() {
+ // Not Asserted: that the current rounding mode has no effect.
+ assert!(floor(NAN).is_nan());
+ for f in [0.0, -0.0, INFINITY, NEG_INFINITY].iter().copied() {
+ assert_eq!(floor(f), f);
+ }
+ }
+}
diff --git a/vendor/libm/src/math/floorf.rs b/vendor/libm/src/math/floorf.rs
index ae605e191..dfdab91a0 100644
--- a/vendor/libm/src/math/floorf.rs
+++ b/vendor/libm/src/math/floorf.rs
@@ -1,9 +1,8 @@
use core::f32;
-/// Floor (f64)
+/// Floor (f32)
///
/// Finds the nearest integer less than or equal to `x`.
-#[inline]
#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
pub fn floorf(x: f32) -> f32 {
// On wasm32 we know that LLVM's intrinsic will compile to an optimized
@@ -41,10 +40,27 @@ pub fn floorf(x: f32) -> f32 {
f32::from_bits(ui)
}
+// PowerPC tests are failing on LLVM 13: https://github.com/rust-lang/rust/issues/88520
+#[cfg(not(target_arch = "powerpc64"))]
#[cfg(test)]
mod tests {
+ use super::*;
+ use core::f32::*;
+
#[test]
- fn no_overflow() {
- assert_eq!(super::floorf(0.5), 0.0);
+ fn sanity_check() {
+ assert_eq!(floorf(0.5), 0.0);
+ assert_eq!(floorf(1.1), 1.0);
+ assert_eq!(floorf(2.9), 2.0);
+ }
+
+ /// The spec: https://en.cppreference.com/w/cpp/numeric/math/floor
+ #[test]
+ fn spec_tests() {
+ // Not Asserted: that the current rounding mode has no effect.
+ assert!(floorf(NAN).is_nan());
+ for f in [0.0, -0.0, INFINITY, NEG_INFINITY].iter().copied() {
+ assert_eq!(floorf(f), f);
+ }
}
}
diff --git a/vendor/libm/src/math/fma.rs b/vendor/libm/src/math/fma.rs
index 07d90f8b7..f9a86dc60 100644
--- a/vendor/libm/src/math/fma.rs
+++ b/vendor/libm/src/math/fma.rs
@@ -10,7 +10,6 @@ struct Num {
sign: i32,
}
-#[inline]
fn normalize(x: f64) -> Num {
let x1p63: f64 = f64::from_bits(0x43e0000000000000); // 0x1p63 === 2 ^ 63
@@ -30,7 +29,6 @@ fn normalize(x: f64) -> Num {
Num { m: ix, e, sign }
}
-#[inline]
fn mul(x: u64, y: u64) -> (u64, u64) {
let t1: u64;
let t2: u64;
@@ -53,7 +51,6 @@ fn mul(x: u64, y: u64) -> (u64, u64) {
/// Computes `(x*y)+z`, rounded as one ternary operation:
/// Computes the value (as if) to infinite precision and rounds once to the result format,
/// according to the rounding mode characterized by the value of FLT_ROUNDS.
-#[inline]
#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
pub fn fma(x: f64, y: f64, z: f64) -> f64 {
let x1p63: f64 = f64::from_bits(0x43e0000000000000); // 0x1p63 === 2 ^ 63
@@ -125,12 +122,12 @@ pub fn fma(x: f64, y: f64, z: f64) -> f64 {
rhi += zhi + (rlo < zlo) as u64;
} else {
/* r -= z */
- let t = rlo;
- rlo -= zlo;
- rhi = rhi - zhi - (t < rlo) as u64;
+ let (res, borrow) = rlo.overflowing_sub(zlo);
+ rlo = res;
+ rhi = rhi.wrapping_sub(zhi.wrapping_add(borrow as u64));
if (rhi >> 63) != 0 {
- rlo = (-(rlo as i64)) as u64;
- rhi = (-(rhi as i64)) as u64 - (rlo != 0) as u64;
+ rlo = (rlo as i64).wrapping_neg() as u64;
+ rhi = (rhi as i64).wrapping_neg() as u64 - (rlo != 0) as u64;
sign = (sign == 0) as i32;
}
nonzero = (rhi != 0) as i32;
@@ -205,3 +202,42 @@ pub fn fma(x: f64, y: f64, z: f64) -> f64 {
}
scalbn(r, e)
}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+ #[test]
+ fn fma_segfault() {
+ // These two inputs cause fma to segfault on release due to overflow:
+ assert_eq!(
+ fma(
+ -0.0000000000000002220446049250313,
+ -0.0000000000000002220446049250313,
+ -0.0000000000000002220446049250313
+ ),
+ -0.00000000000000022204460492503126,
+ );
+
+ let result = fma(-0.992, -0.992, -0.992);
+ //force rounding to storage format on x87 to prevent spurious errors.
+ #[cfg(all(target_arch = "x86", not(target_feature = "sse2")))]
+ let result = force_eval!(result);
+ assert_eq!(result, -0.007936000000000007,);
+ }
+
+ #[test]
+ fn fma_sbb() {
+ assert_eq!(
+ fma(-(1.0 - f64::EPSILON), f64::MIN, f64::MIN),
+ -3991680619069439e277
+ );
+ }
+
+ #[test]
+ fn fma_underflow() {
+ assert_eq!(
+ fma(1.1102230246251565e-16, -9.812526705433188e-305, 1.0894e-320),
+ 0.0,
+ );
+ }
+}
diff --git a/vendor/libm/src/math/fmaf.rs b/vendor/libm/src/math/fmaf.rs
index e77e0fa4a..2848f2aee 100644
--- a/vendor/libm/src/math/fmaf.rs
+++ b/vendor/libm/src/math/fmaf.rs
@@ -29,8 +29,7 @@ use core::f32;
use core::ptr::read_volatile;
use super::fenv::{
- feclearexcept, fegetround, feraiseexcept, fesetround, fetestexcept, FE_INEXACT, FE_TONEAREST,
- FE_TOWARDZERO, FE_UNDERFLOW,
+ feclearexcept, fegetround, feraiseexcept, fetestexcept, FE_INEXACT, FE_TONEAREST, FE_UNDERFLOW,
};
/*
@@ -46,7 +45,6 @@ use super::fenv::{
/// Computes `(x*y)+z`, rounded as one ternary operation:
/// Computes the value (as if) to infinite precision and rounds once to the result format,
/// according to the rounding mode characterized by the value of FLT_ROUNDS.
-#[inline]
#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
pub fn fmaf(x: f32, y: f32, mut z: f32) -> f32 {
let xy: f64;
@@ -92,16 +90,28 @@ pub fn fmaf(x: f32, y: f32, mut z: f32) -> f32 {
* If result is inexact, and exactly halfway between two float values,
* we need to adjust the low-order bit in the direction of the error.
*/
- fesetround(FE_TOWARDZERO);
- // prevent `vxy + z` from being CSE'd with `xy + z` above
- let vxy: f64 = unsafe { read_volatile(&xy) };
- let mut adjusted_result: f64 = vxy + z as f64;
- fesetround(FE_TONEAREST);
- if result == adjusted_result {
- ui = adjusted_result.to_bits();
+ let neg = ui >> 63 != 0;
+ let err = if neg == (z as f64 > xy) {
+ xy - result + z as f64
+ } else {
+ z as f64 - result + xy
+ };
+ if neg == (err < 0.0) {
ui += 1;
- adjusted_result = f64::from_bits(ui);
+ } else {
+ ui -= 1;
+ }
+ f64::from_bits(ui) as f32
+}
+
+#[cfg(test)]
+mod tests {
+ #[test]
+ fn issue_263() {
+ let a = f32::from_bits(1266679807);
+ let b = f32::from_bits(1300234242);
+ let c = f32::from_bits(1115553792);
+ let expected = f32::from_bits(1501560833);
+ assert_eq!(super::fmaf(a, b, c), expected);
}
- z = adjusted_result as f32;
- z
}
diff --git a/vendor/libm/src/math/fmax.rs b/vendor/libm/src/math/fmax.rs
index 22016d11c..93c97bc61 100644
--- a/vendor/libm/src/math/fmax.rs
+++ b/vendor/libm/src/math/fmax.rs
@@ -1,4 +1,3 @@
-#[inline]
#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
pub fn fmax(x: f64, y: f64) -> f64 {
// IEEE754 says: maxNum(x, y) is the canonicalized number y if x < y, x if y < x, the
diff --git a/vendor/libm/src/math/fmaxf.rs b/vendor/libm/src/math/fmaxf.rs
index a883fdaef..607746647 100644
--- a/vendor/libm/src/math/fmaxf.rs
+++ b/vendor/libm/src/math/fmaxf.rs
@@ -1,4 +1,3 @@
-#[inline]
#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
pub fn fmaxf(x: f32, y: f32) -> f32 {
// IEEE754 says: maxNum(x, y) is the canonicalized number y if x < y, x if y < x, the
diff --git a/vendor/libm/src/math/fmin.rs b/vendor/libm/src/math/fmin.rs
index d1ccc3a46..ab1509f34 100644
--- a/vendor/libm/src/math/fmin.rs
+++ b/vendor/libm/src/math/fmin.rs
@@ -1,4 +1,3 @@
-#[inline]
#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
pub fn fmin(x: f64, y: f64) -> f64 {
// IEEE754 says: minNum(x, y) is the canonicalized number x if x < y, y if y < x, the
diff --git a/vendor/libm/src/math/fminf.rs b/vendor/libm/src/math/fminf.rs
index 43ec97cb5..0049e7117 100644
--- a/vendor/libm/src/math/fminf.rs
+++ b/vendor/libm/src/math/fminf.rs
@@ -1,4 +1,3 @@
-#[inline]
#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
pub fn fminf(x: f32, y: f32) -> f32 {
// IEEE754 says: minNum(x, y) is the canonicalized number x if x < y, y if y < x, the
diff --git a/vendor/libm/src/math/fmod.rs b/vendor/libm/src/math/fmod.rs
index 2cdd8a9ba..d892ffd8b 100644
--- a/vendor/libm/src/math/fmod.rs
+++ b/vendor/libm/src/math/fmod.rs
@@ -1,6 +1,5 @@
use core::u64;
-#[inline]
#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
pub fn fmod(x: f64, y: f64) -> f64 {
let mut uxi = x.to_bits();
diff --git a/vendor/libm/src/math/fmodf.rs b/vendor/libm/src/math/fmodf.rs
index 3e6779a93..c53dc186a 100644
--- a/vendor/libm/src/math/fmodf.rs
+++ b/vendor/libm/src/math/fmodf.rs
@@ -1,7 +1,6 @@
use core::f32;
use core::u32;
-#[inline]
#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
pub fn fmodf(x: f32, y: f32) -> f32 {
let mut uxi = x.to_bits();
diff --git a/vendor/libm/src/math/hypot.rs b/vendor/libm/src/math/hypot.rs
index e53baf539..da458ea1d 100644
--- a/vendor/libm/src/math/hypot.rs
+++ b/vendor/libm/src/math/hypot.rs
@@ -4,7 +4,6 @@ use super::sqrt;
const SPLIT: f64 = 134217728. + 1.; // 0x1p27 + 1 === (2 ^ 27) + 1
-#[inline]
fn sq(x: f64) -> (f64, f64) {
let xh: f64;
let xl: f64;
@@ -18,7 +17,6 @@ fn sq(x: f64) -> (f64, f64) {
(hi, lo)
}
-#[inline]
#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
pub fn hypot(mut x: f64, mut y: f64) -> f64 {
let x1p700 = f64::from_bits(0x6bb0000000000000); // 0x1p700 === 2 ^ 700
diff --git a/vendor/libm/src/math/hypotf.rs b/vendor/libm/src/math/hypotf.rs
index 4636b8f1d..576eebb33 100644
--- a/vendor/libm/src/math/hypotf.rs
+++ b/vendor/libm/src/math/hypotf.rs
@@ -2,7 +2,6 @@ use core::f32;
use super::sqrtf;
-#[inline]
#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
pub fn hypotf(mut x: f32, mut y: f32) -> f32 {
let x1p90 = f32::from_bits(0x6c800000); // 0x1p90f === 2 ^ 90
diff --git a/vendor/libm/src/math/ilogb.rs b/vendor/libm/src/math/ilogb.rs
index 0a380b7ef..7d74dcfb6 100644
--- a/vendor/libm/src/math/ilogb.rs
+++ b/vendor/libm/src/math/ilogb.rs
@@ -1,6 +1,7 @@
const FP_ILOGBNAN: i32 = -1 - 0x7fffffff;
const FP_ILOGB0: i32 = FP_ILOGBNAN;
+#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
pub fn ilogb(x: f64) -> i32 {
let mut i: u64 = x.to_bits();
let e = ((i >> 52) & 0x7ff) as i32;
diff --git a/vendor/libm/src/math/ilogbf.rs b/vendor/libm/src/math/ilogbf.rs
index b384fa4b2..0fa58748c 100644
--- a/vendor/libm/src/math/ilogbf.rs
+++ b/vendor/libm/src/math/ilogbf.rs
@@ -1,6 +1,7 @@
const FP_ILOGBNAN: i32 = -1 - 0x7fffffff;
const FP_ILOGB0: i32 = FP_ILOGBNAN;
+#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
pub fn ilogbf(x: f32) -> i32 {
let mut i = x.to_bits();
let e = ((i >> 23) & 0xff) as i32;
diff --git a/vendor/libm/src/math/j1f.rs b/vendor/libm/src/math/j1f.rs
index 83ac1acff..c39f8ff7e 100644
--- a/vendor/libm/src/math/j1f.rs
+++ b/vendor/libm/src/math/j1f.rs
@@ -49,7 +49,7 @@ fn common(ix: u32, x: f32, y1: bool, sign: bool) -> f32 {
if sign {
cc = -cc;
}
- return INVSQRTPI * (cc as f32) / sqrtf(x);
+ return (((INVSQRTPI as f64) * cc) / (sqrtf(x) as f64)) as f32;
}
/* R0/S0 on [0,2] */
@@ -356,3 +356,25 @@ fn qonef(x: f32) -> f32 {
s = 1.0 + z * (q[0] + z * (q[1] + z * (q[2] + z * (q[3] + z * (q[4] + z * q[5])))));
return (0.375 + r / s) / x;
}
+
+// PowerPC tests are failing on LLVM 13: https://github.com/rust-lang/rust/issues/88520
+#[cfg(not(target_arch = "powerpc64"))]
+#[cfg(test)]
+mod tests {
+ use super::{j1f, y1f};
+ #[test]
+ fn test_j1f_2488() {
+ // 0x401F3E49
+ assert_eq!(j1f(2.4881766_f32), 0.49999475_f32);
+ }
+ #[test]
+ fn test_y1f_2002() {
+ //allow slightly different result on x87
+ let res = y1f(2.0000002_f32);
+ if cfg!(all(target_arch = "x86", not(target_feature = "sse2"))) && (res == -0.10703231_f32)
+ {
+ return;
+ }
+ assert_eq!(res, -0.10703229_f32);
+ }
+}
diff --git a/vendor/libm/src/math/k_cos.rs b/vendor/libm/src/math/k_cos.rs
index 4687b369a..49b2fc64d 100644
--- a/vendor/libm/src/math/k_cos.rs
+++ b/vendor/libm/src/math/k_cos.rs
@@ -51,7 +51,6 @@ const C6: f64 = -1.13596475577881948265e-11; /* 0xBDA8FAE9, 0xBE8838D4 */
// expression for cos(). Retention happens in all cases tested
// under FreeBSD, so don't pessimize things by forcibly clipping
// any extra precision in w.
-#[inline]
#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
pub(crate) fn k_cos(x: f64, y: f64) -> f64 {
let z = x * x;
diff --git a/vendor/libm/src/math/k_cosf.rs b/vendor/libm/src/math/k_cosf.rs
index 79d0f238f..e99f2348c 100644
--- a/vendor/libm/src/math/k_cosf.rs
+++ b/vendor/libm/src/math/k_cosf.rs
@@ -20,7 +20,6 @@ const C1: f64 = 0.0416666233237390631894; /* 0x155553e1053a42.0p-57 */
const C2: f64 = -0.00138867637746099294692; /* -0x16c087e80f1e27.0p-62 */
const C3: f64 = 0.0000243904487962774090654; /* 0x199342e0ee5069.0p-68 */
-#[inline]
#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
pub(crate) fn k_cosf(x: f64) -> f32 {
let z = x * x;
diff --git a/vendor/libm/src/math/k_expo2.rs b/vendor/libm/src/math/k_expo2.rs
index 0a9562eae..7345075f3 100644
--- a/vendor/libm/src/math/k_expo2.rs
+++ b/vendor/libm/src/math/k_expo2.rs
@@ -4,7 +4,6 @@ use super::exp;
const K: i32 = 2043;
/* expf(x)/2 for x >= log(FLT_MAX), slightly better than 0.5f*expf(x/2)*expf(x/2) */
-#[inline]
#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
pub(crate) fn k_expo2(x: f64) -> f64 {
let k_ln2 = f64::from_bits(0x40962066151add8b);
diff --git a/vendor/libm/src/math/k_expo2f.rs b/vendor/libm/src/math/k_expo2f.rs
index de8507772..fbd7b27d5 100644
--- a/vendor/libm/src/math/k_expo2f.rs
+++ b/vendor/libm/src/math/k_expo2f.rs
@@ -4,7 +4,6 @@ use super::expf;
const K: i32 = 235;
/* expf(x)/2 for x >= log(FLT_MAX), slightly better than 0.5f*expf(x/2)*expf(x/2) */
-#[inline]
#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
pub(crate) fn k_expo2f(x: f32) -> f32 {
let k_ln2 = f32::from_bits(0x4322e3bc);
diff --git a/vendor/libm/src/math/k_sin.rs b/vendor/libm/src/math/k_sin.rs
index 5d2bd68aa..9dd96c944 100644
--- a/vendor/libm/src/math/k_sin.rs
+++ b/vendor/libm/src/math/k_sin.rs
@@ -43,7 +43,6 @@ const S6: f64 = 1.58969099521155010221e-10; /* 0x3DE5D93A, 0x5ACFD57C */
// r = x *(S2+x *(S3+x *(S4+x *(S5+x *S6))))
// then 3 2
// sin(x) = x + (S1*x + (x *(r-y/2)+y))
-#[inline]
#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
pub(crate) fn k_sin(x: f64, y: f64, iy: i32) -> f64 {
let z = x * x;
diff --git a/vendor/libm/src/math/k_sinf.rs b/vendor/libm/src/math/k_sinf.rs
index 68fe926c2..88d10caba 100644
--- a/vendor/libm/src/math/k_sinf.rs
+++ b/vendor/libm/src/math/k_sinf.rs
@@ -20,7 +20,6 @@ const S2: f64 = 0.0083333293858894631756; /* 0x111110896efbb2.0p-59 */
const S3: f64 = -0.000198393348360966317347; /* -0x1a00f9e2cae774.0p-65 */
const S4: f64 = 0.0000027183114939898219064; /* 0x16cd878c3b46a7.0p-71 */
-#[inline]
#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
pub(crate) fn k_sinf(x: f64) -> f32 {
let z = x * x;
diff --git a/vendor/libm/src/math/k_tan.rs b/vendor/libm/src/math/k_tan.rs
index ea3c386b0..d177010bb 100644
--- a/vendor/libm/src/math/k_tan.rs
+++ b/vendor/libm/src/math/k_tan.rs
@@ -58,7 +58,6 @@ static T: [f64; 13] = [
const PIO4: f64 = 7.85398163397448278999e-01; /* 3FE921FB, 54442D18 */
const PIO4_LO: f64 = 3.06161699786838301793e-17; /* 3C81A626, 33145C07 */
-#[inline]
#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
pub(crate) fn k_tan(mut x: f64, mut y: f64, odd: i32) -> f64 {
let hx = (f64::to_bits(x) >> 32) as u32;
@@ -101,7 +100,6 @@ pub(crate) fn k_tan(mut x: f64, mut y: f64, odd: i32) -> f64 {
a0 + a * (1.0 + a0 * w0 + a0 * v)
}
-#[inline]
fn zero_low_word(x: f64) -> f64 {
f64::from_bits(f64::to_bits(x) & 0xFFFF_FFFF_0000_0000)
}
diff --git a/vendor/libm/src/math/k_tanf.rs b/vendor/libm/src/math/k_tanf.rs
index 52651378d..af8db539d 100644
--- a/vendor/libm/src/math/k_tanf.rs
+++ b/vendor/libm/src/math/k_tanf.rs
@@ -19,7 +19,6 @@ const T: [f64; 6] = [
0.00946564784943673166728, /* 0x1362b9bf971bcd.0p-59 */
];
-#[inline]
#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
pub(crate) fn k_tanf(x: f64, odd: bool) -> f32 {
let z = x * x;
diff --git a/vendor/libm/src/math/ldexp.rs b/vendor/libm/src/math/ldexp.rs
index 780ddfc11..e46242e55 100644
--- a/vendor/libm/src/math/ldexp.rs
+++ b/vendor/libm/src/math/ldexp.rs
@@ -1,4 +1,3 @@
-#[inline]
#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
pub fn ldexp(x: f64, n: i32) -> f64 {
super::scalbn(x, n)
diff --git a/vendor/libm/src/math/ldexpf.rs b/vendor/libm/src/math/ldexpf.rs
index 70935a002..95b27fc49 100644
--- a/vendor/libm/src/math/ldexpf.rs
+++ b/vendor/libm/src/math/ldexpf.rs
@@ -1,4 +1,3 @@
-#[inline]
#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
pub fn ldexpf(x: f32, n: i32) -> f32 {
super::scalbnf(x, n)
diff --git a/vendor/libm/src/math/lgamma.rs b/vendor/libm/src/math/lgamma.rs
index 5bc87e85e..a08bc5b64 100644
--- a/vendor/libm/src/math/lgamma.rs
+++ b/vendor/libm/src/math/lgamma.rs
@@ -1,5 +1,6 @@
use super::lgamma_r;
+#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
pub fn lgamma(x: f64) -> f64 {
lgamma_r(x).0
}
diff --git a/vendor/libm/src/math/lgamma_r.rs b/vendor/libm/src/math/lgamma_r.rs
index 382a501fc..b26177e6e 100644
--- a/vendor/libm/src/math/lgamma_r.rs
+++ b/vendor/libm/src/math/lgamma_r.rs
@@ -152,7 +152,7 @@ fn sin_pi(mut x: f64) -> f64 {
x = 2.0 * (x * 0.5 - floor(x * 0.5)); /* x mod 2.0 */
n = (x * 4.0) as i32;
- n = (n + 1) / 2;
+ n = div!(n + 1, 2);
x -= (n as f64) * 0.5;
x *= PI;
@@ -164,6 +164,7 @@ fn sin_pi(mut x: f64) -> f64 {
}
}
+#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
pub fn lgamma_r(mut x: f64) -> (f64, i32) {
let u: u64 = x.to_bits();
let mut t: f64;
@@ -270,9 +271,9 @@ pub fn lgamma_r(mut x: f64) -> (f64, i32) {
p2 = 1.0 + y * (V1 + y * (V2 + y * (V3 + y * (V4 + y * V5))));
r += -0.5 * y + p1 / p2;
}
- #[cfg(feature = "checked")]
+ #[cfg(debug_assertions)]
_ => unreachable!(),
- #[cfg(not(feature = "checked"))]
+ #[cfg(not(debug_assertions))]
_ => {}
}
} else if ix < 0x40200000 {
diff --git a/vendor/libm/src/math/lgammaf.rs b/vendor/libm/src/math/lgammaf.rs
index dfdc87f96..a9c2da75b 100644
--- a/vendor/libm/src/math/lgammaf.rs
+++ b/vendor/libm/src/math/lgammaf.rs
@@ -1,5 +1,6 @@
use super::lgammaf_r;
+#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
pub fn lgammaf(x: f32) -> f32 {
lgammaf_r(x).0
}
diff --git a/vendor/libm/src/math/lgammaf_r.rs b/vendor/libm/src/math/lgammaf_r.rs
index 0745359a2..723c90daf 100644
--- a/vendor/libm/src/math/lgammaf_r.rs
+++ b/vendor/libm/src/math/lgammaf_r.rs
@@ -88,7 +88,7 @@ fn sin_pi(mut x: f32) -> f32 {
x = 2.0 * (x * 0.5 - floorf(x * 0.5)); /* x mod 2.0 */
n = (x * 4.0) as isize;
- n = (n + 1) / 2;
+ n = div!(n + 1, 2);
y = (x as f64) - (n as f64) * 0.5;
y *= 3.14159265358979323846;
match n {
@@ -99,6 +99,7 @@ fn sin_pi(mut x: f32) -> f32 {
}
}
+#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
pub fn lgammaf_r(mut x: f32) -> (f32, i32) {
let u = x.to_bits();
let mut t: f32;
@@ -205,9 +206,9 @@ pub fn lgammaf_r(mut x: f32) -> (f32, i32) {
p2 = 1.0 + y * (V1 + y * (V2 + y * (V3 + y * (V4 + y * V5))));
r += -0.5 * y + p1 / p2;
}
- #[cfg(feature = "checked")]
+ #[cfg(debug_assertions)]
_ => unreachable!(),
- #[cfg(not(feature = "checked"))]
+ #[cfg(not(debug_assertions))]
_ => {}
}
} else if ix < 0x41000000 {
diff --git a/vendor/libm/src/math/log.rs b/vendor/libm/src/math/log.rs
index 4126e413b..27a26da60 100644
--- a/vendor/libm/src/math/log.rs
+++ b/vendor/libm/src/math/log.rs
@@ -70,7 +70,6 @@ const LG5: f64 = 1.818357216161805012e-01; /* 3FC74664 96CB03DE */
const LG6: f64 = 1.531383769920937332e-01; /* 3FC39A09 D078C69F */
const LG7: f64 = 1.479819860511658591e-01; /* 3FC2F112 DF3E5244 */
-#[inline]
#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
pub fn log(mut x: f64) -> f64 {
let x1p54 = f64::from_bits(0x4350000000000000); // 0x1p54 === 2 ^ 54
diff --git a/vendor/libm/src/math/log10.rs b/vendor/libm/src/math/log10.rs
index c99696040..40dacf2c9 100644
--- a/vendor/libm/src/math/log10.rs
+++ b/vendor/libm/src/math/log10.rs
@@ -31,7 +31,6 @@ const LG5: f64 = 1.818357216161805012e-01; /* 3FC74664 96CB03DE */
const LG6: f64 = 1.531383769920937332e-01; /* 3FC39A09 D078C69F */
const LG7: f64 = 1.479819860511658591e-01; /* 3FC2F112 DF3E5244 */
-#[inline]
#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
pub fn log10(mut x: f64) -> f64 {
let x1p54 = f64::from_bits(0x4350000000000000); // 0x1p54 === 2 ^ 54
diff --git a/vendor/libm/src/math/log10f.rs b/vendor/libm/src/math/log10f.rs
index 9845cda5d..108dfa8b5 100644
--- a/vendor/libm/src/math/log10f.rs
+++ b/vendor/libm/src/math/log10f.rs
@@ -25,7 +25,6 @@ const LG2: f32 = 0.40000972152; /* 0xccce13.0p-25 */
const LG3: f32 = 0.28498786688; /* 0x91e9ee.0p-25 */
const LG4: f32 = 0.24279078841; /* 0xf89e26.0p-26 */
-#[inline]
#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
pub fn log10f(mut x: f32) -> f32 {
let x1p25f = f32::from_bits(0x4c000000); // 0x1p25f === 2 ^ 25
diff --git a/vendor/libm/src/math/log1p.rs b/vendor/libm/src/math/log1p.rs
index cd7045ac9..4fd1c73eb 100644
--- a/vendor/libm/src/math/log1p.rs
+++ b/vendor/libm/src/math/log1p.rs
@@ -65,7 +65,6 @@ const LG5: f64 = 1.818357216161805012e-01; /* 3FC74664 96CB03DE */
const LG6: f64 = 1.531383769920937332e-01; /* 3FC39A09 D078C69F */
const LG7: f64 = 1.479819860511658591e-01; /* 3FC2F112 DF3E5244 */
-#[inline]
#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
pub fn log1p(x: f64) -> f64 {
let mut ui: u64 = x.to_bits();
diff --git a/vendor/libm/src/math/log1pf.rs b/vendor/libm/src/math/log1pf.rs
index 8e9651357..500e8eeaa 100644
--- a/vendor/libm/src/math/log1pf.rs
+++ b/vendor/libm/src/math/log1pf.rs
@@ -20,7 +20,6 @@ const LG2: f32 = 0.40000972152; /* 0xccce13.0p-25 */
const LG3: f32 = 0.28498786688; /* 0x91e9ee.0p-25 */
const LG4: f32 = 0.24279078841; /* 0xf89e26.0p-26 */
-#[inline]
#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
pub fn log1pf(x: f32) -> f32 {
let mut ui: u32 = x.to_bits();
diff --git a/vendor/libm/src/math/log2.rs b/vendor/libm/src/math/log2.rs
index a3d43e55c..83da3a193 100644
--- a/vendor/libm/src/math/log2.rs
+++ b/vendor/libm/src/math/log2.rs
@@ -29,7 +29,6 @@ const LG5: f64 = 1.818357216161805012e-01; /* 3FC74664 96CB03DE */
const LG6: f64 = 1.531383769920937332e-01; /* 3FC39A09 D078C69F */
const LG7: f64 = 1.479819860511658591e-01; /* 3FC2F112 DF3E5244 */
-#[inline]
#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
pub fn log2(mut x: f64) -> f64 {
let x1p54 = f64::from_bits(0x4350000000000000); // 0x1p54 === 2 ^ 54
diff --git a/vendor/libm/src/math/log2f.rs b/vendor/libm/src/math/log2f.rs
index 53a37e503..3a20fb15b 100644
--- a/vendor/libm/src/math/log2f.rs
+++ b/vendor/libm/src/math/log2f.rs
@@ -23,7 +23,6 @@ const LG2: f32 = 0.40000972152; /* 0xccce13.0p-25 */
const LG3: f32 = 0.28498786688; /* 0x91e9ee.0p-25 */
const LG4: f32 = 0.24279078841; /* 0xf89e26.0p-26 */
-#[inline]
#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
pub fn log2f(mut x: f32) -> f32 {
let x1p25f = f32::from_bits(0x4c000000); // 0x1p25f === 2 ^ 25
diff --git a/vendor/libm/src/math/logf.rs b/vendor/libm/src/math/logf.rs
index 95195601c..2b57b934f 100644
--- a/vendor/libm/src/math/logf.rs
+++ b/vendor/libm/src/math/logf.rs
@@ -21,7 +21,6 @@ const LG2: f32 = 0.40000972152; /* 0xccce13.0p-25 */
const LG3: f32 = 0.28498786688; /* 0x91e9ee.0p-25 */
const LG4: f32 = 0.24279078841; /* 0xf89e26.0p-26 */
-#[inline]
#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
pub fn logf(mut x: f32) -> f32 {
let x1p25 = f32::from_bits(0x4c000000); // 0x1p25f === 2 ^ 25
diff --git a/vendor/libm/src/math/mod.rs b/vendor/libm/src/math/mod.rs
index 35ffe1a2c..05ebb708c 100644
--- a/vendor/libm/src/math/mod.rs
+++ b/vendor/libm/src/math/mod.rs
@@ -1,12 +1,10 @@
macro_rules! force_eval {
($e:expr) => {
- unsafe {
- ::core::ptr::read_volatile(&$e);
- }
+ unsafe { ::core::ptr::read_volatile(&$e) }
};
}
-#[cfg(not(feature = "checked"))]
+#[cfg(not(debug_assertions))]
macro_rules! i {
($array:expr, $index:expr) => {
unsafe { *$array.get_unchecked($index) }
@@ -36,7 +34,7 @@ macro_rules! i {
};
}
-#[cfg(feature = "checked")]
+#[cfg(debug_assertions)]
macro_rules! i {
($array:expr, $index:expr) => {
*$array.get($index).unwrap()
@@ -58,9 +56,27 @@ macro_rules! i {
};
}
+// Temporary macro to avoid panic codegen for division (in debug mode too). At
+// the time of this writing this is only used in a few places, and once
+// rust-lang/rust#72751 is fixed then this macro will no longer be necessary and
+// the native `/` operator can be used and panics won't be codegen'd.
+#[cfg(any(debug_assertions, not(feature = "unstable")))]
+macro_rules! div {
+ ($a:expr, $b:expr) => {
+ $a / $b
+ };
+}
+
+#[cfg(all(not(debug_assertions), feature = "unstable"))]
+macro_rules! div {
+ ($a:expr, $b:expr) => {
+ unsafe { core::intrinsics::unchecked_div($a, $b) }
+ };
+}
+
macro_rules! llvm_intrinsically_optimized {
(#[cfg($($clause:tt)*)] $e:expr) => {
- #[cfg(all(not(feature = "stable"), $($clause)*))]
+ #[cfg(all(feature = "unstable", $($clause)*))]
{
if true { // thwart the dead code lint
$e
@@ -146,10 +162,16 @@ mod log2f;
mod logf;
mod modf;
mod modff;
+mod nextafter;
+mod nextafterf;
mod pow;
mod powf;
+mod remainder;
+mod remainderf;
mod remquo;
mod remquof;
+mod rint;
+mod rintf;
mod round;
mod roundf;
mod scalbn;
@@ -256,10 +278,16 @@ pub use self::log2f::log2f;
pub use self::logf::logf;
pub use self::modf::modf;
pub use self::modff::modff;
+pub use self::nextafter::nextafter;
+pub use self::nextafterf::nextafterf;
pub use self::pow::pow;
pub use self::powf::powf;
+pub use self::remainder::remainder;
+pub use self::remainderf::remainderf;
pub use self::remquo::remquo;
pub use self::remquof::remquof;
+pub use self::rint::rint;
+pub use self::rintf::rintf;
pub use self::round::round;
pub use self::roundf::roundf;
pub use self::scalbn::scalbn;
diff --git a/vendor/libm/src/math/nextafter.rs b/vendor/libm/src/math/nextafter.rs
new file mode 100644
index 000000000..13094a17c
--- /dev/null
+++ b/vendor/libm/src/math/nextafter.rs
@@ -0,0 +1,37 @@
+#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+pub fn nextafter(x: f64, y: f64) -> f64 {
+ if x.is_nan() || y.is_nan() {
+ return x + y;
+ }
+
+ let mut ux_i = x.to_bits();
+ let uy_i = y.to_bits();
+ if ux_i == uy_i {
+ return y;
+ }
+
+ let ax = ux_i & !1_u64 / 2;
+ let ay = uy_i & !1_u64 / 2;
+ if ax == 0 {
+ if ay == 0 {
+ return y;
+ }
+ ux_i = (uy_i & 1_u64 << 63) | 1;
+ } else if ax > ay || ((ux_i ^ uy_i) & 1_u64 << 63) != 0 {
+ ux_i -= 1;
+ } else {
+ ux_i += 1;
+ }
+
+ let e = ux_i.wrapping_shr(52 & 0x7ff);
+ // raise overflow if ux.f is infinite and x is finite
+ if e == 0x7ff {
+ force_eval!(x + x);
+ }
+ let ux_f = f64::from_bits(ux_i);
+ // raise underflow if ux.f is subnormal or zero
+ if e == 0 {
+ force_eval!(x * x + ux_f * ux_f);
+ }
+ ux_f
+}
diff --git a/vendor/libm/src/math/nextafterf.rs b/vendor/libm/src/math/nextafterf.rs
new file mode 100644
index 000000000..df9b10829
--- /dev/null
+++ b/vendor/libm/src/math/nextafterf.rs
@@ -0,0 +1,37 @@
+#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+pub fn nextafterf(x: f32, y: f32) -> f32 {
+ if x.is_nan() || y.is_nan() {
+ return x + y;
+ }
+
+ let mut ux_i = x.to_bits();
+ let uy_i = y.to_bits();
+ if ux_i == uy_i {
+ return y;
+ }
+
+ let ax = ux_i & 0x7fff_ffff_u32;
+ let ay = uy_i & 0x7fff_ffff_u32;
+ if ax == 0 {
+ if ay == 0 {
+ return y;
+ }
+ ux_i = (uy_i & 0x8000_0000_u32) | 1;
+ } else if ax > ay || ((ux_i ^ uy_i) & 0x8000_0000_u32) != 0 {
+ ux_i -= 1;
+ } else {
+ ux_i += 1;
+ }
+
+ let e = ux_i.wrapping_shr(0x7f80_0000_u32);
+ // raise overflow if ux_f is infinite and x is finite
+ if e == 0x7f80_0000_u32 {
+ force_eval!(x + x);
+ }
+ let ux_f = f32::from_bits(ux_i);
+ // raise underflow if ux_f is subnormal or zero
+ if e == 0 {
+ force_eval!(x * x + ux_f * ux_f);
+ }
+ ux_f
+}
diff --git a/vendor/libm/src/math/pow.rs b/vendor/libm/src/math/pow.rs
index 111d712ff..6a19ae601 100644
--- a/vendor/libm/src/math/pow.rs
+++ b/vendor/libm/src/math/pow.rs
@@ -89,7 +89,6 @@ const IVLN2: f64 = 1.44269504088896338700e+00; /* 0x3ff71547_652b82fe =1/ln2 */
const IVLN2_H: f64 = 1.44269502162933349609e+00; /* 0x3ff71547_60000000 =24b 1/ln2*/
const IVLN2_L: f64 = 1.92596299112661746887e-08; /* 0x3e54ae0b_f85ddf44 =1/ln2 tail*/
-#[inline]
#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
pub fn pow(x: f64, y: f64) -> f64 {
let t1: f64;
@@ -300,8 +299,8 @@ pub fn pow(x: f64, y: f64) -> f64 {
ax = with_set_high_word(ax, ix as u32);
/* compute ss = s_h+s_l = (x-1)/(x+1) or (x-1.5)/(x+1.5) */
- let u: f64 = ax - BP[k as usize]; /* bp[0]=1.0, bp[1]=1.5 */
- let v: f64 = 1.0 / (ax + BP[k as usize]);
+ let u: f64 = ax - i!(BP, k as usize); /* bp[0]=1.0, bp[1]=1.5 */
+ let v: f64 = 1.0 / (ax + i!(BP, k as usize));
let ss: f64 = u * v;
let s_h = with_set_low_word(ss, 0);
@@ -310,7 +309,7 @@ pub fn pow(x: f64, y: f64) -> f64 {
0.0,
((ix as u32 >> 1) | 0x20000000) + 0x00080000 + ((k as u32) << 18),
);
- let t_l: f64 = ax - (t_h - BP[k as usize]);
+ let t_l: f64 = ax - (t_h - i!(BP, k as usize));
let s_l: f64 = v * ((u - s_h * t_h) - s_h * t_l);
/* compute log(ax) */
@@ -329,12 +328,12 @@ pub fn pow(x: f64, y: f64) -> f64 {
let p_h: f64 = with_set_low_word(u + v, 0);
let p_l = v - (p_h - u);
let z_h: f64 = CP_H * p_h; /* cp_h+cp_l = 2/(3*log2) */
- let z_l: f64 = CP_L * p_h + p_l * CP + DP_L[k as usize];
+ let z_l: f64 = CP_L * p_h + p_l * CP + i!(DP_L, k as usize);
/* log2(ax) = (ss+..)*2/(3*log2) = n + dp_h + z_h + z_l */
let t: f64 = n as f64;
- t1 = with_set_low_word(((z_h + z_l) + DP_H[k as usize]) + t, 0);
- t2 = z_l - (((t1 - t) - DP_H[k as usize]) - z_h);
+ t1 = with_set_low_word(((z_h + z_l) + i!(DP_H, k as usize)) + t, 0);
+ t2 = z_l - (((t1 - t) - i!(DP_H, k as usize)) - z_h);
}
/* split up y into y1+y2 and compute (y1+y2)*(t1+t2) */
@@ -479,12 +478,16 @@ mod tests {
.for_each(|s| s.iter().for_each(|val| pow_test(base, *val, expected)));
}
- fn test_sets(sets: &[&[f64]], computed: &Fn(f64) -> f64, expected: &Fn(f64) -> f64) {
+ fn test_sets(sets: &[&[f64]], computed: &dyn Fn(f64) -> f64, expected: &dyn Fn(f64) -> f64) {
sets.iter().for_each(|s| {
s.iter().for_each(|val| {
let exp = expected(*val);
let res = computed(*val);
+ #[cfg(all(target_arch = "x86", not(target_feature = "sse2")))]
+ let exp = force_eval!(exp);
+ #[cfg(all(target_arch = "x86", not(target_feature = "sse2")))]
+ let res = force_eval!(res);
assert!(
if exp.is_nan() {
res.is_nan()
@@ -605,7 +608,7 @@ mod tests {
// Factoring -1 out:
// (negative anything ^ integer should be (-1 ^ integer) * (positive anything ^ integer))
- &[POS_ZERO, NEG_ZERO, POS_ONE, NEG_ONE, POS_EVENS, NEG_EVENS]
+ (&[POS_ZERO, NEG_ZERO, POS_ONE, NEG_ONE, POS_EVENS, NEG_EVENS])
.iter()
.for_each(|int_set| {
int_set.iter().for_each(|int| {
@@ -617,7 +620,7 @@ mod tests {
// Negative base (imaginary results):
// (-anything except 0 and Infinity ^ non-integer should be NAN)
- &NEG[1..(NEG.len() - 1)].iter().for_each(|set| {
+ (&NEG[1..(NEG.len() - 1)]).iter().for_each(|set| {
set.iter().for_each(|val| {
test_sets(&ALL[3..7], &|v: f64| pow(*val, v), &|_| NAN);
})
diff --git a/vendor/libm/src/math/powf.rs b/vendor/libm/src/math/powf.rs
index 015bade86..68d2083bb 100644
--- a/vendor/libm/src/math/powf.rs
+++ b/vendor/libm/src/math/powf.rs
@@ -43,7 +43,6 @@ const IVLN2: f32 = 1.4426950216e+00;
const IVLN2_H: f32 = 1.4426879883e+00;
const IVLN2_L: f32 = 7.0526075433e-06;
-#[inline]
#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
pub fn powf(x: f32, y: f32) -> f32 {
let mut z: f32;
@@ -239,8 +238,8 @@ pub fn powf(x: f32, y: f32) -> f32 {
ax = f32::from_bits(ix as u32);
/* compute s = s_h+s_l = (x-1)/(x+1) or (x-1.5)/(x+1.5) */
- u = ax - BP[k as usize]; /* bp[0]=1.0, bp[1]=1.5 */
- v = 1.0 / (ax + BP[k as usize]);
+ u = ax - i!(BP, k as usize); /* bp[0]=1.0, bp[1]=1.5 */
+ v = 1.0 / (ax + i!(BP, k as usize));
s = u * v;
s_h = s;
is = s_h.to_bits() as i32;
@@ -248,7 +247,7 @@ pub fn powf(x: f32, y: f32) -> f32 {
/* t_h=ax+bp[k] High */
is = (((ix as u32 >> 1) & 0xfffff000) | 0x20000000) as i32;
t_h = f32::from_bits(is as u32 + 0x00400000 + ((k as u32) << 21));
- t_l = ax - (t_h - BP[k as usize]);
+ t_l = ax - (t_h - i!(BP, k as usize));
s_l = v * ((u - s_h * t_h) - s_h * t_l);
/* compute log(ax) */
s2 = s * s;
@@ -268,13 +267,13 @@ pub fn powf(x: f32, y: f32) -> f32 {
p_h = f32::from_bits(is as u32 & 0xfffff000);
p_l = v - (p_h - u);
z_h = CP_H * p_h; /* cp_h+cp_l = 2/(3*log2) */
- z_l = CP_L * p_h + p_l * CP + DP_L[k as usize];
+ z_l = CP_L * p_h + p_l * CP + i!(DP_L, k as usize);
/* log2(ax) = (s+..)*2/(3*log2) = n + dp_h + z_h + z_l */
t = n as f32;
- t1 = ((z_h + z_l) + DP_H[k as usize]) + t;
+ t1 = ((z_h + z_l) + i!(DP_H, k as usize)) + t;
is = t1.to_bits() as i32;
t1 = f32::from_bits(is as u32 & 0xfffff000);
- t2 = z_l - (((t1 - t) - DP_H[k as usize]) - z_h);
+ t2 = z_l - (((t1 - t) - i!(DP_H, k as usize)) - z_h);
};
/* split up y into y1+y2 and compute (y1+y2)*(t1+t2) */
diff --git a/vendor/libm/src/math/rem_pio2.rs b/vendor/libm/src/math/rem_pio2.rs
index 536dfac3c..644616f2d 100644
--- a/vendor/libm/src/math/rem_pio2.rs
+++ b/vendor/libm/src/math/rem_pio2.rs
@@ -41,7 +41,6 @@ const PIO2_3T: f64 = 8.47842766036889956997e-32; /* 0x397B839A, 0x252049C1 */
// use rem_pio2_large() for large x
//
// caller must handle the case when reduction is not needed: |x| ~<= pi/4 */
-#[inline]
#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
pub(crate) fn rem_pio2(x: f64) -> (i32, f64, f64) {
let x1p24 = f64::from_bits(0x4170000000000000);
@@ -49,10 +48,14 @@ pub(crate) fn rem_pio2(x: f64) -> (i32, f64, f64) {
let sign = (f64::to_bits(x) >> 63) as i32;
let ix = (f64::to_bits(x) >> 32) as u32 & 0x7fffffff;
- #[inline]
fn medium(x: f64, ix: u32) -> (i32, f64, f64) {
/* rint(x/(pi/2)), Assume round-to-nearest. */
- let f_n = x as f64 * INV_PIO2 + TO_INT - TO_INT;
+ let tmp = x as f64 * INV_PIO2 + TO_INT;
+ // force rounding of tmp to its storage format on x87 to avoid
+ // excess precision issues.
+ #[cfg(all(target_arch = "x86", not(target_feature = "sse2")))]
+ let tmp = force_eval!(tmp);
+ let f_n = tmp - TO_INT;
let n = f_n as i32;
let mut r = x - f_n * PIO2_1;
let mut w = f_n * PIO2_1T; /* 1st round, good to 85 bits */
@@ -169,39 +172,62 @@ pub(crate) fn rem_pio2(x: f64) -> (i32, f64, f64) {
let mut z = f64::from_bits(ui);
let mut tx = [0.0; 3];
for i in 0..2 {
- tx[i] = z as i32 as f64;
- z = (z - tx[i]) * x1p24;
+ i!(tx,i, =, z as i32 as f64);
+ z = (z - i!(tx, i)) * x1p24;
}
- tx[2] = z;
+ i!(tx,2, =, z);
/* skip zero terms, first term is non-zero */
let mut i = 2;
- while i != 0 && tx[i] == 0.0 {
+ while i != 0 && i!(tx, i) == 0.0 {
i -= 1;
}
let mut ty = [0.0; 3];
- let n = rem_pio2_large(&tx[..=i], &mut ty, ((ix >> 20) - (0x3ff + 23)) as i32, 1);
+ let n = rem_pio2_large(&tx[..=i], &mut ty, ((ix as i32) >> 20) - (0x3ff + 23), 1);
if sign != 0 {
- return (-n, -ty[0], -ty[1]);
+ return (-n, -i!(ty, 0), -i!(ty, 1));
}
- (n, ty[0], ty[1])
+ (n, i!(ty, 0), i!(ty, 1))
}
-#[test]
-fn test_near_pi() {
- assert_eq!(
- rem_pio2(3.141592025756836),
- (2, -6.278329573009626e-7, -2.1125998133974653e-23)
- );
- assert_eq!(
- rem_pio2(3.141592033207416),
- (2, -6.20382377148128e-7, -2.1125998133974653e-23)
- );
- assert_eq!(
- rem_pio2(3.141592144966125),
- (2, -5.086236681942706e-7, -2.1125998133974653e-23)
- );
- assert_eq!(
- rem_pio2(3.141592979431152),
- (2, 3.2584135866119817e-7, -2.1125998133974653e-23)
- );
+#[cfg(test)]
+mod tests {
+ use super::rem_pio2;
+
+ #[test]
+ fn test_near_pi() {
+ let arg = 3.141592025756836;
+ let arg = force_eval!(arg);
+ assert_eq!(
+ rem_pio2(arg),
+ (2, -6.278329573009626e-7, -2.1125998133974653e-23)
+ );
+ let arg = 3.141592033207416;
+ let arg = force_eval!(arg);
+ assert_eq!(
+ rem_pio2(arg),
+ (2, -6.20382377148128e-7, -2.1125998133974653e-23)
+ );
+ let arg = 3.141592144966125;
+ let arg = force_eval!(arg);
+ assert_eq!(
+ rem_pio2(arg),
+ (2, -5.086236681942706e-7, -2.1125998133974653e-23)
+ );
+ let arg = 3.141592979431152;
+ let arg = force_eval!(arg);
+ assert_eq!(
+ rem_pio2(arg),
+ (2, 3.2584135866119817e-7, -2.1125998133974653e-23)
+ );
+ }
+
+ #[test]
+ fn test_overflow_b9b847() {
+ let _ = rem_pio2(-3054214.5490637687);
+ }
+
+ #[test]
+ fn test_overflow_4747b9() {
+ let _ = rem_pio2(917340800458.2274);
+ }
}
diff --git a/vendor/libm/src/math/rem_pio2_large.rs b/vendor/libm/src/math/rem_pio2_large.rs
index 006d3e153..db97a39d4 100644
--- a/vendor/libm/src/math/rem_pio2_large.rs
+++ b/vendor/libm/src/math/rem_pio2_large.rs
@@ -27,7 +27,7 @@ const INIT_JK: [usize; 4] = [3, 4, 4, 6];
//
// NB: This table must have at least (e0-3)/24 + jk terms.
// For quad precision (e0 <= 16360, jk = 6), this is 686.
-#[cfg(target_pointer_width = "32")]
+#[cfg(any(target_pointer_width = "32", target_pointer_width = "16"))]
const IPIO2: [i32; 66] = [
0xA2F983, 0x6E4E44, 0x1529FC, 0x2757D1, 0xF534DD, 0xC0DB62, 0x95993C, 0x439041, 0xFE5163,
0xABDEBB, 0xC561B7, 0x246E3A, 0x424DD2, 0xE00649, 0x2EEA09, 0xD1921C, 0xFE1DEB, 0x1CB129,
@@ -222,7 +222,6 @@ const PIO2: [f64; 8] = [
/// skip the part of the product that are known to be a huge integer (
/// more accurately, = 0 mod 8 ). Thus the number of operations are
/// independent of the exponent of the input.
-#[inline]
#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
pub(crate) fn rem_pio2_large(x: &[f64], y: &mut [f64], e0: i32, prec: usize) -> i32 {
let x1p24 = f64::from_bits(0x4170000000000000); // 0x1p24 === 2 ^ 24
@@ -243,12 +242,12 @@ pub(crate) fn rem_pio2_large(x: &[f64], y: &mut [f64], e0: i32, prec: usize) ->
let mut iq: [i32; 20] = [0; 20];
/* initialize jk*/
- let jk = INIT_JK[prec];
+ let jk = i!(INIT_JK, prec);
let jp = jk;
/* determine jx,jv,q0, note that 3>q0 */
let jx = nx - 1;
- let mut jv = (e0 - 3) / 24;
+ let mut jv = div!(e0 - 3, 24);
if jv < 0 {
jv = 0;
}
@@ -256,7 +255,7 @@ pub(crate) fn rem_pio2_large(x: &[f64], y: &mut [f64], e0: i32, prec: usize) ->
let jv = jv as usize;
/* set up f[0] to f[jx+jk] where f[jx+jk] = ipio2[jv+jk] */
- let mut j = (jv - jx) as i32;
+ let mut j = (jv as i32) - (jx as i32);
let m = jx + jk;
for i in 0..=m {
i!(f, i, =, if j < 0 {
@@ -462,9 +461,9 @@ pub(crate) fn rem_pio2_large(x: &[f64], y: &mut [f64], e0: i32, prec: usize) ->
i!(y, 2, =, -fw);
}
}
- #[cfg(feature = "checked")]
+ #[cfg(debug_assertions)]
_ => unreachable!(),
- #[cfg(not(feature = "checked"))]
+ #[cfg(not(debug_assertions))]
_ => {}
}
n & 7
diff --git a/vendor/libm/src/math/rem_pio2f.rs b/vendor/libm/src/math/rem_pio2f.rs
index af2745d1b..775f5d750 100644
--- a/vendor/libm/src/math/rem_pio2f.rs
+++ b/vendor/libm/src/math/rem_pio2f.rs
@@ -31,7 +31,6 @@ const PIO2_1T: f64 = 1.58932547735281966916e-08; /* 0x3E5110b4, 0x611A6263 */
///
/// use double precision for everything except passing x
/// use __rem_pio2_large() for large x
-#[inline]
#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
pub(crate) fn rem_pio2f(x: f32) -> (i32, f64) {
let x64 = x as f64;
@@ -44,7 +43,12 @@ pub(crate) fn rem_pio2f(x: f32) -> (i32, f64) {
if ix < 0x4dc90fdb {
/* |x| ~< 2^28*(pi/2), medium size */
/* Use a specialized rint() to get fn. Assume round-to-nearest. */
- let f_n = x64 * INV_PIO2 + TOINT - TOINT;
+ let tmp = x64 * INV_PIO2 + TOINT;
+ // force rounding of tmp to its storage format on x87 to avoid
+ // excess precision issues.
+ #[cfg(all(target_arch = "x86", not(target_feature = "sse2")))]
+ let tmp = force_eval!(tmp);
+ let f_n = tmp - TOINT;
return (f_n as i32, x64 - f_n * PIO2_1 - f_n * PIO2_1T);
}
if ix >= 0x7f800000 {
diff --git a/vendor/libm/src/math/remainder.rs b/vendor/libm/src/math/remainder.rs
new file mode 100644
index 000000000..9e966c9ed
--- /dev/null
+++ b/vendor/libm/src/math/remainder.rs
@@ -0,0 +1,5 @@
+#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+pub fn remainder(x: f64, y: f64) -> f64 {
+ let (result, _) = super::remquo(x, y);
+ result
+}
diff --git a/vendor/libm/src/math/remainderf.rs b/vendor/libm/src/math/remainderf.rs
new file mode 100644
index 000000000..b1407cf2a
--- /dev/null
+++ b/vendor/libm/src/math/remainderf.rs
@@ -0,0 +1,5 @@
+#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+pub fn remainderf(x: f32, y: f32) -> f32 {
+ let (result, _) = super::remquof(x, y);
+ result
+}
diff --git a/vendor/libm/src/math/remquo.rs b/vendor/libm/src/math/remquo.rs
index 1c2ba8918..0afd1f7f5 100644
--- a/vendor/libm/src/math/remquo.rs
+++ b/vendor/libm/src/math/remquo.rs
@@ -1,3 +1,4 @@
+#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
pub fn remquo(mut x: f64, mut y: f64) -> (f64, i32) {
let ux: u64 = x.to_bits();
let mut uy: u64 = y.to_bits();
@@ -85,7 +86,8 @@ pub fn remquo(mut x: f64, mut y: f64) -> (f64, i32) {
}
if ex == ey || (ex + 1 == ey && (2.0 * x > y || (2.0 * x == y && (q % 2) != 0))) {
x -= y;
- q += 1;
+ // TODO: this matches musl behavior, but it is incorrect
+ q = q.wrapping_add(1);
}
q &= 0x7fffffff;
let quo = if sx ^ sy { -(q as i32) } else { q as i32 };
@@ -95,3 +97,14 @@ pub fn remquo(mut x: f64, mut y: f64) -> (f64, i32) {
(x, quo)
}
}
+
+#[cfg(test)]
+mod tests {
+ use super::remquo;
+
+ #[test]
+ fn test_q_overflow() {
+ // 0xc000000000000001, 0x04c0000000000004
+ let _ = remquo(-2.0000000000000004, 8.406091369059082e-286);
+ }
+}
diff --git a/vendor/libm/src/math/remquof.rs b/vendor/libm/src/math/remquof.rs
index 871d0c7d6..d71bd38e3 100644
--- a/vendor/libm/src/math/remquof.rs
+++ b/vendor/libm/src/math/remquof.rs
@@ -1,3 +1,4 @@
+#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
pub fn remquof(mut x: f32, mut y: f32) -> (f32, i32) {
let ux: u32 = x.to_bits();
let mut uy: u32 = y.to_bits();
diff --git a/vendor/libm/src/math/rint.rs b/vendor/libm/src/math/rint.rs
new file mode 100644
index 000000000..0c6025c1f
--- /dev/null
+++ b/vendor/libm/src/math/rint.rs
@@ -0,0 +1,48 @@
+#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+pub fn rint(x: f64) -> f64 {
+ let one_over_e = 1.0 / f64::EPSILON;
+ let as_u64: u64 = x.to_bits();
+ let exponent: u64 = as_u64 >> 52 & 0x7ff;
+ let is_positive = (as_u64 >> 63) == 0;
+ if exponent >= 0x3ff + 52 {
+ x
+ } else {
+ let ans = if is_positive {
+ x + one_over_e - one_over_e
+ } else {
+ x - one_over_e + one_over_e
+ };
+
+ if ans == 0.0 {
+ if is_positive {
+ 0.0
+ } else {
+ -0.0
+ }
+ } else {
+ ans
+ }
+ }
+}
+
+// PowerPC tests are failing on LLVM 13: https://github.com/rust-lang/rust/issues/88520
+#[cfg(not(target_arch = "powerpc64"))]
+#[cfg(test)]
+mod tests {
+ use super::rint;
+
+ #[test]
+ fn negative_zero() {
+ assert_eq!(rint(-0.0_f64).to_bits(), (-0.0_f64).to_bits());
+ }
+
+ #[test]
+ fn sanity_check() {
+ assert_eq!(rint(-1.0), -1.0);
+ assert_eq!(rint(2.8), 3.0);
+ assert_eq!(rint(-0.5), -0.0);
+ assert_eq!(rint(0.5), 0.0);
+ assert_eq!(rint(-1.5), -2.0);
+ assert_eq!(rint(1.5), 2.0);
+ }
+}
diff --git a/vendor/libm/src/math/rintf.rs b/vendor/libm/src/math/rintf.rs
new file mode 100644
index 000000000..d427793f7
--- /dev/null
+++ b/vendor/libm/src/math/rintf.rs
@@ -0,0 +1,48 @@
+#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+pub fn rintf(x: f32) -> f32 {
+ let one_over_e = 1.0 / f32::EPSILON;
+ let as_u32: u32 = x.to_bits();
+ let exponent: u32 = as_u32 >> 23 & 0xff;
+ let is_positive = (as_u32 >> 31) == 0;
+ if exponent >= 0x7f + 23 {
+ x
+ } else {
+ let ans = if is_positive {
+ x + one_over_e - one_over_e
+ } else {
+ x - one_over_e + one_over_e
+ };
+
+ if ans == 0.0 {
+ if is_positive {
+ 0.0
+ } else {
+ -0.0
+ }
+ } else {
+ ans
+ }
+ }
+}
+
+// PowerPC tests are failing on LLVM 13: https://github.com/rust-lang/rust/issues/88520
+#[cfg(not(target_arch = "powerpc64"))]
+#[cfg(test)]
+mod tests {
+ use super::rintf;
+
+ #[test]
+ fn negative_zero() {
+ assert_eq!(rintf(-0.0_f32).to_bits(), (-0.0_f32).to_bits());
+ }
+
+ #[test]
+ fn sanity_check() {
+ assert_eq!(rintf(-1.0), -1.0);
+ assert_eq!(rintf(2.8), 3.0);
+ assert_eq!(rintf(-0.5), -0.0);
+ assert_eq!(rintf(0.5), 0.0);
+ assert_eq!(rintf(-1.5), -2.0);
+ assert_eq!(rintf(1.5), 2.0);
+ }
+}
diff --git a/vendor/libm/src/math/round.rs b/vendor/libm/src/math/round.rs
index 9a9723cfb..46fabc90f 100644
--- a/vendor/libm/src/math/round.rs
+++ b/vendor/libm/src/math/round.rs
@@ -1,37 +1,28 @@
+use super::copysign;
+use super::trunc;
use core::f64;
-const TOINT: f64 = 1.0 / f64::EPSILON;
-
-#[inline]
#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
-pub fn round(mut x: f64) -> f64 {
- let (f, i) = (x, x.to_bits());
- let e: u64 = i >> 52 & 0x7ff;
- let mut y: f64;
+pub fn round(x: f64) -> f64 {
+ trunc(x + copysign(0.5 - 0.25 * f64::EPSILON, x))
+}
- if e >= 0x3ff + 52 {
- return x;
- }
- if i >> 63 != 0 {
- x = -x;
- }
- if e < 0x3ff - 1 {
- // raise inexact if x!=0
- force_eval!(x + TOINT);
- return 0.0 * f;
- }
- y = x + TOINT - TOINT - x;
- if y > 0.5 {
- y = y + x - 1.0;
- } else if y <= -0.5 {
- y = y + x + 1.0;
- } else {
- y = y + x;
+#[cfg(test)]
+mod tests {
+ use super::round;
+
+ #[test]
+ fn negative_zero() {
+ assert_eq!(round(-0.0_f64).to_bits(), (-0.0_f64).to_bits());
}
- if i >> 63 != 0 {
- -y
- } else {
- y
+ #[test]
+ fn sanity_check() {
+ assert_eq!(round(-1.0), -1.0);
+ assert_eq!(round(2.8), 3.0);
+ assert_eq!(round(-0.5), -1.0);
+ assert_eq!(round(0.5), 1.0);
+ assert_eq!(round(-1.5), -2.0);
+ assert_eq!(round(1.5), 2.0);
}
}
diff --git a/vendor/libm/src/math/roundf.rs b/vendor/libm/src/math/roundf.rs
index 839d9469a..becdb5620 100644
--- a/vendor/libm/src/math/roundf.rs
+++ b/vendor/libm/src/math/roundf.rs
@@ -1,35 +1,30 @@
+use super::copysignf;
+use super::truncf;
use core::f32;
-const TOINT: f32 = 1.0 / f32::EPSILON;
-
-#[inline]
#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
-pub fn roundf(mut x: f32) -> f32 {
- let i = x.to_bits();
- let e: u32 = i >> 23 & 0xff;
- let mut y: f32;
+pub fn roundf(x: f32) -> f32 {
+ truncf(x + copysignf(0.5 - 0.25 * f32::EPSILON, x))
+}
- if e >= 0x7f + 23 {
- return x;
- }
- if i >> 31 != 0 {
- x = -x;
- }
- if e < 0x7f - 1 {
- force_eval!(x + TOINT);
- return 0.0 * x;
- }
- y = x + TOINT - TOINT - x;
- if y > 0.5f32 {
- y = y + x - 1.0;
- } else if y <= -0.5f32 {
- y = y + x + 1.0;
- } else {
- y = y + x;
+// PowerPC tests are failing on LLVM 13: https://github.com/rust-lang/rust/issues/88520
+#[cfg(not(target_arch = "powerpc64"))]
+#[cfg(test)]
+mod tests {
+ use super::roundf;
+
+ #[test]
+ fn negative_zero() {
+ assert_eq!(roundf(-0.0_f32).to_bits(), (-0.0_f32).to_bits());
}
- if i >> 31 != 0 {
- -y
- } else {
- y
+
+ #[test]
+ fn sanity_check() {
+ assert_eq!(roundf(-1.0), -1.0);
+ assert_eq!(roundf(2.8), 3.0);
+ assert_eq!(roundf(-0.5), -1.0);
+ assert_eq!(roundf(0.5), 1.0);
+ assert_eq!(roundf(-1.5), -2.0);
+ assert_eq!(roundf(1.5), 2.0);
}
}
diff --git a/vendor/libm/src/math/scalbn.rs b/vendor/libm/src/math/scalbn.rs
index d8c8409ac..00c455a10 100644
--- a/vendor/libm/src/math/scalbn.rs
+++ b/vendor/libm/src/math/scalbn.rs
@@ -1,4 +1,3 @@
-#[inline]
#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
pub fn scalbn(x: f64, mut n: i32) -> f64 {
let x1p1023 = f64::from_bits(0x7fe0000000000000); // 0x1p1023 === 2 ^ 1023
diff --git a/vendor/libm/src/math/scalbnf.rs b/vendor/libm/src/math/scalbnf.rs
index 4e9771175..73f4bb57a 100644
--- a/vendor/libm/src/math/scalbnf.rs
+++ b/vendor/libm/src/math/scalbnf.rs
@@ -1,4 +1,3 @@
-#[inline]
#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
pub fn scalbnf(mut x: f32, mut n: i32) -> f32 {
let x1p127 = f32::from_bits(0x7f000000); // 0x1p127f === 2 ^ 127
diff --git a/vendor/libm/src/math/sin.rs b/vendor/libm/src/math/sin.rs
index 51aed88a8..a53843dcd 100644
--- a/vendor/libm/src/math/sin.rs
+++ b/vendor/libm/src/math/sin.rs
@@ -40,7 +40,6 @@ use super::{k_cos, k_sin, rem_pio2};
//
// Accuracy:
// TRIG(x) returns trig(x) nearly rounded
-#[inline]
#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
pub fn sin(x: f64) -> f64 {
let x1p120 = f64::from_bits(0x4770000000000000); // 0x1p120f === 2 ^ 120
@@ -82,5 +81,8 @@ pub fn sin(x: f64) -> f64 {
fn test_near_pi() {
let x = f64::from_bits(0x400921fb000FD5DD); // 3.141592026217707
let sx = f64::from_bits(0x3ea50d15ced1a4a2); // 6.273720864039205e-7
- assert_eq!(sin(x), sx);
+ let result = sin(x);
+ #[cfg(all(target_arch = "x86", not(target_feature = "sse2")))]
+ let result = force_eval!(result);
+ assert_eq!(result, sx);
}
diff --git a/vendor/libm/src/math/sincos.rs b/vendor/libm/src/math/sincos.rs
index 750908df4..ff5d87a1c 100644
--- a/vendor/libm/src/math/sincos.rs
+++ b/vendor/libm/src/math/sincos.rs
@@ -12,6 +12,7 @@
use super::{get_high_word, k_cos, k_sin, rem_pio2};
+#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
pub fn sincos(x: f64) -> (f64, f64) {
let s: f64;
let c: f64;
@@ -51,9 +52,83 @@ pub fn sincos(x: f64) -> (f64, f64) {
1 => (c, -s),
2 => (-s, -c),
3 => (-c, s),
- #[cfg(feature = "checked")]
+ #[cfg(debug_assertions)]
_ => unreachable!(),
- #[cfg(not(feature = "checked"))]
+ #[cfg(not(debug_assertions))]
_ => (0.0, 1.0),
}
}
+
+// These tests are based on those from sincosf.rs
+#[cfg(test)]
+mod tests {
+ use super::sincos;
+
+ const TOLERANCE: f64 = 1e-6;
+
+ #[test]
+ fn with_pi() {
+ let (s, c) = sincos(core::f64::consts::PI);
+ assert!(
+ (s - 0.0).abs() < TOLERANCE,
+ "|{} - {}| = {} >= {}",
+ s,
+ 0.0,
+ (s - 0.0).abs(),
+ TOLERANCE
+ );
+ assert!(
+ (c + 1.0).abs() < TOLERANCE,
+ "|{} + {}| = {} >= {}",
+ c,
+ 1.0,
+ (s + 1.0).abs(),
+ TOLERANCE
+ );
+ }
+
+ #[test]
+ fn rotational_symmetry() {
+ use core::f64::consts::PI;
+ const N: usize = 24;
+ for n in 0..N {
+ let theta = 2. * PI * (n as f64) / (N as f64);
+ let (s, c) = sincos(theta);
+ let (s_plus, c_plus) = sincos(theta + 2. * PI);
+ let (s_minus, c_minus) = sincos(theta - 2. * PI);
+
+ assert!(
+ (s - s_plus).abs() < TOLERANCE,
+ "|{} - {}| = {} >= {}",
+ s,
+ s_plus,
+ (s - s_plus).abs(),
+ TOLERANCE
+ );
+ assert!(
+ (s - s_minus).abs() < TOLERANCE,
+ "|{} - {}| = {} >= {}",
+ s,
+ s_minus,
+ (s - s_minus).abs(),
+ TOLERANCE
+ );
+ assert!(
+ (c - c_plus).abs() < TOLERANCE,
+ "|{} - {}| = {} >= {}",
+ c,
+ c_plus,
+ (c - c_plus).abs(),
+ TOLERANCE
+ );
+ assert!(
+ (c - c_minus).abs() < TOLERANCE,
+ "|{} - {}| = {} >= {}",
+ c,
+ c_minus,
+ (c - c_minus).abs(),
+ TOLERANCE
+ );
+ }
+ }
+}
diff --git a/vendor/libm/src/math/sincosf.rs b/vendor/libm/src/math/sincosf.rs
index bb9a00392..9a4c36104 100644
--- a/vendor/libm/src/math/sincosf.rs
+++ b/vendor/libm/src/math/sincosf.rs
@@ -23,6 +23,7 @@ const S2PIO2: f32 = 2.0 * PI_2; /* 0x400921FB, 0x54442D18 */
const S3PIO2: f32 = 3.0 * PI_2; /* 0x4012D97C, 0x7F3321D2 */
const S4PIO2: f32 = 4.0 * PI_2; /* 0x401921FB, 0x54442D18 */
+#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
pub fn sincosf(x: f32) -> (f32, f32) {
let s: f32;
let c: f32;
@@ -65,11 +66,11 @@ pub fn sincosf(x: f32) -> (f32, f32) {
/* -sin(x+c) is not correct if x+c could be 0: -0 vs +0 */
else {
if sign {
- s = k_sinf((x + S2PIO2) as f64);
- c = k_cosf((x + S2PIO2) as f64);
+ s = -k_sinf((x + S2PIO2) as f64);
+ c = -k_cosf((x + S2PIO2) as f64);
} else {
- s = k_sinf((x - S2PIO2) as f64);
- c = k_cosf((x - S2PIO2) as f64);
+ s = -k_sinf((x - S2PIO2) as f64);
+ c = -k_cosf((x - S2PIO2) as f64);
}
}
@@ -89,11 +90,11 @@ pub fn sincosf(x: f32) -> (f32, f32) {
}
} else {
if sign {
- s = k_cosf((x + S4PIO2) as f64);
- c = k_sinf((x + S4PIO2) as f64);
+ s = k_sinf((x + S4PIO2) as f64);
+ c = k_cosf((x + S4PIO2) as f64);
} else {
- s = k_cosf((x - S4PIO2) as f64);
- c = k_sinf((x - S4PIO2) as f64);
+ s = k_sinf((x - S4PIO2) as f64);
+ c = k_cosf((x - S4PIO2) as f64);
}
}
@@ -115,9 +116,70 @@ pub fn sincosf(x: f32) -> (f32, f32) {
1 => (c, -s),
2 => (-s, -c),
3 => (-c, s),
- #[cfg(feature = "checked")]
+ #[cfg(debug_assertions)]
_ => unreachable!(),
- #[cfg(not(feature = "checked"))]
+ #[cfg(not(debug_assertions))]
_ => (0.0, 1.0),
}
}
+
+// PowerPC tests are failing on LLVM 13: https://github.com/rust-lang/rust/issues/88520
+#[cfg(not(target_arch = "powerpc64"))]
+#[cfg(test)]
+mod tests {
+ use super::sincosf;
+ use crate::_eqf;
+
+ #[test]
+ fn with_pi() {
+ let (s, c) = sincosf(core::f32::consts::PI);
+ _eqf(s.abs(), 0.0).unwrap();
+ _eqf(c, -1.0).unwrap();
+ }
+
+ #[test]
+ fn rotational_symmetry() {
+ use core::f32::consts::PI;
+ const N: usize = 24;
+ for n in 0..N {
+ let theta = 2. * PI * (n as f32) / (N as f32);
+ let (s, c) = sincosf(theta);
+ let (s_plus, c_plus) = sincosf(theta + 2. * PI);
+ let (s_minus, c_minus) = sincosf(theta - 2. * PI);
+
+ const TOLERANCE: f32 = 1e-6;
+ assert!(
+ (s - s_plus).abs() < TOLERANCE,
+ "|{} - {}| = {} >= {}",
+ s,
+ s_plus,
+ (s - s_plus).abs(),
+ TOLERANCE
+ );
+ assert!(
+ (s - s_minus).abs() < TOLERANCE,
+ "|{} - {}| = {} >= {}",
+ s,
+ s_minus,
+ (s - s_minus).abs(),
+ TOLERANCE
+ );
+ assert!(
+ (c - c_plus).abs() < TOLERANCE,
+ "|{} - {}| = {} >= {}",
+ c,
+ c_plus,
+ (c - c_plus).abs(),
+ TOLERANCE
+ );
+ assert!(
+ (c - c_minus).abs() < TOLERANCE,
+ "|{} - {}| = {} >= {}",
+ c,
+ c_minus,
+ (c - c_minus).abs(),
+ TOLERANCE
+ );
+ }
+ }
+}
diff --git a/vendor/libm/src/math/sinf.rs b/vendor/libm/src/math/sinf.rs
index 0c31099ed..6e20be2ae 100644
--- a/vendor/libm/src/math/sinf.rs
+++ b/vendor/libm/src/math/sinf.rs
@@ -24,7 +24,6 @@ const S2_PIO2: f64 = 2. * FRAC_PI_2; /* 0x400921FB, 0x54442D18 */
const S3_PIO2: f64 = 3. * FRAC_PI_2; /* 0x4012D97C, 0x7F3321D2 */
const S4_PIO2: f64 = 4. * FRAC_PI_2; /* 0x401921FB, 0x54442D18 */
-#[inline]
#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
pub fn sinf(x: f32) -> f32 {
let x64 = x as f64;
diff --git a/vendor/libm/src/math/sinh.rs b/vendor/libm/src/math/sinh.rs
index d36de66c1..fd24fd20c 100644
--- a/vendor/libm/src/math/sinh.rs
+++ b/vendor/libm/src/math/sinh.rs
@@ -4,7 +4,6 @@ use super::{expm1, expo2};
// = (exp(x)-1 + (exp(x)-1)/exp(x))/2
// = x + x^3/6 + o(x^5)
//
-#[inline]
#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
pub fn sinh(x: f64) -> f64 {
// union {double f; uint64_t i;} u = {.f = x};
diff --git a/vendor/libm/src/math/sinhf.rs b/vendor/libm/src/math/sinhf.rs
index fd0b2bfc8..24f863c44 100644
--- a/vendor/libm/src/math/sinhf.rs
+++ b/vendor/libm/src/math/sinhf.rs
@@ -1,7 +1,6 @@
use super::expm1f;
use super::k_expo2f;
-#[inline]
#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
pub fn sinhf(x: f32) -> f32 {
let mut h = 0.5f32;
diff --git a/vendor/libm/src/math/sqrt.rs b/vendor/libm/src/math/sqrt.rs
index 14404d4eb..f06b209a4 100644
--- a/vendor/libm/src/math/sqrt.rs
+++ b/vendor/libm/src/math/sqrt.rs
@@ -37,7 +37,7 @@
* If (2) is false, then q = q ; otherwise q = q + 2 .
* i+1 i i+1 i
*
- * With some algebric manipulation, it is not difficult to see
+ * With some algebraic manipulation, it is not difficult to see
* that (2) is equivalent to
* -(i+1)
* s + 2 <= y (3)
@@ -77,11 +77,7 @@
*/
use core::f64;
-use core::num::Wrapping;
-const TINY: f64 = 1.0e-300;
-
-#[inline]
#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
pub fn sqrt(x: f64) -> f64 {
// On wasm32 we know that LLVM's intrinsic will compile to an optimized
@@ -96,128 +92,173 @@ pub fn sqrt(x: f64) -> f64 {
}
}
}
- let mut z: f64;
- let sign: Wrapping<u32> = Wrapping(0x80000000);
- let mut ix0: i32;
- let mut s0: i32;
- let mut q: i32;
- let mut m: i32;
- let mut t: i32;
- let mut i: i32;
- let mut r: Wrapping<u32>;
- let mut t1: Wrapping<u32>;
- let mut s1: Wrapping<u32>;
- let mut ix1: Wrapping<u32>;
- let mut q1: Wrapping<u32>;
+ #[cfg(target_feature = "sse2")]
+ {
+ // Note: This path is unlikely since LLVM will usually have already
+ // optimized sqrt calls into hardware instructions if sse2 is available,
+ // but if someone does end up here they'll appreciate the speed increase.
+ #[cfg(target_arch = "x86")]
+ use core::arch::x86::*;
+ #[cfg(target_arch = "x86_64")]
+ use core::arch::x86_64::*;
+ unsafe {
+ let m = _mm_set_sd(x);
+ let m_sqrt = _mm_sqrt_pd(m);
+ _mm_cvtsd_f64(m_sqrt)
+ }
+ }
+ #[cfg(not(target_feature = "sse2"))]
+ {
+ use core::num::Wrapping;
- ix0 = (x.to_bits() >> 32) as i32;
- ix1 = Wrapping(x.to_bits() as u32);
+ const TINY: f64 = 1.0e-300;
- /* take care of Inf and NaN */
- if (ix0 & 0x7ff00000) == 0x7ff00000 {
- return x * x + x; /* sqrt(NaN)=NaN, sqrt(+inf)=+inf, sqrt(-inf)=sNaN */
- }
- /* take care of zero */
- if ix0 <= 0 {
- if ((ix0 & !(sign.0 as i32)) | ix1.0 as i32) == 0 {
- return x; /* sqrt(+-0) = +-0 */
+ let mut z: f64;
+ let sign: Wrapping<u32> = Wrapping(0x80000000);
+ let mut ix0: i32;
+ let mut s0: i32;
+ let mut q: i32;
+ let mut m: i32;
+ let mut t: i32;
+ let mut i: i32;
+ let mut r: Wrapping<u32>;
+ let mut t1: Wrapping<u32>;
+ let mut s1: Wrapping<u32>;
+ let mut ix1: Wrapping<u32>;
+ let mut q1: Wrapping<u32>;
+
+ ix0 = (x.to_bits() >> 32) as i32;
+ ix1 = Wrapping(x.to_bits() as u32);
+
+ /* take care of Inf and NaN */
+ if (ix0 & 0x7ff00000) == 0x7ff00000 {
+ return x * x + x; /* sqrt(NaN)=NaN, sqrt(+inf)=+inf, sqrt(-inf)=sNaN */
}
- if ix0 < 0 {
- return (x - x) / (x - x); /* sqrt(-ve) = sNaN */
+ /* take care of zero */
+ if ix0 <= 0 {
+ if ((ix0 & !(sign.0 as i32)) | ix1.0 as i32) == 0 {
+ return x; /* sqrt(+-0) = +-0 */
+ }
+ if ix0 < 0 {
+ return (x - x) / (x - x); /* sqrt(-ve) = sNaN */
+ }
}
- }
- /* normalize x */
- m = ix0 >> 20;
- if m == 0 {
- /* subnormal x */
- while ix0 == 0 {
- m -= 21;
- ix0 |= (ix1 >> 11).0 as i32;
- ix1 <<= 21;
+ /* normalize x */
+ m = ix0 >> 20;
+ if m == 0 {
+ /* subnormal x */
+ while ix0 == 0 {
+ m -= 21;
+ ix0 |= (ix1 >> 11).0 as i32;
+ ix1 <<= 21;
+ }
+ i = 0;
+ while (ix0 & 0x00100000) == 0 {
+ i += 1;
+ ix0 <<= 1;
+ }
+ m -= i - 1;
+ ix0 |= (ix1 >> (32 - i) as usize).0 as i32;
+ ix1 = ix1 << i as usize;
}
- i = 0;
- while (ix0 & 0x00100000) == 0 {
- i += 1;
- ix0 <<= 1;
+ m -= 1023; /* unbias exponent */
+ ix0 = (ix0 & 0x000fffff) | 0x00100000;
+ if (m & 1) == 1 {
+ /* odd m, double x to make it even */
+ ix0 += ix0 + ((ix1 & sign) >> 31).0 as i32;
+ ix1 += ix1;
}
- m -= i - 1;
- ix0 |= (ix1 >> (32 - i) as usize).0 as i32;
- ix1 = ix1 << i as usize;
- }
- m -= 1023; /* unbias exponent */
- ix0 = (ix0 & 0x000fffff) | 0x00100000;
- if (m & 1) == 1 {
- /* odd m, double x to make it even */
- ix0 += ix0 + ((ix1 & sign) >> 31).0 as i32;
- ix1 += ix1;
- }
- m >>= 1; /* m = [m/2] */
+ m >>= 1; /* m = [m/2] */
- /* generate sqrt(x) bit by bit */
- ix0 += ix0 + ((ix1 & sign) >> 31).0 as i32;
- ix1 += ix1;
- q = 0; /* [q,q1] = sqrt(x) */
- q1 = Wrapping(0);
- s0 = 0;
- s1 = Wrapping(0);
- r = Wrapping(0x00200000); /* r = moving bit from right to left */
-
- while r != Wrapping(0) {
- t = s0 + r.0 as i32;
- if t <= ix0 {
- s0 = t + r.0 as i32;
- ix0 -= t;
- q += r.0 as i32;
- }
+ /* generate sqrt(x) bit by bit */
ix0 += ix0 + ((ix1 & sign) >> 31).0 as i32;
ix1 += ix1;
- r >>= 1;
- }
+ q = 0; /* [q,q1] = sqrt(x) */
+ q1 = Wrapping(0);
+ s0 = 0;
+ s1 = Wrapping(0);
+ r = Wrapping(0x00200000); /* r = moving bit from right to left */
- r = sign;
- while r != Wrapping(0) {
- t1 = s1 + r;
- t = s0;
- if t < ix0 || (t == ix0 && t1 <= ix1) {
- s1 = t1 + r;
- if (t1 & sign) == sign && (s1 & sign) == Wrapping(0) {
- s0 += 1;
+ while r != Wrapping(0) {
+ t = s0 + r.0 as i32;
+ if t <= ix0 {
+ s0 = t + r.0 as i32;
+ ix0 -= t;
+ q += r.0 as i32;
}
- ix0 -= t;
- if ix1 < t1 {
- ix0 -= 1;
+ ix0 += ix0 + ((ix1 & sign) >> 31).0 as i32;
+ ix1 += ix1;
+ r >>= 1;
+ }
+
+ r = sign;
+ while r != Wrapping(0) {
+ t1 = s1 + r;
+ t = s0;
+ if t < ix0 || (t == ix0 && t1 <= ix1) {
+ s1 = t1 + r;
+ if (t1 & sign) == sign && (s1 & sign) == Wrapping(0) {
+ s0 += 1;
+ }
+ ix0 -= t;
+ if ix1 < t1 {
+ ix0 -= 1;
+ }
+ ix1 -= t1;
+ q1 += r;
}
- ix1 -= t1;
- q1 += r;
+ ix0 += ix0 + ((ix1 & sign) >> 31).0 as i32;
+ ix1 += ix1;
+ r >>= 1;
}
- ix0 += ix0 + ((ix1 & sign) >> 31).0 as i32;
- ix1 += ix1;
- r >>= 1;
- }
- /* use floating add to find out rounding direction */
- if (ix0 as u32 | ix1.0) != 0 {
- z = 1.0 - TINY; /* raise inexact flag */
- if z >= 1.0 {
- z = 1.0 + TINY;
- if q1.0 == 0xffffffff {
- q1 = Wrapping(0);
- q += 1;
- } else if z > 1.0 {
- if q1.0 == 0xfffffffe {
+ /* use floating add to find out rounding direction */
+ if (ix0 as u32 | ix1.0) != 0 {
+ z = 1.0 - TINY; /* raise inexact flag */
+ if z >= 1.0 {
+ z = 1.0 + TINY;
+ if q1.0 == 0xffffffff {
+ q1 = Wrapping(0);
q += 1;
+ } else if z > 1.0 {
+ if q1.0 == 0xfffffffe {
+ q += 1;
+ }
+ q1 += Wrapping(2);
+ } else {
+ q1 += q1 & Wrapping(1);
}
- q1 += Wrapping(2);
- } else {
- q1 += q1 & Wrapping(1);
}
}
+ ix0 = (q >> 1) + 0x3fe00000;
+ ix1 = q1 >> 1;
+ if (q & 1) == 1 {
+ ix1 |= sign;
+ }
+ ix0 += m << 20;
+ f64::from_bits((ix0 as u64) << 32 | ix1.0 as u64)
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+ use core::f64::*;
+
+ #[test]
+ fn sanity_check() {
+ assert_eq!(sqrt(100.0), 10.0);
+ assert_eq!(sqrt(4.0), 2.0);
}
- ix0 = (q >> 1) + 0x3fe00000;
- ix1 = q1 >> 1;
- if (q & 1) == 1 {
- ix1 |= sign;
+
+ /// The spec: https://en.cppreference.com/w/cpp/numeric/math/sqrt
+ #[test]
+ fn spec_tests() {
+ // Not Asserted: FE_INVALID exception is raised if argument is negative.
+ assert!(sqrt(-1.0).is_nan());
+ assert!(sqrt(NAN).is_nan());
+ for f in [0.0, -0.0, INFINITY].iter().copied() {
+ assert_eq!(sqrt(f), f);
+ }
}
- ix0 += m << 20;
- f64::from_bits((ix0 as u64) << 32 | ix1.0 as u64)
}
diff --git a/vendor/libm/src/math/sqrtf.rs b/vendor/libm/src/math/sqrtf.rs
index b9365c617..00b20e578 100644
--- a/vendor/libm/src/math/sqrtf.rs
+++ b/vendor/libm/src/math/sqrtf.rs
@@ -13,9 +13,6 @@
* ====================================================
*/
-const TINY: f32 = 1.0e-30;
-
-#[inline]
#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
pub fn sqrtf(x: f32) -> f32 {
// On wasm32 we know that LLVM's intrinsic will compile to an optimized
@@ -30,83 +27,128 @@ pub fn sqrtf(x: f32) -> f32 {
}
}
}
- let mut z: f32;
- let sign: i32 = 0x80000000u32 as i32;
- let mut ix: i32;
- let mut s: i32;
- let mut q: i32;
- let mut m: i32;
- let mut t: i32;
- let mut i: i32;
- let mut r: u32;
+ #[cfg(target_feature = "sse")]
+ {
+ // Note: This path is unlikely since LLVM will usually have already
+ // optimized sqrt calls into hardware instructions if sse is available,
+ but if someone does end up here they'll appreciate the speed increase.
+ #[cfg(target_arch = "x86")]
+ use core::arch::x86::*;
+ #[cfg(target_arch = "x86_64")]
+ use core::arch::x86_64::*;
+ unsafe {
+ let m = _mm_set_ss(x);
+ let m_sqrt = _mm_sqrt_ss(m);
+ _mm_cvtss_f32(m_sqrt)
+ }
+ }
+ #[cfg(not(target_feature = "sse"))]
+ {
+ const TINY: f32 = 1.0e-30;
- ix = x.to_bits() as i32;
+ let mut z: f32;
+ let sign: i32 = 0x80000000u32 as i32;
+ let mut ix: i32;
+ let mut s: i32;
+ let mut q: i32;
+ let mut m: i32;
+ let mut t: i32;
+ let mut i: i32;
+ let mut r: u32;
- /* take care of Inf and NaN */
- if (ix as u32 & 0x7f800000) == 0x7f800000 {
- return x * x + x; /* sqrt(NaN)=NaN, sqrt(+inf)=+inf, sqrt(-inf)=sNaN */
- }
+ ix = x.to_bits() as i32;
- /* take care of zero */
- if ix <= 0 {
- if (ix & !sign) == 0 {
- return x; /* sqrt(+-0) = +-0 */
+ /* take care of Inf and NaN */
+ if (ix as u32 & 0x7f800000) == 0x7f800000 {
+ return x * x + x; /* sqrt(NaN)=NaN, sqrt(+inf)=+inf, sqrt(-inf)=sNaN */
}
- if ix < 0 {
- return (x - x) / (x - x); /* sqrt(-ve) = sNaN */
+
+ /* take care of zero */
+ if ix <= 0 {
+ if (ix & !sign) == 0 {
+ return x; /* sqrt(+-0) = +-0 */
+ }
+ if ix < 0 {
+ return (x - x) / (x - x); /* sqrt(-ve) = sNaN */
+ }
}
- }
- /* normalize x */
- m = ix >> 23;
- if m == 0 {
- /* subnormal x */
- i = 0;
- while ix & 0x00800000 == 0 {
- ix <<= 1;
- i = i + 1;
+ /* normalize x */
+ m = ix >> 23;
+ if m == 0 {
+ /* subnormal x */
+ i = 0;
+ while ix & 0x00800000 == 0 {
+ ix <<= 1;
+ i = i + 1;
+ }
+ m -= i - 1;
}
- m -= i - 1;
- }
- m -= 127; /* unbias exponent */
- ix = (ix & 0x007fffff) | 0x00800000;
- if m & 1 == 1 {
- /* odd m, double x to make it even */
- ix += ix;
- }
- m >>= 1; /* m = [m/2] */
+ m -= 127; /* unbias exponent */
+ ix = (ix & 0x007fffff) | 0x00800000;
+ if m & 1 == 1 {
+ /* odd m, double x to make it even */
+ ix += ix;
+ }
+ m >>= 1; /* m = [m/2] */
- /* generate sqrt(x) bit by bit */
- ix += ix;
- q = 0;
- s = 0;
- r = 0x01000000; /* r = moving bit from right to left */
+ /* generate sqrt(x) bit by bit */
+ ix += ix;
+ q = 0;
+ s = 0;
+ r = 0x01000000; /* r = moving bit from right to left */
- while r != 0 {
- t = s + r as i32;
- if t <= ix {
- s = t + r as i32;
- ix -= t;
- q += r as i32;
+ while r != 0 {
+ t = s + r as i32;
+ if t <= ix {
+ s = t + r as i32;
+ ix -= t;
+ q += r as i32;
+ }
+ ix += ix;
+ r >>= 1;
}
- ix += ix;
- r >>= 1;
- }
- /* use floating add to find out rounding direction */
- if ix != 0 {
- z = 1.0 - TINY; /* raise inexact flag */
- if z >= 1.0 {
- z = 1.0 + TINY;
- if z > 1.0 {
- q += 2;
- } else {
- q += q & 1;
+ /* use floating add to find out rounding direction */
+ if ix != 0 {
+ z = 1.0 - TINY; /* raise inexact flag */
+ if z >= 1.0 {
+ z = 1.0 + TINY;
+ if z > 1.0 {
+ q += 2;
+ } else {
+ q += q & 1;
+ }
}
}
+
+ ix = (q >> 1) + 0x3f000000;
+ ix += m << 23;
+ f32::from_bits(ix as u32)
+ }
+}
+
+// PowerPC tests are failing on LLVM 13: https://github.com/rust-lang/rust/issues/88520
+#[cfg(not(target_arch = "powerpc64"))]
+#[cfg(test)]
+mod tests {
+ use super::*;
+ use core::f32::*;
+
+ #[test]
+ fn sanity_check() {
+ assert_eq!(sqrtf(100.0), 10.0);
+ assert_eq!(sqrtf(4.0), 2.0);
}
- ix = (q >> 1) + 0x3f000000;
- ix += m << 23;
- f32::from_bits(ix as u32)
+ /// The spec: https://en.cppreference.com/w/cpp/numeric/math/sqrt
+ #[test]
+ fn spec_tests() {
+ // Not Asserted: FE_INVALID exception is raised if argument is negative.
+ assert!(sqrtf(-1.0).is_nan());
+ assert!(sqrtf(NAN).is_nan());
+ for f in [0.0, -0.0, INFINITY].iter().copied() {
+ assert_eq!(sqrtf(f), f);
+ }
+ }
}
diff --git a/vendor/libm/src/math/tan.rs b/vendor/libm/src/math/tan.rs
index e5c94cbb1..5a72f6801 100644
--- a/vendor/libm/src/math/tan.rs
+++ b/vendor/libm/src/math/tan.rs
@@ -39,7 +39,6 @@ use super::{k_tan, rem_pio2};
//
// Accuracy:
// TRIG(x) returns trig(x) nearly rounded
-#[inline]
#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
pub fn tan(x: f64) -> f64 {
let x1p120 = f32::from_bits(0x7b800000); // 0x1p120f === 2 ^ 120
diff --git a/vendor/libm/src/math/tanf.rs b/vendor/libm/src/math/tanf.rs
index c286cdeb4..10de59c39 100644
--- a/vendor/libm/src/math/tanf.rs
+++ b/vendor/libm/src/math/tanf.rs
@@ -24,7 +24,6 @@ const T2_PIO2: f64 = 2. * FRAC_PI_2; /* 0x400921FB, 0x54442D18 */
const T3_PIO2: f64 = 3. * FRAC_PI_2; /* 0x4012D97C, 0x7F3321D2 */
const T4_PIO2: f64 = 4. * FRAC_PI_2; /* 0x401921FB, 0x54442D18 */
-#[inline]
#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
pub fn tanf(x: f32) -> f32 {
let x64 = x as f64;
diff --git a/vendor/libm/src/math/tanh.rs b/vendor/libm/src/math/tanh.rs
index 75d695cf7..980c68554 100644
--- a/vendor/libm/src/math/tanh.rs
+++ b/vendor/libm/src/math/tanh.rs
@@ -4,7 +4,6 @@ use super::expm1;
* = (exp(2*x) - 1)/(exp(2*x) - 1 + 2)
* = (1 - exp(-2*x))/(exp(-2*x) - 1 + 2)
*/
-#[inline]
#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
pub fn tanh(mut x: f64) -> f64 {
let mut uf: f64 = x;
diff --git a/vendor/libm/src/math/tanhf.rs b/vendor/libm/src/math/tanhf.rs
index ac4657b5a..fc94e3ddd 100644
--- a/vendor/libm/src/math/tanhf.rs
+++ b/vendor/libm/src/math/tanhf.rs
@@ -1,6 +1,5 @@
use super::expm1f;
-#[inline]
#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
pub fn tanhf(mut x: f32) -> f32 {
/* x = |x| */
diff --git a/vendor/libm/src/math/tgamma.rs b/vendor/libm/src/math/tgamma.rs
index f8ccf669a..e64eff61f 100644
--- a/vendor/libm/src/math/tgamma.rs
+++ b/vendor/libm/src/math/tgamma.rs
@@ -38,7 +38,7 @@ fn sinpi(mut x: f64) -> f64 {
/* reduce x into [-.25,.25] */
n = (4.0 * x) as isize;
- n = (n + 1) / 2;
+ n = div!(n + 1, 2);
x -= (n as f64) * 0.5;
x *= PI;
@@ -118,18 +118,19 @@ fn s(x: f64) -> f64 {
/* to avoid overflow handle large x differently */
if x < 8.0 {
for i in (0..=N).rev() {
- num = num * x + SNUM[i];
- den = den * x + SDEN[i];
+ num = num * x + i!(SNUM, i);
+ den = den * x + i!(SDEN, i);
}
} else {
for i in 0..=N {
- num = num / x + SNUM[i];
- den = den / x + SDEN[i];
+ num = num / x + i!(SNUM, i);
+ den = den / x + i!(SDEN, i);
}
}
return num / den;
}
+#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
pub fn tgamma(mut x: f64) -> f64 {
let u: u64 = x.to_bits();
let absx: f64;
@@ -157,7 +158,7 @@ pub fn tgamma(mut x: f64) -> f64 {
return 0.0 / 0.0;
}
if x <= FACT.len() as f64 {
- return FACT[(x as usize) - 1];
+ return i!(FACT, (x as usize) - 1);
}
}
diff --git a/vendor/libm/src/math/tgammaf.rs b/vendor/libm/src/math/tgammaf.rs
index a8f161f0c..23e3814f9 100644
--- a/vendor/libm/src/math/tgammaf.rs
+++ b/vendor/libm/src/math/tgammaf.rs
@@ -1,5 +1,6 @@
use super::tgamma;
+#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
pub fn tgammaf(x: f32) -> f32 {
tgamma(x as f64) as f32
}
diff --git a/vendor/libm/src/math/trunc.rs b/vendor/libm/src/math/trunc.rs
index 1ee46fc7d..f7892a2c5 100644
--- a/vendor/libm/src/math/trunc.rs
+++ b/vendor/libm/src/math/trunc.rs
@@ -1,6 +1,5 @@
use core::f64;
-#[inline]
#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
pub fn trunc(x: f64) -> f64 {
// On wasm32 we know that LLVM's intrinsic will compile to an optimized
diff --git a/vendor/libm/src/math/truncf.rs b/vendor/libm/src/math/truncf.rs
index f93383269..20d5b73bd 100644
--- a/vendor/libm/src/math/truncf.rs
+++ b/vendor/libm/src/math/truncf.rs
@@ -1,6 +1,5 @@
use core::f32;
-#[inline]
#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
pub fn truncf(x: f32) -> f32 {
// On wasm32 we know that LLVM's intrinsic will compile to an optimized
@@ -32,6 +31,8 @@ pub fn truncf(x: f32) -> f32 {
f32::from_bits(i)
}
+// PowerPC tests are failing on LLVM 13: https://github.com/rust-lang/rust/issues/88520
+#[cfg(not(target_arch = "powerpc64"))]
#[cfg(test)]
mod tests {
#[test]