Diffstat (limited to 'third_party/rust/ordered-float')
-rw-r--r--  third_party/rust/ordered-float/.cargo-checksum.json | 1
-rw-r--r--  third_party/rust/ordered-float/Cargo.toml | 35
-rw-r--r--  third_party/rust/ordered-float/LICENSE-MIT | 25
-rw-r--r--  third_party/rust/ordered-float/README.md | 29
-rw-r--r--  third_party/rust/ordered-float/src/lib.rs | 758
-rw-r--r--  third_party/rust/ordered-float/tests/test.rs | 532
-rw-r--r--  third_party/rust/ordered-float/tests/test_deprecated_names.rs | 526
7 files changed, 1906 insertions, 0 deletions
diff --git a/third_party/rust/ordered-float/.cargo-checksum.json b/third_party/rust/ordered-float/.cargo-checksum.json
new file mode 100644
index 0000000000..00b28dcb63
--- /dev/null
+++ b/third_party/rust/ordered-float/.cargo-checksum.json
@@ -0,0 +1 @@
+{"files":{"Cargo.toml":"3c58ad023bf0f3648553123f643212ce8aaf812960649099f0839a44ab4cf332","LICENSE-MIT":"f7715d38a3fa1b4ac97c5729740752505a39cb92ee83ab5b102aeb5eaa7cdea4","README.md":"c4aee96bd98f215fe099f498385e4f31319bff37d2415998f8e6ac27bbea3cc5","src/lib.rs":"a0f3650fb55786bdde477a0120e0a5665cca109a8377edc0f0ada93199710dc8","tests/test.rs":"a9d946dc02c92826e4dc0ff31960a76ff9370fa45feae5a0de72c8d020b56c5b","tests/test_deprecated_names.rs":"6f661c27e8b4d625c02202895f220d573e3dccc8cf684c77e754c444403939f7"},"package":"2f0015e9e8e28ee20c581cfbfe47c650cedeb9ed0721090e0b7ebb10b9cdbcc2"} \ No newline at end of file
diff --git a/third_party/rust/ordered-float/Cargo.toml b/third_party/rust/ordered-float/Cargo.toml
new file mode 100644
index 0000000000..980a78894c
--- /dev/null
+++ b/third_party/rust/ordered-float/Cargo.toml
@@ -0,0 +1,35 @@
+# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
+#
+# When uploading crates to the registry Cargo will automatically
+# "normalize" Cargo.toml files for maximal compatibility
+# with all versions of Cargo and also rewrite `path` dependencies
+# to registry (e.g. crates.io) dependencies
+#
+# If you believe there's an error in this file please file an
+# issue against the rust-lang/cargo repository. If you're
+# editing this file be aware that the upstream Cargo.toml
+# will likely look very different (and much more reasonable)
+
+[package]
+name = "ordered-float"
+version = "1.0.1"
+authors = ["Jonathan Reem <jonathan.reem@gmail.com>", "Matt Brubeck <mbrubeck@limpet.net>"]
+description = "Wrappers for total ordering on floats"
+readme = "README.md"
+keywords = ["no_std", "ord", "f64", "f32", "sort"]
+categories = ["science", "rust-patterns", "no-std"]
+license = "MIT"
+repository = "https://github.com/reem/rust-ordered-float"
+[dependencies.num-traits]
+version = "0.2"
+
+[dependencies.serde]
+version = "1.0"
+optional = true
+default-features = false
+[dev-dependencies.serde_test]
+version = "1.0"
+
+[features]
+default = ["std"]
+std = []
diff --git a/third_party/rust/ordered-float/LICENSE-MIT b/third_party/rust/ordered-float/LICENSE-MIT
new file mode 100644
index 0000000000..c8e0f5ec7a
--- /dev/null
+++ b/third_party/rust/ordered-float/LICENSE-MIT
@@ -0,0 +1,25 @@
+Copyright (c) 2015 Jonathan Reem
+
+Permission is hereby granted, free of charge, to any
+person obtaining a copy of this software and associated
+documentation files (the "Software"), to deal in the
+Software without restriction, including without
+limitation the rights to use, copy, modify, merge,
+publish, distribute, sublicense, and/or sell copies of
+the Software, and to permit persons to whom the Software
+is furnished to do so, subject to the following
+conditions:
+
+The above copyright notice and this permission notice
+shall be included in all copies or substantial portions
+of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
+ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
+TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
+PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
+IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+DEALINGS IN THE SOFTWARE.
diff --git a/third_party/rust/ordered-float/README.md b/third_party/rust/ordered-float/README.md
new file mode 100644
index 0000000000..b5e33021bf
--- /dev/null
+++ b/third_party/rust/ordered-float/README.md
@@ -0,0 +1,29 @@
+# Ordered Floats
+
+Provides several wrapper types for `Ord` and `Eq` implementations on floating point values (`f32` and `f64`).
+
+## Usage
+
+Use the crates.io repository; add this to your `Cargo.toml` along
+with the rest of your dependencies:
+
+```toml
+[dependencies]
+ordered-float = "1.0"
+```
+
+See the [API documentation](https://docs.rs/ordered-float) for further details.
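+
+For example, a minimal usage sketch (both wrapper types are defined in `src/lib.rs` below):
+
+```rust
+extern crate ordered_float;
+
+use ordered_float::{NotNan, OrderedFloat};
+use std::collections::HashMap;
+
+fn main() {
+    // OrderedFloat sorts NaN as greater than every other value.
+    let mut v = vec![OrderedFloat(3.0f64), OrderedFloat(std::f64::NAN), OrderedFloat(1.0)];
+    v.sort();
+    assert_eq!(v[0], OrderedFloat(1.0));
+
+    // NotNan refuses to hold NaN, so it can safely be used as a HashMap key.
+    let key = NotNan::new(2.5f64).unwrap();
+    let mut m = HashMap::new();
+    m.insert(key, "two and a half");
+    assert_eq!(m[&key], "two and a half");
+}
+```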
+
+## no_std
+
+To use `ordered_float` without requiring the Rust standard library, disable
+the default `std` feature:
+
+```toml
+[dependencies]
+ordered-float = { version = "1.0", default-features = false }
+```
+
+## License
+
+MIT
diff --git a/third_party/rust/ordered-float/src/lib.rs b/third_party/rust/ordered-float/src/lib.rs
new file mode 100644
index 0000000000..38a849fcc7
--- /dev/null
+++ b/third_party/rust/ordered-float/src/lib.rs
@@ -0,0 +1,758 @@
+#![no_std]
+#![cfg_attr(test, deny(warnings))]
+#![deny(missing_docs)]
+
+//! Wrappers for total order on Floats.
+
+extern crate num_traits;
+#[cfg(feature = "std")] extern crate std;
+
+use core::cmp::Ordering;
+use core::ops::{Add, AddAssign, Deref, DerefMut, Div, DivAssign, Mul, MulAssign, Neg, Rem,
+ RemAssign, Sub, SubAssign};
+use core::hash::{Hash, Hasher};
+use core::fmt;
+use core::mem;
+use core::hint::unreachable_unchecked;
+use num_traits::{Bounded, Float, FromPrimitive, Num, NumCast, One, Signed, ToPrimitive,
+ Zero};
+
+/// A wrapper around Floats providing an implementation of Ord and Hash.
+///
+/// A NaN value cannot be stored in this type.
+#[deprecated(since = "0.6.0", note = "renamed to `NotNan`")]
+pub type NotNaN<T> = NotNan<T>;
+
+/// An error indicating an attempt to construct NotNan from a NaN
+#[deprecated(since = "0.6.0", note = "renamed to `FloatIsNan`")]
+pub type FloatIsNaN = FloatIsNan;
+
+// masks for the parts of the IEEE 754 float
+const SIGN_MASK: u64 = 0x8000000000000000u64;
+const EXP_MASK: u64 = 0x7ff0000000000000u64;
+const MAN_MASK: u64 = 0x000fffffffffffffu64;
+
+// canonical raw bit patterns (for hashing)
+const CANONICAL_NAN_BITS: u64 = 0x7ff8000000000000u64;
+const CANONICAL_ZERO_BITS: u64 = 0x0u64;
+
+/// A wrapper around Floats providing an implementation of Ord and Hash.
+///
+/// NaN is sorted as *greater* than all other values and *equal*
+/// to itself, in contradiction with the IEEE standard.
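+///
+/// For example, a short sketch of the resulting sort order (NaN sorts last):
+///
+/// ```
+/// use ordered_float::OrderedFloat;
+///
+/// let nan = ::std::f32::NAN;
+/// let mut v = [OrderedFloat(nan), OrderedFloat(2.0), OrderedFloat(1.0)];
+/// v.sort();
+/// assert_eq!(v, [OrderedFloat(1.0), OrderedFloat(2.0), OrderedFloat(nan)]);
+/// ```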
+#[derive(Debug, Default, Clone, Copy)]
+#[repr(transparent)]
+pub struct OrderedFloat<T: Float>(pub T);
+
+impl<T: Float> OrderedFloat<T> {
+ /// Get the value out.
+ pub fn into_inner(self) -> T {
+ let OrderedFloat(val) = self;
+ val
+ }
+}
+
+impl<T: Float> AsRef<T> for OrderedFloat<T> {
+ fn as_ref(&self) -> &T {
+ let OrderedFloat(ref val) = *self;
+ val
+ }
+}
+
+impl<T: Float> AsMut<T> for OrderedFloat<T> {
+ fn as_mut(&mut self) -> &mut T {
+ let OrderedFloat(ref mut val) = *self;
+ val
+ }
+}
+
+impl<T: Float> PartialOrd for OrderedFloat<T> {
+ fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
+ Some(self.cmp(other))
+ }
+}
+
+impl<T: Float> Ord for OrderedFloat<T> {
+ fn cmp(&self, other: &Self) -> Ordering {
+ let lhs = self.as_ref();
+ let rhs = other.as_ref();
+ match lhs.partial_cmp(&rhs) {
+ Some(ordering) => ordering,
+ None => {
+ if lhs.is_nan() {
+ if rhs.is_nan() {
+ Ordering::Equal
+ } else {
+ Ordering::Greater
+ }
+ } else {
+ Ordering::Less
+ }
+ }
+ }
+ }
+}
+
+impl<T: Float> PartialEq for OrderedFloat<T> {
+ fn eq(&self, other: &OrderedFloat<T>) -> bool {
+ if self.as_ref().is_nan() {
+ other.as_ref().is_nan()
+ } else if other.as_ref().is_nan() {
+ false
+ } else {
+ self.as_ref() == other.as_ref()
+ }
+ }
+}
+
+impl<T: Float> Hash for OrderedFloat<T> {
+ fn hash<H: Hasher>(&self, state: &mut H) {
+ if self.is_nan() {
+ // normalize to one representation of NaN
+ hash_float(&T::nan(), state)
+ } else {
+ hash_float(self.as_ref(), state)
+ }
+ }
+}
+
+impl<T: Float + fmt::Display> fmt::Display for OrderedFloat<T> {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ self.as_ref().fmt(f)
+ }
+}
+
+impl Into<f32> for OrderedFloat<f32> {
+ fn into(self) -> f32 {
+ self.into_inner()
+ }
+}
+
+impl Into<f64> for OrderedFloat<f64> {
+ fn into(self) -> f64 {
+ self.into_inner()
+ }
+}
+
+impl<T: Float> From<T> for OrderedFloat<T> {
+ fn from(val: T) -> Self {
+ OrderedFloat(val)
+ }
+}
+
+impl<T: Float> Deref for OrderedFloat<T> {
+ type Target = T;
+
+ fn deref(&self) -> &Self::Target {
+ self.as_ref()
+ }
+}
+
+impl<T: Float> DerefMut for OrderedFloat<T> {
+ fn deref_mut(&mut self) -> &mut Self::Target {
+ self.as_mut()
+ }
+}
+
+impl<T: Float> Eq for OrderedFloat<T> {}
+
+impl<T: Float> Bounded for OrderedFloat<T> {
+ fn min_value() -> Self {
+ OrderedFloat(T::min_value())
+ }
+
+ fn max_value() -> Self {
+ OrderedFloat(T::max_value())
+ }
+}
+
+/// A wrapper around Floats providing an implementation of Ord and Hash.
+///
+/// A NaN value cannot be stored in this type.
+#[derive(PartialOrd, PartialEq, Debug, Default, Clone, Copy)]
+#[repr(transparent)]
+pub struct NotNan<T: Float>(T);
+
+impl<T: Float> NotNan<T> {
+ /// Create a NotNan value.
+ ///
+ /// Returns Err if val is NaN
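+    ///
+    /// A short sketch of both outcomes:
+    ///
+    /// ```
+    /// use ordered_float::NotNan;
+    ///
+    /// assert!(NotNan::new(1.0f64).is_ok());
+    /// assert!(NotNan::new(::std::f64::NAN).is_err());
+    /// ```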
+ pub fn new(val: T) -> Result<Self, FloatIsNan> {
+ match val {
+ ref val if val.is_nan() => Err(FloatIsNan),
+ val => Ok(NotNan(val)),
+ }
+ }
+
+    /// Create a NotNan value from a value that is guaranteed not to be NaN.
+    ///
+    /// Behaviour is undefined if `val` is NaN.
+ pub unsafe fn unchecked_new(val: T) -> Self {
+ debug_assert!(!val.is_nan());
+ NotNan(val)
+ }
+
+ /// Get the value out.
+ pub fn into_inner(self) -> T {
+ let NotNan(val) = self;
+ val
+ }
+}
+
+impl<T: Float> AsRef<T> for NotNan<T> {
+ fn as_ref(&self) -> &T {
+ let NotNan(ref val) = *self;
+ val
+ }
+}
+
+impl<T: Float> Ord for NotNan<T> {
+ fn cmp(&self, other: &NotNan<T>) -> Ordering {
+ match self.partial_cmp(&other) {
+ Some(ord) => ord,
+ None => unsafe { unreachable_unchecked() },
+ }
+ }
+}
+
+impl<T: Float> Hash for NotNan<T> {
+ fn hash<H: Hasher>(&self, state: &mut H) {
+ hash_float(self.as_ref(), state)
+ }
+}
+
+impl<T: Float + fmt::Display> fmt::Display for NotNan<T> {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ self.as_ref().fmt(f)
+ }
+}
+
+impl Into<f32> for NotNan<f32> {
+ fn into(self) -> f32 {
+ self.into_inner()
+ }
+}
+
+impl Into<f64> for NotNan<f64> {
+ fn into(self) -> f64 {
+ self.into_inner()
+ }
+}
+
+/// Creates a NotNan value from a Float.
+///
+/// Panics if the provided value is NaN.
+impl<T: Float> From<T> for NotNan<T> {
+ fn from(v: T) -> Self {
+ assert!(!v.is_nan());
+ NotNan(v)
+ }
+}
+
+impl<T: Float> Deref for NotNan<T> {
+ type Target = T;
+
+ fn deref(&self) -> &Self::Target {
+ self.as_ref()
+ }
+}
+
+impl<T: Float + PartialEq> Eq for NotNan<T> {}
+
+/// Adds two NotNans.
+///
+/// Panics if the computation results in NaN
+impl<T: Float> Add for NotNan<T> {
+ type Output = Self;
+
+ fn add(self, other: Self) -> Self {
+ NotNan::new(self.0 + other.0).expect("Addition resulted in NaN")
+ }
+}
+
+/// Adds a float directly.
+///
+/// Panics if the provided value is NaN or the computation results in NaN
+impl<T: Float> Add<T> for NotNan<T> {
+ type Output = Self;
+
+ fn add(self, other: T) -> Self {
+ assert!(!other.is_nan());
+ NotNan::new(self.0 + other).expect("Addition resulted in NaN")
+ }
+}
+
+impl AddAssign for NotNan<f64> {
+ fn add_assign(&mut self, other: Self) {
+ self.0 += other.0;
+ assert!(!self.0.is_nan(), "Addition resulted in NaN")
+ }
+}
+
+impl AddAssign for NotNan<f32> {
+ fn add_assign(&mut self, other: Self) {
+ self.0 += other.0;
+ assert!(!self.0.is_nan(), "Addition resulted in NaN")
+ }
+}
+
+/// Adds a float directly.
+///
+/// Panics if the provided value is NaN or the computation results in NaN
+impl AddAssign<f64> for NotNan<f64> {
+ fn add_assign(&mut self, other: f64) {
+ assert!(!other.is_nan());
+ self.0 += other;
+ assert!(!self.0.is_nan(), "Addition resulted in NaN")
+ }
+}
+
+/// Adds a float directly.
+///
+/// Panics if the provided value is NaN or the computation results in NaN
+impl AddAssign<f32> for NotNan<f32> {
+ fn add_assign(&mut self, other: f32) {
+ assert!(!other.is_nan());
+ self.0 += other;
+ assert!(!self.0.is_nan(), "Addition resulted in NaN")
+ }
+}
+
+impl<T: Float> Sub for NotNan<T> {
+ type Output = Self;
+
+ fn sub(self, other: Self) -> Self {
+ NotNan::new(self.0 - other.0).expect("Subtraction resulted in NaN")
+ }
+}
+
+/// Subtracts a float directly.
+///
+/// Panics if the provided value is NaN or the computation results in NaN
+impl<T: Float> Sub<T> for NotNan<T> {
+ type Output = Self;
+
+ fn sub(self, other: T) -> Self {
+ assert!(!other.is_nan());
+ NotNan::new(self.0 - other).expect("Subtraction resulted in NaN")
+ }
+}
+
+impl SubAssign for NotNan<f64> {
+ fn sub_assign(&mut self, other: Self) {
+ self.0 -= other.0;
+ assert!(!self.0.is_nan(), "Subtraction resulted in NaN")
+ }
+}
+
+impl SubAssign for NotNan<f32> {
+ fn sub_assign(&mut self, other: Self) {
+ self.0 -= other.0;
+ assert!(!self.0.is_nan(), "Subtraction resulted in NaN")
+ }
+}
+
+/// Subtracts a float directly.
+///
+/// Panics if the provided value is NaN or the computation results in NaN
+impl SubAssign<f64> for NotNan<f64> {
+ fn sub_assign(&mut self, other: f64) {
+ assert!(!other.is_nan());
+ self.0 -= other;
+ assert!(!self.0.is_nan(), "Subtraction resulted in NaN")
+ }
+}
+
+/// Subtracts a float directly.
+///
+/// Panics if the provided value is NaN or the computation results in NaN
+impl SubAssign<f32> for NotNan<f32> {
+ fn sub_assign(&mut self, other: f32) {
+ assert!(!other.is_nan());
+ self.0 -= other;
+ assert!(!self.0.is_nan(), "Subtraction resulted in NaN")
+ }
+}
+
+impl<T: Float> Mul for NotNan<T> {
+ type Output = Self;
+
+ fn mul(self, other: Self) -> Self {
+ NotNan::new(self.0 * other.0).expect("Multiplication resulted in NaN")
+ }
+}
+
+/// Multiplies a float directly.
+///
+/// Panics if the provided value is NaN or the computation results in NaN
+impl<T: Float> Mul<T> for NotNan<T> {
+ type Output = Self;
+
+ fn mul(self, other: T) -> Self {
+ assert!(!other.is_nan());
+ NotNan::new(self.0 * other).expect("Multiplication resulted in NaN")
+ }
+}
+
+impl MulAssign for NotNan<f64> {
+ fn mul_assign(&mut self, other: Self) {
+ self.0 *= other.0;
+ assert!(!self.0.is_nan(), "Multiplication resulted in NaN")
+ }
+}
+
+impl MulAssign for NotNan<f32> {
+ fn mul_assign(&mut self, other: Self) {
+ self.0 *= other.0;
+ assert!(!self.0.is_nan(), "Multiplication resulted in NaN")
+ }
+}
+
+/// Multiplies a float directly.
+///
+/// Panics if the provided value is NaN or the computation results in NaN
+impl MulAssign<f64> for NotNan<f64> {
+    fn mul_assign(&mut self, other: f64) {
+        assert!(!other.is_nan());
+        self.0 *= other;
+        assert!(!self.0.is_nan(), "Multiplication resulted in NaN")
+    }
+}
+
+/// Multiplies a float directly.
+///
+/// Panics if the provided value is NaN or the computation results in NaN
+impl MulAssign<f32> for NotNan<f32> {
+ fn mul_assign(&mut self, other: f32) {
+ assert!(!other.is_nan());
+ self.0 *= other;
+ assert!(!self.0.is_nan(), "Multiplication resulted in NaN")
+ }
+}
+
+impl<T: Float> Div for NotNan<T> {
+ type Output = Self;
+
+ fn div(self, other: Self) -> Self {
+ NotNan::new(self.0 / other.0).expect("Division resulted in NaN")
+ }
+}
+
+/// Divides a float directly.
+///
+/// Panics if the provided value is NaN or the computation results in NaN
+impl<T: Float> Div<T> for NotNan<T> {
+ type Output = Self;
+
+ fn div(self, other: T) -> Self {
+ assert!(!other.is_nan());
+ NotNan::new(self.0 / other).expect("Division resulted in NaN")
+ }
+}
+
+impl DivAssign for NotNan<f64> {
+ fn div_assign(&mut self, other: Self) {
+ self.0 /= other.0;
+ assert!(!self.0.is_nan(), "Division resulted in NaN")
+ }
+}
+
+impl DivAssign for NotNan<f32> {
+ fn div_assign(&mut self, other: Self) {
+ self.0 /= other.0;
+ assert!(!self.0.is_nan(), "Division resulted in NaN")
+ }
+}
+
+/// Divides a float directly.
+///
+/// Panics if the provided value is NaN or the computation results in NaN
+impl DivAssign<f64> for NotNan<f64> {
+ fn div_assign(&mut self, other: f64) {
+ assert!(!other.is_nan());
+ self.0 /= other;
+ assert!(!self.0.is_nan(), "Division resulted in NaN")
+ }
+}
+
+/// Divides a float directly.
+///
+/// Panics if the provided value is NaN or the computation results in NaN
+impl DivAssign<f32> for NotNan<f32> {
+ fn div_assign(&mut self, other: f32) {
+ assert!(!other.is_nan());
+ self.0 /= other;
+ assert!(!self.0.is_nan(), "Division resulted in NaN")
+ }
+}
+
+impl<T: Float> Rem for NotNan<T> {
+ type Output = Self;
+
+ fn rem(self, other: Self) -> Self {
+ NotNan::new(self.0 % other.0).expect("Rem resulted in NaN")
+ }
+}
+
+/// Calculates `%` with a float directly.
+///
+/// Panics if the provided value is NaN or the computation results in NaN
+impl<T: Float> Rem<T> for NotNan<T> {
+ type Output = Self;
+
+ fn rem(self, other: T) -> Self {
+ assert!(!other.is_nan());
+ NotNan::new(self.0 % other).expect("Rem resulted in NaN")
+ }
+}
+
+impl RemAssign for NotNan<f64> {
+ fn rem_assign(&mut self, other: Self) {
+ self.0 %= other.0;
+ assert!(!self.0.is_nan(), "Rem resulted in NaN")
+ }
+}
+
+impl RemAssign for NotNan<f32> {
+ fn rem_assign(&mut self, other: Self) {
+ self.0 %= other.0;
+ assert!(!self.0.is_nan(), "Rem resulted in NaN")
+ }
+}
+
+/// Calculates `%=` with a float directly.
+///
+/// Panics if the provided value is NaN or the computation results in NaN
+impl RemAssign<f64> for NotNan<f64> {
+ fn rem_assign(&mut self, other: f64) {
+ assert!(!other.is_nan());
+ self.0 %= other;
+ assert!(!self.0.is_nan(), "Rem resulted in NaN")
+ }
+}
+
+/// Calculates `%=` with a float directly.
+///
+/// Panics if the provided value is NaN or the computation results in NaN
+impl RemAssign<f32> for NotNan<f32> {
+ fn rem_assign(&mut self, other: f32) {
+ assert!(!other.is_nan());
+ self.0 %= other;
+ assert!(!self.0.is_nan(), "Rem resulted in NaN")
+ }
+}
+
+impl<T: Float> Neg for NotNan<T> {
+ type Output = Self;
+
+ fn neg(self) -> Self {
+ NotNan::new(-self.0).expect("Negation resulted in NaN")
+ }
+}
+
+/// An error indicating an attempt to construct NotNan from a NaN
+#[derive(Copy, Clone, PartialEq, Eq, Debug)]
+pub struct FloatIsNan;
+
+#[cfg(feature = "std")]
+impl std::error::Error for FloatIsNan {
+ fn description(&self) -> &str {
+ "NotNan constructed with NaN"
+ }
+}
+
+impl fmt::Display for FloatIsNan {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ write!(f, "NotNan constructed with NaN")
+ }
+}
+
+#[cfg(feature = "std")]
+impl Into<std::io::Error> for FloatIsNan {
+ fn into(self) -> std::io::Error {
+ std::io::Error::new(std::io::ErrorKind::InvalidInput, self)
+ }
+}
+
+#[inline]
+fn hash_float<F: Float, H: Hasher>(f: &F, state: &mut H) {
+ raw_double_bits(f).hash(state);
+}
+
+#[inline]
+fn raw_double_bits<F: Float>(f: &F) -> u64 {
+ if f.is_nan() {
+ return CANONICAL_NAN_BITS;
+ }
+
+ let (man, exp, sign) = f.integer_decode();
+ if man == 0 {
+ return CANONICAL_ZERO_BITS;
+ }
+
+ let exp_u64 = unsafe { mem::transmute::<i16, u16>(exp) } as u64;
+ let sign_u64 = if sign > 0 { 1u64 } else { 0u64 };
+ (man & MAN_MASK) | ((exp_u64 << 52) & EXP_MASK) | ((sign_u64 << 63) & SIGN_MASK)
+}
+
+impl<T: Float> Zero for NotNan<T> {
+ fn zero() -> Self { NotNan(T::zero()) }
+
+ fn is_zero(&self) -> bool { self.0.is_zero() }
+}
+
+impl<T: Float> One for NotNan<T> {
+ fn one() -> Self { NotNan(T::one()) }
+}
+
+impl<T: Float> Bounded for NotNan<T> {
+ fn min_value() -> Self {
+ NotNan(T::min_value())
+ }
+
+ fn max_value() -> Self {
+ NotNan(T::max_value())
+ }
+}
+
+impl<T: Float + FromPrimitive> FromPrimitive for NotNan<T> {
+ fn from_i64(n: i64) -> Option<Self> { T::from_i64(n).and_then(|n| NotNan::new(n).ok()) }
+ fn from_u64(n: u64) -> Option<Self> { T::from_u64(n).and_then(|n| NotNan::new(n).ok()) }
+
+ fn from_isize(n: isize) -> Option<Self> { T::from_isize(n).and_then(|n| NotNan::new(n).ok()) }
+ fn from_i8(n: i8) -> Option<Self> { T::from_i8(n).and_then(|n| NotNan::new(n).ok()) }
+ fn from_i16(n: i16) -> Option<Self> { T::from_i16(n).and_then(|n| NotNan::new(n).ok()) }
+ fn from_i32(n: i32) -> Option<Self> { T::from_i32(n).and_then(|n| NotNan::new(n).ok()) }
+ fn from_usize(n: usize) -> Option<Self> { T::from_usize(n).and_then(|n| NotNan::new(n).ok()) }
+ fn from_u8(n: u8) -> Option<Self> { T::from_u8(n).and_then(|n| NotNan::new(n).ok()) }
+ fn from_u16(n: u16) -> Option<Self> { T::from_u16(n).and_then(|n| NotNan::new(n).ok()) }
+ fn from_u32(n: u32) -> Option<Self> { T::from_u32(n).and_then(|n| NotNan::new(n).ok()) }
+ fn from_f32(n: f32) -> Option<Self> { T::from_f32(n).and_then(|n| NotNan::new(n).ok()) }
+ fn from_f64(n: f64) -> Option<Self> { T::from_f64(n).and_then(|n| NotNan::new(n).ok()) }
+}
+
+impl<T: Float> ToPrimitive for NotNan<T> {
+ fn to_i64(&self) -> Option<i64> { self.0.to_i64() }
+ fn to_u64(&self) -> Option<u64> { self.0.to_u64() }
+
+ fn to_isize(&self) -> Option<isize> { self.0.to_isize() }
+ fn to_i8(&self) -> Option<i8> { self.0.to_i8() }
+ fn to_i16(&self) -> Option<i16> { self.0.to_i16() }
+ fn to_i32(&self) -> Option<i32> { self.0.to_i32() }
+ fn to_usize(&self) -> Option<usize> { self.0.to_usize() }
+ fn to_u8(&self) -> Option<u8> { self.0.to_u8() }
+ fn to_u16(&self) -> Option<u16> { self.0.to_u16() }
+ fn to_u32(&self) -> Option<u32> { self.0.to_u32() }
+ fn to_f32(&self) -> Option<f32> { self.0.to_f32() }
+ fn to_f64(&self) -> Option<f64> { self.0.to_f64() }
+}
+
+/// An error indicating a parse error from a string for `NotNan`.
+#[derive(Copy, Clone, PartialEq, Eq, Debug)]
+pub enum ParseNotNanError<E> {
+ /// A plain parse error from the underlying float type.
+ ParseFloatError(E),
+ /// The parsed float value resulted in a NaN.
+ IsNaN,
+}
+
+#[cfg(feature = "std")]
+impl<E: fmt::Debug> std::error::Error for ParseNotNanError<E> {
+ fn description(&self) -> &str {
+ return "Error parsing a not-NaN floating point value";
+ }
+}
+
+impl<E: fmt::Debug> fmt::Display for ParseNotNanError<E> {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ <Self as fmt::Debug>::fmt(self, f)
+ }
+}
+
+impl<T: Float> Num for NotNan<T> {
+ type FromStrRadixErr = ParseNotNanError<T::FromStrRadixErr>;
+
+ fn from_str_radix(src: &str, radix: u32) -> Result<Self, Self::FromStrRadixErr> {
+ T::from_str_radix(src, radix)
+ .map_err(|err| ParseNotNanError::ParseFloatError(err))
+ .and_then(|n| NotNan::new(n).map_err(|_| ParseNotNanError::IsNaN))
+ }
+}
+
+impl<T: Float + Signed> Signed for NotNan<T> {
+ fn abs(&self) -> Self { NotNan(self.0.abs()) }
+
+ fn abs_sub(&self, other: &Self) -> Self {
+ NotNan::new(self.0.abs_sub(other.0)).expect("Subtraction resulted in NaN")
+ }
+
+ fn signum(&self) -> Self { NotNan(self.0.signum()) }
+ fn is_positive(&self) -> bool { self.0.is_positive() }
+ fn is_negative(&self) -> bool { self.0.is_negative() }
+}
+
+impl<T: Float> NumCast for NotNan<T> {
+ fn from<F: ToPrimitive>(n: F) -> Option<Self> {
+ T::from(n).and_then(|n| NotNan::new(n).ok())
+ }
+}
+
+#[cfg(feature = "serde")]
+mod impl_serde {
+ extern crate serde;
+ use self::serde::{Serialize, Serializer, Deserialize, Deserializer};
+ use self::serde::de::{Error, Unexpected};
+ use super::{OrderedFloat, NotNan};
+ use num_traits::Float;
+ use core::f64;
+
+ #[cfg(test)]
+ extern crate serde_test;
+ #[cfg(test)]
+ use self::serde_test::{Token, assert_tokens, assert_de_tokens_error};
+
+ impl<T: Float + Serialize> Serialize for OrderedFloat<T> {
+ fn serialize<S: Serializer>(&self, s: S) -> Result<S::Ok, S::Error> {
+ self.0.serialize(s)
+ }
+ }
+
+ impl<'de, T: Float + Deserialize<'de>> Deserialize<'de> for OrderedFloat<T> {
+ fn deserialize<D: Deserializer<'de>>(d: D) -> Result<Self, D::Error> {
+ T::deserialize(d).map(OrderedFloat)
+ }
+ }
+
+ impl<T: Float + Serialize> Serialize for NotNan<T> {
+ fn serialize<S: Serializer>(&self, s: S) -> Result<S::Ok, S::Error> {
+ self.0.serialize(s)
+ }
+ }
+
+ impl<'de, T: Float + Deserialize<'de>> Deserialize<'de> for NotNan<T> {
+ fn deserialize<D: Deserializer<'de>>(d: D) -> Result<Self, D::Error> {
+ let float = T::deserialize(d)?;
+ NotNan::new(float).map_err(|_| {
+ Error::invalid_value(Unexpected::Float(f64::NAN), &"float (but not NaN)")
+ })
+ }
+ }
+
+ #[test]
+ fn test_ordered_float() {
+ let float = OrderedFloat(1.0f64);
+ assert_tokens(&float, &[Token::F64(1.0)]);
+ }
+
+ #[test]
+ fn test_not_nan() {
+ let float = NotNan(1.0f64);
+ assert_tokens(&float, &[Token::F64(1.0)]);
+ }
+
+ #[test]
+ fn test_fail_on_nan() {
+ assert_de_tokens_error::<NotNan<f64>>(
+ &[Token::F64(f64::NAN)],
+ "invalid value: floating point `NaN`, expected float (but not NaN)");
+ }
+}
diff --git a/third_party/rust/ordered-float/tests/test.rs b/third_party/rust/ordered-float/tests/test.rs
new file mode 100644
index 0000000000..195d76af5e
--- /dev/null
+++ b/third_party/rust/ordered-float/tests/test.rs
@@ -0,0 +1,532 @@
+extern crate num_traits;
+extern crate ordered_float;
+
+pub use ordered_float::*;
+pub use num_traits::{Bounded, Float, FromPrimitive, Num, One, Signed, ToPrimitive, Zero};
+pub use std::cmp::Ordering::*;
+pub use std::{f32, f64, panic};
+
+pub use std::collections::hash_map::RandomState;
+pub use std::collections::HashSet;
+pub use std::hash::*;
+
+#[test]
+fn ordered_f32_compare_regular_floats() {
+ assert_eq!(OrderedFloat(7.0f32).cmp(&OrderedFloat(7.0)), Equal);
+ assert_eq!(OrderedFloat(8.0f32).cmp(&OrderedFloat(7.0)), Greater);
+ assert_eq!(OrderedFloat(4.0f32).cmp(&OrderedFloat(7.0)), Less);
+}
+
+#[test]
+fn ordered_f32_compare_regular_floats_op() {
+ assert!(OrderedFloat(7.0f32) == OrderedFloat(7.0));
+ assert!(OrderedFloat(7.0f32) <= OrderedFloat(7.0));
+ assert!(OrderedFloat(7.0f32) >= OrderedFloat(7.0));
+ assert!(OrderedFloat(8.0f32) > OrderedFloat(7.0));
+ assert!(OrderedFloat(8.0f32) >= OrderedFloat(7.0));
+ assert!(OrderedFloat(4.0f32) < OrderedFloat(7.0));
+ assert!(OrderedFloat(4.0f32) <= OrderedFloat(7.0));
+}
+
+#[test]
+fn ordered_f32_compare_nan() {
+ let f32_nan: f32 = Float::nan();
+ assert_eq!(OrderedFloat(f32_nan).cmp(&OrderedFloat(Float::nan())), Equal);
+ assert_eq!(OrderedFloat(f32_nan).cmp(&OrderedFloat(-100000.0f32)), Greater);
+ assert_eq!(OrderedFloat(-100.0f32).cmp(&OrderedFloat(Float::nan())), Less);
+}
+
+#[test]
+fn ordered_f32_compare_nan_op() {
+ let f32_nan: OrderedFloat<f32> = OrderedFloat(Float::nan());
+ assert!(f32_nan == f32_nan);
+ assert!(f32_nan <= f32_nan);
+ assert!(f32_nan >= f32_nan);
+ assert!(f32_nan > OrderedFloat(-100000.0f32));
+ assert!(f32_nan >= OrderedFloat(-100000.0f32));
+ assert!(OrderedFloat(-100.0f32) < f32_nan);
+ assert!(OrderedFloat(-100.0f32) <= f32_nan);
+ assert!(f32_nan > OrderedFloat(Float::infinity()));
+ assert!(f32_nan >= OrderedFloat(Float::infinity()));
+ assert!(f32_nan > OrderedFloat(Float::neg_infinity()));
+ assert!(f32_nan >= OrderedFloat(Float::neg_infinity()));
+}
+
+#[test]
+fn ordered_f64_compare_regular_floats() {
+ assert_eq!(OrderedFloat(7.0f64).cmp(&OrderedFloat(7.0)), Equal);
+ assert_eq!(OrderedFloat(8.0f64).cmp(&OrderedFloat(7.0)), Greater);
+ assert_eq!(OrderedFloat(4.0f64).cmp(&OrderedFloat(7.0)), Less);
+}
+
+#[test]
+fn not_nan32_zero() {
+ assert_eq!(NotNan::<f32>::zero(), NotNan::from(0.0f32));
+ assert!(NotNan::<f32>::zero().is_zero());
+}
+
+#[test]
+fn not_nan32_one() {
+ assert_eq!(NotNan::<f32>::one(), NotNan::from(1.0f32))
+}
+
+#[test]
+fn not_nan32_bounded() {
+ assert_eq!(NotNan::<f32>::min_value(), NotNan::from(<f32 as Bounded>::min_value()));
+ assert_eq!(NotNan::<f32>::max_value(), NotNan::from(<f32 as Bounded>::max_value()));
+}
+
+#[test]
+fn not_nan32_from_primitive() {
+ assert_eq!(NotNan::<f32>::from_i8(42i8), Some(NotNan::from(42.0)));
+ assert_eq!(NotNan::<f32>::from_u8(42u8), Some(NotNan::from(42.0)));
+ assert_eq!(NotNan::<f32>::from_i16(42i16), Some(NotNan::from(42.0)));
+ assert_eq!(NotNan::<f32>::from_u16(42u16), Some(NotNan::from(42.0)));
+ assert_eq!(NotNan::<f32>::from_i32(42i32), Some(NotNan::from(42.0)));
+ assert_eq!(NotNan::<f32>::from_u32(42u32), Some(NotNan::from(42.0)));
+ assert_eq!(NotNan::<f32>::from_i64(42i64), Some(NotNan::from(42.0)));
+ assert_eq!(NotNan::<f32>::from_u64(42u64), Some(NotNan::from(42.0)));
+ assert_eq!(NotNan::<f32>::from_isize(42isize), Some(NotNan::from(42.0)));
+ assert_eq!(NotNan::<f32>::from_usize(42usize), Some(NotNan::from(42.0)));
+ assert_eq!(NotNan::<f32>::from_f32(42f32), Some(NotNan::from(42.0)));
+ assert_eq!(NotNan::<f32>::from_f32(42f32), Some(NotNan::from(42.0)));
+ assert_eq!(NotNan::<f32>::from_f64(42f64), Some(NotNan::from(42.0)));
+ assert_eq!(NotNan::<f32>::from_f64(42f64), Some(NotNan::from(42.0)));
+ assert_eq!(NotNan::<f32>::from_f32(Float::nan()), None);
+ assert_eq!(NotNan::<f32>::from_f64(Float::nan()), None);
+}
+
+#[test]
+fn not_nan32_to_primitive() {
+ let x = NotNan::from(42.0f32);
+ assert_eq!(x.to_u8(), Some(42u8));
+ assert_eq!(x.to_i8(), Some(42i8));
+ assert_eq!(x.to_u16(), Some(42u16));
+ assert_eq!(x.to_i16(), Some(42i16));
+ assert_eq!(x.to_u32(), Some(42u32));
+ assert_eq!(x.to_i32(), Some(42i32));
+ assert_eq!(x.to_u64(), Some(42u64));
+ assert_eq!(x.to_i64(), Some(42i64));
+ assert_eq!(x.to_usize(), Some(42usize));
+ assert_eq!(x.to_isize(), Some(42isize));
+ assert_eq!(x.to_f32(), Some(42f32));
+ assert_eq!(x.to_f32(), Some(42f32));
+ assert_eq!(x.to_f64(), Some(42f64));
+ assert_eq!(x.to_f64(), Some(42f64));
+}
+
+#[test]
+fn not_nan32_num() {
+ assert_eq!(NotNan::<f32>::from_str_radix("42.0", 10).unwrap(), NotNan::from(42.0f32));
+ assert!(NotNan::<f32>::from_str_radix("NaN", 10).is_err());
+}
+
+#[test]
+fn not_nan32_signed() {
+ assert_eq!(NotNan::from(42f32).abs(), NotNan::from(42f32));
+ assert_eq!(NotNan::from(-42f32).abs(), NotNan::from(42f32));
+
+ assert_eq!(NotNan::from(50f32).abs_sub(&NotNan::from(8f32)), NotNan::from(42f32));
+ assert_eq!(NotNan::from(8f32).abs_sub(&NotNan::from(50f32)), NotNan::from(0f32));
+}
+
+#[test]
+fn not_nan32_num_cast() {
+ assert_eq!(<NotNan<f32> as num_traits::NumCast>::from(42), Some(NotNan::from(42f32)));
+ assert_eq!(<NotNan<f32> as num_traits::NumCast>::from(f32::nan()), None);
+}
+
+#[test]
+fn ordered_f64_compare_nan() {
+ let f64_nan: f64 = Float::nan();
+ assert_eq!(
+ OrderedFloat(f64_nan).cmp(&OrderedFloat(Float::nan())),
+ Equal
+ );
+ assert_eq!(
+ OrderedFloat(f64_nan).cmp(&OrderedFloat(-100000.0f64)),
+ Greater
+ );
+ assert_eq!(
+ OrderedFloat(-100.0f64).cmp(&OrderedFloat(Float::nan())),
+ Less
+ );
+}
+
+#[test]
+fn ordered_f64_compare_regular_floats_op() {
+ assert!(OrderedFloat(7.0) == OrderedFloat(7.0));
+ assert!(OrderedFloat(7.0) <= OrderedFloat(7.0));
+ assert!(OrderedFloat(7.0) >= OrderedFloat(7.0));
+ assert!(OrderedFloat(8.0) > OrderedFloat(7.0));
+ assert!(OrderedFloat(8.0) >= OrderedFloat(7.0));
+ assert!(OrderedFloat(4.0) < OrderedFloat(7.0));
+ assert!(OrderedFloat(4.0) <= OrderedFloat(7.0));
+}
+
+#[test]
+fn ordered_f64_compare_nan_op() {
+ let f64_nan: OrderedFloat<f64> = OrderedFloat(Float::nan());
+ assert!(f64_nan == f64_nan);
+ assert!(f64_nan <= f64_nan);
+ assert!(f64_nan >= f64_nan);
+ assert!(f64_nan > OrderedFloat(-100000.0));
+ assert!(f64_nan >= OrderedFloat(-100000.0));
+ assert!(OrderedFloat(-100.0) < f64_nan);
+ assert!(OrderedFloat(-100.0) <= f64_nan);
+ assert!(f64_nan > OrderedFloat(Float::infinity()));
+ assert!(f64_nan >= OrderedFloat(Float::infinity()));
+ assert!(f64_nan > OrderedFloat(Float::neg_infinity()));
+ assert!(f64_nan >= OrderedFloat(Float::neg_infinity()));
+}
+
+#[test]
+fn not_nan32_compare_regular_floats() {
+ assert_eq!(NotNan::from(7.0f32).cmp(&NotNan::from(7.0)), Equal);
+ assert_eq!(NotNan::from(8.0f32).cmp(&NotNan::from(7.0)), Greater);
+ assert_eq!(NotNan::from(4.0f32).cmp(&NotNan::from(7.0)), Less);
+}
+
+#[test]
+fn not_nan32_fail_when_constructing_with_nan() {
+ let f32_nan: f32 = Float::nan();
+ assert!(NotNan::new(f32_nan).is_err());
+}
+
+#[test]
+fn not_nan32_calculate_correctly() {
+ assert_eq!(
+ *(NotNan::from(5.0f32) + NotNan::from(4.0f32)),
+ 5.0f32 + 4.0f32
+ );
+ assert_eq!(*(NotNan::from(5.0f32) + 4.0f32), 5.0f32 + 4.0f32);
+ assert_eq!(
+ *(NotNan::from(5.0f32) - NotNan::from(4.0f32)),
+ 5.0f32 - 4.0f32
+ );
+ assert_eq!(*(NotNan::from(5.0f32) - 4.0f32), 5.0f32 - 4.0f32);
+ assert_eq!(
+ *(NotNan::from(5.0f32) * NotNan::from(4.0f32)),
+ 5.0f32 * 4.0f32
+ );
+ assert_eq!(*(NotNan::from(5.0f32) * 4.0f32), 5.0f32 * 4.0f32);
+ assert_eq!(
+ *(NotNan::from(8.0f32) / NotNan::from(4.0f32)),
+ 8.0f32 / 4.0f32
+ );
+ assert_eq!(*(NotNan::from(8.0f32) / 4.0f32), 8.0f32 / 4.0f32);
+ assert_eq!(
+ *(NotNan::from(8.0f32) % NotNan::from(4.0f32)),
+ 8.0f32 % 4.0f32
+ );
+ assert_eq!(*(NotNan::from(8.0f32) % 4.0f32), 8.0f32 % 4.0f32);
+ assert_eq!(*(-NotNan::from(1.0f32)), -1.0f32);
+
+ assert!(panic::catch_unwind(|| NotNan::from(0.0f32) + f32::NAN).is_err());
+ assert!(panic::catch_unwind(|| NotNan::from(0.0f32) - f32::NAN).is_err());
+ assert!(panic::catch_unwind(|| NotNan::from(0.0f32) * f32::NAN).is_err());
+ assert!(panic::catch_unwind(|| NotNan::from(0.0f32) / f32::NAN).is_err());
+ assert!(panic::catch_unwind(|| NotNan::from(0.0f32) % f32::NAN).is_err());
+
+ let mut number = NotNan::from(5.0f32);
+ number += NotNan::from(4.0f32);
+ assert_eq!(*number, 9.0f32);
+ number -= NotNan::from(4.0f32);
+ assert_eq!(*number, 5.0f32);
+ number *= NotNan::from(4.0f32);
+ assert_eq!(*number, 20.0f32);
+ number /= NotNan::from(4.0f32);
+ assert_eq!(*number, 5.0f32);
+ number %= NotNan::from(4.0f32);
+ assert_eq!(*number, 1.0f32);
+
+ number = NotNan::from(5.0f32);
+ number += 4.0f32;
+ assert_eq!(*number, 9.0f32);
+ number -= 4.0f32;
+ assert_eq!(*number, 5.0f32);
+ number *= 4.0f32;
+ assert_eq!(*number, 20.0f32);
+ number /= 4.0f32;
+ assert_eq!(*number, 5.0f32);
+ number %= 4.0f32;
+ assert_eq!(*number, 1.0f32);
+
+ assert!(
+ panic::catch_unwind(|| {
+ let mut tmp = NotNan::from(0.0f32);
+ tmp += f32::NAN;
+ }).is_err()
+ );
+ assert!(
+ panic::catch_unwind(|| {
+ let mut tmp = NotNan::from(0.0f32);
+ tmp -= f32::NAN;
+ }).is_err()
+ );
+ assert!(
+ panic::catch_unwind(|| {
+ let mut tmp = NotNan::from(0.0f32);
+ tmp *= f32::NAN;
+ }).is_err()
+ );
+ assert!(
+ panic::catch_unwind(|| {
+ let mut tmp = NotNan::from(0.0f32);
+ tmp /= f32::NAN;
+ }).is_err()
+ );
+ assert!(
+ panic::catch_unwind(|| {
+ let mut tmp = NotNan::from(0.0f32);
+ tmp %= f32::NAN;
+ }).is_err()
+ );
+}
+
+#[test]
+fn not_nan64_compare_regular_floats() {
+ assert_eq!(NotNan::from(7.0f64).cmp(&NotNan::from(7.0)), Equal);
+ assert_eq!(NotNan::from(8.0f64).cmp(&NotNan::from(7.0)), Greater);
+ assert_eq!(NotNan::from(4.0f64).cmp(&NotNan::from(7.0)), Less);
+}
+
+#[test]
+fn not_nan64_fail_when_constructing_with_nan() {
+ let f64_nan: f64 = Float::nan();
+ assert!(NotNan::new(f64_nan).is_err());
+}
+
+#[test]
+fn not_nan64_calculate_correctly() {
+ assert_eq!(
+ *(NotNan::from(5.0f64) + NotNan::from(4.0f64)),
+ 5.0f64 + 4.0f64
+ );
+ assert_eq!(*(NotNan::from(5.0f64) + 4.0f64), 5.0f64 + 4.0f64);
+ assert_eq!(
+ *(NotNan::from(5.0f64) - NotNan::from(4.0f64)),
+ 5.0f64 - 4.0f64
+ );
+ assert_eq!(*(NotNan::from(5.0f64) - 4.0f64), 5.0f64 - 4.0f64);
+ assert_eq!(
+ *(NotNan::from(5.0f64) * NotNan::from(4.0f64)),
+ 5.0f64 * 4.0f64
+ );
+ assert_eq!(*(NotNan::from(5.0f64) * 4.0f64), 5.0f64 * 4.0f64);
+ assert_eq!(
+ *(NotNan::from(8.0f64) / NotNan::from(4.0f64)),
+ 8.0f64 / 4.0f64
+ );
+ assert_eq!(*(NotNan::from(8.0f64) / 4.0f64), 8.0f64 / 4.0f64);
+ assert_eq!(
+ *(NotNan::from(8.0f64) % NotNan::from(4.0f64)),
+ 8.0f64 % 4.0f64
+ );
+ assert_eq!(*(NotNan::from(8.0f64) % 4.0f64), 8.0f64 % 4.0f64);
+ assert_eq!(*(-NotNan::from(1.0f64)), -1.0f64);
+
+ assert!(panic::catch_unwind(|| NotNan::from(0.0f64) + f64::NAN).is_err());
+ assert!(panic::catch_unwind(|| NotNan::from(0.0f64) - f64::NAN).is_err());
+ assert!(panic::catch_unwind(|| NotNan::from(0.0f64) * f64::NAN).is_err());
+ assert!(panic::catch_unwind(|| NotNan::from(0.0f64) / f64::NAN).is_err());
+ assert!(panic::catch_unwind(|| NotNan::from(0.0f64) % f64::NAN).is_err());
+
+ let mut number = NotNan::from(5.0f64);
+ number += NotNan::from(4.0f64);
+ assert_eq!(*number, 9.0f64);
+ number -= NotNan::from(4.0f64);
+ assert_eq!(*number, 5.0f64);
+ number *= NotNan::from(4.0f64);
+ assert_eq!(*number, 20.0f64);
+ number /= NotNan::from(4.0f64);
+ assert_eq!(*number, 5.0f64);
+ number %= NotNan::from(4.0f64);
+ assert_eq!(*number, 1.0f64);
+
+ number = NotNan::from(5.0f64);
+ number += 4.0f64;
+ assert_eq!(*number, 9.0f64);
+ number -= 4.0f64;
+ assert_eq!(*number, 5.0f64);
+ number *= 4.0f64;
+ assert_eq!(*number, 20.0f64);
+ number /= 4.0f64;
+ assert_eq!(*number, 5.0f64);
+ number %= 4.0f64;
+ assert_eq!(*number, 1.0f64);
+
+ assert!(
+ panic::catch_unwind(|| {
+ let mut tmp = NotNan::from(0.0f64);
+ tmp += f64::NAN;
+ }).is_err()
+ );
+ assert!(
+ panic::catch_unwind(|| {
+ let mut tmp = NotNan::from(0.0f64);
+ tmp -= f64::NAN;
+ }).is_err()
+ );
+ assert!(
+ panic::catch_unwind(|| {
+ let mut tmp = NotNan::from(0.0f64);
+ tmp *= f64::NAN;
+ }).is_err()
+ );
+ assert!(
+ panic::catch_unwind(|| {
+ let mut tmp = NotNan::from(0.0f64);
+ tmp /= f64::NAN;
+ }).is_err()
+ );
+ assert!(
+ panic::catch_unwind(|| {
+ let mut tmp = NotNan::from(0.0f64);
+ tmp %= f64::NAN;
+ }).is_err()
+ );
+}
+
+#[test]
+fn not_nan64_zero() {
+ assert_eq!(NotNan::<f64>::zero(), NotNan::from(0.0f64));
+ assert!(NotNan::<f64>::zero().is_zero());
+}
+
+#[test]
+fn not_nan64_one() {
+ assert_eq!(NotNan::<f64>::one(), NotNan::from(1.0f64))
+}
+
+#[test]
+fn not_nan64_bounded() {
+ assert_eq!(NotNan::<f64>::min_value(), NotNan::from(<f64 as Bounded>::min_value()));
+ assert_eq!(NotNan::<f64>::max_value(), NotNan::from(<f64 as Bounded>::max_value()));
+}
+
+#[test]
+fn not_nan64_from_primitive() {
+ assert_eq!(NotNan::<f64>::from_i8(42i8), Some(NotNan::from(42.0)));
+ assert_eq!(NotNan::<f64>::from_u8(42u8), Some(NotNan::from(42.0)));
+ assert_eq!(NotNan::<f64>::from_i16(42i16), Some(NotNan::from(42.0)));
+ assert_eq!(NotNan::<f64>::from_u16(42u16), Some(NotNan::from(42.0)));
+ assert_eq!(NotNan::<f64>::from_i32(42i32), Some(NotNan::from(42.0)));
+ assert_eq!(NotNan::<f64>::from_u32(42u32), Some(NotNan::from(42.0)));
+ assert_eq!(NotNan::<f64>::from_i64(42i64), Some(NotNan::from(42.0)));
+ assert_eq!(NotNan::<f64>::from_u64(42u64), Some(NotNan::from(42.0)));
+ assert_eq!(NotNan::<f64>::from_isize(42isize), Some(NotNan::from(42.0)));
+ assert_eq!(NotNan::<f64>::from_usize(42usize), Some(NotNan::from(42.0)));
+ assert_eq!(NotNan::<f64>::from_f64(42f64), Some(NotNan::from(42.0)));
+ assert_eq!(NotNan::<f64>::from_f64(42f64), Some(NotNan::from(42.0)));
+ assert_eq!(NotNan::<f64>::from_f64(42f64), Some(NotNan::from(42.0)));
+ assert_eq!(NotNan::<f64>::from_f64(42f64), Some(NotNan::from(42.0)));
+ assert_eq!(NotNan::<f64>::from_f64(Float::nan()), None);
+ assert_eq!(NotNan::<f64>::from_f64(Float::nan()), None);
+}
+
+#[test]
+fn not_nan64_to_primitive() {
+ let x = NotNan::from(42.0f64);
+ assert_eq!(x.to_u8(), Some(42u8));
+ assert_eq!(x.to_i8(), Some(42i8));
+ assert_eq!(x.to_u16(), Some(42u16));
+ assert_eq!(x.to_i16(), Some(42i16));
+ assert_eq!(x.to_u32(), Some(42u32));
+ assert_eq!(x.to_i32(), Some(42i32));
+ assert_eq!(x.to_u64(), Some(42u64));
+ assert_eq!(x.to_i64(), Some(42i64));
+ assert_eq!(x.to_usize(), Some(42usize));
+ assert_eq!(x.to_isize(), Some(42isize));
+ assert_eq!(x.to_f64(), Some(42f64));
+ assert_eq!(x.to_f64(), Some(42f64));
+ assert_eq!(x.to_f64(), Some(42f64));
+ assert_eq!(x.to_f64(), Some(42f64));
+}
+
+#[test]
+fn not_nan64_num() {
+ assert_eq!(NotNan::<f64>::from_str_radix("42.0", 10).unwrap(), NotNan::from(42.0f64));
+ assert!(NotNan::<f64>::from_str_radix("NaN", 10).is_err());
+}
+
+#[test]
+fn not_nan64_signed() {
+ assert_eq!(NotNan::from(42f64).abs(), NotNan::from(42f64));
+ assert_eq!(NotNan::from(-42f64).abs(), NotNan::from(42f64));
+
+ assert_eq!(NotNan::from(50f64).abs_sub(&NotNan::from(8f64)), NotNan::from(42f64));
+ assert_eq!(NotNan::from(8f64).abs_sub(&NotNan::from(50f64)), NotNan::from(0f64));
+}
+
+#[test]
+fn not_nan64_num_cast() {
+ assert_eq!(<NotNan<f64> as num_traits::NumCast>::from(42), Some(NotNan::from(42f64)));
+ assert_eq!(<NotNan<f64> as num_traits::NumCast>::from(f64::nan()), None);
+}
+
+#[test]
+fn hash_zero_and_neg_zero_to_the_same_hc() {
+ let state = RandomState::new();
+ let mut h1 = state.build_hasher();
+ let mut h2 = state.build_hasher();
+ OrderedFloat::from(0f64).hash(&mut h1);
+ OrderedFloat::from(-0f64).hash(&mut h2);
+ assert_eq!(h1.finish(), h2.finish());
+}
+
+#[test]
+fn hash_inf_and_neg_inf_to_different_hcs() {
+ let state = RandomState::new();
+ let mut h1 = state.build_hasher();
+ let mut h2 = state.build_hasher();
+ OrderedFloat::from(f64::INFINITY).hash(&mut h1);
+ OrderedFloat::from(f64::NEG_INFINITY).hash(&mut h2);
+ assert!(h1.finish() != h2.finish());
+}
+
+#[test]
+fn hash_is_good_for_whole_numbers() {
+ let state = RandomState::new();
+ let limit = 10000;
+
+ let mut set = ::std::collections::HashSet::with_capacity(limit);
+ for i in 0..limit {
+ let mut h = state.build_hasher();
+ OrderedFloat::from(i as f64).hash(&mut h);
+ set.insert(h.finish());
+ }
+
+ // This allows 100 collisions, which is far too
+ // many, but should guard against transient issues
+ // that will result from using RandomState
+ let pct_unique = set.len() as f64 / limit as f64;
+ assert!(0.99f64 < pct_unique, "percent-unique={}", pct_unique);
+}
+
+#[test]
+fn hash_is_good_for_fractional_numbers() {
+ let state = RandomState::new();
+ let limit = 10000;
+
+ let mut set = ::std::collections::HashSet::with_capacity(limit);
+ for i in 0..limit {
+ let mut h = state.build_hasher();
+ OrderedFloat::from(i as f64 * (1f64 / limit as f64)).hash(&mut h);
+ set.insert(h.finish());
+ }
+
+ // This allows 100 collisions, which is far too
+ // many, but should guard against transient issues
+ // that will result from using RandomState
+ let pct_unique = set.len() as f64 / limit as f64;
+ assert!(0.99f64 < pct_unique, "percent-unique={}", pct_unique);
+}
+
+#[test]
+#[should_panic]
+fn test_add_fails_on_nan() {
+ let a = NotNan::new(std::f32::INFINITY).unwrap();
+ let b = NotNan::new(std::f32::NEG_INFINITY).unwrap();
+ let _c = a + b;
+}
diff --git a/third_party/rust/ordered-float/tests/test_deprecated_names.rs b/third_party/rust/ordered-float/tests/test_deprecated_names.rs
new file mode 100644
index 0000000000..78f8e06edb
--- /dev/null
+++ b/third_party/rust/ordered-float/tests/test_deprecated_names.rs
@@ -0,0 +1,526 @@
+#![allow(deprecated)]
+
+extern crate num_traits;
+extern crate ordered_float;
+
+pub use ordered_float::*;
+pub use num_traits::{Bounded, Float, FromPrimitive, Num, One, Signed, ToPrimitive, Zero};
+pub use std::cmp::Ordering::*;
+pub use std::{f32, f64, panic};
+
+pub use std::collections::hash_map::RandomState;
+pub use std::collections::HashSet;
+pub use std::hash::*;
+
+#[test]
+fn ordered_f32_compare_regular_floats() {
+ assert_eq!(OrderedFloat(7.0f32).cmp(&OrderedFloat(7.0)), Equal);
+ assert_eq!(OrderedFloat(8.0f32).cmp(&OrderedFloat(7.0)), Greater);
+ assert_eq!(OrderedFloat(4.0f32).cmp(&OrderedFloat(7.0)), Less);
+}
+
+#[test]
+fn ordered_f32_compare_regular_floats_op() {
+ assert!(OrderedFloat(7.0f32) == OrderedFloat(7.0));
+ assert!(OrderedFloat(7.0f32) <= OrderedFloat(7.0));
+ assert!(OrderedFloat(7.0f32) >= OrderedFloat(7.0));
+ assert!(OrderedFloat(8.0f32) > OrderedFloat(7.0));
+ assert!(OrderedFloat(8.0f32) >= OrderedFloat(7.0));
+ assert!(OrderedFloat(4.0f32) < OrderedFloat(7.0));
+ assert!(OrderedFloat(4.0f32) <= OrderedFloat(7.0));
+}
+
+#[test]
+fn ordered_f32_compare_nan() {
+ let f32_nan: f32 = Float::nan();
+ assert_eq!(OrderedFloat(f32_nan).cmp(&OrderedFloat(Float::nan())), Equal);
+ assert_eq!(OrderedFloat(f32_nan).cmp(&OrderedFloat(-100000.0f32)), Greater);
+ assert_eq!(OrderedFloat(-100.0f32).cmp(&OrderedFloat(Float::nan())), Less);
+}
+
+#[test]
+fn ordered_f32_compare_nan_op() {
+ let f32_nan: OrderedFloat<f32> = OrderedFloat(Float::nan());
+ assert!(f32_nan == f32_nan);
+ assert!(f32_nan <= f32_nan);
+ assert!(f32_nan >= f32_nan);
+ assert!(f32_nan > OrderedFloat(-100000.0f32));
+ assert!(f32_nan >= OrderedFloat(-100000.0f32));
+ assert!(OrderedFloat(-100.0f32) < f32_nan);
+ assert!(OrderedFloat(-100.0f32) <= f32_nan);
+ assert!(f32_nan > OrderedFloat(Float::infinity()));
+ assert!(f32_nan >= OrderedFloat(Float::infinity()));
+ assert!(f32_nan > OrderedFloat(Float::neg_infinity()));
+ assert!(f32_nan >= OrderedFloat(Float::neg_infinity()));
+}
+
+#[test]
+fn ordered_f64_compare_regular_floats() {
+ assert_eq!(OrderedFloat(7.0f64).cmp(&OrderedFloat(7.0)), Equal);
+ assert_eq!(OrderedFloat(8.0f64).cmp(&OrderedFloat(7.0)), Greater);
+ assert_eq!(OrderedFloat(4.0f64).cmp(&OrderedFloat(7.0)), Less);
+}
+
+#[test]
+fn not_nan32_zero() {
+ assert_eq!(NotNaN::<f32>::zero(), NotNaN::from(0.0f32));
+ assert!(NotNaN::<f32>::zero().is_zero());
+}
+
+#[test]
+fn not_nan32_one() {
+ assert_eq!(NotNaN::<f32>::one(), NotNaN::from(1.0f32))
+}
+
+#[test]
+fn not_nan32_bounded() {
+ assert_eq!(NotNaN::<f32>::min_value(), NotNaN::from(<f32 as Bounded>::min_value()));
+ assert_eq!(NotNaN::<f32>::max_value(), NotNaN::from(<f32 as Bounded>::max_value()));
+}
+
+#[test]
+fn not_nan32_from_primitive() {
+ assert_eq!(NotNaN::<f32>::from_i8(42i8), Some(NotNaN::from(42.0)));
+ assert_eq!(NotNaN::<f32>::from_u8(42u8), Some(NotNaN::from(42.0)));
+ assert_eq!(NotNaN::<f32>::from_i16(42i16), Some(NotNaN::from(42.0)));
+ assert_eq!(NotNaN::<f32>::from_u16(42u16), Some(NotNaN::from(42.0)));
+ assert_eq!(NotNaN::<f32>::from_i32(42i32), Some(NotNaN::from(42.0)));
+ assert_eq!(NotNaN::<f32>::from_u32(42u32), Some(NotNaN::from(42.0)));
+ assert_eq!(NotNaN::<f32>::from_i64(42i64), Some(NotNaN::from(42.0)));
+ assert_eq!(NotNaN::<f32>::from_u64(42u64), Some(NotNaN::from(42.0)));
+ assert_eq!(NotNaN::<f32>::from_isize(42isize), Some(NotNaN::from(42.0)));
+ assert_eq!(NotNaN::<f32>::from_usize(42usize), Some(NotNaN::from(42.0)));
+ assert_eq!(NotNaN::<f32>::from_f32(42f32), Some(NotNaN::from(42.0)));
+ assert_eq!(NotNaN::<f32>::from_f32(42f32), Some(NotNaN::from(42.0)));
+ assert_eq!(NotNaN::<f32>::from_f64(42f64), Some(NotNaN::from(42.0)));
+ assert_eq!(NotNaN::<f32>::from_f64(42f64), Some(NotNaN::from(42.0)));
+ assert_eq!(NotNaN::<f32>::from_f32(Float::nan()), None);
+ assert_eq!(NotNaN::<f32>::from_f64(Float::nan()), None);
+}
+
+#[test]
+fn not_nan32_to_primitive() {
+ let x = NotNaN::from(42.0f32);
+ assert_eq!(x.to_u8(), Some(42u8));
+ assert_eq!(x.to_i8(), Some(42i8));
+ assert_eq!(x.to_u16(), Some(42u16));
+ assert_eq!(x.to_i16(), Some(42i16));
+ assert_eq!(x.to_u32(), Some(42u32));
+ assert_eq!(x.to_i32(), Some(42i32));
+ assert_eq!(x.to_u64(), Some(42u64));
+ assert_eq!(x.to_i64(), Some(42i64));
+ assert_eq!(x.to_usize(), Some(42usize));
+ assert_eq!(x.to_isize(), Some(42isize));
+ assert_eq!(x.to_f32(), Some(42f32));
+ assert_eq!(x.to_f32(), Some(42f32));
+ assert_eq!(x.to_f64(), Some(42f64));
+ assert_eq!(x.to_f64(), Some(42f64));
+}
+
+#[test]
+fn not_nan32_num() {
+ assert_eq!(NotNaN::<f32>::from_str_radix("42.0", 10).unwrap(), NotNaN::from(42.0f32));
+ assert!(NotNaN::<f32>::from_str_radix("NaN", 10).is_err());
+}
+
+#[test]
+fn not_nan32_signed() {
+ assert_eq!(NotNaN::from(42f32).abs(), NotNaN::from(42f32));
+ assert_eq!(NotNaN::from(-42f32).abs(), NotNaN::from(42f32));
+
+ assert_eq!(NotNaN::from(50f32).abs_sub(&NotNaN::from(8f32)), NotNaN::from(42f32));
+ assert_eq!(NotNaN::from(8f32).abs_sub(&NotNaN::from(50f32)), NotNaN::from(0f32));
+}
+
+#[test]
+fn not_nan32_num_cast() {
+ assert_eq!(<NotNaN<f32> as num_traits::NumCast>::from(42), Some(NotNaN::from(42f32)));
+ assert_eq!(<NotNaN<f32> as num_traits::NumCast>::from(f32::nan()), None);
+}
+
+#[test]
+fn ordered_f64_compare_nan() {
+ let f64_nan: f64 = Float::nan();
+ assert_eq!(
+ OrderedFloat(f64_nan).cmp(&OrderedFloat(Float::nan())),
+ Equal
+ );
+ assert_eq!(
+ OrderedFloat(f64_nan).cmp(&OrderedFloat(-100000.0f64)),
+ Greater
+ );
+ assert_eq!(
+ OrderedFloat(-100.0f64).cmp(&OrderedFloat(Float::nan())),
+ Less
+ );
+}
+
+#[test]
+fn ordered_f64_compare_regular_floats_op() {
+ assert!(OrderedFloat(7.0) == OrderedFloat(7.0));
+ assert!(OrderedFloat(7.0) <= OrderedFloat(7.0));
+ assert!(OrderedFloat(7.0) >= OrderedFloat(7.0));
+ assert!(OrderedFloat(8.0) > OrderedFloat(7.0));
+ assert!(OrderedFloat(8.0) >= OrderedFloat(7.0));
+ assert!(OrderedFloat(4.0) < OrderedFloat(7.0));
+ assert!(OrderedFloat(4.0) <= OrderedFloat(7.0));
+}
+
+#[test]
+fn ordered_f64_compare_nan_op() {
+ let f64_nan: OrderedFloat<f64> = OrderedFloat(Float::nan());
+ assert!(f64_nan == f64_nan);
+ assert!(f64_nan <= f64_nan);
+ assert!(f64_nan >= f64_nan);
+ assert!(f64_nan > OrderedFloat(-100000.0));
+ assert!(f64_nan >= OrderedFloat(-100000.0));
+ assert!(OrderedFloat(-100.0) < f64_nan);
+ assert!(OrderedFloat(-100.0) <= f64_nan);
+ assert!(f64_nan > OrderedFloat(Float::infinity()));
+ assert!(f64_nan >= OrderedFloat(Float::infinity()));
+ assert!(f64_nan > OrderedFloat(Float::neg_infinity()));
+ assert!(f64_nan >= OrderedFloat(Float::neg_infinity()));
+}
+
+#[test]
+fn not_nan32_compare_regular_floats() {
+ assert_eq!(NotNaN::from(7.0f32).cmp(&NotNaN::from(7.0)), Equal);
+ assert_eq!(NotNaN::from(8.0f32).cmp(&NotNaN::from(7.0)), Greater);
+ assert_eq!(NotNaN::from(4.0f32).cmp(&NotNaN::from(7.0)), Less);
+}
+
+#[test]
+fn not_nan32_fail_when_constructing_with_nan() {
+ let f32_nan: f32 = Float::nan();
+ assert!(NotNaN::new(f32_nan).is_err());
+}
+
+#[test]
+fn not_nan32_calculate_correctly() {
+ assert_eq!(
+ *(NotNaN::from(5.0f32) + NotNaN::from(4.0f32)),
+ 5.0f32 + 4.0f32
+ );
+ assert_eq!(*(NotNaN::from(5.0f32) + 4.0f32), 5.0f32 + 4.0f32);
+ assert_eq!(
+ *(NotNaN::from(5.0f32) - NotNaN::from(4.0f32)),
+ 5.0f32 - 4.0f32
+ );
+ assert_eq!(*(NotNaN::from(5.0f32) - 4.0f32), 5.0f32 - 4.0f32);
+ assert_eq!(
+ *(NotNaN::from(5.0f32) * NotNaN::from(4.0f32)),
+ 5.0f32 * 4.0f32
+ );
+ assert_eq!(*(NotNaN::from(5.0f32) * 4.0f32), 5.0f32 * 4.0f32);
+ assert_eq!(
+ *(NotNaN::from(8.0f32) / NotNaN::from(4.0f32)),
+ 8.0f32 / 4.0f32
+ );
+ assert_eq!(*(NotNaN::from(8.0f32) / 4.0f32), 8.0f32 / 4.0f32);
+ assert_eq!(
+ *(NotNaN::from(8.0f32) % NotNaN::from(4.0f32)),
+ 8.0f32 % 4.0f32
+ );
+ assert_eq!(*(NotNaN::from(8.0f32) % 4.0f32), 8.0f32 % 4.0f32);
+ assert_eq!(*(-NotNaN::from(1.0f32)), -1.0f32);
+
+ assert!(panic::catch_unwind(|| NotNaN::from(0.0f32) + f32::NAN).is_err());
+ assert!(panic::catch_unwind(|| NotNaN::from(0.0f32) - f32::NAN).is_err());
+ assert!(panic::catch_unwind(|| NotNaN::from(0.0f32) * f32::NAN).is_err());
+ assert!(panic::catch_unwind(|| NotNaN::from(0.0f32) / f32::NAN).is_err());
+ assert!(panic::catch_unwind(|| NotNaN::from(0.0f32) % f32::NAN).is_err());
+
+ let mut number = NotNaN::from(5.0f32);
+ number += NotNaN::from(4.0f32);
+ assert_eq!(*number, 9.0f32);
+ number -= NotNaN::from(4.0f32);
+ assert_eq!(*number, 5.0f32);
+ number *= NotNaN::from(4.0f32);
+ assert_eq!(*number, 20.0f32);
+ number /= NotNaN::from(4.0f32);
+ assert_eq!(*number, 5.0f32);
+ number %= NotNaN::from(4.0f32);
+ assert_eq!(*number, 1.0f32);
+
+ number = NotNaN::from(5.0f32);
+ number += 4.0f32;
+ assert_eq!(*number, 9.0f32);
+ number -= 4.0f32;
+ assert_eq!(*number, 5.0f32);
+ number *= 4.0f32;
+ assert_eq!(*number, 20.0f32);
+ number /= 4.0f32;
+ assert_eq!(*number, 5.0f32);
+ number %= 4.0f32;
+ assert_eq!(*number, 1.0f32);
+
+ assert!(
+ panic::catch_unwind(|| {
+ let mut tmp = NotNaN::from(0.0f32);
+ tmp += f32::NAN;
+ }).is_err()
+ );
+ assert!(
+ panic::catch_unwind(|| {
+ let mut tmp = NotNaN::from(0.0f32);
+ tmp -= f32::NAN;
+ }).is_err()
+ );
+ assert!(
+ panic::catch_unwind(|| {
+ let mut tmp = NotNaN::from(0.0f32);
+ tmp *= f32::NAN;
+ }).is_err()
+ );
+ assert!(
+ panic::catch_unwind(|| {
+ let mut tmp = NotNaN::from(0.0f32);
+ tmp /= f32::NAN;
+ }).is_err()
+ );
+ assert!(
+ panic::catch_unwind(|| {
+ let mut tmp = NotNaN::from(0.0f32);
+ tmp %= f32::NAN;
+ }).is_err()
+ );
+}
+
+#[test]
+fn not_nan64_compare_regular_floats() {
+ assert_eq!(NotNaN::from(7.0f64).cmp(&NotNaN::from(7.0)), Equal);
+ assert_eq!(NotNaN::from(8.0f64).cmp(&NotNaN::from(7.0)), Greater);
+ assert_eq!(NotNaN::from(4.0f64).cmp(&NotNaN::from(7.0)), Less);
+}
+
+#[test]
+fn not_nan64_fail_when_constructing_with_nan() {
+ let f64_nan: f64 = Float::nan();
+ assert!(NotNaN::new(f64_nan).is_err());
+}
+
+#[test]
+fn not_nan64_calculate_correctly() {
+ assert_eq!(
+ *(NotNaN::from(5.0f64) + NotNaN::from(4.0f64)),
+ 5.0f64 + 4.0f64
+ );
+ assert_eq!(*(NotNaN::from(5.0f64) + 4.0f64), 5.0f64 + 4.0f64);
+ assert_eq!(
+ *(NotNaN::from(5.0f64) - NotNaN::from(4.0f64)),
+ 5.0f64 - 4.0f64
+ );
+ assert_eq!(*(NotNaN::from(5.0f64) - 4.0f64), 5.0f64 - 4.0f64);
+ assert_eq!(
+ *(NotNaN::from(5.0f64) * NotNaN::from(4.0f64)),
+ 5.0f64 * 4.0f64
+ );
+ assert_eq!(*(NotNaN::from(5.0f64) * 4.0f64), 5.0f64 * 4.0f64);
+ assert_eq!(
+ *(NotNaN::from(8.0f64) / NotNaN::from(4.0f64)),
+ 8.0f64 / 4.0f64
+ );
+ assert_eq!(*(NotNaN::from(8.0f64) / 4.0f64), 8.0f64 / 4.0f64);
+ assert_eq!(
+ *(NotNaN::from(8.0f64) % NotNaN::from(4.0f64)),
+ 8.0f64 % 4.0f64
+ );
+ assert_eq!(*(NotNaN::from(8.0f64) % 4.0f64), 8.0f64 % 4.0f64);
+ assert_eq!(*(-NotNaN::from(1.0f64)), -1.0f64);
+
+ assert!(panic::catch_unwind(|| NotNaN::from(0.0f64) + f64::NAN).is_err());
+ assert!(panic::catch_unwind(|| NotNaN::from(0.0f64) - f64::NAN).is_err());
+ assert!(panic::catch_unwind(|| NotNaN::from(0.0f64) * f64::NAN).is_err());
+ assert!(panic::catch_unwind(|| NotNaN::from(0.0f64) / f64::NAN).is_err());
+ assert!(panic::catch_unwind(|| NotNaN::from(0.0f64) % f64::NAN).is_err());
+
+ let mut number = NotNaN::from(5.0f64);
+ number += NotNaN::from(4.0f64);
+ assert_eq!(*number, 9.0f64);
+ number -= NotNaN::from(4.0f64);
+ assert_eq!(*number, 5.0f64);
+ number *= NotNaN::from(4.0f64);
+ assert_eq!(*number, 20.0f64);
+ number /= NotNaN::from(4.0f64);
+ assert_eq!(*number, 5.0f64);
+ number %= NotNaN::from(4.0f64);
+ assert_eq!(*number, 1.0f64);
+
+ number = NotNaN::from(5.0f64);
+ number += 4.0f64;
+ assert_eq!(*number, 9.0f64);
+ number -= 4.0f64;
+ assert_eq!(*number, 5.0f64);
+ number *= 4.0f64;
+ assert_eq!(*number, 20.0f64);
+ number /= 4.0f64;
+ assert_eq!(*number, 5.0f64);
+ number %= 4.0f64;
+ assert_eq!(*number, 1.0f64);
+
+ assert!(
+ panic::catch_unwind(|| {
+ let mut tmp = NotNaN::from(0.0f64);
+ tmp += f64::NAN;
+ }).is_err()
+ );
+ assert!(
+ panic::catch_unwind(|| {
+ let mut tmp = NotNaN::from(0.0f64);
+ tmp -= f64::NAN;
+ }).is_err()
+ );
+ assert!(
+ panic::catch_unwind(|| {
+ let mut tmp = NotNaN::from(0.0f64);
+ tmp *= f64::NAN;
+ }).is_err()
+ );
+ assert!(
+ panic::catch_unwind(|| {
+ let mut tmp = NotNaN::from(0.0f64);
+ tmp /= f64::NAN;
+ }).is_err()
+ );
+ assert!(
+ panic::catch_unwind(|| {
+ let mut tmp = NotNaN::from(0.0f64);
+ tmp %= f64::NAN;
+ }).is_err()
+ );
+}
+
+#[test]
+fn not_nan64_zero() {
+ assert_eq!(NotNaN::<f64>::zero(), NotNaN::from(0.0f64));
+ assert!(NotNaN::<f64>::zero().is_zero());
+}
+
+#[test]
+fn not_nan64_one() {
+ assert_eq!(NotNaN::<f64>::one(), NotNaN::from(1.0f64))
+}
+
+#[test]
+fn not_nan64_bounded() {
+ assert_eq!(NotNaN::<f64>::min_value(), NotNaN::from(<f64 as Bounded>::min_value()));
+ assert_eq!(NotNaN::<f64>::max_value(), NotNaN::from(<f64 as Bounded>::max_value()));
+}
+
+#[test]
+fn not_nan64_from_primitive() {
+ assert_eq!(NotNaN::<f64>::from_i8(42i8), Some(NotNaN::from(42.0)));
+ assert_eq!(NotNaN::<f64>::from_u8(42u8), Some(NotNaN::from(42.0)));
+ assert_eq!(NotNaN::<f64>::from_i16(42i16), Some(NotNaN::from(42.0)));
+ assert_eq!(NotNaN::<f64>::from_u16(42u16), Some(NotNaN::from(42.0)));
+ assert_eq!(NotNaN::<f64>::from_i32(42i32), Some(NotNaN::from(42.0)));
+ assert_eq!(NotNaN::<f64>::from_u32(42u32), Some(NotNaN::from(42.0)));
+ assert_eq!(NotNaN::<f64>::from_i64(42i64), Some(NotNaN::from(42.0)));
+ assert_eq!(NotNaN::<f64>::from_u64(42u64), Some(NotNaN::from(42.0)));
+ assert_eq!(NotNaN::<f64>::from_isize(42isize), Some(NotNaN::from(42.0)));
+ assert_eq!(NotNaN::<f64>::from_usize(42usize), Some(NotNaN::from(42.0)));
+ assert_eq!(NotNaN::<f64>::from_f64(42f64), Some(NotNaN::from(42.0)));
+ assert_eq!(NotNaN::<f64>::from_f64(42f64), Some(NotNaN::from(42.0)));
+ assert_eq!(NotNaN::<f64>::from_f64(42f64), Some(NotNaN::from(42.0)));
+ assert_eq!(NotNaN::<f64>::from_f64(42f64), Some(NotNaN::from(42.0)));
+ assert_eq!(NotNaN::<f64>::from_f64(Float::nan()), None);
+ assert_eq!(NotNaN::<f64>::from_f64(Float::nan()), None);
+}
+
+#[test]
+fn not_nan64_to_primitive() {
+ let x = NotNaN::from(42.0f64);
+ assert_eq!(x.to_u8(), Some(42u8));
+ assert_eq!(x.to_i8(), Some(42i8));
+ assert_eq!(x.to_u16(), Some(42u16));
+ assert_eq!(x.to_i16(), Some(42i16));
+ assert_eq!(x.to_u32(), Some(42u32));
+ assert_eq!(x.to_i32(), Some(42i32));
+ assert_eq!(x.to_u64(), Some(42u64));
+ assert_eq!(x.to_i64(), Some(42i64));
+ assert_eq!(x.to_usize(), Some(42usize));
+ assert_eq!(x.to_isize(), Some(42isize));
+ assert_eq!(x.to_f64(), Some(42f64));
+ assert_eq!(x.to_f64(), Some(42f64));
+ assert_eq!(x.to_f64(), Some(42f64));
+ assert_eq!(x.to_f64(), Some(42f64));
+}
+
+#[test]
+fn not_nan64_num() {
+ assert_eq!(NotNaN::<f64>::from_str_radix("42.0", 10).unwrap(), NotNaN::from(42.0f64));
+ assert!(NotNaN::<f64>::from_str_radix("NaN", 10).is_err());
+}
+
+#[test]
+fn not_nan64_signed() {
+ assert_eq!(NotNaN::from(42f64).abs(), NotNaN::from(42f64));
+ assert_eq!(NotNaN::from(-42f64).abs(), NotNaN::from(42f64));
+
+ assert_eq!(NotNaN::from(50f64).abs_sub(&NotNaN::from(8f64)), NotNaN::from(42f64));
+ assert_eq!(NotNaN::from(8f64).abs_sub(&NotNaN::from(50f64)), NotNaN::from(0f64));
+}
+
+#[test]
+fn not_nan64_num_cast() {
+ assert_eq!(<NotNaN<f64> as num_traits::NumCast>::from(42), Some(NotNaN::from(42f64)));
+ assert_eq!(<NotNaN<f64> as num_traits::NumCast>::from(f64::nan()), None);
+}
+
+#[test]
+fn hash_zero_and_neg_zero_to_the_same_hc() {
+ let state = RandomState::new();
+ let mut h1 = state.build_hasher();
+ let mut h2 = state.build_hasher();
+ OrderedFloat::from(0f64).hash(&mut h1);
+ OrderedFloat::from(-0f64).hash(&mut h2);
+ assert_eq!(h1.finish(), h2.finish());
+}
+
+#[test]
+fn hash_inf_and_neg_inf_to_different_hcs() {
+ let state = RandomState::new();
+ let mut h1 = state.build_hasher();
+ let mut h2 = state.build_hasher();
+ OrderedFloat::from(f64::INFINITY).hash(&mut h1);
+ OrderedFloat::from(f64::NEG_INFINITY).hash(&mut h2);
+ assert!(h1.finish() != h2.finish());
+}
+
+#[test]
+fn hash_is_good_for_whole_numbers() {
+ let state = RandomState::new();
+ let limit = 10000;
+
+ let mut set = ::std::collections::HashSet::with_capacity(limit);
+ for i in 0..limit {
+ let mut h = state.build_hasher();
+ OrderedFloat::from(i as f64).hash(&mut h);
+ set.insert(h.finish());
+ }
+
+ // This allows 100 collisions, which is far too
+ // many, but should guard against transient issues
+ // that will result from using RandomState
+ let pct_unique = set.len() as f64 / limit as f64;
+ assert!(0.99f64 < pct_unique, "percent-unique={}", pct_unique);
+}
+
+#[test]
+fn hash_is_good_for_fractional_numbers() {
+ let state = RandomState::new();
+ let limit = 10000;
+
+ let mut set = ::std::collections::HashSet::with_capacity(limit);
+ for i in 0..limit {
+ let mut h = state.build_hasher();
+ OrderedFloat::from(i as f64 * (1f64 / limit as f64)).hash(&mut h);
+ set.insert(h.finish());
+ }
+
+ // This allows 100 collisions, which is far too
+ // many, but should guard against transient issues
+ // that will result from using RandomState
+ let pct_unique = set.len() as f64 / limit as f64;
+ assert!(0.99f64 < pct_unique, "percent-unique={}", pct_unique);
+}