author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-17 12:02:58 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-17 12:02:58 +0000
commit     698f8c2f01ea549d77d7dc3338a12e04c11057b9 (patch)
tree       173a775858bd501c378080a10dca74132f05bc50 /vendor/num-traits/src/ops
parent     Initial commit. (diff)
Adding upstream version 1.64.0+dfsg1. (upstream/1.64.0+dfsg1)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'vendor/num-traits/src/ops')
-rw-r--r--  vendor/num-traits/src/ops/checked.rs     277
-rw-r--r--  vendor/num-traits/src/ops/inv.rs          47
-rw-r--r--  vendor/num-traits/src/ops/mod.rs           5
-rw-r--r--  vendor/num-traits/src/ops/mul_add.rs     151
-rw-r--r--  vendor/num-traits/src/ops/saturating.rs  137
-rw-r--r--  vendor/num-traits/src/ops/wrapping.rs    337
6 files changed, 954 insertions, 0 deletions
diff --git a/vendor/num-traits/src/ops/checked.rs b/vendor/num-traits/src/ops/checked.rs
new file mode 100644
index 000000000..386557003
--- /dev/null
+++ b/vendor/num-traits/src/ops/checked.rs
@@ -0,0 +1,277 @@
+use core::ops::{Add, Div, Mul, Rem, Shl, Shr, Sub};
+
+/// Performs addition that returns `None` instead of wrapping around on
+/// overflow.
+pub trait CheckedAdd: Sized + Add<Self, Output = Self> {
+ /// Adds two numbers, checking for overflow. If overflow happens, `None` is
+ /// returned.
+ fn checked_add(&self, v: &Self) -> Option<Self>;
+}
+
+macro_rules! checked_impl {
+ ($trait_name:ident, $method:ident, $t:ty) => {
+ impl $trait_name for $t {
+ #[inline]
+ fn $method(&self, v: &$t) -> Option<$t> {
+ <$t>::$method(*self, *v)
+ }
+ }
+ };
+}
+
+checked_impl!(CheckedAdd, checked_add, u8);
+checked_impl!(CheckedAdd, checked_add, u16);
+checked_impl!(CheckedAdd, checked_add, u32);
+checked_impl!(CheckedAdd, checked_add, u64);
+checked_impl!(CheckedAdd, checked_add, usize);
+#[cfg(has_i128)]
+checked_impl!(CheckedAdd, checked_add, u128);
+
+checked_impl!(CheckedAdd, checked_add, i8);
+checked_impl!(CheckedAdd, checked_add, i16);
+checked_impl!(CheckedAdd, checked_add, i32);
+checked_impl!(CheckedAdd, checked_add, i64);
+checked_impl!(CheckedAdd, checked_add, isize);
+#[cfg(has_i128)]
+checked_impl!(CheckedAdd, checked_add, i128);
+
+/// Performs subtraction that returns `None` instead of wrapping around on underflow.
+pub trait CheckedSub: Sized + Sub<Self, Output = Self> {
+ /// Subtracts two numbers, checking for underflow. If underflow happens,
+ /// `None` is returned.
+ fn checked_sub(&self, v: &Self) -> Option<Self>;
+}
+
+checked_impl!(CheckedSub, checked_sub, u8);
+checked_impl!(CheckedSub, checked_sub, u16);
+checked_impl!(CheckedSub, checked_sub, u32);
+checked_impl!(CheckedSub, checked_sub, u64);
+checked_impl!(CheckedSub, checked_sub, usize);
+#[cfg(has_i128)]
+checked_impl!(CheckedSub, checked_sub, u128);
+
+checked_impl!(CheckedSub, checked_sub, i8);
+checked_impl!(CheckedSub, checked_sub, i16);
+checked_impl!(CheckedSub, checked_sub, i32);
+checked_impl!(CheckedSub, checked_sub, i64);
+checked_impl!(CheckedSub, checked_sub, isize);
+#[cfg(has_i128)]
+checked_impl!(CheckedSub, checked_sub, i128);
+
+/// Performs multiplication that returns `None` instead of wrapping around on underflow or
+/// overflow.
+pub trait CheckedMul: Sized + Mul<Self, Output = Self> {
+ /// Multiplies two numbers, checking for underflow or overflow. If underflow
+ /// or overflow happens, `None` is returned.
+ fn checked_mul(&self, v: &Self) -> Option<Self>;
+}
+
+checked_impl!(CheckedMul, checked_mul, u8);
+checked_impl!(CheckedMul, checked_mul, u16);
+checked_impl!(CheckedMul, checked_mul, u32);
+checked_impl!(CheckedMul, checked_mul, u64);
+checked_impl!(CheckedMul, checked_mul, usize);
+#[cfg(has_i128)]
+checked_impl!(CheckedMul, checked_mul, u128);
+
+checked_impl!(CheckedMul, checked_mul, i8);
+checked_impl!(CheckedMul, checked_mul, i16);
+checked_impl!(CheckedMul, checked_mul, i32);
+checked_impl!(CheckedMul, checked_mul, i64);
+checked_impl!(CheckedMul, checked_mul, isize);
+#[cfg(has_i128)]
+checked_impl!(CheckedMul, checked_mul, i128);
+
+/// Performs division that returns `None` instead of panicking on division by zero and instead of
+/// wrapping around on underflow and overflow.
+pub trait CheckedDiv: Sized + Div<Self, Output = Self> {
+ /// Divides two numbers, checking for underflow, overflow and division by
+ /// zero. If any of that happens, `None` is returned.
+ fn checked_div(&self, v: &Self) -> Option<Self>;
+}
+
+checked_impl!(CheckedDiv, checked_div, u8);
+checked_impl!(CheckedDiv, checked_div, u16);
+checked_impl!(CheckedDiv, checked_div, u32);
+checked_impl!(CheckedDiv, checked_div, u64);
+checked_impl!(CheckedDiv, checked_div, usize);
+#[cfg(has_i128)]
+checked_impl!(CheckedDiv, checked_div, u128);
+
+checked_impl!(CheckedDiv, checked_div, i8);
+checked_impl!(CheckedDiv, checked_div, i16);
+checked_impl!(CheckedDiv, checked_div, i32);
+checked_impl!(CheckedDiv, checked_div, i64);
+checked_impl!(CheckedDiv, checked_div, isize);
+#[cfg(has_i128)]
+checked_impl!(CheckedDiv, checked_div, i128);
+
+/// Performs an integral remainder that returns `None` instead of panicking on division by zero and
+/// instead of wrapping around on underflow and overflow.
+pub trait CheckedRem: Sized + Rem<Self, Output = Self> {
+ /// Finds the remainder of dividing two numbers, checking for underflow, overflow and division
+ /// by zero. If any of that happens, `None` is returned.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use num_traits::CheckedRem;
+ /// use std::i32::MIN;
+ ///
+ /// assert_eq!(CheckedRem::checked_rem(&10, &7), Some(3));
+ /// assert_eq!(CheckedRem::checked_rem(&10, &-7), Some(3));
+ /// assert_eq!(CheckedRem::checked_rem(&-10, &7), Some(-3));
+ /// assert_eq!(CheckedRem::checked_rem(&-10, &-7), Some(-3));
+ ///
+ /// assert_eq!(CheckedRem::checked_rem(&10, &0), None);
+ ///
+ /// assert_eq!(CheckedRem::checked_rem(&MIN, &1), Some(0));
+ /// assert_eq!(CheckedRem::checked_rem(&MIN, &-1), None);
+ /// ```
+ fn checked_rem(&self, v: &Self) -> Option<Self>;
+}
+
+checked_impl!(CheckedRem, checked_rem, u8);
+checked_impl!(CheckedRem, checked_rem, u16);
+checked_impl!(CheckedRem, checked_rem, u32);
+checked_impl!(CheckedRem, checked_rem, u64);
+checked_impl!(CheckedRem, checked_rem, usize);
+#[cfg(has_i128)]
+checked_impl!(CheckedRem, checked_rem, u128);
+
+checked_impl!(CheckedRem, checked_rem, i8);
+checked_impl!(CheckedRem, checked_rem, i16);
+checked_impl!(CheckedRem, checked_rem, i32);
+checked_impl!(CheckedRem, checked_rem, i64);
+checked_impl!(CheckedRem, checked_rem, isize);
+#[cfg(has_i128)]
+checked_impl!(CheckedRem, checked_rem, i128);
+
+macro_rules! checked_impl_unary {
+ ($trait_name:ident, $method:ident, $t:ty) => {
+ impl $trait_name for $t {
+ #[inline]
+ fn $method(&self) -> Option<$t> {
+ <$t>::$method(*self)
+ }
+ }
+ };
+}
+
+/// Performs negation that returns `None` if the result can't be represented.
+pub trait CheckedNeg: Sized {
+ /// Negates a number, returning `None` for results that can't be represented, like signed `MIN`
+ /// values that can't be positive, or non-zero unsigned values that can't be negative.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use num_traits::CheckedNeg;
+ /// use std::i32::MIN;
+ ///
+ /// assert_eq!(CheckedNeg::checked_neg(&1_i32), Some(-1));
+ /// assert_eq!(CheckedNeg::checked_neg(&-1_i32), Some(1));
+ /// assert_eq!(CheckedNeg::checked_neg(&MIN), None);
+ ///
+ /// assert_eq!(CheckedNeg::checked_neg(&0_u32), Some(0));
+ /// assert_eq!(CheckedNeg::checked_neg(&1_u32), None);
+ /// ```
+ fn checked_neg(&self) -> Option<Self>;
+}
+
+checked_impl_unary!(CheckedNeg, checked_neg, u8);
+checked_impl_unary!(CheckedNeg, checked_neg, u16);
+checked_impl_unary!(CheckedNeg, checked_neg, u32);
+checked_impl_unary!(CheckedNeg, checked_neg, u64);
+checked_impl_unary!(CheckedNeg, checked_neg, usize);
+#[cfg(has_i128)]
+checked_impl_unary!(CheckedNeg, checked_neg, u128);
+
+checked_impl_unary!(CheckedNeg, checked_neg, i8);
+checked_impl_unary!(CheckedNeg, checked_neg, i16);
+checked_impl_unary!(CheckedNeg, checked_neg, i32);
+checked_impl_unary!(CheckedNeg, checked_neg, i64);
+checked_impl_unary!(CheckedNeg, checked_neg, isize);
+#[cfg(has_i128)]
+checked_impl_unary!(CheckedNeg, checked_neg, i128);
+
+/// Performs a left shift that returns `None` on shifts larger than
+/// the type width.
+pub trait CheckedShl: Sized + Shl<u32, Output = Self> {
+ /// Checked shift left. Computes `self << rhs`, returning `None`
+ /// if `rhs` is larger than or equal to the number of bits in `self`.
+ ///
+ /// ```
+ /// use num_traits::CheckedShl;
+ ///
+ /// let x: u16 = 0x0001;
+ ///
+ /// assert_eq!(CheckedShl::checked_shl(&x, 0), Some(0x0001));
+ /// assert_eq!(CheckedShl::checked_shl(&x, 1), Some(0x0002));
+ /// assert_eq!(CheckedShl::checked_shl(&x, 15), Some(0x8000));
+ /// assert_eq!(CheckedShl::checked_shl(&x, 16), None);
+ /// ```
+ fn checked_shl(&self, rhs: u32) -> Option<Self>;
+}
+
+macro_rules! checked_shift_impl {
+ ($trait_name:ident, $method:ident, $t:ty) => {
+ impl $trait_name for $t {
+ #[inline]
+ fn $method(&self, rhs: u32) -> Option<$t> {
+ <$t>::$method(*self, rhs)
+ }
+ }
+ };
+}
+
+checked_shift_impl!(CheckedShl, checked_shl, u8);
+checked_shift_impl!(CheckedShl, checked_shl, u16);
+checked_shift_impl!(CheckedShl, checked_shl, u32);
+checked_shift_impl!(CheckedShl, checked_shl, u64);
+checked_shift_impl!(CheckedShl, checked_shl, usize);
+#[cfg(has_i128)]
+checked_shift_impl!(CheckedShl, checked_shl, u128);
+
+checked_shift_impl!(CheckedShl, checked_shl, i8);
+checked_shift_impl!(CheckedShl, checked_shl, i16);
+checked_shift_impl!(CheckedShl, checked_shl, i32);
+checked_shift_impl!(CheckedShl, checked_shl, i64);
+checked_shift_impl!(CheckedShl, checked_shl, isize);
+#[cfg(has_i128)]
+checked_shift_impl!(CheckedShl, checked_shl, i128);
+
+/// Performs a right shift that returns `None` on shifts larger than
+/// the type width.
+pub trait CheckedShr: Sized + Shr<u32, Output = Self> {
+ /// Checked shift right. Computes `self >> rhs`, returning `None`
+ /// if `rhs` is larger than or equal to the number of bits in `self`.
+ ///
+ /// ```
+ /// use num_traits::CheckedShr;
+ ///
+ /// let x: u16 = 0x8000;
+ ///
+ /// assert_eq!(CheckedShr::checked_shr(&x, 0), Some(0x8000));
+ /// assert_eq!(CheckedShr::checked_shr(&x, 1), Some(0x4000));
+ /// assert_eq!(CheckedShr::checked_shr(&x, 15), Some(0x0001));
+ /// assert_eq!(CheckedShr::checked_shr(&x, 16), None);
+ /// ```
+ fn checked_shr(&self, rhs: u32) -> Option<Self>;
+}
+
+checked_shift_impl!(CheckedShr, checked_shr, u8);
+checked_shift_impl!(CheckedShr, checked_shr, u16);
+checked_shift_impl!(CheckedShr, checked_shr, u32);
+checked_shift_impl!(CheckedShr, checked_shr, u64);
+checked_shift_impl!(CheckedShr, checked_shr, usize);
+#[cfg(has_i128)]
+checked_shift_impl!(CheckedShr, checked_shr, u128);
+
+checked_shift_impl!(CheckedShr, checked_shr, i8);
+checked_shift_impl!(CheckedShr, checked_shr, i16);
+checked_shift_impl!(CheckedShr, checked_shr, i32);
+checked_shift_impl!(CheckedShr, checked_shr, i64);
+checked_shift_impl!(CheckedShr, checked_shr, isize);
+#[cfg(has_i128)]
+checked_shift_impl!(CheckedShr, checked_shr, i128);
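
Illustrative sketch, not part of the upstream patch: the checked traits above compose naturally in generic code. This assumes the crate-root re-exports that the doc examples in checked.rs already rely on (`use num_traits::CheckedRem;` and friends).

```
use num_traits::{CheckedAdd, CheckedDiv};

/// Computes (a + b) / d, returning `None` on overflow or division by zero.
fn checked_add_then_div<T: CheckedAdd + CheckedDiv>(a: T, b: T, d: T) -> Option<T> {
    a.checked_add(&b)?.checked_div(&d)
}

fn main() {
    assert_eq!(checked_add_then_div(10u8, 4, 2), Some(7));  // (10 + 4) / 2
    assert_eq!(checked_add_then_div(200u8, 100, 2), None);  // 200 + 100 overflows u8
    assert_eq!(checked_add_then_div(10u8, 4, 0), None);     // division by zero
}
```

The by-reference signatures match the macro-generated impls, which simply dereference and defer to the primitives' inherent `checked_*` methods.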
diff --git a/vendor/num-traits/src/ops/inv.rs b/vendor/num-traits/src/ops/inv.rs
new file mode 100644
index 000000000..7087d09d0
--- /dev/null
+++ b/vendor/num-traits/src/ops/inv.rs
@@ -0,0 +1,47 @@
+/// Unary operator for retrieving the multiplicative inverse, or reciprocal, of a value.
+pub trait Inv {
+ /// The result after applying the operator.
+ type Output;
+
+ /// Returns the multiplicative inverse of `self`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::f64::INFINITY;
+ /// use num_traits::Inv;
+ ///
+ /// assert_eq!(7.0.inv() * 7.0, 1.0);
+ /// assert_eq!((-0.0).inv(), -INFINITY);
+ /// ```
+ fn inv(self) -> Self::Output;
+}
+
+impl Inv for f32 {
+ type Output = f32;
+ #[inline]
+ fn inv(self) -> f32 {
+ 1.0 / self
+ }
+}
+impl Inv for f64 {
+ type Output = f64;
+ #[inline]
+ fn inv(self) -> f64 {
+ 1.0 / self
+ }
+}
+impl<'a> Inv for &'a f32 {
+ type Output = f32;
+ #[inline]
+ fn inv(self) -> f32 {
+ 1.0 / *self
+ }
+}
+impl<'a> Inv for &'a f64 {
+ type Output = f64;
+ #[inline]
+ fn inv(self) -> f64 {
+ 1.0 / *self
+ }
+}
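
Illustrative sketch, not part of the upstream patch: `Inv` is only implemented here for `f32`/`f64` and references to them, and it is just `1.0 / self`, so division by zero yields an infinity rather than a panic.

```
use num_traits::Inv;

fn main() {
    let x: f64 = 4.0;
    assert_eq!(x.inv(), 0.25);                // reciprocal of an owned value
    assert_eq!((&x).inv(), 0.25);             // the `&'a f64` impl behaves identically
    assert!(0.0_f32.inv().is_infinite());     // 1.0 / 0.0 is +inf, no panic
    assert_eq!(x.inv().inv(), x);             // exact round-trip for powers of two
}
```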
diff --git a/vendor/num-traits/src/ops/mod.rs b/vendor/num-traits/src/ops/mod.rs
new file mode 100644
index 000000000..fd1695d99
--- /dev/null
+++ b/vendor/num-traits/src/ops/mod.rs
@@ -0,0 +1,5 @@
+pub mod checked;
+pub mod inv;
+pub mod mul_add;
+pub mod saturating;
+pub mod wrapping;
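
Illustrative sketch, not part of the upstream patch: because these submodules are `pub`, the traits are reachable through the full `ops::…` path as well as through the crate-root re-exports used in the doc examples (assuming `ops` itself is declared `pub` in the crate's lib.rs, as it is upstream).

```
use num_traits::ops::checked::CheckedAdd; // full path through the `ops` module tree
use num_traits::CheckedSub;               // same trait family via the crate-root re-export

fn main() {
    assert_eq!(CheckedAdd::checked_add(&1u8, &2), Some(3));
    assert_eq!(CheckedSub::checked_sub(&0u8, &1), None);
}
```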
diff --git a/vendor/num-traits/src/ops/mul_add.rs b/vendor/num-traits/src/ops/mul_add.rs
new file mode 100644
index 000000000..c5835d3d0
--- /dev/null
+++ b/vendor/num-traits/src/ops/mul_add.rs
@@ -0,0 +1,151 @@
+/// Fused multiply-add. Computes `(self * a) + b` with only one rounding
+/// error, yielding a more accurate result than an unfused multiply-add.
+///
+/// Using `mul_add` can be more performant than an unfused multiply-add if
+/// the target architecture has a dedicated `fma` CPU instruction.
+///
+/// Note that `A` and `B` are `Self` by default, but this is not mandatory.
+///
+/// # Example
+///
+/// ```
+/// use std::f32;
+///
+/// let m = 10.0_f32;
+/// let x = 4.0_f32;
+/// let b = 60.0_f32;
+///
+/// // 100.0
+/// let abs_difference = (m.mul_add(x, b) - (m*x + b)).abs();
+///
+/// assert!(abs_difference <= 100.0 * f32::EPSILON);
+/// ```
+pub trait MulAdd<A = Self, B = Self> {
+ /// The resulting type after applying the fused multiply-add.
+ type Output;
+
+ /// Performs the fused multiply-add operation.
+ fn mul_add(self, a: A, b: B) -> Self::Output;
+}
+
+/// The fused multiply-add assignment operation.
+pub trait MulAddAssign<A = Self, B = Self> {
+ /// Performs the fused multiply-add operation.
+ fn mul_add_assign(&mut self, a: A, b: B);
+}
+
+#[cfg(any(feature = "std", feature = "libm"))]
+impl MulAdd<f32, f32> for f32 {
+ type Output = Self;
+
+ #[inline]
+ fn mul_add(self, a: Self, b: Self) -> Self::Output {
+ <Self as ::Float>::mul_add(self, a, b)
+ }
+}
+
+#[cfg(any(feature = "std", feature = "libm"))]
+impl MulAdd<f64, f64> for f64 {
+ type Output = Self;
+
+ #[inline]
+ fn mul_add(self, a: Self, b: Self) -> Self::Output {
+ <Self as ::Float>::mul_add(self, a, b)
+ }
+}
+
+macro_rules! mul_add_impl {
+ ($trait_name:ident for $($t:ty)*) => {$(
+ impl $trait_name for $t {
+ type Output = Self;
+
+ #[inline]
+ fn mul_add(self, a: Self, b: Self) -> Self::Output {
+ (self * a) + b
+ }
+ }
+ )*}
+}
+
+mul_add_impl!(MulAdd for isize usize i8 u8 i16 u16 i32 u32 i64 u64);
+#[cfg(has_i128)]
+mul_add_impl!(MulAdd for i128 u128);
+
+#[cfg(any(feature = "std", feature = "libm"))]
+impl MulAddAssign<f32, f32> for f32 {
+ #[inline]
+ fn mul_add_assign(&mut self, a: Self, b: Self) {
+ *self = <Self as ::Float>::mul_add(*self, a, b)
+ }
+}
+
+#[cfg(any(feature = "std", feature = "libm"))]
+impl MulAddAssign<f64, f64> for f64 {
+ #[inline]
+ fn mul_add_assign(&mut self, a: Self, b: Self) {
+ *self = <Self as ::Float>::mul_add(*self, a, b)
+ }
+}
+
+macro_rules! mul_add_assign_impl {
+ ($trait_name:ident for $($t:ty)*) => {$(
+ impl $trait_name for $t {
+ #[inline]
+ fn mul_add_assign(&mut self, a: Self, b: Self) {
+ *self = (*self * a) + b
+ }
+ }
+ )*}
+}
+
+mul_add_assign_impl!(MulAddAssign for isize usize i8 u8 i16 u16 i32 u32 i64 u64);
+#[cfg(has_i128)]
+mul_add_assign_impl!(MulAddAssign for i128 u128);
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+
+ #[test]
+ fn mul_add_integer() {
+ macro_rules! test_mul_add {
+ ($($t:ident)+) => {
+ $(
+ {
+ let m: $t = 2;
+ let x: $t = 3;
+ let b: $t = 4;
+
+ assert_eq!(MulAdd::mul_add(m, x, b), (m*x + b));
+ }
+ )+
+ };
+ }
+
+ test_mul_add!(usize u8 u16 u32 u64 isize i8 i16 i32 i64);
+ }
+
+ #[test]
+ #[cfg(feature = "std")]
+ fn mul_add_float() {
+ macro_rules! test_mul_add {
+ ($($t:ident)+) => {
+ $(
+ {
+ use core::$t;
+
+ let m: $t = 12.0;
+ let x: $t = 3.4;
+ let b: $t = 5.6;
+
+ let abs_difference = (MulAdd::mul_add(m, x, b) - (m*x + b)).abs();
+
+ assert!(abs_difference <= 46.4 * $t::EPSILON);
+ }
+ )+
+ };
+ }
+
+ test_mul_add!(f32 f64);
+ }
+}
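
Illustrative sketch, not part of the upstream patch: `MulAdd`/`MulAddAssign` usage, assuming the crate-root re-exports and a default (`std`-enabled) build, since the float impls above are gated on the "std" or "libm" feature.

```
use num_traits::{MulAdd, MulAddAssign};

fn main() {
    // Integer impls expand to `(self * a) + b`, exactly as in the macro above.
    assert_eq!(MulAdd::mul_add(2u32, 3, 4), 10);

    // Float impls defer to `Float::mul_add`, i.e. a single rounding step.
    let mut acc = 2.0_f64;
    acc.mul_add_assign(3.0, 4.0);   // acc = 2.0 * 3.0 + 4.0
    assert_eq!(acc, 10.0);
}
```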
diff --git a/vendor/num-traits/src/ops/saturating.rs b/vendor/num-traits/src/ops/saturating.rs
new file mode 100644
index 000000000..e39cfd7b6
--- /dev/null
+++ b/vendor/num-traits/src/ops/saturating.rs
@@ -0,0 +1,137 @@
+use core::ops::{Add, Mul, Sub};
+
+/// Saturating math operations. Deprecated, use `SaturatingAdd`, `SaturatingSub` and
+/// `SaturatingMul` instead.
+pub trait Saturating {
+ /// Saturating addition operator.
+ /// Returns a+b, saturating at the numeric bounds instead of overflowing.
+ fn saturating_add(self, v: Self) -> Self;
+
+ /// Saturating subtraction operator.
+ /// Returns a-b, saturating at the numeric bounds instead of overflowing.
+ fn saturating_sub(self, v: Self) -> Self;
+}
+
+macro_rules! deprecated_saturating_impl {
+ ($trait_name:ident for $($t:ty)*) => {$(
+ impl $trait_name for $t {
+ #[inline]
+ fn saturating_add(self, v: Self) -> Self {
+ Self::saturating_add(self, v)
+ }
+
+ #[inline]
+ fn saturating_sub(self, v: Self) -> Self {
+ Self::saturating_sub(self, v)
+ }
+ }
+ )*}
+}
+
+deprecated_saturating_impl!(Saturating for isize usize i8 u8 i16 u16 i32 u32 i64 u64);
+#[cfg(has_i128)]
+deprecated_saturating_impl!(Saturating for i128 u128);
+
+macro_rules! saturating_impl {
+ ($trait_name:ident, $method:ident, $t:ty) => {
+ impl $trait_name for $t {
+ #[inline]
+ fn $method(&self, v: &Self) -> Self {
+ <$t>::$method(*self, *v)
+ }
+ }
+ };
+}
+
+/// Performs addition that saturates at the numeric bounds instead of overflowing.
+pub trait SaturatingAdd: Sized + Add<Self, Output = Self> {
+ /// Saturating addition. Computes `self + other`, saturating at the relevant high or low boundary of
+ /// the type.
+ fn saturating_add(&self, v: &Self) -> Self;
+}
+
+saturating_impl!(SaturatingAdd, saturating_add, u8);
+saturating_impl!(SaturatingAdd, saturating_add, u16);
+saturating_impl!(SaturatingAdd, saturating_add, u32);
+saturating_impl!(SaturatingAdd, saturating_add, u64);
+saturating_impl!(SaturatingAdd, saturating_add, usize);
+#[cfg(has_i128)]
+saturating_impl!(SaturatingAdd, saturating_add, u128);
+
+saturating_impl!(SaturatingAdd, saturating_add, i8);
+saturating_impl!(SaturatingAdd, saturating_add, i16);
+saturating_impl!(SaturatingAdd, saturating_add, i32);
+saturating_impl!(SaturatingAdd, saturating_add, i64);
+saturating_impl!(SaturatingAdd, saturating_add, isize);
+#[cfg(has_i128)]
+saturating_impl!(SaturatingAdd, saturating_add, i128);
+
+/// Performs subtraction that saturates at the numeric bounds instead of overflowing.
+pub trait SaturatingSub: Sized + Sub<Self, Output = Self> {
+ /// Saturating subtraction. Computes `self - other`, saturating at the relevant high or low boundary of
+ /// the type.
+ fn saturating_sub(&self, v: &Self) -> Self;
+}
+
+saturating_impl!(SaturatingSub, saturating_sub, u8);
+saturating_impl!(SaturatingSub, saturating_sub, u16);
+saturating_impl!(SaturatingSub, saturating_sub, u32);
+saturating_impl!(SaturatingSub, saturating_sub, u64);
+saturating_impl!(SaturatingSub, saturating_sub, usize);
+#[cfg(has_i128)]
+saturating_impl!(SaturatingSub, saturating_sub, u128);
+
+saturating_impl!(SaturatingSub, saturating_sub, i8);
+saturating_impl!(SaturatingSub, saturating_sub, i16);
+saturating_impl!(SaturatingSub, saturating_sub, i32);
+saturating_impl!(SaturatingSub, saturating_sub, i64);
+saturating_impl!(SaturatingSub, saturating_sub, isize);
+#[cfg(has_i128)]
+saturating_impl!(SaturatingSub, saturating_sub, i128);
+
+/// Performs multiplication that saturates at the numeric bounds instead of overflowing.
+pub trait SaturatingMul: Sized + Mul<Self, Output = Self> {
+ /// Saturating multiplication. Computes `self * other`, saturating at the relevant high or low boundary of
+ /// the type.
+ fn saturating_mul(&self, v: &Self) -> Self;
+}
+
+saturating_impl!(SaturatingMul, saturating_mul, u8);
+saturating_impl!(SaturatingMul, saturating_mul, u16);
+saturating_impl!(SaturatingMul, saturating_mul, u32);
+saturating_impl!(SaturatingMul, saturating_mul, u64);
+saturating_impl!(SaturatingMul, saturating_mul, usize);
+#[cfg(has_i128)]
+saturating_impl!(SaturatingMul, saturating_mul, u128);
+
+saturating_impl!(SaturatingMul, saturating_mul, i8);
+saturating_impl!(SaturatingMul, saturating_mul, i16);
+saturating_impl!(SaturatingMul, saturating_mul, i32);
+saturating_impl!(SaturatingMul, saturating_mul, i64);
+saturating_impl!(SaturatingMul, saturating_mul, isize);
+#[cfg(has_i128)]
+saturating_impl!(SaturatingMul, saturating_mul, i128);
+
+// TODO: add SaturatingNeg for signed integer primitives once the saturating_neg() API is stable.
+
+#[test]
+fn test_saturating_traits() {
+ fn saturating_add<T: SaturatingAdd>(a: T, b: T) -> T {
+ a.saturating_add(&b)
+ }
+ fn saturating_sub<T: SaturatingSub>(a: T, b: T) -> T {
+ a.saturating_sub(&b)
+ }
+ fn saturating_mul<T: SaturatingMul>(a: T, b: T) -> T {
+ a.saturating_mul(&b)
+ }
+ assert_eq!(saturating_add(255, 1), 255u8);
+ assert_eq!(saturating_add(127, 1), 127i8);
+ assert_eq!(saturating_add(-128, -1), -128i8);
+ assert_eq!(saturating_sub(0, 1), 0u8);
+ assert_eq!(saturating_sub(-128, 1), -128i8);
+ assert_eq!(saturating_sub(127, -1), 127i8);
+ assert_eq!(saturating_mul(255, 2), 255u8);
+ assert_eq!(saturating_mul(127, 2), 127i8);
+ assert_eq!(saturating_mul(-128, 2), -128i8);
+}
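
Illustrative sketch, not part of the upstream patch: the by-reference saturating traits, assuming they are re-exported at the crate root. Fully qualified calls keep them distinct from the primitives' inherent, by-value `saturating_*` methods.

```
use num_traits::{SaturatingAdd, SaturatingMul, SaturatingSub};

fn main() {
    // Clamp at the numeric bounds instead of wrapping or panicking.
    assert_eq!(SaturatingAdd::saturating_add(&200u8, &100), 255);
    assert_eq!(SaturatingSub::saturating_sub(&10u8, &20), 0);
    assert_eq!(SaturatingMul::saturating_mul(&100i8, &2), 127);
    assert_eq!(SaturatingMul::saturating_mul(&-100i8, &2), -128);
}
```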
diff --git a/vendor/num-traits/src/ops/wrapping.rs b/vendor/num-traits/src/ops/wrapping.rs
new file mode 100644
index 000000000..265b8f3bb
--- /dev/null
+++ b/vendor/num-traits/src/ops/wrapping.rs
@@ -0,0 +1,337 @@
+use core::num::Wrapping;
+use core::ops::{Add, Mul, Neg, Shl, Shr, Sub};
+
+macro_rules! wrapping_impl {
+ ($trait_name:ident, $method:ident, $t:ty) => {
+ impl $trait_name for $t {
+ #[inline]
+ fn $method(&self, v: &Self) -> Self {
+ <$t>::$method(*self, *v)
+ }
+ }
+ };
+ ($trait_name:ident, $method:ident, $t:ty, $rhs:ty) => {
+ impl $trait_name<$rhs> for $t {
+ #[inline]
+ fn $method(&self, v: &$rhs) -> Self {
+ <$t>::$method(*self, *v)
+ }
+ }
+ };
+}
+
+/// Performs addition that wraps around on overflow.
+pub trait WrappingAdd: Sized + Add<Self, Output = Self> {
+ /// Wrapping (modular) addition. Computes `self + other`, wrapping around at the boundary of
+ /// the type.
+ fn wrapping_add(&self, v: &Self) -> Self;
+}
+
+wrapping_impl!(WrappingAdd, wrapping_add, u8);
+wrapping_impl!(WrappingAdd, wrapping_add, u16);
+wrapping_impl!(WrappingAdd, wrapping_add, u32);
+wrapping_impl!(WrappingAdd, wrapping_add, u64);
+wrapping_impl!(WrappingAdd, wrapping_add, usize);
+#[cfg(has_i128)]
+wrapping_impl!(WrappingAdd, wrapping_add, u128);
+
+wrapping_impl!(WrappingAdd, wrapping_add, i8);
+wrapping_impl!(WrappingAdd, wrapping_add, i16);
+wrapping_impl!(WrappingAdd, wrapping_add, i32);
+wrapping_impl!(WrappingAdd, wrapping_add, i64);
+wrapping_impl!(WrappingAdd, wrapping_add, isize);
+#[cfg(has_i128)]
+wrapping_impl!(WrappingAdd, wrapping_add, i128);
+
+/// Performs subtraction that wraps around on overflow.
+pub trait WrappingSub: Sized + Sub<Self, Output = Self> {
+ /// Wrapping (modular) subtraction. Computes `self - other`, wrapping around at the boundary
+ /// of the type.
+ fn wrapping_sub(&self, v: &Self) -> Self;
+}
+
+wrapping_impl!(WrappingSub, wrapping_sub, u8);
+wrapping_impl!(WrappingSub, wrapping_sub, u16);
+wrapping_impl!(WrappingSub, wrapping_sub, u32);
+wrapping_impl!(WrappingSub, wrapping_sub, u64);
+wrapping_impl!(WrappingSub, wrapping_sub, usize);
+#[cfg(has_i128)]
+wrapping_impl!(WrappingSub, wrapping_sub, u128);
+
+wrapping_impl!(WrappingSub, wrapping_sub, i8);
+wrapping_impl!(WrappingSub, wrapping_sub, i16);
+wrapping_impl!(WrappingSub, wrapping_sub, i32);
+wrapping_impl!(WrappingSub, wrapping_sub, i64);
+wrapping_impl!(WrappingSub, wrapping_sub, isize);
+#[cfg(has_i128)]
+wrapping_impl!(WrappingSub, wrapping_sub, i128);
+
+/// Performs multiplication that wraps around on overflow.
+pub trait WrappingMul: Sized + Mul<Self, Output = Self> {
+ /// Wrapping (modular) multiplication. Computes `self * other`, wrapping around at the boundary
+ /// of the type.
+ fn wrapping_mul(&self, v: &Self) -> Self;
+}
+
+wrapping_impl!(WrappingMul, wrapping_mul, u8);
+wrapping_impl!(WrappingMul, wrapping_mul, u16);
+wrapping_impl!(WrappingMul, wrapping_mul, u32);
+wrapping_impl!(WrappingMul, wrapping_mul, u64);
+wrapping_impl!(WrappingMul, wrapping_mul, usize);
+#[cfg(has_i128)]
+wrapping_impl!(WrappingMul, wrapping_mul, u128);
+
+wrapping_impl!(WrappingMul, wrapping_mul, i8);
+wrapping_impl!(WrappingMul, wrapping_mul, i16);
+wrapping_impl!(WrappingMul, wrapping_mul, i32);
+wrapping_impl!(WrappingMul, wrapping_mul, i64);
+wrapping_impl!(WrappingMul, wrapping_mul, isize);
+#[cfg(has_i128)]
+wrapping_impl!(WrappingMul, wrapping_mul, i128);
+
+macro_rules! wrapping_unary_impl {
+ ($trait_name:ident, $method:ident, $t:ty) => {
+ impl $trait_name for $t {
+ #[inline]
+ fn $method(&self) -> $t {
+ <$t>::$method(*self)
+ }
+ }
+ };
+}
+
+/// Performs a negation that does not panic.
+pub trait WrappingNeg: Sized {
+ /// Wrapping (modular) negation. Computes `-self`,
+ /// wrapping around at the boundary of the type.
+ ///
+ /// Since unsigned types do not have negative equivalents
+ /// all applications of this function will wrap (except for `-0`).
+ /// For values smaller than the corresponding signed type's maximum
+ /// the result is the same as casting the corresponding signed value.
+ /// Any larger values are equivalent to `MAX + 1 - (val - MAX - 1)` where
+ /// `MAX` is the corresponding signed type's maximum.
+ ///
+ /// ```
+ /// use num_traits::WrappingNeg;
+ ///
+ /// assert_eq!(100i8.wrapping_neg(), -100);
+ /// assert_eq!((-100i8).wrapping_neg(), 100);
+ /// assert_eq!((-128i8).wrapping_neg(), -128); // wrapped!
+ /// ```
+ fn wrapping_neg(&self) -> Self;
+}
+
+wrapping_unary_impl!(WrappingNeg, wrapping_neg, u8);
+wrapping_unary_impl!(WrappingNeg, wrapping_neg, u16);
+wrapping_unary_impl!(WrappingNeg, wrapping_neg, u32);
+wrapping_unary_impl!(WrappingNeg, wrapping_neg, u64);
+wrapping_unary_impl!(WrappingNeg, wrapping_neg, usize);
+#[cfg(has_i128)]
+wrapping_unary_impl!(WrappingNeg, wrapping_neg, u128);
+wrapping_unary_impl!(WrappingNeg, wrapping_neg, i8);
+wrapping_unary_impl!(WrappingNeg, wrapping_neg, i16);
+wrapping_unary_impl!(WrappingNeg, wrapping_neg, i32);
+wrapping_unary_impl!(WrappingNeg, wrapping_neg, i64);
+wrapping_unary_impl!(WrappingNeg, wrapping_neg, isize);
+#[cfg(has_i128)]
+wrapping_unary_impl!(WrappingNeg, wrapping_neg, i128);
+
+macro_rules! wrapping_shift_impl {
+ ($trait_name:ident, $method:ident, $t:ty) => {
+ impl $trait_name for $t {
+ #[inline]
+ fn $method(&self, rhs: u32) -> $t {
+ <$t>::$method(*self, rhs)
+ }
+ }
+ };
+}
+
+/// Performs a left shift that does not panic.
+pub trait WrappingShl: Sized + Shl<usize, Output = Self> {
+ /// Panic-free bitwise shift-left; yields `self << mask(rhs)`,
+ /// where `mask` removes any high order bits of `rhs` that would
+ /// cause the shift to exceed the bitwidth of the type.
+ ///
+ /// ```
+ /// use num_traits::WrappingShl;
+ ///
+ /// let x: u16 = 0x0001;
+ ///
+ /// assert_eq!(WrappingShl::wrapping_shl(&x, 0), 0x0001);
+ /// assert_eq!(WrappingShl::wrapping_shl(&x, 1), 0x0002);
+ /// assert_eq!(WrappingShl::wrapping_shl(&x, 15), 0x8000);
+ /// assert_eq!(WrappingShl::wrapping_shl(&x, 16), 0x0001);
+ /// ```
+ fn wrapping_shl(&self, rhs: u32) -> Self;
+}
+
+wrapping_shift_impl!(WrappingShl, wrapping_shl, u8);
+wrapping_shift_impl!(WrappingShl, wrapping_shl, u16);
+wrapping_shift_impl!(WrappingShl, wrapping_shl, u32);
+wrapping_shift_impl!(WrappingShl, wrapping_shl, u64);
+wrapping_shift_impl!(WrappingShl, wrapping_shl, usize);
+#[cfg(has_i128)]
+wrapping_shift_impl!(WrappingShl, wrapping_shl, u128);
+
+wrapping_shift_impl!(WrappingShl, wrapping_shl, i8);
+wrapping_shift_impl!(WrappingShl, wrapping_shl, i16);
+wrapping_shift_impl!(WrappingShl, wrapping_shl, i32);
+wrapping_shift_impl!(WrappingShl, wrapping_shl, i64);
+wrapping_shift_impl!(WrappingShl, wrapping_shl, isize);
+#[cfg(has_i128)]
+wrapping_shift_impl!(WrappingShl, wrapping_shl, i128);
+
+/// Performs a right shift that does not panic.
+pub trait WrappingShr: Sized + Shr<usize, Output = Self> {
+ /// Panic-free bitwise shift-right; yields `self >> mask(rhs)`,
+ /// where `mask` removes any high order bits of `rhs` that would
+ /// cause the shift to exceed the bitwidth of the type.
+ ///
+ /// ```
+ /// use num_traits::WrappingShr;
+ ///
+ /// let x: u16 = 0x8000;
+ ///
+ /// assert_eq!(WrappingShr::wrapping_shr(&x, 0), 0x8000);
+ /// assert_eq!(WrappingShr::wrapping_shr(&x, 1), 0x4000);
+ /// assert_eq!(WrappingShr::wrapping_shr(&x, 15), 0x0001);
+ /// assert_eq!(WrappingShr::wrapping_shr(&x, 16), 0x8000);
+ /// ```
+ fn wrapping_shr(&self, rhs: u32) -> Self;
+}
+
+wrapping_shift_impl!(WrappingShr, wrapping_shr, u8);
+wrapping_shift_impl!(WrappingShr, wrapping_shr, u16);
+wrapping_shift_impl!(WrappingShr, wrapping_shr, u32);
+wrapping_shift_impl!(WrappingShr, wrapping_shr, u64);
+wrapping_shift_impl!(WrappingShr, wrapping_shr, usize);
+#[cfg(has_i128)]
+wrapping_shift_impl!(WrappingShr, wrapping_shr, u128);
+
+wrapping_shift_impl!(WrappingShr, wrapping_shr, i8);
+wrapping_shift_impl!(WrappingShr, wrapping_shr, i16);
+wrapping_shift_impl!(WrappingShr, wrapping_shr, i32);
+wrapping_shift_impl!(WrappingShr, wrapping_shr, i64);
+wrapping_shift_impl!(WrappingShr, wrapping_shr, isize);
+#[cfg(has_i128)]
+wrapping_shift_impl!(WrappingShr, wrapping_shr, i128);
+
+// Well this is a bit funny, but all the more appropriate.
+impl<T: WrappingAdd> WrappingAdd for Wrapping<T>
+where
+ Wrapping<T>: Add<Output = Wrapping<T>>,
+{
+ fn wrapping_add(&self, v: &Self) -> Self {
+ Wrapping(self.0.wrapping_add(&v.0))
+ }
+}
+impl<T: WrappingSub> WrappingSub for Wrapping<T>
+where
+ Wrapping<T>: Sub<Output = Wrapping<T>>,
+{
+ fn wrapping_sub(&self, v: &Self) -> Self {
+ Wrapping(self.0.wrapping_sub(&v.0))
+ }
+}
+impl<T: WrappingMul> WrappingMul for Wrapping<T>
+where
+ Wrapping<T>: Mul<Output = Wrapping<T>>,
+{
+ fn wrapping_mul(&self, v: &Self) -> Self {
+ Wrapping(self.0.wrapping_mul(&v.0))
+ }
+}
+impl<T: WrappingNeg> WrappingNeg for Wrapping<T>
+where
+ Wrapping<T>: Neg<Output = Wrapping<T>>,
+{
+ fn wrapping_neg(&self) -> Self {
+ Wrapping(self.0.wrapping_neg())
+ }
+}
+impl<T: WrappingShl> WrappingShl for Wrapping<T>
+where
+ Wrapping<T>: Shl<usize, Output = Wrapping<T>>,
+{
+ fn wrapping_shl(&self, rhs: u32) -> Self {
+ Wrapping(self.0.wrapping_shl(rhs))
+ }
+}
+impl<T: WrappingShr> WrappingShr for Wrapping<T>
+where
+ Wrapping<T>: Shr<usize, Output = Wrapping<T>>,
+{
+ fn wrapping_shr(&self, rhs: u32) -> Self {
+ Wrapping(self.0.wrapping_shr(rhs))
+ }
+}
+
+#[test]
+fn test_wrapping_traits() {
+ fn wrapping_add<T: WrappingAdd>(a: T, b: T) -> T {
+ a.wrapping_add(&b)
+ }
+ fn wrapping_sub<T: WrappingSub>(a: T, b: T) -> T {
+ a.wrapping_sub(&b)
+ }
+ fn wrapping_mul<T: WrappingMul>(a: T, b: T) -> T {
+ a.wrapping_mul(&b)
+ }
+ fn wrapping_neg<T: WrappingNeg>(a: T) -> T {
+ a.wrapping_neg()
+ }
+ fn wrapping_shl<T: WrappingShl>(a: T, b: u32) -> T {
+ a.wrapping_shl(b)
+ }
+ fn wrapping_shr<T: WrappingShr>(a: T, b: u32) -> T {
+ a.wrapping_shr(b)
+ }
+ assert_eq!(wrapping_add(255, 1), 0u8);
+ assert_eq!(wrapping_sub(0, 1), 255u8);
+ assert_eq!(wrapping_mul(255, 2), 254u8);
+ assert_eq!(wrapping_neg(255), 1u8);
+ assert_eq!(wrapping_shl(255, 8), 255u8);
+ assert_eq!(wrapping_shr(255, 8), 255u8);
+ assert_eq!(wrapping_add(255, 1), (Wrapping(255u8) + Wrapping(1u8)).0);
+ assert_eq!(wrapping_sub(0, 1), (Wrapping(0u8) - Wrapping(1u8)).0);
+ assert_eq!(wrapping_mul(255, 2), (Wrapping(255u8) * Wrapping(2u8)).0);
+ // TODO: Test for Wrapping::Neg. Not possible yet since core::ops::Neg was
+ // only added to core::num::Wrapping<_> in Rust 1.10.
+ assert_eq!(wrapping_shl(255, 8), (Wrapping(255u8) << 8).0);
+ assert_eq!(wrapping_shr(255, 8), (Wrapping(255u8) >> 8).0);
+}
+
+#[test]
+fn wrapping_is_wrappingadd() {
+ fn require_wrappingadd<T: WrappingAdd>(_: &T) {}
+ require_wrappingadd(&Wrapping(42));
+}
+
+#[test]
+fn wrapping_is_wrappingsub() {
+ fn require_wrappingsub<T: WrappingSub>(_: &T) {}
+ require_wrappingsub(&Wrapping(42));
+}
+
+#[test]
+fn wrapping_is_wrappingmul() {
+ fn require_wrappingmul<T: WrappingMul>(_: &T) {}
+ require_wrappingmul(&Wrapping(42));
+}
+
+// TODO: Test for Wrapping::Neg. Not possible yet since core::ops::Neg was
+// only added to core::num::Wrapping<_> in Rust 1.10.
+
+#[test]
+fn wrapping_is_wrappingshl() {
+ fn require_wrappingshl<T: WrappingShl>(_: &T) {}
+ require_wrappingshl(&Wrapping(42));
+}
+
+#[test]
+fn wrapping_is_wrappingshr() {
+ fn require_wrappingshr<T: WrappingShr>(_: &T) {}
+ require_wrappingshr(&Wrapping(42));
+}
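
Illustrative sketch, not part of the upstream patch: generic code over the wrapping traits, assuming the crate-root re-exports. The forwarding impls at the end of wrapping.rs mean `core::num::Wrapping<T>` satisfies the same bounds as the plain primitives.

```
use core::num::Wrapping;
use num_traits::{WrappingAdd, WrappingShl};

/// Shifts left then adds, with both steps wrapping at the type boundary.
fn shl_then_add<T: WrappingAdd + WrappingShl>(x: T, bits: u32, y: T) -> T {
    x.wrapping_shl(bits).wrapping_add(&y)
}

fn main() {
    // Plain primitives: (0x81 << 1) wraps to 0x02 in u8, then + 3 gives 0x05.
    assert_eq!(shl_then_add(0x81u8, 1, 3), 0x05);
    // The forwarding impls let Wrapping<u8> reuse the exact same generic code.
    assert_eq!(shl_then_add(Wrapping(0x81u8), 1, Wrapping(3)), Wrapping(0x05));
}
```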