Diffstat (limited to 'rust/vendor/num-bigint/src/biguint')
-rw-r--r--  rust/vendor/num-bigint/src/biguint/addition.rs        254
-rw-r--r--  rust/vendor/num-bigint/src/biguint/arbitrary.rs        34
-rw-r--r--  rust/vendor/num-bigint/src/biguint/bits.rs             93
-rw-r--r--  rust/vendor/num-bigint/src/biguint/convert.rs         820
-rw-r--r--  rust/vendor/num-bigint/src/biguint/division.rs        652
-rw-r--r--  rust/vendor/num-bigint/src/biguint/iter.rs            358
-rw-r--r--  rust/vendor/num-bigint/src/biguint/monty.rs           225
-rw-r--r--  rust/vendor/num-bigint/src/biguint/multiplication.rs  568
-rw-r--r--  rust/vendor/num-bigint/src/biguint/power.rs           258
-rw-r--r--  rust/vendor/num-bigint/src/biguint/serde.rs           119
-rw-r--r--  rust/vendor/num-bigint/src/biguint/shift.rs           172
-rw-r--r--  rust/vendor/num-bigint/src/biguint/subtraction.rs     312
12 files changed, 3865 insertions, 0 deletions
diff --git a/rust/vendor/num-bigint/src/biguint/addition.rs b/rust/vendor/num-bigint/src/biguint/addition.rs
new file mode 100644
index 0000000..ac6c0dd
--- /dev/null
+++ b/rust/vendor/num-bigint/src/biguint/addition.rs
@@ -0,0 +1,254 @@
+#[cfg(not(u64_digit))]
+use super::u32_from_u128;
+use super::{BigUint, IntDigits};
+
+use crate::big_digit::{self, BigDigit};
+use crate::UsizePromotion;
+
+use core::iter::Sum;
+use core::ops::{Add, AddAssign};
+use num_traits::{CheckedAdd, Zero};
+
+#[cfg(all(use_addcarry, target_arch = "x86_64"))]
+use core::arch::x86_64 as arch;
+
+#[cfg(all(use_addcarry, target_arch = "x86"))]
+use core::arch::x86 as arch;
+
+// Add with carry:
+#[cfg(all(use_addcarry, u64_digit))]
+#[inline]
+fn adc(carry: u8, a: u64, b: u64, out: &mut u64) -> u8 {
+ // Safety: There are absolutely no safety concerns with calling `_addcarry_u64`.
+ // It's just unsafe for API consistency with other intrinsics.
+ unsafe { arch::_addcarry_u64(carry, a, b, out) }
+}
+
+#[cfg(all(use_addcarry, not(u64_digit)))]
+#[inline]
+fn adc(carry: u8, a: u32, b: u32, out: &mut u32) -> u8 {
+ // Safety: There are absolutely no safety concerns with calling `_addcarry_u32`.
+ // It's just unsafe for API consistency with other intrinsics.
+ unsafe { arch::_addcarry_u32(carry, a, b, out) }
+}
+
+// fallback for environments where we don't have an addcarry intrinsic
+#[cfg(not(use_addcarry))]
+#[inline]
+fn adc(carry: u8, a: BigDigit, b: BigDigit, out: &mut BigDigit) -> u8 {
+ use crate::big_digit::DoubleBigDigit;
+
+ let sum = DoubleBigDigit::from(a) + DoubleBigDigit::from(b) + DoubleBigDigit::from(carry);
+ *out = sum as BigDigit;
+ (sum >> big_digit::BITS) as u8
+}
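+
+// A quick sanity check of the carry contract shared by all three `adc`
+// variants: MAX + MAX + carry 1 is 2^(BITS+1) - 1, so the output digit
+// wraps to MAX and the carry out is 1.
+#[test]
+fn adc_carry_out() {
+    let mut out = 0;
+    let carry = adc(1, big_digit::MAX, big_digit::MAX, &mut out);
+    assert_eq!((carry, out), (1, big_digit::MAX));
+}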
+
+/// Two argument addition of raw slices, `a += b`, returning the carry.
+///
+/// This is used when the data `Vec` might need to resize to push a non-zero carry, so we perform
+/// the addition first hoping that it will fit.
+///
+/// The caller _must_ ensure that `a` is at least as long as `b`.
+#[inline]
+pub(super) fn __add2(a: &mut [BigDigit], b: &[BigDigit]) -> BigDigit {
+ debug_assert!(a.len() >= b.len());
+
+ let mut carry = 0;
+ let (a_lo, a_hi) = a.split_at_mut(b.len());
+
+ for (a, b) in a_lo.iter_mut().zip(b) {
+ carry = adc(carry, *a, *b, a);
+ }
+
+ if carry != 0 {
+ for a in a_hi {
+ carry = adc(carry, *a, 0, a);
+ if carry == 0 {
+ break;
+ }
+ }
+ }
+
+ carry as BigDigit
+}
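+
+// Sketch of the ripple above: adding [1] to [MAX, MAX] carries through every
+// digit of `a` and out the top, leaving [0, 0] and returning 1.
+#[test]
+fn add2_carry_ripples_past_b() {
+    let mut a = [big_digit::MAX, big_digit::MAX];
+    assert_eq!(__add2(&mut a, &[1]), 1);
+    assert_eq!(a, [0, 0]);
+}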
+
+/// Two argument addition of raw slices:
+/// a += b
+///
+/// The caller _must_ ensure that a is big enough to store the result - typically this means
+/// resizing a to max(a.len(), b.len()) + 1, to fit a possible carry.
+pub(super) fn add2(a: &mut [BigDigit], b: &[BigDigit]) {
+ let carry = __add2(a, b);
+
+ debug_assert!(carry == 0);
+}
+
+forward_all_binop_to_val_ref_commutative!(impl Add for BigUint, add);
+forward_val_assign!(impl AddAssign for BigUint, add_assign);
+
+impl Add<&BigUint> for BigUint {
+ type Output = BigUint;
+
+ fn add(mut self, other: &BigUint) -> BigUint {
+ self += other;
+ self
+ }
+}
+impl AddAssign<&BigUint> for BigUint {
+ #[inline]
+ fn add_assign(&mut self, other: &BigUint) {
+ let self_len = self.data.len();
+ let carry = if self_len < other.data.len() {
+ let lo_carry = __add2(&mut self.data[..], &other.data[..self_len]);
+ self.data.extend_from_slice(&other.data[self_len..]);
+ __add2(&mut self.data[self_len..], &[lo_carry])
+ } else {
+ __add2(&mut self.data[..], &other.data[..])
+ };
+ if carry != 0 {
+ self.data.push(carry);
+ }
+ }
+}
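+
+// Example of the shorter-lhs branch above: a one-digit `self` plus a wider
+// `other` extends `self` with the copied digits and folds the low carry into
+// them, so 1 + (2^128 - 1) == 2^128.
+#[test]
+fn add_assign_extends_shorter_lhs() {
+    let mut a = BigUint::from(1u32);
+    a += &BigUint::from(u128::max_value());
+    assert_eq!(a, BigUint::from(1u32) << 128usize);
+}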
+
+promote_unsigned_scalars!(impl Add for BigUint, add);
+promote_unsigned_scalars_assign!(impl AddAssign for BigUint, add_assign);
+forward_all_scalar_binop_to_val_val_commutative!(impl Add<u32> for BigUint, add);
+forward_all_scalar_binop_to_val_val_commutative!(impl Add<u64> for BigUint, add);
+forward_all_scalar_binop_to_val_val_commutative!(impl Add<u128> for BigUint, add);
+
+impl Add<u32> for BigUint {
+ type Output = BigUint;
+
+ #[inline]
+ fn add(mut self, other: u32) -> BigUint {
+ self += other;
+ self
+ }
+}
+
+impl AddAssign<u32> for BigUint {
+ #[inline]
+ fn add_assign(&mut self, other: u32) {
+ if other != 0 {
+ if self.data.is_empty() {
+ self.data.push(0);
+ }
+
+ let carry = __add2(&mut self.data, &[other as BigDigit]);
+ if carry != 0 {
+ self.data.push(carry);
+ }
+ }
+ }
+}
+
+impl Add<u64> for BigUint {
+ type Output = BigUint;
+
+ #[inline]
+ fn add(mut self, other: u64) -> BigUint {
+ self += other;
+ self
+ }
+}
+
+impl AddAssign<u64> for BigUint {
+ #[cfg(not(u64_digit))]
+ #[inline]
+ fn add_assign(&mut self, other: u64) {
+ let (hi, lo) = big_digit::from_doublebigdigit(other);
+ if hi == 0 {
+ *self += lo;
+ } else {
+ while self.data.len() < 2 {
+ self.data.push(0);
+ }
+
+ let carry = __add2(&mut self.data, &[lo, hi]);
+ if carry != 0 {
+ self.data.push(carry);
+ }
+ }
+ }
+
+ #[cfg(u64_digit)]
+ #[inline]
+ fn add_assign(&mut self, other: u64) {
+ if other != 0 {
+ if self.data.is_empty() {
+ self.data.push(0);
+ }
+
+ let carry = __add2(&mut self.data, &[other as BigDigit]);
+ if carry != 0 {
+ self.data.push(carry);
+ }
+ }
+ }
+}
+
+impl Add<u128> for BigUint {
+ type Output = BigUint;
+
+ #[inline]
+ fn add(mut self, other: u128) -> BigUint {
+ self += other;
+ self
+ }
+}
+
+impl AddAssign<u128> for BigUint {
+ #[cfg(not(u64_digit))]
+ #[inline]
+ fn add_assign(&mut self, other: u128) {
+ if other <= u128::from(u64::max_value()) {
+ *self += other as u64
+ } else {
+ let (a, b, c, d) = u32_from_u128(other);
+ let carry = if a > 0 {
+ while self.data.len() < 4 {
+ self.data.push(0);
+ }
+ __add2(&mut self.data, &[d, c, b, a])
+ } else {
+ debug_assert!(b > 0);
+ while self.data.len() < 3 {
+ self.data.push(0);
+ }
+ __add2(&mut self.data, &[d, c, b])
+ };
+
+ if carry != 0 {
+ self.data.push(carry);
+ }
+ }
+ }
+
+ #[cfg(u64_digit)]
+ #[inline]
+ fn add_assign(&mut self, other: u128) {
+ let (hi, lo) = big_digit::from_doublebigdigit(other);
+ if hi == 0 {
+ *self += lo;
+ } else {
+ while self.data.len() < 2 {
+ self.data.push(0);
+ }
+
+ let carry = __add2(&mut self.data, &[lo, hi]);
+ if carry != 0 {
+ self.data.push(carry);
+ }
+ }
+ }
+}
+
+impl CheckedAdd for BigUint {
+ #[inline]
+ fn checked_add(&self, v: &BigUint) -> Option<BigUint> {
+ Some(self.add(v))
+ }
+}
+
+impl_sum_iter_type!(BigUint);
diff --git a/rust/vendor/num-bigint/src/biguint/arbitrary.rs b/rust/vendor/num-bigint/src/biguint/arbitrary.rs
new file mode 100644
index 0000000..6fa91c0
--- /dev/null
+++ b/rust/vendor/num-bigint/src/biguint/arbitrary.rs
@@ -0,0 +1,34 @@
+use super::{biguint_from_vec, BigUint};
+
+use crate::big_digit::BigDigit;
+#[cfg(feature = "quickcheck")]
+use crate::std_alloc::Box;
+use crate::std_alloc::Vec;
+
+#[cfg(feature = "quickcheck")]
+impl quickcheck::Arbitrary for BigUint {
+ fn arbitrary(g: &mut quickcheck::Gen) -> Self {
+ // Use arbitrary from Vec
+ biguint_from_vec(Vec::<BigDigit>::arbitrary(g))
+ }
+
+ fn shrink(&self) -> Box<dyn Iterator<Item = Self>> {
+ // Use shrinker from Vec
+ Box::new(self.data.shrink().map(biguint_from_vec))
+ }
+}
+
+#[cfg(feature = "arbitrary")]
+impl arbitrary::Arbitrary<'_> for BigUint {
+ fn arbitrary(u: &mut arbitrary::Unstructured<'_>) -> arbitrary::Result<Self> {
+ Ok(biguint_from_vec(Vec::<BigDigit>::arbitrary(u)?))
+ }
+
+ fn arbitrary_take_rest(u: arbitrary::Unstructured<'_>) -> arbitrary::Result<Self> {
+ Ok(biguint_from_vec(Vec::<BigDigit>::arbitrary_take_rest(u)?))
+ }
+
+ fn size_hint(depth: usize) -> (usize, Option<usize>) {
+ Vec::<BigDigit>::size_hint(depth)
+ }
+}
diff --git a/rust/vendor/num-bigint/src/biguint/bits.rs b/rust/vendor/num-bigint/src/biguint/bits.rs
new file mode 100644
index 0000000..42d7ec0
--- /dev/null
+++ b/rust/vendor/num-bigint/src/biguint/bits.rs
@@ -0,0 +1,93 @@
+use super::{BigUint, IntDigits};
+
+use core::ops::{BitAnd, BitAndAssign, BitOr, BitOrAssign, BitXor, BitXorAssign};
+
+forward_val_val_binop!(impl BitAnd for BigUint, bitand);
+forward_ref_val_binop!(impl BitAnd for BigUint, bitand);
+
+// do not use forward_ref_ref_binop_commutative! for bitand so that we can
+// clone the smaller value rather than the larger, avoiding over-allocation
+impl BitAnd<&BigUint> for &BigUint {
+ type Output = BigUint;
+
+ #[inline]
+ fn bitand(self, other: &BigUint) -> BigUint {
+ // forward to val-ref, choosing the smaller to clone
+ if self.data.len() <= other.data.len() {
+ self.clone() & other
+ } else {
+ other.clone() & self
+ }
+ }
+}
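+
+// Illustration of why cloning the shorter operand suffices: AND can only
+// clear bits, so the result never outgrows it, e.g. (2^100 + 0xFF) & 0x0F.
+#[test]
+fn bitand_fits_in_shorter_operand() {
+    let a = (BigUint::from(1u8) << 100usize) + BigUint::from(0xFFu8);
+    let b = BigUint::from(0x0Fu8);
+    assert_eq!(&a & &b, b);
+}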
+
+forward_val_assign!(impl BitAndAssign for BigUint, bitand_assign);
+
+impl BitAnd<&BigUint> for BigUint {
+ type Output = BigUint;
+
+ #[inline]
+ fn bitand(mut self, other: &BigUint) -> BigUint {
+ self &= other;
+ self
+ }
+}
+impl BitAndAssign<&BigUint> for BigUint {
+ #[inline]
+ fn bitand_assign(&mut self, other: &BigUint) {
+ for (ai, &bi) in self.data.iter_mut().zip(other.data.iter()) {
+ *ai &= bi;
+ }
+ self.data.truncate(other.data.len());
+ self.normalize();
+ }
+}
+
+forward_all_binop_to_val_ref_commutative!(impl BitOr for BigUint, bitor);
+forward_val_assign!(impl BitOrAssign for BigUint, bitor_assign);
+
+impl BitOr<&BigUint> for BigUint {
+ type Output = BigUint;
+
+ fn bitor(mut self, other: &BigUint) -> BigUint {
+ self |= other;
+ self
+ }
+}
+impl BitOrAssign<&BigUint> for BigUint {
+ #[inline]
+ fn bitor_assign(&mut self, other: &BigUint) {
+ for (ai, &bi) in self.data.iter_mut().zip(other.data.iter()) {
+ *ai |= bi;
+ }
+ if other.data.len() > self.data.len() {
+ let extra = &other.data[self.data.len()..];
+ self.data.extend(extra.iter().cloned());
+ }
+ }
+}
+
+forward_all_binop_to_val_ref_commutative!(impl BitXor for BigUint, bitxor);
+forward_val_assign!(impl BitXorAssign for BigUint, bitxor_assign);
+
+impl BitXor<&BigUint> for BigUint {
+ type Output = BigUint;
+
+ fn bitxor(mut self, other: &BigUint) -> BigUint {
+ self ^= other;
+ self
+ }
+}
+impl BitXorAssign<&BigUint> for BigUint {
+ #[inline]
+ fn bitxor_assign(&mut self, other: &BigUint) {
+ for (ai, &bi) in self.data.iter_mut().zip(other.data.iter()) {
+ *ai ^= bi;
+ }
+ if other.data.len() > self.data.len() {
+ let extra = &other.data[self.data.len()..];
+ self.data.extend(extra.iter().cloned());
+ }
+ self.normalize();
+ }
+}
diff --git a/rust/vendor/num-bigint/src/biguint/convert.rs b/rust/vendor/num-bigint/src/biguint/convert.rs
new file mode 100644
index 0000000..f19bc75
--- /dev/null
+++ b/rust/vendor/num-bigint/src/biguint/convert.rs
@@ -0,0 +1,820 @@
+// This uses stdlib features higher than the MSRV
+#![allow(clippy::manual_range_contains)] // 1.35
+
+use super::{biguint_from_vec, BigUint, ToBigUint};
+
+use super::addition::add2;
+use super::division::div_rem_digit;
+use super::multiplication::mac_with_carry;
+
+use crate::big_digit::{self, BigDigit};
+use crate::std_alloc::Vec;
+use crate::ParseBigIntError;
+#[cfg(has_try_from)]
+use crate::TryFromBigIntError;
+
+use core::cmp::Ordering::{Equal, Greater, Less};
+#[cfg(has_try_from)]
+use core::convert::TryFrom;
+use core::mem;
+use core::str::FromStr;
+use num_integer::{Integer, Roots};
+use num_traits::float::FloatCore;
+use num_traits::{FromPrimitive, Num, One, PrimInt, ToPrimitive, Zero};
+
+/// Find last set bit
+/// fls(0) == 0, fls(u32::MAX) == 32
+fn fls<T: PrimInt>(v: T) -> u8 {
+ mem::size_of::<T>() as u8 * 8 - v.leading_zeros() as u8
+}
+
+fn ilog2<T: PrimInt>(v: T) -> u8 {
+ fls(v) - 1
+}
+
+impl FromStr for BigUint {
+ type Err = ParseBigIntError;
+
+ #[inline]
+ fn from_str(s: &str) -> Result<BigUint, ParseBigIntError> {
+ BigUint::from_str_radix(s, 10)
+ }
+}
+
+// Convert from a power of two radix (bits == ilog2(radix)) where bits evenly divides
+// BigDigit::BITS
+pub(super) fn from_bitwise_digits_le(v: &[u8], bits: u8) -> BigUint {
+ debug_assert!(!v.is_empty() && bits <= 8 && big_digit::BITS % bits == 0);
+ debug_assert!(v.iter().all(|&c| BigDigit::from(c) < (1 << bits)));
+
+ let digits_per_big_digit = big_digit::BITS / bits;
+
+ let data = v
+ .chunks(digits_per_big_digit.into())
+ .map(|chunk| {
+ chunk
+ .iter()
+ .rev()
+ .fold(0, |acc, &c| (acc << bits) | BigDigit::from(c))
+ })
+ .collect();
+
+ biguint_from_vec(data)
+}
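+
+// Example: with bits == 4 (radix 16), the little-endian digits of 0xABCD are
+// [0xD, 0xC, 0xB, 0xA]; each chunk folds back most significant digit first.
+#[test]
+fn pack_hex_digits_le() {
+    let n = from_bitwise_digits_le(&[0xD, 0xC, 0xB, 0xA], 4);
+    assert_eq!(n, BigUint::from(0xABCDu32));
+}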
+
+// Convert from a power of two radix (bits == ilog2(radix)) where bits doesn't evenly divide
+// BigDigit::BITS
+fn from_inexact_bitwise_digits_le(v: &[u8], bits: u8) -> BigUint {
+ debug_assert!(!v.is_empty() && bits <= 8 && big_digit::BITS % bits != 0);
+ debug_assert!(v.iter().all(|&c| BigDigit::from(c) < (1 << bits)));
+
+ let total_bits = (v.len() as u64).saturating_mul(bits.into());
+ let big_digits = Integer::div_ceil(&total_bits, &big_digit::BITS.into())
+ .to_usize()
+ .unwrap_or(core::usize::MAX);
+ let mut data = Vec::with_capacity(big_digits);
+
+ let mut d = 0;
+ let mut dbits = 0; // number of bits we currently have in d
+
+    // walk v accumulating bits in d; whenever we accumulate big_digit::BITS in d, spit out a
+    // big_digit:
+ for &c in v {
+ d |= BigDigit::from(c) << dbits;
+ dbits += bits;
+
+ if dbits >= big_digit::BITS {
+ data.push(d);
+ dbits -= big_digit::BITS;
+ // if dbits was > big_digit::BITS, we dropped some of the bits in c (they couldn't fit
+ // in d) - grab the bits we lost here:
+ d = BigDigit::from(c) >> (bits - dbits);
+ }
+ }
+
+ if dbits > 0 {
+ debug_assert!(dbits < big_digit::BITS);
+ data.push(d as BigDigit);
+ }
+
+ biguint_from_vec(data)
+}
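+
+// Example of the spill path: bits == 3 (radix 8) divides neither 32 nor 64,
+// so octal digits can straddle two big digits. [5, 5, 7] is 0o755 == 493.
+#[test]
+fn pack_octal_digits_le() {
+    let n = from_inexact_bitwise_digits_le(&[5, 5, 7], 3);
+    assert_eq!(n, BigUint::from(493u32));
+}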
+
+// Read big-endian radix digits (for a radix that is not a power of two)
+fn from_radix_digits_be(v: &[u8], radix: u32) -> BigUint {
+ debug_assert!(!v.is_empty() && !radix.is_power_of_two());
+ debug_assert!(v.iter().all(|&c| u32::from(c) < radix));
+
+ // Estimate how big the result will be, so we can pre-allocate it.
+ #[cfg(feature = "std")]
+ let big_digits = {
+ let radix_log2 = f64::from(radix).log2();
+ let bits = radix_log2 * v.len() as f64;
+ (bits / big_digit::BITS as f64).ceil()
+ };
+ #[cfg(not(feature = "std"))]
+ let big_digits = {
+ let radix_log2 = ilog2(radix.next_power_of_two()) as usize;
+ let bits = radix_log2 * v.len();
+ (bits / big_digit::BITS as usize) + 1
+ };
+
+ let mut data = Vec::with_capacity(big_digits.to_usize().unwrap_or(0));
+
+ let (base, power) = get_radix_base(radix, big_digit::BITS);
+ let radix = radix as BigDigit;
+
+ let r = v.len() % power;
+ let i = if r == 0 { power } else { r };
+ let (head, tail) = v.split_at(i);
+
+ let first = head
+ .iter()
+ .fold(0, |acc, &d| acc * radix + BigDigit::from(d));
+ data.push(first);
+
+ debug_assert!(tail.len() % power == 0);
+ for chunk in tail.chunks(power) {
+ if data.last() != Some(&0) {
+ data.push(0);
+ }
+
+ let mut carry = 0;
+ for d in data.iter_mut() {
+ *d = mac_with_carry(0, *d, base, &mut carry);
+ }
+ debug_assert!(carry == 0);
+
+ let n = chunk
+ .iter()
+ .fold(0, |acc, &d| acc * radix + BigDigit::from(d));
+ add2(&mut data, &[n]);
+ }
+
+ biguint_from_vec(data)
+}
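+
+// Example: plain digit values (not ASCII), most significant first; [1, 2, 3]
+// in base 10 evaluates to 123 via the chunked Horner loop above.
+#[test]
+fn radix_digits_be_small() {
+    assert_eq!(from_radix_digits_be(&[1, 2, 3], 10), BigUint::from(123u32));
+}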
+
+pub(super) fn from_radix_be(buf: &[u8], radix: u32) -> Option<BigUint> {
+ assert!(
+ 2 <= radix && radix <= 256,
+ "The radix must be within 2...256"
+ );
+
+ if buf.is_empty() {
+ return Some(Zero::zero());
+ }
+
+ if radix != 256 && buf.iter().any(|&b| b >= radix as u8) {
+ return None;
+ }
+
+ let res = if radix.is_power_of_two() {
+ // Powers of two can use bitwise masks and shifting instead of multiplication
+ let bits = ilog2(radix);
+ let mut v = Vec::from(buf);
+ v.reverse();
+ if big_digit::BITS % bits == 0 {
+ from_bitwise_digits_le(&v, bits)
+ } else {
+ from_inexact_bitwise_digits_le(&v, bits)
+ }
+ } else {
+ from_radix_digits_be(buf, radix)
+ };
+
+ Some(res)
+}
+
+pub(super) fn from_radix_le(buf: &[u8], radix: u32) -> Option<BigUint> {
+ assert!(
+ 2 <= radix && radix <= 256,
+ "The radix must be within 2...256"
+ );
+
+ if buf.is_empty() {
+ return Some(Zero::zero());
+ }
+
+ if radix != 256 && buf.iter().any(|&b| b >= radix as u8) {
+ return None;
+ }
+
+ let res = if radix.is_power_of_two() {
+ // Powers of two can use bitwise masks and shifting instead of multiplication
+ let bits = ilog2(radix);
+ if big_digit::BITS % bits == 0 {
+ from_bitwise_digits_le(buf, bits)
+ } else {
+ from_inexact_bitwise_digits_le(buf, bits)
+ }
+ } else {
+ let mut v = Vec::from(buf);
+ v.reverse();
+ from_radix_digits_be(&v, radix)
+ };
+
+ Some(res)
+}
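+
+// Example: base 256 treats each byte as one digit, and being a power of two
+// it takes the bitwise path; little-endian bytes [0x34, 0x12] decode to 0x1234.
+#[test]
+fn radix_le_bytes() {
+    let n = from_radix_le(&[0x34, 0x12], 256);
+    assert_eq!(n, Some(BigUint::from(0x1234u32)));
+}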
+
+impl Num for BigUint {
+ type FromStrRadixErr = ParseBigIntError;
+
+ /// Creates and initializes a `BigUint`.
+ fn from_str_radix(s: &str, radix: u32) -> Result<BigUint, ParseBigIntError> {
+ assert!(2 <= radix && radix <= 36, "The radix must be within 2...36");
+ let mut s = s;
+ if s.starts_with('+') {
+ let tail = &s[1..];
+ if !tail.starts_with('+') {
+ s = tail
+ }
+ }
+
+ if s.is_empty() {
+ return Err(ParseBigIntError::empty());
+ }
+
+ if s.starts_with('_') {
+ // Must lead with a real digit!
+ return Err(ParseBigIntError::invalid());
+ }
+
+ // First normalize all characters to plain digit values
+ let mut v = Vec::with_capacity(s.len());
+ for b in s.bytes() {
+ let d = match b {
+ b'0'..=b'9' => b - b'0',
+ b'a'..=b'z' => b - b'a' + 10,
+ b'A'..=b'Z' => b - b'A' + 10,
+ b'_' => continue,
+ _ => core::u8::MAX,
+ };
+ if d < radix as u8 {
+ v.push(d);
+ } else {
+ return Err(ParseBigIntError::invalid());
+ }
+ }
+
+ let res = if radix.is_power_of_two() {
+ // Powers of two can use bitwise masks and shifting instead of multiplication
+ let bits = ilog2(radix);
+ v.reverse();
+ if big_digit::BITS % bits == 0 {
+ from_bitwise_digits_le(&v, bits)
+ } else {
+ from_inexact_bitwise_digits_le(&v, bits)
+ }
+ } else {
+ from_radix_digits_be(&v, radix)
+ };
+ Ok(res)
+ }
+}
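+
+// Example of the digit normalization above: underscores are skipped once a
+// real digit has been seen, while a leading underscore is rejected.
+#[test]
+fn from_str_radix_underscores() {
+    let n = BigUint::from_str_radix("1_000", 10).unwrap();
+    assert_eq!(n, BigUint::from(1000u32));
+    assert!(BigUint::from_str_radix("_1", 10).is_err());
+}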
+
+fn high_bits_to_u64(v: &BigUint) -> u64 {
+ match v.data.len() {
+ 0 => 0,
+ 1 => {
+ // XXX Conversion is useless if already 64-bit.
+ #[allow(clippy::useless_conversion)]
+ let v0 = u64::from(v.data[0]);
+ v0
+ }
+ _ => {
+ let mut bits = v.bits();
+ let mut ret = 0u64;
+ let mut ret_bits = 0;
+
+ for d in v.data.iter().rev() {
+ let digit_bits = (bits - 1) % u64::from(big_digit::BITS) + 1;
+ let bits_want = Ord::min(64 - ret_bits, digit_bits);
+
+ if bits_want != 0 {
+ if bits_want != 64 {
+ ret <<= bits_want;
+ }
+ // XXX Conversion is useless if already 64-bit.
+ #[allow(clippy::useless_conversion)]
+ let d0 = u64::from(*d) >> (digit_bits - bits_want);
+ ret |= d0;
+ }
+
+ // Implement round-to-odd: If any lower bits are 1, set LSB to 1
+ // so that rounding again to floating point value using
+ // nearest-ties-to-even is correct.
+ //
+ // See: https://en.wikipedia.org/wiki/Rounding#Rounding_to_prepare_for_shorter_precision
+
+ if digit_bits - bits_want != 0 {
+ // XXX Conversion is useless if already 64-bit.
+ #[allow(clippy::useless_conversion)]
+ let masked = u64::from(*d) << (64 - (digit_bits - bits_want) as u32);
+ ret |= (masked != 0) as u64;
+ }
+
+ ret_bits += bits_want;
+ bits -= bits_want;
+ }
+
+ ret
+ }
+ }
+}
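+
+// Round-to-odd in action: for 2^65 + 1 the top 64 bits are 2^63, and the
+// dropped low 1-bit sets the least significant bit of the result, so a later
+// nearest-ties-to-even rounding cannot be fooled by the truncation.
+#[test]
+fn high_bits_round_to_odd() {
+    let n = (BigUint::from(1u8) << 65usize) + 1u32;
+    assert_eq!(high_bits_to_u64(&n), 0x8000_0000_0000_0001);
+}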
+
+impl ToPrimitive for BigUint {
+ #[inline]
+ fn to_i64(&self) -> Option<i64> {
+ self.to_u64().as_ref().and_then(u64::to_i64)
+ }
+
+ #[inline]
+ fn to_i128(&self) -> Option<i128> {
+ self.to_u128().as_ref().and_then(u128::to_i128)
+ }
+
+ #[allow(clippy::useless_conversion)]
+ #[inline]
+ fn to_u64(&self) -> Option<u64> {
+ let mut ret: u64 = 0;
+ let mut bits = 0;
+
+ for i in self.data.iter() {
+ if bits >= 64 {
+ return None;
+ }
+
+ // XXX Conversion is useless if already 64-bit.
+ ret += u64::from(*i) << bits;
+ bits += big_digit::BITS;
+ }
+
+ Some(ret)
+ }
+
+ #[inline]
+ fn to_u128(&self) -> Option<u128> {
+ let mut ret: u128 = 0;
+ let mut bits = 0;
+
+ for i in self.data.iter() {
+ if bits >= 128 {
+ return None;
+ }
+
+ ret |= u128::from(*i) << bits;
+ bits += big_digit::BITS;
+ }
+
+ Some(ret)
+ }
+
+ #[inline]
+ fn to_f32(&self) -> Option<f32> {
+ let mantissa = high_bits_to_u64(self);
+ let exponent = self.bits() - u64::from(fls(mantissa));
+
+ if exponent > core::f32::MAX_EXP as u64 {
+ Some(core::f32::INFINITY)
+ } else {
+ Some((mantissa as f32) * 2.0f32.powi(exponent as i32))
+ }
+ }
+
+ #[inline]
+ fn to_f64(&self) -> Option<f64> {
+ let mantissa = high_bits_to_u64(self);
+ let exponent = self.bits() - u64::from(fls(mantissa));
+
+ if exponent > core::f64::MAX_EXP as u64 {
+ Some(core::f64::INFINITY)
+ } else {
+ Some((mantissa as f64) * 2.0f64.powi(exponent as i32))
+ }
+ }
+}
+
+macro_rules! impl_try_from_biguint {
+ ($T:ty, $to_ty:path) => {
+ #[cfg(has_try_from)]
+ impl TryFrom<&BigUint> for $T {
+ type Error = TryFromBigIntError<()>;
+
+ #[inline]
+ fn try_from(value: &BigUint) -> Result<$T, TryFromBigIntError<()>> {
+ $to_ty(value).ok_or(TryFromBigIntError::new(()))
+ }
+ }
+
+ #[cfg(has_try_from)]
+ impl TryFrom<BigUint> for $T {
+ type Error = TryFromBigIntError<BigUint>;
+
+ #[inline]
+ fn try_from(value: BigUint) -> Result<$T, TryFromBigIntError<BigUint>> {
+ <$T>::try_from(&value).map_err(|_| TryFromBigIntError::new(value))
+ }
+ }
+ };
+}
+
+impl_try_from_biguint!(u8, ToPrimitive::to_u8);
+impl_try_from_biguint!(u16, ToPrimitive::to_u16);
+impl_try_from_biguint!(u32, ToPrimitive::to_u32);
+impl_try_from_biguint!(u64, ToPrimitive::to_u64);
+impl_try_from_biguint!(usize, ToPrimitive::to_usize);
+impl_try_from_biguint!(u128, ToPrimitive::to_u128);
+
+impl_try_from_biguint!(i8, ToPrimitive::to_i8);
+impl_try_from_biguint!(i16, ToPrimitive::to_i16);
+impl_try_from_biguint!(i32, ToPrimitive::to_i32);
+impl_try_from_biguint!(i64, ToPrimitive::to_i64);
+impl_try_from_biguint!(isize, ToPrimitive::to_isize);
+impl_try_from_biguint!(i128, ToPrimitive::to_i128);
+
+impl FromPrimitive for BigUint {
+ #[inline]
+ fn from_i64(n: i64) -> Option<BigUint> {
+ if n >= 0 {
+ Some(BigUint::from(n as u64))
+ } else {
+ None
+ }
+ }
+
+ #[inline]
+ fn from_i128(n: i128) -> Option<BigUint> {
+ if n >= 0 {
+ Some(BigUint::from(n as u128))
+ } else {
+ None
+ }
+ }
+
+ #[inline]
+ fn from_u64(n: u64) -> Option<BigUint> {
+ Some(BigUint::from(n))
+ }
+
+ #[inline]
+ fn from_u128(n: u128) -> Option<BigUint> {
+ Some(BigUint::from(n))
+ }
+
+ #[inline]
+ fn from_f64(mut n: f64) -> Option<BigUint> {
+ // handle NAN, INFINITY, NEG_INFINITY
+ if !n.is_finite() {
+ return None;
+ }
+
+ // match the rounding of casting from float to int
+ n = n.trunc();
+
+ // handle 0.x, -0.x
+ if n.is_zero() {
+ return Some(BigUint::zero());
+ }
+
+ let (mantissa, exponent, sign) = FloatCore::integer_decode(n);
+
+ if sign == -1 {
+ return None;
+ }
+
+ let mut ret = BigUint::from(mantissa);
+ match exponent.cmp(&0) {
+ Greater => ret <<= exponent as usize,
+ Equal => {}
+ Less => ret >>= (-exponent) as usize,
+ }
+ Some(ret)
+ }
+}
+
+impl From<u64> for BigUint {
+ #[inline]
+ fn from(mut n: u64) -> Self {
+ let mut ret: BigUint = Zero::zero();
+
+ while n != 0 {
+ ret.data.push(n as BigDigit);
+ // don't overflow if BITS is 64:
+ n = (n >> 1) >> (big_digit::BITS - 1);
+ }
+
+ ret
+ }
+}
+
+impl From<u128> for BigUint {
+ #[inline]
+ fn from(mut n: u128) -> Self {
+ let mut ret: BigUint = Zero::zero();
+
+ while n != 0 {
+ ret.data.push(n as BigDigit);
+ n >>= big_digit::BITS;
+ }
+
+ ret
+ }
+}
+
+macro_rules! impl_biguint_from_uint {
+ ($T:ty) => {
+ impl From<$T> for BigUint {
+ #[inline]
+ fn from(n: $T) -> Self {
+ BigUint::from(n as u64)
+ }
+ }
+ };
+}
+
+impl_biguint_from_uint!(u8);
+impl_biguint_from_uint!(u16);
+impl_biguint_from_uint!(u32);
+impl_biguint_from_uint!(usize);
+
+macro_rules! impl_biguint_try_from_int {
+ ($T:ty, $from_ty:path) => {
+ #[cfg(has_try_from)]
+ impl TryFrom<$T> for BigUint {
+ type Error = TryFromBigIntError<()>;
+
+ #[inline]
+ fn try_from(value: $T) -> Result<BigUint, TryFromBigIntError<()>> {
+ $from_ty(value).ok_or(TryFromBigIntError::new(()))
+ }
+ }
+ };
+}
+
+impl_biguint_try_from_int!(i8, FromPrimitive::from_i8);
+impl_biguint_try_from_int!(i16, FromPrimitive::from_i16);
+impl_biguint_try_from_int!(i32, FromPrimitive::from_i32);
+impl_biguint_try_from_int!(i64, FromPrimitive::from_i64);
+impl_biguint_try_from_int!(isize, FromPrimitive::from_isize);
+impl_biguint_try_from_int!(i128, FromPrimitive::from_i128);
+
+impl ToBigUint for BigUint {
+ #[inline]
+ fn to_biguint(&self) -> Option<BigUint> {
+ Some(self.clone())
+ }
+}
+
+macro_rules! impl_to_biguint {
+ ($T:ty, $from_ty:path) => {
+ impl ToBigUint for $T {
+ #[inline]
+ fn to_biguint(&self) -> Option<BigUint> {
+ $from_ty(*self)
+ }
+ }
+ };
+}
+
+impl_to_biguint!(isize, FromPrimitive::from_isize);
+impl_to_biguint!(i8, FromPrimitive::from_i8);
+impl_to_biguint!(i16, FromPrimitive::from_i16);
+impl_to_biguint!(i32, FromPrimitive::from_i32);
+impl_to_biguint!(i64, FromPrimitive::from_i64);
+impl_to_biguint!(i128, FromPrimitive::from_i128);
+
+impl_to_biguint!(usize, FromPrimitive::from_usize);
+impl_to_biguint!(u8, FromPrimitive::from_u8);
+impl_to_biguint!(u16, FromPrimitive::from_u16);
+impl_to_biguint!(u32, FromPrimitive::from_u32);
+impl_to_biguint!(u64, FromPrimitive::from_u64);
+impl_to_biguint!(u128, FromPrimitive::from_u128);
+
+impl_to_biguint!(f32, FromPrimitive::from_f32);
+impl_to_biguint!(f64, FromPrimitive::from_f64);
+
+impl From<bool> for BigUint {
+ fn from(x: bool) -> Self {
+ if x {
+ One::one()
+ } else {
+ Zero::zero()
+ }
+ }
+}
+
+// Extract bitwise digits where bits evenly divides BigDigit::BITS
+pub(super) fn to_bitwise_digits_le(u: &BigUint, bits: u8) -> Vec<u8> {
+ debug_assert!(!u.is_zero() && bits <= 8 && big_digit::BITS % bits == 0);
+
+ let last_i = u.data.len() - 1;
+ let mask: BigDigit = (1 << bits) - 1;
+ let digits_per_big_digit = big_digit::BITS / bits;
+ let digits = Integer::div_ceil(&u.bits(), &u64::from(bits))
+ .to_usize()
+ .unwrap_or(core::usize::MAX);
+ let mut res = Vec::with_capacity(digits);
+
+ for mut r in u.data[..last_i].iter().cloned() {
+ for _ in 0..digits_per_big_digit {
+ res.push((r & mask) as u8);
+ r >>= bits;
+ }
+ }
+
+ let mut r = u.data[last_i];
+ while r != 0 {
+ res.push((r & mask) as u8);
+ r >>= bits;
+ }
+
+ res
+}
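+
+// Example, the inverse of `from_bitwise_digits_le`: 0xABCD unpacks at
+// bits == 4 into its hex digits, least significant first.
+#[test]
+fn unpack_hex_digits_le() {
+    let n = BigUint::from(0xABCDu32);
+    assert_eq!(to_bitwise_digits_le(&n, 4), vec![0xD, 0xC, 0xB, 0xA]);
+}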
+
+// Extract bitwise digits where bits doesn't evenly divide BigDigit::BITS
+fn to_inexact_bitwise_digits_le(u: &BigUint, bits: u8) -> Vec<u8> {
+ debug_assert!(!u.is_zero() && bits <= 8 && big_digit::BITS % bits != 0);
+
+ let mask: BigDigit = (1 << bits) - 1;
+ let digits = Integer::div_ceil(&u.bits(), &u64::from(bits))
+ .to_usize()
+ .unwrap_or(core::usize::MAX);
+ let mut res = Vec::with_capacity(digits);
+
+ let mut r = 0;
+ let mut rbits = 0;
+
+ for c in &u.data {
+ r |= *c << rbits;
+ rbits += big_digit::BITS;
+
+ while rbits >= bits {
+ res.push((r & mask) as u8);
+ r >>= bits;
+
+ // r had more bits than it could fit - grab the bits we lost
+ if rbits > big_digit::BITS {
+ r = *c >> (big_digit::BITS - (rbits - bits));
+ }
+
+ rbits -= bits;
+ }
+ }
+
+ if rbits != 0 {
+ res.push(r as u8);
+ }
+
+ while let Some(&0) = res.last() {
+ res.pop();
+ }
+
+ res
+}
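+
+// Example: 493 == 0o755 comes back out through the spill path at bits == 3,
+// least significant digit first, with trailing zeros trimmed.
+#[test]
+fn unpack_octal_digits_le() {
+    let n = BigUint::from(493u32);
+    assert_eq!(to_inexact_bitwise_digits_le(&n, 3), vec![5, 5, 7]);
+}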
+
+// Extract little-endian radix digits
+#[inline(always)] // forced inline to get const-prop for radix=10
+pub(super) fn to_radix_digits_le(u: &BigUint, radix: u32) -> Vec<u8> {
+ debug_assert!(!u.is_zero() && !radix.is_power_of_two());
+
+ #[cfg(feature = "std")]
+ let radix_digits = {
+ let radix_log2 = f64::from(radix).log2();
+ ((u.bits() as f64) / radix_log2).ceil()
+ };
+ #[cfg(not(feature = "std"))]
+ let radix_digits = {
+ let radix_log2 = ilog2(radix) as usize;
+ ((u.bits() as usize) / radix_log2) + 1
+ };
+
+ // Estimate how big the result will be, so we can pre-allocate it.
+ let mut res = Vec::with_capacity(radix_digits.to_usize().unwrap_or(0));
+
+ let mut digits = u.clone();
+
+ let (base, power) = get_radix_base(radix, big_digit::HALF_BITS);
+ let radix = radix as BigDigit;
+
+ // For very large numbers, the O(n²) loop of repeated `div_rem_digit` dominates the
+ // performance. We can mitigate this by dividing into chunks of a larger base first.
+ // The threshold for this was chosen by anecdotal performance measurements to
+ // approximate where this starts to make a noticeable difference.
+ if digits.data.len() >= 64 {
+ let mut big_base = BigUint::from(base * base);
+ let mut big_power = 2usize;
+
+ // Choose a target base length near √n.
+ let target_len = digits.data.len().sqrt();
+ while big_base.data.len() < target_len {
+ big_base = &big_base * &big_base;
+ big_power *= 2;
+ }
+
+ // This outer loop will run approximately √n times.
+ while digits > big_base {
+ // This is still the dominating factor, with n digits divided by √n digits.
+ let (q, mut big_r) = digits.div_rem(&big_base);
+ digits = q;
+
+            // This inner loop now has O((√n)²) = O(n) behavior altogether.
+ for _ in 0..big_power {
+ let (q, mut r) = div_rem_digit(big_r, base);
+ big_r = q;
+ for _ in 0..power {
+ res.push((r % radix) as u8);
+ r /= radix;
+ }
+ }
+ }
+ }
+
+ while digits.data.len() > 1 {
+ let (q, mut r) = div_rem_digit(digits, base);
+ for _ in 0..power {
+ res.push((r % radix) as u8);
+ r /= radix;
+ }
+ digits = q;
+ }
+
+ let mut r = digits.data[0];
+ while r != 0 {
+ res.push((r % radix) as u8);
+ r /= radix;
+ }
+
+ res
+}
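+
+// Example: little-endian decimal digits of 12345 as plain values; a number
+// this small never reaches the big-base fast path above.
+#[test]
+fn decimal_digits_le() {
+    let n = BigUint::from(12345u32);
+    assert_eq!(to_radix_digits_le(&n, 10), vec![5, 4, 3, 2, 1]);
+}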
+
+pub(super) fn to_radix_le(u: &BigUint, radix: u32) -> Vec<u8> {
+ if u.is_zero() {
+ vec![0]
+ } else if radix.is_power_of_two() {
+ // Powers of two can use bitwise masks and shifting instead of division
+ let bits = ilog2(radix);
+ if big_digit::BITS % bits == 0 {
+ to_bitwise_digits_le(u, bits)
+ } else {
+ to_inexact_bitwise_digits_le(u, bits)
+ }
+ } else if radix == 10 {
+ // 10 is so common that it's worth separating out for const-propagation.
+ // Optimizers can often turn constant division into a faster multiplication.
+ to_radix_digits_le(u, 10)
+ } else {
+ to_radix_digits_le(u, radix)
+ }
+}
+
+pub(crate) fn to_str_radix_reversed(u: &BigUint, radix: u32) -> Vec<u8> {
+ assert!(2 <= radix && radix <= 36, "The radix must be within 2...36");
+
+ if u.is_zero() {
+ return vec![b'0'];
+ }
+
+ let mut res = to_radix_le(u, radix);
+
+ // Now convert everything to ASCII digits.
+ for r in &mut res {
+ debug_assert!(u32::from(*r) < radix);
+ if *r < 10 {
+ *r += b'0';
+ } else {
+ *r += b'a' - 10;
+ }
+ }
+ res
+}
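+
+// Example: the digits come back least significant first and already mapped
+// to ASCII, so 255 at radix 16 yields b"ff".
+#[test]
+fn str_radix_reversed_ascii() {
+    let r = to_str_radix_reversed(&BigUint::from(255u32), 16);
+    assert_eq!(r, b"ff".to_vec());
+}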
+
+/// Returns the greatest power of the radix for the given bit size
+#[inline]
+fn get_radix_base(radix: u32, bits: u8) -> (BigDigit, usize) {
+ mod gen {
+ include! { concat!(env!("OUT_DIR"), "/radix_bases.rs") }
+ }
+
+ debug_assert!(
+ 2 <= radix && radix <= 256,
+ "The radix must be within 2...256"
+ );
+ debug_assert!(!radix.is_power_of_two());
+ debug_assert!(bits <= big_digit::BITS);
+
+ match bits {
+ 16 => {
+ let (base, power) = gen::BASES_16[radix as usize];
+ (base as BigDigit, power)
+ }
+ 32 => {
+ let (base, power) = gen::BASES_32[radix as usize];
+ (base as BigDigit, power)
+ }
+ 64 => {
+ let (base, power) = gen::BASES_64[radix as usize];
+ (base as BigDigit, power)
+ }
+ _ => panic!("Invalid bigdigit size"),
+ }
+}
diff --git a/rust/vendor/num-bigint/src/biguint/division.rs b/rust/vendor/num-bigint/src/biguint/division.rs
new file mode 100644
index 0000000..2999838
--- /dev/null
+++ b/rust/vendor/num-bigint/src/biguint/division.rs
@@ -0,0 +1,652 @@
+use super::addition::__add2;
+#[cfg(not(u64_digit))]
+use super::u32_to_u128;
+use super::{cmp_slice, BigUint};
+
+use crate::big_digit::{self, BigDigit, DoubleBigDigit};
+use crate::UsizePromotion;
+
+use core::cmp::Ordering::{Equal, Greater, Less};
+use core::mem;
+use core::ops::{Div, DivAssign, Rem, RemAssign};
+use num_integer::Integer;
+use num_traits::{CheckedDiv, CheckedEuclid, Euclid, One, ToPrimitive, Zero};
+
+/// Divide a two-digit numerator by a one-digit divisor, returning the quotient and remainder.
+///
+/// Note: the caller must ensure that both the quotient and remainder will fit into a single digit.
+/// This is _not_ true for an arbitrary numerator/denominator.
+///
+/// (This function also matches what the x86 divide instruction does).
+#[inline]
+fn div_wide(hi: BigDigit, lo: BigDigit, divisor: BigDigit) -> (BigDigit, BigDigit) {
+ debug_assert!(hi < divisor);
+
+ let lhs = big_digit::to_doublebigdigit(hi, lo);
+ let rhs = DoubleBigDigit::from(divisor);
+ ((lhs / rhs) as BigDigit, (lhs % rhs) as BigDigit)
+}
+
+/// For small divisors, we can divide without promoting to `DoubleBigDigit` by
+/// using half-size pieces of digit, like long-division.
+#[inline]
+fn div_half(rem: BigDigit, digit: BigDigit, divisor: BigDigit) -> (BigDigit, BigDigit) {
+ use crate::big_digit::{HALF, HALF_BITS};
+
+ debug_assert!(rem < divisor && divisor <= HALF);
+ let (hi, rem) = ((rem << HALF_BITS) | (digit >> HALF_BITS)).div_rem(&divisor);
+ let (lo, rem) = ((rem << HALF_BITS) | (digit & HALF)).div_rem(&divisor);
+ ((hi << HALF_BITS) | lo, rem)
+}
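+
+// The two single-digit division routes must agree; `div_half` simply avoids
+// the double-width type by doing schoolbook division in half-digit steps.
+#[test]
+fn single_digit_division_paths_agree() {
+    assert_eq!(div_wide(0, 100, 7), (14, 2));
+    assert_eq!(div_half(0, 100, 7), (14, 2));
+}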
+
+#[inline]
+pub(super) fn div_rem_digit(mut a: BigUint, b: BigDigit) -> (BigUint, BigDigit) {
+ if b == 0 {
+ panic!("attempt to divide by zero")
+ }
+
+ let mut rem = 0;
+
+ if b <= big_digit::HALF {
+ for d in a.data.iter_mut().rev() {
+ let (q, r) = div_half(rem, *d, b);
+ *d = q;
+ rem = r;
+ }
+ } else {
+ for d in a.data.iter_mut().rev() {
+ let (q, r) = div_wide(rem, *d, b);
+ *d = q;
+ rem = r;
+ }
+ }
+
+ (a.normalized(), rem)
+}
+
+#[inline]
+fn rem_digit(a: &BigUint, b: BigDigit) -> BigDigit {
+ if b == 0 {
+ panic!("attempt to divide by zero")
+ }
+
+ let mut rem = 0;
+
+ if b <= big_digit::HALF {
+ for &digit in a.data.iter().rev() {
+ let (_, r) = div_half(rem, digit, b);
+ rem = r;
+ }
+ } else {
+ for &digit in a.data.iter().rev() {
+ let (_, r) = div_wide(rem, digit, b);
+ rem = r;
+ }
+ }
+
+ rem
+}
+
+/// Subtract a multiple.
+/// a -= b * c
+/// Returns a borrow (if a < b * c then borrow > 0).
+fn sub_mul_digit_same_len(a: &mut [BigDigit], b: &[BigDigit], c: BigDigit) -> BigDigit {
+ debug_assert!(a.len() == b.len());
+
+ // carry is between -big_digit::MAX and 0, so to avoid overflow we store
+ // offset_carry = carry + big_digit::MAX
+ let mut offset_carry = big_digit::MAX;
+
+ for (x, y) in a.iter_mut().zip(b) {
+ // We want to calculate sum = x - y * c + carry.
+ // sum >= -(big_digit::MAX * big_digit::MAX) - big_digit::MAX
+ // sum <= big_digit::MAX
+ // Offsetting sum by (big_digit::MAX << big_digit::BITS) puts it in DoubleBigDigit range.
+ let offset_sum = big_digit::to_doublebigdigit(big_digit::MAX, *x)
+ - big_digit::MAX as DoubleBigDigit
+ + offset_carry as DoubleBigDigit
+ - *y as DoubleBigDigit * c as DoubleBigDigit;
+
+ let (new_offset_carry, new_x) = big_digit::from_doublebigdigit(offset_sum);
+ offset_carry = new_offset_carry;
+ *x = new_x;
+ }
+
+ // Return the borrow.
+ big_digit::MAX - offset_carry
+}
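+
+// Example of the borrow: [10] - [7] * 3 underflows by 11, so the digit wraps
+// to 2^BITS - 11 and the returned borrow is 1.
+#[test]
+fn sub_mul_digit_borrows() {
+    let mut a = [10 as BigDigit];
+    let borrow = sub_mul_digit_same_len(&mut a, &[7], 3);
+    assert_eq!(a, [(0 as BigDigit).wrapping_sub(11)]);
+    assert_eq!(borrow, 1);
+}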
+
+fn div_rem(mut u: BigUint, mut d: BigUint) -> (BigUint, BigUint) {
+ if d.is_zero() {
+ panic!("attempt to divide by zero")
+ }
+ if u.is_zero() {
+ return (Zero::zero(), Zero::zero());
+ }
+
+ if d.data.len() == 1 {
+ if d.data == [1] {
+ return (u, Zero::zero());
+ }
+ let (div, rem) = div_rem_digit(u, d.data[0]);
+ // reuse d
+ d.data.clear();
+ d += rem;
+ return (div, d);
+ }
+
+ // Required or the q_len calculation below can underflow:
+ match u.cmp(&d) {
+ Less => return (Zero::zero(), u),
+ Equal => {
+ u.set_one();
+ return (u, Zero::zero());
+ }
+ Greater => {} // Do nothing
+ }
+
+ // This algorithm is from Knuth, TAOCP vol 2 section 4.3, algorithm D:
+ //
+ // First, normalize the arguments so the highest bit in the highest digit of the divisor is
+ // set: the main loop uses the highest digit of the divisor for generating guesses, so we
+ // want it to be the largest number we can efficiently divide by.
+ //
+ let shift = d.data.last().unwrap().leading_zeros() as usize;
+
+ if shift == 0 {
+ // no need to clone d
+ div_rem_core(u, &d.data)
+ } else {
+ let (q, r) = div_rem_core(u << shift, &(d << shift).data);
+ // renormalize the remainder
+ (q, r >> shift)
+ }
+}
+
+pub(super) fn div_rem_ref(u: &BigUint, d: &BigUint) -> (BigUint, BigUint) {
+ if d.is_zero() {
+ panic!("attempt to divide by zero")
+ }
+ if u.is_zero() {
+ return (Zero::zero(), Zero::zero());
+ }
+
+ if d.data.len() == 1 {
+ if d.data == [1] {
+ return (u.clone(), Zero::zero());
+ }
+
+ let (div, rem) = div_rem_digit(u.clone(), d.data[0]);
+ return (div, rem.into());
+ }
+
+ // Required or the q_len calculation below can underflow:
+ match u.cmp(d) {
+ Less => return (Zero::zero(), u.clone()),
+ Equal => return (One::one(), Zero::zero()),
+ Greater => {} // Do nothing
+ }
+
+ // This algorithm is from Knuth, TAOCP vol 2 section 4.3, algorithm D:
+ //
+ // First, normalize the arguments so the highest bit in the highest digit of the divisor is
+ // set: the main loop uses the highest digit of the divisor for generating guesses, so we
+ // want it to be the largest number we can efficiently divide by.
+ //
+ let shift = d.data.last().unwrap().leading_zeros() as usize;
+
+ if shift == 0 {
+ // no need to clone d
+ div_rem_core(u.clone(), &d.data)
+ } else {
+ let (q, r) = div_rem_core(u << shift, &(d << shift).data);
+ // renormalize the remainder
+ (q, r >> shift)
+ }
+}
+
+/// An implementation of the base division algorithm.
+/// Knuth, TAOCP vol 2 section 4.3.1, algorithm D, with an improvement from exercises 19-21.
+fn div_rem_core(mut a: BigUint, b: &[BigDigit]) -> (BigUint, BigUint) {
+ debug_assert!(a.data.len() >= b.len() && b.len() > 1);
+ debug_assert!(b.last().unwrap().leading_zeros() == 0);
+
+ // The algorithm works by incrementally calculating "guesses", q0, for the next digit of the
+ // quotient. Once we have any number q0 such that (q0 << j) * b <= a, we can set
+ //
+ // q += q0 << j
+ // a -= (q0 << j) * b
+ //
+ // and then iterate until a < b. Then, (q, a) will be our desired quotient and remainder.
+ //
+ // q0, our guess, is calculated by dividing the last three digits of a by the last two digits of
+ // b - this will give us a guess that is close to the actual quotient, but is possibly greater.
+ // It can only be greater by 1 and only in rare cases, with probability at most
+ // 2^-(big_digit::BITS-1) for random a, see TAOCP 4.3.1 exercise 21.
+ //
+ // If the quotient turns out to be too large, we adjust it by 1:
+ // q -= 1 << j
+ // a += b << j
+
+    // a0 stores an extra most significant digit of the dividend, not stored in a.
+ let mut a0 = 0;
+
+ // [b1, b0] are the two most significant digits of the divisor. They never change.
+ let b0 = *b.last().unwrap();
+ let b1 = b[b.len() - 2];
+
+ let q_len = a.data.len() - b.len() + 1;
+ let mut q = BigUint {
+ data: vec![0; q_len],
+ };
+
+ for j in (0..q_len).rev() {
+ debug_assert!(a.data.len() == b.len() + j);
+
+ let a1 = *a.data.last().unwrap();
+ let a2 = a.data[a.data.len() - 2];
+
+        // The first q0 estimate is [a1,a0] / b0. It will never be too small; it may be too
+        // large by at most 2.
+ let (mut q0, mut r) = if a0 < b0 {
+ let (q0, r) = div_wide(a0, a1, b0);
+ (q0, r as DoubleBigDigit)
+ } else {
+ debug_assert!(a0 == b0);
+ // Avoid overflowing q0, we know the quotient fits in BigDigit.
+                // [a1,a0] = b0 * ((1 << BITS) - 1) + (a0 + a1)
+ (big_digit::MAX, a0 as DoubleBigDigit + a1 as DoubleBigDigit)
+ };
+
+ // r = [a1,a0] - q0 * b0
+ //
+ // Now we want to compute a more precise estimate [a2,a1,a0] / [b1,b0] which can only be
+ // less or equal to the current q0.
+ //
+ // q0 is too large if:
+ // [a2,a1,a0] < q0 * [b1,b0]
+ // (r << BITS) + a2 < q0 * b1
+ while r <= big_digit::MAX as DoubleBigDigit
+ && big_digit::to_doublebigdigit(r as BigDigit, a2)
+ < q0 as DoubleBigDigit * b1 as DoubleBigDigit
+ {
+ q0 -= 1;
+ r += b0 as DoubleBigDigit;
+ }
+
+ // q0 is now either the correct quotient digit, or in rare cases 1 too large.
+ // Subtract (q0 << j) from a. This may overflow, in which case we will have to correct.
+
+ let mut borrow = sub_mul_digit_same_len(&mut a.data[j..], b, q0);
+ if borrow > a0 {
+ // q0 is too large. We need to add back one multiple of b.
+ q0 -= 1;
+ borrow -= __add2(&mut a.data[j..], b);
+ }
+ // The top digit of a, stored in a0, has now been zeroed.
+ debug_assert!(borrow == a0);
+
+ q.data[j] = q0;
+
+ // Pop off the next top digit of a.
+ a0 = a.data.pop().unwrap();
+ }
+
+ a.data.push(a0);
+ a.normalize();
+
+ debug_assert_eq!(cmp_slice(&a.data, b), Less);
+
+ (q.normalized(), a)
+}
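+
+// Spot check of the whole division: the quotient and remainder must
+// reconstruct the dividend, with the remainder strictly below the divisor.
+#[test]
+fn div_rem_reconstructs() {
+    let u = (BigUint::from(1u8) << 200usize) + 12345u32;
+    let d = (BigUint::from(1u8) << 64usize) + 3u32;
+    let (q, r) = div_rem_ref(&u, &d);
+    assert!(r < d);
+    assert_eq!(q * &d + r, u);
+}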
+
+forward_val_ref_binop!(impl Div for BigUint, div);
+forward_ref_val_binop!(impl Div for BigUint, div);
+forward_val_assign!(impl DivAssign for BigUint, div_assign);
+
+impl Div<BigUint> for BigUint {
+ type Output = BigUint;
+
+ #[inline]
+ fn div(self, other: BigUint) -> BigUint {
+ let (q, _) = div_rem(self, other);
+ q
+ }
+}
+
+impl Div<&BigUint> for &BigUint {
+ type Output = BigUint;
+
+ #[inline]
+ fn div(self, other: &BigUint) -> BigUint {
+ let (q, _) = self.div_rem(other);
+ q
+ }
+}
+impl DivAssign<&BigUint> for BigUint {
+ #[inline]
+ fn div_assign(&mut self, other: &BigUint) {
+ *self = &*self / other;
+ }
+}
+
+promote_unsigned_scalars!(impl Div for BigUint, div);
+promote_unsigned_scalars_assign!(impl DivAssign for BigUint, div_assign);
+forward_all_scalar_binop_to_val_val!(impl Div<u32> for BigUint, div);
+forward_all_scalar_binop_to_val_val!(impl Div<u64> for BigUint, div);
+forward_all_scalar_binop_to_val_val!(impl Div<u128> for BigUint, div);
+
+impl Div<u32> for BigUint {
+ type Output = BigUint;
+
+ #[inline]
+ fn div(self, other: u32) -> BigUint {
+ let (q, _) = div_rem_digit(self, other as BigDigit);
+ q
+ }
+}
+impl DivAssign<u32> for BigUint {
+ #[inline]
+ fn div_assign(&mut self, other: u32) {
+ *self = &*self / other;
+ }
+}
+
+impl Div<BigUint> for u32 {
+ type Output = BigUint;
+
+ #[inline]
+ fn div(self, other: BigUint) -> BigUint {
+ match other.data.len() {
+ 0 => panic!("attempt to divide by zero"),
+ 1 => From::from(self as BigDigit / other.data[0]),
+ _ => Zero::zero(),
+ }
+ }
+}
+
+impl Div<u64> for BigUint {
+ type Output = BigUint;
+
+ #[inline]
+ fn div(self, other: u64) -> BigUint {
+ let (q, _) = div_rem(self, From::from(other));
+ q
+ }
+}
+impl DivAssign<u64> for BigUint {
+ #[inline]
+ fn div_assign(&mut self, other: u64) {
+ // a vec of size 0 does not allocate, so this is fairly cheap
+ let temp = mem::replace(self, Zero::zero());
+ *self = temp / other;
+ }
+}
+
+impl Div<BigUint> for u64 {
+ type Output = BigUint;
+
+ #[cfg(not(u64_digit))]
+ #[inline]
+ fn div(self, other: BigUint) -> BigUint {
+ match other.data.len() {
+ 0 => panic!("attempt to divide by zero"),
+ 1 => From::from(self / u64::from(other.data[0])),
+ 2 => From::from(self / big_digit::to_doublebigdigit(other.data[1], other.data[0])),
+ _ => Zero::zero(),
+ }
+ }
+
+ #[cfg(u64_digit)]
+ #[inline]
+ fn div(self, other: BigUint) -> BigUint {
+ match other.data.len() {
+ 0 => panic!("attempt to divide by zero"),
+ 1 => From::from(self / other.data[0]),
+ _ => Zero::zero(),
+ }
+ }
+}
+
+impl Div<u128> for BigUint {
+ type Output = BigUint;
+
+ #[inline]
+ fn div(self, other: u128) -> BigUint {
+ let (q, _) = div_rem(self, From::from(other));
+ q
+ }
+}
+
+impl DivAssign<u128> for BigUint {
+ #[inline]
+ fn div_assign(&mut self, other: u128) {
+ *self = &*self / other;
+ }
+}
+
+impl Div<BigUint> for u128 {
+ type Output = BigUint;
+
+ #[cfg(not(u64_digit))]
+ #[inline]
+ fn div(self, other: BigUint) -> BigUint {
+ match other.data.len() {
+ 0 => panic!("attempt to divide by zero"),
+ 1 => From::from(self / u128::from(other.data[0])),
+ 2 => From::from(
+ self / u128::from(big_digit::to_doublebigdigit(other.data[1], other.data[0])),
+ ),
+ 3 => From::from(self / u32_to_u128(0, other.data[2], other.data[1], other.data[0])),
+ 4 => From::from(
+ self / u32_to_u128(other.data[3], other.data[2], other.data[1], other.data[0]),
+ ),
+ _ => Zero::zero(),
+ }
+ }
+
+ #[cfg(u64_digit)]
+ #[inline]
+ fn div(self, other: BigUint) -> BigUint {
+ match other.data.len() {
+ 0 => panic!("attempt to divide by zero"),
+ 1 => From::from(self / other.data[0] as u128),
+ 2 => From::from(self / big_digit::to_doublebigdigit(other.data[1], other.data[0])),
+ _ => Zero::zero(),
+ }
+ }
+}
+
+forward_val_ref_binop!(impl Rem for BigUint, rem);
+forward_ref_val_binop!(impl Rem for BigUint, rem);
+forward_val_assign!(impl RemAssign for BigUint, rem_assign);
+
+impl Rem<BigUint> for BigUint {
+ type Output = BigUint;
+
+ #[inline]
+ fn rem(self, other: BigUint) -> BigUint {
+ if let Some(other) = other.to_u32() {
+ &self % other
+ } else {
+ let (_, r) = div_rem(self, other);
+ r
+ }
+ }
+}
+
+impl Rem<&BigUint> for &BigUint {
+ type Output = BigUint;
+
+ #[inline]
+ fn rem(self, other: &BigUint) -> BigUint {
+ if let Some(other) = other.to_u32() {
+ self % other
+ } else {
+ let (_, r) = self.div_rem(other);
+ r
+ }
+ }
+}
+impl RemAssign<&BigUint> for BigUint {
+ #[inline]
+ fn rem_assign(&mut self, other: &BigUint) {
+ *self = &*self % other;
+ }
+}
+
+promote_unsigned_scalars!(impl Rem for BigUint, rem);
+promote_unsigned_scalars_assign!(impl RemAssign for BigUint, rem_assign);
+forward_all_scalar_binop_to_ref_val!(impl Rem<u32> for BigUint, rem);
+forward_all_scalar_binop_to_val_val!(impl Rem<u64> for BigUint, rem);
+forward_all_scalar_binop_to_val_val!(impl Rem<u128> for BigUint, rem);
+
+impl Rem<u32> for &BigUint {
+ type Output = BigUint;
+
+ #[inline]
+ fn rem(self, other: u32) -> BigUint {
+ rem_digit(self, other as BigDigit).into()
+ }
+}
+impl RemAssign<u32> for BigUint {
+ #[inline]
+ fn rem_assign(&mut self, other: u32) {
+ *self = &*self % other;
+ }
+}
+
+impl Rem<&BigUint> for u32 {
+ type Output = BigUint;
+
+ #[inline]
+ fn rem(mut self, other: &BigUint) -> BigUint {
+ self %= other;
+ From::from(self)
+ }
+}
+
+macro_rules! impl_rem_assign_scalar {
+ ($scalar:ty, $to_scalar:ident) => {
+ forward_val_assign_scalar!(impl RemAssign for BigUint, $scalar, rem_assign);
+ impl RemAssign<&BigUint> for $scalar {
+ #[inline]
+ fn rem_assign(&mut self, other: &BigUint) {
+ *self = match other.$to_scalar() {
+ None => *self,
+ Some(0) => panic!("attempt to divide by zero"),
+ Some(v) => *self % v
+ };
+ }
+ }
+ }
+}
+
+// `scalar %= BigUint` is supported for any scalar, including signed types
+impl_rem_assign_scalar!(u128, to_u128);
+impl_rem_assign_scalar!(usize, to_usize);
+impl_rem_assign_scalar!(u64, to_u64);
+impl_rem_assign_scalar!(u32, to_u32);
+impl_rem_assign_scalar!(u16, to_u16);
+impl_rem_assign_scalar!(u8, to_u8);
+impl_rem_assign_scalar!(i128, to_i128);
+impl_rem_assign_scalar!(isize, to_isize);
+impl_rem_assign_scalar!(i64, to_i64);
+impl_rem_assign_scalar!(i32, to_i32);
+impl_rem_assign_scalar!(i16, to_i16);
+impl_rem_assign_scalar!(i8, to_i8);
+
+impl Rem<u64> for BigUint {
+ type Output = BigUint;
+
+ #[inline]
+ fn rem(self, other: u64) -> BigUint {
+ let (_, r) = div_rem(self, From::from(other));
+ r
+ }
+}
+impl RemAssign<u64> for BigUint {
+ #[inline]
+ fn rem_assign(&mut self, other: u64) {
+ *self = &*self % other;
+ }
+}
+
+impl Rem<BigUint> for u64 {
+ type Output = BigUint;
+
+ #[inline]
+ fn rem(mut self, other: BigUint) -> BigUint {
+ self %= other;
+ From::from(self)
+ }
+}
+
+impl Rem<u128> for BigUint {
+ type Output = BigUint;
+
+ #[inline]
+ fn rem(self, other: u128) -> BigUint {
+ let (_, r) = div_rem(self, From::from(other));
+ r
+ }
+}
+
+impl RemAssign<u128> for BigUint {
+ #[inline]
+ fn rem_assign(&mut self, other: u128) {
+ *self = &*self % other;
+ }
+}
+
+impl Rem<BigUint> for u128 {
+ type Output = BigUint;
+
+ #[inline]
+ fn rem(mut self, other: BigUint) -> BigUint {
+ self %= other;
+ From::from(self)
+ }
+}
+
+impl CheckedDiv for BigUint {
+ #[inline]
+ fn checked_div(&self, v: &BigUint) -> Option<BigUint> {
+ if v.is_zero() {
+ return None;
+ }
+ Some(self.div(v))
+ }
+}
+
+impl CheckedEuclid for BigUint {
+ #[inline]
+ fn checked_div_euclid(&self, v: &BigUint) -> Option<BigUint> {
+ if v.is_zero() {
+ return None;
+ }
+ Some(self.div_euclid(v))
+ }
+
+ #[inline]
+ fn checked_rem_euclid(&self, v: &BigUint) -> Option<BigUint> {
+ if v.is_zero() {
+ return None;
+ }
+ Some(self.rem_euclid(v))
+ }
+}
+
+impl Euclid for BigUint {
+ #[inline]
+ fn div_euclid(&self, v: &BigUint) -> BigUint {
+ // trivially same as regular division
+ self / v
+ }
+
+ #[inline]
+ fn rem_euclid(&self, v: &BigUint) -> BigUint {
+ // trivially same as regular remainder
+ self % v
+ }
+}
diff --git a/rust/vendor/num-bigint/src/biguint/iter.rs b/rust/vendor/num-bigint/src/biguint/iter.rs
new file mode 100644
index 0000000..1e673e4
--- /dev/null
+++ b/rust/vendor/num-bigint/src/biguint/iter.rs
@@ -0,0 +1,358 @@
+use core::iter::FusedIterator;
+
+#[cfg(not(u64_digit))]
+use super::u32_chunk_to_u64;
+
+/// An iterator over the `u32` digits of a `BigUint` or `BigInt`,
+/// ordered least significant digit first.
+pub struct U32Digits<'a> {
+ #[cfg(u64_digit)]
+ data: &'a [u64],
+ #[cfg(u64_digit)]
+ next_is_lo: bool,
+ #[cfg(u64_digit)]
+ last_hi_is_zero: bool,
+
+ #[cfg(not(u64_digit))]
+ it: core::slice::Iter<'a, u32>,
+}
+
+#[cfg(u64_digit)]
+impl<'a> U32Digits<'a> {
+ #[inline]
+ pub(super) fn new(data: &'a [u64]) -> Self {
+ let last_hi_is_zero = data
+ .last()
+ .map(|&last| {
+ let last_hi = (last >> 32) as u32;
+ last_hi == 0
+ })
+ .unwrap_or(false);
+ U32Digits {
+ data,
+ next_is_lo: true,
+ last_hi_is_zero,
+ }
+ }
+}
+
+#[cfg(u64_digit)]
+impl Iterator for U32Digits<'_> {
+ type Item = u32;
+ #[inline]
+ fn next(&mut self) -> Option<u32> {
+ match self.data.split_first() {
+ Some((&first, data)) => {
+ let next_is_lo = self.next_is_lo;
+ self.next_is_lo = !next_is_lo;
+ if next_is_lo {
+ Some(first as u32)
+ } else {
+ self.data = data;
+ if data.is_empty() && self.last_hi_is_zero {
+ self.last_hi_is_zero = false;
+ None
+ } else {
+ Some((first >> 32) as u32)
+ }
+ }
+ }
+ None => None,
+ }
+ }
+
+ #[inline]
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ let len = self.len();
+ (len, Some(len))
+ }
+
+ #[inline]
+ fn last(self) -> Option<u32> {
+ self.data.last().map(|&last| {
+ if self.last_hi_is_zero {
+ last as u32
+ } else {
+ (last >> 32) as u32
+ }
+ })
+ }
+
+ #[inline]
+ fn count(self) -> usize {
+ self.len()
+ }
+}
+
+#[cfg(u64_digit)]
+impl DoubleEndedIterator for U32Digits<'_> {
+ fn next_back(&mut self) -> Option<Self::Item> {
+ match self.data.split_last() {
+ Some((&last, data)) => {
+ let last_is_lo = self.last_hi_is_zero;
+ self.last_hi_is_zero = !last_is_lo;
+ if last_is_lo {
+ self.data = data;
+ if data.is_empty() && !self.next_is_lo {
+ self.next_is_lo = true;
+ None
+ } else {
+ Some(last as u32)
+ }
+ } else {
+ Some((last >> 32) as u32)
+ }
+ }
+ None => None,
+ }
+ }
+}
+
+#[cfg(u64_digit)]
+impl ExactSizeIterator for U32Digits<'_> {
+ #[inline]
+ fn len(&self) -> usize {
+ self.data.len() * 2 - usize::from(self.last_hi_is_zero) - usize::from(!self.next_is_lo)
+ }
+}
+
+#[cfg(not(u64_digit))]
+impl<'a> U32Digits<'a> {
+ #[inline]
+ pub(super) fn new(data: &'a [u32]) -> Self {
+ Self { it: data.iter() }
+ }
+}
+
+#[cfg(not(u64_digit))]
+impl Iterator for U32Digits<'_> {
+ type Item = u32;
+ #[inline]
+ fn next(&mut self) -> Option<u32> {
+ self.it.next().cloned()
+ }
+
+ #[inline]
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ self.it.size_hint()
+ }
+
+ #[inline]
+ fn nth(&mut self, n: usize) -> Option<u32> {
+ self.it.nth(n).cloned()
+ }
+
+ #[inline]
+ fn last(self) -> Option<u32> {
+ self.it.last().cloned()
+ }
+
+ #[inline]
+ fn count(self) -> usize {
+ self.it.count()
+ }
+}
+
+#[cfg(not(u64_digit))]
+impl DoubleEndedIterator for U32Digits<'_> {
+ fn next_back(&mut self) -> Option<Self::Item> {
+ self.it.next_back().copied()
+ }
+}
+
+#[cfg(not(u64_digit))]
+impl ExactSizeIterator for U32Digits<'_> {
+ #[inline]
+ fn len(&self) -> usize {
+ self.it.len()
+ }
+}
+
+impl FusedIterator for U32Digits<'_> {}
+
+/// An iterator over the `u64` digits of a `BigUint` or `BigInt`,
+/// ordered least significant digit first.
+pub struct U64Digits<'a> {
+ #[cfg(not(u64_digit))]
+ it: core::slice::Chunks<'a, u32>,
+
+ #[cfg(u64_digit)]
+ it: core::slice::Iter<'a, u64>,
+}
+
+#[cfg(not(u64_digit))]
+impl<'a> U64Digits<'a> {
+ #[inline]
+ pub(super) fn new(data: &'a [u32]) -> Self {
+ U64Digits { it: data.chunks(2) }
+ }
+}
+
+#[cfg(not(u64_digit))]
+impl Iterator for U64Digits<'_> {
+ type Item = u64;
+ #[inline]
+ fn next(&mut self) -> Option<u64> {
+ self.it.next().map(u32_chunk_to_u64)
+ }
+
+ #[inline]
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ let len = self.len();
+ (len, Some(len))
+ }
+
+ #[inline]
+ fn last(self) -> Option<u64> {
+ self.it.last().map(u32_chunk_to_u64)
+ }
+
+ #[inline]
+ fn count(self) -> usize {
+ self.len()
+ }
+}
+
+#[cfg(not(u64_digit))]
+impl DoubleEndedIterator for U64Digits<'_> {
+ fn next_back(&mut self) -> Option<Self::Item> {
+ self.it.next_back().map(u32_chunk_to_u64)
+ }
+}
+
+#[cfg(u64_digit)]
+impl<'a> U64Digits<'a> {
+ #[inline]
+ pub(super) fn new(data: &'a [u64]) -> Self {
+ Self { it: data.iter() }
+ }
+}
+
+#[cfg(u64_digit)]
+impl Iterator for U64Digits<'_> {
+ type Item = u64;
+ #[inline]
+ fn next(&mut self) -> Option<u64> {
+ self.it.next().cloned()
+ }
+
+ #[inline]
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ self.it.size_hint()
+ }
+
+ #[inline]
+ fn nth(&mut self, n: usize) -> Option<u64> {
+ self.it.nth(n).cloned()
+ }
+
+ #[inline]
+ fn last(self) -> Option<u64> {
+ self.it.last().cloned()
+ }
+
+ #[inline]
+ fn count(self) -> usize {
+ self.it.count()
+ }
+}
+
+#[cfg(u64_digit)]
+impl DoubleEndedIterator for U64Digits<'_> {
+ fn next_back(&mut self) -> Option<Self::Item> {
+ self.it.next_back().cloned()
+ }
+}
+
+impl ExactSizeIterator for U64Digits<'_> {
+ #[inline]
+ fn len(&self) -> usize {
+ self.it.len()
+ }
+}
+
+impl FusedIterator for U64Digits<'_> {}
+
+#[test]
+fn test_iter_u32_digits() {
+ let n = super::BigUint::from(5u8);
+ let mut it = n.iter_u32_digits();
+ assert_eq!(it.len(), 1);
+ assert_eq!(it.next(), Some(5));
+ assert_eq!(it.len(), 0);
+ assert_eq!(it.next(), None);
+ assert_eq!(it.len(), 0);
+ assert_eq!(it.next(), None);
+
+ let n = super::BigUint::from(112500000000u64);
+ let mut it = n.iter_u32_digits();
+ assert_eq!(it.len(), 2);
+ assert_eq!(it.next(), Some(830850304));
+ assert_eq!(it.len(), 1);
+ assert_eq!(it.next(), Some(26));
+ assert_eq!(it.len(), 0);
+ assert_eq!(it.next(), None);
+}
+
+#[test]
+fn test_iter_u64_digits() {
+ let n = super::BigUint::from(5u8);
+ let mut it = n.iter_u64_digits();
+ assert_eq!(it.len(), 1);
+ assert_eq!(it.next(), Some(5));
+ assert_eq!(it.len(), 0);
+ assert_eq!(it.next(), None);
+ assert_eq!(it.len(), 0);
+ assert_eq!(it.next(), None);
+
+ let n = super::BigUint::from(18_446_744_073_709_551_616u128);
+ let mut it = n.iter_u64_digits();
+ assert_eq!(it.len(), 2);
+ assert_eq!(it.next(), Some(0));
+ assert_eq!(it.len(), 1);
+ assert_eq!(it.next(), Some(1));
+ assert_eq!(it.len(), 0);
+ assert_eq!(it.next(), None);
+}
+
+#[test]
+fn test_iter_u32_digits_be() {
+ let n = super::BigUint::from(5u8);
+ let mut it = n.iter_u32_digits();
+ assert_eq!(it.len(), 1);
+    assert_eq!(it.next_back(), Some(5));
+ assert_eq!(it.len(), 0);
+ assert_eq!(it.next(), None);
+ assert_eq!(it.len(), 0);
+ assert_eq!(it.next(), None);
+
+ let n = super::BigUint::from(112500000000u64);
+ let mut it = n.iter_u32_digits();
+ assert_eq!(it.len(), 2);
+    assert_eq!(it.next_back(), Some(26));
+    assert_eq!(it.len(), 1);
+    assert_eq!(it.next_back(), Some(830850304));
+ assert_eq!(it.len(), 0);
+ assert_eq!(it.next(), None);
+}
+
+#[test]
+fn test_iter_u64_digits_be() {
+ let n = super::BigUint::from(5u8);
+ let mut it = n.iter_u64_digits();
+ assert_eq!(it.len(), 1);
+ assert_eq!(it.next_back(), Some(5));
+ assert_eq!(it.len(), 0);
+ assert_eq!(it.next(), None);
+ assert_eq!(it.len(), 0);
+ assert_eq!(it.next(), None);
+
+ let n = super::BigUint::from(18_446_744_073_709_551_616u128);
+ let mut it = n.iter_u64_digits();
+ assert_eq!(it.len(), 2);
+ assert_eq!(it.next_back(), Some(1));
+ assert_eq!(it.len(), 1);
+ assert_eq!(it.next_back(), Some(0));
+ assert_eq!(it.len(), 0);
+ assert_eq!(it.next(), None);
+}
diff --git a/rust/vendor/num-bigint/src/biguint/monty.rs b/rust/vendor/num-bigint/src/biguint/monty.rs
new file mode 100644
index 0000000..abaca50
--- /dev/null
+++ b/rust/vendor/num-bigint/src/biguint/monty.rs
@@ -0,0 +1,225 @@
+use crate::std_alloc::Vec;
+use core::mem;
+use core::ops::Shl;
+use num_traits::{One, Zero};
+
+use crate::big_digit::{self, BigDigit, DoubleBigDigit, SignedDoubleBigDigit};
+use crate::biguint::BigUint;
+
+struct MontyReducer {
+ n0inv: BigDigit,
+}
+
+// k0 = -m**-1 mod 2**BITS. Algorithm from: Dumas, J.G. "On Newton–Raphson
+// Iteration for Multiplicative Inverses Modulo Prime Powers".
+fn inv_mod_alt(b: BigDigit) -> BigDigit {
+ assert_ne!(b & 1, 0);
+
+ let mut k0 = 2 - b as SignedDoubleBigDigit;
+ let mut t = (b - 1) as SignedDoubleBigDigit;
+ let mut i = 1;
+ while i < big_digit::BITS {
+ t = t.wrapping_mul(t);
+ k0 = k0.wrapping_mul(t + 1);
+
+ i <<= 1;
+ }
+ -k0 as BigDigit
+}
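+
+// A quick sanity-check sketch (illustrative, not load-bearing): since
+// `inv_mod_alt(b)` returns `-b⁻¹ mod 2^BITS`, the wrapping product
+// `b * inv_mod_alt(b)` should equal the all-ones digit `!0` (i.e. -1) for any
+// odd `b`.
+#[test]
+fn test_inv_mod_alt_sketch() {
+ for &b in &[1 as BigDigit, 3, 5, 7, !0] {
+ assert_eq!(b.wrapping_mul(inv_mod_alt(b)), !0);
+ }
+}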
+
+impl MontyReducer {
+ fn new(n: &BigUint) -> Self {
+ let n0inv = inv_mod_alt(n.data[0]);
+ MontyReducer { n0inv }
+ }
+}
+
+/// Computes z satisfying z ≡ x * y * 2 ** (-n*_W) (mod m),
+/// assuming k = -1/m mod 2**_W
+/// See Gueron, "Efficient Software Implementations of Modular Exponentiation".
+/// <https://eprint.iacr.org/2011/239.pdf>
+/// In the terminology of that paper, this is an "Almost Montgomery Multiplication":
+/// x and y are required to satisfy 0 <= x, y < 2**(n*_W), and then the result
+/// z is guaranteed to satisfy 0 <= z < 2**(n*_W), but it may not be < m.
+#[allow(clippy::many_single_char_names)]
+fn montgomery(x: &BigUint, y: &BigUint, m: &BigUint, k: BigDigit, n: usize) -> BigUint {
+ // This code assumes x, y, m are all the same length, n.
+ // (required by add_mul_vvw and the for loop).
+ // It also assumes that x, y are already reduced mod m,
+ // or else the result will not be properly reduced.
+ assert!(
+ x.data.len() == n && y.data.len() == n && m.data.len() == n,
+ "{:?} {:?} {:?} {}",
+ x,
+ y,
+ m,
+ n
+ );
+
+ let mut z = BigUint::zero();
+ z.data.resize(n * 2, 0);
+
+ let mut c: BigDigit = 0;
+ for i in 0..n {
+ let c2 = add_mul_vvw(&mut z.data[i..n + i], &x.data, y.data[i]);
+ let t = z.data[i].wrapping_mul(k);
+ let c3 = add_mul_vvw(&mut z.data[i..n + i], &m.data, t);
+ let cx = c.wrapping_add(c2);
+ let cy = cx.wrapping_add(c3);
+ z.data[n + i] = cy;
+ if cx < c2 || cy < c3 {
+ c = 1;
+ } else {
+ c = 0;
+ }
+ }
+
+ if c == 0 {
+ z.data = z.data[n..].to_vec();
+ } else {
+ {
+ let (first, second) = z.data.split_at_mut(n);
+ sub_vv(first, second, &m.data);
+ }
+ z.data = z.data[..n].to_vec();
+ }
+
+ z
+}
+
+#[inline(always)]
+fn add_mul_vvw(z: &mut [BigDigit], x: &[BigDigit], y: BigDigit) -> BigDigit {
+ let mut c = 0;
+ for (zi, xi) in z.iter_mut().zip(x.iter()) {
+ let (z1, z0) = mul_add_www(*xi, y, *zi);
+ let (c_, zi_) = add_ww(z0, c, 0);
+ *zi = zi_;
+ c = c_ + z1;
+ }
+
+ c
+}
+
+/// The resulting borrow c is either 0 or 1.
+#[inline(always)]
+fn sub_vv(z: &mut [BigDigit], x: &[BigDigit], y: &[BigDigit]) -> BigDigit {
+ let mut c = 0;
+ for (i, (xi, yi)) in x.iter().zip(y.iter()).enumerate().take(z.len()) {
+ let zi = xi.wrapping_sub(*yi).wrapping_sub(c);
+ z[i] = zi;
+ // see "Hacker's Delight", section 2-12 (overflow detection)
+ c = ((yi & !xi) | ((yi | !xi) & zi)) >> (big_digit::BITS - 1)
+ }
+
+ c
+}
+
+/// z1 << _W + z0 = x + y + c, with c == 0 or 1
+#[inline(always)]
+fn add_ww(x: BigDigit, y: BigDigit, c: BigDigit) -> (BigDigit, BigDigit) {
+ let yc = y.wrapping_add(c);
+ let z0 = x.wrapping_add(yc);
+ let z1 = if z0 < x || yc < y { 1 } else { 0 };
+
+ (z1, z0)
+}
+
+/// z1 << _W + z0 = x * y + c
+#[inline(always)]
+fn mul_add_www(x: BigDigit, y: BigDigit, c: BigDigit) -> (BigDigit, BigDigit) {
+ let z = x as DoubleBigDigit * y as DoubleBigDigit + c as DoubleBigDigit;
+ ((z >> big_digit::BITS) as BigDigit, z as BigDigit)
+}
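+
+// A sketch of the double-width identities these helpers maintain, checked at
+// the extreme digit values (using `!0` for the all-ones digit):
+#[test]
+fn test_primitive_arith_sketch() {
+ // add_ww: z1*2^_W + z0 == x + y + c, so MAX + MAX + 1 gives (1, MAX).
+ assert_eq!(add_ww(!0, !0, 1), (1, !0));
+ // mul_add_www: z1*2^_W + z0 == x*y + c, so MAX*MAX + MAX gives (MAX, 0).
+ assert_eq!(mul_add_www(!0, !0, !0), (!0, 0));
+}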
+
+/// Calculates x ** y mod m using a fixed, 4-bit window.
+#[allow(clippy::many_single_char_names)]
+pub(super) fn monty_modpow(x: &BigUint, y: &BigUint, m: &BigUint) -> BigUint {
+ assert!(m.data[0] & 1 == 1);
+ let mr = MontyReducer::new(m);
+ let num_words = m.data.len();
+
+ let mut x = x.clone();
+
+ // We want the lengths of x and m to be equal.
+ // It is OK if x >= m as long as len(x) == len(m).
+ if x.data.len() > num_words {
+ x %= m;
+ // Note: now len(x) <= numWords, not guaranteed ==.
+ }
+ if x.data.len() < num_words {
+ x.data.resize(num_words, 0);
+ }
+
+ // rr = 2**(2*_W*len(m)) mod m
+ let mut rr = BigUint::one();
+ rr = (rr.shl(2 * num_words as u64 * u64::from(big_digit::BITS))) % m;
+ if rr.data.len() < num_words {
+ rr.data.resize(num_words, 0);
+ }
+ // one = 1, with equal length to that of m
+ let mut one = BigUint::one();
+ one.data.resize(num_words, 0);
+
+ let n = 4;
+ // powers[i] contains x^i in Montgomery form
+ let mut powers = Vec::with_capacity(1 << n);
+ powers.push(montgomery(&one, &rr, m, mr.n0inv, num_words));
+ powers.push(montgomery(&x, &rr, m, mr.n0inv, num_words));
+ for i in 2..1 << n {
+ let r = montgomery(&powers[i - 1], &powers[1], m, mr.n0inv, num_words);
+ powers.push(r);
+ }
+
+ // initialize z = 1 (Montgomery 1)
+ let mut z = powers[0].clone();
+ z.data.resize(num_words, 0);
+ let mut zz = BigUint::zero();
+ zz.data.resize(num_words, 0);
+
+ // same windowed exponent, but with Montgomery multiplications
+ for i in (0..y.data.len()).rev() {
+ let mut yi = y.data[i];
+ let mut j = 0;
+ while j < big_digit::BITS {
+ if i != y.data.len() - 1 || j != 0 {
+ zz = montgomery(&z, &z, m, mr.n0inv, num_words);
+ z = montgomery(&zz, &zz, m, mr.n0inv, num_words);
+ zz = montgomery(&z, &z, m, mr.n0inv, num_words);
+ z = montgomery(&zz, &zz, m, mr.n0inv, num_words);
+ }
+ zz = montgomery(
+ &z,
+ &powers[(yi >> (big_digit::BITS - n)) as usize],
+ m,
+ mr.n0inv,
+ num_words,
+ );
+ mem::swap(&mut z, &mut zz);
+ yi <<= n;
+ j += n;
+ }
+ }
+
+ // convert to regular number
+ zz = montgomery(&z, &one, m, mr.n0inv, num_words);
+
+ zz.normalize();
+ // One last reduction, just in case.
+ // See golang.org/issue/13907.
+ if zz >= *m {
+ // Common case is m has high bit set; in that case,
+ // since zz is the same length as m, there can be just
+ // one multiple of m to remove. Just subtract.
+ // We think that the subtract should be sufficient in general,
+ // so do that unconditionally, but double-check,
+ // in case our beliefs are wrong.
+ // The div is not expected to be reached.
+ zz -= m;
+ if zz >= *m {
+ zz %= m;
+ }
+ }
+
+ zz.normalize();
+ zz
+}
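+
+// An illustrative check of `monty_modpow` against a value small enough to
+// verify by hand: 4¹³ = 67108864, and 67108864 mod 497 = 445 (497 is odd, as
+// the Montgomery path requires).
+#[test]
+fn test_monty_modpow_sketch() {
+ let x = BigUint::from(4u32);
+ let y = BigUint::from(13u32);
+ let m = BigUint::from(497u32);
+ assert_eq!(monty_modpow(&x, &y, &m), BigUint::from(445u32));
+}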
diff --git a/rust/vendor/num-bigint/src/biguint/multiplication.rs b/rust/vendor/num-bigint/src/biguint/multiplication.rs
new file mode 100644
index 0000000..4d7f1f2
--- /dev/null
+++ b/rust/vendor/num-bigint/src/biguint/multiplication.rs
@@ -0,0 +1,568 @@
+use super::addition::{__add2, add2};
+use super::subtraction::sub2;
+#[cfg(not(u64_digit))]
+use super::u32_from_u128;
+use super::{biguint_from_vec, cmp_slice, BigUint, IntDigits};
+
+use crate::big_digit::{self, BigDigit, DoubleBigDigit};
+use crate::Sign::{self, Minus, NoSign, Plus};
+use crate::{BigInt, UsizePromotion};
+
+use core::cmp::Ordering;
+use core::iter::Product;
+use core::ops::{Mul, MulAssign};
+use num_traits::{CheckedMul, FromPrimitive, One, Zero};
+
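+/// Returns the low digit of `a + b * c + *acc`, storing the high part back
+/// into `acc` as the carry for the next digit.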
+#[inline]
+pub(super) fn mac_with_carry(
+ a: BigDigit,
+ b: BigDigit,
+ c: BigDigit,
+ acc: &mut DoubleBigDigit,
+) -> BigDigit {
+ *acc += DoubleBigDigit::from(a);
+ *acc += DoubleBigDigit::from(b) * DoubleBigDigit::from(c);
+ let lo = *acc as BigDigit;
+ *acc >>= big_digit::BITS;
+ lo
+}
+
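+/// Returns the low digit of `a * b + *acc`, storing the high part back into
+/// `acc` as the carry for the next digit.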
+#[inline]
+fn mul_with_carry(a: BigDigit, b: BigDigit, acc: &mut DoubleBigDigit) -> BigDigit {
+ *acc += DoubleBigDigit::from(a) * DoubleBigDigit::from(b);
+ let lo = *acc as BigDigit;
+ *acc >>= big_digit::BITS;
+ lo
+}
+
+/// Three argument multiply accumulate:
+/// acc += b * c
+fn mac_digit(acc: &mut [BigDigit], b: &[BigDigit], c: BigDigit) {
+ if c == 0 {
+ return;
+ }
+
+ let mut carry = 0;
+ let (a_lo, a_hi) = acc.split_at_mut(b.len());
+
+ for (a, &b) in a_lo.iter_mut().zip(b) {
+ *a = mac_with_carry(*a, b, c, &mut carry);
+ }
+
+ let (carry_hi, carry_lo) = big_digit::from_doublebigdigit(carry);
+
+ let final_carry = if carry_hi == 0 {
+ __add2(a_hi, &[carry_lo])
+ } else {
+ __add2(a_hi, &[carry_hi, carry_lo])
+ };
+ assert_eq!(final_carry, 0, "carry overflow during multiplication!");
+}
+
+fn bigint_from_slice(slice: &[BigDigit]) -> BigInt {
+ BigInt::from(biguint_from_vec(slice.to_vec()))
+}
+
+/// Three argument multiply accumulate:
+/// acc += b * c
+#[allow(clippy::many_single_char_names)]
+fn mac3(mut acc: &mut [BigDigit], mut b: &[BigDigit], mut c: &[BigDigit]) {
+ // Least-significant zeros have no effect on the output.
+ if let Some(&0) = b.first() {
+ if let Some(nz) = b.iter().position(|&d| d != 0) {
+ b = &b[nz..];
+ acc = &mut acc[nz..];
+ } else {
+ return;
+ }
+ }
+ if let Some(&0) = c.first() {
+ if let Some(nz) = c.iter().position(|&d| d != 0) {
+ c = &c[nz..];
+ acc = &mut acc[nz..];
+ } else {
+ return;
+ }
+ }
+
+ let acc = acc;
+ let (x, y) = if b.len() < c.len() { (b, c) } else { (c, b) };
+
+ // We use three algorithms for different input sizes.
+ //
+ // - For small inputs, long multiplication is fastest.
+ // - Next we use Karatsuba multiplication (Toom-2), which we have optimized
+ // to avoid unnecessary allocations for intermediate values.
+ // - For the largest inputs we use Toom-3, which better optimizes the
+ // number of operations, but uses more temporary allocations.
+ //
+ // The thresholds are somewhat arbitrary, chosen by evaluating the results
+ // of `cargo bench --bench bigint multiply`.
+
+ if x.len() <= 32 {
+ // Long multiplication:
+ for (i, xi) in x.iter().enumerate() {
+ mac_digit(&mut acc[i..], y, *xi);
+ }
+ } else if x.len() <= 256 {
+ // Karatsuba multiplication:
+ //
+ // The idea is that we break x and y up into two smaller numbers that each have about half
+ // as many digits, like so (note that multiplying by b is just a shift):
+ //
+ // x = x0 + x1 * b
+ // y = y0 + y1 * b
+ //
+ // With some algebra, we can compute x * y with three smaller products, where the inputs to
+ // each of the smaller products have only about half as many digits as x and y:
+ //
+ // x * y = (x0 + x1 * b) * (y0 + y1 * b)
+ //
+ // x * y = x0 * y0
+ // + x0 * y1 * b
+ // + x1 * y0 * b
+ // + x1 * y1 * b^2
+ //
+ // Let p0 = x0 * y0 and p2 = x1 * y1:
+ //
+ // x * y = p0
+ // + (x0 * y1 + x1 * y0) * b
+ // + p2 * b^2
+ //
+ // The real trick is that middle term:
+ //
+ // x0 * y1 + x1 * y0
+ //
+ // = x0 * y1 + x1 * y0 - p0 + p0 - p2 + p2
+ //
+ // = x0 * y1 + x1 * y0 - x0 * y0 - x1 * y1 + p0 + p2
+ //
+ // Now we complete the square:
+ //
+ // = -(x0 * y0 - x0 * y1 - x1 * y0 + x1 * y1) + p0 + p2
+ //
+ // = -((x1 - x0) * (y1 - y0)) + p0 + p2
+ //
+ // Let p1 = (x1 - x0) * (y1 - y0), and substitute back into our original formula:
+ //
+ // x * y = p0
+ // + (p0 + p2 - p1) * b
+ // + p2 * b^2
+ //
+ // Where the three intermediate products are:
+ //
+ // p0 = x0 * y0
+ // p1 = (x1 - x0) * (y1 - y0)
+ // p2 = x1 * y1
+ //
+ // In doing the computation, we take great care to avoid unnecessary temporary variables
+ // (since creating a BigUint requires a heap allocation): thus, we rearrange the formula a
+ // bit so we can use the same temporary variable for all the intermediate products:
+ //
+ // x * y = p2 * b^2 + p2 * b
+ // + p0 * b + p0
+ // - p1 * b
+ //
+ // The other trick we use is instead of doing explicit shifts, we slice acc at the
+ // appropriate offset when doing the add.
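+ //
+ // As a small base-10 sanity check (taking b = 100): x = 1234, y = 5678
+ // gives x0 = 34, x1 = 12, y0 = 78, y1 = 56, so p0 = 2652, p2 = 672 and
+ // p1 = (12 - 34) * (56 - 78) = 484. Then
+ // x * y = 672 * 100² + (2652 + 672 - 484) * 100 + 2652 = 7006652,
+ // which matches 1234 * 5678 computed directly.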
+
+ // When x is smaller than y, it's significantly faster to pick b such that x is split in
+ // half, not y:
+ let b = x.len() / 2;
+ let (x0, x1) = x.split_at(b);
+ let (y0, y1) = y.split_at(b);
+
+ // We reuse the same BigUint for all the intermediate multiplies and have to size p
+ // appropriately here: x1.len() >= x0.len() and y1.len() >= y0.len():
+ let len = x1.len() + y1.len() + 1;
+ let mut p = BigUint { data: vec![0; len] };
+
+ // p2 = x1 * y1
+ mac3(&mut p.data, x1, y1);
+
+ // Not required, but the adds go faster if we drop any unneeded 0s from the end:
+ p.normalize();
+
+ add2(&mut acc[b..], &p.data);
+ add2(&mut acc[b * 2..], &p.data);
+
+ // Zero out p before the next multiply:
+ p.data.truncate(0);
+ p.data.resize(len, 0);
+
+ // p0 = x0 * y0
+ mac3(&mut p.data, x0, y0);
+ p.normalize();
+
+ add2(acc, &p.data);
+ add2(&mut acc[b..], &p.data);
+
+ // p1 = (x1 - x0) * (y1 - y0)
+ // We do this one last, since it may be negative and acc can't ever be negative:
+ let (j0_sign, j0) = sub_sign(x1, x0);
+ let (j1_sign, j1) = sub_sign(y1, y0);
+
+ match j0_sign * j1_sign {
+ Plus => {
+ p.data.truncate(0);
+ p.data.resize(len, 0);
+
+ mac3(&mut p.data, &j0.data, &j1.data);
+ p.normalize();
+
+ sub2(&mut acc[b..], &p.data);
+ }
+ Minus => {
+ mac3(&mut acc[b..], &j0.data, &j1.data);
+ }
+ NoSign => (),
+ }
+ } else {
+ // Toom-3 multiplication:
+ //
+ // Toom-3 is like Karatsuba above, but dividing the inputs into three parts.
+ // Both are instances of Toom-Cook, using `k=3` and `k=2` respectively.
+ //
+ // The general idea is to treat the large integers digits as
+ // polynomials of a certain degree and determine the coefficients/digits
+ // of the product of the two via interpolation of the polynomial product.
+ let i = y.len() / 3 + 1;
+
+ let x0_len = Ord::min(x.len(), i);
+ let x1_len = Ord::min(x.len() - x0_len, i);
+
+ let y0_len = i;
+ let y1_len = Ord::min(y.len() - y0_len, i);
+
+ // Break x and y into three parts, representing an order-2 polynomial.
+ // t is chosen to be the size of a digit so we can use faster shifts
+ // in place of multiplications.
+ //
+ // x(t) = x2*t^2 + x1*t + x0
+ let x0 = bigint_from_slice(&x[..x0_len]);
+ let x1 = bigint_from_slice(&x[x0_len..x0_len + x1_len]);
+ let x2 = bigint_from_slice(&x[x0_len + x1_len..]);
+
+ // y(t) = y2*t^2 + y1*t + y0
+ let y0 = bigint_from_slice(&y[..y0_len]);
+ let y1 = bigint_from_slice(&y[y0_len..y0_len + y1_len]);
+ let y2 = bigint_from_slice(&y[y0_len + y1_len..]);
+
+ // Let w(t) = x(t) * y(t)
+ //
+ // This gives us the following order-4 polynomial.
+ //
+ // w(t) = w4*t^4 + w3*t^3 + w2*t^2 + w1*t + w0
+ //
+ // We need to find the coefficients w4, w3, w2, w1 and w0. Instead
+ // of simply multiplying the x and y in total, we can evaluate w
+ // at 5 points. An n-degree polynomial is uniquely identified by (n + 1)
+ // points.
+ //
+ // The choice of points at which to evaluate w is arbitrary, but we use
+ // the following.
+ //
+ // w(t) at t = 0, 1, -1, -2 and inf
+ //
+ // The values for w(t) in terms of x(t)*y(t) at these points are:
+ //
+ // let a = w(0) = x0 * y0
+ // let b = w(1) = (x2 + x1 + x0) * (y2 + y1 + y0)
+ // let c = w(-1) = (x2 - x1 + x0) * (y2 - y1 + y0)
+ // let d = w(-2) = (4*x2 - 2*x1 + x0) * (4*y2 - 2*y1 + y0)
+ // let e = w(inf) = x2 * y2 as t -> inf
+
+ // x0 + x2, avoiding temporaries
+ let p = &x0 + &x2;
+
+ // y0 + y2, avoiding temporaries
+ let q = &y0 + &y2;
+
+ // x2 - x1 + x0, avoiding temporaries
+ let p2 = &p - &x1;
+
+ // y2 - y1 + y0, avoiding temporaries
+ let q2 = &q - &y1;
+
+ // w(0)
+ let r0 = &x0 * &y0;
+
+ // w(inf)
+ let r4 = &x2 * &y2;
+
+ // w(1)
+ let r1 = (p + x1) * (q + y1);
+
+ // w(-1)
+ let r2 = &p2 * &q2;
+
+ // w(-2)
+ let r3 = ((p2 + x2) * 2 - x0) * ((q2 + y2) * 2 - y0);
+
+ // Evaluating these points gives us the following system of linear equations.
+ //
+ // 0 0 0 0 1 | a
+ // 1 1 1 1 1 | b
+ // 1 -1 1 -1 1 | c
+ // 16 -8 4 -2 1 | d
+ // 1 0 0 0 0 | e
+ //
+ // The solved system (after Gaussian elimination or similar),
+ // expressed in terms of its coefficients:
+ //
+ // w0 = w(0)
+ // w1 = w(0)/2 + w(1)/3 - w(-1) + w(-2)/6 - 2*w(inf)
+ // w2 = -w(0) + w(1)/2 + w(-1)/2 - w(inf)
+ // w3 = -w(0)/2 + w(1)/6 + w(-1)/2 - w(-2)/6 + 2*w(inf)
+ // w4 = w(inf)
+ //
+ // This particular sequence is given by Bodrato and is an interpolation
+ // of the above equations.
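+ //
+ // A quick symbolic check of those formulas: for x(t) = t + 2 and
+ // y(t) = t + 3 we get w(t) = t² + 5t + 6, with point values a = 6,
+ // b = 12, c = 2, d = 0 and e = 0; the formulas above then recover
+ // w1 = 5, w2 = 1 and w3 = 0, as expected.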
+ let mut comp3: BigInt = (r3 - &r1) / 3u32;
+ let mut comp1: BigInt = (r1 - &r2) >> 1;
+ let mut comp2: BigInt = r2 - &r0;
+ comp3 = ((&comp2 - comp3) >> 1) + (&r4 << 1);
+ comp2 += &comp1 - &r4;
+ comp1 -= &comp3;
+
+ // Recomposition. The coefficients of the polynomial are now known.
+ //
+ // Evaluate at w(t) where t is our given base to get the result.
+ //
+ // let bits = u64::from(big_digit::BITS) * i as u64;
+ // let result = r0
+ // + (comp1 << bits)
+ // + (comp2 << (2 * bits))
+ // + (comp3 << (3 * bits))
+ // + (r4 << (4 * bits));
+ // let result_pos = result.to_biguint().unwrap();
+ // add2(&mut acc[..], &result_pos.data);
+ //
+ // But with less intermediate copying:
+ for (j, result) in [&r0, &comp1, &comp2, &comp3, &r4].iter().enumerate().rev() {
+ match result.sign() {
+ Plus => add2(&mut acc[i * j..], result.digits()),
+ Minus => sub2(&mut acc[i * j..], result.digits()),
+ NoSign => {}
+ }
+ }
+ }
+}
+
+fn mul3(x: &[BigDigit], y: &[BigDigit]) -> BigUint {
+ let len = x.len() + y.len() + 1;
+ let mut prod = BigUint { data: vec![0; len] };
+
+ mac3(&mut prod.data, x, y);
+ prod.normalized()
+}
+
+fn scalar_mul(a: &mut BigUint, b: BigDigit) {
+ match b {
+ 0 => a.set_zero(),
+ 1 => {}
+ _ => {
+ if b.is_power_of_two() {
+ *a <<= b.trailing_zeros();
+ } else {
+ let mut carry = 0;
+ for a in a.data.iter_mut() {
+ *a = mul_with_carry(*a, b, &mut carry);
+ }
+ if carry != 0 {
+ a.data.push(carry as BigDigit);
+ }
+ }
+ }
+ }
+}
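+
+// An illustrative check of both `scalar_mul` paths: a power-of-two multiplier
+// takes the shift fast path, while a general multiplier takes the
+// carry-propagation loop.
+#[test]
+fn test_scalar_mul_sketch() {
+ let mut a = BigUint::from(12345u32);
+ scalar_mul(&mut a, 8); // power of two: becomes a left shift
+ assert_eq!(a, BigUint::from(98760u32));
+ scalar_mul(&mut a, 10); // general case: digit-by-digit multiply
+ assert_eq!(a, BigUint::from(987600u32));
+}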
+
+fn sub_sign(mut a: &[BigDigit], mut b: &[BigDigit]) -> (Sign, BigUint) {
+ // Normalize:
+ if let Some(&0) = a.last() {
+ a = &a[..a.iter().rposition(|&x| x != 0).map_or(0, |i| i + 1)];
+ }
+ if let Some(&0) = b.last() {
+ b = &b[..b.iter().rposition(|&x| x != 0).map_or(0, |i| i + 1)];
+ }
+
+ match cmp_slice(a, b) {
+ Ordering::Greater => {
+ let mut a = a.to_vec();
+ sub2(&mut a, b);
+ (Plus, biguint_from_vec(a))
+ }
+ Ordering::Less => {
+ let mut b = b.to_vec();
+ sub2(&mut b, a);
+ (Minus, biguint_from_vec(b))
+ }
+ Ordering::Equal => (NoSign, Zero::zero()),
+ }
+}
+
+macro_rules! impl_mul {
+ ($(impl Mul<$Other:ty> for $Self:ty;)*) => {$(
+ impl Mul<$Other> for $Self {
+ type Output = BigUint;
+
+ #[inline]
+ fn mul(self, other: $Other) -> BigUint {
+ match (&*self.data, &*other.data) {
+ // multiply by zero
+ (&[], _) | (_, &[]) => BigUint::zero(),
+ // multiply by a scalar
+ (_, &[digit]) => self * digit,
+ (&[digit], _) => other * digit,
+ // full multiplication
+ (x, y) => mul3(x, y),
+ }
+ }
+ }
+ )*}
+}
+impl_mul! {
+ impl Mul<BigUint> for BigUint;
+ impl Mul<BigUint> for &BigUint;
+ impl Mul<&BigUint> for BigUint;
+ impl Mul<&BigUint> for &BigUint;
+}
+
+macro_rules! impl_mul_assign {
+ ($(impl MulAssign<$Other:ty> for BigUint;)*) => {$(
+ impl MulAssign<$Other> for BigUint {
+ #[inline]
+ fn mul_assign(&mut self, other: $Other) {
+ match (&*self.data, &*other.data) {
+ // multiply by zero
+ (&[], _) => {},
+ (_, &[]) => self.set_zero(),
+ // multiply by a scalar
+ (_, &[digit]) => *self *= digit,
+ (&[digit], _) => *self = other * digit,
+ // full multiplication
+ (x, y) => *self = mul3(x, y),
+ }
+ }
+ }
+ )*}
+}
+impl_mul_assign! {
+ impl MulAssign<BigUint> for BigUint;
+ impl MulAssign<&BigUint> for BigUint;
+}
+
+promote_unsigned_scalars!(impl Mul for BigUint, mul);
+promote_unsigned_scalars_assign!(impl MulAssign for BigUint, mul_assign);
+forward_all_scalar_binop_to_val_val_commutative!(impl Mul<u32> for BigUint, mul);
+forward_all_scalar_binop_to_val_val_commutative!(impl Mul<u64> for BigUint, mul);
+forward_all_scalar_binop_to_val_val_commutative!(impl Mul<u128> for BigUint, mul);
+
+impl Mul<u32> for BigUint {
+ type Output = BigUint;
+
+ #[inline]
+ fn mul(mut self, other: u32) -> BigUint {
+ self *= other;
+ self
+ }
+}
+impl MulAssign<u32> for BigUint {
+ #[inline]
+ fn mul_assign(&mut self, other: u32) {
+ scalar_mul(self, other as BigDigit);
+ }
+}
+
+impl Mul<u64> for BigUint {
+ type Output = BigUint;
+
+ #[inline]
+ fn mul(mut self, other: u64) -> BigUint {
+ self *= other;
+ self
+ }
+}
+impl MulAssign<u64> for BigUint {
+ #[cfg(not(u64_digit))]
+ #[inline]
+ fn mul_assign(&mut self, other: u64) {
+ if let Some(other) = BigDigit::from_u64(other) {
+ scalar_mul(self, other);
+ } else {
+ let (hi, lo) = big_digit::from_doublebigdigit(other);
+ *self = mul3(&self.data, &[lo, hi]);
+ }
+ }
+
+ #[cfg(u64_digit)]
+ #[inline]
+ fn mul_assign(&mut self, other: u64) {
+ scalar_mul(self, other);
+ }
+}
+
+impl Mul<u128> for BigUint {
+ type Output = BigUint;
+
+ #[inline]
+ fn mul(mut self, other: u128) -> BigUint {
+ self *= other;
+ self
+ }
+}
+
+impl MulAssign<u128> for BigUint {
+ #[cfg(not(u64_digit))]
+ #[inline]
+ fn mul_assign(&mut self, other: u128) {
+ if let Some(other) = BigDigit::from_u128(other) {
+ scalar_mul(self, other);
+ } else {
+ *self = match u32_from_u128(other) {
+ (0, 0, c, d) => mul3(&self.data, &[d, c]),
+ (0, b, c, d) => mul3(&self.data, &[d, c, b]),
+ (a, b, c, d) => mul3(&self.data, &[d, c, b, a]),
+ };
+ }
+ }
+
+ #[cfg(u64_digit)]
+ #[inline]
+ fn mul_assign(&mut self, other: u128) {
+ if let Some(other) = BigDigit::from_u128(other) {
+ scalar_mul(self, other);
+ } else {
+ let (hi, lo) = big_digit::from_doublebigdigit(other);
+ *self = mul3(&self.data, &[lo, hi]);
+ }
+ }
+}
+
+impl CheckedMul for BigUint {
+ #[inline]
+ fn checked_mul(&self, v: &BigUint) -> Option<BigUint> {
+ Some(self.mul(v))
+ }
+}
+
+impl_product_iter_type!(BigUint);
+
+#[test]
+fn test_sub_sign() {
+ use crate::BigInt;
+ use num_traits::Num;
+
+ fn sub_sign_i(a: &[BigDigit], b: &[BigDigit]) -> BigInt {
+ let (sign, val) = sub_sign(a, b);
+ BigInt::from_biguint(sign, val)
+ }
+
+ let a = BigUint::from_str_radix("265252859812191058636308480000000", 10).unwrap();
+ let b = BigUint::from_str_radix("26525285981219105863630848000000", 10).unwrap();
+ let a_i = BigInt::from(a.clone());
+ let b_i = BigInt::from(b.clone());
+
+ assert_eq!(sub_sign_i(&a.data, &b.data), &a_i - &b_i);
+ assert_eq!(sub_sign_i(&b.data, &a.data), &b_i - &a_i);
+}
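+
+// A sketch exercising the multi-digit `mul3` path via the public operator,
+// using a product with a known closed form: (2⁶⁴ + 1) * (2⁶⁴ + 3) = 2¹²⁸ + 2⁶⁶ + 3.
+#[test]
+fn test_mul_known_product() {
+ let a = BigUint::from((1u128 << 64) + 1);
+ let b = BigUint::from((1u128 << 64) + 3);
+ let expected = (BigUint::from(1u8) << 128u32) + (BigUint::from(1u8) << 66u32) + 3u32;
+ assert_eq!(a * b, expected);
+}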
diff --git a/rust/vendor/num-bigint/src/biguint/power.rs b/rust/vendor/num-bigint/src/biguint/power.rs
new file mode 100644
index 0000000..621e1b1
--- /dev/null
+++ b/rust/vendor/num-bigint/src/biguint/power.rs
@@ -0,0 +1,258 @@
+use super::monty::monty_modpow;
+use super::BigUint;
+
+use crate::big_digit::{self, BigDigit};
+
+use num_integer::Integer;
+use num_traits::{One, Pow, ToPrimitive, Zero};
+
+impl Pow<&BigUint> for BigUint {
+ type Output = BigUint;
+
+ #[inline]
+ fn pow(self, exp: &BigUint) -> BigUint {
+ if self.is_one() || exp.is_zero() {
+ BigUint::one()
+ } else if self.is_zero() {
+ BigUint::zero()
+ } else if let Some(exp) = exp.to_u64() {
+ self.pow(exp)
+ } else if let Some(exp) = exp.to_u128() {
+ self.pow(exp)
+ } else {
+ // At this point, `self >= 2` and `exp >= 2¹²⁸`. The smallest possible result,
+ // `2.pow(2¹²⁸)`, would require far more memory than 64-bit targets can address!
+ panic!("memory overflow")
+ }
+ }
+}
+
+impl Pow<BigUint> for BigUint {
+ type Output = BigUint;
+
+ #[inline]
+ fn pow(self, exp: BigUint) -> BigUint {
+ Pow::pow(self, &exp)
+ }
+}
+
+impl Pow<&BigUint> for &BigUint {
+ type Output = BigUint;
+
+ #[inline]
+ fn pow(self, exp: &BigUint) -> BigUint {
+ if self.is_one() || exp.is_zero() {
+ BigUint::one()
+ } else if self.is_zero() {
+ BigUint::zero()
+ } else {
+ self.clone().pow(exp)
+ }
+ }
+}
+
+impl Pow<BigUint> for &BigUint {
+ type Output = BigUint;
+
+ #[inline]
+ fn pow(self, exp: BigUint) -> BigUint {
+ Pow::pow(self, &exp)
+ }
+}
+
+macro_rules! pow_impl {
+ ($T:ty) => {
+ impl Pow<$T> for BigUint {
+ type Output = BigUint;
+
+ fn pow(self, mut exp: $T) -> BigUint {
+ if exp == 0 {
+ return BigUint::one();
+ }
+ let mut base = self;
+
+ while exp & 1 == 0 {
+ base = &base * &base;
+ exp >>= 1;
+ }
+
+ if exp == 1 {
+ return base;
+ }
+
+ let mut acc = base.clone();
+ while exp > 1 {
+ exp >>= 1;
+ base = &base * &base;
+ if exp & 1 == 1 {
+ acc *= &base;
+ }
+ }
+ acc
+ }
+ }
+
+ impl Pow<&$T> for BigUint {
+ type Output = BigUint;
+
+ #[inline]
+ fn pow(self, exp: &$T) -> BigUint {
+ Pow::pow(self, *exp)
+ }
+ }
+
+ impl Pow<$T> for &BigUint {
+ type Output = BigUint;
+
+ #[inline]
+ fn pow(self, exp: $T) -> BigUint {
+ if exp == 0 {
+ return BigUint::one();
+ }
+ Pow::pow(self.clone(), exp)
+ }
+ }
+
+ impl Pow<&$T> for &BigUint {
+ type Output = BigUint;
+
+ #[inline]
+ fn pow(self, exp: &$T) -> BigUint {
+ Pow::pow(self, *exp)
+ }
+ }
+ };
+}
+
+pow_impl!(u8);
+pow_impl!(u16);
+pow_impl!(u32);
+pow_impl!(u64);
+pow_impl!(usize);
+pow_impl!(u128);
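+
+// Illustrative square-and-multiply checks: 3⁷ = 2187 exercises the odd-exponent
+// accumulator loop, and 2¹² = 4096 also exercises the even-exponent
+// squaring loop above it.
+#[test]
+fn test_pow_scalar_sketch() {
+ assert_eq!(BigUint::from(3u8).pow(7u32), BigUint::from(2187u32));
+ assert_eq!(BigUint::from(2u8).pow(12u32), BigUint::from(4096u32));
+}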
+
+pub(super) fn modpow(x: &BigUint, exponent: &BigUint, modulus: &BigUint) -> BigUint {
+ assert!(
+ !modulus.is_zero(),
+ "attempt to calculate with zero modulus!"
+ );
+
+ if modulus.is_odd() {
+ // For an odd modulus, we can use Montgomery multiplication in base 2^BITS.
+ monty_modpow(x, exponent, modulus)
+ } else {
+ // Otherwise do basically the same as `num::pow`, but with a modulus.
+ plain_modpow(x, &exponent.data, modulus)
+ }
+}
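+
+// A sketch exercising both dispatch arms: 497 is odd (Montgomery path) and
+// 1000 is even (plain path); 4¹³ = 67108864, so the expected residues are
+// 445 and 864 respectively.
+#[test]
+fn test_modpow_dispatch_sketch() {
+ let x = BigUint::from(4u32);
+ let e = BigUint::from(13u32);
+ assert_eq!(modpow(&x, &e, &BigUint::from(497u32)), BigUint::from(445u32));
+ assert_eq!(modpow(&x, &e, &BigUint::from(1000u32)), BigUint::from(864u32));
+}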
+
+fn plain_modpow(base: &BigUint, exp_data: &[BigDigit], modulus: &BigUint) -> BigUint {
+ assert!(
+ !modulus.is_zero(),
+ "attempt to calculate with zero modulus!"
+ );
+
+ let i = match exp_data.iter().position(|&r| r != 0) {
+ None => return BigUint::one(),
+ Some(i) => i,
+ };
+
+ let mut base = base % modulus;
+ for _ in 0..i {
+ for _ in 0..big_digit::BITS {
+ base = &base * &base % modulus;
+ }
+ }
+
+ let mut r = exp_data[i];
+ let mut b = 0u8;
+ while r.is_even() {
+ base = &base * &base % modulus;
+ r >>= 1;
+ b += 1;
+ }
+
+ let mut exp_iter = exp_data[i + 1..].iter();
+ if exp_iter.len() == 0 && r.is_one() {
+ return base;
+ }
+
+ let mut acc = base.clone();
+ r >>= 1;
+ b += 1;
+
+ {
+ let mut unit = |exp_is_odd| {
+ base = &base * &base % modulus;
+ if exp_is_odd {
+ acc *= &base;
+ acc %= modulus;
+ }
+ };
+
+ if let Some(&last) = exp_iter.next_back() {
+ // consume exp_data[i]
+ for _ in b..big_digit::BITS {
+ unit(r.is_odd());
+ r >>= 1;
+ }
+
+ // consume all other digits before the last
+ for &r in exp_iter {
+ let mut r = r;
+ for _ in 0..big_digit::BITS {
+ unit(r.is_odd());
+ r >>= 1;
+ }
+ }
+ r = last;
+ }
+
+ debug_assert_ne!(r, 0);
+ while !r.is_zero() {
+ unit(r.is_odd());
+ r >>= 1;
+ }
+ }
+ acc
+}
+
+#[test]
+fn test_plain_modpow() {
+ let two = &BigUint::from(2u32);
+ let modulus = BigUint::from(0x1100u32);
+
+ let exp = vec![0, 0b1];
+ assert_eq!(
+ two.pow(0b1_00000000_u32) % &modulus,
+ plain_modpow(two, &exp, &modulus)
+ );
+ let exp = vec![0, 0b10];
+ assert_eq!(
+ two.pow(0b10_00000000_u32) % &modulus,
+ plain_modpow(two, &exp, &modulus)
+ );
+ let exp = vec![0, 0b110010];
+ assert_eq!(
+ two.pow(0b110010_00000000_u32) % &modulus,
+ plain_modpow(two, &exp, &modulus)
+ );
+ let exp = vec![0b1, 0b1];
+ assert_eq!(
+ two.pow(0b1_00000001_u32) % &modulus,
+ plain_modpow(two, &exp, &modulus)
+ );
+ let exp = vec![0b1100, 0, 0b1];
+ assert_eq!(
+ two.pow(0b1_00000000_00001100_u32) % &modulus,
+ plain_modpow(two, &exp, &modulus)
+ );
+}
+
+#[test]
+fn test_pow_biguint() {
+ let base = BigUint::from(5u8);
+ let exponent = BigUint::from(3u8);
+
+ assert_eq!(BigUint::from(125u8), base.pow(exponent));
+}
diff --git a/rust/vendor/num-bigint/src/biguint/serde.rs b/rust/vendor/num-bigint/src/biguint/serde.rs
new file mode 100644
index 0000000..3240f09
--- /dev/null
+++ b/rust/vendor/num-bigint/src/biguint/serde.rs
@@ -0,0 +1,119 @@
+use super::{biguint_from_vec, BigUint};
+
+use crate::std_alloc::Vec;
+
+use core::{cmp, fmt, mem};
+use serde::de::{SeqAccess, Visitor};
+use serde::{Deserialize, Deserializer, Serialize, Serializer};
+
+// `cautious` is based on the function of the same name in `serde`, but specialized to `u32`:
+// https://github.com/dtolnay/serde/blob/399ef081ecc36d2f165ff1f6debdcbf6a1dc7efb/serde/src/private/size_hint.rs#L11-L22
+fn cautious(hint: Option<usize>) -> usize {
+ const MAX_PREALLOC_BYTES: usize = 1024 * 1024;
+
+ cmp::min(
+ hint.unwrap_or(0),
+ MAX_PREALLOC_BYTES / mem::size_of::<u32>(),
+ )
+}
+
+impl Serialize for BigUint {
+ #[cfg(not(u64_digit))]
+ fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
+ where
+ S: Serializer,
+ {
+ // Note: do not change the serialization format, or it may break forward
+ // and backward compatibility of serialized data! If we ever change the
+ // internal representation, we should still serialize in base-`u32`.
+ let data: &[u32] = &self.data;
+ data.serialize(serializer)
+ }
+
+ #[cfg(u64_digit)]
+ fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
+ where
+ S: Serializer,
+ {
+ use serde::ser::SerializeSeq;
+
+ if let Some((&last, data)) = self.data.split_last() {
+ let last_lo = last as u32;
+ let last_hi = (last >> 32) as u32;
+ let u32_len = data.len() * 2 + 1 + (last_hi != 0) as usize;
+ let mut seq = serializer.serialize_seq(Some(u32_len))?;
+ for &x in data {
+ seq.serialize_element(&(x as u32))?;
+ seq.serialize_element(&((x >> 32) as u32))?;
+ }
+ seq.serialize_element(&last_lo)?;
+ if last_hi != 0 {
+ seq.serialize_element(&last_hi)?;
+ }
+ seq.end()
+ } else {
+ let data: &[u32] = &[];
+ data.serialize(serializer)
+ }
+ }
+}
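+
+// For example (illustrative): under either digit representation, the value
+// 2⁶⁴ serializes as the little-endian base-2³² sequence [0, 0, 1].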
+
+impl<'de> Deserialize<'de> for BigUint {
+ fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
+ where
+ D: Deserializer<'de>,
+ {
+ deserializer.deserialize_seq(U32Visitor)
+ }
+}
+
+struct U32Visitor;
+
+impl<'de> Visitor<'de> for U32Visitor {
+ type Value = BigUint;
+
+ fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
+ formatter.write_str("a sequence of unsigned 32-bit numbers")
+ }
+
+ #[cfg(not(u64_digit))]
+ fn visit_seq<S>(self, mut seq: S) -> Result<Self::Value, S::Error>
+ where
+ S: SeqAccess<'de>,
+ {
+ let len = cautious(seq.size_hint());
+ let mut data = Vec::with_capacity(len);
+
+ while let Some(value) = seq.next_element::<u32>()? {
+ data.push(value);
+ }
+
+ Ok(biguint_from_vec(data))
+ }
+
+ #[cfg(u64_digit)]
+ fn visit_seq<S>(self, mut seq: S) -> Result<Self::Value, S::Error>
+ where
+ S: SeqAccess<'de>,
+ {
+ use crate::big_digit::BigDigit;
+ use num_integer::Integer;
+
+ let u32_len = cautious(seq.size_hint());
+ let len = Integer::div_ceil(&u32_len, &2);
+ let mut data = Vec::with_capacity(len);
+
+ while let Some(lo) = seq.next_element::<u32>()? {
+ let mut value = BigDigit::from(lo);
+ if let Some(hi) = seq.next_element::<u32>()? {
+ value |= BigDigit::from(hi) << 32;
+ data.push(value);
+ } else {
+ data.push(value);
+ break;
+ }
+ }
+
+ Ok(biguint_from_vec(data))
+ }
+}
diff --git a/rust/vendor/num-bigint/src/biguint/shift.rs b/rust/vendor/num-bigint/src/biguint/shift.rs
new file mode 100644
index 0000000..00326bb
--- /dev/null
+++ b/rust/vendor/num-bigint/src/biguint/shift.rs
@@ -0,0 +1,172 @@
+use super::{biguint_from_vec, BigUint};
+
+use crate::big_digit;
+use crate::std_alloc::{Cow, Vec};
+
+use core::mem;
+use core::ops::{Shl, ShlAssign, Shr, ShrAssign};
+use num_traits::{PrimInt, Zero};
+
+#[inline]
+fn biguint_shl<T: PrimInt>(n: Cow<'_, BigUint>, shift: T) -> BigUint {
+ if shift < T::zero() {
+ panic!("attempt to shift left with negative");
+ }
+ if n.is_zero() {
+ return n.into_owned();
+ }
+ let bits = T::from(big_digit::BITS).unwrap();
+ let digits = (shift / bits).to_usize().expect("capacity overflow");
+ let shift = (shift % bits).to_u8().unwrap();
+ biguint_shl2(n, digits, shift)
+}
+
+fn biguint_shl2(n: Cow<'_, BigUint>, digits: usize, shift: u8) -> BigUint {
+ let mut data = match digits {
+ 0 => n.into_owned().data,
+ _ => {
+ let len = digits.saturating_add(n.data.len() + 1);
+ let mut data = Vec::with_capacity(len);
+ data.resize(digits, 0);
+ data.extend(n.data.iter());
+ data
+ }
+ };
+
+ if shift > 0 {
+ let mut carry = 0;
+ let carry_shift = big_digit::BITS - shift;
+ for elem in data[digits..].iter_mut() {
+ let new_carry = *elem >> carry_shift;
+ *elem = (*elem << shift) | carry;
+ carry = new_carry;
+ }
+ if carry != 0 {
+ data.push(carry);
+ }
+ }
+
+ biguint_from_vec(data)
+}
+
+#[inline]
+fn biguint_shr<T: PrimInt>(n: Cow<'_, BigUint>, shift: T) -> BigUint {
+ if shift < T::zero() {
+ panic!("attempt to shift right with negative");
+ }
+ if n.is_zero() {
+ return n.into_owned();
+ }
+ let bits = T::from(big_digit::BITS).unwrap();
+ let digits = (shift / bits).to_usize().unwrap_or(core::usize::MAX);
+ let shift = (shift % bits).to_u8().unwrap();
+ biguint_shr2(n, digits, shift)
+}
+
+fn biguint_shr2(n: Cow<'_, BigUint>, digits: usize, shift: u8) -> BigUint {
+ if digits >= n.data.len() {
+ let mut n = n.into_owned();
+ n.set_zero();
+ return n;
+ }
+ let mut data = match n {
+ Cow::Borrowed(n) => n.data[digits..].to_vec(),
+ Cow::Owned(mut n) => {
+ n.data.drain(..digits);
+ n.data
+ }
+ };
+
+ if shift > 0 {
+ let mut borrow = 0;
+ let borrow_shift = big_digit::BITS - shift;
+ for elem in data.iter_mut().rev() {
+ let new_borrow = *elem << borrow_shift;
+ *elem = (*elem >> shift) | borrow;
+ borrow = new_borrow;
+ }
+ }
+
+ biguint_from_vec(data)
+}
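+
+// An illustrative round-trip sketch: shifting left then right by the same
+// amount is the identity, and a left shift by k multiplies by 2^k.
+#[test]
+fn test_shift_round_trip_sketch() {
+ let n = BigUint::from(0x1234_5678_9abc_def0u64);
+ assert_eq!((&n << 40u32) >> 40u32, n);
+ assert_eq!(&n << 3u32, &n * 8u32);
+}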
+
+macro_rules! impl_shift {
+ (@ref $Shx:ident :: $shx:ident, $ShxAssign:ident :: $shx_assign:ident, $rhs:ty) => {
+ impl $Shx<&$rhs> for BigUint {
+ type Output = BigUint;
+
+ #[inline]
+ fn $shx(self, rhs: &$rhs) -> BigUint {
+ $Shx::$shx(self, *rhs)
+ }
+ }
+ impl $Shx<&$rhs> for &BigUint {
+ type Output = BigUint;
+
+ #[inline]
+ fn $shx(self, rhs: &$rhs) -> BigUint {
+ $Shx::$shx(self, *rhs)
+ }
+ }
+ impl $ShxAssign<&$rhs> for BigUint {
+ #[inline]
+ fn $shx_assign(&mut self, rhs: &$rhs) {
+ $ShxAssign::$shx_assign(self, *rhs);
+ }
+ }
+ };
+ ($($rhs:ty),+) => {$(
+ impl Shl<$rhs> for BigUint {
+ type Output = BigUint;
+
+ #[inline]
+ fn shl(self, rhs: $rhs) -> BigUint {
+ biguint_shl(Cow::Owned(self), rhs)
+ }
+ }
+ impl Shl<$rhs> for &BigUint {
+ type Output = BigUint;
+
+ #[inline]
+ fn shl(self, rhs: $rhs) -> BigUint {
+ biguint_shl(Cow::Borrowed(self), rhs)
+ }
+ }
+ impl ShlAssign<$rhs> for BigUint {
+ #[inline]
+ fn shl_assign(&mut self, rhs: $rhs) {
+ let n = mem::replace(self, BigUint::zero());
+ *self = n << rhs;
+ }
+ }
+ impl_shift! { @ref Shl::shl, ShlAssign::shl_assign, $rhs }
+
+ impl Shr<$rhs> for BigUint {
+ type Output = BigUint;
+
+ #[inline]
+ fn shr(self, rhs: $rhs) -> BigUint {
+ biguint_shr(Cow::Owned(self), rhs)
+ }
+ }
+ impl Shr<$rhs> for &BigUint {
+ type Output = BigUint;
+
+ #[inline]
+ fn shr(self, rhs: $rhs) -> BigUint {
+ biguint_shr(Cow::Borrowed(self), rhs)
+ }
+ }
+ impl ShrAssign<$rhs> for BigUint {
+ #[inline]
+ fn shr_assign(&mut self, rhs: $rhs) {
+ let n = mem::replace(self, BigUint::zero());
+ *self = n >> rhs;
+ }
+ }
+ impl_shift! { @ref Shr::shr, ShrAssign::shr_assign, $rhs }
+ )*};
+}
+
+impl_shift! { u8, u16, u32, u64, u128, usize }
+impl_shift! { i8, i16, i32, i64, i128, isize }
diff --git a/rust/vendor/num-bigint/src/biguint/subtraction.rs b/rust/vendor/num-bigint/src/biguint/subtraction.rs
new file mode 100644
index 0000000..b7cf59d
--- /dev/null
+++ b/rust/vendor/num-bigint/src/biguint/subtraction.rs
@@ -0,0 +1,312 @@
+#[cfg(not(u64_digit))]
+use super::u32_from_u128;
+use super::BigUint;
+
+use crate::big_digit::{self, BigDigit};
+use crate::UsizePromotion;
+
+use core::cmp::Ordering::{Equal, Greater, Less};
+use core::ops::{Sub, SubAssign};
+use num_traits::{CheckedSub, Zero};
+
+#[cfg(all(use_addcarry, target_arch = "x86_64"))]
+use core::arch::x86_64 as arch;
+
+#[cfg(all(use_addcarry, target_arch = "x86"))]
+use core::arch::x86 as arch;
+
+// Subtract with borrow:
+#[cfg(all(use_addcarry, u64_digit))]
+#[inline]
+fn sbb(borrow: u8, a: u64, b: u64, out: &mut u64) -> u8 {
+ // Safety: There are absolutely no safety concerns with calling `_subborrow_u64`.
+ // It's just unsafe for API consistency with other intrinsics.
+ unsafe { arch::_subborrow_u64(borrow, a, b, out) }
+}
+
+#[cfg(all(use_addcarry, not(u64_digit)))]
+#[inline]
+fn sbb(borrow: u8, a: u32, b: u32, out: &mut u32) -> u8 {
+ // Safety: There are absolutely no safety concerns with calling `_subborrow_u32`.
+ // It's just unsafe for API consistency with other intrinsics.
+ unsafe { arch::_subborrow_u32(borrow, a, b, out) }
+}
+
+// fallback for environments where we don't have a subborrow intrinsic
+#[cfg(not(use_addcarry))]
+#[inline]
+fn sbb(borrow: u8, a: BigDigit, b: BigDigit, out: &mut BigDigit) -> u8 {
+ use crate::big_digit::SignedDoubleBigDigit;
+
+ let difference = SignedDoubleBigDigit::from(a)
+ - SignedDoubleBigDigit::from(b)
+ - SignedDoubleBigDigit::from(borrow);
+ *out = difference as BigDigit;
+ u8::from(difference < 0)
+}
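+
+// A sketch of borrow propagation through `sbb` (valid for either digit width,
+// using `!0` for the all-ones digit):
+#[test]
+fn test_sbb_sketch() {
+ let mut out = 0;
+ assert_eq!(sbb(0, 5, 3, &mut out), 0); // 5 - 3: no borrow
+ assert_eq!(out, 2);
+ assert_eq!(sbb(0, 3, 5, &mut out), 1); // 3 - 5: wraps and borrows
+ assert_eq!(out, !0 - 1);
+ assert_eq!(sbb(1, 1, 0, &mut out), 0); // the next digit absorbs the borrow
+ assert_eq!(out, 0);
+}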
+
+pub(super) fn sub2(a: &mut [BigDigit], b: &[BigDigit]) {
+ let mut borrow = 0;
+
+ let len = Ord::min(a.len(), b.len());
+ let (a_lo, a_hi) = a.split_at_mut(len);
+ let (b_lo, b_hi) = b.split_at(len);
+
+ for (a, b) in a_lo.iter_mut().zip(b_lo) {
+ borrow = sbb(borrow, *a, *b, a);
+ }
+
+ if borrow != 0 {
+ for a in a_hi {
+ borrow = sbb(borrow, *a, 0, a);
+ if borrow == 0 {
+ break;
+ }
+ }
+ }
+
+ // note: we're _required_ to fail on underflow
+ assert!(
+ borrow == 0 && b_hi.iter().all(|x| *x == 0),
+ "Cannot subtract b from a because b is larger than a."
+ );
+}
+
+// Only for the Sub impl. `a` and `b` must have same length.
+#[inline]
+fn __sub2rev(a: &[BigDigit], b: &mut [BigDigit]) -> u8 {
+ debug_assert!(b.len() == a.len());
+
+ let mut borrow = 0;
+
+ for (ai, bi) in a.iter().zip(b) {
+ borrow = sbb(borrow, *ai, *bi, bi);
+ }
+
+ borrow
+}
+
+fn sub2rev(a: &[BigDigit], b: &mut [BigDigit]) {
+ debug_assert!(b.len() >= a.len());
+
+ let len = Ord::min(a.len(), b.len());
+ let (a_lo, a_hi) = a.split_at(len);
+ let (b_lo, b_hi) = b.split_at_mut(len);
+
+ let borrow = __sub2rev(a_lo, b_lo);
+
+ assert!(a_hi.is_empty());
+
+ // note: we're _required_ to fail on underflow
+ assert!(
+ borrow == 0 && b_hi.iter().all(|x| *x == 0),
+ "Cannot subtract b from a because b is larger than a."
+ );
+}
+
+forward_val_val_binop!(impl Sub for BigUint, sub);
+forward_ref_ref_binop!(impl Sub for BigUint, sub);
+forward_val_assign!(impl SubAssign for BigUint, sub_assign);
+
+impl Sub<&BigUint> for BigUint {
+ type Output = BigUint;
+
+ fn sub(mut self, other: &BigUint) -> BigUint {
+ self -= other;
+ self
+ }
+}
+impl SubAssign<&BigUint> for BigUint {
+ fn sub_assign(&mut self, other: &BigUint) {
+ sub2(&mut self.data[..], &other.data[..]);
+ self.normalize();
+ }
+}
+
+impl Sub<BigUint> for &BigUint {
+ type Output = BigUint;
+
+ fn sub(self, mut other: BigUint) -> BigUint {
+ let other_len = other.data.len();
+ if other_len < self.data.len() {
+ let lo_borrow = __sub2rev(&self.data[..other_len], &mut other.data);
+ other.data.extend_from_slice(&self.data[other_len..]);
+ if lo_borrow != 0 {
+ sub2(&mut other.data[other_len..], &[1])
+ }
+ } else {
+ sub2rev(&self.data[..], &mut other.data[..]);
+ }
+ other.normalized()
+ }
+}
+
+promote_unsigned_scalars!(impl Sub for BigUint, sub);
+promote_unsigned_scalars_assign!(impl SubAssign for BigUint, sub_assign);
+forward_all_scalar_binop_to_val_val!(impl Sub<u32> for BigUint, sub);
+forward_all_scalar_binop_to_val_val!(impl Sub<u64> for BigUint, sub);
+forward_all_scalar_binop_to_val_val!(impl Sub<u128> for BigUint, sub);
+
+impl Sub<u32> for BigUint {
+ type Output = BigUint;
+
+ #[inline]
+ fn sub(mut self, other: u32) -> BigUint {
+ self -= other;
+ self
+ }
+}
+
+impl SubAssign<u32> for BigUint {
+ fn sub_assign(&mut self, other: u32) {
+ sub2(&mut self.data[..], &[other as BigDigit]);
+ self.normalize();
+ }
+}
+
+impl Sub<BigUint> for u32 {
+ type Output = BigUint;
+
+ #[cfg(not(u64_digit))]
+ #[inline]
+ fn sub(self, mut other: BigUint) -> BigUint {
+ if other.data.len() == 0 {
+ other.data.push(self);
+ } else {
+ sub2rev(&[self], &mut other.data[..]);
+ }
+ other.normalized()
+ }
+
+ #[cfg(u64_digit)]
+ #[inline]
+ fn sub(self, mut other: BigUint) -> BigUint {
+ if other.data.is_empty() {
+ other.data.push(self as BigDigit);
+ } else {
+ sub2rev(&[self as BigDigit], &mut other.data[..]);
+ }
+ other.normalized()
+ }
+}
+
+impl Sub<u64> for BigUint {
+ type Output = BigUint;
+
+ #[inline]
+ fn sub(mut self, other: u64) -> BigUint {
+ self -= other;
+ self
+ }
+}
+
+impl SubAssign<u64> for BigUint {
+ #[cfg(not(u64_digit))]
+ #[inline]
+ fn sub_assign(&mut self, other: u64) {
+ let (hi, lo) = big_digit::from_doublebigdigit(other);
+ sub2(&mut self.data[..], &[lo, hi]);
+ self.normalize();
+ }
+
+ #[cfg(u64_digit)]
+ #[inline]
+ fn sub_assign(&mut self, other: u64) {
+ sub2(&mut self.data[..], &[other as BigDigit]);
+ self.normalize();
+ }
+}
+
+impl Sub<BigUint> for u64 {
+ type Output = BigUint;
+
+ #[cfg(not(u64_digit))]
+ #[inline]
+ fn sub(self, mut other: BigUint) -> BigUint {
+ while other.data.len() < 2 {
+ other.data.push(0);
+ }
+
+ let (hi, lo) = big_digit::from_doublebigdigit(self);
+ sub2rev(&[lo, hi], &mut other.data[..]);
+ other.normalized()
+ }
+
+ #[cfg(u64_digit)]
+ #[inline]
+ fn sub(self, mut other: BigUint) -> BigUint {
+ if other.data.is_empty() {
+ other.data.push(self);
+ } else {
+ sub2rev(&[self], &mut other.data[..]);
+ }
+ other.normalized()
+ }
+}
+
+impl Sub<u128> for BigUint {
+ type Output = BigUint;
+
+ #[inline]
+ fn sub(mut self, other: u128) -> BigUint {
+ self -= other;
+ self
+ }
+}
+
+impl SubAssign<u128> for BigUint {
+ #[cfg(not(u64_digit))]
+ #[inline]
+ fn sub_assign(&mut self, other: u128) {
+ let (a, b, c, d) = u32_from_u128(other);
+ sub2(&mut self.data[..], &[d, c, b, a]);
+ self.normalize();
+ }
+
+ #[cfg(u64_digit)]
+ #[inline]
+ fn sub_assign(&mut self, other: u128) {
+ let (hi, lo) = big_digit::from_doublebigdigit(other);
+ sub2(&mut self.data[..], &[lo, hi]);
+ self.normalize();
+ }
+}
+
+impl Sub<BigUint> for u128 {
+ type Output = BigUint;
+
+ #[cfg(not(u64_digit))]
+ #[inline]
+ fn sub(self, mut other: BigUint) -> BigUint {
+ while other.data.len() < 4 {
+ other.data.push(0);
+ }
+
+ let (a, b, c, d) = u32_from_u128(self);
+ sub2rev(&[d, c, b, a], &mut other.data[..]);
+ other.normalized()
+ }
+
+ #[cfg(u64_digit)]
+ #[inline]
+ fn sub(self, mut other: BigUint) -> BigUint {
+ while other.data.len() < 2 {
+ other.data.push(0);
+ }
+
+ let (hi, lo) = big_digit::from_doublebigdigit(self);
+ sub2rev(&[lo, hi], &mut other.data[..]);
+ other.normalized()
+ }
+}
+
+impl CheckedSub for BigUint {
+ #[inline]
+ fn checked_sub(&self, v: &BigUint) -> Option<BigUint> {
+ match self.cmp(v) {
+ Less => None,
+ Equal => Some(Zero::zero()),
+ Greater => Some(self.sub(v)),
+ }
+ }
+}
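+
+// An illustrative check of the three `checked_sub` arms:
+#[test]
+fn test_checked_sub_sketch() {
+ let small = BigUint::from(7u32);
+ let big = BigUint::from(10u32);
+ assert_eq!(big.checked_sub(&small), Some(BigUint::from(3u32)));
+ assert_eq!(small.checked_sub(&small), Some(BigUint::zero()));
+ assert_eq!(small.checked_sub(&big), None);
+}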