Diffstat (limited to 'third_party/rust/num-bigint/src')
-rw-r--r--  third_party/rust/num-bigint/src/algorithms.rs   789
-rw-r--r--  third_party/rust/num-bigint/src/bigint.rs      3084
-rw-r--r--  third_party/rust/num-bigint/src/bigrand.rs      218
-rw-r--r--  third_party/rust/num-bigint/src/biguint.rs     3088
-rw-r--r--  third_party/rust/num-bigint/src/lib.rs          206
-rw-r--r--  third_party/rust/num-bigint/src/macros.rs       445
-rw-r--r--  third_party/rust/num-bigint/src/monty.rs        129
7 files changed, 7959 insertions, 0 deletions
diff --git a/third_party/rust/num-bigint/src/algorithms.rs b/third_party/rust/num-bigint/src/algorithms.rs
new file mode 100644
index 0000000000..26f29b8154
--- /dev/null
+++ b/third_party/rust/num-bigint/src/algorithms.rs
@@ -0,0 +1,789 @@
+use std::borrow::Cow;
+use std::cmp;
+use std::cmp::Ordering::{self, Equal, Greater, Less};
+use std::iter::repeat;
+use std::mem;
+use traits;
+use traits::{One, Zero};
+
+use biguint::BigUint;
+
+use bigint::BigInt;
+use bigint::Sign;
+use bigint::Sign::{Minus, NoSign, Plus};
+
+use big_digit::{self, BigDigit, DoubleBigDigit, SignedDoubleBigDigit};
+
+// Generic functions for add/subtract/multiply with carry/borrow:
+
+// Add with carry:
+#[inline]
+fn adc(a: BigDigit, b: BigDigit, acc: &mut DoubleBigDigit) -> BigDigit {
+ *acc += DoubleBigDigit::from(a);
+ *acc += DoubleBigDigit::from(b);
+ let lo = *acc as BigDigit;
+ *acc >>= big_digit::BITS;
+ lo
+}
+
+// Subtract with borrow:
+#[inline]
+fn sbb(a: BigDigit, b: BigDigit, acc: &mut SignedDoubleBigDigit) -> BigDigit {
+ *acc += SignedDoubleBigDigit::from(a);
+ *acc -= SignedDoubleBigDigit::from(b);
+ let lo = *acc as BigDigit;
+ *acc >>= big_digit::BITS;
+ lo
+}
+
+#[inline]
+pub fn mac_with_carry(a: BigDigit, b: BigDigit, c: BigDigit, acc: &mut DoubleBigDigit) -> BigDigit {
+ *acc += DoubleBigDigit::from(a);
+ *acc += DoubleBigDigit::from(b) * DoubleBigDigit::from(c);
+ let lo = *acc as BigDigit;
+ *acc >>= big_digit::BITS;
+ lo
+}
+
+#[inline]
+pub fn mul_with_carry(a: BigDigit, b: BigDigit, acc: &mut DoubleBigDigit) -> BigDigit {
+ *acc += DoubleBigDigit::from(a) * DoubleBigDigit::from(b);
+ let lo = *acc as BigDigit;
+ *acc >>= big_digit::BITS;
+ lo
+}
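+
+// Illustrative walk-through (assuming BigDigit = u32): adding the
+// little-endian digit vectors [0xffff_ffff, 1] and [1, 2] chains adc as
+//
+//   adc(0xffff_ffff, 1, acc = 0) -> lo = 0, acc = 1   (carry out)
+//   adc(1, 2, acc = 1)           -> lo = 4, acc = 0
+//
+// yielding the digits [0, 4], i.e. 0x1_ffff_ffff + 0x2_0000_0001 == 0x4_0000_0000.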
+
+/// Divide a two-digit numerator by a one-digit divisor, returning the quotient and remainder.
+///
+/// Note: the caller must ensure that both the quotient and remainder will fit into a single digit.
+/// This is _not_ true for an arbitrary numerator/denominator.
+///
+/// (This function also matches what the x86 divide instruction does).
+#[inline]
+fn div_wide(hi: BigDigit, lo: BigDigit, divisor: BigDigit) -> (BigDigit, BigDigit) {
+ debug_assert!(hi < divisor);
+
+ let lhs = big_digit::to_doublebigdigit(hi, lo);
+ let rhs = DoubleBigDigit::from(divisor);
+ ((lhs / rhs) as BigDigit, (lhs % rhs) as BigDigit)
+}
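+
+// For example (BigDigit = u32), div_wide(1, 0, 3) divides 2^32 by 3 and
+// returns (0x5555_5555, 1), since 3 * 0x5555_5555 + 1 == 2^32; the
+// precondition hi < divisor is what guarantees the quotient fits in a
+// single digit.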
+
+pub fn div_rem_digit(mut a: BigUint, b: BigDigit) -> (BigUint, BigDigit) {
+ let mut rem = 0;
+
+ for d in a.data.iter_mut().rev() {
+ let (q, r) = div_wide(rem, *d, b);
+ *d = q;
+ rem = r;
+ }
+
+ (a.normalized(), rem)
+}
+
+pub fn rem_digit(a: &BigUint, b: BigDigit) -> BigDigit {
+ let mut rem: DoubleBigDigit = 0;
+ for &digit in a.data.iter().rev() {
+ rem = (rem << big_digit::BITS) + DoubleBigDigit::from(digit);
+ rem %= DoubleBigDigit::from(b);
+ }
+
+ rem as BigDigit
+}
+
+// Only for the Add impl:
+#[inline]
+pub fn __add2(a: &mut [BigDigit], b: &[BigDigit]) -> BigDigit {
+ debug_assert!(a.len() >= b.len());
+
+ let mut carry = 0;
+ let (a_lo, a_hi) = a.split_at_mut(b.len());
+
+ for (a, b) in a_lo.iter_mut().zip(b) {
+ *a = adc(*a, *b, &mut carry);
+ }
+
+ if carry != 0 {
+ for a in a_hi {
+ *a = adc(*a, 0, &mut carry);
+ if carry == 0 {
+ break;
+ }
+ }
+ }
+
+ carry as BigDigit
+}
+
+/// Two argument addition of raw slices:
+/// a += b
+///
+/// The caller _must_ ensure that a is big enough to store the result - typically this means
+/// resizing a to max(a.len(), b.len()) + 1, to fit a possible carry.
+pub fn add2(a: &mut [BigDigit], b: &[BigDigit]) {
+ let carry = __add2(a, b);
+
+ debug_assert!(carry == 0);
+}
+
+pub fn sub2(a: &mut [BigDigit], b: &[BigDigit]) {
+ let mut borrow = 0;
+
+ let len = cmp::min(a.len(), b.len());
+ let (a_lo, a_hi) = a.split_at_mut(len);
+ let (b_lo, b_hi) = b.split_at(len);
+
+ for (a, b) in a_lo.iter_mut().zip(b_lo) {
+ *a = sbb(*a, *b, &mut borrow);
+ }
+
+ if borrow != 0 {
+ for a in a_hi {
+ *a = sbb(*a, 0, &mut borrow);
+ if borrow == 0 {
+ break;
+ }
+ }
+ }
+
+ // note: we're _required_ to fail on underflow
+ assert!(
+ borrow == 0 && b_hi.iter().all(|x| *x == 0),
+ "Cannot subtract b from a because b is larger than a."
+ );
+}
+
+// Only for the Sub impl. `a` and `b` must have same length.
+#[inline]
+pub fn __sub2rev(a: &[BigDigit], b: &mut [BigDigit]) -> BigDigit {
+ debug_assert!(b.len() == a.len());
+
+ let mut borrow = 0;
+
+ for (ai, bi) in a.iter().zip(b) {
+ *bi = sbb(*ai, *bi, &mut borrow);
+ }
+
+ borrow as BigDigit
+}
+
+pub fn sub2rev(a: &[BigDigit], b: &mut [BigDigit]) {
+ debug_assert!(b.len() >= a.len());
+
+ let len = cmp::min(a.len(), b.len());
+ let (a_lo, a_hi) = a.split_at(len);
+ let (b_lo, b_hi) = b.split_at_mut(len);
+
+ let borrow = __sub2rev(a_lo, b_lo);
+
+ assert!(a_hi.is_empty());
+
+ // note: we're _required_ to fail on underflow
+ assert!(
+ borrow == 0 && b_hi.iter().all(|x| *x == 0),
+ "Cannot subtract b from a because b is larger than a."
+ );
+}
+
+pub fn sub_sign(a: &[BigDigit], b: &[BigDigit]) -> (Sign, BigUint) {
+ // Normalize:
+ let a = &a[..a.iter().rposition(|&x| x != 0).map_or(0, |i| i + 1)];
+ let b = &b[..b.iter().rposition(|&x| x != 0).map_or(0, |i| i + 1)];
+
+ match cmp_slice(a, b) {
+ Greater => {
+ let mut a = a.to_vec();
+ sub2(&mut a, b);
+ (Plus, BigUint::new(a))
+ }
+ Less => {
+ let mut b = b.to_vec();
+ sub2(&mut b, a);
+ (Minus, BigUint::new(b))
+ }
+ _ => (NoSign, Zero::zero()),
+ }
+}
+
+/// Three argument multiply accumulate:
+/// acc += b * c
+pub fn mac_digit(acc: &mut [BigDigit], b: &[BigDigit], c: BigDigit) {
+ if c == 0 {
+ return;
+ }
+
+ let mut carry = 0;
+ let (a_lo, a_hi) = acc.split_at_mut(b.len());
+
+ for (a, &b) in a_lo.iter_mut().zip(b) {
+ *a = mac_with_carry(*a, b, c, &mut carry);
+ }
+
+ let mut a = a_hi.iter_mut();
+ while carry != 0 {
+ let a = a.next().expect("carry overflow during multiplication!");
+ *a = adc(*a, 0, &mut carry);
+ }
+}
+
+/// Three argument multiply accumulate:
+/// acc += b * c
+fn mac3(acc: &mut [BigDigit], b: &[BigDigit], c: &[BigDigit]) {
+ let (x, y) = if b.len() < c.len() { (b, c) } else { (c, b) };
+
+ // We use three algorithms for different input sizes.
+ //
+ // - For small inputs, long multiplication is fastest.
+ // - Next we use Karatsuba multiplication (Toom-2), which we have optimized
+ // to avoid unnecessary allocations for intermediate values.
+ // - For the largest inputs we use Toom-3, which better optimizes the
+ // number of operations, but uses more temporary allocations.
+ //
+ // The thresholds are somewhat arbitrary, chosen by evaluating the results
+ // of `cargo bench --bench bigint multiply`.
+
+ if x.len() <= 32 {
+ // Long multiplication:
+ for (i, xi) in x.iter().enumerate() {
+ mac_digit(&mut acc[i..], y, *xi);
+ }
+ } else if x.len() <= 256 {
+ /*
+ * Karatsuba multiplication:
+ *
+ * The idea is that we break x and y up into two smaller numbers that each have about half
+ * as many digits, like so (note that multiplying by b is just a shift):
+ *
+ * x = x0 + x1 * b
+ * y = y0 + y1 * b
+ *
+ * With some algebra, we can compute x * y with three smaller products, where the inputs to
+ * each of the smaller products have only about half as many digits as x and y:
+ *
+ * x * y = (x0 + x1 * b) * (y0 + y1 * b)
+ *
+ * x * y = x0 * y0
+ * + x0 * y1 * b
+ * + x1 * y0 * b
+ * + x1 * y1 * b^2
+ *
+ * Let p0 = x0 * y0 and p2 = x1 * y1:
+ *
+ * x * y = p0
+ * + (x0 * y1 + x1 * y0) * b
+ * + p2 * b^2
+ *
+ * The real trick is that middle term:
+ *
+ * x0 * y1 + x1 * y0
+ *
+ * = x0 * y1 + x1 * y0 - p0 + p0 - p2 + p2
+ *
+ * = x0 * y1 + x1 * y0 - x0 * y0 - x1 * y1 + p0 + p2
+ *
+     * Now we factor the expression:
+ *
+ * = -(x0 * y0 - x0 * y1 - x1 * y0 + x1 * y1) + p0 + p2
+ *
+ * = -((x1 - x0) * (y1 - y0)) + p0 + p2
+ *
+ * Let p1 = (x1 - x0) * (y1 - y0), and substitute back into our original formula:
+ *
+ * x * y = p0
+ * + (p0 + p2 - p1) * b
+ * + p2 * b^2
+ *
+ * Where the three intermediate products are:
+ *
+ * p0 = x0 * y0
+ * p1 = (x1 - x0) * (y1 - y0)
+ * p2 = x1 * y1
+ *
+ * In doing the computation, we take great care to avoid unnecessary temporary variables
+ * (since creating a BigUint requires a heap allocation): thus, we rearrange the formula a
+ * bit so we can use the same temporary variable for all the intermediate products:
+ *
+ * x * y = p2 * b^2 + p2 * b
+ * + p0 * b + p0
+ * - p1 * b
+ *
+ * The other trick we use is instead of doing explicit shifts, we slice acc at the
+ * appropriate offset when doing the add.
+ */
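+
+        /*
+         * Worked example (illustrative only, in base 10 for readability):
+         * for x = 1234 and y = 5678 split at b = 2 digits,
+         *
+         *   x0 = 34, x1 = 12, y0 = 78, y1 = 56
+         *   p0 = 34 * 78 = 2652
+         *   p2 = 12 * 56 = 672
+         *   p1 = (12 - 34) * (56 - 78) = 484
+         *
+         * x * y = 2652 + (2652 + 672 - 484) * 100 + 672 * 10000 = 7006652,
+         * which matches 1234 * 5678.
+         */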
+
+ /*
+ * When x is smaller than y, it's significantly faster to pick b such that x is split in
+ * half, not y:
+ */
+ let b = x.len() / 2;
+ let (x0, x1) = x.split_at(b);
+ let (y0, y1) = y.split_at(b);
+
+ /*
+ * We reuse the same BigUint for all the intermediate multiplies and have to size p
+     * appropriately here: x1.len() >= x0.len() and y1.len() >= y0.len():
+ */
+ let len = x1.len() + y1.len() + 1;
+ let mut p = BigUint { data: vec![0; len] };
+
+ // p2 = x1 * y1
+ mac3(&mut p.data[..], x1, y1);
+
+ // Not required, but the adds go faster if we drop any unneeded 0s from the end:
+ p.normalize();
+
+ add2(&mut acc[b..], &p.data[..]);
+ add2(&mut acc[b * 2..], &p.data[..]);
+
+ // Zero out p before the next multiply:
+ p.data.truncate(0);
+ p.data.extend(repeat(0).take(len));
+
+ // p0 = x0 * y0
+ mac3(&mut p.data[..], x0, y0);
+ p.normalize();
+
+ add2(&mut acc[..], &p.data[..]);
+ add2(&mut acc[b..], &p.data[..]);
+
+ // p1 = (x1 - x0) * (y1 - y0)
+ // We do this one last, since it may be negative and acc can't ever be negative:
+ let (j0_sign, j0) = sub_sign(x1, x0);
+ let (j1_sign, j1) = sub_sign(y1, y0);
+
+ match j0_sign * j1_sign {
+ Plus => {
+ p.data.truncate(0);
+ p.data.extend(repeat(0).take(len));
+
+ mac3(&mut p.data[..], &j0.data[..], &j1.data[..]);
+ p.normalize();
+
+ sub2(&mut acc[b..], &p.data[..]);
+ }
+ Minus => {
+ mac3(&mut acc[b..], &j0.data[..], &j1.data[..]);
+ }
+ NoSign => (),
+ }
+ } else {
+ // Toom-3 multiplication:
+ //
+ // Toom-3 is like Karatsuba above, but dividing the inputs into three parts.
+ // Both are instances of Toom-Cook, using `k=3` and `k=2` respectively.
+ //
+        // The general idea is to treat the digits of the large integers as
+        // coefficients of polynomials of a certain degree, and to determine
+        // the digits of the product via interpolation of the polynomial
+        // product.
+ let i = y.len() / 3 + 1;
+
+ let x0_len = cmp::min(x.len(), i);
+ let x1_len = cmp::min(x.len() - x0_len, i);
+
+ let y0_len = i;
+ let y1_len = cmp::min(y.len() - y0_len, i);
+
+        // Break x and y into three parts, representing an order-two polynomial.
+ // t is chosen to be the size of a digit so we can use faster shifts
+ // in place of multiplications.
+ //
+ // x(t) = x2*t^2 + x1*t + x0
+ let x0 = BigInt::from_slice(Plus, &x[..x0_len]);
+ let x1 = BigInt::from_slice(Plus, &x[x0_len..x0_len + x1_len]);
+ let x2 = BigInt::from_slice(Plus, &x[x0_len + x1_len..]);
+
+ // y(t) = y2*t^2 + y1*t + y0
+ let y0 = BigInt::from_slice(Plus, &y[..y0_len]);
+ let y1 = BigInt::from_slice(Plus, &y[y0_len..y0_len + y1_len]);
+ let y2 = BigInt::from_slice(Plus, &y[y0_len + y1_len..]);
+
+ // Let w(t) = x(t) * y(t)
+ //
+ // This gives us the following order-4 polynomial.
+ //
+ // w(t) = w4*t^4 + w3*t^3 + w2*t^2 + w1*t + w0
+ //
+        // We need to find the coefficients w4, w3, w2, w1 and w0. Instead
+        // of multiplying x and y outright, we can evaluate w at 5 points,
+        // since a polynomial of degree n is uniquely determined by (n + 1)
+        // points.
+ //
+        // The choice of evaluation points is arbitrary; we use the
+        // following:
+ //
+ // w(t) at t = 0, 1, -1, -2 and inf
+ //
+ // The values for w(t) in terms of x(t)*y(t) at these points are:
+ //
+ // let a = w(0) = x0 * y0
+ // let b = w(1) = (x2 + x1 + x0) * (y2 + y1 + y0)
+ // let c = w(-1) = (x2 - x1 + x0) * (y2 - y1 + y0)
+ // let d = w(-2) = (4*x2 - 2*x1 + x0) * (4*y2 - 2*y1 + y0)
+ // let e = w(inf) = x2 * y2 as t -> inf
+
+ // x0 + x2, avoiding temporaries
+ let p = &x0 + &x2;
+
+ // y0 + y2, avoiding temporaries
+ let q = &y0 + &y2;
+
+ // x2 - x1 + x0, avoiding temporaries
+ let p2 = &p - &x1;
+
+ // y2 - y1 + y0, avoiding temporaries
+ let q2 = &q - &y1;
+
+ // w(0)
+ let r0 = &x0 * &y0;
+
+ // w(inf)
+ let r4 = &x2 * &y2;
+
+ // w(1)
+ let r1 = (p + x1) * (q + y1);
+
+ // w(-1)
+ let r2 = &p2 * &q2;
+
+ // w(-2)
+ let r3 = ((p2 + x2) * 2 - x0) * ((q2 + y2) * 2 - y0);
+
+ // Evaluating these points gives us the following system of linear equations.
+ //
+ // 0 0 0 0 1 | a
+ // 1 1 1 1 1 | b
+ // 1 -1 1 -1 1 | c
+ // 16 -8 4 -2 1 | d
+ // 1 0 0 0 0 | e
+ //
+ // The solved equation (after gaussian elimination or similar)
+ // in terms of its coefficients:
+ //
+ // w0 = w(0)
+        // w1 = w(0)/2 + w(1)/3 - w(-1) + w(-2)/6 - 2*w(inf)
+        // w2 = -w(0) + w(1)/2 + w(-1)/2 - w(inf)
+        // w3 = -w(0)/2 + w(1)/6 + w(-1)/2 - w(-2)/6 + 2*w(inf)
+ // w4 = w(inf)
+ //
+ // This particular sequence is given by Bodrato and is an interpolation
+ // of the above equations.
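+        //
+        // Substituting r0 = w(0), r1 = w(1), r2 = w(-1), r3 = w(-2) and
+        // r4 = w(inf) into the steps below and expanding confirms that the
+        // final values satisfy comp1 = w1, comp2 = w2 and comp3 = w3.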
+ let mut comp3: BigInt = (r3 - &r1) / 3;
+ let mut comp1: BigInt = (r1 - &r2) / 2;
+ let mut comp2: BigInt = r2 - &r0;
+ comp3 = (&comp2 - comp3) / 2 + &r4 * 2;
+ comp2 = comp2 + &comp1 - &r4;
+ comp1 = comp1 - &comp3;
+
+ // Recomposition. The coefficients of the polynomial are now known.
+ //
+ // Evaluate at w(t) where t is our given base to get the result.
+ let result = r0
+ + (comp1 << 32 * i)
+ + (comp2 << 2 * 32 * i)
+ + (comp3 << 3 * 32 * i)
+ + (r4 << 4 * 32 * i);
+ let result_pos = result.to_biguint().unwrap();
+ add2(&mut acc[..], &result_pos.data);
+ }
+}
+
+pub fn mul3(x: &[BigDigit], y: &[BigDigit]) -> BigUint {
+ let len = x.len() + y.len() + 1;
+ let mut prod = BigUint { data: vec![0; len] };
+
+ mac3(&mut prod.data[..], x, y);
+ prod.normalized()
+}
+
+pub fn scalar_mul(a: &mut [BigDigit], b: BigDigit) -> BigDigit {
+ let mut carry = 0;
+ for a in a.iter_mut() {
+ *a = mul_with_carry(*a, b, &mut carry);
+ }
+ carry as BigDigit
+}
+
+pub fn div_rem(mut u: BigUint, mut d: BigUint) -> (BigUint, BigUint) {
+ if d.is_zero() {
+ panic!()
+ }
+ if u.is_zero() {
+ return (Zero::zero(), Zero::zero());
+ }
+
+ if d.data.len() == 1 {
+ if d.data == [1] {
+ return (u, Zero::zero());
+ }
+ let (div, rem) = div_rem_digit(u, d.data[0]);
+ // reuse d
+ d.data.clear();
+ d += rem;
+ return (div, d);
+ }
+
+ // Required or the q_len calculation below can underflow:
+ match u.cmp(&d) {
+ Less => return (Zero::zero(), u),
+ Equal => {
+ u.set_one();
+ return (u, Zero::zero());
+ }
+ Greater => {} // Do nothing
+ }
+
+    // This algorithm is from Knuth, TAOCP vol 2 section 4.3.1, algorithm D:
+ //
+ // First, normalize the arguments so the highest bit in the highest digit of the divisor is
+ // set: the main loop uses the highest digit of the divisor for generating guesses, so we
+ // want it to be the largest number we can efficiently divide by.
+ //
+ let shift = d.data.last().unwrap().leading_zeros() as usize;
+ let (q, r) = if shift == 0 {
+ // no need to clone d
+ div_rem_core(u, &d)
+ } else {
+ div_rem_core(u << shift, &(d << shift))
+ };
+ // renormalize the remainder
+ (q, r >> shift)
+}
+
+pub fn div_rem_ref(u: &BigUint, d: &BigUint) -> (BigUint, BigUint) {
+ if d.is_zero() {
+ panic!()
+ }
+ if u.is_zero() {
+ return (Zero::zero(), Zero::zero());
+ }
+
+ if d.data.len() == 1 {
+ if d.data == [1] {
+ return (u.clone(), Zero::zero());
+ }
+
+ let (div, rem) = div_rem_digit(u.clone(), d.data[0]);
+ return (div, rem.into());
+ }
+
+ // Required or the q_len calculation below can underflow:
+ match u.cmp(d) {
+ Less => return (Zero::zero(), u.clone()),
+ Equal => return (One::one(), Zero::zero()),
+ Greater => {} // Do nothing
+ }
+
+    // This algorithm is from Knuth, TAOCP vol 2 section 4.3.1, algorithm D:
+ //
+ // First, normalize the arguments so the highest bit in the highest digit of the divisor is
+ // set: the main loop uses the highest digit of the divisor for generating guesses, so we
+ // want it to be the largest number we can efficiently divide by.
+ //
+ let shift = d.data.last().unwrap().leading_zeros() as usize;
+
+ let (q, r) = if shift == 0 {
+ // no need to clone d
+ div_rem_core(u.clone(), d)
+ } else {
+ div_rem_core(u << shift, &(d << shift))
+ };
+ // renormalize the remainder
+ (q, r >> shift)
+}
+
+/// An implementation of Knuth, TAOCP vol 2 section 4.3.1, algorithm D
+///
+/// # Correctness
+///
+/// This function requires the following conditions to run correctly and/or effectively
+///
+/// - `a > b`
+/// - `b.data.len() > 1`
+/// - `b.data.last().unwrap().leading_zeros() == 0`
+fn div_rem_core(mut a: BigUint, b: &BigUint) -> (BigUint, BigUint) {
+ // The algorithm works by incrementally calculating "guesses", q0, for part of the
+ // remainder. Once we have any number q0 such that q0 * b <= a, we can set
+ //
+ // q += q0
+ // a -= q0 * b
+ //
+ // and then iterate until a < b. Then, (q, a) will be our desired quotient and remainder.
+ //
+ // q0, our guess, is calculated by dividing the last few digits of a by the last digit of b
+ // - this should give us a guess that is "close" to the actual quotient, but is possibly
+ // greater than the actual quotient. If q0 * b > a, we simply use iterated subtraction
+ // until we have a guess such that q0 * b <= a.
+ //
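+    //
+    // For example, in base 10 with divisor b = 49 (so bn = 4), estimating
+    // 450 / 49 from 45 / 4 gives the guess q0 = 11; since 11 * 49 = 539 > 450,
+    // the loop below corrects the guess by subtraction down to q0 = 9
+    // (9 * 49 = 441 <= 450).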
+
+ let bn = *b.data.last().unwrap();
+ let q_len = a.data.len() - b.data.len() + 1;
+ let mut q = BigUint {
+ data: vec![0; q_len],
+ };
+
+ // We reuse the same temporary to avoid hitting the allocator in our inner loop - this is
+ // sized to hold a0 (in the common case; if a particular digit of the quotient is zero a0
+ // can be bigger).
+ //
+ let mut tmp = BigUint {
+ data: Vec::with_capacity(2),
+ };
+
+ for j in (0..q_len).rev() {
+ /*
+ * When calculating our next guess q0, we don't need to consider the digits below j
+ * + b.data.len() - 1: we're guessing digit j of the quotient (i.e. q0 << j) from
+         * digit bn of the divisor (i.e. bn << (b.data.len() - 1)) - so the product of those
+ * two numbers will be zero in all digits up to (j + b.data.len() - 1).
+ */
+ let offset = j + b.data.len() - 1;
+ if offset >= a.data.len() {
+ continue;
+ }
+
+ /* just avoiding a heap allocation: */
+ let mut a0 = tmp;
+ a0.data.truncate(0);
+ a0.data.extend(a.data[offset..].iter().cloned());
+
+ /*
+ * q0 << j * big_digit::BITS is our actual quotient estimate - we do the shifts
+ * implicitly at the end, when adding and subtracting to a and q. Not only do we
+ * save the cost of the shifts, the rest of the arithmetic gets to work with
+ * smaller numbers.
+ */
+ let (mut q0, _) = div_rem_digit(a0, bn);
+ let mut prod = b * &q0;
+
+ while cmp_slice(&prod.data[..], &a.data[j..]) == Greater {
+ let one: BigUint = One::one();
+ q0 = q0 - one;
+ prod = prod - b;
+ }
+
+ add2(&mut q.data[j..], &q0.data[..]);
+ sub2(&mut a.data[j..], &prod.data[..]);
+ a.normalize();
+
+ tmp = q0;
+ }
+
+ debug_assert!(&a < b);
+
+ (q.normalized(), a)
+}
+
+/// Find last set bit
+/// fls(0) == 0, fls(u32::MAX) == 32
+pub fn fls<T: traits::PrimInt>(v: T) -> usize {
+ mem::size_of::<T>() * 8 - v.leading_zeros() as usize
+}
+
+pub fn ilog2<T: traits::PrimInt>(v: T) -> usize {
+ fls(v) - 1
+}
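+
+// e.g. fls(0b100u32) == 3 and ilog2(0b100u32) == 2. Note that ilog2(0)
+// would underflow, so it must only be called with non-zero values.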
+
+#[inline]
+pub fn biguint_shl(n: Cow<BigUint>, bits: usize) -> BigUint {
+ let n_unit = bits / big_digit::BITS;
+ let mut data = match n_unit {
+ 0 => n.into_owned().data,
+ _ => {
+ let len = n_unit + n.data.len() + 1;
+ let mut data = Vec::with_capacity(len);
+ data.extend(repeat(0).take(n_unit));
+ data.extend(n.data.iter().cloned());
+ data
+ }
+ };
+
+ let n_bits = bits % big_digit::BITS;
+ if n_bits > 0 {
+ let mut carry = 0;
+ for elem in data[n_unit..].iter_mut() {
+ let new_carry = *elem >> (big_digit::BITS - n_bits);
+ *elem = (*elem << n_bits) | carry;
+ carry = new_carry;
+ }
+ if carry != 0 {
+ data.push(carry);
+ }
+ }
+
+ BigUint::new(data)
+}
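+
+// For example (BigDigit = u32), shifting [0x8000_0000] left by 33 bits
+// first prepends one zero digit (n_unit = 33 / 32 == 1) and then shifts
+// the remaining bit once more (n_bits = 1), producing [0, 0, 1] == 2^64.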
+
+#[inline]
+pub fn biguint_shr(n: Cow<BigUint>, bits: usize) -> BigUint {
+ let n_unit = bits / big_digit::BITS;
+ if n_unit >= n.data.len() {
+ return Zero::zero();
+ }
+ let mut data = match n {
+ Cow::Borrowed(n) => n.data[n_unit..].to_vec(),
+ Cow::Owned(mut n) => {
+ n.data.drain(..n_unit);
+ n.data
+ }
+ };
+
+ let n_bits = bits % big_digit::BITS;
+ if n_bits > 0 {
+ let mut borrow = 0;
+ for elem in data.iter_mut().rev() {
+ let new_borrow = *elem << (big_digit::BITS - n_bits);
+ *elem = (*elem >> n_bits) | borrow;
+ borrow = new_borrow;
+ }
+ }
+
+ BigUint::new(data)
+}
+
+pub fn cmp_slice(a: &[BigDigit], b: &[BigDigit]) -> Ordering {
+ debug_assert!(a.last() != Some(&0));
+ debug_assert!(b.last() != Some(&0));
+
+ let (a_len, b_len) = (a.len(), b.len());
+ if a_len < b_len {
+ return Less;
+ }
+ if a_len > b_len {
+ return Greater;
+ }
+
+ for (&ai, &bi) in a.iter().rev().zip(b.iter().rev()) {
+ if ai < bi {
+ return Less;
+ }
+ if ai > bi {
+ return Greater;
+ }
+ }
+ return Equal;
+}
+
+#[cfg(test)]
+mod algorithm_tests {
+ use big_digit::BigDigit;
+ use traits::Num;
+ use Sign::Plus;
+ use {BigInt, BigUint};
+
+ #[test]
+ fn test_sub_sign() {
+ use super::sub_sign;
+
+ fn sub_sign_i(a: &[BigDigit], b: &[BigDigit]) -> BigInt {
+ let (sign, val) = sub_sign(a, b);
+ BigInt::from_biguint(sign, val)
+ }
+
+ let a = BigUint::from_str_radix("265252859812191058636308480000000", 10).unwrap();
+ let b = BigUint::from_str_radix("26525285981219105863630848000000", 10).unwrap();
+ let a_i = BigInt::from_biguint(Plus, a.clone());
+ let b_i = BigInt::from_biguint(Plus, b.clone());
+
+ assert_eq!(sub_sign_i(&a.data[..], &b.data[..]), &a_i - &b_i);
+ assert_eq!(sub_sign_i(&b.data[..], &a.data[..]), &b_i - &a_i);
+ }
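+
+    #[test]
+    fn test_mul3() {
+        // Illustrative check (added here as a sketch): the low-level mul3
+        // on raw digit slices should agree with the high-level BigUint
+        // multiplication. It reuses the decimal constants from
+        // test_sub_sign above.
+        use super::mul3;
+
+        let a = BigUint::from_str_radix("265252859812191058636308480000000", 10).unwrap();
+        let b = BigUint::from_str_radix("26525285981219105863630848000000", 10).unwrap();
+
+        assert_eq!(mul3(&a.data[..], &b.data[..]), &a * &b);
+    }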
+}
diff --git a/third_party/rust/num-bigint/src/bigint.rs b/third_party/rust/num-bigint/src/bigint.rs
new file mode 100644
index 0000000000..93c72be6af
--- /dev/null
+++ b/third_party/rust/num-bigint/src/bigint.rs
@@ -0,0 +1,3084 @@
+#[allow(deprecated, unused_imports)]
+use std::ascii::AsciiExt;
+use std::cmp::Ordering::{self, Equal, Greater, Less};
+use std::default::Default;
+use std::fmt;
+use std::iter::{Product, Sum};
+use std::mem;
+use std::ops::{
+ Add, AddAssign, BitAnd, BitAndAssign, BitOr, BitOrAssign, BitXor, BitXorAssign, Div, DivAssign,
+ Mul, MulAssign, Neg, Not, Rem, RemAssign, Shl, ShlAssign, Shr, ShrAssign, Sub, SubAssign,
+};
+use std::str::{self, FromStr};
+#[cfg(has_i128)]
+use std::{i128, u128};
+use std::{i64, u64};
+
+#[cfg(feature = "serde")]
+use serde;
+
+use integer::{Integer, Roots};
+use traits::{
+ CheckedAdd, CheckedDiv, CheckedMul, CheckedSub, FromPrimitive, Num, One, Pow, Signed,
+ ToPrimitive, Zero,
+};
+
+use self::Sign::{Minus, NoSign, Plus};
+
+use super::ParseBigIntError;
+use big_digit::{self, BigDigit, DoubleBigDigit};
+use biguint;
+use biguint::to_str_radix_reversed;
+use biguint::{BigUint, IntDigits};
+
+use IsizePromotion;
+use UsizePromotion;
+
+#[cfg(feature = "quickcheck")]
+use quickcheck::{Arbitrary, Gen};
+
+/// A Sign is one of the components of a `BigInt`: it indicates whether
+/// the value is negative, zero, or positive.
+#[derive(PartialEq, PartialOrd, Eq, Ord, Copy, Clone, Debug, Hash)]
+pub enum Sign {
+ Minus,
+ NoSign,
+ Plus,
+}
+
+impl Neg for Sign {
+ type Output = Sign;
+
+ /// Negate Sign value.
+ #[inline]
+ fn neg(self) -> Sign {
+ match self {
+ Minus => Plus,
+ NoSign => NoSign,
+ Plus => Minus,
+ }
+ }
+}
+
+impl Mul<Sign> for Sign {
+ type Output = Sign;
+
+ #[inline]
+ fn mul(self, other: Sign) -> Sign {
+ match (self, other) {
+ (NoSign, _) | (_, NoSign) => NoSign,
+ (Plus, Plus) | (Minus, Minus) => Plus,
+ (Plus, Minus) | (Minus, Plus) => Minus,
+ }
+ }
+}
+
+#[cfg(feature = "serde")]
+impl serde::Serialize for Sign {
+ fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
+ where
+ S: serde::Serializer,
+ {
+ // Note: do not change the serialization format, or it may break
+ // forward and backward compatibility of serialized data!
+ match *self {
+ Sign::Minus => (-1i8).serialize(serializer),
+ Sign::NoSign => 0i8.serialize(serializer),
+ Sign::Plus => 1i8.serialize(serializer),
+ }
+ }
+}
+
+#[cfg(feature = "serde")]
+impl<'de> serde::Deserialize<'de> for Sign {
+ fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
+ where
+ D: serde::Deserializer<'de>,
+ {
+ use serde::de::Error;
+ use serde::de::Unexpected;
+
+ let sign: i8 = serde::Deserialize::deserialize(deserializer)?;
+ match sign {
+ -1 => Ok(Sign::Minus),
+ 0 => Ok(Sign::NoSign),
+ 1 => Ok(Sign::Plus),
+ _ => Err(D::Error::invalid_value(
+ Unexpected::Signed(sign.into()),
+ &"a sign of -1, 0, or 1",
+ )),
+ }
+ }
+}
+
+/// A big signed integer type.
+#[derive(Clone, Debug, Hash)]
+pub struct BigInt {
+ sign: Sign,
+ data: BigUint,
+}
+
+#[cfg(feature = "quickcheck")]
+impl Arbitrary for BigInt {
+ fn arbitrary<G: Gen>(g: &mut G) -> Self {
+ let positive = bool::arbitrary(g);
+ let sign = if positive { Sign::Plus } else { Sign::Minus };
+ Self::from_biguint(sign, BigUint::arbitrary(g))
+ }
+
+ #[allow(bare_trait_objects)] // `dyn` needs Rust 1.27 to parse, even when cfg-disabled
+ fn shrink(&self) -> Box<Iterator<Item = Self>> {
+ let sign = self.sign();
+ let unsigned_shrink = self.data.shrink();
+ Box::new(unsigned_shrink.map(move |x| BigInt::from_biguint(sign, x)))
+ }
+}
+
+/// Return the magnitude of a `BigInt`.
+///
+/// This is in a private module, pseudo pub(crate)
+#[cfg(feature = "rand")]
+pub fn magnitude(i: &BigInt) -> &BigUint {
+ &i.data
+}
+
+/// Return the owned magnitude of a `BigInt`.
+///
+/// This is in a private module, pseudo pub(crate)
+#[cfg(feature = "rand")]
+pub fn into_magnitude(i: BigInt) -> BigUint {
+ i.data
+}
+
+impl PartialEq for BigInt {
+ #[inline]
+ fn eq(&self, other: &BigInt) -> bool {
+ self.cmp(other) == Equal
+ }
+}
+
+impl Eq for BigInt {}
+
+impl PartialOrd for BigInt {
+ #[inline]
+ fn partial_cmp(&self, other: &BigInt) -> Option<Ordering> {
+ Some(self.cmp(other))
+ }
+}
+
+impl Ord for BigInt {
+ #[inline]
+ fn cmp(&self, other: &BigInt) -> Ordering {
+ let scmp = self.sign.cmp(&other.sign);
+ if scmp != Equal {
+ return scmp;
+ }
+
+ match self.sign {
+ NoSign => Equal,
+ Plus => self.data.cmp(&other.data),
+ Minus => other.data.cmp(&self.data),
+ }
+ }
+}
+
+impl Default for BigInt {
+ #[inline]
+ fn default() -> BigInt {
+ Zero::zero()
+ }
+}
+
+impl fmt::Display for BigInt {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ f.pad_integral(!self.is_negative(), "", &self.data.to_str_radix(10))
+ }
+}
+
+impl fmt::Binary for BigInt {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ f.pad_integral(!self.is_negative(), "0b", &self.data.to_str_radix(2))
+ }
+}
+
+impl fmt::Octal for BigInt {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ f.pad_integral(!self.is_negative(), "0o", &self.data.to_str_radix(8))
+ }
+}
+
+impl fmt::LowerHex for BigInt {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ f.pad_integral(!self.is_negative(), "0x", &self.data.to_str_radix(16))
+ }
+}
+
+impl fmt::UpperHex for BigInt {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ let mut s = self.data.to_str_radix(16);
+ s.make_ascii_uppercase();
+ f.pad_integral(!self.is_negative(), "0x", &s)
+ }
+}
+
+// Negation in two's complement.
+// acc must be initialized as 1 for least-significant digit.
+//
+// When negating, a carry (acc == 1) means that all the digits
+// considered to this point were zero. This means that if all the
+// digits of a negative BigInt have been considered, carry must be
+// zero as we cannot have negative zero.
+//
+// 01 -> ...f ff
+// ff -> ...f 01
+// 01 00 -> ...f ff 00
+// 01 01 -> ...f fe ff
+// 01 ff -> ...f fe 01
+// ff 00 -> ...f 01 00
+// ff 01 -> ...f 00 ff
+// ff ff -> ...f 00 01
+#[inline]
+fn negate_carry(a: BigDigit, acc: &mut DoubleBigDigit) -> BigDigit {
+ *acc += DoubleBigDigit::from(!a);
+ let lo = *acc as BigDigit;
+ *acc >>= big_digit::BITS;
+ lo
+}
+
+// !-2 = !...f fe = ...0 01 = +1
+// !-1 = !...f ff = ...0 00 = 0
+// ! 0 = !...0 00 = ...f ff = -1
+// !+1 = !...0 01 = ...f fe = -2
+impl Not for BigInt {
+ type Output = BigInt;
+
+ fn not(mut self) -> BigInt {
+ match self.sign {
+ NoSign | Plus => {
+ self.data += 1u32;
+ self.sign = Minus;
+ }
+ Minus => {
+ self.data -= 1u32;
+ self.sign = if self.data.is_zero() { NoSign } else { Plus };
+ }
+ }
+ self
+ }
+}
+
+impl<'a> Not for &'a BigInt {
+ type Output = BigInt;
+
+ fn not(self) -> BigInt {
+ match self.sign {
+ NoSign | Plus => BigInt::from_biguint(Minus, &self.data + 1u32),
+ Minus => BigInt::from_biguint(Plus, &self.data - 1u32),
+ }
+ }
+}
+
+// + 1 & -ff = ...0 01 & ...f 01 = ...0 01 = + 1
+// +ff & - 1 = ...0 ff & ...f ff = ...0 ff = +ff
+// answer is pos, has length of a
+fn bitand_pos_neg(a: &mut Vec<BigDigit>, b: &[BigDigit]) {
+ let mut carry_b = 1;
+ for (ai, &bi) in a.iter_mut().zip(b.iter()) {
+ let twos_b = negate_carry(bi, &mut carry_b);
+ *ai &= twos_b;
+ }
+ debug_assert!(b.len() > a.len() || carry_b == 0);
+}
+
+// - 1 & +ff = ...f ff & ...0 ff = ...0 ff = +ff
+// -ff & + 1 = ...f 01 & ...0 01 = ...0 01 = + 1
+// answer is pos, has length of b
+fn bitand_neg_pos(a: &mut Vec<BigDigit>, b: &[BigDigit]) {
+ let mut carry_a = 1;
+ for (ai, &bi) in a.iter_mut().zip(b.iter()) {
+ let twos_a = negate_carry(*ai, &mut carry_a);
+ *ai = twos_a & bi;
+ }
+ debug_assert!(a.len() > b.len() || carry_a == 0);
+ if a.len() > b.len() {
+ a.truncate(b.len());
+ } else if b.len() > a.len() {
+ let extra = &b[a.len()..];
+ a.extend(extra.iter().cloned());
+ }
+}
+
+// - 1 & -ff = ...f ff & ...f 01 = ...f 01 = - ff
+// -ff & - 1 = ...f 01 & ...f ff = ...f 01 = - ff
+// -ff & -fe = ...f 01 & ...f 02 = ...f 00 = -100
+// answer is neg, has length of longest with a possible carry
+fn bitand_neg_neg(a: &mut Vec<BigDigit>, b: &[BigDigit]) {
+ let mut carry_a = 1;
+ let mut carry_b = 1;
+ let mut carry_and = 1;
+ for (ai, &bi) in a.iter_mut().zip(b.iter()) {
+ let twos_a = negate_carry(*ai, &mut carry_a);
+ let twos_b = negate_carry(bi, &mut carry_b);
+ *ai = negate_carry(twos_a & twos_b, &mut carry_and);
+ }
+ debug_assert!(a.len() > b.len() || carry_a == 0);
+ debug_assert!(b.len() > a.len() || carry_b == 0);
+ if a.len() > b.len() {
+ for ai in a[b.len()..].iter_mut() {
+ let twos_a = negate_carry(*ai, &mut carry_a);
+ *ai = negate_carry(twos_a, &mut carry_and);
+ }
+ debug_assert!(carry_a == 0);
+ } else if b.len() > a.len() {
+ let extra = &b[a.len()..];
+ a.extend(extra.iter().map(|&bi| {
+ let twos_b = negate_carry(bi, &mut carry_b);
+ negate_carry(twos_b, &mut carry_and)
+ }));
+ debug_assert!(carry_b == 0);
+ }
+ if carry_and != 0 {
+ a.push(1);
+ }
+}
+
+forward_val_val_binop!(impl BitAnd for BigInt, bitand);
+forward_ref_val_binop!(impl BitAnd for BigInt, bitand);
+
+// do not use forward_ref_ref_binop_commutative! for bitand so that we can
+// clone as needed, avoiding over-allocation
+impl<'a, 'b> BitAnd<&'b BigInt> for &'a BigInt {
+ type Output = BigInt;
+
+ #[inline]
+ fn bitand(self, other: &BigInt) -> BigInt {
+ match (self.sign, other.sign) {
+ (NoSign, _) | (_, NoSign) => BigInt::from_slice(NoSign, &[]),
+ (Plus, Plus) => BigInt::from_biguint(Plus, &self.data & &other.data),
+ (Plus, Minus) => self.clone() & other,
+ (Minus, Plus) => other.clone() & self,
+ (Minus, Minus) => {
+ // forward to val-ref, choosing the larger to clone
+ if self.len() >= other.len() {
+ self.clone() & other
+ } else {
+ other.clone() & self
+ }
+ }
+ }
+ }
+}
+
+impl<'a> BitAnd<&'a BigInt> for BigInt {
+ type Output = BigInt;
+
+ #[inline]
+ fn bitand(mut self, other: &BigInt) -> BigInt {
+ self &= other;
+ self
+ }
+}
+
+forward_val_assign!(impl BitAndAssign for BigInt, bitand_assign);
+
+impl<'a> BitAndAssign<&'a BigInt> for BigInt {
+ fn bitand_assign(&mut self, other: &BigInt) {
+ match (self.sign, other.sign) {
+ (NoSign, _) => {}
+ (_, NoSign) => self.assign_from_slice(NoSign, &[]),
+ (Plus, Plus) => {
+ self.data &= &other.data;
+ if self.data.is_zero() {
+ self.sign = NoSign;
+ }
+ }
+ (Plus, Minus) => {
+ bitand_pos_neg(self.digits_mut(), other.digits());
+ self.normalize();
+ }
+ (Minus, Plus) => {
+ bitand_neg_pos(self.digits_mut(), other.digits());
+ self.sign = Plus;
+ self.normalize();
+ }
+ (Minus, Minus) => {
+ bitand_neg_neg(self.digits_mut(), other.digits());
+ self.normalize();
+ }
+ }
+ }
+}
+
+// + 1 | -ff = ...0 01 | ...f 01 = ...f 01 = -ff
+// +ff | - 1 = ...0 ff | ...f ff = ...f ff = - 1
+// answer is neg, has length of b
+fn bitor_pos_neg(a: &mut Vec<BigDigit>, b: &[BigDigit]) {
+ let mut carry_b = 1;
+ let mut carry_or = 1;
+ for (ai, &bi) in a.iter_mut().zip(b.iter()) {
+ let twos_b = negate_carry(bi, &mut carry_b);
+ *ai = negate_carry(*ai | twos_b, &mut carry_or);
+ }
+ debug_assert!(b.len() > a.len() || carry_b == 0);
+ if a.len() > b.len() {
+ a.truncate(b.len());
+ } else if b.len() > a.len() {
+ let extra = &b[a.len()..];
+ a.extend(extra.iter().map(|&bi| {
+ let twos_b = negate_carry(bi, &mut carry_b);
+ negate_carry(twos_b, &mut carry_or)
+ }));
+ debug_assert!(carry_b == 0);
+ }
+ // for carry_or to be non-zero, we would need twos_b == 0
+ debug_assert!(carry_or == 0);
+}
+
+// - 1 | +ff = ...f ff | ...0 ff = ...f ff = - 1
+// -ff | + 1 = ...f 01 | ...0 01 = ...f 01 = -ff
+// answer is neg, has length of a
+fn bitor_neg_pos(a: &mut Vec<BigDigit>, b: &[BigDigit]) {
+ let mut carry_a = 1;
+ let mut carry_or = 1;
+ for (ai, &bi) in a.iter_mut().zip(b.iter()) {
+ let twos_a = negate_carry(*ai, &mut carry_a);
+ *ai = negate_carry(twos_a | bi, &mut carry_or);
+ }
+ debug_assert!(a.len() > b.len() || carry_a == 0);
+ if a.len() > b.len() {
+ for ai in a[b.len()..].iter_mut() {
+ let twos_a = negate_carry(*ai, &mut carry_a);
+ *ai = negate_carry(twos_a, &mut carry_or);
+ }
+ debug_assert!(carry_a == 0);
+ }
+ // for carry_or to be non-zero, we would need twos_a == 0
+ debug_assert!(carry_or == 0);
+}
+
+// - 1 | -ff = ...f ff | ...f 01 = ...f ff = -1
+// -ff | - 1 = ...f 01 | ...f ff = ...f ff = -1
+// answer is neg, has length of shortest
+fn bitor_neg_neg(a: &mut Vec<BigDigit>, b: &[BigDigit]) {
+ let mut carry_a = 1;
+ let mut carry_b = 1;
+ let mut carry_or = 1;
+ for (ai, &bi) in a.iter_mut().zip(b.iter()) {
+ let twos_a = negate_carry(*ai, &mut carry_a);
+ let twos_b = negate_carry(bi, &mut carry_b);
+ *ai = negate_carry(twos_a | twos_b, &mut carry_or);
+ }
+ debug_assert!(a.len() > b.len() || carry_a == 0);
+ debug_assert!(b.len() > a.len() || carry_b == 0);
+ if a.len() > b.len() {
+ a.truncate(b.len());
+ }
+ // for carry_or to be non-zero, we would need twos_a == 0 or twos_b == 0
+ debug_assert!(carry_or == 0);
+}
+
+forward_val_val_binop!(impl BitOr for BigInt, bitor);
+forward_ref_val_binop!(impl BitOr for BigInt, bitor);
+
+// do not use forward_ref_ref_binop_commutative! for bitor so that we can
+// clone as needed, avoiding over-allocation
+impl<'a, 'b> BitOr<&'b BigInt> for &'a BigInt {
+ type Output = BigInt;
+
+ #[inline]
+ fn bitor(self, other: &BigInt) -> BigInt {
+ match (self.sign, other.sign) {
+ (NoSign, _) => other.clone(),
+ (_, NoSign) => self.clone(),
+ (Plus, Plus) => BigInt::from_biguint(Plus, &self.data | &other.data),
+ (Plus, Minus) => other.clone() | self,
+ (Minus, Plus) => self.clone() | other,
+ (Minus, Minus) => {
+ // forward to val-ref, choosing the smaller to clone
+ if self.len() <= other.len() {
+ self.clone() | other
+ } else {
+ other.clone() | self
+ }
+ }
+ }
+ }
+}
+
+impl<'a> BitOr<&'a BigInt> for BigInt {
+ type Output = BigInt;
+
+ #[inline]
+ fn bitor(mut self, other: &BigInt) -> BigInt {
+ self |= other;
+ self
+ }
+}
+
+forward_val_assign!(impl BitOrAssign for BigInt, bitor_assign);
+
+impl<'a> BitOrAssign<&'a BigInt> for BigInt {
+ fn bitor_assign(&mut self, other: &BigInt) {
+ match (self.sign, other.sign) {
+ (_, NoSign) => {}
+ (NoSign, _) => self.assign_from_slice(other.sign, other.digits()),
+ (Plus, Plus) => self.data |= &other.data,
+ (Plus, Minus) => {
+ bitor_pos_neg(self.digits_mut(), other.digits());
+ self.sign = Minus;
+ self.normalize();
+ }
+ (Minus, Plus) => {
+ bitor_neg_pos(self.digits_mut(), other.digits());
+ self.normalize();
+ }
+ (Minus, Minus) => {
+ bitor_neg_neg(self.digits_mut(), other.digits());
+ self.normalize();
+ }
+ }
+ }
+}
+
+// + 1 ^ -ff = ...0 01 ^ ...f 01 = ...f 00 = -100
+// +ff ^ - 1 = ...0 ff ^ ...f ff = ...f 00 = -100
+// answer is neg, has length of longest with a possible carry
+fn bitxor_pos_neg(a: &mut Vec<BigDigit>, b: &[BigDigit]) {
+ let mut carry_b = 1;
+ let mut carry_xor = 1;
+ for (ai, &bi) in a.iter_mut().zip(b.iter()) {
+ let twos_b = negate_carry(bi, &mut carry_b);
+ *ai = negate_carry(*ai ^ twos_b, &mut carry_xor);
+ }
+ debug_assert!(b.len() > a.len() || carry_b == 0);
+ if a.len() > b.len() {
+ for ai in a[b.len()..].iter_mut() {
+ let twos_b = !0;
+ *ai = negate_carry(*ai ^ twos_b, &mut carry_xor);
+ }
+ } else if b.len() > a.len() {
+ let extra = &b[a.len()..];
+ a.extend(extra.iter().map(|&bi| {
+ let twos_b = negate_carry(bi, &mut carry_b);
+ negate_carry(twos_b, &mut carry_xor)
+ }));
+ debug_assert!(carry_b == 0);
+ }
+ if carry_xor != 0 {
+ a.push(1);
+ }
+}
+
+// - 1 ^ +ff = ...f ff ^ ...0 ff = ...f 00 = -100
+// -ff ^ + 1 = ...f 01 ^ ...0 01 = ...f 00 = -100
+// answer is neg, has length of longest with a possible carry
+fn bitxor_neg_pos(a: &mut Vec<BigDigit>, b: &[BigDigit]) {
+ let mut carry_a = 1;
+ let mut carry_xor = 1;
+ for (ai, &bi) in a.iter_mut().zip(b.iter()) {
+ let twos_a = negate_carry(*ai, &mut carry_a);
+ *ai = negate_carry(twos_a ^ bi, &mut carry_xor);
+ }
+ debug_assert!(a.len() > b.len() || carry_a == 0);
+ if a.len() > b.len() {
+ for ai in a[b.len()..].iter_mut() {
+ let twos_a = negate_carry(*ai, &mut carry_a);
+ *ai = negate_carry(twos_a, &mut carry_xor);
+ }
+ debug_assert!(carry_a == 0);
+ } else if b.len() > a.len() {
+ let extra = &b[a.len()..];
+ a.extend(extra.iter().map(|&bi| {
+ let twos_a = !0;
+ negate_carry(twos_a ^ bi, &mut carry_xor)
+ }));
+ }
+ if carry_xor != 0 {
+ a.push(1);
+ }
+}
+
+// - 1 ^ -ff = ...f ff ^ ...f 01 = ...0 fe = +fe
+// -ff ^ - 1 = ...f 01 ^ ...f ff = ...0 fe = +fe
+// answer is pos, has length of longest
+fn bitxor_neg_neg(a: &mut Vec<BigDigit>, b: &[BigDigit]) {
+ let mut carry_a = 1;
+ let mut carry_b = 1;
+ for (ai, &bi) in a.iter_mut().zip(b.iter()) {
+ let twos_a = negate_carry(*ai, &mut carry_a);
+ let twos_b = negate_carry(bi, &mut carry_b);
+ *ai = twos_a ^ twos_b;
+ }
+ debug_assert!(a.len() > b.len() || carry_a == 0);
+ debug_assert!(b.len() > a.len() || carry_b == 0);
+ if a.len() > b.len() {
+ for ai in a[b.len()..].iter_mut() {
+ let twos_a = negate_carry(*ai, &mut carry_a);
+ let twos_b = !0;
+ *ai = twos_a ^ twos_b;
+ }
+ debug_assert!(carry_a == 0);
+ } else if b.len() > a.len() {
+ let extra = &b[a.len()..];
+ a.extend(extra.iter().map(|&bi| {
+ let twos_a = !0;
+ let twos_b = negate_carry(bi, &mut carry_b);
+ twos_a ^ twos_b
+ }));
+ debug_assert!(carry_b == 0);
+ }
+}
+
+forward_all_binop_to_val_ref_commutative!(impl BitXor for BigInt, bitxor);
+
+impl<'a> BitXor<&'a BigInt> for BigInt {
+ type Output = BigInt;
+
+ #[inline]
+ fn bitxor(mut self, other: &BigInt) -> BigInt {
+ self ^= other;
+ self
+ }
+}
+
+forward_val_assign!(impl BitXorAssign for BigInt, bitxor_assign);
+
+impl<'a> BitXorAssign<&'a BigInt> for BigInt {
+ fn bitxor_assign(&mut self, other: &BigInt) {
+ match (self.sign, other.sign) {
+ (_, NoSign) => {}
+ (NoSign, _) => self.assign_from_slice(other.sign, other.digits()),
+ (Plus, Plus) => {
+ self.data ^= &other.data;
+ if self.data.is_zero() {
+ self.sign = NoSign;
+ }
+ }
+ (Plus, Minus) => {
+ bitxor_pos_neg(self.digits_mut(), other.digits());
+ self.sign = Minus;
+ self.normalize();
+ }
+ (Minus, Plus) => {
+ bitxor_neg_pos(self.digits_mut(), other.digits());
+ self.normalize();
+ }
+ (Minus, Minus) => {
+ bitxor_neg_neg(self.digits_mut(), other.digits());
+ self.sign = Plus;
+ self.normalize();
+ }
+ }
+ }
+}
+
+impl FromStr for BigInt {
+ type Err = ParseBigIntError;
+
+ #[inline]
+ fn from_str(s: &str) -> Result<BigInt, ParseBigIntError> {
+ BigInt::from_str_radix(s, 10)
+ }
+}
+
+impl Num for BigInt {
+ type FromStrRadixErr = ParseBigIntError;
+
+ /// Creates and initializes a BigInt.
+ #[inline]
+ fn from_str_radix(mut s: &str, radix: u32) -> Result<BigInt, ParseBigIntError> {
+ let sign = if s.starts_with('-') {
+ let tail = &s[1..];
+ if !tail.starts_with('+') {
+ s = tail
+ }
+ Minus
+ } else {
+ Plus
+ };
+ let bu = BigUint::from_str_radix(s, radix)?;
+ Ok(BigInt::from_biguint(sign, bu))
+ }
+}
+
+impl Shl<usize> for BigInt {
+ type Output = BigInt;
+
+ #[inline]
+ fn shl(mut self, rhs: usize) -> BigInt {
+ self <<= rhs;
+ self
+ }
+}
+
+impl<'a> Shl<usize> for &'a BigInt {
+ type Output = BigInt;
+
+ #[inline]
+ fn shl(self, rhs: usize) -> BigInt {
+ BigInt::from_biguint(self.sign, &self.data << rhs)
+ }
+}
+
+impl ShlAssign<usize> for BigInt {
+ #[inline]
+ fn shl_assign(&mut self, rhs: usize) {
+ self.data <<= rhs;
+ }
+}
+
+// Negative values need a rounding adjustment if there are any ones in the
+// bits that are getting shifted out.
+fn shr_round_down(i: &BigInt, rhs: usize) -> bool {
+ i.is_negative()
+ && biguint::trailing_zeros(&i.data)
+ .map(|n| n < rhs)
+ .unwrap_or(false)
+}
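+
+// For example, (-3) >> 1 is -2, rounding toward negative infinity: the
+// magnitude gives 0b11 >> 1 == 0b1, and because a one bit was shifted out,
+// the adjustment below bumps the magnitude to 2 before Minus is reapplied.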
+
+impl Shr<usize> for BigInt {
+ type Output = BigInt;
+
+ #[inline]
+ fn shr(mut self, rhs: usize) -> BigInt {
+ self >>= rhs;
+ self
+ }
+}
+
+impl<'a> Shr<usize> for &'a BigInt {
+ type Output = BigInt;
+
+ #[inline]
+ fn shr(self, rhs: usize) -> BigInt {
+ let round_down = shr_round_down(self, rhs);
+ let data = &self.data >> rhs;
+ BigInt::from_biguint(self.sign, if round_down { data + 1u8 } else { data })
+ }
+}
+
+impl ShrAssign<usize> for BigInt {
+ #[inline]
+ fn shr_assign(&mut self, rhs: usize) {
+ let round_down = shr_round_down(self, rhs);
+ self.data >>= rhs;
+ if round_down {
+ self.data += 1u8;
+ } else if self.data.is_zero() {
+ self.sign = NoSign;
+ }
+ }
+}
+
+impl Zero for BigInt {
+ #[inline]
+ fn zero() -> BigInt {
+ BigInt::from_biguint(NoSign, Zero::zero())
+ }
+
+ #[inline]
+ fn set_zero(&mut self) {
+ self.data.set_zero();
+ self.sign = NoSign;
+ }
+
+ #[inline]
+ fn is_zero(&self) -> bool {
+ self.sign == NoSign
+ }
+}
+
+impl One for BigInt {
+ #[inline]
+ fn one() -> BigInt {
+ BigInt::from_biguint(Plus, One::one())
+ }
+
+ #[inline]
+ fn set_one(&mut self) {
+ self.data.set_one();
+ self.sign = Plus;
+ }
+
+ #[inline]
+ fn is_one(&self) -> bool {
+ self.sign == Plus && self.data.is_one()
+ }
+}
+
+impl Signed for BigInt {
+ #[inline]
+ fn abs(&self) -> BigInt {
+ match self.sign {
+ Plus | NoSign => self.clone(),
+ Minus => BigInt::from_biguint(Plus, self.data.clone()),
+ }
+ }
+
+ #[inline]
+ fn abs_sub(&self, other: &BigInt) -> BigInt {
+ if *self <= *other {
+ Zero::zero()
+ } else {
+ self - other
+ }
+ }
+
+ #[inline]
+ fn signum(&self) -> BigInt {
+ match self.sign {
+ Plus => BigInt::from_biguint(Plus, One::one()),
+ Minus => BigInt::from_biguint(Minus, One::one()),
+ NoSign => Zero::zero(),
+ }
+ }
+
+ #[inline]
+ fn is_positive(&self) -> bool {
+ self.sign == Plus
+ }
+
+ #[inline]
+ fn is_negative(&self) -> bool {
+ self.sign == Minus
+ }
+}
+
+/// Helper function for pow
+///
+/// Computes the effect of the exponent on the sign.
+#[inline]
+fn powsign<T: Integer>(sign: Sign, other: &T) -> Sign {
+ if other.is_zero() {
+ Plus
+ } else if sign != Minus {
+ sign
+ } else if other.is_odd() {
+ sign
+ } else {
+ -sign
+ }
+}
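+
+// e.g. powsign(Minus, &3u32) == Minus, powsign(Minus, &2u32) == Plus, and
+// powsign(Minus, &0u32) == Plus, since x^0 == 1.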
+
+macro_rules! pow_impl {
+ ($T:ty) => {
+ impl<'a> Pow<$T> for &'a BigInt {
+ type Output = BigInt;
+
+ #[inline]
+ fn pow(self, rhs: $T) -> BigInt {
+ BigInt::from_biguint(powsign(self.sign, &rhs), (&self.data).pow(rhs))
+ }
+ }
+
+ impl<'a, 'b> Pow<&'b $T> for &'a BigInt {
+ type Output = BigInt;
+
+ #[inline]
+ fn pow(self, rhs: &$T) -> BigInt {
+ BigInt::from_biguint(powsign(self.sign, rhs), (&self.data).pow(rhs))
+ }
+ }
+ };
+}
+
+pow_impl!(u8);
+pow_impl!(u16);
+pow_impl!(u32);
+pow_impl!(u64);
+pow_impl!(usize);
+#[cfg(has_i128)]
+pow_impl!(u128);
+pow_impl!(BigUint);
+
+// A convenience method for getting the absolute value of an i32 in a u32.
+#[inline]
+fn i32_abs_as_u32(a: i32) -> u32 {
+ if a == i32::min_value() {
+ a as u32
+ } else {
+ a.abs() as u32
+ }
+}
+
+// A convenience method for getting the absolute value of an i64 in a u64.
+#[inline]
+fn i64_abs_as_u64(a: i64) -> u64 {
+ if a == i64::min_value() {
+ a as u64
+ } else {
+ a.abs() as u64
+ }
+}
+
+// A convenience method for getting the absolute value of an i128 in a u128.
+#[cfg(has_i128)]
+#[inline]
+fn i128_abs_as_u128(a: i128) -> u128 {
+ if a == i128::min_value() {
+ a as u128
+ } else {
+ a.abs() as u128
+ }
+}
+
+// We want to forward to BigUint::add, but it's not clear how that will go until
+// we compare both sign and magnitude. So we duplicate this body for every
+// val/ref combination, deferring that decision to BigUint's own forwarding.
+macro_rules! bigint_add {
+ ($a:expr, $a_owned:expr, $a_data:expr, $b:expr, $b_owned:expr, $b_data:expr) => {
+ match ($a.sign, $b.sign) {
+ (_, NoSign) => $a_owned,
+ (NoSign, _) => $b_owned,
+ // same sign => keep the sign with the sum of magnitudes
+ (Plus, Plus) | (Minus, Minus) => BigInt::from_biguint($a.sign, $a_data + $b_data),
+ // opposite signs => keep the sign of the larger with the difference of magnitudes
+ (Plus, Minus) | (Minus, Plus) => match $a.data.cmp(&$b.data) {
+ Less => BigInt::from_biguint($b.sign, $b_data - $a_data),
+ Greater => BigInt::from_biguint($a.sign, $a_data - $b_data),
+ Equal => Zero::zero(),
+ },
+ }
+ };
+}
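+
+// e.g. (+5) + (-7) takes the opposite-signs arm with |5| < |7|, so the
+// result carries the sign of -7 and the difference of magnitudes: -2.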
+
+impl<'a, 'b> Add<&'b BigInt> for &'a BigInt {
+ type Output = BigInt;
+
+ #[inline]
+ fn add(self, other: &BigInt) -> BigInt {
+ bigint_add!(
+ self,
+ self.clone(),
+ &self.data,
+ other,
+ other.clone(),
+ &other.data
+ )
+ }
+}
+
+impl<'a> Add<BigInt> for &'a BigInt {
+ type Output = BigInt;
+
+ #[inline]
+ fn add(self, other: BigInt) -> BigInt {
+ bigint_add!(self, self.clone(), &self.data, other, other, other.data)
+ }
+}
+
+impl<'a> Add<&'a BigInt> for BigInt {
+ type Output = BigInt;
+
+ #[inline]
+ fn add(self, other: &BigInt) -> BigInt {
+ bigint_add!(self, self, self.data, other, other.clone(), &other.data)
+ }
+}
+
+impl Add<BigInt> for BigInt {
+ type Output = BigInt;
+
+ #[inline]
+ fn add(self, other: BigInt) -> BigInt {
+ bigint_add!(self, self, self.data, other, other, other.data)
+ }
+}
+
+impl<'a> AddAssign<&'a BigInt> for BigInt {
+ #[inline]
+ fn add_assign(&mut self, other: &BigInt) {
+ let n = mem::replace(self, BigInt::zero());
+ *self = n + other;
+ }
+}
+forward_val_assign!(impl AddAssign for BigInt, add_assign);
+
+promote_all_scalars!(impl Add for BigInt, add);
+promote_all_scalars_assign!(impl AddAssign for BigInt, add_assign);
+forward_all_scalar_binop_to_val_val_commutative!(impl Add<u32> for BigInt, add);
+forward_all_scalar_binop_to_val_val_commutative!(impl Add<u64> for BigInt, add);
+#[cfg(has_i128)]
+forward_all_scalar_binop_to_val_val_commutative!(impl Add<u128> for BigInt, add);
+
+impl Add<u32> for BigInt {
+ type Output = BigInt;
+
+ #[inline]
+ fn add(self, other: u32) -> BigInt {
+ match self.sign {
+ NoSign => From::from(other),
+ Plus => BigInt::from_biguint(Plus, self.data + other),
+ Minus => match self.data.cmp(&From::from(other)) {
+ Equal => Zero::zero(),
+ Less => BigInt::from_biguint(Plus, other - self.data),
+ Greater => BigInt::from_biguint(Minus, self.data - other),
+ },
+ }
+ }
+}
+impl AddAssign<u32> for BigInt {
+ #[inline]
+ fn add_assign(&mut self, other: u32) {
+ let n = mem::replace(self, BigInt::zero());
+ *self = n + other;
+ }
+}
+
+impl Add<u64> for BigInt {
+ type Output = BigInt;
+
+ #[inline]
+ fn add(self, other: u64) -> BigInt {
+ match self.sign {
+ NoSign => From::from(other),
+ Plus => BigInt::from_biguint(Plus, self.data + other),
+ Minus => match self.data.cmp(&From::from(other)) {
+ Equal => Zero::zero(),
+ Less => BigInt::from_biguint(Plus, other - self.data),
+ Greater => BigInt::from_biguint(Minus, self.data - other),
+ },
+ }
+ }
+}
+impl AddAssign<u64> for BigInt {
+ #[inline]
+ fn add_assign(&mut self, other: u64) {
+ let n = mem::replace(self, BigInt::zero());
+ *self = n + other;
+ }
+}
+
+#[cfg(has_i128)]
+impl Add<u128> for BigInt {
+ type Output = BigInt;
+
+ #[inline]
+ fn add(self, other: u128) -> BigInt {
+ match self.sign {
+ NoSign => From::from(other),
+ Plus => BigInt::from_biguint(Plus, self.data + other),
+ Minus => match self.data.cmp(&From::from(other)) {
+ Equal => Zero::zero(),
+ Less => BigInt::from_biguint(Plus, other - self.data),
+ Greater => BigInt::from_biguint(Minus, self.data - other),
+ },
+ }
+ }
+}
+#[cfg(has_i128)]
+impl AddAssign<u128> for BigInt {
+ #[inline]
+ fn add_assign(&mut self, other: u128) {
+ let n = mem::replace(self, BigInt::zero());
+ *self = n + other;
+ }
+}
+
+forward_all_scalar_binop_to_val_val_commutative!(impl Add<i32> for BigInt, add);
+forward_all_scalar_binop_to_val_val_commutative!(impl Add<i64> for BigInt, add);
+#[cfg(has_i128)]
+forward_all_scalar_binop_to_val_val_commutative!(impl Add<i128> for BigInt, add);
+
+impl Add<i32> for BigInt {
+ type Output = BigInt;
+
+ #[inline]
+ fn add(self, other: i32) -> BigInt {
+ if other >= 0 {
+ self + other as u32
+ } else {
+ self - i32_abs_as_u32(other)
+ }
+ }
+}
+impl AddAssign<i32> for BigInt {
+ #[inline]
+ fn add_assign(&mut self, other: i32) {
+ if other >= 0 {
+ *self += other as u32;
+ } else {
+ *self -= i32_abs_as_u32(other);
+ }
+ }
+}
+
+impl Add<i64> for BigInt {
+ type Output = BigInt;
+
+ #[inline]
+ fn add(self, other: i64) -> BigInt {
+ if other >= 0 {
+ self + other as u64
+ } else {
+ self - i64_abs_as_u64(other)
+ }
+ }
+}
+impl AddAssign<i64> for BigInt {
+ #[inline]
+ fn add_assign(&mut self, other: i64) {
+ if other >= 0 {
+ *self += other as u64;
+ } else {
+ *self -= i64_abs_as_u64(other);
+ }
+ }
+}
+
+#[cfg(has_i128)]
+impl Add<i128> for BigInt {
+ type Output = BigInt;
+
+ #[inline]
+ fn add(self, other: i128) -> BigInt {
+ if other >= 0 {
+ self + other as u128
+ } else {
+ self - i128_abs_as_u128(other)
+ }
+ }
+}
+#[cfg(has_i128)]
+impl AddAssign<i128> for BigInt {
+ #[inline]
+ fn add_assign(&mut self, other: i128) {
+ if other >= 0 {
+ *self += other as u128;
+ } else {
+ *self -= i128_abs_as_u128(other);
+ }
+ }
+}
+
+// We want to forward to BigUint::sub, but it's not clear how that will go until
+// we compare both sign and magnitude. So we duplicate this body for every
+// val/ref combination, deferring that decision to BigUint's own forwarding.
+macro_rules! bigint_sub {
+ ($a:expr, $a_owned:expr, $a_data:expr, $b:expr, $b_owned:expr, $b_data:expr) => {
+ match ($a.sign, $b.sign) {
+ (_, NoSign) => $a_owned,
+ (NoSign, _) => -$b_owned,
+ // opposite signs => keep the sign of the left with the sum of magnitudes
+ (Plus, Minus) | (Minus, Plus) => BigInt::from_biguint($a.sign, $a_data + $b_data),
+ // same sign => keep or toggle the sign of the left with the difference of magnitudes
+ (Plus, Plus) | (Minus, Minus) => match $a.data.cmp(&$b.data) {
+ Less => BigInt::from_biguint(-$a.sign, $b_data - $a_data),
+ Greater => BigInt::from_biguint($a.sign, $a_data - $b_data),
+ Equal => Zero::zero(),
+ },
+ }
+ };
+}
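+
+// e.g. (+5) - (+7) takes the same-sign arm with |5| < |7|, so the left
+// operand's sign is toggled and paired with the difference of magnitudes: -2.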
+
+impl<'a, 'b> Sub<&'b BigInt> for &'a BigInt {
+ type Output = BigInt;
+
+ #[inline]
+ fn sub(self, other: &BigInt) -> BigInt {
+ bigint_sub!(
+ self,
+ self.clone(),
+ &self.data,
+ other,
+ other.clone(),
+ &other.data
+ )
+ }
+}
+
+impl<'a> Sub<BigInt> for &'a BigInt {
+ type Output = BigInt;
+
+ #[inline]
+ fn sub(self, other: BigInt) -> BigInt {
+ bigint_sub!(self, self.clone(), &self.data, other, other, other.data)
+ }
+}
+
+impl<'a> Sub<&'a BigInt> for BigInt {
+ type Output = BigInt;
+
+ #[inline]
+ fn sub(self, other: &BigInt) -> BigInt {
+ bigint_sub!(self, self, self.data, other, other.clone(), &other.data)
+ }
+}
+
+impl Sub<BigInt> for BigInt {
+ type Output = BigInt;
+
+ #[inline]
+ fn sub(self, other: BigInt) -> BigInt {
+ bigint_sub!(self, self, self.data, other, other, other.data)
+ }
+}
+
+impl<'a> SubAssign<&'a BigInt> for BigInt {
+ #[inline]
+ fn sub_assign(&mut self, other: &BigInt) {
+ let n = mem::replace(self, BigInt::zero());
+ *self = n - other;
+ }
+}
+forward_val_assign!(impl SubAssign for BigInt, sub_assign);
+
+promote_all_scalars!(impl Sub for BigInt, sub);
+promote_all_scalars_assign!(impl SubAssign for BigInt, sub_assign);
+forward_all_scalar_binop_to_val_val!(impl Sub<u32> for BigInt, sub);
+forward_all_scalar_binop_to_val_val!(impl Sub<u64> for BigInt, sub);
+#[cfg(has_i128)]
+forward_all_scalar_binop_to_val_val!(impl Sub<u128> for BigInt, sub);
+
+impl Sub<u32> for BigInt {
+ type Output = BigInt;
+
+ #[inline]
+ fn sub(self, other: u32) -> BigInt {
+ match self.sign {
+ NoSign => BigInt::from_biguint(Minus, From::from(other)),
+ Minus => BigInt::from_biguint(Minus, self.data + other),
+ Plus => match self.data.cmp(&From::from(other)) {
+ Equal => Zero::zero(),
+ Greater => BigInt::from_biguint(Plus, self.data - other),
+ Less => BigInt::from_biguint(Minus, other - self.data),
+ },
+ }
+ }
+}
+impl SubAssign<u32> for BigInt {
+ #[inline]
+ fn sub_assign(&mut self, other: u32) {
+ let n = mem::replace(self, BigInt::zero());
+ *self = n - other;
+ }
+}
+
+impl Sub<BigInt> for u32 {
+ type Output = BigInt;
+
+ #[inline]
+ fn sub(self, other: BigInt) -> BigInt {
+ -(other - self)
+ }
+}
+
+impl Sub<BigInt> for u64 {
+ type Output = BigInt;
+
+ #[inline]
+ fn sub(self, other: BigInt) -> BigInt {
+ -(other - self)
+ }
+}
+#[cfg(has_i128)]
+impl Sub<BigInt> for u128 {
+ type Output = BigInt;
+
+ #[inline]
+ fn sub(self, other: BigInt) -> BigInt {
+ -(other - self)
+ }
+}
+
+impl Sub<u64> for BigInt {
+ type Output = BigInt;
+
+ #[inline]
+ fn sub(self, other: u64) -> BigInt {
+ match self.sign {
+ NoSign => BigInt::from_biguint(Minus, From::from(other)),
+ Minus => BigInt::from_biguint(Minus, self.data + other),
+ Plus => match self.data.cmp(&From::from(other)) {
+ Equal => Zero::zero(),
+ Greater => BigInt::from_biguint(Plus, self.data - other),
+ Less => BigInt::from_biguint(Minus, other - self.data),
+ },
+ }
+ }
+}
+impl SubAssign<u64> for BigInt {
+ #[inline]
+ fn sub_assign(&mut self, other: u64) {
+ let n = mem::replace(self, BigInt::zero());
+ *self = n - other;
+ }
+}
+
+#[cfg(has_i128)]
+impl Sub<u128> for BigInt {
+ type Output = BigInt;
+
+ #[inline]
+ fn sub(self, other: u128) -> BigInt {
+ match self.sign {
+ NoSign => BigInt::from_biguint(Minus, From::from(other)),
+ Minus => BigInt::from_biguint(Minus, self.data + other),
+ Plus => match self.data.cmp(&From::from(other)) {
+ Equal => Zero::zero(),
+ Greater => BigInt::from_biguint(Plus, self.data - other),
+ Less => BigInt::from_biguint(Minus, other - self.data),
+ },
+ }
+ }
+}
+#[cfg(has_i128)]
+impl SubAssign<u128> for BigInt {
+ #[inline]
+ fn sub_assign(&mut self, other: u128) {
+ let n = mem::replace(self, BigInt::zero());
+ *self = n - other;
+ }
+}
+
+forward_all_scalar_binop_to_val_val!(impl Sub<i32> for BigInt, sub);
+forward_all_scalar_binop_to_val_val!(impl Sub<i64> for BigInt, sub);
+#[cfg(has_i128)]
+forward_all_scalar_binop_to_val_val!(impl Sub<i128> for BigInt, sub);
+
+impl Sub<i32> for BigInt {
+ type Output = BigInt;
+
+ #[inline]
+ fn sub(self, other: i32) -> BigInt {
+ if other >= 0 {
+ self - other as u32
+ } else {
+ self + i32_abs_as_u32(other)
+ }
+ }
+}
+impl SubAssign<i32> for BigInt {
+ #[inline]
+ fn sub_assign(&mut self, other: i32) {
+ if other >= 0 {
+ *self -= other as u32;
+ } else {
+ *self += i32_abs_as_u32(other);
+ }
+ }
+}
+
+impl Sub<BigInt> for i32 {
+ type Output = BigInt;
+
+ #[inline]
+ fn sub(self, other: BigInt) -> BigInt {
+ if self >= 0 {
+ self as u32 - other
+ } else {
+ -other - i32_abs_as_u32(self)
+ }
+ }
+}
+
+impl Sub<i64> for BigInt {
+ type Output = BigInt;
+
+ #[inline]
+ fn sub(self, other: i64) -> BigInt {
+ if other >= 0 {
+ self - other as u64
+ } else {
+ self + i64_abs_as_u64(other)
+ }
+ }
+}
+impl SubAssign<i64> for BigInt {
+ #[inline]
+ fn sub_assign(&mut self, other: i64) {
+ if other >= 0 {
+ *self -= other as u64;
+ } else {
+ *self += i64_abs_as_u64(other);
+ }
+ }
+}
+
+impl Sub<BigInt> for i64 {
+ type Output = BigInt;
+
+ #[inline]
+ fn sub(self, other: BigInt) -> BigInt {
+ if self >= 0 {
+ self as u64 - other
+ } else {
+ -other - i64_abs_as_u64(self)
+ }
+ }
+}
+
+#[cfg(has_i128)]
+impl Sub<i128> for BigInt {
+ type Output = BigInt;
+
+ #[inline]
+ fn sub(self, other: i128) -> BigInt {
+ if other >= 0 {
+ self - other as u128
+ } else {
+ self + i128_abs_as_u128(other)
+ }
+ }
+}
+#[cfg(has_i128)]
+impl SubAssign<i128> for BigInt {
+ #[inline]
+ fn sub_assign(&mut self, other: i128) {
+ if other >= 0 {
+ *self -= other as u128;
+ } else {
+ *self += i128_abs_as_u128(other);
+ }
+ }
+}
+#[cfg(has_i128)]
+impl Sub<BigInt> for i128 {
+ type Output = BigInt;
+
+ #[inline]
+ fn sub(self, other: BigInt) -> BigInt {
+ if self >= 0 {
+ self as u128 - other
+ } else {
+ -other - i128_abs_as_u128(self)
+ }
+ }
+}
+
+forward_all_binop_to_ref_ref!(impl Mul for BigInt, mul);
+
+impl<'a, 'b> Mul<&'b BigInt> for &'a BigInt {
+ type Output = BigInt;
+
+ #[inline]
+ fn mul(self, other: &BigInt) -> BigInt {
+ BigInt::from_biguint(self.sign * other.sign, &self.data * &other.data)
+ }
+}
+
+impl<'a> MulAssign<&'a BigInt> for BigInt {
+ #[inline]
+ fn mul_assign(&mut self, other: &BigInt) {
+ *self = &*self * other;
+ }
+}
+forward_val_assign!(impl MulAssign for BigInt, mul_assign);
+
+promote_all_scalars!(impl Mul for BigInt, mul);
+promote_all_scalars_assign!(impl MulAssign for BigInt, mul_assign);
+forward_all_scalar_binop_to_val_val_commutative!(impl Mul<u32> for BigInt, mul);
+forward_all_scalar_binop_to_val_val_commutative!(impl Mul<u64> for BigInt, mul);
+#[cfg(has_i128)]
+forward_all_scalar_binop_to_val_val_commutative!(impl Mul<u128> for BigInt, mul);
+
+impl Mul<u32> for BigInt {
+ type Output = BigInt;
+
+ #[inline]
+ fn mul(self, other: u32) -> BigInt {
+ BigInt::from_biguint(self.sign, self.data * other)
+ }
+}
+
+impl MulAssign<u32> for BigInt {
+ #[inline]
+ fn mul_assign(&mut self, other: u32) {
+ self.data *= other;
+ if self.data.is_zero() {
+ self.sign = NoSign;
+ }
+ }
+}
+
+impl Mul<u64> for BigInt {
+ type Output = BigInt;
+
+ #[inline]
+ fn mul(self, other: u64) -> BigInt {
+ BigInt::from_biguint(self.sign, self.data * other)
+ }
+}
+
+impl MulAssign<u64> for BigInt {
+ #[inline]
+ fn mul_assign(&mut self, other: u64) {
+ self.data *= other;
+ if self.data.is_zero() {
+ self.sign = NoSign;
+ }
+ }
+}
+#[cfg(has_i128)]
+impl Mul<u128> for BigInt {
+ type Output = BigInt;
+
+ #[inline]
+ fn mul(self, other: u128) -> BigInt {
+ BigInt::from_biguint(self.sign, self.data * other)
+ }
+}
+#[cfg(has_i128)]
+impl MulAssign<u128> for BigInt {
+ #[inline]
+ fn mul_assign(&mut self, other: u128) {
+ self.data *= other;
+ if self.data.is_zero() {
+ self.sign = NoSign;
+ }
+ }
+}
+
+forward_all_scalar_binop_to_val_val_commutative!(impl Mul<i32> for BigInt, mul);
+forward_all_scalar_binop_to_val_val_commutative!(impl Mul<i64> for BigInt, mul);
+#[cfg(has_i128)]
+forward_all_scalar_binop_to_val_val_commutative!(impl Mul<i128> for BigInt, mul);
+
+impl Mul<i32> for BigInt {
+ type Output = BigInt;
+
+ #[inline]
+ fn mul(self, other: i32) -> BigInt {
+ if other >= 0 {
+ self * other as u32
+ } else {
+ -(self * i32_abs_as_u32(other))
+ }
+ }
+}
+
+impl MulAssign<i32> for BigInt {
+ #[inline]
+ fn mul_assign(&mut self, other: i32) {
+ if other >= 0 {
+ *self *= other as u32;
+ } else {
+ self.sign = -self.sign;
+ *self *= i32_abs_as_u32(other);
+ }
+ }
+}
+
+impl Mul<i64> for BigInt {
+ type Output = BigInt;
+
+ #[inline]
+ fn mul(self, other: i64) -> BigInt {
+ if other >= 0 {
+ self * other as u64
+ } else {
+ -(self * i64_abs_as_u64(other))
+ }
+ }
+}
+
+impl MulAssign<i64> for BigInt {
+ #[inline]
+ fn mul_assign(&mut self, other: i64) {
+ if other >= 0 {
+ *self *= other as u64;
+ } else {
+ self.sign = -self.sign;
+ *self *= i64_abs_as_u64(other);
+ }
+ }
+}
+#[cfg(has_i128)]
+impl Mul<i128> for BigInt {
+ type Output = BigInt;
+
+ #[inline]
+ fn mul(self, other: i128) -> BigInt {
+ if other >= 0 {
+ self * other as u128
+ } else {
+ -(self * i128_abs_as_u128(other))
+ }
+ }
+}
+#[cfg(has_i128)]
+impl MulAssign<i128> for BigInt {
+ #[inline]
+ fn mul_assign(&mut self, other: i128) {
+ if other >= 0 {
+ *self *= other as u128;
+ } else {
+ self.sign = -self.sign;
+ *self *= i128_abs_as_u128(other);
+ }
+ }
+}
+
+forward_all_binop_to_ref_ref!(impl Div for BigInt, div);
+
+impl<'a, 'b> Div<&'b BigInt> for &'a BigInt {
+ type Output = BigInt;
+
+ #[inline]
+ fn div(self, other: &BigInt) -> BigInt {
+ let (q, _) = self.div_rem(other);
+ q
+ }
+}
+
+impl<'a> DivAssign<&'a BigInt> for BigInt {
+ #[inline]
+ fn div_assign(&mut self, other: &BigInt) {
+ *self = &*self / other;
+ }
+}
+forward_val_assign!(impl DivAssign for BigInt, div_assign);
+
+promote_all_scalars!(impl Div for BigInt, div);
+promote_all_scalars_assign!(impl DivAssign for BigInt, div_assign);
+forward_all_scalar_binop_to_val_val!(impl Div<u32> for BigInt, div);
+forward_all_scalar_binop_to_val_val!(impl Div<u64> for BigInt, div);
+#[cfg(has_i128)]
+forward_all_scalar_binop_to_val_val!(impl Div<u128> for BigInt, div);
+
+impl Div<u32> for BigInt {
+ type Output = BigInt;
+
+ #[inline]
+ fn div(self, other: u32) -> BigInt {
+ BigInt::from_biguint(self.sign, self.data / other)
+ }
+}
+
+impl DivAssign<u32> for BigInt {
+ #[inline]
+ fn div_assign(&mut self, other: u32) {
+ self.data /= other;
+ if self.data.is_zero() {
+ self.sign = NoSign;
+ }
+ }
+}
+
+impl Div<BigInt> for u32 {
+ type Output = BigInt;
+
+ #[inline]
+ fn div(self, other: BigInt) -> BigInt {
+ BigInt::from_biguint(other.sign, self / other.data)
+ }
+}
+
+impl Div<u64> for BigInt {
+ type Output = BigInt;
+
+ #[inline]
+ fn div(self, other: u64) -> BigInt {
+ BigInt::from_biguint(self.sign, self.data / other)
+ }
+}
+
+impl DivAssign<u64> for BigInt {
+ #[inline]
+ fn div_assign(&mut self, other: u64) {
+ self.data /= other;
+ if self.data.is_zero() {
+ self.sign = NoSign;
+ }
+ }
+}
+
+impl Div<BigInt> for u64 {
+ type Output = BigInt;
+
+ #[inline]
+ fn div(self, other: BigInt) -> BigInt {
+ BigInt::from_biguint(other.sign, self / other.data)
+ }
+}
+
+#[cfg(has_i128)]
+impl Div<u128> for BigInt {
+ type Output = BigInt;
+
+ #[inline]
+ fn div(self, other: u128) -> BigInt {
+ BigInt::from_biguint(self.sign, self.data / other)
+ }
+}
+
+#[cfg(has_i128)]
+impl DivAssign<u128> for BigInt {
+ #[inline]
+ fn div_assign(&mut self, other: u128) {
+ self.data /= other;
+ if self.data.is_zero() {
+ self.sign = NoSign;
+ }
+ }
+}
+
+#[cfg(has_i128)]
+impl Div<BigInt> for u128 {
+ type Output = BigInt;
+
+ #[inline]
+ fn div(self, other: BigInt) -> BigInt {
+ BigInt::from_biguint(other.sign, self / other.data)
+ }
+}
+
+forward_all_scalar_binop_to_val_val!(impl Div<i32> for BigInt, div);
+forward_all_scalar_binop_to_val_val!(impl Div<i64> for BigInt, div);
+#[cfg(has_i128)]
+forward_all_scalar_binop_to_val_val!(impl Div<i128> for BigInt, div);
+
+impl Div<i32> for BigInt {
+ type Output = BigInt;
+
+ #[inline]
+ fn div(self, other: i32) -> BigInt {
+ if other >= 0 {
+ self / other as u32
+ } else {
+ -(self / i32_abs_as_u32(other))
+ }
+ }
+}
+
+impl DivAssign<i32> for BigInt {
+ #[inline]
+ fn div_assign(&mut self, other: i32) {
+ if other >= 0 {
+ *self /= other as u32;
+ } else {
+ self.sign = -self.sign;
+ *self /= i32_abs_as_u32(other);
+ }
+ }
+}
+
+impl Div<BigInt> for i32 {
+ type Output = BigInt;
+
+ #[inline]
+ fn div(self, other: BigInt) -> BigInt {
+ if self >= 0 {
+ self as u32 / other
+ } else {
+ -(i32_abs_as_u32(self) / other)
+ }
+ }
+}
+
+impl Div<i64> for BigInt {
+ type Output = BigInt;
+
+ #[inline]
+ fn div(self, other: i64) -> BigInt {
+ if other >= 0 {
+ self / other as u64
+ } else {
+ -(self / i64_abs_as_u64(other))
+ }
+ }
+}
+
+impl DivAssign<i64> for BigInt {
+ #[inline]
+ fn div_assign(&mut self, other: i64) {
+ if other >= 0 {
+ *self /= other as u64;
+ } else {
+ self.sign = -self.sign;
+ *self /= i64_abs_as_u64(other);
+ }
+ }
+}
+
+impl Div<BigInt> for i64 {
+ type Output = BigInt;
+
+ #[inline]
+ fn div(self, other: BigInt) -> BigInt {
+ if self >= 0 {
+ self as u64 / other
+ } else {
+ -(i64_abs_as_u64(self) / other)
+ }
+ }
+}
+
+#[cfg(has_i128)]
+impl Div<i128> for BigInt {
+ type Output = BigInt;
+
+ #[inline]
+ fn div(self, other: i128) -> BigInt {
+ if other >= 0 {
+ self / other as u128
+ } else {
+ -(self / i128_abs_as_u128(other))
+ }
+ }
+}
+
+#[cfg(has_i128)]
+impl DivAssign<i128> for BigInt {
+ #[inline]
+ fn div_assign(&mut self, other: i128) {
+ if other >= 0 {
+ *self /= other as u128;
+ } else {
+ self.sign = -self.sign;
+ *self /= i128_abs_as_u128(other);
+ }
+ }
+}
+
+#[cfg(has_i128)]
+impl Div<BigInt> for i128 {
+ type Output = BigInt;
+
+ #[inline]
+ fn div(self, other: BigInt) -> BigInt {
+ if self >= 0 {
+ self as u128 / other
+ } else {
+ -(i128_abs_as_u128(self) / other)
+ }
+ }
+}
+
+forward_all_binop_to_ref_ref!(impl Rem for BigInt, rem);
+
+impl<'a, 'b> Rem<&'b BigInt> for &'a BigInt {
+ type Output = BigInt;
+
+ #[inline]
+ fn rem(self, other: &BigInt) -> BigInt {
+ let (_, r) = self.div_rem(other);
+ r
+ }
+}
+
+impl<'a> RemAssign<&'a BigInt> for BigInt {
+ #[inline]
+ fn rem_assign(&mut self, other: &BigInt) {
+ *self = &*self % other;
+ }
+}
+forward_val_assign!(impl RemAssign for BigInt, rem_assign);
+
+promote_all_scalars!(impl Rem for BigInt, rem);
+promote_all_scalars_assign!(impl RemAssign for BigInt, rem_assign);
+forward_all_scalar_binop_to_val_val!(impl Rem<u32> for BigInt, rem);
+forward_all_scalar_binop_to_val_val!(impl Rem<u64> for BigInt, rem);
+#[cfg(has_i128)]
+forward_all_scalar_binop_to_val_val!(impl Rem<u128> for BigInt, rem);
+
+impl Rem<u32> for BigInt {
+ type Output = BigInt;
+
+ #[inline]
+ fn rem(self, other: u32) -> BigInt {
+ BigInt::from_biguint(self.sign, self.data % other)
+ }
+}
+
+impl RemAssign<u32> for BigInt {
+ #[inline]
+ fn rem_assign(&mut self, other: u32) {
+ self.data %= other;
+ if self.data.is_zero() {
+ self.sign = NoSign;
+ }
+ }
+}
+
+impl Rem<BigInt> for u32 {
+ type Output = BigInt;
+
+ #[inline]
+ fn rem(self, other: BigInt) -> BigInt {
+ BigInt::from_biguint(Plus, self % other.data)
+ }
+}
+
+impl Rem<u64> for BigInt {
+ type Output = BigInt;
+
+ #[inline]
+ fn rem(self, other: u64) -> BigInt {
+ BigInt::from_biguint(self.sign, self.data % other)
+ }
+}
+
+impl RemAssign<u64> for BigInt {
+ #[inline]
+ fn rem_assign(&mut self, other: u64) {
+ self.data %= other;
+ if self.data.is_zero() {
+ self.sign = NoSign;
+ }
+ }
+}
+
+impl Rem<BigInt> for u64 {
+ type Output = BigInt;
+
+ #[inline]
+ fn rem(self, other: BigInt) -> BigInt {
+ BigInt::from_biguint(Plus, self % other.data)
+ }
+}
+
+#[cfg(has_i128)]
+impl Rem<u128> for BigInt {
+ type Output = BigInt;
+
+ #[inline]
+ fn rem(self, other: u128) -> BigInt {
+ BigInt::from_biguint(self.sign, self.data % other)
+ }
+}
+
+#[cfg(has_i128)]
+impl RemAssign<u128> for BigInt {
+ #[inline]
+ fn rem_assign(&mut self, other: u128) {
+ self.data %= other;
+ if self.data.is_zero() {
+ self.sign = NoSign;
+ }
+ }
+}
+
+#[cfg(has_i128)]
+impl Rem<BigInt> for u128 {
+ type Output = BigInt;
+
+ #[inline]
+ fn rem(self, other: BigInt) -> BigInt {
+ BigInt::from_biguint(Plus, self % other.data)
+ }
+}
+
+forward_all_scalar_binop_to_val_val!(impl Rem<i32> for BigInt, rem);
+forward_all_scalar_binop_to_val_val!(impl Rem<i64> for BigInt, rem);
+#[cfg(has_i128)]
+forward_all_scalar_binop_to_val_val!(impl Rem<i128> for BigInt, rem);
+
+impl Rem<i32> for BigInt {
+ type Output = BigInt;
+
+ #[inline]
+ fn rem(self, other: i32) -> BigInt {
+ if other >= 0 {
+ self % other as u32
+ } else {
+ self % i32_abs_as_u32(other)
+ }
+ }
+}
+
+impl RemAssign<i32> for BigInt {
+ #[inline]
+ fn rem_assign(&mut self, other: i32) {
+ if other >= 0 {
+ *self %= other as u32;
+ } else {
+ *self %= i32_abs_as_u32(other);
+ }
+ }
+}
+
+impl Rem<BigInt> for i32 {
+ type Output = BigInt;
+
+ #[inline]
+ fn rem(self, other: BigInt) -> BigInt {
+ if self >= 0 {
+ self as u32 % other
+ } else {
+ -(i32_abs_as_u32(self) % other)
+ }
+ }
+}
+
+impl Rem<i64> for BigInt {
+ type Output = BigInt;
+
+ #[inline]
+ fn rem(self, other: i64) -> BigInt {
+ if other >= 0 {
+ self % other as u64
+ } else {
+ self % i64_abs_as_u64(other)
+ }
+ }
+}
+
+impl RemAssign<i64> for BigInt {
+ #[inline]
+ fn rem_assign(&mut self, other: i64) {
+ if other >= 0 {
+ *self %= other as u64;
+ } else {
+ *self %= i64_abs_as_u64(other);
+ }
+ }
+}
+
+impl Rem<BigInt> for i64 {
+ type Output = BigInt;
+
+ #[inline]
+ fn rem(self, other: BigInt) -> BigInt {
+ if self >= 0 {
+ self as u64 % other
+ } else {
+ -(i64_abs_as_u64(self) % other)
+ }
+ }
+}
+
+#[cfg(has_i128)]
+impl Rem<i128> for BigInt {
+ type Output = BigInt;
+
+ #[inline]
+ fn rem(self, other: i128) -> BigInt {
+ if other >= 0 {
+ self % other as u128
+ } else {
+ self % i128_abs_as_u128(other)
+ }
+ }
+}
+#[cfg(has_i128)]
+impl RemAssign<i128> for BigInt {
+ #[inline]
+ fn rem_assign(&mut self, other: i128) {
+ if other >= 0 {
+ *self %= other as u128;
+ } else {
+ *self %= i128_abs_as_u128(other);
+ }
+ }
+}
+#[cfg(has_i128)]
+impl Rem<BigInt> for i128 {
+ type Output = BigInt;
+
+ #[inline]
+ fn rem(self, other: BigInt) -> BigInt {
+ if self >= 0 {
+ self as u128 % other
+ } else {
+ -(i128_abs_as_u128(self) % other)
+ }
+ }
+}
+
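+// Editor's note (illustrative, not part of the upstream source): like Rust's
+// primitive `%`, `Rem` for `BigInt` truncates toward zero, so the remainder
+// takes the sign of the dividend:
+//
+//     use num_bigint::BigInt;
+//
+//     assert_eq!(BigInt::from(-7) % BigInt::from(2), BigInt::from(-1));
+//     assert_eq!(BigInt::from(7) % BigInt::from(-2), BigInt::from(1));
+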
+impl Neg for BigInt {
+ type Output = BigInt;
+
+ #[inline]
+ fn neg(mut self) -> BigInt {
+ self.sign = -self.sign;
+ self
+ }
+}
+
+impl<'a> Neg for &'a BigInt {
+ type Output = BigInt;
+
+ #[inline]
+ fn neg(self) -> BigInt {
+ -self.clone()
+ }
+}
+
+impl CheckedAdd for BigInt {
+ #[inline]
+ fn checked_add(&self, v: &BigInt) -> Option<BigInt> {
+ Some(self.add(v))
+ }
+}
+
+impl CheckedSub for BigInt {
+ #[inline]
+ fn checked_sub(&self, v: &BigInt) -> Option<BigInt> {
+ Some(self.sub(v))
+ }
+}
+
+impl CheckedMul for BigInt {
+ #[inline]
+ fn checked_mul(&self, v: &BigInt) -> Option<BigInt> {
+ Some(self.mul(v))
+ }
+}
+
+impl CheckedDiv for BigInt {
+ #[inline]
+ fn checked_div(&self, v: &BigInt) -> Option<BigInt> {
+ if v.is_zero() {
+ return None;
+ }
+ Some(self.div(v))
+ }
+}
+
+impl Integer for BigInt {
+ #[inline]
+ fn div_rem(&self, other: &BigInt) -> (BigInt, BigInt) {
+ // r.sign == self.sign
+ let (d_ui, r_ui) = self.data.div_mod_floor(&other.data);
+ let d = BigInt::from_biguint(self.sign, d_ui);
+ let r = BigInt::from_biguint(self.sign, r_ui);
+ if other.is_negative() {
+ (-d, r)
+ } else {
+ (d, r)
+ }
+ }
+
+ #[inline]
+ fn div_floor(&self, other: &BigInt) -> BigInt {
+ let (d, _) = self.div_mod_floor(other);
+ d
+ }
+
+ #[inline]
+ fn mod_floor(&self, other: &BigInt) -> BigInt {
+ let (_, m) = self.div_mod_floor(other);
+ m
+ }
+
+ fn div_mod_floor(&self, other: &BigInt) -> (BigInt, BigInt) {
+ // m.sign == other.sign
+ let (d_ui, m_ui) = self.data.div_rem(&other.data);
+ let d = BigInt::from_biguint(Plus, d_ui);
+ let m = BigInt::from_biguint(Plus, m_ui);
+ let one: BigInt = One::one();
+ match (self.sign, other.sign) {
+ (_, NoSign) => panic!(),
+ (Plus, Plus) | (NoSign, Plus) => (d, m),
+ (Plus, Minus) | (NoSign, Minus) => {
+ if m.is_zero() {
+ (-d, Zero::zero())
+ } else {
+ (-d - one, m + other)
+ }
+ }
+ (Minus, Plus) => {
+ if m.is_zero() {
+ (-d, Zero::zero())
+ } else {
+ (-d - one, other - m)
+ }
+ }
+ (Minus, Minus) => (d, -m),
+ }
+ }
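+
+ // Editor's note (illustrative, not part of the upstream source): for the
+ // unsigned magnitudes, `div_rem` and `div_mod_floor` coincide, so the two
+ // methods above differ only in how the signs are fixed up. `div_rem`
+ // truncates toward zero, while `div_mod_floor` rounds toward negative
+ // infinity:
+ //
+ //     use num_bigint::BigInt;
+ //     use num_integer::Integer;
+ //
+ //     let (a, b) = (BigInt::from(-7), BigInt::from(2));
+ //     assert_eq!(a.div_rem(&b), (BigInt::from(-3), BigInt::from(-1)));
+ //     assert_eq!(a.div_mod_floor(&b), (BigInt::from(-4), BigInt::from(1)));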
+
+ /// Calculates the Greatest Common Divisor (GCD) of the number and `other`.
+ ///
+ /// The result is always positive.
+ #[inline]
+ fn gcd(&self, other: &BigInt) -> BigInt {
+ BigInt::from_biguint(Plus, self.data.gcd(&other.data))
+ }
+
+ /// Calculates the Lowest Common Multiple (LCM) of the number and `other`.
+ #[inline]
+ fn lcm(&self, other: &BigInt) -> BigInt {
+ BigInt::from_biguint(Plus, self.data.lcm(&other.data))
+ }
+
+ /// Deprecated, use `is_multiple_of` instead.
+ #[inline]
+ fn divides(&self, other: &BigInt) -> bool {
+ self.is_multiple_of(other)
+ }
+
+ /// Returns `true` if the number is a multiple of `other`.
+ #[inline]
+ fn is_multiple_of(&self, other: &BigInt) -> bool {
+ self.data.is_multiple_of(&other.data)
+ }
+
+ /// Returns `true` if the number is divisible by `2`.
+ #[inline]
+ fn is_even(&self) -> bool {
+ self.data.is_even()
+ }
+
+ /// Returns `true` if the number is not divisible by `2`.
+ #[inline]
+ fn is_odd(&self) -> bool {
+ self.data.is_odd()
+ }
+}
+
+impl Roots for BigInt {
+ fn nth_root(&self, n: u32) -> Self {
+ assert!(
+ !(self.is_negative() && n.is_even()),
+ "root of degree {} is imaginary",
+ n
+ );
+
+ BigInt::from_biguint(self.sign, self.data.nth_root(n))
+ }
+
+ fn sqrt(&self) -> Self {
+ assert!(!self.is_negative(), "square root is imaginary");
+
+ BigInt::from_biguint(self.sign, self.data.sqrt())
+ }
+
+ fn cbrt(&self) -> Self {
+ BigInt::from_biguint(self.sign, self.data.cbrt())
+ }
+}
+
+impl ToPrimitive for BigInt {
+ #[inline]
+ fn to_i64(&self) -> Option<i64> {
+ match self.sign {
+ Plus => self.data.to_i64(),
+ NoSign => Some(0),
+ Minus => self.data.to_u64().and_then(|n| {
+ let m: u64 = 1 << 63;
+ if n < m {
+ Some(-(n as i64))
+ } else if n == m {
+ Some(i64::MIN)
+ } else {
+ None
+ }
+ }),
+ }
+ }
+
+ #[inline]
+ #[cfg(has_i128)]
+ fn to_i128(&self) -> Option<i128> {
+ match self.sign {
+ Plus => self.data.to_i128(),
+ NoSign => Some(0),
+ Minus => self.data.to_u128().and_then(|n| {
+ let m: u128 = 1 << 127;
+ if n < m {
+ Some(-(n as i128))
+ } else if n == m {
+ Some(i128::MIN)
+ } else {
+ None
+ }
+ }),
+ }
+ }
+
+ #[inline]
+ fn to_u64(&self) -> Option<u64> {
+ match self.sign {
+ Plus => self.data.to_u64(),
+ NoSign => Some(0),
+ Minus => None,
+ }
+ }
+
+ #[inline]
+ #[cfg(has_i128)]
+ fn to_u128(&self) -> Option<u128> {
+ match self.sign {
+ Plus => self.data.to_u128(),
+ NoSign => Some(0),
+ Minus => None,
+ }
+ }
+
+ #[inline]
+ fn to_f32(&self) -> Option<f32> {
+ self.data
+ .to_f32()
+ .map(|n| if self.sign == Minus { -n } else { n })
+ }
+
+ #[inline]
+ fn to_f64(&self) -> Option<f64> {
+ self.data
+ .to_f64()
+ .map(|n| if self.sign == Minus { -n } else { n })
+ }
+}
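+
+// Editor's note (illustrative, not part of the upstream source): the
+// `1 << 63` boundary in `to_i64` exists because the magnitude of `i64::MIN`
+// is exactly 2^63, one more than `i64::MAX`:
+//
+//     use num_bigint::BigInt;
+//     use num_traits::ToPrimitive;
+//
+//     let min = BigInt::from(i64::min_value());
+//     assert_eq!(min.to_i64(), Some(i64::min_value()));
+//     assert_eq!((min - 1u32).to_i64(), None); // magnitude 2^63 + 1 overflows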
+
+impl FromPrimitive for BigInt {
+ #[inline]
+ fn from_i64(n: i64) -> Option<BigInt> {
+ Some(BigInt::from(n))
+ }
+
+ #[inline]
+ #[cfg(has_i128)]
+ fn from_i128(n: i128) -> Option<BigInt> {
+ Some(BigInt::from(n))
+ }
+
+ #[inline]
+ fn from_u64(n: u64) -> Option<BigInt> {
+ Some(BigInt::from(n))
+ }
+
+ #[inline]
+ #[cfg(has_i128)]
+ fn from_u128(n: u128) -> Option<BigInt> {
+ Some(BigInt::from(n))
+ }
+
+ #[inline]
+ fn from_f64(n: f64) -> Option<BigInt> {
+ if n >= 0.0 {
+ BigUint::from_f64(n).map(|x| BigInt::from_biguint(Plus, x))
+ } else {
+ BigUint::from_f64(-n).map(|x| BigInt::from_biguint(Minus, x))
+ }
+ }
+}
+
+impl From<i64> for BigInt {
+ #[inline]
+ fn from(n: i64) -> Self {
+ if n >= 0 {
+ BigInt::from(n as u64)
+ } else {
+ let u = u64::MAX - (n as u64) + 1;
+ BigInt {
+ sign: Minus,
+ data: BigUint::from(u),
+ }
+ }
+ }
+}
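+
+// Editor's note (not part of the upstream source): for negative `n`, the
+// magnitude computed above is the two's-complement negation, i.e.
+// `u64::MAX - (n as u64) + 1 == (n as u64).wrapping_neg()`; n = -1 yields
+// magnitude 1, and n = i64::MIN yields 2^63.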
+
+#[cfg(has_i128)]
+impl From<i128> for BigInt {
+ #[inline]
+ fn from(n: i128) -> Self {
+ if n >= 0 {
+ BigInt::from(n as u128)
+ } else {
+ let u = u128::MAX - (n as u128) + 1;
+ BigInt {
+ sign: Minus,
+ data: BigUint::from(u),
+ }
+ }
+ }
+}
+
+macro_rules! impl_bigint_from_int {
+ ($T:ty) => {
+ impl From<$T> for BigInt {
+ #[inline]
+ fn from(n: $T) -> Self {
+ BigInt::from(n as i64)
+ }
+ }
+ };
+}
+
+impl_bigint_from_int!(i8);
+impl_bigint_from_int!(i16);
+impl_bigint_from_int!(i32);
+impl_bigint_from_int!(isize);
+
+impl From<u64> for BigInt {
+ #[inline]
+ fn from(n: u64) -> Self {
+ if n > 0 {
+ BigInt {
+ sign: Plus,
+ data: BigUint::from(n),
+ }
+ } else {
+ BigInt::zero()
+ }
+ }
+}
+
+#[cfg(has_i128)]
+impl From<u128> for BigInt {
+ #[inline]
+ fn from(n: u128) -> Self {
+ if n > 0 {
+ BigInt {
+ sign: Plus,
+ data: BigUint::from(n),
+ }
+ } else {
+ BigInt::zero()
+ }
+ }
+}
+
+macro_rules! impl_bigint_from_uint {
+ ($T:ty) => {
+ impl From<$T> for BigInt {
+ #[inline]
+ fn from(n: $T) -> Self {
+ BigInt::from(n as u64)
+ }
+ }
+ };
+}
+
+impl_bigint_from_uint!(u8);
+impl_bigint_from_uint!(u16);
+impl_bigint_from_uint!(u32);
+impl_bigint_from_uint!(usize);
+
+impl From<BigUint> for BigInt {
+ #[inline]
+ fn from(n: BigUint) -> Self {
+ if n.is_zero() {
+ BigInt::zero()
+ } else {
+ BigInt {
+ sign: Plus,
+ data: n,
+ }
+ }
+ }
+}
+
+impl IntDigits for BigInt {
+ #[inline]
+ fn digits(&self) -> &[BigDigit] {
+ self.data.digits()
+ }
+ #[inline]
+ fn digits_mut(&mut self) -> &mut Vec<BigDigit> {
+ self.data.digits_mut()
+ }
+ #[inline]
+ fn normalize(&mut self) {
+ self.data.normalize();
+ if self.data.is_zero() {
+ self.sign = NoSign;
+ }
+ }
+ #[inline]
+ fn capacity(&self) -> usize {
+ self.data.capacity()
+ }
+ #[inline]
+ fn len(&self) -> usize {
+ self.data.len()
+ }
+}
+
+#[cfg(feature = "serde")]
+impl serde::Serialize for BigInt {
+ fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
+ where
+ S: serde::Serializer,
+ {
+ // Note: do not change the serialization format, or it may break
+ // forward and backward compatibility of serialized data!
+ (self.sign, &self.data).serialize(serializer)
+ }
+}
+
+#[cfg(feature = "serde")]
+impl<'de> serde::Deserialize<'de> for BigInt {
+ fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
+ where
+ D: serde::Deserializer<'de>,
+ {
+ let (sign, data) = serde::Deserialize::deserialize(deserializer)?;
+ Ok(BigInt::from_biguint(sign, data))
+ }
+}
+
+/// A generic trait for converting a value to a `BigInt`. This may return
+/// `None` when converting from `f32` or `f64`, and will always succeed
+/// when converting from any primitive signed or unsigned integer, or from a
+/// `BigUint`.
+pub trait ToBigInt {
+ /// Converts the value of `self` to a `BigInt`.
+ fn to_bigint(&self) -> Option<BigInt>;
+}
+
+impl ToBigInt for BigInt {
+ #[inline]
+ fn to_bigint(&self) -> Option<BigInt> {
+ Some(self.clone())
+ }
+}
+
+impl ToBigInt for BigUint {
+ #[inline]
+ fn to_bigint(&self) -> Option<BigInt> {
+ if self.is_zero() {
+ Some(Zero::zero())
+ } else {
+ Some(BigInt {
+ sign: Plus,
+ data: self.clone(),
+ })
+ }
+ }
+}
+
+impl biguint::ToBigUint for BigInt {
+ #[inline]
+ fn to_biguint(&self) -> Option<BigUint> {
+ match self.sign() {
+ Plus => Some(self.data.clone()),
+ NoSign => Some(Zero::zero()),
+ Minus => None,
+ }
+ }
+}
+
+macro_rules! impl_to_bigint {
+ ($T:ty, $from_ty:path) => {
+ impl ToBigInt for $T {
+ #[inline]
+ fn to_bigint(&self) -> Option<BigInt> {
+ $from_ty(*self)
+ }
+ }
+ };
+}
+
+impl_to_bigint!(isize, FromPrimitive::from_isize);
+impl_to_bigint!(i8, FromPrimitive::from_i8);
+impl_to_bigint!(i16, FromPrimitive::from_i16);
+impl_to_bigint!(i32, FromPrimitive::from_i32);
+impl_to_bigint!(i64, FromPrimitive::from_i64);
+#[cfg(has_i128)]
+impl_to_bigint!(i128, FromPrimitive::from_i128);
+
+impl_to_bigint!(usize, FromPrimitive::from_usize);
+impl_to_bigint!(u8, FromPrimitive::from_u8);
+impl_to_bigint!(u16, FromPrimitive::from_u16);
+impl_to_bigint!(u32, FromPrimitive::from_u32);
+impl_to_bigint!(u64, FromPrimitive::from_u64);
+#[cfg(has_i128)]
+impl_to_bigint!(u128, FromPrimitive::from_u128);
+
+impl_to_bigint!(f32, FromPrimitive::from_f32);
+impl_to_bigint!(f64, FromPrimitive::from_f64);
+
+impl BigInt {
+ /// Creates and initializes a BigInt.
+ ///
+ /// The digits are in little-endian base 2<sup>32</sup>.
+ #[inline]
+ pub fn new(sign: Sign, digits: Vec<u32>) -> BigInt {
+ BigInt::from_biguint(sign, BigUint::new(digits))
+ }
+
+ /// Creates and initializes a `BigInt`.
+ ///
+ /// The digits are in little-endian base 2<sup>32</sup>.
+ #[inline]
+ pub fn from_biguint(mut sign: Sign, mut data: BigUint) -> BigInt {
+ if sign == NoSign {
+ data.assign_from_slice(&[]);
+ } else if data.is_zero() {
+ sign = NoSign;
+ }
+
+ BigInt {
+ sign: sign,
+ data: data,
+ }
+ }
+
+ /// Creates and initializes a `BigInt`.
+ #[inline]
+ pub fn from_slice(sign: Sign, slice: &[u32]) -> BigInt {
+ BigInt::from_biguint(sign, BigUint::from_slice(slice))
+ }
+
+ /// Reinitializes a `BigInt`.
+ #[inline]
+ pub fn assign_from_slice(&mut self, sign: Sign, slice: &[u32]) {
+ if sign == NoSign {
+ self.data.assign_from_slice(&[]);
+ self.sign = NoSign;
+ } else {
+ self.data.assign_from_slice(slice);
+ self.sign = match self.data.is_zero() {
+ true => NoSign,
+ false => sign,
+ }
+ }
+ }
+
+ /// Creates and initializes a `BigInt`.
+ ///
+ /// The bytes are in big-endian byte order.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use num_bigint::{BigInt, Sign};
+ ///
+ /// assert_eq!(BigInt::from_bytes_be(Sign::Plus, b"A"),
+ /// BigInt::parse_bytes(b"65", 10).unwrap());
+ /// assert_eq!(BigInt::from_bytes_be(Sign::Plus, b"AA"),
+ /// BigInt::parse_bytes(b"16705", 10).unwrap());
+ /// assert_eq!(BigInt::from_bytes_be(Sign::Plus, b"AB"),
+ /// BigInt::parse_bytes(b"16706", 10).unwrap());
+ /// assert_eq!(BigInt::from_bytes_be(Sign::Plus, b"Hello world!"),
+ /// BigInt::parse_bytes(b"22405534230753963835153736737", 10).unwrap());
+ /// ```
+ #[inline]
+ pub fn from_bytes_be(sign: Sign, bytes: &[u8]) -> BigInt {
+ BigInt::from_biguint(sign, BigUint::from_bytes_be(bytes))
+ }
+
+ /// Creates and initializes a `BigInt`.
+ ///
+ /// The bytes are in little-endian byte order.
+ #[inline]
+ pub fn from_bytes_le(sign: Sign, bytes: &[u8]) -> BigInt {
+ BigInt::from_biguint(sign, BigUint::from_bytes_le(bytes))
+ }
+
+ /// Creates and initializes a `BigInt` from an array of bytes in
+ /// two's complement binary representation.
+ ///
+ /// The digits are in big-endian base 2<sup>8</sup>.
+ #[inline]
+ pub fn from_signed_bytes_be(digits: &[u8]) -> BigInt {
+ let sign = match digits.first() {
+ Some(v) if *v > 0x7f => Sign::Minus,
+ Some(_) => Sign::Plus,
+ None => return BigInt::zero(),
+ };
+
+ if sign == Sign::Minus {
+ // two's-complement the content to retrieve the magnitude
+ let mut digits = Vec::from(digits);
+ twos_complement_be(&mut digits);
+ BigInt::from_biguint(sign, BigUint::from_bytes_be(&*digits))
+ } else {
+ BigInt::from_biguint(sign, BigUint::from_bytes_be(digits))
+ }
+ }
+
+ /// Creates and initializes a `BigInt` from an array of bytes in two's complement.
+ ///
+ /// The digits are in little-endian base 2<sup>8</sup>.
+ #[inline]
+ pub fn from_signed_bytes_le(digits: &[u8]) -> BigInt {
+ let sign = match digits.last() {
+ Some(v) if *v > 0x7f => Sign::Minus,
+ Some(_) => Sign::Plus,
+ None => return BigInt::zero(),
+ };
+
+ if sign == Sign::Minus {
+ // two's-complement the content to retrieve the magnitude
+ let mut digits = Vec::from(digits);
+ twos_complement_le(&mut digits);
+ BigInt::from_biguint(sign, BigUint::from_bytes_le(&*digits))
+ } else {
+ BigInt::from_biguint(sign, BigUint::from_bytes_le(digits))
+ }
+ }
+
+ /// Creates and initializes a `BigInt`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use num_bigint::{BigInt, ToBigInt};
+ ///
+ /// assert_eq!(BigInt::parse_bytes(b"1234", 10), ToBigInt::to_bigint(&1234));
+ /// assert_eq!(BigInt::parse_bytes(b"ABCD", 16), ToBigInt::to_bigint(&0xABCD));
+ /// assert_eq!(BigInt::parse_bytes(b"G", 16), None);
+ /// ```
+ #[inline]
+ pub fn parse_bytes(buf: &[u8], radix: u32) -> Option<BigInt> {
+ str::from_utf8(buf)
+ .ok()
+ .and_then(|s| BigInt::from_str_radix(s, radix).ok())
+ }
+
+ /// Creates and initializes a `BigInt`. Each u8 of the input slice is
+ /// interpreted as one digit of the number
+ /// and must therefore be less than `radix`.
+ ///
+ /// The bytes are in big-endian byte order.
+ /// `radix` must be in the range `2...256`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use num_bigint::{BigInt, Sign};
+ ///
+ /// let inbase190 = vec![15, 33, 125, 12, 14];
+ /// let a = BigInt::from_radix_be(Sign::Minus, &inbase190, 190).unwrap();
+ /// assert_eq!(a.to_radix_be(190), (Sign::Minus, inbase190));
+ /// ```
+ pub fn from_radix_be(sign: Sign, buf: &[u8], radix: u32) -> Option<BigInt> {
+ BigUint::from_radix_be(buf, radix).map(|u| BigInt::from_biguint(sign, u))
+ }
+
+ /// Creates and initializes a `BigInt`. Each u8 of the input slice is
+ /// interpreted as one digit of the number
+ /// and must therefore be less than `radix`.
+ ///
+ /// The bytes are in little-endian byte order.
+ /// `radix` must be in the range `2...256`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use num_bigint::{BigInt, Sign};
+ ///
+ /// let inbase190 = vec![14, 12, 125, 33, 15];
+ /// let a = BigInt::from_radix_le(Sign::Minus, &inbase190, 190).unwrap();
+ /// assert_eq!(a.to_radix_le(190), (Sign::Minus, inbase190));
+ /// ```
+ pub fn from_radix_le(sign: Sign, buf: &[u8], radix: u32) -> Option<BigInt> {
+ BigUint::from_radix_le(buf, radix).map(|u| BigInt::from_biguint(sign, u))
+ }
+
+ /// Returns the sign and the byte representation of the `BigInt` in big-endian byte order.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use num_bigint::{ToBigInt, Sign};
+ ///
+ /// let i = -1125.to_bigint().unwrap();
+ /// assert_eq!(i.to_bytes_be(), (Sign::Minus, vec![4, 101]));
+ /// ```
+ #[inline]
+ pub fn to_bytes_be(&self) -> (Sign, Vec<u8>) {
+ (self.sign, self.data.to_bytes_be())
+ }
+
+ /// Returns the sign and the byte representation of the `BigInt` in little-endian byte order.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use num_bigint::{ToBigInt, Sign};
+ ///
+ /// let i = -1125.to_bigint().unwrap();
+ /// assert_eq!(i.to_bytes_le(), (Sign::Minus, vec![101, 4]));
+ /// ```
+ #[inline]
+ pub fn to_bytes_le(&self) -> (Sign, Vec<u8>) {
+ (self.sign, self.data.to_bytes_le())
+ }
+
+ /// Returns the two's complement byte representation of the `BigInt` in big-endian byte order.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use num_bigint::ToBigInt;
+ ///
+ /// let i = -1125.to_bigint().unwrap();
+ /// assert_eq!(i.to_signed_bytes_be(), vec![251, 155]);
+ /// ```
+ #[inline]
+ pub fn to_signed_bytes_be(&self) -> Vec<u8> {
+ let mut bytes = self.data.to_bytes_be();
+ let first_byte = bytes.first().map(|v| *v).unwrap_or(0);
+ if first_byte > 0x7f
+ && !(first_byte == 0x80
+ && bytes.iter().skip(1).all(Zero::is_zero)
+ && self.sign == Sign::Minus)
+ {
+ // msb used by magnitude, extend by 1 byte
+ bytes.insert(0, 0);
+ }
+ if self.sign == Sign::Minus {
+ twos_complement_be(&mut bytes);
+ }
+ bytes
+ }
+
+ /// Returns the two's complement byte representation of the `BigInt` in little-endian byte order.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use num_bigint::ToBigInt;
+ ///
+ /// let i = -1125.to_bigint().unwrap();
+ /// assert_eq!(i.to_signed_bytes_le(), vec![155, 251]);
+ /// ```
+ #[inline]
+ pub fn to_signed_bytes_le(&self) -> Vec<u8> {
+ let mut bytes = self.data.to_bytes_le();
+ let last_byte = bytes.last().map(|v| *v).unwrap_or(0);
+ if last_byte > 0x7f
+ && !(last_byte == 0x80
+ && bytes.iter().rev().skip(1).all(Zero::is_zero)
+ && self.sign == Sign::Minus)
+ {
+ // msb used by magnitude, extend by 1 byte
+ bytes.push(0);
+ }
+ if self.sign == Sign::Minus {
+ twos_complement_le(&mut bytes);
+ }
+ bytes
+ }
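+
+ // Editor's note: an illustrative round trip through the two's-complement
+ // conversions above (not part of the upstream source):
+ //
+ //     use num_bigint::BigInt;
+ //
+ //     let i = BigInt::from(-1125);
+ //     assert_eq!(i.to_signed_bytes_le(), vec![155, 251]);
+ //     assert_eq!(BigInt::from_signed_bytes_le(&[155, 251]), i);
+ //     // a minimal negative value needs no extra sign byte:
+ //     assert_eq!(BigInt::from(-128).to_signed_bytes_be(), vec![0x80]);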
+
+ /// Returns the integer formatted as a string in the given radix.
+ /// `radix` must be in the range `2...36`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use num_bigint::BigInt;
+ ///
+ /// let i = BigInt::parse_bytes(b"ff", 16).unwrap();
+ /// assert_eq!(i.to_str_radix(16), "ff");
+ /// ```
+ #[inline]
+ pub fn to_str_radix(&self, radix: u32) -> String {
+ let mut v = to_str_radix_reversed(&self.data, radix);
+
+ if self.is_negative() {
+ v.push(b'-');
+ }
+
+ v.reverse();
+ unsafe { String::from_utf8_unchecked(v) }
+ }
+
+ /// Returns the integer in the requested base in big-endian digit order.
+ /// The output is not given in a human-readable alphabet but as
+ /// zero-based u8 digit values.
+ /// `radix` must be in the range `2...256`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use num_bigint::{BigInt, Sign};
+ ///
+ /// assert_eq!(BigInt::from(-0xFFFFi64).to_radix_be(159),
+ /// (Sign::Minus, vec![2, 94, 27]));
+ /// // 0xFFFF = 65535 = 2*(159^2) + 94*159 + 27
+ /// ```
+ #[inline]
+ pub fn to_radix_be(&self, radix: u32) -> (Sign, Vec<u8>) {
+ (self.sign, self.data.to_radix_be(radix))
+ }
+
+ /// Returns the integer in the requested base in little-endian digit order.
+ /// The output is not given in a human-readable alphabet but as
+ /// zero-based u8 digit values.
+ /// `radix` must be in the range `2...256`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use num_bigint::{BigInt, Sign};
+ ///
+ /// assert_eq!(BigInt::from(-0xFFFFi64).to_radix_le(159),
+ /// (Sign::Minus, vec![27, 94, 2]));
+ /// // 0xFFFF = 65535 = 27 + 94*159 + 2*(159^2)
+ /// ```
+ #[inline]
+ pub fn to_radix_le(&self, radix: u32) -> (Sign, Vec<u8>) {
+ (self.sign, self.data.to_radix_le(radix))
+ }
+
+ /// Returns the sign of the `BigInt` as a `Sign`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use num_bigint::{ToBigInt, Sign};
+ ///
+ /// assert_eq!(ToBigInt::to_bigint(&1234).unwrap().sign(), Sign::Plus);
+ /// assert_eq!(ToBigInt::to_bigint(&-4321).unwrap().sign(), Sign::Minus);
+ /// assert_eq!(ToBigInt::to_bigint(&0).unwrap().sign(), Sign::NoSign);
+ /// ```
+ #[inline]
+ pub fn sign(&self) -> Sign {
+ self.sign
+ }
+
+ /// Determines the fewest bits necessary to express the `BigInt`,
+ /// not including the sign.
+ #[inline]
+ pub fn bits(&self) -> usize {
+ self.data.bits()
+ }
+
+ /// Converts this `BigInt` into a `BigUint`, if it's not negative.
+ #[inline]
+ pub fn to_biguint(&self) -> Option<BigUint> {
+ match self.sign {
+ Plus => Some(self.data.clone()),
+ NoSign => Some(Zero::zero()),
+ Minus => None,
+ }
+ }
+
+ #[inline]
+ pub fn checked_add(&self, v: &BigInt) -> Option<BigInt> {
+ Some(self.add(v))
+ }
+
+ #[inline]
+ pub fn checked_sub(&self, v: &BigInt) -> Option<BigInt> {
+ Some(self.sub(v))
+ }
+
+ #[inline]
+ pub fn checked_mul(&self, v: &BigInt) -> Option<BigInt> {
+ Some(self.mul(v))
+ }
+
+ #[inline]
+ pub fn checked_div(&self, v: &BigInt) -> Option<BigInt> {
+ if v.is_zero() {
+ return None;
+ }
+ Some(self.div(v))
+ }
+
+ /// Returns `(self ^ exponent) mod modulus`
+ ///
+ /// Note that this rounds like `mod_floor`, not like the `%` operator,
+ /// which makes a difference when given a negative `self` or `modulus`.
+ /// The result will be in the interval `[0, modulus)` for `modulus > 0`,
+ /// or in the interval `(modulus, 0]` for `modulus < 0`.
+ ///
+ /// Panics if the exponent is negative or the modulus is zero.
+ pub fn modpow(&self, exponent: &Self, modulus: &Self) -> Self {
+ assert!(
+ !exponent.is_negative(),
+ "negative exponentiation is not supported!"
+ );
+ assert!(!modulus.is_zero(), "divide by zero!");
+
+ let result = self.data.modpow(&exponent.data, &modulus.data);
+ if result.is_zero() {
+ return BigInt::zero();
+ }
+
+ // The sign of the result follows the modulus, like `mod_floor`.
+ let (sign, mag) = match (self.is_negative(), modulus.is_negative()) {
+ (false, false) => (Plus, result),
+ (true, false) => (Plus, &modulus.data - result),
+ (false, true) => (Minus, &modulus.data - result),
+ (true, true) => (Minus, result),
+ };
+ BigInt::from_biguint(sign, mag)
+ }
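+
+ // Editor's note: an illustrative use of `modpow` (not part of the upstream
+ // source), showing the floor-style result for a negative base, which
+ // differs from what the `%` operator would give:
+ //
+ //     use num_bigint::BigInt;
+ //
+ //     let (base, exp, m) = (BigInt::from(-5), BigInt::from(3), BigInt::from(7));
+ //     // (-5)^3 = -125, and -125 mod_floor 7 = 1 (while -125 % 7 = -6)
+ //     assert_eq!(base.modpow(&exp, &m), BigInt::from(1));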
+
+ /// Returns the truncated principal square root of `self` --
+ /// see [Roots::sqrt](https://docs.rs/num-integer/0.1/num_integer/trait.Roots.html#method.sqrt).
+ pub fn sqrt(&self) -> Self {
+ Roots::sqrt(self)
+ }
+
+ /// Returns the truncated principal cube root of `self` --
+ /// see [Roots::cbrt](https://docs.rs/num-integer/0.1/num_integer/trait.Roots.html#method.cbrt).
+ pub fn cbrt(&self) -> Self {
+ Roots::cbrt(self)
+ }
+
+ /// Returns the truncated principal `n`th root of `self` --
+ /// See [Roots::nth_root](https://docs.rs/num-integer/0.1/num_integer/trait.Roots.html#tymethod.nth_root).
+ pub fn nth_root(&self, n: u32) -> Self {
+ Roots::nth_root(self, n)
+ }
+}
+
+impl_sum_iter_type!(BigInt);
+impl_product_iter_type!(BigInt);
+
+/// Perform in-place two's complement of the given binary representation,
+/// in little-endian byte order.
+#[inline]
+fn twos_complement_le(digits: &mut [u8]) {
+ twos_complement(digits)
+}
+
+/// Perform in-place two's complement of the given binary representation
+/// in big-endian byte order.
+#[inline]
+fn twos_complement_be(digits: &mut [u8]) {
+ twos_complement(digits.iter_mut().rev())
+}
+
+/// Perform in-place two's complement of the given digit iterator
+/// starting from the least significant byte.
+#[inline]
+fn twos_complement<'a, I>(digits: I)
+where
+ I: IntoIterator<Item = &'a mut u8>,
+{
+ let mut carry = true;
+ for d in digits {
+ *d = d.not();
+ if carry {
+ *d = d.wrapping_add(1);
+ carry = d.is_zero();
+ }
+ }
+}
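+
+// Editor's note: a worked example of the helper above (not part of the
+// upstream source). Starting from [0x9b, 0xfb], the little-endian two's
+// complement of -1125:
+//
+//     !0x9b = 0x64, plus the initial carry = 0x65, no carry out;
+//     !0xfb = 0x04, no carry left to add;
+//
+// giving [0x65, 0x04], the little-endian magnitude 1125.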
+
+#[test]
+fn test_from_biguint() {
+ fn check(inp_s: Sign, inp_n: usize, ans_s: Sign, ans_n: usize) {
+ let inp = BigInt::from_biguint(inp_s, FromPrimitive::from_usize(inp_n).unwrap());
+ let ans = BigInt {
+ sign: ans_s,
+ data: FromPrimitive::from_usize(ans_n).unwrap(),
+ };
+ assert_eq!(inp, ans);
+ }
+ check(Plus, 1, Plus, 1);
+ check(Plus, 0, NoSign, 0);
+ check(Minus, 1, Minus, 1);
+ check(NoSign, 1, NoSign, 0);
+}
+
+#[test]
+fn test_from_slice() {
+ fn check(inp_s: Sign, inp_n: u32, ans_s: Sign, ans_n: u32) {
+ let inp = BigInt::from_slice(inp_s, &[inp_n]);
+ let ans = BigInt {
+ sign: ans_s,
+ data: FromPrimitive::from_u32(ans_n).unwrap(),
+ };
+ assert_eq!(inp, ans);
+ }
+ check(Plus, 1, Plus, 1);
+ check(Plus, 0, NoSign, 0);
+ check(Minus, 1, Minus, 1);
+ check(NoSign, 1, NoSign, 0);
+}
+
+#[test]
+fn test_assign_from_slice() {
+ fn check(inp_s: Sign, inp_n: u32, ans_s: Sign, ans_n: u32) {
+ let mut inp = BigInt::from_slice(Minus, &[2627_u32, 0_u32, 9182_u32, 42_u32]);
+ inp.assign_from_slice(inp_s, &[inp_n]);
+ let ans = BigInt {
+ sign: ans_s,
+ data: FromPrimitive::from_u32(ans_n).unwrap(),
+ };
+ assert_eq!(inp, ans);
+ }
+ check(Plus, 1, Plus, 1);
+ check(Plus, 0, NoSign, 0);
+ check(Minus, 1, Minus, 1);
+ check(NoSign, 1, NoSign, 0);
+}
diff --git a/third_party/rust/num-bigint/src/bigrand.rs b/third_party/rust/num-bigint/src/bigrand.rs
new file mode 100644
index 0000000000..4a13b29df4
--- /dev/null
+++ b/third_party/rust/num-bigint/src/bigrand.rs
@@ -0,0 +1,218 @@
+//! Randomization of big integers
+
+use rand::distributions::uniform::{SampleUniform, UniformSampler};
+use rand::prelude::*;
+use rand::AsByteSliceMut;
+
+use BigInt;
+use BigUint;
+use Sign::*;
+
+use big_digit::BigDigit;
+use bigint::{into_magnitude, magnitude};
+
+use integer::Integer;
+use traits::Zero;
+
+pub trait RandBigInt {
+ /// Generate a random `BigUint` of the given bit size.
+ fn gen_biguint(&mut self, bit_size: usize) -> BigUint;
+
+ /// Generate a random `BigInt` of the given bit size.
+ fn gen_bigint(&mut self, bit_size: usize) -> BigInt;
+
+ /// Generate a random `BigUint` less than the given bound. Fails
+ /// when the bound is zero.
+ fn gen_biguint_below(&mut self, bound: &BigUint) -> BigUint;
+
+ /// Generate a random `BigUint` within the given range. The lower
+ /// bound is inclusive; the upper bound is exclusive. Fails when
+ /// the upper bound is not greater than the lower bound.
+ fn gen_biguint_range(&mut self, lbound: &BigUint, ubound: &BigUint) -> BigUint;
+
+ /// Generate a random `BigInt` within the given range. The lower
+ /// bound is inclusive; the upper bound is exclusive. Fails when
+ /// the upper bound is not greater than the lower bound.
+ fn gen_bigint_range(&mut self, lbound: &BigInt, ubound: &BigInt) -> BigInt;
+}
+
+impl<R: Rng + ?Sized> RandBigInt for R {
+ fn gen_biguint(&mut self, bit_size: usize) -> BigUint {
+ use super::big_digit::BITS;
+ let (digits, rem) = bit_size.div_rem(&BITS);
+ let mut data = vec![BigDigit::default(); digits + (rem > 0) as usize];
+ // `fill_bytes` is faster than many `gen::<u32>` calls
+ self.fill_bytes(data[..].as_byte_slice_mut());
+ // Swap bytes per the `Rng::fill` source. This might be
+ // unnecessary if reproducibility across architectures is not
+ // desired.
+ data.to_le();
+ if rem > 0 {
+ data[digits] >>= BITS - rem;
+ }
+ BigUint::new(data)
+ }
+
+ fn gen_bigint(&mut self, bit_size: usize) -> BigInt {
+ loop {
+ // Generate a random BigUint...
+ let biguint = self.gen_biguint(bit_size);
+ // ...and then randomly assign it a Sign...
+ let sign = if biguint.is_zero() {
+ // ...except that if the BigUint is zero, we need to try
+ // again with probability 0.5. This is because otherwise,
+ // the probability of generating a zero BigInt would be
+ // double that of any other number.
+ if self.gen() {
+ continue;
+ } else {
+ NoSign
+ }
+ } else if self.gen() {
+ Plus
+ } else {
+ Minus
+ };
+ return BigInt::from_biguint(sign, biguint);
+ }
+ }
+
+ fn gen_biguint_below(&mut self, bound: &BigUint) -> BigUint {
+ assert!(!bound.is_zero());
+ let bits = bound.bits();
+ loop {
+ let n = self.gen_biguint(bits);
+ if n < *bound {
+ return n;
+ }
+ }
+ }
+
+ fn gen_biguint_range(&mut self, lbound: &BigUint, ubound: &BigUint) -> BigUint {
+ assert!(*lbound < *ubound);
+ if lbound.is_zero() {
+ self.gen_biguint_below(ubound)
+ } else {
+ lbound + self.gen_biguint_below(&(ubound - lbound))
+ }
+ }
+
+ fn gen_bigint_range(&mut self, lbound: &BigInt, ubound: &BigInt) -> BigInt {
+ assert!(*lbound < *ubound);
+ if lbound.is_zero() {
+ BigInt::from(self.gen_biguint_below(magnitude(&ubound)))
+ } else if ubound.is_zero() {
+ lbound + BigInt::from(self.gen_biguint_below(magnitude(&lbound)))
+ } else {
+ let delta = ubound - lbound;
+ lbound + BigInt::from(self.gen_biguint_below(magnitude(&delta)))
+ }
+ }
+}
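+
+// Editor's note: an illustrative use of the trait above (not part of the
+// upstream source); `thread_rng` comes from the `rand` crate:
+//
+//     use num_bigint::{BigInt, RandBigInt};
+//
+//     let mut rng = rand::thread_rng();
+//     let a = rng.gen_biguint(256); // at most 256 random bits
+//     let b = rng.gen_bigint_range(&BigInt::from(-10), &BigInt::from(10));
+//     assert!(BigInt::from(-10) <= b && b < BigInt::from(10));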
+
+/// The back-end implementing rand's `UniformSampler` for `BigUint`.
+#[derive(Clone, Debug)]
+pub struct UniformBigUint {
+ base: BigUint,
+ len: BigUint,
+}
+
+impl UniformSampler for UniformBigUint {
+ type X = BigUint;
+
+ #[inline]
+ fn new(low: Self::X, high: Self::X) -> Self {
+ assert!(low < high);
+ UniformBigUint {
+ len: high - &low,
+ base: low,
+ }
+ }
+
+ #[inline]
+ fn new_inclusive(low: Self::X, high: Self::X) -> Self {
+ assert!(low <= high);
+ Self::new(low, high + 1u32)
+ }
+
+ #[inline]
+ fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> Self::X {
+ &self.base + rng.gen_biguint_below(&self.len)
+ }
+
+ #[inline]
+ fn sample_single<R: Rng + ?Sized>(low: Self::X, high: Self::X, rng: &mut R) -> Self::X {
+ rng.gen_biguint_range(&low, &high)
+ }
+}
+
+impl SampleUniform for BigUint {
+ type Sampler = UniformBigUint;
+}
+
+/// The back-end implementing rand's `UniformSampler` for `BigInt`.
+#[derive(Clone, Debug)]
+pub struct UniformBigInt {
+ base: BigInt,
+ len: BigUint,
+}
+
+impl UniformSampler for UniformBigInt {
+ type X = BigInt;
+
+ #[inline]
+ fn new(low: Self::X, high: Self::X) -> Self {
+ assert!(low < high);
+ UniformBigInt {
+ len: into_magnitude(high - &low),
+ base: low,
+ }
+ }
+
+ #[inline]
+ fn new_inclusive(low: Self::X, high: Self::X) -> Self {
+ assert!(low <= high);
+ Self::new(low, high + 1u32)
+ }
+
+ #[inline]
+ fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> Self::X {
+ &self.base + BigInt::from(rng.gen_biguint_below(&self.len))
+ }
+
+ #[inline]
+ fn sample_single<R: Rng + ?Sized>(low: Self::X, high: Self::X, rng: &mut R) -> Self::X {
+ rng.gen_bigint_range(&low, &high)
+ }
+}
+
+impl SampleUniform for BigInt {
+ type Sampler = UniformBigInt;
+}
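+
+// Editor's note: with `SampleUniform` implemented, the generic range API of
+// the `rand` crate works for big integers too. An illustrative sketch (not
+// part of the upstream source; the exact constructor depends on the `rand`
+// version):
+//
+//     use num_bigint::BigUint;
+//     use rand::distributions::Uniform;
+//     use rand::prelude::*;
+//
+//     let die = Uniform::new(BigUint::from(1u32), BigUint::from(7u32));
+//     let roll = rand::thread_rng().sample(&die);
+//     assert!(BigUint::from(1u32) <= roll && roll < BigUint::from(7u32));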
+
+/// A random distribution for `BigUint` and `BigInt` values of a particular bit size.
+#[derive(Clone, Copy, Debug)]
+pub struct RandomBits {
+ bits: usize,
+}
+
+impl RandomBits {
+ #[inline]
+ pub fn new(bits: usize) -> RandomBits {
+ RandomBits { bits }
+ }
+}
+
+impl Distribution<BigUint> for RandomBits {
+ #[inline]
+ fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> BigUint {
+ rng.gen_biguint(self.bits)
+ }
+}
+
+impl Distribution<BigInt> for RandomBits {
+ #[inline]
+ fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> BigInt {
+ rng.gen_bigint(self.bits)
+ }
+}
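+
+// Editor's note: an illustrative use of `RandomBits` (not part of the
+// upstream source):
+//
+//     use num_bigint::{BigUint, RandomBits};
+//     use rand::prelude::*;
+//
+//     let mut rng = rand::thread_rng();
+//     let x: BigUint = rng.sample(RandomBits::new(128));
+//     assert!(x.bits() <= 128);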
diff --git a/third_party/rust/num-bigint/src/biguint.rs b/third_party/rust/num-bigint/src/biguint.rs
new file mode 100644
index 0000000000..e6e9fbcce5
--- /dev/null
+++ b/third_party/rust/num-bigint/src/biguint.rs
@@ -0,0 +1,3088 @@
+#[allow(deprecated, unused_imports)]
+use std::ascii::AsciiExt;
+use std::borrow::Cow;
+use std::cmp;
+use std::cmp::Ordering::{self, Equal, Greater, Less};
+use std::default::Default;
+use std::fmt;
+use std::iter::{Product, Sum};
+use std::mem;
+use std::ops::{
+ Add, AddAssign, BitAnd, BitAndAssign, BitOr, BitOrAssign, BitXor, BitXorAssign, Div, DivAssign,
+ Mul, MulAssign, Neg, Rem, RemAssign, Shl, ShlAssign, Shr, ShrAssign, Sub, SubAssign,
+};
+use std::str::{self, FromStr};
+use std::{f32, f64};
+use std::{u64, u8};
+
+#[cfg(feature = "serde")]
+use serde;
+
+use integer::{Integer, Roots};
+use traits::{
+ CheckedAdd, CheckedDiv, CheckedMul, CheckedSub, Float, FromPrimitive, Num, One, Pow,
+ ToPrimitive, Unsigned, Zero,
+};
+
+use big_digit::{self, BigDigit};
+
+#[path = "algorithms.rs"]
+mod algorithms;
+#[path = "monty.rs"]
+mod monty;
+
+use self::algorithms::{__add2, __sub2rev, add2, sub2, sub2rev};
+use self::algorithms::{biguint_shl, biguint_shr};
+use self::algorithms::{cmp_slice, fls, ilog2};
+use self::algorithms::{div_rem, div_rem_digit, div_rem_ref, rem_digit};
+use self::algorithms::{mac_with_carry, mul3, scalar_mul};
+use self::monty::monty_modpow;
+
+use UsizePromotion;
+
+use ParseBigIntError;
+
+#[cfg(feature = "quickcheck")]
+use quickcheck::{Arbitrary, Gen};
+
+/// A big unsigned integer type.
+#[derive(Clone, Debug, Hash)]
+pub struct BigUint {
+ data: Vec<BigDigit>,
+}
+
+#[cfg(feature = "quickcheck")]
+impl Arbitrary for BigUint {
+ fn arbitrary<G: Gen>(g: &mut G) -> Self {
+ // Use arbitrary from Vec
+ Self::new(Vec::<u32>::arbitrary(g))
+ }
+
+ #[allow(bare_trait_objects)] // `dyn` needs Rust 1.27 to parse, even when cfg-disabled
+ fn shrink(&self) -> Box<Iterator<Item = Self>> {
+ // Use shrinker from Vec
+ Box::new(self.data.shrink().map(|x| BigUint::new(x)))
+ }
+}
+
+impl PartialEq for BigUint {
+ #[inline]
+ fn eq(&self, other: &BigUint) -> bool {
+ match self.cmp(other) {
+ Equal => true,
+ _ => false,
+ }
+ }
+}
+impl Eq for BigUint {}
+
+impl PartialOrd for BigUint {
+ #[inline]
+ fn partial_cmp(&self, other: &BigUint) -> Option<Ordering> {
+ Some(self.cmp(other))
+ }
+}
+
+impl Ord for BigUint {
+ #[inline]
+ fn cmp(&self, other: &BigUint) -> Ordering {
+ cmp_slice(&self.data[..], &other.data[..])
+ }
+}
+
+impl Default for BigUint {
+ #[inline]
+ fn default() -> BigUint {
+ Zero::zero()
+ }
+}
+
+impl fmt::Display for BigUint {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ f.pad_integral(true, "", &self.to_str_radix(10))
+ }
+}
+
+impl fmt::LowerHex for BigUint {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ f.pad_integral(true, "0x", &self.to_str_radix(16))
+ }
+}
+
+impl fmt::UpperHex for BigUint {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ let mut s = self.to_str_radix(16);
+ s.make_ascii_uppercase();
+ f.pad_integral(true, "0x", &s)
+ }
+}
+
+impl fmt::Binary for BigUint {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ f.pad_integral(true, "0b", &self.to_str_radix(2))
+ }
+}
+
+impl fmt::Octal for BigUint {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ f.pad_integral(true, "0o", &self.to_str_radix(8))
+ }
+}
+
+impl FromStr for BigUint {
+ type Err = ParseBigIntError;
+
+ #[inline]
+ fn from_str(s: &str) -> Result<BigUint, ParseBigIntError> {
+ BigUint::from_str_radix(s, 10)
+ }
+}
+
+// Convert from a power of two radix (bits == ilog2(radix)) where bits evenly divides
+// BigDigit::BITS
+fn from_bitwise_digits_le(v: &[u8], bits: usize) -> BigUint {
+ debug_assert!(!v.is_empty() && bits <= 8 && big_digit::BITS % bits == 0);
+ debug_assert!(v.iter().all(|&c| BigDigit::from(c) < (1 << bits)));
+
+ let digits_per_big_digit = big_digit::BITS / bits;
+
+ let data = v
+ .chunks(digits_per_big_digit)
+ .map(|chunk| {
+ chunk
+ .iter()
+ .rev()
+ .fold(0, |acc, &c| (acc << bits) | BigDigit::from(c))
+ })
+ .collect();
+
+ BigUint::new(data)
+}
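+
+// Editor's note (illustrative, not part of the upstream source): with 32-bit
+// limbs and hex input, bits == 4 and digits_per_big_digit == 8, so eight
+// little-endian hex digits pack into one limb:
+//
+//     [0xf, 0xf, 0xf, 0xf, 0xf, 0xf, 0xf, 0xf] -> limb 0xffff_ffff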
+
+// Convert from a power of two radix (bits == ilog2(radix)) where bits doesn't evenly divide
+// BigDigit::BITS
+fn from_inexact_bitwise_digits_le(v: &[u8], bits: usize) -> BigUint {
+ debug_assert!(!v.is_empty() && bits <= 8 && big_digit::BITS % bits != 0);
+ debug_assert!(v.iter().all(|&c| BigDigit::from(c) < (1 << bits)));
+
+ let big_digits = (v.len() * bits + big_digit::BITS - 1) / big_digit::BITS;
+ let mut data = Vec::with_capacity(big_digits);
+
+ let mut d = 0;
+ let mut dbits = 0; // number of bits we currently have in d
+
+ // walk v accumulating bits in d; whenever we accumulate big_digit::BITS in d, spit out a
+ // big_digit:
+ for &c in v {
+ d |= BigDigit::from(c) << dbits;
+ dbits += bits;
+
+ if dbits >= big_digit::BITS {
+ data.push(d);
+ dbits -= big_digit::BITS;
+ // if dbits was > big_digit::BITS, we dropped some of the bits in c (they couldn't fit
+ // in d) - grab the bits we lost here:
+ d = BigDigit::from(c) >> (bits - dbits);
+ }
+ }
+
+ if dbits > 0 {
+ debug_assert!(dbits < big_digit::BITS);
+ data.push(d as BigDigit);
+ }
+
+ BigUint::new(data)
+}
+
+// Read big-endian radix digits
+fn from_radix_digits_be(v: &[u8], radix: u32) -> BigUint {
+ debug_assert!(!v.is_empty() && !radix.is_power_of_two());
+ debug_assert!(v.iter().all(|&c| u32::from(c) < radix));
+
+ // Estimate how big the result will be, so we can pre-allocate it.
+ let bits = f64::from(radix).log2() * v.len() as f64;
+ let big_digits = (bits / big_digit::BITS as f64).ceil();
+ let mut data = Vec::with_capacity(big_digits as usize);
+
+ let (base, power) = get_radix_base(radix);
+ let radix = radix as BigDigit;
+
+ let r = v.len() % power;
+ let i = if r == 0 { power } else { r };
+ let (head, tail) = v.split_at(i);
+
+ let first = head
+ .iter()
+ .fold(0, |acc, &d| acc * radix + BigDigit::from(d));
+ data.push(first);
+
+ debug_assert!(tail.len() % power == 0);
+ for chunk in tail.chunks(power) {
+ if data.last() != Some(&0) {
+ data.push(0);
+ }
+
+ let mut carry = 0;
+ for d in data.iter_mut() {
+ *d = mac_with_carry(0, *d, base, &mut carry);
+ }
+ debug_assert!(carry == 0);
+
+ let n = chunk
+ .iter()
+ .fold(0, |acc, &d| acc * radix + BigDigit::from(d));
+ add2(&mut data, &[n]);
+ }
+
+ BigUint::new(data)
+}
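+
+// Editor's note: a worked example of the chunked Horner evaluation above (not
+// part of the upstream source). For radix 10 with 32-bit limbs, `power` is 9
+// and `base` is 10^9, so the decimal digits of "1234567890123" split into a
+// 13 % 9 = 4 digit head plus one 9-digit chunk:
+//
+//     value = 1234 * 10^9 + 567890123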
+
+impl Num for BigUint {
+ type FromStrRadixErr = ParseBigIntError;
+
+ /// Creates and initializes a `BigUint`.
+ fn from_str_radix(s: &str, radix: u32) -> Result<BigUint, ParseBigIntError> {
+ assert!(2 <= radix && radix <= 36, "The radix must be within 2...36");
+ let mut s = s;
+ if s.starts_with('+') {
+ let tail = &s[1..];
+ if !tail.starts_with('+') {
+ s = tail
+ }
+ }
+
+ if s.is_empty() {
+ return Err(ParseBigIntError::empty());
+ }
+
+ if s.starts_with('_') {
+ // Must lead with a real digit!
+ return Err(ParseBigIntError::invalid());
+ }
+
+ // First normalize all characters to plain digit values
+ let mut v = Vec::with_capacity(s.len());
+ for b in s.bytes() {
+ #[allow(unknown_lints, ellipsis_inclusive_range_patterns)]
+ let d = match b {
+ b'0'...b'9' => b - b'0',
+ b'a'...b'z' => b - b'a' + 10,
+ b'A'...b'Z' => b - b'A' + 10,
+ b'_' => continue,
+ _ => u8::MAX,
+ };
+ if d < radix as u8 {
+ v.push(d);
+ } else {
+ return Err(ParseBigIntError::invalid());
+ }
+ }
+
+ let res = if radix.is_power_of_two() {
+ // Powers of two can use bitwise masks and shifting instead of multiplication
+ let bits = ilog2(radix);
+ v.reverse();
+ if big_digit::BITS % bits == 0 {
+ from_bitwise_digits_le(&v, bits)
+ } else {
+ from_inexact_bitwise_digits_le(&v, bits)
+ }
+ } else {
+ from_radix_digits_be(&v, radix)
+ };
+ Ok(res)
+ }
+}
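+
+// Editor's note: an illustrative use of `from_str_radix` (not part of the
+// upstream source), exercising the `+` prefix and `_` separators accepted
+// above:
+//
+//     use num_bigint::BigUint;
+//     use num_traits::Num;
+//
+//     let n = BigUint::from_str_radix("+1_000_000", 10).unwrap();
+//     assert_eq!(n, BigUint::from(1_000_000u32));
+//     assert!(BigUint::from_str_radix("_1", 10).is_err()); // must lead with a digit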
+
+forward_val_val_binop!(impl BitAnd for BigUint, bitand);
+forward_ref_val_binop!(impl BitAnd for BigUint, bitand);
+
+// do not use forward_ref_ref_binop_commutative! for bitand so that we can
+// clone the smaller value rather than the larger, avoiding over-allocation
+impl<'a, 'b> BitAnd<&'b BigUint> for &'a BigUint {
+ type Output = BigUint;
+
+ #[inline]
+ fn bitand(self, other: &BigUint) -> BigUint {
+ // forward to val-ref, choosing the smaller to clone
+ if self.data.len() <= other.data.len() {
+ self.clone() & other
+ } else {
+ other.clone() & self
+ }
+ }
+}
+
+forward_val_assign!(impl BitAndAssign for BigUint, bitand_assign);
+
+impl<'a> BitAnd<&'a BigUint> for BigUint {
+ type Output = BigUint;
+
+ #[inline]
+ fn bitand(mut self, other: &BigUint) -> BigUint {
+ self &= other;
+ self
+ }
+}
+impl<'a> BitAndAssign<&'a BigUint> for BigUint {
+ #[inline]
+ fn bitand_assign(&mut self, other: &BigUint) {
+ for (ai, &bi) in self.data.iter_mut().zip(other.data.iter()) {
+ *ai &= bi;
+ }
+ self.data.truncate(other.data.len());
+ self.normalize();
+ }
+}
+
+forward_all_binop_to_val_ref_commutative!(impl BitOr for BigUint, bitor);
+forward_val_assign!(impl BitOrAssign for BigUint, bitor_assign);
+
+impl<'a> BitOr<&'a BigUint> for BigUint {
+ type Output = BigUint;
+
+ fn bitor(mut self, other: &BigUint) -> BigUint {
+ self |= other;
+ self
+ }
+}
+impl<'a> BitOrAssign<&'a BigUint> for BigUint {
+ #[inline]
+ fn bitor_assign(&mut self, other: &BigUint) {
+ for (ai, &bi) in self.data.iter_mut().zip(other.data.iter()) {
+ *ai |= bi;
+ }
+ if other.data.len() > self.data.len() {
+ let extra = &other.data[self.data.len()..];
+ self.data.extend(extra.iter().cloned());
+ }
+ }
+}
+
+forward_all_binop_to_val_ref_commutative!(impl BitXor for BigUint, bitxor);
+forward_val_assign!(impl BitXorAssign for BigUint, bitxor_assign);
+
+impl<'a> BitXor<&'a BigUint> for BigUint {
+ type Output = BigUint;
+
+ fn bitxor(mut self, other: &BigUint) -> BigUint {
+ self ^= other;
+ self
+ }
+}
+impl<'a> BitXorAssign<&'a BigUint> for BigUint {
+ #[inline]
+ fn bitxor_assign(&mut self, other: &BigUint) {
+ for (ai, &bi) in self.data.iter_mut().zip(other.data.iter()) {
+ *ai ^= bi;
+ }
+ if other.data.len() > self.data.len() {
+ let extra = &other.data[self.data.len()..];
+ self.data.extend(extra.iter().cloned());
+ }
+ self.normalize();
+ }
+}
+
+impl Shl<usize> for BigUint {
+ type Output = BigUint;
+
+ #[inline]
+ fn shl(self, rhs: usize) -> BigUint {
+ biguint_shl(Cow::Owned(self), rhs)
+ }
+}
+impl<'a> Shl<usize> for &'a BigUint {
+ type Output = BigUint;
+
+ #[inline]
+ fn shl(self, rhs: usize) -> BigUint {
+ biguint_shl(Cow::Borrowed(self), rhs)
+ }
+}
+
+impl ShlAssign<usize> for BigUint {
+ #[inline]
+ fn shl_assign(&mut self, rhs: usize) {
+ let n = mem::replace(self, BigUint::zero());
+ *self = n << rhs;
+ }
+}
+
+impl Shr<usize> for BigUint {
+ type Output = BigUint;
+
+ #[inline]
+ fn shr(self, rhs: usize) -> BigUint {
+ biguint_shr(Cow::Owned(self), rhs)
+ }
+}
+impl<'a> Shr<usize> for &'a BigUint {
+ type Output = BigUint;
+
+ #[inline]
+ fn shr(self, rhs: usize) -> BigUint {
+ biguint_shr(Cow::Borrowed(self), rhs)
+ }
+}
+
+impl ShrAssign<usize> for BigUint {
+ #[inline]
+ fn shr_assign(&mut self, rhs: usize) {
+ let n = mem::replace(self, BigUint::zero());
+ *self = n >> rhs;
+ }
+}
+
+impl Zero for BigUint {
+ #[inline]
+ fn zero() -> BigUint {
+ BigUint::new(Vec::new())
+ }
+
+ #[inline]
+ fn set_zero(&mut self) {
+ self.data.clear();
+ }
+
+ #[inline]
+ fn is_zero(&self) -> bool {
+ self.data.is_empty()
+ }
+}
+
+impl One for BigUint {
+ #[inline]
+ fn one() -> BigUint {
+ BigUint::new(vec![1])
+ }
+
+ #[inline]
+ fn set_one(&mut self) {
+ self.data.clear();
+ self.data.push(1);
+ }
+
+ #[inline]
+ fn is_one(&self) -> bool {
+ self.data[..] == [1]
+ }
+}
+
+impl Unsigned for BigUint {}
+
+impl<'a> Pow<BigUint> for &'a BigUint {
+ type Output = BigUint;
+
+ #[inline]
+ fn pow(self, exp: BigUint) -> Self::Output {
+ self.pow(&exp)
+ }
+}
+
+impl<'a, 'b> Pow<&'b BigUint> for &'a BigUint {
+ type Output = BigUint;
+
+ #[inline]
+ fn pow(self, exp: &BigUint) -> Self::Output {
+ if self.is_one() || exp.is_zero() {
+ BigUint::one()
+ } else if self.is_zero() {
+ BigUint::zero()
+ } else if let Some(exp) = exp.to_u64() {
+ self.pow(exp)
+ } else {
+ // At this point, `self >= 2` and `exp >= 2⁶⁴`. The smallest possible
+ // result, `2.pow(2⁶⁴)`, has 2⁶⁴ bits = 2⁶¹ bytes: about 2.3 exabytes
+ // of memory!
+ panic!("memory overflow")
+ }
+ }
+}
+
+macro_rules! pow_impl {
+ ($T:ty) => {
+ impl<'a> Pow<$T> for &'a BigUint {
+ type Output = BigUint;
+
+ #[inline]
+ fn pow(self, mut exp: $T) -> Self::Output {
+ if exp == 0 {
+ return BigUint::one();
+ }
+ let mut base = self.clone();
+
+ while exp & 1 == 0 {
+ base = &base * &base;
+ exp >>= 1;
+ }
+
+ if exp == 1 {
+ return base;
+ }
+
+ let mut acc = base.clone();
+ while exp > 1 {
+ exp >>= 1;
+ base = &base * &base;
+ if exp & 1 == 1 {
+ acc = &acc * &base;
+ }
+ }
+ acc
+ }
+ }
+
+ impl<'a, 'b> Pow<&'b $T> for &'a BigUint {
+ type Output = BigUint;
+
+ #[inline]
+ fn pow(self, exp: &$T) -> Self::Output {
+ self.pow(*exp)
+ }
+ }
+ };
+}
+
+pow_impl!(u8);
+pow_impl!(u16);
+pow_impl!(u32);
+pow_impl!(u64);
+pow_impl!(usize);
+#[cfg(has_i128)]
+pow_impl!(u128);
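+
+// Editor's note (not part of the upstream source): the macro above is
+// exponentiation by squaring. As an illustrative trace for exp = 13 = 0b1101
+// (odd, so no trailing-zero squaring up front), the loop squares the base
+// once per remaining bit and multiplies the accumulator on set bits:
+//
+//     x^13 = x^1 * x^4 * x^8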
+
+forward_all_binop_to_val_ref_commutative!(impl Add for BigUint, add);
+forward_val_assign!(impl AddAssign for BigUint, add_assign);
+
+impl<'a> Add<&'a BigUint> for BigUint {
+ type Output = BigUint;
+
+ fn add(mut self, other: &BigUint) -> BigUint {
+ self += other;
+ self
+ }
+}
+impl<'a> AddAssign<&'a BigUint> for BigUint {
+ #[inline]
+ fn add_assign(&mut self, other: &BigUint) {
+ let self_len = self.data.len();
+ let carry = if self_len < other.data.len() {
+ let lo_carry = __add2(&mut self.data[..], &other.data[..self_len]);
+ self.data.extend_from_slice(&other.data[self_len..]);
+ __add2(&mut self.data[self_len..], &[lo_carry])
+ } else {
+ __add2(&mut self.data[..], &other.data[..])
+ };
+ if carry != 0 {
+ self.data.push(carry);
+ }
+ }
+}
+
+promote_unsigned_scalars!(impl Add for BigUint, add);
+promote_unsigned_scalars_assign!(impl AddAssign for BigUint, add_assign);
+forward_all_scalar_binop_to_val_val_commutative!(impl Add<u32> for BigUint, add);
+forward_all_scalar_binop_to_val_val_commutative!(impl Add<u64> for BigUint, add);
+#[cfg(has_i128)]
+forward_all_scalar_binop_to_val_val_commutative!(impl Add<u128> for BigUint, add);
+
+impl Add<u32> for BigUint {
+ type Output = BigUint;
+
+ #[inline]
+ fn add(mut self, other: u32) -> BigUint {
+ self += other;
+ self
+ }
+}
+
+impl AddAssign<u32> for BigUint {
+ #[inline]
+ fn add_assign(&mut self, other: u32) {
+ if other != 0 {
+ if self.data.is_empty() {
+ self.data.push(0);
+ }
+
+ let carry = __add2(&mut self.data, &[other as BigDigit]);
+ if carry != 0 {
+ self.data.push(carry);
+ }
+ }
+ }
+}
+
+impl Add<u64> for BigUint {
+ type Output = BigUint;
+
+ #[inline]
+ fn add(mut self, other: u64) -> BigUint {
+ self += other;
+ self
+ }
+}
+
+impl AddAssign<u64> for BigUint {
+ #[inline]
+ fn add_assign(&mut self, other: u64) {
+ let (hi, lo) = big_digit::from_doublebigdigit(other);
+ if hi == 0 {
+ *self += lo;
+ } else {
+ while self.data.len() < 2 {
+ self.data.push(0);
+ }
+
+ let carry = __add2(&mut self.data, &[lo, hi]);
+ if carry != 0 {
+ self.data.push(carry);
+ }
+ }
+ }
+}
+
+#[cfg(has_i128)]
+impl Add<u128> for BigUint {
+ type Output = BigUint;
+
+ #[inline]
+ fn add(mut self, other: u128) -> BigUint {
+ self += other;
+ self
+ }
+}
+
+#[cfg(has_i128)]
+impl AddAssign<u128> for BigUint {
+ #[inline]
+ fn add_assign(&mut self, other: u128) {
+ if other <= u128::from(u64::max_value()) {
+ *self += other as u64
+ } else {
+ let (a, b, c, d) = u32_from_u128(other);
+ let carry = if a > 0 {
+ while self.data.len() < 4 {
+ self.data.push(0);
+ }
+ __add2(&mut self.data, &[d, c, b, a])
+ } else {
+ debug_assert!(b > 0);
+ while self.data.len() < 3 {
+ self.data.push(0);
+ }
+ __add2(&mut self.data, &[d, c, b])
+ };
+
+ if carry != 0 {
+ self.data.push(carry);
+ }
+ }
+ }
+}
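+
+// Sketch of the `u128` split above: 2^64 exceeds `u64`, and its high
+// `u32` is zero, so it takes the three-digit branch.
+#[cfg(has_i128)]
+#[test]
+fn test_add_u128_sketch() {
+ let mut a = BigUint::zero();
+ a += 1u128 << 64;
+ assert_eq!(a, BigUint::one() << 64);
+}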
+
+forward_val_val_binop!(impl Sub for BigUint, sub);
+forward_ref_ref_binop!(impl Sub for BigUint, sub);
+forward_val_assign!(impl SubAssign for BigUint, sub_assign);
+
+impl<'a> Sub<&'a BigUint> for BigUint {
+ type Output = BigUint;
+
+ fn sub(mut self, other: &BigUint) -> BigUint {
+ self -= other;
+ self
+ }
+}
+impl<'a> SubAssign<&'a BigUint> for BigUint {
+ fn sub_assign(&mut self, other: &'a BigUint) {
+ sub2(&mut self.data[..], &other.data[..]);
+ self.normalize();
+ }
+}
+
+impl<'a> Sub<BigUint> for &'a BigUint {
+ type Output = BigUint;
+
+ fn sub(self, mut other: BigUint) -> BigUint {
+ let other_len = other.data.len();
+ if other_len < self.data.len() {
+ let lo_borrow = __sub2rev(&self.data[..other_len], &mut other.data);
+ other.data.extend_from_slice(&self.data[other_len..]);
+ if lo_borrow != 0 {
+ sub2(&mut other.data[other_len..], &[1])
+ }
+ } else {
+ sub2rev(&self.data[..], &mut other.data[..]);
+ }
+ other.normalized()
+ }
+}
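+
+// Sketch of the borrow path above: subtracting one from 2^64 borrows
+// across the digit boundary and renormalizes the result.
+#[test]
+fn test_sub_borrow_sketch() {
+ let a = BigUint::one() << 64;
+ assert_eq!(&a - BigUint::one(), BigUint::from(u64::max_value()));
+}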
+
+promote_unsigned_scalars!(impl Sub for BigUint, sub);
+promote_unsigned_scalars_assign!(impl SubAssign for BigUint, sub_assign);
+forward_all_scalar_binop_to_val_val!(impl Sub<u32> for BigUint, sub);
+forward_all_scalar_binop_to_val_val!(impl Sub<u64> for BigUint, sub);
+#[cfg(has_i128)]
+forward_all_scalar_binop_to_val_val!(impl Sub<u128> for BigUint, sub);
+
+impl Sub<u32> for BigUint {
+ type Output = BigUint;
+
+ #[inline]
+ fn sub(mut self, other: u32) -> BigUint {
+ self -= other;
+ self
+ }
+}
+impl SubAssign<u32> for BigUint {
+ fn sub_assign(&mut self, other: u32) {
+ sub2(&mut self.data[..], &[other as BigDigit]);
+ self.normalize();
+ }
+}
+
+impl Sub<BigUint> for u32 {
+ type Output = BigUint;
+
+ #[inline]
+ fn sub(self, mut other: BigUint) -> BigUint {
+ if other.data.is_empty() {
+ other.data.push(self as BigDigit);
+ } else {
+ sub2rev(&[self as BigDigit], &mut other.data[..]);
+ }
+ other.normalized()
+ }
+}
+
+impl Sub<u64> for BigUint {
+ type Output = BigUint;
+
+ #[inline]
+ fn sub(mut self, other: u64) -> BigUint {
+ self -= other;
+ self
+ }
+}
+
+impl SubAssign<u64> for BigUint {
+ #[inline]
+ fn sub_assign(&mut self, other: u64) {
+ let (hi, lo) = big_digit::from_doublebigdigit(other);
+ sub2(&mut self.data[..], &[lo, hi]);
+ self.normalize();
+ }
+}
+
+impl Sub<BigUint> for u64 {
+ type Output = BigUint;
+
+ #[inline]
+ fn sub(self, mut other: BigUint) -> BigUint {
+ while other.data.len() < 2 {
+ other.data.push(0);
+ }
+
+ let (hi, lo) = big_digit::from_doublebigdigit(self);
+ sub2rev(&[lo, hi], &mut other.data[..]);
+ other.normalized()
+ }
+}
+
+#[cfg(has_i128)]
+impl Sub<u128> for BigUint {
+ type Output = BigUint;
+
+ #[inline]
+ fn sub(mut self, other: u128) -> BigUint {
+ self -= other;
+ self
+ }
+}
+#[cfg(has_i128)]
+impl SubAssign<u128> for BigUint {
+ fn sub_assign(&mut self, other: u128) {
+ let (a, b, c, d) = u32_from_u128(other);
+ sub2(&mut self.data[..], &[d, c, b, a]);
+ self.normalize();
+ }
+}
+
+#[cfg(has_i128)]
+impl Sub<BigUint> for u128 {
+ type Output = BigUint;
+
+ #[inline]
+ fn sub(self, mut other: BigUint) -> BigUint {
+ while other.data.len() < 4 {
+ other.data.push(0);
+ }
+
+ let (a, b, c, d) = u32_from_u128(self);
+ sub2rev(&[d, c, b, a], &mut other.data[..]);
+ other.normalized()
+ }
+}
+
+forward_all_binop_to_ref_ref!(impl Mul for BigUint, mul);
+forward_val_assign!(impl MulAssign for BigUint, mul_assign);
+
+impl<'a, 'b> Mul<&'b BigUint> for &'a BigUint {
+ type Output = BigUint;
+
+ #[inline]
+ fn mul(self, other: &BigUint) -> BigUint {
+ mul3(&self.data[..], &other.data[..])
+ }
+}
+impl<'a> MulAssign<&'a BigUint> for BigUint {
+ #[inline]
+ fn mul_assign(&mut self, other: &'a BigUint) {
+ *self = &*self * other
+ }
+}
+
+promote_unsigned_scalars!(impl Mul for BigUint, mul);
+promote_unsigned_scalars_assign!(impl MulAssign for BigUint, mul_assign);
+forward_all_scalar_binop_to_val_val_commutative!(impl Mul<u32> for BigUint, mul);
+forward_all_scalar_binop_to_val_val_commutative!(impl Mul<u64> for BigUint, mul);
+#[cfg(has_i128)]
+forward_all_scalar_binop_to_val_val_commutative!(impl Mul<u128> for BigUint, mul);
+
+impl Mul<u32> for BigUint {
+ type Output = BigUint;
+
+ #[inline]
+ fn mul(mut self, other: u32) -> BigUint {
+ self *= other;
+ self
+ }
+}
+impl MulAssign<u32> for BigUint {
+ #[inline]
+ fn mul_assign(&mut self, other: u32) {
+ if other == 0 {
+ self.data.clear();
+ } else {
+ let carry = scalar_mul(&mut self.data[..], other as BigDigit);
+ if carry != 0 {
+ self.data.push(carry);
+ }
+ }
+ }
+}
+
+impl Mul<u64> for BigUint {
+ type Output = BigUint;
+
+ #[inline]
+ fn mul(mut self, other: u64) -> BigUint {
+ self *= other;
+ self
+ }
+}
+impl MulAssign<u64> for BigUint {
+ #[inline]
+ fn mul_assign(&mut self, other: u64) {
+ if other == 0 {
+ self.data.clear();
+ } else if other <= u64::from(BigDigit::max_value()) {
+ *self *= other as BigDigit
+ } else {
+ let (hi, lo) = big_digit::from_doublebigdigit(other);
+ *self = mul3(&self.data[..], &[lo, hi])
+ }
+ }
+}
+
+#[cfg(has_i128)]
+impl Mul<u128> for BigUint {
+ type Output = BigUint;
+
+ #[inline]
+ fn mul(mut self, other: u128) -> BigUint {
+ self *= other;
+ self
+ }
+}
+#[cfg(has_i128)]
+impl MulAssign<u128> for BigUint {
+ #[inline]
+ fn mul_assign(&mut self, other: u128) {
+ if other == 0 {
+ self.data.clear();
+ } else if other <= u128::from(BigDigit::max_value()) {
+ *self *= other as BigDigit
+ } else {
+ let (a, b, c, d) = u32_from_u128(other);
+ *self = mul3(&self.data[..], &[d, c, b, a])
+ }
+ }
+}
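+
+// Sketch of the scalar-multiply dispatch above: a scalar wider than one
+// digit goes through the general `mul3` path, so both forms must agree.
+#[test]
+fn test_mul_scalar_sketch() {
+ let a = BigUint::from(u64::max_value());
+ assert_eq!(a.clone() * u64::max_value(), &a * &a);
+}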
+
+forward_val_ref_binop!(impl Div for BigUint, div);
+forward_ref_val_binop!(impl Div for BigUint, div);
+forward_val_assign!(impl DivAssign for BigUint, div_assign);
+
+impl Div<BigUint> for BigUint {
+ type Output = BigUint;
+
+ #[inline]
+ fn div(self, other: BigUint) -> BigUint {
+ let (q, _) = div_rem(self, other);
+ q
+ }
+}
+
+impl<'a, 'b> Div<&'b BigUint> for &'a BigUint {
+ type Output = BigUint;
+
+ #[inline]
+ fn div(self, other: &BigUint) -> BigUint {
+ let (q, _) = self.div_rem(other);
+ q
+ }
+}
+impl<'a> DivAssign<&'a BigUint> for BigUint {
+ #[inline]
+ fn div_assign(&mut self, other: &'a BigUint) {
+ *self = &*self / other;
+ }
+}
+
+promote_unsigned_scalars!(impl Div for BigUint, div);
+promote_unsigned_scalars_assign!(impl DivAssign for BigUint, div_assign);
+forward_all_scalar_binop_to_val_val!(impl Div<u32> for BigUint, div);
+forward_all_scalar_binop_to_val_val!(impl Div<u64> for BigUint, div);
+#[cfg(has_i128)]
+forward_all_scalar_binop_to_val_val!(impl Div<u128> for BigUint, div);
+
+impl Div<u32> for BigUint {
+ type Output = BigUint;
+
+ #[inline]
+ fn div(self, other: u32) -> BigUint {
+ let (q, _) = div_rem_digit(self, other as BigDigit);
+ q
+ }
+}
+impl DivAssign<u32> for BigUint {
+ #[inline]
+ fn div_assign(&mut self, other: u32) {
+ *self = &*self / other;
+ }
+}
+
+impl Div<BigUint> for u32 {
+ type Output = BigUint;
+
+ #[inline]
+ fn div(self, other: BigUint) -> BigUint {
+ match other.data.len() {
+ 0 => panic!(),
+ 1 => From::from(self as BigDigit / other.data[0]),
+ _ => Zero::zero(),
+ }
+ }
+}
+
+impl Div<u64> for BigUint {
+ type Output = BigUint;
+
+ #[inline]
+ fn div(self, other: u64) -> BigUint {
+ let (q, _) = div_rem(self, From::from(other));
+ q
+ }
+}
+impl DivAssign<u64> for BigUint {
+ #[inline]
+ fn div_assign(&mut self, other: u64) {
+ // a vec of size 0 does not allocate, so this is fairly cheap
+ let temp = mem::replace(self, Zero::zero());
+ *self = temp / other;
+ }
+}
+
+impl Div<BigUint> for u64 {
+ type Output = BigUint;
+
+ #[inline]
+ fn div(self, other: BigUint) -> BigUint {
+ match other.data.len() {
+ 0 => panic!(),
+ 1 => From::from(self / u64::from(other.data[0])),
+ 2 => From::from(self / big_digit::to_doublebigdigit(other.data[1], other.data[0])),
+ _ => Zero::zero(),
+ }
+ }
+}
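+
+// Sketch of scalar-by-big division above: a divisor with more digits
+// than the scalar can hold always yields a zero quotient.
+#[test]
+fn test_scalar_div_sketch() {
+ assert_eq!(100u64 / BigUint::from(7u32), BigUint::from(14u32));
+ assert_eq!(u64::max_value() / (BigUint::one() << 100), BigUint::zero());
+}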
+
+#[cfg(has_i128)]
+impl Div<u128> for BigUint {
+ type Output = BigUint;
+
+ #[inline]
+ fn div(self, other: u128) -> BigUint {
+ let (q, _) = div_rem(self, From::from(other));
+ q
+ }
+}
+#[cfg(has_i128)]
+impl DivAssign<u128> for BigUint {
+ #[inline]
+ fn div_assign(&mut self, other: u128) {
+ *self = &*self / other;
+ }
+}
+
+#[cfg(has_i128)]
+impl Div<BigUint> for u128 {
+ type Output = BigUint;
+
+ #[inline]
+ fn div(self, other: BigUint) -> BigUint {
+ match other.data.len() {
+ 0 => panic!(),
+ 1 => From::from(self / u128::from(other.data[0])),
+ 2 => From::from(
+ self / u128::from(big_digit::to_doublebigdigit(other.data[1], other.data[0])),
+ ),
+ 3 => From::from(self / u32_to_u128(0, other.data[2], other.data[1], other.data[0])),
+ 4 => From::from(
+ self / u32_to_u128(other.data[3], other.data[2], other.data[1], other.data[0]),
+ ),
+ _ => Zero::zero(),
+ }
+ }
+}
+
+forward_val_ref_binop!(impl Rem for BigUint, rem);
+forward_ref_val_binop!(impl Rem for BigUint, rem);
+forward_val_assign!(impl RemAssign for BigUint, rem_assign);
+
+impl Rem<BigUint> for BigUint {
+ type Output = BigUint;
+
+ #[inline]
+ fn rem(self, other: BigUint) -> BigUint {
+ let (_, r) = div_rem(self, other);
+ r
+ }
+}
+
+impl<'a, 'b> Rem<&'b BigUint> for &'a BigUint {
+ type Output = BigUint;
+
+ #[inline]
+ fn rem(self, other: &BigUint) -> BigUint {
+ let (_, r) = self.div_rem(other);
+ r
+ }
+}
+impl<'a> RemAssign<&'a BigUint> for BigUint {
+ #[inline]
+ fn rem_assign(&mut self, other: &BigUint) {
+ *self = &*self % other;
+ }
+}
+
+promote_unsigned_scalars!(impl Rem for BigUint, rem);
+promote_unsigned_scalars_assign!(impl RemAssign for BigUint, rem_assign);
+forward_all_scalar_binop_to_ref_val!(impl Rem<u32> for BigUint, rem);
+forward_all_scalar_binop_to_val_val!(impl Rem<u64> for BigUint, rem);
+#[cfg(has_i128)]
+forward_all_scalar_binop_to_val_val!(impl Rem<u128> for BigUint, rem);
+
+impl<'a> Rem<u32> for &'a BigUint {
+ type Output = BigUint;
+
+ #[inline]
+ fn rem(self, other: u32) -> BigUint {
+ From::from(rem_digit(self, other as BigDigit))
+ }
+}
+impl RemAssign<u32> for BigUint {
+ #[inline]
+ fn rem_assign(&mut self, other: u32) {
+ *self = &*self % other;
+ }
+}
+
+impl<'a> Rem<&'a BigUint> for u32 {
+ type Output = BigUint;
+
+ #[inline]
+ fn rem(mut self, other: &'a BigUint) -> BigUint {
+ self %= other;
+ From::from(self)
+ }
+}
+
+macro_rules! impl_rem_assign_scalar {
+ ($scalar:ty, $to_scalar:ident) => {
+ forward_val_assign_scalar!(impl RemAssign for BigUint, $scalar, rem_assign);
+ impl<'a> RemAssign<&'a BigUint> for $scalar {
+ #[inline]
+ fn rem_assign(&mut self, other: &BigUint) {
+ *self = match other.$to_scalar() {
+ None => *self,
+ Some(0) => panic!(),
+ Some(v) => *self % v
+ };
+ }
+ }
+ }
+}
+// We can implement `scalar %= BigUint` for any scalar, including signed types.
+#[cfg(has_i128)]
+impl_rem_assign_scalar!(u128, to_u128);
+impl_rem_assign_scalar!(usize, to_usize);
+impl_rem_assign_scalar!(u64, to_u64);
+impl_rem_assign_scalar!(u32, to_u32);
+impl_rem_assign_scalar!(u16, to_u16);
+impl_rem_assign_scalar!(u8, to_u8);
+#[cfg(has_i128)]
+impl_rem_assign_scalar!(i128, to_i128);
+impl_rem_assign_scalar!(isize, to_isize);
+impl_rem_assign_scalar!(i64, to_i64);
+impl_rem_assign_scalar!(i32, to_i32);
+impl_rem_assign_scalar!(i16, to_i16);
+impl_rem_assign_scalar!(i8, to_i8);
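+
+// Sketch of the scalar `%=` above: the big value is converted down when
+// it fits in the scalar type, and the scalar is left unchanged otherwise.
+#[test]
+fn test_scalar_rem_assign_sketch() {
+ let mut x = 100u32;
+ x %= &BigUint::from(7u32);
+ assert_eq!(x, 2);
+ let mut y = 100u32;
+ y %= &(BigUint::one() << 40); // does not fit in u32, so y is unchanged
+ assert_eq!(y, 100);
+}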
+
+impl Rem<u64> for BigUint {
+ type Output = BigUint;
+
+ #[inline]
+ fn rem(self, other: u64) -> BigUint {
+ let (_, r) = div_rem(self, From::from(other));
+ r
+ }
+}
+impl RemAssign<u64> for BigUint {
+ #[inline]
+ fn rem_assign(&mut self, other: u64) {
+ *self = &*self % other;
+ }
+}
+
+impl Rem<BigUint> for u64 {
+ type Output = BigUint;
+
+ #[inline]
+ fn rem(mut self, other: BigUint) -> BigUint {
+ self %= other;
+ From::from(self)
+ }
+}
+
+#[cfg(has_i128)]
+impl Rem<u128> for BigUint {
+ type Output = BigUint;
+
+ #[inline]
+ fn rem(self, other: u128) -> BigUint {
+ let (_, r) = div_rem(self, From::from(other));
+ r
+ }
+}
+#[cfg(has_i128)]
+impl RemAssign<u128> for BigUint {
+ #[inline]
+ fn rem_assign(&mut self, other: u128) {
+ *self = &*self % other;
+ }
+}
+
+#[cfg(has_i128)]
+impl Rem<BigUint> for u128 {
+ type Output = BigUint;
+
+ #[inline]
+ fn rem(mut self, other: BigUint) -> BigUint {
+ self %= other;
+ From::from(self)
+ }
+}
+
+impl Neg for BigUint {
+ type Output = BigUint;
+
+ #[inline]
+ fn neg(self) -> BigUint {
+ // `BigUint` is unsigned, so this implementation always panics.
+ panic!()
+ }
+}
+
+impl<'a> Neg for &'a BigUint {
+ type Output = BigUint;
+
+ #[inline]
+ fn neg(self) -> BigUint {
+ panic!()
+ }
+}
+
+impl CheckedAdd for BigUint {
+ #[inline]
+ fn checked_add(&self, v: &BigUint) -> Option<BigUint> {
+ Some(self.add(v))
+ }
+}
+
+impl CheckedSub for BigUint {
+ #[inline]
+ fn checked_sub(&self, v: &BigUint) -> Option<BigUint> {
+ match self.cmp(v) {
+ Less => None,
+ Equal => Some(Zero::zero()),
+ Greater => Some(self.sub(v)),
+ }
+ }
+}
+
+impl CheckedMul for BigUint {
+ #[inline]
+ fn checked_mul(&self, v: &BigUint) -> Option<BigUint> {
+ Some(self.mul(v))
+ }
+}
+
+impl CheckedDiv for BigUint {
+ #[inline]
+ fn checked_div(&self, v: &BigUint) -> Option<BigUint> {
+ if v.is_zero() {
+ return None;
+ }
+ Some(self.div(v))
+ }
+}
+
+impl Integer for BigUint {
+ #[inline]
+ fn div_rem(&self, other: &BigUint) -> (BigUint, BigUint) {
+ div_rem_ref(self, other)
+ }
+
+ #[inline]
+ fn div_floor(&self, other: &BigUint) -> BigUint {
+ let (d, _) = div_rem_ref(self, other);
+ d
+ }
+
+ #[inline]
+ fn mod_floor(&self, other: &BigUint) -> BigUint {
+ let (_, m) = div_rem_ref(self, other);
+ m
+ }
+
+ #[inline]
+ fn div_mod_floor(&self, other: &BigUint) -> (BigUint, BigUint) {
+ div_rem_ref(self, other)
+ }
+
+ /// Calculates the Greatest Common Divisor (GCD) of the number and `other`.
+ ///
+ /// The result is always non-negative; it is zero only when both inputs are zero.
+ #[inline]
+ fn gcd(&self, other: &Self) -> Self {
+ #[inline]
+ fn twos(x: &BigUint) -> usize {
+ trailing_zeros(x).unwrap_or(0)
+ }
+
+ // Stein's algorithm
+ if self.is_zero() {
+ return other.clone();
+ }
+ if other.is_zero() {
+ return self.clone();
+ }
+ let mut m = self.clone();
+ let mut n = other.clone();
+
+ // find common factors of 2
+ let shift = cmp::min(twos(&n), twos(&m));
+
+ // divide n by 2 until odd; m is reduced the same way inside the loop below
+ n >>= twos(&n);
+
+ while !m.is_zero() {
+ m >>= twos(&m);
+ if n > m {
+ mem::swap(&mut n, &mut m)
+ }
+ m -= &n;
+ }
+
+ n << shift
+ }
+
+ /// Calculates the Lowest Common Multiple (LCM) of the number and `other`.
+ #[inline]
+ fn lcm(&self, other: &BigUint) -> BigUint {
+ if self.is_zero() && other.is_zero() {
+ Self::zero()
+ } else {
+ self / self.gcd(other) * other
+ }
+ }
+
+ /// Deprecated, use `is_multiple_of` instead.
+ #[inline]
+ fn divides(&self, other: &BigUint) -> bool {
+ self.is_multiple_of(other)
+ }
+
+ /// Returns `true` if the number is a multiple of `other`.
+ #[inline]
+ fn is_multiple_of(&self, other: &BigUint) -> bool {
+ (self % other).is_zero()
+ }
+
+ /// Returns `true` if the number is divisible by `2`.
+ #[inline]
+ fn is_even(&self) -> bool {
+ // Consider only the least-significant digit.
+ match self.data.first() {
+ Some(x) => x.is_even(),
+ None => true,
+ }
+ }
+
+ /// Returns `true` if the number is not divisible by `2`.
+ #[inline]
+ fn is_odd(&self) -> bool {
+ !self.is_even()
+ }
+}
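+
+// Sketch of Stein's algorithm above on hand-checkable values:
+// gcd(12, 18) = 6 and lcm(12, 18) = 12 / 6 * 18 = 36.
+#[test]
+fn test_gcd_lcm_sketch() {
+ let a = BigUint::from(12u32);
+ let b = BigUint::from(18u32);
+ assert_eq!(a.gcd(&b), BigUint::from(6u32));
+ assert_eq!(a.lcm(&b), BigUint::from(36u32));
+}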
+
+#[inline]
+fn fixpoint<F>(mut x: BigUint, max_bits: usize, f: F) -> BigUint
+where
+ F: Fn(&BigUint) -> BigUint,
+{
+ let mut xn = f(&x);
+
+ // If the value increased, then the initial guess must have been low.
+ // Repeat until we reverse course.
+ while x < xn {
+ // Sometimes an increase will go way too far, especially with large
+ // powers, and then take a long time to walk back. We know an upper
+ // bound based on bit size, so saturate on that.
+ x = if xn.bits() > max_bits {
+ BigUint::one() << max_bits
+ } else {
+ xn
+ };
+ xn = f(&x);
+ }
+
+ // Now keep repeating while the estimate is decreasing.
+ while x > xn {
+ x = xn;
+ xn = f(&x);
+ }
+ x
+}
+
+impl Roots for BigUint {
+ // nth_root, sqrt and cbrt use Newton's method to compute
+ // principal root of a given degree for a given integer.
+
+ // Reference:
+ // Brent & Zimmermann, Modern Computer Arithmetic, v0.5.9, Algorithm 1.14
+ fn nth_root(&self, n: u32) -> Self {
+ assert!(n > 0, "root degree n must be at least 1");
+
+ if self.is_zero() || self.is_one() {
+ return self.clone();
+ }
+
+ match n {
+ // Optimize for small n
+ 1 => return self.clone(),
+ 2 => return self.sqrt(),
+ 3 => return self.cbrt(),
+ _ => (),
+ }
+
+ // The root of non-zero values less than 2ⁿ can only be 1.
+ let bits = self.bits();
+ if bits <= n as usize {
+ return BigUint::one();
+ }
+
+ // If we fit in `u64`, compute the root that way.
+ if let Some(x) = self.to_u64() {
+ return x.nth_root(n).into();
+ }
+
+ let max_bits = bits / n as usize + 1;
+
+ let guess = if let Some(f) = self.to_f64() {
+ // We fit in `f64` (lossy), so get a better initial guess from that.
+ BigUint::from_f64((f.ln() / f64::from(n)).exp()).unwrap()
+ } else {
+ // Try to guess by scaling down such that it does fit in `f64`.
+ // With some (x * 2ⁿᵏ), its nth root ≈ (ⁿ√x * 2ᵏ)
+ let nsz = n as usize;
+ let extra_bits = bits - (f64::MAX_EXP as usize - 1);
+ let root_scale = (extra_bits + (nsz - 1)) / nsz;
+ let scale = root_scale * nsz;
+ if scale < bits && bits - scale > nsz {
+ (self >> scale).nth_root(n) << root_scale
+ } else {
+ BigUint::one() << max_bits
+ }
+ };
+
+ let n_min_1 = n - 1;
+ fixpoint(guess, max_bits, move |s| {
+ let q = self / s.pow(n_min_1);
+ let t = n_min_1 * s + q;
+ t / n
+ })
+ }
+
+ // Reference:
+ // Brent & Zimmermann, Modern Computer Arithmetic, v0.5.9, Algorithm 1.13
+ fn sqrt(&self) -> Self {
+ if self.is_zero() || self.is_one() {
+ return self.clone();
+ }
+
+ // If we fit in `u64`, compute the root that way.
+ if let Some(x) = self.to_u64() {
+ return x.sqrt().into();
+ }
+
+ let bits = self.bits();
+ let max_bits = bits / 2 + 1;
+
+ let guess = if let Some(f) = self.to_f64() {
+ // We fit in `f64` (lossy), so get a better initial guess from that.
+ BigUint::from_f64(f.sqrt()).unwrap()
+ } else {
+ // Try to guess by scaling down such that it does fit in `f64`.
+ // With some (x * 2²ᵏ), its sqrt ≈ (√x * 2ᵏ)
+ let extra_bits = bits - (f64::MAX_EXP as usize - 1);
+ let root_scale = (extra_bits + 1) / 2;
+ let scale = root_scale * 2;
+ (self >> scale).sqrt() << root_scale
+ };
+
+ fixpoint(guess, max_bits, move |s| {
+ let q = self / s;
+ let t = s + q;
+ t >> 1
+ })
+ }
+
+ fn cbrt(&self) -> Self {
+ if self.is_zero() || self.is_one() {
+ return self.clone();
+ }
+
+ // If we fit in `u64`, compute the root that way.
+ if let Some(x) = self.to_u64() {
+ return x.cbrt().into();
+ }
+
+ let bits = self.bits();
+ let max_bits = bits / 3 + 1;
+
+ let guess = if let Some(f) = self.to_f64() {
+ // We fit in `f64` (lossy), so get a better initial guess from that.
+ BigUint::from_f64(f.cbrt()).unwrap()
+ } else {
+ // Try to guess by scaling down such that it does fit in `f64`.
+ // With some (x * 2³ᵏ), its cbrt ≈ (∛x * 2ᵏ)
+ let extra_bits = bits - (f64::MAX_EXP as usize - 1);
+ let root_scale = (extra_bits + 2) / 3;
+ let scale = root_scale * 3;
+ (self >> scale).cbrt() << root_scale
+ };
+
+ fixpoint(guess, max_bits, move |s| {
+ let q = self / (s * s);
+ let t = (s << 1) + q;
+ t / 3u32
+ })
+ }
+}
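+
+// Sketch of the Newton iterations above on values too large for the
+// `u64` fast path; exact powers of two make the roots easy to verify.
+#[test]
+fn test_roots_sketch() {
+ let x = BigUint::one() << 130;
+ assert_eq!(x.sqrt(), BigUint::one() << 65);
+ assert_eq!(x.nth_root(5), BigUint::one() << 26);
+}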
+
+fn high_bits_to_u64(v: &BigUint) -> u64 {
+ match v.data.len() {
+ 0 => 0,
+ 1 => u64::from(v.data[0]),
+ _ => {
+ let mut bits = v.bits();
+ let mut ret = 0u64;
+ let mut ret_bits = 0;
+
+ for d in v.data.iter().rev() {
+ let digit_bits = (bits - 1) % big_digit::BITS + 1;
+ let bits_want = cmp::min(64 - ret_bits, digit_bits);
+
+ if bits_want != 64 {
+ ret <<= bits_want;
+ }
+ ret |= u64::from(*d) >> (digit_bits - bits_want);
+ ret_bits += bits_want;
+ bits -= bits_want;
+
+ if ret_bits == 64 {
+ break;
+ }
+ }
+
+ ret
+ }
+ }
+}
+
+impl ToPrimitive for BigUint {
+ #[inline]
+ fn to_i64(&self) -> Option<i64> {
+ self.to_u64().as_ref().and_then(u64::to_i64)
+ }
+
+ #[inline]
+ #[cfg(has_i128)]
+ fn to_i128(&self) -> Option<i128> {
+ self.to_u128().as_ref().and_then(u128::to_i128)
+ }
+
+ #[inline]
+ fn to_u64(&self) -> Option<u64> {
+ let mut ret: u64 = 0;
+ let mut bits = 0;
+
+ for i in self.data.iter() {
+ if bits >= 64 {
+ return None;
+ }
+
+ ret |= u64::from(*i) << bits;
+ bits += big_digit::BITS;
+ }
+
+ Some(ret)
+ }
+
+ #[inline]
+ #[cfg(has_i128)]
+ fn to_u128(&self) -> Option<u128> {
+ let mut ret: u128 = 0;
+ let mut bits = 0;
+
+ for i in self.data.iter() {
+ if bits >= 128 {
+ return None;
+ }
+
+ ret |= u128::from(*i) << bits;
+ bits += big_digit::BITS;
+ }
+
+ Some(ret)
+ }
+
+ #[inline]
+ fn to_f32(&self) -> Option<f32> {
+ let mantissa = high_bits_to_u64(self);
+ let exponent = self.bits() - fls(mantissa);
+
+ if exponent > f32::MAX_EXP as usize {
+ None
+ } else {
+ let ret = (mantissa as f32) * 2.0f32.powi(exponent as i32);
+ if ret.is_infinite() {
+ None
+ } else {
+ Some(ret)
+ }
+ }
+ }
+
+ #[inline]
+ fn to_f64(&self) -> Option<f64> {
+ let mantissa = high_bits_to_u64(self);
+ let exponent = self.bits() - fls(mantissa);
+
+ if exponent > f64::MAX_EXP as usize {
+ None
+ } else {
+ let ret = (mantissa as f64) * 2.0f64.powi(exponent as i32);
+ if ret.is_infinite() {
+ None
+ } else {
+ Some(ret)
+ }
+ }
+ }
+}
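+
+// Sketch of the conversions above: values past the target width map to
+// `None` rather than wrapping, and exact small values survive the f64 path.
+#[test]
+fn test_to_primitive_sketch() {
+ assert_eq!(BigUint::from(u64::max_value()).to_u64(), Some(u64::max_value()));
+ assert_eq!((BigUint::one() << 64).to_u64(), None);
+ assert_eq!(BigUint::from(1u32 << 20).to_f64(), Some(1048576.0));
+}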
+
+impl FromPrimitive for BigUint {
+ #[inline]
+ fn from_i64(n: i64) -> Option<BigUint> {
+ if n >= 0 {
+ Some(BigUint::from(n as u64))
+ } else {
+ None
+ }
+ }
+
+ #[inline]
+ #[cfg(has_i128)]
+ fn from_i128(n: i128) -> Option<BigUint> {
+ if n >= 0 {
+ Some(BigUint::from(n as u128))
+ } else {
+ None
+ }
+ }
+
+ #[inline]
+ fn from_u64(n: u64) -> Option<BigUint> {
+ Some(BigUint::from(n))
+ }
+
+ #[inline]
+ #[cfg(has_i128)]
+ fn from_u128(n: u128) -> Option<BigUint> {
+ Some(BigUint::from(n))
+ }
+
+ #[inline]
+ fn from_f64(mut n: f64) -> Option<BigUint> {
+ // handle NAN, INFINITY, NEG_INFINITY
+ if !n.is_finite() {
+ return None;
+ }
+
+ // match the rounding of casting from float to int
+ n = n.trunc();
+
+ // handle 0.x, -0.x
+ if n.is_zero() {
+ return Some(BigUint::zero());
+ }
+
+ let (mantissa, exponent, sign) = Float::integer_decode(n);
+
+ if sign == -1 {
+ return None;
+ }
+
+ let mut ret = BigUint::from(mantissa);
+ if exponent > 0 {
+ ret = ret << exponent as usize;
+ } else if exponent < 0 {
+ ret = ret >> (-exponent) as usize;
+ }
+ Some(ret)
+ }
+}
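+
+// Sketch of the float path above: the fraction is truncated first, and
+// negative or non-finite inputs map to `None`.
+#[test]
+fn test_from_f64_sketch() {
+ assert_eq!(BigUint::from_f64(3.9), Some(BigUint::from(3u32)));
+ assert_eq!(BigUint::from_f64(-1.0), None);
+ assert_eq!(BigUint::from_f64(::std::f64::NAN), None);
+}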
+
+impl From<u64> for BigUint {
+ #[inline]
+ fn from(mut n: u64) -> Self {
+ let mut ret: BigUint = Zero::zero();
+
+ while n != 0 {
+ ret.data.push(n as BigDigit);
+ // don't overflow if BITS is 64:
+ n = (n >> 1) >> (big_digit::BITS - 1);
+ }
+
+ ret
+ }
+}
+
+#[cfg(has_i128)]
+impl From<u128> for BigUint {
+ #[inline]
+ fn from(mut n: u128) -> Self {
+ let mut ret: BigUint = Zero::zero();
+
+ while n != 0 {
+ ret.data.push(n as BigDigit);
+ n >>= big_digit::BITS;
+ }
+
+ ret
+ }
+}
+
+macro_rules! impl_biguint_from_uint {
+ ($T:ty) => {
+ impl From<$T> for BigUint {
+ #[inline]
+ fn from(n: $T) -> Self {
+ BigUint::from(n as u64)
+ }
+ }
+ };
+}
+
+impl_biguint_from_uint!(u8);
+impl_biguint_from_uint!(u16);
+impl_biguint_from_uint!(u32);
+impl_biguint_from_uint!(usize);
+
+/// A generic trait for converting a value to a `BigUint`.
+pub trait ToBigUint {
+ /// Converts the value of `self` to a `BigUint`.
+ fn to_biguint(&self) -> Option<BigUint>;
+}
+
+impl ToBigUint for BigUint {
+ #[inline]
+ fn to_biguint(&self) -> Option<BigUint> {
+ Some(self.clone())
+ }
+}
+
+macro_rules! impl_to_biguint {
+ ($T:ty, $from_ty:path) => {
+ impl ToBigUint for $T {
+ #[inline]
+ fn to_biguint(&self) -> Option<BigUint> {
+ $from_ty(*self)
+ }
+ }
+ };
+}
+
+impl_to_biguint!(isize, FromPrimitive::from_isize);
+impl_to_biguint!(i8, FromPrimitive::from_i8);
+impl_to_biguint!(i16, FromPrimitive::from_i16);
+impl_to_biguint!(i32, FromPrimitive::from_i32);
+impl_to_biguint!(i64, FromPrimitive::from_i64);
+#[cfg(has_i128)]
+impl_to_biguint!(i128, FromPrimitive::from_i128);
+
+impl_to_biguint!(usize, FromPrimitive::from_usize);
+impl_to_biguint!(u8, FromPrimitive::from_u8);
+impl_to_biguint!(u16, FromPrimitive::from_u16);
+impl_to_biguint!(u32, FromPrimitive::from_u32);
+impl_to_biguint!(u64, FromPrimitive::from_u64);
+#[cfg(has_i128)]
+impl_to_biguint!(u128, FromPrimitive::from_u128);
+
+impl_to_biguint!(f32, FromPrimitive::from_f32);
+impl_to_biguint!(f64, FromPrimitive::from_f64);
+
+// Extract bitwise digits that evenly divide BigDigit
+fn to_bitwise_digits_le(u: &BigUint, bits: usize) -> Vec<u8> {
+ debug_assert!(!u.is_zero() && bits <= 8 && big_digit::BITS % bits == 0);
+
+ let last_i = u.data.len() - 1;
+ let mask: BigDigit = (1 << bits) - 1;
+ let digits_per_big_digit = big_digit::BITS / bits;
+ let digits = (u.bits() + bits - 1) / bits;
+ let mut res = Vec::with_capacity(digits);
+
+ for mut r in u.data[..last_i].iter().cloned() {
+ for _ in 0..digits_per_big_digit {
+ res.push((r & mask) as u8);
+ r >>= bits;
+ }
+ }
+
+ let mut r = u.data[last_i];
+ while r != 0 {
+ res.push((r & mask) as u8);
+ r >>= bits;
+ }
+
+ res
+}
+
+// Extract bitwise digits that don't evenly divide BigDigit
+fn to_inexact_bitwise_digits_le(u: &BigUint, bits: usize) -> Vec<u8> {
+ debug_assert!(!u.is_zero() && bits <= 8 && big_digit::BITS % bits != 0);
+
+ let mask: BigDigit = (1 << bits) - 1;
+ let digits = (u.bits() + bits - 1) / bits;
+ let mut res = Vec::with_capacity(digits);
+
+ let mut r = 0;
+ let mut rbits = 0;
+
+ for c in &u.data {
+ r |= *c << rbits;
+ rbits += big_digit::BITS;
+
+ while rbits >= bits {
+ res.push((r & mask) as u8);
+ r >>= bits;
+
+ // r had more bits than it could fit - grab the bits we lost
+ if rbits > big_digit::BITS {
+ r = *c >> (big_digit::BITS - (rbits - bits));
+ }
+
+ rbits -= bits;
+ }
+ }
+
+ if rbits != 0 {
+ res.push(r as u8);
+ }
+
+ while let Some(&0) = res.last() {
+ res.pop();
+ }
+
+ res
+}
+
+// Extract little-endian radix digits
+#[inline(always)] // forced inline to get const-prop for radix=10
+fn to_radix_digits_le(u: &BigUint, radix: u32) -> Vec<u8> {
+ debug_assert!(!u.is_zero() && !radix.is_power_of_two());
+
+ // Estimate how big the result will be, so we can pre-allocate it.
+ let radix_digits = ((u.bits() as f64) / f64::from(radix).log2()).ceil();
+ let mut res = Vec::with_capacity(radix_digits as usize);
+ let mut digits = u.clone();
+
+ let (base, power) = get_radix_base(radix);
+ let radix = radix as BigDigit;
+
+ while digits.data.len() > 1 {
+ let (q, mut r) = div_rem_digit(digits, base);
+ for _ in 0..power {
+ res.push((r % radix) as u8);
+ r /= radix;
+ }
+ digits = q;
+ }
+
+ let mut r = digits.data[0];
+ while r != 0 {
+ res.push((r % radix) as u8);
+ r /= radix;
+ }
+
+ res
+}
+
+pub fn to_radix_le(u: &BigUint, radix: u32) -> Vec<u8> {
+ if u.is_zero() {
+ vec![0]
+ } else if radix.is_power_of_two() {
+ // Powers of two can use bitwise masks and shifting instead of division
+ let bits = ilog2(radix);
+ if big_digit::BITS % bits == 0 {
+ to_bitwise_digits_le(u, bits)
+ } else {
+ to_inexact_bitwise_digits_le(u, bits)
+ }
+ } else if radix == 10 {
+ // 10 is so common that it's worth separating out for const-propagation.
+ // Optimizers can often turn constant division into a faster multiplication.
+ to_radix_digits_le(u, 10)
+ } else {
+ to_radix_digits_le(u, radix)
+ }
+}
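+
+// Sketch of the dispatch above: radix 16 takes the bitwise path and
+// radix 10 the division path; both return little-endian digits.
+#[test]
+fn test_to_radix_le_sketch() {
+ let n = BigUint::from(0x1234u32); // 4660
+ assert_eq!(to_radix_le(&n, 16), vec![4, 3, 2, 1]);
+ assert_eq!(to_radix_le(&n, 10), vec![0, 6, 6, 4]);
+}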
+
+pub fn to_str_radix_reversed(u: &BigUint, radix: u32) -> Vec<u8> {
+ assert!(2 <= radix && radix <= 36, "The radix must be within 2...36");
+
+ if u.is_zero() {
+ return vec![b'0'];
+ }
+
+ let mut res = to_radix_le(u, radix);
+
+ // Now convert everything to ASCII digits.
+ for r in &mut res {
+ debug_assert!(u32::from(*r) < radix);
+ if *r < 10 {
+ *r += b'0';
+ } else {
+ *r += b'a' - 10;
+ }
+ }
+ res
+}
+
+impl BigUint {
+ /// Creates and initializes a `BigUint`.
+ ///
+ /// The digits are in little-endian base 2<sup>32</sup>.
+ #[inline]
+ pub fn new(digits: Vec<u32>) -> BigUint {
+ BigUint { data: digits }.normalized()
+ }
+
+ /// Creates and initializes a `BigUint`.
+ ///
+ /// The digits are in little-endian base 2<sup>32</sup>.
+ #[inline]
+ pub fn from_slice(slice: &[u32]) -> BigUint {
+ BigUint::new(slice.to_vec())
+ }
+
+ /// Assign a value to a `BigUint`.
+ ///
+ /// The digits are in little-endian base 2<sup>32</sup>.
+ #[inline]
+ pub fn assign_from_slice(&mut self, slice: &[u32]) {
+ self.data.resize(slice.len(), 0);
+ self.data.clone_from_slice(slice);
+ self.normalize();
+ }
+
+ /// Creates and initializes a `BigUint`.
+ ///
+ /// The bytes are in big-endian byte order.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use num_bigint::BigUint;
+ ///
+ /// assert_eq!(BigUint::from_bytes_be(b"A"),
+ /// BigUint::parse_bytes(b"65", 10).unwrap());
+ /// assert_eq!(BigUint::from_bytes_be(b"AA"),
+ /// BigUint::parse_bytes(b"16705", 10).unwrap());
+ /// assert_eq!(BigUint::from_bytes_be(b"AB"),
+ /// BigUint::parse_bytes(b"16706", 10).unwrap());
+ /// assert_eq!(BigUint::from_bytes_be(b"Hello world!"),
+ /// BigUint::parse_bytes(b"22405534230753963835153736737", 10).unwrap());
+ /// ```
+ #[inline]
+ pub fn from_bytes_be(bytes: &[u8]) -> BigUint {
+ if bytes.is_empty() {
+ Zero::zero()
+ } else {
+ let mut v = bytes.to_vec();
+ v.reverse();
+ BigUint::from_bytes_le(&*v)
+ }
+ }
+
+ /// Creates and initializes a `BigUint`.
+ ///
+ /// The bytes are in little-endian byte order.
+ #[inline]
+ pub fn from_bytes_le(bytes: &[u8]) -> BigUint {
+ if bytes.is_empty() {
+ Zero::zero()
+ } else {
+ from_bitwise_digits_le(bytes, 8)
+ }
+ }
+
+ /// Creates and initializes a `BigUint`. The input slice must contain
+ /// ASCII/UTF-8 characters in `[0-9a-zA-Z]`.
+ /// `radix` must be in the range `2...36`.
+ ///
+ /// The function `from_str_radix` from the `Num` trait provides the same logic
+ /// for `&str` buffers.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use num_bigint::{BigUint, ToBigUint};
+ ///
+ /// assert_eq!(BigUint::parse_bytes(b"1234", 10), ToBigUint::to_biguint(&1234));
+ /// assert_eq!(BigUint::parse_bytes(b"ABCD", 16), ToBigUint::to_biguint(&0xABCD));
+ /// assert_eq!(BigUint::parse_bytes(b"G", 16), None);
+ /// ```
+ #[inline]
+ pub fn parse_bytes(buf: &[u8], radix: u32) -> Option<BigUint> {
+ str::from_utf8(buf)
+ .ok()
+ .and_then(|s| BigUint::from_str_radix(s, radix).ok())
+ }
+
+ /// Creates and initializes a `BigUint`. Each u8 of the input slice is
+ /// interpreted as one digit of the number
+ /// and must therefore be less than `radix`.
+ ///
+ /// The bytes are in big-endian byte order.
+ /// `radix` must be in the range `2...256`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use num_bigint::{BigUint};
+ ///
+ /// let inbase190 = &[15, 33, 125, 12, 14];
+ /// let a = BigUint::from_radix_be(inbase190, 190).unwrap();
+ /// assert_eq!(a.to_radix_be(190), inbase190);
+ /// ```
+ pub fn from_radix_be(buf: &[u8], radix: u32) -> Option<BigUint> {
+ assert!(
+ 2 <= radix && radix <= 256,
+ "The radix must be within 2...256"
+ );
+
+ if radix != 256 && buf.iter().any(|&b| b >= radix as u8) {
+ return None;
+ }
+
+ let res = if radix.is_power_of_two() {
+ // Powers of two can use bitwise masks and shifting instead of multiplication
+ let bits = ilog2(radix);
+ let mut v = Vec::from(buf);
+ v.reverse();
+ if big_digit::BITS % bits == 0 {
+ from_bitwise_digits_le(&v, bits)
+ } else {
+ from_inexact_bitwise_digits_le(&v, bits)
+ }
+ } else {
+ from_radix_digits_be(buf, radix)
+ };
+
+ Some(res)
+ }
+
+ /// Creates and initializes a `BigUint`. Each u8 of the input slice is
+ /// interpreted as one digit of the number
+ /// and must therefore be less than `radix`.
+ ///
+ /// The bytes are in little-endian byte order.
+ /// `radix` must be in the range `2...256`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use num_bigint::{BigUint};
+ ///
+ /// let inbase190 = &[14, 12, 125, 33, 15];
+ /// let a = BigUint::from_radix_le(inbase190, 190).unwrap();
+ /// assert_eq!(a.to_radix_le(190), inbase190);
+ /// ```
+ pub fn from_radix_le(buf: &[u8], radix: u32) -> Option<BigUint> {
+ assert!(
+ 2 <= radix && radix <= 256,
+ "The radix must be within 2...256"
+ );
+
+ if radix != 256 && buf.iter().any(|&b| b >= radix as u8) {
+ return None;
+ }
+
+ let res = if radix.is_power_of_two() {
+ // Powers of two can use bitwise masks and shifting instead of multiplication
+ let bits = ilog2(radix);
+ if big_digit::BITS % bits == 0 {
+ from_bitwise_digits_le(buf, bits)
+ } else {
+ from_inexact_bitwise_digits_le(buf, bits)
+ }
+ } else {
+ let mut v = Vec::from(buf);
+ v.reverse();
+ from_radix_digits_be(&v, radix)
+ };
+
+ Some(res)
+ }
+
+ /// Returns the byte representation of the `BigUint` in big-endian byte order.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use num_bigint::BigUint;
+ ///
+ /// let i = BigUint::parse_bytes(b"1125", 10).unwrap();
+ /// assert_eq!(i.to_bytes_be(), vec![4, 101]);
+ /// ```
+ #[inline]
+ pub fn to_bytes_be(&self) -> Vec<u8> {
+ let mut v = self.to_bytes_le();
+ v.reverse();
+ v
+ }
+
+ /// Returns the byte representation of the `BigUint` in little-endian byte order.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use num_bigint::BigUint;
+ ///
+ /// let i = BigUint::parse_bytes(b"1125", 10).unwrap();
+ /// assert_eq!(i.to_bytes_le(), vec![101, 4]);
+ /// ```
+ #[inline]
+ pub fn to_bytes_le(&self) -> Vec<u8> {
+ if self.is_zero() {
+ vec![0]
+ } else {
+ to_bitwise_digits_le(self, 8)
+ }
+ }
+
+ /// Returns the integer formatted as a string in the given radix.
+ /// `radix` must be in the range `2...36`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use num_bigint::BigUint;
+ ///
+ /// let i = BigUint::parse_bytes(b"ff", 16).unwrap();
+ /// assert_eq!(i.to_str_radix(16), "ff");
+ /// ```
+ #[inline]
+ pub fn to_str_radix(&self, radix: u32) -> String {
+ let mut v = to_str_radix_reversed(self, radix);
+ v.reverse();
+ unsafe { String::from_utf8_unchecked(v) }
+ }
+
+ /// Returns the integer in the requested base in big-endian digit order.
+ /// The output is not given in a human-readable alphabet but as
+ /// zero-based `u8` digits.
+ /// `radix` must be in the range `2...256`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use num_bigint::BigUint;
+ ///
+ /// assert_eq!(BigUint::from(0xFFFFu64).to_radix_be(159),
+ /// vec![2, 94, 27]);
+ /// // 0xFFFF = 65535 = 2*(159^2) + 94*159 + 27
+ /// ```
+ #[inline]
+ pub fn to_radix_be(&self, radix: u32) -> Vec<u8> {
+ let mut v = to_radix_le(self, radix);
+ v.reverse();
+ v
+ }
+
+ /// Returns the integer in the requested base in little-endian digit order.
+ /// The output is not given in a human-readable alphabet but as
+ /// zero-based `u8` digits.
+ /// `radix` must be in the range `2...256`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use num_bigint::BigUint;
+ ///
+ /// assert_eq!(BigUint::from(0xFFFFu64).to_radix_le(159),
+ /// vec![27, 94, 2]);
+ /// // 0xFFFF = 65535 = 27 + 94*159 + 2*(159^2)
+ /// ```
+ #[inline]
+ pub fn to_radix_le(&self, radix: u32) -> Vec<u8> {
+ to_radix_le(self, radix)
+ }
+
+ /// Determines the fewest bits necessary to express the `BigUint`.
+ #[inline]
+ pub fn bits(&self) -> usize {
+ if self.is_zero() {
+ return 0;
+ }
+ let zeros = self.data.last().unwrap().leading_zeros();
+ self.data.len() * big_digit::BITS - zeros as usize
+ }
+
+ /// Strips off trailing zero bigdigits - comparisons require the last element in the vector to
+ /// be nonzero.
+ #[inline]
+ fn normalize(&mut self) {
+ while let Some(&0) = self.data.last() {
+ self.data.pop();
+ }
+ }
+
+ /// Returns a normalized `BigUint`.
+ #[inline]
+ fn normalized(mut self) -> BigUint {
+ self.normalize();
+ self
+ }
+
+ /// Returns `(self ^ exponent) % modulus`.
+ ///
+ /// Panics if the modulus is zero.
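+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use num_bigint::BigUint;
+ ///
+ /// // A classic worked example: 4^13 mod 497 = 445.
+ /// let base = BigUint::from(4u32);
+ /// let exp = BigUint::from(13u32);
+ /// let modulus = BigUint::from(497u32);
+ /// assert_eq!(base.modpow(&exp, &modulus), BigUint::from(445u32));
+ /// ```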
+ pub fn modpow(&self, exponent: &Self, modulus: &Self) -> Self {
+ assert!(!modulus.is_zero(), "divide by zero!");
+
+ if modulus.is_odd() {
+ // For an odd modulus, we can use Montgomery multiplication in base 2^32.
+ monty_modpow(self, exponent, modulus)
+ } else {
+ // Otherwise do basically the same as `num::pow`, but with a modulus.
+ plain_modpow(self, &exponent.data, modulus)
+ }
+ }
+
+ /// Returns the truncated principal square root of `self` --
+ /// see [Roots::sqrt](https://docs.rs/num-integer/0.1/num_integer/trait.Roots.html#method.sqrt)
+ pub fn sqrt(&self) -> Self {
+ Roots::sqrt(self)
+ }
+
+ /// Returns the truncated principal cube root of `self` --
+ /// see [Roots::cbrt](https://docs.rs/num-integer/0.1/num_integer/trait.Roots.html#method.cbrt).
+ pub fn cbrt(&self) -> Self {
+ Roots::cbrt(self)
+ }
+
+ /// Returns the truncated principal `n`th root of `self` --
+ /// see [Roots::nth_root](https://docs.rs/num-integer/0.1/num_integer/trait.Roots.html#tymethod.nth_root).
+ pub fn nth_root(&self, n: u32) -> Self {
+ Roots::nth_root(self, n)
+ }
+}
+
+fn plain_modpow(base: &BigUint, exp_data: &[BigDigit], modulus: &BigUint) -> BigUint {
+ assert!(!modulus.is_zero(), "divide by zero!");
+
+ let i = match exp_data.iter().position(|&r| r != 0) {
+ None => return BigUint::one(),
+ Some(i) => i,
+ };
+
+ let mut base = base.clone();
+ for _ in 0..i {
+ for _ in 0..big_digit::BITS {
+ base = &base * &base % modulus;
+ }
+ }
+
+ let mut r = exp_data[i];
+ let mut b = 0usize;
+ while r.is_even() {
+ base = &base * &base % modulus;
+ r >>= 1;
+ b += 1;
+ }
+
+ let mut exp_iter = exp_data[i + 1..].iter();
+ if exp_iter.len() == 0 && r.is_one() {
+ return base;
+ }
+
+ let mut acc = base.clone();
+ r >>= 1;
+ b += 1;
+
+ {
+ let mut unit = |exp_is_odd| {
+ base = &base * &base % modulus;
+ if exp_is_odd {
+ acc = &acc * &base % modulus;
+ }
+ };
+
+ if let Some(&last) = exp_iter.next_back() {
+ // consume exp_data[i]
+ for _ in b..big_digit::BITS {
+ unit(r.is_odd());
+ r >>= 1;
+ }
+
+ // consume all other digits before the last
+ for &r in exp_iter {
+ let mut r = r;
+ for _ in 0..big_digit::BITS {
+ unit(r.is_odd());
+ r >>= 1;
+ }
+ }
+ r = last;
+ }
+
+ debug_assert_ne!(r, 0);
+ while !r.is_zero() {
+ unit(r.is_odd());
+ r >>= 1;
+ }
+ }
+ acc
+}
+
+#[test]
+fn test_plain_modpow() {
+ let two = BigUint::from(2u32);
+ let modulus = BigUint::from(0x1100u32);
+
+ let exp = vec![0, 0b1];
+ assert_eq!(
+ two.pow(0b1_00000000_u32) % &modulus,
+ plain_modpow(&two, &exp, &modulus)
+ );
+ let exp = vec![0, 0b10];
+ assert_eq!(
+ two.pow(0b10_00000000_u32) % &modulus,
+ plain_modpow(&two, &exp, &modulus)
+ );
+ let exp = vec![0, 0b110010];
+ assert_eq!(
+ two.pow(0b110010_00000000_u32) % &modulus,
+ plain_modpow(&two, &exp, &modulus)
+ );
+ let exp = vec![0b1, 0b1];
+ assert_eq!(
+ two.pow(0b1_00000001_u32) % &modulus,
+ plain_modpow(&two, &exp, &modulus)
+ );
+ let exp = vec![0b1100, 0, 0b1];
+ assert_eq!(
+ two.pow(0b1_00000000_00001100_u32) % &modulus,
+ plain_modpow(&two, &exp, &modulus)
+ );
+}
+
+/// Returns the number of least-significant bits that are zero,
+/// or `None` if the entire number is zero.
+pub fn trailing_zeros(u: &BigUint) -> Option<usize> {
+ u.data
+ .iter()
+ .enumerate()
+ .find(|&(_, &digit)| digit != 0)
+ .map(|(i, digit)| i * big_digit::BITS + digit.trailing_zeros() as usize)
+}
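+
+// Sketch of the digit scan above: zero yields `None`, and a single set
+// bit reports its exact position.
+#[test]
+fn test_trailing_zeros_sketch() {
+ assert_eq!(trailing_zeros(&BigUint::zero()), None);
+ assert_eq!(trailing_zeros(&(BigUint::one() << 70)), Some(70));
+}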
+
+impl_sum_iter_type!(BigUint);
+impl_product_iter_type!(BigUint);
+
+pub trait IntDigits {
+ fn digits(&self) -> &[BigDigit];
+ fn digits_mut(&mut self) -> &mut Vec<BigDigit>;
+ fn normalize(&mut self);
+ fn capacity(&self) -> usize;
+ fn len(&self) -> usize;
+}
+
+impl IntDigits for BigUint {
+ #[inline]
+ fn digits(&self) -> &[BigDigit] {
+ &self.data
+ }
+ #[inline]
+ fn digits_mut(&mut self) -> &mut Vec<BigDigit> {
+ &mut self.data
+ }
+ #[inline]
+ fn normalize(&mut self) {
+ self.normalize();
+ }
+ #[inline]
+ fn capacity(&self) -> usize {
+ self.data.capacity()
+ }
+ #[inline]
+ fn len(&self) -> usize {
+ self.data.len()
+ }
+}
+
+/// Combine four `u32`s into a single `u128`.
+#[cfg(has_i128)]
+#[inline]
+fn u32_to_u128(a: u32, b: u32, c: u32, d: u32) -> u128 {
+ u128::from(d) | (u128::from(c) << 32) | (u128::from(b) << 64) | (u128::from(a) << 96)
+}
+
+/// Split a single `u128` into four `u32`.
+#[cfg(has_i128)]
+#[inline]
+fn u32_from_u128(n: u128) -> (u32, u32, u32, u32) {
+ (
+ (n >> 96) as u32,
+ (n >> 64) as u32,
+ (n >> 32) as u32,
+ n as u32,
+ )
+}
+
+#[cfg(feature = "serde")]
+impl serde::Serialize for BigUint {
+ fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
+ where
+ S: serde::Serializer,
+ {
+ // Note: do not change the serialization format, or it may break forward
+ // and backward compatibility of serialized data! If we ever change the
+ // internal representation, we should still serialize in base-`u32`.
+ let data: &Vec<u32> = &self.data;
+ data.serialize(serializer)
+ }
+}
+
+#[cfg(feature = "serde")]
+impl<'de> serde::Deserialize<'de> for BigUint {
+ fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
+ where
+ D: serde::Deserializer<'de>,
+ {
+ let data: Vec<u32> = Vec::deserialize(deserializer)?;
+ Ok(BigUint::new(data))
+ }
+}
+
+/// Returns the greatest power of the radix that fits in a `BigDigit`, as `(radix^power, power)`
+#[inline]
+fn get_radix_base(radix: u32) -> (BigDigit, usize) {
+ debug_assert!(
+ 2 <= radix && radix <= 256,
+ "The radix must be within 2...256"
+ );
+ debug_assert!(!radix.is_power_of_two());
+
+ // To generate this table:
+ // for radix in 2u64..257 {
+ // let mut power = big_digit::BITS / fls(radix as u64);
+ // let mut base = radix.pow(power as u32);
+ //
+ // while let Some(b) = base.checked_mul(radix) {
+ // if b > big_digit::MAX {
+ // break;
+ // }
+ // base = b;
+ // power += 1;
+ // }
+ //
+ // println!("({:10}, {:2}), // {:2}", base, power, radix);
+ // }
+ // and
+ // for radix in 2u64..257 {
+ // let mut power = 64 / fls(radix as u64);
+ // let mut base = radix.pow(power as u32);
+ //
+ // while let Some(b) = base.checked_mul(radix) {
+ // base = b;
+ // power += 1;
+ // }
+ //
+ // println!("({:20}, {:2}), // {:2}", base, power, radix);
+ // }
+ match big_digit::BITS {
+ 32 => {
+ const BASES: [(u32, usize); 257] = [
+ (0, 0),
+ (0, 0),
+ (0, 0), // 2
+ (3486784401, 20), // 3
+ (0, 0), // 4
+ (1220703125, 13), // 5
+ (2176782336, 12), // 6
+ (1977326743, 11), // 7
+ (0, 0), // 8
+ (3486784401, 10), // 9
+ (1000000000, 9), // 10
+ (2357947691, 9), // 11
+ (429981696, 8), // 12
+ (815730721, 8), // 13
+ (1475789056, 8), // 14
+ (2562890625, 8), // 15
+ (0, 0), // 16
+ (410338673, 7), // 17
+ (612220032, 7), // 18
+ (893871739, 7), // 19
+ (1280000000, 7), // 20
+ (1801088541, 7), // 21
+ (2494357888, 7), // 22
+ (3404825447, 7), // 23
+ (191102976, 6), // 24
+ (244140625, 6), // 25
+ (308915776, 6), // 26
+ (387420489, 6), // 27
+ (481890304, 6), // 28
+ (594823321, 6), // 29
+ (729000000, 6), // 30
+ (887503681, 6), // 31
+ (0, 0), // 32
+ (1291467969, 6), // 33
+ (1544804416, 6), // 34
+ (1838265625, 6), // 35
+ (2176782336, 6), // 36
+ (2565726409, 6), // 37
+ (3010936384, 6), // 38
+ (3518743761, 6), // 39
+ (4096000000, 6), // 40
+ (115856201, 5), // 41
+ (130691232, 5), // 42
+ (147008443, 5), // 43
+ (164916224, 5), // 44
+ (184528125, 5), // 45
+ (205962976, 5), // 46
+ (229345007, 5), // 47
+ (254803968, 5), // 48
+ (282475249, 5), // 49
+ (312500000, 5), // 50
+ (345025251, 5), // 51
+ (380204032, 5), // 52
+ (418195493, 5), // 53
+ (459165024, 5), // 54
+ (503284375, 5), // 55
+ (550731776, 5), // 56
+ (601692057, 5), // 57
+ (656356768, 5), // 58
+ (714924299, 5), // 59
+ (777600000, 5), // 60
+ (844596301, 5), // 61
+ (916132832, 5), // 62
+ (992436543, 5), // 63
+ (0, 0), // 64
+ (1160290625, 5), // 65
+ (1252332576, 5), // 66
+ (1350125107, 5), // 67
+ (1453933568, 5), // 68
+ (1564031349, 5), // 69
+ (1680700000, 5), // 70
+ (1804229351, 5), // 71
+ (1934917632, 5), // 72
+ (2073071593, 5), // 73
+ (2219006624, 5), // 74
+ (2373046875, 5), // 75
+ (2535525376, 5), // 76
+ (2706784157, 5), // 77
+ (2887174368, 5), // 78
+ (3077056399, 5), // 79
+ (3276800000, 5), // 80
+ (3486784401, 5), // 81
+ (3707398432, 5), // 82
+ (3939040643, 5), // 83
+ (4182119424, 5), // 84
+ (52200625, 4), // 85
+ (54700816, 4), // 86
+ (57289761, 4), // 87
+ (59969536, 4), // 88
+ (62742241, 4), // 89
+ (65610000, 4), // 90
+ (68574961, 4), // 91
+ (71639296, 4), // 92
+ (74805201, 4), // 93
+ (78074896, 4), // 94
+ (81450625, 4), // 95
+ (84934656, 4), // 96
+ (88529281, 4), // 97
+ (92236816, 4), // 98
+ (96059601, 4), // 99
+ (100000000, 4), // 100
+ (104060401, 4), // 101
+ (108243216, 4), // 102
+ (112550881, 4), // 103
+ (116985856, 4), // 104
+ (121550625, 4), // 105
+ (126247696, 4), // 106
+ (131079601, 4), // 107
+ (136048896, 4), // 108
+ (141158161, 4), // 109
+ (146410000, 4), // 110
+ (151807041, 4), // 111
+ (157351936, 4), // 112
+ (163047361, 4), // 113
+ (168896016, 4), // 114
+ (174900625, 4), // 115
+ (181063936, 4), // 116
+ (187388721, 4), // 117
+ (193877776, 4), // 118
+ (200533921, 4), // 119
+ (207360000, 4), // 120
+ (214358881, 4), // 121
+ (221533456, 4), // 122
+ (228886641, 4), // 123
+ (236421376, 4), // 124
+ (244140625, 4), // 125
+ (252047376, 4), // 126
+ (260144641, 4), // 127
+ (0, 0), // 128
+ (276922881, 4), // 129
+ (285610000, 4), // 130
+ (294499921, 4), // 131
+ (303595776, 4), // 132
+ (312900721, 4), // 133
+ (322417936, 4), // 134
+ (332150625, 4), // 135
+ (342102016, 4), // 136
+ (352275361, 4), // 137
+ (362673936, 4), // 138
+ (373301041, 4), // 139
+ (384160000, 4), // 140
+ (395254161, 4), // 141
+ (406586896, 4), // 142
+ (418161601, 4), // 143
+ (429981696, 4), // 144
+ (442050625, 4), // 145
+ (454371856, 4), // 146
+ (466948881, 4), // 147
+ (479785216, 4), // 148
+ (492884401, 4), // 149
+ (506250000, 4), // 150
+ (519885601, 4), // 151
+ (533794816, 4), // 152
+ (547981281, 4), // 153
+ (562448656, 4), // 154
+ (577200625, 4), // 155
+ (592240896, 4), // 156
+ (607573201, 4), // 157
+ (623201296, 4), // 158
+ (639128961, 4), // 159
+ (655360000, 4), // 160
+ (671898241, 4), // 161
+ (688747536, 4), // 162
+ (705911761, 4), // 163
+ (723394816, 4), // 164
+ (741200625, 4), // 165
+ (759333136, 4), // 166
+ (777796321, 4), // 167
+ (796594176, 4), // 168
+ (815730721, 4), // 169
+ (835210000, 4), // 170
+ (855036081, 4), // 171
+ (875213056, 4), // 172
+ (895745041, 4), // 173
+ (916636176, 4), // 174
+ (937890625, 4), // 175
+ (959512576, 4), // 176
+ (981506241, 4), // 177
+ (1003875856, 4), // 178
+ (1026625681, 4), // 179
+ (1049760000, 4), // 180
+ (1073283121, 4), // 181
+ (1097199376, 4), // 182
+ (1121513121, 4), // 183
+ (1146228736, 4), // 184
+ (1171350625, 4), // 185
+ (1196883216, 4), // 186
+ (1222830961, 4), // 187
+ (1249198336, 4), // 188
+ (1275989841, 4), // 189
+ (1303210000, 4), // 190
+ (1330863361, 4), // 191
+ (1358954496, 4), // 192
+ (1387488001, 4), // 193
+ (1416468496, 4), // 194
+ (1445900625, 4), // 195
+ (1475789056, 4), // 196
+ (1506138481, 4), // 197
+ (1536953616, 4), // 198
+ (1568239201, 4), // 199
+ (1600000000, 4), // 200
+ (1632240801, 4), // 201
+ (1664966416, 4), // 202
+ (1698181681, 4), // 203
+ (1731891456, 4), // 204
+ (1766100625, 4), // 205
+ (1800814096, 4), // 206
+ (1836036801, 4), // 207
+ (1871773696, 4), // 208
+ (1908029761, 4), // 209
+ (1944810000, 4), // 210
+ (1982119441, 4), // 211
+ (2019963136, 4), // 212
+ (2058346161, 4), // 213
+ (2097273616, 4), // 214
+ (2136750625, 4), // 215
+ (2176782336, 4), // 216
+ (2217373921, 4), // 217
+ (2258530576, 4), // 218
+ (2300257521, 4), // 219
+ (2342560000, 4), // 220
+ (2385443281, 4), // 221
+ (2428912656, 4), // 222
+ (2472973441, 4), // 223
+ (2517630976, 4), // 224
+ (2562890625, 4), // 225
+ (2608757776, 4), // 226
+ (2655237841, 4), // 227
+ (2702336256, 4), // 228
+ (2750058481, 4), // 229
+ (2798410000, 4), // 230
+ (2847396321, 4), // 231
+ (2897022976, 4), // 232
+ (2947295521, 4), // 233
+ (2998219536, 4), // 234
+ (3049800625, 4), // 235
+ (3102044416, 4), // 236
+ (3154956561, 4), // 237
+ (3208542736, 4), // 238
+ (3262808641, 4), // 239
+ (3317760000, 4), // 240
+ (3373402561, 4), // 241
+ (3429742096, 4), // 242
+ (3486784401, 4), // 243
+ (3544535296, 4), // 244
+ (3603000625, 4), // 245
+ (3662186256, 4), // 246
+ (3722098081, 4), // 247
+ (3782742016, 4), // 248
+ (3844124001, 4), // 249
+ (3906250000, 4), // 250
+ (3969126001, 4), // 251
+ (4032758016, 4), // 252
+ (4097152081, 4), // 253
+ (4162314256, 4), // 254
+ (4228250625, 4), // 255
+ (0, 0), // 256
+ ];
+
+ let (base, power) = BASES[radix as usize];
+ (base as BigDigit, power)
+ }
+ 64 => {
+ const BASES: [(u64, usize); 257] = [
+ (0, 0),
+ (0, 0),
+ (9223372036854775808, 63), // 2
+ (12157665459056928801, 40), // 3
+ (4611686018427387904, 31), // 4
+ (7450580596923828125, 27), // 5
+ (4738381338321616896, 24), // 6
+ (3909821048582988049, 22), // 7
+ (9223372036854775808, 21), // 8
+ (12157665459056928801, 20), // 9
+ (10000000000000000000, 19), // 10
+ (5559917313492231481, 18), // 11
+ (2218611106740436992, 17), // 12
+ (8650415919381337933, 17), // 13
+ (2177953337809371136, 16), // 14
+ (6568408355712890625, 16), // 15
+ (1152921504606846976, 15), // 16
+ (2862423051509815793, 15), // 17
+ (6746640616477458432, 15), // 18
+ (15181127029874798299, 15), // 19
+ (1638400000000000000, 14), // 20
+ (3243919932521508681, 14), // 21
+ (6221821273427820544, 14), // 22
+ (11592836324538749809, 14), // 23
+ (876488338465357824, 13), // 24
+ (1490116119384765625, 13), // 25
+ (2481152873203736576, 13), // 26
+ (4052555153018976267, 13), // 27
+ (6502111422497947648, 13), // 28
+ (10260628712958602189, 13), // 29
+ (15943230000000000000, 13), // 30
+ (787662783788549761, 12), // 31
+ (1152921504606846976, 12), // 32
+ (1667889514952984961, 12), // 33
+ (2386420683693101056, 12), // 34
+ (3379220508056640625, 12), // 35
+ (4738381338321616896, 12), // 36
+ (6582952005840035281, 12), // 37
+ (9065737908494995456, 12), // 38
+ (12381557655576425121, 12), // 39
+ (16777216000000000000, 12), // 40
+ (550329031716248441, 11), // 41
+ (717368321110468608, 11), // 42
+ (929293739471222707, 11), // 43
+ (1196683881290399744, 11), // 44
+ (1532278301220703125, 11), // 45
+ (1951354384207722496, 11), // 46
+ (2472159215084012303, 11), // 47
+ (3116402981210161152, 11), // 48
+ (3909821048582988049, 11), // 49
+ (4882812500000000000, 11), // 50
+ (6071163615208263051, 11), // 51
+ (7516865509350965248, 11), // 52
+ (9269035929372191597, 11), // 53
+ (11384956040305711104, 11), // 54
+ (13931233916552734375, 11), // 55
+ (16985107389382393856, 11), // 56
+ (362033331456891249, 10), // 57
+ (430804206899405824, 10), // 58
+ (511116753300641401, 10), // 59
+ (604661760000000000, 10), // 60
+ (713342911662882601, 10), // 61
+ (839299365868340224, 10), // 62
+ (984930291881790849, 10), // 63
+ (1152921504606846976, 10), // 64
+ (1346274334462890625, 10), // 65
+ (1568336880910795776, 10), // 66
+ (1822837804551761449, 10), // 67
+ (2113922820157210624, 10), // 68
+ (2446194060654759801, 10), // 69
+ (2824752490000000000, 10), // 70
+ (3255243551009881201, 10), // 71
+ (3743906242624487424, 10), // 72
+ (4297625829703557649, 10), // 73
+ (4923990397355877376, 10), // 74
+ (5631351470947265625, 10), // 75
+ (6428888932339941376, 10), // 76
+ (7326680472586200649, 10), // 77
+ (8335775831236199424, 10), // 78
+ (9468276082626847201, 10), // 79
+ (10737418240000000000, 10), // 80
+ (12157665459056928801, 10), // 81
+ (13744803133596058624, 10), // 82
+ (15516041187205853449, 10), // 83
+ (17490122876598091776, 10), // 84
+ (231616946283203125, 9), // 85
+ (257327417311663616, 9), // 86
+ (285544154243029527, 9), // 87
+ (316478381828866048, 9), // 88
+ (350356403707485209, 9), // 89
+ (387420489000000000, 9), // 90
+ (427929800129788411, 9), // 91
+ (472161363286556672, 9), // 92
+ (520411082988487293, 9), // 93
+ (572994802228616704, 9), // 94
+ (630249409724609375, 9), // 95
+ (692533995824480256, 9), // 96
+ (760231058654565217, 9), // 97
+ (833747762130149888, 9), // 98
+ (913517247483640899, 9), // 99
+ (1000000000000000000, 9), // 100
+ (1093685272684360901, 9), // 101
+ (1195092568622310912, 9), // 102
+ (1304773183829244583, 9), // 103
+ (1423311812421484544, 9), // 104
+ (1551328215978515625, 9), // 105
+ (1689478959002692096, 9), // 106
+ (1838459212420154507, 9), // 107
+ (1999004627104432128, 9), // 108
+ (2171893279442309389, 9), // 109
+ (2357947691000000000, 9), // 110
+ (2558036924386500591, 9), // 111
+ (2773078757450186752, 9), // 112
+ (3004041937984268273, 9), // 113
+ (3251948521156637184, 9), // 114
+ (3517876291919921875, 9), // 115
+ (3802961274698203136, 9), // 116
+ (4108400332687853397, 9), // 117
+ (4435453859151328768, 9), // 118
+ (4785448563124474679, 9), // 119
+ (5159780352000000000, 9), // 120
+ (5559917313492231481, 9), // 121
+ (5987402799531080192, 9), // 122
+ (6443858614676334363, 9), // 123
+ (6930988311686938624, 9), // 124
+ (7450580596923828125, 9), // 125
+ (8004512848309157376, 9), // 126
+ (8594754748609397887, 9), // 127
+ (9223372036854775808, 9), // 128
+ (9892530380752880769, 9), // 129
+ (10604499373000000000, 9), // 130
+ (11361656654439817571, 9), // 131
+ (12166492167065567232, 9), // 132
+ (13021612539908538853, 9), // 133
+ (13929745610903012864, 9), // 134
+ (14893745087865234375, 9), // 135
+ (15916595351771938816, 9), // 136
+ (17001416405572203977, 9), // 137
+ (18151468971815029248, 9), // 138
+ (139353667211683681, 8), // 139
+ (147578905600000000, 8), // 140
+ (156225851787813921, 8), // 141
+ (165312903998914816, 8), // 142
+ (174859124550883201, 8), // 143
+ (184884258895036416, 8), // 144
+ (195408755062890625, 8), // 145
+ (206453783524884736, 8), // 146
+ (218041257467152161, 8), // 147
+ (230193853492166656, 8), // 148
+ (242935032749128801, 8), // 149
+ (256289062500000000, 8), // 150
+ (270281038127131201, 8), // 151
+ (284936905588473856, 8), // 152
+ (300283484326400961, 8), // 153
+ (316348490636206336, 8), // 154
+ (333160561500390625, 8), // 155
+ (350749278894882816, 8), // 156
+ (369145194573386401, 8), // 157
+ (388379855336079616, 8), // 158
+ (408485828788939521, 8), // 159
+ (429496729600000000, 8), // 160
+ (451447246258894081, 8), // 161
+ (474373168346071296, 8), // 162
+ (498311414318121121, 8), // 163
+ (523300059815673856, 8), // 164
+ (549378366500390625, 8), // 165
+ (576586811427594496, 8), // 166
+ (604967116961135041, 8), // 167
+ (634562281237118976, 8), // 168
+ (665416609183179841, 8), // 169
+ (697575744100000000, 8), // 170
+ (731086699811838561, 8), // 171
+ (765997893392859136, 8), // 172
+ (802359178476091681, 8), // 173
+ (840221879151902976, 8), // 174
+ (879638824462890625, 8), // 175
+ (920664383502155776, 8), // 176
+ (963354501121950081, 8), // 177
+ (1007766734259732736, 8), // 178
+ (1053960288888713761, 8), // 179
+ (1101996057600000000, 8), // 180
+ (1151936657823500641, 8), // 181
+ (1203846470694789376, 8), // 182
+ (1257791680575160641, 8), // 183
+ (1313840315232157696, 8), // 184
+ (1372062286687890625, 8), // 185
+ (1432529432742502656, 8), // 186
+ (1495315559180183521, 8), // 187
+ (1560496482665168896, 8), // 188
+ (1628150074335205281, 8), // 189
+ (1698356304100000000, 8), // 190
+ (1771197285652216321, 8), // 191
+ (1846757322198614016, 8), // 192
+ (1925122952918976001, 8), // 193
+ (2006383000160502016, 8), // 194
+ (2090628617375390625, 8), // 195
+ (2177953337809371136, 8), // 196
+ (2268453123948987361, 8), // 197
+ (2362226417735475456, 8), // 198
+ (2459374191553118401, 8), // 199
+ (2560000000000000000, 8), // 200
+ (2664210032449121601, 8), // 201
+ (2772113166407885056, 8), // 202
+ (2883821021683985761, 8), // 203
+ (2999448015365799936, 8), // 204
+ (3119111417625390625, 8), // 205
+ (3242931408352297216, 8), // 206
+ (3371031134626313601, 8), // 207
+ (3503536769037500416, 8), // 208
+ (3640577568861717121, 8), // 209
+ (3782285936100000000, 8), // 210
+ (3928797478390152481, 8), // 211
+ (4080251070798954496, 8), // 212
+ (4236788918503437921, 8), // 213
+ (4398556620369715456, 8), // 214
+ (4565703233437890625, 8), // 215
+ (4738381338321616896, 8), // 216
+ (4916747105530914241, 8), // 217
+ (5100960362726891776, 8), // 218
+ (5291184662917065441, 8), // 219
+ (5487587353600000000, 8), // 220
+ (5690339646868044961, 8), // 221
+ (5899616690476974336, 8), // 222
+ (6115597639891380481, 8), // 223
+ (6338465731314712576, 8), // 224
+ (6568408355712890625, 8), // 225
+ (6805617133840466176, 8), // 226
+ (7050287992278341281, 8), // 227
+ (7302621240492097536, 8), // 228
+ (7562821648920027361, 8), // 229
+ (7831098528100000000, 8), // 230
+ (8107665808844335041, 8), // 231
+ (8392742123471896576, 8), // 232
+ (8686550888106661441, 8), // 233
+ (8989320386052055296, 8), // 234
+ (9301283852250390625, 8), // 235
+ (9622679558836781056, 8), // 236
+ (9953750901796946721, 8), // 237
+ (10294746488738365696, 8), // 238
+ (10645920227784266881, 8), // 239
+ (11007531417600000000, 8), // 240
+ (11379844838561358721, 8), // 241
+ (11763130845074473216, 8), // 242
+ (12157665459056928801, 8), // 243
+ (12563730464589807616, 8), // 244
+ (12981613503750390625, 8), // 245
+ (13411608173635297536, 8), // 246
+ (13854014124583882561, 8), // 247
+ (14309137159611744256, 8), // 248
+ (14777289335064248001, 8), // 249
+ (15258789062500000000, 8), // 250
+ (15753961211814252001, 8), // 251
+ (16263137215612256256, 8), // 252
+ (16786655174842630561, 8), // 253
+ (17324859965700833536, 8), // 254
+ (17878103347812890625, 8), // 255
+ (72057594037927936, 7), // 256
+ ];
+
+ let (base, power) = BASES[radix as usize];
+ (base as BigDigit, power)
+ }
+ _ => panic!("Invalid bigdigit size"),
+ }
+}
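+
+// A hedged sketch (not part of the upstream crate) of where the rows above
+// come from: each `(base, power)` entry in the 64-bit branch is the largest
+// power of the radix that still fits in a u64, i.e. base = radix^power.
+// `max_power_in_u64` is a hypothetical helper written just for this check.
+#[test]
+fn test_radix_base_is_largest_fitting_power() {
+    fn max_power_in_u64(radix: u64) -> (u64, usize) {
+        let mut base = radix;
+        let mut power = 1;
+        // Multiply until the next power would overflow a u64.
+        while let Some(next) = base.checked_mul(radix) {
+            base = next;
+            power += 1;
+        }
+        (base, power)
+    }
+    // Spot-check a few rows of the table above.
+    assert_eq!(max_power_in_u64(100), (1_000_000_000_000_000_000, 9));
+    assert_eq!(max_power_in_u64(128), (9_223_372_036_854_775_808, 9));
+    assert_eq!(max_power_in_u64(256), (72_057_594_037_927_936, 7));
+}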
+
+#[test]
+fn test_from_slice() {
+ fn check(slice: &[BigDigit], data: &[BigDigit]) {
+ assert!(BigUint::from_slice(slice).data == data);
+ }
+ check(&[1], &[1]);
+ check(&[0, 0, 0], &[]);
+ check(&[1, 2, 0, 0], &[1, 2]);
+ check(&[0, 0, 1, 2], &[0, 0, 1, 2]);
+ check(&[0, 0, 1, 2, 0, 0], &[0, 0, 1, 2]);
+ check(&[-1i32 as BigDigit], &[-1i32 as BigDigit]);
+}
+
+#[test]
+fn test_assign_from_slice() {
+ fn check(slice: &[BigDigit], data: &[BigDigit]) {
+ let mut p = BigUint::from_slice(&[2627_u32, 0_u32, 9182_u32, 42_u32]);
+ p.assign_from_slice(slice);
+ assert!(p.data == data);
+ }
+ check(&[1], &[1]);
+ check(&[0, 0, 0], &[]);
+ check(&[1, 2, 0, 0], &[1, 2]);
+ check(&[0, 0, 1, 2], &[0, 0, 1, 2]);
+ check(&[0, 0, 1, 2, 0, 0], &[0, 0, 1, 2]);
+ check(&[-1i32 as BigDigit], &[-1i32 as BigDigit]);
+}
+
+#[cfg(has_i128)]
+#[test]
+fn test_u32_u128() {
+ assert_eq!(u32_from_u128(0u128), (0, 0, 0, 0));
+ assert_eq!(
+ u32_from_u128(u128::max_value()),
+ (
+ u32::max_value(),
+ u32::max_value(),
+ u32::max_value(),
+ u32::max_value()
+ )
+ );
+
+ assert_eq!(
+ u32_from_u128(u32::max_value() as u128),
+ (0, 0, 0, u32::max_value())
+ );
+
+ assert_eq!(
+ u32_from_u128(u64::max_value() as u128),
+ (0, 0, u32::max_value(), u32::max_value())
+ );
+
+ assert_eq!(
+ u32_from_u128((u64::max_value() as u128) + u32::max_value() as u128),
+ (0, 1, 0, u32::max_value() - 1)
+ );
+
+ assert_eq!(u32_from_u128(36_893_488_151_714_070_528), (0, 2, 1, 0));
+}
+
+#[cfg(has_i128)]
+#[test]
+fn test_u128_u32_roundtrip() {
+ // roundtrips
+ let values = vec![
+ 0u128,
+ 1u128,
+ u64::max_value() as u128 * 3,
+ u32::max_value() as u128,
+ u64::max_value() as u128,
+ (u64::max_value() as u128) + u32::max_value() as u128,
+ u128::max_value(),
+ ];
+
+ for val in &values {
+ let (a, b, c, d) = u32_from_u128(*val);
+ assert_eq!(u32_to_u128(a, b, c, d), *val);
+ }
+}
+
+#[test]
+fn test_pow_biguint() {
+ let base = BigUint::from(5u8);
+ let exponent = BigUint::from(3u8);
+
+ assert_eq!(BigUint::from(125u8), base.pow(exponent));
+}
diff --git a/third_party/rust/num-bigint/src/lib.rs b/third_party/rust/num-bigint/src/lib.rs
new file mode 100644
index 0000000000..dece7bab00
--- /dev/null
+++ b/third_party/rust/num-bigint/src/lib.rs
@@ -0,0 +1,206 @@
+// Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! A big integer (signed version: `BigInt`, unsigned version: `BigUint`).
+//!
+//! A `BigUint` is represented as a vector of `BigDigit`s.
+//! A `BigInt` is a combination of `BigUint` and `Sign`.
+//!
+//! Common numerical operations are overloaded, so we can treat big integers
+//! the same way we treat other numbers.
+//!
+//! ## Example
+//!
+//! ```rust
+//! extern crate num_bigint;
+//! extern crate num_traits;
+//!
+//! # fn main() {
+//! use num_bigint::BigUint;
+//! use num_traits::{Zero, One};
+//! use std::mem::replace;
+//!
+//! // Calculate large fibonacci numbers.
+//! fn fib(n: usize) -> BigUint {
+//! let mut f0: BigUint = Zero::zero();
+//! let mut f1: BigUint = One::one();
+//! for _ in 0..n {
+//! let f2 = f0 + &f1;
+//! // This is a low-cost way of swapping f0 with f1 and f1 with f2.
+//! f0 = replace(&mut f1, f2);
+//! }
+//! f0
+//! }
+//!
+//! // This is a very large number.
+//! println!("fib(1000) = {}", fib(1000));
+//! # }
+//! ```
+//!
+//! It's easy to generate large random numbers:
+//!
+//! ```rust
+//! # #[cfg(feature = "rand")]
+//! extern crate rand;
+//! extern crate num_bigint as bigint;
+//!
+//! # #[cfg(feature = "rand")]
+//! # fn main() {
+//! use bigint::{ToBigInt, RandBigInt};
+//!
+//! let mut rng = rand::thread_rng();
+//! let a = rng.gen_bigint(1000);
+//!
+//! let low = -10000.to_bigint().unwrap();
+//! let high = 10000.to_bigint().unwrap();
+//! let b = rng.gen_bigint_range(&low, &high);
+//!
+//! // Probably an even larger number.
+//! println!("{}", a * b);
+//! # }
+//!
+//! # #[cfg(not(feature = "rand"))]
+//! # fn main() {
+//! # }
+//! ```
+//!
+//! ## Compatibility
+//!
+//! The `num-bigint` crate is tested for rustc 1.15 and greater.
+
+#![doc(html_root_url = "https://docs.rs/num-bigint/0.2")]
+// We don't actually support `no_std` yet, and probably won't until `alloc` is stable. We're just
+// reserving this ability with the "std" feature now, and compilation will fail without it.
+#![cfg_attr(not(feature = "std"), no_std)]
+
+#[cfg(feature = "rand")]
+extern crate rand;
+#[cfg(feature = "serde")]
+extern crate serde;
+
+extern crate num_integer as integer;
+extern crate num_traits as traits;
+#[cfg(feature = "quickcheck")]
+extern crate quickcheck;
+
+use std::error::Error;
+use std::fmt;
+
+#[macro_use]
+mod macros;
+
+mod bigint;
+mod biguint;
+
+#[cfg(feature = "rand")]
+mod bigrand;
+
+#[cfg(target_pointer_width = "32")]
+type UsizePromotion = u32;
+#[cfg(target_pointer_width = "64")]
+type UsizePromotion = u64;
+
+#[cfg(target_pointer_width = "32")]
+type IsizePromotion = i32;
+#[cfg(target_pointer_width = "64")]
+type IsizePromotion = i64;
+
+#[derive(Debug, Clone, PartialEq, Eq)]
+pub struct ParseBigIntError {
+ kind: BigIntErrorKind,
+}
+
+#[derive(Debug, Clone, PartialEq, Eq)]
+enum BigIntErrorKind {
+ Empty,
+ InvalidDigit,
+}
+
+impl ParseBigIntError {
+ fn __description(&self) -> &str {
+ use BigIntErrorKind::*;
+ match self.kind {
+ Empty => "cannot parse integer from empty string",
+ InvalidDigit => "invalid digit found in string",
+ }
+ }
+
+ fn empty() -> Self {
+ ParseBigIntError {
+ kind: BigIntErrorKind::Empty,
+ }
+ }
+
+ fn invalid() -> Self {
+ ParseBigIntError {
+ kind: BigIntErrorKind::InvalidDigit,
+ }
+ }
+}
+
+impl fmt::Display for ParseBigIntError {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ self.__description().fmt(f)
+ }
+}
+
+impl Error for ParseBigIntError {
+ fn description(&self) -> &str {
+ self.__description()
+ }
+}
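+
+// A hedged usage sketch (this test is not in the upstream file): the private
+// constructors above surface through `Display` with the messages listed in
+// `__description`.
+#[test]
+fn test_parse_bigint_error_messages() {
+    assert_eq!(
+        ParseBigIntError::empty().to_string(),
+        "cannot parse integer from empty string"
+    );
+    assert_eq!(
+        ParseBigIntError::invalid().to_string(),
+        "invalid digit found in string"
+    );
+}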
+
+pub use biguint::BigUint;
+pub use biguint::ToBigUint;
+
+pub use bigint::BigInt;
+pub use bigint::Sign;
+pub use bigint::ToBigInt;
+
+#[cfg(feature = "rand")]
+pub use bigrand::{RandBigInt, RandomBits, UniformBigInt, UniformBigUint};
+
+mod big_digit {
+ /// A `BigDigit` is a `BigUint`'s composing element.
+ pub type BigDigit = u32;
+
+ /// A `DoubleBigDigit` is the internal type used to do the computations. Its
+ /// size is twice that of `BigDigit`.
+ pub type DoubleBigDigit = u64;
+
+ /// A `SignedDoubleBigDigit` is the signed version of `DoubleBigDigit`.
+ pub type SignedDoubleBigDigit = i64;
+
+ // `DoubleBigDigit` size dependent
+ pub const BITS: usize = 32;
+
+ const LO_MASK: DoubleBigDigit = (-1i32 as DoubleBigDigit) >> BITS;
+
+ #[inline]
+ fn get_hi(n: DoubleBigDigit) -> BigDigit {
+ (n >> BITS) as BigDigit
+ }
+ #[inline]
+ fn get_lo(n: DoubleBigDigit) -> BigDigit {
+ (n & LO_MASK) as BigDigit
+ }
+
+ /// Split one `DoubleBigDigit` into two `BigDigit`s.
+ #[inline]
+ pub fn from_doublebigdigit(n: DoubleBigDigit) -> (BigDigit, BigDigit) {
+ (get_hi(n), get_lo(n))
+ }
+
+ /// Join two `BigDigit`s into one `DoubleBigDigit`
+ #[inline]
+ pub fn to_doublebigdigit(hi: BigDigit, lo: BigDigit) -> DoubleBigDigit {
+ DoubleBigDigit::from(lo) | (DoubleBigDigit::from(hi) << BITS)
+ }
+}
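+
+// A hedged round-trip check (not in the upstream file): splitting a
+// `DoubleBigDigit` and joining the halves are inverses, with the high word
+// holding the upper 32 bits.
+#[test]
+fn test_big_digit_split_join() {
+    let n: big_digit::DoubleBigDigit = 0x0123_4567_89ab_cdef;
+    let (hi, lo) = big_digit::from_doublebigdigit(n);
+    assert_eq!((hi, lo), (0x0123_4567, 0x89ab_cdef));
+    assert_eq!(big_digit::to_doublebigdigit(hi, lo), n);
+}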
diff --git a/third_party/rust/num-bigint/src/macros.rs b/third_party/rust/num-bigint/src/macros.rs
new file mode 100644
index 0000000000..0ba6e48c72
--- /dev/null
+++ b/third_party/rust/num-bigint/src/macros.rs
@@ -0,0 +1,445 @@
+#![allow(unknown_lints)] // older rustc doesn't know `unused_macros`
+#![allow(unused_macros)]
+
+macro_rules! forward_val_val_binop {
+ (impl $imp:ident for $res:ty, $method:ident) => {
+ impl $imp<$res> for $res {
+ type Output = $res;
+
+ #[inline]
+ fn $method(self, other: $res) -> $res {
+ // forward to val-ref
+ $imp::$method(self, &other)
+ }
+ }
+ };
+}
+
+macro_rules! forward_val_val_binop_commutative {
+ (impl $imp:ident for $res:ty, $method:ident) => {
+ impl $imp<$res> for $res {
+ type Output = $res;
+
+ #[inline]
+ fn $method(self, other: $res) -> $res {
+ // forward to val-ref, with the larger capacity as val
+ if self.capacity() >= other.capacity() {
+ $imp::$method(self, &other)
+ } else {
+ $imp::$method(other, &self)
+ }
+ }
+ }
+ };
+}
+
+macro_rules! forward_ref_val_binop {
+ (impl $imp:ident for $res:ty, $method:ident) => {
+ impl<'a> $imp<$res> for &'a $res {
+ type Output = $res;
+
+ #[inline]
+ fn $method(self, other: $res) -> $res {
+ // forward to ref-ref
+ $imp::$method(self, &other)
+ }
+ }
+ };
+}
+
+macro_rules! forward_ref_val_binop_commutative {
+ (impl $imp:ident for $res:ty, $method:ident) => {
+ impl<'a> $imp<$res> for &'a $res {
+ type Output = $res;
+
+ #[inline]
+ fn $method(self, other: $res) -> $res {
+ // reverse, forward to val-ref
+ $imp::$method(other, self)
+ }
+ }
+ };
+}
+
+macro_rules! forward_val_ref_binop {
+ (impl $imp:ident for $res:ty, $method:ident) => {
+ impl<'a> $imp<&'a $res> for $res {
+ type Output = $res;
+
+ #[inline]
+ fn $method(self, other: &$res) -> $res {
+ // forward to ref-ref
+ $imp::$method(&self, other)
+ }
+ }
+ };
+}
+
+macro_rules! forward_ref_ref_binop {
+ (impl $imp:ident for $res:ty, $method:ident) => {
+ impl<'a, 'b> $imp<&'b $res> for &'a $res {
+ type Output = $res;
+
+ #[inline]
+ fn $method(self, other: &$res) -> $res {
+ // forward to val-ref
+ $imp::$method(self.clone(), other)
+ }
+ }
+ };
+}
+
+macro_rules! forward_ref_ref_binop_commutative {
+ (impl $imp:ident for $res:ty, $method:ident) => {
+ impl<'a, 'b> $imp<&'b $res> for &'a $res {
+ type Output = $res;
+
+ #[inline]
+ fn $method(self, other: &$res) -> $res {
+ // forward to val-ref, choosing the larger to clone
+ if self.len() >= other.len() {
+ $imp::$method(self.clone(), other)
+ } else {
+ $imp::$method(other.clone(), self)
+ }
+ }
+ }
+ };
+}
+
+macro_rules! forward_val_assign {
+ (impl $imp:ident for $res:ty, $method:ident) => {
+ impl $imp<$res> for $res {
+ #[inline]
+ fn $method(&mut self, other: $res) {
+ self.$method(&other);
+ }
+ }
+ };
+}
+
+macro_rules! forward_val_assign_scalar {
+ (impl $imp:ident for $res:ty, $scalar:ty, $method:ident) => {
+ impl $imp<$res> for $scalar {
+ #[inline]
+ fn $method(&mut self, other: $res) {
+ self.$method(&other);
+ }
+ }
+ };
+}
+
+/// Use this if `val_val_binop` is already implemented and the reversed order is required.
+macro_rules! forward_scalar_val_val_binop_commutative {
+ (impl $imp:ident < $scalar:ty > for $res:ty, $method:ident) => {
+ impl $imp<$res> for $scalar {
+ type Output = $res;
+
+ #[inline]
+ fn $method(self, other: $res) -> $res {
+ $imp::$method(other, self)
+ }
+ }
+ };
+}
+
+// Forward scalar to ref-val, when reusing storage is not helpful
+macro_rules! forward_scalar_val_val_binop_to_ref_val {
+ (impl $imp:ident<$scalar:ty> for $res:ty, $method:ident) => {
+ impl $imp<$scalar> for $res {
+ type Output = $res;
+
+ #[inline]
+ fn $method(self, other: $scalar) -> $res {
+ $imp::$method(&self, other)
+ }
+ }
+
+ impl $imp<$res> for $scalar {
+ type Output = $res;
+
+ #[inline]
+ fn $method(self, other: $res) -> $res {
+ $imp::$method(self, &other)
+ }
+ }
+ };
+}
+
+macro_rules! forward_scalar_ref_ref_binop_to_ref_val {
+ (impl $imp:ident<$scalar:ty> for $res:ty, $method:ident) => {
+ impl<'a, 'b> $imp<&'b $scalar> for &'a $res {
+ type Output = $res;
+
+ #[inline]
+ fn $method(self, other: &$scalar) -> $res {
+ $imp::$method(self, *other)
+ }
+ }
+
+ impl<'a, 'b> $imp<&'a $res> for &'b $scalar {
+ type Output = $res;
+
+ #[inline]
+ fn $method(self, other: &$res) -> $res {
+ $imp::$method(*self, other)
+ }
+ }
+ };
+}
+
+macro_rules! forward_scalar_val_ref_binop_to_ref_val {
+ (impl $imp:ident<$scalar:ty> for $res:ty, $method:ident) => {
+ impl<'a> $imp<&'a $scalar> for $res {
+ type Output = $res;
+
+ #[inline]
+ fn $method(self, other: &$scalar) -> $res {
+ $imp::$method(&self, *other)
+ }
+ }
+
+ impl<'a> $imp<$res> for &'a $scalar {
+ type Output = $res;
+
+ #[inline]
+ fn $method(self, other: $res) -> $res {
+ $imp::$method(*self, &other)
+ }
+ }
+ };
+}
+
+macro_rules! forward_scalar_val_ref_binop_to_val_val {
+ (impl $imp:ident<$scalar:ty> for $res:ty, $method:ident) => {
+ impl<'a> $imp<&'a $scalar> for $res {
+ type Output = $res;
+
+ #[inline]
+ fn $method(self, other: &$scalar) -> $res {
+ $imp::$method(self, *other)
+ }
+ }
+
+ impl<'a> $imp<$res> for &'a $scalar {
+ type Output = $res;
+
+ #[inline]
+ fn $method(self, other: $res) -> $res {
+ $imp::$method(*self, other)
+ }
+ }
+ };
+}
+
+macro_rules! forward_scalar_ref_val_binop_to_val_val {
+ (impl $imp:ident < $scalar:ty > for $res:ty, $method:ident) => {
+ impl<'a> $imp<$scalar> for &'a $res {
+ type Output = $res;
+
+ #[inline]
+ fn $method(self, other: $scalar) -> $res {
+ $imp::$method(self.clone(), other)
+ }
+ }
+
+ impl<'a> $imp<&'a $res> for $scalar {
+ type Output = $res;
+
+ #[inline]
+ fn $method(self, other: &$res) -> $res {
+ $imp::$method(self, other.clone())
+ }
+ }
+ };
+}
+
+macro_rules! forward_scalar_ref_ref_binop_to_val_val {
+ (impl $imp:ident<$scalar:ty> for $res:ty, $method:ident) => {
+ impl<'a, 'b> $imp<&'b $scalar> for &'a $res {
+ type Output = $res;
+
+ #[inline]
+ fn $method(self, other: &$scalar) -> $res {
+ $imp::$method(self.clone(), *other)
+ }
+ }
+
+ impl<'a, 'b> $imp<&'a $res> for &'b $scalar {
+ type Output = $res;
+
+ #[inline]
+ fn $method(self, other: &$res) -> $res {
+ $imp::$method(*self, other.clone())
+ }
+ }
+ };
+}
+
+macro_rules! promote_scalars {
+ (impl $imp:ident<$promo:ty> for $res:ty, $method:ident, $( $scalar:ty ),*) => {
+ $(
+ forward_all_scalar_binop_to_val_val!(impl $imp<$scalar> for $res, $method);
+
+ impl $imp<$scalar> for $res {
+ type Output = $res;
+
+ #[cfg_attr(feature = "cargo-clippy", allow(renamed_and_removed_lints))]
+ #[cfg_attr(feature = "cargo-clippy", allow(cast_lossless))]
+ #[inline]
+ fn $method(self, other: $scalar) -> $res {
+ $imp::$method(self, other as $promo)
+ }
+ }
+
+ impl $imp<$res> for $scalar {
+ type Output = $res;
+
+ #[cfg_attr(feature = "cargo-clippy", allow(renamed_and_removed_lints))]
+ #[cfg_attr(feature = "cargo-clippy", allow(cast_lossless))]
+ #[inline]
+ fn $method(self, other: $res) -> $res {
+ $imp::$method(self as $promo, other)
+ }
+ }
+ )*
+ }
+}
+macro_rules! promote_scalars_assign {
+ (impl $imp:ident<$promo:ty> for $res:ty, $method:ident, $( $scalar:ty ),*) => {
+ $(
+ impl $imp<$scalar> for $res {
+ #[cfg_attr(feature = "cargo-clippy", allow(renamed_and_removed_lints))]
+ #[cfg_attr(feature = "cargo-clippy", allow(cast_lossless))]
+ #[inline]
+ fn $method(&mut self, other: $scalar) {
+ self.$method(other as $promo);
+ }
+ }
+ )*
+ }
+}
+
+macro_rules! promote_unsigned_scalars {
+ (impl $imp:ident for $res:ty, $method:ident) => {
+ promote_scalars!(impl $imp<u32> for $res, $method, u8, u16);
+ promote_scalars!(impl $imp<UsizePromotion> for $res, $method, usize);
+ }
+}
+
+macro_rules! promote_unsigned_scalars_assign {
+ (impl $imp:ident for $res:ty, $method:ident) => {
+ promote_scalars_assign!(impl $imp<u32> for $res, $method, u8, u16);
+ promote_scalars_assign!(impl $imp<UsizePromotion> for $res, $method, usize);
+ }
+}
+
+macro_rules! promote_signed_scalars {
+ (impl $imp:ident for $res:ty, $method:ident) => {
+ promote_scalars!(impl $imp<i32> for $res, $method, i8, i16);
+ promote_scalars!(impl $imp<IsizePromotion> for $res, $method, isize);
+ }
+}
+
+macro_rules! promote_signed_scalars_assign {
+ (impl $imp:ident for $res:ty, $method:ident) => {
+ promote_scalars_assign!(impl $imp<i32> for $res, $method, i8, i16);
+ promote_scalars_assign!(impl $imp<IsizePromotion> for $res, $method, isize);
+ }
+}
+
+// Forward everything to ref-ref, when reusing storage is not helpful
+macro_rules! forward_all_binop_to_ref_ref {
+ (impl $imp:ident for $res:ty, $method:ident) => {
+ forward_val_val_binop!(impl $imp for $res, $method);
+ forward_val_ref_binop!(impl $imp for $res, $method);
+ forward_ref_val_binop!(impl $imp for $res, $method);
+ };
+}
+
+// Forward everything to val-ref, so LHS storage can be reused
+macro_rules! forward_all_binop_to_val_ref {
+ (impl $imp:ident for $res:ty, $method:ident) => {
+ forward_val_val_binop!(impl $imp for $res, $method);
+ forward_ref_val_binop!(impl $imp for $res, $method);
+ forward_ref_ref_binop!(impl $imp for $res, $method);
+ };
+}
+
+// Forward everything to val-ref, commutatively, so either LHS or RHS storage can be reused
+macro_rules! forward_all_binop_to_val_ref_commutative {
+ (impl $imp:ident for $res:ty, $method:ident) => {
+ forward_val_val_binop_commutative!(impl $imp for $res, $method);
+ forward_ref_val_binop_commutative!(impl $imp for $res, $method);
+ forward_ref_ref_binop_commutative!(impl $imp for $res, $method);
+ };
+}
+
+macro_rules! forward_all_scalar_binop_to_ref_val {
+ (impl $imp:ident<$scalar:ty> for $res:ty, $method:ident) => {
+ forward_scalar_val_val_binop_to_ref_val!(impl $imp<$scalar> for $res, $method);
+ forward_scalar_val_ref_binop_to_ref_val!(impl $imp<$scalar> for $res, $method);
+ forward_scalar_ref_ref_binop_to_ref_val!(impl $imp<$scalar> for $res, $method);
+ }
+}
+
+macro_rules! forward_all_scalar_binop_to_val_val {
+ (impl $imp:ident<$scalar:ty> for $res:ty, $method:ident) => {
+ forward_scalar_val_ref_binop_to_val_val!(impl $imp<$scalar> for $res, $method);
+ forward_scalar_ref_val_binop_to_val_val!(impl $imp<$scalar> for $res, $method);
+ forward_scalar_ref_ref_binop_to_val_val!(impl $imp<$scalar> for $res, $method);
+ }
+}
+
+macro_rules! forward_all_scalar_binop_to_val_val_commutative {
+ (impl $imp:ident<$scalar:ty> for $res:ty, $method:ident) => {
+ forward_scalar_val_val_binop_commutative!(impl $imp<$scalar> for $res, $method);
+ forward_all_scalar_binop_to_val_val!(impl $imp<$scalar> for $res, $method);
+ }
+}
+
+macro_rules! promote_all_scalars {
+ (impl $imp:ident for $res:ty, $method:ident) => {
+ promote_unsigned_scalars!(impl $imp for $res, $method);
+ promote_signed_scalars!(impl $imp for $res, $method);
+ }
+}
+
+macro_rules! promote_all_scalars_assign {
+ (impl $imp:ident for $res:ty, $method:ident) => {
+ promote_unsigned_scalars_assign!(impl $imp for $res, $method);
+ promote_signed_scalars_assign!(impl $imp for $res, $method);
+ }
+}
+
+macro_rules! impl_sum_iter_type {
+ ($res:ty) => {
+ impl<T> Sum<T> for $res
+ where
+ $res: Add<T, Output = $res>,
+ {
+ fn sum<I>(iter: I) -> Self
+ where
+ I: Iterator<Item = T>,
+ {
+ iter.fold(Zero::zero(), <$res>::add)
+ }
+ }
+ };
+}
+
+macro_rules! impl_product_iter_type {
+ ($res:ty) => {
+ impl<T> Product<T> for $res
+ where
+ $res: Mul<T, Output = $res>,
+ {
+ fn product<I>(iter: I) -> Self
+ where
+ I: Iterator<Item = T>,
+ {
+ iter.fold(One::one(), <$res>::mul)
+ }
+ }
+ };
+}
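+
+// A hedged illustration (not part of the upstream file) of how the forwarding
+// macros compose: given one hand-written val-ref impl,
+// `forward_all_binop_to_val_ref!` fills in the val-val, ref-val, and ref-ref
+// combinations. `Demo` is a toy type invented for this test.
+#[cfg(test)]
+mod forwarding_demo {
+    use std::ops::Add;
+
+    #[derive(Clone, Debug, PartialEq)]
+    struct Demo(u32);
+
+    // The single base impl: owned LHS, borrowed RHS.
+    impl<'a> Add<&'a Demo> for Demo {
+        type Output = Demo;
+        fn add(self, other: &Demo) -> Demo {
+            Demo(self.0 + other.0)
+        }
+    }
+
+    forward_all_binop_to_val_ref!(impl Add for Demo, add);
+
+    #[test]
+    fn all_four_combinations_work() {
+        let (a, b) = (Demo(2), Demo(3));
+        assert_eq!(&a + &b, Demo(5)); // ref-ref clones the LHS
+        assert_eq!(&a + b.clone(), Demo(5)); // ref-val forwards to ref-ref
+        assert_eq!(a.clone() + &b, Demo(5)); // val-ref is the base impl
+        assert_eq!(a + b, Demo(5)); // val-val forwards to val-ref
+    }
+}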
diff --git a/third_party/rust/num-bigint/src/monty.rs b/third_party/rust/num-bigint/src/monty.rs
new file mode 100644
index 0000000000..72a4ab53eb
--- /dev/null
+++ b/third_party/rust/num-bigint/src/monty.rs
@@ -0,0 +1,129 @@
+use integer::Integer;
+use traits::Zero;
+
+use biguint::BigUint;
+
+struct MontyReducer<'a> {
+ n: &'a BigUint,
+ n0inv: u32,
+}
+
+// Calculate the modular inverse of `num`, using Extended GCD.
+//
+// Reference:
+// Brent & Zimmermann, Modern Computer Arithmetic, v0.5.9, Algorithm 1.20
+fn inv_mod_u32(num: u32) -> u32 {
+ // num needs to be relatively prime to 2^32 -- i.e. it must be odd.
+ assert!(num % 2 != 0);
+
+ let mut a: i64 = i64::from(num);
+ let mut b: i64 = i64::from(u32::max_value()) + 1;
+
+ // ExtendedGcd
+ // Input: positive integers a and b
+ // Output: integers (g, u, v) such that g = gcd(a, b) = ua + vb
+ // As we don't need v for modular inverse, we don't calculate it.
+
+ // 1: (u, w) <- (1, 0)
+ let mut u = 1;
+ let mut w = 0;
+ // 3: while b != 0
+ while b != 0 {
+ // 4: (q, r) <- DivRem(a, b)
+ let q = a / b;
+ let r = a % b;
+ // 5: (a, b) <- (b, r)
+ a = b;
+ b = r;
+ // 6: (u, w) <- (w, u - qw)
+ let m = u - w * q;
+ u = w;
+ w = m;
+ }
+
+ assert!(a == 1);
+ // Downcasting to u32 also reduces u mod 2^32.
+ u as u32
+}
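+
+// A hedged sanity check (not in the upstream file): the returned value is a
+// multiplicative inverse modulo 2^32, so wrapping multiplication gives 1.
+#[test]
+fn test_inv_mod_u32() {
+    for &num in &[1u32, 3, 5, 65_537, 0xffff_ffff] {
+        assert_eq!(num.wrapping_mul(inv_mod_u32(num)), 1);
+    }
+}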
+
+impl<'a> MontyReducer<'a> {
+ fn new(n: &'a BigUint) -> Self {
+ let n0inv = inv_mod_u32(n.data[0]);
+ MontyReducer { n: n, n0inv: n0inv }
+ }
+}
+
+// Montgomery Reduction
+//
+// Reference:
+// Brent & Zimmermann, Modern Computer Arithmetic, v0.5.9, Algorithm 2.6
+fn monty_redc(a: BigUint, mr: &MontyReducer) -> BigUint {
+ let mut c = a.data;
+ let n = &mr.n.data;
+ let n_size = n.len();
+
+ // Allocate sufficient work space
+ c.resize(2 * n_size + 2, 0);
+
+ // β is the word base, in this case 2^32, so "a mod β" is
+ // equivalent to masking a to the low 32 bits.
+ // mu <- -N^(-1) mod β
+ let mu = 0u32.wrapping_sub(mr.n0inv);
+
+ // 1: for i = 0 to (n-1)
+ for i in 0..n_size {
+ // 2: q_i <- mu*c_i mod β
+ let q_i = c[i].wrapping_mul(mu);
+
+ // 3: C <- C + q_i * N * β^i
+ super::algorithms::mac_digit(&mut c[i..], n, q_i);
+ }
+
+ // 4: R <- C * β^(-n)
+ // Dividing by β^n is an n-word shift, implemented by skipping the low n words.
+ let ret = BigUint::new(c[n_size..].to_vec());
+
+ // 5: if R >= N then return R - N else return R.
+ if &ret < mr.n {
+ ret
+ } else {
+ ret - mr.n
+ }
+}
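+
+// A hedged sketch (not in the upstream file): REDC divides by R = β^n, so
+// feeding it a·R returns a, and feeding it 1 returns R^{-1} mod N. For the
+// single-limb modulus 497, R = 2^32 ≡ 151 (mod 497) and 151^{-1} ≡ 79.
+#[test]
+fn test_monty_redc_small() {
+    let n = BigUint::new(vec![497]);
+    let mr = MontyReducer::new(&n);
+    // 3 · 2^32 is the two-limb value [0, 3]; REDC maps it back to 3.
+    assert_eq!(monty_redc(BigUint::new(vec![0, 3]), &mr), BigUint::new(vec![3]));
+    assert_eq!(monty_redc(BigUint::new(vec![1]), &mr), BigUint::new(vec![79]));
+}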
+
+// Montgomery Multiplication
+fn monty_mult(a: BigUint, b: &BigUint, mr: &MontyReducer) -> BigUint {
+ monty_redc(a * b, mr)
+}
+
+// Montgomery Squaring
+fn monty_sqr(a: BigUint, mr: &MontyReducer) -> BigUint {
+ // TODO: Replace with an optimised squaring function
+ monty_redc(&a * &a, mr)
+}
+
+pub fn monty_modpow(a: &BigUint, exp: &BigUint, modulus: &BigUint) -> BigUint {
+ let mr = MontyReducer::new(modulus);
+
+ // Calculate the Montgomery parameter
+ let mut v = vec![0; modulus.data.len()];
+ v.push(1);
+ let r = BigUint::new(v);
+
+ // Map the base to the Montgomery domain
+ let mut apri = a * &r % modulus;
+
+ // Binary exponentiation
+ let mut ans = &r % modulus;
+ let mut e = exp.clone();
+ while !e.is_zero() {
+ if e.is_odd() {
+ ans = monty_mult(ans, &apri, &mr);
+ }
+ apri = monty_sqr(apri, &mr);
+ e = e >> 1;
+ }
+
+ // Map the result back to the residues domain
+ monty_redc(ans, &mr)
+}
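+
+// A hedged end-to-end check (not in the upstream file): for an odd modulus
+// the Montgomery route agrees with plain modular exponentiation, e.g.
+// 4^13 mod 497 = 445.
+#[test]
+fn test_monty_modpow_small() {
+    let base = BigUint::new(vec![4]);
+    let exp = BigUint::new(vec![13]);
+    let modulus = BigUint::new(vec![497]);
+    assert_eq!(monty_modpow(&base, &exp, &modulus), BigUint::new(vec![445]));
+}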