Diffstat (limited to 'vendor/crypto-bigint/src')
-rw-r--r--  vendor/crypto-bigint/src/array.rs | 38
-rw-r--r--  vendor/crypto-bigint/src/boxed.rs | 3
-rw-r--r--  vendor/crypto-bigint/src/boxed/uint.rs | 231
-rw-r--r--  vendor/crypto-bigint/src/boxed/uint/add.rs | 62
-rw-r--r--  vendor/crypto-bigint/src/boxed/uint/cmp.rs | 47
-rw-r--r--  vendor/crypto-bigint/src/checked.rs | 131
-rw-r--r--  vendor/crypto-bigint/src/ct_choice.rs | 104
-rw-r--r--  vendor/crypto-bigint/src/lib.rs | 215
-rw-r--r--  vendor/crypto-bigint/src/limb.rs | 176
-rw-r--r--  vendor/crypto-bigint/src/limb/add.rs | 180
-rw-r--r--  vendor/crypto-bigint/src/limb/bit_and.rs | 21
-rw-r--r--  vendor/crypto-bigint/src/limb/bit_not.rs | 19
-rw-r--r--  vendor/crypto-bigint/src/limb/bit_or.rs | 19
-rw-r--r--  vendor/crypto-bigint/src/limb/bit_xor.rs | 19
-rw-r--r--  vendor/crypto-bigint/src/limb/bits.rs | 18
-rw-r--r--  vendor/crypto-bigint/src/limb/cmp.rs | 200
-rw-r--r--  vendor/crypto-bigint/src/limb/encoding.rs | 64
-rw-r--r--  vendor/crypto-bigint/src/limb/from.rs | 74
-rw-r--r--  vendor/crypto-bigint/src/limb/mul.rs | 195
-rw-r--r--  vendor/crypto-bigint/src/limb/neg.rs | 20
-rw-r--r--  vendor/crypto-bigint/src/limb/rand.rs | 38
-rw-r--r--  vendor/crypto-bigint/src/limb/shl.rs | 74
-rw-r--r--  vendor/crypto-bigint/src/limb/shr.rs | 74
-rw-r--r--  vendor/crypto-bigint/src/limb/sub.rs | 182
-rw-r--r--  vendor/crypto-bigint/src/nlimbs.rs | 29
-rw-r--r--  vendor/crypto-bigint/src/non_zero.rs | 393
-rw-r--r--  vendor/crypto-bigint/src/traits.rs | 302
-rw-r--r--  vendor/crypto-bigint/src/uint.rs | 491
-rw-r--r--  vendor/crypto-bigint/src/uint/add.rs | 206
-rw-r--r--  vendor/crypto-bigint/src/uint/add_mod.rs | 128
-rw-r--r--  vendor/crypto-bigint/src/uint/array.rs | 193
-rw-r--r--  vendor/crypto-bigint/src/uint/bit_and.rs | 146
-rw-r--r--  vendor/crypto-bigint/src/uint/bit_not.rs | 49
-rw-r--r--  vendor/crypto-bigint/src/uint/bit_or.rs | 142
-rw-r--r--  vendor/crypto-bigint/src/uint/bit_xor.rs | 142
-rw-r--r--  vendor/crypto-bigint/src/uint/bits.rs | 207
-rw-r--r--  vendor/crypto-bigint/src/uint/cmp.rs | 275
-rw-r--r--  vendor/crypto-bigint/src/uint/concat.rs | 70
-rw-r--r--  vendor/crypto-bigint/src/uint/div.rs | 745
-rw-r--r--  vendor/crypto-bigint/src/uint/div_limb.rs | 287
-rw-r--r--  vendor/crypto-bigint/src/uint/encoding.rs | 292
-rw-r--r--  vendor/crypto-bigint/src/uint/encoding/der.rs | 64
-rw-r--r--  vendor/crypto-bigint/src/uint/encoding/rlp.rs | 78
-rw-r--r--  vendor/crypto-bigint/src/uint/extra_sizes.rs | 160
-rw-r--r--  vendor/crypto-bigint/src/uint/from.rs | 271
-rw-r--r--  vendor/crypto-bigint/src/uint/inv_mod.rs | 306
-rw-r--r--  vendor/crypto-bigint/src/uint/macros.rs | 115
-rw-r--r--  vendor/crypto-bigint/src/uint/modular.rs | 164
-rw-r--r--  vendor/crypto-bigint/src/uint/modular/add.rs | 9
-rw-r--r--  vendor/crypto-bigint/src/uint/modular/constant_mod.rs | 254
-rw-r--r--  vendor/crypto-bigint/src/uint/modular/constant_mod/const_add.rs | 98
-rw-r--r--  vendor/crypto-bigint/src/uint/modular/constant_mod/const_inv.rs | 69
-rw-r--r--  vendor/crypto-bigint/src/uint/modular/constant_mod/const_mul.rs | 94
-rw-r--r--  vendor/crypto-bigint/src/uint/modular/constant_mod/const_neg.rs | 48
-rw-r--r--  vendor/crypto-bigint/src/uint/modular/constant_mod/const_pow.rs | 101
-rw-r--r--  vendor/crypto-bigint/src/uint/modular/constant_mod/const_sub.rs | 98
-rw-r--r--  vendor/crypto-bigint/src/uint/modular/constant_mod/macros.rs | 57
-rw-r--r--  vendor/crypto-bigint/src/uint/modular/div_by_2.rs | 30
-rw-r--r--  vendor/crypto-bigint/src/uint/modular/inv.rs | 14
-rw-r--r--  vendor/crypto-bigint/src/uint/modular/mul.rs | 22
-rw-r--r--  vendor/crypto-bigint/src/uint/modular/pow.rs | 79
-rw-r--r--  vendor/crypto-bigint/src/uint/modular/reduction.rs | 55
-rw-r--r--  vendor/crypto-bigint/src/uint/modular/runtime_mod.rs | 301
-rw-r--r--  vendor/crypto-bigint/src/uint/modular/runtime_mod/runtime_add.rs | 92
-rw-r--r--  vendor/crypto-bigint/src/uint/modular/runtime_mod/runtime_inv.rs | 35
-rw-r--r--  vendor/crypto-bigint/src/uint/modular/runtime_mod/runtime_mul.rs | 84
-rw-r--r--  vendor/crypto-bigint/src/uint/modular/runtime_mod/runtime_neg.rs | 24
-rw-r--r--  vendor/crypto-bigint/src/uint/modular/runtime_mod/runtime_pow.rs | 42
-rw-r--r--  vendor/crypto-bigint/src/uint/modular/runtime_mod/runtime_sub.rs | 92
-rw-r--r--  vendor/crypto-bigint/src/uint/modular/sub.rs | 9
-rw-r--r--  vendor/crypto-bigint/src/uint/mul.rs | 414
-rw-r--r--  vendor/crypto-bigint/src/uint/mul_mod.rs | 133
-rw-r--r--  vendor/crypto-bigint/src/uint/neg.rs | 51
-rw-r--r--  vendor/crypto-bigint/src/uint/neg_mod.rs | 68
-rw-r--r--  vendor/crypto-bigint/src/uint/rand.rs | 79
-rw-r--r--  vendor/crypto-bigint/src/uint/resize.rs | 37
-rw-r--r--  vendor/crypto-bigint/src/uint/shl.rs | 216
-rw-r--r--  vendor/crypto-bigint/src/uint/shr.rs | 186
-rw-r--r--  vendor/crypto-bigint/src/uint/split.rs | 37
-rw-r--r--  vendor/crypto-bigint/src/uint/sqrt.rs | 177
-rw-r--r--  vendor/crypto-bigint/src/uint/sub.rs | 215
-rw-r--r--  vendor/crypto-bigint/src/uint/sub_mod.rs | 191
-rw-r--r--  vendor/crypto-bigint/src/wrapping.rs | 117
83 files changed, 11087 insertions, 0 deletions
diff --git a/vendor/crypto-bigint/src/array.rs b/vendor/crypto-bigint/src/array.rs
new file mode 100644
index 0000000..3528663
--- /dev/null
+++ b/vendor/crypto-bigint/src/array.rs
@@ -0,0 +1,38 @@
+//! Interop support for `generic-array`
+
+use crate::{Encoding, Integer};
+use core::ops::Add;
+use generic_array::{typenum::Unsigned, ArrayLength, GenericArray};
+
+/// Alias for a byte array whose size is defined by [`ArrayEncoding::ByteSize`].
+pub type ByteArray<T> = GenericArray<u8, <T as ArrayEncoding>::ByteSize>;
+
+/// Support for encoding a big integer as a `GenericArray`.
+pub trait ArrayEncoding: Encoding {
+ /// Size of a byte array which encodes a big integer.
+ type ByteSize: ArrayLength<u8> + Add + Eq + Ord + Unsigned;
+
+ /// Deserialize from a big-endian byte array.
+ fn from_be_byte_array(bytes: ByteArray<Self>) -> Self;
+
+ /// Deserialize from a little-endian byte array.
+ fn from_le_byte_array(bytes: ByteArray<Self>) -> Self;
+
+ /// Serialize to a big-endian byte array.
+ fn to_be_byte_array(&self) -> ByteArray<Self>;
+
+ /// Serialize to a little-endian byte array.
+ fn to_le_byte_array(&self) -> ByteArray<Self>;
+}
+
+/// Support for decoding a `GenericArray` as a big integer.
+pub trait ArrayDecoding {
+ /// Big integer which decodes a `GenericArray`.
+ type Output: ArrayEncoding + Integer;
+
+ /// Deserialize from a big-endian `GenericArray`.
+ fn into_uint_be(self) -> Self::Output;
+
+ /// Deserialize from a little-endian `GenericArray`.
+ fn into_uint_le(self) -> Self::Output;
+}
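A minimal sketch of how the two traits compose, assuming the crate's `generic-array` feature is enabled (`U256` and the `ArrayDecoding` impl for its byte array are defined elsewhere in this crate):

    use crypto_bigint::{ArrayDecoding, ArrayEncoding, U256};

    // Serialize to a fixed-size GenericArray (32 bytes for U256)...
    let n = U256::MAX;
    let bytes = n.to_be_byte_array();

    // ...and decode the same array back into the integer it came from.
    assert_eq!(bytes.into_uint_be(), n);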
diff --git a/vendor/crypto-bigint/src/boxed.rs b/vendor/crypto-bigint/src/boxed.rs
new file mode 100644
index 0000000..4bcb860
--- /dev/null
+++ b/vendor/crypto-bigint/src/boxed.rs
@@ -0,0 +1,3 @@
+//! Heap-allocated "boxed" types.
+
+pub(crate) mod uint;
diff --git a/vendor/crypto-bigint/src/boxed/uint.rs b/vendor/crypto-bigint/src/boxed/uint.rs
new file mode 100644
index 0000000..4771a69
--- /dev/null
+++ b/vendor/crypto-bigint/src/boxed/uint.rs
@@ -0,0 +1,231 @@
+//! Heap-allocated big unsigned integers.
+
+mod add;
+mod cmp;
+
+use crate::{Limb, Word};
+use alloc::{vec, vec::Vec};
+use core::fmt;
+
+#[cfg(feature = "zeroize")]
+use zeroize::Zeroize;
+
+/// Fixed-precision heap-allocated big unsigned integer.
+///
+/// Alternative to the stack-allocated [`Uint`][`crate::Uint`] but with a
+/// fixed precision chosen at runtime instead of compile time.
+///
+/// Unlike many other heap-allocated big integer libraries, this type is not
+/// arbitrary precision and will wrap at its fixed precision rather than
+/// automatically growing.
+#[derive(Clone, Default)]
+pub struct BoxedUint {
+ /// Inner limb vector. Stored from least significant to most significant.
+ limbs: Vec<Limb>,
+}
+
+impl BoxedUint {
+ /// Get the value `0`, represented as succinctly as possible.
+ pub fn zero() -> Self {
+ Self::default()
+ }
+
+ /// Get the value `1`, represented as succinctly as possible.
+ pub fn one() -> Self {
+ Self {
+ limbs: vec![Limb::ONE; 1],
+ }
+ }
+
+ /// Create a new [`BoxedUint`] with the given number of bits of precision.
+ ///
+ /// Returns `None` if the number of bits is not a multiple of the
+ /// [`Limb`] size.
+ pub fn new(bits_precision: usize) -> Option<Self> {
+ if bits_precision == 0 || bits_precision % Limb::BITS != 0 {
+ return None;
+ }
+
+ let nlimbs = bits_precision / Limb::BITS;
+
+ Some(Self {
+ limbs: vec![Limb::ZERO; nlimbs],
+ })
+ }
+
+ /// Get the maximum value for a given number of bits of precision.
+ ///
+ /// Returns `None` if the number of bits is not a multiple of the
+ /// [`Limb`] size.
+ pub fn max(bits_precision: usize) -> Option<Self> {
+ let mut ret = Self::new(bits_precision)?;
+
+ for limb in &mut ret.limbs {
+ *limb = Limb::MAX;
+ }
+
+ Some(ret)
+ }
+
+ /// Create a [`BoxedUint`] from an array of [`Word`]s (i.e. word-sized unsigned
+ /// integers).
+ #[inline]
+ pub fn from_words(words: &[Word]) -> Self {
+ Self {
+ limbs: words.iter().copied().map(Into::into).collect(),
+ }
+ }
+
+ /// Create an array of [`Word`]s (i.e. word-sized unsigned integers) from
+ /// a [`BoxedUint`].
+ #[inline]
+ pub fn to_words(&self) -> Vec<Word> {
+ self.limbs.iter().copied().map(Into::into).collect()
+ }
+
+ /// Borrow the inner limbs as a slice of [`Word`]s.
+ pub fn as_words(&self) -> &[Word] {
+ // SAFETY: `Limb` is a `repr(transparent)` newtype for `Word`
+ #[allow(trivial_casts, unsafe_code)]
+ unsafe {
+ &*((self.limbs.as_slice() as *const _) as *const [Word])
+ }
+ }
+
+ /// Borrow the inner limbs as a mutable array of [`Word`]s.
+ pub fn as_words_mut(&mut self) -> &mut [Word] {
+ // SAFETY: `Limb` is a `repr(transparent)` newtype for `Word`
+ #[allow(trivial_casts, unsafe_code)]
+ unsafe {
+ &mut *((self.limbs.as_mut_slice() as *mut _) as *mut [Word])
+ }
+ }
+
+ /// Borrow the limbs of this [`BoxedUint`].
+ pub fn as_limbs(&self) -> &[Limb] {
+ self.limbs.as_ref()
+ }
+
+ /// Borrow the limbs of this [`BoxedUint`] mutably.
+ pub fn as_limbs_mut(&mut self) -> &mut [Limb] {
+ self.limbs.as_mut()
+ }
+
+ /// Convert this [`BoxedUint`] into its inner limbs.
+ pub fn to_limbs(&self) -> Vec<Limb> {
+ self.limbs.clone()
+ }
+
+ /// Convert this [`BoxedUint`] into its inner limbs.
+ pub fn into_limbs(self) -> Vec<Limb> {
+ self.limbs
+ }
+
+ /// Get the precision of this [`BoxedUint`] in bits.
+ pub fn bits(&self) -> usize {
+ self.limbs.len() * Limb::BITS
+ }
+
+ /// Sort two [`BoxedUint`]s by precision, returning a tuple of the shorter
+ /// followed by the longer, or the original order if their precision is
+ /// equal.
+ fn sort_by_precision<'a>(a: &'a Self, b: &'a Self) -> (&'a Self, &'a Self) {
+ if a.limbs.len() <= b.limbs.len() {
+ (a, b)
+ } else {
+ (b, a)
+ }
+ }
+
+ /// Perform a carry chain-like operation over the limbs of the inputs,
+ /// constructing a result from the returned limbs and carry.
+ ///
+ /// If one of the two values has fewer limbs than the other, passes
+ /// [`Limb::ZERO`] as the value for that limb.
+ fn chain<F>(a: &Self, b: &Self, mut carry: Limb, f: F) -> (Self, Limb)
+ where
+ F: Fn(Limb, Limb, Limb) -> (Limb, Limb),
+ {
+ let (shorter, longer) = Self::sort_by_precision(a, b);
+ let mut limbs = Vec::with_capacity(longer.limbs.len());
+
+ for i in 0..longer.limbs.len() {
+ let &a = shorter.limbs.get(i).unwrap_or(&Limb::ZERO);
+ let &b = longer.limbs.get(i).unwrap_or(&Limb::ZERO);
+ let (limb, c) = f(a, b, carry);
+ limbs.push(limb);
+ carry = c;
+ }
+
+ (Self { limbs }, carry)
+ }
+}
+
+impl AsRef<[Word]> for BoxedUint {
+ fn as_ref(&self) -> &[Word] {
+ self.as_words()
+ }
+}
+
+impl AsMut<[Word]> for BoxedUint {
+ fn as_mut(&mut self) -> &mut [Word] {
+ self.as_words_mut()
+ }
+}
+
+impl AsRef<[Limb]> for BoxedUint {
+ fn as_ref(&self) -> &[Limb] {
+ self.as_limbs()
+ }
+}
+
+impl AsMut<[Limb]> for BoxedUint {
+ fn as_mut(&mut self) -> &mut [Limb] {
+ self.as_limbs_mut()
+ }
+}
+
+impl fmt::Debug for BoxedUint {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(f, "BoxedUint(0x{self:X})")
+ }
+}
+
+impl fmt::Display for BoxedUint {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt::UpperHex::fmt(self, f)
+ }
+}
+
+impl fmt::LowerHex for BoxedUint {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ if self.limbs.is_empty() {
+ return fmt::LowerHex::fmt(&Limb::ZERO, f);
+ }
+
+ for limb in self.limbs.iter().rev() {
+ fmt::LowerHex::fmt(limb, f)?;
+ }
+ Ok(())
+ }
+}
+
+impl fmt::UpperHex for BoxedUint {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ if self.limbs.is_empty() {
+ return fmt::UpperHex::fmt(&Limb::ZERO, f);
+ }
+
+ for limb in self.limbs.iter().rev() {
+ fmt::UpperHex::fmt(limb, f)?;
+ }
+ Ok(())
+ }
+}
+
+#[cfg(feature = "zeroize")]
+impl Zeroize for BoxedUint {
+ fn zeroize(&mut self) {
+ self.limbs.zeroize();
+ }
+}
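A sketch of what `sort_by_precision` and `chain` buy the arithmetic impls: operands of different precision combine limb by limb, with the shorter side implicitly zero-padded (`adc` is defined in the `add` submodule below):

    use crypto_bigint::{BoxedUint, Limb};

    // One limb of precision on one side, two on the other;
    // `chain` pads the shorter operand with Limb::ZERO.
    let a = BoxedUint::max(Limb::BITS).unwrap();     // 2^BITS - 1
    let b = BoxedUint::max(Limb::BITS * 2).unwrap(); // 2^(2*BITS) - 1

    let (sum, carry) = a.adc(&b, Limb::ZERO);
    assert_eq!(sum.bits(), Limb::BITS * 2); // result takes the longer precision
    assert_eq!(carry, Limb::ONE);           // the full sum overflows 2*BITS bits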
diff --git a/vendor/crypto-bigint/src/boxed/uint/add.rs b/vendor/crypto-bigint/src/boxed/uint/add.rs
new file mode 100644
index 0000000..b6cedc7
--- /dev/null
+++ b/vendor/crypto-bigint/src/boxed/uint/add.rs
@@ -0,0 +1,62 @@
+//! [`BoxedUint`] addition operations.
+
+use crate::{BoxedUint, CheckedAdd, Limb, Zero};
+use subtle::CtOption;
+
+impl BoxedUint {
+ /// Computes `self + rhs + carry`, returning the result along with the new carry.
+ #[inline(always)]
+ pub fn adc(&self, rhs: &Self, carry: Limb) -> (Self, Limb) {
+ Self::chain(self, rhs, carry, |a, b, c| a.adc(b, c))
+ }
+
+ /// Perform wrapping addition, discarding overflow.
+ pub fn wrapping_add(&self, rhs: &Self) -> Self {
+ self.adc(rhs, Limb::ZERO).0
+ }
+}
+
+impl CheckedAdd<&BoxedUint> for BoxedUint {
+ type Output = Self;
+
+ fn checked_add(&self, rhs: &Self) -> CtOption<Self> {
+ let (result, carry) = self.adc(rhs, Limb::ZERO);
+ CtOption::new(result, carry.is_zero())
+ }
+}
+
+#[cfg(test)]
+#[allow(clippy::unwrap_used)]
+mod tests {
+ use super::{BoxedUint, CheckedAdd, Limb};
+
+ #[test]
+ fn adc_no_carry() {
+ let (res, carry) = BoxedUint::zero().adc(&BoxedUint::one(), Limb::ZERO);
+ assert_eq!(res, BoxedUint::one());
+ assert_eq!(carry, Limb::ZERO);
+ }
+
+ #[test]
+ fn adc_with_carry() {
+ let (res, carry) = BoxedUint::max(Limb::BITS)
+ .unwrap()
+ .adc(&BoxedUint::one(), Limb::ZERO);
+ assert_eq!(res, BoxedUint::zero());
+ assert_eq!(carry, Limb::ONE);
+ }
+
+ #[test]
+ fn checked_add_ok() {
+ let result = BoxedUint::zero().checked_add(&BoxedUint::one());
+ assert_eq!(result.unwrap(), BoxedUint::one());
+ }
+
+ #[test]
+ fn checked_add_overflow() {
+ let result = BoxedUint::max(Limb::BITS)
+ .unwrap()
+ .checked_add(&BoxedUint::one());
+ assert!(!bool::from(result.is_some()));
+ }
+}
diff --git a/vendor/crypto-bigint/src/boxed/uint/cmp.rs b/vendor/crypto-bigint/src/boxed/uint/cmp.rs
new file mode 100644
index 0000000..d850fc7
--- /dev/null
+++ b/vendor/crypto-bigint/src/boxed/uint/cmp.rs
@@ -0,0 +1,47 @@
+//! [`BoxedUint`] comparisons.
+//!
+//! By default these are all constant-time and use the `subtle` crate.
+
+use super::BoxedUint;
+use crate::Limb;
+use subtle::{Choice, ConstantTimeEq};
+
+impl ConstantTimeEq for BoxedUint {
+ #[inline]
+ fn ct_eq(&self, other: &Self) -> Choice {
+ let (shorter, longer) = Self::sort_by_precision(self, other);
+ let mut ret = Choice::from(1u8);
+
+ for i in 0..longer.limbs.len() {
+ let a = shorter.limbs.get(i).unwrap_or(&Limb::ZERO);
+ let b = longer.limbs.get(i).unwrap_or(&Limb::ZERO);
+ ret &= a.ct_eq(b);
+ }
+
+ ret
+ }
+}
+
+impl Eq for BoxedUint {}
+impl PartialEq for BoxedUint {
+ fn eq(&self, other: &Self) -> bool {
+ self.ct_eq(other).into()
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use super::BoxedUint;
+ use subtle::ConstantTimeEq;
+
+ #[test]
+ fn ct_eq() {
+ let a = BoxedUint::zero();
+ let b = BoxedUint::one();
+
+ assert!(bool::from(a.ct_eq(&a)));
+ assert!(!bool::from(a.ct_eq(&b)));
+ assert!(!bool::from(b.ct_eq(&a)));
+ assert!(bool::from(b.ct_eq(&b)));
+ }
+}
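Because `ct_eq` walks the zero-padded longer length, numerically equal values compare equal even when their precisions differ; a small sketch:

    use crypto_bigint::{BoxedUint, Limb};
    use subtle::ConstantTimeEq;

    // Zero at one limb of precision vs. zero at four limbs.
    let narrow = BoxedUint::new(Limb::BITS).unwrap();
    let wide = BoxedUint::new(Limb::BITS * 4).unwrap();

    // Equality is decided over the padded length, in constant time.
    assert!(bool::from(narrow.ct_eq(&wide)));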
diff --git a/vendor/crypto-bigint/src/checked.rs b/vendor/crypto-bigint/src/checked.rs
new file mode 100644
index 0000000..caf0dfd
--- /dev/null
+++ b/vendor/crypto-bigint/src/checked.rs
@@ -0,0 +1,131 @@
+//! Checked arithmetic.
+
+use subtle::{Choice, ConditionallySelectable, ConstantTimeEq, CtOption};
+
+#[cfg(feature = "serde")]
+use serdect::serde::{Deserialize, Deserializer, Serialize, Serializer};
+
+/// Provides intentionally-checked arithmetic on `T`.
+///
+/// Internally this leverages the [`CtOption`] type from the [`subtle`] crate
+/// in order to handle overflows in constant time.
+#[derive(Copy, Clone, Debug)]
+pub struct Checked<T>(pub CtOption<T>);
+
+impl<T> Checked<T> {
+ /// Create a new checked arithmetic wrapper for the given value.
+ pub fn new(val: T) -> Self {
+ Self(CtOption::new(val, Choice::from(1)))
+ }
+}
+
+impl<T> Default for Checked<T>
+where
+ T: Default,
+{
+ fn default() -> Self {
+ Self::new(T::default())
+ }
+}
+
+impl<T: ConditionallySelectable> ConditionallySelectable for Checked<T> {
+ #[inline]
+ fn conditional_select(a: &Self, b: &Self, choice: Choice) -> Self {
+ Self(CtOption::conditional_select(&a.0, &b.0, choice))
+ }
+}
+
+impl<T: ConstantTimeEq> ConstantTimeEq for Checked<T> {
+ #[inline]
+ fn ct_eq(&self, rhs: &Self) -> Choice {
+ self.0.ct_eq(&rhs.0)
+ }
+}
+
+impl<T> From<Checked<T>> for CtOption<T> {
+ fn from(checked: Checked<T>) -> CtOption<T> {
+ checked.0
+ }
+}
+
+impl<T> From<CtOption<T>> for Checked<T> {
+ fn from(ct_option: CtOption<T>) -> Checked<T> {
+ Checked(ct_option)
+ }
+}
+
+impl<T> From<Checked<T>> for Option<T> {
+ fn from(checked: Checked<T>) -> Option<T> {
+ checked.0.into()
+ }
+}
+
+#[cfg(feature = "serde")]
+impl<'de, T: Default + Deserialize<'de>> Deserialize<'de> for Checked<T> {
+ fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
+ where
+ D: Deserializer<'de>,
+ {
+ let value = Option::<T>::deserialize(deserializer)?;
+ let choice = Choice::from(value.is_some() as u8);
+ Ok(Self(CtOption::new(value.unwrap_or_default(), choice)))
+ }
+}
+
+#[cfg(feature = "serde")]
+impl<T: Copy + Serialize> Serialize for Checked<T> {
+ fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
+ where
+ S: Serializer,
+ {
+ Option::<T>::from(self.0).serialize(serializer)
+ }
+}
+
+#[cfg(all(test, feature = "serde"))]
+#[allow(clippy::unwrap_used)]
+mod tests {
+
+ use crate::{Checked, U64};
+ use subtle::{Choice, ConstantTimeEq, CtOption};
+
+ #[test]
+ fn serde() {
+ let test = Checked::new(U64::from_u64(0x0011223344556677));
+
+ let serialized = bincode::serialize(&test).unwrap();
+ let deserialized: Checked<U64> = bincode::deserialize(&serialized).unwrap();
+
+ assert!(bool::from(test.ct_eq(&deserialized)));
+
+ let test = Checked::new(U64::ZERO) - Checked::new(U64::ONE);
+ assert!(bool::from(
+ test.ct_eq(&Checked(CtOption::new(U64::ZERO, Choice::from(0))))
+ ));
+
+ let serialized = bincode::serialize(&test).unwrap();
+ let deserialized: Checked<U64> = bincode::deserialize(&serialized).unwrap();
+
+ assert!(bool::from(test.ct_eq(&deserialized)));
+ }
+
+ #[test]
+ fn serde_owned() {
+ let test = Checked::new(U64::from_u64(0x0011223344556677));
+
+ let serialized = bincode::serialize(&test).unwrap();
+ let deserialized: Checked<U64> = bincode::deserialize_from(serialized.as_slice()).unwrap();
+
+ assert!(bool::from(test.ct_eq(&deserialized)));
+
+ let test = Checked::new(U64::ZERO) - Checked::new(U64::ONE);
+ assert!(bool::from(
+ test.ct_eq(&Checked(CtOption::new(U64::ZERO, Choice::from(0))))
+ ));
+
+ let serialized = bincode::serialize(&test).unwrap();
+ let deserialized: Checked<U64> = bincode::deserialize_from(serialized.as_slice()).unwrap();
+
+ assert!(bool::from(test.ct_eq(&deserialized)));
+ }
+}
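The `CtOption` inside `Checked` composes monadically: once any step overflows, every later result stays `None`, and the failure only becomes observable when the wrapper is opened at the end. A sketch using the operator impls defined for `Checked` elsewhere in this diff:

    use crypto_bigint::{Checked, U64};

    // An overflowing step poisons the rest of the chain...
    let bad = Checked::new(U64::MAX) + Checked::new(U64::ONE) + Checked::new(U64::ONE);
    assert!(Option::<U64>::from(bad).is_none());

    // ...while an in-range chain yields Some at the end.
    let ok = Checked::new(U64::ONE) + Checked::new(U64::ONE);
    assert_eq!(Option::<U64>::from(ok), Some(U64::from(2u8)));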
diff --git a/vendor/crypto-bigint/src/ct_choice.rs b/vendor/crypto-bigint/src/ct_choice.rs
new file mode 100644
index 0000000..921e72d
--- /dev/null
+++ b/vendor/crypto-bigint/src/ct_choice.rs
@@ -0,0 +1,104 @@
+use subtle::Choice;
+
+use crate::Word;
+
+/// A boolean value returned by constant-time `const fn`s.
+// TODO: should be replaced by `subtle::Choice` or `CtOption`
+// when `subtle` starts supporting const fns.
+#[derive(Debug, Copy, Clone)]
+pub struct CtChoice(Word);
+
+impl CtChoice {
+ /// The falsy value.
+ pub const FALSE: Self = Self(0);
+
+ /// The truthy value.
+ pub const TRUE: Self = Self(Word::MAX);
+
+ /// Returns the truthy value if `value == Word::MAX`, and the falsy value if `value == 0`.
+ /// Panics in debug builds for other values.
+ pub(crate) const fn from_mask(value: Word) -> Self {
+ debug_assert!(value == Self::FALSE.0 || value == Self::TRUE.0);
+ Self(value)
+ }
+
+ /// Returns the truthy value if `value == 1`, and the falsy value if `value == 0`.
+ /// Panics in debug builds for other values.
+ pub(crate) const fn from_lsb(value: Word) -> Self {
+ debug_assert!(value == 0 || value == 1);
+ Self(value.wrapping_neg())
+ }
+
+ /// Returns the truthy value if `value != 0`, and the falsy value otherwise.
+ pub(crate) const fn from_usize_being_nonzero(value: usize) -> Self {
+ const HI_BIT: u32 = usize::BITS - 1;
+ Self::from_lsb(((value | value.wrapping_neg()) >> HI_BIT) as Word)
+ }
+
+ /// Returns the truthy value if `x == y`, and the falsy value otherwise.
+ pub(crate) const fn from_usize_equality(x: usize, y: usize) -> Self {
+ Self::from_usize_being_nonzero(x.wrapping_sub(y)).not()
+ }
+
+ /// Returns the truthy value if `x < y`, and the falsy value otherwise.
+ pub(crate) const fn from_usize_lt(x: usize, y: usize) -> Self {
+ let bit = (((!x) & y) | (((!x) | y) & (x.wrapping_sub(y)))) >> (usize::BITS - 1);
+ Self::from_lsb(bit as Word)
+ }
+
+ pub(crate) const fn not(&self) -> Self {
+ Self(!self.0)
+ }
+
+ pub(crate) const fn or(&self, other: Self) -> Self {
+ Self(self.0 | other.0)
+ }
+
+ pub(crate) const fn and(&self, other: Self) -> Self {
+ Self(self.0 & other.0)
+ }
+
+ /// Return `b` if `self` is truthy, otherwise return `a`.
+ pub(crate) const fn select(&self, a: Word, b: Word) -> Word {
+ a ^ (self.0 & (a ^ b))
+ }
+
+ /// Return `x` if `self` is truthy, otherwise return 0.
+ pub(crate) const fn if_true(&self, x: Word) -> Word {
+ x & self.0
+ }
+
+ pub(crate) const fn is_true_vartime(&self) -> bool {
+ self.0 == CtChoice::TRUE.0
+ }
+
+ pub(crate) const fn to_u8(self) -> u8 {
+ (self.0 as u8) & 1
+ }
+}
+
+impl From<CtChoice> for Choice {
+ fn from(choice: CtChoice) -> Self {
+ Choice::from(choice.to_u8())
+ }
+}
+
+impl From<CtChoice> for bool {
+ fn from(choice: CtChoice) -> Self {
+ choice.is_true_vartime()
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use super::CtChoice;
+ use crate::Word;
+
+ #[test]
+ fn select() {
+ let a: Word = 1;
+ let b: Word = 2;
+ assert_eq!(CtChoice::TRUE.select(a, b), b);
+ assert_eq!(CtChoice::FALSE.select(a, b), a);
+ }
+}
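Why `select` needs no branch: with the all-ones mask, `a ^ (mask & (a ^ b))` collapses to `a ^ (a ^ b) = b`; with the all-zeros mask, to `a ^ 0 = a`. The same identity spelled out on plain words, as a standalone sketch rather than crate API:

    // Mirrors the 64-bit Word definition used by this crate.
    type Word = u64;

    // mask must be all ones (truthy) or all zeros (falsy).
    fn select(mask: Word, a: Word, b: Word) -> Word {
        a ^ (mask & (a ^ b))
    }

    assert_eq!(select(Word::MAX, 1, 2), 2); // truthy mask selects b
    assert_eq!(select(0, 1, 2), 1);         // falsy mask selects a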
diff --git a/vendor/crypto-bigint/src/lib.rs b/vendor/crypto-bigint/src/lib.rs
new file mode 100644
index 0000000..ed7af4c
--- /dev/null
+++ b/vendor/crypto-bigint/src/lib.rs
@@ -0,0 +1,215 @@
+#![no_std]
+#![cfg_attr(docsrs, feature(doc_auto_cfg))]
+#![doc = include_str!("../README.md")]
+#![doc(
+ html_logo_url = "https://raw.githubusercontent.com/RustCrypto/meta/master/logo.svg",
+ html_favicon_url = "https://raw.githubusercontent.com/RustCrypto/meta/master/logo.svg"
+)]
+#![deny(unsafe_code)]
+#![warn(
+ clippy::mod_module_files,
+ clippy::unwrap_used,
+ missing_docs,
+ missing_debug_implementations,
+ missing_copy_implementations,
+ rust_2018_idioms,
+ trivial_casts,
+ trivial_numeric_casts,
+ unused_qualifications
+)]
+
+//! ## Usage
+//!
+//! This crate defines a [`Uint`] type which is const generic around an inner
+//! [`Limb`] array, where a [`Limb`] is a newtype for a word-sized integer.
+//! Thus large integers are represented as arrays of smaller integers which
+//! are sized appropriately for the CPU, giving us some assurances of how
+//! arithmetic operations over those smaller integers will behave.
+//!
+//! To obtain appropriately sized integers regardless of what a given CPU's
+//! word size happens to be, a number of portable type aliases are provided for
+//! integer sizes commonly used in cryptography, for example:
+//! [`U128`], [`U256`], [`U384`], [`U2048`], [`U3072`], [`U4096`].
+//!
+//! ### `const fn` usage
+//!
+//! The [`Uint`] type provides a number of `const fn` inherent methods which
+//! can be used for initializing and performing arithmetic on big integers in
+//! const contexts:
+//!
+//! ```
+//! use crypto_bigint::U256;
+//!
+//! // Parse a constant from a big endian hexadecimal string.
+//! pub const MODULUS: U256 =
+//! U256::from_be_hex("ffffffff00000000ffffffffffffffffbce6faada7179e84f3b9cac2fc632551");
+//!
+//! // Compute `MODULUS` shifted right by 1 at compile time
+//! pub const MODULUS_SHR1: U256 = MODULUS.shr_vartime(1);
+//! ```
+//!
+//! Note that large constant computations may accidentally trigger the `const_eval_limit` of the compiler.
+//! The current way to deal with this problem is to either simplify this computation,
+//! or increase the compiler's limit (currently a nightly feature).
+//! One can completely remove the compiler's limit using:
+//! ```ignore
+//! #![feature(const_eval_limit)]
+//! #![const_eval_limit = "0"]
+//! ```
+//!
+//! ### Trait-based usage
+//!
+//! The [`Uint`] type itself does not implement the standard arithmetic traits
+//! such as [`Add`], [`Sub`], [`Mul`], and [`Div`].
+//!
+//! To use these traits you must first pick a wrapper type which determines
+//! overflow behavior: [`Wrapping`] or [`Checked`].
+//!
+//! #### Wrapping arithmetic
+//!
+//! ```
+//! use crypto_bigint::{U256, Wrapping};
+//!
+//! let a = Wrapping(U256::MAX);
+//! let b = Wrapping(U256::ONE);
+//! let c = a + b;
+//!
+//! // `MAX` + 1 wraps back around to zero
+//! assert_eq!(c.0, U256::ZERO);
+//! ```
+//!
+//! #### Checked arithmetic
+//!
+//! ```
+//! use crypto_bigint::{U256, Checked};
+//!
+//! let a = Checked::new(U256::ONE);
+//! let b = Checked::new(U256::from(2u8));
+//! let c = a + b;
+//! assert_eq!(c.0.unwrap(), U256::from(3u8))
+//! ```
+//!
+//! ### Modular arithmetic
+//!
+//! This library has initial support for modular arithmetic in the form of the
+//! [`AddMod`], [`SubMod`], [`NegMod`], and [`MulMod`] traits, as well as the
+//! support for the [`Rem`] trait when used with a [`NonZero`] operand.
+//!
+//! ```
+//! use crypto_bigint::{AddMod, U256};
+//!
+//! // mod 3
+//! let modulus = U256::from(3u8);
+//!
+//! // 1 + 1 mod 3 = 2
+//! let a = U256::ONE.add_mod(&U256::ONE, &modulus);
+//! assert_eq!(a, U256::from(2u8));
+//!
+//! // 2 + 1 mod 3 = 0
+//! let b = a.add_mod(&U256::ONE, &modulus);
+//! assert_eq!(b, U256::ZERO);
+//! ```
+//!
+//! It also supports modular arithmetic over constant moduli using `Residue`,
+//! and over moduli set at runtime using `DynResidue`.
+//! That includes modular exponentiation and multiplicative inverses.
+//! These features are described in the [`modular`] module.
+//!
+//! ### Random number generation
+//!
+//! When the `rand_core` or `rand` features of this crate are enabled, it's
+//! possible to generate random numbers using any CSRNG by using the
+//! [`Random`] trait:
+//!
+//! ```
+//! # #[cfg(feature = "rand")]
+//! # {
+//! use crypto_bigint::{Random, U256, rand_core::OsRng};
+//!
+//! let n = U256::random(&mut OsRng);
+//! # }
+//! ```
+//!
+//! #### Modular random number generation
+//!
+//! The [`RandomMod`] trait supports generating random numbers with a uniform
+//! distribution around a given [`NonZero`] modulus.
+//!
+//! ```
+//! # #[cfg(feature = "rand")]
+//! # {
+//! use crypto_bigint::{NonZero, RandomMod, U256, rand_core::OsRng};
+//!
+//! let modulus = NonZero::new(U256::from(3u8)).unwrap();
+//! let n = U256::random_mod(&mut OsRng, &modulus);
+//! # }
+//! ```
+//!
+//! [`Add`]: core::ops::Add
+//! [`Div`]: core::ops::Div
+//! [`Mul`]: core::ops::Mul
+//! [`Rem`]: core::ops::Rem
+//! [`Sub`]: core::ops::Sub
+
+#[cfg(feature = "alloc")]
+extern crate alloc;
+
+#[macro_use]
+mod nlimbs;
+
+#[cfg(feature = "generic-array")]
+mod array;
+#[cfg(feature = "alloc")]
+mod boxed;
+mod checked;
+mod ct_choice;
+mod limb;
+mod non_zero;
+mod traits;
+mod uint;
+mod wrapping;
+
+pub use crate::{
+ checked::Checked,
+ ct_choice::CtChoice,
+ limb::{Limb, WideWord, Word},
+ non_zero::NonZero,
+ traits::*,
+ uint::div_limb::Reciprocal,
+ uint::*,
+ wrapping::Wrapping,
+};
+pub use subtle;
+
+#[cfg(feature = "alloc")]
+pub use crate::boxed::uint::BoxedUint;
+
+#[cfg(feature = "generic-array")]
+pub use {
+ crate::array::{ArrayDecoding, ArrayEncoding, ByteArray},
+ generic_array::{self, typenum::consts},
+};
+
+#[cfg(feature = "rand_core")]
+pub use rand_core;
+
+#[cfg(feature = "rlp")]
+pub use rlp;
+
+#[cfg(feature = "zeroize")]
+pub use zeroize;
+
+/// Import prelude for this crate: includes important traits.
+pub mod prelude {
+ pub use crate::traits::*;
+
+ #[cfg(feature = "generic-array")]
+ pub use crate::array::{ArrayDecoding, ArrayEncoding};
+}
+
+#[cfg(sidefuzz)]
+#[no_mangle]
+pub extern "C" fn fuzz() {
+ let input = sidefuzz::fetch_input(32); // 32 bytes of fuzzing input as a &[u8]
+ sidefuzz::black_box(my_hopefully_constant_fn(input));
+}
diff --git a/vendor/crypto-bigint/src/limb.rs b/vendor/crypto-bigint/src/limb.rs
new file mode 100644
index 0000000..a5ca957
--- /dev/null
+++ b/vendor/crypto-bigint/src/limb.rs
@@ -0,0 +1,176 @@
+//! Big integers are represented as an array of smaller CPU word-size integers
+//! called "limbs".
+
+mod add;
+mod bit_and;
+mod bit_not;
+mod bit_or;
+mod bit_xor;
+mod bits;
+mod cmp;
+mod encoding;
+mod from;
+mod mul;
+mod neg;
+mod shl;
+mod shr;
+mod sub;
+
+#[cfg(feature = "rand_core")]
+mod rand;
+
+use crate::{Bounded, Zero};
+use core::fmt;
+use subtle::{Choice, ConditionallySelectable};
+
+#[cfg(feature = "serde")]
+use serdect::serde::{Deserialize, Deserializer, Serialize, Serializer};
+
+#[cfg(not(any(target_pointer_width = "32", target_pointer_width = "64")))]
+compile_error!("this crate builds on 32-bit and 64-bit platforms only");
+
+//
+// 32-bit definitions
+//
+
+/// Inner integer type that the [`Limb`] newtype wraps.
+#[cfg(target_pointer_width = "32")]
+pub type Word = u32;
+
+/// Unsigned wide integer type: double the width of [`Word`].
+#[cfg(target_pointer_width = "32")]
+pub type WideWord = u64;
+
+//
+// 64-bit definitions
+//
+
+/// Inner integer type that the [`Limb`] newtype wraps.
+#[cfg(target_pointer_width = "64")]
+pub type Word = u64;
+
+/// Unsigned wide integer type: double the width of [`Word`].
+#[cfg(target_pointer_width = "64")]
+pub type WideWord = u128;
+
+/// Highest bit in a [`Limb`].
+pub(crate) const HI_BIT: usize = Limb::BITS - 1;
+
+/// Big integers are represented as an array of smaller CPU word-size integers
+/// called "limbs".
+// Our PartialEq impl only differs from the default one by being constant-time, so this is safe
+#[allow(clippy::derived_hash_with_manual_eq)]
+#[derive(Copy, Clone, Default, Hash)]
+#[repr(transparent)]
+pub struct Limb(pub Word);
+
+impl Limb {
+ /// The value `0`.
+ pub const ZERO: Self = Limb(0);
+
+ /// The value `1`.
+ pub const ONE: Self = Limb(1);
+
+ /// Maximum value this [`Limb`] can express.
+ pub const MAX: Self = Limb(Word::MAX);
+
+ // 32-bit
+
+ /// Size of the inner integer in bits.
+ #[cfg(target_pointer_width = "32")]
+ pub const BITS: usize = 32;
+ /// Size of the inner integer in bytes.
+ #[cfg(target_pointer_width = "32")]
+ pub const BYTES: usize = 4;
+
+ // 64-bit
+
+ /// Size of the inner integer in bits.
+ #[cfg(target_pointer_width = "64")]
+ pub const BITS: usize = 64;
+ /// Size of the inner integer in bytes.
+ #[cfg(target_pointer_width = "64")]
+ pub const BYTES: usize = 8;
+}
+
+impl Bounded for Limb {
+ const BITS: usize = Self::BITS;
+ const BYTES: usize = Self::BYTES;
+}
+
+impl ConditionallySelectable for Limb {
+ #[inline]
+ fn conditional_select(a: &Self, b: &Self, choice: Choice) -> Self {
+ Self(Word::conditional_select(&a.0, &b.0, choice))
+ }
+}
+
+impl Zero for Limb {
+ const ZERO: Self = Self::ZERO;
+}
+
+impl fmt::Debug for Limb {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(f, "Limb(0x{self:X})")
+ }
+}
+
+impl fmt::Display for Limb {
+ #[inline]
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt::UpperHex::fmt(self, f)
+ }
+}
+
+impl fmt::LowerHex for Limb {
+ #[inline]
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(f, "{:0width$x}", &self.0, width = Self::BYTES * 2)
+ }
+}
+
+impl fmt::UpperHex for Limb {
+ #[inline]
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(f, "{:0width$X}", &self.0, width = Self::BYTES * 2)
+ }
+}
+
+#[cfg(feature = "serde")]
+impl<'de> Deserialize<'de> for Limb {
+ fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
+ where
+ D: Deserializer<'de>,
+ {
+ Ok(Self(Word::deserialize(deserializer)?))
+ }
+}
+
+#[cfg(feature = "serde")]
+impl Serialize for Limb {
+ fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
+ where
+ S: Serializer,
+ {
+ self.0.serialize(serializer)
+ }
+}
+
+#[cfg(feature = "zeroize")]
+impl zeroize::DefaultIsZeroes for Limb {}
+
+#[cfg(test)]
+mod tests {
+ #[cfg(feature = "alloc")]
+ use {super::Limb, alloc::format};
+
+ #[cfg(feature = "alloc")]
+ #[test]
+ fn debug() {
+ #[cfg(target_pointer_width = "32")]
+ assert_eq!(format!("{:?}", Limb(42)), "Limb(0x0000002A)");
+
+ #[cfg(target_pointer_width = "64")]
+ assert_eq!(format!("{:?}", Limb(42)), "Limb(0x000000000000002A)");
+ }
+}
diff --git a/vendor/crypto-bigint/src/limb/add.rs b/vendor/crypto-bigint/src/limb/add.rs
new file mode 100644
index 0000000..0ef793b
--- /dev/null
+++ b/vendor/crypto-bigint/src/limb/add.rs
@@ -0,0 +1,180 @@
+//! Limb addition
+
+use crate::{Checked, CheckedAdd, Limb, WideWord, Word, Wrapping, Zero};
+use core::ops::{Add, AddAssign};
+use subtle::CtOption;
+
+impl Limb {
+ /// Computes `self + rhs + carry`, returning the result along with the new carry.
+ #[inline(always)]
+ pub const fn adc(self, rhs: Limb, carry: Limb) -> (Limb, Limb) {
+ let a = self.0 as WideWord;
+ let b = rhs.0 as WideWord;
+ let carry = carry.0 as WideWord;
+ let ret = a + b + carry;
+ (Limb(ret as Word), Limb((ret >> Self::BITS) as Word))
+ }
+
+ /// Perform saturating addition.
+ #[inline]
+ pub const fn saturating_add(&self, rhs: Self) -> Self {
+ Limb(self.0.saturating_add(rhs.0))
+ }
+
+ /// Perform wrapping addition, discarding overflow.
+ #[inline(always)]
+ pub const fn wrapping_add(&self, rhs: Self) -> Self {
+ Limb(self.0.wrapping_add(rhs.0))
+ }
+}
+
+impl CheckedAdd for Limb {
+ type Output = Self;
+
+ #[inline]
+ fn checked_add(&self, rhs: Self) -> CtOption<Self> {
+ let (result, carry) = self.adc(rhs, Limb::ZERO);
+ CtOption::new(result, carry.is_zero())
+ }
+}
+
+impl Add for Wrapping<Limb> {
+ type Output = Self;
+
+ fn add(self, rhs: Self) -> Wrapping<Limb> {
+ Wrapping(self.0.wrapping_add(rhs.0))
+ }
+}
+
+impl Add<&Wrapping<Limb>> for Wrapping<Limb> {
+ type Output = Wrapping<Limb>;
+
+ fn add(self, rhs: &Wrapping<Limb>) -> Wrapping<Limb> {
+ Wrapping(self.0.wrapping_add(rhs.0))
+ }
+}
+
+impl Add<Wrapping<Limb>> for &Wrapping<Limb> {
+ type Output = Wrapping<Limb>;
+
+ fn add(self, rhs: Wrapping<Limb>) -> Wrapping<Limb> {
+ Wrapping(self.0.wrapping_add(rhs.0))
+ }
+}
+
+impl Add<&Wrapping<Limb>> for &Wrapping<Limb> {
+ type Output = Wrapping<Limb>;
+
+ fn add(self, rhs: &Wrapping<Limb>) -> Wrapping<Limb> {
+ Wrapping(self.0.wrapping_add(rhs.0))
+ }
+}
+
+impl AddAssign for Wrapping<Limb> {
+ fn add_assign(&mut self, other: Self) {
+ *self = *self + other;
+ }
+}
+
+impl AddAssign<&Wrapping<Limb>> for Wrapping<Limb> {
+ fn add_assign(&mut self, other: &Self) {
+ *self = *self + other;
+ }
+}
+
+impl Add for Checked<Limb> {
+ type Output = Self;
+
+ fn add(self, rhs: Self) -> Checked<Limb> {
+ Checked(
+ self.0
+ .and_then(|lhs| rhs.0.and_then(|rhs| lhs.checked_add(rhs))),
+ )
+ }
+}
+
+impl Add<&Checked<Limb>> for Checked<Limb> {
+ type Output = Checked<Limb>;
+
+ fn add(self, rhs: &Checked<Limb>) -> Checked<Limb> {
+ Checked(
+ self.0
+ .and_then(|lhs| rhs.0.and_then(|rhs| lhs.checked_add(rhs))),
+ )
+ }
+}
+
+impl Add<Checked<Limb>> for &Checked<Limb> {
+ type Output = Checked<Limb>;
+
+ fn add(self, rhs: Checked<Limb>) -> Checked<Limb> {
+ Checked(
+ self.0
+ .and_then(|lhs| rhs.0.and_then(|rhs| lhs.checked_add(rhs))),
+ )
+ }
+}
+
+impl Add<&Checked<Limb>> for &Checked<Limb> {
+ type Output = Checked<Limb>;
+
+ fn add(self, rhs: &Checked<Limb>) -> Checked<Limb> {
+ Checked(
+ self.0
+ .and_then(|lhs| rhs.0.and_then(|rhs| lhs.checked_add(rhs))),
+ )
+ }
+}
+
+impl AddAssign for Checked<Limb> {
+ fn add_assign(&mut self, other: Self) {
+ *self = *self + other;
+ }
+}
+
+impl AddAssign<&Checked<Limb>> for Checked<Limb> {
+ fn add_assign(&mut self, other: &Self) {
+ *self = *self + other;
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use crate::{CheckedAdd, Limb};
+
+ #[test]
+ fn adc_no_carry() {
+ let (res, carry) = Limb::ZERO.adc(Limb::ONE, Limb::ZERO);
+ assert_eq!(res, Limb::ONE);
+ assert_eq!(carry, Limb::ZERO);
+ }
+
+ #[test]
+ fn adc_with_carry() {
+ let (res, carry) = Limb::MAX.adc(Limb::ONE, Limb::ZERO);
+ assert_eq!(res, Limb::ZERO);
+ assert_eq!(carry, Limb::ONE);
+ }
+
+ #[test]
+ fn wrapping_add_no_carry() {
+ assert_eq!(Limb::ZERO.wrapping_add(Limb::ONE), Limb::ONE);
+ }
+
+ #[test]
+ fn wrapping_add_with_carry() {
+ assert_eq!(Limb::MAX.wrapping_add(Limb::ONE), Limb::ZERO);
+ }
+
+ #[test]
+ fn checked_add_ok() {
+ let result = Limb::ZERO.checked_add(Limb::ONE);
+ assert_eq!(result.unwrap(), Limb::ONE);
+ }
+
+ #[test]
+ fn checked_add_overflow() {
+ let result = Limb::MAX.checked_add(Limb::ONE);
+ assert!(!bool::from(result.is_some()));
+ }
+}
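How `adc` composes into multi-limb addition: the carry out of each limb feeds the next, which is exactly the ripple the `Uint` and `BoxedUint` addition loops are built from. A hand-rolled two-limb sketch:

    use crypto_bigint::Limb;

    // (MAX, 0) + (1, 0) in little-endian limb order: the low limbs
    // overflow and the carry ripples into the high limb.
    let (lo, carry) = Limb::MAX.adc(Limb::ONE, Limb::ZERO);
    let (hi, carry) = Limb::ZERO.adc(Limb::ZERO, carry);

    assert_eq!(lo, Limb::ZERO);
    assert_eq!(hi, Limb::ONE);
    assert_eq!(carry, Limb::ZERO); // no overflow out of the top limb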
diff --git a/vendor/crypto-bigint/src/limb/bit_and.rs b/vendor/crypto-bigint/src/limb/bit_and.rs
new file mode 100644
index 0000000..3f0bfba
--- /dev/null
+++ b/vendor/crypto-bigint/src/limb/bit_and.rs
@@ -0,0 +1,21 @@
+//! Limb bit and operations.
+
+use super::Limb;
+use core::ops::BitAnd;
+
+impl Limb {
+ /// Calculates `a & b`.
+ #[inline(always)]
+ pub const fn bitand(self, rhs: Self) -> Self {
+ Limb(self.0 & rhs.0)
+ }
+}
+
+impl BitAnd for Limb {
+ type Output = Limb;
+
+ #[inline(always)]
+ fn bitand(self, rhs: Self) -> Self::Output {
+ self.bitand(rhs)
+ }
+}
diff --git a/vendor/crypto-bigint/src/limb/bit_not.rs b/vendor/crypto-bigint/src/limb/bit_not.rs
new file mode 100644
index 0000000..26676d5
--- /dev/null
+++ b/vendor/crypto-bigint/src/limb/bit_not.rs
@@ -0,0 +1,19 @@
+//! Limb bit not operations.
+
+use super::Limb;
+use core::ops::Not;
+
+impl Limb {
+ /// Calculates `!a`.
+ pub const fn not(self) -> Self {
+ Limb(!self.0)
+ }
+}
+
+impl Not for Limb {
+ type Output = Limb;
+
+ fn not(self) -> <Self as Not>::Output {
+ self.not()
+ }
+}
diff --git a/vendor/crypto-bigint/src/limb/bit_or.rs b/vendor/crypto-bigint/src/limb/bit_or.rs
new file mode 100644
index 0000000..cafac18
--- /dev/null
+++ b/vendor/crypto-bigint/src/limb/bit_or.rs
@@ -0,0 +1,19 @@
+//! Limb bit or operations.
+
+use super::Limb;
+use core::ops::BitOr;
+
+impl Limb {
+ /// Calculates `a | b`.
+ pub const fn bitor(self, rhs: Self) -> Self {
+ Limb(self.0 | rhs.0)
+ }
+}
+
+impl BitOr for Limb {
+ type Output = Limb;
+
+ fn bitor(self, rhs: Self) -> Self::Output {
+ self.bitor(rhs)
+ }
+}
diff --git a/vendor/crypto-bigint/src/limb/bit_xor.rs b/vendor/crypto-bigint/src/limb/bit_xor.rs
new file mode 100644
index 0000000..a507822
--- /dev/null
+++ b/vendor/crypto-bigint/src/limb/bit_xor.rs
@@ -0,0 +1,19 @@
+//! Limb bit xor operations.
+
+use super::Limb;
+use core::ops::BitXor;
+
+impl Limb {
+ /// Calculates `a ^ b`.
+ pub const fn bitxor(self, rhs: Self) -> Self {
+ Limb(self.0 ^ rhs.0)
+ }
+}
+
+impl BitXor for Limb {
+ type Output = Limb;
+
+ fn bitxor(self, rhs: Self) -> Self::Output {
+ self.bitxor(rhs)
+ }
+}
diff --git a/vendor/crypto-bigint/src/limb/bits.rs b/vendor/crypto-bigint/src/limb/bits.rs
new file mode 100644
index 0000000..02a74de
--- /dev/null
+++ b/vendor/crypto-bigint/src/limb/bits.rs
@@ -0,0 +1,18 @@
+use super::Limb;
+
+impl Limb {
+ /// Calculate the number of bits needed to represent this number.
+ pub const fn bits(self) -> usize {
+ Limb::BITS - (self.0.leading_zeros() as usize)
+ }
+
+ /// Calculate the number of leading zeros in the binary representation of this number.
+ pub const fn leading_zeros(self) -> usize {
+ self.0.leading_zeros() as usize
+ }
+
+ /// Calculate the number of trailing zeros in the binary representation of this number.
+ pub const fn trailing_zeros(self) -> usize {
+ self.0.trailing_zeros() as usize
+ }
+}
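For instance, `bits` is the index of the highest set bit plus one (a small sketch):

    use crypto_bigint::Limb;

    assert_eq!(Limb(0b1011).bits(), 4);       // highest set bit is bit 3
    assert_eq!(Limb::ZERO.bits(), 0);         // zero needs no bits
    assert_eq!(Limb::MAX.bits(), Limb::BITS); // all bits set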
diff --git a/vendor/crypto-bigint/src/limb/cmp.rs b/vendor/crypto-bigint/src/limb/cmp.rs
new file mode 100644
index 0000000..4cdec5b
--- /dev/null
+++ b/vendor/crypto-bigint/src/limb/cmp.rs
@@ -0,0 +1,200 @@
+//! Limb comparisons
+
+use super::HI_BIT;
+use crate::{CtChoice, Limb};
+use core::cmp::Ordering;
+use subtle::{Choice, ConstantTimeEq, ConstantTimeGreater, ConstantTimeLess};
+
+impl Limb {
+ /// Is this limb an odd number?
+ #[inline]
+ pub fn is_odd(&self) -> Choice {
+ Choice::from(self.0 as u8 & 1)
+ }
+
+ /// Perform a comparison of the inner value in variable time.
+ ///
+ /// Note that the [`PartialOrd`] and [`Ord`] impls wrap constant-time
+ /// comparisons using the `subtle` crate.
+ pub fn cmp_vartime(&self, other: &Self) -> Ordering {
+ self.0.cmp(&other.0)
+ }
+
+ /// Performs an equality check in variable-time.
+ pub const fn eq_vartime(&self, other: &Self) -> bool {
+ self.0 == other.0
+ }
+
+ /// Return `b` if `c` is truthy, otherwise return `a`.
+ #[inline]
+ pub(crate) const fn ct_select(a: Self, b: Self, c: CtChoice) -> Self {
+ Self(c.select(a.0, b.0))
+ }
+
+ /// Returns the truthy value if `self != 0` and the falsy value otherwise.
+ #[inline]
+ pub(crate) const fn ct_is_nonzero(&self) -> CtChoice {
+ let inner = self.0;
+ CtChoice::from_lsb((inner | inner.wrapping_neg()) >> HI_BIT)
+ }
+
+ /// Returns the truthy value if `lhs == rhs` and the falsy value otherwise.
+ #[inline]
+ pub(crate) const fn ct_eq(lhs: Self, rhs: Self) -> CtChoice {
+ let x = lhs.0;
+ let y = rhs.0;
+
+ // x ^ y == 0 if and only if x == y
+ Self(x ^ y).ct_is_nonzero().not()
+ }
+
+ /// Returns the truthy value if `lhs < rhs` and the falsy value otherwise.
+ #[inline]
+ pub(crate) const fn ct_lt(lhs: Self, rhs: Self) -> CtChoice {
+ let x = lhs.0;
+ let y = rhs.0;
+ let bit = (((!x) & y) | (((!x) | y) & (x.wrapping_sub(y)))) >> (Limb::BITS - 1);
+ CtChoice::from_lsb(bit)
+ }
+
+ /// Returns the truthy value if `lhs <= rhs` and the falsy value otherwise.
+ #[inline]
+ pub(crate) const fn ct_le(lhs: Self, rhs: Self) -> CtChoice {
+ let x = lhs.0;
+ let y = rhs.0;
+ let bit = (((!x) | y) & ((x ^ y) | !(y.wrapping_sub(x)))) >> (Limb::BITS - 1);
+ CtChoice::from_lsb(bit)
+ }
+}
+
+impl ConstantTimeEq for Limb {
+ #[inline]
+ fn ct_eq(&self, other: &Self) -> Choice {
+ self.0.ct_eq(&other.0)
+ }
+}
+
+impl ConstantTimeGreater for Limb {
+ #[inline]
+ fn ct_gt(&self, other: &Self) -> Choice {
+ self.0.ct_gt(&other.0)
+ }
+}
+
+impl ConstantTimeLess for Limb {
+ #[inline]
+ fn ct_lt(&self, other: &Self) -> Choice {
+ self.0.ct_lt(&other.0)
+ }
+}
+
+impl Eq for Limb {}
+
+impl Ord for Limb {
+ fn cmp(&self, other: &Self) -> Ordering {
+ let mut n = 0i8;
+ n -= self.ct_lt(other).unwrap_u8() as i8;
+ n += self.ct_gt(other).unwrap_u8() as i8;
+
+ match n {
+ -1 => Ordering::Less,
+ 1 => Ordering::Greater,
+ _ => {
+ debug_assert_eq!(n, 0);
+ debug_assert!(bool::from(self.ct_eq(other)));
+ Ordering::Equal
+ }
+ }
+ }
+}
+
+impl PartialOrd for Limb {
+ #[inline]
+ fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
+ Some(self.cmp(other))
+ }
+}
+
+impl PartialEq for Limb {
+ #[inline]
+ fn eq(&self, other: &Self) -> bool {
+ self.ct_eq(other).into()
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use crate::{Limb, Zero};
+ use core::cmp::Ordering;
+ use subtle::{ConstantTimeEq, ConstantTimeGreater, ConstantTimeLess};
+
+ #[test]
+ fn is_zero() {
+ assert!(bool::from(Limb::ZERO.is_zero()));
+ assert!(!bool::from(Limb::ONE.is_zero()));
+ assert!(!bool::from(Limb::MAX.is_zero()));
+ }
+
+ #[test]
+ fn is_odd() {
+ assert!(!bool::from(Limb::ZERO.is_odd()));
+ assert!(bool::from(Limb::ONE.is_odd()));
+ assert!(bool::from(Limb::MAX.is_odd()));
+ }
+
+ #[test]
+ fn ct_eq() {
+ let a = Limb::ZERO;
+ let b = Limb::MAX;
+
+ assert!(bool::from(a.ct_eq(&a)));
+ assert!(!bool::from(a.ct_eq(&b)));
+ assert!(!bool::from(b.ct_eq(&a)));
+ assert!(bool::from(b.ct_eq(&b)));
+ }
+
+ #[test]
+ fn ct_gt() {
+ let a = Limb::ZERO;
+ let b = Limb::ONE;
+ let c = Limb::MAX;
+
+ assert!(bool::from(b.ct_gt(&a)));
+ assert!(bool::from(c.ct_gt(&a)));
+ assert!(bool::from(c.ct_gt(&b)));
+
+ assert!(!bool::from(a.ct_gt(&a)));
+ assert!(!bool::from(b.ct_gt(&b)));
+ assert!(!bool::from(c.ct_gt(&c)));
+
+ assert!(!bool::from(a.ct_gt(&b)));
+ assert!(!bool::from(a.ct_gt(&c)));
+ assert!(!bool::from(b.ct_gt(&c)));
+ }
+
+ #[test]
+ fn ct_lt() {
+ let a = Limb::ZERO;
+ let b = Limb::ONE;
+ let c = Limb::MAX;
+
+ assert!(bool::from(a.ct_lt(&b)));
+ assert!(bool::from(a.ct_lt(&c)));
+ assert!(bool::from(b.ct_lt(&c)));
+
+ assert!(!bool::from(a.ct_lt(&a)));
+ assert!(!bool::from(b.ct_lt(&b)));
+ assert!(!bool::from(c.ct_lt(&c)));
+
+ assert!(!bool::from(b.ct_lt(&a)));
+ assert!(!bool::from(c.ct_lt(&a)));
+ assert!(!bool::from(c.ct_lt(&b)));
+ }
+
+ #[test]
+ fn cmp() {
+ assert_eq!(Limb::ZERO.cmp(&Limb::ONE), Ordering::Less);
+ assert_eq!(Limb::ONE.cmp(&Limb::ONE), Ordering::Equal);
+ assert_eq!(Limb::MAX.cmp(&Limb::ONE), Ordering::Greater);
+ }
+}
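The `ct_lt` expression computes the borrow bit of `x - y` branchlessly: the top bit of `((!x) & y) | (((!x) | y) & (x - y))` is set exactly when the subtraction would borrow, i.e. when `x < y`. A standalone sketch checking it against the boundary cases:

    // Mirrors the 64-bit Word definition used by this crate.
    type Word = u64;

    // Extract the would-be borrow bit of x - y without branching.
    fn lt_bit(x: Word, y: Word) -> Word {
        (((!x) & y) | (((!x) | y) & (x.wrapping_sub(y)))) >> (Word::BITS - 1)
    }

    assert_eq!(lt_bit(0, 1), 1);                 // 0 < 1
    assert_eq!(lt_bit(1, 0), 0);
    assert_eq!(lt_bit(Word::MAX, Word::MAX), 0); // equal is not less
    assert_eq!(lt_bit(0, Word::MAX), 1);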
diff --git a/vendor/crypto-bigint/src/limb/encoding.rs b/vendor/crypto-bigint/src/limb/encoding.rs
new file mode 100644
index 0000000..ab28a6a
--- /dev/null
+++ b/vendor/crypto-bigint/src/limb/encoding.rs
@@ -0,0 +1,64 @@
+//! Limb encoding
+
+use super::{Limb, Word};
+use crate::Encoding;
+
+impl Encoding for Limb {
+ #[cfg(target_pointer_width = "32")]
+ type Repr = [u8; 4];
+ #[cfg(target_pointer_width = "64")]
+ type Repr = [u8; 8];
+
+ #[inline]
+ fn from_be_bytes(bytes: Self::Repr) -> Self {
+ Limb(Word::from_be_bytes(bytes))
+ }
+
+ #[inline]
+ fn from_le_bytes(bytes: Self::Repr) -> Self {
+ Limb(Word::from_le_bytes(bytes))
+ }
+
+ #[inline]
+ fn to_be_bytes(&self) -> Self::Repr {
+ self.0.to_be_bytes()
+ }
+
+ #[inline]
+ fn to_le_bytes(&self) -> Self::Repr {
+ self.0.to_le_bytes()
+ }
+}
+
+#[cfg(test)]
+mod test {
+ use super::*;
+ use proptest::prelude::*;
+
+ prop_compose! {
+ fn limb()(inner in any::<Word>()) -> Limb {
+ Limb(inner)
+ }
+ }
+
+ proptest! {
+ #[test]
+ fn roundtrip(a in limb()) {
+ assert_eq!(a, Limb::from_be_bytes(a.to_be_bytes()));
+ assert_eq!(a, Limb::from_le_bytes(a.to_le_bytes()));
+ }
+ }
+
+ proptest! {
+ #[test]
+ fn reverse(a in limb()) {
+ let mut bytes = a.to_be_bytes();
+ bytes.reverse();
+ assert_eq!(a, Limb::from_le_bytes(bytes));
+
+ let mut bytes = a.to_le_bytes();
+ bytes.reverse();
+ assert_eq!(a, Limb::from_be_bytes(bytes));
+ }
+ }
+}
diff --git a/vendor/crypto-bigint/src/limb/from.rs b/vendor/crypto-bigint/src/limb/from.rs
new file mode 100644
index 0000000..aa64992
--- /dev/null
+++ b/vendor/crypto-bigint/src/limb/from.rs
@@ -0,0 +1,74 @@
+//! `From`-like conversions for [`Limb`].
+
+use super::{Limb, WideWord, Word};
+
+impl Limb {
+ /// Create a [`Limb`] from a `u8` integer (const-friendly)
+ // TODO(tarcieri): replace with `const impl From<u8>` when stable
+ pub const fn from_u8(n: u8) -> Self {
+ Limb(n as Word)
+ }
+
+ /// Create a [`Limb`] from a `u16` integer (const-friendly)
+ // TODO(tarcieri): replace with `const impl From<u16>` when stable
+ pub const fn from_u16(n: u16) -> Self {
+ Limb(n as Word)
+ }
+
+ /// Create a [`Limb`] from a `u32` integer (const-friendly)
+ // TODO(tarcieri): replace with `const impl From<u32>` when stable
+ pub const fn from_u32(n: u32) -> Self {
+ #[allow(trivial_numeric_casts)]
+ Limb(n as Word)
+ }
+
+ /// Create a [`Limb`] from a `u64` integer (const-friendly)
+ // TODO(tarcieri): replace with `const impl From<u64>` when stable
+ #[cfg(target_pointer_width = "64")]
+ pub const fn from_u64(n: u64) -> Self {
+ Limb(n)
+ }
+}
+
+impl From<u8> for Limb {
+ #[inline]
+ fn from(n: u8) -> Limb {
+ Limb(n.into())
+ }
+}
+
+impl From<u16> for Limb {
+ #[inline]
+ fn from(n: u16) -> Limb {
+ Limb(n.into())
+ }
+}
+
+impl From<u32> for Limb {
+ #[inline]
+ fn from(n: u32) -> Limb {
+ Limb(n.into())
+ }
+}
+
+#[cfg(target_pointer_width = "64")]
+impl From<u64> for Limb {
+ #[inline]
+ fn from(n: u64) -> Limb {
+ Limb(n)
+ }
+}
+
+impl From<Limb> for Word {
+ #[inline]
+ fn from(limb: Limb) -> Word {
+ limb.0
+ }
+}
+
+impl From<Limb> for WideWord {
+ #[inline]
+ fn from(limb: Limb) -> WideWord {
+ limb.0.into()
+ }
+}
diff --git a/vendor/crypto-bigint/src/limb/mul.rs b/vendor/crypto-bigint/src/limb/mul.rs
new file mode 100644
index 0000000..7f8b084
--- /dev/null
+++ b/vendor/crypto-bigint/src/limb/mul.rs
@@ -0,0 +1,195 @@
+//! Limb multiplication
+
+use crate::{Checked, CheckedMul, Limb, WideWord, Word, Wrapping, Zero};
+use core::ops::{Mul, MulAssign};
+use subtle::CtOption;
+
+impl Limb {
+ /// Computes `self + (b * c) + carry`, returning the result along with the new carry.
+ #[inline(always)]
+ pub const fn mac(self, b: Limb, c: Limb, carry: Limb) -> (Limb, Limb) {
+ let a = self.0 as WideWord;
+ let b = b.0 as WideWord;
+ let c = c.0 as WideWord;
+ let carry = carry.0 as WideWord;
+ let ret = a + (b * c) + carry;
+ (Limb(ret as Word), Limb((ret >> Self::BITS) as Word))
+ }
+
+ /// Perform saturating multiplication.
+ #[inline]
+ pub const fn saturating_mul(&self, rhs: Self) -> Self {
+ Limb(self.0.saturating_mul(rhs.0))
+ }
+
+ /// Perform wrapping multiplication, discarding overflow.
+ #[inline(always)]
+ pub const fn wrapping_mul(&self, rhs: Self) -> Self {
+ Limb(self.0.wrapping_mul(rhs.0))
+ }
+
+ /// Compute "wide" multiplication, with a product twice the size of the input.
+ pub(crate) const fn mul_wide(&self, rhs: Self) -> WideWord {
+ (self.0 as WideWord) * (rhs.0 as WideWord)
+ }
+}
+
+impl CheckedMul for Limb {
+ type Output = Self;
+
+ #[inline]
+ fn checked_mul(&self, rhs: Self) -> CtOption<Self> {
+ let result = self.mul_wide(rhs);
+ let overflow = Limb((result >> Self::BITS) as Word);
+ CtOption::new(Limb(result as Word), overflow.is_zero())
+ }
+}
+
+impl Mul for Wrapping<Limb> {
+ type Output = Self;
+
+ fn mul(self, rhs: Self) -> Wrapping<Limb> {
+ Wrapping(self.0.wrapping_mul(rhs.0))
+ }
+}
+
+impl Mul<&Wrapping<Limb>> for Wrapping<Limb> {
+ type Output = Wrapping<Limb>;
+
+ fn mul(self, rhs: &Wrapping<Limb>) -> Wrapping<Limb> {
+ Wrapping(self.0.wrapping_mul(rhs.0))
+ }
+}
+
+impl Mul<Wrapping<Limb>> for &Wrapping<Limb> {
+ type Output = Wrapping<Limb>;
+
+ fn mul(self, rhs: Wrapping<Limb>) -> Wrapping<Limb> {
+ Wrapping(self.0.wrapping_mul(rhs.0))
+ }
+}
+
+impl Mul<&Wrapping<Limb>> for &Wrapping<Limb> {
+ type Output = Wrapping<Limb>;
+
+ fn mul(self, rhs: &Wrapping<Limb>) -> Wrapping<Limb> {
+ Wrapping(self.0.wrapping_mul(rhs.0))
+ }
+}
+
+impl MulAssign for Wrapping<Limb> {
+ fn mul_assign(&mut self, other: Self) {
+ *self = *self * other;
+ }
+}
+
+impl MulAssign<&Wrapping<Limb>> for Wrapping<Limb> {
+ fn mul_assign(&mut self, other: &Self) {
+ *self = *self * other;
+ }
+}
+
+impl Mul for Checked<Limb> {
+ type Output = Self;
+
+ fn mul(self, rhs: Self) -> Checked<Limb> {
+ Checked(
+ self.0
+ .and_then(|lhs| rhs.0.and_then(|rhs| lhs.checked_mul(rhs))),
+ )
+ }
+}
+
+impl Mul<&Checked<Limb>> for Checked<Limb> {
+ type Output = Checked<Limb>;
+
+ fn mul(self, rhs: &Checked<Limb>) -> Checked<Limb> {
+ Checked(
+ self.0
+ .and_then(|lhs| rhs.0.and_then(|rhs| lhs.checked_mul(rhs))),
+ )
+ }
+}
+
+impl Mul<Checked<Limb>> for &Checked<Limb> {
+ type Output = Checked<Limb>;
+
+ fn mul(self, rhs: Checked<Limb>) -> Checked<Limb> {
+ Checked(
+ self.0
+ .and_then(|lhs| rhs.0.and_then(|rhs| lhs.checked_mul(rhs))),
+ )
+ }
+}
+
+impl Mul<&Checked<Limb>> for &Checked<Limb> {
+ type Output = Checked<Limb>;
+
+ fn mul(self, rhs: &Checked<Limb>) -> Checked<Limb> {
+ Checked(
+ self.0
+ .and_then(|lhs| rhs.0.and_then(|rhs| lhs.checked_mul(rhs))),
+ )
+ }
+}
+
+impl MulAssign for Checked<Limb> {
+ fn mul_assign(&mut self, other: Self) {
+ *self = *self * other;
+ }
+}
+
+impl MulAssign<&Checked<Limb>> for Checked<Limb> {
+ fn mul_assign(&mut self, other: &Self) {
+ *self = *self * other;
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use super::{CheckedMul, Limb, WideWord};
+
+ #[test]
+ fn mul_wide_zero_and_one() {
+ assert_eq!(Limb::ZERO.mul_wide(Limb::ZERO), 0);
+ assert_eq!(Limb::ZERO.mul_wide(Limb::ONE), 0);
+ assert_eq!(Limb::ONE.mul_wide(Limb::ZERO), 0);
+ assert_eq!(Limb::ONE.mul_wide(Limb::ONE), 1);
+ }
+
+ #[test]
+ fn mul_wide() {
+ let primes: &[u32] = &[3, 5, 17, 257, 65537];
+
+ for &a_int in primes {
+ for &b_int in primes {
+ let actual = Limb::from_u32(a_int).mul_wide(Limb::from_u32(b_int));
+ let expected = a_int as WideWord * b_int as WideWord;
+ assert_eq!(actual, expected);
+ }
+ }
+ }
+
+ #[test]
+ #[cfg(target_pointer_width = "32")]
+ fn checked_mul_ok() {
+ let n = Limb::from_u16(0xffff);
+ assert_eq!(n.checked_mul(n).unwrap(), Limb::from_u32(0xfffe_0001));
+ }
+
+ #[test]
+ #[cfg(target_pointer_width = "64")]
+ fn checked_mul_ok() {
+ let n = Limb::from_u32(0xffff_ffff);
+ assert_eq!(
+ n.checked_mul(n).unwrap(),
+ Limb::from_u64(0xffff_fffe_0000_0001)
+ );
+ }
+
+ #[test]
+ fn checked_mul_overflow() {
+ let n = Limb::MAX;
+ assert!(bool::from(n.checked_mul(n).is_none()));
+ }
+}
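`mac` is the inner kernel of schoolbook multiplication: each call folds one partial product into an accumulator limb and pushes the overflow into the carry. A hand-rolled sketch multiplying a two-limb value by a single limb, writing B for 2^BITS:

    use crypto_bigint::Limb;

    // a = B^2 - 1 and b = B - 1; the product's limbs, low to high,
    // are 1, B - 1, B - 2.
    let (a0, a1) = (Limb::MAX, Limb::MAX);
    let b = Limb::MAX;

    let (r0, carry) = Limb::ZERO.mac(a0, b, Limb::ZERO);
    let (r1, carry) = Limb::ZERO.mac(a1, b, carry);
    let r2 = carry;

    assert_eq!(r0, Limb::ONE);
    assert_eq!(r1, Limb::MAX);                         // B - 1
    assert_eq!(r2, Limb::MAX.wrapping_sub(Limb::ONE)); // B - 2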
diff --git a/vendor/crypto-bigint/src/limb/neg.rs b/vendor/crypto-bigint/src/limb/neg.rs
new file mode 100644
index 0000000..b658bb9
--- /dev/null
+++ b/vendor/crypto-bigint/src/limb/neg.rs
@@ -0,0 +1,20 @@
+//! Limb negation
+
+use crate::{Limb, Wrapping};
+use core::ops::Neg;
+
+impl Neg for Wrapping<Limb> {
+ type Output = Self;
+
+ fn neg(self) -> Self::Output {
+ Self(self.0.wrapping_neg())
+ }
+}
+
+impl Limb {
+ /// Perform wrapping negation.
+ #[inline(always)]
+ pub const fn wrapping_neg(self) -> Self {
+ Limb(self.0.wrapping_neg())
+ }
+}
diff --git a/vendor/crypto-bigint/src/limb/rand.rs b/vendor/crypto-bigint/src/limb/rand.rs
new file mode 100644
index 0000000..4347168
--- /dev/null
+++ b/vendor/crypto-bigint/src/limb/rand.rs
@@ -0,0 +1,38 @@
+//! Random number generator support
+
+use super::Limb;
+use crate::{Encoding, NonZero, Random, RandomMod};
+use rand_core::CryptoRngCore;
+use subtle::ConstantTimeLess;
+
+impl Random for Limb {
+ #[cfg(target_pointer_width = "32")]
+ fn random(rng: &mut impl CryptoRngCore) -> Self {
+ Self(rng.next_u32())
+ }
+
+ #[cfg(target_pointer_width = "64")]
+ fn random(rng: &mut impl CryptoRngCore) -> Self {
+ Self(rng.next_u64())
+ }
+}
+
+impl RandomMod for Limb {
+ fn random_mod(rng: &mut impl CryptoRngCore, modulus: &NonZero<Self>) -> Self {
+ let mut bytes = <Self as Encoding>::Repr::default();
+
+ let n_bits = modulus.bits();
+ let n_bytes = (n_bits + 7) / 8;
+ let mask = 0xff >> (8 * n_bytes - n_bits);
+
+ loop {
+ rng.fill_bytes(&mut bytes[..n_bytes]);
+ bytes[n_bytes - 1] &= mask;
+
+ let n = Limb::from_le_bytes(bytes);
+ if n.ct_lt(modulus).into() {
+ return n;
+ }
+ }
+ }
+}
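The loop above is rejection sampling: each candidate is masked down to the modulus' bit length, then retried until one lands below the modulus, which keeps the distribution uniform. A sketch at the call site, assuming the `rand_core` feature as in the crate docs:

    use crypto_bigint::{Limb, NonZero, RandomMod, rand_core::OsRng};

    // 1000 needs 10 bits, so candidates are masked below 1024 and
    // rejected until one falls under the modulus.
    let modulus = NonZero::new(Limb(1000)).unwrap();
    let n = Limb::random_mod(&mut OsRng, &modulus);
    assert!(n < Limb(1000));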
diff --git a/vendor/crypto-bigint/src/limb/shl.rs b/vendor/crypto-bigint/src/limb/shl.rs
new file mode 100644
index 0000000..88e37f0
--- /dev/null
+++ b/vendor/crypto-bigint/src/limb/shl.rs
@@ -0,0 +1,74 @@
+//! Limb left bitshift
+
+use crate::{Limb, Word};
+use core::ops::{Shl, ShlAssign};
+
+impl Limb {
+ /// Computes `self << rhs`.
+ /// Panics if `rhs` overflows `Limb::BITS`.
+ #[inline(always)]
+ pub const fn shl(self, rhs: Self) -> Self {
+ Limb(self.0 << rhs.0)
+ }
+}
+
+impl Shl for Limb {
+ type Output = Self;
+
+ #[inline(always)]
+ fn shl(self, rhs: Self) -> Self::Output {
+ self.shl(rhs)
+ }
+}
+
+impl Shl<usize> for Limb {
+ type Output = Self;
+
+ #[inline(always)]
+ fn shl(self, rhs: usize) -> Self::Output {
+ self.shl(Limb(rhs as Word))
+ }
+}
+
+impl ShlAssign for Limb {
+ #[inline(always)]
+ fn shl_assign(&mut self, other: Self) {
+ *self = self.shl(other);
+ }
+}
+
+impl ShlAssign<usize> for Limb {
+ #[inline(always)]
+ fn shl_assign(&mut self, other: usize) {
+ *self = self.shl(Limb(other as Word));
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use crate::Limb;
+
+ #[test]
+ fn shl1() {
+ assert_eq!(Limb(1) << 1, Limb(2));
+ }
+
+ #[test]
+ fn shl2() {
+ assert_eq!(Limb(1) << 2, Limb(4));
+ }
+
+ #[test]
+ fn shl_assign1() {
+ let mut l = Limb(1);
+ l <<= 1;
+ assert_eq!(l, Limb(2));
+ }
+
+ #[test]
+ fn shl_assign2() {
+ let mut l = Limb(1);
+ l <<= 2;
+ assert_eq!(l, Limb(4));
+ }
+}
diff --git a/vendor/crypto-bigint/src/limb/shr.rs b/vendor/crypto-bigint/src/limb/shr.rs
new file mode 100644
index 0000000..7c422e0
--- /dev/null
+++ b/vendor/crypto-bigint/src/limb/shr.rs
@@ -0,0 +1,74 @@
+//! Limb right bitshift
+
+use crate::{Limb, Word};
+use core::ops::{Shr, ShrAssign};
+
+impl Limb {
+ /// Computes `self >> rhs`.
+ /// Panics if `rhs` overflows `Limb::BITS`.
+ #[inline(always)]
+ pub const fn shr(self, rhs: Self) -> Self {
+ Limb(self.0 >> rhs.0)
+ }
+}
+
+impl Shr for Limb {
+ type Output = Self;
+
+ #[inline(always)]
+ fn shr(self, rhs: Self) -> Self::Output {
+ self.shr(rhs)
+ }
+}
+
+impl Shr<usize> for Limb {
+ type Output = Self;
+
+ #[inline(always)]
+ fn shr(self, rhs: usize) -> Self::Output {
+ self.shr(Limb(rhs as Word))
+ }
+}
+
+impl ShrAssign for Limb {
+ #[inline(always)]
+ fn shr_assign(&mut self, other: Self) {
+ *self = self.shr(other);
+ }
+}
+
+impl ShrAssign<usize> for Limb {
+ #[inline(always)]
+ fn shr_assign(&mut self, other: usize) {
+ *self = self.shr(Limb(other as Word));
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use crate::Limb;
+
+ #[test]
+ fn shr1() {
+ assert_eq!(Limb(2) >> 1, Limb(1));
+ }
+
+ #[test]
+ fn shr2() {
+ assert_eq!(Limb(16) >> 2, Limb(4));
+ }
+
+ #[test]
+ fn shr_assign1() {
+ let mut l = Limb::ONE;
+ l >>= 1;
+ assert_eq!(l, Limb::ZERO);
+ }
+
+ #[test]
+ fn shr_assign2() {
+ let mut l = Limb(32);
+ l >>= 2;
+ assert_eq!(l, Limb(8));
+ }
+}
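
Together with `shl`, these shifts behave like multiplication and division by powers of two, provided the shift amount stays below `Limb::BITS`. A small sketch:

    use crypto_bigint::Limb;

    fn demo() {
        assert_eq!(Limb(3) << 4, Limb(48)); // 3 * 2^4
        assert_eq!(Limb(48) >> 4, Limb(3)); // 48 / 2^4 (truncating)
    }
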
diff --git a/vendor/crypto-bigint/src/limb/sub.rs b/vendor/crypto-bigint/src/limb/sub.rs
new file mode 100644
index 0000000..0fc7a4a
--- /dev/null
+++ b/vendor/crypto-bigint/src/limb/sub.rs
@@ -0,0 +1,182 @@
+//! Limb subtraction
+
+use crate::{Checked, CheckedSub, Limb, WideWord, Word, Wrapping, Zero};
+use core::ops::{Sub, SubAssign};
+use subtle::CtOption;
+
+impl Limb {
+ /// Computes `self - (rhs + borrow)`, returning the result along with the new borrow.
+ #[inline(always)]
+ pub const fn sbb(self, rhs: Limb, borrow: Limb) -> (Limb, Limb) {
+ let a = self.0 as WideWord;
+ let b = rhs.0 as WideWord;
+ let borrow = (borrow.0 >> (Self::BITS - 1)) as WideWord;
+ let ret = a.wrapping_sub(b + borrow);
+ (Limb(ret as Word), Limb((ret >> Self::BITS) as Word))
+ }
+
+ /// Perform saturating subtraction.
+ #[inline]
+ pub const fn saturating_sub(&self, rhs: Self) -> Self {
+ Limb(self.0.saturating_sub(rhs.0))
+ }
+
+ /// Perform wrapping subtraction, discarding underflow and wrapping around
+ /// the boundary of the type.
+ #[inline(always)]
+ pub const fn wrapping_sub(&self, rhs: Self) -> Self {
+ Limb(self.0.wrapping_sub(rhs.0))
+ }
+}
+
+impl CheckedSub for Limb {
+ type Output = Self;
+
+ #[inline]
+ fn checked_sub(&self, rhs: Self) -> CtOption<Self> {
+ let (result, underflow) = self.sbb(rhs, Limb::ZERO);
+ CtOption::new(result, underflow.is_zero())
+ }
+}
+
+impl Sub for Wrapping<Limb> {
+ type Output = Self;
+
+ fn sub(self, rhs: Self) -> Wrapping<Limb> {
+ Wrapping(self.0.wrapping_sub(rhs.0))
+ }
+}
+
+impl Sub<&Wrapping<Limb>> for Wrapping<Limb> {
+ type Output = Wrapping<Limb>;
+
+ fn sub(self, rhs: &Wrapping<Limb>) -> Wrapping<Limb> {
+ Wrapping(self.0.wrapping_sub(rhs.0))
+ }
+}
+
+impl Sub<Wrapping<Limb>> for &Wrapping<Limb> {
+ type Output = Wrapping<Limb>;
+
+ fn sub(self, rhs: Wrapping<Limb>) -> Wrapping<Limb> {
+ Wrapping(self.0.wrapping_sub(rhs.0))
+ }
+}
+
+impl Sub<&Wrapping<Limb>> for &Wrapping<Limb> {
+ type Output = Wrapping<Limb>;
+
+ fn sub(self, rhs: &Wrapping<Limb>) -> Wrapping<Limb> {
+ Wrapping(self.0.wrapping_sub(rhs.0))
+ }
+}
+
+impl SubAssign for Wrapping<Limb> {
+ fn sub_assign(&mut self, other: Self) {
+ *self = *self - other;
+ }
+}
+
+impl SubAssign<&Wrapping<Limb>> for Wrapping<Limb> {
+ fn sub_assign(&mut self, other: &Self) {
+ *self = *self - other;
+ }
+}
+
+impl Sub for Checked<Limb> {
+ type Output = Self;
+
+ fn sub(self, rhs: Self) -> Checked<Limb> {
+ Checked(
+ self.0
+ .and_then(|lhs| rhs.0.and_then(|rhs| lhs.checked_sub(rhs))),
+ )
+ }
+}
+
+impl Sub<&Checked<Limb>> for Checked<Limb> {
+ type Output = Checked<Limb>;
+
+ fn sub(self, rhs: &Checked<Limb>) -> Checked<Limb> {
+ Checked(
+ self.0
+ .and_then(|lhs| rhs.0.and_then(|rhs| lhs.checked_sub(rhs))),
+ )
+ }
+}
+
+impl Sub<Checked<Limb>> for &Checked<Limb> {
+ type Output = Checked<Limb>;
+
+ fn sub(self, rhs: Checked<Limb>) -> Checked<Limb> {
+ Checked(
+ self.0
+ .and_then(|lhs| rhs.0.and_then(|rhs| lhs.checked_sub(rhs))),
+ )
+ }
+}
+
+impl Sub<&Checked<Limb>> for &Checked<Limb> {
+ type Output = Checked<Limb>;
+
+ fn sub(self, rhs: &Checked<Limb>) -> Checked<Limb> {
+ Checked(
+ self.0
+ .and_then(|lhs| rhs.0.and_then(|rhs| lhs.checked_sub(rhs))),
+ )
+ }
+}
+
+impl SubAssign for Checked<Limb> {
+ fn sub_assign(&mut self, other: Self) {
+ *self = *self - other;
+ }
+}
+
+impl SubAssign<&Checked<Limb>> for Checked<Limb> {
+ fn sub_assign(&mut self, other: &Self) {
+ *self = *self - other;
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use crate::{CheckedSub, Limb};
+
+ #[test]
+ fn sbb_no_borrow() {
+ let (res, borrow) = Limb::ONE.sbb(Limb::ONE, Limb::ZERO);
+ assert_eq!(res, Limb::ZERO);
+ assert_eq!(borrow, Limb::ZERO);
+ }
+
+ #[test]
+ fn sbb_with_borrow() {
+ let (res, borrow) = Limb::ZERO.sbb(Limb::ONE, Limb::ZERO);
+
+ assert_eq!(res, Limb::MAX);
+ assert_eq!(borrow, Limb::MAX);
+ }
+
+ #[test]
+ fn wrapping_sub_no_borrow() {
+ assert_eq!(Limb::ONE.wrapping_sub(Limb::ONE), Limb::ZERO);
+ }
+
+ #[test]
+ fn wrapping_sub_with_borrow() {
+ assert_eq!(Limb::ZERO.wrapping_sub(Limb::ONE), Limb::MAX);
+ }
+
+ #[test]
+ fn checked_sub_ok() {
+ let result = Limb::ONE.checked_sub(Limb::ONE);
+ assert_eq!(result.unwrap(), Limb::ZERO);
+ }
+
+ #[test]
+ fn checked_sub_overflow() {
+ let result = Limb::ZERO.checked_sub(Limb::ONE);
+ assert!(!bool::from(result.is_some()));
+ }
+}
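
`sbb` is the building block for multi-limb subtraction: the borrow output of each limb feeds the borrow input of the next, and the final borrow is all-ones exactly when the overall subtraction underflowed. A sketch of chaining it by hand (the `sub2` helper is hypothetical; `Uint` does this internally):

    use crypto_bigint::Limb;

    // Subtract two 2-limb little-endian values, propagating the borrow.
    fn sub2(a: [Limb; 2], b: [Limb; 2]) -> ([Limb; 2], Limb) {
        let (r0, borrow) = a[0].sbb(b[0], Limb::ZERO);
        let (r1, borrow) = a[1].sbb(b[1], borrow);
        ([r0, r1], borrow) // borrow == Limb::MAX iff a < b
    }
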
diff --git a/vendor/crypto-bigint/src/nlimbs.rs b/vendor/crypto-bigint/src/nlimbs.rs
new file mode 100644
index 0000000..c5166e7
--- /dev/null
+++ b/vendor/crypto-bigint/src/nlimbs.rs
@@ -0,0 +1,29 @@
+/// Calculate the number of limbs required to represent the given number of bits.
+// TODO(tarcieri): replace with `generic_const_exprs` (rust-lang/rust#76560) when stable
+#[macro_export]
+macro_rules! nlimbs {
+ ($bits:expr) => {
+ $bits / $crate::Limb::BITS
+ };
+}
+
+#[cfg(test)]
+mod tests {
+ #[cfg(target_pointer_width = "32")]
+ #[test]
+ fn nlimbs_for_bits_macro() {
+ assert_eq!(nlimbs!(64), 2);
+ assert_eq!(nlimbs!(128), 4);
+ assert_eq!(nlimbs!(192), 6);
+ assert_eq!(nlimbs!(256), 8);
+ }
+
+ #[cfg(target_pointer_width = "64")]
+ #[test]
+ fn nlimbs_for_bits_macro() {
+ assert_eq!(nlimbs!(64), 1);
+ assert_eq!(nlimbs!(128), 2);
+ assert_eq!(nlimbs!(192), 3);
+ assert_eq!(nlimbs!(256), 4);
+ }
+}
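
Until `generic_const_exprs` stabilizes, this macro is the way to size a `Uint` from a bit count. A sketch (the `U256Alias` name is illustrative):

    use crypto_bigint::{nlimbs, Uint};

    // 256 / Limb::BITS limbs: 4 on 64-bit targets, 8 on 32-bit targets.
    const LIMBS_256: usize = nlimbs!(256);
    type U256Alias = Uint<LIMBS_256>;
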
diff --git a/vendor/crypto-bigint/src/non_zero.rs b/vendor/crypto-bigint/src/non_zero.rs
new file mode 100644
index 0000000..dd4294e
--- /dev/null
+++ b/vendor/crypto-bigint/src/non_zero.rs
@@ -0,0 +1,393 @@
+//! Wrapper type for non-zero integers.
+
+use crate::{CtChoice, Encoding, Integer, Limb, Uint, Zero};
+use core::{
+ fmt,
+ num::{NonZeroU128, NonZeroU16, NonZeroU32, NonZeroU64, NonZeroU8},
+ ops::Deref,
+};
+use subtle::{Choice, ConditionallySelectable, ConstantTimeEq, CtOption};
+
+#[cfg(feature = "generic-array")]
+use crate::{ArrayEncoding, ByteArray};
+
+#[cfg(feature = "rand_core")]
+use {crate::Random, rand_core::CryptoRngCore};
+
+#[cfg(feature = "serde")]
+use serdect::serde::{
+ de::{Error, Unexpected},
+ Deserialize, Deserializer, Serialize, Serializer,
+};
+
+/// Wrapper type for non-zero integers.
+#[derive(Copy, Clone, Debug, Default, Eq, PartialEq, PartialOrd, Ord)]
+pub struct NonZero<T: Zero>(T);
+
+impl NonZero<Limb> {
+ /// Creates a new non-zero limb in a const context.
+ /// The second return value is `FALSE` if `n` is zero, `TRUE` otherwise.
+ pub const fn const_new(n: Limb) -> (Self, CtChoice) {
+ (Self(n), n.ct_is_nonzero())
+ }
+}
+
+impl<const LIMBS: usize> NonZero<Uint<LIMBS>> {
+ /// Creates a new non-zero integer in a const context.
+ /// The second return value is `FALSE` if `n` is zero, `TRUE` otherwise.
+ pub const fn const_new(n: Uint<LIMBS>) -> (Self, CtChoice) {
+ (Self(n), n.ct_is_nonzero())
+ }
+}
+
+impl<T> NonZero<T>
+where
+ T: Zero,
+{
+ /// Create a new non-zero integer.
+ pub fn new(n: T) -> CtOption<Self> {
+ let is_zero = n.is_zero();
+ CtOption::new(Self(n), !is_zero)
+ }
+}
+
+impl<T> NonZero<T>
+where
+ T: Integer,
+{
+ /// The value `1`.
+ pub const ONE: Self = Self(T::ONE);
+
+ /// Maximum value this integer can express.
+ pub const MAX: Self = Self(T::MAX);
+}
+
+impl<T> NonZero<T>
+where
+ T: Encoding + Zero,
+{
+ /// Decode from big endian bytes.
+ pub fn from_be_bytes(bytes: T::Repr) -> CtOption<Self> {
+ Self::new(T::from_be_bytes(bytes))
+ }
+
+ /// Decode from little endian bytes.
+ pub fn from_le_bytes(bytes: T::Repr) -> CtOption<Self> {
+ Self::new(T::from_le_bytes(bytes))
+ }
+}
+
+#[cfg(feature = "generic-array")]
+impl<T> NonZero<T>
+where
+ T: ArrayEncoding + Zero,
+{
+ /// Decode a non-zero integer from big endian bytes.
+ pub fn from_be_byte_array(bytes: ByteArray<T>) -> CtOption<Self> {
+ Self::new(T::from_be_byte_array(bytes))
+ }
+
+ /// Decode a non-zero integer from little endian bytes.
+ pub fn from_le_byte_array(bytes: ByteArray<T>) -> CtOption<Self> {
+ Self::new(T::from_le_byte_array(bytes))
+ }
+}
+
+impl<T> AsRef<T> for NonZero<T>
+where
+ T: Zero,
+{
+ fn as_ref(&self) -> &T {
+ &self.0
+ }
+}
+
+impl<T> ConditionallySelectable for NonZero<T>
+where
+ T: ConditionallySelectable + Zero,
+{
+ fn conditional_select(a: &Self, b: &Self, choice: Choice) -> Self {
+ Self(T::conditional_select(&a.0, &b.0, choice))
+ }
+}
+
+impl<T> ConstantTimeEq for NonZero<T>
+where
+ T: Zero,
+{
+ fn ct_eq(&self, other: &Self) -> Choice {
+ self.0.ct_eq(&other.0)
+ }
+}
+
+impl<T> Deref for NonZero<T>
+where
+ T: Zero,
+{
+ type Target = T;
+
+ fn deref(&self) -> &T {
+ &self.0
+ }
+}
+
+#[cfg(feature = "rand_core")]
+impl<T> Random for NonZero<T>
+where
+ T: Random + Zero,
+{
+ /// Generate a random `NonZero<T>`.
+ fn random(mut rng: &mut impl CryptoRngCore) -> Self {
+ // Use rejection sampling to eliminate zero values.
+ // While this method isn't constant-time, the attacker shouldn't learn
+ // anything about unrelated outputs so long as `rng` is a CSRNG.
+ loop {
+ if let Some(result) = Self::new(T::random(&mut rng)).into() {
+ break result;
+ }
+ }
+ }
+}
+
+impl<T> fmt::Display for NonZero<T>
+where
+ T: fmt::Display + Zero,
+{
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt::Display::fmt(&self.0, f)
+ }
+}
+
+impl<T> fmt::Binary for NonZero<T>
+where
+ T: fmt::Binary + Zero,
+{
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt::Binary::fmt(&self.0, f)
+ }
+}
+
+impl<T> fmt::Octal for NonZero<T>
+where
+ T: fmt::Octal + Zero,
+{
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt::Octal::fmt(&self.0, f)
+ }
+}
+
+impl<T> fmt::LowerHex for NonZero<T>
+where
+ T: fmt::LowerHex + Zero,
+{
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt::LowerHex::fmt(&self.0, f)
+ }
+}
+
+impl<T> fmt::UpperHex for NonZero<T>
+where
+ T: fmt::UpperHex + Zero,
+{
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt::UpperHex::fmt(&self.0, f)
+ }
+}
+
+impl NonZero<Limb> {
+ /// Create a [`NonZero<Limb>`] from a [`NonZeroU8`] (const-friendly)
+ // TODO(tarcieri): replace with `const impl From<NonZeroU8>` when stable
+ pub const fn from_u8(n: NonZeroU8) -> Self {
+ Self(Limb::from_u8(n.get()))
+ }
+
+ /// Create a [`NonZero<Limb>`] from a [`NonZeroU16`] (const-friendly)
+ // TODO(tarcieri): replace with `const impl From<NonZeroU16>` when stable
+ pub const fn from_u16(n: NonZeroU16) -> Self {
+ Self(Limb::from_u16(n.get()))
+ }
+
+ /// Create a [`NonZero<Limb>`] from a [`NonZeroU32`] (const-friendly)
+ // TODO(tarcieri): replace with `const impl From<NonZeroU32>` when stable
+ pub const fn from_u32(n: NonZeroU32) -> Self {
+ Self(Limb::from_u32(n.get()))
+ }
+
+ /// Create a [`NonZero<Limb>`] from a [`NonZeroU64`] (const-friendly)
+ // TODO(tarcieri): replace with `const impl From<NonZeroU64>` when stable
+ #[cfg(target_pointer_width = "64")]
+ pub const fn from_u64(n: NonZeroU64) -> Self {
+ Self(Limb::from_u64(n.get()))
+ }
+}
+
+impl From<NonZeroU8> for NonZero<Limb> {
+ fn from(integer: NonZeroU8) -> Self {
+ Self::from_u8(integer)
+ }
+}
+
+impl From<NonZeroU16> for NonZero<Limb> {
+ fn from(integer: NonZeroU16) -> Self {
+ Self::from_u16(integer)
+ }
+}
+
+impl From<NonZeroU32> for NonZero<Limb> {
+ fn from(integer: NonZeroU32) -> Self {
+ Self::from_u32(integer)
+ }
+}
+
+#[cfg(target_pointer_width = "64")]
+impl From<NonZeroU64> for NonZero<Limb> {
+ fn from(integer: NonZeroU64) -> Self {
+ Self::from_u64(integer)
+ }
+}
+
+impl<const LIMBS: usize> NonZero<Uint<LIMBS>> {
+ /// Create a [`NonZero<Uint>`] from a [`Uint`] (const-friendly)
+ pub const fn from_uint(n: Uint<LIMBS>) -> Self {
+ let mut i = 0;
+ let mut found_non_zero = false;
+ while i < LIMBS {
+ if n.as_limbs()[i].0 != 0 {
+ found_non_zero = true;
+ }
+ i += 1;
+ }
+ assert!(found_non_zero, "found zero");
+ Self(n)
+ }
+
+ /// Create a [`NonZero<Uint>`] from a [`NonZeroU8`] (const-friendly)
+ // TODO(tarcieri): replace with `const impl From<NonZeroU8>` when stable
+ pub const fn from_u8(n: NonZeroU8) -> Self {
+ Self(Uint::from_u8(n.get()))
+ }
+
+ /// Create a [`NonZero<Uint>`] from a [`NonZeroU16`] (const-friendly)
+ // TODO(tarcieri): replace with `const impl From<NonZeroU16>` when stable
+ pub const fn from_u16(n: NonZeroU16) -> Self {
+ Self(Uint::from_u16(n.get()))
+ }
+
+ /// Create a [`NonZero<Uint>`] from a [`NonZeroU32`] (const-friendly)
+ // TODO(tarcieri): replace with `const impl From<NonZeroU32>` when stable
+ pub const fn from_u32(n: NonZeroU32) -> Self {
+ Self(Uint::from_u32(n.get()))
+ }
+
+ /// Create a [`NonZero<Uint>`] from a [`NonZeroU64`] (const-friendly)
+ // TODO(tarcieri): replace with `const impl From<NonZeroU64>` when stable
+ pub const fn from_u64(n: NonZeroU64) -> Self {
+ Self(Uint::from_u64(n.get()))
+ }
+
+ /// Create a [`NonZero<Uint>`] from a [`NonZeroU128`] (const-friendly)
+ // TODO(tarcieri): replace with `const impl From<NonZeroU128>` when stable
+ pub const fn from_u128(n: NonZeroU128) -> Self {
+ Self(Uint::from_u128(n.get()))
+ }
+}
+
+impl<const LIMBS: usize> From<NonZeroU8> for NonZero<Uint<LIMBS>> {
+ fn from(integer: NonZeroU8) -> Self {
+ Self::from_u8(integer)
+ }
+}
+
+impl<const LIMBS: usize> From<NonZeroU16> for NonZero<Uint<LIMBS>> {
+ fn from(integer: NonZeroU16) -> Self {
+ Self::from_u16(integer)
+ }
+}
+
+impl<const LIMBS: usize> From<NonZeroU32> for NonZero<Uint<LIMBS>> {
+ fn from(integer: NonZeroU32) -> Self {
+ Self::from_u32(integer)
+ }
+}
+
+impl<const LIMBS: usize> From<NonZeroU64> for NonZero<Uint<LIMBS>> {
+ fn from(integer: NonZeroU64) -> Self {
+ Self::from_u64(integer)
+ }
+}
+
+impl<const LIMBS: usize> From<NonZeroU128> for NonZero<Uint<LIMBS>> {
+ fn from(integer: NonZeroU128) -> Self {
+ Self::from_u128(integer)
+ }
+}
+
+#[cfg(feature = "serde")]
+impl<'de, T: Deserialize<'de> + Zero> Deserialize<'de> for NonZero<T> {
+ fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
+ where
+ D: Deserializer<'de>,
+ {
+ let value: T = T::deserialize(deserializer)?;
+
+ if bool::from(value.is_zero()) {
+ Err(D::Error::invalid_value(
+ Unexpected::Other("zero"),
+ &"a non-zero value",
+ ))
+ } else {
+ Ok(Self(value))
+ }
+ }
+}
+
+#[cfg(feature = "serde")]
+impl<T: Serialize + Zero> Serialize for NonZero<T> {
+ fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
+ where
+ S: Serializer,
+ {
+ self.0.serialize(serializer)
+ }
+}
+
+#[cfg(all(test, feature = "serde"))]
+#[allow(clippy::unwrap_used)]
+mod tests {
+ use crate::{NonZero, U64};
+ use bincode::ErrorKind;
+
+ #[test]
+ fn serde() {
+ let test =
+ Option::<NonZero<U64>>::from(NonZero::new(U64::from_u64(0x0011223344556677))).unwrap();
+
+ let serialized = bincode::serialize(&test).unwrap();
+ let deserialized: NonZero<U64> = bincode::deserialize(&serialized).unwrap();
+
+ assert_eq!(test, deserialized);
+
+ let serialized = bincode::serialize(&U64::ZERO).unwrap();
+ assert!(matches!(
+ *bincode::deserialize::<NonZero<U64>>(&serialized).unwrap_err(),
+ ErrorKind::Custom(message) if message == "invalid value: zero, expected a non-zero value"
+ ));
+ }
+
+ #[test]
+ fn serde_owned() {
+ let test =
+ Option::<NonZero<U64>>::from(NonZero::new(U64::from_u64(0x0011223344556677))).unwrap();
+
+ let serialized = bincode::serialize(&test).unwrap();
+ let deserialized: NonZero<U64> = bincode::deserialize_from(serialized.as_slice()).unwrap();
+
+ assert_eq!(test, deserialized);
+
+ let serialized = bincode::serialize(&U64::ZERO).unwrap();
+ assert!(matches!(
+ *bincode::deserialize_from::<_, NonZero<U64>>(serialized.as_slice()).unwrap_err(),
+ ErrorKind::Custom(message) if message == "invalid value: zero, expected a non-zero value"
+ ));
+ }
+}
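
The main payoff of the wrapper is panic-free division: once a divisor has been proven nonzero, dividing by the `NonZero` cannot fail. A sketch, assuming the `U256` alias defined later in this diff:

    use crypto_bigint::{NonZero, U256};

    fn checked_div(x: U256, d: U256) -> Option<U256> {
        // NonZero::new yields a CtOption that is None when d == 0.
        let d = Option::<NonZero<U256>>::from(NonZero::new(d))?;
        Some(x / d)
    }
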
diff --git a/vendor/crypto-bigint/src/traits.rs b/vendor/crypto-bigint/src/traits.rs
new file mode 100644
index 0000000..b500001
--- /dev/null
+++ b/vendor/crypto-bigint/src/traits.rs
@@ -0,0 +1,302 @@
+//! Traits provided by this crate
+
+use crate::{Limb, NonZero};
+use core::fmt::Debug;
+use core::ops::{BitAnd, BitOr, BitXor, Div, Not, Rem, Shl, Shr};
+use subtle::{
+ Choice, ConditionallySelectable, ConstantTimeEq, ConstantTimeGreater, ConstantTimeLess,
+ CtOption,
+};
+
+#[cfg(feature = "rand_core")]
+use rand_core::CryptoRngCore;
+
+/// Integer type.
+pub trait Integer:
+ 'static
+ + AsRef<[Limb]>
+ + BitAnd<Output = Self>
+ + BitOr<Output = Self>
+ + BitXor<Output = Self>
+ + for<'a> CheckedAdd<&'a Self, Output = Self>
+ + for<'a> CheckedSub<&'a Self, Output = Self>
+ + for<'a> CheckedMul<&'a Self, Output = Self>
+ + Copy
+ + ConditionallySelectable
+ + ConstantTimeEq
+ + ConstantTimeGreater
+ + ConstantTimeLess
+ + Debug
+ + Default
+ + Div<NonZero<Self>, Output = Self>
+ + Eq
+ + From<u64>
+ + Not
+ + Ord
+ + Rem<NonZero<Self>, Output = Self>
+ + Send
+ + Sized
+ + Shl<usize, Output = Self>
+ + Shr<usize, Output = Self>
+ + Sync
+ + Zero
+{
+ /// The value `1`.
+ const ONE: Self;
+
+ /// Maximum value this integer can express.
+ const MAX: Self;
+
+ /// Total size of the represented integer in bits.
+ const BITS: usize;
+
+ /// Total size of the represented integer in bytes.
+ const BYTES: usize;
+
+ /// The number of limbs used on this platform.
+ const LIMBS: usize;
+
+ /// Is this integer value an odd number?
+ ///
+ /// # Returns
+ ///
+ /// If odd, returns `Choice(1)`. Otherwise, returns `Choice(0)`.
+ fn is_odd(&self) -> Choice;
+
+ /// Is this integer value an even number?
+ ///
+ /// # Returns
+ ///
+ /// If even, returns `Choice(1)`. Otherwise, returns `Choice(0)`.
+ fn is_even(&self) -> Choice {
+ !self.is_odd()
+ }
+}
+
+/// Zero values.
+pub trait Zero: ConstantTimeEq + Sized {
+ /// The value `0`.
+ const ZERO: Self;
+
+ /// Determine if this value is equal to zero.
+ ///
+ /// # Returns
+ ///
+ /// If zero, returns `Choice(1)`. Otherwise, returns `Choice(0)`.
+ fn is_zero(&self) -> Choice {
+ self.ct_eq(&Self::ZERO)
+ }
+}
+
+/// Random number generation support.
+#[cfg(feature = "rand_core")]
+pub trait Random: Sized {
+ /// Generate a cryptographically secure random value.
+ fn random(rng: &mut impl CryptoRngCore) -> Self;
+}
+
+/// Modular random number generation support.
+#[cfg(feature = "rand_core")]
+pub trait RandomMod: Sized + Zero {
+ /// Generate a cryptographically secure random number which is less than
+ /// a given `modulus`.
+ ///
+ /// This function uses rejection sampling, a method which produces an
+ /// unbiased distribution of in-range values provided the underlying
+ /// CSRNG is unbiased, but runs in variable-time.
+ ///
+ /// The variable-time nature of the algorithm should not pose a security
+ /// issue so long as the underlying random number generator is truly a
+ /// CSRNG, where previous outputs are unrelated to subsequent
+ /// outputs and do not reveal information about the RNG's internal state.
+ fn random_mod(rng: &mut impl CryptoRngCore, modulus: &NonZero<Self>) -> Self;
+}
+
+/// Compute `self + rhs mod p`.
+pub trait AddMod<Rhs = Self> {
+ /// Output type.
+ type Output;
+
+ /// Compute `self + rhs mod p`.
+ ///
+ /// Assumes `self` and `rhs` are `< p`.
+ fn add_mod(&self, rhs: &Rhs, p: &Self) -> Self::Output;
+}
+
+/// Compute `self - rhs mod p`.
+pub trait SubMod<Rhs = Self> {
+ /// Output type.
+ type Output;
+
+ /// Compute `self - rhs mod p`.
+ ///
+ /// Assumes `self` and `rhs` are `< p`.
+ fn sub_mod(&self, rhs: &Rhs, p: &Self) -> Self::Output;
+}
+
+/// Compute `-self mod p`.
+pub trait NegMod {
+ /// Output type.
+ type Output;
+
+ /// Compute `-self mod p`.
+ #[must_use]
+ fn neg_mod(&self, p: &Self) -> Self::Output;
+}
+
+/// Compute `self * rhs mod p`.
+///
+/// Requires `p_inv = -(p^{-1} mod 2^{BITS}) mod 2^{BITS}` to be provided for efficiency.
+pub trait MulMod<Rhs = Self> {
+ /// Output type.
+ type Output;
+
+ /// Compute `self * rhs mod p`.
+ ///
+ /// Requires `p_inv = -(p^{-1} mod 2^{BITS}) mod 2^{BITS}` to be provided for efficiency.
+ fn mul_mod(&self, rhs: &Rhs, p: &Self, p_inv: Limb) -> Self::Output;
+}
+
+/// Checked addition.
+pub trait CheckedAdd<Rhs = Self>: Sized {
+ /// Output type.
+ type Output;
+
+ /// Perform checked addition, returning a [`CtOption`] which `is_some`
+ /// only if the operation did not overflow.
+ fn checked_add(&self, rhs: Rhs) -> CtOption<Self>;
+}
+
+/// Checked multiplication.
+pub trait CheckedMul<Rhs = Self>: Sized {
+ /// Output type.
+ type Output;
+
+ /// Perform checked multiplication, returning a [`CtOption`] which `is_some`
+ /// only if the operation did not overflow.
+ fn checked_mul(&self, rhs: Rhs) -> CtOption<Self>;
+}
+
+/// Checked subtraction.
+pub trait CheckedSub<Rhs = Self>: Sized {
+ /// Output type.
+ type Output;
+
+ /// Perform checked subtraction, returning a [`CtOption`] which `is_some`
+ /// only if the operation did not underflow.
+ fn checked_sub(&self, rhs: Rhs) -> CtOption<Self>;
+}
+
+/// Concatenate two numbers into a "wide" double-width value, using the `lo`
+/// value as the least significant value.
+pub trait Concat: ConcatMixed<Self, MixedOutput = Self::Output> {
+ /// Concatenated output: twice the width of `Self`.
+ type Output;
+
+ /// Concatenate the two halves, with `self` as most significant and `lo`
+ /// as the least significant.
+ fn concat(&self, lo: &Self) -> Self::Output {
+ self.concat_mixed(lo)
+ }
+}
+
+/// Concatenate two numbers into a "wide" combined-width value, using the `lo`
+/// value as the least significant value.
+pub trait ConcatMixed<Lo: ?Sized = Self> {
+ /// Concatenated output: combination of `Lo` and `Self`.
+ type MixedOutput;
+
+ /// Concatenate the two values, with `self` as most significant and `lo`
+ /// as the least significant.
+ fn concat_mixed(&self, lo: &Lo) -> Self::MixedOutput;
+}
+
+/// Split a number in half, returning the most significant half followed by
+/// the least significant.
+pub trait Split: SplitMixed<Self::Output, Self::Output> {
+ /// Split output: high/low components of the value.
+ type Output;
+
+ /// Split this number in half, returning its high and low components
+ /// respectively.
+ fn split(&self) -> (Self::Output, Self::Output) {
+ self.split_mixed()
+ }
+}
+
+/// Split a number into parts, returning the most significant part followed by
+/// the least significant.
+pub trait SplitMixed<Hi, Lo> {
+ /// Split this number into parts, returning its high and low components
+ /// respectively.
+ fn split_mixed(&self) -> (Hi, Lo);
+}
+
+/// Integers whose representation takes a bounded amount of space.
+pub trait Bounded {
+ /// Size of this integer in bits.
+ const BITS: usize;
+
+ /// Size of this integer in bytes.
+ const BYTES: usize;
+}
+
+/// Encoding support.
+pub trait Encoding: Sized {
+ /// Byte array representation.
+ type Repr: AsRef<[u8]> + AsMut<[u8]> + Copy + Clone + Sized;
+
+ /// Decode from big endian bytes.
+ fn from_be_bytes(bytes: Self::Repr) -> Self;
+
+ /// Decode from little endian bytes.
+ fn from_le_bytes(bytes: Self::Repr) -> Self;
+
+ /// Encode to big endian bytes.
+ fn to_be_bytes(&self) -> Self::Repr;
+
+ /// Encode to little endian bytes.
+ fn to_le_bytes(&self) -> Self::Repr;
+}
+
+/// Support for optimized squaring
+pub trait Square: Sized
+where
+ for<'a> &'a Self: core::ops::Mul<&'a Self, Output = Self>,
+{
+ /// Computes the same as `self.mul(self)`, but may be more efficient.
+ fn square(&self) -> Self {
+ self * self
+ }
+}
+
+/// Constant-time exponentiation.
+pub trait Pow<Exponent> {
+ /// Raises to the `exponent` power.
+ fn pow(&self, exponent: &Exponent) -> Self;
+}
+
+impl<T: PowBoundedExp<Exponent>, Exponent: Bounded> Pow<Exponent> for T {
+ fn pow(&self, exponent: &Exponent) -> Self {
+ self.pow_bounded_exp(exponent, Exponent::BITS)
+ }
+}
+
+/// Constant-time exponentiation with exponent of a bounded bit size.
+pub trait PowBoundedExp<Exponent> {
+ /// Raises to the `exponent` power,
+ /// with `exponent_bits` representing the number of (least significant) bits
+ /// to take into account for the exponent.
+ ///
+ /// NOTE: `exponent_bits` may be leaked through the timing pattern.
+ fn pow_bounded_exp(&self, exponent: &Exponent, exponent_bits: usize) -> Self;
+}
+
+/// Constant-time inversion.
+pub trait Invert: Sized {
+ /// Output of the inversion.
+ type Output;
+
+ /// Computes the inverse.
+ fn invert(&self) -> Self::Output;
+}
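
These traits allow width-generic code rather than code pinned to a concrete `Uint` size. A sketch of a function written against `Integer`:

    use crypto_bigint::Integer;

    // Works for U64, U256, ... anything implementing Integer.
    fn parity<T: Integer>(x: &T) -> &'static str {
        if bool::from(x.is_odd()) {
            "odd"
        } else {
            "even"
        }
    }
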
diff --git a/vendor/crypto-bigint/src/uint.rs b/vendor/crypto-bigint/src/uint.rs
new file mode 100644
index 0000000..a644496
--- /dev/null
+++ b/vendor/crypto-bigint/src/uint.rs
@@ -0,0 +1,491 @@
+//! Stack-allocated big unsigned integers.
+
+#![allow(clippy::needless_range_loop, clippy::many_single_char_names)]
+
+#[macro_use]
+mod macros;
+
+mod add;
+mod add_mod;
+mod bit_and;
+mod bit_not;
+mod bit_or;
+mod bit_xor;
+mod bits;
+mod cmp;
+mod concat;
+mod div;
+pub(crate) mod div_limb;
+mod encoding;
+mod from;
+mod inv_mod;
+mod mul;
+mod mul_mod;
+mod neg;
+mod neg_mod;
+mod resize;
+mod shl;
+mod shr;
+mod split;
+mod sqrt;
+mod sub;
+mod sub_mod;
+
+/// Implements modular arithmetic for constant moduli.
+pub mod modular;
+
+#[cfg(feature = "generic-array")]
+mod array;
+
+#[cfg(feature = "rand_core")]
+mod rand;
+
+use crate::{Bounded, Encoding, Integer, Limb, Word, Zero};
+use core::fmt;
+use subtle::{Choice, ConditionallySelectable};
+
+#[cfg(feature = "serde")]
+use serdect::serde::{Deserialize, Deserializer, Serialize, Serializer};
+
+#[cfg(feature = "zeroize")]
+use zeroize::DefaultIsZeroes;
+
+/// Stack-allocated big unsigned integer.
+///
+/// Generic over the given number of `LIMBS`
+///
+/// # Encoding support
+/// This type supports many different types of encodings, either via the
+/// [`Encoding`][`crate::Encoding`] trait or various `const fn` decoding and
+/// encoding functions that can be used with [`Uint`] constants.
+///
+/// Optional crate features for encoding (off-by-default):
+/// - `generic-array`: enables [`ArrayEncoding`][`crate::ArrayEncoding`] trait which can be used to
+/// serialize a [`Uint`] as `GenericArray<u8, N>` and an [`ArrayDecoding`][`crate::ArrayDecoding`] trait
+/// which can be used to deserialize a `GenericArray<u8, N>` as a [`Uint`].
+/// - `rlp`: support for [Recursive Length Prefix (RLP)][RLP] encoding.
+///
+/// [RLP]: https://eth.wiki/fundamentals/rlp
+// TODO(tarcieri): make generic around a specified number of bits.
+// Our PartialEq impl only differs from the default one by being constant-time, so this is safe
+#[allow(clippy::derived_hash_with_manual_eq)]
+#[derive(Copy, Clone, Hash)]
+pub struct Uint<const LIMBS: usize> {
+ /// Inner limb array. Stored from least significant to most significant.
+ limbs: [Limb; LIMBS],
+}
+
+impl<const LIMBS: usize> Uint<LIMBS> {
+ /// The value `0`.
+ pub const ZERO: Self = Self::from_u8(0);
+
+ /// The value `1`.
+ pub const ONE: Self = Self::from_u8(1);
+
+ /// Maximum value this [`Uint`] can express.
+ pub const MAX: Self = Self {
+ limbs: [Limb::MAX; LIMBS],
+ };
+
+ /// Total size of the represented integer in bits.
+ pub const BITS: usize = LIMBS * Limb::BITS;
+
+ /// Bit size of `BITS`.
+ // Note: assumes the type of `BITS` is `usize`. Any way to assert that?
+ pub(crate) const LOG2_BITS: usize = (usize::BITS - Self::BITS.leading_zeros()) as usize;
+
+ /// Total size of the represented integer in bytes.
+ pub const BYTES: usize = LIMBS * Limb::BYTES;
+
+ /// The number of limbs used on this platform.
+ pub const LIMBS: usize = LIMBS;
+
+ /// Const-friendly [`Uint`] constructor.
+ pub const fn new(limbs: [Limb; LIMBS]) -> Self {
+ Self { limbs }
+ }
+
+ /// Create a [`Uint`] from an array of [`Word`]s (i.e. word-sized unsigned
+ /// integers).
+ #[inline]
+ pub const fn from_words(arr: [Word; LIMBS]) -> Self {
+ let mut limbs = [Limb::ZERO; LIMBS];
+ let mut i = 0;
+
+ while i < LIMBS {
+ limbs[i] = Limb(arr[i]);
+ i += 1;
+ }
+
+ Self { limbs }
+ }
+
+ /// Create an array of [`Word`]s (i.e. word-sized unsigned integers) from
+ /// a [`Uint`].
+ #[inline]
+ pub const fn to_words(self) -> [Word; LIMBS] {
+ let mut arr = [0; LIMBS];
+ let mut i = 0;
+
+ while i < LIMBS {
+ arr[i] = self.limbs[i].0;
+ i += 1;
+ }
+
+ arr
+ }
+
+ /// Borrow the inner limbs as an array of [`Word`]s.
+ pub const fn as_words(&self) -> &[Word; LIMBS] {
+ // SAFETY: `Limb` is a `repr(transparent)` newtype for `Word`
+ #[allow(trivial_casts, unsafe_code)]
+ unsafe {
+ &*((&self.limbs as *const _) as *const [Word; LIMBS])
+ }
+ }
+
+ /// Borrow the inner limbs as a mutable array of [`Word`]s.
+ pub fn as_words_mut(&mut self) -> &mut [Word; LIMBS] {
+ // SAFETY: `Limb` is a `repr(transparent)` newtype for `Word`
+ #[allow(trivial_casts, unsafe_code)]
+ unsafe {
+ &mut *((&mut self.limbs as *mut _) as *mut [Word; LIMBS])
+ }
+ }
+
+ /// Borrow the limbs of this [`Uint`].
+ pub const fn as_limbs(&self) -> &[Limb; LIMBS] {
+ &self.limbs
+ }
+
+ /// Borrow the limbs of this [`Uint`] mutably.
+ pub fn as_limbs_mut(&mut self) -> &mut [Limb; LIMBS] {
+ &mut self.limbs
+ }
+
+ /// Convert this [`Uint`] into its inner limbs.
+ pub const fn to_limbs(self) -> [Limb; LIMBS] {
+ self.limbs
+ }
+}
+
+impl<const LIMBS: usize> AsRef<[Word; LIMBS]> for Uint<LIMBS> {
+ fn as_ref(&self) -> &[Word; LIMBS] {
+ self.as_words()
+ }
+}
+
+impl<const LIMBS: usize> AsMut<[Word; LIMBS]> for Uint<LIMBS> {
+ fn as_mut(&mut self) -> &mut [Word; LIMBS] {
+ self.as_words_mut()
+ }
+}
+
+impl<const LIMBS: usize> AsRef<[Limb]> for Uint<LIMBS> {
+ fn as_ref(&self) -> &[Limb] {
+ self.as_limbs()
+ }
+}
+
+impl<const LIMBS: usize> AsMut<[Limb]> for Uint<LIMBS> {
+ fn as_mut(&mut self) -> &mut [Limb] {
+ self.as_limbs_mut()
+ }
+}
+
+impl<const LIMBS: usize> ConditionallySelectable for Uint<LIMBS> {
+ fn conditional_select(a: &Self, b: &Self, choice: Choice) -> Self {
+ let mut limbs = [Limb::ZERO; LIMBS];
+
+ for i in 0..LIMBS {
+ limbs[i] = Limb::conditional_select(&a.limbs[i], &b.limbs[i], choice);
+ }
+
+ Self { limbs }
+ }
+}
+
+impl<const LIMBS: usize> Default for Uint<LIMBS> {
+ fn default() -> Self {
+ Self::ZERO
+ }
+}
+
+impl<const LIMBS: usize> Integer for Uint<LIMBS> {
+ const ONE: Self = Self::ONE;
+ const MAX: Self = Self::MAX;
+ const BITS: usize = Self::BITS;
+ const BYTES: usize = Self::BYTES;
+ const LIMBS: usize = Self::LIMBS;
+
+ fn is_odd(&self) -> Choice {
+ self.limbs
+ .first()
+ .map(|limb| limb.is_odd())
+ .unwrap_or_else(|| Choice::from(0))
+ }
+}
+
+impl<const LIMBS: usize> Zero for Uint<LIMBS> {
+ const ZERO: Self = Self::ZERO;
+}
+
+impl<const LIMBS: usize> Bounded for Uint<LIMBS> {
+ const BITS: usize = Self::BITS;
+ const BYTES: usize = Self::BYTES;
+}
+
+impl<const LIMBS: usize> fmt::Debug for Uint<LIMBS> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(f, "Uint(0x{self:X})")
+ }
+}
+
+impl<const LIMBS: usize> fmt::Display for Uint<LIMBS> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt::UpperHex::fmt(self, f)
+ }
+}
+
+impl<const LIMBS: usize> fmt::LowerHex for Uint<LIMBS> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ for limb in self.limbs.iter().rev() {
+ fmt::LowerHex::fmt(limb, f)?;
+ }
+ Ok(())
+ }
+}
+
+impl<const LIMBS: usize> fmt::UpperHex for Uint<LIMBS> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ for limb in self.limbs.iter().rev() {
+ fmt::UpperHex::fmt(limb, f)?;
+ }
+ Ok(())
+ }
+}
+
+#[cfg(feature = "serde")]
+impl<'de, const LIMBS: usize> Deserialize<'de> for Uint<LIMBS>
+where
+ Uint<LIMBS>: Encoding,
+{
+ fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
+ where
+ D: Deserializer<'de>,
+ {
+ let mut buffer = Self::ZERO.to_le_bytes();
+ serdect::array::deserialize_hex_or_bin(buffer.as_mut(), deserializer)?;
+
+ Ok(Self::from_le_bytes(buffer))
+ }
+}
+
+#[cfg(feature = "serde")]
+impl<const LIMBS: usize> Serialize for Uint<LIMBS>
+where
+ Uint<LIMBS>: Encoding,
+{
+ fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
+ where
+ S: Serializer,
+ {
+ serdect::array::serialize_hex_lower_or_bin(&Encoding::to_le_bytes(self), serializer)
+ }
+}
+
+#[cfg(feature = "zeroize")]
+impl<const LIMBS: usize> DefaultIsZeroes for Uint<LIMBS> {}
+
+// TODO(tarcieri): use `generic_const_exprs` when stable to make generic around bits.
+impl_uint_aliases! {
+ (U64, 64, "64-bit"),
+ (U128, 128, "128-bit"),
+ (U192, 192, "192-bit"),
+ (U256, 256, "256-bit"),
+ (U320, 320, "320-bit"),
+ (U384, 384, "384-bit"),
+ (U448, 448, "448-bit"),
+ (U512, 512, "512-bit"),
+ (U576, 576, "576-bit"),
+ (U640, 640, "640-bit"),
+ (U704, 704, "704-bit"),
+ (U768, 768, "768-bit"),
+ (U832, 832, "832-bit"),
+ (U896, 896, "896-bit"),
+ (U960, 960, "960-bit"),
+ (U1024, 1024, "1024-bit"),
+ (U1280, 1280, "1280-bit"),
+ (U1536, 1536, "1536-bit"),
+ (U1792, 1792, "1792-bit"),
+ (U2048, 2048, "2048-bit"),
+ (U3072, 3072, "3072-bit"),
+ (U3584, 3584, "3584-bit"),
+ (U4096, 4096, "4096-bit"),
+ (U4224, 4224, "4224-bit"),
+ (U4352, 4352, "4352-bit"),
+ (U6144, 6144, "6144-bit"),
+ (U8192, 8192, "8192-bit"),
+ (U16384, 16384, "16384-bit"),
+ (U32768, 32768, "32768-bit")
+}
+
+#[cfg(target_pointer_width = "32")]
+impl_uint_aliases! {
+ (U224, 224, "224-bit"), // For NIST P-224
+ (U544, 544, "544-bit") // For NIST P-521
+}
+
+#[cfg(target_pointer_width = "32")]
+impl_uint_concat_split_even! {
+ U64,
+}
+
+// Implement concat and split for double-width Uint sizes: these should be
+// multiples of 128 bits.
+impl_uint_concat_split_even! {
+ U128,
+ U256,
+ U384,
+ U512,
+ U640,
+ U768,
+ U896,
+ U1024,
+ U1280,
+ U1536,
+ U1792,
+ U2048,
+ U3072,
+ U3584,
+ U4096,
+ U4224,
+ U4352,
+ U6144,
+ U8192,
+ U16384,
+}
+
+// Implement mixed concat and split for combinations not implemented by
+// impl_uint_concat_split_even. The numbers represent the size of each
+// component Uint in multiple of 64 bits. For example,
+// (U256, [1, 3]) will allow splitting U256 into (U64, U192) as well as
+// (U192, U64), while the (U128, U128) combination is already covered.
+impl_uint_concat_split_mixed! {
+ (U192, [1, 2]),
+ (U256, [1, 3]),
+ (U320, [1, 2, 3, 4]),
+ (U384, [1, 2, 4, 5]),
+ (U448, [1, 2, 3, 4, 5, 6]),
+ (U512, [1, 2, 3, 5, 6, 7]),
+ (U576, [1, 2, 3, 4, 5, 6, 7, 8]),
+ (U640, [1, 2, 3, 4, 6, 7, 8, 9]),
+ (U704, [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]),
+ (U768, [1, 2, 3, 4, 5, 7, 8, 9, 10, 11]),
+ (U832, [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]),
+ (U896, [1, 2, 3, 4, 5, 6, 8, 9, 10, 11, 12, 13]),
+ (U960, [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14]),
+ (U1024, [1, 2, 3, 4, 5, 6, 7, 9, 10, 11, 12, 13, 14, 15]),
+}
+
+#[cfg(feature = "extra-sizes")]
+mod extra_sizes;
+#[cfg(feature = "extra-sizes")]
+pub use extra_sizes::*;
+
+#[cfg(test)]
+#[allow(clippy::unwrap_used)]
+mod tests {
+ use crate::{Encoding, U128};
+ use subtle::ConditionallySelectable;
+
+ #[cfg(feature = "alloc")]
+ use alloc::format;
+
+ #[cfg(feature = "serde")]
+ use crate::U64;
+
+ #[cfg(feature = "alloc")]
+ #[test]
+ fn debug() {
+ let hex = "AAAAAAAABBBBBBBBCCCCCCCCDDDDDDDD";
+ let n = U128::from_be_hex(hex);
+
+ assert_eq!(
+ format!("{:?}", n),
+ "Uint(0xAAAAAAAABBBBBBBBCCCCCCCCDDDDDDDD)"
+ );
+ }
+
+ #[cfg(feature = "alloc")]
+ #[test]
+ fn display() {
+ let hex = "AAAAAAAABBBBBBBBCCCCCCCCDDDDDDDD";
+ let n = U128::from_be_hex(hex);
+
+ use alloc::string::ToString;
+ assert_eq!(hex, n.to_string());
+
+ let hex = "AAAAAAAABBBBBBBB0000000000000000";
+ let n = U128::from_be_hex(hex);
+ assert_eq!(hex, n.to_string());
+
+ let hex = "AAAAAAAABBBBBBBB00000000DDDDDDDD";
+ let n = U128::from_be_hex(hex);
+ assert_eq!(hex, n.to_string());
+
+ let hex = "AAAAAAAABBBBBBBB0CCCCCCCDDDDDDDD";
+ let n = U128::from_be_hex(hex);
+ assert_eq!(hex, n.to_string());
+ }
+
+ #[test]
+ fn from_bytes() {
+ let a = U128::from_be_hex("AAAAAAAABBBBBBBB0CCCCCCCDDDDDDDD");
+
+ let be_bytes = a.to_be_bytes();
+ let le_bytes = a.to_le_bytes();
+ for i in 0..16 {
+ assert_eq!(le_bytes[i], be_bytes[15 - i]);
+ }
+
+ let a_from_be = U128::from_be_bytes(be_bytes);
+ let a_from_le = U128::from_le_bytes(le_bytes);
+ assert_eq!(a_from_be, a_from_le);
+ assert_eq!(a_from_be, a);
+ }
+
+ #[test]
+ fn conditional_select() {
+ let a = U128::from_be_hex("00002222444466668888AAAACCCCEEEE");
+ let b = U128::from_be_hex("11113333555577779999BBBBDDDDFFFF");
+
+ let select_0 = U128::conditional_select(&a, &b, 0.into());
+ assert_eq!(a, select_0);
+
+ let select_1 = U128::conditional_select(&a, &b, 1.into());
+ assert_eq!(b, select_1);
+ }
+
+ #[cfg(feature = "serde")]
+ #[test]
+ fn serde() {
+ const TEST: U64 = U64::from_u64(0x0011223344556677);
+
+ let serialized = bincode::serialize(&TEST).unwrap();
+ let deserialized: U64 = bincode::deserialize(&serialized).unwrap();
+
+ assert_eq!(TEST, deserialized);
+ }
+
+ #[cfg(feature = "serde")]
+ #[test]
+ fn serde_owned() {
+ const TEST: U64 = U64::from_u64(0x0011223344556677);
+
+ let serialized = bincode::serialize(&TEST).unwrap();
+ let deserialized: U64 = bincode::deserialize_from(serialized.as_slice()).unwrap();
+
+ assert_eq!(TEST, deserialized);
+ }
+}
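
Limbs are stored least significant first, which the word-level constructors make explicit. A sketch, assuming a 64-bit target where `U256` has four limbs:

    use crypto_bigint::U256;

    fn demo() {
        // Least significant word first: this is the value 1, not 1 << 192.
        let x = U256::from_words([1, 0, 0, 0]);
        assert_eq!(x, U256::ONE);
        assert_eq!(x.to_words()[0], 1);
    }
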
diff --git a/vendor/crypto-bigint/src/uint/add.rs b/vendor/crypto-bigint/src/uint/add.rs
new file mode 100644
index 0000000..e4f7bfa
--- /dev/null
+++ b/vendor/crypto-bigint/src/uint/add.rs
@@ -0,0 +1,206 @@
+//! [`Uint`] addition operations.
+
+use crate::{Checked, CheckedAdd, CtChoice, Limb, Uint, Wrapping, Zero};
+use core::ops::{Add, AddAssign};
+use subtle::CtOption;
+
+impl<const LIMBS: usize> Uint<LIMBS> {
+ /// Computes `a + b + carry`, returning the result along with the new carry.
+ #[inline(always)]
+ pub const fn adc(&self, rhs: &Self, mut carry: Limb) -> (Self, Limb) {
+ let mut limbs = [Limb::ZERO; LIMBS];
+ let mut i = 0;
+
+ while i < LIMBS {
+ let (w, c) = self.limbs[i].adc(rhs.limbs[i], carry);
+ limbs[i] = w;
+ carry = c;
+ i += 1;
+ }
+
+ (Self { limbs }, carry)
+ }
+
+ /// Perform saturating addition, returning `MAX` on overflow.
+ pub const fn saturating_add(&self, rhs: &Self) -> Self {
+ let (res, overflow) = self.adc(rhs, Limb::ZERO);
+ Self::ct_select(&res, &Self::MAX, CtChoice::from_lsb(overflow.0))
+ }
+
+ /// Perform wrapping addition, discarding overflow.
+ pub const fn wrapping_add(&self, rhs: &Self) -> Self {
+ self.adc(rhs, Limb::ZERO).0
+ }
+
+ /// Perform wrapping addition, returning the truthy value as the second element of the tuple
+ /// if an overflow has occurred.
+ pub(crate) const fn conditional_wrapping_add(
+ &self,
+ rhs: &Self,
+ choice: CtChoice,
+ ) -> (Self, CtChoice) {
+ let actual_rhs = Uint::ct_select(&Uint::ZERO, rhs, choice);
+ let (sum, carry) = self.adc(&actual_rhs, Limb::ZERO);
+ (sum, CtChoice::from_lsb(carry.0))
+ }
+}
+
+impl<const LIMBS: usize> CheckedAdd<&Uint<LIMBS>> for Uint<LIMBS> {
+ type Output = Self;
+
+ fn checked_add(&self, rhs: &Self) -> CtOption<Self> {
+ let (result, carry) = self.adc(rhs, Limb::ZERO);
+ CtOption::new(result, carry.is_zero())
+ }
+}
+
+impl<const LIMBS: usize> Add for Wrapping<Uint<LIMBS>> {
+ type Output = Self;
+
+ fn add(self, rhs: Self) -> Wrapping<Uint<LIMBS>> {
+ Wrapping(self.0.wrapping_add(&rhs.0))
+ }
+}
+
+impl<const LIMBS: usize> Add<&Wrapping<Uint<LIMBS>>> for Wrapping<Uint<LIMBS>> {
+ type Output = Wrapping<Uint<LIMBS>>;
+
+ fn add(self, rhs: &Wrapping<Uint<LIMBS>>) -> Wrapping<Uint<LIMBS>> {
+ Wrapping(self.0.wrapping_add(&rhs.0))
+ }
+}
+
+impl<const LIMBS: usize> Add<Wrapping<Uint<LIMBS>>> for &Wrapping<Uint<LIMBS>> {
+ type Output = Wrapping<Uint<LIMBS>>;
+
+ fn add(self, rhs: Wrapping<Uint<LIMBS>>) -> Wrapping<Uint<LIMBS>> {
+ Wrapping(self.0.wrapping_add(&rhs.0))
+ }
+}
+
+impl<const LIMBS: usize> Add<&Wrapping<Uint<LIMBS>>> for &Wrapping<Uint<LIMBS>> {
+ type Output = Wrapping<Uint<LIMBS>>;
+
+ fn add(self, rhs: &Wrapping<Uint<LIMBS>>) -> Wrapping<Uint<LIMBS>> {
+ Wrapping(self.0.wrapping_add(&rhs.0))
+ }
+}
+
+impl<const LIMBS: usize> AddAssign for Wrapping<Uint<LIMBS>> {
+ fn add_assign(&mut self, other: Self) {
+ *self = *self + other;
+ }
+}
+
+impl<const LIMBS: usize> AddAssign<&Wrapping<Uint<LIMBS>>> for Wrapping<Uint<LIMBS>> {
+ fn add_assign(&mut self, other: &Self) {
+ *self = *self + other;
+ }
+}
+
+impl<const LIMBS: usize> Add for Checked<Uint<LIMBS>> {
+ type Output = Self;
+
+ fn add(self, rhs: Self) -> Checked<Uint<LIMBS>> {
+ Checked(
+ self.0
+ .and_then(|lhs| rhs.0.and_then(|rhs| lhs.checked_add(&rhs))),
+ )
+ }
+}
+
+impl<const LIMBS: usize> Add<&Checked<Uint<LIMBS>>> for Checked<Uint<LIMBS>> {
+ type Output = Checked<Uint<LIMBS>>;
+
+ fn add(self, rhs: &Checked<Uint<LIMBS>>) -> Checked<Uint<LIMBS>> {
+ Checked(
+ self.0
+ .and_then(|lhs| rhs.0.and_then(|rhs| lhs.checked_add(&rhs))),
+ )
+ }
+}
+
+impl<const LIMBS: usize> Add<Checked<Uint<LIMBS>>> for &Checked<Uint<LIMBS>> {
+ type Output = Checked<Uint<LIMBS>>;
+
+ fn add(self, rhs: Checked<Uint<LIMBS>>) -> Checked<Uint<LIMBS>> {
+ Checked(
+ self.0
+ .and_then(|lhs| rhs.0.and_then(|rhs| lhs.checked_add(&rhs))),
+ )
+ }
+}
+
+impl<const LIMBS: usize> Add<&Checked<Uint<LIMBS>>> for &Checked<Uint<LIMBS>> {
+ type Output = Checked<Uint<LIMBS>>;
+
+ fn add(self, rhs: &Checked<Uint<LIMBS>>) -> Checked<Uint<LIMBS>> {
+ Checked(
+ self.0
+ .and_then(|lhs| rhs.0.and_then(|rhs| lhs.checked_add(&rhs))),
+ )
+ }
+}
+
+impl<const LIMBS: usize> AddAssign for Checked<Uint<LIMBS>> {
+ fn add_assign(&mut self, other: Self) {
+ *self = *self + other;
+ }
+}
+
+impl<const LIMBS: usize> AddAssign<&Checked<Uint<LIMBS>>> for Checked<Uint<LIMBS>> {
+ fn add_assign(&mut self, other: &Self) {
+ *self = *self + other;
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use crate::{CheckedAdd, Limb, U128};
+
+ #[test]
+ fn adc_no_carry() {
+ let (res, carry) = U128::ZERO.adc(&U128::ONE, Limb::ZERO);
+ assert_eq!(res, U128::ONE);
+ assert_eq!(carry, Limb::ZERO);
+ }
+
+ #[test]
+ fn adc_with_carry() {
+ let (res, carry) = U128::MAX.adc(&U128::ONE, Limb::ZERO);
+ assert_eq!(res, U128::ZERO);
+ assert_eq!(carry, Limb::ONE);
+ }
+
+ #[test]
+ fn saturating_add_no_carry() {
+ assert_eq!(U128::ZERO.saturating_add(&U128::ONE), U128::ONE);
+ }
+
+ #[test]
+ fn saturating_add_with_carry() {
+ assert_eq!(U128::MAX.saturating_add(&U128::ONE), U128::MAX);
+ }
+
+ #[test]
+ fn wrapping_add_no_carry() {
+ assert_eq!(U128::ZERO.wrapping_add(&U128::ONE), U128::ONE);
+ }
+
+ #[test]
+ fn wrapping_add_with_carry() {
+ assert_eq!(U128::MAX.wrapping_add(&U128::ONE), U128::ZERO);
+ }
+
+ #[test]
+ fn checked_add_ok() {
+ let result = U128::ZERO.checked_add(&U128::ONE);
+ assert_eq!(result.unwrap(), U128::ONE);
+ }
+
+ #[test]
+ fn checked_add_overflow() {
+ let result = U128::MAX.checked_add(&U128::ONE);
+ assert!(!bool::from(result.is_some()));
+ }
+}
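
The `Wrapping` and `Checked` newtypes give these operations ordinary operator syntax with an explicit overflow policy. A sketch, assuming `Checked::new` from this crate's `checked` module:

    use crypto_bigint::{Checked, Wrapping, U128};

    fn demo() {
        // Wrapping: overflow silently wraps around.
        let w = Wrapping(U128::MAX) + Wrapping(U128::ONE);
        assert_eq!(w.0, U128::ZERO);

        // Checked: overflow poisons the result (the inner CtOption is None).
        let c = Checked::new(U128::MAX) + Checked::new(U128::ONE);
        assert!(bool::from(c.0.is_none()));
    }
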
diff --git a/vendor/crypto-bigint/src/uint/add_mod.rs b/vendor/crypto-bigint/src/uint/add_mod.rs
new file mode 100644
index 0000000..70674f5
--- /dev/null
+++ b/vendor/crypto-bigint/src/uint/add_mod.rs
@@ -0,0 +1,128 @@
+//! [`Uint`] modular addition operations.
+
+use crate::{AddMod, Limb, Uint};
+
+impl<const LIMBS: usize> Uint<LIMBS> {
+ /// Computes `self + rhs mod p` in constant time.
+ ///
+ /// Assumes `self + rhs` as unbounded integer is `< 2p`.
+ pub const fn add_mod(&self, rhs: &Uint<LIMBS>, p: &Uint<LIMBS>) -> Uint<LIMBS> {
+ let (w, carry) = self.adc(rhs, Limb::ZERO);
+
+ // Attempt to subtract the modulus, to ensure the result is in the field.
+ let (w, borrow) = w.sbb(p, Limb::ZERO);
+ let (_, borrow) = carry.sbb(Limb::ZERO, borrow);
+
+ // If underflow occurred on the final limb, borrow = 0xfff...fff, otherwise
+ // borrow = 0x000...000. Thus, we use it as a mask to conditionally add the
+ // modulus.
+ let mask = Uint::from_words([borrow.0; LIMBS]);
+
+ w.wrapping_add(&p.bitand(&mask))
+ }
+
+ /// Computes `self + rhs mod p` in constant time for the special modulus
+ /// `p = MAX+1-c` where `c` is small enough to fit in a single [`Limb`].
+ ///
+ /// Assumes `self + rhs` as unbounded integer is `< 2p`.
+ pub const fn add_mod_special(&self, rhs: &Self, c: Limb) -> Self {
+ // `Uint::adc` also works with a carry greater than 1.
+ let (out, carry) = self.adc(rhs, c);
+
+ // If overflow occurred, then above addition of `c` already accounts
+ // for the overflow. Otherwise, we need to subtract `c` again, which
+ // in that case cannot underflow.
+ let l = carry.0.wrapping_sub(1) & c.0;
+ out.wrapping_sub(&Uint::from_word(l))
+ }
+}
+
+impl<const LIMBS: usize> AddMod for Uint<LIMBS> {
+ type Output = Self;
+
+ fn add_mod(&self, rhs: &Self, p: &Self) -> Self {
+ debug_assert!(self < p);
+ debug_assert!(rhs < p);
+ self.add_mod(rhs, p)
+ }
+}
+
+#[cfg(all(test, feature = "rand"))]
+mod tests {
+ use crate::{Limb, NonZero, Random, RandomMod, Uint, U256};
+ use rand_core::SeedableRng;
+
+ // TODO(tarcieri): additional tests + proptests
+
+ #[test]
+ fn add_mod_nist_p256() {
+ let a =
+ U256::from_be_hex("44acf6b7e36c1342c2c5897204fe09504e1e2efb1a900377dbc4e7a6a133ec56");
+ let b =
+ U256::from_be_hex("d5777c45019673125ad240f83094d4252d829516fac8601ed01979ec1ec1a251");
+ let n =
+ U256::from_be_hex("ffffffff00000000ffffffffffffffffbce6faada7179e84f3b9cac2fc632551");
+
+ let actual = a.add_mod(&b, &n);
+ let expected =
+ U256::from_be_hex("1a2472fde50286541d97ca6a3592dd75beb9c9646e40c511b82496cfc3926956");
+
+ assert_eq!(expected, actual);
+ }
+
+ macro_rules! test_add_mod_special {
+ ($size:expr, $test_name:ident) => {
+ #[test]
+ fn $test_name() {
+ let mut rng = rand_chacha::ChaCha8Rng::seed_from_u64(1);
+ let moduli = [
+ NonZero::<Limb>::random(&mut rng),
+ NonZero::<Limb>::random(&mut rng),
+ ];
+
+ for special in &moduli {
+ let p = &NonZero::new(Uint::ZERO.wrapping_sub(&Uint::from_word(special.0)))
+ .unwrap();
+
+ let minus_one = p.wrapping_sub(&Uint::ONE);
+
+ let base_cases = [
+ (Uint::ZERO, Uint::ZERO, Uint::ZERO),
+ (Uint::ONE, Uint::ZERO, Uint::ONE),
+ (Uint::ZERO, Uint::ONE, Uint::ONE),
+ (minus_one, Uint::ONE, Uint::ZERO),
+ (Uint::ONE, minus_one, Uint::ZERO),
+ ];
+ for (a, b, c) in &base_cases {
+ let x = a.add_mod_special(b, *special.as_ref());
+ assert_eq!(*c, x, "{} + {} mod {} = {} != {}", a, b, p, x, c);
+ }
+
+ for _i in 0..100 {
+ let a = Uint::<$size>::random_mod(&mut rng, p);
+ let b = Uint::<$size>::random_mod(&mut rng, p);
+
+ let c = a.add_mod_special(&b, *special.as_ref());
+ assert!(c < **p, "not reduced: {} >= {} ", c, p);
+
+ let expected = a.add_mod(&b, p);
+ assert_eq!(c, expected, "incorrect result");
+ }
+ }
+ }
+ };
+ }
+
+ test_add_mod_special!(1, add_mod_special_1);
+ test_add_mod_special!(2, add_mod_special_2);
+ test_add_mod_special!(3, add_mod_special_3);
+ test_add_mod_special!(4, add_mod_special_4);
+ test_add_mod_special!(5, add_mod_special_5);
+ test_add_mod_special!(6, add_mod_special_6);
+ test_add_mod_special!(7, add_mod_special_7);
+ test_add_mod_special!(8, add_mod_special_8);
+ test_add_mod_special!(9, add_mod_special_9);
+ test_add_mod_special!(10, add_mod_special_10);
+ test_add_mod_special!(11, add_mod_special_11);
+ test_add_mod_special!(12, add_mod_special_12);
+}
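
Since `add_mod` performs a single conditional subtraction of `p`, both operands must already be reduced so the unbounded sum stays below `2p`. A sketch with operands at the boundary:

    use crypto_bigint::U256;

    fn demo() {
        // The NIST P-256 group order, as in the test above.
        let p = U256::from_be_hex(
            "ffffffff00000000ffffffffffffffffbce6faada7179e84f3b9cac2fc632551",
        );
        let a = p.wrapping_sub(&U256::ONE); // p - 1, i.e. -1 (mod p)
        assert_eq!(a.add_mod(&U256::ONE, &p), U256::ZERO); // wraps to zero
    }
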
diff --git a/vendor/crypto-bigint/src/uint/array.rs b/vendor/crypto-bigint/src/uint/array.rs
new file mode 100644
index 0000000..a23e84e
--- /dev/null
+++ b/vendor/crypto-bigint/src/uint/array.rs
@@ -0,0 +1,193 @@
+//! `generic-array` integration with `Uint`.
+// TODO(tarcieri): completely phase out `generic-array` when const generics are powerful enough
+
+use crate::{ArrayDecoding, ArrayEncoding, ByteArray};
+use generic_array::{typenum, GenericArray};
+
+macro_rules! impl_uint_array_encoding {
+ ($(($uint:ident, $bytes:path)),+) => {
+ $(
+ impl ArrayEncoding for super::$uint {
+ type ByteSize = $bytes;
+
+ #[inline]
+ fn from_be_byte_array(bytes: ByteArray<Self>) -> Self {
+ Self::from_be_slice(&bytes)
+ }
+
+ #[inline]
+ fn from_le_byte_array(bytes: ByteArray<Self>) -> Self {
+ Self::from_le_slice(&bytes)
+ }
+
+ #[inline]
+ fn to_be_byte_array(&self) -> ByteArray<Self> {
+ let mut result = GenericArray::default();
+ self.write_be_bytes(&mut result);
+ result
+ }
+
+ #[inline]
+ fn to_le_byte_array(&self) -> ByteArray<Self> {
+ let mut result = GenericArray::default();
+ self.write_le_bytes(&mut result);
+ result
+ }
+ }
+
+ impl ArrayDecoding for GenericArray<u8, $bytes> {
+ type Output = super::$uint;
+
+ fn into_uint_be(self) -> Self::Output {
+ Self::Output::from_be_byte_array(self)
+ }
+
+ fn into_uint_le(self) -> Self::Output {
+ Self::Output::from_le_byte_array(self)
+ }
+ }
+ )+
+ };
+}
+
+// TODO(tarcieri): use `generic_const_exprs` when stable to make generic around bits.
+impl_uint_array_encoding! {
+ (U64, typenum::U8),
+ (U128, typenum::U16),
+ (U192, typenum::U24),
+ (U256, typenum::U32),
+ (U384, typenum::U48),
+ (U448, typenum::U56),
+ (U512, typenum::U64),
+ (U576, typenum::U72),
+ (U768, typenum::U96),
+ (U896, typenum::U112),
+ (U1024, typenum::U128),
+ (U1536, typenum::U192),
+ (U1792, typenum::U224),
+ (U2048, typenum::U256),
+ (U3072, typenum::U384),
+ (U3584, typenum::U448),
+ (U4096, typenum::U512),
+ (U6144, typenum::U768),
+ (U8192, typenum::U1024)
+}
+
+#[cfg(target_pointer_width = "32")]
+impl_uint_array_encoding! {
+ (U224, typenum::U28), // For NIST P-224
+ (U544, typenum::U68) // For NIST P-521
+}
+
+#[cfg(test)]
+mod tests {
+ use crate::{ArrayDecoding, ArrayEncoding, Limb};
+ use hex_literal::hex;
+
+ #[cfg(target_pointer_width = "32")]
+ use crate::U64 as UintEx;
+
+ #[cfg(target_pointer_width = "64")]
+ use crate::U128 as UintEx;
+
+ /// Byte array that corresponds to `UintEx`
+ type ByteArray = crate::ByteArray<UintEx>;
+
+ #[test]
+ #[cfg(target_pointer_width = "32")]
+ fn from_be_byte_array() {
+ let n = UintEx::from_be_byte_array(hex!("0011223344556677").into());
+ assert_eq!(n.as_limbs(), &[Limb(0x44556677), Limb(0x00112233)]);
+ }
+
+ #[test]
+ #[cfg(target_pointer_width = "64")]
+ fn from_be_byte_array() {
+ let n = UintEx::from_be_byte_array(hex!("00112233445566778899aabbccddeeff").into());
+ assert_eq!(
+ n.as_limbs(),
+ &[Limb(0x8899aabbccddeeff), Limb(0x0011223344556677)]
+ );
+ }
+
+ #[test]
+ #[cfg(target_pointer_width = "32")]
+ fn from_le_byte_array() {
+ let n = UintEx::from_le_byte_array(hex!("7766554433221100").into());
+ assert_eq!(n.as_limbs(), &[Limb(0x44556677), Limb(0x00112233)]);
+ }
+
+ #[test]
+ #[cfg(target_pointer_width = "64")]
+ fn from_le_byte_array() {
+ let n = UintEx::from_le_byte_array(hex!("ffeeddccbbaa99887766554433221100").into());
+ assert_eq!(
+ n.as_limbs(),
+ &[Limb(0x8899aabbccddeeff), Limb(0x0011223344556677)]
+ );
+ }
+
+ #[test]
+ #[cfg(target_pointer_width = "32")]
+ fn to_be_byte_array() {
+ let expected_bytes = ByteArray::from(hex!("0011223344556677"));
+ let actual_bytes = UintEx::from_be_byte_array(expected_bytes).to_be_byte_array();
+ assert_eq!(expected_bytes, actual_bytes);
+ }
+
+ #[test]
+ #[cfg(target_pointer_width = "64")]
+ fn to_be_byte_array() {
+ let expected_bytes = ByteArray::from(hex!("00112233445566778899aabbccddeeff"));
+ let actual_bytes = UintEx::from_be_byte_array(expected_bytes).to_be_byte_array();
+ assert_eq!(expected_bytes, actual_bytes);
+ }
+
+ #[test]
+ #[cfg(target_pointer_width = "32")]
+ fn to_le_byte_array() {
+ let expected_bytes = ByteArray::from(hex!("7766554433221100"));
+ let actual_bytes = UintEx::from_le_byte_array(expected_bytes).to_le_byte_array();
+ assert_eq!(expected_bytes, actual_bytes);
+ }
+
+ #[test]
+ #[cfg(target_pointer_width = "64")]
+ fn to_le_byte_array() {
+ let expected_bytes = ByteArray::from(hex!("ffeeddccbbaa99887766554433221100"));
+ let actual_bytes = UintEx::from_le_byte_array(expected_bytes).to_le_byte_array();
+ assert_eq!(expected_bytes, actual_bytes);
+ }
+
+ #[test]
+ #[cfg(target_pointer_width = "32")]
+ fn into_uint_be() {
+ let expected_bytes = ByteArray::from(hex!("0011223344556677"));
+ let actual_bytes = expected_bytes.into_uint_be().to_be_byte_array();
+ assert_eq!(expected_bytes, actual_bytes);
+ }
+
+ #[test]
+ #[cfg(target_pointer_width = "64")]
+ fn into_uint_be() {
+ let expected_bytes = ByteArray::from(hex!("00112233445566778899aabbccddeeff"));
+ let actual_bytes = expected_bytes.into_uint_be().to_be_byte_array();
+ assert_eq!(expected_bytes, actual_bytes);
+ }
+
+ #[test]
+ #[cfg(target_pointer_width = "32")]
+ fn into_uint_le() {
+ let expected_bytes = ByteArray::from(hex!("7766554433221100"));
+ let actual_bytes = expected_bytes.into_uint_le().to_le_byte_array();
+ assert_eq!(expected_bytes, actual_bytes);
+ }
+
+ #[test]
+ #[cfg(target_pointer_width = "64")]
+ fn into_uint_le() {
+ let expected_bytes = ByteArray::from(hex!("ffeeddccbbaa99887766554433221100"));
+ let actual_bytes = expected_bytes.into_uint_le().to_le_byte_array();
+ assert_eq!(expected_bytes, actual_bytes);
+ }
+}
diff --git a/vendor/crypto-bigint/src/uint/bit_and.rs b/vendor/crypto-bigint/src/uint/bit_and.rs
new file mode 100644
index 0000000..18186fb
--- /dev/null
+++ b/vendor/crypto-bigint/src/uint/bit_and.rs
@@ -0,0 +1,146 @@
+//! [`Uint`] bitwise and operations.
+
+use super::Uint;
+use crate::{Limb, Wrapping};
+use core::ops::{BitAnd, BitAndAssign};
+use subtle::{Choice, CtOption};
+
+impl<const LIMBS: usize> Uint<LIMBS> {
+ /// Computes bitwise `a & b`.
+ #[inline(always)]
+ pub const fn bitand(&self, rhs: &Self) -> Self {
+ let mut limbs = [Limb::ZERO; LIMBS];
+ let mut i = 0;
+
+ while i < LIMBS {
+ limbs[i] = self.limbs[i].bitand(rhs.limbs[i]);
+ i += 1;
+ }
+
+ Self { limbs }
+ }
+
+ /// Perform wrapping bitwise `AND`.
+ ///
+ /// Wrapping can never actually occur here; this function exists so that
+ /// all operations are accounted for in the wrapping operations.
+ pub const fn wrapping_and(&self, rhs: &Self) -> Self {
+ self.bitand(rhs)
+ }
+
+ /// Perform checked bitwise `AND`, returning a [`CtOption`] which is always `is_some`.
+ pub fn checked_and(&self, rhs: &Self) -> CtOption<Self> {
+ let result = self.bitand(rhs);
+ CtOption::new(result, Choice::from(1))
+ }
+}
+
+impl<const LIMBS: usize> BitAnd for Uint<LIMBS> {
+ type Output = Self;
+
+ fn bitand(self, rhs: Self) -> Uint<LIMBS> {
+ self.bitand(&rhs)
+ }
+}
+
+impl<const LIMBS: usize> BitAnd<&Uint<LIMBS>> for Uint<LIMBS> {
+ type Output = Uint<LIMBS>;
+
+ #[allow(clippy::needless_borrow)]
+ fn bitand(self, rhs: &Uint<LIMBS>) -> Uint<LIMBS> {
+ (&self).bitand(rhs)
+ }
+}
+
+impl<const LIMBS: usize> BitAnd<Uint<LIMBS>> for &Uint<LIMBS> {
+ type Output = Uint<LIMBS>;
+
+ fn bitand(self, rhs: Uint<LIMBS>) -> Uint<LIMBS> {
+ self.bitand(&rhs)
+ }
+}
+
+impl<const LIMBS: usize> BitAnd<&Uint<LIMBS>> for &Uint<LIMBS> {
+ type Output = Uint<LIMBS>;
+
+ fn bitand(self, rhs: &Uint<LIMBS>) -> Uint<LIMBS> {
+ self.bitand(rhs)
+ }
+}
+
+impl<const LIMBS: usize> BitAndAssign for Uint<LIMBS> {
+ #[allow(clippy::assign_op_pattern)]
+ fn bitand_assign(&mut self, other: Self) {
+ *self = *self & other;
+ }
+}
+
+impl<const LIMBS: usize> BitAndAssign<&Uint<LIMBS>> for Uint<LIMBS> {
+ #[allow(clippy::assign_op_pattern)]
+ fn bitand_assign(&mut self, other: &Self) {
+ *self = *self & other;
+ }
+}
+
+impl<const LIMBS: usize> BitAnd for Wrapping<Uint<LIMBS>> {
+ type Output = Self;
+
+ fn bitand(self, rhs: Self) -> Wrapping<Uint<LIMBS>> {
+ Wrapping(self.0.bitand(&rhs.0))
+ }
+}
+
+impl<const LIMBS: usize> BitAnd<&Wrapping<Uint<LIMBS>>> for Wrapping<Uint<LIMBS>> {
+ type Output = Wrapping<Uint<LIMBS>>;
+
+ fn bitand(self, rhs: &Wrapping<Uint<LIMBS>>) -> Wrapping<Uint<LIMBS>> {
+ Wrapping(self.0.bitand(&rhs.0))
+ }
+}
+
+impl<const LIMBS: usize> BitAnd<Wrapping<Uint<LIMBS>>> for &Wrapping<Uint<LIMBS>> {
+ type Output = Wrapping<Uint<LIMBS>>;
+
+ fn bitand(self, rhs: Wrapping<Uint<LIMBS>>) -> Wrapping<Uint<LIMBS>> {
+ Wrapping(self.0.bitand(&rhs.0))
+ }
+}
+
+impl<const LIMBS: usize> BitAnd<&Wrapping<Uint<LIMBS>>> for &Wrapping<Uint<LIMBS>> {
+ type Output = Wrapping<Uint<LIMBS>>;
+
+ fn bitand(self, rhs: &Wrapping<Uint<LIMBS>>) -> Wrapping<Uint<LIMBS>> {
+ Wrapping(self.0.bitand(&rhs.0))
+ }
+}
+
+impl<const LIMBS: usize> BitAndAssign for Wrapping<Uint<LIMBS>> {
+ #[allow(clippy::assign_op_pattern)]
+ fn bitand_assign(&mut self, other: Self) {
+ *self = *self & other;
+ }
+}
+
+impl<const LIMBS: usize> BitAndAssign<&Wrapping<Uint<LIMBS>>> for Wrapping<Uint<LIMBS>> {
+ #[allow(clippy::assign_op_pattern)]
+ fn bitand_assign(&mut self, other: &Self) {
+ *self = *self & other;
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use crate::U128;
+
+ #[test]
+ fn checked_and_ok() {
+ let result = U128::ZERO.checked_and(&U128::ONE);
+ assert_eq!(result.unwrap(), U128::ZERO);
+ }
+
+ #[test]
+ fn wrapping_and_ok() {
+ let result = U128::MAX.wrapping_and(&U128::ONE);
+ assert_eq!(result, U128::ONE);
+ }
+}
diff --git a/vendor/crypto-bigint/src/uint/bit_not.rs b/vendor/crypto-bigint/src/uint/bit_not.rs
new file mode 100644
index 0000000..52fea5f
--- /dev/null
+++ b/vendor/crypto-bigint/src/uint/bit_not.rs
@@ -0,0 +1,49 @@
+//! [`Uint`] bitwise not operations.
+
+use super::Uint;
+use crate::{Limb, Wrapping};
+use core::ops::Not;
+
+impl<const LIMBS: usize> Uint<LIMBS> {
+ /// Computes bitwise `!a`.
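+ ///
+ /// A small doc-test sketch of the identity this computes (using the `U128` alias):
+ ///
+ /// ```
+ /// use crypto_bigint::U128;
+ /// assert_eq!(U128::ZERO.not(), U128::MAX);
+ /// ```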
+ #[inline(always)]
+ pub const fn not(&self) -> Self {
+ let mut limbs = [Limb::ZERO; LIMBS];
+ let mut i = 0;
+
+ while i < LIMBS {
+ limbs[i] = self.limbs[i].not();
+ i += 1;
+ }
+
+ Self { limbs }
+ }
+}
+
+impl<const LIMBS: usize> Not for Uint<LIMBS> {
+ type Output = Self;
+
+ #[allow(clippy::needless_borrow)]
+ fn not(self) -> <Self as Not>::Output {
+ (&self).not()
+ }
+}
+
+impl<const LIMBS: usize> Not for Wrapping<Uint<LIMBS>> {
+ type Output = Self;
+
+ fn not(self) -> <Self as Not>::Output {
+ Wrapping(self.0.not())
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use crate::U128;
+
+ #[test]
+ fn bitnot_ok() {
+ assert_eq!(U128::ZERO.not(), U128::MAX);
+ assert_eq!(U128::MAX.not(), U128::ZERO);
+ }
+}
diff --git a/vendor/crypto-bigint/src/uint/bit_or.rs b/vendor/crypto-bigint/src/uint/bit_or.rs
new file mode 100644
index 0000000..9a78e36
--- /dev/null
+++ b/vendor/crypto-bigint/src/uint/bit_or.rs
@@ -0,0 +1,142 @@
+//! [`Uint`] bitwise or operations.
+
+use super::Uint;
+use crate::{Limb, Wrapping};
+use core::ops::{BitOr, BitOrAssign};
+use subtle::{Choice, CtOption};
+
+impl<const LIMBS: usize> Uint<LIMBS> {
+ /// Computes bitwise `a | b`.
+ #[inline(always)]
+ pub const fn bitor(&self, rhs: &Self) -> Self {
+ let mut limbs = [Limb::ZERO; LIMBS];
+ let mut i = 0;
+
+ while i < LIMBS {
+ limbs[i] = self.limbs[i].bitor(rhs.limbs[i]);
+ i += 1;
+ }
+
+ Self { limbs }
+ }
+
+ /// Perform wrapping bitwise `OR`.
+ ///
+ /// Wrapping can never occur for a bitwise `OR`; this function exists so that
+ /// all operations are accounted for in the wrapping operations.
+ pub const fn wrapping_or(&self, rhs: &Self) -> Self {
+ self.bitor(rhs)
+ }
+
+ /// Perform checked bitwise `OR`, returning a [`CtOption`] whose `is_some` is always truthy.
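+ ///
+ /// Since bitwise `OR` cannot fail, the option is always `Some`; a short sketch:
+ ///
+ /// ```
+ /// use crypto_bigint::U128;
+ /// assert_eq!(U128::ZERO.checked_or(&U128::ONE).unwrap(), U128::ONE);
+ /// ```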
+ pub fn checked_or(&self, rhs: &Self) -> CtOption<Self> {
+ let result = self.bitor(rhs);
+ CtOption::new(result, Choice::from(1))
+ }
+}
+
+impl<const LIMBS: usize> BitOr for Uint<LIMBS> {
+ type Output = Self;
+
+ fn bitor(self, rhs: Self) -> Uint<LIMBS> {
+ self.bitor(&rhs)
+ }
+}
+
+impl<const LIMBS: usize> BitOr<&Uint<LIMBS>> for Uint<LIMBS> {
+ type Output = Uint<LIMBS>;
+
+ #[allow(clippy::needless_borrow)]
+ fn bitor(self, rhs: &Uint<LIMBS>) -> Uint<LIMBS> {
+ (&self).bitor(rhs)
+ }
+}
+
+impl<const LIMBS: usize> BitOr<Uint<LIMBS>> for &Uint<LIMBS> {
+ type Output = Uint<LIMBS>;
+
+ fn bitor(self, rhs: Uint<LIMBS>) -> Uint<LIMBS> {
+ self.bitor(&rhs)
+ }
+}
+
+impl<const LIMBS: usize> BitOr<&Uint<LIMBS>> for &Uint<LIMBS> {
+ type Output = Uint<LIMBS>;
+
+ fn bitor(self, rhs: &Uint<LIMBS>) -> Uint<LIMBS> {
+ self.bitor(rhs)
+ }
+}
+
+impl<const LIMBS: usize> BitOrAssign for Uint<LIMBS> {
+ fn bitor_assign(&mut self, other: Self) {
+ *self = *self | other;
+ }
+}
+
+impl<const LIMBS: usize> BitOrAssign<&Uint<LIMBS>> for Uint<LIMBS> {
+ fn bitor_assign(&mut self, other: &Self) {
+ *self = *self | other;
+ }
+}
+
+impl<const LIMBS: usize> BitOr for Wrapping<Uint<LIMBS>> {
+ type Output = Self;
+
+ fn bitor(self, rhs: Self) -> Wrapping<Uint<LIMBS>> {
+ Wrapping(self.0.bitor(&rhs.0))
+ }
+}
+
+impl<const LIMBS: usize> BitOr<&Wrapping<Uint<LIMBS>>> for Wrapping<Uint<LIMBS>> {
+ type Output = Wrapping<Uint<LIMBS>>;
+
+ fn bitor(self, rhs: &Wrapping<Uint<LIMBS>>) -> Wrapping<Uint<LIMBS>> {
+ Wrapping(self.0.bitor(&rhs.0))
+ }
+}
+
+impl<const LIMBS: usize> BitOr<Wrapping<Uint<LIMBS>>> for &Wrapping<Uint<LIMBS>> {
+ type Output = Wrapping<Uint<LIMBS>>;
+
+ fn bitor(self, rhs: Wrapping<Uint<LIMBS>>) -> Wrapping<Uint<LIMBS>> {
+ Wrapping(self.0.bitor(&rhs.0))
+ }
+}
+
+impl<const LIMBS: usize> BitOr<&Wrapping<Uint<LIMBS>>> for &Wrapping<Uint<LIMBS>> {
+ type Output = Wrapping<Uint<LIMBS>>;
+
+ fn bitor(self, rhs: &Wrapping<Uint<LIMBS>>) -> Wrapping<Uint<LIMBS>> {
+ Wrapping(self.0.bitor(&rhs.0))
+ }
+}
+
+impl<const LIMBS: usize> BitOrAssign for Wrapping<Uint<LIMBS>> {
+ fn bitor_assign(&mut self, other: Self) {
+ *self = *self | other;
+ }
+}
+
+impl<const LIMBS: usize> BitOrAssign<&Wrapping<Uint<LIMBS>>> for Wrapping<Uint<LIMBS>> {
+ fn bitor_assign(&mut self, other: &Self) {
+ *self = *self | other;
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use crate::U128;
+
+ #[test]
+ fn checked_or_ok() {
+ let result = U128::ZERO.checked_or(&U128::ONE);
+ assert_eq!(result.unwrap(), U128::ONE);
+ }
+
+ #[test]
+ fn wrapping_or_ok() {
+ let result = U128::MAX.wrapping_or(&U128::ONE);
+ assert_eq!(result, U128::MAX);
+ }
+}
diff --git a/vendor/crypto-bigint/src/uint/bit_xor.rs b/vendor/crypto-bigint/src/uint/bit_xor.rs
new file mode 100644
index 0000000..91121d2
--- /dev/null
+++ b/vendor/crypto-bigint/src/uint/bit_xor.rs
@@ -0,0 +1,142 @@
+//! [`Uint`] bitwise xor operations.
+
+use super::Uint;
+use crate::{Limb, Wrapping};
+use core::ops::{BitXor, BitXorAssign};
+use subtle::{Choice, CtOption};
+
+impl<const LIMBS: usize> Uint<LIMBS> {
+ /// Computes bitwise `a ^ b`.
+ #[inline(always)]
+ pub const fn bitxor(&self, rhs: &Self) -> Self {
+ let mut limbs = [Limb::ZERO; LIMBS];
+ let mut i = 0;
+
+ while i < LIMBS {
+ limbs[i] = self.limbs[i].bitxor(rhs.limbs[i]);
+ i += 1;
+ }
+
+ Self { limbs }
+ }
+
+ /// Perform wrapping bitwise `XOR`.
+ ///
+ /// Wrapping can never occur for a bitwise `XOR`; this function exists so that
+ /// all operations are accounted for in the wrapping operations.
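+ ///
+ /// A quick doc-test sketch: XOR-ing a value with itself clears it.
+ ///
+ /// ```
+ /// use crypto_bigint::U128;
+ /// assert_eq!(U128::MAX.wrapping_xor(&U128::MAX), U128::ZERO);
+ /// ```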
+ pub const fn wrapping_xor(&self, rhs: &Self) -> Self {
+ self.bitxor(rhs)
+ }
+
+ /// Perform checked bitwise `XOR`, returning a [`CtOption`] whose `is_some` is always truthy.
+ pub fn checked_xor(&self, rhs: &Self) -> CtOption<Self> {
+ let result = self.bitxor(rhs);
+ CtOption::new(result, Choice::from(1))
+ }
+}
+
+impl<const LIMBS: usize> BitXor for Uint<LIMBS> {
+ type Output = Self;
+
+ fn bitxor(self, rhs: Self) -> Uint<LIMBS> {
+ self.bitxor(&rhs)
+ }
+}
+
+impl<const LIMBS: usize> BitXor<&Uint<LIMBS>> for Uint<LIMBS> {
+ type Output = Uint<LIMBS>;
+
+ #[allow(clippy::needless_borrow)]
+ fn bitxor(self, rhs: &Uint<LIMBS>) -> Uint<LIMBS> {
+ (&self).bitxor(rhs)
+ }
+}
+
+impl<const LIMBS: usize> BitXor<Uint<LIMBS>> for &Uint<LIMBS> {
+ type Output = Uint<LIMBS>;
+
+ fn bitxor(self, rhs: Uint<LIMBS>) -> Uint<LIMBS> {
+ self.bitxor(&rhs)
+ }
+}
+
+impl<const LIMBS: usize> BitXor<&Uint<LIMBS>> for &Uint<LIMBS> {
+ type Output = Uint<LIMBS>;
+
+ fn bitxor(self, rhs: &Uint<LIMBS>) -> Uint<LIMBS> {
+ self.bitxor(rhs)
+ }
+}
+
+impl<const LIMBS: usize> BitXorAssign for Uint<LIMBS> {
+ fn bitxor_assign(&mut self, other: Self) {
+ *self = *self ^ other;
+ }
+}
+
+impl<const LIMBS: usize> BitXorAssign<&Uint<LIMBS>> for Uint<LIMBS> {
+ fn bitxor_assign(&mut self, other: &Self) {
+ *self = *self ^ other;
+ }
+}
+
+impl<const LIMBS: usize> BitXor for Wrapping<Uint<LIMBS>> {
+ type Output = Self;
+
+ fn bitxor(self, rhs: Self) -> Wrapping<Uint<LIMBS>> {
+ Wrapping(self.0.bitxor(&rhs.0))
+ }
+}
+
+impl<const LIMBS: usize> BitXor<&Wrapping<Uint<LIMBS>>> for Wrapping<Uint<LIMBS>> {
+ type Output = Wrapping<Uint<LIMBS>>;
+
+ fn bitxor(self, rhs: &Wrapping<Uint<LIMBS>>) -> Wrapping<Uint<LIMBS>> {
+ Wrapping(self.0.bitxor(&rhs.0))
+ }
+}
+
+impl<const LIMBS: usize> BitXor<Wrapping<Uint<LIMBS>>> for &Wrapping<Uint<LIMBS>> {
+ type Output = Wrapping<Uint<LIMBS>>;
+
+ fn bitxor(self, rhs: Wrapping<Uint<LIMBS>>) -> Wrapping<Uint<LIMBS>> {
+ Wrapping(self.0.bitxor(&rhs.0))
+ }
+}
+
+impl<const LIMBS: usize> BitXor<&Wrapping<Uint<LIMBS>>> for &Wrapping<Uint<LIMBS>> {
+ type Output = Wrapping<Uint<LIMBS>>;
+
+ fn bitxor(self, rhs: &Wrapping<Uint<LIMBS>>) -> Wrapping<Uint<LIMBS>> {
+ Wrapping(self.0.bitxor(&rhs.0))
+ }
+}
+
+impl<const LIMBS: usize> BitXorAssign for Wrapping<Uint<LIMBS>> {
+ fn bitxor_assign(&mut self, other: Self) {
+ *self = *self ^ other;
+ }
+}
+
+impl<const LIMBS: usize> BitXorAssign<&Wrapping<Uint<LIMBS>>> for Wrapping<Uint<LIMBS>> {
+ fn bitxor_assign(&mut self, other: &Self) {
+ *self = *self ^ other;
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use crate::U128;
+
+ #[test]
+ fn checked_xor_ok() {
+ let result = U128::ZERO.checked_xor(&U128::ONE);
+ assert_eq!(result.unwrap(), U128::ONE);
+ }
+
+ #[test]
+ fn wrapping_xor_ok() {
+ let result = U128::ZERO.wrapping_xor(&U128::ONE);
+ assert_eq!(result, U128::ONE);
+ }
+}
diff --git a/vendor/crypto-bigint/src/uint/bits.rs b/vendor/crypto-bigint/src/uint/bits.rs
new file mode 100644
index 0000000..506bf99
--- /dev/null
+++ b/vendor/crypto-bigint/src/uint/bits.rs
@@ -0,0 +1,207 @@
+use crate::{CtChoice, Limb, Uint, Word};
+
+impl<const LIMBS: usize> Uint<LIMBS> {
+ /// Returns `true` if the bit at position `index` is set, `false` otherwise.
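+ ///
+ /// Out-of-range indices simply yield `false`; a doc-test sketch using the `U64` alias:
+ ///
+ /// ```
+ /// use crypto_bigint::U64;
+ /// let x = U64::from_u64(0b10);
+ /// assert!(x.bit_vartime(1));
+ /// assert!(!x.bit_vartime(0));
+ /// assert!(!x.bit_vartime(999)); // past `Self::BITS`
+ /// ```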
+ #[inline(always)]
+ pub const fn bit_vartime(&self, index: usize) -> bool {
+ if index >= Self::BITS {
+ false
+ } else {
+ (self.limbs[index / Limb::BITS].0 >> (index % Limb::BITS)) & 1 == 1
+ }
+ }
+
+ /// Calculate the number of bits needed to represent this number,
+ /// in variable time with respect to `self`.
+ #[allow(trivial_numeric_casts)]
+ pub const fn bits_vartime(&self) -> usize {
+ let mut i = LIMBS - 1;
+ while i > 0 && self.limbs[i].0 == 0 {
+ i -= 1;
+ }
+
+ let limb = self.limbs[i].0;
+ Limb::BITS * (i + 1) - limb.leading_zeros() as usize
+ }
+
+ /// Calculate the number of leading zeros in the binary representation of this number.
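+ ///
+ /// A doc-test sketch (using the `U64` alias; the zero value has all bits as leading zeros):
+ ///
+ /// ```
+ /// use crypto_bigint::U64;
+ /// assert_eq!(U64::ONE.leading_zeros(), 63);
+ /// assert_eq!(U64::ZERO.leading_zeros(), 64);
+ /// ```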
+ pub const fn leading_zeros(&self) -> usize {
+ let limbs = self.as_limbs();
+
+ let mut count: Word = 0;
+ let mut i = LIMBS;
+ let mut nonzero_limb_not_encountered = CtChoice::TRUE;
+ while i > 0 {
+ i -= 1;
+ let l = limbs[i];
+ let z = l.leading_zeros() as Word;
+ count += nonzero_limb_not_encountered.if_true(z);
+ nonzero_limb_not_encountered =
+ nonzero_limb_not_encountered.and(l.ct_is_nonzero().not());
+ }
+
+ count as usize
+ }
+
+ /// Calculate the number of trailing zeros in the binary representation of this number.
+ pub const fn trailing_zeros(&self) -> usize {
+ let limbs = self.as_limbs();
+
+ let mut count: Word = 0;
+ let mut i = 0;
+ let mut nonzero_limb_not_encountered = CtChoice::TRUE;
+ while i < LIMBS {
+ let l = limbs[i];
+ let z = l.trailing_zeros() as Word;
+ count += nonzero_limb_not_encountered.if_true(z);
+ nonzero_limb_not_encountered =
+ nonzero_limb_not_encountered.and(l.ct_is_nonzero().not());
+ i += 1;
+ }
+
+ count as usize
+ }
+
+ /// Calculate the number of bits needed to represent this number.
+ pub const fn bits(&self) -> usize {
+ Self::BITS - self.leading_zeros()
+ }
+
+ /// Get the value of the bit at position `index`, as a truthy or falsy `CtChoice`.
+ /// Returns the falsy value for indices out of range.
+ pub const fn bit(&self, index: usize) -> CtChoice {
+ let limb_num = index / Limb::BITS;
+ let index_in_limb = index % Limb::BITS;
+ let index_mask = 1 << index_in_limb;
+
+ let limbs = self.as_words();
+
+ let mut result: Word = 0;
+ let mut i = 0;
+ while i < LIMBS {
+ let bit = limbs[i] & index_mask;
+ let is_right_limb = CtChoice::from_usize_equality(i, limb_num);
+ result |= is_right_limb.if_true(bit);
+ i += 1;
+ }
+
+ CtChoice::from_lsb(result >> index_in_limb)
+ }
+
+ /// Sets the bit at `index` to 0 or 1 depending on the value of `bit_value`.
+ pub(crate) const fn set_bit(self, index: usize, bit_value: CtChoice) -> Self {
+ let mut result = self;
+ let limb_num = index / Limb::BITS;
+ let index_in_limb = index % Limb::BITS;
+ let index_mask = 1 << index_in_limb;
+
+ let mut i = 0;
+ while i < LIMBS {
+ let is_right_limb = CtChoice::from_usize_equality(i, limb_num);
+ let old_limb = result.limbs[i].0;
+ let new_limb = bit_value.select(old_limb & !index_mask, old_limb | index_mask);
+ result.limbs[i] = Limb(is_right_limb.select(old_limb, new_limb));
+ i += 1;
+ }
+ result
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use crate::{CtChoice, U256};
+
+ fn uint_with_bits_at(positions: &[usize]) -> U256 {
+ let mut result = U256::ZERO;
+ for pos in positions {
+ result |= U256::ONE << *pos;
+ }
+ result
+ }
+
+ #[test]
+ fn bit_vartime() {
+ let u = uint_with_bits_at(&[16, 48, 112, 127, 255]);
+ assert!(!u.bit_vartime(0));
+ assert!(!u.bit_vartime(1));
+ assert!(u.bit_vartime(16));
+ assert!(u.bit_vartime(127));
+ assert!(u.bit_vartime(255));
+ assert!(!u.bit_vartime(256));
+ assert!(!u.bit_vartime(260));
+ }
+
+ #[test]
+ fn bit() {
+ let u = uint_with_bits_at(&[16, 48, 112, 127, 255]);
+ assert!(!u.bit(0).is_true_vartime());
+ assert!(!u.bit(1).is_true_vartime());
+ assert!(u.bit(16).is_true_vartime());
+ assert!(u.bit(127).is_true_vartime());
+ assert!(u.bit(255).is_true_vartime());
+ assert!(!u.bit(256).is_true_vartime());
+ assert!(!u.bit(260).is_true_vartime());
+ }
+
+ #[test]
+ fn leading_zeros() {
+ let u = uint_with_bits_at(&[256 - 16, 256 - 79, 256 - 207]);
+ assert_eq!(u.leading_zeros() as u32, 15);
+
+ let u = uint_with_bits_at(&[256 - 79, 256 - 207]);
+ assert_eq!(u.leading_zeros() as u32, 78);
+
+ let u = uint_with_bits_at(&[256 - 207]);
+ assert_eq!(u.leading_zeros() as u32, 206);
+
+ let u = uint_with_bits_at(&[256 - 1, 256 - 75, 256 - 150]);
+ assert_eq!(u.leading_zeros() as u32, 0);
+
+ let u = U256::ZERO;
+ assert_eq!(u.leading_zeros() as u32, 256);
+ }
+
+ #[test]
+ fn trailing_zeros() {
+ let u = uint_with_bits_at(&[16, 79, 150]);
+ assert_eq!(u.trailing_zeros() as u32, 16);
+
+ let u = uint_with_bits_at(&[79, 150]);
+ assert_eq!(u.trailing_zeros() as u32, 79);
+
+ let u = uint_with_bits_at(&[150, 207]);
+ assert_eq!(u.trailing_zeros() as u32, 150);
+
+ let u = uint_with_bits_at(&[0, 150, 207]);
+ assert_eq!(u.trailing_zeros() as u32, 0);
+
+ let u = U256::ZERO;
+ assert_eq!(u.trailing_zeros() as u32, 256);
+ }
+
+ #[test]
+ fn set_bit() {
+ let u = uint_with_bits_at(&[16, 79, 150]);
+ assert_eq!(
+ u.set_bit(127, CtChoice::TRUE),
+ uint_with_bits_at(&[16, 79, 127, 150])
+ );
+
+ let u = uint_with_bits_at(&[16, 79, 150]);
+ assert_eq!(
+ u.set_bit(150, CtChoice::TRUE),
+ uint_with_bits_at(&[16, 79, 150])
+ );
+
+ let u = uint_with_bits_at(&[16, 79, 150]);
+ assert_eq!(
+ u.set_bit(127, CtChoice::FALSE),
+ uint_with_bits_at(&[16, 79, 150])
+ );
+
+ let u = uint_with_bits_at(&[16, 79, 150]);
+ assert_eq!(
+ u.set_bit(150, CtChoice::FALSE),
+ uint_with_bits_at(&[16, 79])
+ );
+ }
+}
diff --git a/vendor/crypto-bigint/src/uint/cmp.rs b/vendor/crypto-bigint/src/uint/cmp.rs
new file mode 100644
index 0000000..b513242
--- /dev/null
+++ b/vendor/crypto-bigint/src/uint/cmp.rs
@@ -0,0 +1,275 @@
+//! [`Uint`] comparisons.
+//!
+//! By default these are all constant-time and use the `subtle` crate.
+
+use super::Uint;
+use crate::{CtChoice, Limb};
+use core::cmp::Ordering;
+use subtle::{Choice, ConstantTimeEq, ConstantTimeGreater, ConstantTimeLess};
+
+impl<const LIMBS: usize> Uint<LIMBS> {
+ /// Return `b` if `c` is truthy, otherwise return `a`.
+ #[inline]
+ pub(crate) const fn ct_select(a: &Self, b: &Self, c: CtChoice) -> Self {
+ let mut limbs = [Limb::ZERO; LIMBS];
+
+ let mut i = 0;
+ while i < LIMBS {
+ limbs[i] = Limb::ct_select(a.limbs[i], b.limbs[i], c);
+ i += 1;
+ }
+
+ Uint { limbs }
+ }
+
+ #[inline]
+ pub(crate) const fn ct_swap(a: &Self, b: &Self, c: CtChoice) -> (Self, Self) {
+ let new_a = Self::ct_select(a, b, c);
+ let new_b = Self::ct_select(b, a, c);
+
+ (new_a, new_b)
+ }
+
+ /// Returns the truthy value if `self != 0` or the falsy value otherwise.
+ #[inline]
+ pub(crate) const fn ct_is_nonzero(&self) -> CtChoice {
+ let mut b = 0;
+ let mut i = 0;
+ while i < LIMBS {
+ b |= self.limbs[i].0;
+ i += 1;
+ }
+ Limb(b).ct_is_nonzero()
+ }
+
+ /// Returns the truthy value if `self` is odd or the falsy value otherwise.
+ pub(crate) const fn ct_is_odd(&self) -> CtChoice {
+ CtChoice::from_lsb(self.limbs[0].0 & 1)
+ }
+
+ /// Returns the truthy value if `self == rhs` or the falsy value otherwise.
+ #[inline]
+ pub(crate) const fn ct_eq(lhs: &Self, rhs: &Self) -> CtChoice {
+ let mut acc = 0;
+ let mut i = 0;
+
+ while i < LIMBS {
+ acc |= lhs.limbs[i].0 ^ rhs.limbs[i].0;
+ i += 1;
+ }
+
+ // acc == 0 if and only if self == rhs
+ Limb(acc).ct_is_nonzero().not()
+ }
+
+ /// Returns the truthy value if `self < rhs` and the falsy value otherwise.
+ #[inline]
+ pub(crate) const fn ct_lt(lhs: &Self, rhs: &Self) -> CtChoice {
+ // We could use the same approach as in Limb::ct_lt(),
+ // but since we have to use Uint::wrapping_sub(), which calls `sbb()`,
+ // there are no savings compared to just calling `sbb()` directly.
+ let (_res, borrow) = lhs.sbb(rhs, Limb::ZERO);
+ CtChoice::from_mask(borrow.0)
+ }
+
+ /// Returns the truthy value if `self > rhs` and the falsy value otherwise.
+ #[inline]
+ pub(crate) const fn ct_gt(lhs: &Self, rhs: &Self) -> CtChoice {
+ let (_res, borrow) = rhs.sbb(lhs, Limb::ZERO);
+ CtChoice::from_mask(borrow.0)
+ }
+
+ /// Returns the ordering between `self` and `rhs` as an `i8`.
+ /// Values correspond to the `Ordering` enum:
+ /// * -1 is `Less`
+ /// * 0 is `Equal`
+ /// * 1 is `Greater`
+ #[inline]
+ pub(crate) const fn ct_cmp(lhs: &Self, rhs: &Self) -> i8 {
+ let mut i = 0;
+ let mut borrow = Limb::ZERO;
+ let mut diff = Limb::ZERO;
+
+ while i < LIMBS {
+ let (w, b) = rhs.limbs[i].sbb(lhs.limbs[i], borrow);
+ diff = diff.bitor(w);
+ borrow = b;
+ i += 1;
+ }
+ let sgn = ((borrow.0 & 2) as i8) - 1;
+ (diff.ct_is_nonzero().to_u8() as i8) * sgn
+ }
+
+ /// Returns the Ordering between `self` and `rhs` in variable time.
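+ ///
+ /// A doc-test sketch (with the `U128` alias):
+ ///
+ /// ```
+ /// use core::cmp::Ordering;
+ /// use crypto_bigint::U128;
+ /// assert_eq!(U128::ONE.cmp_vartime(&U128::MAX), Ordering::Less);
+ /// assert_eq!(U128::MAX.cmp_vartime(&U128::MAX), Ordering::Equal);
+ /// ```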
+ pub const fn cmp_vartime(&self, rhs: &Self) -> Ordering {
+ let mut i = LIMBS - 1;
+ loop {
+ let (val, borrow) = self.limbs[i].sbb(rhs.limbs[i], Limb::ZERO);
+ if val.0 != 0 {
+ return if borrow.0 != 0 {
+ Ordering::Less
+ } else {
+ Ordering::Greater
+ };
+ }
+ if i == 0 {
+ return Ordering::Equal;
+ }
+ i -= 1;
+ }
+ }
+}
+
+impl<const LIMBS: usize> ConstantTimeEq for Uint<LIMBS> {
+ #[inline]
+ fn ct_eq(&self, other: &Self) -> Choice {
+ Uint::ct_eq(self, other).into()
+ }
+}
+
+impl<const LIMBS: usize> ConstantTimeGreater for Uint<LIMBS> {
+ #[inline]
+ fn ct_gt(&self, other: &Self) -> Choice {
+ Uint::ct_gt(self, other).into()
+ }
+}
+
+impl<const LIMBS: usize> ConstantTimeLess for Uint<LIMBS> {
+ #[inline]
+ fn ct_lt(&self, other: &Self) -> Choice {
+ Uint::ct_lt(self, other).into()
+ }
+}
+
+impl<const LIMBS: usize> Eq for Uint<LIMBS> {}
+
+impl<const LIMBS: usize> Ord for Uint<LIMBS> {
+ fn cmp(&self, other: &Self) -> Ordering {
+ let c = Self::ct_cmp(self, other);
+ match c {
+ -1 => Ordering::Less,
+ 0 => Ordering::Equal,
+ _ => Ordering::Greater,
+ }
+ }
+}
+
+impl<const LIMBS: usize> PartialOrd for Uint<LIMBS> {
+ fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
+ Some(self.cmp(other))
+ }
+}
+
+impl<const LIMBS: usize> PartialEq for Uint<LIMBS> {
+ fn eq(&self, other: &Self) -> bool {
+ self.ct_eq(other).into()
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use crate::{Integer, Zero, U128};
+ use core::cmp::Ordering;
+ use subtle::{ConstantTimeEq, ConstantTimeGreater, ConstantTimeLess};
+
+ #[test]
+ fn is_zero() {
+ assert!(bool::from(U128::ZERO.is_zero()));
+ assert!(!bool::from(U128::ONE.is_zero()));
+ assert!(!bool::from(U128::MAX.is_zero()));
+ }
+
+ #[test]
+ fn is_odd() {
+ assert!(!bool::from(U128::ZERO.is_odd()));
+ assert!(bool::from(U128::ONE.is_odd()));
+ assert!(bool::from(U128::MAX.is_odd()));
+ }
+
+ #[test]
+ fn ct_eq() {
+ let a = U128::ZERO;
+ let b = U128::MAX;
+
+ assert!(bool::from(a.ct_eq(&a)));
+ assert!(!bool::from(a.ct_eq(&b)));
+ assert!(!bool::from(b.ct_eq(&a)));
+ assert!(bool::from(b.ct_eq(&b)));
+ }
+
+ #[test]
+ fn ct_gt() {
+ let a = U128::ZERO;
+ let b = U128::ONE;
+ let c = U128::MAX;
+
+ assert!(bool::from(b.ct_gt(&a)));
+ assert!(bool::from(c.ct_gt(&a)));
+ assert!(bool::from(c.ct_gt(&b)));
+
+ assert!(!bool::from(a.ct_gt(&a)));
+ assert!(!bool::from(b.ct_gt(&b)));
+ assert!(!bool::from(c.ct_gt(&c)));
+
+ assert!(!bool::from(a.ct_gt(&b)));
+ assert!(!bool::from(a.ct_gt(&c)));
+ assert!(!bool::from(b.ct_gt(&c)));
+ }
+
+ #[test]
+ fn ct_lt() {
+ let a = U128::ZERO;
+ let b = U128::ONE;
+ let c = U128::MAX;
+
+ assert!(bool::from(a.ct_lt(&b)));
+ assert!(bool::from(a.ct_lt(&c)));
+ assert!(bool::from(b.ct_lt(&c)));
+
+ assert!(!bool::from(a.ct_lt(&a)));
+ assert!(!bool::from(b.ct_lt(&b)));
+ assert!(!bool::from(c.ct_lt(&c)));
+
+ assert!(!bool::from(b.ct_lt(&a)));
+ assert!(!bool::from(c.ct_lt(&a)));
+ assert!(!bool::from(c.ct_lt(&b)));
+ }
+
+ #[test]
+ fn cmp() {
+ let a = U128::ZERO;
+ let b = U128::ONE;
+ let c = U128::MAX;
+
+ assert_eq!(a.cmp(&b), Ordering::Less);
+ assert_eq!(a.cmp(&c), Ordering::Less);
+ assert_eq!(b.cmp(&c), Ordering::Less);
+
+ assert_eq!(a.cmp(&a), Ordering::Equal);
+ assert_eq!(b.cmp(&b), Ordering::Equal);
+ assert_eq!(c.cmp(&c), Ordering::Equal);
+
+ assert_eq!(b.cmp(&a), Ordering::Greater);
+ assert_eq!(c.cmp(&a), Ordering::Greater);
+ assert_eq!(c.cmp(&b), Ordering::Greater);
+ }
+
+ #[test]
+ fn cmp_vartime() {
+ let a = U128::ZERO;
+ let b = U128::ONE;
+ let c = U128::MAX;
+
+ assert_eq!(a.cmp_vartime(&b), Ordering::Less);
+ assert_eq!(a.cmp_vartime(&c), Ordering::Less);
+ assert_eq!(b.cmp_vartime(&c), Ordering::Less);
+
+ assert_eq!(a.cmp_vartime(&a), Ordering::Equal);
+ assert_eq!(b.cmp_vartime(&b), Ordering::Equal);
+ assert_eq!(c.cmp_vartime(&c), Ordering::Equal);
+
+ assert_eq!(b.cmp_vartime(&a), Ordering::Greater);
+ assert_eq!(c.cmp_vartime(&a), Ordering::Greater);
+ assert_eq!(c.cmp_vartime(&b), Ordering::Greater);
+ }
+}
diff --git a/vendor/crypto-bigint/src/uint/concat.rs b/vendor/crypto-bigint/src/uint/concat.rs
new file mode 100644
index 0000000..dde5242
--- /dev/null
+++ b/vendor/crypto-bigint/src/uint/concat.rs
@@ -0,0 +1,70 @@
+use crate::{Concat, ConcatMixed, Limb, Uint};
+
+impl<T> Concat for T
+where
+ T: ConcatMixed<T>,
+{
+ type Output = Self::MixedOutput;
+}
+
+/// Concatenate the two values, with `lo` as least significant and `hi`
+/// as the most significant.
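+/// If the output size `O` is smaller than `L + H` limbs, the most significant limbs
+/// are truncated; if it is larger, the excess high limbs are left at zero.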
+#[inline]
+pub(crate) const fn concat_mixed<const L: usize, const H: usize, const O: usize>(
+ lo: &Uint<L>,
+ hi: &Uint<H>,
+) -> Uint<O> {
+ let top = L + H;
+ let top = if top < O { top } else { O };
+ let mut limbs = [Limb::ZERO; O];
+ let mut i = 0;
+
+ while i < top {
+ if i < L {
+ limbs[i] = lo.limbs[i];
+ } else {
+ limbs[i] = hi.limbs[i - L];
+ }
+ i += 1;
+ }
+
+ Uint { limbs }
+}
+
+#[cfg(test)]
+mod tests {
+ use crate::{ConcatMixed, U128, U192, U64};
+
+ #[test]
+ fn concat() {
+ let hi = U64::from_u64(0x0011223344556677);
+ let lo = U64::from_u64(0x8899aabbccddeeff);
+ assert_eq!(
+ hi.concat(&lo),
+ U128::from_be_hex("00112233445566778899aabbccddeeff")
+ );
+ }
+
+ #[test]
+ fn concat_mixed() {
+ let a = U64::from_u64(0x0011223344556677);
+ let b = U128::from_u128(0x8899aabbccddeeff_8899aabbccddeeff);
+ assert_eq!(
+ a.concat_mixed(&b),
+ U192::from_be_hex("00112233445566778899aabbccddeeff8899aabbccddeeff")
+ );
+ assert_eq!(
+ b.concat_mixed(&a),
+ U192::from_be_hex("8899aabbccddeeff8899aabbccddeeff0011223344556677")
+ );
+ }
+
+ #[test]
+ fn convert() {
+ let res: U128 = U64::ONE.mul_wide(&U64::ONE).into();
+ assert_eq!(res, U128::ONE);
+
+ let res: U128 = U64::ONE.square_wide().into();
+ assert_eq!(res, U128::ONE);
+ }
+}
diff --git a/vendor/crypto-bigint/src/uint/div.rs b/vendor/crypto-bigint/src/uint/div.rs
new file mode 100644
index 0000000..7f5cda7
--- /dev/null
+++ b/vendor/crypto-bigint/src/uint/div.rs
@@ -0,0 +1,745 @@
+//! [`Uint`] division operations.
+
+use super::div_limb::{div_rem_limb_with_reciprocal, Reciprocal};
+use crate::{CtChoice, Limb, NonZero, Uint, Word, Wrapping};
+use core::ops::{Div, DivAssign, Rem, RemAssign};
+use subtle::CtOption;
+
+impl<const LIMBS: usize> Uint<LIMBS> {
+ /// Computes `self` / `rhs` using a pre-made reciprocal,
+ /// returns the quotient (q) and remainder (r).
+ #[inline(always)]
+ pub const fn ct_div_rem_limb_with_reciprocal(&self, reciprocal: &Reciprocal) -> (Self, Limb) {
+ div_rem_limb_with_reciprocal(self, reciprocal)
+ }
+
+ /// Computes `self` / `rhs` using a pre-made reciprocal,
+ /// returns the quotient (q) and remainder (r).
+ #[inline(always)]
+ pub fn div_rem_limb_with_reciprocal(
+ &self,
+ reciprocal: &CtOption<Reciprocal>,
+ ) -> CtOption<(Self, Limb)> {
+ reciprocal.map(|r| div_rem_limb_with_reciprocal(self, &r))
+ }
+
+ /// Computes `self` / `rhs`, returns the quotient (q) and remainder (r).
+ /// Returns the truthy value as the third element of the tuple if `rhs != 0`,
+ /// and the falsy value otherwise.
+ #[inline(always)]
+ pub(crate) const fn ct_div_rem_limb(&self, rhs: Limb) -> (Self, Limb, CtChoice) {
+ let (reciprocal, is_some) = Reciprocal::ct_new(rhs);
+ let (quo, rem) = div_rem_limb_with_reciprocal(self, &reciprocal);
+ (quo, rem, is_some)
+ }
+
+ /// Computes `self` / `rhs`, returns the quotient (q) and remainder (r).
+ #[inline(always)]
+ pub fn div_rem_limb(&self, rhs: NonZero<Limb>) -> (Self, Limb) {
+ // Guaranteed to succeed since `rhs` is nonzero.
+ let (quo, rem, _is_some) = self.ct_div_rem_limb(*rhs);
+ (quo, rem)
+ }
+
+ /// Computes `self` / `rhs`, returns the quotient (q), remainder (r)
+ /// and the truthy value for is_some or the falsy value for is_none.
+ ///
+ /// NOTE: use this only if you need a `const fn`. Otherwise use [`Self::div_rem`],
+ /// since the `is_some` value must be checked before `q` and `r` are used.
+ ///
+ /// This is variable only with respect to `rhs`.
+ ///
+ /// When used with a fixed `rhs`, this function is constant-time with respect
+ /// to `self`.
+ pub(crate) const fn ct_div_rem(&self, rhs: &Self) -> (Self, Self, CtChoice) {
+ let mb = rhs.bits_vartime();
+ let mut bd = Self::BITS - mb;
+ let mut rem = *self;
+ let mut quo = Self::ZERO;
+ let mut c = rhs.shl_vartime(bd);
+
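+ // Classic binary long division, constant-time with respect to `self`: `c` holds
+ // `rhs << bd`, and each iteration conditionally subtracts it from the remainder
+ // (keyed on the subtraction borrow) while shifting in the matching quotient bit.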
+ loop {
+ let (mut r, borrow) = rem.sbb(&c, Limb::ZERO);
+ rem = Self::ct_select(&r, &rem, CtChoice::from_mask(borrow.0));
+ r = quo.bitor(&Self::ONE);
+ quo = Self::ct_select(&r, &quo, CtChoice::from_mask(borrow.0));
+ if bd == 0 {
+ break;
+ }
+ bd -= 1;
+ c = c.shr_vartime(1);
+ quo = quo.shl_vartime(1);
+ }
+
+ let is_some = Limb(mb as Word).ct_is_nonzero();
+ quo = Self::ct_select(&Self::ZERO, &quo, is_some);
+ (quo, rem, is_some)
+ }
+
+ /// Computes `self` % `rhs`, returning the remainder
+ /// and the truthy value for is_some or the falsy value for is_none.
+ ///
+ /// NOTE: use this only if you need a `const fn`. Otherwise use [`Self::rem`].
+ /// This is variable only with respect to `rhs`.
+ ///
+ /// When used with a fixed `rhs`, this function is constant-time with respect
+ /// to `self`.
+ pub const fn const_rem(&self, rhs: &Self) -> (Self, CtChoice) {
+ let mb = rhs.bits_vartime();
+ let mut bd = Self::BITS - mb;
+ let mut rem = *self;
+ let mut c = rhs.shl_vartime(bd);
+
+ loop {
+ let (r, borrow) = rem.sbb(&c, Limb::ZERO);
+ rem = Self::ct_select(&r, &rem, CtChoice::from_mask(borrow.0));
+ if bd == 0 {
+ break;
+ }
+ bd -= 1;
+ c = c.shr_vartime(1);
+ }
+
+ let is_some = Limb(mb as Word).ct_is_nonzero();
+ (rem, is_some)
+ }
+
+ /// Computes `self` % `rhs`, returning the remainder
+ /// and the truthy value for is_some or the falsy value for is_none.
+ ///
+ /// This is variable only with respect to `rhs`.
+ ///
+ /// When used with a fixed `rhs`, this function is constant-time with respect
+ /// to `self`.
+ pub const fn const_rem_wide(lower_upper: (Self, Self), rhs: &Self) -> (Self, CtChoice) {
+ let mb = rhs.bits_vartime();
+
+ // The number of bits to consider is that of the double-width value: 2 * BITS - mb (where mb is the modulus bit count)
+ let mut bd = (2 * Self::BITS) - mb;
+
+ // The wide integer to reduce, split into two halves
+ let (mut lower, mut upper) = lower_upper;
+
+ // Factor of the modulus, split into two halves
+ let mut c = Self::shl_vartime_wide((*rhs, Uint::ZERO), bd);
+
+ loop {
+ let (lower_sub, borrow) = lower.sbb(&c.0, Limb::ZERO);
+ let (upper_sub, borrow) = upper.sbb(&c.1, borrow);
+
+ lower = Self::ct_select(&lower_sub, &lower, CtChoice::from_mask(borrow.0));
+ upper = Self::ct_select(&upper_sub, &upper, CtChoice::from_mask(borrow.0));
+ if bd == 0 {
+ break;
+ }
+ bd -= 1;
+ c = Self::shr_vartime_wide(c, 1);
+ }
+
+ let is_some = Limb(mb as Word).ct_is_nonzero();
+ (lower, is_some)
+ }
+
+ /// Computes `self` % 2^k. Faster than a general reduction since the modulus is a power of 2.
+ /// Limited to `k <= 2^16 - 1`, since `Uint` doesn't support anything larger.
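+ ///
+ /// A doc-test sketch:
+ ///
+ /// ```
+ /// use crypto_bigint::U128;
+ /// // 22 mod 2^3 == 6
+ /// assert_eq!(U128::from_u64(0b10110).rem2k(3), U128::from_u64(0b110));
+ /// ```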
+ pub const fn rem2k(&self, k: usize) -> Self {
+ let highest = (LIMBS - 1) as u32;
+ let index = k as u32 / (Limb::BITS as u32);
+ let le = Limb::ct_le(Limb::from_u32(index), Limb::from_u32(highest));
+ let word = Limb::ct_select(Limb::from_u32(highest), Limb::from_u32(index), le).0 as usize;
+
+ let base = k % Limb::BITS;
+ let mask = (1 << base) - 1;
+ let mut out = *self;
+
+ let outmask = Limb(out.limbs[word].0 & mask);
+
+ out.limbs[word] = Limb::ct_select(out.limbs[word], outmask, le);
+
+ let mut i = word + 1;
+ while i < LIMBS {
+ out.limbs[i] = Limb::ZERO;
+ i += 1;
+ }
+
+ out
+ }
+
+ /// Computes `self` / `rhs`, returning the quotient and remainder.
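+ ///
+ /// A doc-test sketch (`NonZero` is re-exported by this crate):
+ ///
+ /// ```
+ /// use crypto_bigint::{NonZero, U128};
+ /// let d = NonZero::new(U128::from_u64(3)).unwrap();
+ /// let (q, r) = U128::from_u64(10).div_rem(&d);
+ /// assert_eq!(q, U128::from_u64(3));
+ /// assert_eq!(r, U128::ONE);
+ /// ```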
+ pub fn div_rem(&self, rhs: &NonZero<Self>) -> (Self, Self) {
+ // Since `rhs` is nonzero, this should always hold.
+ let (q, r, _c) = self.ct_div_rem(rhs);
+ (q, r)
+ }
+
+ /// Computes `self` % `rhs`, returning the remainder.
+ pub fn rem(&self, rhs: &NonZero<Self>) -> Self {
+ // Since `rhs` is nonzero, this should always hold.
+ let (r, _c) = self.const_rem(rhs);
+ r
+ }
+
+ /// Wrapped division is just normal division, i.e. `self` / `rhs`;
+ /// wrapping can never occur.
+ /// This function exists so that all operations are accounted for in the wrapping operations.
+ ///
+ /// Panics if `rhs == 0`.
+ pub const fn wrapping_div(&self, rhs: &Self) -> Self {
+ let (q, _, c) = self.ct_div_rem(rhs);
+ assert!(c.is_true_vartime(), "divide by zero");
+ q
+ }
+
+ /// Perform checked division, returning a [`CtOption`] whose `is_some` is truthy
+ /// only if `rhs != 0`.
+ pub fn checked_div(&self, rhs: &Self) -> CtOption<Self> {
+ NonZero::new(*rhs).map(|rhs| {
+ let (q, _r) = self.div_rem(&rhs);
+ q
+ })
+ }
+
+ /// Wrapped (modular) remainder calculation is just `self` % `rhs`;
+ /// wrapping can never occur.
+ /// This function exists so that all operations are accounted for in the wrapping operations.
+ ///
+ /// Panics if `rhs == 0`.
+ pub const fn wrapping_rem(&self, rhs: &Self) -> Self {
+ let (r, c) = self.const_rem(rhs);
+ assert!(c.is_true_vartime(), "modulo zero");
+ r
+ }
+
+ /// Perform checked reduction, returning a [`CtOption`] whose `is_some` is truthy
+ /// only if `rhs != 0`.
+ pub fn checked_rem(&self, rhs: &Self) -> CtOption<Self> {
+ NonZero::new(*rhs).map(|rhs| self.rem(&rhs))
+ }
+}
+
+//
+// Division by a single limb
+//
+
+impl<const LIMBS: usize> Div<&NonZero<Limb>> for &Uint<LIMBS> {
+ type Output = Uint<LIMBS>;
+
+ fn div(self, rhs: &NonZero<Limb>) -> Self::Output {
+ *self / *rhs
+ }
+}
+
+impl<const LIMBS: usize> Div<&NonZero<Limb>> for Uint<LIMBS> {
+ type Output = Uint<LIMBS>;
+
+ fn div(self, rhs: &NonZero<Limb>) -> Self::Output {
+ self / *rhs
+ }
+}
+
+impl<const LIMBS: usize> Div<NonZero<Limb>> for &Uint<LIMBS> {
+ type Output = Uint<LIMBS>;
+
+ fn div(self, rhs: NonZero<Limb>) -> Self::Output {
+ *self / rhs
+ }
+}
+
+impl<const LIMBS: usize> Div<NonZero<Limb>> for Uint<LIMBS> {
+ type Output = Uint<LIMBS>;
+
+ fn div(self, rhs: NonZero<Limb>) -> Self::Output {
+ let (q, _, _) = self.ct_div_rem_limb(*rhs);
+ q
+ }
+}
+
+impl<const LIMBS: usize> DivAssign<&NonZero<Limb>> for Uint<LIMBS> {
+ fn div_assign(&mut self, rhs: &NonZero<Limb>) {
+ *self /= *rhs;
+ }
+}
+
+impl<const LIMBS: usize> DivAssign<NonZero<Limb>> for Uint<LIMBS> {
+ fn div_assign(&mut self, rhs: NonZero<Limb>) {
+ *self = *self / rhs;
+ }
+}
+
+impl<const LIMBS: usize> Div<NonZero<Limb>> for Wrapping<Uint<LIMBS>> {
+ type Output = Wrapping<Uint<LIMBS>>;
+
+ fn div(self, rhs: NonZero<Limb>) -> Self::Output {
+ Wrapping(self.0 / rhs)
+ }
+}
+
+impl<const LIMBS: usize> Div<NonZero<Limb>> for &Wrapping<Uint<LIMBS>> {
+ type Output = Wrapping<Uint<LIMBS>>;
+
+ fn div(self, rhs: NonZero<Limb>) -> Self::Output {
+ *self / rhs
+ }
+}
+
+impl<const LIMBS: usize> Div<&NonZero<Limb>> for &Wrapping<Uint<LIMBS>> {
+ type Output = Wrapping<Uint<LIMBS>>;
+
+ fn div(self, rhs: &NonZero<Limb>) -> Self::Output {
+ *self / *rhs
+ }
+}
+
+impl<const LIMBS: usize> Div<&NonZero<Limb>> for Wrapping<Uint<LIMBS>> {
+ type Output = Wrapping<Uint<LIMBS>>;
+
+ fn div(self, rhs: &NonZero<Limb>) -> Self::Output {
+ self / *rhs
+ }
+}
+
+impl<const LIMBS: usize> DivAssign<&NonZero<Limb>> for Wrapping<Uint<LIMBS>> {
+ fn div_assign(&mut self, rhs: &NonZero<Limb>) {
+ *self = Wrapping(self.0 / rhs)
+ }
+}
+
+impl<const LIMBS: usize> DivAssign<NonZero<Limb>> for Wrapping<Uint<LIMBS>> {
+ fn div_assign(&mut self, rhs: NonZero<Limb>) {
+ *self /= &rhs;
+ }
+}
+
+impl<const LIMBS: usize> Rem<&NonZero<Limb>> for &Uint<LIMBS> {
+ type Output = Limb;
+
+ fn rem(self, rhs: &NonZero<Limb>) -> Self::Output {
+ *self % *rhs
+ }
+}
+
+impl<const LIMBS: usize> Rem<&NonZero<Limb>> for Uint<LIMBS> {
+ type Output = Limb;
+
+ fn rem(self, rhs: &NonZero<Limb>) -> Self::Output {
+ self % *rhs
+ }
+}
+
+impl<const LIMBS: usize> Rem<NonZero<Limb>> for &Uint<LIMBS> {
+ type Output = Limb;
+
+ fn rem(self, rhs: NonZero<Limb>) -> Self::Output {
+ *self % rhs
+ }
+}
+
+impl<const LIMBS: usize> Rem<NonZero<Limb>> for Uint<LIMBS> {
+ type Output = Limb;
+
+ fn rem(self, rhs: NonZero<Limb>) -> Self::Output {
+ let (_, r, _) = self.ct_div_rem_limb(*rhs);
+ r
+ }
+}
+
+impl<const LIMBS: usize> RemAssign<&NonZero<Limb>> for Uint<LIMBS> {
+ fn rem_assign(&mut self, rhs: &NonZero<Limb>) {
+ *self = (*self % rhs).into();
+ }
+}
+
+impl<const LIMBS: usize> RemAssign<NonZero<Limb>> for Uint<LIMBS> {
+ fn rem_assign(&mut self, rhs: NonZero<Limb>) {
+ *self %= &rhs;
+ }
+}
+
+impl<const LIMBS: usize> Rem<NonZero<Limb>> for Wrapping<Uint<LIMBS>> {
+ type Output = Wrapping<Limb>;
+
+ fn rem(self, rhs: NonZero<Limb>) -> Self::Output {
+ Wrapping(self.0 % rhs)
+ }
+}
+
+impl<const LIMBS: usize> Rem<NonZero<Limb>> for &Wrapping<Uint<LIMBS>> {
+ type Output = Wrapping<Limb>;
+
+ fn rem(self, rhs: NonZero<Limb>) -> Self::Output {
+ *self % rhs
+ }
+}
+
+impl<const LIMBS: usize> Rem<&NonZero<Limb>> for &Wrapping<Uint<LIMBS>> {
+ type Output = Wrapping<Limb>;
+
+ fn rem(self, rhs: &NonZero<Limb>) -> Self::Output {
+ *self % *rhs
+ }
+}
+
+impl<const LIMBS: usize> Rem<&NonZero<Limb>> for Wrapping<Uint<LIMBS>> {
+ type Output = Wrapping<Limb>;
+
+ fn rem(self, rhs: &NonZero<Limb>) -> Self::Output {
+ self % *rhs
+ }
+}
+
+impl<const LIMBS: usize> RemAssign<NonZero<Limb>> for Wrapping<Uint<LIMBS>> {
+ fn rem_assign(&mut self, rhs: NonZero<Limb>) {
+ *self %= &rhs;
+ }
+}
+
+impl<const LIMBS: usize> RemAssign<&NonZero<Limb>> for Wrapping<Uint<LIMBS>> {
+ fn rem_assign(&mut self, rhs: &NonZero<Limb>) {
+ *self = Wrapping((self.0 % rhs).into())
+ }
+}
+
+//
+// Division by an Uint
+//
+
+impl<const LIMBS: usize> Div<&NonZero<Uint<LIMBS>>> for &Uint<LIMBS> {
+ type Output = Uint<LIMBS>;
+
+ fn div(self, rhs: &NonZero<Uint<LIMBS>>) -> Self::Output {
+ *self / *rhs
+ }
+}
+
+impl<const LIMBS: usize> Div<&NonZero<Uint<LIMBS>>> for Uint<LIMBS> {
+ type Output = Uint<LIMBS>;
+
+ fn div(self, rhs: &NonZero<Uint<LIMBS>>) -> Self::Output {
+ self / *rhs
+ }
+}
+
+impl<const LIMBS: usize> Div<NonZero<Uint<LIMBS>>> for &Uint<LIMBS> {
+ type Output = Uint<LIMBS>;
+
+ fn div(self, rhs: NonZero<Uint<LIMBS>>) -> Self::Output {
+ *self / rhs
+ }
+}
+
+impl<const LIMBS: usize> Div<NonZero<Uint<LIMBS>>> for Uint<LIMBS> {
+ type Output = Uint<LIMBS>;
+
+ fn div(self, rhs: NonZero<Uint<LIMBS>>) -> Self::Output {
+ let (q, _) = self.div_rem(&rhs);
+ q
+ }
+}
+
+impl<const LIMBS: usize> DivAssign<&NonZero<Uint<LIMBS>>> for Uint<LIMBS> {
+ fn div_assign(&mut self, rhs: &NonZero<Uint<LIMBS>>) {
+ *self /= *rhs
+ }
+}
+
+impl<const LIMBS: usize> DivAssign<NonZero<Uint<LIMBS>>> for Uint<LIMBS> {
+ fn div_assign(&mut self, rhs: NonZero<Uint<LIMBS>>) {
+ *self = *self / rhs;
+ }
+}
+
+impl<const LIMBS: usize> Div<NonZero<Uint<LIMBS>>> for Wrapping<Uint<LIMBS>> {
+ type Output = Wrapping<Uint<LIMBS>>;
+
+ fn div(self, rhs: NonZero<Uint<LIMBS>>) -> Self::Output {
+ Wrapping(self.0 / rhs)
+ }
+}
+
+impl<const LIMBS: usize> Div<NonZero<Uint<LIMBS>>> for &Wrapping<Uint<LIMBS>> {
+ type Output = Wrapping<Uint<LIMBS>>;
+
+ fn div(self, rhs: NonZero<Uint<LIMBS>>) -> Self::Output {
+ *self / rhs
+ }
+}
+
+impl<const LIMBS: usize> Div<&NonZero<Uint<LIMBS>>> for &Wrapping<Uint<LIMBS>> {
+ type Output = Wrapping<Uint<LIMBS>>;
+
+ fn div(self, rhs: &NonZero<Uint<LIMBS>>) -> Self::Output {
+ *self / *rhs
+ }
+}
+
+impl<const LIMBS: usize> Div<&NonZero<Uint<LIMBS>>> for Wrapping<Uint<LIMBS>> {
+ type Output = Wrapping<Uint<LIMBS>>;
+
+ fn div(self, rhs: &NonZero<Uint<LIMBS>>) -> Self::Output {
+ self / *rhs
+ }
+}
+
+impl<const LIMBS: usize> DivAssign<&NonZero<Uint<LIMBS>>> for Wrapping<Uint<LIMBS>> {
+ fn div_assign(&mut self, rhs: &NonZero<Uint<LIMBS>>) {
+ *self = Wrapping(self.0 / rhs);
+ }
+}
+
+impl<const LIMBS: usize> DivAssign<NonZero<Uint<LIMBS>>> for Wrapping<Uint<LIMBS>> {
+ fn div_assign(&mut self, rhs: NonZero<Uint<LIMBS>>) {
+ *self /= &rhs;
+ }
+}
+
+impl<const LIMBS: usize> Rem<&NonZero<Uint<LIMBS>>> for &Uint<LIMBS> {
+ type Output = Uint<LIMBS>;
+
+ fn rem(self, rhs: &NonZero<Uint<LIMBS>>) -> Self::Output {
+ *self % *rhs
+ }
+}
+
+impl<const LIMBS: usize> Rem<&NonZero<Uint<LIMBS>>> for Uint<LIMBS> {
+ type Output = Uint<LIMBS>;
+
+ fn rem(self, rhs: &NonZero<Uint<LIMBS>>) -> Self::Output {
+ self % *rhs
+ }
+}
+
+impl<const LIMBS: usize> Rem<NonZero<Uint<LIMBS>>> for &Uint<LIMBS> {
+ type Output = Uint<LIMBS>;
+
+ fn rem(self, rhs: NonZero<Uint<LIMBS>>) -> Self::Output {
+ *self % rhs
+ }
+}
+
+impl<const LIMBS: usize> Rem<NonZero<Uint<LIMBS>>> for Uint<LIMBS> {
+ type Output = Uint<LIMBS>;
+
+ fn rem(self, rhs: NonZero<Uint<LIMBS>>) -> Self::Output {
+ Self::rem(&self, &rhs)
+ }
+}
+
+impl<const LIMBS: usize> RemAssign<&NonZero<Uint<LIMBS>>> for Uint<LIMBS> {
+ fn rem_assign(&mut self, rhs: &NonZero<Uint<LIMBS>>) {
+ *self %= *rhs
+ }
+}
+
+impl<const LIMBS: usize> RemAssign<NonZero<Uint<LIMBS>>> for Uint<LIMBS> {
+ fn rem_assign(&mut self, rhs: NonZero<Uint<LIMBS>>) {
+ *self = *self % rhs;
+ }
+}
+
+impl<const LIMBS: usize> Rem<NonZero<Uint<LIMBS>>> for Wrapping<Uint<LIMBS>> {
+ type Output = Wrapping<Uint<LIMBS>>;
+
+ fn rem(self, rhs: NonZero<Uint<LIMBS>>) -> Self::Output {
+ Wrapping(self.0 % rhs)
+ }
+}
+
+impl<const LIMBS: usize> Rem<NonZero<Uint<LIMBS>>> for &Wrapping<Uint<LIMBS>> {
+ type Output = Wrapping<Uint<LIMBS>>;
+
+ fn rem(self, rhs: NonZero<Uint<LIMBS>>) -> Self::Output {
+ *self % rhs
+ }
+}
+
+impl<const LIMBS: usize> Rem<&NonZero<Uint<LIMBS>>> for &Wrapping<Uint<LIMBS>> {
+ type Output = Wrapping<Uint<LIMBS>>;
+
+ fn rem(self, rhs: &NonZero<Uint<LIMBS>>) -> Self::Output {
+ *self % *rhs
+ }
+}
+
+impl<const LIMBS: usize> Rem<&NonZero<Uint<LIMBS>>> for Wrapping<Uint<LIMBS>> {
+ type Output = Wrapping<Uint<LIMBS>>;
+
+ fn rem(self, rhs: &NonZero<Uint<LIMBS>>) -> Self::Output {
+ self % *rhs
+ }
+}
+
+impl<const LIMBS: usize> RemAssign<NonZero<Uint<LIMBS>>> for Wrapping<Uint<LIMBS>> {
+ fn rem_assign(&mut self, rhs: NonZero<Uint<LIMBS>>) {
+ *self %= &rhs;
+ }
+}
+
+impl<const LIMBS: usize> RemAssign<&NonZero<Uint<LIMBS>>> for Wrapping<Uint<LIMBS>> {
+ fn rem_assign(&mut self, rhs: &NonZero<Uint<LIMBS>>) {
+ *self = Wrapping(self.0 % rhs)
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+ use crate::{limb::HI_BIT, Limb, U256};
+
+ #[cfg(feature = "rand")]
+ use {
+ crate::{CheckedMul, Random},
+ rand_chacha::ChaChaRng,
+ rand_core::RngCore,
+ rand_core::SeedableRng,
+ };
+
+ #[test]
+ fn div_word() {
+ for (n, d, e, ee) in &[
+ (200u64, 2u64, 100u64, 0),
+ (100u64, 25u64, 4u64, 0),
+ (100u64, 10u64, 10u64, 0),
+ (1024u64, 8u64, 128u64, 0),
+ (27u64, 13u64, 2u64, 1u64),
+ (26u64, 13u64, 2u64, 0u64),
+ (14u64, 13u64, 1u64, 1u64),
+ (13u64, 13u64, 1u64, 0u64),
+ (12u64, 13u64, 0u64, 12u64),
+ (1u64, 13u64, 0u64, 1u64),
+ ] {
+ let lhs = U256::from(*n);
+ let rhs = U256::from(*d);
+ let (q, r, is_some) = lhs.ct_div_rem(&rhs);
+ assert!(is_some.is_true_vartime());
+ assert_eq!(U256::from(*e), q);
+ assert_eq!(U256::from(*ee), r);
+ }
+ }
+
+ #[cfg(feature = "rand")]
+ #[test]
+ fn div() {
+ let mut rng = ChaChaRng::from_seed([7u8; 32]);
+ for _ in 0..25 {
+ let num = U256::random(&mut rng).shr_vartime(128);
+ let den = U256::random(&mut rng).shr_vartime(128);
+ let n = num.checked_mul(&den);
+ if n.is_some().into() {
+ let (q, _, is_some) = n.unwrap().ct_div_rem(&den);
+ assert!(is_some.is_true_vartime());
+ assert_eq!(q, num);
+ }
+ }
+ }
+
+ #[test]
+ fn div_max() {
+ let mut a = U256::ZERO;
+ let mut b = U256::ZERO;
+ b.limbs[b.limbs.len() - 1] = Limb(Word::MAX);
+ let q = a.wrapping_div(&b);
+ assert_eq!(q, Uint::ZERO);
+ a.limbs[a.limbs.len() - 1] = Limb(1 << (HI_BIT - 7));
+ b.limbs[b.limbs.len() - 1] = Limb(0x82 << (HI_BIT - 7));
+ let q = a.wrapping_div(&b);
+ assert_eq!(q, Uint::ZERO);
+ }
+
+ #[test]
+ fn div_zero() {
+ let (q, r, is_some) = U256::ONE.ct_div_rem(&U256::ZERO);
+ assert!(!is_some.is_true_vartime());
+ assert_eq!(q, U256::ZERO);
+ assert_eq!(r, U256::ONE);
+ }
+
+ #[test]
+ fn div_one() {
+ let (q, r, is_some) = U256::from(10u8).ct_div_rem(&U256::ONE);
+ assert!(is_some.is_true_vartime());
+ assert_eq!(q, U256::from(10u8));
+ assert_eq!(r, U256::ZERO);
+ }
+
+ #[test]
+ fn reduce_one() {
+ let (r, is_some) = U256::from(10u8).const_rem(&U256::ONE);
+ assert!(is_some.is_true_vartime());
+ assert_eq!(r, U256::ZERO);
+ }
+
+ #[test]
+ fn reduce_zero() {
+ let u = U256::from(10u8);
+ let (r, is_some) = u.const_rem(&U256::ZERO);
+ assert!(!is_some.is_true_vartime());
+ assert_eq!(r, u);
+ }
+
+ #[test]
+ fn reduce_tests() {
+ let (r, is_some) = U256::from(10u8).const_rem(&U256::from(2u8));
+ assert!(is_some.is_true_vartime());
+ assert_eq!(r, U256::ZERO);
+ let (r, is_some) = U256::from(10u8).const_rem(&U256::from(3u8));
+ assert!(is_some.is_true_vartime());
+ assert_eq!(r, U256::ONE);
+ let (r, is_some) = U256::from(10u8).const_rem(&U256::from(7u8));
+ assert!(is_some.is_true_vartime());
+ assert_eq!(r, U256::from(3u8));
+ }
+
+ #[test]
+ fn reduce_tests_wide_zero_padded() {
+ let (r, is_some) = U256::const_rem_wide((U256::from(10u8), U256::ZERO), &U256::from(2u8));
+ assert!(is_some.is_true_vartime());
+ assert_eq!(r, U256::ZERO);
+ let (r, is_some) = U256::const_rem_wide((U256::from(10u8), U256::ZERO), &U256::from(3u8));
+ assert!(is_some.is_true_vartime());
+ assert_eq!(r, U256::ONE);
+ let (r, is_some) = U256::const_rem_wide((U256::from(10u8), U256::ZERO), &U256::from(7u8));
+ assert!(is_some.is_true_vartime());
+ assert_eq!(r, U256::from(3u8));
+ }
+
+ #[test]
+ fn reduce_max() {
+ let mut a = U256::ZERO;
+ let mut b = U256::ZERO;
+ b.limbs[b.limbs.len() - 1] = Limb(Word::MAX);
+ let r = a.wrapping_rem(&b);
+ assert_eq!(r, Uint::ZERO);
+ a.limbs[a.limbs.len() - 1] = Limb(1 << (HI_BIT - 7));
+ b.limbs[b.limbs.len() - 1] = Limb(0x82 << (HI_BIT - 7));
+ let r = a.wrapping_rem(&b);
+ assert_eq!(r, a);
+ }
+
+ #[cfg(feature = "rand")]
+ #[test]
+ fn rem2krand() {
+ let mut rng = ChaChaRng::from_seed([7u8; 32]);
+ for _ in 0..25 {
+ let num = U256::random(&mut rng);
+ let k = (rng.next_u32() % 256) as usize;
+ let den = U256::ONE.shl_vartime(k);
+
+ let a = num.rem2k(k);
+ let e = num.wrapping_rem(&den);
+ assert_eq!(a, e);
+ }
+ }
+
+ #[allow(clippy::op_ref)]
+ #[test]
+ fn rem_trait() {
+ let a = U256::from(10u64);
+ let b = NonZero::new(U256::from(3u64)).unwrap();
+ let c = U256::from(1u64);
+
+ assert_eq!(a % b, c);
+ assert_eq!(a % &b, c);
+ assert_eq!(&a % b, c);
+ assert_eq!(&a % &b, c);
+ }
+}
diff --git a/vendor/crypto-bigint/src/uint/div_limb.rs b/vendor/crypto-bigint/src/uint/div_limb.rs
new file mode 100644
index 0000000..c00bc77
--- /dev/null
+++ b/vendor/crypto-bigint/src/uint/div_limb.rs
@@ -0,0 +1,287 @@
+//! Implementation of constant-time division via reciprocal precomputation, as described in
+//! "Improved Division by Invariant Integers" by Niels Möller and Torbjorn Granlund
+//! (DOI: 10.1109/TC.2010.143, <https://gmplib.org/~tege/division-paper.pdf>).
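+//!
+//! In outline (a rough summary, not a substitute for the paper): the divisor is
+//! normalized so that its highest bit is set, a fixed-point reciprocal close to
+//! `2^(2*W) / d - 2^W` (for word size `W`) is computed once, and each subsequent
+//! 2-by-1 word division then needs only multiplications, additions and
+//! conditional corrections (see `div2by1` below).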
+use subtle::{Choice, ConditionallySelectable, CtOption};
+
+use crate::{CtChoice, Limb, Uint, WideWord, Word};
+
+/// Calculates the reciprocal of the given 32-bit divisor with the highest bit set.
+#[cfg(target_pointer_width = "32")]
+pub const fn reciprocal(d: Word) -> Word {
+ debug_assert!(d >= (1 << (Word::BITS - 1)));
+
+ let d0 = d & 1;
+ let d10 = d >> 22;
+ let d21 = (d >> 11) + 1;
+ let d31 = (d >> 1) + d0;
+ let v0 = short_div((1 << 24) - (1 << 14) + (1 << 9), 24, d10, 10);
+ let (hi, _lo) = mulhilo(v0 * v0, d21);
+ let v1 = (v0 << 4) - hi - 1;
+
+ // Checks that the expression for `e` can be simplified in the way we did below.
+ debug_assert!(mulhilo(v1, d31).0 == (1 << 16) - 1);
+ let e = Word::MAX - v1.wrapping_mul(d31) + 1 + (v1 >> 1) * d0;
+
+ let (hi, _lo) = mulhilo(v1, e);
+ // Note: the paper does not mention a wrapping add here,
+ // but the 64-bit version has it at this stage, and the function panics without it
+ // when calculating a reciprocal for `Word::MAX`.
+ let v2 = (v1 << 15).wrapping_add(hi >> 1);
+
+ // The paper has `(v2 + 1) * d / 2^32` (there's another 2^32, but it's accounted for later).
+ // If `v2 == 2^32-1` this should give `d`, but we can't achieve this in our wrapping arithmetic.
+ // Hence the `ct_select()`.
+ let x = v2.wrapping_add(1);
+ let (hi, _lo) = mulhilo(x, d);
+ let hi = Limb::ct_select(Limb(d), Limb(hi), Limb(x).ct_is_nonzero()).0;
+
+ v2.wrapping_sub(hi).wrapping_sub(d)
+}
+
+/// Calculates the reciprocal of the given 64-bit divisor with the highest bit set.
+#[cfg(target_pointer_width = "64")]
+pub const fn reciprocal(d: Word) -> Word {
+ debug_assert!(d >= (1 << (Word::BITS - 1)));
+
+ let d0 = d & 1;
+ let d9 = d >> 55;
+ let d40 = (d >> 24) + 1;
+ let d63 = (d >> 1) + d0;
+ let v0 = short_div((1 << 19) - 3 * (1 << 8), 19, d9 as u32, 9) as u64;
+ let v1 = (v0 << 11) - ((v0 * v0 * d40) >> 40) - 1;
+ let v2 = (v1 << 13) + ((v1 * ((1 << 60) - v1 * d40)) >> 47);
+
+ // Checks that the expression for `e` can be simplified in the way we did below.
+ debug_assert!(mulhilo(v2, d63).0 == (1 << 32) - 1);
+ let e = Word::MAX - v2.wrapping_mul(d63) + 1 + (v2 >> 1) * d0;
+
+ let (hi, _lo) = mulhilo(v2, e);
+ let v3 = (v2 << 31).wrapping_add(hi >> 1);
+
+ // The paper has `(v3 + 1) * d / 2^64` (there's another 2^64, but it's accounted for later).
+ // If `v3 == 2^64-1` this should give `d`, but we can't achieve this in our wrapping arithmetic.
+ // Hence the `ct_select()`.
+ let x = v3.wrapping_add(1);
+ let (hi, _lo) = mulhilo(x, d);
+ let hi = Limb::ct_select(Limb(d), Limb(hi), Limb(x).ct_is_nonzero()).0;
+
+ v3.wrapping_sub(hi).wrapping_sub(d)
+}
+
+/// Returns `u32::MAX` if `a < b` and `0` otherwise.
+#[inline]
+const fn ct_lt(a: u32, b: u32) -> u32 {
+ let bit = (((!a) & b) | (((!a) | b) & (a.wrapping_sub(b)))) >> (u32::BITS - 1);
+ bit.wrapping_neg()
+}
+
+/// Returns `a` if `c == 0` and `b` if `c == u32::MAX`.
+#[inline(always)]
+const fn ct_select(a: u32, b: u32, c: u32) -> u32 {
+ a ^ (c & (a ^ b))
+}
+
+/// Calculates `dividend / divisor` in constant time, given `dividend` and `divisor`
+/// along with their maximum bitsizes.
+#[inline(always)]
+const fn short_div(dividend: u32, dividend_bits: u32, divisor: u32, divisor_bits: u32) -> u32 {
+ // TODO: this may be sped up even more using the fact that `dividend` is a known constant.
+
+ // In the paper this is a table lookup, but since we want it to be constant-time,
+ // we have to access all the elements of the table, which is quite large.
+ // So this shift-and-subtract approach is actually faster.
+
+ // Passing `dividend_bits` and `divisor_bits` because calling `.leading_zeros()`
+ // causes a significant slowdown, and we know those values anyway.
+
+ let mut dividend = dividend;
+ let mut divisor = divisor << (dividend_bits - divisor_bits);
+ let mut quotient: u32 = 0;
+ let mut i = dividend_bits - divisor_bits + 1;
+
+ while i > 0 {
+ i -= 1;
+ let bit = ct_lt(dividend, divisor);
+ dividend = ct_select(dividend.wrapping_sub(divisor), dividend, bit);
+ divisor >>= 1;
+ let inv_bit = !bit;
+ quotient |= (inv_bit >> (u32::BITS - 1)) << i;
+ }
+
+ quotient
+}
+
+/// Multiplies `x` and `y`, returning the most significant
+/// and the least significant words as `(hi, lo)`.
+#[inline(always)]
+const fn mulhilo(x: Word, y: Word) -> (Word, Word) {
+ let res = (x as WideWord) * (y as WideWord);
+ ((res >> Word::BITS) as Word, res as Word)
+}
+
+/// Adds wide numbers represented by pairs of (most significant word, least significant word)
+/// and returns the result in the same format `(hi, lo)`.
+#[inline(always)]
+const fn addhilo(x_hi: Word, x_lo: Word, y_hi: Word, y_lo: Word) -> (Word, Word) {
+ let res = (((x_hi as WideWord) << Word::BITS) | (x_lo as WideWord))
+ + (((y_hi as WideWord) << Word::BITS) | (y_lo as WideWord));
+ ((res >> Word::BITS) as Word, res as Word)
+}
+
+/// Calculate the quotient and the remainder of the division of a wide word
+/// (supplied as high and low words) by `d`, with a precalculated reciprocal `v`.
+#[inline(always)]
+const fn div2by1(u1: Word, u0: Word, reciprocal: &Reciprocal) -> (Word, Word) {
+ let d = reciprocal.divisor_normalized;
+
+ debug_assert!(d >= (1 << (Word::BITS - 1)));
+ debug_assert!(u1 < d);
+
+ let (q1, q0) = mulhilo(reciprocal.reciprocal, u1);
+ let (q1, q0) = addhilo(q1, q0, u1, u0);
+ let q1 = q1.wrapping_add(1);
+ let r = u0.wrapping_sub(q1.wrapping_mul(d));
+
+ let r_gt_q0 = Limb::ct_lt(Limb(q0), Limb(r));
+ let q1 = Limb::ct_select(Limb(q1), Limb(q1.wrapping_sub(1)), r_gt_q0).0;
+ let r = Limb::ct_select(Limb(r), Limb(r.wrapping_add(d)), r_gt_q0).0;
+
+ // If this was a normal `if`, we wouldn't need wrapping ops, because there would be no overflow.
+ // But since we calculate both results either way, we have to wrap.
+ // Added an assert to still check the lack of overflow in debug mode.
+ debug_assert!(r < d || q1 < Word::MAX);
+ let r_ge_d = Limb::ct_le(Limb(d), Limb(r));
+ let q1 = Limb::ct_select(Limb(q1), Limb(q1.wrapping_add(1)), r_ge_d).0;
+ let r = Limb::ct_select(Limb(r), Limb(r.wrapping_sub(d)), r_ge_d).0;
+
+ (q1, r)
+}
+
+/// A pre-calculated reciprocal for division by a single limb.
+#[derive(Copy, Clone, Debug, PartialEq, Eq)]
+pub struct Reciprocal {
+ divisor_normalized: Word,
+ shift: u32,
+ reciprocal: Word,
+}
+
+impl Reciprocal {
+ /// Pre-calculates a reciprocal for a known divisor,
+ /// to be used in the single-limb division later.
+ /// Returns the reciprocal, and the truthy value if `divisor != 0`
+ /// and the falsy value otherwise.
+ ///
+ /// Note: if the returned flag is falsy, the returned reciprocal object is still self-consistent
+ /// and can be passed to functions here without causing them to panic,
+ /// but the results are naturally not to be used.
+ pub const fn ct_new(divisor: Limb) -> (Self, CtChoice) {
+ // Assuming this is constant-time for primitive types.
+ let shift = divisor.0.leading_zeros();
+
+ #[allow(trivial_numeric_casts)]
+ let is_some = Limb((Word::BITS - shift) as Word).ct_is_nonzero();
+
+ // If `divisor = 0`, shifting `divisor` by `leading_zeros == Word::BITS` will cause a panic.
+ // Have to substitute a "bogus" shift in that case.
+ #[allow(trivial_numeric_casts)]
+ let shift_limb = Limb::ct_select(Limb::ZERO, Limb(shift as Word), is_some);
+
+ // Need to provide bogus normalized divisor and reciprocal too,
+ // so that we don't get a panic in low-level functions.
+ let divisor_normalized = divisor.shl(shift_limb);
+ let divisor_normalized = Limb::ct_select(Limb::MAX, divisor_normalized, is_some).0;
+
+ #[allow(trivial_numeric_casts)]
+ let shift = shift_limb.0 as u32;
+
+ (
+ Self {
+ divisor_normalized,
+ shift,
+ reciprocal: reciprocal(divisor_normalized),
+ },
+ is_some,
+ )
+ }
+
+ /// Returns a default instance of this object.
+ /// It is a self-consistent `Reciprocal` that will not cause panics in functions that take it.
+ ///
+ /// NOTE: intended for using it as a placeholder during compile-time array generation,
+ /// don't rely on the contents.
+ pub const fn default() -> Self {
+ Self {
+ divisor_normalized: Word::MAX,
+ shift: 0,
+ // The result of calling `reciprocal(Word::MAX)`
+ // This holds both for 32- and 64-bit versions.
+ reciprocal: 1,
+ }
+ }
+
+ /// A non-const-fn version of [`Self::ct_new`], wrapping the result in a `CtOption`.
+ pub fn new(divisor: Limb) -> CtOption<Self> {
+ let (rec, is_some) = Self::ct_new(divisor);
+ CtOption::new(rec, is_some.into())
+ }
+}
+
+impl ConditionallySelectable for Reciprocal {
+ fn conditional_select(a: &Self, b: &Self, choice: Choice) -> Self {
+ Self {
+ divisor_normalized: Word::conditional_select(
+ &a.divisor_normalized,
+ &b.divisor_normalized,
+ choice,
+ ),
+ shift: u32::conditional_select(&a.shift, &b.shift, choice),
+ reciprocal: Word::conditional_select(&a.reciprocal, &b.reciprocal, choice),
+ }
+ }
+}
+
+// `CtOption.map()` needs this; for some reason it doesn't use the value it already has
+// for the `None` branch.
+impl Default for Reciprocal {
+ fn default() -> Self {
+ Self::default()
+ }
+}
+
+/// Divides `u` by the divisor encoded in the `reciprocal`, and returns
+/// the quotient and the remainder.
+#[inline(always)]
+pub(crate) const fn div_rem_limb_with_reciprocal<const L: usize>(
+ u: &Uint<L>,
+ reciprocal: &Reciprocal,
+) -> (Uint<L>, Limb) {
+ let (u_shifted, u_hi) = u.shl_limb(reciprocal.shift as usize);
+ let mut r = u_hi.0;
+ let mut q = [Limb::ZERO; L];
+
+ let mut j = L;
+ while j > 0 {
+ j -= 1;
+ let (qj, rj) = div2by1(r, u_shifted.as_limbs()[j].0, reciprocal);
+ q[j] = Limb(qj);
+ r = rj;
+ }
+ (Uint::<L>::new(q), Limb(r >> reciprocal.shift))
+}
+
+#[cfg(test)]
+mod tests {
+ use super::{div2by1, Reciprocal};
+ use crate::{Limb, Word};
+ #[test]
+ fn div2by1_overflow() {
+ // A regression test for a situation when in div2by1() an operation (`q1 + 1`)
+ // that is protected from overflowing by a condition in the original paper (`r >= d`)
+ // still overflows because we're calculating the results for both branches.
+ let r = Reciprocal::new(Limb(Word::MAX - 1)).unwrap();
+ assert_eq!(
+ div2by1(Word::MAX - 2, Word::MAX - 63, &r),
+ (Word::MAX, Word::MAX - 65)
+ );
+ }
+}
diff --git a/vendor/crypto-bigint/src/uint/encoding.rs b/vendor/crypto-bigint/src/uint/encoding.rs
new file mode 100644
index 0000000..42f9de1
--- /dev/null
+++ b/vendor/crypto-bigint/src/uint/encoding.rs
@@ -0,0 +1,292 @@
+//! Const-friendly decoding operations for [`Uint`]
+
+#[cfg(all(feature = "der", feature = "generic-array"))]
+mod der;
+
+#[cfg(feature = "rlp")]
+mod rlp;
+
+use super::Uint;
+use crate::{Encoding, Limb, Word};
+
+impl<const LIMBS: usize> Uint<LIMBS> {
+ /// Create a new [`Uint`] from the provided big endian bytes.
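+ ///
+ /// Panics if the slice length differs from the byte size of the type; a doc-test sketch:
+ ///
+ /// ```
+ /// use crypto_bigint::U64;
+ /// let n = U64::from_be_slice(&[0, 0, 0, 0, 0, 0, 0, 42]);
+ /// assert_eq!(n, U64::from_u64(42));
+ /// ```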
+ pub const fn from_be_slice(bytes: &[u8]) -> Self {
+ assert!(
+ bytes.len() == Limb::BYTES * LIMBS,
+ "bytes are not the expected size"
+ );
+
+ let mut res = [Limb::ZERO; LIMBS];
+ let mut buf = [0u8; Limb::BYTES];
+ let mut i = 0;
+
+ while i < LIMBS {
+ let mut j = 0;
+ while j < Limb::BYTES {
+ buf[j] = bytes[i * Limb::BYTES + j];
+ j += 1;
+ }
+ res[LIMBS - i - 1] = Limb(Word::from_be_bytes(buf));
+ i += 1;
+ }
+
+ Uint::new(res)
+ }
+
+ /// Create a new [`Uint`] from the provided big endian hex string.
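+ ///
+ /// The string must be exactly twice the byte size of the type; a doc-test sketch:
+ ///
+ /// ```
+ /// use crypto_bigint::U64;
+ /// assert_eq!(U64::from_be_hex("000000000000002a"), U64::from_u64(42));
+ /// ```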
+ pub const fn from_be_hex(hex: &str) -> Self {
+ let bytes = hex.as_bytes();
+
+ assert!(
+ bytes.len() == Limb::BYTES * LIMBS * 2,
+ "hex string is not the expected size"
+ );
+
+ let mut res = [Limb::ZERO; LIMBS];
+ let mut buf = [0u8; Limb::BYTES];
+ let mut i = 0;
+ let mut err = 0;
+
+ while i < LIMBS {
+ let mut j = 0;
+ while j < Limb::BYTES {
+ let offset = (i * Limb::BYTES + j) * 2;
+ let (result, byte_err) = decode_hex_byte([bytes[offset], bytes[offset + 1]]);
+ err |= byte_err;
+ buf[j] = result;
+ j += 1;
+ }
+ res[LIMBS - i - 1] = Limb(Word::from_be_bytes(buf));
+ i += 1;
+ }
+
+ assert!(err == 0, "invalid hex byte");
+
+ Uint::new(res)
+ }
+
+ /// Create a new [`Uint`] from the provided little endian bytes.
+ pub const fn from_le_slice(bytes: &[u8]) -> Self {
+ assert!(
+ bytes.len() == Limb::BYTES * LIMBS,
+ "bytes are not the expected size"
+ );
+
+ let mut res = [Limb::ZERO; LIMBS];
+ let mut buf = [0u8; Limb::BYTES];
+ let mut i = 0;
+
+ while i < LIMBS {
+ let mut j = 0;
+ while j < Limb::BYTES {
+ buf[j] = bytes[i * Limb::BYTES + j];
+ j += 1;
+ }
+ res[i] = Limb(Word::from_le_bytes(buf));
+ i += 1;
+ }
+
+ Uint::new(res)
+ }
+
+ /// Create a new [`Uint`] from the provided little endian hex string.
+ pub const fn from_le_hex(hex: &str) -> Self {
+ let bytes = hex.as_bytes();
+
+ assert!(
+ bytes.len() == Limb::BYTES * LIMBS * 2,
+ "bytes are not the expected size"
+ );
+
+ let mut res = [Limb::ZERO; LIMBS];
+ let mut buf = [0u8; Limb::BYTES];
+ let mut i = 0;
+ let mut err = 0;
+
+ while i < LIMBS {
+ let mut j = 0;
+ while j < Limb::BYTES {
+ let offset = (i * Limb::BYTES + j) * 2;
+ let (result, byte_err) = decode_hex_byte([bytes[offset], bytes[offset + 1]]);
+ err |= byte_err;
+ buf[j] = result;
+ j += 1;
+ }
+ res[i] = Limb(Word::from_le_bytes(buf));
+ i += 1;
+ }
+
+ assert!(err == 0, "invalid hex byte");
+
+ Uint::new(res)
+ }
+
+ /// Serialize this [`Uint`] as big-endian, writing it into the provided
+ /// byte slice.
+ #[inline]
+ pub(crate) fn write_be_bytes(&self, out: &mut [u8]) {
+ debug_assert_eq!(out.len(), Limb::BYTES * LIMBS);
+
+ for (src, dst) in self
+ .limbs
+ .iter()
+ .rev()
+ .cloned()
+ .zip(out.chunks_exact_mut(Limb::BYTES))
+ {
+ dst.copy_from_slice(&src.to_be_bytes());
+ }
+ }
+
+ /// Serialize this [`Uint`] as little-endian, writing it into the provided
+ /// byte slice.
+ #[inline]
+ pub(crate) fn write_le_bytes(&self, out: &mut [u8]) {
+ debug_assert_eq!(out.len(), Limb::BYTES * LIMBS);
+
+ for (src, dst) in self
+ .limbs
+ .iter()
+ .cloned()
+ .zip(out.chunks_exact_mut(Limb::BYTES))
+ {
+ dst.copy_from_slice(&src.to_le_bytes());
+ }
+ }
+}
+
+/// Decode a single nibble of upper or lower hex
+#[inline(always)]
+const fn decode_nibble(src: u8) -> u16 {
+ let byte = src as i16;
+ let mut ret: i16 = -1;
+
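+ // Each check below is branchless: when `byte` lies strictly between the two
+ // bounds, both differences are negative i16 values greater than -256, so their
+ // AND has an all-ones high byte and the arithmetic right shift by 8 yields an
+ // all-ones mask selecting the `byte - offset` correction on that line.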
+ // 0-9 0x30-0x39
+ // if (byte > 0x2f && byte < 0x3a) ret += byte - 0x30 + 1; // -47
+ ret += (((0x2fi16 - byte) & (byte - 0x3a)) >> 8) & (byte - 47);
+ // A-F 0x41-0x46
+ // if (byte > 0x40 && byte < 0x47) ret += byte - 0x41 + 10 + 1; // -54
+ ret += (((0x40i16 - byte) & (byte - 0x47)) >> 8) & (byte - 54);
+ // a-f 0x61-0x66
+ // if (byte > 0x60 && byte < 0x67) ret += byte - 0x61 + 10 + 1; // -86
+ ret += (((0x60i16 - byte) & (byte - 0x67)) >> 8) & (byte - 86);
+
+ ret as u16
+}
+
+/// Decode a single byte encoded as two hexadecimal characters.
+/// The second element of the tuple is non-zero if the `bytes` values are not in the
+/// valid range (0-9, a-f, A-F).
+#[inline(always)]
+const fn decode_hex_byte(bytes: [u8; 2]) -> (u8, u16) {
+ let hi = decode_nibble(bytes[0]);
+ let lo = decode_nibble(bytes[1]);
+ let byte = (hi << 4) | lo;
+ let err = byte >> 8;
+ let result = byte as u8;
+ (result, err)
+}
+
+#[cfg(test)]
+mod tests {
+ use crate::Limb;
+ use hex_literal::hex;
+
+ #[cfg(feature = "alloc")]
+ use {crate::U128, alloc::format};
+
+ #[cfg(target_pointer_width = "32")]
+ use crate::U64 as UintEx;
+
+ #[cfg(target_pointer_width = "64")]
+ use crate::U128 as UintEx;
+
+ #[test]
+ #[cfg(target_pointer_width = "32")]
+ fn from_be_slice() {
+ let bytes = hex!("0011223344556677");
+ let n = UintEx::from_be_slice(&bytes);
+ assert_eq!(n.as_limbs(), &[Limb(0x44556677), Limb(0x00112233)]);
+ }
+
+ #[test]
+ #[cfg(target_pointer_width = "64")]
+ fn from_be_slice() {
+ let bytes = hex!("00112233445566778899aabbccddeeff");
+ let n = UintEx::from_be_slice(&bytes);
+ assert_eq!(
+ n.as_limbs(),
+ &[Limb(0x8899aabbccddeeff), Limb(0x0011223344556677)]
+ );
+ }
+
+ #[test]
+ #[cfg(target_pointer_width = "32")]
+ fn from_le_slice() {
+ let bytes = hex!("7766554433221100");
+ let n = UintEx::from_le_slice(&bytes);
+ assert_eq!(n.as_limbs(), &[Limb(0x44556677), Limb(0x00112233)]);
+ }
+
+ #[test]
+ #[cfg(target_pointer_width = "64")]
+ fn from_le_slice() {
+ let bytes = hex!("ffeeddccbbaa99887766554433221100");
+ let n = UintEx::from_le_slice(&bytes);
+ assert_eq!(
+ n.as_limbs(),
+ &[Limb(0x8899aabbccddeeff), Limb(0x0011223344556677)]
+ );
+ }
+
+ #[test]
+ #[cfg(target_pointer_width = "32")]
+ fn from_be_hex() {
+ let n = UintEx::from_be_hex("0011223344556677");
+ assert_eq!(n.as_limbs(), &[Limb(0x44556677), Limb(0x00112233)]);
+ }
+
+ #[test]
+ #[cfg(target_pointer_width = "64")]
+ fn from_be_hex() {
+ let n = UintEx::from_be_hex("00112233445566778899aabbccddeeff");
+ assert_eq!(
+ n.as_limbs(),
+ &[Limb(0x8899aabbccddeeff), Limb(0x0011223344556677)]
+ );
+ }
+
+ #[test]
+ #[cfg(target_pointer_width = "32")]
+ fn from_le_hex() {
+ let n = UintEx::from_le_hex("7766554433221100");
+ assert_eq!(n.as_limbs(), &[Limb(0x44556677), Limb(0x00112233)]);
+ }
+
+ #[test]
+ #[cfg(target_pointer_width = "64")]
+ fn from_le_hex() {
+ let n = UintEx::from_le_hex("ffeeddccbbaa99887766554433221100");
+ assert_eq!(
+ n.as_limbs(),
+ &[Limb(0x8899aabbccddeeff), Limb(0x0011223344556677)]
+ );
+ }
+
+ #[cfg(feature = "alloc")]
+ #[test]
+ fn hex_upper() {
+ let hex = "AAAAAAAABBBBBBBBCCCCCCCCDDDDDDDD";
+ let n = U128::from_be_hex(hex);
+ assert_eq!(hex, format!("{:X}", n));
+ }
+
+ #[cfg(feature = "alloc")]
+ #[test]
+ fn hex_lower() {
+ let hex = "aaaaaaaabbbbbbbbccccccccdddddddd";
+ let n = U128::from_be_hex(hex);
+ assert_eq!(hex, format!("{:x}", n));
+ }
+}
diff --git a/vendor/crypto-bigint/src/uint/encoding/der.rs b/vendor/crypto-bigint/src/uint/encoding/der.rs
new file mode 100644
index 0000000..dcd766b
--- /dev/null
+++ b/vendor/crypto-bigint/src/uint/encoding/der.rs
@@ -0,0 +1,64 @@
+//! Support for decoding/encoding [`Uint`] as an ASN.1 DER `INTEGER`.
+
+use crate::{generic_array::GenericArray, ArrayEncoding, Uint};
+use ::der::{
+ asn1::{AnyRef, UintRef},
+ DecodeValue, EncodeValue, FixedTag, Length, Tag,
+};
+
+impl<'a, const LIMBS: usize> TryFrom<AnyRef<'a>> for Uint<LIMBS>
+where
+ Uint<LIMBS>: ArrayEncoding,
+{
+ type Error = der::Error;
+
+ fn try_from(any: AnyRef<'a>) -> der::Result<Uint<LIMBS>> {
+ UintRef::try_from(any)?.try_into()
+ }
+}
+
+impl<'a, const LIMBS: usize> TryFrom<UintRef<'a>> for Uint<LIMBS>
+where
+ Uint<LIMBS>: ArrayEncoding,
+{
+ type Error = der::Error;
+
+ fn try_from(bytes: UintRef<'a>) -> der::Result<Uint<LIMBS>> {
+ let mut array = GenericArray::default();
+ let offset = array.len().saturating_sub(bytes.len().try_into()?);
+ array[offset..].copy_from_slice(bytes.as_bytes());
+ Ok(Uint::from_be_byte_array(array))
+ }
+}
+
+impl<'a, const LIMBS: usize> DecodeValue<'a> for Uint<LIMBS>
+where
+ Uint<LIMBS>: ArrayEncoding,
+{
+ fn decode_value<R: der::Reader<'a>>(reader: &mut R, header: der::Header) -> der::Result<Self> {
+ UintRef::decode_value(reader, header)?.try_into()
+ }
+}
+
+impl<const LIMBS: usize> EncodeValue for Uint<LIMBS>
+where
+ Uint<LIMBS>: ArrayEncoding,
+{
+ fn value_len(&self) -> der::Result<Length> {
+ // TODO(tarcieri): more efficient length calculation
+ let array = self.to_be_byte_array();
+ UintRef::new(&array)?.value_len()
+ }
+
+ fn encode_value(&self, encoder: &mut impl der::Writer) -> der::Result<()> {
+ let array = self.to_be_byte_array();
+ UintRef::new(&array)?.encode_value(encoder)
+ }
+}
+
+impl<const LIMBS: usize> FixedTag for Uint<LIMBS>
+where
+ Uint<LIMBS>: ArrayEncoding,
+{
+ const TAG: Tag = Tag::Integer;
+}
diff --git a/vendor/crypto-bigint/src/uint/encoding/rlp.rs b/vendor/crypto-bigint/src/uint/encoding/rlp.rs
new file mode 100644
index 0000000..112efb1
--- /dev/null
+++ b/vendor/crypto-bigint/src/uint/encoding/rlp.rs
@@ -0,0 +1,78 @@
+//! Recursive Length Prefix (RLP) encoding support.
+
+use crate::{Encoding, Uint};
+use rlp::{DecoderError, Rlp, RlpStream};
+
+impl<const LIMBS: usize> rlp::Encodable for Uint<LIMBS>
+where
+ Self: Encoding,
+{
+ fn rlp_append(&self, stream: &mut RlpStream) {
+ let bytes = self.to_be_bytes();
+ let mut bytes_stripped = bytes.as_ref();
+
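+ // RLP requires the canonical (shortest) big-endian encoding of an integer,
+ // i.e. no leading zero bytes, so strip them before encoding.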
+ while bytes_stripped.first().cloned() == Some(0) {
+ bytes_stripped = &bytes_stripped[1..];
+ }
+
+ stream.encoder().encode_value(bytes_stripped);
+ }
+}
+
+impl<const LIMBS: usize> rlp::Decodable for Uint<LIMBS>
+where
+ Self: Encoding,
+ <Self as Encoding>::Repr: Default,
+{
+ fn decode(rlp: &Rlp<'_>) -> Result<Self, DecoderError> {
+ rlp.decoder().decode_value(|bytes| {
+ if bytes.first().cloned() == Some(0) {
+ Err(rlp::DecoderError::RlpInvalidIndirection)
+ } else {
+ let mut repr = <Self as Encoding>::Repr::default();
+ let offset = repr
+ .as_ref()
+ .len()
+ .checked_sub(bytes.len())
+ .ok_or(DecoderError::RlpIsTooBig)?;
+
+ repr.as_mut()[offset..].copy_from_slice(bytes);
+ Ok(Self::from_be_bytes(repr))
+ }
+ })
+ }
+}
+
+#[cfg(test)]
+#[allow(clippy::unwrap_used)]
+mod tests {
+ use crate::U256;
+ use hex_literal::hex;
+
+ /// U256 test vectors from the `rlp` crate.
+ ///
+ /// <https://github.com/paritytech/parity-common/blob/faad8b6/rlp/tests/tests.rs#L216-L222>
+ const U256_VECTORS: &[(U256, &[u8])] = &[
+ (U256::ZERO, &hex!("80")),
+ (
+ U256::from_be_hex("0000000000000000000000000000000000000000000000000000000001000000"),
+ &hex!("8401000000"),
+ ),
+ (
+ U256::from_be_hex("00000000000000000000000000000000000000000000000000000000ffffffff"),
+ &hex!("84ffffffff"),
+ ),
+ (
+ U256::from_be_hex("8090a0b0c0d0e0f00910203040506077000000000000000100000000000012f0"),
+ &hex!("a08090a0b0c0d0e0f00910203040506077000000000000000100000000000012f0"),
+ ),
+ ];
+
+ #[test]
+ fn round_trip() {
+ for &(uint, expected_bytes) in U256_VECTORS {
+ assert_eq!(rlp::encode(&uint), expected_bytes);
+ assert_eq!(rlp::decode::<U256>(expected_bytes).unwrap(), uint);
+ }
+ }
+}
diff --git a/vendor/crypto-bigint/src/uint/extra_sizes.rs b/vendor/crypto-bigint/src/uint/extra_sizes.rs
new file mode 100644
index 0000000..fb639c7
--- /dev/null
+++ b/vendor/crypto-bigint/src/uint/extra_sizes.rs
@@ -0,0 +1,160 @@
+//! Support for additional integer sizes beyond the core set which is defined
+//! in the toplevel module.
+//!
+//! These are feature-gated to keep compile times down for applications which
+//! do not need them.
+// TODO(tarcieri): switch to a fully const generic implementation using `generic_const_exprs`
+
+use super::*;
+
+impl_uint_aliases! {
+ (U1088, 1088, "1088-bit"),
+ (U1152, 1152, "1152-bit"),
+ (U1216, 1216, "1216-bit"),
+ (U1344, 1344, "1344-bit"),
+ (U1408, 1408, "1408-bit"),
+ (U1472, 1472, "1472-bit"),
+ (U1600, 1600, "1600-bit"),
+ (U1664, 1664, "1664-bit"),
+ (U1728, 1728, "1728-bit"),
+ (U1856, 1856, "1856-bit"),
+ (U1920, 1920, "1920-bit"),
+ (U1984, 1984, "1984-bit"),
+ (U2112, 2112, "2112-bit"),
+ (U2176, 2176, "2176-bit"),
+ (U2240, 2240, "2240-bit"),
+ (U2304, 2304, "2304-bit"),
+ (U2368, 2368, "2368-bit"),
+ (U2432, 2432, "2432-bit"),
+ (U2496, 2496, "2496-bit"),
+ (U2560, 2560, "2560-bit"),
+ (U2624, 2624, "2624-bit"),
+ (U2688, 2688, "2688-bit"),
+ (U2752, 2752, "2752-bit"),
+ (U2816, 2816, "2816-bit"),
+ (U2880, 2880, "2880-bit"),
+ (U2944, 2944, "2944-bit"),
+ (U3008, 3008, "3008-bit"),
+ (U3136, 3136, "3136-bit"),
+ (U3200, 3200, "3200-bit"),
+ (U3264, 3264, "3264-bit"),
+ (U3328, 3328, "3328-bit"),
+ (U3392, 3392, "3392-bit"),
+ (U3456, 3456, "3456-bit"),
+ (U3520, 3520, "3520-bit"),
+ (U3648, 3648, "3648-bit"),
+ (U3712, 3712, "3712-bit"),
+ (U3776, 3776, "3776-bit"),
+ (U3840, 3840, "3840-bit"),
+ (U3904, 3904, "3904-bit"),
+ (U3968, 3968, "3968-bit"),
+ (U4032, 4032, "4032-bit"),
+ (U4160, 4160, "4160-bit"),
+ (U4288, 4288, "4288-bit"),
+ (U4416, 4416, "4416-bit"),
+ (U4480, 4480, "4480-bit"),
+ (U4544, 4544, "4544-bit"),
+ (U4608, 4608, "4608-bit"),
+ (U4672, 4672, "4672-bit"),
+ (U4736, 4736, "4736-bit"),
+ (U4800, 4800, "4800-bit"),
+ (U4864, 4864, "4864-bit"),
+ (U4928, 4928, "4928-bit"),
+ (U4992, 4992, "4992-bit"),
+ (U5056, 5056, "5056-bit"),
+ (U5120, 5120, "5120-bit"),
+ (U5184, 5184, "5184-bit"),
+ (U5248, 5248, "5248-bit"),
+ (U5312, 5312, "5312-bit"),
+ (U5376, 5376, "5376-bit"),
+ (U5440, 5440, "5440-bit"),
+ (U5504, 5504, "5504-bit"),
+ (U5568, 5568, "5568-bit"),
+ (U5632, 5632, "5632-bit"),
+ (U5696, 5696, "5696-bit"),
+ (U5760, 5760, "5760-bit"),
+ (U5824, 5824, "5824-bit"),
+ (U5888, 5888, "5888-bit"),
+ (U5952, 5952, "5952-bit"),
+ (U6016, 6016, "6016-bit"),
+ (U6080, 6080, "6080-bit"),
+ (U6208, 6208, "6208-bit"),
+ (U6272, 6272, "6272-bit"),
+ (U6336, 6336, "6336-bit"),
+ (U6400, 6400, "6400-bit"),
+ (U6464, 6464, "6464-bit"),
+ (U6528, 6528, "6528-bit"),
+ (U6592, 6592, "6592-bit"),
+ (U6656, 6656, "6656-bit"),
+ (U6720, 6720, "6720-bit"),
+ (U6784, 6784, "6784-bit"),
+ (U6848, 6848, "6848-bit"),
+ (U6912, 6912, "6912-bit"),
+ (U6976, 6976, "6976-bit"),
+ (U7040, 7040, "7040-bit"),
+ (U7104, 7104, "7104-bit"),
+ (U7168, 7168, "7168-bit"),
+ (U7232, 7232, "7232-bit"),
+ (U7296, 7296, "7296-bit"),
+ (U7360, 7360, "7360-bit"),
+ (U7424, 7424, "7424-bit"),
+ (U7488, 7488, "7488-bit"),
+ (U7552, 7552, "7552-bit"),
+ (U7616, 7616, "7616-bit"),
+ (U7680, 7680, "7680-bit"),
+ (U7744, 7744, "7744-bit"),
+ (U7808, 7808, "7808-bit"),
+ (U7872, 7872, "7872-bit"),
+ (U7936, 7936, "7936-bit"),
+ (U8000, 8000, "8000-bit"),
+ (U8064, 8064, "8064-bit"),
+ (U8128, 8128, "8128-bit")
+}
+
+impl_uint_concat_split_even! {
+ U1152,
+ U1408,
+ U1664,
+ U1920,
+ U2176,
+ U2304,
+ U2432,
+ U2560,
+ U2688,
+ U2816,
+ U2944,
+ U3200,
+ U3328,
+ U3456,
+ U3712,
+ U3840,
+ U3968,
+ U4480,
+ U4608,
+ U4736,
+ U4864,
+ U4992,
+ U5120,
+ U5248,
+ U5376,
+ U5504,
+ U5632,
+ U5760,
+ U5888,
+ U6016,
+ U6272,
+ U6400,
+ U6528,
+ U6656,
+ U6784,
+ U6912,
+ U7040,
+ U7168,
+ U7296,
+ U7424,
+ U7552,
+ U7680,
+ U7808,
+ U7936,
+ U8064,
+}
diff --git a/vendor/crypto-bigint/src/uint/from.rs b/vendor/crypto-bigint/src/uint/from.rs
new file mode 100644
index 0000000..0f1bfbc
--- /dev/null
+++ b/vendor/crypto-bigint/src/uint/from.rs
@@ -0,0 +1,271 @@
+//! `From`-like conversions for [`Uint`].
+
+use crate::{ConcatMixed, Limb, Uint, WideWord, Word, U128, U64};
+
+impl<const LIMBS: usize> Uint<LIMBS> {
+ /// Create a [`Uint`] from a `u8` (const-friendly)
+ // TODO(tarcieri): replace with `const impl From<u8>` when stable
+ pub const fn from_u8(n: u8) -> Self {
+ assert!(LIMBS >= 1, "number of limbs must be greater than zero");
+ let mut limbs = [Limb::ZERO; LIMBS];
+ limbs[0].0 = n as Word;
+ Self { limbs }
+ }
+
+ /// Create a [`Uint`] from a `u16` (const-friendly)
+ // TODO(tarcieri): replace with `const impl From<u16>` when stable
+ pub const fn from_u16(n: u16) -> Self {
+ assert!(LIMBS >= 1, "number of limbs must be greater than zero");
+ let mut limbs = [Limb::ZERO; LIMBS];
+ limbs[0].0 = n as Word;
+ Self { limbs }
+ }
+
+ /// Create a [`Uint`] from a `u32` (const-friendly)
+ // TODO(tarcieri): replace with `const impl From<u32>` when stable
+ #[allow(trivial_numeric_casts)]
+ pub const fn from_u32(n: u32) -> Self {
+ assert!(LIMBS >= 1, "number of limbs must be greater than zero");
+ let mut limbs = [Limb::ZERO; LIMBS];
+ limbs[0].0 = n as Word;
+ Self { limbs }
+ }
+
+ /// Create a [`Uint`] from a `u64` (const-friendly)
+ // TODO(tarcieri): replace with `const impl From<u64>` when stable
+ #[cfg(target_pointer_width = "32")]
+ pub const fn from_u64(n: u64) -> Self {
+ assert!(LIMBS >= 2, "number of limbs must be two or greater");
+ let mut limbs = [Limb::ZERO; LIMBS];
+ limbs[0].0 = (n & 0xFFFFFFFF) as u32;
+ limbs[1].0 = (n >> 32) as u32;
+ Self { limbs }
+ }
+
+ /// Create a [`Uint`] from a `u64` (const-friendly)
+ // TODO(tarcieri): replace with `const impl From<u64>` when stable
+ #[cfg(target_pointer_width = "64")]
+ pub const fn from_u64(n: u64) -> Self {
+ assert!(LIMBS >= 1, "number of limbs must be greater than zero");
+ let mut limbs = [Limb::ZERO; LIMBS];
+ limbs[0].0 = n;
+ Self { limbs }
+ }
+
+ /// Create a [`Uint`] from a `u128` (const-friendly)
+ // TODO(tarcieri): replace with `const impl From<u128>` when stable
+ pub const fn from_u128(n: u128) -> Self {
+ assert!(
+ LIMBS >= (128 / Limb::BITS),
+ "number of limbs must be greater than zero"
+ );
+
+ let lo = U64::from_u64((n & 0xffff_ffff_ffff_ffff) as u64);
+ let hi = U64::from_u64((n >> 64) as u64);
+
+ let mut limbs = [Limb::ZERO; LIMBS];
+
+ let mut i = 0;
+ while i < lo.limbs.len() {
+ limbs[i] = lo.limbs[i];
+ i += 1;
+ }
+
+ let mut j = 0;
+ while j < hi.limbs.len() {
+ limbs[i + j] = hi.limbs[j];
+ j += 1;
+ }
+
+ Self { limbs }
+ }
+
+ /// Create a [`Uint`] from a `Word` (const-friendly)
+ // TODO(tarcieri): replace with `const impl From<Word>` when stable
+ pub const fn from_word(n: Word) -> Self {
+ assert!(LIMBS >= 1, "number of limbs must be greater than zero");
+ let mut limbs = [Limb::ZERO; LIMBS];
+ limbs[0].0 = n;
+ Self { limbs }
+ }
+
+ /// Create a [`Uint`] from a `WideWord` (const-friendly)
+ // TODO(tarcieri): replace with `const impl From<WideWord>` when stable
+ pub const fn from_wide_word(n: WideWord) -> Self {
+ assert!(LIMBS >= 2, "number of limbs must be two or greater");
+ let mut limbs = [Limb::ZERO; LIMBS];
+ limbs[0].0 = n as Word;
+ limbs[1].0 = (n >> Limb::BITS) as Word;
+ Self { limbs }
+ }
+}
+
+impl<const LIMBS: usize> From<u8> for Uint<LIMBS> {
+ fn from(n: u8) -> Self {
+ // TODO(tarcieri): const where clause when possible
+ debug_assert!(LIMBS > 0, "limbs must be non-zero");
+ Self::from_u8(n)
+ }
+}
+
+impl<const LIMBS: usize> From<u16> for Uint<LIMBS> {
+ fn from(n: u16) -> Self {
+ // TODO(tarcieri): const where clause when possible
+ debug_assert!(LIMBS > 0, "limbs must be non-zero");
+ Self::from_u16(n)
+ }
+}
+
+impl<const LIMBS: usize> From<u32> for Uint<LIMBS> {
+ fn from(n: u32) -> Self {
+ // TODO(tarcieri): const where clause when possible
+ debug_assert!(LIMBS > 0, "limbs must be non-zero");
+ Self::from_u32(n)
+ }
+}
+
+impl<const LIMBS: usize> From<u64> for Uint<LIMBS> {
+ fn from(n: u64) -> Self {
+ // TODO(tarcieri): const where clause when possible
+ debug_assert!(LIMBS >= (64 / Limb::BITS), "not enough limbs");
+ Self::from_u64(n)
+ }
+}
+
+impl<const LIMBS: usize> From<u128> for Uint<LIMBS> {
+ fn from(n: u128) -> Self {
+ // TODO(tarcieri): const where clause when possible
+ debug_assert!(LIMBS >= (128 / Limb::BITS), "not enough limbs");
+ Self::from_u128(n)
+ }
+}
+
+#[cfg(target_pointer_width = "32")]
+impl From<U64> for u64 {
+ fn from(n: U64) -> u64 {
+ (n.limbs[0].0 as u64) | ((n.limbs[1].0 as u64) << 32)
+ }
+}
+
+#[cfg(target_pointer_width = "64")]
+impl From<U64> for u64 {
+ fn from(n: U64) -> u64 {
+ n.limbs[0].into()
+ }
+}
+
+impl From<U128> for u128 {
+ fn from(n: U128) -> u128 {
+ let mut i = U128::LIMBS - 1;
+ let mut res = n.limbs[i].0 as u128;
+ while i > 0 {
+ i -= 1;
+ res = (res << Limb::BITS) | (n.limbs[i].0 as u128);
+ }
+ res
+ }
+}
+
+impl<const LIMBS: usize> From<[Word; LIMBS]> for Uint<LIMBS> {
+ fn from(arr: [Word; LIMBS]) -> Self {
+ Self::from_words(arr)
+ }
+}
+
+impl<const LIMBS: usize> From<Uint<LIMBS>> for [Word; LIMBS] {
+ fn from(n: Uint<LIMBS>) -> [Word; LIMBS] {
+ *n.as_ref()
+ }
+}
+
+impl<const LIMBS: usize> From<[Limb; LIMBS]> for Uint<LIMBS> {
+ fn from(limbs: [Limb; LIMBS]) -> Self {
+ Self { limbs }
+ }
+}
+
+impl<const LIMBS: usize> From<Uint<LIMBS>> for [Limb; LIMBS] {
+ fn from(n: Uint<LIMBS>) -> [Limb; LIMBS] {
+ n.limbs
+ }
+}
+
+impl<const LIMBS: usize> From<Limb> for Uint<LIMBS> {
+ fn from(limb: Limb) -> Self {
+ limb.0.into()
+ }
+}
+
+impl<const L: usize, const H: usize, const LIMBS: usize> From<(Uint<L>, Uint<H>)> for Uint<LIMBS>
+where
+ Uint<H>: ConcatMixed<Uint<L>, MixedOutput = Uint<LIMBS>>,
+{
+ fn from(nums: (Uint<L>, Uint<H>)) -> Uint<LIMBS> {
+ nums.1.concat_mixed(&nums.0)
+ }
+}
+
+impl<const L: usize, const H: usize, const LIMBS: usize> From<&(Uint<L>, Uint<H>)> for Uint<LIMBS>
+where
+ Uint<H>: ConcatMixed<Uint<L>, MixedOutput = Uint<LIMBS>>,
+{
+ fn from(nums: &(Uint<L>, Uint<H>)) -> Uint<LIMBS> {
+ nums.1.concat_mixed(&nums.0)
+ }
+}
+
+impl<const L: usize, const H: usize, const LIMBS: usize> From<Uint<LIMBS>> for (Uint<L>, Uint<H>) {
+ fn from(num: Uint<LIMBS>) -> (Uint<L>, Uint<H>) {
+ crate::uint::split::split_mixed(&num)
+ }
+}
+
+impl<const LIMBS: usize, const LIMBS2: usize> From<&Uint<LIMBS>> for Uint<LIMBS2> {
+ fn from(num: &Uint<LIMBS>) -> Uint<LIMBS2> {
+ num.resize()
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use crate::{Limb, Word, U128};
+
+ #[cfg(target_pointer_width = "32")]
+ use crate::U64 as UintEx;
+
+ #[cfg(target_pointer_width = "64")]
+ use crate::U128 as UintEx;
+
+ #[test]
+ fn from_u8() {
+ let n = UintEx::from(42u8);
+ assert_eq!(n.as_limbs(), &[Limb(42), Limb(0)]);
+ }
+
+ #[test]
+ fn from_u16() {
+ let n = UintEx::from(42u16);
+ assert_eq!(n.as_limbs(), &[Limb(42), Limb(0)]);
+ }
+
+ #[test]
+ fn from_u64() {
+ let n = UintEx::from(42u64);
+ assert_eq!(n.as_limbs(), &[Limb(42), Limb(0)]);
+ }
+
+ #[test]
+ fn from_u128() {
+ let n = U128::from(42u128);
+ assert_eq!(&n.as_limbs()[..2], &[Limb(42), Limb(0)]);
+ assert_eq!(u128::from(n), 42u128);
+ }
+
+ #[test]
+ fn array_round_trip() {
+ let arr1 = [1, 2];
+ let n = UintEx::from(arr1);
+ let arr2: [Word; 2] = n.into();
+ assert_eq!(arr1, arr2);
+ }
+}
diff --git a/vendor/crypto-bigint/src/uint/inv_mod.rs b/vendor/crypto-bigint/src/uint/inv_mod.rs
new file mode 100644
index 0000000..e41dc66
--- /dev/null
+++ b/vendor/crypto-bigint/src/uint/inv_mod.rs
@@ -0,0 +1,306 @@
+use super::Uint;
+use crate::CtChoice;
+
+impl<const LIMBS: usize> Uint<LIMBS> {
+ /// Computes 1/`self` mod `2^k`.
+ /// This method is constant-time w.r.t. `self` but not `k`.
+ ///
+ /// Conditions: `self` < 2^k and `self` must be odd
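+ ///
+ /// A small sketch of the expected behavior:
+ ///
+ /// ```
+ /// use crypto_bigint::U64;
+ /// // 3 * 11 = 33 = 1 mod 16, so the inverse of 3 mod 2^4 is 11.
+ /// assert_eq!(U64::from(3u64).inv_mod2k_vartime(4), U64::from(11u64));
+ /// ```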
+ pub const fn inv_mod2k_vartime(&self, k: usize) -> Self {
+ // Using Algorithm 3 from "A Secure Algorithm for Inversion Modulo 2k"
+ // by Sadiel de la Fe and Carles Ferrer.
+ // See <https://www.mdpi.com/2410-387X/2/3/23>.
+
+ // Note that we are not using Algorithm 4, since we take a different approach
+ // to enforcing constant-timeness w.r.t. `self`.
+
+ let mut x = Self::ZERO; // keeps `x` during iterations
+ let mut b = Self::ONE; // keeps `b_i` during iterations
+ let mut i = 0;
+
+ while i < k {
+ // X_i = b_i mod 2
+ let x_i = b.limbs[0].0 & 1;
+ let x_i_choice = CtChoice::from_lsb(x_i);
+ // b_{i+1} = (b_i - a * X_i) / 2
+ b = Self::ct_select(&b, &b.wrapping_sub(self), x_i_choice).shr_vartime(1);
+ // Store the X_i bit in the result (x = x | (X_i << i))
+ x = x.bitor(&Uint::from_word(x_i).shl_vartime(i));
+
+ i += 1;
+ }
+
+ x
+ }
+
+ /// Computes 1/`self` mod `2^k`.
+ ///
+ /// Conditions: `self` < 2^k and `self` must be odd
+ pub const fn inv_mod2k(&self, k: usize) -> Self {
+ // This is the same algorithm as in `inv_mod2k_vartime()`,
+ // but made constant-time w.r.t `k` as well.
+
+ let mut x = Self::ZERO; // keeps `x` during iterations
+ let mut b = Self::ONE; // keeps `b_i` during iterations
+ let mut i = 0;
+
+ while i < Self::BITS {
+ // Only iterations for i = 0..k need to change `x`,
+ // the rest are dummy ones performed for the sake of constant-timeness.
+ let within_range = CtChoice::from_usize_lt(i, k);
+
+ // X_i = b_i mod 2
+ let x_i = b.limbs[0].0 & 1;
+ let x_i_choice = CtChoice::from_lsb(x_i);
+ // b_{i+1} = (b_i - a * X_i) / 2
+ b = Self::ct_select(&b, &b.wrapping_sub(self), x_i_choice).shr_vartime(1);
+
+ // Store the X_i bit in the result (x = x | (X_i << i))
+ // Don't change the result in dummy iterations.
+ let x_i_choice = x_i_choice.and(within_range);
+ x = x.set_bit(i, x_i_choice);
+
+ i += 1;
+ }
+
+ x
+ }
+
+ /// Computes the multiplicative inverse of `self` mod `modulus`, where `modulus` is odd.
+ /// In other words `self^-1 mod modulus`.
+ /// `bits` and `modulus_bits` are the bounds on the bit size
+ /// of `self` and `modulus`, respectively
+ /// (the inversion speed will be proportional to `bits + modulus_bits`).
+ /// The second element of the tuple is the truthy value if an inverse exists,
+ /// otherwise it is a falsy value.
+ ///
+ /// **Note:** variable time in `bits` and `modulus_bits`.
+ ///
+ /// The algorithm is the same as in GMP 6.2.1's `mpn_sec_invert`.
+ pub const fn inv_odd_mod_bounded(
+ &self,
+ modulus: &Self,
+ bits: usize,
+ modulus_bits: usize,
+ ) -> (Self, CtChoice) {
+ debug_assert!(modulus.ct_is_odd().is_true_vartime());
+
+ let mut a = *self;
+
+ let mut u = Uint::ONE;
+ let mut v = Uint::ZERO;
+
+ let mut b = *modulus;
+
+ // `bit_size` can be anything >= `self.bits()` + `modulus.bits()`; we set it to the minimum.
+ let bit_size = bits + modulus_bits;
+
+ let mut m1hp = *modulus;
+ let (m1hp_new, carry) = m1hp.shr_1();
+ debug_assert!(carry.is_true_vartime());
+ m1hp = m1hp_new.wrapping_add(&Uint::ONE);
+
+ let mut i = 0;
+ while i < bit_size {
+ debug_assert!(b.ct_is_odd().is_true_vartime());
+
+ let self_odd = a.ct_is_odd();
+
+ // Set `self -= b` if `self` is odd.
+ let (new_a, swap) = a.conditional_wrapping_sub(&b, self_odd);
+ // Set `b += self` if `swap` is true.
+ b = Uint::ct_select(&b, &b.wrapping_add(&new_a), swap);
+ // Negate `self` if `swap` is true.
+ a = new_a.conditional_wrapping_neg(swap);
+
+ let (new_u, new_v) = Uint::ct_swap(&u, &v, swap);
+ let (new_u, cy) = new_u.conditional_wrapping_sub(&new_v, self_odd);
+ let (new_u, cyy) = new_u.conditional_wrapping_add(modulus, cy);
+ debug_assert!(cy.is_true_vartime() == cyy.is_true_vartime());
+
+ let (new_a, overflow) = a.shr_1();
+ debug_assert!(!overflow.is_true_vartime());
+ let (new_u, cy) = new_u.shr_1();
+ let (new_u, cy) = new_u.conditional_wrapping_add(&m1hp, cy);
+ debug_assert!(!cy.is_true_vartime());
+
+ a = new_a;
+ u = new_u;
+ v = new_v;
+
+ i += 1;
+ }
+
+ debug_assert!(!a.ct_is_nonzero().is_true_vartime());
+
+ (v, Uint::ct_eq(&b, &Uint::ONE))
+ }
+
+ /// Computes the multiplicative inverse of `self` mod `modulus`, where `modulus` is odd.
+ /// Returns `(inverse, CtChoice::TRUE)` if an inverse exists,
+ /// otherwise `(undefined, CtChoice::FALSE)`.
+ pub const fn inv_odd_mod(&self, modulus: &Self) -> (Self, CtChoice) {
+ self.inv_odd_mod_bounded(modulus, Uint::<LIMBS>::BITS, Uint::<LIMBS>::BITS)
+ }
+
+ /// Computes the multiplicative inverse of `self` mod `modulus`.
+ /// Returns `(inverse, CtChoice::TRUE)` if an inverse exists,
+ /// otherwise `(undefined, CtChoice::FALSE)`.
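+ ///
+ /// A small sketch with an even modulus:
+ ///
+ /// ```
+ /// use crypto_bigint::U64;
+ /// let (inv, exists) = U64::from(3u64).inv_mod(&U64::from(10u64));
+ /// assert!(exists.is_true_vartime());
+ /// assert_eq!(inv, U64::from(7u64)); // 3 * 7 = 21 = 1 mod 10
+ /// ```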
+ pub const fn inv_mod(&self, modulus: &Self) -> (Self, CtChoice) {
+ // Decompose `modulus = s * 2^k` where `s` is odd
+ let k = modulus.trailing_zeros();
+ let s = modulus.shr(k);
+
+ // Decompose `self` into RNS with moduli `2^k` and `s` and calculate the inverses.
+ // Using the fact that `(z^{-1} mod (m1 * m2)) mod m1 == z^{-1} mod m1`
+ let (a, a_is_some) = self.inv_odd_mod(&s);
+ let b = self.inv_mod2k(k);
+ // inverse modulo 2^k exists either if `k` is 0 or if `self` is odd.
+ let b_is_some = CtChoice::from_usize_being_nonzero(k)
+ .not()
+ .or(self.ct_is_odd());
+
+ // Restore from RNS:
+ // self^{-1} = a mod s = b mod 2^k
+ // => self^{-1} = a + s * ((b - a) * s^(-1) mod 2^k)
+ // (essentially one step of Garner's algorithm for recovery from RNS).
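+ // E.g. for self = 3 and modulus = 10 = 5 * 2^1: a = 3^{-1} mod 5 = 2,
+ // b = 3^{-1} mod 2 = 1, t = (b - a) * s^{-1} mod 2 = 1, and the result
+ // is a + s * t = 2 + 5 * 1 = 7; indeed 3 * 7 = 21 = 1 mod 10.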
+
+ let m_odd_inv = s.inv_mod2k(k); // `s` is odd, so this always exists
+
+ // This part is mod 2^k
+ let mask = Uint::ONE.shl(k).wrapping_sub(&Uint::ONE);
+ let t = (b.wrapping_sub(&a).wrapping_mul(&m_odd_inv)).bitand(&mask);
+
+ // Will not overflow since `a <= s - 1`, `t <= 2^k - 1`,
+ // so `a + s * t <= s * 2^k - 1 == modulus - 1`.
+ let result = a.wrapping_add(&s.wrapping_mul(&t));
+ (result, a_is_some.and(b_is_some))
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use crate::{U1024, U256, U64};
+
+ #[test]
+ fn inv_mod2k() {
+ let v =
+ U256::from_be_hex("fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f");
+ let e =
+ U256::from_be_hex("3642e6faeaac7c6663b93d3d6a0d489e434ddc0123db5fa627c7f6e22ddacacf");
+ let a = v.inv_mod2k(256);
+ assert_eq!(e, a);
+
+ let v =
+ U256::from_be_hex("fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364141");
+ let e =
+ U256::from_be_hex("261776f29b6b106c7680cf3ed83054a1af5ae537cb4613dbb4f20099aa774ec1");
+ let a = v.inv_mod2k(256);
+ assert_eq!(e, a);
+ }
+
+ #[test]
+ fn test_invert_odd() {
+ let a = U1024::from_be_hex(concat![
+ "000225E99153B467A5B451979A3F451DAEF3BF8D6C6521D2FA24BBB17F29544E",
+ "347A412B065B75A351EA9719E2430D2477B11CC9CF9C1AD6EDEE26CB15F463F8",
+ "BCC72EF87EA30288E95A48AA792226CEC959DCB0672D8F9D80A54CBBEA85CAD8",
+ "382EC224DEB2F5784E62D0CC2F81C2E6AD14EBABE646D6764B30C32B87688985"
+ ]);
+ let m = U1024::from_be_hex(concat![
+ "D509E7854ABDC81921F669F1DC6F61359523F3949803E58ED4EA8BC16483DC6F",
+ "37BFE27A9AC9EEA2969B357ABC5C0EE214BE16A7D4C58FC620D5B5A20AFF001A",
+ "D198D3155E5799DC4EA76652D64983A7E130B5EACEBAC768D28D589C36EC749C",
+ "558D0B64E37CD0775C0D0104AE7D98BA23C815185DD43CD8B16292FD94156767"
+ ]);
+ let expected = U1024::from_be_hex(concat![
+ "B03623284B0EBABCABD5C5881893320281460C0A8E7BF4BFDCFFCBCCBF436A55",
+ "D364235C8171E46C7D21AAD0680676E57274A8FDA6D12768EF961CACDD2DAE57",
+ "88D93DA5EB8EDC391EE3726CDCF4613C539F7D23E8702200CB31B5ED5B06E5CA",
+ "3E520968399B4017BF98A864FABA2B647EFC4998B56774D4F2CB026BC024A336"
+ ]);
+
+ let (res, is_some) = a.inv_odd_mod(&m);
+ assert!(is_some.is_true_vartime());
+ assert_eq!(res, expected);
+
+ // Even though it is less efficient, it still works
+ let (res, is_some) = a.inv_mod(&m);
+ assert!(is_some.is_true_vartime());
+ assert_eq!(res, expected);
+ }
+
+ #[test]
+ fn test_invert_even() {
+ let a = U1024::from_be_hex(concat![
+ "000225E99153B467A5B451979A3F451DAEF3BF8D6C6521D2FA24BBB17F29544E",
+ "347A412B065B75A351EA9719E2430D2477B11CC9CF9C1AD6EDEE26CB15F463F8",
+ "BCC72EF87EA30288E95A48AA792226CEC959DCB0672D8F9D80A54CBBEA85CAD8",
+ "382EC224DEB2F5784E62D0CC2F81C2E6AD14EBABE646D6764B30C32B87688985"
+ ]);
+ let m = U1024::from_be_hex(concat![
+ "D509E7854ABDC81921F669F1DC6F61359523F3949803E58ED4EA8BC16483DC6F",
+ "37BFE27A9AC9EEA2969B357ABC5C0EE214BE16A7D4C58FC620D5B5A20AFF001A",
+ "D198D3155E5799DC4EA76652D64983A7E130B5EACEBAC768D28D589C36EC749C",
+ "558D0B64E37CD0775C0D0104AE7D98BA23C815185DD43CD8B16292FD94156000"
+ ]);
+ let expected = U1024::from_be_hex(concat![
+ "1EBF391306817E1BC610E213F4453AD70911CCBD59A901B2A468A4FC1D64F357",
+ "DBFC6381EC5635CAA664DF280028AF4651482C77A143DF38D6BFD4D64B6C0225",
+ "FC0E199B15A64966FB26D88A86AD144271F6BDCD3D63193AB2B3CC53B99F21A3",
+ "5B9BFAE5D43C6BC6E7A9856C71C7318C76530E9E5AE35882D5ABB02F1696874D",
+ ]);
+
+ let (res, is_some) = a.inv_mod(&m);
+ assert!(is_some.is_true_vartime());
+ assert_eq!(res, expected);
+ }
+
+ #[test]
+ fn test_invert_bounded() {
+ let a = U1024::from_be_hex(concat![
+ "0000000000000000000000000000000000000000000000000000000000000000",
+ "347A412B065B75A351EA9719E2430D2477B11CC9CF9C1AD6EDEE26CB15F463F8",
+ "BCC72EF87EA30288E95A48AA792226CEC959DCB0672D8F9D80A54CBBEA85CAD8",
+ "382EC224DEB2F5784E62D0CC2F81C2E6AD14EBABE646D6764B30C32B87688985"
+ ]);
+ let m = U1024::from_be_hex(concat![
+ "0000000000000000000000000000000000000000000000000000000000000000",
+ "0000000000000000000000000000000000000000000000000000000000000000",
+ "D198D3155E5799DC4EA76652D64983A7E130B5EACEBAC768D28D589C36EC749C",
+ "558D0B64E37CD0775C0D0104AE7D98BA23C815185DD43CD8B16292FD94156767"
+ ]);
+
+ let (res, is_some) = a.inv_odd_mod_bounded(&m, 768, 512);
+
+ let expected = U1024::from_be_hex(concat![
+ "0000000000000000000000000000000000000000000000000000000000000000",
+ "0000000000000000000000000000000000000000000000000000000000000000",
+ "0DCC94E2FE509E6EBBA0825645A38E73EF85D5927C79C1AD8FFE7C8DF9A822FA",
+ "09EB396A21B1EF05CBE51E1A8EF284EF01EBDD36A9A4EA17039D8EEFDD934768"
+ ]);
+ assert!(is_some.is_true_vartime());
+ assert_eq!(res, expected);
+ }
+
+ #[test]
+ fn test_invert_small() {
+ let a = U64::from(3u64);
+ let m = U64::from(13u64);
+
+ let (res, is_some) = a.inv_odd_mod(&m);
+
+ assert!(is_some.is_true_vartime());
+ assert_eq!(U64::from(9u64), res);
+ }
+
+ #[test]
+ fn test_no_inverse_small() {
+ let a = U64::from(14u64);
+ let m = U64::from(49u64);
+
+ let (_res, is_some) = a.inv_odd_mod(&m);
+
+ assert!(!is_some.is_true_vartime());
+ }
+}
diff --git a/vendor/crypto-bigint/src/uint/macros.rs b/vendor/crypto-bigint/src/uint/macros.rs
new file mode 100644
index 0000000..47d32e7
--- /dev/null
+++ b/vendor/crypto-bigint/src/uint/macros.rs
@@ -0,0 +1,115 @@
+// TODO(tarcieri): use `generic_const_exprs` when stable to make generic around bits.
+macro_rules! impl_uint_aliases {
+ ($(($name:ident, $bits:expr, $doc:expr)),+) => {
+ $(
+ #[doc = $doc]
+ #[doc="unsigned big integer."]
+ pub type $name = Uint<{nlimbs!($bits)}>;
+
+ impl Encoding for $name {
+
+ type Repr = [u8; $bits / 8];
+
+ #[inline]
+ fn from_be_bytes(bytes: Self::Repr) -> Self {
+ Self::from_be_slice(&bytes)
+ }
+
+ #[inline]
+ fn from_le_bytes(bytes: Self::Repr) -> Self {
+ Self::from_le_slice(&bytes)
+ }
+
+ #[inline]
+ fn to_be_bytes(&self) -> Self::Repr {
+ let mut result = [0u8; $bits / 8];
+ self.write_be_bytes(&mut result);
+ result
+ }
+
+ #[inline]
+ fn to_le_bytes(&self) -> Self::Repr {
+ let mut result = [0u8; $bits / 8];
+ self.write_le_bytes(&mut result);
+ result
+ }
+ }
+ )+
+ };
+}
+
+macro_rules! impl_uint_concat_split_mixed {
+ ($name:ident, $size:literal) => {
+ impl $crate::traits::ConcatMixed<Uint<{ U64::LIMBS * $size }>> for Uint<{ <$name>::LIMBS - U64::LIMBS * $size }>
+ {
+ type MixedOutput = $name;
+
+ fn concat_mixed(&self, lo: &Uint<{ U64::LIMBS * $size }>) -> Self::MixedOutput {
+ $crate::uint::concat::concat_mixed(lo, self)
+ }
+ }
+
+ impl $crate::traits::SplitMixed<Uint<{ U64::LIMBS * $size }>, Uint<{ <$name>::LIMBS - U64::LIMBS * $size }>> for $name
+ {
+ fn split_mixed(&self) -> (Uint<{ U64::LIMBS * $size }>, Uint<{ <$name>::LIMBS - U64::LIMBS * $size }>) {
+ $crate::uint::split::split_mixed(self)
+ }
+ }
+ };
+ ($name:ident, [ $($size:literal),+ ]) => {
+ $(
+ impl_uint_concat_split_mixed!($name, $size);
+ )+
+ };
+ ($( ($name:ident, $sizes:tt), )+) => {
+ $(
+ impl_uint_concat_split_mixed!($name, $sizes);
+ )+
+ };
+}
+
+macro_rules! impl_uint_concat_split_even {
+ ($name:ident) => {
+ impl $crate::traits::ConcatMixed<Uint<{ <$name>::LIMBS / 2 }>> for Uint<{ <$name>::LIMBS / 2 }>
+ {
+ type MixedOutput = $name;
+
+ fn concat_mixed(&self, lo: &Uint<{ <$name>::LIMBS / 2 }>) -> Self::MixedOutput {
+ $crate::uint::concat::concat_mixed(lo, self)
+ }
+ }
+
+ impl Uint<{ <$name>::LIMBS / 2 }> {
+ /// Concatenate the two values, with `self` as the most significant and `lo`
+ /// as the least significant.
+ pub const fn concat(&self, lo: &Uint<{ <$name>::LIMBS / 2 }>) -> $name {
+ $crate::uint::concat::concat_mixed(lo, self)
+ }
+ }
+
+ impl $crate::traits::SplitMixed<Uint<{ <$name>::LIMBS / 2 }>, Uint<{ <$name>::LIMBS / 2 }>> for $name
+ {
+ fn split_mixed(&self) -> (Uint<{ <$name>::LIMBS / 2 }>, Uint<{ <$name>::LIMBS / 2 }>) {
+ $crate::uint::split::split_mixed(self)
+ }
+ }
+
+ impl $crate::traits::Split for $name
+ {
+ type Output = Uint<{ <$name>::LIMBS / 2 }>;
+ }
+
+ impl $name {
+ /// Split this number in half, returning its high and low components
+ /// respectively.
+ pub const fn split(&self) -> (Uint<{ <$name>::LIMBS / 2 }>, Uint<{ <$name>::LIMBS / 2 }>) {
+ $crate::uint::split::split_mixed(self)
+ }
+ }
+ };
+ ($($name:ident,)+) => {
+ $(
+ impl_uint_concat_split_even!($name);
+ )+
+ }
+}
diff --git a/vendor/crypto-bigint/src/uint/modular.rs b/vendor/crypto-bigint/src/uint/modular.rs
new file mode 100644
index 0000000..cd560aa
--- /dev/null
+++ b/vendor/crypto-bigint/src/uint/modular.rs
@@ -0,0 +1,164 @@
+mod reduction;
+
+/// Implements `Residue`s, supporting modular arithmetic with a constant modulus.
+pub mod constant_mod;
+/// Implements `DynResidue`s, supporting modular arithmetic with a modulus set at runtime.
+pub mod runtime_mod;
+
+mod add;
+mod div_by_2;
+mod inv;
+mod mul;
+mod pow;
+mod sub;
+
+pub use reduction::montgomery_reduction;
+
+/// A generalization for numbers kept in optimized representations (e.g. Montgomery)
+/// that can be converted back to the original form.
+pub trait Retrieve {
+ /// The original type.
+ type Output;
+
+ /// Convert the number back from the optimized representation.
+ fn retrieve(&self) -> Self::Output;
+}
+
+#[cfg(test)]
+mod tests {
+ use crate::{
+ const_residue, impl_modulus,
+ modular::{
+ constant_mod::Residue, constant_mod::ResidueParams, reduction::montgomery_reduction,
+ },
+ NonZero, Uint, U256, U64,
+ };
+
+ impl_modulus!(
+ Modulus1,
+ U256,
+ "73eda753299d7d483339d80809a1d80553bda402fffe5bfeffffffff00000001"
+ );
+
+ #[test]
+ fn test_montgomery_params() {
+ assert_eq!(
+ Modulus1::R,
+ U256::from_be_hex("1824b159acc5056f998c4fefecbc4ff55884b7fa0003480200000001fffffffe")
+ );
+ assert_eq!(
+ Modulus1::R2,
+ U256::from_be_hex("0748d9d99f59ff1105d314967254398f2b6cedcb87925c23c999e990f3f29c6d")
+ );
+ assert_eq!(
+ Modulus1::MOD_NEG_INV,
+ U64::from_be_hex("fffffffeffffffff").limbs[0]
+ );
+ }
+
+ impl_modulus!(
+ Modulus2,
+ U256,
+ "ffffffff00000000ffffffffffffffffbce6faada7179e84f3b9cac2fc632551"
+ );
+
+ #[test]
+ fn test_reducing_r() {
+ // Divide the value R by R, which should equal 1
+ assert_eq!(
+ montgomery_reduction::<{ Modulus2::LIMBS }>(
+ &(Modulus2::R, Uint::ZERO),
+ &Modulus2::MODULUS,
+ Modulus2::MOD_NEG_INV
+ ),
+ Uint::ONE
+ );
+ }
+
+ #[test]
+ fn test_reducing_r2() {
+ // Divide the value R^2 by R, which should equal R
+ assert_eq!(
+ montgomery_reduction::<{ Modulus2::LIMBS }>(
+ &(Modulus2::R2, Uint::ZERO),
+ &Modulus2::MODULUS,
+ Modulus2::MOD_NEG_INV
+ ),
+ Modulus2::R
+ );
+ }
+
+ #[test]
+ fn test_reducing_r2_wide() {
+ // Divide the value R^2 by R, which should equal R
+ let (hi, lo) = Modulus2::R.square().split();
+ assert_eq!(
+ montgomery_reduction::<{ Modulus2::LIMBS }>(
+ &(lo, hi),
+ &Modulus2::MODULUS,
+ Modulus2::MOD_NEG_INV
+ ),
+ Modulus2::R
+ );
+ }
+
+ #[test]
+ fn test_reducing_xr_wide() {
+ // Reducing xR should return x
+ let x =
+ U256::from_be_hex("44acf6b7e36c1342c2c5897204fe09504e1e2efb1a900377dbc4e7a6a133ec56");
+ let product = x.mul_wide(&Modulus2::R);
+ assert_eq!(
+ montgomery_reduction::<{ Modulus2::LIMBS }>(
+ &product,
+ &Modulus2::MODULUS,
+ Modulus2::MOD_NEG_INV
+ ),
+ x
+ );
+ }
+
+ #[test]
+ fn test_reducing_xr2_wide() {
+ // Reducing xR^2 should return xR
+ let x =
+ U256::from_be_hex("44acf6b7e36c1342c2c5897204fe09504e1e2efb1a900377dbc4e7a6a133ec56");
+ let product = x.mul_wide(&Modulus2::R2);
+
+ // Computing xR mod modulus without Montgomery reduction
+ let (lo, hi) = x.mul_wide(&Modulus2::R);
+ let c = hi.concat(&lo);
+ let red = c.rem(&NonZero::new(U256::ZERO.concat(&Modulus2::MODULUS)).unwrap());
+ let (hi, lo) = red.split();
+ assert_eq!(hi, Uint::ZERO);
+
+ assert_eq!(
+ montgomery_reduction::<{ Modulus2::LIMBS }>(
+ &product,
+ &Modulus2::MODULUS,
+ Modulus2::MOD_NEG_INV
+ ),
+ lo
+ );
+ }
+
+ #[test]
+ fn test_new_retrieve() {
+ let x =
+ U256::from_be_hex("44acf6b7e36c1342c2c5897204fe09504e1e2efb1a900377dbc4e7a6a133ec56");
+ let x_mod = Residue::<Modulus2, { Modulus2::LIMBS }>::new(&x);
+
+ // Confirm that creating a residue and then retrieving the value returns the original
+ assert_eq!(x, x_mod.retrieve());
+ }
+
+ #[test]
+ fn test_residue_macro() {
+ let x =
+ U256::from_be_hex("44acf6b7e36c1342c2c5897204fe09504e1e2efb1a900377dbc4e7a6a133ec56");
+ assert_eq!(
+ Residue::<Modulus2, { Modulus2::LIMBS }>::new(&x),
+ const_residue!(x, Modulus2)
+ );
+ }
+}
diff --git a/vendor/crypto-bigint/src/uint/modular/add.rs b/vendor/crypto-bigint/src/uint/modular/add.rs
new file mode 100644
index 0000000..f4d0f79
--- /dev/null
+++ b/vendor/crypto-bigint/src/uint/modular/add.rs
@@ -0,0 +1,9 @@
+use crate::Uint;
+
+pub(crate) const fn add_montgomery_form<const LIMBS: usize>(
+ a: &Uint<LIMBS>,
+ b: &Uint<LIMBS>,
+ modulus: &Uint<LIMBS>,
+) -> Uint<LIMBS> {
+ a.add_mod(b, modulus)
+}
diff --git a/vendor/crypto-bigint/src/uint/modular/constant_mod.rs b/vendor/crypto-bigint/src/uint/modular/constant_mod.rs
new file mode 100644
index 0000000..b775af4
--- /dev/null
+++ b/vendor/crypto-bigint/src/uint/modular/constant_mod.rs
@@ -0,0 +1,254 @@
+use core::{fmt::Debug, marker::PhantomData};
+
+use subtle::{Choice, ConditionallySelectable, ConstantTimeEq, CtOption};
+
+use crate::{Limb, Uint, Zero};
+
+use super::{div_by_2::div_by_2, reduction::montgomery_reduction, Retrieve};
+
+#[cfg(feature = "rand_core")]
+use crate::{rand_core::CryptoRngCore, NonZero, Random, RandomMod};
+
+#[cfg(feature = "serde")]
+use {
+ crate::Encoding,
+ serdect::serde::de::Error,
+ serdect::serde::{Deserialize, Deserializer, Serialize, Serializer},
+};
+
+/// Additions between residues with a constant modulus
+mod const_add;
+/// Multiplicative inverses of residues with a constant modulus
+mod const_inv;
+/// Multiplications between residues with a constant modulus
+mod const_mul;
+/// Negations of residues with a constant modulus
+mod const_neg;
+/// Exponentiation of residues with a constant modulus
+mod const_pow;
+/// Subtractions between residues with a constant modulus
+mod const_sub;
+
+/// Macros to remove the boilerplate code when dealing with constant moduli.
+#[macro_use]
+mod macros;
+
+pub use macros::*;
+
+/// The parameters to efficiently go to and from the Montgomery form for a given odd modulus. An easy way to generate these parameters is using the `impl_modulus!` macro. These parameters are constant, so they cannot be set at runtime.
+///
+/// Unfortunately, `LIMBS` must be generic for now until `generic_const_exprs` is stabilized.
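+///
+/// A sketch of how such parameters are typically produced (mirroring the tests
+/// in this patch; `ExampleModulus` is an arbitrary, illustrative name):
+///
+/// ```
+/// use crypto_bigint::{impl_modulus, modular::constant_mod::ResidueParams, U256};
+///
+/// impl_modulus!(
+///     ExampleModulus,
+///     U256,
+///     "ffffffff00000000ffffffffffffffffbce6faada7179e84f3b9cac2fc632551"
+/// );
+/// assert_eq!(ExampleModulus::LIMBS, U256::LIMBS);
+/// ```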
+pub trait ResidueParams<const LIMBS: usize>:
+ Copy + Debug + Default + Eq + Send + Sync + 'static
+{
+ /// Number of limbs required to encode a residue
+ const LIMBS: usize;
+
+ /// The constant modulus
+ const MODULUS: Uint<LIMBS>;
+ /// Parameter used in Montgomery reduction
+ const R: Uint<LIMBS>;
+ /// R^2, used to move into Montgomery form
+ const R2: Uint<LIMBS>;
+ /// R^3, used to perform a multiplicative inverse
+ const R3: Uint<LIMBS>;
+ /// The lowest limb of `-(MODULUS^-1) mod R`
+ // We only need the lowest limb, because during reduction this value is multiplied modulo 2**Limb::BITS.
+ const MOD_NEG_INV: Limb;
+}
+
+#[derive(Debug, Clone, Copy, PartialEq, Eq)]
+/// A residue mod `MOD`, represented using `LIMBS` limbs. The modulus of this residue is constant, so it cannot be set at runtime.
+/// Internally, the value is stored in Montgomery form (multiplied by MOD::R) until it is retrieved.
+pub struct Residue<MOD, const LIMBS: usize>
+where
+ MOD: ResidueParams<LIMBS>,
+{
+ montgomery_form: Uint<LIMBS>,
+ phantom: PhantomData<MOD>,
+}
+
+#[cfg(feature = "zeroize")]
+impl<MOD: ResidueParams<LIMBS>, const LIMBS: usize> zeroize::DefaultIsZeroes
+ for Residue<MOD, LIMBS>
+{
+}
+
+impl<MOD: ResidueParams<LIMBS>, const LIMBS: usize> Residue<MOD, LIMBS> {
+ /// The representation of 0 mod `MOD`.
+ pub const ZERO: Self = Self {
+ montgomery_form: Uint::<LIMBS>::ZERO,
+ phantom: PhantomData,
+ };
+
+ /// The representation of 1 mod `MOD`.
+ pub const ONE: Self = Self {
+ montgomery_form: MOD::R,
+ phantom: PhantomData,
+ };
+
+ // Internal helper function to generate a residue; this lets us wrap the constructors more cleanly
+ const fn generate_residue(integer: &Uint<LIMBS>) -> Self {
+ let product = integer.mul_wide(&MOD::R2);
+ let montgomery_form =
+ montgomery_reduction::<LIMBS>(&product, &MOD::MODULUS, MOD::MOD_NEG_INV);
+
+ Self {
+ montgomery_form,
+ phantom: PhantomData,
+ }
+ }
+
+ /// Instantiates a new `Residue` that represents this `integer` mod `MOD`.
+ /// If the modulus represented by `MOD` is not odd, this function will panic; use [`new_checked`][`Residue::new_checked`] if you want to be able to detect an invalid modulus.
+ pub const fn new(integer: &Uint<LIMBS>) -> Self {
+ // A valid modulus must be odd
+ if MOD::MODULUS.ct_is_odd().to_u8() == 0 {
+ panic!("modulus must be odd");
+ }
+
+ Self::generate_residue(integer)
+ }
+
+ /// Instantiates a new `Residue` that represents this `integer` mod `MOD` if the modulus is odd.
+ /// Returns a `CtOption` that is `None` if the provided modulus is not odd; this is a safer version of [`new`][`Residue::new`], which can panic.
+ // TODO: remove this method when we can use `generic_const_exprs` to ensure the modulus is
+ // always valid.
+ pub fn new_checked(integer: &Uint<LIMBS>) -> CtOption<Self> {
+ // A valid modulus must be odd, which we can check in constant time
+ CtOption::new(
+ Self::generate_residue(integer),
+ MOD::MODULUS.ct_is_odd().into(),
+ )
+ }
+
+ /// Retrieves the integer currently encoded in this `Residue`, guaranteed to be reduced.
+ pub const fn retrieve(&self) -> Uint<LIMBS> {
+ montgomery_reduction::<LIMBS>(
+ &(self.montgomery_form, Uint::ZERO),
+ &MOD::MODULUS,
+ MOD::MOD_NEG_INV,
+ )
+ }
+
+ /// Access the `Residue` value in Montgomery form.
+ pub const fn as_montgomery(&self) -> &Uint<LIMBS> {
+ &self.montgomery_form
+ }
+
+ /// Mutably access the `Residue` value in Montgomery form.
+ pub fn as_montgomery_mut(&mut self) -> &mut Uint<LIMBS> {
+ &mut self.montgomery_form
+ }
+
+ /// Create a `Residue` from a value in Montgomery form.
+ pub const fn from_montgomery(integer: Uint<LIMBS>) -> Self {
+ Self {
+ montgomery_form: integer,
+ phantom: PhantomData,
+ }
+ }
+
+ /// Extract the value from the `Residue` in Montgomery form.
+ pub const fn to_montgomery(&self) -> Uint<LIMBS> {
+ self.montgomery_form
+ }
+
+ /// Performs modular division by 2, that is, for a given `x` returns `y`
+ /// such that `y * 2 = x mod p`. This means:
+ /// - if `x` is even, returns `x / 2`,
+ /// - if `x` is odd, returns `(x + p) / 2`
+ /// (since the modulus `p` used with Montgomery form is always odd,
+ /// `x + p` is even and the division is exact).
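+ ///
+ /// For example, with `p = 13` the value 7 maps to 10, since
+ /// `10 * 2 = 20 = 7 mod 13`.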
+ pub fn div_by_2(&self) -> Self {
+ Self {
+ montgomery_form: div_by_2(&self.montgomery_form, &MOD::MODULUS),
+ phantom: PhantomData,
+ }
+ }
+}
+
+impl<MOD: ResidueParams<LIMBS> + Copy, const LIMBS: usize> ConditionallySelectable
+ for Residue<MOD, LIMBS>
+{
+ fn conditional_select(a: &Self, b: &Self, choice: Choice) -> Self {
+ Residue {
+ montgomery_form: Uint::conditional_select(
+ &a.montgomery_form,
+ &b.montgomery_form,
+ choice,
+ ),
+ phantom: PhantomData,
+ }
+ }
+}
+
+impl<MOD: ResidueParams<LIMBS>, const LIMBS: usize> ConstantTimeEq for Residue<MOD, LIMBS> {
+ fn ct_eq(&self, other: &Self) -> Choice {
+ ConstantTimeEq::ct_eq(&self.montgomery_form, &other.montgomery_form)
+ }
+}
+
+impl<MOD: ResidueParams<LIMBS>, const LIMBS: usize> Default for Residue<MOD, LIMBS> {
+ fn default() -> Self {
+ Self::ZERO
+ }
+}
+
+impl<MOD: ResidueParams<LIMBS>, const LIMBS: usize> Zero for Residue<MOD, LIMBS> {
+ const ZERO: Self = Self::ZERO;
+}
+
+#[cfg(feature = "rand_core")]
+impl<MOD, const LIMBS: usize> Random for Residue<MOD, LIMBS>
+where
+ MOD: ResidueParams<LIMBS>,
+{
+ #[inline]
+ fn random(rng: &mut impl CryptoRngCore) -> Self {
+ Self::new(&Uint::random_mod(rng, &NonZero::from_uint(MOD::MODULUS)))
+ }
+}
+
+impl<MOD: ResidueParams<LIMBS>, const LIMBS: usize> Retrieve for Residue<MOD, LIMBS> {
+ type Output = Uint<LIMBS>;
+ fn retrieve(&self) -> Self::Output {
+ self.retrieve()
+ }
+}
+
+#[cfg(feature = "serde")]
+impl<'de, MOD, const LIMBS: usize> Deserialize<'de> for Residue<MOD, LIMBS>
+where
+ MOD: ResidueParams<LIMBS>,
+ Uint<LIMBS>: Encoding,
+{
+ fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
+ where
+ D: Deserializer<'de>,
+ {
+ Uint::<LIMBS>::deserialize(deserializer).and_then(|montgomery_form| {
+ if Uint::ct_lt(&montgomery_form, &MOD::MODULUS).into() {
+ Ok(Self {
+ montgomery_form,
+ phantom: PhantomData,
+ })
+ } else {
+ Err(D::Error::custom("montgomery form must be reduced"))
+ }
+ })
+ }
+}
+
+#[cfg(feature = "serde")]
+impl<MOD, const LIMBS: usize> Serialize for Residue<MOD, LIMBS>
+where
+ MOD: ResidueParams<LIMBS>,
+ Uint<LIMBS>: Encoding,
+{
+ fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
+ where
+ S: Serializer,
+ {
+ self.montgomery_form.serialize(serializer)
+ }
+}
diff --git a/vendor/crypto-bigint/src/uint/modular/constant_mod/const_add.rs b/vendor/crypto-bigint/src/uint/modular/constant_mod/const_add.rs
new file mode 100644
index 0000000..82eb882
--- /dev/null
+++ b/vendor/crypto-bigint/src/uint/modular/constant_mod/const_add.rs
@@ -0,0 +1,98 @@
+use core::ops::{Add, AddAssign};
+
+use crate::modular::add::add_montgomery_form;
+
+use super::{Residue, ResidueParams};
+
+impl<MOD: ResidueParams<LIMBS>, const LIMBS: usize> Residue<MOD, LIMBS> {
+ /// Adds `rhs`.
+ pub const fn add(&self, rhs: &Residue<MOD, LIMBS>) -> Self {
+ Self {
+ montgomery_form: add_montgomery_form(
+ &self.montgomery_form,
+ &rhs.montgomery_form,
+ &MOD::MODULUS,
+ ),
+ phantom: core::marker::PhantomData,
+ }
+ }
+}
+
+impl<MOD: ResidueParams<LIMBS>, const LIMBS: usize> Add<&Residue<MOD, LIMBS>>
+ for &Residue<MOD, LIMBS>
+{
+ type Output = Residue<MOD, LIMBS>;
+ fn add(self, rhs: &Residue<MOD, LIMBS>) -> Residue<MOD, LIMBS> {
+ self.add(rhs)
+ }
+}
+
+impl<MOD: ResidueParams<LIMBS>, const LIMBS: usize> Add<Residue<MOD, LIMBS>>
+ for &Residue<MOD, LIMBS>
+{
+ type Output = Residue<MOD, LIMBS>;
+ #[allow(clippy::op_ref)]
+ fn add(self, rhs: Residue<MOD, LIMBS>) -> Residue<MOD, LIMBS> {
+ self + &rhs
+ }
+}
+
+impl<MOD: ResidueParams<LIMBS>, const LIMBS: usize> Add<&Residue<MOD, LIMBS>>
+ for Residue<MOD, LIMBS>
+{
+ type Output = Residue<MOD, LIMBS>;
+ #[allow(clippy::op_ref)]
+ fn add(self, rhs: &Residue<MOD, LIMBS>) -> Residue<MOD, LIMBS> {
+ &self + rhs
+ }
+}
+
+impl<MOD: ResidueParams<LIMBS>, const LIMBS: usize> Add<Residue<MOD, LIMBS>>
+ for Residue<MOD, LIMBS>
+{
+ type Output = Residue<MOD, LIMBS>;
+ fn add(self, rhs: Residue<MOD, LIMBS>) -> Residue<MOD, LIMBS> {
+ &self + &rhs
+ }
+}
+
+impl<MOD: ResidueParams<LIMBS>, const LIMBS: usize> AddAssign<&Self> for Residue<MOD, LIMBS> {
+ fn add_assign(&mut self, rhs: &Self) {
+ *self = *self + rhs;
+ }
+}
+
+impl<MOD: ResidueParams<LIMBS>, const LIMBS: usize> AddAssign<Self> for Residue<MOD, LIMBS> {
+ fn add_assign(&mut self, rhs: Self) {
+ *self += &rhs;
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use crate::{const_residue, impl_modulus, modular::constant_mod::ResidueParams, U256};
+
+ impl_modulus!(
+ Modulus,
+ U256,
+ "ffffffff00000000ffffffffffffffffbce6faada7179e84f3b9cac2fc632551"
+ );
+
+ #[test]
+ fn add_overflow() {
+ let x =
+ U256::from_be_hex("44acf6b7e36c1342c2c5897204fe09504e1e2efb1a900377dbc4e7a6a133ec56");
+ let mut x_mod = const_residue!(x, Modulus);
+
+ let y =
+ U256::from_be_hex("d5777c45019673125ad240f83094d4252d829516fac8601ed01979ec1ec1a251");
+ let y_mod = const_residue!(y, Modulus);
+
+ x_mod += &y_mod;
+
+ let expected =
+ U256::from_be_hex("1a2472fde50286541d97ca6a3592dd75beb9c9646e40c511b82496cfc3926956");
+
+ assert_eq!(expected, x_mod.retrieve());
+ }
+}
diff --git a/vendor/crypto-bigint/src/uint/modular/constant_mod/const_inv.rs b/vendor/crypto-bigint/src/uint/modular/constant_mod/const_inv.rs
new file mode 100644
index 0000000..28f0622
--- /dev/null
+++ b/vendor/crypto-bigint/src/uint/modular/constant_mod/const_inv.rs
@@ -0,0 +1,69 @@
+use core::marker::PhantomData;
+
+use subtle::CtOption;
+
+use crate::{modular::inv::inv_montgomery_form, traits::Invert, CtChoice, NonZero};
+
+use super::{Residue, ResidueParams};
+
+impl<MOD: ResidueParams<LIMBS>, const LIMBS: usize> Residue<MOD, LIMBS> {
+ /// Computes the residue `self^-1` representing the multiplicative inverse of `self`.
+ /// I.e. `self * self^-1 = 1`.
+ /// If the number was invertible, the second element of the tuple is the truthy value,
+ /// otherwise it is the falsy value (in which case the first element's value is unspecified).
+ pub const fn invert(&self) -> (Self, CtChoice) {
+ let (montgomery_form, is_some) = inv_montgomery_form(
+ &self.montgomery_form,
+ &MOD::MODULUS,
+ &MOD::R3,
+ MOD::MOD_NEG_INV,
+ );
+
+ let value = Self {
+ montgomery_form,
+ phantom: PhantomData,
+ };
+
+ (value, is_some)
+ }
+}
+
+impl<MOD: ResidueParams<LIMBS>, const LIMBS: usize> Invert for Residue<MOD, LIMBS> {
+ type Output = CtOption<Self>;
+ fn invert(&self) -> Self::Output {
+ let (value, is_some) = self.invert();
+ CtOption::new(value, is_some.into())
+ }
+}
+
+impl<MOD: ResidueParams<LIMBS>, const LIMBS: usize> Invert for NonZero<Residue<MOD, LIMBS>> {
+ type Output = Self;
+ fn invert(&self) -> Self::Output {
+ // Always succeeds for a non-zero argument
+ let (value, _is_some) = self.as_ref().invert();
+ NonZero::new(value).unwrap()
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use crate::{const_residue, impl_modulus, modular::constant_mod::ResidueParams, U256};
+
+ impl_modulus!(
+ Modulus,
+ U256,
+ "15477BCCEFE197328255BFA79A1217899016D927EF460F4FF404029D24FA4409"
+ );
+
+ #[test]
+ fn test_self_inverse() {
+ let x =
+ U256::from_be_hex("77117F1273373C26C700D076B3F780074D03339F56DD0EFB60E7F58441FD3685");
+ let x_mod = const_residue!(x, Modulus);
+
+ let (inv, _is_some) = x_mod.invert();
+ let res = x_mod * inv;
+
+ assert_eq!(res.retrieve(), U256::ONE);
+ }
+}
diff --git a/vendor/crypto-bigint/src/uint/modular/constant_mod/const_mul.rs b/vendor/crypto-bigint/src/uint/modular/constant_mod/const_mul.rs
new file mode 100644
index 0000000..3bce184
--- /dev/null
+++ b/vendor/crypto-bigint/src/uint/modular/constant_mod/const_mul.rs
@@ -0,0 +1,94 @@
+use core::{
+ marker::PhantomData,
+ ops::{Mul, MulAssign},
+};
+
+use crate::{
+ modular::mul::{mul_montgomery_form, square_montgomery_form},
+ traits::Square,
+};
+
+use super::{Residue, ResidueParams};
+
+impl<MOD: ResidueParams<LIMBS>, const LIMBS: usize> Residue<MOD, LIMBS> {
+ /// Multiplies by `rhs`.
+ pub const fn mul(&self, rhs: &Self) -> Self {
+ Self {
+ montgomery_form: mul_montgomery_form(
+ &self.montgomery_form,
+ &rhs.montgomery_form,
+ &MOD::MODULUS,
+ MOD::MOD_NEG_INV,
+ ),
+ phantom: PhantomData,
+ }
+ }
+
+ /// Computes the (reduced) square of a residue.
+ pub const fn square(&self) -> Self {
+ Self {
+ montgomery_form: square_montgomery_form(
+ &self.montgomery_form,
+ &MOD::MODULUS,
+ MOD::MOD_NEG_INV,
+ ),
+ phantom: PhantomData,
+ }
+ }
+}
+
+impl<MOD: ResidueParams<LIMBS>, const LIMBS: usize> Mul<&Residue<MOD, LIMBS>>
+ for &Residue<MOD, LIMBS>
+{
+ type Output = Residue<MOD, LIMBS>;
+ fn mul(self, rhs: &Residue<MOD, LIMBS>) -> Residue<MOD, LIMBS> {
+ self.mul(rhs)
+ }
+}
+
+impl<MOD: ResidueParams<LIMBS>, const LIMBS: usize> Mul<Residue<MOD, LIMBS>>
+ for &Residue<MOD, LIMBS>
+{
+ type Output = Residue<MOD, LIMBS>;
+ #[allow(clippy::op_ref)]
+ fn mul(self, rhs: Residue<MOD, LIMBS>) -> Residue<MOD, LIMBS> {
+ self * &rhs
+ }
+}
+
+impl<MOD: ResidueParams<LIMBS>, const LIMBS: usize> Mul<&Residue<MOD, LIMBS>>
+ for Residue<MOD, LIMBS>
+{
+ type Output = Residue<MOD, LIMBS>;
+ #[allow(clippy::op_ref)]
+ fn mul(self, rhs: &Residue<MOD, LIMBS>) -> Residue<MOD, LIMBS> {
+ &self * rhs
+ }
+}
+
+impl<MOD: ResidueParams<LIMBS>, const LIMBS: usize> Mul<Residue<MOD, LIMBS>>
+ for Residue<MOD, LIMBS>
+{
+ type Output = Residue<MOD, LIMBS>;
+ fn mul(self, rhs: Residue<MOD, LIMBS>) -> Residue<MOD, LIMBS> {
+ &self * &rhs
+ }
+}
+
+impl<MOD: ResidueParams<LIMBS>, const LIMBS: usize> MulAssign<&Self> for Residue<MOD, LIMBS> {
+ fn mul_assign(&mut self, rhs: &Residue<MOD, LIMBS>) {
+ *self = *self * rhs;
+ }
+}
+
+impl<MOD: ResidueParams<LIMBS>, const LIMBS: usize> MulAssign<Self> for Residue<MOD, LIMBS> {
+ fn mul_assign(&mut self, rhs: Self) {
+ *self *= &rhs;
+ }
+}
+
+impl<MOD: ResidueParams<LIMBS>, const LIMBS: usize> Square for Residue<MOD, LIMBS> {
+ fn square(&self) -> Self {
+ Residue::square(self)
+ }
+}
diff --git a/vendor/crypto-bigint/src/uint/modular/constant_mod/const_neg.rs b/vendor/crypto-bigint/src/uint/modular/constant_mod/const_neg.rs
new file mode 100644
index 0000000..1981f5a
--- /dev/null
+++ b/vendor/crypto-bigint/src/uint/modular/constant_mod/const_neg.rs
@@ -0,0 +1,48 @@
+use core::ops::Neg;
+
+use super::{Residue, ResidueParams};
+
+impl<MOD: ResidueParams<LIMBS>, const LIMBS: usize> Residue<MOD, LIMBS> {
+ /// Negates the number.
+ pub const fn neg(&self) -> Self {
+ Self::ZERO.sub(self)
+ }
+}
+
+impl<MOD: ResidueParams<LIMBS>, const LIMBS: usize> Neg for Residue<MOD, LIMBS> {
+ type Output = Self;
+ fn neg(self) -> Self {
+ Residue::neg(&self)
+ }
+}
+
+impl<MOD: ResidueParams<LIMBS>, const LIMBS: usize> Neg for &Residue<MOD, LIMBS> {
+ type Output = Residue<MOD, LIMBS>;
+ fn neg(self) -> Residue<MOD, LIMBS> {
+ Residue::neg(self)
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use crate::{const_residue, impl_modulus, modular::constant_mod::ResidueParams, U256};
+
+ impl_modulus!(
+ Modulus,
+ U256,
+ "15477BCCEFE197328255BFA79A1217899016D927EF460F4FF404029D24FA4409"
+ );
+
+ #[test]
+ fn test_negate() {
+ let x =
+ U256::from_be_hex("77117F1273373C26C700D076B3F780074D03339F56DD0EFB60E7F58441FD3685");
+ let x_mod = const_residue!(x, Modulus);
+
+ let res = -x_mod;
+ let expected =
+ U256::from_be_hex("089B67BB2C124F084701AD76E8750D321385E35044C74CE457301A2A9BE061B1");
+
+ assert_eq!(res.retrieve(), expected);
+ }
+}
diff --git a/vendor/crypto-bigint/src/uint/modular/constant_mod/const_pow.rs b/vendor/crypto-bigint/src/uint/modular/constant_mod/const_pow.rs
new file mode 100644
index 0000000..ea3dd1c
--- /dev/null
+++ b/vendor/crypto-bigint/src/uint/modular/constant_mod/const_pow.rs
@@ -0,0 +1,101 @@
+use crate::{modular::pow::pow_montgomery_form, PowBoundedExp, Uint};
+
+use super::{Residue, ResidueParams};
+
+impl<MOD: ResidueParams<LIMBS>, const LIMBS: usize> Residue<MOD, LIMBS> {
+ /// Raises to the `exponent` power.
+ pub const fn pow<const RHS_LIMBS: usize>(
+ &self,
+ exponent: &Uint<RHS_LIMBS>,
+ ) -> Residue<MOD, LIMBS> {
+ self.pow_bounded_exp(exponent, Uint::<RHS_LIMBS>::BITS)
+ }
+
+ /// Raises to the `exponent` power,
+ /// with `exponent_bits` representing the number of (least significant) bits
+ /// to take into account for the exponent.
+ ///
+ /// NOTE: `exponent_bits` may be leaked in the time pattern.
+ pub const fn pow_bounded_exp<const RHS_LIMBS: usize>(
+ &self,
+ exponent: &Uint<RHS_LIMBS>,
+ exponent_bits: usize,
+ ) -> Residue<MOD, LIMBS> {
+ Self {
+ montgomery_form: pow_montgomery_form(
+ &self.montgomery_form,
+ exponent,
+ exponent_bits,
+ &MOD::MODULUS,
+ &MOD::R,
+ MOD::MOD_NEG_INV,
+ ),
+ phantom: core::marker::PhantomData,
+ }
+ }
+}
+
+impl<MOD: ResidueParams<LIMBS>, const LIMBS: usize> PowBoundedExp<Uint<LIMBS>>
+ for Residue<MOD, LIMBS>
+{
+ fn pow_bounded_exp(&self, exponent: &Uint<LIMBS>, exponent_bits: usize) -> Self {
+ self.pow_bounded_exp(exponent, exponent_bits)
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use crate::{const_residue, impl_modulus, modular::constant_mod::ResidueParams, U256};
+
+ impl_modulus!(
+ Modulus,
+ U256,
+ "9CC24C5DF431A864188AB905AC751B727C9447A8E99E6366E1AD78A21E8D882B"
+ );
+
+ #[test]
+ fn test_powmod_small_base() {
+ let base = U256::from(105u64);
+ let base_mod = const_residue!(base, Modulus);
+
+ let exponent =
+ U256::from_be_hex("77117F1273373C26C700D076B3F780074D03339F56DD0EFB60E7F58441FD3685");
+
+ let res = base_mod.pow(&exponent);
+
+ let expected =
+ U256::from_be_hex("7B2CD7BDDD96C271E6F232F2F415BB03FE2A90BD6CCCEA5E94F1BFD064993766");
+ assert_eq!(res.retrieve(), expected);
+ }
+
+ #[test]
+ fn test_powmod_small_exponent() {
+ let base =
+ U256::from_be_hex("3435D18AA8313EBBE4D20002922225B53F75DC4453BB3EEC0378646F79B524A4");
+ let base_mod = const_residue!(base, Modulus);
+
+ let exponent = U256::from(105u64);
+
+ let res = base_mod.pow(&exponent);
+
+ let expected =
+ U256::from_be_hex("89E2A4E99F649A5AE2C18068148C355CA927B34A3245C938178ED00D6EF218AA");
+ assert_eq!(res.retrieve(), expected);
+ }
+
+ #[test]
+ fn test_powmod() {
+ let base =
+ U256::from_be_hex("3435D18AA8313EBBE4D20002922225B53F75DC4453BB3EEC0378646F79B524A4");
+ let base_mod = const_residue!(base, Modulus);
+
+ let exponent =
+ U256::from_be_hex("77117F1273373C26C700D076B3F780074D03339F56DD0EFB60E7F58441FD3685");
+
+ let res = base_mod.pow(&exponent);
+
+ let expected =
+ U256::from_be_hex("3681BC0FEA2E5D394EB178155A127B0FD2EF405486D354251C385BDD51B9D421");
+ assert_eq!(res.retrieve(), expected);
+ }
+}
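
What the bounded exponent means in practice: only the `exponent_bits` least significant bits of the exponent participate. A sketch reusing the `impl_modulus!`/`const_residue!` pattern from the tests above (names like `DemoModulus` are illustrative):

    use crypto_bigint::{const_residue, impl_modulus, modular::constant_mod::ResidueParams, U256};

    impl_modulus!(
        DemoModulus,
        U256,
        "9CC24C5DF431A864188AB905AC751B727C9447A8E99E6366E1AD78A21E8D882B"
    );

    fn bounded_exp_demo() {
        let x = U256::from(3u64);
        let x_mod = const_residue!(x, DemoModulus);
        let e = U256::from(0x105u64);   // a 9-bit exponent
        let e_lo = U256::from(0x05u64); // its low 8 bits
        // With exponent_bits = 8, only the low 8 bits of `e` are used:
        assert_eq!(
            x_mod.pow_bounded_exp(&e, 8).retrieve(),
            x_mod.pow(&e_lo).retrieve()
        );
    }
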
diff --git a/vendor/crypto-bigint/src/uint/modular/constant_mod/const_sub.rs b/vendor/crypto-bigint/src/uint/modular/constant_mod/const_sub.rs
new file mode 100644
index 0000000..f650611
--- /dev/null
+++ b/vendor/crypto-bigint/src/uint/modular/constant_mod/const_sub.rs
@@ -0,0 +1,98 @@
+use core::ops::{Sub, SubAssign};
+
+use crate::modular::sub::sub_montgomery_form;
+
+use super::{Residue, ResidueParams};
+
+impl<MOD: ResidueParams<LIMBS>, const LIMBS: usize> Residue<MOD, LIMBS> {
+ /// Subtracts `rhs`.
+ pub const fn sub(&self, rhs: &Self) -> Self {
+ Self {
+ montgomery_form: sub_montgomery_form(
+ &self.montgomery_form,
+ &rhs.montgomery_form,
+ &MOD::MODULUS,
+ ),
+ phantom: core::marker::PhantomData,
+ }
+ }
+}
+
+impl<MOD: ResidueParams<LIMBS>, const LIMBS: usize> Sub<&Residue<MOD, LIMBS>>
+ for &Residue<MOD, LIMBS>
+{
+ type Output = Residue<MOD, LIMBS>;
+ fn sub(self, rhs: &Residue<MOD, LIMBS>) -> Residue<MOD, LIMBS> {
+ self.sub(rhs)
+ }
+}
+
+impl<MOD: ResidueParams<LIMBS>, const LIMBS: usize> Sub<Residue<MOD, LIMBS>>
+ for &Residue<MOD, LIMBS>
+{
+ type Output = Residue<MOD, LIMBS>;
+ #[allow(clippy::op_ref)]
+ fn sub(self, rhs: Residue<MOD, LIMBS>) -> Residue<MOD, LIMBS> {
+ self - &rhs
+ }
+}
+
+impl<MOD: ResidueParams<LIMBS>, const LIMBS: usize> Sub<&Residue<MOD, LIMBS>>
+ for Residue<MOD, LIMBS>
+{
+ type Output = Residue<MOD, LIMBS>;
+ #[allow(clippy::op_ref)]
+ fn sub(self, rhs: &Residue<MOD, LIMBS>) -> Residue<MOD, LIMBS> {
+ &self - rhs
+ }
+}
+
+impl<MOD: ResidueParams<LIMBS>, const LIMBS: usize> Sub<Residue<MOD, LIMBS>>
+ for Residue<MOD, LIMBS>
+{
+ type Output = Residue<MOD, LIMBS>;
+ fn sub(self, rhs: Residue<MOD, LIMBS>) -> Residue<MOD, LIMBS> {
+ &self - &rhs
+ }
+}
+
+impl<MOD: ResidueParams<LIMBS>, const LIMBS: usize> SubAssign<&Self> for Residue<MOD, LIMBS> {
+ fn sub_assign(&mut self, rhs: &Self) {
+ *self = *self - rhs;
+ }
+}
+
+impl<MOD: ResidueParams<LIMBS>, const LIMBS: usize> SubAssign<Self> for Residue<MOD, LIMBS> {
+ fn sub_assign(&mut self, rhs: Self) {
+ *self -= &rhs;
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use crate::{const_residue, impl_modulus, modular::constant_mod::ResidueParams, U256};
+
+ impl_modulus!(
+ Modulus,
+ U256,
+ "ffffffff00000000ffffffffffffffffbce6faada7179e84f3b9cac2fc632551"
+ );
+
+ #[test]
+ fn sub_overflow() {
+ let x =
+ U256::from_be_hex("44acf6b7e36c1342c2c5897204fe09504e1e2efb1a900377dbc4e7a6a133ec56");
+ let mut x_mod = const_residue!(x, Modulus);
+
+ let y =
+ U256::from_be_hex("d5777c45019673125ad240f83094d4252d829516fac8601ed01979ec1ec1a251");
+ let y_mod = const_residue!(y, Modulus);
+
+ x_mod -= &y_mod;
+
+ let expected =
+ U256::from_be_hex("6f357a71e1d5a03167f34879d469352add829491c6df41ddff65387d7ed56f56");
+
+ assert_eq!(expected, x_mod.retrieve());
+ }
+}
diff --git a/vendor/crypto-bigint/src/uint/modular/constant_mod/macros.rs b/vendor/crypto-bigint/src/uint/modular/constant_mod/macros.rs
new file mode 100644
index 0000000..dfa440e
--- /dev/null
+++ b/vendor/crypto-bigint/src/uint/modular/constant_mod/macros.rs
@@ -0,0 +1,57 @@
+// TODO: Use `adt_const_params` once stabilized to make a `Residue` generic around a modulus rather than having to implement a ZST + trait
+#[macro_export]
+/// Implements a modulus with the given name, type, and value (in that order).
+/// Note that `crypto_bigint::traits::Encoding` must be in scope for the macro to compile.
+/// For example, `impl_modulus!(MyModulus, U256, "73eda753299d7d483339d80809a1d80553bda402fffe5bfeffffffff00000001");` implements a 256-bit modulus named `MyModulus`.
+/// The modulus _must_ be odd, or this will panic.
+macro_rules! impl_modulus {
+ ($name:ident, $uint_type:ty, $value:expr) => {
+ #[derive(Clone, Copy, Debug, Default, Eq, PartialEq)]
+ pub struct $name {}
+ impl<const DLIMBS: usize>
+ $crate::modular::constant_mod::ResidueParams<{ <$uint_type>::LIMBS }> for $name
+ where
+ $uint_type: $crate::ConcatMixed<MixedOutput = $crate::Uint<DLIMBS>>,
+ {
+ const LIMBS: usize = <$uint_type>::LIMBS;
+ const MODULUS: $uint_type = {
+ let res = <$uint_type>::from_be_hex($value);
+
+ // Check that the modulus is odd
+ if res.as_limbs()[0].0 & 1 == 0 {
+ panic!("modulus must be odd");
+ }
+
+ res
+ };
+ const R: $uint_type = $crate::Uint::MAX
+ .const_rem(&Self::MODULUS)
+ .0
+ .wrapping_add(&$crate::Uint::ONE);
+ const R2: $uint_type =
+ $crate::Uint::const_rem_wide(Self::R.square_wide(), &Self::MODULUS).0;
+ const MOD_NEG_INV: $crate::Limb = $crate::Limb(
+ $crate::Word::MIN.wrapping_sub(
+ Self::MODULUS
+ .inv_mod2k_vartime($crate::Word::BITS as usize)
+ .as_limbs()[0]
+ .0,
+ ),
+ );
+ const R3: $uint_type = $crate::modular::montgomery_reduction(
+ &Self::R2.square_wide(),
+ &Self::MODULUS,
+ Self::MOD_NEG_INV,
+ );
+ }
+ };
+}
+
+#[macro_export]
+/// Creates a `Residue` with the given value for a specific modulus.
+/// For example, given `let x = U256::from(105u64);`, `const_residue!(x, MyModulus)` creates a `Residue` for 105 mod `MyModulus`.
+/// Note that the value must be passed as a variable, since the macro borrows it by reference.
+/// The modulus _must_ be odd, or this will panic.
+macro_rules! const_residue {
+ ($variable:ident, $modulus:ident) => {
+ $crate::modular::constant_mod::Residue::<$modulus, { $modulus::LIMBS }>::new(&$variable)
+ };
+}
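
A minimal end-to-end sketch of the two macros together (illustrative names; crate paths as used in the tests in this diff):

    use crypto_bigint::{const_residue, impl_modulus, modular::constant_mod::ResidueParams, U256};

    impl_modulus!(
        MyModulus,
        U256,
        "ffffffff00000000ffffffffffffffffbce6faada7179e84f3b9cac2fc632551"
    );

    fn modulus_demo() -> U256 {
        let x = U256::from(105u64);
        let x_mod = const_residue!(x, MyModulus); // 105 mod MyModulus, in Montgomery form
        x_mod.square().retrieve()                 // (105 * 105) mod MyModulus, as a plain U256
    }
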
diff --git a/vendor/crypto-bigint/src/uint/modular/div_by_2.rs b/vendor/crypto-bigint/src/uint/modular/div_by_2.rs
new file mode 100644
index 0000000..20c0a5d
--- /dev/null
+++ b/vendor/crypto-bigint/src/uint/modular/div_by_2.rs
@@ -0,0 +1,30 @@
+use crate::Uint;
+
+pub(crate) fn div_by_2<const LIMBS: usize>(a: &Uint<LIMBS>, modulus: &Uint<LIMBS>) -> Uint<LIMBS> {
+ // We are looking for the `x` such that `x * 2 = y mod modulus`,
+ // where the given `a = M(y)` is the Montgomery representation of some `y`.
+ // Montgomery representation is linear, so the same relation holds there:
+ // `M(x) + M(x) = a mod modulus`.
+ // So we can ignore the Montgomery representation entirely and return
+ // `a` divided by 2 (mod `modulus`); the result is the Montgomery representation of `x`.
+ // (This also means the function works whether or not `a` is in Montgomery
+ // representation, but the algorithm below does require `modulus` to be odd.)
+
+ // Two possibilities:
+ // - if `a` is even, we can just divide by 2;
+ // - if `a` is odd, we divide `(a + modulus)` by 2.
+ // To stay within the modulus we open the parentheses turning it into `a / 2 + modulus / 2 + 1`
+ // ("+1" because both `a` and `modulus` are odd, we lose 0.5 in each integer division).
+ // This will not overflow, so we can just use wrapping operations.
+
+ let (half, is_odd) = a.shr_1();
+ let half_modulus = modulus.shr_vartime(1);
+
+ let if_even = half;
+ let if_odd = half
+ .wrapping_add(&half_modulus)
+ .wrapping_add(&Uint::<LIMBS>::ONE);
+
+ Uint::<LIMBS>::ct_select(&if_even, &if_odd, is_odd)
+}
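
The even/odd halving identity above, as a plain-integer sketch (hypothetical helper, not part of the crate; the real code selects between the two branches in constant time):

    fn div_by_2_u64(a: u64, modulus: u64) -> u64 {
        assert!(modulus % 2 == 1 && a < modulus);
        if a % 2 == 0 {
            a / 2
        } else {
            // (a + modulus) / 2, with the parentheses opened to avoid overflow:
            a / 2 + modulus / 2 + 1
        }
    }

    // e.g. div_by_2_u64(3, 7) == 5, and (5 * 2) % 7 == 3.
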
diff --git a/vendor/crypto-bigint/src/uint/modular/inv.rs b/vendor/crypto-bigint/src/uint/modular/inv.rs
new file mode 100644
index 0000000..408c03f
--- /dev/null
+++ b/vendor/crypto-bigint/src/uint/modular/inv.rs
@@ -0,0 +1,14 @@
+use crate::{modular::reduction::montgomery_reduction, CtChoice, Limb, Uint};
+
+/// Computes the Montgomery form of `x^-1`, where `x` is given in Montgomery form.
+/// The second element of the returned tuple is truthy iff the inverse exists.
+pub const fn inv_montgomery_form<const LIMBS: usize>(
+ x: &Uint<LIMBS>,
+ modulus: &Uint<LIMBS>,
+ r3: &Uint<LIMBS>,
+ mod_neg_inv: Limb,
+) -> (Uint<LIMBS>, CtChoice) {
+ // `x` holds `x*R mod m`, so `inv_odd_mod` yields `x^-1 * R^-1 mod m`.
+ let (inverse, is_some) = x.inv_odd_mod(modulus);
+ // Multiplying by `R^3` and Montgomery-reducing (one division by `R`)
+ // yields `x^-1 * R mod m`, i.e. the Montgomery form of the inverse.
+ (
+ montgomery_reduction(&inverse.mul_wide(r3), modulus, mod_neg_inv),
+ is_some,
+ )
+}
diff --git a/vendor/crypto-bigint/src/uint/modular/mul.rs b/vendor/crypto-bigint/src/uint/modular/mul.rs
new file mode 100644
index 0000000..b84ceb5
--- /dev/null
+++ b/vendor/crypto-bigint/src/uint/modular/mul.rs
@@ -0,0 +1,22 @@
+use crate::{Limb, Uint};
+
+use super::reduction::montgomery_reduction;
+
+pub(crate) const fn mul_montgomery_form<const LIMBS: usize>(
+ a: &Uint<LIMBS>,
+ b: &Uint<LIMBS>,
+ modulus: &Uint<LIMBS>,
+ mod_neg_inv: Limb,
+) -> Uint<LIMBS> {
+ let product = a.mul_wide(b);
+ montgomery_reduction::<LIMBS>(&product, modulus, mod_neg_inv)
+}
+
+pub(crate) const fn square_montgomery_form<const LIMBS: usize>(
+ a: &Uint<LIMBS>,
+ modulus: &Uint<LIMBS>,
+ mod_neg_inv: Limb,
+) -> Uint<LIMBS> {
+ let product = a.square_wide();
+ montgomery_reduction::<LIMBS>(&product, modulus, mod_neg_inv)
+}
diff --git a/vendor/crypto-bigint/src/uint/modular/pow.rs b/vendor/crypto-bigint/src/uint/modular/pow.rs
new file mode 100644
index 0000000..db9ac99
--- /dev/null
+++ b/vendor/crypto-bigint/src/uint/modular/pow.rs
@@ -0,0 +1,79 @@
+use crate::{Limb, Uint, Word};
+
+use super::mul::{mul_montgomery_form, square_montgomery_form};
+
+/// Performs modular exponentiation using a fixed-window method (4-bit windows)
+/// on values in Montgomery form.
+/// `exponent_bits` represents the number of bits to take into account for the exponent.
+///
+/// NOTE: `exponent_bits` is leaked through timing.
+pub const fn pow_montgomery_form<const LIMBS: usize, const RHS_LIMBS: usize>(
+ x: &Uint<LIMBS>,
+ exponent: &Uint<RHS_LIMBS>,
+ exponent_bits: usize,
+ modulus: &Uint<LIMBS>,
+ r: &Uint<LIMBS>,
+ mod_neg_inv: Limb,
+) -> Uint<LIMBS> {
+ if exponent_bits == 0 {
+ return *r; // 1 in Montgomery form
+ }
+
+ const WINDOW: usize = 4;
+ const WINDOW_MASK: Word = (1 << WINDOW) - 1;
+
+ // powers[i] contains x^i
+ let mut powers = [*r; 1 << WINDOW];
+ powers[1] = *x;
+ let mut i = 2;
+ while i < powers.len() {
+ powers[i] = mul_montgomery_form(&powers[i - 1], x, modulus, mod_neg_inv);
+ i += 1;
+ }
+
+ let starting_limb = (exponent_bits - 1) / Limb::BITS;
+ let starting_bit_in_limb = (exponent_bits - 1) % Limb::BITS;
+ let starting_window = starting_bit_in_limb / WINDOW;
+ let starting_window_mask = (1 << (starting_bit_in_limb % WINDOW + 1)) - 1;
+
+ let mut z = *r; // 1 in Montgomery form
+
+ let mut limb_num = starting_limb + 1;
+ while limb_num > 0 {
+ limb_num -= 1;
+ let w = exponent.as_limbs()[limb_num].0;
+
+ let mut window_num = if limb_num == starting_limb {
+ starting_window + 1
+ } else {
+ Limb::BITS / WINDOW
+ };
+ while window_num > 0 {
+ window_num -= 1;
+
+ let mut idx = (w >> (window_num * WINDOW)) & WINDOW_MASK;
+
+ if limb_num == starting_limb && window_num == starting_window {
+ idx &= starting_window_mask;
+ } else {
+ let mut i = 0;
+ while i < WINDOW {
+ i += 1;
+ z = square_montgomery_form(&z, modulus, mod_neg_inv);
+ }
+ }
+
+ // Constant-time lookup in the array of powers
+ let mut power = powers[0];
+ let mut i = 1;
+ while i < 1 << WINDOW {
+ let choice = Limb::ct_eq(Limb(i as Word), Limb(idx));
+ power = Uint::<LIMBS>::ct_select(&power, &powers[i], choice);
+ i += 1;
+ }
+
+ z = mul_montgomery_form(&z, &power, modulus, mod_neg_inv);
+ }
+ }
+
+ z
+}
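
Stripped of limbs and Montgomery form, the loop above is 4-bit fixed-window exponentiation. A variable-time sketch on plain words (hypothetical helper; the real code also performs the table lookup in constant time and skips the squarings before the first window):

    fn pow_fixed_window(base: u64, exponent: u64, modulus: u64) -> u64 {
        // Table of base^0 .. base^15, one entry per possible 4-bit window value.
        let mut powers = [1u64; 16];
        for i in 1..16 {
            powers[i] = ((powers[i - 1] as u128 * base as u128) % modulus as u128) as u64;
        }
        let mut z = 1u64;
        // For each 4-bit window, most significant first: square 4 times,
        // then multiply in the table entry selected by the window.
        for w in (0..16).rev() {
            for _ in 0..4 {
                z = ((z as u128 * z as u128) % modulus as u128) as u64;
            }
            let idx = ((exponent >> (4 * w)) & 0xf) as usize;
            z = ((z as u128 * powers[idx] as u128) % modulus as u128) as u64;
        }
        z
    }
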
diff --git a/vendor/crypto-bigint/src/uint/modular/reduction.rs b/vendor/crypto-bigint/src/uint/modular/reduction.rs
new file mode 100644
index 0000000..b206ae3
--- /dev/null
+++ b/vendor/crypto-bigint/src/uint/modular/reduction.rs
@@ -0,0 +1,55 @@
+use crate::{Limb, Uint, WideWord, Word};
+
+/// Returns `(hi, lo)` such that `hi * R + lo = x * y + z + w`.
+#[inline(always)]
+const fn muladdcarry(x: Word, y: Word, z: Word, w: Word) -> (Word, Word) {
+ let res = (x as WideWord)
+ .wrapping_mul(y as WideWord)
+ .wrapping_add(z as WideWord)
+ .wrapping_add(w as WideWord);
+ ((res >> Word::BITS) as Word, res as Word)
+}
+
+/// Algorithm 14.32 in Handbook of Applied Cryptography <https://cacr.uwaterloo.ca/hac/about/chap14.pdf>
+pub const fn montgomery_reduction<const LIMBS: usize>(
+ lower_upper: &(Uint<LIMBS>, Uint<LIMBS>),
+ modulus: &Uint<LIMBS>,
+ mod_neg_inv: Limb,
+) -> Uint<LIMBS> {
+ let (mut lower, mut upper) = *lower_upper;
+
+ let mut meta_carry = Limb(0);
+ let mut new_sum;
+
+ let mut i = 0;
+ while i < LIMBS {
+ let u = lower.limbs[i].0.wrapping_mul(mod_neg_inv.0);
+
+ let (mut carry, _) = muladdcarry(u, modulus.limbs[0].0, lower.limbs[i].0, 0);
+ let mut new_limb;
+
+ let mut j = 1;
+ while j < (LIMBS - i) {
+ (carry, new_limb) = muladdcarry(u, modulus.limbs[j].0, lower.limbs[i + j].0, carry);
+ lower.limbs[i + j] = Limb(new_limb);
+ j += 1;
+ }
+ while j < LIMBS {
+ (carry, new_limb) =
+ muladdcarry(u, modulus.limbs[j].0, upper.limbs[i + j - LIMBS].0, carry);
+ upper.limbs[i + j - LIMBS] = Limb(new_limb);
+ j += 1;
+ }
+
+ (new_sum, meta_carry) = upper.limbs[i].adc(Limb(carry), meta_carry);
+ upper.limbs[i] = new_sum;
+
+ i += 1;
+ }
+
+ // Dividing by R amounts to simply taking the upper half of the limbs.
+ // Final reduction: at this point the value is at most 2 * modulus,
+ // so `meta_carry` is either 0 or 1.
+
+ upper.sub_mod_with_carry(meta_carry, modulus, modulus)
+}
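
A single-word version of the same reduction (REDC) may make the loop easier to follow. Here `m_neg_inv` plays the role of `mod_neg_inv`, i.e. `-m^-1 mod 2^32` (hypothetical helper; the final step is a plain branch instead of the constant-time `sub_mod_with_carry`):

    // Given t < m * 2^32, returns t * 2^-32 mod m.
    fn redc_32(t: u64, m: u32, m_neg_inv: u32) -> u32 {
        let u = (t as u32).wrapping_mul(m_neg_inv);     // u = -t * m^-1 mod 2^32
        let folded = t as u128 + u as u128 * m as u128; // t + u*m is divisible by 2^32
        let r = (folded >> 32) as u64;                  // exact division; r < 2*m
        if r >= m as u64 {
            (r - m as u64) as u32
        } else {
            r as u32
        }
    }
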
diff --git a/vendor/crypto-bigint/src/uint/modular/runtime_mod.rs b/vendor/crypto-bigint/src/uint/modular/runtime_mod.rs
new file mode 100644
index 0000000..ad5cfd5
--- /dev/null
+++ b/vendor/crypto-bigint/src/uint/modular/runtime_mod.rs
@@ -0,0 +1,301 @@
+use crate::{Limb, Uint, Word};
+
+use super::{
+ constant_mod::{Residue, ResidueParams},
+ div_by_2::div_by_2,
+ reduction::montgomery_reduction,
+ Retrieve,
+};
+
+use subtle::{Choice, ConditionallySelectable, ConstantTimeEq, CtOption};
+
+/// Additions between residues with a modulus set at runtime
+mod runtime_add;
+/// Multiplicative inverses of residues with a modulus set at runtime
+mod runtime_inv;
+/// Multiplications between residues with a modulus set at runtime
+mod runtime_mul;
+/// Negations of residues with a modulus set at runtime
+mod runtime_neg;
+/// Exponentiation of residues with a modulus set at runtime
+mod runtime_pow;
+/// Subtractions between residues with a modulus set at runtime
+mod runtime_sub;
+
+/// The parameters to efficiently go to and from the Montgomery form for an odd modulus provided at runtime.
+#[derive(Debug, Clone, Copy, PartialEq, Eq)]
+pub struct DynResidueParams<const LIMBS: usize> {
+ // The constant modulus
+ modulus: Uint<LIMBS>,
+ // Parameter used in Montgomery reduction
+ r: Uint<LIMBS>,
+ // R^2, used to move into Montgomery form
+ r2: Uint<LIMBS>,
+ // R^3, used to compute the multiplicative inverse
+ r3: Uint<LIMBS>,
+ // The lowest limbs of -(MODULUS^-1) mod R
+ // We only need the LSB because during reduction this value is multiplied modulo 2**Limb::BITS.
+ mod_neg_inv: Limb,
+}
+
+impl<const LIMBS: usize> DynResidueParams<LIMBS> {
+ // Internal helper function to generate parameters; this lets us wrap the constructors more cleanly
+ const fn generate_params(modulus: &Uint<LIMBS>) -> Self {
+ let r = Uint::MAX.const_rem(modulus).0.wrapping_add(&Uint::ONE);
+ let r2 = Uint::const_rem_wide(r.square_wide(), modulus).0;
+
+ // Since we are calculating the inverse modulo (Word::MAX+1),
+ // we can take the modulo right away and calculate the inverse of the first limb only.
+ let modulus_lo = Uint::<1>::from_words([modulus.limbs[0].0]);
+ let mod_neg_inv = Limb(
+ Word::MIN.wrapping_sub(modulus_lo.inv_mod2k_vartime(Word::BITS as usize).limbs[0].0),
+ );
+
+ let r3 = montgomery_reduction(&r2.square_wide(), modulus, mod_neg_inv);
+
+ Self {
+ modulus: *modulus,
+ r,
+ r2,
+ r3,
+ mod_neg_inv,
+ }
+ }
+
+ /// Instantiates a new set of `DynResidueParams` representing the given `modulus`, which _must_ be odd.
+ /// If `modulus` is not odd, this function will panic; use [`new_checked`][`DynResidueParams::new_checked`] if you want to be able to detect an invalid modulus.
+ pub const fn new(modulus: &Uint<LIMBS>) -> Self {
+ // A valid modulus must be odd
+ if modulus.ct_is_odd().to_u8() == 0 {
+ panic!("modulus must be odd");
+ }
+
+ Self::generate_params(modulus)
+ }
+
+ /// Instantiates a new set of `DynResidueParams` representing the given `modulus` if it is odd.
+ /// Returns a `CtOption` that is `None` if the provided modulus is not odd; this is a safer version of [`new`][`DynResidueParams::new`], which can panic.
+ #[deprecated(
+ since = "0.5.3",
+ note = "This functionality will be moved to `new` in a future release."
+ )]
+ pub fn new_checked(modulus: &Uint<LIMBS>) -> CtOption<Self> {
+ // A valid modulus must be odd, which we check in constant time
+ CtOption::new(Self::generate_params(modulus), modulus.ct_is_odd().into())
+ }
+
+ /// Returns the modulus which was used to initialize these parameters.
+ pub const fn modulus(&self) -> &Uint<LIMBS> {
+ &self.modulus
+ }
+
+ /// Create `DynResidueParams` corresponding to a `ResidueParams`.
+ pub const fn from_residue_params<P>() -> Self
+ where
+ P: ResidueParams<LIMBS>,
+ {
+ Self {
+ modulus: P::MODULUS,
+ r: P::R,
+ r2: P::R2,
+ r3: P::R3,
+ mod_neg_inv: P::MOD_NEG_INV,
+ }
+ }
+}
+
+impl<const LIMBS: usize> ConditionallySelectable for DynResidueParams<LIMBS> {
+ fn conditional_select(a: &Self, b: &Self, choice: Choice) -> Self {
+ Self {
+ modulus: Uint::conditional_select(&a.modulus, &b.modulus, choice),
+ r: Uint::conditional_select(&a.r, &b.r, choice),
+ r2: Uint::conditional_select(&a.r2, &b.r2, choice),
+ r3: Uint::conditional_select(&a.r3, &b.r3, choice),
+ mod_neg_inv: Limb::conditional_select(&a.mod_neg_inv, &b.mod_neg_inv, choice),
+ }
+ }
+}
+
+impl<const LIMBS: usize> ConstantTimeEq for DynResidueParams<LIMBS> {
+ fn ct_eq(&self, other: &Self) -> Choice {
+ self.modulus.ct_eq(&other.modulus)
+ & self.r.ct_eq(&other.r)
+ & self.r2.ct_eq(&other.r2)
+ & self.r3.ct_eq(&other.r3)
+ & self.mod_neg_inv.ct_eq(&other.mod_neg_inv)
+ }
+}
+
+/// A residue represented using `LIMBS` limbs. The odd modulus of this residue is set at runtime.
+#[derive(Debug, Clone, Copy, PartialEq, Eq)]
+pub struct DynResidue<const LIMBS: usize> {
+ montgomery_form: Uint<LIMBS>,
+ residue_params: DynResidueParams<LIMBS>,
+}
+
+impl<const LIMBS: usize> DynResidue<LIMBS> {
+ /// Instantiates a new `DynResidue` that represents this `integer` modulo the modulus in `residue_params`.
+ pub const fn new(integer: &Uint<LIMBS>, residue_params: DynResidueParams<LIMBS>) -> Self {
+ let product = integer.mul_wide(&residue_params.r2);
+ let montgomery_form = montgomery_reduction(
+ &product,
+ &residue_params.modulus,
+ residue_params.mod_neg_inv,
+ );
+
+ Self {
+ montgomery_form,
+ residue_params,
+ }
+ }
+
+ /// Retrieves the integer currently encoded in this `DynResidue`, guaranteed to be reduced.
+ pub const fn retrieve(&self) -> Uint<LIMBS> {
+ montgomery_reduction(
+ &(self.montgomery_form, Uint::ZERO),
+ &self.residue_params.modulus,
+ self.residue_params.mod_neg_inv,
+ )
+ }
+
+ /// Instantiates a new `DynResidue` that represents zero.
+ pub const fn zero(residue_params: DynResidueParams<LIMBS>) -> Self {
+ Self {
+ montgomery_form: Uint::<LIMBS>::ZERO,
+ residue_params,
+ }
+ }
+
+ /// Instantiates a new `DynResidue` that represents 1.
+ pub const fn one(residue_params: DynResidueParams<LIMBS>) -> Self {
+ Self {
+ montgomery_form: residue_params.r,
+ residue_params,
+ }
+ }
+
+ /// Returns the parameter struct used to initialize this residue.
+ pub const fn params(&self) -> &DynResidueParams<LIMBS> {
+ &self.residue_params
+ }
+
+ /// Access the `DynResidue` value in Montgomery form.
+ pub const fn as_montgomery(&self) -> &Uint<LIMBS> {
+ &self.montgomery_form
+ }
+
+ /// Mutably access the `DynResidue` value in Montgomery form.
+ pub fn as_montgomery_mut(&mut self) -> &mut Uint<LIMBS> {
+ &mut self.montgomery_form
+ }
+
+ /// Create a `DynResidue` from a value in Montgomery form.
+ pub const fn from_montgomery(
+ integer: Uint<LIMBS>,
+ residue_params: DynResidueParams<LIMBS>,
+ ) -> Self {
+ Self {
+ montgomery_form: integer,
+ residue_params,
+ }
+ }
+
+ /// Extract the value from the `DynResidue` in Montgomery form.
+ pub const fn to_montgomery(&self) -> Uint<LIMBS> {
+ self.montgomery_form
+ }
+
+ /// Performs modular division by 2; that is, for a given `x` it returns `y`
+ /// such that `y * 2 = x mod p`. This means:
+ /// - if `x` is even, returns `x / 2`,
+ /// - if `x` is odd, returns `(x + p) / 2`
+ /// (since the modulus `p` is required to be odd, this division is exact).
+ pub fn div_by_2(&self) -> Self {
+ Self {
+ montgomery_form: div_by_2(&self.montgomery_form, &self.residue_params.modulus),
+ residue_params: self.residue_params,
+ }
+ }
+}
+
+impl<const LIMBS: usize> Retrieve for DynResidue<LIMBS> {
+ type Output = Uint<LIMBS>;
+ fn retrieve(&self) -> Self::Output {
+ self.retrieve()
+ }
+}
+
+impl<const LIMBS: usize, P: ResidueParams<LIMBS>> From<&Residue<P, LIMBS>> for DynResidue<LIMBS> {
+ fn from(residue: &Residue<P, LIMBS>) -> Self {
+ Self {
+ montgomery_form: residue.to_montgomery(),
+ residue_params: DynResidueParams::from_residue_params::<P>(),
+ }
+ }
+}
+
+impl<const LIMBS: usize> ConditionallySelectable for DynResidue<LIMBS> {
+ fn conditional_select(a: &Self, b: &Self, choice: Choice) -> Self {
+ Self {
+ montgomery_form: Uint::conditional_select(
+ &a.montgomery_form,
+ &b.montgomery_form,
+ choice,
+ ),
+ residue_params: DynResidueParams::conditional_select(
+ &a.residue_params,
+ &b.residue_params,
+ choice,
+ ),
+ }
+ }
+}
+
+impl<const LIMBS: usize> ConstantTimeEq for DynResidue<LIMBS> {
+ fn ct_eq(&self, other: &Self) -> Choice {
+ self.montgomery_form.ct_eq(&other.montgomery_form)
+ & self.residue_params.ct_eq(&other.residue_params)
+ }
+}
+
+/// NOTE: this does _not_ zeroize the parameters, so that the value remains internally consistent.
+#[cfg(feature = "zeroize")]
+impl<const LIMBS: usize> zeroize::Zeroize for DynResidue<LIMBS> {
+ fn zeroize(&mut self) {
+ self.montgomery_form.zeroize()
+ }
+}
+
+#[cfg(test)]
+mod test {
+ use super::*;
+ use crate::nlimbs;
+
+ const LIMBS: usize = nlimbs!(64);
+
+ #[test]
+ #[allow(deprecated)]
+ // Test that a valid modulus yields `DynResidueParams`
+ fn test_valid_modulus() {
+ let valid_modulus = Uint::<LIMBS>::from(3u8);
+
+ DynResidueParams::<LIMBS>::new_checked(&valid_modulus).unwrap();
+ DynResidueParams::<LIMBS>::new(&valid_modulus);
+ }
+
+ #[test]
+ #[allow(deprecated)]
+ // Test that an invalid checked modulus does not yield `DynResidueParams`
+ fn test_invalid_checked_modulus() {
+ assert!(bool::from(
+ DynResidueParams::<LIMBS>::new_checked(&Uint::from(2u8)).is_none()
+ ))
+ }
+
+ #[test]
+ #[should_panic]
+ // Test that an invalid modulus panics
+ fn test_invalid_modulus() {
+ DynResidueParams::<LIMBS>::new(&Uint::from(2u8));
+ }
+}
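
A runtime-modulus usage sketch mirroring the tests above (arbitrary small values; paths as imported in the tests):

    use crypto_bigint::{
        modular::runtime_mod::{DynResidue, DynResidueParams},
        U256,
    };

    fn dyn_demo() -> U256 {
        let params = DynResidueParams::new(&U256::from(13u64)); // modulus must be odd
        let a = DynResidue::new(&U256::from(9u64), params);
        let b = DynResidue::new(&U256::from(7u64), params);
        (a * b).retrieve() // (9 * 7) mod 13 = 11
    }
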
diff --git a/vendor/crypto-bigint/src/uint/modular/runtime_mod/runtime_add.rs b/vendor/crypto-bigint/src/uint/modular/runtime_mod/runtime_add.rs
new file mode 100644
index 0000000..eb47086
--- /dev/null
+++ b/vendor/crypto-bigint/src/uint/modular/runtime_mod/runtime_add.rs
@@ -0,0 +1,92 @@
+use core::ops::{Add, AddAssign};
+
+use crate::modular::add::add_montgomery_form;
+
+use super::DynResidue;
+
+impl<const LIMBS: usize> DynResidue<LIMBS> {
+ /// Adds `rhs`.
+ pub const fn add(&self, rhs: &Self) -> Self {
+ Self {
+ montgomery_form: add_montgomery_form(
+ &self.montgomery_form,
+ &rhs.montgomery_form,
+ &self.residue_params.modulus,
+ ),
+ residue_params: self.residue_params,
+ }
+ }
+}
+
+impl<const LIMBS: usize> Add<&DynResidue<LIMBS>> for &DynResidue<LIMBS> {
+ type Output = DynResidue<LIMBS>;
+ fn add(self, rhs: &DynResidue<LIMBS>) -> DynResidue<LIMBS> {
+ debug_assert_eq!(self.residue_params, rhs.residue_params);
+ self.add(rhs)
+ }
+}
+
+impl<const LIMBS: usize> Add<DynResidue<LIMBS>> for &DynResidue<LIMBS> {
+ type Output = DynResidue<LIMBS>;
+ #[allow(clippy::op_ref)]
+ fn add(self, rhs: DynResidue<LIMBS>) -> DynResidue<LIMBS> {
+ self + &rhs
+ }
+}
+
+impl<const LIMBS: usize> Add<&DynResidue<LIMBS>> for DynResidue<LIMBS> {
+ type Output = DynResidue<LIMBS>;
+ #[allow(clippy::op_ref)]
+ fn add(self, rhs: &DynResidue<LIMBS>) -> DynResidue<LIMBS> {
+ &self + rhs
+ }
+}
+
+impl<const LIMBS: usize> Add<DynResidue<LIMBS>> for DynResidue<LIMBS> {
+ type Output = DynResidue<LIMBS>;
+ fn add(self, rhs: DynResidue<LIMBS>) -> DynResidue<LIMBS> {
+ &self + &rhs
+ }
+}
+
+impl<const LIMBS: usize> AddAssign<&DynResidue<LIMBS>> for DynResidue<LIMBS> {
+ fn add_assign(&mut self, rhs: &DynResidue<LIMBS>) {
+ *self = *self + rhs;
+ }
+}
+
+impl<const LIMBS: usize> AddAssign<DynResidue<LIMBS>> for DynResidue<LIMBS> {
+ fn add_assign(&mut self, rhs: DynResidue<LIMBS>) {
+ *self += &rhs;
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use crate::{
+ modular::runtime_mod::{DynResidue, DynResidueParams},
+ U256,
+ };
+
+ #[test]
+ fn add_overflow() {
+ let params = DynResidueParams::new(&U256::from_be_hex(
+ "ffffffff00000000ffffffffffffffffbce6faada7179e84f3b9cac2fc632551",
+ ));
+
+ let x =
+ U256::from_be_hex("44acf6b7e36c1342c2c5897204fe09504e1e2efb1a900377dbc4e7a6a133ec56");
+ let mut x_mod = DynResidue::new(&x, params);
+
+ let y =
+ U256::from_be_hex("d5777c45019673125ad240f83094d4252d829516fac8601ed01979ec1ec1a251");
+ let y_mod = DynResidue::new(&y, params);
+
+ x_mod += &y_mod;
+
+ let expected =
+ U256::from_be_hex("1a2472fde50286541d97ca6a3592dd75beb9c9646e40c511b82496cfc3926956");
+
+ assert_eq!(expected, x_mod.retrieve());
+ }
+}
diff --git a/vendor/crypto-bigint/src/uint/modular/runtime_mod/runtime_inv.rs b/vendor/crypto-bigint/src/uint/modular/runtime_mod/runtime_inv.rs
new file mode 100644
index 0000000..5e639d4
--- /dev/null
+++ b/vendor/crypto-bigint/src/uint/modular/runtime_mod/runtime_inv.rs
@@ -0,0 +1,35 @@
+use subtle::CtOption;
+
+use crate::{modular::inv::inv_montgomery_form, traits::Invert, CtChoice};
+
+use super::DynResidue;
+
+impl<const LIMBS: usize> DynResidue<LIMBS> {
+ /// Computes the residue `self^-1` representing the multiplicative inverse of `self`.
+ /// I.e. `self * self^-1 = 1`.
+ /// If the number was invertible, the second element of the tuple is the truthy value,
+ /// otherwise it is the falsy value (in which case the first element's value is unspecified).
+ pub const fn invert(&self) -> (Self, CtChoice) {
+ let (montgomery_form, is_some) = inv_montgomery_form(
+ &self.montgomery_form,
+ &self.residue_params.modulus,
+ &self.residue_params.r3,
+ self.residue_params.mod_neg_inv,
+ );
+
+ let value = Self {
+ montgomery_form,
+ residue_params: self.residue_params,
+ };
+
+ (value, is_some)
+ }
+}
+
+impl<const LIMBS: usize> Invert for DynResidue<LIMBS> {
+ type Output = CtOption<Self>;
+ fn invert(&self) -> Self::Output {
+ let (value, is_some) = self.invert();
+ CtOption::new(value, is_some.into())
+ }
+}
diff --git a/vendor/crypto-bigint/src/uint/modular/runtime_mod/runtime_mul.rs b/vendor/crypto-bigint/src/uint/modular/runtime_mod/runtime_mul.rs
new file mode 100644
index 0000000..30c4b9c
--- /dev/null
+++ b/vendor/crypto-bigint/src/uint/modular/runtime_mod/runtime_mul.rs
@@ -0,0 +1,84 @@
+use core::ops::{Mul, MulAssign};
+
+use crate::{
+ modular::mul::{mul_montgomery_form, square_montgomery_form},
+ traits::Square,
+};
+
+use super::DynResidue;
+
+impl<const LIMBS: usize> DynResidue<LIMBS> {
+ /// Multiplies by `rhs`.
+ pub const fn mul(&self, rhs: &Self) -> Self {
+ Self {
+ montgomery_form: mul_montgomery_form(
+ &self.montgomery_form,
+ &rhs.montgomery_form,
+ &self.residue_params.modulus,
+ self.residue_params.mod_neg_inv,
+ ),
+ residue_params: self.residue_params,
+ }
+ }
+
+ /// Computes the (reduced) square of a residue.
+ pub const fn square(&self) -> Self {
+ Self {
+ montgomery_form: square_montgomery_form(
+ &self.montgomery_form,
+ &self.residue_params.modulus,
+ self.residue_params.mod_neg_inv,
+ ),
+ residue_params: self.residue_params,
+ }
+ }
+}
+
+impl<const LIMBS: usize> Mul<&DynResidue<LIMBS>> for &DynResidue<LIMBS> {
+ type Output = DynResidue<LIMBS>;
+ fn mul(self, rhs: &DynResidue<LIMBS>) -> DynResidue<LIMBS> {
+ debug_assert_eq!(self.residue_params, rhs.residue_params);
+ self.mul(rhs)
+ }
+}
+
+impl<const LIMBS: usize> Mul<DynResidue<LIMBS>> for &DynResidue<LIMBS> {
+ type Output = DynResidue<LIMBS>;
+ #[allow(clippy::op_ref)]
+ fn mul(self, rhs: DynResidue<LIMBS>) -> DynResidue<LIMBS> {
+ self * &rhs
+ }
+}
+
+impl<const LIMBS: usize> Mul<&DynResidue<LIMBS>> for DynResidue<LIMBS> {
+ type Output = DynResidue<LIMBS>;
+ #[allow(clippy::op_ref)]
+ fn mul(self, rhs: &DynResidue<LIMBS>) -> DynResidue<LIMBS> {
+ &self * rhs
+ }
+}
+
+impl<const LIMBS: usize> Mul<DynResidue<LIMBS>> for DynResidue<LIMBS> {
+ type Output = DynResidue<LIMBS>;
+ fn mul(self, rhs: DynResidue<LIMBS>) -> DynResidue<LIMBS> {
+ &self * &rhs
+ }
+}
+
+impl<const LIMBS: usize> MulAssign<&DynResidue<LIMBS>> for DynResidue<LIMBS> {
+ fn mul_assign(&mut self, rhs: &DynResidue<LIMBS>) {
+ *self = *self * rhs;
+ }
+}
+
+impl<const LIMBS: usize> MulAssign<DynResidue<LIMBS>> for DynResidue<LIMBS> {
+ fn mul_assign(&mut self, rhs: DynResidue<LIMBS>) {
+ *self *= &rhs;
+ }
+}
+
+impl<const LIMBS: usize> Square for DynResidue<LIMBS> {
+ fn square(&self) -> Self {
+ DynResidue::square(self)
+ }
+}
diff --git a/vendor/crypto-bigint/src/uint/modular/runtime_mod/runtime_neg.rs b/vendor/crypto-bigint/src/uint/modular/runtime_mod/runtime_neg.rs
new file mode 100644
index 0000000..fca1ff8
--- /dev/null
+++ b/vendor/crypto-bigint/src/uint/modular/runtime_mod/runtime_neg.rs
@@ -0,0 +1,24 @@
+use core::ops::Neg;
+
+use super::DynResidue;
+
+impl<const LIMBS: usize> DynResidue<LIMBS> {
+ /// Negates the number.
+ pub const fn neg(&self) -> Self {
+ Self::zero(self.residue_params).sub(self)
+ }
+}
+
+impl<const LIMBS: usize> Neg for DynResidue<LIMBS> {
+ type Output = Self;
+ fn neg(self) -> Self {
+ DynResidue::neg(&self)
+ }
+}
+
+impl<const LIMBS: usize> Neg for &DynResidue<LIMBS> {
+ type Output = DynResidue<LIMBS>;
+ fn neg(self) -> DynResidue<LIMBS> {
+ DynResidue::neg(self)
+ }
+}
diff --git a/vendor/crypto-bigint/src/uint/modular/runtime_mod/runtime_pow.rs b/vendor/crypto-bigint/src/uint/modular/runtime_mod/runtime_pow.rs
new file mode 100644
index 0000000..889e41b
--- /dev/null
+++ b/vendor/crypto-bigint/src/uint/modular/runtime_mod/runtime_pow.rs
@@ -0,0 +1,42 @@
+use crate::{modular::pow::pow_montgomery_form, PowBoundedExp, Uint};
+
+use super::DynResidue;
+
+impl<const LIMBS: usize> DynResidue<LIMBS> {
+ /// Raises to the `exponent` power.
+ pub const fn pow<const RHS_LIMBS: usize>(
+ &self,
+ exponent: &Uint<RHS_LIMBS>,
+ ) -> DynResidue<LIMBS> {
+ self.pow_bounded_exp(exponent, Uint::<RHS_LIMBS>::BITS)
+ }
+
+ /// Raises to the `exponent` power,
+ /// with `exponent_bits` representing the number of (least significant) bits
+ /// to take into account for the exponent.
+ ///
+ /// NOTE: `exponent_bits` may be leaked through timing.
+ pub const fn pow_bounded_exp<const RHS_LIMBS: usize>(
+ &self,
+ exponent: &Uint<RHS_LIMBS>,
+ exponent_bits: usize,
+ ) -> Self {
+ Self {
+ montgomery_form: pow_montgomery_form(
+ &self.montgomery_form,
+ exponent,
+ exponent_bits,
+ &self.residue_params.modulus,
+ &self.residue_params.r,
+ self.residue_params.mod_neg_inv,
+ ),
+ residue_params: self.residue_params,
+ }
+ }
+}
+
+impl<const LIMBS: usize> PowBoundedExp<Uint<LIMBS>> for DynResidue<LIMBS> {
+ fn pow_bounded_exp(&self, exponent: &Uint<LIMBS>, exponent_bits: usize) -> Self {
+ self.pow_bounded_exp(exponent, exponent_bits)
+ }
+}
diff --git a/vendor/crypto-bigint/src/uint/modular/runtime_mod/runtime_sub.rs b/vendor/crypto-bigint/src/uint/modular/runtime_mod/runtime_sub.rs
new file mode 100644
index 0000000..dd6fd84
--- /dev/null
+++ b/vendor/crypto-bigint/src/uint/modular/runtime_mod/runtime_sub.rs
@@ -0,0 +1,92 @@
+use core::ops::{Sub, SubAssign};
+
+use crate::modular::sub::sub_montgomery_form;
+
+use super::DynResidue;
+
+impl<const LIMBS: usize> DynResidue<LIMBS> {
+ /// Subtracts `rhs`.
+ pub const fn sub(&self, rhs: &Self) -> Self {
+ Self {
+ montgomery_form: sub_montgomery_form(
+ &self.montgomery_form,
+ &rhs.montgomery_form,
+ &self.residue_params.modulus,
+ ),
+ residue_params: self.residue_params,
+ }
+ }
+}
+
+impl<const LIMBS: usize> Sub<&DynResidue<LIMBS>> for &DynResidue<LIMBS> {
+ type Output = DynResidue<LIMBS>;
+ fn sub(self, rhs: &DynResidue<LIMBS>) -> DynResidue<LIMBS> {
+ debug_assert_eq!(self.residue_params, rhs.residue_params);
+ self.sub(rhs)
+ }
+}
+
+impl<const LIMBS: usize> Sub<DynResidue<LIMBS>> for &DynResidue<LIMBS> {
+ type Output = DynResidue<LIMBS>;
+ #[allow(clippy::op_ref)]
+ fn sub(self, rhs: DynResidue<LIMBS>) -> DynResidue<LIMBS> {
+ self - &rhs
+ }
+}
+
+impl<const LIMBS: usize> Sub<&DynResidue<LIMBS>> for DynResidue<LIMBS> {
+ type Output = DynResidue<LIMBS>;
+ #[allow(clippy::op_ref)]
+ fn sub(self, rhs: &DynResidue<LIMBS>) -> DynResidue<LIMBS> {
+ &self - rhs
+ }
+}
+
+impl<const LIMBS: usize> Sub<DynResidue<LIMBS>> for DynResidue<LIMBS> {
+ type Output = DynResidue<LIMBS>;
+ fn sub(self, rhs: DynResidue<LIMBS>) -> DynResidue<LIMBS> {
+ &self - &rhs
+ }
+}
+
+impl<const LIMBS: usize> SubAssign<&DynResidue<LIMBS>> for DynResidue<LIMBS> {
+ fn sub_assign(&mut self, rhs: &DynResidue<LIMBS>) {
+ *self = *self - rhs;
+ }
+}
+
+impl<const LIMBS: usize> SubAssign<DynResidue<LIMBS>> for DynResidue<LIMBS> {
+ fn sub_assign(&mut self, rhs: DynResidue<LIMBS>) {
+ *self -= &rhs;
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use crate::{
+ modular::runtime_mod::{DynResidue, DynResidueParams},
+ U256,
+ };
+
+ #[test]
+ fn sub_overflow() {
+ let params = DynResidueParams::new(&U256::from_be_hex(
+ "ffffffff00000000ffffffffffffffffbce6faada7179e84f3b9cac2fc632551",
+ ));
+
+ let x =
+ U256::from_be_hex("44acf6b7e36c1342c2c5897204fe09504e1e2efb1a900377dbc4e7a6a133ec56");
+ let mut x_mod = DynResidue::new(&x, params);
+
+ let y =
+ U256::from_be_hex("d5777c45019673125ad240f83094d4252d829516fac8601ed01979ec1ec1a251");
+ let y_mod = DynResidue::new(&y, params);
+
+ x_mod -= &y_mod;
+
+ let expected =
+ U256::from_be_hex("6f357a71e1d5a03167f34879d469352add829491c6df41ddff65387d7ed56f56");
+
+ assert_eq!(expected, x_mod.retrieve());
+ }
+}
diff --git a/vendor/crypto-bigint/src/uint/modular/sub.rs b/vendor/crypto-bigint/src/uint/modular/sub.rs
new file mode 100644
index 0000000..9c47170
--- /dev/null
+++ b/vendor/crypto-bigint/src/uint/modular/sub.rs
@@ -0,0 +1,9 @@
+use crate::Uint;
+
+pub(crate) const fn sub_montgomery_form<const LIMBS: usize>(
+ a: &Uint<LIMBS>,
+ b: &Uint<LIMBS>,
+ modulus: &Uint<LIMBS>,
+) -> Uint<LIMBS> {
+ a.sub_mod(b, modulus)
+}
diff --git a/vendor/crypto-bigint/src/uint/mul.rs b/vendor/crypto-bigint/src/uint/mul.rs
new file mode 100644
index 0000000..cb29332
--- /dev/null
+++ b/vendor/crypto-bigint/src/uint/mul.rs
@@ -0,0 +1,414 @@
+//! [`Uint`] multiplication operations.
+
+use crate::{Checked, CheckedMul, Concat, ConcatMixed, Limb, Uint, WideWord, Word, Wrapping, Zero};
+use core::ops::{Mul, MulAssign};
+use subtle::CtOption;
+
+impl<const LIMBS: usize> Uint<LIMBS> {
+ /// Multiply `self` by `rhs`, returning a concatenated "wide" result.
+ pub fn mul<const HLIMBS: usize>(
+ &self,
+ rhs: &Uint<HLIMBS>,
+ ) -> <Uint<HLIMBS> as ConcatMixed<Self>>::MixedOutput
+ where
+ Uint<HLIMBS>: ConcatMixed<Self>,
+ {
+ let (lo, hi) = self.mul_wide(rhs);
+ hi.concat_mixed(&lo)
+ }
+
+ /// Compute "wide" multiplication, with a product twice the size of the input.
+ ///
+ /// Returns a tuple containing the `(lo, hi)` components of the product.
+ ///
+ /// # Ordering note
+ ///
+ /// Releases of `crypto-bigint` prior to v0.3 used `(hi, lo)` ordering
+ /// instead. This has been changed for better consistency with the rest of
+ /// the APIs in this crate.
+ ///
+ /// For more info see: <https://github.com/RustCrypto/crypto-bigint/issues/4>
+ pub const fn mul_wide<const HLIMBS: usize>(&self, rhs: &Uint<HLIMBS>) -> (Self, Uint<HLIMBS>) {
+ let mut i = 0;
+ let mut lo = Self::ZERO;
+ let mut hi = Uint::<HLIMBS>::ZERO;
+
+ // Schoolbook multiplication.
+ // TODO(tarcieri): use Karatsuba for better performance?
+ while i < LIMBS {
+ let mut j = 0;
+ let mut carry = Limb::ZERO;
+
+ while j < HLIMBS {
+ let k = i + j;
+
+ if k >= LIMBS {
+ let (n, c) = hi.limbs[k - LIMBS].mac(self.limbs[i], rhs.limbs[j], carry);
+ hi.limbs[k - LIMBS] = n;
+ carry = c;
+ } else {
+ let (n, c) = lo.limbs[k].mac(self.limbs[i], rhs.limbs[j], carry);
+ lo.limbs[k] = n;
+ carry = c;
+ }
+
+ j += 1;
+ }
+
+ if i + j >= LIMBS {
+ hi.limbs[i + j - LIMBS] = carry;
+ } else {
+ lo.limbs[i + j] = carry;
+ }
+ i += 1;
+ }
+
+ (lo, hi)
+ }
+
+ /// Perform saturating multiplication, returning `MAX` on overflow.
+ pub const fn saturating_mul<const HLIMBS: usize>(&self, rhs: &Uint<HLIMBS>) -> Self {
+ let (res, overflow) = self.mul_wide(rhs);
+ Self::ct_select(&res, &Self::MAX, overflow.ct_is_nonzero())
+ }
+
+ /// Perform wrapping multiplication, discarding overflow.
+ pub const fn wrapping_mul<const H: usize>(&self, rhs: &Uint<H>) -> Self {
+ self.mul_wide(rhs).0
+ }
+
+ /// Square self, returning a concatenated "wide" result.
+ pub fn square(&self) -> <Self as Concat>::Output
+ where
+ Self: Concat,
+ {
+ let (lo, hi) = self.square_wide();
+ hi.concat(&lo)
+ }
+
+ /// Square self, returning a "wide" result in two parts as (lo, hi).
+ pub const fn square_wide(&self) -> (Self, Self) {
+ // Translated from https://github.com/ucbrise/jedi-pairing/blob/c4bf151/include/core/bigint.hpp#L410
+ //
+ // Permission to relicense the resulting translation as Apache 2.0 + MIT was given
+ // by the original author Sam Kumar: https://github.com/RustCrypto/crypto-bigint/pull/133#discussion_r1056870411
+ let mut lo = Self::ZERO;
+ let mut hi = Self::ZERO;
+
+ // Schoolbook multiplication, but only considering half of the multiplication grid
+ let mut i = 1;
+ while i < LIMBS {
+ let mut j = 0;
+ let mut carry = Limb::ZERO;
+
+ while j < i {
+ let k = i + j;
+
+ if k >= LIMBS {
+ let (n, c) = hi.limbs[k - LIMBS].mac(self.limbs[i], self.limbs[j], carry);
+ hi.limbs[k - LIMBS] = n;
+ carry = c;
+ } else {
+ let (n, c) = lo.limbs[k].mac(self.limbs[i], self.limbs[j], carry);
+ lo.limbs[k] = n;
+ carry = c;
+ }
+
+ j += 1;
+ }
+
+ if (2 * i) < LIMBS {
+ lo.limbs[2 * i] = carry;
+ } else {
+ hi.limbs[2 * i - LIMBS] = carry;
+ }
+
+ i += 1;
+ }
+
+ // Double the current result, this accounts for the other half of the multiplication grid.
+ // TODO: The top word is empty so we can also use a special purpose shl.
+ (lo, hi) = Self::shl_vartime_wide((lo, hi), 1);
+
+ // Handle the diagonal of the multiplication grid, which finishes the multiplication grid.
+ let mut carry = Limb::ZERO;
+ let mut i = 0;
+ while i < LIMBS {
+ if (i * 2) < LIMBS {
+ let (n, c) = lo.limbs[i * 2].mac(self.limbs[i], self.limbs[i], carry);
+ lo.limbs[i * 2] = n;
+ carry = c;
+ } else {
+ let (n, c) = hi.limbs[i * 2 - LIMBS].mac(self.limbs[i], self.limbs[i], carry);
+ hi.limbs[i * 2 - LIMBS] = n;
+ carry = c;
+ }
+
+ if (i * 2 + 1) < LIMBS {
+ let n = lo.limbs[i * 2 + 1].0 as WideWord + carry.0 as WideWord;
+ lo.limbs[i * 2 + 1] = Limb(n as Word);
+ carry = Limb((n >> Word::BITS) as Word);
+ } else {
+ let n = hi.limbs[i * 2 + 1 - LIMBS].0 as WideWord + carry.0 as WideWord;
+ hi.limbs[i * 2 + 1 - LIMBS] = Limb(n as Word);
+ carry = Limb((n >> Word::BITS) as Word);
+ }
+
+ i += 1;
+ }
+
+ (lo, hi)
+ }
+}
+
+impl<const LIMBS: usize, const HLIMBS: usize> CheckedMul<&Uint<HLIMBS>> for Uint<LIMBS> {
+ type Output = Self;
+
+ fn checked_mul(&self, rhs: &Uint<HLIMBS>) -> CtOption<Self> {
+ let (lo, hi) = self.mul_wide(rhs);
+ CtOption::new(lo, hi.is_zero())
+ }
+}
+
+impl<const LIMBS: usize, const HLIMBS: usize> Mul<Wrapping<Uint<HLIMBS>>>
+ for Wrapping<Uint<LIMBS>>
+{
+ type Output = Self;
+
+ fn mul(self, rhs: Wrapping<Uint<HLIMBS>>) -> Wrapping<Uint<LIMBS>> {
+ Wrapping(self.0.wrapping_mul(&rhs.0))
+ }
+}
+
+impl<const LIMBS: usize, const HLIMBS: usize> Mul<&Wrapping<Uint<HLIMBS>>>
+ for Wrapping<Uint<LIMBS>>
+{
+ type Output = Self;
+
+ fn mul(self, rhs: &Wrapping<Uint<HLIMBS>>) -> Wrapping<Uint<LIMBS>> {
+ Wrapping(self.0.wrapping_mul(&rhs.0))
+ }
+}
+
+impl<const LIMBS: usize, const HLIMBS: usize> Mul<Wrapping<Uint<HLIMBS>>>
+ for &Wrapping<Uint<LIMBS>>
+{
+ type Output = Wrapping<Uint<LIMBS>>;
+
+ fn mul(self, rhs: Wrapping<Uint<HLIMBS>>) -> Wrapping<Uint<LIMBS>> {
+ Wrapping(self.0.wrapping_mul(&rhs.0))
+ }
+}
+
+impl<const LIMBS: usize, const HLIMBS: usize> Mul<&Wrapping<Uint<HLIMBS>>>
+ for &Wrapping<Uint<LIMBS>>
+{
+ type Output = Wrapping<Uint<LIMBS>>;
+
+ fn mul(self, rhs: &Wrapping<Uint<HLIMBS>>) -> Wrapping<Uint<LIMBS>> {
+ Wrapping(self.0.wrapping_mul(&rhs.0))
+ }
+}
+
+impl<const LIMBS: usize, const HLIMBS: usize> MulAssign<Wrapping<Uint<HLIMBS>>>
+ for Wrapping<Uint<LIMBS>>
+{
+ fn mul_assign(&mut self, other: Wrapping<Uint<HLIMBS>>) {
+ *self = *self * other;
+ }
+}
+
+impl<const LIMBS: usize, const HLIMBS: usize> MulAssign<&Wrapping<Uint<HLIMBS>>>
+ for Wrapping<Uint<LIMBS>>
+{
+ fn mul_assign(&mut self, other: &Wrapping<Uint<HLIMBS>>) {
+ *self = *self * other;
+ }
+}
+
+impl<const LIMBS: usize, const HLIMBS: usize> Mul<Checked<Uint<HLIMBS>>> for Checked<Uint<LIMBS>> {
+ type Output = Self;
+
+ fn mul(self, rhs: Checked<Uint<HLIMBS>>) -> Checked<Uint<LIMBS>> {
+ Checked(self.0.and_then(|a| rhs.0.and_then(|b| a.checked_mul(&b))))
+ }
+}
+
+impl<const LIMBS: usize, const HLIMBS: usize> Mul<&Checked<Uint<HLIMBS>>> for Checked<Uint<LIMBS>> {
+ type Output = Checked<Uint<LIMBS>>;
+
+ fn mul(self, rhs: &Checked<Uint<HLIMBS>>) -> Checked<Uint<LIMBS>> {
+ Checked(self.0.and_then(|a| rhs.0.and_then(|b| a.checked_mul(&b))))
+ }
+}
+
+impl<const LIMBS: usize, const HLIMBS: usize> Mul<Checked<Uint<HLIMBS>>> for &Checked<Uint<LIMBS>> {
+ type Output = Checked<Uint<LIMBS>>;
+
+ fn mul(self, rhs: Checked<Uint<HLIMBS>>) -> Checked<Uint<LIMBS>> {
+ Checked(self.0.and_then(|a| rhs.0.and_then(|b| a.checked_mul(&b))))
+ }
+}
+
+impl<const LIMBS: usize, const HLIMBS: usize> Mul<&Checked<Uint<HLIMBS>>>
+ for &Checked<Uint<LIMBS>>
+{
+ type Output = Checked<Uint<LIMBS>>;
+
+ fn mul(self, rhs: &Checked<Uint<HLIMBS>>) -> Checked<Uint<LIMBS>> {
+ Checked(self.0.and_then(|a| rhs.0.and_then(|b| a.checked_mul(&b))))
+ }
+}
+
+impl<const LIMBS: usize, const HLIMBS: usize> MulAssign<Checked<Uint<HLIMBS>>>
+ for Checked<Uint<LIMBS>>
+{
+ fn mul_assign(&mut self, other: Checked<Uint<HLIMBS>>) {
+ *self = *self * other;
+ }
+}
+
+impl<const LIMBS: usize, const HLIMBS: usize> MulAssign<&Checked<Uint<HLIMBS>>>
+ for Checked<Uint<LIMBS>>
+{
+ fn mul_assign(&mut self, other: &Checked<Uint<HLIMBS>>) {
+ *self = *self * other;
+ }
+}
+
+impl<const LIMBS: usize, const HLIMBS: usize> Mul<Uint<HLIMBS>> for Uint<LIMBS>
+where
+ Uint<HLIMBS>: ConcatMixed<Uint<LIMBS>>,
+{
+ type Output = <Uint<HLIMBS> as ConcatMixed<Self>>::MixedOutput;
+
+ fn mul(self, other: Uint<HLIMBS>) -> Self::Output {
+ Uint::mul(&self, &other)
+ }
+}
+
+impl<const LIMBS: usize, const HLIMBS: usize> Mul<&Uint<HLIMBS>> for Uint<LIMBS>
+where
+ Uint<HLIMBS>: ConcatMixed<Uint<LIMBS>>,
+{
+ type Output = <Uint<HLIMBS> as ConcatMixed<Self>>::MixedOutput;
+
+ fn mul(self, other: &Uint<HLIMBS>) -> Self::Output {
+ Uint::mul(&self, other)
+ }
+}
+
+impl<const LIMBS: usize, const HLIMBS: usize> Mul<Uint<HLIMBS>> for &Uint<LIMBS>
+where
+ Uint<HLIMBS>: ConcatMixed<Uint<LIMBS>>,
+{
+ type Output = <Uint<HLIMBS> as ConcatMixed<Uint<LIMBS>>>::MixedOutput;
+
+ fn mul(self, other: Uint<HLIMBS>) -> Self::Output {
+ Uint::mul(self, &other)
+ }
+}
+
+impl<const LIMBS: usize, const HLIMBS: usize> Mul<&Uint<HLIMBS>> for &Uint<LIMBS>
+where
+ Uint<HLIMBS>: ConcatMixed<Uint<LIMBS>>,
+{
+ type Output = <Uint<HLIMBS> as ConcatMixed<Uint<LIMBS>>>::MixedOutput;
+
+ fn mul(self, other: &Uint<HLIMBS>) -> Self::Output {
+ Uint::mul(self, other)
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use crate::{CheckedMul, Zero, U128, U192, U256, U64};
+
+ #[test]
+ fn mul_wide_zero_and_one() {
+ assert_eq!(U64::ZERO.mul_wide(&U64::ZERO), (U64::ZERO, U64::ZERO));
+ assert_eq!(U64::ZERO.mul_wide(&U64::ONE), (U64::ZERO, U64::ZERO));
+ assert_eq!(U64::ONE.mul_wide(&U64::ZERO), (U64::ZERO, U64::ZERO));
+ assert_eq!(U64::ONE.mul_wide(&U64::ONE), (U64::ONE, U64::ZERO));
+ }
+
+ #[test]
+ fn mul_wide_lo_only() {
+ let primes: &[u32] = &[3, 5, 17, 257, 65537];
+
+ for &a_int in primes {
+ for &b_int in primes {
+ let (lo, hi) = U64::from_u32(a_int).mul_wide(&U64::from_u32(b_int));
+ let expected = U64::from_u64(a_int as u64 * b_int as u64);
+ assert_eq!(lo, expected);
+ assert!(bool::from(hi.is_zero()));
+ }
+ }
+ }
+
+ #[test]
+ fn mul_concat_even() {
+ assert_eq!(U64::ZERO * U64::MAX, U128::ZERO);
+ assert_eq!(U64::MAX * U64::ZERO, U128::ZERO);
+ assert_eq!(
+ U64::MAX * U64::MAX,
+ U128::from_u128(0xfffffffffffffffe_0000000000000001)
+ );
+ assert_eq!(
+ U64::ONE * U64::MAX,
+ U128::from_u128(0x0000000000000000_ffffffffffffffff)
+ );
+ }
+
+ #[test]
+ fn mul_concat_mixed() {
+ let a = U64::from_u64(0x0011223344556677);
+ let b = U128::from_u128(0x8899aabbccddeeff_8899aabbccddeeff);
+ assert_eq!(a * b, U192::from(&a).saturating_mul(&b));
+ assert_eq!(b * a, U192::from(&b).saturating_mul(&a));
+ }
+
+ #[test]
+ fn checked_mul_ok() {
+ let n = U64::from_u32(0xffff_ffff);
+ assert_eq!(
+ n.checked_mul(&n).unwrap(),
+ U64::from_u64(0xffff_fffe_0000_0001)
+ );
+ }
+
+ #[test]
+ fn checked_mul_overflow() {
+ let n = U64::from_u64(0xffff_ffff_ffff_ffff);
+ assert!(bool::from(n.checked_mul(&n).is_none()));
+ }
+
+ #[test]
+ fn saturating_mul_no_overflow() {
+ let n = U64::from_u8(8);
+ assert_eq!(n.saturating_mul(&n), U64::from_u8(64));
+ }
+
+ #[test]
+ fn saturating_mul_overflow() {
+ let a = U64::from(0xffff_ffff_ffff_ffffu64);
+ let b = U64::from(2u8);
+ assert_eq!(a.saturating_mul(&b), U64::MAX);
+ }
+
+ #[test]
+ fn square() {
+ let n = U64::from_u64(0xffff_ffff_ffff_ffff);
+ let (hi, lo) = n.square().split();
+ assert_eq!(lo, U64::from_u64(1));
+ assert_eq!(hi, U64::from_u64(0xffff_ffff_ffff_fffe));
+ }
+
+ #[test]
+ fn square_larger() {
+ let n = U256::MAX;
+ let (hi, lo) = n.square().split();
+ assert_eq!(lo, U256::ONE);
+ assert_eq!(hi, U256::MAX.wrapping_sub(&U256::ONE));
+ }
+}
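
The `(lo, hi)` ordering documented on `mul_wide` above, in one concrete case (a sketch; the value matches the `mul_concat_even` test):

    use crypto_bigint::U64;

    fn ordering_demo() {
        // (2^64 - 1)^2 = 0xfffffffffffffffe_0000000000000001
        let (lo, hi) = U64::MAX.mul_wide(&U64::MAX);
        assert_eq!(lo, U64::ONE);
        assert_eq!(hi, U64::MAX.wrapping_sub(&U64::ONE));
    }
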
diff --git a/vendor/crypto-bigint/src/uint/mul_mod.rs b/vendor/crypto-bigint/src/uint/mul_mod.rs
new file mode 100644
index 0000000..0916ede
--- /dev/null
+++ b/vendor/crypto-bigint/src/uint/mul_mod.rs
@@ -0,0 +1,133 @@
+//! [`Uint`] modular multiplication operations.
+
+use crate::{Limb, Uint, WideWord, Word};
+
+impl<const LIMBS: usize> Uint<LIMBS> {
+ /// Computes `self * rhs mod p` in constant time for the special modulus
+ /// `p = MAX+1-c` where `c` is small enough to fit in a single [`Limb`].
+ /// For the modulus reduction, this function implements Algorithm 14.47 from
+ /// the "Handbook of Applied Cryptography", by A. Menezes, P. van Oorschot,
+ /// and S. Vanstone, CRC Press, 1996.
+ pub const fn mul_mod_special(&self, rhs: &Self, c: Limb) -> Self {
+ // We implicitly assume `LIMBS > 0`, because `Uint<0>` doesn't compile.
+ // Still the case `LIMBS == 1` needs special handling.
+ if LIMBS == 1 {
+ let prod = self.limbs[0].0 as WideWord * rhs.limbs[0].0 as WideWord;
+ let reduced = prod % Word::MIN.wrapping_sub(c.0) as WideWord;
+ return Self::from_word(reduced as Word);
+ }
+
+ let (lo, hi) = self.mul_wide(rhs);
+
+ // Now use Algorithm 14.47 for the reduction
+ let (lo, carry) = mac_by_limb(&lo, &hi, c, Limb::ZERO);
+
+ let (lo, carry) = {
+ let rhs = (carry.0 + 1) as WideWord * c.0 as WideWord;
+ lo.adc(&Self::from_wide_word(rhs), Limb::ZERO)
+ };
+
+ let (lo, _) = {
+ let rhs = carry.0.wrapping_sub(1) & c.0;
+ lo.sbb(&Self::from_word(rhs), Limb::ZERO)
+ };
+
+ lo
+ }
+}
+
+/// Computes `a + (b * c) + carry`, returning the result along with the new carry.
+const fn mac_by_limb<const LIMBS: usize>(
+ a: &Uint<LIMBS>,
+ b: &Uint<LIMBS>,
+ c: Limb,
+ carry: Limb,
+) -> (Uint<LIMBS>, Limb) {
+ let mut i = 0;
+ let mut a = *a;
+ let mut carry = carry;
+
+ while i < LIMBS {
+ let (n, c) = a.limbs[i].mac(b.limbs[i], c, carry);
+ a.limbs[i] = n;
+ carry = c;
+ i += 1;
+ }
+
+ (a, carry)
+}
+
+#[cfg(all(test, feature = "rand"))]
+mod tests {
+ use crate::{Limb, NonZero, Random, RandomMod, Uint};
+ use rand_core::SeedableRng;
+
+ macro_rules! test_mul_mod_special {
+ ($size:expr, $test_name:ident) => {
+ #[test]
+ fn $test_name() {
+ let mut rng = rand_chacha::ChaCha8Rng::seed_from_u64(1);
+ let moduli = [
+ NonZero::<Limb>::random(&mut rng),
+ NonZero::<Limb>::random(&mut rng),
+ ];
+
+ for special in &moduli {
+ let p = &NonZero::new(Uint::ZERO.wrapping_sub(&Uint::from_word(special.0)))
+ .unwrap();
+
+ let minus_one = p.wrapping_sub(&Uint::ONE);
+
+ let base_cases = [
+ (Uint::ZERO, Uint::ZERO, Uint::ZERO),
+ (Uint::ONE, Uint::ZERO, Uint::ZERO),
+ (Uint::ZERO, Uint::ONE, Uint::ZERO),
+ (Uint::ONE, Uint::ONE, Uint::ONE),
+ (minus_one, minus_one, Uint::ONE),
+ (minus_one, Uint::ONE, minus_one),
+ (Uint::ONE, minus_one, minus_one),
+ ];
+ for (a, b, c) in &base_cases {
+ let x = a.mul_mod_special(&b, *special.as_ref());
+ assert_eq!(*c, x, "{} * {} mod {} = {} != {}", a, b, p, x, c);
+ }
+
+ for _i in 0..100 {
+ let a = Uint::<$size>::random_mod(&mut rng, p);
+ let b = Uint::<$size>::random_mod(&mut rng, p);
+
+ let c = a.mul_mod_special(&b, *special.as_ref());
+ assert!(c < **p, "not reduced: {} >= {} ", c, p);
+
+ let expected = {
+ let (lo, hi) = a.mul_wide(&b);
+ let mut prod = Uint::<{ 2 * $size }>::ZERO;
+ prod.limbs[..$size].clone_from_slice(&lo.limbs);
+ prod.limbs[$size..].clone_from_slice(&hi.limbs);
+ let mut modulus = Uint::ZERO;
+ modulus.limbs[..$size].clone_from_slice(&p.as_ref().limbs);
+ let reduced = prod.rem(&NonZero::new(modulus).unwrap());
+ let mut expected = Uint::ZERO;
+ expected.limbs[..].clone_from_slice(&reduced.limbs[..$size]);
+ expected
+ };
+ assert_eq!(c, expected, "incorrect result");
+ }
+ }
+ }
+ };
+ }
+
+ test_mul_mod_special!(1, mul_mod_special_1);
+ test_mul_mod_special!(2, mul_mod_special_2);
+ test_mul_mod_special!(3, mul_mod_special_3);
+ test_mul_mod_special!(4, mul_mod_special_4);
+ test_mul_mod_special!(5, mul_mod_special_5);
+ test_mul_mod_special!(6, mul_mod_special_6);
+ test_mul_mod_special!(7, mul_mod_special_7);
+ test_mul_mod_special!(8, mul_mod_special_8);
+ test_mul_mod_special!(9, mul_mod_special_9);
+ test_mul_mod_special!(10, mul_mod_special_10);
+ test_mul_mod_special!(11, mul_mod_special_11);
+ test_mul_mod_special!(12, mul_mod_special_12);
+}
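
The reduction can be followed on plain words: for `p = 2^64 - c` we have `2^64 ≡ c (mod p)`, so the high word of a wide product folds down via one multiplication by `c`. A variable-time sketch assuming `c` fits in 32 bits (hypothetical helper):

    fn mul_mod_special_u64(a: u64, b: u64, c: u64) -> u64 {
        let p = 0u64.wrapping_sub(c) as u128; // p = 2^64 - c
        let wide = a as u128 * b as u128;
        // hi * 2^64 + lo ≡ hi * c + lo (mod p): fold the high word down.
        let mut t = (wide >> 64) * c as u128 + (wide & u64::MAX as u128);
        // A second fold leaves t < 2^65 when c < 2^32 ...
        t = (t >> 64) * c as u128 + (t & u64::MAX as u128);
        // ... so at most two final subtractions remain.
        while t >= p {
            t -= p;
        }
        t as u64
    }
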
diff --git a/vendor/crypto-bigint/src/uint/neg.rs b/vendor/crypto-bigint/src/uint/neg.rs
new file mode 100644
index 0000000..4881a27
--- /dev/null
+++ b/vendor/crypto-bigint/src/uint/neg.rs
@@ -0,0 +1,51 @@
+use core::ops::Neg;
+
+use crate::{CtChoice, Limb, Uint, WideWord, Word, Wrapping};
+
+impl<const LIMBS: usize> Neg for Wrapping<Uint<LIMBS>> {
+ type Output = Self;
+
+ fn neg(self) -> Self::Output {
+ Self(self.0.wrapping_neg())
+ }
+}
+
+impl<const LIMBS: usize> Uint<LIMBS> {
+ /// Negates based on `choice` by wrapping the integer.
+ pub(crate) const fn conditional_wrapping_neg(&self, choice: CtChoice) -> Uint<LIMBS> {
+ Uint::ct_select(self, &self.wrapping_neg(), choice)
+ }
+
+ /// Perform wrapping negation.
+ pub const fn wrapping_neg(&self) -> Self {
+ let mut ret = [Limb::ZERO; LIMBS];
+ let mut carry = 1;
+ let mut i = 0;
+ while i < LIMBS {
+ let r = (!self.limbs[i].0 as WideWord) + carry;
+ ret[i] = Limb(r as Word);
+ carry = r >> Limb::BITS;
+ i += 1;
+ }
+ Uint::new(ret)
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use crate::U256;
+
+ #[test]
+ fn wrapping_neg() {
+ assert_eq!(U256::ZERO.wrapping_neg(), U256::ZERO);
+ assert_eq!(U256::MAX.wrapping_neg(), U256::ONE);
+ assert_eq!(
+ U256::from_u64(13).wrapping_neg(),
+ U256::from_u64(13).not().saturating_add(&U256::ONE)
+ );
+ assert_eq!(
+ U256::from_u64(42).wrapping_neg(),
+ U256::from_u64(42).saturating_sub(&U256::ONE).not()
+ );
+ }
+}
diff --git a/vendor/crypto-bigint/src/uint/neg_mod.rs b/vendor/crypto-bigint/src/uint/neg_mod.rs
new file mode 100644
index 0000000..aaed276
--- /dev/null
+++ b/vendor/crypto-bigint/src/uint/neg_mod.rs
@@ -0,0 +1,68 @@
+//! [`Uint`] modular negation operations.
+
+use crate::{Limb, NegMod, Uint};
+
+impl<const LIMBS: usize> Uint<LIMBS> {
+ /// Computes `-a mod p` in constant time.
+ /// Assumes `self` is in `[0, p)`.
+ pub const fn neg_mod(&self, p: &Self) -> Self {
+ let z = self.ct_is_nonzero();
+ let mut ret = p.sbb(self, Limb::ZERO).0;
+ let mut i = 0;
+ while i < LIMBS {
+ // Set ret to 0 if the original value was 0, in which
+ // case ret would be p.
+ ret.limbs[i].0 = z.if_true(ret.limbs[i].0);
+ i += 1;
+ }
+ ret
+ }
+
+ /// Computes `-a mod p` in constant time for the special modulus
+ /// `p = MAX+1-c` where `c` is small enough to fit in a single [`Limb`].
+ pub const fn neg_mod_special(&self, c: Limb) -> Self {
+ Self::ZERO.sub_mod_special(self, c)
+ }
+}
+
+impl<const LIMBS: usize> NegMod for Uint<LIMBS> {
+ type Output = Self;
+
+ fn neg_mod(&self, p: &Self) -> Self {
+ debug_assert!(self < p);
+ self.neg_mod(p)
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use crate::U256;
+
+ #[test]
+ fn neg_mod_random() {
+ let x =
+ U256::from_be_hex("8d16e171674b4e6d8529edba4593802bf30b8cb161dd30aa8e550d41380007c2");
+ let p =
+ U256::from_be_hex("928334a4e4be0843ec225a4c9c61df34bdc7a81513e4b6f76f2bfa3148e2e1b5");
+
+ let actual = x.neg_mod(&p);
+ let expected =
+ U256::from_be_hex("056c53337d72b9d666f86c9256ce5f08cabc1b63b207864ce0d6ecf010e2d9f3");
+
+ assert_eq!(expected, actual);
+ }
+
+ #[test]
+ fn neg_mod_zero() {
+ let x =
+ U256::from_be_hex("0000000000000000000000000000000000000000000000000000000000000000");
+ let p =
+ U256::from_be_hex("928334a4e4be0843ec225a4c9c61df34bdc7a81513e4b6f76f2bfa3148e2e1b5");
+
+ let actual = x.neg_mod(&p);
+ let expected =
+ U256::from_be_hex("0000000000000000000000000000000000000000000000000000000000000000");
+
+ assert_eq!(expected, actual);
+ }
+}
diff --git a/vendor/crypto-bigint/src/uint/rand.rs b/vendor/crypto-bigint/src/uint/rand.rs
new file mode 100644
index 0000000..c5f730b
--- /dev/null
+++ b/vendor/crypto-bigint/src/uint/rand.rs
@@ -0,0 +1,79 @@
+//! Random number generator support
+
+use super::Uint;
+use crate::{Limb, NonZero, Random, RandomMod};
+use rand_core::CryptoRngCore;
+use subtle::ConstantTimeLess;
+
+impl<const LIMBS: usize> Random for Uint<LIMBS> {
+ /// Generate a cryptographically secure random [`Uint`].
+ fn random(mut rng: &mut impl CryptoRngCore) -> Self {
+ let mut limbs = [Limb::ZERO; LIMBS];
+
+ for limb in &mut limbs {
+ *limb = Limb::random(&mut rng)
+ }
+
+ limbs.into()
+ }
+}
+
+impl<const LIMBS: usize> RandomMod for Uint<LIMBS> {
+ /// Generate a cryptographically secure random [`Uint`] which is less than
+ /// a given `modulus`.
+ ///
+ /// This function uses rejection sampling, a method which produces an
+ /// unbiased distribution of in-range values provided the underlying
+ /// CSRNG is unbiased, but runs in variable-time.
+ ///
+ /// The variable-time nature of the algorithm should not pose a security
+ /// issue so long as the underlying random number generator is truly a
+ /// CSRNG, where previous outputs are unrelated to subsequent
+ /// outputs and do not reveal information about the RNG's internal state.
+ fn random_mod(mut rng: &mut impl CryptoRngCore, modulus: &NonZero<Self>) -> Self {
+ let mut n = Self::ZERO;
+
+ let n_bits = modulus.as_ref().bits_vartime();
+ let n_limbs = (n_bits + Limb::BITS - 1) / Limb::BITS;
+ let mask = Limb::MAX >> (Limb::BITS * n_limbs - n_bits);
+
+ loop {
+ for i in 0..n_limbs {
+ n.limbs[i] = Limb::random(&mut rng);
+ }
+ n.limbs[n_limbs - 1] = n.limbs[n_limbs - 1] & mask;
+
+ if n.ct_lt(modulus).into() {
+ return n;
+ }
+ }
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use crate::{NonZero, RandomMod, U256};
+ use rand_core::SeedableRng;
+
+ #[test]
+ fn random_mod() {
+ let mut rng = rand_chacha::ChaCha8Rng::seed_from_u64(1);
+
+ // Ensure `random_mod` runs in a reasonable amount of time
+ let modulus = NonZero::new(U256::from(42u8)).unwrap();
+ let res = U256::random_mod(&mut rng, &modulus);
+
+ // Check that the value is in range
+ assert!(res >= U256::ZERO);
+ assert!(res < U256::from(42u8));
+
+ // Ensure `random_mod` runs in a reasonable amount of time
+ // when the modulus is larger than 1 limb
+ let modulus = NonZero::new(U256::from(0x10000000000000001u128)).unwrap();
+ let res = U256::random_mod(&mut rng, &modulus);
+
+ // Check that the value is in range
+ assert!(res >= U256::ZERO);
+ assert!(res < U256::from(0x10000000000000001u128));
+ }
+}
diff --git a/vendor/crypto-bigint/src/uint/resize.rs b/vendor/crypto-bigint/src/uint/resize.rs
new file mode 100644
index 0000000..2c80b89
--- /dev/null
+++ b/vendor/crypto-bigint/src/uint/resize.rs
@@ -0,0 +1,37 @@
+use super::Uint;
+
+impl<const LIMBS: usize> Uint<LIMBS> {
+ /// Construct a `Uint<T>` from this value, zero-extending when widening
+ /// and truncating the upper bits if the value is too large to be
+ /// represented in `T` limbs.
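+ ///
+ /// A round-trip sketch (illustrative):
+ ///
+ /// ```
+ /// use crypto_bigint::{U128, U64};
+ /// let x = U64::from_u64(42);
+ /// let wide: U128 = x.resize();
+ /// let back: U64 = wide.resize();
+ /// assert_eq!(back, x);
+ /// ```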
+ #[inline(always)]
+ pub const fn resize<const T: usize>(&self) -> Uint<T> {
+ let mut res = Uint::ZERO;
+ let mut i = 0;
+ let dim = if T < LIMBS { T } else { LIMBS };
+ while i < dim {
+ res.limbs[i] = self.limbs[i];
+ i += 1;
+ }
+ res
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use crate::{U128, U64};
+
+ #[test]
+ fn resize_larger() {
+ let u = U64::from_be_hex("AAAAAAAABBBBBBBB");
+ let u2: U128 = u.resize();
+ assert_eq!(u2, U128::from_be_hex("0000000000000000AAAAAAAABBBBBBBB"));
+ }
+
+ #[test]
+ fn resize_smaller() {
+ let u = U128::from_be_hex("AAAAAAAABBBBBBBBCCCCCCCCDDDDDDDD");
+ let u2: U64 = u.resize();
+ assert_eq!(u2, U64::from_be_hex("CCCCCCCCDDDDDDDD"));
+ }
+}
diff --git a/vendor/crypto-bigint/src/uint/shl.rs b/vendor/crypto-bigint/src/uint/shl.rs
new file mode 100644
index 0000000..1dbc40f
--- /dev/null
+++ b/vendor/crypto-bigint/src/uint/shl.rs
@@ -0,0 +1,216 @@
+//! [`Uint`] bitwise left shift operations.
+
+use crate::{CtChoice, Limb, Uint, Word};
+use core::ops::{Shl, ShlAssign};
+
+impl<const LIMBS: usize> Uint<LIMBS> {
+ /// Computes `self << n` where `0 <= n < Limb::BITS`,
+ /// returning the result and the carry.
+ #[inline(always)]
+ pub(crate) const fn shl_limb(&self, n: usize) -> (Self, Limb) {
+ let mut limbs = [Limb::ZERO; LIMBS];
+
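+ // When `n == 0` the complementary shift `Limb::BITS - n` would equal
+ // the word size, which is an invalid shift amount, so the right-shift
+ // and the carry are zeroed in constant time via `nz`.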
+ let nz = Limb(n as Word).ct_is_nonzero();
+ let lshift = n as Word;
+ let rshift = Limb::ct_select(Limb::ZERO, Limb((Limb::BITS - n) as Word), nz).0;
+ let carry = Limb::ct_select(
+ Limb::ZERO,
+ Limb(self.limbs[LIMBS - 1].0.wrapping_shr(Word::BITS - n as u32)),
+ nz,
+ );
+
+ let mut i = LIMBS - 1;
+ while i > 0 {
+ let mut limb = self.limbs[i].0 << lshift;
+ let hi = self.limbs[i - 1].0 >> rshift;
+ limb |= nz.if_true(hi);
+ limbs[i] = Limb(limb);
+ i -= 1
+ }
+ limbs[0] = Limb(self.limbs[0].0 << lshift);
+
+ (Uint::<LIMBS>::new(limbs), carry)
+ }
+
+ /// Computes `self << n`.
+ ///
+ /// NOTE: this operation is variable time with respect to `n` *ONLY*.
+ ///
+ /// When used with a fixed `n`, this function is constant-time with respect
+ /// to `self`.
+ #[inline(always)]
+ pub const fn shl_vartime(&self, n: usize) -> Self {
+ let mut limbs = [Limb::ZERO; LIMBS];
+
+ if n >= Limb::BITS * LIMBS {
+ return Self { limbs };
+ }
+
+ let shift_num = n / Limb::BITS;
+ let rem = n % Limb::BITS;
+
+ let mut i = LIMBS;
+ while i > shift_num {
+ i -= 1;
+ limbs[i] = self.limbs[i - shift_num];
+ }
+
+ let (new_lower, _carry) = (Self { limbs }).shl_limb(rem);
+ new_lower
+ }
+
+ /// Computes a left shift on a wide input as `(lo, hi)`.
+ ///
+ /// NOTE: this operation is variable time with respect to `n` *ONLY*.
+ ///
+ /// When used with a fixed `n`, this function is constant-time with respect
+ /// to `self`.
+ #[inline(always)]
+ pub const fn shl_vartime_wide(lower_upper: (Self, Self), n: usize) -> (Self, Self) {
+ let (lower, mut upper) = lower_upper;
+ let new_lower = lower.shl_vartime(n);
+ upper = upper.shl_vartime(n);
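+ // Bits shifted out of the lower half must land in the upper half:
+ // the whole lower half when `n >= Self::BITS`, otherwise its top
+ // `n` bits.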
+ if n >= Self::BITS {
+ upper = upper.bitor(&lower.shl_vartime(n - Self::BITS));
+ } else {
+ upper = upper.bitor(&lower.shr_vartime(Self::BITS - n));
+ }
+
+ (new_lower, upper)
+ }
+
+ /// Computes `self << shift`.
+ /// Returns zero if `shift >= Self::BITS`.
+ pub const fn shl(&self, shift: usize) -> Self {
+ let overflow = CtChoice::from_usize_lt(shift, Self::BITS).not();
+ let shift = shift % Self::BITS;
+ let mut result = *self;
+ let mut i = 0;
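+ // Decompose `shift` into powers of two and apply each partial shift
+ // in constant time, selected on the corresponding bit of `shift`.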
+ while i < Self::LOG2_BITS {
+ let bit = CtChoice::from_lsb((shift as Word >> i) & 1);
+ result = Uint::ct_select(&result, &result.shl_vartime(1 << i), bit);
+ i += 1;
+ }
+
+ Uint::ct_select(&result, &Self::ZERO, overflow)
+ }
+}
+
+impl<const LIMBS: usize> Shl<usize> for Uint<LIMBS> {
+ type Output = Uint<LIMBS>;
+
+ /// NOTE: this operation is variable time with respect to `rhs` *ONLY*.
+ ///
+ /// When used with a fixed `rhs`, this function is constant-time with respect
+ /// to `self`.
+ fn shl(self, rhs: usize) -> Uint<LIMBS> {
+ Uint::<LIMBS>::shl(&self, rhs)
+ }
+}
+
+impl<const LIMBS: usize> Shl<usize> for &Uint<LIMBS> {
+ type Output = Uint<LIMBS>;
+
+ /// NOTE: this operation is variable time with respect to `rhs` *ONLY*.
+ ///
+ /// When used with a fixed `rhs`, this function is constant-time with respect
+ /// to `self`.
+ fn shl(self, rhs: usize) -> Uint<LIMBS> {
+ self.shl(rhs)
+ }
+}
+
+impl<const LIMBS: usize> ShlAssign<usize> for Uint<LIMBS> {
+ /// NOTE: this operation is variable time with respect to `rhs` *ONLY*.
+ ///
+ /// When used with a fixed `rhs`, this function is constant-time with respect
+ /// to `self`.
+ fn shl_assign(&mut self, rhs: usize) {
+ *self = self.shl(rhs)
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use crate::{Limb, Uint, U128, U256};
+
+ const N: U256 =
+ U256::from_be_hex("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364141");
+
+ const TWO_N: U256 =
+ U256::from_be_hex("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFD755DB9CD5E9140777FA4BD19A06C8282");
+
+ const FOUR_N: U256 =
+ U256::from_be_hex("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFAEABB739ABD2280EEFF497A3340D90504");
+
+ const SIXTY_FIVE: U256 =
+ U256::from_be_hex("FFFFFFFFFFFFFFFD755DB9CD5E9140777FA4BD19A06C82820000000000000000");
+
+ const EIGHTY_EIGHT: U256 =
+ U256::from_be_hex("FFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD03641410000000000000000000000");
+
+ const SIXTY_FOUR: U256 =
+ U256::from_be_hex("FFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD03641410000000000000000");
+
+ #[test]
+ fn shl_simple() {
+ let mut t = U256::from(1u8);
+ assert_eq!(t << 1, U256::from(2u8));
+ t = U256::from(3u8);
+ assert_eq!(t << 8, U256::from(0x300u16));
+ }
+
+ #[test]
+ fn shl1() {
+ assert_eq!(N << 1, TWO_N);
+ }
+
+ #[test]
+ fn shl2() {
+ assert_eq!(N << 2, FOUR_N);
+ }
+
+ #[test]
+ fn shl65() {
+ assert_eq!(N << 65, SIXTY_FIVE);
+ }
+
+ #[test]
+ fn shl88() {
+ assert_eq!(N << 88, EIGHTY_EIGHT);
+ }
+
+ #[test]
+ fn shl256() {
+ assert_eq!(N << 256, U256::default());
+ }
+
+ #[test]
+ fn shl64() {
+ assert_eq!(N << 64, SIXTY_FOUR);
+ }
+
+ #[test]
+ fn shl_wide_1_1_128() {
+ assert_eq!(
+ Uint::shl_vartime_wide((U128::ONE, U128::ONE), 128),
+ (U128::ZERO, U128::ONE)
+ );
+ }
+
+ #[test]
+ fn shl_wide_max_0_1() {
+ assert_eq!(
+ Uint::shl_vartime_wide((U128::MAX, U128::ZERO), 1),
+ (U128::MAX.sbb(&U128::ONE, Limb::ZERO).0, U128::ONE)
+ );
+ }
+
+ #[test]
+ fn shl_wide_max_max_256() {
+ assert_eq!(
+ Uint::shl_vartime_wide((U128::MAX, U128::MAX), 256),
+ (U128::ZERO, U128::ZERO)
+ );
+ }
+}
diff --git a/vendor/crypto-bigint/src/uint/shr.rs b/vendor/crypto-bigint/src/uint/shr.rs
new file mode 100644
index 0000000..6a36fbe
--- /dev/null
+++ b/vendor/crypto-bigint/src/uint/shr.rs
@@ -0,0 +1,186 @@
+//! [`Uint`] bitwise right shift operations.
+
+use super::Uint;
+use crate::{limb::HI_BIT, CtChoice, Limb, Word};
+use core::ops::{Shr, ShrAssign};
+
+impl<const LIMBS: usize> Uint<LIMBS> {
+ /// Computes `self >> 1` in constant time, returning [`CtChoice::TRUE`] if the overflowing bit
+ /// was set, and [`CtChoice::FALSE`] otherwise.
+ pub(crate) const fn shr_1(&self) -> (Self, CtChoice) {
+ let mut shifted_bits = [0; LIMBS];
+ let mut i = 0;
+ while i < LIMBS {
+ shifted_bits[i] = self.limbs[i].0 >> 1;
+ i += 1;
+ }
+
+ let mut carry_bits = [0; LIMBS];
+ let mut i = 0;
+ while i < LIMBS {
+ carry_bits[i] = self.limbs[i].0 << HI_BIT;
+ i += 1;
+ }
+
+ let mut limbs = [Limb(0); LIMBS];
+
+ let mut i = 0;
+ while i < (LIMBS - 1) {
+ limbs[i] = Limb(shifted_bits[i] | carry_bits[i + 1]);
+ i += 1;
+ }
+ limbs[LIMBS - 1] = Limb(shifted_bits[LIMBS - 1]);
+
+ debug_assert!(carry_bits[LIMBS - 1] == 0 || carry_bits[LIMBS - 1] == (1 << HI_BIT));
+ (
+ Uint::new(limbs),
+ CtChoice::from_lsb(carry_bits[0] >> HI_BIT),
+ )
+ }
+
+ /// Computes `self >> shift`.
+ ///
+ /// NOTE: this operation is variable time with respect to `shift` *ONLY*.
+ ///
+ /// When used with a fixed `shift`, this function is constant-time with
+ /// respect to `self`.
+ #[inline(always)]
+ pub const fn shr_vartime(&self, shift: usize) -> Self {
+ let full_shifts = shift / Limb::BITS;
+ let small_shift = shift & (Limb::BITS - 1);
+ let mut limbs = [Limb::ZERO; LIMBS];
+
+ if shift > Limb::BITS * LIMBS {
+ return Self { limbs };
+ }
+
+ let n = LIMBS - full_shifts;
+ let mut i = 0;
+
+ if small_shift == 0 {
+ while i < n {
+ limbs[i] = Limb(self.limbs[i + full_shifts].0);
+ i += 1;
+ }
+ } else {
+ while i < n {
+ let mut lo = self.limbs[i + full_shifts].0 >> small_shift;
+
+ if i < (LIMBS - 1) - full_shifts {
+ lo |= self.limbs[i + full_shifts + 1].0 << (Limb::BITS - small_shift);
+ }
+
+ limbs[i] = Limb(lo);
+ i += 1;
+ }
+ }
+
+ Self { limbs }
+ }
+
+ /// Computes a right shift on a wide input as `(lo, hi)`.
+ ///
+ /// NOTE: this operation is variable time with respect to `n` *ONLY*.
+ ///
+ /// When used with a fixed `n`, this function is constant-time with respect
+ /// to `self`.
+ #[inline(always)]
+ pub const fn shr_vartime_wide(lower_upper: (Self, Self), n: usize) -> (Self, Self) {
+ let (mut lower, upper) = lower_upper;
+ let new_upper = upper.shr_vartime(n);
+ lower = lower.shr_vartime(n);
+ if n >= Self::BITS {
+ lower = lower.bitor(&upper.shr_vartime(n - Self::BITS));
+ } else {
+ lower = lower.bitor(&upper.shl_vartime(Self::BITS - n));
+ }
+
+ (lower, new_upper)
+ }
+
+ /// Computes `self >> shift`.
+ /// Returns zero if `shift >= Self::BITS`.
+ pub const fn shr(&self, shift: usize) -> Self {
+ let overflow = CtChoice::from_usize_lt(shift, Self::BITS).not();
+ let shift = shift % Self::BITS;
+ let mut result = *self;
+ let mut i = 0;
+ while i < Self::LOG2_BITS {
+ let bit = CtChoice::from_lsb((shift as Word >> i) & 1);
+ result = Uint::ct_select(&result, &result.shr_vartime(1 << i), bit);
+ i += 1;
+ }
+
+ Uint::ct_select(&result, &Self::ZERO, overflow)
+ }
+}
+
+impl<const LIMBS: usize> Shr<usize> for Uint<LIMBS> {
+ type Output = Uint<LIMBS>;
+
+ /// NOTE: this operation is variable time with respect to `rhs` *ONLY*.
+ ///
+ /// When used with a fixed `rhs`, this function is constant-time with respect
+ /// to `self`.
+ fn shr(self, rhs: usize) -> Uint<LIMBS> {
+ Uint::<LIMBS>::shr(&self, rhs)
+ }
+}
+
+impl<const LIMBS: usize> Shr<usize> for &Uint<LIMBS> {
+ type Output = Uint<LIMBS>;
+
+ /// NOTE: this operation is variable time with respect to `rhs` *ONLY*.
+ ///
+ /// When used with a fixed `rhs`, this function is constant-time with respect
+ /// to `self`.
+ fn shr(self, rhs: usize) -> Uint<LIMBS> {
+ self.shr(rhs)
+ }
+}
+
+impl<const LIMBS: usize> ShrAssign<usize> for Uint<LIMBS> {
+ fn shr_assign(&mut self, rhs: usize) {
+ *self = self.shr(rhs);
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use crate::{Uint, U128, U256};
+
+ const N: U256 =
+ U256::from_be_hex("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364141");
+
+ const N_2: U256 =
+ U256::from_be_hex("7FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF5D576E7357A4501DDFE92F46681B20A0");
+
+ #[test]
+ fn shr1() {
+ assert_eq!(N >> 1, N_2);
+ }
+
+ #[test]
+ fn shr_wide_1_1_128() {
+ assert_eq!(
+ Uint::shr_vartime_wide((U128::ONE, U128::ONE), 128),
+ (U128::ONE, U128::ZERO)
+ );
+ }
+
+ #[test]
+ fn shr_wide_0_max_1() {
+ assert_eq!(
+ Uint::shr_vartime_wide((U128::ZERO, U128::MAX), 1),
+ (U128::ONE << 127, U128::MAX >> 1)
+ );
+ }
+
+ #[test]
+ fn shr_wide_max_max_256() {
+ assert_eq!(
+ Uint::shr_vartime_wide((U128::MAX, U128::MAX), 256),
+ (U128::ZERO, U128::ZERO)
+ );
+ }
+}
diff --git a/vendor/crypto-bigint/src/uint/split.rs b/vendor/crypto-bigint/src/uint/split.rs
new file mode 100644
index 0000000..e690974
--- /dev/null
+++ b/vendor/crypto-bigint/src/uint/split.rs
@@ -0,0 +1,37 @@
+use crate::{Limb, Uint};
+
+/// Split this number into `H` high and `L` low limbs, returning the
+/// high and low components respectively.
+#[inline]
+pub(crate) const fn split_mixed<const L: usize, const H: usize, const O: usize>(
+ n: &Uint<O>,
+) -> (Uint<H>, Uint<L>) {
+ let top = L + H;
+ let top = if top < O { top } else { O };
+ let mut lo = [Limb::ZERO; L];
+ let mut hi = [Limb::ZERO; H];
+ let mut i = 0;
+
+ while i < top {
+ if i < L {
+ lo[i] = n.limbs[i];
+ } else {
+ hi[i - L] = n.limbs[i];
+ }
+ i += 1;
+ }
+
+ (Uint { limbs: hi }, Uint { limbs: lo })
+}
+
+#[cfg(test)]
+mod tests {
+ use crate::{U128, U64};
+
+ #[test]
+ fn split() {
+ let (hi, lo) = U128::from_be_hex("00112233445566778899aabbccddeeff").split();
+ assert_eq!(hi, U64::from_u64(0x0011223344556677));
+ assert_eq!(lo, U64::from_u64(0x8899aabbccddeeff));
+ }
+}
diff --git a/vendor/crypto-bigint/src/uint/sqrt.rs b/vendor/crypto-bigint/src/uint/sqrt.rs
new file mode 100644
index 0000000..5c96afb
--- /dev/null
+++ b/vendor/crypto-bigint/src/uint/sqrt.rs
@@ -0,0 +1,177 @@
+//! [`Uint`] square root operations.
+
+use super::Uint;
+use crate::{Limb, Word};
+use subtle::{ConstantTimeEq, CtOption};
+
+impl<const LIMBS: usize> Uint<LIMBS> {
+ /// See [`Self::sqrt_vartime`].
+ #[deprecated(
+ since = "0.5.3",
+ note = "This functionality will be moved to `sqrt_vartime` in a future release."
+ )]
+ pub const fn sqrt(&self) -> Self {
+ self.sqrt_vartime()
+ }
+
+ /// Computes √(`self`).
+ ///
+ /// Uses Brent & Zimmermann, Modern Computer Arithmetic, v0.5.9, Algorithm 1.13.
+ ///
+ /// Callers can check if `self` is a square by squaring the result.
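+ ///
+ /// An illustrative sketch (mirrors the tests below); the result is the
+ /// integer floor of the square root:
+ ///
+ /// ```
+ /// use crypto_bigint::U256;
+ /// assert_eq!(U256::from(16u8).sqrt_vartime(), U256::from(4u8));
+ /// // Non-squares round down: ⌊√10⌋ = 3.
+ /// assert_eq!(U256::from(10u8).sqrt_vartime(), U256::from(3u8));
+ /// ```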
+ pub const fn sqrt_vartime(&self) -> Self {
+ let max_bits = (self.bits_vartime() + 1) >> 1;
+ let cap = Self::ONE.shl_vartime(max_bits);
+ let mut guess = cap; // ≥ √(`self`)
+ let mut xn = {
+ let q = self.wrapping_div(&guess);
+ let t = guess.wrapping_add(&q);
+ t.shr_vartime(1)
+ };
+
+ // If `guess` increased, the initial guess was low.
+ // Repeat until the estimate reverses course.
+ while Uint::ct_lt(&guess, &xn).is_true_vartime() {
+ // Sometimes an increase is too far, especially with large
+ // powers, and then takes a long time to walk back. The upper
+ // bound is based on bit size, so saturate on that.
+ let le = Limb::ct_le(Limb(xn.bits_vartime() as Word), Limb(max_bits as Word));
+ guess = Self::ct_select(&cap, &xn, le);
+ xn = {
+ let q = self.wrapping_div(&guess);
+ let t = guess.wrapping_add(&q);
+ t.shr_vartime(1)
+ };
+ }
+
+ // Repeat while guess decreases.
+ while Uint::ct_gt(&guess, &xn).is_true_vartime() && xn.ct_is_nonzero().is_true_vartime() {
+ guess = xn;
+ xn = {
+ let q = self.wrapping_div(&guess);
+ let t = guess.wrapping_add(&q);
+ t.shr_vartime(1)
+ };
+ }
+
+ Self::ct_select(&Self::ZERO, &guess, self.ct_is_nonzero())
+ }
+
+ /// See [`Self::wrapping_sqrt_vartime`].
+ #[deprecated(
+ since = "0.5.3",
+ note = "This functionality will be moved to `wrapping_sqrt_vartime` in a future release."
+ )]
+ pub const fn wrapping_sqrt(&self) -> Self {
+ self.wrapping_sqrt_vartime()
+ }
+
+ /// Wrapped sqrt is just the normal √(`self`); wrapping can never occur.
+ /// This function exists so that all operations are accounted for in the
+ /// wrapping operations.
+ pub const fn wrapping_sqrt_vartime(&self) -> Self {
+ self.sqrt_vartime()
+ }
+
+ /// See [`Self::checked_sqrt_vartime`].
+ #[deprecated(
+ since = "0.5.3",
+ note = "This functionality will be moved to `checked_sqrt_vartime` in a future release."
+ )]
+ pub fn checked_sqrt(&self) -> CtOption<Self> {
+ self.checked_sqrt_vartime()
+ }
+
+ /// Perform checked sqrt, returning a [`CtOption`] which `is_some`
+ /// only if √(`self`)² == `self`.
+ pub fn checked_sqrt_vartime(&self) -> CtOption<Self> {
+ let r = self.sqrt_vartime();
+ let s = r.wrapping_mul(&r);
+ CtOption::new(r, ConstantTimeEq::ct_eq(self, &s))
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use crate::{Limb, U256};
+
+ #[cfg(feature = "rand")]
+ use {
+ crate::{CheckedMul, Random, U512},
+ rand_chacha::ChaChaRng,
+ rand_core::{RngCore, SeedableRng},
+ };
+
+ #[test]
+ fn edge() {
+ assert_eq!(U256::ZERO.sqrt_vartime(), U256::ZERO);
+ assert_eq!(U256::ONE.sqrt_vartime(), U256::ONE);
+ let mut half = U256::ZERO;
+ for i in 0..half.limbs.len() / 2 {
+ half.limbs[i] = Limb::MAX;
+ }
+ assert_eq!(U256::MAX.sqrt_vartime(), half,);
+ }
+
+ #[test]
+ fn simple() {
+ let tests = [
+ (4u8, 2u8),
+ (9, 3),
+ (16, 4),
+ (25, 5),
+ (36, 6),
+ (49, 7),
+ (64, 8),
+ (81, 9),
+ (100, 10),
+ (121, 11),
+ (144, 12),
+ (169, 13),
+ ];
+ for (a, e) in &tests {
+ let l = U256::from(*a);
+ let r = U256::from(*e);
+ assert_eq!(l.sqrt_vartime(), r);
+ assert_eq!(l.checked_sqrt_vartime().is_some().unwrap_u8(), 1u8);
+ }
+ }
+
+ #[test]
+ fn nonsquares() {
+ assert_eq!(U256::from(2u8).sqrt_vartime(), U256::from(1u8));
+ assert_eq!(
+ U256::from(2u8).checked_sqrt_vartime().is_some().unwrap_u8(),
+ 0
+ );
+ assert_eq!(U256::from(3u8).sqrt_vartime(), U256::from(1u8));
+ assert_eq!(
+ U256::from(3u8).checked_sqrt_vartime().is_some().unwrap_u8(),
+ 0
+ );
+ assert_eq!(U256::from(5u8).sqrt_vartime(), U256::from(2u8));
+ assert_eq!(U256::from(6u8).sqrt_vartime(), U256::from(2u8));
+ assert_eq!(U256::from(7u8).sqrt_vartime(), U256::from(2u8));
+ assert_eq!(U256::from(8u8).sqrt_vartime(), U256::from(2u8));
+ assert_eq!(U256::from(10u8).sqrt_vartime(), U256::from(3u8));
+ }
+
+ #[cfg(feature = "rand")]
+ #[test]
+ fn fuzz() {
+ let mut rng = ChaChaRng::from_seed([7u8; 32]);
+ for _ in 0..50 {
+ let t = rng.next_u32() as u64;
+ let s = U256::from(t);
+ let s2 = s.checked_mul(&s).unwrap();
+ assert_eq!(s2.sqrt_vartime(), s);
+ assert_eq!(s2.checked_sqrt_vartime().is_some().unwrap_u8(), 1);
+ }
+
+ for _ in 0..50 {
+ let s = U256::random(&mut rng);
+ let mut s2 = U512::ZERO;
+ s2.limbs[..s.limbs.len()].copy_from_slice(&s.limbs);
+ assert_eq!(s.square().sqrt_vartime(), s2);
+ }
+ }
+}
diff --git a/vendor/crypto-bigint/src/uint/sub.rs b/vendor/crypto-bigint/src/uint/sub.rs
new file mode 100644
index 0000000..571dd6a
--- /dev/null
+++ b/vendor/crypto-bigint/src/uint/sub.rs
@@ -0,0 +1,215 @@
+//! [`Uint`] subtraction operations.
+
+use super::Uint;
+use crate::{Checked, CheckedSub, CtChoice, Limb, Wrapping, Zero};
+use core::ops::{Sub, SubAssign};
+use subtle::CtOption;
+
+impl<const LIMBS: usize> Uint<LIMBS> {
+ /// Computes `a - (b + borrow)`, returning the result along with the new borrow.
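+ ///
+ /// A borrow-propagation sketch (mirrors the tests below): an underflow
+ /// wraps the result and saturates the returned borrow.
+ ///
+ /// ```
+ /// use crypto_bigint::{Limb, U128};
+ /// let (res, borrow) = U128::ZERO.sbb(&U128::ONE, Limb::ZERO);
+ /// assert_eq!(res, U128::MAX);
+ /// assert_eq!(borrow, Limb::MAX);
+ /// ```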
+ #[inline(always)]
+ pub const fn sbb(&self, rhs: &Self, mut borrow: Limb) -> (Self, Limb) {
+ let mut limbs = [Limb::ZERO; LIMBS];
+ let mut i = 0;
+
+ while i < LIMBS {
+ let (w, b) = self.limbs[i].sbb(rhs.limbs[i], borrow);
+ limbs[i] = w;
+ borrow = b;
+ i += 1;
+ }
+
+ (Self { limbs }, borrow)
+ }
+
+ /// Perform saturating subtraction, returning `ZERO` on underflow.
+ pub const fn saturating_sub(&self, rhs: &Self) -> Self {
+ let (res, underflow) = self.sbb(rhs, Limb::ZERO);
+ Self::ct_select(&res, &Self::ZERO, CtChoice::from_mask(underflow.0))
+ }
+
+ /// Perform wrapping subtraction, discarding underflow and wrapping around
+ /// the boundary of the type.
+ pub const fn wrapping_sub(&self, rhs: &Self) -> Self {
+ self.sbb(rhs, Limb::ZERO).0
+ }
+
+ /// Perform wrapping subtraction of `rhs` only if `choice` is truthy,
+ /// returning the result together with a [`CtChoice`] that is truthy if
+ /// an underflow occurred.
+ pub(crate) const fn conditional_wrapping_sub(
+ &self,
+ rhs: &Self,
+ choice: CtChoice,
+ ) -> (Self, CtChoice) {
+ let actual_rhs = Uint::ct_select(&Uint::ZERO, rhs, choice);
+ let (res, borrow) = self.sbb(&actual_rhs, Limb::ZERO);
+ (res, CtChoice::from_mask(borrow.0))
+ }
+}
+
+impl<const LIMBS: usize> CheckedSub<&Uint<LIMBS>> for Uint<LIMBS> {
+ type Output = Self;
+
+ fn checked_sub(&self, rhs: &Self) -> CtOption<Self> {
+ let (result, underflow) = self.sbb(rhs, Limb::ZERO);
+ CtOption::new(result, underflow.is_zero())
+ }
+}
+
+impl<const LIMBS: usize> Sub for Wrapping<Uint<LIMBS>> {
+ type Output = Self;
+
+ fn sub(self, rhs: Self) -> Wrapping<Uint<LIMBS>> {
+ Wrapping(self.0.wrapping_sub(&rhs.0))
+ }
+}
+
+impl<const LIMBS: usize> Sub<&Wrapping<Uint<LIMBS>>> for Wrapping<Uint<LIMBS>> {
+ type Output = Wrapping<Uint<LIMBS>>;
+
+ fn sub(self, rhs: &Wrapping<Uint<LIMBS>>) -> Wrapping<Uint<LIMBS>> {
+ Wrapping(self.0.wrapping_sub(&rhs.0))
+ }
+}
+
+impl<const LIMBS: usize> Sub<Wrapping<Uint<LIMBS>>> for &Wrapping<Uint<LIMBS>> {
+ type Output = Wrapping<Uint<LIMBS>>;
+
+ fn sub(self, rhs: Wrapping<Uint<LIMBS>>) -> Wrapping<Uint<LIMBS>> {
+ Wrapping(self.0.wrapping_sub(&rhs.0))
+ }
+}
+
+impl<const LIMBS: usize> Sub<&Wrapping<Uint<LIMBS>>> for &Wrapping<Uint<LIMBS>> {
+ type Output = Wrapping<Uint<LIMBS>>;
+
+ fn sub(self, rhs: &Wrapping<Uint<LIMBS>>) -> Wrapping<Uint<LIMBS>> {
+ Wrapping(self.0.wrapping_sub(&rhs.0))
+ }
+}
+
+impl<const LIMBS: usize> SubAssign for Wrapping<Uint<LIMBS>> {
+ fn sub_assign(&mut self, other: Self) {
+ *self = *self - other;
+ }
+}
+
+impl<const LIMBS: usize> SubAssign<&Wrapping<Uint<LIMBS>>> for Wrapping<Uint<LIMBS>> {
+ fn sub_assign(&mut self, other: &Self) {
+ *self = *self - other;
+ }
+}
+
+impl<const LIMBS: usize> Sub for Checked<Uint<LIMBS>> {
+ type Output = Self;
+
+ fn sub(self, rhs: Self) -> Checked<Uint<LIMBS>> {
+ Checked(
+ self.0
+ .and_then(|lhs| rhs.0.and_then(|rhs| lhs.checked_sub(&rhs))),
+ )
+ }
+}
+
+impl<const LIMBS: usize> Sub<&Checked<Uint<LIMBS>>> for Checked<Uint<LIMBS>> {
+ type Output = Checked<Uint<LIMBS>>;
+
+ fn sub(self, rhs: &Checked<Uint<LIMBS>>) -> Checked<Uint<LIMBS>> {
+ Checked(
+ self.0
+ .and_then(|lhs| rhs.0.and_then(|rhs| lhs.checked_sub(&rhs))),
+ )
+ }
+}
+
+impl<const LIMBS: usize> Sub<Checked<Uint<LIMBS>>> for &Checked<Uint<LIMBS>> {
+ type Output = Checked<Uint<LIMBS>>;
+
+ fn sub(self, rhs: Checked<Uint<LIMBS>>) -> Checked<Uint<LIMBS>> {
+ Checked(
+ self.0
+ .and_then(|lhs| rhs.0.and_then(|rhs| lhs.checked_sub(&rhs))),
+ )
+ }
+}
+
+impl<const LIMBS: usize> Sub<&Checked<Uint<LIMBS>>> for &Checked<Uint<LIMBS>> {
+ type Output = Checked<Uint<LIMBS>>;
+
+ fn sub(self, rhs: &Checked<Uint<LIMBS>>) -> Checked<Uint<LIMBS>> {
+ Checked(
+ self.0
+ .and_then(|lhs| rhs.0.and_then(|rhs| lhs.checked_sub(&rhs))),
+ )
+ }
+}
+
+impl<const LIMBS: usize> SubAssign for Checked<Uint<LIMBS>> {
+ fn sub_assign(&mut self, other: Self) {
+ *self = *self - other;
+ }
+}
+
+impl<const LIMBS: usize> SubAssign<&Checked<Uint<LIMBS>>> for Checked<Uint<LIMBS>> {
+ fn sub_assign(&mut self, other: &Self) {
+ *self = *self - other;
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use crate::{CheckedSub, Limb, U128};
+
+ #[test]
+ fn sbb_no_borrow() {
+ let (res, borrow) = U128::ONE.sbb(&U128::ONE, Limb::ZERO);
+ assert_eq!(res, U128::ZERO);
+ assert_eq!(borrow, Limb::ZERO);
+ }
+
+ #[test]
+ fn sbb_with_borrow() {
+ let (res, borrow) = U128::ZERO.sbb(&U128::ONE, Limb::ZERO);
+
+ assert_eq!(res, U128::MAX);
+ assert_eq!(borrow, Limb::MAX);
+ }
+
+ #[test]
+ fn saturating_sub_no_borrow() {
+ assert_eq!(
+ U128::from(5u64).saturating_sub(&U128::ONE),
+ U128::from(4u64)
+ );
+ }
+
+ #[test]
+ fn saturating_sub_with_borrow() {
+ assert_eq!(
+ U128::from(4u64).saturating_sub(&U128::from(5u64)),
+ U128::ZERO
+ );
+ }
+
+ #[test]
+ fn wrapping_sub_no_borrow() {
+ assert_eq!(U128::ONE.wrapping_sub(&U128::ONE), U128::ZERO);
+ }
+
+ #[test]
+ fn wrapping_sub_with_borrow() {
+ assert_eq!(U128::ZERO.wrapping_sub(&U128::ONE), U128::MAX);
+ }
+
+ #[test]
+ fn checked_sub_ok() {
+ let result = U128::ONE.checked_sub(&U128::ONE);
+ assert_eq!(result.unwrap(), U128::ZERO);
+ }
+
+ #[test]
+ fn checked_sub_overflow() {
+ let result = U128::ZERO.checked_sub(&U128::ONE);
+ assert!(!bool::from(result.is_some()));
+ }
+}
diff --git a/vendor/crypto-bigint/src/uint/sub_mod.rs b/vendor/crypto-bigint/src/uint/sub_mod.rs
new file mode 100644
index 0000000..b32babb
--- /dev/null
+++ b/vendor/crypto-bigint/src/uint/sub_mod.rs
@@ -0,0 +1,191 @@
+//! [`Uint`] modular subtraction operations.
+
+use crate::{Limb, SubMod, Uint};
+
+impl<const LIMBS: usize> Uint<LIMBS> {
+ /// Computes `self - rhs mod p` in constant time.
+ ///
+ /// Assumes `self - rhs` as unbounded signed integer is in `[-p, p)`.
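+ ///
+ /// A small worked sketch: with `p = 7`, `2 - 5 = -3 ≡ 4 (mod 7)`.
+ ///
+ /// ```
+ /// use crypto_bigint::U256;
+ /// let p = U256::from(7u8);
+ /// assert_eq!(U256::from(2u8).sub_mod(&U256::from(5u8), &p), U256::from(4u8));
+ /// ```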
+ pub const fn sub_mod(&self, rhs: &Uint<LIMBS>, p: &Uint<LIMBS>) -> Uint<LIMBS> {
+ let (out, borrow) = self.sbb(rhs, Limb::ZERO);
+
+ // If underflow occurred on the final limb, borrow = 0xfff...fff, otherwise
+ // borrow = 0x000...000. Thus, we use it as a mask to conditionally add the modulus.
+ let mask = Uint::from_words([borrow.0; LIMBS]);
+
+ out.wrapping_add(&p.bitand(&mask))
+ }
+
+ /// Returns `(self..., carry) - (rhs...) mod (p...)`, where `carry <= 1`.
+ /// Assumes `-(p...) <= (self..., carry) - (rhs...) < (p...)`.
+ #[inline(always)]
+ pub(crate) const fn sub_mod_with_carry(&self, carry: Limb, rhs: &Self, p: &Self) -> Self {
+ debug_assert!(carry.0 <= 1);
+
+ let (out, borrow) = self.sbb(rhs, Limb::ZERO);
+
+ // The new `borrow = Word::MAX` iff `carry == 0` and `borrow == Word::MAX`.
+ let borrow = (!carry.0.wrapping_neg()) & borrow.0;
+
+ // If underflow occurred on the final limb, borrow = 0xfff...fff, otherwise
+ // borrow = 0x000...000. Thus, we use it as a mask to conditionally add the modulus.
+ let mask = Uint::from_words([borrow; LIMBS]);
+
+ out.wrapping_add(&p.bitand(&mask))
+ }
+
+ /// Computes `self - rhs mod p` in constant time for the special modulus
+ /// `p = MAX+1-c` where `c` is small enough to fit in a single [`Limb`].
+ /// Since the type wraps modulo `MAX+1` and `MAX+1 ≡ c (mod p)`, an
+ /// underflow leaves the wrapped result exactly `c` too large, so
+ /// subtracting `c` completes the reduction.
+ ///
+ /// Assumes `self - rhs` as unbounded signed integer is in `[-p, p)`.
+ pub const fn sub_mod_special(&self, rhs: &Self, c: Limb) -> Self {
+ let (out, borrow) = self.sbb(rhs, Limb::ZERO);
+
+ // If underflow occurred, then we need to subtract `c` to account for
+ // the underflow. This cannot underflow due to the assumption
+ // `self - rhs >= -p`.
+ let l = borrow.0 & c.0;
+ out.wrapping_sub(&Uint::from_word(l))
+ }
+}
+
+impl<const LIMBS: usize> SubMod for Uint<LIMBS> {
+ type Output = Self;
+
+ fn sub_mod(&self, rhs: &Self, p: &Self) -> Self {
+ debug_assert!(self < p);
+ debug_assert!(rhs < p);
+ self.sub_mod(rhs, p)
+ }
+}
+
+#[cfg(all(test, feature = "rand"))]
+mod tests {
+ use crate::{Limb, NonZero, Random, RandomMod, Uint};
+ use rand_core::SeedableRng;
+
+ macro_rules! test_sub_mod {
+ ($size:expr, $test_name:ident) => {
+ #[test]
+ fn $test_name() {
+ let mut rng = rand_chacha::ChaCha8Rng::seed_from_u64(1);
+ let moduli = [
+ NonZero::<Uint<$size>>::random(&mut rng),
+ NonZero::<Uint<$size>>::random(&mut rng),
+ ];
+
+ for p in &moduli {
+ let base_cases = [
+ (1u64, 0u64, 1u64.into()),
+ (0, 1, p.wrapping_sub(&1u64.into())),
+ (0, 0, 0u64.into()),
+ ];
+ for (a, b, c) in &base_cases {
+ let a: Uint<$size> = (*a).into();
+ let b: Uint<$size> = (*b).into();
+
+ let x = a.sub_mod(&b, p);
+ assert_eq!(*c, x, "{} - {} mod {} = {} != {}", a, b, p, x, c);
+ }
+
+ if $size > 1 {
+ for _i in 0..100 {
+ let a: Uint<$size> = Limb::random(&mut rng).into();
+ let b: Uint<$size> = Limb::random(&mut rng).into();
+ let (a, b) = if a < b { (b, a) } else { (a, b) };
+
+ let c = a.sub_mod(&b, p);
+ assert!(c < **p, "not reduced");
+ assert_eq!(c, a.wrapping_sub(&b), "result incorrect");
+ }
+ }
+
+ for _i in 0..100 {
+ let a = Uint::<$size>::random_mod(&mut rng, p);
+ let b = Uint::<$size>::random_mod(&mut rng, p);
+
+ let c = a.sub_mod(&b, p);
+ assert!(c < **p, "not reduced: {} >= {} ", c, p);
+
+ let x = a.wrapping_sub(&b);
+ if a >= b && x < **p {
+ assert_eq!(c, x, "incorrect result");
+ }
+ }
+ }
+ }
+ };
+ }
+
+ macro_rules! test_sub_mod_special {
+ ($size:expr, $test_name:ident) => {
+ #[test]
+ fn $test_name() {
+ let mut rng = rand_chacha::ChaCha8Rng::seed_from_u64(1);
+ let moduli = [
+ NonZero::<Limb>::random(&mut rng),
+ NonZero::<Limb>::random(&mut rng),
+ ];
+
+ for special in &moduli {
+ let p = &NonZero::new(Uint::ZERO.wrapping_sub(&Uint::from_word(special.0)))
+ .unwrap();
+
+ let minus_one = p.wrapping_sub(&Uint::ONE);
+
+ let base_cases = [
+ (Uint::ZERO, Uint::ZERO, Uint::ZERO),
+ (Uint::ONE, Uint::ZERO, Uint::ONE),
+ (Uint::ZERO, Uint::ONE, minus_one),
+ (minus_one, minus_one, Uint::ZERO),
+ (Uint::ZERO, minus_one, Uint::ONE),
+ ];
+ for (a, b, c) in &base_cases {
+ let x = a.sub_mod_special(&b, *special.as_ref());
+ assert_eq!(*c, x, "{} - {} mod {} = {} != {}", a, b, p, x, c);
+ }
+
+ for _i in 0..100 {
+ let a = Uint::<$size>::random_mod(&mut rng, p);
+ let b = Uint::<$size>::random_mod(&mut rng, p);
+
+ let c = a.sub_mod_special(&b, *special.as_ref());
+ assert!(c < **p, "not reduced: {} >= {} ", c, p);
+
+ let expected = a.sub_mod(&b, p);
+ assert_eq!(c, expected, "incorrect result");
+ }
+ }
+ }
+ };
+ }
+
+ // Test requires that a single limb can represent a 64-bit integer
+ #[cfg(target_pointer_width = "64")]
+ test_sub_mod!(1, sub1);
+
+ test_sub_mod!(2, sub2);
+ test_sub_mod!(3, sub3);
+ test_sub_mod!(4, sub4);
+ test_sub_mod!(5, sub5);
+ test_sub_mod!(6, sub6);
+ test_sub_mod!(7, sub7);
+ test_sub_mod!(8, sub8);
+ test_sub_mod!(9, sub9);
+ test_sub_mod!(10, sub10);
+ test_sub_mod!(11, sub11);
+ test_sub_mod!(12, sub12);
+
+ test_sub_mod_special!(1, sub_mod_special_1);
+ test_sub_mod_special!(2, sub_mod_special_2);
+ test_sub_mod_special!(3, sub_mod_special_3);
+ test_sub_mod_special!(4, sub_mod_special_4);
+ test_sub_mod_special!(5, sub_mod_special_5);
+ test_sub_mod_special!(6, sub_mod_special_6);
+ test_sub_mod_special!(7, sub_mod_special_7);
+ test_sub_mod_special!(8, sub_mod_special_8);
+ test_sub_mod_special!(9, sub_mod_special_9);
+ test_sub_mod_special!(10, sub_mod_special_10);
+ test_sub_mod_special!(11, sub_mod_special_11);
+ test_sub_mod_special!(12, sub_mod_special_12);
+}
diff --git a/vendor/crypto-bigint/src/wrapping.rs b/vendor/crypto-bigint/src/wrapping.rs
new file mode 100644
index 0000000..7ee6016
--- /dev/null
+++ b/vendor/crypto-bigint/src/wrapping.rs
@@ -0,0 +1,117 @@
+//! Wrapping arithmetic.
+
+use crate::Zero;
+use core::fmt;
+use subtle::{Choice, ConditionallySelectable, ConstantTimeEq};
+
+#[cfg(feature = "rand_core")]
+use {crate::Random, rand_core::CryptoRngCore};
+
+#[cfg(feature = "serde")]
+use serdect::serde::{Deserialize, Deserializer, Serialize, Serializer};
+
+/// Provides intentionally-wrapped arithmetic on `T`.
+///
+/// This is analogous to [`core::num::Wrapping`] but allows this crate to
+/// define trait impls for this type.
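+///
+/// A minimal sketch of the semantics (relies on the `Sub` impl for
+/// `Wrapping<Uint<LIMBS>>` in `uint/sub.rs`):
+///
+/// ```
+/// use crypto_bigint::{Wrapping, U128};
+/// // Underflow wraps around the boundary of the type.
+/// assert_eq!(Wrapping(U128::ZERO) - Wrapping(U128::ONE), Wrapping(U128::MAX));
+/// ```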
+#[derive(Copy, Clone, Debug, Default, Eq, PartialEq, PartialOrd, Ord)]
+pub struct Wrapping<T>(pub T);
+
+impl<T: Zero> Zero for Wrapping<T> {
+ const ZERO: Self = Self(T::ZERO);
+}
+
+impl<T: fmt::Display> fmt::Display for Wrapping<T> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ self.0.fmt(f)
+ }
+}
+
+impl<T: fmt::Binary> fmt::Binary for Wrapping<T> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ self.0.fmt(f)
+ }
+}
+
+impl<T: fmt::Octal> fmt::Octal for Wrapping<T> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ self.0.fmt(f)
+ }
+}
+
+impl<T: fmt::LowerHex> fmt::LowerHex for Wrapping<T> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ self.0.fmt(f)
+ }
+}
+
+impl<T: fmt::UpperHex> fmt::UpperHex for Wrapping<T> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ self.0.fmt(f)
+ }
+}
+
+impl<T: ConditionallySelectable> ConditionallySelectable for Wrapping<T> {
+ fn conditional_select(a: &Self, b: &Self, choice: Choice) -> Self {
+ Wrapping(T::conditional_select(&a.0, &b.0, choice))
+ }
+}
+
+impl<T: ConstantTimeEq> ConstantTimeEq for Wrapping<T> {
+ fn ct_eq(&self, other: &Self) -> Choice {
+ self.0.ct_eq(&other.0)
+ }
+}
+
+#[cfg(feature = "rand_core")]
+impl<T: Random> Random for Wrapping<T> {
+ fn random(rng: &mut impl CryptoRngCore) -> Self {
+ Wrapping(Random::random(rng))
+ }
+}
+
+#[cfg(feature = "serde")]
+impl<'de, T: Deserialize<'de>> Deserialize<'de> for Wrapping<T> {
+ fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
+ where
+ D: Deserializer<'de>,
+ {
+ Ok(Self(T::deserialize(deserializer)?))
+ }
+}
+
+#[cfg(feature = "serde")]
+impl<T: Serialize> Serialize for Wrapping<T> {
+ fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
+ where
+ S: Serializer,
+ {
+ self.0.serialize(serializer)
+ }
+}
+
+#[cfg(all(test, feature = "serde"))]
+#[allow(clippy::unwrap_used)]
+mod tests {
+ use crate::{Wrapping, U64};
+
+ #[test]
+ fn serde() {
+ const TEST: Wrapping<U64> = Wrapping(U64::from_u64(0x0011223344556677));
+
+ let serialized = bincode::serialize(&TEST).unwrap();
+ let deserialized: Wrapping<U64> = bincode::deserialize(&serialized).unwrap();
+
+ assert_eq!(TEST, deserialized);
+ }
+
+ #[test]
+ fn serde_owned() {
+ const TEST: Wrapping<U64> = Wrapping(U64::from_u64(0x0011223344556677));
+
+ let serialized = bincode::serialize(&TEST).unwrap();
+ let deserialized: Wrapping<U64> = bincode::deserialize_from(serialized.as_slice()).unwrap();
+
+ assert_eq!(TEST, deserialized);
+ }
+}