Diffstat (limited to 'vendor/crypto-bigint/src/uint')
-rw-r--r--  vendor/crypto-bigint/src/uint/add.rs | 17
-rw-r--r--  vendor/crypto-bigint/src/uint/add_mod.rs | 4
-rw-r--r--  vendor/crypto-bigint/src/uint/array.rs | 3
-rw-r--r--  vendor/crypto-bigint/src/uint/bits.rs | 242
-rw-r--r--  vendor/crypto-bigint/src/uint/cmp.rs | 95
-rw-r--r--  vendor/crypto-bigint/src/uint/concat.rs | 83
-rw-r--r--  vendor/crypto-bigint/src/uint/div.rs | 1
-rw-r--r--  vendor/crypto-bigint/src/uint/div_limb.rs | 2
-rw-r--r--  vendor/crypto-bigint/src/uint/encoding.rs | 66
-rw-r--r--  vendor/crypto-bigint/src/uint/encoding/rlp.rs | 1
-rw-r--r--  vendor/crypto-bigint/src/uint/extra_sizes.rs | 160
-rw-r--r--  vendor/crypto-bigint/src/uint/from.rs | 41
-rw-r--r--  vendor/crypto-bigint/src/uint/inv_mod.rs | 143
-rw-r--r--  vendor/crypto-bigint/src/uint/macros.rs | 115
-rw-r--r--  vendor/crypto-bigint/src/uint/modular/constant_mod.rs | 53
-rw-r--r--  vendor/crypto-bigint/src/uint/modular/constant_mod/const_inv.rs | 2
-rw-r--r--  vendor/crypto-bigint/src/uint/modular/constant_mod/const_pow.rs | 151
-rw-r--r--  vendor/crypto-bigint/src/uint/modular/constant_mod/macros.rs | 39
-rw-r--r--  vendor/crypto-bigint/src/uint/modular/pow.rs | 135
-rw-r--r--  vendor/crypto-bigint/src/uint/modular/runtime_mod.rs | 178
-rw-r--r--  vendor/crypto-bigint/src/uint/modular/runtime_mod/runtime_pow.rs | 92
-rw-r--r--  vendor/crypto-bigint/src/uint/mul.rs | 186
-rw-r--r--  vendor/crypto-bigint/src/uint/mul_mod.rs | 2
-rw-r--r--  vendor/crypto-bigint/src/uint/neg.rs | 41
-rw-r--r--  vendor/crypto-bigint/src/uint/neg_mod.rs | 4
-rw-r--r--  vendor/crypto-bigint/src/uint/rand.rs | 33
-rw-r--r--  vendor/crypto-bigint/src/uint/shl.rs | 52
-rw-r--r--  vendor/crypto-bigint/src/uint/shr.rs | 24
-rw-r--r--  vendor/crypto-bigint/src/uint/split.rs | 67
-rw-r--r--  vendor/crypto-bigint/src/uint/sqrt.rs | 77
-rw-r--r--  vendor/crypto-bigint/src/uint/sub.rs | 23
-rw-r--r--  vendor/crypto-bigint/src/uint/sub_mod.rs | 4
32 files changed, 1768 insertions, 368 deletions
diff --git a/vendor/crypto-bigint/src/uint/add.rs b/vendor/crypto-bigint/src/uint/add.rs
index 21aa5d578..e4f7bfa42 100644
--- a/vendor/crypto-bigint/src/uint/add.rs
+++ b/vendor/crypto-bigint/src/uint/add.rs
@@ -24,12 +24,7 @@ impl<const LIMBS: usize> Uint<LIMBS> {
/// Perform saturating addition, returning `MAX` on overflow.
pub const fn saturating_add(&self, rhs: &Self) -> Self {
let (res, overflow) = self.adc(rhs, Limb::ZERO);
-
- if overflow.0 == 0 {
- res
- } else {
- Self::MAX
- }
+ Self::ct_select(&res, &Self::MAX, CtChoice::from_lsb(overflow.0))
}
/// Perform wrapping addition, discarding overflow.
@@ -178,6 +173,16 @@ mod tests {
}
#[test]
+ fn saturating_add_no_carry() {
+ assert_eq!(U128::ZERO.saturating_add(&U128::ONE), U128::ONE);
+ }
+
+ #[test]
+ fn saturating_add_with_carry() {
+ assert_eq!(U128::MAX.saturating_add(&U128::ONE), U128::MAX);
+ }
+
+ #[test]
fn wrapping_add_no_carry() {
assert_eq!(U128::ZERO.wrapping_add(&U128::ONE), U128::ONE);
}
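
For illustration, here is a standalone sketch (separate from the vendored code) of the branchless select idiom the new `saturating_add` relies on, written with plain `u64` words instead of the crate's `Limb`/`CtChoice` types:

fn saturating_add_u64(a: u64, b: u64) -> u64 {
    let (sum, overflow) = a.overflowing_add(b);
    // Expand the carry bit into an all-zeros or all-ones mask.
    let mask = (overflow as u64).wrapping_neg();
    // Branch-free select: `sum` when mask == 0, `u64::MAX` when mask == !0.
    sum ^ (mask & (sum ^ u64::MAX))
}

fn main() {
    assert_eq!(saturating_add_u64(1, 2), 3);
    assert_eq!(saturating_add_u64(u64::MAX, 1), u64::MAX);
}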
diff --git a/vendor/crypto-bigint/src/uint/add_mod.rs b/vendor/crypto-bigint/src/uint/add_mod.rs
index 70674f5e8..091ba4634 100644
--- a/vendor/crypto-bigint/src/uint/add_mod.rs
+++ b/vendor/crypto-bigint/src/uint/add_mod.rs
@@ -3,7 +3,7 @@
use crate::{AddMod, Limb, Uint};
impl<const LIMBS: usize> Uint<LIMBS> {
- /// Computes `self + rhs mod p` in constant time.
+ /// Computes `self + rhs mod p`.
///
/// Assumes `self + rhs` as unbounded integer is `< 2p`.
pub const fn add_mod(&self, rhs: &Uint<LIMBS>, p: &Uint<LIMBS>) -> Uint<LIMBS> {
@@ -21,7 +21,7 @@ impl<const LIMBS: usize> Uint<LIMBS> {
w.wrapping_add(&p.bitand(&mask))
}
- /// Computes `self + rhs mod p` in constant time for the special modulus
+ /// Computes `self + rhs mod p` for the special modulus
/// `p = MAX+1-c` where `c` is small enough to fit in a single [`Limb`].
///
/// Assumes `self + rhs` as unbounded integer is `< 2p`.
diff --git a/vendor/crypto-bigint/src/uint/array.rs b/vendor/crypto-bigint/src/uint/array.rs
index 36f165e23..1f83dfc19 100644
--- a/vendor/crypto-bigint/src/uint/array.rs
+++ b/vendor/crypto-bigint/src/uint/array.rs
@@ -50,7 +50,7 @@ macro_rules! impl_uint_array_encoding {
};
}
-// TODO(tarcieri): use `const_evaluatable_checked` when stable to make generic around bits.
+// TODO(tarcieri): use `generic_const_exprs` when stable to make generic around bits.
impl_uint_array_encoding! {
(U64, typenum::U8),
(U128, typenum::U16),
@@ -61,6 +61,7 @@ impl_uint_array_encoding! {
(U512, typenum::U64),
(U576, typenum::U72),
(U768, typenum::U96),
+ (U832, typenum::U104),
(U896, typenum::U112),
(U1024, typenum::U128),
(U1536, typenum::U192),
diff --git a/vendor/crypto-bigint/src/uint/bits.rs b/vendor/crypto-bigint/src/uint/bits.rs
index 834014341..da514058f 100644
--- a/vendor/crypto-bigint/src/uint/bits.rs
+++ b/vendor/crypto-bigint/src/uint/bits.rs
@@ -2,8 +2,11 @@ use crate::{CtChoice, Limb, Uint, Word};
impl<const LIMBS: usize> Uint<LIMBS> {
/// Returns `true` if the bit at position `index` is set, `false` otherwise.
+ ///
+ /// # Remarks
+ /// This operation is variable time with respect to `index` only.
#[inline(always)]
- pub const fn bit_vartime(self, index: usize) -> bool {
+ pub const fn bit_vartime(&self, index: usize) -> bool {
if index >= Self::BITS {
false
} else {
@@ -12,19 +15,18 @@ impl<const LIMBS: usize> Uint<LIMBS> {
}
/// Calculate the number of bits needed to represent this number.
- #[allow(trivial_numeric_casts)]
- pub const fn bits_vartime(self) -> usize {
+ pub const fn bits_vartime(&self) -> usize {
let mut i = LIMBS - 1;
while i > 0 && self.limbs[i].0 == 0 {
i -= 1;
}
- let limb = self.limbs[i].0;
- Limb::BITS * (i + 1) - limb.leading_zeros() as usize
+ let limb = self.limbs[i];
+ Limb::BITS * (i + 1) - limb.leading_zeros()
}
/// Calculate the number of leading zeros in the binary representation of this number.
- pub const fn leading_zeros(self) -> usize {
+ pub const fn leading_zeros(&self) -> usize {
let limbs = self.as_limbs();
let mut count: Word = 0;
@@ -42,8 +44,28 @@ impl<const LIMBS: usize> Uint<LIMBS> {
count as usize
}
+ /// Calculate the number of leading zeros in the binary representation of this number,
+ /// variable time in `self`.
+ pub const fn leading_zeros_vartime(&self) -> usize {
+ let limbs = self.as_limbs();
+
+ let mut count = 0;
+ let mut i = LIMBS;
+ while i > 0 {
+ i -= 1;
+ let l = limbs[i];
+ let z = l.leading_zeros();
+ count += z;
+ if z != Limb::BITS {
+ break;
+ }
+ }
+
+ count
+ }
+
/// Calculate the number of trailing zeros in the binary representation of this number.
- pub const fn trailing_zeros(self) -> usize {
+ pub const fn trailing_zeros(&self) -> usize {
let limbs = self.as_limbs();
let mut count: Word = 0;
@@ -61,15 +83,74 @@ impl<const LIMBS: usize> Uint<LIMBS> {
count as usize
}
+ /// Calculate the number of trailing zeros in the binary representation of this number,
+ /// variable time in `self`.
+ pub const fn trailing_zeros_vartime(&self) -> usize {
+ let limbs = self.as_limbs();
+
+ let mut count = 0;
+ let mut i = 0;
+ while i < LIMBS {
+ let l = limbs[i];
+ let z = l.trailing_zeros();
+ count += z;
+ if z != Limb::BITS {
+ break;
+ }
+ i += 1;
+ }
+
+ count
+ }
+
+ /// Calculate the number of trailing ones in the binary representation of this number.
+ pub const fn trailing_ones(&self) -> usize {
+ let limbs = self.as_limbs();
+
+ let mut count: Word = 0;
+ let mut i = 0;
+ let mut nonmax_limb_not_encountered = CtChoice::TRUE;
+ while i < LIMBS {
+ let l = limbs[i];
+ let z = l.trailing_ones() as Word;
+ count += nonmax_limb_not_encountered.if_true(z);
+ nonmax_limb_not_encountered =
+ nonmax_limb_not_encountered.and(Limb::ct_eq(l, Limb::MAX));
+ i += 1;
+ }
+
+ count as usize
+ }
+
+ /// Calculate the number of trailing ones in the binary representation of this number,
+ /// variable time in `self`.
+ pub const fn trailing_ones_vartime(&self) -> usize {
+ let limbs = self.as_limbs();
+
+ let mut count = 0;
+ let mut i = 0;
+ while i < LIMBS {
+ let l = limbs[i];
+ let z = l.trailing_ones();
+ count += z;
+ if z != Limb::BITS {
+ break;
+ }
+ i += 1;
+ }
+
+ count
+ }
+
/// Calculate the number of bits needed to represent this number.
- pub const fn bits(self) -> usize {
+ pub const fn bits(&self) -> usize {
Self::BITS - self.leading_zeros()
}
/// Get the value of the bit at position `index`, as a truthy or falsy `CtChoice`.
/// Returns the falsy value for indices out of range.
- pub const fn bit(self, index: usize) -> CtChoice {
- let limb_num = Limb((index / Limb::BITS) as Word);
+ pub const fn bit(&self, index: usize) -> CtChoice {
+ let limb_num = index / Limb::BITS;
let index_in_limb = index % Limb::BITS;
let index_mask = 1 << index_in_limb;
@@ -79,18 +160,36 @@ impl<const LIMBS: usize> Uint<LIMBS> {
let mut i = 0;
while i < LIMBS {
let bit = limbs[i] & index_mask;
- let is_right_limb = Limb::ct_eq(limb_num, Limb(i as Word));
+ let is_right_limb = CtChoice::from_usize_equality(i, limb_num);
result |= is_right_limb.if_true(bit);
i += 1;
}
CtChoice::from_lsb(result >> index_in_limb)
}
+
+ /// Sets the bit at `index` to 0 or 1 depending on the value of `bit_value`.
+ pub(crate) const fn set_bit(self, index: usize, bit_value: CtChoice) -> Self {
+ let mut result = self;
+ let limb_num = index / Limb::BITS;
+ let index_in_limb = index % Limb::BITS;
+ let index_mask = 1 << index_in_limb;
+
+ let mut i = 0;
+ while i < LIMBS {
+ let is_right_limb = CtChoice::from_usize_equality(i, limb_num);
+ let old_limb = result.limbs[i].0;
+ let new_limb = bit_value.select(old_limb & !index_mask, old_limb | index_mask);
+ result.limbs[i] = Limb(is_right_limb.select(old_limb, new_limb));
+ i += 1;
+ }
+ result
+ }
}
#[cfg(test)]
mod tests {
- use crate::U256;
+ use crate::{CtChoice, U256};
fn uint_with_bits_at(positions: &[usize]) -> U256 {
let mut result = U256::ZERO;
@@ -127,36 +226,135 @@ mod tests {
#[test]
fn leading_zeros() {
let u = uint_with_bits_at(&[256 - 16, 256 - 79, 256 - 207]);
- assert_eq!(u.leading_zeros() as u32, 15);
+ assert_eq!(u.leading_zeros(), 15);
let u = uint_with_bits_at(&[256 - 79, 256 - 207]);
- assert_eq!(u.leading_zeros() as u32, 78);
+ assert_eq!(u.leading_zeros(), 78);
let u = uint_with_bits_at(&[256 - 207]);
- assert_eq!(u.leading_zeros() as u32, 206);
+ assert_eq!(u.leading_zeros(), 206);
let u = uint_with_bits_at(&[256 - 1, 256 - 75, 256 - 150]);
- assert_eq!(u.leading_zeros() as u32, 0);
+ assert_eq!(u.leading_zeros(), 0);
let u = U256::ZERO;
- assert_eq!(u.leading_zeros() as u32, 256);
+ assert_eq!(u.leading_zeros(), 256);
+ }
+
+ #[test]
+ fn leading_zeros_vartime() {
+ let u = uint_with_bits_at(&[256 - 16, 256 - 79, 256 - 207]);
+ assert_eq!(u.leading_zeros_vartime(), 15);
+
+ let u = uint_with_bits_at(&[256 - 79, 256 - 207]);
+ assert_eq!(u.leading_zeros_vartime(), 78);
+
+ let u = uint_with_bits_at(&[256 - 207]);
+ assert_eq!(u.leading_zeros_vartime(), 206);
+
+ let u = uint_with_bits_at(&[256 - 1, 256 - 75, 256 - 150]);
+ assert_eq!(u.leading_zeros_vartime(), 0);
+
+ let u = U256::ZERO;
+ assert_eq!(u.leading_zeros_vartime(), 256);
}
#[test]
fn trailing_zeros() {
let u = uint_with_bits_at(&[16, 79, 150]);
- assert_eq!(u.trailing_zeros() as u32, 16);
+ assert_eq!(u.trailing_zeros(), 16);
+
+ let u = uint_with_bits_at(&[79, 150]);
+ assert_eq!(u.trailing_zeros(), 79);
+
+ let u = uint_with_bits_at(&[150, 207]);
+ assert_eq!(u.trailing_zeros(), 150);
+
+ let u = uint_with_bits_at(&[0, 150, 207]);
+ assert_eq!(u.trailing_zeros(), 0);
+
+ let u = U256::ZERO;
+ assert_eq!(u.trailing_zeros(), 256);
+ }
+
+ #[test]
+ fn trailing_zeros_vartime() {
+ let u = uint_with_bits_at(&[16, 79, 150]);
+ assert_eq!(u.trailing_zeros_vartime(), 16);
let u = uint_with_bits_at(&[79, 150]);
- assert_eq!(u.trailing_zeros() as u32, 79);
+ assert_eq!(u.trailing_zeros_vartime(), 79);
let u = uint_with_bits_at(&[150, 207]);
- assert_eq!(u.trailing_zeros() as u32, 150);
+ assert_eq!(u.trailing_zeros_vartime(), 150);
let u = uint_with_bits_at(&[0, 150, 207]);
- assert_eq!(u.trailing_zeros() as u32, 0);
+ assert_eq!(u.trailing_zeros_vartime(), 0);
let u = U256::ZERO;
- assert_eq!(u.trailing_zeros() as u32, 256);
+ assert_eq!(u.trailing_zeros_vartime(), 256);
+ }
+
+ #[test]
+ fn trailing_ones() {
+ let u = !uint_with_bits_at(&[16, 79, 150]);
+ assert_eq!(u.trailing_ones(), 16);
+
+ let u = !uint_with_bits_at(&[79, 150]);
+ assert_eq!(u.trailing_ones(), 79);
+
+ let u = !uint_with_bits_at(&[150, 207]);
+ assert_eq!(u.trailing_ones(), 150);
+
+ let u = !uint_with_bits_at(&[0, 150, 207]);
+ assert_eq!(u.trailing_ones(), 0);
+
+ let u = U256::MAX;
+ assert_eq!(u.trailing_ones(), 256);
+ }
+
+ #[test]
+ fn trailing_ones_vartime() {
+ let u = !uint_with_bits_at(&[16, 79, 150]);
+ assert_eq!(u.trailing_ones_vartime(), 16);
+
+ let u = !uint_with_bits_at(&[79, 150]);
+ assert_eq!(u.trailing_ones_vartime(), 79);
+
+ let u = !uint_with_bits_at(&[150, 207]);
+ assert_eq!(u.trailing_ones_vartime(), 150);
+
+ let u = !uint_with_bits_at(&[0, 150, 207]);
+ assert_eq!(u.trailing_ones_vartime(), 0);
+
+ let u = U256::MAX;
+ assert_eq!(u.trailing_ones_vartime(), 256);
+ }
+
+ #[test]
+ fn set_bit() {
+ let u = uint_with_bits_at(&[16, 79, 150]);
+ assert_eq!(
+ u.set_bit(127, CtChoice::TRUE),
+ uint_with_bits_at(&[16, 79, 127, 150])
+ );
+
+ let u = uint_with_bits_at(&[16, 79, 150]);
+ assert_eq!(
+ u.set_bit(150, CtChoice::TRUE),
+ uint_with_bits_at(&[16, 79, 150])
+ );
+
+ let u = uint_with_bits_at(&[16, 79, 150]);
+ assert_eq!(
+ u.set_bit(127, CtChoice::FALSE),
+ uint_with_bits_at(&[16, 79, 150])
+ );
+
+ let u = uint_with_bits_at(&[16, 79, 150]);
+ assert_eq!(
+ u.set_bit(150, CtChoice::FALSE),
+ uint_with_bits_at(&[16, 79])
+ );
}
}
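
The new `set_bit` touches every limb so the memory access pattern does not depend on `index`. A standalone sketch of the same idea on a little-endian `[u64; 4]`, with plain masks standing in for `CtChoice`:

fn set_bit(words: &mut [u64; 4], index: usize, bit: bool) {
    let limb_num = index / 64;
    let mask = 1u64 << (index % 64);
    let bit_mask = (bit as u64).wrapping_neg(); // 0 or all ones
    for (i, w) in words.iter_mut().enumerate() {
        // The limb with the target bit forced to `bit`.
        let with_bit = (*w & !mask) | (mask & bit_mask);
        // Only the target limb actually changes; every limb is still written.
        let is_target = ((i == limb_num) as u64).wrapping_neg();
        *w = (*w & !is_target) | (with_bit & is_target);
    }
}

fn main() {
    let mut x = [0u64; 4];
    set_bit(&mut x, 127, true);
    assert_eq!(x, [0, 1 << 63, 0, 0]);
    set_bit(&mut x, 127, false);
    assert_eq!(x, [0u64; 4]);
}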
diff --git a/vendor/crypto-bigint/src/uint/cmp.rs b/vendor/crypto-bigint/src/uint/cmp.rs
index 8815369e3..b513242e4 100644
--- a/vendor/crypto-bigint/src/uint/cmp.rs
+++ b/vendor/crypto-bigint/src/uint/cmp.rs
@@ -72,12 +72,52 @@ impl<const LIMBS: usize> Uint<LIMBS> {
CtChoice::from_mask(borrow.0)
}
- /// Returns the truthy value if `self <= rhs` and the falsy value otherwise.
/// Returns the truthy value if `self > rhs` and the falsy value otherwise.
#[inline]
pub(crate) const fn ct_gt(lhs: &Self, rhs: &Self) -> CtChoice {
let (_res, borrow) = rhs.sbb(lhs, Limb::ZERO);
CtChoice::from_mask(borrow.0)
}
+
+ /// Returns the ordering between `self` and `rhs` as an i8.
+ /// Values correspond to the Ordering enum:
+ /// -1 is Less
+ /// 0 is Equal
+ /// 1 is Greater
+ #[inline]
+ pub(crate) const fn ct_cmp(lhs: &Self, rhs: &Self) -> i8 {
+ let mut i = 0;
+ let mut borrow = Limb::ZERO;
+ let mut diff = Limb::ZERO;
+
+ while i < LIMBS {
+ let (w, b) = rhs.limbs[i].sbb(lhs.limbs[i], borrow);
+ diff = diff.bitor(w);
+ borrow = b;
+ i += 1;
+ }
+ let sgn = ((borrow.0 & 2) as i8) - 1;
+ (diff.ct_is_nonzero().to_u8() as i8) * sgn
+ }
+
+ /// Returns the Ordering between `self` and `rhs` in variable time.
+ pub const fn cmp_vartime(&self, rhs: &Self) -> Ordering {
+ let mut i = LIMBS - 1;
+ loop {
+ let (val, borrow) = self.limbs[i].sbb(rhs.limbs[i], Limb::ZERO);
+ if val.0 != 0 {
+ return if borrow.0 != 0 {
+ Ordering::Less
+ } else {
+ Ordering::Greater
+ };
+ }
+ if i == 0 {
+ return Ordering::Equal;
+ }
+ i -= 1;
+ }
+ }
}
impl<const LIMBS: usize> ConstantTimeEq for Uint<LIMBS> {
@@ -105,15 +145,11 @@ impl<const LIMBS: usize> Eq for Uint<LIMBS> {}
impl<const LIMBS: usize> Ord for Uint<LIMBS> {
fn cmp(&self, other: &Self) -> Ordering {
- let is_lt = self.ct_lt(other);
- let is_eq = self.ct_eq(other);
-
- if is_lt.into() {
- Ordering::Less
- } else if is_eq.into() {
- Ordering::Equal
- } else {
- Ordering::Greater
+ let c = Self::ct_cmp(self, other);
+ match c {
+ -1 => Ordering::Less,
+ 0 => Ordering::Equal,
+ _ => Ordering::Greater,
}
}
}
@@ -133,6 +169,7 @@ impl<const LIMBS: usize> PartialEq for Uint<LIMBS> {
#[cfg(test)]
mod tests {
use crate::{Integer, Zero, U128};
+ use core::cmp::Ordering;
use subtle::{ConstantTimeEq, ConstantTimeGreater, ConstantTimeLess};
#[test]
@@ -197,4 +234,42 @@ mod tests {
assert!(!bool::from(c.ct_lt(&a)));
assert!(!bool::from(c.ct_lt(&b)));
}
+
+ #[test]
+ fn cmp() {
+ let a = U128::ZERO;
+ let b = U128::ONE;
+ let c = U128::MAX;
+
+ assert_eq!(a.cmp(&b), Ordering::Less);
+ assert_eq!(a.cmp(&c), Ordering::Less);
+ assert_eq!(b.cmp(&c), Ordering::Less);
+
+ assert_eq!(a.cmp(&a), Ordering::Equal);
+ assert_eq!(b.cmp(&b), Ordering::Equal);
+ assert_eq!(c.cmp(&c), Ordering::Equal);
+
+ assert_eq!(b.cmp(&a), Ordering::Greater);
+ assert_eq!(c.cmp(&a), Ordering::Greater);
+ assert_eq!(c.cmp(&b), Ordering::Greater);
+ }
+
+ #[test]
+ fn cmp_vartime() {
+ let a = U128::ZERO;
+ let b = U128::ONE;
+ let c = U128::MAX;
+
+ assert_eq!(a.cmp_vartime(&b), Ordering::Less);
+ assert_eq!(a.cmp_vartime(&c), Ordering::Less);
+ assert_eq!(b.cmp_vartime(&c), Ordering::Less);
+
+ assert_eq!(a.cmp_vartime(&a), Ordering::Equal);
+ assert_eq!(b.cmp_vartime(&b), Ordering::Equal);
+ assert_eq!(c.cmp_vartime(&c), Ordering::Equal);
+
+ assert_eq!(b.cmp_vartime(&a), Ordering::Greater);
+ assert_eq!(c.cmp_vartime(&a), Ordering::Greater);
+ assert_eq!(c.cmp_vartime(&b), Ordering::Greater);
+ }
}
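
A standalone sketch of the borrow/difference trick behind the new `ct_cmp`, on little-endian `[u64; 4]` values: the sign comes from the final borrow of `rhs - lhs`, and the equality test from OR-ing all limb differences.

fn ct_cmp(lhs: &[u64; 4], rhs: &[u64; 4]) -> i8 {
    let mut borrow = 0u64;
    let mut diff = 0u64;
    for i in 0..4 {
        // Subtract-with-borrow chain: rhs - lhs.
        let (w, b1) = rhs[i].overflowing_sub(lhs[i]);
        let (w, b2) = w.overflowing_sub(borrow);
        borrow = (b1 | b2) as u64;
        diff |= w;
    }
    let sgn = ((borrow as i8) << 1) - 1; // borrow set (lhs > rhs) -> +1, else -1
    let nonzero = ((diff | diff.wrapping_neg()) >> 63) as i8; // 1 if any limb differed
    nonzero * sgn
}

fn main() {
    assert_eq!(ct_cmp(&[1, 0, 0, 0], &[2, 0, 0, 0]), -1);
    assert_eq!(ct_cmp(&[5, 0, 0, 0], &[5, 0, 0, 0]), 0);
    assert_eq!(ct_cmp(&[0, 1, 0, 0], &[u64::MAX, 0, 0, 0]), 1);
}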
diff --git a/vendor/crypto-bigint/src/uint/concat.rs b/vendor/crypto-bigint/src/uint/concat.rs
index 8b53401d3..dde5242a0 100644
--- a/vendor/crypto-bigint/src/uint/concat.rs
+++ b/vendor/crypto-bigint/src/uint/concat.rs
@@ -1,52 +1,39 @@
-// TODO(tarcieri): use `const_evaluatable_checked` when stable to make generic around bits.
-macro_rules! impl_concat {
- ($(($name:ident, $bits:expr)),+) => {
- $(
- impl $name {
- /// Concatenate the two values, with `self` as most significant and `rhs`
- /// as the least significant.
- pub const fn concat(&self, rhs: &Self) -> Uint<{nlimbs!($bits) * 2}> {
- let mut limbs = [Limb::ZERO; nlimbs!($bits) * 2];
- let mut i = 0;
- let mut j = 0;
+use crate::{Concat, ConcatMixed, Limb, Uint};
- while j < nlimbs!($bits) {
- limbs[i] = rhs.limbs[j];
- i += 1;
- j += 1;
- }
-
- j = 0;
- while j < nlimbs!($bits) {
- limbs[i] = self.limbs[j];
- i += 1;
- j += 1;
- }
-
- Uint { limbs }
- }
- }
+impl<T> Concat for T
+where
+ T: ConcatMixed<T>,
+{
+ type Output = Self::MixedOutput;
+}
- impl Concat for $name {
- type Output = Uint<{nlimbs!($bits) * 2}>;
+/// Concatenate the two values, with `lo` as least significant and `hi`
+/// as the most significant.
+#[inline]
+pub(crate) const fn concat_mixed<const L: usize, const H: usize, const O: usize>(
+ lo: &Uint<L>,
+ hi: &Uint<H>,
+) -> Uint<O> {
+ let top = L + H;
+ let top = if top < O { top } else { O };
+ let mut limbs = [Limb::ZERO; O];
+ let mut i = 0;
- fn concat(&self, rhs: &Self) -> Self::Output {
- self.concat(rhs)
- }
- }
+ while i < top {
+ if i < L {
+ limbs[i] = lo.limbs[i];
+ } else {
+ limbs[i] = hi.limbs[i - L];
+ }
+ i += 1;
+ }
- impl From<($name, $name)> for Uint<{nlimbs!($bits) * 2}> {
- fn from(nums: ($name, $name)) -> Uint<{nlimbs!($bits) * 2}> {
- nums.1.concat(&nums.0)
- }
- }
- )+
- };
+ Uint { limbs }
}
#[cfg(test)]
mod tests {
- use crate::{U128, U64};
+ use crate::{ConcatMixed, U128, U192, U64};
#[test]
fn concat() {
@@ -59,6 +46,20 @@ mod tests {
}
#[test]
+ fn concat_mixed() {
+ let a = U64::from_u64(0x0011223344556677);
+ let b = U128::from_u128(0x8899aabbccddeeff_8899aabbccddeeff);
+ assert_eq!(
+ a.concat_mixed(&b),
+ U192::from_be_hex("00112233445566778899aabbccddeeff8899aabbccddeeff")
+ );
+ assert_eq!(
+ b.concat_mixed(&a),
+ U192::from_be_hex("8899aabbccddeeff8899aabbccddeeff0011223344556677")
+ );
+ }
+
+ #[test]
fn convert() {
let res: U128 = U64::ONE.mul_wide(&U64::ONE).into();
assert_eq!(res, U128::ONE);
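
What the new generic `concat_mixed` does with the limb arrays, sketched standalone with const generics over plain `u64` limbs in little-endian order (`lo` fills the low limbs, `hi` the high ones):

const fn concat_limbs<const L: usize, const H: usize, const O: usize>(
    lo: &[u64; L],
    hi: &[u64; H],
) -> [u64; O] {
    // Copy at most O limbs in total.
    let top = if L + H < O { L + H } else { O };
    let mut out = [0u64; O];
    let mut i = 0;
    while i < top {
        out[i] = if i < L { lo[i] } else { hi[i - L] };
        i += 1;
    }
    out
}

fn main() {
    // U64 (lo) ++ U128 (hi) -> U192, limb-wise.
    let lo = [0x0011_2233_4455_6677u64];
    let hi = [0x8899_aabb_ccdd_eeffu64, 0x8899_aabb_ccdd_eeffu64];
    let cat: [u64; 3] = concat_limbs(&lo, &hi);
    assert_eq!(cat, [lo[0], hi[0], hi[1]]);
}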
diff --git a/vendor/crypto-bigint/src/uint/div.rs b/vendor/crypto-bigint/src/uint/div.rs
index 90741c472..7f5cda732 100644
--- a/vendor/crypto-bigint/src/uint/div.rs
+++ b/vendor/crypto-bigint/src/uint/div.rs
@@ -730,6 +730,7 @@ mod tests {
}
}
+ #[allow(clippy::op_ref)]
#[test]
fn rem_trait() {
let a = U256::from(10u64);
diff --git a/vendor/crypto-bigint/src/uint/div_limb.rs b/vendor/crypto-bigint/src/uint/div_limb.rs
index c00bc77c9..3edf80af8 100644
--- a/vendor/crypto-bigint/src/uint/div_limb.rs
+++ b/vendor/crypto-bigint/src/uint/div_limb.rs
@@ -81,7 +81,7 @@ const fn ct_select(a: u32, b: u32, c: u32) -> u32 {
a ^ (c & (a ^ b))
}
-/// Calculates `dividend / divisor` in constant time, given `dividend` and `divisor`
+/// Calculates `dividend / divisor`, given `dividend` and `divisor`
/// along with their maximum bitsizes.
#[inline(always)]
const fn short_div(dividend: u32, dividend_bits: u32, divisor: u32, divisor_bits: u32) -> u32 {
diff --git a/vendor/crypto-bigint/src/uint/encoding.rs b/vendor/crypto-bigint/src/uint/encoding.rs
index 5431b8166..42f9de183 100644
--- a/vendor/crypto-bigint/src/uint/encoding.rs
+++ b/vendor/crypto-bigint/src/uint/encoding.rs
@@ -46,18 +46,23 @@ impl<const LIMBS: usize> Uint<LIMBS> {
let mut res = [Limb::ZERO; LIMBS];
let mut buf = [0u8; Limb::BYTES];
let mut i = 0;
+ let mut err = 0;
while i < LIMBS {
let mut j = 0;
while j < Limb::BYTES {
let offset = (i * Limb::BYTES + j) * 2;
- buf[j] = decode_hex_byte([bytes[offset], bytes[offset + 1]]);
+ let (result, byte_err) = decode_hex_byte([bytes[offset], bytes[offset + 1]]);
+ err |= byte_err;
+ buf[j] = result;
j += 1;
}
res[LIMBS - i - 1] = Limb(Word::from_be_bytes(buf));
i += 1;
}
+ assert!(err == 0, "invalid hex byte");
+
Uint::new(res)
}
@@ -97,18 +102,23 @@ impl<const LIMBS: usize> Uint<LIMBS> {
let mut res = [Limb::ZERO; LIMBS];
let mut buf = [0u8; Limb::BYTES];
let mut i = 0;
+ let mut err = 0;
while i < LIMBS {
let mut j = 0;
while j < Limb::BYTES {
let offset = (i * Limb::BYTES + j) * 2;
- buf[j] = decode_hex_byte([bytes[offset], bytes[offset + 1]]);
+ let (result, byte_err) = decode_hex_byte([bytes[offset], bytes[offset + 1]]);
+ err |= byte_err;
+ buf[j] = result;
j += 1;
}
res[i] = Limb(Word::from_le_bytes(buf));
i += 1;
}
+ assert!(err == 0, "invalid hex byte");
+
Uint::new(res)
}
@@ -146,30 +156,36 @@ impl<const LIMBS: usize> Uint<LIMBS> {
}
}
-/// Decode a single byte encoded as two hexadecimal characters.
-const fn decode_hex_byte(bytes: [u8; 2]) -> u8 {
- let mut i = 0;
- let mut result = 0u8;
-
- while i < 2 {
- result <<= 4;
- result |= match bytes[i] {
- b @ b'0'..=b'9' => b - b'0',
- b @ b'a'..=b'f' => 10 + b - b'a',
- b @ b'A'..=b'F' => 10 + b - b'A',
- b => {
- assert!(
- matches!(b, b'0'..=b'9' | b'a' ..= b'f' | b'A'..=b'F'),
- "invalid hex byte"
- );
- 0
- }
- };
-
- i += 1;
- }
+/// Decode a single nibble of upper or lower hex
+#[inline(always)]
+const fn decode_nibble(src: u8) -> u16 {
+ let byte = src as i16;
+ let mut ret: i16 = -1;
+
+ // 0-9 0x30-0x39
+ // if (byte > 0x2f && byte < 0x3a) ret += byte - 0x30 + 1; // -47
+ ret += (((0x2fi16 - byte) & (byte - 0x3a)) >> 8) & (byte - 47);
+ // A-F 0x41-0x46
+ // if (byte > 0x40 && byte < 0x47) ret += byte - 0x41 + 10 + 1; // -54
+ ret += (((0x40i16 - byte) & (byte - 0x47)) >> 8) & (byte - 54);
+ // a-f 0x61-0x66
+ // if (byte > 0x60 && byte < 0x67) ret += byte - 0x61 + 10 + 1; // -86
+ ret += (((0x60i16 - byte) & (byte - 0x67)) >> 8) & (byte - 86);
+
+ ret as u16
+}
- result
+/// Decode a single byte encoded as two hexadecimal characters.
+/// Second element of the tuple is non-zero if the `bytes` values are not in the valid range
+/// (0-9, a-f, A-F).
+#[inline(always)]
+const fn decode_hex_byte(bytes: [u8; 2]) -> (u8, u16) {
+ let hi = decode_nibble(bytes[0]);
+ let lo = decode_nibble(bytes[1]);
+ let byte = (hi << 4) | lo;
+ let err = byte >> 8;
+ let result = byte as u8;
+ (result, err)
}
#[cfg(test)]
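
To see how the error bit from the branchless decoder above surfaces, here is a standalone copy of `decode_nibble` with a small check: a valid pair assembles into the low byte, while any out-of-range character leaves non-zero bits above bit 7.

const fn decode_nibble(src: u8) -> u16 {
    let byte = src as i16;
    let mut ret: i16 = -1;
    ret += (((0x2fi16 - byte) & (byte - 0x3a)) >> 8) & (byte - 47); // '0'..='9'
    ret += (((0x40i16 - byte) & (byte - 0x47)) >> 8) & (byte - 54); // 'A'..='F'
    ret += (((0x60i16 - byte) & (byte - 0x67)) >> 8) & (byte - 86); // 'a'..='f'
    ret as u16
}

fn main() {
    // Valid pair: no error bits above bit 7.
    let byte = (decode_nibble(b'a') << 4) | decode_nibble(b'f');
    assert_eq!(byte >> 8, 0);
    assert_eq!(byte as u8, 0xaf);
    // Invalid character: decode_nibble returns 0xffff, so the error check fires.
    let bad = (decode_nibble(b'g') << 4) | decode_nibble(b'0');
    assert_ne!(bad >> 8, 0);
}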
diff --git a/vendor/crypto-bigint/src/uint/encoding/rlp.rs b/vendor/crypto-bigint/src/uint/encoding/rlp.rs
index 0f85bc350..112efb1eb 100644
--- a/vendor/crypto-bigint/src/uint/encoding/rlp.rs
+++ b/vendor/crypto-bigint/src/uint/encoding/rlp.rs
@@ -44,6 +44,7 @@ where
}
#[cfg(test)]
+#[allow(clippy::unwrap_used)]
mod tests {
use crate::U256;
use hex_literal::hex;
diff --git a/vendor/crypto-bigint/src/uint/extra_sizes.rs b/vendor/crypto-bigint/src/uint/extra_sizes.rs
new file mode 100644
index 000000000..fb639c713
--- /dev/null
+++ b/vendor/crypto-bigint/src/uint/extra_sizes.rs
@@ -0,0 +1,160 @@
+//! Support for additional integer sizes beyond the core set which is defined
+//! in the toplevel module.
+//!
+//! These are feature-gated to keep compile times down for applications which
+//! do not need them.
+// TODO(tarcieri): switch to a fully const generic implementation using `generic_const_exprs`
+
+use super::*;
+
+impl_uint_aliases! {
+ (U1088, 1088, "1088-bit"),
+ (U1152, 1152, "1152-bit"),
+ (U1216, 1216, "1216-bit"),
+ (U1344, 1344, "1344-bit"),
+ (U1408, 1408, "1408-bit"),
+ (U1472, 1472, "1472-bit"),
+ (U1600, 1600, "1600-bit"),
+ (U1664, 1664, "1664-bit"),
+ (U1728, 1728, "1728-bit"),
+ (U1856, 1856, "1856-bit"),
+ (U1920, 1920, "1920-bit"),
+ (U1984, 1984, "1984-bit"),
+ (U2112, 2112, "2112-bit"),
+ (U2176, 2176, "2176-bit"),
+ (U2240, 2240, "2240-bit"),
+ (U2304, 2304, "2304-bit"),
+ (U2368, 2368, "2368-bit"),
+ (U2432, 2432, "2432-bit"),
+ (U2496, 2496, "2496-bit"),
+ (U2560, 2560, "2560-bit"),
+ (U2624, 2624, "2624-bit"),
+ (U2688, 2688, "2688-bit"),
+ (U2752, 2752, "2752-bit"),
+ (U2816, 2816, "2816-bit"),
+ (U2880, 2880, "2880-bit"),
+ (U2944, 2944, "2944-bit"),
+ (U3008, 3008, "3008-bit"),
+ (U3136, 3136, "3136-bit"),
+ (U3200, 3200, "3200-bit"),
+ (U3264, 3264, "3264-bit"),
+ (U3328, 3328, "3328-bit"),
+ (U3392, 3392, "3392-bit"),
+ (U3456, 3456, "3456-bit"),
+ (U3520, 3520, "3520-bit"),
+ (U3648, 3648, "3648-bit"),
+ (U3712, 3712, "3712-bit"),
+ (U3776, 3776, "3776-bit"),
+ (U3840, 3840, "3840-bit"),
+ (U3904, 3904, "3904-bit"),
+ (U3968, 3968, "3968-bit"),
+ (U4032, 4032, "4032-bit"),
+ (U4160, 4160, "4160-bit"),
+ (U4288, 4288, "4288-bit"),
+ (U4416, 4416, "4416-bit"),
+ (U4480, 4480, "4480-bit"),
+ (U4544, 4544, "4544-bit"),
+ (U4608, 4608, "4608-bit"),
+ (U4672, 4672, "4672-bit"),
+ (U4736, 4736, "4736-bit"),
+ (U4800, 4800, "4800-bit"),
+ (U4864, 4864, "4864-bit"),
+ (U4928, 4928, "4928-bit"),
+ (U4992, 4992, "4992-bit"),
+ (U5056, 5056, "5056-bit"),
+ (U5120, 5120, "5120-bit"),
+ (U5184, 5184, "5184-bit"),
+ (U5248, 5248, "5248-bit"),
+ (U5312, 5312, "5312-bit"),
+ (U5376, 5376, "5376-bit"),
+ (U5440, 5440, "5440-bit"),
+ (U5504, 5504, "5504-bit"),
+ (U5568, 5568, "5568-bit"),
+ (U5632, 5632, "5632-bit"),
+ (U5696, 5696, "5696-bit"),
+ (U5760, 5760, "5760-bit"),
+ (U5824, 5824, "5824-bit"),
+ (U5888, 5888, "5888-bit"),
+ (U5952, 5952, "5952-bit"),
+ (U6016, 6016, "6016-bit"),
+ (U6080, 6080, "6080-bit"),
+ (U6208, 6208, "6208-bit"),
+ (U6272, 6272, "6272-bit"),
+ (U6336, 6336, "6336-bit"),
+ (U6400, 6400, "6400-bit"),
+ (U6464, 6464, "6464-bit"),
+ (U6528, 6528, "6528-bit"),
+ (U6592, 6592, "6592-bit"),
+ (U6656, 6656, "6656-bit"),
+ (U6720, 6720, "6720-bit"),
+ (U6784, 6784, "6784-bit"),
+ (U6848, 6848, "6848-bit"),
+ (U6912, 6912, "6912-bit"),
+ (U6976, 6976, "6976-bit"),
+ (U7040, 7040, "7040-bit"),
+ (U7104, 7104, "7104-bit"),
+ (U7168, 7168, "7168-bit"),
+ (U7232, 7232, "7232-bit"),
+ (U7296, 7296, "7296-bit"),
+ (U7360, 7360, "7360-bit"),
+ (U7424, 7424, "7424-bit"),
+ (U7488, 7488, "7488-bit"),
+ (U7552, 7552, "7552-bit"),
+ (U7616, 7616, "7616-bit"),
+ (U7680, 7680, "7680-bit"),
+ (U7744, 7744, "7744-bit"),
+ (U7808, 7808, "7808-bit"),
+ (U7872, 7872, "7872-bit"),
+ (U7936, 7936, "7936-bit"),
+ (U8000, 8000, "8000-bit"),
+ (U8064, 8064, "8064-bit"),
+ (U8128, 8128, "8128-bit")
+}
+
+impl_uint_concat_split_even! {
+ U1152,
+ U1408,
+ U1664,
+ U1920,
+ U2176,
+ U2304,
+ U2432,
+ U2560,
+ U2688,
+ U2816,
+ U2944,
+ U3200,
+ U3328,
+ U3456,
+ U3712,
+ U3840,
+ U3968,
+ U4480,
+ U4608,
+ U4736,
+ U4864,
+ U4992,
+ U5120,
+ U5248,
+ U5376,
+ U5504,
+ U5632,
+ U5760,
+ U5888,
+ U6016,
+ U6272,
+ U6400,
+ U6528,
+ U6656,
+ U6784,
+ U6912,
+ U7040,
+ U7168,
+ U7296,
+ U7424,
+ U7552,
+ U7680,
+ U7808,
+ U7936,
+ U8064,
+}
diff --git a/vendor/crypto-bigint/src/uint/from.rs b/vendor/crypto-bigint/src/uint/from.rs
index ac02f2ef2..0f1bfbca7 100644
--- a/vendor/crypto-bigint/src/uint/from.rs
+++ b/vendor/crypto-bigint/src/uint/from.rs
@@ -1,6 +1,6 @@
//! `From`-like conversions for [`Uint`].
-use crate::{Limb, Uint, WideWord, Word, U128, U64};
+use crate::{ConcatMixed, Limb, Uint, WideWord, Word, U128, U64};
impl<const LIMBS: usize> Uint<LIMBS> {
/// Create a [`Uint`] from a `u8` (const-friendly)
@@ -156,8 +156,13 @@ impl From<U64> for u64 {
impl From<U128> for u128 {
fn from(n: U128) -> u128 {
- let (hi, lo) = n.split();
- (u64::from(hi) as u128) << 64 | (u64::from(lo) as u128)
+ let mut i = U128::LIMBS - 1;
+ let mut res = n.limbs[i].0 as u128;
+ while i > 0 {
+ i -= 1;
+ res = (res << Limb::BITS) | (n.limbs[i].0 as u128);
+ }
+ res
}
}
@@ -191,6 +196,36 @@ impl<const LIMBS: usize> From<Limb> for Uint<LIMBS> {
}
}
+impl<const L: usize, const H: usize, const LIMBS: usize> From<(Uint<L>, Uint<H>)> for Uint<LIMBS>
+where
+ Uint<H>: ConcatMixed<Uint<L>, MixedOutput = Uint<LIMBS>>,
+{
+ fn from(nums: (Uint<L>, Uint<H>)) -> Uint<LIMBS> {
+ nums.1.concat_mixed(&nums.0)
+ }
+}
+
+impl<const L: usize, const H: usize, const LIMBS: usize> From<&(Uint<L>, Uint<H>)> for Uint<LIMBS>
+where
+ Uint<H>: ConcatMixed<Uint<L>, MixedOutput = Uint<LIMBS>>,
+{
+ fn from(nums: &(Uint<L>, Uint<H>)) -> Uint<LIMBS> {
+ nums.1.concat_mixed(&nums.0)
+ }
+}
+
+impl<const L: usize, const H: usize, const LIMBS: usize> From<Uint<LIMBS>> for (Uint<L>, Uint<H>) {
+ fn from(num: Uint<LIMBS>) -> (Uint<L>, Uint<H>) {
+ crate::uint::split::split_mixed(&num)
+ }
+}
+
+impl<const LIMBS: usize, const LIMBS2: usize> From<&Uint<LIMBS>> for Uint<LIMBS2> {
+ fn from(num: &Uint<LIMBS>) -> Uint<LIMBS2> {
+ num.resize()
+ }
+}
+
#[cfg(test)]
mod tests {
use crate::{Limb, Word, U128};
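
The revised `From<U128> for u128` simply folds the limbs from most to least significant; a standalone sketch of the same loop for a little-endian `[u64; 2]`:

fn limbs_to_u128(limbs: &[u64; 2]) -> u128 {
    let mut i = limbs.len() - 1;
    let mut res = limbs[i] as u128;
    while i > 0 {
        i -= 1;
        res = (res << 64) | (limbs[i] as u128);
    }
    res
}

fn main() {
    assert_eq!(limbs_to_u128(&[0x2222, 0x1111]), (0x1111u128 << 64) | 0x2222);
}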
diff --git a/vendor/crypto-bigint/src/uint/inv_mod.rs b/vendor/crypto-bigint/src/uint/inv_mod.rs
index ef3c161f5..e41dc66b5 100644
--- a/vendor/crypto-bigint/src/uint/inv_mod.rs
+++ b/vendor/crypto-bigint/src/uint/inv_mod.rs
@@ -1,28 +1,68 @@
use super::Uint;
-use crate::{CtChoice, Limb};
+use crate::CtChoice;
impl<const LIMBS: usize> Uint<LIMBS> {
- /// Computes 1/`self` mod 2^k as specified in Algorithm 4 from
- /// A Secure Algorithm for Inversion Modulo 2k by
- /// Sadiel de la Fe and Carles Ferrer. See
- /// <https://www.mdpi.com/2410-387X/2/3/23>.
+ /// Computes 1/`self` mod `2^k`.
+ /// This method is constant-time w.r.t. `self` but not `k`.
///
/// Conditions: `self` < 2^k and `self` must be odd
- pub const fn inv_mod2k(&self, k: usize) -> Self {
- let mut x = Self::ZERO;
- let mut b = Self::ONE;
+ pub const fn inv_mod2k_vartime(&self, k: usize) -> Self {
+ // Using the Algorithm 3 from "A Secure Algorithm for Inversion Modulo 2k"
+ // by Sadiel de la Fe and Carles Ferrer.
+ // See <https://www.mdpi.com/2410-387X/2/3/23>.
+
+ // Note that we are not using Algorithm 4, since we have a different approach
+ // of enforcing constant-timeness w.r.t. `self`.
+
+ let mut x = Self::ZERO; // keeps `x` during iterations
+ let mut b = Self::ONE; // keeps `b_i` during iterations
let mut i = 0;
while i < k {
- let mut x_i = Self::ZERO;
- let j = b.limbs[0].0 & 1;
- x_i.limbs[0] = Limb(j);
- x = x.bitor(&x_i.shl_vartime(i));
+ // X_i = b_i mod 2
+ let x_i = b.limbs[0].0 & 1;
+ let x_i_choice = CtChoice::from_lsb(x_i);
+ // b_{i+1} = (b_i - a * X_i) / 2
+ b = Self::ct_select(&b, &b.wrapping_sub(self), x_i_choice).shr_vartime(1);
+ // Store the X_i bit in the result (x = x | (1 << X_i))
+ x = x.bitor(&Uint::from_word(x_i).shl_vartime(i));
+
+ i += 1;
+ }
+
+ x
+ }
+
+ /// Computes 1/`self` mod `2^k`.
+ ///
+ /// Conditions: `self` < 2^k and `self` must be odd
+ pub const fn inv_mod2k(&self, k: usize) -> Self {
+ // This is the same algorithm as in `inv_mod2k_vartime()`,
+ // but made constant-time w.r.t `k` as well.
+
+ let mut x = Self::ZERO; // keeps `x` during iterations
+ let mut b = Self::ONE; // keeps `b_i` during iterations
+ let mut i = 0;
+
+ while i < Self::BITS {
+ // Only iterations for i = 0..k need to change `x`,
+ // the rest are dummy ones performed for the sake of constant-timeness.
+ let within_range = CtChoice::from_usize_lt(i, k);
+
+ // X_i = b_i mod 2
+ let x_i = b.limbs[0].0 & 1;
+ let x_i_choice = CtChoice::from_lsb(x_i);
+ // b_{i+1} = (b_i - a * X_i) / 2
+ b = Self::ct_select(&b, &b.wrapping_sub(self), x_i_choice).shr_vartime(1);
+
+ // Store the X_i bit in the result (x = x | (1 << X_i))
+ // Don't change the result in dummy iterations.
+ let x_i_choice = x_i_choice.and(within_range);
+ x = x.set_bit(i, x_i_choice);
- let t = b.wrapping_sub(self);
- b = Self::ct_select(&b, &t, CtChoice::from_lsb(j)).shr_vartime(1);
i += 1;
}
+
x
}
@@ -97,10 +137,45 @@ impl<const LIMBS: usize> Uint<LIMBS> {
}
/// Computes the multiplicative inverse of `self` mod `modulus`, where `modulus` is odd.
- /// Returns `(inverse, Word::MAX)` if an inverse exists, otherwise `(undefined, Word::ZERO)`.
+ /// Returns `(inverse, CtChoice::TRUE)` if an inverse exists,
+ /// otherwise `(undefined, CtChoice::FALSE)`.
pub const fn inv_odd_mod(&self, modulus: &Self) -> (Self, CtChoice) {
self.inv_odd_mod_bounded(modulus, Uint::<LIMBS>::BITS, Uint::<LIMBS>::BITS)
}
+
+ /// Computes the multiplicative inverse of `self` mod `modulus`.
+ /// Returns `(inverse, CtChoice::TRUE)` if an inverse exists,
+ /// otherwise `(undefined, CtChoice::FALSE)`.
+ pub const fn inv_mod(&self, modulus: &Self) -> (Self, CtChoice) {
+ // Decompose `modulus = s * 2^k` where `s` is odd
+ let k = modulus.trailing_zeros();
+ let s = modulus.shr(k);
+
+ // Decompose `self` into RNS with moduli `2^k` and `s` and calculate the inverses.
+ // Using the fact that `(z^{-1} mod (m1 * m2)) mod m1 == z^{-1} mod m1`
+ let (a, a_is_some) = self.inv_odd_mod(&s);
+ let b = self.inv_mod2k(k);
+ // inverse modulo 2^k exists either if `k` is 0 or if `self` is odd.
+ let b_is_some = CtChoice::from_usize_being_nonzero(k)
+ .not()
+ .or(self.ct_is_odd());
+
+ // Restore from RNS:
+ // self^{-1} = a mod s = b mod 2^k
+ // => self^{-1} = a + s * ((b - a) * s^(-1) mod 2^k)
+ // (essentially one step of the Garner's algorithm for recovery from RNS).
+
+ let m_odd_inv = s.inv_mod2k(k); // `s` is odd, so this always exists
+
+ // This part is mod 2^k
+ let mask = Uint::ONE.shl(k).wrapping_sub(&Uint::ONE);
+ let t = (b.wrapping_sub(&a).wrapping_mul(&m_odd_inv)).bitand(&mask);
+
+ // Will not overflow since `a <= s - 1`, `t <= 2^k - 1`,
+ // so `a + s * t <= s * 2^k - 1 == modulus - 1`.
+ let result = a.wrapping_add(&s.wrapping_mul(&t));
+ (result, a_is_some.and(b_is_some))
+ }
}
#[cfg(test)]
@@ -125,7 +200,7 @@ mod tests {
}
#[test]
- fn test_invert() {
+ fn test_invert_odd() {
let a = U1024::from_be_hex(concat![
"000225E99153B467A5B451979A3F451DAEF3BF8D6C6521D2FA24BBB17F29544E",
"347A412B065B75A351EA9719E2430D2477B11CC9CF9C1AD6EDEE26CB15F463F8",
@@ -138,15 +213,45 @@ mod tests {
"D198D3155E5799DC4EA76652D64983A7E130B5EACEBAC768D28D589C36EC749C",
"558D0B64E37CD0775C0D0104AE7D98BA23C815185DD43CD8B16292FD94156767"
]);
-
- let (res, is_some) = a.inv_odd_mod(&m);
-
let expected = U1024::from_be_hex(concat![
"B03623284B0EBABCABD5C5881893320281460C0A8E7BF4BFDCFFCBCCBF436A55",
"D364235C8171E46C7D21AAD0680676E57274A8FDA6D12768EF961CACDD2DAE57",
"88D93DA5EB8EDC391EE3726CDCF4613C539F7D23E8702200CB31B5ED5B06E5CA",
"3E520968399B4017BF98A864FABA2B647EFC4998B56774D4F2CB026BC024A336"
]);
+
+ let (res, is_some) = a.inv_odd_mod(&m);
+ assert!(is_some.is_true_vartime());
+ assert_eq!(res, expected);
+
+ // Even though it is less efficient, it still works
+ let (res, is_some) = a.inv_mod(&m);
+ assert!(is_some.is_true_vartime());
+ assert_eq!(res, expected);
+ }
+
+ #[test]
+ fn test_invert_even() {
+ let a = U1024::from_be_hex(concat![
+ "000225E99153B467A5B451979A3F451DAEF3BF8D6C6521D2FA24BBB17F29544E",
+ "347A412B065B75A351EA9719E2430D2477B11CC9CF9C1AD6EDEE26CB15F463F8",
+ "BCC72EF87EA30288E95A48AA792226CEC959DCB0672D8F9D80A54CBBEA85CAD8",
+ "382EC224DEB2F5784E62D0CC2F81C2E6AD14EBABE646D6764B30C32B87688985"
+ ]);
+ let m = U1024::from_be_hex(concat![
+ "D509E7854ABDC81921F669F1DC6F61359523F3949803E58ED4EA8BC16483DC6F",
+ "37BFE27A9AC9EEA2969B357ABC5C0EE214BE16A7D4C58FC620D5B5A20AFF001A",
+ "D198D3155E5799DC4EA76652D64983A7E130B5EACEBAC768D28D589C36EC749C",
+ "558D0B64E37CD0775C0D0104AE7D98BA23C815185DD43CD8B16292FD94156000"
+ ]);
+ let expected = U1024::from_be_hex(concat![
+ "1EBF391306817E1BC610E213F4453AD70911CCBD59A901B2A468A4FC1D64F357",
+ "DBFC6381EC5635CAA664DF280028AF4651482C77A143DF38D6BFD4D64B6C0225",
+ "FC0E199B15A64966FB26D88A86AD144271F6BDCD3D63193AB2B3CC53B99F21A3",
+ "5B9BFAE5D43C6BC6E7A9856C71C7318C76530E9E5AE35882D5ABB02F1696874D",
+ ]);
+
+ let (res, is_some) = a.inv_mod(&m);
assert!(is_some.is_true_vartime());
assert_eq!(res, expected);
}
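
A standalone sketch of the bit-at-a-time inversion used by `inv_mod2k` / `inv_mod2k_vartime`, on a single `u64` word: each round takes the low bit of `b` as the next bit of the inverse and halves the residual.

fn inv_mod2k_u64(a: u64, k: u32) -> u64 {
    debug_assert!((a & 1) == 1, "a must be odd");
    let mut x = 0u64; // accumulates the inverse, bit by bit
    let mut b = 1u64; // b_i from the paper
    for i in 0..k {
        let x_i = b & 1;                      // next bit of the inverse
        b = b.wrapping_sub(a * x_i) >> 1;     // b_{i+1} = (b_i - a*X_i) / 2
        x |= x_i << i;
    }
    x
}

fn main() {
    let a = 0x1234_5678_9abc_def1u64; // odd
    let inv = inv_mod2k_u64(a, 64);
    assert_eq!(a.wrapping_mul(inv), 1); // a * a^-1 == 1 (mod 2^64)
}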
diff --git a/vendor/crypto-bigint/src/uint/macros.rs b/vendor/crypto-bigint/src/uint/macros.rs
new file mode 100644
index 000000000..47d32e79d
--- /dev/null
+++ b/vendor/crypto-bigint/src/uint/macros.rs
@@ -0,0 +1,115 @@
+// TODO(tarcieri): use `generic_const_exprs` when stable to make generic around bits.
+macro_rules! impl_uint_aliases {
+ ($(($name:ident, $bits:expr, $doc:expr)),+) => {
+ $(
+ #[doc = $doc]
+ #[doc="unsigned big integer."]
+ pub type $name = Uint<{nlimbs!($bits)}>;
+
+ impl Encoding for $name {
+
+ type Repr = [u8; $bits / 8];
+
+ #[inline]
+ fn from_be_bytes(bytes: Self::Repr) -> Self {
+ Self::from_be_slice(&bytes)
+ }
+
+ #[inline]
+ fn from_le_bytes(bytes: Self::Repr) -> Self {
+ Self::from_le_slice(&bytes)
+ }
+
+ #[inline]
+ fn to_be_bytes(&self) -> Self::Repr {
+ let mut result = [0u8; $bits / 8];
+ self.write_be_bytes(&mut result);
+ result
+ }
+
+ #[inline]
+ fn to_le_bytes(&self) -> Self::Repr {
+ let mut result = [0u8; $bits / 8];
+ self.write_le_bytes(&mut result);
+ result
+ }
+ }
+ )+
+ };
+}
+
+macro_rules! impl_uint_concat_split_mixed {
+ ($name:ident, $size:literal) => {
+ impl $crate::traits::ConcatMixed<Uint<{ U64::LIMBS * $size }>> for Uint<{ <$name>::LIMBS - U64::LIMBS * $size }>
+ {
+ type MixedOutput = $name;
+
+ fn concat_mixed(&self, lo: &Uint<{ U64::LIMBS * $size }>) -> Self::MixedOutput {
+ $crate::uint::concat::concat_mixed(lo, self)
+ }
+ }
+
+ impl $crate::traits::SplitMixed<Uint<{ U64::LIMBS * $size }>, Uint<{ <$name>::LIMBS - U64::LIMBS * $size }>> for $name
+ {
+ fn split_mixed(&self) -> (Uint<{ U64::LIMBS * $size }>, Uint<{ <$name>::LIMBS - U64::LIMBS * $size }>) {
+ $crate::uint::split::split_mixed(self)
+ }
+ }
+ };
+ ($name:ident, [ $($size:literal),+ ]) => {
+ $(
+ impl_uint_concat_split_mixed!($name, $size);
+ )+
+ };
+ ($( ($name:ident, $sizes:tt), )+) => {
+ $(
+ impl_uint_concat_split_mixed!($name, $sizes);
+ )+
+ };
+}
+
+macro_rules! impl_uint_concat_split_even {
+ ($name:ident) => {
+ impl $crate::traits::ConcatMixed<Uint<{ <$name>::LIMBS / 2 }>> for Uint<{ <$name>::LIMBS / 2 }>
+ {
+ type MixedOutput = $name;
+
+ fn concat_mixed(&self, lo: &Uint<{ <$name>::LIMBS / 2 }>) -> Self::MixedOutput {
+ $crate::uint::concat::concat_mixed(lo, self)
+ }
+ }
+
+ impl Uint<{ <$name>::LIMBS / 2 }> {
+ /// Concatenate the two values, with `self` as most significant and `rhs`
+ /// as the least significant.
+ pub const fn concat(&self, lo: &Uint<{ <$name>::LIMBS / 2 }>) -> $name {
+ $crate::uint::concat::concat_mixed(lo, self)
+ }
+ }
+
+ impl $crate::traits::SplitMixed<Uint<{ <$name>::LIMBS / 2 }>, Uint<{ <$name>::LIMBS / 2 }>> for $name
+ {
+ fn split_mixed(&self) -> (Uint<{ <$name>::LIMBS / 2 }>, Uint<{ <$name>::LIMBS / 2 }>) {
+ $crate::uint::split::split_mixed(self)
+ }
+ }
+
+ impl $crate::traits::Split for $name
+ {
+ type Output = Uint<{ <$name>::LIMBS / 2 }>;
+ }
+
+ impl $name {
+ /// Split this number in half, returning its high and low components
+ /// respectively.
+ pub const fn split(&self) -> (Uint<{ <$name>::LIMBS / 2 }>, Uint<{ <$name>::LIMBS / 2 }>) {
+ $crate::uint::split::split_mixed(self)
+ }
+ }
+ };
+ ($($name:ident,)+) => {
+ $(
+ impl_uint_concat_split_even!($name);
+ )+
+ }
+}
diff --git a/vendor/crypto-bigint/src/uint/modular/constant_mod.rs b/vendor/crypto-bigint/src/uint/modular/constant_mod.rs
index 3e9b522ef..bb4995501 100644
--- a/vendor/crypto-bigint/src/uint/modular/constant_mod.rs
+++ b/vendor/crypto-bigint/src/uint/modular/constant_mod.rs
@@ -1,6 +1,6 @@
use core::{fmt::Debug, marker::PhantomData};
-use subtle::{Choice, ConditionallySelectable, ConstantTimeEq};
+use subtle::{Choice, ConditionallySelectable, ConstantTimeEq, CtOption};
use crate::{Limb, Uint, Zero};
@@ -59,6 +59,7 @@ pub trait ResidueParams<const LIMBS: usize>:
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
/// A residue mod `MOD`, represented using `LIMBS` limbs. The modulus of this residue is constant, so it cannot be set at runtime.
+/// Internally, the value is stored in Montgomery form (multiplied by MOD::R) until it is retrieved.
pub struct Residue<MOD, const LIMBS: usize>
where
MOD: ResidueParams<LIMBS>,
@@ -86,8 +87,8 @@ impl<MOD: ResidueParams<LIMBS>, const LIMBS: usize> Residue<MOD, LIMBS> {
phantom: PhantomData,
};
- /// Instantiates a new `Residue` that represents this `integer` mod `MOD`.
- pub const fn new(integer: &Uint<LIMBS>) -> Self {
+ // Internal helper function to generate a residue; this lets us wrap the constructors more cleanly
+ const fn generate_residue(integer: &Uint<LIMBS>) -> Self {
let product = integer.mul_wide(&MOD::R2);
let montgomery_form =
montgomery_reduction::<LIMBS>(&product, &MOD::MODULUS, MOD::MOD_NEG_INV);
@@ -98,6 +99,29 @@ impl<MOD: ResidueParams<LIMBS>, const LIMBS: usize> Residue<MOD, LIMBS> {
}
}
+ /// Instantiates a new `Residue` that represents this `integer` mod `MOD`.
+ /// If the modulus represented by `MOD` is not odd, this function will panic; use [`new_checked`][`Residue::new_checked`] if you want to be able to detect an invalid modulus.
+ pub const fn new(integer: &Uint<LIMBS>) -> Self {
+ // A valid modulus must be odd
+ if MOD::MODULUS.ct_is_odd().to_u8() == 0 {
+ panic!("modulus must be odd");
+ }
+
+ Self::generate_residue(integer)
+ }
+
+ /// Instantiates a new `Residue` that represents this `integer` mod `MOD` if the modulus is odd.
+ /// Returns a `CtOption` that is `None` if the provided modulus is not odd; this is a safer version of [`new`][`Residue::new`], which can panic.
+ // TODO: remove this method when we can use `generic_const_exprs.` to ensure the modulus is
+ // always valid.
+ pub fn new_checked(integer: &Uint<LIMBS>) -> CtOption<Self> {
+ // A valid modulus must be odd.
+ CtOption::new(
+ Self::generate_residue(integer),
+ MOD::MODULUS.ct_is_odd().into(),
+ )
+ }
+
/// Retrieves the integer currently encoded in this `Residue`, guaranteed to be reduced.
pub const fn retrieve(&self) -> Uint<LIMBS> {
montgomery_reduction::<LIMBS>(
@@ -107,6 +131,29 @@ impl<MOD: ResidueParams<LIMBS>, const LIMBS: usize> Residue<MOD, LIMBS> {
)
}
+ /// Access the `Residue` value in Montgomery form.
+ pub const fn as_montgomery(&self) -> &Uint<LIMBS> {
+ &self.montgomery_form
+ }
+
+ /// Mutably access the `Residue` value in Montgomery form.
+ pub fn as_montgomery_mut(&mut self) -> &mut Uint<LIMBS> {
+ &mut self.montgomery_form
+ }
+
+ /// Create a `Residue` from a value in Montgomery form.
+ pub const fn from_montgomery(integer: Uint<LIMBS>) -> Self {
+ Self {
+ montgomery_form: integer,
+ phantom: PhantomData,
+ }
+ }
+
+ /// Extract the value from the `Residue` in Montgomery form.
+ pub const fn to_montgomery(&self) -> Uint<LIMBS> {
+ self.montgomery_form
+ }
+
/// Performs the modular division by 2, that is for given `x` returns `y`
/// such that `y * 2 = x mod p`. This means:
/// - if `x` is even, returns `x / 2`,
diff --git a/vendor/crypto-bigint/src/uint/modular/constant_mod/const_inv.rs b/vendor/crypto-bigint/src/uint/modular/constant_mod/const_inv.rs
index 5c8da7b88..28f0622e2 100644
--- a/vendor/crypto-bigint/src/uint/modular/constant_mod/const_inv.rs
+++ b/vendor/crypto-bigint/src/uint/modular/constant_mod/const_inv.rs
@@ -62,7 +62,7 @@ mod tests {
let x_mod = const_residue!(x, Modulus);
let (inv, _is_some) = x_mod.invert();
- let res = &x_mod * &inv;
+ let res = x_mod * inv;
assert_eq!(res.retrieve(), U256::ONE);
}
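
What "stored in Montgomery form (multiplied by MOD::R)" means for the `Residue` values above, sketched standalone for a single `u64` limb with R = 2^64. The modulus is an arbitrary odd example below 2^63 so the `u128` intermediate cannot overflow; the crate's multi-limb reduction follows the same recipe.

/// One-limb Montgomery reduction: returns t * R^-1 mod n for t < n * R.
fn mont_redc(t: u128, n: u64, neg_inv: u64) -> u64 {
    let m = (t as u64).wrapping_mul(neg_inv); // m = t * (-n^-1) mod R
    let r = ((t + (m as u128) * (n as u128)) >> 64) as u64;
    if r >= n { r - n } else { r }
}

/// -n^-1 mod 2^64 for odd n, via Newton-Hensel iteration.
fn mont_neg_inv(n: u64) -> u64 {
    let mut inv = 1u64;
    for _ in 0..6 {
        inv = inv.wrapping_mul(2u64.wrapping_sub(n.wrapping_mul(inv)));
    }
    inv.wrapping_neg()
}

fn main() {
    let n: u64 = 0x7fff_ffff_ffff_ffe7; // odd example modulus, < 2^63
    let neg_inv = mont_neg_inv(n);
    let r_mod_n = (u64::MAX % n) + 1;                                   // R mod n
    let r2 = ((r_mod_n as u128 * r_mod_n as u128) % n as u128) as u64;  // R^2 mod n

    let x = 12345u64;
    let x_mont = mont_redc(x as u128 * r2 as u128, n, neg_inv); // to Montgomery form: x*R mod n
    let back = mont_redc(x_mont as u128, n, neg_inv);           // retrieve: back to x
    assert_eq!(back, x);
}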
diff --git a/vendor/crypto-bigint/src/uint/modular/constant_mod/const_pow.rs b/vendor/crypto-bigint/src/uint/modular/constant_mod/const_pow.rs
index 60455b05b..803eac69c 100644
--- a/vendor/crypto-bigint/src/uint/modular/constant_mod/const_pow.rs
+++ b/vendor/crypto-bigint/src/uint/modular/constant_mod/const_pow.rs
@@ -1,11 +1,19 @@
-use crate::{modular::pow::pow_montgomery_form, PowBoundedExp, Uint};
+use crate::{modular::pow::pow_montgomery_form, MultiExponentiateBoundedExp, PowBoundedExp, Uint};
use super::{Residue, ResidueParams};
+use crate::modular::pow::multi_exponentiate_montgomery_form_array;
+#[cfg(feature = "alloc")]
+use crate::modular::pow::multi_exponentiate_montgomery_form_slice;
+#[cfg(feature = "alloc")]
+use alloc::vec::Vec;
impl<MOD: ResidueParams<LIMBS>, const LIMBS: usize> Residue<MOD, LIMBS> {
/// Raises to the `exponent` power.
- pub const fn pow(&self, exponent: &Uint<LIMBS>) -> Residue<MOD, LIMBS> {
- self.pow_bounded_exp(exponent, Uint::<LIMBS>::BITS)
+ pub const fn pow<const RHS_LIMBS: usize>(
+ &self,
+ exponent: &Uint<RHS_LIMBS>,
+ ) -> Residue<MOD, LIMBS> {
+ self.pow_bounded_exp(exponent, Uint::<RHS_LIMBS>::BITS)
}
/// Raises to the `exponent` power,
@@ -13,9 +21,9 @@ impl<MOD: ResidueParams<LIMBS>, const LIMBS: usize> Residue<MOD, LIMBS> {
/// to take into account for the exponent.
///
/// NOTE: `exponent_bits` may be leaked in the time pattern.
- pub const fn pow_bounded_exp(
+ pub const fn pow_bounded_exp<const RHS_LIMBS: usize>(
&self,
- exponent: &Uint<LIMBS>,
+ exponent: &Uint<RHS_LIMBS>,
exponent_bits: usize,
) -> Residue<MOD, LIMBS> {
Self {
@@ -32,16 +40,74 @@ impl<MOD: ResidueParams<LIMBS>, const LIMBS: usize> Residue<MOD, LIMBS> {
}
}
-impl<MOD: ResidueParams<LIMBS>, const LIMBS: usize> PowBoundedExp<Uint<LIMBS>>
- for Residue<MOD, LIMBS>
+impl<MOD: ResidueParams<LIMBS>, const LIMBS: usize, const RHS_LIMBS: usize>
+ PowBoundedExp<Uint<RHS_LIMBS>> for Residue<MOD, LIMBS>
{
- fn pow_bounded_exp(&self, exponent: &Uint<LIMBS>, exponent_bits: usize) -> Self {
+ fn pow_bounded_exp(&self, exponent: &Uint<RHS_LIMBS>, exponent_bits: usize) -> Self {
self.pow_bounded_exp(exponent, exponent_bits)
}
}
+impl<const N: usize, MOD: ResidueParams<LIMBS>, const LIMBS: usize, const RHS_LIMBS: usize>
+ MultiExponentiateBoundedExp<Uint<RHS_LIMBS>, [(Self, Uint<RHS_LIMBS>); N]>
+ for Residue<MOD, LIMBS>
+{
+ fn multi_exponentiate_bounded_exp(
+ bases_and_exponents: &[(Self, Uint<RHS_LIMBS>); N],
+ exponent_bits: usize,
+ ) -> Self {
+ let mut bases_and_exponents_montgomery_form =
+ [(Uint::<LIMBS>::ZERO, Uint::<RHS_LIMBS>::ZERO); N];
+
+ let mut i = 0;
+ while i < N {
+ let (base, exponent) = bases_and_exponents[i];
+ bases_and_exponents_montgomery_form[i] = (base.montgomery_form, exponent);
+ i += 1;
+ }
+
+ Self {
+ montgomery_form: multi_exponentiate_montgomery_form_array(
+ &bases_and_exponents_montgomery_form,
+ exponent_bits,
+ &MOD::MODULUS,
+ &MOD::R,
+ MOD::MOD_NEG_INV,
+ ),
+ phantom: core::marker::PhantomData,
+ }
+ }
+}
+
+#[cfg(feature = "alloc")]
+impl<MOD: ResidueParams<LIMBS>, const LIMBS: usize, const RHS_LIMBS: usize>
+ MultiExponentiateBoundedExp<Uint<RHS_LIMBS>, [(Self, Uint<RHS_LIMBS>)]>
+ for Residue<MOD, LIMBS>
+{
+ fn multi_exponentiate_bounded_exp(
+ bases_and_exponents: &[(Self, Uint<RHS_LIMBS>)],
+ exponent_bits: usize,
+ ) -> Self {
+ let bases_and_exponents: Vec<(Uint<LIMBS>, Uint<RHS_LIMBS>)> = bases_and_exponents
+ .iter()
+ .map(|(base, exp)| (base.montgomery_form, *exp))
+ .collect();
+ Self {
+ montgomery_form: multi_exponentiate_montgomery_form_slice(
+ &bases_and_exponents,
+ exponent_bits,
+ &MOD::MODULUS,
+ &MOD::R,
+ MOD::MOD_NEG_INV,
+ ),
+ phantom: core::marker::PhantomData,
+ }
+ }
+}
+
#[cfg(test)]
mod tests {
+ use crate::traits::MultiExponentiate;
use crate::{const_residue, impl_modulus, modular::constant_mod::ResidueParams, U256};
impl_modulus!(
@@ -95,4 +161,73 @@ mod tests {
U256::from_be_hex("3681BC0FEA2E5D394EB178155A127B0FD2EF405486D354251C385BDD51B9D421");
assert_eq!(res.retrieve(), expected);
}
+
+ #[test]
+ fn test_multi_exp_array() {
+ let base = U256::from(2u8);
+ let base_mod = const_residue!(base, Modulus);
+
+ let exponent = U256::from(33u8);
+ let bases_and_exponents = [(base_mod, exponent)];
+ let res =
+ crate::modular::constant_mod::Residue::<Modulus, { U256::LIMBS }>::multi_exponentiate(
+ &bases_and_exponents,
+ );
+
+ let expected =
+ U256::from_be_hex("0000000000000000000000000000000000000000000000000000000200000000");
+
+ assert_eq!(res.retrieve(), expected);
+
+ let base2 =
+ U256::from_be_hex("3435D18AA8313EBBE4D20002922225B53F75DC4453BB3EEC0378646F79B524A4");
+ let base2_mod = const_residue!(base2, Modulus);
+
+ let exponent2 =
+ U256::from_be_hex("77117F1273373C26C700D076B3F780074D03339F56DD0EFB60E7F58441FD3685");
+
+ let expected = base_mod.pow(&exponent) * base2_mod.pow(&exponent2);
+ let bases_and_exponents = [(base_mod, exponent), (base2_mod, exponent2)];
+ let res =
+ crate::modular::constant_mod::Residue::<Modulus, { U256::LIMBS }>::multi_exponentiate(
+ &bases_and_exponents,
+ );
+
+ assert_eq!(res, expected);
+ }
+
+ #[cfg(feature = "alloc")]
+ #[test]
+ fn test_multi_exp_slice() {
+ let base = U256::from(2u8);
+ let base_mod = const_residue!(base, Modulus);
+
+ let exponent = U256::from(33u8);
+ let bases_and_exponents = vec![(base_mod, exponent)];
+ let res =
+ crate::modular::constant_mod::Residue::<Modulus, { U256::LIMBS }>::multi_exponentiate(
+ bases_and_exponents.as_slice(),
+ );
+
+ let expected =
+ U256::from_be_hex("0000000000000000000000000000000000000000000000000000000200000000");
+
+ assert_eq!(res.retrieve(), expected);
+
+ let base2 =
+ U256::from_be_hex("3435D18AA8313EBBE4D20002922225B53F75DC4453BB3EEC0378646F79B524A4");
+ let base2_mod = const_residue!(base2, Modulus);
+
+ let exponent2 =
+ U256::from_be_hex("77117F1273373C26C700D076B3F780074D03339F56DD0EFB60E7F58441FD3685");
+
+ let expected = base_mod.pow(&exponent) * base2_mod.pow(&exponent2);
+ let bases_and_exponents = vec![(base_mod, exponent), (base2_mod, exponent2)];
+ let res =
+ crate::modular::constant_mod::Residue::<Modulus, { U256::LIMBS }>::multi_exponentiate(
+ bases_and_exponents.as_slice(),
+ );
+
+ assert_eq!(res, expected);
+ }
}
diff --git a/vendor/crypto-bigint/src/uint/modular/constant_mod/macros.rs b/vendor/crypto-bigint/src/uint/modular/constant_mod/macros.rs
index 1583ca529..dfa440e02 100644
--- a/vendor/crypto-bigint/src/uint/modular/constant_mod/macros.rs
+++ b/vendor/crypto-bigint/src/uint/modular/constant_mod/macros.rs
@@ -2,40 +2,46 @@
#[macro_export]
/// Implements a modulus with the given name, type, and value, in that specific order. Please `use crypto_bigint::traits::Encoding` to make this work.
/// For example, `impl_modulus!(MyModulus, U256, "73eda753299d7d483339d80809a1d80553bda402fffe5bfeffffffff00000001");` implements a 256-bit modulus named `MyModulus`.
+/// The modulus _must_ be odd, or this will panic.
macro_rules! impl_modulus {
($name:ident, $uint_type:ty, $value:expr) => {
#[derive(Clone, Copy, Debug, Default, Eq, PartialEq)]
pub struct $name {}
impl<const DLIMBS: usize>
- $crate::modular::constant_mod::ResidueParams<{ $crate::nlimbs!(<$uint_type>::BITS) }>
- for $name
+ $crate::modular::constant_mod::ResidueParams<{ <$uint_type>::LIMBS }> for $name
where
- $crate::Uint<{ $crate::nlimbs!(<$uint_type>::BITS) }>:
- $crate::Concat<Output = $crate::Uint<DLIMBS>>,
+ $uint_type: $crate::ConcatMixed<MixedOutput = $crate::Uint<DLIMBS>>,
{
- const LIMBS: usize = { $crate::nlimbs!(<$uint_type>::BITS) };
- const MODULUS: $crate::Uint<{ $crate::nlimbs!(<$uint_type>::BITS) }> =
- <$uint_type>::from_be_hex($value);
- const R: $crate::Uint<{ $crate::nlimbs!(<$uint_type>::BITS) }> = $crate::Uint::MAX
+ const LIMBS: usize = <$uint_type>::LIMBS;
+ const MODULUS: $uint_type = {
+ let res = <$uint_type>::from_be_hex($value);
+
+ // Check that the modulus is odd
+ if res.as_limbs()[0].0 & 1 == 0 {
+ panic!("modulus must be odd");
+ }
+
+ res
+ };
+ const R: $uint_type = $crate::Uint::MAX
.const_rem(&Self::MODULUS)
.0
.wrapping_add(&$crate::Uint::ONE);
- const R2: $crate::Uint<{ $crate::nlimbs!(<$uint_type>::BITS) }> =
+ const R2: $uint_type =
$crate::Uint::const_rem_wide(Self::R.square_wide(), &Self::MODULUS).0;
const MOD_NEG_INV: $crate::Limb = $crate::Limb(
$crate::Word::MIN.wrapping_sub(
Self::MODULUS
- .inv_mod2k($crate::Word::BITS as usize)
+ .inv_mod2k_vartime($crate::Word::BITS as usize)
.as_limbs()[0]
.0,
),
);
- const R3: $crate::Uint<{ $crate::nlimbs!(<$uint_type>::BITS) }> =
- $crate::modular::montgomery_reduction(
- &Self::R2.square_wide(),
- &Self::MODULUS,
- Self::MOD_NEG_INV,
- );
+ const R3: $uint_type = $crate::modular::montgomery_reduction(
+ &Self::R2.square_wide(),
+ &Self::MODULUS,
+ Self::MOD_NEG_INV,
+ );
}
};
}
@@ -43,6 +49,7 @@ macro_rules! impl_modulus {
#[macro_export]
/// Creates a `Residue` with the given value for a specific modulus.
/// For example, `residue!(U256::from(105u64), MyModulus);` creates a `Residue` for 105 mod `MyModulus`.
+/// The modulus _must_ be odd, or this will panic.
macro_rules! const_residue {
($variable:ident, $modulus:ident) => {
$crate::modular::constant_mod::Residue::<$modulus, { $modulus::LIMBS }>::new(&$variable)
diff --git a/vendor/crypto-bigint/src/uint/modular/pow.rs b/vendor/crypto-bigint/src/uint/modular/pow.rs
index 5ab1fd63d..f09bc4c6c 100644
--- a/vendor/crypto-bigint/src/uint/modular/pow.rs
+++ b/vendor/crypto-bigint/src/uint/modular/pow.rs
@@ -2,13 +2,39 @@ use crate::{Limb, Uint, Word};
use super::mul::{mul_montgomery_form, square_montgomery_form};
+#[cfg(feature = "alloc")]
+use alloc::vec::Vec;
+
+const WINDOW: usize = 4;
+const WINDOW_MASK: Word = (1 << WINDOW) - 1;
+
/// Performs modular exponentiation using Montgomery's ladder.
/// `exponent_bits` represents the number of bits to take into account for the exponent.
///
/// NOTE: this value is leaked in the time pattern.
-pub const fn pow_montgomery_form<const LIMBS: usize>(
+pub const fn pow_montgomery_form<const LIMBS: usize, const RHS_LIMBS: usize>(
x: &Uint<LIMBS>,
- exponent: &Uint<LIMBS>,
+ exponent: &Uint<RHS_LIMBS>,
+ exponent_bits: usize,
+ modulus: &Uint<LIMBS>,
+ r: &Uint<LIMBS>,
+ mod_neg_inv: Limb,
+) -> Uint<LIMBS> {
+ multi_exponentiate_montgomery_form_array(
+ &[(*x, *exponent)],
+ exponent_bits,
+ modulus,
+ r,
+ mod_neg_inv,
+ )
+}
+
+pub const fn multi_exponentiate_montgomery_form_array<
+ const LIMBS: usize,
+ const RHS_LIMBS: usize,
+ const N: usize,
+>(
+ bases_and_exponents: &[(Uint<LIMBS>, Uint<RHS_LIMBS>); N],
exponent_bits: usize,
modulus: &Uint<LIMBS>,
r: &Uint<LIMBS>,
@@ -18,18 +44,84 @@ pub const fn pow_montgomery_form<const LIMBS: usize>(
return *r; // 1 in Montgomery form
}
- const WINDOW: usize = 4;
- const WINDOW_MASK: Word = (1 << WINDOW) - 1;
+ let mut powers_and_exponents =
+ [([Uint::<LIMBS>::ZERO; 1 << WINDOW], Uint::<RHS_LIMBS>::ZERO); N];
+
+ let mut i = 0;
+ while i < N {
+ let (base, exponent) = bases_and_exponents[i];
+ powers_and_exponents[i] = (compute_powers(&base, modulus, r, mod_neg_inv), exponent);
+ i += 1;
+ }
+ multi_exponentiate_montgomery_form_internal(
+ &powers_and_exponents,
+ exponent_bits,
+ modulus,
+ r,
+ mod_neg_inv,
+ )
+}
+
+/// Performs modular multi-exponentiation using Montgomery's ladder.
+/// `exponent_bits` represents the number of bits to take into account for the exponent.
+///
+/// See: Straus, E. G. Problems and solutions: Addition chains of vectors. American Mathematical Monthly 71 (1964), 806–808.
+///
+/// NOTE: this value is leaked in the time pattern.
+#[cfg(feature = "alloc")]
+pub fn multi_exponentiate_montgomery_form_slice<const LIMBS: usize, const RHS_LIMBS: usize>(
+ bases_and_exponents: &[(Uint<LIMBS>, Uint<RHS_LIMBS>)],
+ exponent_bits: usize,
+ modulus: &Uint<LIMBS>,
+ r: &Uint<LIMBS>,
+ mod_neg_inv: Limb,
+) -> Uint<LIMBS> {
+ if exponent_bits == 0 {
+ return *r; // 1 in Montgomery form
+ }
+
+ let powers_and_exponents: Vec<([Uint<LIMBS>; 1 << WINDOW], Uint<RHS_LIMBS>)> =
+ bases_and_exponents
+ .iter()
+ .map(|(base, exponent)| (compute_powers(base, modulus, r, mod_neg_inv), *exponent))
+ .collect();
+
+ multi_exponentiate_montgomery_form_internal(
+ powers_and_exponents.as_slice(),
+ exponent_bits,
+ modulus,
+ r,
+ mod_neg_inv,
+ )
+}
+
+const fn compute_powers<const LIMBS: usize>(
+ x: &Uint<LIMBS>,
+ modulus: &Uint<LIMBS>,
+ r: &Uint<LIMBS>,
+ mod_neg_inv: Limb,
+) -> [Uint<LIMBS>; 1 << WINDOW] {
// powers[i] contains x^i
let mut powers = [*r; 1 << WINDOW];
powers[1] = *x;
+
let mut i = 2;
while i < powers.len() {
powers[i] = mul_montgomery_form(&powers[i - 1], x, modulus, mod_neg_inv);
i += 1;
}
+ powers
+}
+
+const fn multi_exponentiate_montgomery_form_internal<const LIMBS: usize, const RHS_LIMBS: usize>(
+ powers_and_exponents: &[([Uint<LIMBS>; 1 << WINDOW], Uint<RHS_LIMBS>)],
+ exponent_bits: usize,
+ modulus: &Uint<LIMBS>,
+ r: &Uint<LIMBS>,
+ mod_neg_inv: Limb,
+) -> Uint<LIMBS> {
let starting_limb = (exponent_bits - 1) / Limb::BITS;
let starting_bit_in_limb = (exponent_bits - 1) % Limb::BITS;
let starting_window = starting_bit_in_limb / WINDOW;
@@ -40,7 +132,6 @@ pub const fn pow_montgomery_form<const LIMBS: usize>(
let mut limb_num = starting_limb + 1;
while limb_num > 0 {
limb_num -= 1;
- let w = exponent.as_limbs()[limb_num].0;
let mut window_num = if limb_num == starting_limb {
starting_window + 1
@@ -50,11 +141,7 @@ pub const fn pow_montgomery_form<const LIMBS: usize>(
while window_num > 0 {
window_num -= 1;
- let mut idx = (w >> (window_num * WINDOW)) & WINDOW_MASK;
-
- if limb_num == starting_limb && window_num == starting_window {
- idx &= starting_window_mask;
- } else {
+ if limb_num != starting_limb || window_num != starting_window {
let mut i = 0;
while i < WINDOW {
i += 1;
@@ -62,16 +149,28 @@ pub const fn pow_montgomery_form<const LIMBS: usize>(
}
}
- // Constant-time lookup in the array of powers
- let mut power = powers[0];
- let mut i = 1;
- while i < 1 << WINDOW {
- let choice = Limb::ct_eq(Limb(i as Word), Limb(idx));
- power = Uint::<LIMBS>::ct_select(&power, &powers[i], choice);
+ let mut i = 0;
+ while i < powers_and_exponents.len() {
+ let (powers, exponent) = powers_and_exponents[i];
+ let w = exponent.as_limbs()[limb_num].0;
+ let mut idx = (w >> (window_num * WINDOW)) & WINDOW_MASK;
+
+ if limb_num == starting_limb && window_num == starting_window {
+ idx &= starting_window_mask;
+ }
+
+ // Constant-time lookup in the array of powers
+ let mut power = powers[0];
+ let mut j = 1;
+ while j < 1 << WINDOW {
+ let choice = Limb::ct_eq(Limb(j as Word), Limb(idx));
+ power = Uint::<LIMBS>::ct_select(&power, &powers[j], choice);
+ j += 1;
+ }
+
+ z = mul_montgomery_form(&z, &power, modulus, mod_neg_inv);
i += 1;
}
-
- z = mul_montgomery_form(&z, &power, modulus, mod_neg_inv);
}
}
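
The loop above is a fixed 4-bit-window scan over all exponents at once: each window position squares the accumulator WINDOW times and then multiplies in one constant-time-selected table entry per base (Straus's trick). A toy sketch of the same structure over plain u64 arithmetic, with hypothetical names, no Montgomery form and no constant-time selection, just to show the window bookkeeping:

// Toy Straus multi-exponentiation over u64: computes prod_i base_i^exp_i mod m
// by scanning all exponents together in 4-bit windows, mirroring the loop
// structure above (no Montgomery form, not constant-time; illustration only).
const WINDOW: usize = 4;
const WINDOW_MASK: u64 = (1 << WINDOW) - 1;

fn mulmod(a: u64, b: u64, m: u64) -> u64 {
    (a as u128 * b as u128 % m as u128) as u64
}

fn multi_exp_toy(bases_and_exps: &[(u64, u64)], m: u64) -> u64 {
    // Precompute powers[w] = base^w mod m for w in 0..2^WINDOW, one table per base.
    let tables: Vec<[u64; 1 << WINDOW]> = bases_and_exps
        .iter()
        .map(|&(base, _)| {
            let mut t = [1 % m; 1 << WINDOW];
            for w in 1..t.len() {
                t[w] = mulmod(t[w - 1], base, m);
            }
            t
        })
        .collect();

    let mut acc = 1 % m;
    for win in (0..64 / WINDOW).rev() {
        // Square WINDOW times to shift the accumulator left by one window.
        for _ in 0..WINDOW {
            acc = mulmod(acc, acc, m);
        }
        // Multiply in one table entry per base, indexed by that base's window digit.
        for (i, &(_, exp)) in bases_and_exps.iter().enumerate() {
            let digit = ((exp >> (win * WINDOW)) & WINDOW_MASK) as usize;
            acc = mulmod(acc, tables[i][digit], m);
        }
    }
    acc
}

fn main() {
    // 7^5 * 11^3 mod 1009, checked against the naive computation.
    let naive = (7u64.pow(5) * 11u64.pow(3)) % 1009;
    assert_eq!(multi_exp_toy(&[(7, 5), (11, 3)], 1009), naive);
}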
diff --git a/vendor/crypto-bigint/src/uint/modular/runtime_mod.rs b/vendor/crypto-bigint/src/uint/modular/runtime_mod.rs
index 84622d167..9f15afe07 100644
--- a/vendor/crypto-bigint/src/uint/modular/runtime_mod.rs
+++ b/vendor/crypto-bigint/src/uint/modular/runtime_mod.rs
@@ -1,6 +1,13 @@
use crate::{Limb, Uint, Word};
-use super::{div_by_2::div_by_2, reduction::montgomery_reduction, Retrieve};
+use super::{
+ constant_mod::{Residue, ResidueParams},
+ div_by_2::div_by_2,
+ reduction::montgomery_reduction,
+ Retrieve,
+};
+
+use subtle::{Choice, ConditionallySelectable, ConstantTimeEq, CtOption};
/// Additions between residues with a modulus set at runtime
mod runtime_add;
@@ -15,7 +22,7 @@ mod runtime_pow;
/// Subtractions between residues with a modulus set at runtime
mod runtime_sub;
-/// The parameters to efficiently go to and from the Montgomery form for a modulus provided at runtime.
+/// The parameters to efficiently go to and from the Montgomery form for an odd modulus provided at runtime.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct DynResidueParams<const LIMBS: usize> {
// The constant modulus
@@ -32,16 +39,17 @@ pub struct DynResidueParams<const LIMBS: usize> {
}
impl<const LIMBS: usize> DynResidueParams<LIMBS> {
- /// Instantiates a new set of `ResidueParams` representing the given `modulus`.
- pub const fn new(modulus: &Uint<LIMBS>) -> Self {
+ // Internal helper function to generate parameters; this lets us wrap the constructors more cleanly
+ const fn generate_params(modulus: &Uint<LIMBS>) -> Self {
let r = Uint::MAX.const_rem(modulus).0.wrapping_add(&Uint::ONE);
let r2 = Uint::const_rem_wide(r.square_wide(), modulus).0;
// Since we are calculating the inverse modulo (Word::MAX+1),
// we can take the modulo right away and calculate the inverse of the first limb only.
let modulus_lo = Uint::<1>::from_words([modulus.limbs[0].0]);
- let mod_neg_inv =
- Limb(Word::MIN.wrapping_sub(modulus_lo.inv_mod2k(Word::BITS as usize).limbs[0].0));
+ let mod_neg_inv = Limb(
+ Word::MIN.wrapping_sub(modulus_lo.inv_mod2k_vartime(Word::BITS as usize).limbs[0].0),
+ );
let r3 = montgomery_reduction(&r2.square_wide(), modulus, mod_neg_inv);
@@ -54,10 +62,68 @@ impl<const LIMBS: usize> DynResidueParams<LIMBS> {
}
}
+ /// Instantiates a new set of `ResidueParams` representing the given `modulus`, which _must_ be odd.
+ /// If `modulus` is not odd, this function will panic; use [`new_checked`][`DynResidueParams::new_checked`] if you want to be able to detect an invalid modulus.
+ pub const fn new(modulus: &Uint<LIMBS>) -> Self {
+ // A valid modulus must be odd
+ if modulus.ct_is_odd().to_u8() == 0 {
+ panic!("modulus must be odd");
+ }
+
+ Self::generate_params(modulus)
+ }
+
+ /// Instantiates a new set of `ResidueParams` representing the given `modulus` if it is odd.
+ /// Returns a `CtOption` that is `None` if the provided modulus is not odd; this is a safer version of [`new`][`DynResidueParams::new`], which can panic.
+ #[deprecated(
+ since = "0.5.3",
+ note = "This functionality will be moved to `new` in a future release."
+ )]
+ pub fn new_checked(modulus: &Uint<LIMBS>) -> CtOption<Self> {
+ // A valid modulus must be odd.
+ CtOption::new(Self::generate_params(modulus), modulus.ct_is_odd().into())
+ }
+
/// Returns the modulus which was used to initialize these parameters.
pub const fn modulus(&self) -> &Uint<LIMBS> {
&self.modulus
}
+
+ /// Create `DynResidueParams` corresponding to a `ResidueParams`.
+ pub const fn from_residue_params<P>() -> Self
+ where
+ P: ResidueParams<LIMBS>,
+ {
+ Self {
+ modulus: P::MODULUS,
+ r: P::R,
+ r2: P::R2,
+ r3: P::R3,
+ mod_neg_inv: P::MOD_NEG_INV,
+ }
+ }
+}
+
+impl<const LIMBS: usize> ConditionallySelectable for DynResidueParams<LIMBS> {
+ fn conditional_select(a: &Self, b: &Self, choice: Choice) -> Self {
+ Self {
+ modulus: Uint::conditional_select(&a.modulus, &b.modulus, choice),
+ r: Uint::conditional_select(&a.r, &b.r, choice),
+ r2: Uint::conditional_select(&a.r2, &b.r2, choice),
+ r3: Uint::conditional_select(&a.r3, &b.r3, choice),
+ mod_neg_inv: Limb::conditional_select(&a.mod_neg_inv, &b.mod_neg_inv, choice),
+ }
+ }
+}
+
+impl<const LIMBS: usize> ConstantTimeEq for DynResidueParams<LIMBS> {
+ fn ct_eq(&self, other: &Self) -> Choice {
+ self.modulus.ct_eq(&other.modulus)
+ & self.r.ct_eq(&other.r)
+ & self.r2.ct_eq(&other.r2)
+ & self.r3.ct_eq(&other.r3)
+ & self.mod_neg_inv.ct_eq(&other.mod_neg_inv)
+ }
}
/// A residue represented using `LIMBS` limbs. The odd modulus of this residue is set at runtime.
@@ -113,6 +179,32 @@ impl<const LIMBS: usize> DynResidue<LIMBS> {
&self.residue_params
}
+ /// Access the `DynResidue` value in Montgomery form.
+ pub const fn as_montgomery(&self) -> &Uint<LIMBS> {
+ &self.montgomery_form
+ }
+
+ /// Mutably access the `DynResidue` value in Montgomery form.
+ pub fn as_montgomery_mut(&mut self) -> &mut Uint<LIMBS> {
+ &mut self.montgomery_form
+ }
+
+ /// Create a `DynResidue` from a value in Montgomery form.
+ pub const fn from_montgomery(
+ integer: Uint<LIMBS>,
+ residue_params: DynResidueParams<LIMBS>,
+ ) -> Self {
+ Self {
+ montgomery_form: integer,
+ residue_params,
+ }
+ }
+
+ /// Extract the value from the `DynResidue` in Montgomery form.
+ pub const fn to_montgomery(&self) -> Uint<LIMBS> {
+ self.montgomery_form
+ }
+
/// Performs the modular division by 2, that is for given `x` returns `y`
/// such that `y * 2 = x mod p`. This means:
/// - if `x` is even, returns `x / 2`,
@@ -132,3 +224,77 @@ impl<const LIMBS: usize> Retrieve for DynResidue<LIMBS> {
self.retrieve()
}
}
+
+impl<const LIMBS: usize, P: ResidueParams<LIMBS>> From<&Residue<P, LIMBS>> for DynResidue<LIMBS> {
+ fn from(residue: &Residue<P, LIMBS>) -> Self {
+ Self {
+ montgomery_form: residue.to_montgomery(),
+ residue_params: DynResidueParams::from_residue_params::<P>(),
+ }
+ }
+}
+
+impl<const LIMBS: usize> ConditionallySelectable for DynResidue<LIMBS> {
+ fn conditional_select(a: &Self, b: &Self, choice: Choice) -> Self {
+ Self {
+ montgomery_form: Uint::conditional_select(
+ &a.montgomery_form,
+ &b.montgomery_form,
+ choice,
+ ),
+ residue_params: DynResidueParams::conditional_select(
+ &a.residue_params,
+ &b.residue_params,
+ choice,
+ ),
+ }
+ }
+}
+
+impl<const LIMBS: usize> ConstantTimeEq for DynResidue<LIMBS> {
+ fn ct_eq(&self, other: &Self) -> Choice {
+ self.montgomery_form.ct_eq(&other.montgomery_form)
+ & self.residue_params.ct_eq(&other.residue_params)
+ }
+}
+
+/// NOTE: this does _not_ zeroize the parameters, in order to maintain some form of type consistency
+#[cfg(feature = "zeroize")]
+impl<const LIMBS: usize> zeroize::Zeroize for DynResidue<LIMBS> {
+ fn zeroize(&mut self) {
+ self.montgomery_form.zeroize()
+ }
+}
+
+#[cfg(test)]
+mod test {
+ use super::*;
+
+ const LIMBS: usize = nlimbs!(64);
+
+ #[test]
+ #[allow(deprecated)]
+ // Test that a valid modulus yields `DynResidueParams`
+ fn test_valid_modulus() {
+ let valid_modulus = Uint::<LIMBS>::from(3u8);
+
+ DynResidueParams::<LIMBS>::new_checked(&valid_modulus).unwrap();
+ DynResidueParams::<LIMBS>::new(&valid_modulus);
+ }
+
+ #[test]
+ #[allow(deprecated)]
+ // Test that an invalid checked modulus does not yield `DynResidueParams`
+ fn test_invalid_checked_modulus() {
+ assert!(bool::from(
+ DynResidueParams::<LIMBS>::new_checked(&Uint::from(2u8)).is_none()
+ ))
+ }
+
+ #[test]
+ #[should_panic]
+ // Tets that an invalid modulus panics
+ fn test_invalid_modulus() {
+ DynResidueParams::<LIMBS>::new(&Uint::from(2u8));
+ }
+}
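
With this change `DynResidueParams::new` panics on an even modulus, while the deprecated `new_checked` surfaces the failure as a `CtOption`. A brief usage sketch, assuming the usual public re-exports (`crypto_bigint::U256` and `crypto_bigint::modular::runtime_mod`):

use crypto_bigint::modular::runtime_mod::DynResidueParams;
use crypto_bigint::U256;

fn main() {
    // `new` requires an odd modulus and panics otherwise.
    let params = DynResidueParams::new(&U256::from(3u8));
    assert_eq!(params.modulus(), &U256::from(3u8));

    // `new_checked` reports an even modulus as `None` instead of panicking,
    // which is the safer choice when the modulus comes from untrusted input.
    #[allow(deprecated)]
    let checked = DynResidueParams::new_checked(&U256::from(2u8));
    assert!(bool::from(checked.is_none()));
}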
diff --git a/vendor/crypto-bigint/src/uint/modular/runtime_mod/runtime_pow.rs b/vendor/crypto-bigint/src/uint/modular/runtime_mod/runtime_pow.rs
index 270f4c01c..42b6b8beb 100644
--- a/vendor/crypto-bigint/src/uint/modular/runtime_mod/runtime_pow.rs
+++ b/vendor/crypto-bigint/src/uint/modular/runtime_mod/runtime_pow.rs
@@ -1,11 +1,18 @@
-use crate::{modular::pow::pow_montgomery_form, PowBoundedExp, Uint};
-
use super::DynResidue;
+use crate::modular::pow::multi_exponentiate_montgomery_form_array;
+#[cfg(feature = "alloc")]
+use crate::modular::pow::multi_exponentiate_montgomery_form_slice;
+use crate::{modular::pow::pow_montgomery_form, MultiExponentiateBoundedExp, PowBoundedExp, Uint};
+#[cfg(feature = "alloc")]
+use alloc::vec::Vec;
impl<const LIMBS: usize> DynResidue<LIMBS> {
/// Raises to the `exponent` power.
- pub const fn pow(&self, exponent: &Uint<LIMBS>) -> DynResidue<LIMBS> {
- self.pow_bounded_exp(exponent, Uint::<LIMBS>::BITS)
+ pub const fn pow<const RHS_LIMBS: usize>(
+ &self,
+ exponent: &Uint<RHS_LIMBS>,
+ ) -> DynResidue<LIMBS> {
+ self.pow_bounded_exp(exponent, Uint::<RHS_LIMBS>::BITS)
}
/// Raises to the `exponent` power,
@@ -13,7 +20,11 @@ impl<const LIMBS: usize> DynResidue<LIMBS> {
/// to take into account for the exponent.
///
/// NOTE: `exponent_bits` may be leaked in the time pattern.
- pub const fn pow_bounded_exp(&self, exponent: &Uint<LIMBS>, exponent_bits: usize) -> Self {
+ pub const fn pow_bounded_exp<const RHS_LIMBS: usize>(
+ &self,
+ exponent: &Uint<RHS_LIMBS>,
+ exponent_bits: usize,
+ ) -> Self {
Self {
montgomery_form: pow_montgomery_form(
&self.montgomery_form,
@@ -28,8 +39,75 @@ impl<const LIMBS: usize> DynResidue<LIMBS> {
}
}
-impl<const LIMBS: usize> PowBoundedExp<Uint<LIMBS>> for DynResidue<LIMBS> {
- fn pow_bounded_exp(&self, exponent: &Uint<LIMBS>, exponent_bits: usize) -> Self {
+impl<const LIMBS: usize, const RHS_LIMBS: usize> PowBoundedExp<Uint<RHS_LIMBS>>
+ for DynResidue<LIMBS>
+{
+ fn pow_bounded_exp(&self, exponent: &Uint<RHS_LIMBS>, exponent_bits: usize) -> Self {
self.pow_bounded_exp(exponent, exponent_bits)
}
}
+
+impl<const N: usize, const LIMBS: usize, const RHS_LIMBS: usize>
+ MultiExponentiateBoundedExp<Uint<RHS_LIMBS>, [(Self, Uint<RHS_LIMBS>); N]>
+ for DynResidue<LIMBS>
+{
+ fn multi_exponentiate_bounded_exp(
+ bases_and_exponents: &[(Self, Uint<RHS_LIMBS>); N],
+ exponent_bits: usize,
+ ) -> Self {
+ const_assert_ne!(N, 0, "bases_and_exponents must not be empty");
+ let residue_params = bases_and_exponents[0].0.residue_params;
+
+ let mut bases_and_exponents_montgomery_form =
+ [(Uint::<LIMBS>::ZERO, Uint::<RHS_LIMBS>::ZERO); N];
+
+ let mut i = 0;
+ while i < N {
+ let (base, exponent) = bases_and_exponents[i];
+ bases_and_exponents_montgomery_form[i] = (base.montgomery_form, exponent);
+ i += 1;
+ }
+
+ Self {
+ montgomery_form: multi_exponentiate_montgomery_form_array(
+ &bases_and_exponents_montgomery_form,
+ exponent_bits,
+ &residue_params.modulus,
+ &residue_params.r,
+ residue_params.mod_neg_inv,
+ ),
+ residue_params,
+ }
+ }
+}
+
+#[cfg(feature = "alloc")]
+impl<const LIMBS: usize, const RHS_LIMBS: usize>
+ MultiExponentiateBoundedExp<Uint<RHS_LIMBS>, [(Self, Uint<RHS_LIMBS>)]> for DynResidue<LIMBS>
+{
+ fn multi_exponentiate_bounded_exp(
+ bases_and_exponents: &[(Self, Uint<RHS_LIMBS>)],
+ exponent_bits: usize,
+ ) -> Self {
+ assert!(
+ !bases_and_exponents.is_empty(),
+ "bases_and_exponents must not be empty"
+ );
+ let residue_params = bases_and_exponents[0].0.residue_params;
+
+ let bases_and_exponents: Vec<(Uint<LIMBS>, Uint<RHS_LIMBS>)> = bases_and_exponents
+ .iter()
+ .map(|(base, exp)| (base.montgomery_form, *exp))
+ .collect();
+ Self {
+ montgomery_form: multi_exponentiate_montgomery_form_slice(
+ &bases_and_exponents,
+ exponent_bits,
+ &residue_params.modulus,
+ &residue_params.r,
+ residue_params.mod_neg_inv,
+ ),
+ residue_params,
+ }
+ }
+}
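
A sketch of how the new multi-exponentiation trait might be driven from user code. It assumes a `DynResidue::new` constructor that reduces an integer into Montgomery form and the usual `Mul` impl between residues, neither of which is shown in this patch; only `pow`, `retrieve` and `multi_exponentiate_bounded_exp` appear above:

use crypto_bigint::modular::runtime_mod::{DynResidue, DynResidueParams};
use crypto_bigint::{MultiExponentiateBoundedExp, U256};

fn main() {
    let params = DynResidueParams::new(&U256::from(101u8));
    let a = DynResidue::new(&U256::from(7u8), params);
    let b = DynResidue::new(&U256::from(11u8), params);
    let (x, y) = (U256::from(5u8), U256::from(3u8));

    // a^x * b^y evaluated in a single pass over the exponent windows...
    let combined =
        DynResidue::multi_exponentiate_bounded_exp(&[(a, x), (b, y)], U256::BITS);

    // ...agrees with the two exponentiations done separately.
    let separate = a.pow(&x) * b.pow(&y);
    assert_eq!(combined.retrieve(), separate.retrieve());
}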
diff --git a/vendor/crypto-bigint/src/uint/mul.rs b/vendor/crypto-bigint/src/uint/mul.rs
index fcfc756aa..cb29332c5 100644
--- a/vendor/crypto-bigint/src/uint/mul.rs
+++ b/vendor/crypto-bigint/src/uint/mul.rs
@@ -1,10 +1,22 @@
//! [`Uint`] addition operations.
-use crate::{Checked, CheckedMul, Concat, Limb, Uint, WideWord, Word, Wrapping, Zero};
+use crate::{Checked, CheckedMul, Concat, ConcatMixed, Limb, Uint, WideWord, Word, Wrapping, Zero};
use core::ops::{Mul, MulAssign};
use subtle::CtOption;
impl<const LIMBS: usize> Uint<LIMBS> {
+ /// Multiply `self` by `rhs`, returning a concatenated "wide" result.
+ pub fn mul<const HLIMBS: usize>(
+ &self,
+ rhs: &Uint<HLIMBS>,
+ ) -> <Uint<HLIMBS> as ConcatMixed<Self>>::MixedOutput
+ where
+ Uint<HLIMBS>: ConcatMixed<Self>,
+ {
+ let (lo, hi) = self.mul_wide(rhs);
+ hi.concat_mixed(&lo)
+ }
+
/// Compute "wide" multiplication, with a product twice the size of the input.
///
/// Returns a tuple containing the `(lo, hi)` components of the product.
@@ -16,11 +28,10 @@ impl<const LIMBS: usize> Uint<LIMBS> {
/// the APIs in this crate.
///
/// For more info see: <https://github.com/RustCrypto/crypto-bigint/issues/4>
- // TODO(tarcieri): use `concat` to construct a wide output
- pub const fn mul_wide(&self, rhs: &Self) -> (Self, Self) {
+ pub const fn mul_wide<const HLIMBS: usize>(&self, rhs: &Uint<HLIMBS>) -> (Self, Uint<HLIMBS>) {
let mut i = 0;
let mut lo = Self::ZERO;
- let mut hi = Self::ZERO;
+ let mut hi = Uint::<HLIMBS>::ZERO;
// Schoolbook multiplication.
// TODO(tarcieri): use Karatsuba for better performance?
@@ -28,7 +39,7 @@ impl<const LIMBS: usize> Uint<LIMBS> {
let mut j = 0;
let mut carry = Limb::ZERO;
- while j < LIMBS {
+ while j < HLIMBS {
let k = i + j;
if k >= LIMBS {
@@ -44,7 +55,11 @@ impl<const LIMBS: usize> Uint<LIMBS> {
j += 1;
}
- hi.limbs[i + j - LIMBS] = carry;
+ if i + j >= LIMBS {
+ hi.limbs[i + j - LIMBS] = carry;
+ } else {
+ lo.limbs[i + j] = carry;
+ }
i += 1;
}
@@ -52,26 +67,13 @@ impl<const LIMBS: usize> Uint<LIMBS> {
}
/// Perform saturating multiplication, returning `MAX` on overflow.
- pub const fn saturating_mul(&self, rhs: &Self) -> Self {
+ pub const fn saturating_mul<const HLIMBS: usize>(&self, rhs: &Uint<HLIMBS>) -> Self {
let (res, overflow) = self.mul_wide(rhs);
-
- let mut i = 0;
- let mut accumulator = 0;
-
- while i < LIMBS {
- accumulator |= overflow.limbs[i].0;
- i += 1;
- }
-
- if accumulator == 0 {
- res
- } else {
- Self::MAX
- }
+ Self::ct_select(&res, &Self::MAX, overflow.ct_is_nonzero())
}
/// Perform wrapping multiplication, discarding overflow.
- pub const fn wrapping_mul(&self, rhs: &Self) -> Self {
+ pub const fn wrapping_mul<const H: usize>(&self, rhs: &Uint<H>) -> Self {
self.mul_wide(rhs).0
}
@@ -159,106 +161,168 @@ impl<const LIMBS: usize> Uint<LIMBS> {
}
}
-impl<const LIMBS: usize> CheckedMul<&Uint<LIMBS>> for Uint<LIMBS> {
+impl<const LIMBS: usize, const HLIMBS: usize> CheckedMul<&Uint<HLIMBS>> for Uint<LIMBS> {
type Output = Self;
- fn checked_mul(&self, rhs: &Self) -> CtOption<Self> {
+ fn checked_mul(&self, rhs: &Uint<HLIMBS>) -> CtOption<Self> {
let (lo, hi) = self.mul_wide(rhs);
CtOption::new(lo, hi.is_zero())
}
}
-impl<const LIMBS: usize> Mul for Wrapping<Uint<LIMBS>> {
+impl<const LIMBS: usize, const HLIMBS: usize> Mul<Wrapping<Uint<HLIMBS>>>
+ for Wrapping<Uint<LIMBS>>
+{
type Output = Self;
- fn mul(self, rhs: Self) -> Wrapping<Uint<LIMBS>> {
+ fn mul(self, rhs: Wrapping<Uint<HLIMBS>>) -> Wrapping<Uint<LIMBS>> {
Wrapping(self.0.wrapping_mul(&rhs.0))
}
}
-impl<const LIMBS: usize> Mul<&Wrapping<Uint<LIMBS>>> for Wrapping<Uint<LIMBS>> {
- type Output = Wrapping<Uint<LIMBS>>;
+impl<const LIMBS: usize, const HLIMBS: usize> Mul<&Wrapping<Uint<HLIMBS>>>
+ for Wrapping<Uint<LIMBS>>
+{
+ type Output = Self;
- fn mul(self, rhs: &Wrapping<Uint<LIMBS>>) -> Wrapping<Uint<LIMBS>> {
+ fn mul(self, rhs: &Wrapping<Uint<HLIMBS>>) -> Wrapping<Uint<LIMBS>> {
Wrapping(self.0.wrapping_mul(&rhs.0))
}
}
-impl<const LIMBS: usize> Mul<Wrapping<Uint<LIMBS>>> for &Wrapping<Uint<LIMBS>> {
+impl<const LIMBS: usize, const HLIMBS: usize> Mul<Wrapping<Uint<HLIMBS>>>
+ for &Wrapping<Uint<LIMBS>>
+{
type Output = Wrapping<Uint<LIMBS>>;
- fn mul(self, rhs: Wrapping<Uint<LIMBS>>) -> Wrapping<Uint<LIMBS>> {
+ fn mul(self, rhs: Wrapping<Uint<HLIMBS>>) -> Wrapping<Uint<LIMBS>> {
Wrapping(self.0.wrapping_mul(&rhs.0))
}
}
-impl<const LIMBS: usize> Mul<&Wrapping<Uint<LIMBS>>> for &Wrapping<Uint<LIMBS>> {
+impl<const LIMBS: usize, const HLIMBS: usize> Mul<&Wrapping<Uint<HLIMBS>>>
+ for &Wrapping<Uint<LIMBS>>
+{
type Output = Wrapping<Uint<LIMBS>>;
- fn mul(self, rhs: &Wrapping<Uint<LIMBS>>) -> Wrapping<Uint<LIMBS>> {
+ fn mul(self, rhs: &Wrapping<Uint<HLIMBS>>) -> Wrapping<Uint<LIMBS>> {
Wrapping(self.0.wrapping_mul(&rhs.0))
}
}
-impl<const LIMBS: usize> MulAssign for Wrapping<Uint<LIMBS>> {
- fn mul_assign(&mut self, other: Self) {
+impl<const LIMBS: usize, const HLIMBS: usize> MulAssign<Wrapping<Uint<HLIMBS>>>
+ for Wrapping<Uint<LIMBS>>
+{
+ fn mul_assign(&mut self, other: Wrapping<Uint<HLIMBS>>) {
*self = *self * other;
}
}
-impl<const LIMBS: usize> MulAssign<&Wrapping<Uint<LIMBS>>> for Wrapping<Uint<LIMBS>> {
- fn mul_assign(&mut self, other: &Self) {
+impl<const LIMBS: usize, const HLIMBS: usize> MulAssign<&Wrapping<Uint<HLIMBS>>>
+ for Wrapping<Uint<LIMBS>>
+{
+ fn mul_assign(&mut self, other: &Wrapping<Uint<HLIMBS>>) {
*self = *self * other;
}
}
-impl<const LIMBS: usize> Mul for Checked<Uint<LIMBS>> {
+impl<const LIMBS: usize, const HLIMBS: usize> Mul<Checked<Uint<HLIMBS>>> for Checked<Uint<LIMBS>> {
type Output = Self;
- fn mul(self, rhs: Self) -> Checked<Uint<LIMBS>> {
+ fn mul(self, rhs: Checked<Uint<HLIMBS>>) -> Checked<Uint<LIMBS>> {
Checked(self.0.and_then(|a| rhs.0.and_then(|b| a.checked_mul(&b))))
}
}
-impl<const LIMBS: usize> Mul<&Checked<Uint<LIMBS>>> for Checked<Uint<LIMBS>> {
+impl<const LIMBS: usize, const HLIMBS: usize> Mul<&Checked<Uint<HLIMBS>>> for Checked<Uint<LIMBS>> {
type Output = Checked<Uint<LIMBS>>;
- fn mul(self, rhs: &Checked<Uint<LIMBS>>) -> Checked<Uint<LIMBS>> {
+ fn mul(self, rhs: &Checked<Uint<HLIMBS>>) -> Checked<Uint<LIMBS>> {
Checked(self.0.and_then(|a| rhs.0.and_then(|b| a.checked_mul(&b))))
}
}
-impl<const LIMBS: usize> Mul<Checked<Uint<LIMBS>>> for &Checked<Uint<LIMBS>> {
+impl<const LIMBS: usize, const HLIMBS: usize> Mul<Checked<Uint<HLIMBS>>> for &Checked<Uint<LIMBS>> {
type Output = Checked<Uint<LIMBS>>;
- fn mul(self, rhs: Checked<Uint<LIMBS>>) -> Checked<Uint<LIMBS>> {
+ fn mul(self, rhs: Checked<Uint<HLIMBS>>) -> Checked<Uint<LIMBS>> {
Checked(self.0.and_then(|a| rhs.0.and_then(|b| a.checked_mul(&b))))
}
}
-impl<const LIMBS: usize> Mul<&Checked<Uint<LIMBS>>> for &Checked<Uint<LIMBS>> {
+impl<const LIMBS: usize, const HLIMBS: usize> Mul<&Checked<Uint<HLIMBS>>>
+ for &Checked<Uint<LIMBS>>
+{
type Output = Checked<Uint<LIMBS>>;
- fn mul(self, rhs: &Checked<Uint<LIMBS>>) -> Checked<Uint<LIMBS>> {
+ fn mul(self, rhs: &Checked<Uint<HLIMBS>>) -> Checked<Uint<LIMBS>> {
Checked(self.0.and_then(|a| rhs.0.and_then(|b| a.checked_mul(&b))))
}
}
-impl<const LIMBS: usize> MulAssign for Checked<Uint<LIMBS>> {
- fn mul_assign(&mut self, other: Self) {
+impl<const LIMBS: usize, const HLIMBS: usize> MulAssign<Checked<Uint<HLIMBS>>>
+ for Checked<Uint<LIMBS>>
+{
+ fn mul_assign(&mut self, other: Checked<Uint<HLIMBS>>) {
*self = *self * other;
}
}
-impl<const LIMBS: usize> MulAssign<&Checked<Uint<LIMBS>>> for Checked<Uint<LIMBS>> {
- fn mul_assign(&mut self, other: &Self) {
+impl<const LIMBS: usize, const HLIMBS: usize> MulAssign<&Checked<Uint<HLIMBS>>>
+ for Checked<Uint<LIMBS>>
+{
+ fn mul_assign(&mut self, other: &Checked<Uint<HLIMBS>>) {
*self = *self * other;
}
}
+impl<const LIMBS: usize, const HLIMBS: usize> Mul<Uint<HLIMBS>> for Uint<LIMBS>
+where
+ Uint<HLIMBS>: ConcatMixed<Uint<LIMBS>>,
+{
+ type Output = <Uint<HLIMBS> as ConcatMixed<Self>>::MixedOutput;
+
+ fn mul(self, other: Uint<HLIMBS>) -> Self::Output {
+ Uint::mul(&self, &other)
+ }
+}
+
+impl<const LIMBS: usize, const HLIMBS: usize> Mul<&Uint<HLIMBS>> for Uint<LIMBS>
+where
+ Uint<HLIMBS>: ConcatMixed<Uint<LIMBS>>,
+{
+ type Output = <Uint<HLIMBS> as ConcatMixed<Self>>::MixedOutput;
+
+ fn mul(self, other: &Uint<HLIMBS>) -> Self::Output {
+ Uint::mul(&self, other)
+ }
+}
+
+impl<const LIMBS: usize, const HLIMBS: usize> Mul<Uint<HLIMBS>> for &Uint<LIMBS>
+where
+ Uint<HLIMBS>: ConcatMixed<Uint<LIMBS>>,
+{
+ type Output = <Uint<HLIMBS> as ConcatMixed<Uint<LIMBS>>>::MixedOutput;
+
+ fn mul(self, other: Uint<HLIMBS>) -> Self::Output {
+ Uint::mul(self, &other)
+ }
+}
+
+impl<const LIMBS: usize, const HLIMBS: usize> Mul<&Uint<HLIMBS>> for &Uint<LIMBS>
+where
+ Uint<HLIMBS>: ConcatMixed<Uint<LIMBS>>,
+{
+ type Output = <Uint<HLIMBS> as ConcatMixed<Uint<LIMBS>>>::MixedOutput;
+
+ fn mul(self, other: &Uint<HLIMBS>) -> Self::Output {
+ Uint::mul(self, other)
+ }
+}
+
#[cfg(test)]
mod tests {
- use crate::{CheckedMul, Zero, U256, U64};
+ use crate::{CheckedMul, Zero, U128, U192, U256, U64};
#[test]
fn mul_wide_zero_and_one() {
@@ -283,6 +347,28 @@ mod tests {
}
#[test]
+ fn mul_concat_even() {
+ assert_eq!(U64::ZERO * U64::MAX, U128::ZERO);
+ assert_eq!(U64::MAX * U64::ZERO, U128::ZERO);
+ assert_eq!(
+ U64::MAX * U64::MAX,
+ U128::from_u128(0xfffffffffffffffe_0000000000000001)
+ );
+ assert_eq!(
+ U64::ONE * U64::MAX,
+ U128::from_u128(0x0000000000000000_ffffffffffffffff)
+ );
+ }
+
+ #[test]
+ fn mul_concat_mixed() {
+ let a = U64::from_u64(0x0011223344556677);
+ let b = U128::from_u128(0x8899aabbccddeeff_8899aabbccddeeff);
+ assert_eq!(a * b, U192::from(&a).saturating_mul(&b));
+ assert_eq!(b * a, U192::from(&b).saturating_mul(&a));
+ }
+
+ #[test]
fn checked_mul_ok() {
let n = U64::from_u32(0xffff_ffff);
assert_eq!(
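
The new `Mul` impls let differently sized `Uint`s be multiplied straight into the concatenated wide type, as the `mul_concat_mixed` test exercises. A small call-site sketch using only items shown above (`mul_wide`, `ConcatMixed`):

use crypto_bigint::{ConcatMixed, U128, U192, U64};

fn main() {
    let a = U64::from_u64(0x0011_2233_4455_6677);
    let b = U128::from_u128(0x8899_aabb_ccdd_eeff_0123_4567_89ab_cdef);

    // Mixed-width multiplication returns the concatenated wide product,
    // so a U64 times a U128 yields a U192 and can never overflow.
    let wide: U192 = a * b;

    // `mul_wide` exposes the same product as (lo, hi) halves.
    let (lo, hi) = a.mul_wide(&b);
    assert_eq!(wide, hi.concat_mixed(&lo));
}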
diff --git a/vendor/crypto-bigint/src/uint/mul_mod.rs b/vendor/crypto-bigint/src/uint/mul_mod.rs
index 0916ede44..c46274a4d 100644
--- a/vendor/crypto-bigint/src/uint/mul_mod.rs
+++ b/vendor/crypto-bigint/src/uint/mul_mod.rs
@@ -3,7 +3,7 @@
use crate::{Limb, Uint, WideWord, Word};
impl<const LIMBS: usize> Uint<LIMBS> {
- /// Computes `self * rhs mod p` in constant time for the special modulus
+ /// Computes `self * rhs mod p` for the special modulus
/// `p = MAX+1-c` where `c` is small enough to fit in a single [`Limb`].
/// For the modulus reduction, this function implements Algorithm 14.47 from
/// the "Handbook of Applied Cryptography", by A. Menezes, P. van Oorschot,
diff --git a/vendor/crypto-bigint/src/uint/neg.rs b/vendor/crypto-bigint/src/uint/neg.rs
index 5cdba201b..4881a279e 100644
--- a/vendor/crypto-bigint/src/uint/neg.rs
+++ b/vendor/crypto-bigint/src/uint/neg.rs
@@ -1,22 +1,51 @@
use core::ops::Neg;
-use crate::{CtChoice, Uint, Wrapping};
+use crate::{CtChoice, Limb, Uint, WideWord, Word, Wrapping};
impl<const LIMBS: usize> Neg for Wrapping<Uint<LIMBS>> {
type Output = Self;
fn neg(self) -> Self::Output {
- let shifted = Wrapping(self.0.shl_vartime(1));
- self - shifted
+ Self(self.0.wrapping_neg())
}
}
impl<const LIMBS: usize> Uint<LIMBS> {
/// Negates based on `choice` by wrapping the integer.
pub(crate) const fn conditional_wrapping_neg(&self, choice: CtChoice) -> Uint<LIMBS> {
- let (shifted, _) = self.shl_1();
- let negated_self = self.wrapping_sub(&shifted);
+ Uint::ct_select(self, &self.wrapping_neg(), choice)
+ }
+
+ /// Perform wrapping negation.
+ pub const fn wrapping_neg(&self) -> Self {
+ let mut ret = [Limb::ZERO; LIMBS];
+ let mut carry = 1;
+ let mut i = 0;
+ while i < LIMBS {
+ let r = (!self.limbs[i].0 as WideWord) + carry;
+ ret[i] = Limb(r as Word);
+ carry = r >> Limb::BITS;
+ i += 1;
+ }
+ Uint::new(ret)
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use crate::U256;
- Uint::ct_select(self, &negated_self, choice)
+ #[test]
+ fn wrapping_neg() {
+ assert_eq!(U256::ZERO.wrapping_neg(), U256::ZERO);
+ assert_eq!(U256::MAX.wrapping_neg(), U256::ONE);
+ assert_eq!(
+ U256::from_u64(13).wrapping_neg(),
+ U256::from_u64(13).not().saturating_add(&U256::ONE)
+ );
+ assert_eq!(
+ U256::from_u64(42).wrapping_neg(),
+ U256::from_u64(42).saturating_sub(&U256::ONE).not()
+ );
}
}
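
`wrapping_neg` is plain two's-complement negation: complement every limb and propagate a single +1 carry from the bottom. A plain-Rust sketch of the same loop over little-endian u64 limbs:

// Two's-complement negation over little-endian u64 limbs, mirroring the
// carry-propagating loop in `Uint::wrapping_neg` (plain-Rust sketch).
fn wrapping_neg_limbs<const N: usize>(limbs: [u64; N]) -> [u64; N] {
    let mut out = [0u64; N];
    let mut carry = 1u128; // start from +1, i.e. compute !x + 1
    for i in 0..N {
        let r = (!limbs[i]) as u128 + carry;
        out[i] = r as u64;
        carry = r >> 64;
    }
    out
}

fn main() {
    assert_eq!(wrapping_neg_limbs([0, 0]), [0, 0]);               // -0 wraps to 0
    assert_eq!(wrapping_neg_limbs([u64::MAX, u64::MAX]), [1, 0]); // -MAX wraps to 1
    assert_eq!(wrapping_neg_limbs([13, 0]), [u64::MAX - 12, u64::MAX]);
}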
diff --git a/vendor/crypto-bigint/src/uint/neg_mod.rs b/vendor/crypto-bigint/src/uint/neg_mod.rs
index aaed2768f..38580ed5d 100644
--- a/vendor/crypto-bigint/src/uint/neg_mod.rs
+++ b/vendor/crypto-bigint/src/uint/neg_mod.rs
@@ -3,7 +3,7 @@
use crate::{Limb, NegMod, Uint};
impl<const LIMBS: usize> Uint<LIMBS> {
- /// Computes `-a mod p` in constant time.
+ /// Computes `-a mod p`.
/// Assumes `self` is in `[0, p)`.
pub const fn neg_mod(&self, p: &Self) -> Self {
let z = self.ct_is_nonzero();
@@ -18,7 +18,7 @@ impl<const LIMBS: usize> Uint<LIMBS> {
ret
}
- /// Computes `-a mod p` in constant time for the special modulus
+ /// Computes `-a mod p` for the special modulus
/// `p = MAX+1-c` where `c` is small enough to fit in a single [`Limb`].
pub const fn neg_mod_special(&self, c: Limb) -> Self {
Self::ZERO.sub_mod_special(self, c)
diff --git a/vendor/crypto-bigint/src/uint/rand.rs b/vendor/crypto-bigint/src/uint/rand.rs
index e283e2246..80af3bb16 100644
--- a/vendor/crypto-bigint/src/uint/rand.rs
+++ b/vendor/crypto-bigint/src/uint/rand.rs
@@ -1,7 +1,7 @@
//! Random number generator support
use super::Uint;
-use crate::{Limb, NonZero, Random, RandomMod};
+use crate::{Encoding, Limb, NonZero, Random, RandomMod};
use rand_core::CryptoRngCore;
use subtle::ConstantTimeLess;
@@ -30,18 +30,29 @@ impl<const LIMBS: usize> RandomMod for Uint<LIMBS> {
/// issue so long as the underlying random number generator is truly a
/// CSRNG, where previous outputs are unrelated to subsequent
/// outputs and do not reveal information about the RNG's internal state.
- fn random_mod(mut rng: &mut impl CryptoRngCore, modulus: &NonZero<Self>) -> Self {
+ fn random_mod(rng: &mut impl CryptoRngCore, modulus: &NonZero<Self>) -> Self {
let mut n = Self::ZERO;
let n_bits = modulus.as_ref().bits_vartime();
+ let n_bytes = (n_bits + 7) / 8;
let n_limbs = (n_bits + Limb::BITS - 1) / Limb::BITS;
- let mask = Limb::MAX >> (Limb::BITS * n_limbs - n_bits);
+ let hi_bytes = n_bytes - (n_limbs - 1) * Limb::BYTES;
+
+ let mut bytes = Limb::ZERO.to_le_bytes();
loop {
- for i in 0..n_limbs {
- n.limbs[i] = Limb::random(&mut rng);
+ for i in 0..n_limbs - 1 {
+ rng.fill_bytes(bytes.as_mut());
+ // Need to deserialize from little-endian to make sure that two 32-bit limbs
+ // deserialized sequentially are equal to one 64-bit limb produced from the same
+ // byte stream.
+ n.limbs[i] = Limb::from_le_bytes(bytes);
}
- n.limbs[n_limbs - 1] = n.limbs[n_limbs - 1] & mask;
+
+ // Generate the high limb, which may only need to be filled partially.
+ bytes.as_mut().fill(0);
+ rng.fill_bytes(&mut (bytes.as_mut()[0..hi_bytes]));
+ n.limbs[n_limbs - 1] = Limb::from_le_bytes(bytes);
if n.ct_lt(modulus).into() {
return n;
@@ -63,15 +74,17 @@ mod tests {
let modulus = NonZero::new(U256::from(42u8)).unwrap();
let res = U256::random_mod(&mut rng, &modulus);
- // Sanity check that the return value isn't zero
- assert_ne!(res, U256::ZERO);
+ // Check that the value is in range
+ assert!(res >= U256::ZERO);
+ assert!(res < U256::from(42u8));
// Ensure `random_mod` runs in a reasonable amount of time
// when the modulus is larger than 1 limb
let modulus = NonZero::new(U256::from(0x10000000000000001u128)).unwrap();
let res = U256::random_mod(&mut rng, &modulus);
- // Sanity check that the return value isn't zero
- assert_ne!(res, U256::ZERO);
+ // Check that the value is in range
+ assert!(res >= U256::ZERO);
+ assert!(res < U256::from(0x10000000000000001u128));
}
}
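
The reworked `random_mod` fills only as many whole bytes as the modulus needs (byte by byte, so 32-bit and 64-bit limbs see the same stream) and then rejection-samples until the candidate falls below the modulus. A toy version of that loop for a u64 modulus, assuming `rand_core` is available as it already is for `rand`-enabled users of this crate:

use rand_core::CryptoRngCore;

// Toy rejection sampling for a u64 modulus: draw only as many bytes as the
// modulus needs, zero the rest, and retry until the candidate is below it.
fn random_mod_u64(rng: &mut impl CryptoRngCore, modulus: u64) -> u64 {
    assert!(modulus != 0);
    let n_bits = 64 - modulus.leading_zeros() as usize;
    let n_bytes = (n_bits + 7) / 8;
    let mut bytes = [0u8; 8];
    loop {
        bytes.fill(0);
        rng.fill_bytes(&mut bytes[..n_bytes]);
        let candidate = u64::from_le_bytes(bytes);
        if candidate < modulus {
            return candidate;
        }
    }
}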
diff --git a/vendor/crypto-bigint/src/uint/shl.rs b/vendor/crypto-bigint/src/uint/shl.rs
index eb8c713c9..1dbc40f05 100644
--- a/vendor/crypto-bigint/src/uint/shl.rs
+++ b/vendor/crypto-bigint/src/uint/shl.rs
@@ -1,37 +1,9 @@
//! [`Uint`] bitwise left shift operations.
-use crate::{limb::HI_BIT, CtChoice, Limb, Uint, Word};
+use crate::{CtChoice, Limb, Uint, Word};
use core::ops::{Shl, ShlAssign};
impl<const LIMBS: usize> Uint<LIMBS> {
- /// Computes `self << 1` in constant-time, returning the overflowing bit as a `CtChoice`.
- pub(crate) const fn shl_1(&self) -> (Self, CtChoice) {
- let mut shifted_bits = [0; LIMBS];
- let mut i = 0;
- while i < LIMBS {
- shifted_bits[i] = self.limbs[i].0 << 1;
- i += 1;
- }
-
- let mut carry_bits = [0; LIMBS];
- let mut i = 0;
- while i < LIMBS {
- carry_bits[i] = self.limbs[i].0 >> HI_BIT;
- i += 1;
- }
-
- let mut limbs = [Limb(0); LIMBS];
-
- limbs[0] = Limb(shifted_bits[0]);
- let mut i = 1;
- while i < LIMBS {
- limbs[i] = Limb(shifted_bits[i] | carry_bits[i - 1]);
- i += 1;
- }
-
- (Uint::new(limbs), CtChoice::from_lsb(carry_bits[LIMBS - 1]))
- }
-
/// Computes `self << shift` where `0 <= shift < Limb::BITS`,
/// returning the result and the carry.
#[inline(always)]
@@ -106,6 +78,22 @@ impl<const LIMBS: usize> Uint<LIMBS> {
(new_lower, upper)
}
+
+ /// Computes `self << n`.
+ /// Returns zero if `n >= Self::BITS`.
+ pub const fn shl(&self, shift: usize) -> Self {
+ let overflow = CtChoice::from_usize_lt(shift, Self::BITS).not();
+ let shift = shift % Self::BITS;
+ let mut result = *self;
+ let mut i = 0;
+ while i < Self::LOG2_BITS {
+ let bit = CtChoice::from_lsb((shift as Word >> i) & 1);
+ result = Uint::ct_select(&result, &result.shl_vartime(1 << i), bit);
+ i += 1;
+ }
+
+ Uint::ct_select(&result, &Self::ZERO, overflow)
+ }
}
impl<const LIMBS: usize> Shl<usize> for Uint<LIMBS> {
@@ -116,7 +104,7 @@ impl<const LIMBS: usize> Shl<usize> for Uint<LIMBS> {
/// When used with a fixed `rhs`, this function is constant-time with respect
/// to `self`.
fn shl(self, rhs: usize) -> Uint<LIMBS> {
- self.shl_vartime(rhs)
+ Uint::<LIMBS>::shl(&self, rhs)
}
}
@@ -128,7 +116,7 @@ impl<const LIMBS: usize> Shl<usize> for &Uint<LIMBS> {
/// When used with a fixed `rhs`, this function is constant-time with respect
/// to `self`.
fn shl(self, rhs: usize) -> Uint<LIMBS> {
- self.shl_vartime(rhs)
+ self.shl(rhs)
}
}
@@ -138,7 +126,7 @@ impl<const LIMBS: usize> ShlAssign<usize> for Uint<LIMBS> {
/// When used with a fixed `rhs`, this function is constant-time with respect
/// to `self`.
fn shl_assign(&mut self, rhs: usize) {
- *self = self.shl_vartime(rhs)
+ *self = self.shl(rhs)
}
}
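
The new `shl` is effectively a barrel shifter: it walks the bits of the shift amount and, for each bit, conditionally replaces the running result with a copy shifted by the corresponding power of two, selecting without branches. A plain-Rust sketch of the same ladder for u128, with masks standing in for `CtChoice`/`ct_select`:

// Branch-free left shift for u128, mirroring the ladder in `Uint::shl`:
// each bit of the shift amount selects, via a mask, between the running
// result and the result shifted by a fixed power of two (plain-Rust sketch).
fn shl_ct(x: u128, shift: u32) -> u128 {
    // All-ones when shift >= 128; the final result must then be zero.
    let overflow = 0u128.wrapping_sub((shift >= 128) as u128);
    let shift = shift % 128;
    let mut result = x;
    // 7 stages, since log2(128) == 7; stage i shifts by 2^i when bit i is set.
    for i in 0..7 {
        let bit = (shift >> i) & 1;
        let mask = 0u128.wrapping_sub(bit as u128); // all ones iff bit == 1
        let shifted = result << (1u32 << i);
        result = (shifted & mask) | (result & !mask);
    }
    result & !overflow
}

fn main() {
    assert_eq!(shl_ct(1, 5), 1 << 5);
    assert_eq!(shl_ct(3, 127), 1 << 127); // the high bit of 3 is shifted out
    assert_eq!(shl_ct(u128::MAX, 130), 0); // shifting past the width yields zero
}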
diff --git a/vendor/crypto-bigint/src/uint/shr.rs b/vendor/crypto-bigint/src/uint/shr.rs
index 3698b970b..6a36fbe8b 100644
--- a/vendor/crypto-bigint/src/uint/shr.rs
+++ b/vendor/crypto-bigint/src/uint/shr.rs
@@ -1,7 +1,7 @@
//! [`Uint`] bitwise right shift operations.
use super::Uint;
-use crate::{limb::HI_BIT, CtChoice, Limb};
+use crate::{limb::HI_BIT, CtChoice, Limb, Word};
use core::ops::{Shr, ShrAssign};
impl<const LIMBS: usize> Uint<LIMBS> {
@@ -97,6 +97,22 @@ impl<const LIMBS: usize> Uint<LIMBS> {
(lower, new_upper)
}
+
+ /// Computes `self >> n`.
+ /// Returns zero if `n >= Self::BITS`.
+ pub const fn shr(&self, shift: usize) -> Self {
+ let overflow = CtChoice::from_usize_lt(shift, Self::BITS).not();
+ let shift = shift % Self::BITS;
+ let mut result = *self;
+ let mut i = 0;
+ while i < Self::LOG2_BITS {
+ let bit = CtChoice::from_lsb((shift as Word >> i) & 1);
+ result = Uint::ct_select(&result, &result.shr_vartime(1 << i), bit);
+ i += 1;
+ }
+
+ Uint::ct_select(&result, &Self::ZERO, overflow)
+ }
}
impl<const LIMBS: usize> Shr<usize> for Uint<LIMBS> {
@@ -107,7 +123,7 @@ impl<const LIMBS: usize> Shr<usize> for Uint<LIMBS> {
/// When used with a fixed `rhs`, this function is constant-time with respect
/// to `self`.
fn shr(self, rhs: usize) -> Uint<LIMBS> {
- self.shr_vartime(rhs)
+ Uint::<LIMBS>::shr(&self, rhs)
}
}
@@ -119,13 +135,13 @@ impl<const LIMBS: usize> Shr<usize> for &Uint<LIMBS> {
/// When used with a fixed `rhs`, this function is constant-time with respect
/// to `self`.
fn shr(self, rhs: usize) -> Uint<LIMBS> {
- self.shr_vartime(rhs)
+ self.shr(rhs)
}
}
impl<const LIMBS: usize> ShrAssign<usize> for Uint<LIMBS> {
fn shr_assign(&mut self, rhs: usize) {
- *self = self.shr_vartime(rhs);
+ *self = self.shr(rhs);
}
}
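
Together with the matching change in `shl.rs`, the `>>`, `<<` and shift-assign operators now route through `Uint::shr`/`Uint::shl` rather than the `_vartime` variants, and out-of-range shifts yield zero. A short usage sketch:

use crypto_bigint::U256;

fn main() {
    let x = U256::from(0xf0u64);

    // The operators now use the constant-time shifts.
    assert_eq!(x >> 4, U256::from(0x0fu64));
    assert_eq!(x << 4, U256::from(0xf00u64));

    // Shifting by the full width or more yields zero.
    assert_eq!(x >> 256, U256::ZERO);
    assert_eq!(x << 256, U256::ZERO);
}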
diff --git a/vendor/crypto-bigint/src/uint/split.rs b/vendor/crypto-bigint/src/uint/split.rs
index b681a6154..e6909743c 100644
--- a/vendor/crypto-bigint/src/uint/split.rs
+++ b/vendor/crypto-bigint/src/uint/split.rs
@@ -1,48 +1,27 @@
-// TODO(tarcieri): use `const_evaluatable_checked` when stable to make generic around bits.
-macro_rules! impl_split {
- ($(($name:ident, $bits:expr)),+) => {
- $(
- impl $name {
- /// Split this number in half, returning its high and low components
- /// respectively.
- pub const fn split(&self) -> (Uint<{nlimbs!($bits) / 2}>, Uint<{nlimbs!($bits) / 2}>) {
- let mut lo = [Limb::ZERO; nlimbs!($bits) / 2];
- let mut hi = [Limb::ZERO; nlimbs!($bits) / 2];
- let mut i = 0;
- let mut j = 0;
-
- while j < (nlimbs!($bits) / 2) {
- lo[j] = self.limbs[i];
- i += 1;
- j += 1;
- }
-
- j = 0;
- while j < (nlimbs!($bits) / 2) {
- hi[j] = self.limbs[i];
- i += 1;
- j += 1;
- }
-
- (Uint { limbs: hi }, Uint { limbs: lo })
- }
- }
-
- impl Split for $name {
- type Output = Uint<{nlimbs!($bits) / 2}>;
-
- fn split(&self) -> (Self::Output, Self::Output) {
- self.split()
- }
- }
+use crate::{Limb, Uint};
+
+/// Split this number in half, returning its high and low components
+/// respectively.
+#[inline]
+pub(crate) const fn split_mixed<const L: usize, const H: usize, const O: usize>(
+ n: &Uint<O>,
+) -> (Uint<H>, Uint<L>) {
+ let top = L + H;
+ let top = if top < O { top } else { O };
+ let mut lo = [Limb::ZERO; L];
+ let mut hi = [Limb::ZERO; H];
+ let mut i = 0;
+
+ while i < top {
+ if i < L {
+ lo[i] = n.limbs[i];
+ } else {
+ hi[i - L] = n.limbs[i];
+ }
+ i += 1;
+ }
- impl From<$name> for (Uint<{nlimbs!($bits) / 2}>, Uint<{nlimbs!($bits) / 2}>) {
- fn from(num: $name) -> (Uint<{nlimbs!($bits) / 2}>, Uint<{nlimbs!($bits) / 2}>) {
- num.split()
- }
- }
- )+
- };
+ (Uint { limbs: hi }, Uint { limbs: lo })
}
#[cfg(test)]
diff --git a/vendor/crypto-bigint/src/uint/sqrt.rs b/vendor/crypto-bigint/src/uint/sqrt.rs
index 56815e2de..5c96afb1a 100644
--- a/vendor/crypto-bigint/src/uint/sqrt.rs
+++ b/vendor/crypto-bigint/src/uint/sqrt.rs
@@ -5,11 +5,20 @@ use crate::{Limb, Word};
use subtle::{ConstantTimeEq, CtOption};
impl<const LIMBS: usize> Uint<LIMBS> {
+ /// See [`Self::sqrt_vartime`].
+ #[deprecated(
+ since = "0.5.3",
+ note = "This functionality will be moved to `sqrt_vartime` in a future release."
+ )]
+ pub const fn sqrt(&self) -> Self {
+ self.sqrt_vartime()
+ }
+
/// Computes √(`self`)
/// Uses Brent & Zimmermann, Modern Computer Arithmetic, v0.5.9, Algorithm 1.13
///
/// Callers can check if `self` is a square by squaring the result
- pub const fn sqrt(&self) -> Self {
+ pub const fn sqrt_vartime(&self) -> Self {
let max_bits = (self.bits_vartime() + 1) >> 1;
let cap = Self::ONE.shl_vartime(max_bits);
let mut guess = cap; // ≥ √(`self`)
@@ -47,17 +56,35 @@ impl<const LIMBS: usize> Uint<LIMBS> {
Self::ct_select(&Self::ZERO, &guess, self.ct_is_nonzero())
}
+ /// See [`Self::wrapping_sqrt_vartime`].
+ #[deprecated(
+ since = "0.5.3",
+ note = "This functionality will be moved to `wrapping_sqrt_vartime` in a future release."
+ )]
+ pub const fn wrapping_sqrt(&self) -> Self {
+ self.wrapping_sqrt_vartime()
+ }
+
/// Wrapped sqrt is just normal √(`self`)
/// There’s no way wrapping could ever happen.
/// This function exists, so that all operations are accounted for in the wrapping operations.
- pub const fn wrapping_sqrt(&self) -> Self {
- self.sqrt()
+ pub const fn wrapping_sqrt_vartime(&self) -> Self {
+ self.sqrt_vartime()
+ }
+
+ /// See [`Self::checked_sqrt_vartime`].
+ #[deprecated(
+ since = "0.5.3",
+ note = "This functionality will be moved to `checked_sqrt_vartime` in a future release."
+ )]
+ pub fn checked_sqrt(&self) -> CtOption<Self> {
+ self.checked_sqrt_vartime()
}
/// Perform checked sqrt, returning a [`CtOption`] which `is_some`
/// only if the √(`self`)² == self
- pub fn checked_sqrt(&self) -> CtOption<Self> {
- let r = self.sqrt();
+ pub fn checked_sqrt_vartime(&self) -> CtOption<Self> {
+ let r = self.sqrt_vartime();
let s = r.wrapping_mul(&r);
CtOption::new(r, ConstantTimeEq::ct_eq(self, &s))
}
@@ -76,13 +103,13 @@ mod tests {
#[test]
fn edge() {
- assert_eq!(U256::ZERO.sqrt(), U256::ZERO);
- assert_eq!(U256::ONE.sqrt(), U256::ONE);
+ assert_eq!(U256::ZERO.sqrt_vartime(), U256::ZERO);
+ assert_eq!(U256::ONE.sqrt_vartime(), U256::ONE);
let mut half = U256::ZERO;
for i in 0..half.limbs.len() / 2 {
half.limbs[i] = Limb::MAX;
}
- assert_eq!(U256::MAX.sqrt(), half,);
+ assert_eq!(U256::MAX.sqrt_vartime(), half,);
}
#[test]
@@ -104,22 +131,28 @@ mod tests {
for (a, e) in &tests {
let l = U256::from(*a);
let r = U256::from(*e);
- assert_eq!(l.sqrt(), r);
- assert_eq!(l.checked_sqrt().is_some().unwrap_u8(), 1u8);
+ assert_eq!(l.sqrt_vartime(), r);
+ assert_eq!(l.checked_sqrt_vartime().is_some().unwrap_u8(), 1u8);
}
}
#[test]
fn nonsquares() {
- assert_eq!(U256::from(2u8).sqrt(), U256::from(1u8));
- assert_eq!(U256::from(2u8).checked_sqrt().is_some().unwrap_u8(), 0);
- assert_eq!(U256::from(3u8).sqrt(), U256::from(1u8));
- assert_eq!(U256::from(3u8).checked_sqrt().is_some().unwrap_u8(), 0);
- assert_eq!(U256::from(5u8).sqrt(), U256::from(2u8));
- assert_eq!(U256::from(6u8).sqrt(), U256::from(2u8));
- assert_eq!(U256::from(7u8).sqrt(), U256::from(2u8));
- assert_eq!(U256::from(8u8).sqrt(), U256::from(2u8));
- assert_eq!(U256::from(10u8).sqrt(), U256::from(3u8));
+ assert_eq!(U256::from(2u8).sqrt_vartime(), U256::from(1u8));
+ assert_eq!(
+ U256::from(2u8).checked_sqrt_vartime().is_some().unwrap_u8(),
+ 0
+ );
+ assert_eq!(U256::from(3u8).sqrt_vartime(), U256::from(1u8));
+ assert_eq!(
+ U256::from(3u8).checked_sqrt_vartime().is_some().unwrap_u8(),
+ 0
+ );
+ assert_eq!(U256::from(5u8).sqrt_vartime(), U256::from(2u8));
+ assert_eq!(U256::from(6u8).sqrt_vartime(), U256::from(2u8));
+ assert_eq!(U256::from(7u8).sqrt_vartime(), U256::from(2u8));
+ assert_eq!(U256::from(8u8).sqrt_vartime(), U256::from(2u8));
+ assert_eq!(U256::from(10u8).sqrt_vartime(), U256::from(3u8));
}
#[cfg(feature = "rand")]
@@ -130,15 +163,15 @@ mod tests {
let t = rng.next_u32() as u64;
let s = U256::from(t);
let s2 = s.checked_mul(&s).unwrap();
- assert_eq!(s2.sqrt(), s);
- assert_eq!(s2.checked_sqrt().is_some().unwrap_u8(), 1);
+ assert_eq!(s2.sqrt_vartime(), s);
+ assert_eq!(s2.checked_sqrt_vartime().is_some().unwrap_u8(), 1);
}
for _ in 0..50 {
let s = U256::random(&mut rng);
let mut s2 = U512::ZERO;
s2.limbs[..s.limbs.len()].copy_from_slice(&s.limbs);
- assert_eq!(s.square().sqrt(), s2);
+ assert_eq!(s.square().sqrt_vartime(), s2);
}
}
}
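
`sqrt_vartime` follows the cited Brent & Zimmermann algorithm: start from a power of two at least as large as the root and refine with the Newton (Heron) step until the guess stops decreasing. A plain-integer sketch of that iteration for u128, under the assumption that the loop body not shown in this hunk is the standard Newton step:

// Integer square root of a u128 by the Newton (Heron) iteration:
// start above the root and refine with x_{k+1} = (x_k + n / x_k) / 2
// until the sequence stops decreasing.
fn isqrt(n: u128) -> u128 {
    if n == 0 {
        return 0;
    }
    // Initial guess 2^ceil(bits(n)/2) >= floor(sqrt(n)).
    let max_bits = (128 - n.leading_zeros() + 1) >> 1;
    let mut guess = 1u128 << max_bits;
    loop {
        let next = (guess + n / guess) >> 1;
        if next >= guess {
            return guess;
        }
        guess = next;
    }
}

fn main() {
    assert_eq!(isqrt(0), 0);
    assert_eq!(isqrt(1), 1);
    assert_eq!(isqrt(10), 3);
    // floor(sqrt(2^64 - 1)) == 2^32 - 1
    assert_eq!(isqrt((1u128 << 64) - 1), (1u128 << 32) - 1);
}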
diff --git a/vendor/crypto-bigint/src/uint/sub.rs b/vendor/crypto-bigint/src/uint/sub.rs
index c39e54922..571dd6aa5 100644
--- a/vendor/crypto-bigint/src/uint/sub.rs
+++ b/vendor/crypto-bigint/src/uint/sub.rs
@@ -25,12 +25,7 @@ impl<const LIMBS: usize> Uint<LIMBS> {
/// Perform saturating subtraction, returning `ZERO` on underflow.
pub const fn saturating_sub(&self, rhs: &Self) -> Self {
let (res, underflow) = self.sbb(rhs, Limb::ZERO);
-
- if underflow.0 == 0 {
- res
- } else {
- Self::ZERO
- }
+ Self::ct_select(&res, &Self::ZERO, CtChoice::from_mask(underflow.0))
}
/// Perform wrapping subtraction, discarding underflow and wrapping around
@@ -181,6 +176,22 @@ mod tests {
}
#[test]
+ fn saturating_sub_no_borrow() {
+ assert_eq!(
+ U128::from(5u64).saturating_sub(&U128::ONE),
+ U128::from(4u64)
+ );
+ }
+
+ #[test]
+ fn saturating_sub_with_borrow() {
+ assert_eq!(
+ U128::from(4u64).saturating_sub(&U128::from(5u64)),
+ U128::ZERO
+ );
+ }
+
+ #[test]
fn wrapping_sub_no_borrow() {
assert_eq!(U128::ONE.wrapping_sub(&U128::ONE), U128::ZERO);
}
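
The borrow that `sbb` returns is a full-word mask (all ones on underflow), which is why `CtChoice::from_mask` is the right conversion here; the selected result is then either the difference or zero, with no branch. A branch-free u64 sketch of the same idea:

// Branchless saturating subtraction for u64, using the same approach as
// `Uint::saturating_sub`: turn the borrow into an all-ones/all-zeros mask
// and use it to select between the difference and zero.
fn saturating_sub_ct(a: u64, b: u64) -> u64 {
    let (diff, borrow) = a.overflowing_sub(b);
    let mask = 0u64.wrapping_sub(borrow as u64); // all ones iff a < b
    diff & !mask // the difference if no borrow, zero otherwise
}

fn main() {
    assert_eq!(saturating_sub_ct(5, 1), 4);
    assert_eq!(saturating_sub_ct(4, 5), 0);
    assert_eq!(saturating_sub_ct(0, u64::MAX), 0);
}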
diff --git a/vendor/crypto-bigint/src/uint/sub_mod.rs b/vendor/crypto-bigint/src/uint/sub_mod.rs
index b32babb89..936c6d7a0 100644
--- a/vendor/crypto-bigint/src/uint/sub_mod.rs
+++ b/vendor/crypto-bigint/src/uint/sub_mod.rs
@@ -3,7 +3,7 @@
use crate::{Limb, SubMod, Uint};
impl<const LIMBS: usize> Uint<LIMBS> {
- /// Computes `self - rhs mod p` in constant time.
+ /// Computes `self - rhs mod p`.
///
/// Assumes `self - rhs` as unbounded signed integer is in `[-p, p)`.
pub const fn sub_mod(&self, rhs: &Uint<LIMBS>, p: &Uint<LIMBS>) -> Uint<LIMBS> {
@@ -34,7 +34,7 @@ impl<const LIMBS: usize> Uint<LIMBS> {
out.wrapping_add(&p.bitand(&mask))
}
- /// Computes `self - rhs mod p` in constant time for the special modulus
+ /// Computes `self - rhs mod p` for the special modulus
/// `p = MAX+1-c` where `c` is small enough to fit in a single [`Limb`].
///
/// Assumes `self - rhs` as unbounded signed integer is in `[-p, p)`.