path: root/vendor/crypto-bigint/src/uint
author      Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-18 02:49:50 +0000
committer   Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-18 02:49:50 +0000
commit      9835e2ae736235810b4ea1c162ca5e65c547e770 (patch)
tree        3fcebf40ed70e581d776a8a4c65923e8ec20e026 /vendor/crypto-bigint/src/uint
parent      Releasing progress-linux version 1.70.0+dfsg2-1~progress7.99u1. (diff)
download    rustc-9835e2ae736235810b4ea1c162ca5e65c547e770.tar.xz
            rustc-9835e2ae736235810b4ea1c162ca5e65c547e770.zip
Merging upstream version 1.71.1+dfsg1.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'vendor/crypto-bigint/src/uint')
-rw-r--r--  vendor/crypto-bigint/src/uint/add.rs | 72
-rw-r--r--  vendor/crypto-bigint/src/uint/add_mod.rs | 34
-rw-r--r--  vendor/crypto-bigint/src/uint/array.rs | 42
-rw-r--r--  vendor/crypto-bigint/src/uint/bit_and.rs | 59
-rw-r--r--  vendor/crypto-bigint/src/uint/bit_not.rs | 11
-rw-r--r--  vendor/crypto-bigint/src/uint/bit_or.rs | 59
-rw-r--r--  vendor/crypto-bigint/src/uint/bit_xor.rs | 59
-rw-r--r--  vendor/crypto-bigint/src/uint/bits.rs | 167
-rw-r--r--  vendor/crypto-bigint/src/uint/cmp.rs | 122
-rw-r--r--  vendor/crypto-bigint/src/uint/concat.rs | 21
-rw-r--r--  vendor/crypto-bigint/src/uint/div.rs | 600
-rw-r--r--  vendor/crypto-bigint/src/uint/div_limb.rs | 287
-rw-r--r--  vendor/crypto-bigint/src/uint/encoding.rs | 104
-rw-r--r--  vendor/crypto-bigint/src/uint/encoding/der.rs | 47
-rw-r--r--  vendor/crypto-bigint/src/uint/encoding/rlp.rs | 8
-rw-r--r--  vendor/crypto-bigint/src/uint/from.rs | 76
-rw-r--r--  vendor/crypto-bigint/src/uint/inv_mod.rs | 189
-rw-r--r--  vendor/crypto-bigint/src/uint/modular.rs | 163
-rw-r--r--  vendor/crypto-bigint/src/uint/modular/add.rs | 9
-rw-r--r--  vendor/crypto-bigint/src/uint/modular/constant_mod.rs | 189
-rw-r--r--  vendor/crypto-bigint/src/uint/modular/constant_mod/const_add.rs | 98
-rw-r--r--  vendor/crypto-bigint/src/uint/modular/constant_mod/const_inv.rs | 69
-rw-r--r--  vendor/crypto-bigint/src/uint/modular/constant_mod/const_mul.rs | 94
-rw-r--r--  vendor/crypto-bigint/src/uint/modular/constant_mod/const_neg.rs | 48
-rw-r--r--  vendor/crypto-bigint/src/uint/modular/constant_mod/const_pow.rs | 98
-rw-r--r--  vendor/crypto-bigint/src/uint/modular/constant_mod/const_sub.rs | 98
-rw-r--r--  vendor/crypto-bigint/src/uint/modular/constant_mod/macros.rs | 50
-rw-r--r--  vendor/crypto-bigint/src/uint/modular/inv.rs | 14
-rw-r--r--  vendor/crypto-bigint/src/uint/modular/mul.rs | 22
-rw-r--r--  vendor/crypto-bigint/src/uint/modular/pow.rs | 79
-rw-r--r--  vendor/crypto-bigint/src/uint/modular/reduction.rs | 56
-rw-r--r--  vendor/crypto-bigint/src/uint/modular/runtime_mod.rs | 107
-rw-r--r--  vendor/crypto-bigint/src/uint/modular/runtime_mod/runtime_add.rs | 92
-rw-r--r--  vendor/crypto-bigint/src/uint/modular/runtime_mod/runtime_inv.rs | 35
-rw-r--r--  vendor/crypto-bigint/src/uint/modular/runtime_mod/runtime_mul.rs | 84
-rw-r--r--  vendor/crypto-bigint/src/uint/modular/runtime_mod/runtime_neg.rs | 24
-rw-r--r--  vendor/crypto-bigint/src/uint/modular/runtime_mod/runtime_pow.rs | 35
-rw-r--r--  vendor/crypto-bigint/src/uint/modular/runtime_mod/runtime_sub.rs | 92
-rw-r--r--  vendor/crypto-bigint/src/uint/modular/sub.rs | 9
-rw-r--r--  vendor/crypto-bigint/src/uint/mul.rs | 150
-rw-r--r--  vendor/crypto-bigint/src/uint/mul_mod.rs | 52
-rw-r--r--  vendor/crypto-bigint/src/uint/neg.rs | 22
-rw-r--r--  vendor/crypto-bigint/src/uint/neg_mod.rs | 10
-rw-r--r--  vendor/crypto-bigint/src/uint/rand.rs | 45
-rw-r--r--  vendor/crypto-bigint/src/uint/resize.rs | 10
-rw-r--r--  vendor/crypto-bigint/src/uint/shl.rs | 144
-rw-r--r--  vendor/crypto-bigint/src/uint/shr.rs | 108
-rw-r--r--  vendor/crypto-bigint/src/uint/split.rs | 10
-rw-r--r--  vendor/crypto-bigint/src/uint/sqrt.rs | 19
-rw-r--r--  vendor/crypto-bigint/src/uint/sub.rs | 74
-rw-r--r--  vendor/crypto-bigint/src/uint/sub_mod.rs | 48
51 files changed, 3427 insertions, 787 deletions
diff --git a/vendor/crypto-bigint/src/uint/add.rs b/vendor/crypto-bigint/src/uint/add.rs
index 2822e9e67..21aa5d578 100644
--- a/vendor/crypto-bigint/src/uint/add.rs
+++ b/vendor/crypto-bigint/src/uint/add.rs
@@ -1,10 +1,10 @@
-//! [`UInt`] addition operations.
+//! [`Uint`] addition operations.
-use crate::{Checked, CheckedAdd, Limb, UInt, Wrapping, Zero};
+use crate::{Checked, CheckedAdd, CtChoice, Limb, Uint, Wrapping, Zero};
use core::ops::{Add, AddAssign};
use subtle::CtOption;
-impl<const LIMBS: usize> UInt<LIMBS> {
+impl<const LIMBS: usize> Uint<LIMBS> {
/// Computes `a + b + carry`, returning the result along with the new carry.
#[inline(always)]
pub const fn adc(&self, rhs: &Self, mut carry: Limb) -> (Self, Limb) {
@@ -36,9 +36,21 @@ impl<const LIMBS: usize> UInt<LIMBS> {
pub const fn wrapping_add(&self, rhs: &Self) -> Self {
self.adc(rhs, Limb::ZERO).0
}
+
+ /// Perform wrapping addition, returning the truthy value as the second element of the tuple
+ /// if an overflow has occurred.
+ pub(crate) const fn conditional_wrapping_add(
+ &self,
+ rhs: &Self,
+ choice: CtChoice,
+ ) -> (Self, CtChoice) {
+ let actual_rhs = Uint::ct_select(&Uint::ZERO, rhs, choice);
+ let (sum, carry) = self.adc(&actual_rhs, Limb::ZERO);
+ (sum, CtChoice::from_lsb(carry.0))
+ }
}
-impl<const LIMBS: usize> CheckedAdd<&UInt<LIMBS>> for UInt<LIMBS> {
+impl<const LIMBS: usize> CheckedAdd<&Uint<LIMBS>> for Uint<LIMBS> {
type Output = Self;
fn checked_add(&self, rhs: &Self) -> CtOption<Self> {
@@ -47,54 +59,54 @@ impl<const LIMBS: usize> CheckedAdd<&UInt<LIMBS>> for UInt<LIMBS> {
}
}
-impl<const LIMBS: usize> Add for Wrapping<UInt<LIMBS>> {
+impl<const LIMBS: usize> Add for Wrapping<Uint<LIMBS>> {
type Output = Self;
- fn add(self, rhs: Self) -> Wrapping<UInt<LIMBS>> {
+ fn add(self, rhs: Self) -> Wrapping<Uint<LIMBS>> {
Wrapping(self.0.wrapping_add(&rhs.0))
}
}
-impl<const LIMBS: usize> Add<&Wrapping<UInt<LIMBS>>> for Wrapping<UInt<LIMBS>> {
- type Output = Wrapping<UInt<LIMBS>>;
+impl<const LIMBS: usize> Add<&Wrapping<Uint<LIMBS>>> for Wrapping<Uint<LIMBS>> {
+ type Output = Wrapping<Uint<LIMBS>>;
- fn add(self, rhs: &Wrapping<UInt<LIMBS>>) -> Wrapping<UInt<LIMBS>> {
+ fn add(self, rhs: &Wrapping<Uint<LIMBS>>) -> Wrapping<Uint<LIMBS>> {
Wrapping(self.0.wrapping_add(&rhs.0))
}
}
-impl<const LIMBS: usize> Add<Wrapping<UInt<LIMBS>>> for &Wrapping<UInt<LIMBS>> {
- type Output = Wrapping<UInt<LIMBS>>;
+impl<const LIMBS: usize> Add<Wrapping<Uint<LIMBS>>> for &Wrapping<Uint<LIMBS>> {
+ type Output = Wrapping<Uint<LIMBS>>;
- fn add(self, rhs: Wrapping<UInt<LIMBS>>) -> Wrapping<UInt<LIMBS>> {
+ fn add(self, rhs: Wrapping<Uint<LIMBS>>) -> Wrapping<Uint<LIMBS>> {
Wrapping(self.0.wrapping_add(&rhs.0))
}
}
-impl<const LIMBS: usize> Add<&Wrapping<UInt<LIMBS>>> for &Wrapping<UInt<LIMBS>> {
- type Output = Wrapping<UInt<LIMBS>>;
+impl<const LIMBS: usize> Add<&Wrapping<Uint<LIMBS>>> for &Wrapping<Uint<LIMBS>> {
+ type Output = Wrapping<Uint<LIMBS>>;
- fn add(self, rhs: &Wrapping<UInt<LIMBS>>) -> Wrapping<UInt<LIMBS>> {
+ fn add(self, rhs: &Wrapping<Uint<LIMBS>>) -> Wrapping<Uint<LIMBS>> {
Wrapping(self.0.wrapping_add(&rhs.0))
}
}
-impl<const LIMBS: usize> AddAssign for Wrapping<UInt<LIMBS>> {
+impl<const LIMBS: usize> AddAssign for Wrapping<Uint<LIMBS>> {
fn add_assign(&mut self, other: Self) {
*self = *self + other;
}
}
-impl<const LIMBS: usize> AddAssign<&Wrapping<UInt<LIMBS>>> for Wrapping<UInt<LIMBS>> {
+impl<const LIMBS: usize> AddAssign<&Wrapping<Uint<LIMBS>>> for Wrapping<Uint<LIMBS>> {
fn add_assign(&mut self, other: &Self) {
*self = *self + other;
}
}
-impl<const LIMBS: usize> Add for Checked<UInt<LIMBS>> {
+impl<const LIMBS: usize> Add for Checked<Uint<LIMBS>> {
type Output = Self;
- fn add(self, rhs: Self) -> Checked<UInt<LIMBS>> {
+ fn add(self, rhs: Self) -> Checked<Uint<LIMBS>> {
Checked(
self.0
.and_then(|lhs| rhs.0.and_then(|rhs| lhs.checked_add(&rhs))),
@@ -102,10 +114,10 @@ impl<const LIMBS: usize> Add for Checked<UInt<LIMBS>> {
}
}
-impl<const LIMBS: usize> Add<&Checked<UInt<LIMBS>>> for Checked<UInt<LIMBS>> {
- type Output = Checked<UInt<LIMBS>>;
+impl<const LIMBS: usize> Add<&Checked<Uint<LIMBS>>> for Checked<Uint<LIMBS>> {
+ type Output = Checked<Uint<LIMBS>>;
- fn add(self, rhs: &Checked<UInt<LIMBS>>) -> Checked<UInt<LIMBS>> {
+ fn add(self, rhs: &Checked<Uint<LIMBS>>) -> Checked<Uint<LIMBS>> {
Checked(
self.0
.and_then(|lhs| rhs.0.and_then(|rhs| lhs.checked_add(&rhs))),
@@ -113,10 +125,10 @@ impl<const LIMBS: usize> Add<&Checked<UInt<LIMBS>>> for Checked<UInt<LIMBS>> {
}
}
-impl<const LIMBS: usize> Add<Checked<UInt<LIMBS>>> for &Checked<UInt<LIMBS>> {
- type Output = Checked<UInt<LIMBS>>;
+impl<const LIMBS: usize> Add<Checked<Uint<LIMBS>>> for &Checked<Uint<LIMBS>> {
+ type Output = Checked<Uint<LIMBS>>;
- fn add(self, rhs: Checked<UInt<LIMBS>>) -> Checked<UInt<LIMBS>> {
+ fn add(self, rhs: Checked<Uint<LIMBS>>) -> Checked<Uint<LIMBS>> {
Checked(
self.0
.and_then(|lhs| rhs.0.and_then(|rhs| lhs.checked_add(&rhs))),
@@ -124,10 +136,10 @@ impl<const LIMBS: usize> Add<Checked<UInt<LIMBS>>> for &Checked<UInt<LIMBS>> {
}
}
-impl<const LIMBS: usize> Add<&Checked<UInt<LIMBS>>> for &Checked<UInt<LIMBS>> {
- type Output = Checked<UInt<LIMBS>>;
+impl<const LIMBS: usize> Add<&Checked<Uint<LIMBS>>> for &Checked<Uint<LIMBS>> {
+ type Output = Checked<Uint<LIMBS>>;
- fn add(self, rhs: &Checked<UInt<LIMBS>>) -> Checked<UInt<LIMBS>> {
+ fn add(self, rhs: &Checked<Uint<LIMBS>>) -> Checked<Uint<LIMBS>> {
Checked(
self.0
.and_then(|lhs| rhs.0.and_then(|rhs| lhs.checked_add(&rhs))),
@@ -135,13 +147,13 @@ impl<const LIMBS: usize> Add<&Checked<UInt<LIMBS>>> for &Checked<UInt<LIMBS>> {
}
}
-impl<const LIMBS: usize> AddAssign for Checked<UInt<LIMBS>> {
+impl<const LIMBS: usize> AddAssign for Checked<Uint<LIMBS>> {
fn add_assign(&mut self, other: Self) {
*self = *self + other;
}
}
-impl<const LIMBS: usize> AddAssign<&Checked<UInt<LIMBS>>> for Checked<UInt<LIMBS>> {
+impl<const LIMBS: usize> AddAssign<&Checked<Uint<LIMBS>>> for Checked<Uint<LIMBS>> {
fn add_assign(&mut self, other: &Self) {
*self = *self + other;
}
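
Not part of the diff: a minimal usage sketch of the renamed `Uint` addition API these hunks target, assuming the crypto-bigint 0.5-series re-exports (`U256`, `Limb`, `Wrapping`, `CheckedAdd`).

use crypto_bigint::{CheckedAdd, Limb, U256, Wrapping};

fn main() {
    let a = U256::from(7u64);
    let b = U256::from(5u64);

    // Low-level add-with-carry: returns (sum, carry).
    let (sum, carry) = a.adc(&b, Limb::ZERO);
    assert_eq!(sum, U256::from(12u64));
    assert_eq!(carry, Limb::ZERO);

    // Wrapping and checked wrappers are built on top of `adc`.
    assert_eq!((Wrapping(a) + Wrapping(b)).0, U256::from(12u64));
    assert!(bool::from(U256::MAX.checked_add(&U256::ONE).is_none()));
}
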
diff --git a/vendor/crypto-bigint/src/uint/add_mod.rs b/vendor/crypto-bigint/src/uint/add_mod.rs
index 3486a0a57..bfdda6ff5 100644
--- a/vendor/crypto-bigint/src/uint/add_mod.rs
+++ b/vendor/crypto-bigint/src/uint/add_mod.rs
@@ -1,12 +1,12 @@
-//! [`UInt`] addition modulus operations.
+//! [`Uint`] addition modulus operations.
-use crate::{AddMod, Limb, UInt};
+use crate::{AddMod, Limb, Uint};
-impl<const LIMBS: usize> UInt<LIMBS> {
+impl<const LIMBS: usize> Uint<LIMBS> {
/// Computes `self + rhs mod p` in constant time.
///
/// Assumes `self + rhs` as unbounded integer is `< 2p`.
- pub const fn add_mod(&self, rhs: &UInt<LIMBS>, p: &UInt<LIMBS>) -> UInt<LIMBS> {
+ pub const fn add_mod(&self, rhs: &Uint<LIMBS>, p: &Uint<LIMBS>) -> Uint<LIMBS> {
let (w, carry) = self.adc(rhs, Limb::ZERO);
// Attempt to subtract the modulus, to ensure the result is in the field.
@@ -36,19 +36,19 @@ impl<const LIMBS: usize> UInt<LIMBS> {
///
/// Assumes `self + rhs` as unbounded integer is `< 2p`.
pub const fn add_mod_special(&self, rhs: &Self, c: Limb) -> Self {
- // `UInt::adc` also works with a carry greater than 1.
+ // `Uint::adc` also works with a carry greater than 1.
let (out, carry) = self.adc(rhs, c);
// If overflow occurred, then above addition of `c` already accounts
// for the overflow. Otherwise, we need to subtract `c` again, which
// in that case cannot underflow.
let l = carry.0.wrapping_sub(1) & c.0;
- let (out, _) = out.sbb(&UInt::from_word(l), Limb::ZERO);
+ let (out, _) = out.sbb(&Uint::from_word(l), Limb::ZERO);
out
}
}
-impl<const LIMBS: usize> AddMod for UInt<LIMBS> {
+impl<const LIMBS: usize> AddMod for Uint<LIMBS> {
type Output = Self;
fn add_mod(&self, rhs: &Self, p: &Self) -> Self {
@@ -60,7 +60,7 @@ impl<const LIMBS: usize> AddMod for UInt<LIMBS> {
#[cfg(all(test, feature = "rand"))]
mod tests {
- use crate::{Limb, NonZero, Random, RandomMod, UInt, U256};
+ use crate::{Limb, NonZero, Random, RandomMod, Uint, U256};
use rand_core::SeedableRng;
// TODO(tarcieri): additional tests + proptests
@@ -92,17 +92,17 @@ mod tests {
];
for special in &moduli {
- let p = &NonZero::new(UInt::ZERO.wrapping_sub(&UInt::from_word(special.0)))
+ let p = &NonZero::new(Uint::ZERO.wrapping_sub(&Uint::from_word(special.0)))
.unwrap();
- let minus_one = p.wrapping_sub(&UInt::ONE);
+ let minus_one = p.wrapping_sub(&Uint::ONE);
let base_cases = [
- (UInt::ZERO, UInt::ZERO, UInt::ZERO),
- (UInt::ONE, UInt::ZERO, UInt::ONE),
- (UInt::ZERO, UInt::ONE, UInt::ONE),
- (minus_one, UInt::ONE, UInt::ZERO),
- (UInt::ONE, minus_one, UInt::ZERO),
+ (Uint::ZERO, Uint::ZERO, Uint::ZERO),
+ (Uint::ONE, Uint::ZERO, Uint::ONE),
+ (Uint::ZERO, Uint::ONE, Uint::ONE),
+ (minus_one, Uint::ONE, Uint::ZERO),
+ (Uint::ONE, minus_one, Uint::ZERO),
];
for (a, b, c) in &base_cases {
let x = a.add_mod_special(b, *special.as_ref());
@@ -110,8 +110,8 @@ mod tests {
}
for _i in 0..100 {
- let a = UInt::<$size>::random_mod(&mut rng, p);
- let b = UInt::<$size>::random_mod(&mut rng, p);
+ let a = Uint::<$size>::random_mod(&mut rng, p);
+ let b = Uint::<$size>::random_mod(&mut rng, p);
let c = a.add_mod_special(&b, *special.as_ref());
assert!(c < **p, "not reduced: {} >= {} ", c, p);
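
Not part of the diff: a small sketch of the modular-addition helper touched above, assuming the 0.5-series `U256` alias. `add_mod` requires that `self + rhs < 2p` as an unbounded integer.

use crypto_bigint::U256;

fn main() {
    let p = U256::from(13u64);
    let a = U256::from(10u64);
    let b = U256::from(9u64);

    // 10 + 9 = 19 < 2 * 13, so one conditional subtraction suffices: 19 - 13 = 6.
    assert_eq!(a.add_mod(&b, &p), U256::from(6u64));
}
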
diff --git a/vendor/crypto-bigint/src/uint/array.rs b/vendor/crypto-bigint/src/uint/array.rs
index cba2b3716..36f165e23 100644
--- a/vendor/crypto-bigint/src/uint/array.rs
+++ b/vendor/crypto-bigint/src/uint/array.rs
@@ -1,4 +1,4 @@
-//! `generic-array` integration with `UInt`.
+//! `generic-array` integration with `Uint`.
// TODO(tarcieri): completely phase out `generic-array` when const generics are powerful enough
use crate::{ArrayDecoding, ArrayEncoding, ByteArray};
@@ -7,7 +7,6 @@ use generic_array::{typenum, GenericArray};
macro_rules! impl_uint_array_encoding {
($(($uint:ident, $bytes:path)),+) => {
$(
- #[cfg_attr(docsrs, doc(cfg(feature = "generic-array")))]
impl ArrayEncoding for super::$uint {
type ByteSize = $bytes;
@@ -36,7 +35,6 @@ macro_rules! impl_uint_array_encoding {
}
}
- #[cfg_attr(docsrs, doc(cfg(feature = "generic-array")))]
impl ArrayDecoding for GenericArray<u8, $bytes> {
type Output = super::$uint;
@@ -75,33 +73,39 @@ impl_uint_array_encoding! {
(U8192, typenum::U1024)
}
+#[cfg(target_pointer_width = "32")]
+impl_uint_array_encoding! {
+ (U224, typenum::U28), // For NIST P-224
+ (U544, typenum::U68) // For NIST P-521
+}
+
#[cfg(test)]
mod tests {
use crate::{ArrayDecoding, ArrayEncoding, Limb};
use hex_literal::hex;
#[cfg(target_pointer_width = "32")]
- use crate::U64 as UIntEx;
+ use crate::U64 as UintEx;
#[cfg(target_pointer_width = "64")]
- use crate::U128 as UIntEx;
+ use crate::U128 as UintEx;
- /// Byte array that corresponds to `UIntEx`
- type ByteArray = crate::ByteArray<UIntEx>;
+ /// Byte array that corresponds to `UintEx`
+ type ByteArray = crate::ByteArray<UintEx>;
#[test]
#[cfg(target_pointer_width = "32")]
fn from_be_byte_array() {
- let n = UIntEx::from_be_byte_array(hex!("0011223344556677").into());
- assert_eq!(n.limbs(), &[Limb(0x44556677), Limb(0x00112233)]);
+ let n = UintEx::from_be_byte_array(hex!("0011223344556677").into());
+ assert_eq!(n.as_limbs(), &[Limb(0x44556677), Limb(0x00112233)]);
}
#[test]
#[cfg(target_pointer_width = "64")]
fn from_be_byte_array() {
- let n = UIntEx::from_be_byte_array(hex!("00112233445566778899aabbccddeeff").into());
+ let n = UintEx::from_be_byte_array(hex!("00112233445566778899aabbccddeeff").into());
assert_eq!(
- n.limbs(),
+ n.as_limbs(),
&[Limb(0x8899aabbccddeeff), Limb(0x0011223344556677)]
);
}
@@ -109,16 +113,16 @@ mod tests {
#[test]
#[cfg(target_pointer_width = "32")]
fn from_le_byte_array() {
- let n = UIntEx::from_le_byte_array(hex!("7766554433221100").into());
- assert_eq!(n.limbs(), &[Limb(0x44556677), Limb(0x00112233)]);
+ let n = UintEx::from_le_byte_array(hex!("7766554433221100").into());
+ assert_eq!(n.as_limbs(), &[Limb(0x44556677), Limb(0x00112233)]);
}
#[test]
#[cfg(target_pointer_width = "64")]
fn from_le_byte_array() {
- let n = UIntEx::from_le_byte_array(hex!("ffeeddccbbaa99887766554433221100").into());
+ let n = UintEx::from_le_byte_array(hex!("ffeeddccbbaa99887766554433221100").into());
assert_eq!(
- n.limbs(),
+ n.as_limbs(),
&[Limb(0x8899aabbccddeeff), Limb(0x0011223344556677)]
);
}
@@ -127,7 +131,7 @@ mod tests {
#[cfg(target_pointer_width = "32")]
fn to_be_byte_array() {
let expected_bytes = ByteArray::from(hex!("0011223344556677"));
- let actual_bytes = UIntEx::from_be_byte_array(expected_bytes).to_be_byte_array();
+ let actual_bytes = UintEx::from_be_byte_array(expected_bytes).to_be_byte_array();
assert_eq!(expected_bytes, actual_bytes);
}
@@ -135,7 +139,7 @@ mod tests {
#[cfg(target_pointer_width = "64")]
fn to_be_byte_array() {
let expected_bytes = ByteArray::from(hex!("00112233445566778899aabbccddeeff"));
- let actual_bytes = UIntEx::from_be_byte_array(expected_bytes).to_be_byte_array();
+ let actual_bytes = UintEx::from_be_byte_array(expected_bytes).to_be_byte_array();
assert_eq!(expected_bytes, actual_bytes);
}
@@ -143,7 +147,7 @@ mod tests {
#[cfg(target_pointer_width = "32")]
fn to_le_byte_array() {
let expected_bytes = ByteArray::from(hex!("7766554433221100"));
- let actual_bytes = UIntEx::from_le_byte_array(expected_bytes).to_le_byte_array();
+ let actual_bytes = UintEx::from_le_byte_array(expected_bytes).to_le_byte_array();
assert_eq!(expected_bytes, actual_bytes);
}
@@ -151,7 +155,7 @@ mod tests {
#[cfg(target_pointer_width = "64")]
fn to_le_byte_array() {
let expected_bytes = ByteArray::from(hex!("ffeeddccbbaa99887766554433221100"));
- let actual_bytes = UIntEx::from_le_byte_array(expected_bytes).to_le_byte_array();
+ let actual_bytes = UintEx::from_le_byte_array(expected_bytes).to_le_byte_array();
assert_eq!(expected_bytes, actual_bytes);
}
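
Not part of the diff: a sketch of the `ArrayEncoding` round trip exercised by the tests above, assuming the crate's `generic-array` feature is enabled and `hex-literal` is available as a dev-dependency.

use crypto_bigint::{ArrayEncoding, U128};
use hex_literal::hex;

fn main() {
    let bytes = hex!("00112233445566778899aabbccddeeff");

    // Big-endian byte array -> Uint, and back again.
    let n = U128::from_be_byte_array(bytes.into());
    assert_eq!(n, U128::from_be_hex("00112233445566778899aabbccddeeff"));

    let out = n.to_be_byte_array();
    assert_eq!(out.as_slice(), &bytes[..]);
}
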
diff --git a/vendor/crypto-bigint/src/uint/bit_and.rs b/vendor/crypto-bigint/src/uint/bit_and.rs
index cab89a429..18186fb82 100644
--- a/vendor/crypto-bigint/src/uint/bit_and.rs
+++ b/vendor/crypto-bigint/src/uint/bit_and.rs
@@ -1,11 +1,11 @@
-//! [`UInt`] bitwise and operations.
+//! [`Uint`] bitwise and operations.
-use super::UInt;
+use super::Uint;
use crate::{Limb, Wrapping};
use core::ops::{BitAnd, BitAndAssign};
use subtle::{Choice, CtOption};
-impl<const LIMBS: usize> UInt<LIMBS> {
+impl<const LIMBS: usize> Uint<LIMBS> {
/// Computes bitwise `a & b`.
#[inline(always)]
pub const fn bitand(&self, rhs: &Self) -> Self {
@@ -35,92 +35,93 @@ impl<const LIMBS: usize> UInt<LIMBS> {
}
}
-impl<const LIMBS: usize> BitAnd for UInt<LIMBS> {
+impl<const LIMBS: usize> BitAnd for Uint<LIMBS> {
type Output = Self;
- fn bitand(self, rhs: Self) -> UInt<LIMBS> {
+ fn bitand(self, rhs: Self) -> Uint<LIMBS> {
self.bitand(&rhs)
}
}
-impl<const LIMBS: usize> BitAnd<&UInt<LIMBS>> for UInt<LIMBS> {
- type Output = UInt<LIMBS>;
+impl<const LIMBS: usize> BitAnd<&Uint<LIMBS>> for Uint<LIMBS> {
+ type Output = Uint<LIMBS>;
- fn bitand(self, rhs: &UInt<LIMBS>) -> UInt<LIMBS> {
+ #[allow(clippy::needless_borrow)]
+ fn bitand(self, rhs: &Uint<LIMBS>) -> Uint<LIMBS> {
(&self).bitand(rhs)
}
}
-impl<const LIMBS: usize> BitAnd<UInt<LIMBS>> for &UInt<LIMBS> {
- type Output = UInt<LIMBS>;
+impl<const LIMBS: usize> BitAnd<Uint<LIMBS>> for &Uint<LIMBS> {
+ type Output = Uint<LIMBS>;
- fn bitand(self, rhs: UInt<LIMBS>) -> UInt<LIMBS> {
+ fn bitand(self, rhs: Uint<LIMBS>) -> Uint<LIMBS> {
self.bitand(&rhs)
}
}
-impl<const LIMBS: usize> BitAnd<&UInt<LIMBS>> for &UInt<LIMBS> {
- type Output = UInt<LIMBS>;
+impl<const LIMBS: usize> BitAnd<&Uint<LIMBS>> for &Uint<LIMBS> {
+ type Output = Uint<LIMBS>;
- fn bitand(self, rhs: &UInt<LIMBS>) -> UInt<LIMBS> {
+ fn bitand(self, rhs: &Uint<LIMBS>) -> Uint<LIMBS> {
self.bitand(rhs)
}
}
-impl<const LIMBS: usize> BitAndAssign for UInt<LIMBS> {
+impl<const LIMBS: usize> BitAndAssign for Uint<LIMBS> {
#[allow(clippy::assign_op_pattern)]
fn bitand_assign(&mut self, other: Self) {
*self = *self & other;
}
}
-impl<const LIMBS: usize> BitAndAssign<&UInt<LIMBS>> for UInt<LIMBS> {
+impl<const LIMBS: usize> BitAndAssign<&Uint<LIMBS>> for Uint<LIMBS> {
#[allow(clippy::assign_op_pattern)]
fn bitand_assign(&mut self, other: &Self) {
*self = *self & other;
}
}
-impl<const LIMBS: usize> BitAnd for Wrapping<UInt<LIMBS>> {
+impl<const LIMBS: usize> BitAnd for Wrapping<Uint<LIMBS>> {
type Output = Self;
- fn bitand(self, rhs: Self) -> Wrapping<UInt<LIMBS>> {
+ fn bitand(self, rhs: Self) -> Wrapping<Uint<LIMBS>> {
Wrapping(self.0.bitand(&rhs.0))
}
}
-impl<const LIMBS: usize> BitAnd<&Wrapping<UInt<LIMBS>>> for Wrapping<UInt<LIMBS>> {
- type Output = Wrapping<UInt<LIMBS>>;
+impl<const LIMBS: usize> BitAnd<&Wrapping<Uint<LIMBS>>> for Wrapping<Uint<LIMBS>> {
+ type Output = Wrapping<Uint<LIMBS>>;
- fn bitand(self, rhs: &Wrapping<UInt<LIMBS>>) -> Wrapping<UInt<LIMBS>> {
+ fn bitand(self, rhs: &Wrapping<Uint<LIMBS>>) -> Wrapping<Uint<LIMBS>> {
Wrapping(self.0.bitand(&rhs.0))
}
}
-impl<const LIMBS: usize> BitAnd<Wrapping<UInt<LIMBS>>> for &Wrapping<UInt<LIMBS>> {
- type Output = Wrapping<UInt<LIMBS>>;
+impl<const LIMBS: usize> BitAnd<Wrapping<Uint<LIMBS>>> for &Wrapping<Uint<LIMBS>> {
+ type Output = Wrapping<Uint<LIMBS>>;
- fn bitand(self, rhs: Wrapping<UInt<LIMBS>>) -> Wrapping<UInt<LIMBS>> {
+ fn bitand(self, rhs: Wrapping<Uint<LIMBS>>) -> Wrapping<Uint<LIMBS>> {
Wrapping(self.0.bitand(&rhs.0))
}
}
-impl<const LIMBS: usize> BitAnd<&Wrapping<UInt<LIMBS>>> for &Wrapping<UInt<LIMBS>> {
- type Output = Wrapping<UInt<LIMBS>>;
+impl<const LIMBS: usize> BitAnd<&Wrapping<Uint<LIMBS>>> for &Wrapping<Uint<LIMBS>> {
+ type Output = Wrapping<Uint<LIMBS>>;
- fn bitand(self, rhs: &Wrapping<UInt<LIMBS>>) -> Wrapping<UInt<LIMBS>> {
+ fn bitand(self, rhs: &Wrapping<Uint<LIMBS>>) -> Wrapping<Uint<LIMBS>> {
Wrapping(self.0.bitand(&rhs.0))
}
}
-impl<const LIMBS: usize> BitAndAssign for Wrapping<UInt<LIMBS>> {
+impl<const LIMBS: usize> BitAndAssign for Wrapping<Uint<LIMBS>> {
#[allow(clippy::assign_op_pattern)]
fn bitand_assign(&mut self, other: Self) {
*self = *self & other;
}
}
-impl<const LIMBS: usize> BitAndAssign<&Wrapping<UInt<LIMBS>>> for Wrapping<UInt<LIMBS>> {
+impl<const LIMBS: usize> BitAndAssign<&Wrapping<Uint<LIMBS>>> for Wrapping<Uint<LIMBS>> {
#[allow(clippy::assign_op_pattern)]
fn bitand_assign(&mut self, other: &Self) {
*self = *self & other;
diff --git a/vendor/crypto-bigint/src/uint/bit_not.rs b/vendor/crypto-bigint/src/uint/bit_not.rs
index 747d3b49e..52fea5fd0 100644
--- a/vendor/crypto-bigint/src/uint/bit_not.rs
+++ b/vendor/crypto-bigint/src/uint/bit_not.rs
@@ -1,10 +1,10 @@
-//! [`UInt`] bitwise not operations.
+//! [`Uint`] bitwise not operations.
-use super::UInt;
+use super::Uint;
use crate::{Limb, Wrapping};
use core::ops::Not;
-impl<const LIMBS: usize> UInt<LIMBS> {
+impl<const LIMBS: usize> Uint<LIMBS> {
/// Computes bitwise `!a`.
#[inline(always)]
pub const fn not(&self) -> Self {
@@ -20,15 +20,16 @@ impl<const LIMBS: usize> UInt<LIMBS> {
}
}
-impl<const LIMBS: usize> Not for UInt<LIMBS> {
+impl<const LIMBS: usize> Not for Uint<LIMBS> {
type Output = Self;
+ #[allow(clippy::needless_borrow)]
fn not(self) -> <Self as Not>::Output {
(&self).not()
}
}
-impl<const LIMBS: usize> Not for Wrapping<UInt<LIMBS>> {
+impl<const LIMBS: usize> Not for Wrapping<Uint<LIMBS>> {
type Output = Self;
fn not(self) -> <Self as Not>::Output {
diff --git a/vendor/crypto-bigint/src/uint/bit_or.rs b/vendor/crypto-bigint/src/uint/bit_or.rs
index 4a01a8343..9a78e366e 100644
--- a/vendor/crypto-bigint/src/uint/bit_or.rs
+++ b/vendor/crypto-bigint/src/uint/bit_or.rs
@@ -1,11 +1,11 @@
-//! [`UInt`] bitwise or operations.
+//! [`Uint`] bitwise or operations.
-use super::UInt;
+use super::Uint;
use crate::{Limb, Wrapping};
use core::ops::{BitOr, BitOrAssign};
use subtle::{Choice, CtOption};
-impl<const LIMBS: usize> UInt<LIMBS> {
+impl<const LIMBS: usize> Uint<LIMBS> {
/// Computes bitwise `a | b`.
#[inline(always)]
pub const fn bitor(&self, rhs: &Self) -> Self {
@@ -35,89 +35,90 @@ impl<const LIMBS: usize> UInt<LIMBS> {
}
}
-impl<const LIMBS: usize> BitOr for UInt<LIMBS> {
+impl<const LIMBS: usize> BitOr for Uint<LIMBS> {
type Output = Self;
- fn bitor(self, rhs: Self) -> UInt<LIMBS> {
+ fn bitor(self, rhs: Self) -> Uint<LIMBS> {
self.bitor(&rhs)
}
}
-impl<const LIMBS: usize> BitOr<&UInt<LIMBS>> for UInt<LIMBS> {
- type Output = UInt<LIMBS>;
+impl<const LIMBS: usize> BitOr<&Uint<LIMBS>> for Uint<LIMBS> {
+ type Output = Uint<LIMBS>;
- fn bitor(self, rhs: &UInt<LIMBS>) -> UInt<LIMBS> {
+ #[allow(clippy::needless_borrow)]
+ fn bitor(self, rhs: &Uint<LIMBS>) -> Uint<LIMBS> {
(&self).bitor(rhs)
}
}
-impl<const LIMBS: usize> BitOr<UInt<LIMBS>> for &UInt<LIMBS> {
- type Output = UInt<LIMBS>;
+impl<const LIMBS: usize> BitOr<Uint<LIMBS>> for &Uint<LIMBS> {
+ type Output = Uint<LIMBS>;
- fn bitor(self, rhs: UInt<LIMBS>) -> UInt<LIMBS> {
+ fn bitor(self, rhs: Uint<LIMBS>) -> Uint<LIMBS> {
self.bitor(&rhs)
}
}
-impl<const LIMBS: usize> BitOr<&UInt<LIMBS>> for &UInt<LIMBS> {
- type Output = UInt<LIMBS>;
+impl<const LIMBS: usize> BitOr<&Uint<LIMBS>> for &Uint<LIMBS> {
+ type Output = Uint<LIMBS>;
- fn bitor(self, rhs: &UInt<LIMBS>) -> UInt<LIMBS> {
+ fn bitor(self, rhs: &Uint<LIMBS>) -> Uint<LIMBS> {
self.bitor(rhs)
}
}
-impl<const LIMBS: usize> BitOrAssign for UInt<LIMBS> {
+impl<const LIMBS: usize> BitOrAssign for Uint<LIMBS> {
fn bitor_assign(&mut self, other: Self) {
*self = *self | other;
}
}
-impl<const LIMBS: usize> BitOrAssign<&UInt<LIMBS>> for UInt<LIMBS> {
+impl<const LIMBS: usize> BitOrAssign<&Uint<LIMBS>> for Uint<LIMBS> {
fn bitor_assign(&mut self, other: &Self) {
*self = *self | other;
}
}
-impl<const LIMBS: usize> BitOr for Wrapping<UInt<LIMBS>> {
+impl<const LIMBS: usize> BitOr for Wrapping<Uint<LIMBS>> {
type Output = Self;
- fn bitor(self, rhs: Self) -> Wrapping<UInt<LIMBS>> {
+ fn bitor(self, rhs: Self) -> Wrapping<Uint<LIMBS>> {
Wrapping(self.0.bitor(&rhs.0))
}
}
-impl<const LIMBS: usize> BitOr<&Wrapping<UInt<LIMBS>>> for Wrapping<UInt<LIMBS>> {
- type Output = Wrapping<UInt<LIMBS>>;
+impl<const LIMBS: usize> BitOr<&Wrapping<Uint<LIMBS>>> for Wrapping<Uint<LIMBS>> {
+ type Output = Wrapping<Uint<LIMBS>>;
- fn bitor(self, rhs: &Wrapping<UInt<LIMBS>>) -> Wrapping<UInt<LIMBS>> {
+ fn bitor(self, rhs: &Wrapping<Uint<LIMBS>>) -> Wrapping<Uint<LIMBS>> {
Wrapping(self.0.bitor(&rhs.0))
}
}
-impl<const LIMBS: usize> BitOr<Wrapping<UInt<LIMBS>>> for &Wrapping<UInt<LIMBS>> {
- type Output = Wrapping<UInt<LIMBS>>;
+impl<const LIMBS: usize> BitOr<Wrapping<Uint<LIMBS>>> for &Wrapping<Uint<LIMBS>> {
+ type Output = Wrapping<Uint<LIMBS>>;
- fn bitor(self, rhs: Wrapping<UInt<LIMBS>>) -> Wrapping<UInt<LIMBS>> {
+ fn bitor(self, rhs: Wrapping<Uint<LIMBS>>) -> Wrapping<Uint<LIMBS>> {
Wrapping(self.0.bitor(&rhs.0))
}
}
-impl<const LIMBS: usize> BitOr<&Wrapping<UInt<LIMBS>>> for &Wrapping<UInt<LIMBS>> {
- type Output = Wrapping<UInt<LIMBS>>;
+impl<const LIMBS: usize> BitOr<&Wrapping<Uint<LIMBS>>> for &Wrapping<Uint<LIMBS>> {
+ type Output = Wrapping<Uint<LIMBS>>;
- fn bitor(self, rhs: &Wrapping<UInt<LIMBS>>) -> Wrapping<UInt<LIMBS>> {
+ fn bitor(self, rhs: &Wrapping<Uint<LIMBS>>) -> Wrapping<Uint<LIMBS>> {
Wrapping(self.0.bitor(&rhs.0))
}
}
-impl<const LIMBS: usize> BitOrAssign for Wrapping<UInt<LIMBS>> {
+impl<const LIMBS: usize> BitOrAssign for Wrapping<Uint<LIMBS>> {
fn bitor_assign(&mut self, other: Self) {
*self = *self | other;
}
}
-impl<const LIMBS: usize> BitOrAssign<&Wrapping<UInt<LIMBS>>> for Wrapping<UInt<LIMBS>> {
+impl<const LIMBS: usize> BitOrAssign<&Wrapping<Uint<LIMBS>>> for Wrapping<Uint<LIMBS>> {
fn bitor_assign(&mut self, other: &Self) {
*self = *self | other;
}
diff --git a/vendor/crypto-bigint/src/uint/bit_xor.rs b/vendor/crypto-bigint/src/uint/bit_xor.rs
index 16d78ad3a..91121d234 100644
--- a/vendor/crypto-bigint/src/uint/bit_xor.rs
+++ b/vendor/crypto-bigint/src/uint/bit_xor.rs
@@ -1,11 +1,11 @@
-//! [`UInt`] bitwise xor operations.
+//! [`Uint`] bitwise xor operations.
-use super::UInt;
+use super::Uint;
use crate::{Limb, Wrapping};
use core::ops::{BitXor, BitXorAssign};
use subtle::{Choice, CtOption};
-impl<const LIMBS: usize> UInt<LIMBS> {
+impl<const LIMBS: usize> Uint<LIMBS> {
/// Computes bitwise `a ^ b`.
#[inline(always)]
pub const fn bitxor(&self, rhs: &Self) -> Self {
@@ -35,89 +35,90 @@ impl<const LIMBS: usize> UInt<LIMBS> {
}
}
-impl<const LIMBS: usize> BitXor for UInt<LIMBS> {
+impl<const LIMBS: usize> BitXor for Uint<LIMBS> {
type Output = Self;
- fn bitxor(self, rhs: Self) -> UInt<LIMBS> {
+ fn bitxor(self, rhs: Self) -> Uint<LIMBS> {
self.bitxor(&rhs)
}
}
-impl<const LIMBS: usize> BitXor<&UInt<LIMBS>> for UInt<LIMBS> {
- type Output = UInt<LIMBS>;
+impl<const LIMBS: usize> BitXor<&Uint<LIMBS>> for Uint<LIMBS> {
+ type Output = Uint<LIMBS>;
- fn bitxor(self, rhs: &UInt<LIMBS>) -> UInt<LIMBS> {
+ #[allow(clippy::needless_borrow)]
+ fn bitxor(self, rhs: &Uint<LIMBS>) -> Uint<LIMBS> {
(&self).bitxor(rhs)
}
}
-impl<const LIMBS: usize> BitXor<UInt<LIMBS>> for &UInt<LIMBS> {
- type Output = UInt<LIMBS>;
+impl<const LIMBS: usize> BitXor<Uint<LIMBS>> for &Uint<LIMBS> {
+ type Output = Uint<LIMBS>;
- fn bitxor(self, rhs: UInt<LIMBS>) -> UInt<LIMBS> {
+ fn bitxor(self, rhs: Uint<LIMBS>) -> Uint<LIMBS> {
self.bitxor(&rhs)
}
}
-impl<const LIMBS: usize> BitXor<&UInt<LIMBS>> for &UInt<LIMBS> {
- type Output = UInt<LIMBS>;
+impl<const LIMBS: usize> BitXor<&Uint<LIMBS>> for &Uint<LIMBS> {
+ type Output = Uint<LIMBS>;
- fn bitxor(self, rhs: &UInt<LIMBS>) -> UInt<LIMBS> {
+ fn bitxor(self, rhs: &Uint<LIMBS>) -> Uint<LIMBS> {
self.bitxor(rhs)
}
}
-impl<const LIMBS: usize> BitXorAssign for UInt<LIMBS> {
+impl<const LIMBS: usize> BitXorAssign for Uint<LIMBS> {
fn bitxor_assign(&mut self, other: Self) {
*self = *self ^ other;
}
}
-impl<const LIMBS: usize> BitXorAssign<&UInt<LIMBS>> for UInt<LIMBS> {
+impl<const LIMBS: usize> BitXorAssign<&Uint<LIMBS>> for Uint<LIMBS> {
fn bitxor_assign(&mut self, other: &Self) {
*self = *self ^ other;
}
}
-impl<const LIMBS: usize> BitXor for Wrapping<UInt<LIMBS>> {
+impl<const LIMBS: usize> BitXor for Wrapping<Uint<LIMBS>> {
type Output = Self;
- fn bitxor(self, rhs: Self) -> Wrapping<UInt<LIMBS>> {
+ fn bitxor(self, rhs: Self) -> Wrapping<Uint<LIMBS>> {
Wrapping(self.0.bitxor(&rhs.0))
}
}
-impl<const LIMBS: usize> BitXor<&Wrapping<UInt<LIMBS>>> for Wrapping<UInt<LIMBS>> {
- type Output = Wrapping<UInt<LIMBS>>;
+impl<const LIMBS: usize> BitXor<&Wrapping<Uint<LIMBS>>> for Wrapping<Uint<LIMBS>> {
+ type Output = Wrapping<Uint<LIMBS>>;
- fn bitxor(self, rhs: &Wrapping<UInt<LIMBS>>) -> Wrapping<UInt<LIMBS>> {
+ fn bitxor(self, rhs: &Wrapping<Uint<LIMBS>>) -> Wrapping<Uint<LIMBS>> {
Wrapping(self.0.bitxor(&rhs.0))
}
}
-impl<const LIMBS: usize> BitXor<Wrapping<UInt<LIMBS>>> for &Wrapping<UInt<LIMBS>> {
- type Output = Wrapping<UInt<LIMBS>>;
+impl<const LIMBS: usize> BitXor<Wrapping<Uint<LIMBS>>> for &Wrapping<Uint<LIMBS>> {
+ type Output = Wrapping<Uint<LIMBS>>;
- fn bitxor(self, rhs: Wrapping<UInt<LIMBS>>) -> Wrapping<UInt<LIMBS>> {
+ fn bitxor(self, rhs: Wrapping<Uint<LIMBS>>) -> Wrapping<Uint<LIMBS>> {
Wrapping(self.0.bitxor(&rhs.0))
}
}
-impl<const LIMBS: usize> BitXor<&Wrapping<UInt<LIMBS>>> for &Wrapping<UInt<LIMBS>> {
- type Output = Wrapping<UInt<LIMBS>>;
+impl<const LIMBS: usize> BitXor<&Wrapping<Uint<LIMBS>>> for &Wrapping<Uint<LIMBS>> {
+ type Output = Wrapping<Uint<LIMBS>>;
- fn bitxor(self, rhs: &Wrapping<UInt<LIMBS>>) -> Wrapping<UInt<LIMBS>> {
+ fn bitxor(self, rhs: &Wrapping<Uint<LIMBS>>) -> Wrapping<Uint<LIMBS>> {
Wrapping(self.0.bitxor(&rhs.0))
}
}
-impl<const LIMBS: usize> BitXorAssign for Wrapping<UInt<LIMBS>> {
+impl<const LIMBS: usize> BitXorAssign for Wrapping<Uint<LIMBS>> {
fn bitxor_assign(&mut self, other: Self) {
*self = *self ^ other;
}
}
-impl<const LIMBS: usize> BitXorAssign<&Wrapping<UInt<LIMBS>>> for Wrapping<UInt<LIMBS>> {
+impl<const LIMBS: usize> BitXorAssign<&Wrapping<Uint<LIMBS>>> for Wrapping<Uint<LIMBS>> {
fn bitxor_assign(&mut self, other: &Self) {
*self = *self ^ other;
}
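
Not part of the diff: the bitwise hunks above are mechanical `UInt` -> `Uint` renames plus clippy allowances; a short usage sketch, assuming the 0.5-series `U256` alias.

use crypto_bigint::U256;

fn main() {
    let a = U256::from(0b1100u64);
    let b = U256::from(0b1010u64);

    assert_eq!(a & b, U256::from(0b1000u64));
    assert_eq!(a | b, U256::from(0b1110u64));
    assert_eq!(a ^ b, U256::from(0b0110u64));
    assert_eq!(!U256::ZERO, U256::MAX); // bitwise not flips every limb
}
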
diff --git a/vendor/crypto-bigint/src/uint/bits.rs b/vendor/crypto-bigint/src/uint/bits.rs
index b76d89fa5..834014341 100644
--- a/vendor/crypto-bigint/src/uint/bits.rs
+++ b/vendor/crypto-bigint/src/uint/bits.rs
@@ -1,25 +1,17 @@
-use crate::{Limb, UInt, Word};
+use crate::{CtChoice, Limb, Uint, Word};
-impl<const LIMBS: usize> UInt<LIMBS> {
- /// Get the value of the bit at position `index`, as a 0- or 1-valued Word.
- /// Returns 0 for indices out of range.
+impl<const LIMBS: usize> Uint<LIMBS> {
+ /// Returns `true` if the bit at position `index` is set, `false` otherwise.
#[inline(always)]
- pub const fn bit_vartime(self, index: usize) -> Word {
- if index >= LIMBS * Limb::BIT_SIZE {
- 0
+ pub const fn bit_vartime(self, index: usize) -> bool {
+ if index >= Self::BITS {
+ false
} else {
- (self.limbs[index / Limb::BIT_SIZE].0 >> (index % Limb::BIT_SIZE)) & 1
+ (self.limbs[index / Limb::BITS].0 >> (index % Limb::BITS)) & 1 == 1
}
}
/// Calculate the number of bits needed to represent this number.
- #[deprecated(note = "please use `bits_vartime` instead")]
- #[inline(always)]
- pub const fn bits(self) -> usize {
- self.bits_vartime()
- }
-
- /// Calculate the number of bits needed to represent this number.
#[allow(trivial_numeric_casts)]
pub const fn bits_vartime(self) -> usize {
let mut i = LIMBS - 1;
@@ -28,28 +20,143 @@ impl<const LIMBS: usize> UInt<LIMBS> {
}
let limb = self.limbs[i].0;
- let bits = (Limb::BIT_SIZE * (i + 1)) as Word - limb.leading_zeros() as Word;
+ Limb::BITS * (i + 1) - limb.leading_zeros() as usize
+ }
- Limb::ct_select(
- Limb(bits),
- Limb::ZERO,
- !self.limbs[0].is_nonzero() & !Limb(i as Word).is_nonzero(),
- )
- .0 as usize
+ /// Calculate the number of leading zeros in the binary representation of this number.
+ pub const fn leading_zeros(self) -> usize {
+ let limbs = self.as_limbs();
+
+ let mut count: Word = 0;
+ let mut i = LIMBS;
+ let mut nonzero_limb_not_encountered = CtChoice::TRUE;
+ while i > 0 {
+ i -= 1;
+ let l = limbs[i];
+ let z = l.leading_zeros() as Word;
+ count += nonzero_limb_not_encountered.if_true(z);
+ nonzero_limb_not_encountered =
+ nonzero_limb_not_encountered.and(l.ct_is_nonzero().not());
+ }
+
+ count as usize
+ }
+
+ /// Calculate the number of trailing zeros in the binary representation of this number.
+ pub const fn trailing_zeros(self) -> usize {
+ let limbs = self.as_limbs();
+
+ let mut count: Word = 0;
+ let mut i = 0;
+ let mut nonzero_limb_not_encountered = CtChoice::TRUE;
+ while i < LIMBS {
+ let l = limbs[i];
+ let z = l.trailing_zeros() as Word;
+ count += nonzero_limb_not_encountered.if_true(z);
+ nonzero_limb_not_encountered =
+ nonzero_limb_not_encountered.and(l.ct_is_nonzero().not());
+ i += 1;
+ }
+
+ count as usize
+ }
+
+ /// Calculate the number of bits needed to represent this number.
+ pub const fn bits(self) -> usize {
+ Self::BITS - self.leading_zeros()
+ }
+
+ /// Get the value of the bit at position `index`, as a truthy or falsy `CtChoice`.
+ /// Returns the falsy value for indices out of range.
+ pub const fn bit(self, index: usize) -> CtChoice {
+ let limb_num = Limb((index / Limb::BITS) as Word);
+ let index_in_limb = index % Limb::BITS;
+ let index_mask = 1 << index_in_limb;
+
+ let limbs = self.as_words();
+
+ let mut result: Word = 0;
+ let mut i = 0;
+ while i < LIMBS {
+ let bit = limbs[i] & index_mask;
+ let is_right_limb = Limb::ct_eq(limb_num, Limb(i as Word));
+ result |= is_right_limb.if_true(bit);
+ i += 1;
+ }
+
+ CtChoice::from_lsb(result >> index_in_limb)
}
}
#[cfg(test)]
mod tests {
- use crate::U128;
+ use crate::U256;
+
+ fn uint_with_bits_at(positions: &[usize]) -> U256 {
+ let mut result = U256::ZERO;
+ for pos in positions {
+ result |= U256::ONE << *pos;
+ }
+ result
+ }
+
+ #[test]
+ fn bit_vartime() {
+ let u = uint_with_bits_at(&[16, 48, 112, 127, 255]);
+ assert!(!u.bit_vartime(0));
+ assert!(!u.bit_vartime(1));
+ assert!(u.bit_vartime(16));
+ assert!(u.bit_vartime(127));
+ assert!(u.bit_vartime(255));
+ assert!(!u.bit_vartime(256));
+ assert!(!u.bit_vartime(260));
+ }
+
+ #[test]
+ fn bit() {
+ let u = uint_with_bits_at(&[16, 48, 112, 127, 255]);
+ assert!(!u.bit(0).is_true_vartime());
+ assert!(!u.bit(1).is_true_vartime());
+ assert!(u.bit(16).is_true_vartime());
+ assert!(u.bit(127).is_true_vartime());
+ assert!(u.bit(255).is_true_vartime());
+ assert!(!u.bit(256).is_true_vartime());
+ assert!(!u.bit(260).is_true_vartime());
+ }
#[test]
- fn bit_vartime_ok() {
- let u = U128::from_be_hex("f0010000000000000001000000010000");
- assert_eq!(u.bit_vartime(0), 0);
- assert_eq!(u.bit_vartime(1), 0);
- assert_eq!(u.bit_vartime(16), 1);
- assert_eq!(u.bit_vartime(127), 1);
- assert_eq!(u.bit_vartime(130), 0);
+ fn leading_zeros() {
+ let u = uint_with_bits_at(&[256 - 16, 256 - 79, 256 - 207]);
+ assert_eq!(u.leading_zeros() as u32, 15);
+
+ let u = uint_with_bits_at(&[256 - 79, 256 - 207]);
+ assert_eq!(u.leading_zeros() as u32, 78);
+
+ let u = uint_with_bits_at(&[256 - 207]);
+ assert_eq!(u.leading_zeros() as u32, 206);
+
+ let u = uint_with_bits_at(&[256 - 1, 256 - 75, 256 - 150]);
+ assert_eq!(u.leading_zeros() as u32, 0);
+
+ let u = U256::ZERO;
+ assert_eq!(u.leading_zeros() as u32, 256);
+ }
+
+ #[test]
+ fn trailing_zeros() {
+ let u = uint_with_bits_at(&[16, 79, 150]);
+ assert_eq!(u.trailing_zeros() as u32, 16);
+
+ let u = uint_with_bits_at(&[79, 150]);
+ assert_eq!(u.trailing_zeros() as u32, 79);
+
+ let u = uint_with_bits_at(&[150, 207]);
+ assert_eq!(u.trailing_zeros() as u32, 150);
+
+ let u = uint_with_bits_at(&[0, 150, 207]);
+ assert_eq!(u.trailing_zeros() as u32, 0);
+
+ let u = U256::ZERO;
+ assert_eq!(u.trailing_zeros() as u32, 256);
}
}
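
Not part of the diff: a sketch of the reworked bit-introspection API above (`bit_vartime` now returns `bool`; `bit`, `leading_zeros` and `trailing_zeros` are new), assuming the 0.5-series `U256` alias.

use crypto_bigint::U256;

fn main() {
    let x = U256::ONE << 200; // a single bit set at position 200

    assert!(x.bit_vartime(200));
    assert!(!x.bit_vartime(199));
    assert!(x.bit(200).is_true_vartime()); // constant-time variant

    assert_eq!(x.bits(), 201);
    assert_eq!(x.leading_zeros(), 256 - 201);
    assert_eq!(x.trailing_zeros(), 200);
}
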
diff --git a/vendor/crypto-bigint/src/uint/cmp.rs b/vendor/crypto-bigint/src/uint/cmp.rs
index 19046df9b..8815369e3 100644
--- a/vendor/crypto-bigint/src/uint/cmp.rs
+++ b/vendor/crypto-bigint/src/uint/cmp.rs
@@ -1,18 +1,16 @@
-//! [`UInt`] comparisons.
+//! [`Uint`] comparisons.
//!
//! By default these are all constant-time and use the `subtle` crate.
-use super::UInt;
-use crate::{limb::HI_BIT, Limb, SignedWord, WideSignedWord, Word, Zero};
+use super::Uint;
+use crate::{CtChoice, Limb};
use core::cmp::Ordering;
use subtle::{Choice, ConstantTimeEq, ConstantTimeGreater, ConstantTimeLess};
-impl<const LIMBS: usize> UInt<LIMBS> {
- /// Return `a` if `c`==0 or `b` if `c`==`Word::MAX`.
- ///
- /// Const-friendly: we can't yet use `subtle` in `const fn` contexts.
+impl<const LIMBS: usize> Uint<LIMBS> {
+ /// Return `b` if `c` is truthy, otherwise return `a`.
#[inline]
- pub(crate) const fn ct_select(a: UInt<LIMBS>, b: UInt<LIMBS>, c: Word) -> Self {
+ pub(crate) const fn ct_select(a: &Self, b: &Self, c: CtChoice) -> Self {
let mut limbs = [Limb::ZERO; LIMBS];
let mut i = 0;
@@ -21,106 +19,112 @@ impl<const LIMBS: usize> UInt<LIMBS> {
i += 1;
}
- UInt { limbs }
+ Uint { limbs }
}
- /// Returns all 1's if `self`!=0 or 0 if `self`==0.
- ///
- /// Const-friendly: we can't yet use `subtle` in `const fn` contexts.
#[inline]
- pub(crate) const fn ct_is_nonzero(&self) -> Word {
+ pub(crate) const fn ct_swap(a: &Self, b: &Self, c: CtChoice) -> (Self, Self) {
+ let new_a = Self::ct_select(a, b, c);
+ let new_b = Self::ct_select(b, a, c);
+
+ (new_a, new_b)
+ }
+
+ /// Returns the truthy value if `self`!=0 or the falsy value otherwise.
+ #[inline]
+ pub(crate) const fn ct_is_nonzero(&self) -> CtChoice {
let mut b = 0;
let mut i = 0;
while i < LIMBS {
b |= self.limbs[i].0;
i += 1;
}
- Limb::is_nonzero(Limb(b))
+ Limb(b).ct_is_nonzero()
}
- /// Returns -1 if self < rhs
- /// 0 if self == rhs
- /// 1 if self > rhs
- ///
- /// Const-friendly: we can't yet use `subtle` in `const fn` contexts.
- #[inline]
- pub(crate) const fn ct_cmp(&self, rhs: &Self) -> SignedWord {
- let mut gt = 0;
- let mut lt = 0;
- let mut i = LIMBS;
-
- while i > 0 {
- let a = self.limbs[i - 1].0 as WideSignedWord;
- let b = rhs.limbs[i - 1].0 as WideSignedWord;
- gt |= ((b - a) >> Limb::BIT_SIZE) & 1 & !lt;
- lt |= ((a - b) >> Limb::BIT_SIZE) & 1 & !gt;
- i -= 1;
- }
- (gt as SignedWord) - (lt as SignedWord)
+ /// Returns the truthy value if `self` is odd or the falsy value otherwise.
+ pub(crate) const fn ct_is_odd(&self) -> CtChoice {
+ CtChoice::from_lsb(self.limbs[0].0 & 1)
}
- /// Returns 0 if self == rhs or Word::MAX if self != rhs.
- /// Const-friendly: we can't yet use `subtle` in `const fn` contexts.
+ /// Returns the truthy value if `self == rhs` or the falsy value otherwise.
#[inline]
- pub(crate) const fn ct_not_eq(&self, rhs: &Self) -> Word {
+ pub(crate) const fn ct_eq(lhs: &Self, rhs: &Self) -> CtChoice {
let mut acc = 0;
let mut i = 0;
while i < LIMBS {
- acc |= self.limbs[i].0 ^ rhs.limbs[i].0;
+ acc |= lhs.limbs[i].0 ^ rhs.limbs[i].0;
i += 1;
}
- let acc = acc as SignedWord;
- ((acc | acc.wrapping_neg()) >> HI_BIT) as Word
+
+ // acc == 0 if and only if self == rhs
+ Limb(acc).ct_is_nonzero().not()
+ }
+
+ /// Returns the truthy value if `self < rhs` and the falsy value otherwise.
+ #[inline]
+ pub(crate) const fn ct_lt(lhs: &Self, rhs: &Self) -> CtChoice {
+ // We could use the same approach as in Limb::ct_lt(),
+ // but since we have to use Uint::wrapping_sub(), which calls `sbb()`,
+ // there are no savings compared to just calling `sbb()` directly.
+ let (_res, borrow) = lhs.sbb(rhs, Limb::ZERO);
+ CtChoice::from_mask(borrow.0)
+ }
+
+ /// Returns the truthy value if `self > rhs` and the falsy value otherwise.
+ #[inline]
+ pub(crate) const fn ct_gt(lhs: &Self, rhs: &Self) -> CtChoice {
+ let (_res, borrow) = rhs.sbb(lhs, Limb::ZERO);
+ CtChoice::from_mask(borrow.0)
}
}
-impl<const LIMBS: usize> ConstantTimeEq for UInt<LIMBS> {
+impl<const LIMBS: usize> ConstantTimeEq for Uint<LIMBS> {
#[inline]
fn ct_eq(&self, other: &Self) -> Choice {
- Choice::from((!self.ct_not_eq(other) as u8) & 1)
+ Uint::ct_eq(self, other).into()
}
}
-impl<const LIMBS: usize> ConstantTimeGreater for UInt<LIMBS> {
+impl<const LIMBS: usize> ConstantTimeGreater for Uint<LIMBS> {
#[inline]
fn ct_gt(&self, other: &Self) -> Choice {
- let underflow = other.sbb(self, Limb::ZERO).1;
- !underflow.is_zero()
+ Uint::ct_gt(self, other).into()
}
}
-impl<const LIMBS: usize> ConstantTimeLess for UInt<LIMBS> {
+impl<const LIMBS: usize> ConstantTimeLess for Uint<LIMBS> {
#[inline]
fn ct_lt(&self, other: &Self) -> Choice {
- let underflow = self.sbb(other, Limb::ZERO).1;
- !underflow.is_zero()
+ Uint::ct_lt(self, other).into()
}
}
-impl<const LIMBS: usize> Eq for UInt<LIMBS> {}
+impl<const LIMBS: usize> Eq for Uint<LIMBS> {}
-impl<const LIMBS: usize> Ord for UInt<LIMBS> {
+impl<const LIMBS: usize> Ord for Uint<LIMBS> {
fn cmp(&self, other: &Self) -> Ordering {
- match self.ct_cmp(other) {
- -1 => Ordering::Less,
- 1 => Ordering::Greater,
- n => {
- debug_assert_eq!(n, 0);
- debug_assert!(bool::from(self.ct_eq(other)));
- Ordering::Equal
- }
+ let is_lt = self.ct_lt(other);
+ let is_eq = self.ct_eq(other);
+
+ if is_lt.into() {
+ Ordering::Less
+ } else if is_eq.into() {
+ Ordering::Equal
+ } else {
+ Ordering::Greater
}
}
}
-impl<const LIMBS: usize> PartialOrd for UInt<LIMBS> {
+impl<const LIMBS: usize> PartialOrd for Uint<LIMBS> {
fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
Some(self.cmp(other))
}
}
-impl<const LIMBS: usize> PartialEq for UInt<LIMBS> {
+impl<const LIMBS: usize> PartialEq for Uint<LIMBS> {
fn eq(&self, other: &Self) -> bool {
self.ct_eq(other).into()
}
diff --git a/vendor/crypto-bigint/src/uint/concat.rs b/vendor/crypto-bigint/src/uint/concat.rs
index e92960da7..8b53401d3 100644
--- a/vendor/crypto-bigint/src/uint/concat.rs
+++ b/vendor/crypto-bigint/src/uint/concat.rs
@@ -5,7 +5,7 @@ macro_rules! impl_concat {
impl $name {
/// Concatenate the two values, with `self` as most significant and `rhs`
/// as the least significant.
- pub const fn concat(&self, rhs: &Self) -> UInt<{nlimbs!($bits) * 2}> {
+ pub const fn concat(&self, rhs: &Self) -> Uint<{nlimbs!($bits) * 2}> {
let mut limbs = [Limb::ZERO; nlimbs!($bits) * 2];
let mut i = 0;
let mut j = 0;
@@ -23,21 +23,21 @@ macro_rules! impl_concat {
j += 1;
}
- UInt { limbs }
+ Uint { limbs }
}
}
impl Concat for $name {
- type Output = UInt<{nlimbs!($bits) * 2}>;
+ type Output = Uint<{nlimbs!($bits) * 2}>;
fn concat(&self, rhs: &Self) -> Self::Output {
self.concat(rhs)
}
}
- impl From<($name, $name)> for UInt<{nlimbs!($bits) * 2}> {
- fn from(nums: ($name, $name)) -> UInt<{nlimbs!($bits) * 2}> {
- nums.0.concat(&nums.1)
+ impl From<($name, $name)> for Uint<{nlimbs!($bits) * 2}> {
+ fn from(nums: ($name, $name)) -> Uint<{nlimbs!($bits) * 2}> {
+ nums.1.concat(&nums.0)
}
}
)+
@@ -57,4 +57,13 @@ mod tests {
U128::from_be_hex("00112233445566778899aabbccddeeff")
);
}
+
+ #[test]
+ fn convert() {
+ let res: U128 = U64::ONE.mul_wide(&U64::ONE).into();
+ assert_eq!(res, U128::ONE);
+
+ let res: U128 = U64::ONE.square_wide().into();
+ assert_eq!(res, U128::ONE);
+ }
}
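
Not part of the diff: the `From<($name, $name)>` impl above now interprets the tuple as (lo, hi), matching the output order of `mul_wide`/`square_wide`; a sketch assuming the 0.5-series `U64`/`U128` aliases.

use crypto_bigint::{U128, U64};

fn main() {
    let hi = U64::from_be_hex("0011223344556677");
    let lo = U64::from_be_hex("8899aabbccddeeff");

    // `concat` keeps `self` as the most significant half.
    assert_eq!(
        hi.concat(&lo),
        U128::from_be_hex("00112233445566778899aabbccddeeff")
    );

    // The tuple conversion takes (lo, hi), so wide multiplication results convert directly.
    let wide: U128 = U64::ONE.mul_wide(&U64::ONE).into();
    assert_eq!(wide, U128::ONE);
}
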
diff --git a/vendor/crypto-bigint/src/uint/div.rs b/vendor/crypto-bigint/src/uint/div.rs
index f7d9d6bf3..90741c472 100644
--- a/vendor/crypto-bigint/src/uint/div.rs
+++ b/vendor/crypto-bigint/src/uint/div.rs
@@ -1,33 +1,68 @@
-//! [`UInt`] division operations.
+//! [`Uint`] division operations.
-use super::UInt;
-use crate::limb::Word;
-use crate::{Integer, Limb, NonZero, Wrapping};
+use super::div_limb::{div_rem_limb_with_reciprocal, Reciprocal};
+use crate::{CtChoice, Limb, NonZero, Uint, Word, Wrapping};
use core::ops::{Div, DivAssign, Rem, RemAssign};
-use subtle::{Choice, CtOption};
+use subtle::CtOption;
+
+impl<const LIMBS: usize> Uint<LIMBS> {
+ /// Computes `self` / `rhs` using a pre-made reciprocal,
+ /// returns the quotient (q) and remainder (r).
+ #[inline(always)]
+ pub const fn ct_div_rem_limb_with_reciprocal(&self, reciprocal: &Reciprocal) -> (Self, Limb) {
+ div_rem_limb_with_reciprocal(self, reciprocal)
+ }
+
+ /// Computes `self` / `rhs` using a pre-made reciprocal,
+ /// returns the quotient (q) and remainder (r).
+ #[inline(always)]
+ pub fn div_rem_limb_with_reciprocal(
+ &self,
+ reciprocal: &CtOption<Reciprocal>,
+ ) -> CtOption<(Self, Limb)> {
+ reciprocal.map(|r| div_rem_limb_with_reciprocal(self, &r))
+ }
+
+ /// Computes `self` / `rhs`, returns the quotient (q) and remainder (r).
+ /// Returns the truthy value as the third element of the tuple if `rhs != 0`,
+ /// and the falsy value otherwise.
+ #[inline(always)]
+ pub(crate) const fn ct_div_rem_limb(&self, rhs: Limb) -> (Self, Limb, CtChoice) {
+ let (reciprocal, is_some) = Reciprocal::ct_new(rhs);
+ let (quo, rem) = div_rem_limb_with_reciprocal(self, &reciprocal);
+ (quo, rem, is_some)
+ }
+
+ /// Computes `self` / `rhs`, returns the quotient (q) and remainder (r).
+ #[inline(always)]
+ pub fn div_rem_limb(&self, rhs: NonZero<Limb>) -> (Self, Limb) {
+ // Guaranteed to succeed since `rhs` is nonzero.
+ let (quo, rem, _is_some) = self.ct_div_rem_limb(*rhs);
+ (quo, rem)
+ }
-impl<const LIMBS: usize> UInt<LIMBS> {
/// Computes `self` / `rhs`, returns the quotient (q), remainder (r)
- /// and 1 for is_some or 0 for is_none. The results can be wrapped in [`CtOption`].
- /// NOTE: Use only if you need to access const fn. Otherwise use `div_rem` because
+ /// and the truthy value for is_some or the falsy value for is_none.
+ ///
+ /// NOTE: Use only if you need to access const fn. Otherwise use [`Self::div_rem`] because
/// the value for is_some needs to be checked before using `q` and `r`.
///
/// This is variable only with respect to `rhs`.
///
/// When used with a fixed `rhs`, this function is constant-time with respect
/// to `self`.
- pub(crate) const fn ct_div_rem(&self, rhs: &Self) -> (Self, Self, u8) {
+ pub(crate) const fn ct_div_rem(&self, rhs: &Self) -> (Self, Self, CtChoice) {
let mb = rhs.bits_vartime();
- let mut bd = (LIMBS * Limb::BIT_SIZE) - mb;
+ let mut bd = Self::BITS - mb;
let mut rem = *self;
let mut quo = Self::ZERO;
let mut c = rhs.shl_vartime(bd);
loop {
let (mut r, borrow) = rem.sbb(&c, Limb::ZERO);
- rem = Self::ct_select(r, rem, borrow.0);
+ rem = Self::ct_select(&r, &rem, CtChoice::from_mask(borrow.0));
r = quo.bitor(&Self::ONE);
- quo = Self::ct_select(r, quo, borrow.0);
+ quo = Self::ct_select(&r, &quo, CtChoice::from_mask(borrow.0));
if bd == 0 {
break;
}
@@ -36,27 +71,28 @@ impl<const LIMBS: usize> UInt<LIMBS> {
quo = quo.shl_vartime(1);
}
- let is_some = Limb(mb as Word).is_nonzero();
- quo = Self::ct_select(Self::ZERO, quo, is_some);
- (quo, rem, (is_some & 1) as u8)
+ let is_some = Limb(mb as Word).ct_is_nonzero();
+ quo = Self::ct_select(&Self::ZERO, &quo, is_some);
+ (quo, rem, is_some)
}
/// Computes `self` % `rhs`, returns the remainder and
- /// and 1 for is_some or 0 for is_none. The results can be wrapped in [`CtOption`].
- /// NOTE: Use only if you need to access const fn. Otherwise use `reduce`
+ /// and the truthy value for is_some or the falsy value for is_none.
+ ///
+ /// NOTE: Use only if you need to access const fn. Otherwise use [`Self::rem`].
/// This is variable only with respect to `rhs`.
///
/// When used with a fixed `rhs`, this function is constant-time with respect
/// to `self`.
- pub(crate) const fn ct_reduce(&self, rhs: &Self) -> (Self, u8) {
+ pub const fn const_rem(&self, rhs: &Self) -> (Self, CtChoice) {
let mb = rhs.bits_vartime();
- let mut bd = (LIMBS * Limb::BIT_SIZE) - mb;
+ let mut bd = Self::BITS - mb;
let mut rem = *self;
let mut c = rhs.shl_vartime(bd);
loop {
let (r, borrow) = rem.sbb(&c, Limb::ZERO);
- rem = Self::ct_select(r, rem, borrow.0);
+ rem = Self::ct_select(&r, &rem, CtChoice::from_mask(borrow.0));
if bd == 0 {
break;
}
@@ -64,20 +100,55 @@ impl<const LIMBS: usize> UInt<LIMBS> {
c = c.shr_vartime(1);
}
- let is_some = Limb(mb as Word).is_nonzero();
- (rem, (is_some & 1) as u8)
+ let is_some = Limb(mb as Word).ct_is_nonzero();
+ (rem, is_some)
+ }
+
+ /// Computes `self` % `rhs`, returns the remainder and
+ /// and the truthy value for is_some or the falsy value for is_none.
+ ///
+ /// This is variable only with respect to `rhs`.
+ ///
+ /// When used with a fixed `rhs`, this function is constant-time with respect
+ /// to `self`.
+ pub const fn const_rem_wide(lower_upper: (Self, Self), rhs: &Self) -> (Self, CtChoice) {
+ let mb = rhs.bits_vartime();
+
+ // The number of bits to consider is two sets of limbs * BITS - mb (modulus bitcount)
+ let mut bd = (2 * Self::BITS) - mb;
+
+ // The wide integer to reduce, split into two halves
+ let (mut lower, mut upper) = lower_upper;
+
+ // Factor of the modulus, split into two halves
+ let mut c = Self::shl_vartime_wide((*rhs, Uint::ZERO), bd);
+
+ loop {
+ let (lower_sub, borrow) = lower.sbb(&c.0, Limb::ZERO);
+ let (upper_sub, borrow) = upper.sbb(&c.1, borrow);
+
+ lower = Self::ct_select(&lower_sub, &lower, CtChoice::from_mask(borrow.0));
+ upper = Self::ct_select(&upper_sub, &upper, CtChoice::from_mask(borrow.0));
+ if bd == 0 {
+ break;
+ }
+ bd -= 1;
+ c = Self::shr_vartime_wide(c, 1);
+ }
+
+ let is_some = Limb(mb as Word).ct_is_nonzero();
+ (lower, is_some)
}
/// Computes `self` % 2^k. Faster than reduce since its a power of 2.
- /// Limited to 2^16-1 since UInt doesn't support higher.
- pub const fn reduce2k(&self, k: usize) -> Self {
+ /// Limited to 2^16-1 since Uint doesn't support higher.
+ pub const fn rem2k(&self, k: usize) -> Self {
let highest = (LIMBS - 1) as u32;
- let index = k as u32 / (Limb::BIT_SIZE as u32);
- let res = Limb::ct_cmp(Limb::from_u32(index), Limb::from_u32(highest)) - 1;
- let le = Limb::is_nonzero(Limb(res as Word));
+ let index = k as u32 / (Limb::BITS as u32);
+ let le = Limb::ct_le(Limb::from_u32(index), Limb::from_u32(highest));
let word = Limb::ct_select(Limb::from_u32(highest), Limb::from_u32(index), le).0 as usize;
- let base = k % Limb::BIT_SIZE;
+ let base = k % Limb::BITS;
let mask = (1 << base) - 1;
let mut out = *self;
@@ -94,266 +165,418 @@ impl<const LIMBS: usize> UInt<LIMBS> {
out
}
- /// Computes self / rhs, returns the quotient, remainder
- /// if rhs != 0
- pub fn div_rem(&self, rhs: &Self) -> CtOption<(Self, Self)> {
- let (q, r, c) = self.ct_div_rem(rhs);
- CtOption::new((q, r), Choice::from(c))
+ /// Computes self / rhs, returns the quotient, remainder.
+ pub fn div_rem(&self, rhs: &NonZero<Self>) -> (Self, Self) {
+ // Since `rhs` is nonzero, this should always hold.
+ let (q, r, _c) = self.ct_div_rem(rhs);
+ (q, r)
}
- /// Computes self % rhs, returns the remainder
- /// if rhs != 0
- pub fn reduce(&self, rhs: &Self) -> CtOption<Self> {
- let (r, c) = self.ct_reduce(rhs);
- CtOption::new(r, Choice::from(c))
+ /// Computes self % rhs, returns the remainder.
+ pub fn rem(&self, rhs: &NonZero<Self>) -> Self {
+ // Since `rhs` is nonzero, this should always hold.
+ let (r, _c) = self.const_rem(rhs);
+ r
}
/// Wrapped division is just normal division i.e. `self` / `rhs`
/// There’s no way wrapping could ever happen.
/// This function exists, so that all operations are accounted for in the wrapping operations.
+ ///
+ /// Panics if `rhs == 0`.
pub const fn wrapping_div(&self, rhs: &Self) -> Self {
let (q, _, c) = self.ct_div_rem(rhs);
- assert!(c == 1, "divide by zero");
+ assert!(c.is_true_vartime(), "divide by zero");
q
}
/// Perform checked division, returning a [`CtOption`] which `is_some`
/// only if the rhs != 0
pub fn checked_div(&self, rhs: &Self) -> CtOption<Self> {
- let (q, _, c) = self.ct_div_rem(rhs);
- CtOption::new(q, Choice::from(c))
+ NonZero::new(*rhs).map(|rhs| {
+ let (q, _r) = self.div_rem(&rhs);
+ q
+ })
}
/// Wrapped (modular) remainder calculation is just `self` % `rhs`.
/// There’s no way wrapping could ever happen.
/// This function exists, so that all operations are accounted for in the wrapping operations.
+ ///
+ /// Panics if `rhs == 0`.
pub const fn wrapping_rem(&self, rhs: &Self) -> Self {
- let (r, c) = self.ct_reduce(rhs);
- assert!(c == 1, "modulo zero");
+ let (r, c) = self.const_rem(rhs);
+ assert!(c.is_true_vartime(), "modulo zero");
r
}
/// Perform checked reduction, returning a [`CtOption`] which `is_some`
/// only if the rhs != 0
pub fn checked_rem(&self, rhs: &Self) -> CtOption<Self> {
- let (r, c) = self.ct_reduce(rhs);
- CtOption::new(r, Choice::from(c))
+ NonZero::new(*rhs).map(|rhs| self.rem(&rhs))
}
}
-impl<const LIMBS: usize> Div<&NonZero<UInt<LIMBS>>> for &UInt<LIMBS>
-where
- UInt<LIMBS>: Integer,
-{
- type Output = UInt<LIMBS>;
+//
+// Division by a single limb
+//
+
+impl<const LIMBS: usize> Div<&NonZero<Limb>> for &Uint<LIMBS> {
+ type Output = Uint<LIMBS>;
- fn div(self, rhs: &NonZero<UInt<LIMBS>>) -> Self::Output {
+ fn div(self, rhs: &NonZero<Limb>) -> Self::Output {
*self / *rhs
}
}
-impl<const LIMBS: usize> Div<&NonZero<UInt<LIMBS>>> for UInt<LIMBS>
-where
- UInt<LIMBS>: Integer,
-{
- type Output = UInt<LIMBS>;
+impl<const LIMBS: usize> Div<&NonZero<Limb>> for Uint<LIMBS> {
+ type Output = Uint<LIMBS>;
- fn div(self, rhs: &NonZero<UInt<LIMBS>>) -> Self::Output {
+ fn div(self, rhs: &NonZero<Limb>) -> Self::Output {
self / *rhs
}
}
-impl<const LIMBS: usize> Div<NonZero<UInt<LIMBS>>> for &UInt<LIMBS>
-where
- UInt<LIMBS>: Integer,
-{
- type Output = UInt<LIMBS>;
+impl<const LIMBS: usize> Div<NonZero<Limb>> for &Uint<LIMBS> {
+ type Output = Uint<LIMBS>;
- fn div(self, rhs: NonZero<UInt<LIMBS>>) -> Self::Output {
+ fn div(self, rhs: NonZero<Limb>) -> Self::Output {
*self / rhs
}
}
-impl<const LIMBS: usize> Div<NonZero<UInt<LIMBS>>> for UInt<LIMBS>
-where
- UInt<LIMBS>: Integer,
-{
- type Output = UInt<LIMBS>;
+impl<const LIMBS: usize> Div<NonZero<Limb>> for Uint<LIMBS> {
+ type Output = Uint<LIMBS>;
- fn div(self, rhs: NonZero<UInt<LIMBS>>) -> Self::Output {
- let (q, _, _) = self.ct_div_rem(&rhs);
+ fn div(self, rhs: NonZero<Limb>) -> Self::Output {
+ let (q, _, _) = self.ct_div_rem_limb(*rhs);
q
}
}
-impl<const LIMBS: usize> DivAssign<&NonZero<UInt<LIMBS>>> for UInt<LIMBS>
-where
- UInt<LIMBS>: Integer,
-{
- fn div_assign(&mut self, rhs: &NonZero<UInt<LIMBS>>) {
- let (q, _, _) = self.ct_div_rem(rhs);
- *self = q
+impl<const LIMBS: usize> DivAssign<&NonZero<Limb>> for Uint<LIMBS> {
+ fn div_assign(&mut self, rhs: &NonZero<Limb>) {
+ *self /= *rhs;
}
}
-impl<const LIMBS: usize> DivAssign<NonZero<UInt<LIMBS>>> for UInt<LIMBS>
-where
- UInt<LIMBS>: Integer,
-{
- fn div_assign(&mut self, rhs: NonZero<UInt<LIMBS>>) {
- *self /= &rhs;
+impl<const LIMBS: usize> DivAssign<NonZero<Limb>> for Uint<LIMBS> {
+ fn div_assign(&mut self, rhs: NonZero<Limb>) {
+ *self = *self / rhs;
}
}
-impl<const LIMBS: usize> Div<NonZero<UInt<LIMBS>>> for Wrapping<UInt<LIMBS>> {
- type Output = Wrapping<UInt<LIMBS>>;
+impl<const LIMBS: usize> Div<NonZero<Limb>> for Wrapping<Uint<LIMBS>> {
+ type Output = Wrapping<Uint<LIMBS>>;
- fn div(self, rhs: NonZero<UInt<LIMBS>>) -> Self::Output {
- Wrapping(self.0.wrapping_div(rhs.as_ref()))
+ fn div(self, rhs: NonZero<Limb>) -> Self::Output {
+ Wrapping(self.0 / rhs)
}
}
-impl<const LIMBS: usize> Div<NonZero<UInt<LIMBS>>> for &Wrapping<UInt<LIMBS>> {
- type Output = Wrapping<UInt<LIMBS>>;
+impl<const LIMBS: usize> Div<NonZero<Limb>> for &Wrapping<Uint<LIMBS>> {
+ type Output = Wrapping<Uint<LIMBS>>;
- fn div(self, rhs: NonZero<UInt<LIMBS>>) -> Self::Output {
+ fn div(self, rhs: NonZero<Limb>) -> Self::Output {
*self / rhs
}
}
-impl<const LIMBS: usize> Div<&NonZero<UInt<LIMBS>>> for &Wrapping<UInt<LIMBS>> {
- type Output = Wrapping<UInt<LIMBS>>;
+impl<const LIMBS: usize> Div<&NonZero<Limb>> for &Wrapping<Uint<LIMBS>> {
+ type Output = Wrapping<Uint<LIMBS>>;
- fn div(self, rhs: &NonZero<UInt<LIMBS>>) -> Self::Output {
+ fn div(self, rhs: &NonZero<Limb>) -> Self::Output {
*self / *rhs
}
}
-impl<const LIMBS: usize> Div<&NonZero<UInt<LIMBS>>> for Wrapping<UInt<LIMBS>> {
- type Output = Wrapping<UInt<LIMBS>>;
+impl<const LIMBS: usize> Div<&NonZero<Limb>> for Wrapping<Uint<LIMBS>> {
+ type Output = Wrapping<Uint<LIMBS>>;
- fn div(self, rhs: &NonZero<UInt<LIMBS>>) -> Self::Output {
+ fn div(self, rhs: &NonZero<Limb>) -> Self::Output {
self / *rhs
}
}
-impl<const LIMBS: usize> DivAssign<&NonZero<UInt<LIMBS>>> for Wrapping<UInt<LIMBS>> {
- fn div_assign(&mut self, rhs: &NonZero<UInt<LIMBS>>) {
- *self = Wrapping(self.0.wrapping_div(rhs.as_ref()))
+impl<const LIMBS: usize> DivAssign<&NonZero<Limb>> for Wrapping<Uint<LIMBS>> {
+ fn div_assign(&mut self, rhs: &NonZero<Limb>) {
+ *self = Wrapping(self.0 / rhs)
}
}
-impl<const LIMBS: usize> DivAssign<NonZero<UInt<LIMBS>>> for Wrapping<UInt<LIMBS>> {
- fn div_assign(&mut self, rhs: NonZero<UInt<LIMBS>>) {
+impl<const LIMBS: usize> DivAssign<NonZero<Limb>> for Wrapping<Uint<LIMBS>> {
+ fn div_assign(&mut self, rhs: NonZero<Limb>) {
*self /= &rhs;
}
}
-impl<const LIMBS: usize> Rem<&NonZero<UInt<LIMBS>>> for &UInt<LIMBS>
-where
- UInt<LIMBS>: Integer,
-{
- type Output = UInt<LIMBS>;
+impl<const LIMBS: usize> Rem<&NonZero<Limb>> for &Uint<LIMBS> {
+ type Output = Limb;
- fn rem(self, rhs: &NonZero<UInt<LIMBS>>) -> Self::Output {
+ fn rem(self, rhs: &NonZero<Limb>) -> Self::Output {
*self % *rhs
}
}
-impl<const LIMBS: usize> Rem<&NonZero<UInt<LIMBS>>> for UInt<LIMBS>
-where
- UInt<LIMBS>: Integer,
-{
- type Output = UInt<LIMBS>;
+impl<const LIMBS: usize> Rem<&NonZero<Limb>> for Uint<LIMBS> {
+ type Output = Limb;
- fn rem(self, rhs: &NonZero<UInt<LIMBS>>) -> Self::Output {
+ fn rem(self, rhs: &NonZero<Limb>) -> Self::Output {
self % *rhs
}
}
-impl<const LIMBS: usize> Rem<NonZero<UInt<LIMBS>>> for &UInt<LIMBS>
-where
- UInt<LIMBS>: Integer,
-{
- type Output = UInt<LIMBS>;
+impl<const LIMBS: usize> Rem<NonZero<Limb>> for &Uint<LIMBS> {
+ type Output = Limb;
- fn rem(self, rhs: NonZero<UInt<LIMBS>>) -> Self::Output {
+ fn rem(self, rhs: NonZero<Limb>) -> Self::Output {
*self % rhs
}
}
-impl<const LIMBS: usize> Rem<NonZero<UInt<LIMBS>>> for UInt<LIMBS>
-where
- UInt<LIMBS>: Integer,
-{
- type Output = UInt<LIMBS>;
+impl<const LIMBS: usize> Rem<NonZero<Limb>> for Uint<LIMBS> {
+ type Output = Limb;
- fn rem(self, rhs: NonZero<UInt<LIMBS>>) -> Self::Output {
- let (r, _) = self.ct_reduce(&rhs);
+ fn rem(self, rhs: NonZero<Limb>) -> Self::Output {
+ let (_, r, _) = self.ct_div_rem_limb(*rhs);
r
}
}
-impl<const LIMBS: usize> RemAssign<&NonZero<UInt<LIMBS>>> for UInt<LIMBS>
-where
- UInt<LIMBS>: Integer,
-{
- fn rem_assign(&mut self, rhs: &NonZero<UInt<LIMBS>>) {
- let (r, _) = self.ct_reduce(rhs);
- *self = r
+impl<const LIMBS: usize> RemAssign<&NonZero<Limb>> for Uint<LIMBS> {
+ fn rem_assign(&mut self, rhs: &NonZero<Limb>) {
+ *self = (*self % rhs).into();
}
}
-impl<const LIMBS: usize> RemAssign<NonZero<UInt<LIMBS>>> for UInt<LIMBS>
-where
- UInt<LIMBS>: Integer,
-{
- fn rem_assign(&mut self, rhs: NonZero<UInt<LIMBS>>) {
+impl<const LIMBS: usize> RemAssign<NonZero<Limb>> for Uint<LIMBS> {
+ fn rem_assign(&mut self, rhs: NonZero<Limb>) {
*self %= &rhs;
}
}
-impl<const LIMBS: usize> Rem<NonZero<UInt<LIMBS>>> for Wrapping<UInt<LIMBS>> {
- type Output = Wrapping<UInt<LIMBS>>;
+impl<const LIMBS: usize> Rem<NonZero<Limb>> for Wrapping<Uint<LIMBS>> {
+ type Output = Wrapping<Limb>;
+
+ fn rem(self, rhs: NonZero<Limb>) -> Self::Output {
+ Wrapping(self.0 % rhs)
+ }
+}
+
+impl<const LIMBS: usize> Rem<NonZero<Limb>> for &Wrapping<Uint<LIMBS>> {
+ type Output = Wrapping<Limb>;
+
+ fn rem(self, rhs: NonZero<Limb>) -> Self::Output {
+ *self % rhs
+ }
+}
+
+impl<const LIMBS: usize> Rem<&NonZero<Limb>> for &Wrapping<Uint<LIMBS>> {
+ type Output = Wrapping<Limb>;
+
+ fn rem(self, rhs: &NonZero<Limb>) -> Self::Output {
+ *self % *rhs
+ }
+}
+
+impl<const LIMBS: usize> Rem<&NonZero<Limb>> for Wrapping<Uint<LIMBS>> {
+ type Output = Wrapping<Limb>;
+
+ fn rem(self, rhs: &NonZero<Limb>) -> Self::Output {
+ self % *rhs
+ }
+}
+
+impl<const LIMBS: usize> RemAssign<NonZero<Limb>> for Wrapping<Uint<LIMBS>> {
+ fn rem_assign(&mut self, rhs: NonZero<Limb>) {
+ *self %= &rhs;
+ }
+}
+
+impl<const LIMBS: usize> RemAssign<&NonZero<Limb>> for Wrapping<Uint<LIMBS>> {
+ fn rem_assign(&mut self, rhs: &NonZero<Limb>) {
+ *self = Wrapping((self.0 % rhs).into())
+ }
+}
+
+//
+// Division by a Uint
+//
+
+impl<const LIMBS: usize> Div<&NonZero<Uint<LIMBS>>> for &Uint<LIMBS> {
+ type Output = Uint<LIMBS>;
+
+ fn div(self, rhs: &NonZero<Uint<LIMBS>>) -> Self::Output {
+ *self / *rhs
+ }
+}
+
+impl<const LIMBS: usize> Div<&NonZero<Uint<LIMBS>>> for Uint<LIMBS> {
+ type Output = Uint<LIMBS>;
+
+ fn div(self, rhs: &NonZero<Uint<LIMBS>>) -> Self::Output {
+ self / *rhs
+ }
+}
+
+impl<const LIMBS: usize> Div<NonZero<Uint<LIMBS>>> for &Uint<LIMBS> {
+ type Output = Uint<LIMBS>;
+
+ fn div(self, rhs: NonZero<Uint<LIMBS>>) -> Self::Output {
+ *self / rhs
+ }
+}
+
+impl<const LIMBS: usize> Div<NonZero<Uint<LIMBS>>> for Uint<LIMBS> {
+ type Output = Uint<LIMBS>;
+
+ fn div(self, rhs: NonZero<Uint<LIMBS>>) -> Self::Output {
+ let (q, _) = self.div_rem(&rhs);
+ q
+ }
+}
+
+impl<const LIMBS: usize> DivAssign<&NonZero<Uint<LIMBS>>> for Uint<LIMBS> {
+ fn div_assign(&mut self, rhs: &NonZero<Uint<LIMBS>>) {
+ *self /= *rhs
+ }
+}
+
+impl<const LIMBS: usize> DivAssign<NonZero<Uint<LIMBS>>> for Uint<LIMBS> {
+ fn div_assign(&mut self, rhs: NonZero<Uint<LIMBS>>) {
+ *self = *self / rhs;
+ }
+}
+
+impl<const LIMBS: usize> Div<NonZero<Uint<LIMBS>>> for Wrapping<Uint<LIMBS>> {
+ type Output = Wrapping<Uint<LIMBS>>;
+
+ fn div(self, rhs: NonZero<Uint<LIMBS>>) -> Self::Output {
+ Wrapping(self.0 / rhs)
+ }
+}
+
+impl<const LIMBS: usize> Div<NonZero<Uint<LIMBS>>> for &Wrapping<Uint<LIMBS>> {
+ type Output = Wrapping<Uint<LIMBS>>;
- fn rem(self, rhs: NonZero<UInt<LIMBS>>) -> Self::Output {
- Wrapping(self.0.wrapping_rem(rhs.as_ref()))
+ fn div(self, rhs: NonZero<Uint<LIMBS>>) -> Self::Output {
+ *self / rhs
+ }
+}
+
+impl<const LIMBS: usize> Div<&NonZero<Uint<LIMBS>>> for &Wrapping<Uint<LIMBS>> {
+ type Output = Wrapping<Uint<LIMBS>>;
+
+ fn div(self, rhs: &NonZero<Uint<LIMBS>>) -> Self::Output {
+ *self / *rhs
}
}
-impl<const LIMBS: usize> Rem<NonZero<UInt<LIMBS>>> for &Wrapping<UInt<LIMBS>> {
- type Output = Wrapping<UInt<LIMBS>>;
+impl<const LIMBS: usize> Div<&NonZero<Uint<LIMBS>>> for Wrapping<Uint<LIMBS>> {
+ type Output = Wrapping<Uint<LIMBS>>;
+
+ fn div(self, rhs: &NonZero<Uint<LIMBS>>) -> Self::Output {
+ self / *rhs
+ }
+}
- fn rem(self, rhs: NonZero<UInt<LIMBS>>) -> Self::Output {
+impl<const LIMBS: usize> DivAssign<&NonZero<Uint<LIMBS>>> for Wrapping<Uint<LIMBS>> {
+ fn div_assign(&mut self, rhs: &NonZero<Uint<LIMBS>>) {
+ *self = Wrapping(self.0 / rhs);
+ }
+}
+
+impl<const LIMBS: usize> DivAssign<NonZero<Uint<LIMBS>>> for Wrapping<Uint<LIMBS>> {
+ fn div_assign(&mut self, rhs: NonZero<Uint<LIMBS>>) {
+ *self /= &rhs;
+ }
+}
+
+impl<const LIMBS: usize> Rem<&NonZero<Uint<LIMBS>>> for &Uint<LIMBS> {
+ type Output = Uint<LIMBS>;
+
+ fn rem(self, rhs: &NonZero<Uint<LIMBS>>) -> Self::Output {
+ *self % *rhs
+ }
+}
+
+impl<const LIMBS: usize> Rem<&NonZero<Uint<LIMBS>>> for Uint<LIMBS> {
+ type Output = Uint<LIMBS>;
+
+ fn rem(self, rhs: &NonZero<Uint<LIMBS>>) -> Self::Output {
+ self % *rhs
+ }
+}
+
+impl<const LIMBS: usize> Rem<NonZero<Uint<LIMBS>>> for &Uint<LIMBS> {
+ type Output = Uint<LIMBS>;
+
+ fn rem(self, rhs: NonZero<Uint<LIMBS>>) -> Self::Output {
*self % rhs
}
}
-impl<const LIMBS: usize> Rem<&NonZero<UInt<LIMBS>>> for &Wrapping<UInt<LIMBS>> {
- type Output = Wrapping<UInt<LIMBS>>;
+impl<const LIMBS: usize> Rem<NonZero<Uint<LIMBS>>> for Uint<LIMBS> {
+ type Output = Uint<LIMBS>;
+
+ fn rem(self, rhs: NonZero<Uint<LIMBS>>) -> Self::Output {
+ Self::rem(&self, &rhs)
+ }
+}
+
+impl<const LIMBS: usize> RemAssign<&NonZero<Uint<LIMBS>>> for Uint<LIMBS> {
+ fn rem_assign(&mut self, rhs: &NonZero<Uint<LIMBS>>) {
+ *self %= *rhs
+ }
+}
+
+impl<const LIMBS: usize> RemAssign<NonZero<Uint<LIMBS>>> for Uint<LIMBS> {
+ fn rem_assign(&mut self, rhs: NonZero<Uint<LIMBS>>) {
+ *self = *self % rhs;
+ }
+}
+
+impl<const LIMBS: usize> Rem<NonZero<Uint<LIMBS>>> for Wrapping<Uint<LIMBS>> {
+ type Output = Wrapping<Uint<LIMBS>>;
- fn rem(self, rhs: &NonZero<UInt<LIMBS>>) -> Self::Output {
+ fn rem(self, rhs: NonZero<Uint<LIMBS>>) -> Self::Output {
+ Wrapping(self.0 % rhs)
+ }
+}
+
+impl<const LIMBS: usize> Rem<NonZero<Uint<LIMBS>>> for &Wrapping<Uint<LIMBS>> {
+ type Output = Wrapping<Uint<LIMBS>>;
+
+ fn rem(self, rhs: NonZero<Uint<LIMBS>>) -> Self::Output {
+ *self % rhs
+ }
+}
+
+impl<const LIMBS: usize> Rem<&NonZero<Uint<LIMBS>>> for &Wrapping<Uint<LIMBS>> {
+ type Output = Wrapping<Uint<LIMBS>>;
+
+ fn rem(self, rhs: &NonZero<Uint<LIMBS>>) -> Self::Output {
*self % *rhs
}
}
-impl<const LIMBS: usize> Rem<&NonZero<UInt<LIMBS>>> for Wrapping<UInt<LIMBS>> {
- type Output = Wrapping<UInt<LIMBS>>;
+impl<const LIMBS: usize> Rem<&NonZero<Uint<LIMBS>>> for Wrapping<Uint<LIMBS>> {
+ type Output = Wrapping<Uint<LIMBS>>;
- fn rem(self, rhs: &NonZero<UInt<LIMBS>>) -> Self::Output {
+ fn rem(self, rhs: &NonZero<Uint<LIMBS>>) -> Self::Output {
self % *rhs
}
}
-impl<const LIMBS: usize> RemAssign<NonZero<UInt<LIMBS>>> for Wrapping<UInt<LIMBS>> {
- fn rem_assign(&mut self, rhs: NonZero<UInt<LIMBS>>) {
+impl<const LIMBS: usize> RemAssign<NonZero<Uint<LIMBS>>> for Wrapping<Uint<LIMBS>> {
+ fn rem_assign(&mut self, rhs: NonZero<Uint<LIMBS>>) {
*self %= &rhs;
}
}
-impl<const LIMBS: usize> RemAssign<&NonZero<UInt<LIMBS>>> for Wrapping<UInt<LIMBS>> {
- fn rem_assign(&mut self, rhs: &NonZero<UInt<LIMBS>>) {
- *self = Wrapping(self.0.wrapping_rem(rhs.as_ref()))
+impl<const LIMBS: usize> RemAssign<&NonZero<Uint<LIMBS>>> for Wrapping<Uint<LIMBS>> {
+ fn rem_assign(&mut self, rhs: &NonZero<Uint<LIMBS>>) {
+ *self = Wrapping(self.0 % rhs)
}
}
@@ -387,7 +610,7 @@ mod tests {
let lhs = U256::from(*n);
let rhs = U256::from(*d);
let (q, r, is_some) = lhs.ct_div_rem(&rhs);
- assert_eq!(is_some, 1);
+ assert!(is_some.is_true_vartime());
assert_eq!(U256::from(*e), q);
assert_eq!(U256::from(*ee), r);
}
@@ -401,9 +624,9 @@ mod tests {
let num = U256::random(&mut rng).shr_vartime(128);
let den = U256::random(&mut rng).shr_vartime(128);
let n = num.checked_mul(&den);
- if n.is_some().unwrap_u8() == 1 {
+ if n.is_some().into() {
let (q, _, is_some) = n.unwrap().ct_div_rem(&den);
- assert_eq!(is_some, 1);
+ assert!(is_some.is_true_vartime());
assert_eq!(q, num);
}
}
@@ -415,17 +638,17 @@ mod tests {
let mut b = U256::ZERO;
b.limbs[b.limbs.len() - 1] = Limb(Word::MAX);
let q = a.wrapping_div(&b);
- assert_eq!(q, UInt::ZERO);
+ assert_eq!(q, Uint::ZERO);
a.limbs[a.limbs.len() - 1] = Limb(1 << (HI_BIT - 7));
b.limbs[b.limbs.len() - 1] = Limb(0x82 << (HI_BIT - 7));
let q = a.wrapping_div(&b);
- assert_eq!(q, UInt::ZERO);
+ assert_eq!(q, Uint::ZERO);
}
#[test]
fn div_zero() {
let (q, r, is_some) = U256::ONE.ct_div_rem(&U256::ZERO);
- assert_eq!(is_some, 0);
+ assert!(!is_some.is_true_vartime());
assert_eq!(q, U256::ZERO);
assert_eq!(r, U256::ONE);
}
@@ -433,36 +656,49 @@ mod tests {
#[test]
fn div_one() {
let (q, r, is_some) = U256::from(10u8).ct_div_rem(&U256::ONE);
- assert_eq!(is_some, 1);
+ assert!(is_some.is_true_vartime());
assert_eq!(q, U256::from(10u8));
assert_eq!(r, U256::ZERO);
}
#[test]
fn reduce_one() {
- let (r, is_some) = U256::from(10u8).ct_reduce(&U256::ONE);
- assert_eq!(is_some, 1);
+ let (r, is_some) = U256::from(10u8).const_rem(&U256::ONE);
+ assert!(is_some.is_true_vartime());
assert_eq!(r, U256::ZERO);
}
#[test]
fn reduce_zero() {
let u = U256::from(10u8);
- let (r, is_some) = u.ct_reduce(&U256::ZERO);
- assert_eq!(is_some, 0);
+ let (r, is_some) = u.const_rem(&U256::ZERO);
+ assert!(!is_some.is_true_vartime());
assert_eq!(r, u);
}
#[test]
fn reduce_tests() {
- let (r, is_some) = U256::from(10u8).ct_reduce(&U256::from(2u8));
- assert_eq!(is_some, 1);
+ let (r, is_some) = U256::from(10u8).const_rem(&U256::from(2u8));
+ assert!(is_some.is_true_vartime());
assert_eq!(r, U256::ZERO);
- let (r, is_some) = U256::from(10u8).ct_reduce(&U256::from(3u8));
- assert_eq!(is_some, 1);
+ let (r, is_some) = U256::from(10u8).const_rem(&U256::from(3u8));
+ assert!(is_some.is_true_vartime());
assert_eq!(r, U256::ONE);
- let (r, is_some) = U256::from(10u8).ct_reduce(&U256::from(7u8));
- assert_eq!(is_some, 1);
+ let (r, is_some) = U256::from(10u8).const_rem(&U256::from(7u8));
+ assert!(is_some.is_true_vartime());
+ assert_eq!(r, U256::from(3u8));
+ }
+
+ #[test]
+ fn reduce_tests_wide_zero_padded() {
+ let (r, is_some) = U256::const_rem_wide((U256::from(10u8), U256::ZERO), &U256::from(2u8));
+ assert!(is_some.is_true_vartime());
+ assert_eq!(r, U256::ZERO);
+ let (r, is_some) = U256::const_rem_wide((U256::from(10u8), U256::ZERO), &U256::from(3u8));
+ assert!(is_some.is_true_vartime());
+ assert_eq!(r, U256::ONE);
+ let (r, is_some) = U256::const_rem_wide((U256::from(10u8), U256::ZERO), &U256::from(7u8));
+ assert!(is_some.is_true_vartime());
assert_eq!(r, U256::from(3u8));
}
@@ -472,7 +708,7 @@ mod tests {
let mut b = U256::ZERO;
b.limbs[b.limbs.len() - 1] = Limb(Word::MAX);
let r = a.wrapping_rem(&b);
- assert_eq!(r, UInt::ZERO);
+ assert_eq!(r, Uint::ZERO);
a.limbs[a.limbs.len() - 1] = Limb(1 << (HI_BIT - 7));
b.limbs[b.limbs.len() - 1] = Limb(0x82 << (HI_BIT - 7));
let r = a.wrapping_rem(&b);
@@ -481,16 +717,28 @@ mod tests {
#[cfg(feature = "rand")]
#[test]
- fn reduce2krand() {
+ fn rem2krand() {
let mut rng = ChaChaRng::from_seed([7u8; 32]);
for _ in 0..25 {
let num = U256::random(&mut rng);
let k = (rng.next_u32() % 256) as usize;
let den = U256::ONE.shl_vartime(k);
- let a = num.reduce2k(k);
+ let a = num.rem2k(k);
let e = num.wrapping_rem(&den);
assert_eq!(a, e);
}
}
+
+ #[test]
+ fn rem_trait() {
+ let a = U256::from(10u64);
+ let b = NonZero::new(U256::from(3u64)).unwrap();
+ let c = U256::from(1u64);
+
+ assert_eq!(a % b, c);
+ assert_eq!(a % &b, c);
+ assert_eq!(&a % b, c);
+ assert_eq!(&a % &b, c);
+ }
}
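
A short usage sketch of the single-limb operators added above (illustrative only; it assumes `NonZero<Limb>` can be built with `NonZero::new` just like `NonZero<Uint>` in the crate's own tests):

use crypto_bigint::{Limb, NonZero, U256};

fn main() {
    let x = U256::from(100u64);
    let d = NonZero::new(Limb(7)).unwrap();

    let q: U256 = x / d; // the quotient stays a full-width Uint
    let r: Limb = x % d; // the remainder fits in a single limb

    assert_eq!(q, U256::from(14u64));
    assert_eq!(r, Limb(2));
}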
diff --git a/vendor/crypto-bigint/src/uint/div_limb.rs b/vendor/crypto-bigint/src/uint/div_limb.rs
new file mode 100644
index 000000000..9bbd828e4
--- /dev/null
+++ b/vendor/crypto-bigint/src/uint/div_limb.rs
@@ -0,0 +1,287 @@
+//! Implementation of constant-time division via reciprocal precomputation, as described in
+//! "Improved Division by Invariant Integers" by Niels Möller and Torbjorn Granlund
+//! (DOI: 10.1109/TC.2010.143, <https://gmplib.org/~tege/division-paper.pdf>).
+use subtle::{Choice, ConditionallySelectable, CtOption};
+
+use crate::{CtChoice, Limb, Uint, WideWord, Word};
+
+/// Calculates the reciprocal of the given 32-bit divisor with the highest bit set.
+#[cfg(target_pointer_width = "32")]
+pub const fn reciprocal(d: Word) -> Word {
+ debug_assert!(d >= (1 << (Word::BITS - 1)));
+
+ let d0 = d & 1;
+ let d10 = d >> 22;
+ let d21 = (d >> 11) + 1;
+ let d31 = (d >> 1) + d0;
+ let v0 = short_div((1 << 24) - (1 << 14) + (1 << 9), 24, d10, 10);
+ let (hi, _lo) = mulhilo(v0 * v0, d21);
+ let v1 = (v0 << 4) - hi - 1;
+
+ // Checks that the expression for `e` can be simplified in the way we did below.
+ debug_assert!(mulhilo(v1, d31).0 == (1 << 16) - 1);
+ let e = Word::MAX - v1.wrapping_mul(d31) + 1 + (v1 >> 1) * d0;
+
+ let (hi, _lo) = mulhilo(v1, e);
+ // Note: the paper does not mention a wrapping add here,
+ // but the 64-bit version has it at this stage, and the function panics without it
+ // when calculating a reciprocal for `Word::MAX`.
+ let v2 = (v1 << 15).wrapping_add(hi >> 1);
+
+ // The paper has `(v2 + 1) * d / 2^32` (there's another 2^32, but it's accounted for later).
+ // If `v2 == 2^32-1` this should give `d`, but we can't achieve this in our wrapping arithmetic.
+ // Hence the `ct_select()`.
+ let x = v2.wrapping_add(1);
+ let (hi, _lo) = mulhilo(x, d);
+ let hi = Limb::ct_select(Limb(d), Limb(hi), Limb(x).ct_is_nonzero()).0;
+
+ v2.wrapping_sub(hi).wrapping_sub(d)
+}
+
+/// Calculates the reciprocal of the given 64-bit divisor with the highest bit set.
+#[cfg(target_pointer_width = "64")]
+pub const fn reciprocal(d: Word) -> Word {
+ debug_assert!(d >= (1 << (Word::BITS - 1)));
+
+ let d0 = d & 1;
+ let d9 = d >> 55;
+ let d40 = (d >> 24) + 1;
+ let d63 = (d >> 1) + d0;
+ let v0 = short_div((1 << 19) - 3 * (1 << 8), 19, d9 as u32, 9) as u64;
+ let v1 = (v0 << 11) - ((v0 * v0 * d40) >> 40) - 1;
+ let v2 = (v1 << 13) + ((v1 * ((1 << 60) - v1 * d40)) >> 47);
+
+ // Checks that the expression for `e` can be simplified in the way we did below.
+ debug_assert!(mulhilo(v2, d63).0 == (1 << 32) - 1);
+ let e = Word::MAX - v2.wrapping_mul(d63) + 1 + (v2 >> 1) * d0;
+
+ let (hi, _lo) = mulhilo(v2, e);
+ let v3 = (v2 << 31).wrapping_add(hi >> 1);
+
+ // The paper has `(v3 + 1) * d / 2^64` (there's another 2^64, but it's accounted for later).
+ // If `v3 == 2^64-1` this should give `d`, but we can't achieve this in our wrapping arithmetic.
+ // Hence the `ct_select()`.
+ let x = v3.wrapping_add(1);
+ let (hi, _lo) = mulhilo(x, d);
+ let hi = Limb::ct_select(Limb(d), Limb(hi), Limb(x).ct_is_nonzero()).0;
+
+ v3.wrapping_sub(hi).wrapping_sub(d)
+}
+
+/// Returns `u32::MAX` if `a < b` and `0` otherwise.
+#[inline]
+const fn ct_lt(a: u32, b: u32) -> u32 {
+ let bit = (((!a) & b) | (((!a) | b) & (a.wrapping_sub(b)))) >> (u32::BITS - 1);
+ bit.wrapping_neg()
+}
+
+/// Returns `a` if `c == 0` and `b` if `c == u32::MAX`.
+#[inline(always)]
+const fn ct_select(a: u32, b: u32, c: u32) -> u32 {
+ a ^ (c & (a ^ b))
+}
+
+/// Calculates `dividend / divisor` in constant time, given `dividend` and `divisor`
+/// along with their maximum bit sizes.
+#[inline(always)]
+const fn short_div(dividend: u32, dividend_bits: u32, divisor: u32, divisor_bits: u32) -> u32 {
+ // TODO: this may be sped up even more using the fact that `dividend` is a known constant.
+
+ // In the paper this is a table lookup, but since we want it to be constant-time,
+ // we have to access all the elements of the table, which is quite large.
+ // So this shift-and-subtract approach is actually faster.
+
+ // Passing `dividend_bits` and `divisor_bits` because calling `.leading_zeros()`
+ // causes a significant slowdown, and we know those values anyway.
+
+ let mut dividend = dividend;
+ let mut divisor = divisor << (dividend_bits - divisor_bits);
+ let mut quotient: u32 = 0;
+ let mut i = dividend_bits - divisor_bits + 1;
+
+ while i > 0 {
+ i -= 1;
+ let bit = ct_lt(dividend, divisor);
+ dividend = ct_select(dividend.wrapping_sub(divisor), dividend, bit);
+ divisor >>= 1;
+ let inv_bit = !bit;
+ quotient |= (inv_bit >> (u32::BITS - 1)) << i;
+ }
+
+ quotient
+}
+
+/// Multiplies `x` and `y`, returning the most significant
+/// and the least significant words as `(hi, lo)`.
+#[inline(always)]
+const fn mulhilo(x: Word, y: Word) -> (Word, Word) {
+ let res = (x as WideWord) * (y as WideWord);
+ ((res >> Word::BITS) as Word, res as Word)
+}
+
+/// Adds wide numbers represented by pairs of (most significant word, least significant word)
+/// and returns the result in the same format `(hi, lo)`.
+#[inline(always)]
+const fn addhilo(x_hi: Word, x_lo: Word, y_hi: Word, y_lo: Word) -> (Word, Word) {
+ let res = (((x_hi as WideWord) << Word::BITS) | (x_lo as WideWord))
+ + (((y_hi as WideWord) << Word::BITS) | (y_lo as WideWord));
+ ((res >> Word::BITS) as Word, res as Word)
+}
+
+/// Calculates the quotient and the remainder of dividing a wide word
+/// (supplied as high and low words) by the divisor encoded in `reciprocal`.
+#[inline(always)]
+const fn div2by1(u1: Word, u0: Word, reciprocal: &Reciprocal) -> (Word, Word) {
+ let d = reciprocal.divisor_normalized;
+
+ debug_assert!(d >= (1 << (Word::BITS - 1)));
+ debug_assert!(u1 < d);
+
+ let (q1, q0) = mulhilo(reciprocal.reciprocal, u1);
+ let (q1, q0) = addhilo(q1, q0, u1, u0);
+ let q1 = q1.wrapping_add(1);
+ let r = u0.wrapping_sub(q1.wrapping_mul(d));
+
+ let r_gt_q0 = Limb::ct_lt(Limb(q0), Limb(r));
+ let q1 = Limb::ct_select(Limb(q1), Limb(q1.wrapping_sub(1)), r_gt_q0).0;
+ let r = Limb::ct_select(Limb(r), Limb(r.wrapping_add(d)), r_gt_q0).0;
+
+ // If this were a normal `if`, we wouldn't need wrapping ops, because there would be no overflow.
+ // But since we calculate both results either way, we have to wrap.
+ // The assertion below still checks the lack of overflow in debug mode.
+ debug_assert!(r < d || q1 < Word::MAX);
+ let r_ge_d = Limb::ct_le(Limb(d), Limb(r));
+ let q1 = Limb::ct_select(Limb(q1), Limb(q1.wrapping_add(1)), r_ge_d).0;
+ let r = Limb::ct_select(Limb(r), Limb(r.wrapping_sub(d)), r_ge_d).0;
+
+ (q1, r)
+}
+
+/// A pre-calculated reciprocal for division by a single limb.
+#[derive(Copy, Clone, Debug, PartialEq, Eq)]
+pub struct Reciprocal {
+ divisor_normalized: Word,
+ shift: u32,
+ reciprocal: Word,
+}
+
+impl Reciprocal {
+ /// Pre-calculates a reciprocal for a known divisor,
+ /// to be used in the single-limb division later.
+ /// Returns the reciprocal along with the truthy value if `divisor != 0`,
+ /// or the falsy value otherwise.
+ ///
+ /// Note: if the returned flag is falsy, the returned reciprocal object is still self-consistent
+ /// and can be passed to functions here without causing them to panic,
+ /// but the results should not be used.
+ pub const fn ct_new(divisor: Limb) -> (Self, CtChoice) {
+ // Assuming this is constant-time for primitive types.
+ let shift = divisor.0.leading_zeros();
+
+ #[allow(trivial_numeric_casts)]
+ let is_some = Limb((Word::BITS - shift) as Word).ct_is_nonzero();
+
+ // If `divisor = 0`, shifting `divisor` by `leading_zeros == Word::BITS` will cause a panic.
+ // Have to substitute a "bogus" shift in that case.
+ #[allow(trivial_numeric_casts)]
+ let shift_limb = Limb::ct_select(Limb::ZERO, Limb(shift as Word), is_some);
+
+ // Need to provide bogus normalized divisor and reciprocal too,
+ // so that we don't get a panic in low-level functions.
+ let divisor_normalized = divisor.shl(shift_limb);
+ let divisor_normalized = Limb::ct_select(Limb::MAX, divisor_normalized, is_some).0;
+
+ #[allow(trivial_numeric_casts)]
+ let shift = shift_limb.0 as u32;
+
+ (
+ Self {
+ divisor_normalized,
+ shift,
+ reciprocal: reciprocal(divisor_normalized),
+ },
+ is_some,
+ )
+ }
+
+ /// Returns a default instance of this object.
+ /// It is a self-consistent `Reciprocal` that will not cause panics in functions that take it.
+ ///
+ /// NOTE: intended for use as a placeholder during compile-time array generation;
+ /// don't rely on the contents.
+ pub const fn default() -> Self {
+ Self {
+ divisor_normalized: Word::MAX,
+ shift: 0,
+ // The result of calling `reciprocal(Word::MAX)`
+ // This holds both for 32- and 64-bit versions.
+ reciprocal: 1,
+ }
+ }
+
+ /// A non-const-fn version of `ct_new()`, wrapping the result in a `CtOption`.
+ pub fn new(divisor: Limb) -> CtOption<Self> {
+ let (rec, is_some) = Self::ct_new(divisor);
+ CtOption::new(rec, is_some.into())
+ }
+}
+
+impl ConditionallySelectable for Reciprocal {
+ fn conditional_select(a: &Self, b: &Self, choice: Choice) -> Self {
+ Self {
+ divisor_normalized: Word::conditional_select(
+ &a.divisor_normalized,
+ &b.divisor_normalized,
+ choice,
+ ),
+ shift: u32::conditional_select(&a.shift, &b.shift, choice),
+ reciprocal: Word::conditional_select(&a.reciprocal, &b.reciprocal, choice),
+ }
+ }
+}
+
+// `CtOption.map()` needs this; for some reason it doesn't use the value it already has
+// for the `None` branch.
+impl Default for Reciprocal {
+ fn default() -> Self {
+ Self::default()
+ }
+}
+
+/// Divides `u` by the divisor encoded in the `reciprocal`, and returns
+/// the quotient and the remainder.
+#[inline(always)]
+pub(crate) const fn div_rem_limb_with_reciprocal<const L: usize>(
+ u: &Uint<L>,
+ reciprocal: &Reciprocal,
+) -> (Uint<L>, Limb) {
+ let (u_shifted, u_hi) = u.shl_limb(reciprocal.shift as usize);
+ let mut r = u_hi.0;
+ let mut q = [Limb::ZERO; L];
+
+ let mut j = L;
+ while j > 0 {
+ j -= 1;
+ let (qj, rj) = div2by1(r, u_shifted.as_limbs()[j].0, reciprocal);
+ q[j] = Limb(qj);
+ r = rj;
+ }
+ (Uint::<L>::new(q), Limb(r >> reciprocal.shift))
+}
+
+#[cfg(test)]
+mod tests {
+ use super::{div2by1, Reciprocal};
+ use crate::{Limb, Word};
+ #[test]
+ fn div2by1_overflow() {
+ // A regression test for a situation where an operation in div2by1() (`q1 + 1`)
+ // that is protected from overflowing by a condition in the original paper (`r >= d`)
+ // still overflows because we're calculating the results for both branches.
+ let r = Reciprocal::new(Limb(Word::MAX - 1)).unwrap();
+ assert_eq!(
+ div2by1(Word::MAX - 2, Word::MAX - 63, &r),
+ (Word::MAX, Word::MAX - 65)
+ );
+ }
+}
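
To make the 2-by-1 step above concrete, here is a plain (not constant-time) sketch of the same Möller–Granlund scheme over primitive integers; the `if` branches stand in for the `ct_select` calls, and every name here is illustrative rather than the crate's:

// Reciprocal v = floor((2^128 - 1) / d) - 2^64 for a normalized 64-bit divisor d.
fn reciprocal_naive(d: u64) -> u64 {
    assert!(d >= 1 << 63, "divisor must be normalized");
    ((u128::MAX / d as u128) - (1u128 << 64)) as u64
}

// Divide the double word (u1, u0), with u1 < d, by d using the precomputed reciprocal.
fn div2by1_naive(u1: u64, u0: u64, d: u64, v: u64) -> (u64, u64) {
    assert!(u1 < d);
    let q = (v as u128) * (u1 as u128) + (((u1 as u128) << 64) | u0 as u128);
    let (mut q1, q0) = ((q >> 64) as u64, q as u64);
    q1 = q1.wrapping_add(1);
    let mut r = u0.wrapping_sub(q1.wrapping_mul(d));
    if r > q0 {
        // The quotient estimate was one too large.
        q1 = q1.wrapping_sub(1);
        r = r.wrapping_add(d);
    }
    if r >= d {
        // The rare case: the estimate was one too small.
        q1 += 1;
        r -= d;
    }
    (q1, r)
}

fn main() {
    let d = 0x8000_0000_0000_0001u64;
    let v = reciprocal_naive(d);
    let (u1, u0) = (0x1234_5678_9abc_def0u64, 0x0fed_cba9_8765_4321u64);
    let (q, r) = div2by1_naive(u1, u0, d, v);

    // Cross-check against native 128-bit division.
    let u = ((u1 as u128) << 64) | u0 as u128;
    assert_eq!(q as u128, u / d as u128);
    assert_eq!(r as u128, u % d as u128);
}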
diff --git a/vendor/crypto-bigint/src/uint/encoding.rs b/vendor/crypto-bigint/src/uint/encoding.rs
index a83976238..5431b8166 100644
--- a/vendor/crypto-bigint/src/uint/encoding.rs
+++ b/vendor/crypto-bigint/src/uint/encoding.rs
@@ -1,4 +1,4 @@
-//! Const-friendly decoding operations for [`UInt`]
+//! Const-friendly decoding operations for [`Uint`]
#[cfg(all(feature = "der", feature = "generic-array"))]
mod der;
@@ -6,51 +6,51 @@ mod der;
#[cfg(feature = "rlp")]
mod rlp;
-use super::UInt;
+use super::Uint;
use crate::{Encoding, Limb, Word};
-impl<const LIMBS: usize> UInt<LIMBS> {
- /// Create a new [`UInt`] from the provided big endian bytes.
+impl<const LIMBS: usize> Uint<LIMBS> {
+ /// Create a new [`Uint`] from the provided big endian bytes.
pub const fn from_be_slice(bytes: &[u8]) -> Self {
assert!(
- bytes.len() == Limb::BYTE_SIZE * LIMBS,
+ bytes.len() == Limb::BYTES * LIMBS,
"bytes are not the expected size"
);
let mut res = [Limb::ZERO; LIMBS];
- let mut buf = [0u8; Limb::BYTE_SIZE];
+ let mut buf = [0u8; Limb::BYTES];
let mut i = 0;
while i < LIMBS {
let mut j = 0;
- while j < Limb::BYTE_SIZE {
- buf[j] = bytes[i * Limb::BYTE_SIZE + j];
+ while j < Limb::BYTES {
+ buf[j] = bytes[i * Limb::BYTES + j];
j += 1;
}
res[LIMBS - i - 1] = Limb(Word::from_be_bytes(buf));
i += 1;
}
- UInt::new(res)
+ Uint::new(res)
}
- /// Create a new [`UInt`] from the provided big endian hex string.
+ /// Create a new [`Uint`] from the provided big endian hex string.
pub const fn from_be_hex(hex: &str) -> Self {
let bytes = hex.as_bytes();
assert!(
- bytes.len() == Limb::BYTE_SIZE * LIMBS * 2,
+ bytes.len() == Limb::BYTES * LIMBS * 2,
"hex string is not the expected size"
);
let mut res = [Limb::ZERO; LIMBS];
- let mut buf = [0u8; Limb::BYTE_SIZE];
+ let mut buf = [0u8; Limb::BYTES];
let mut i = 0;
while i < LIMBS {
let mut j = 0;
- while j < Limb::BYTE_SIZE {
- let offset = (i * Limb::BYTE_SIZE + j) * 2;
+ while j < Limb::BYTES {
+ let offset = (i * Limb::BYTES + j) * 2;
buf[j] = decode_hex_byte([bytes[offset], bytes[offset + 1]]);
j += 1;
}
@@ -58,50 +58,50 @@ impl<const LIMBS: usize> UInt<LIMBS> {
i += 1;
}
- UInt::new(res)
+ Uint::new(res)
}
- /// Create a new [`UInt`] from the provided little endian bytes.
+ /// Create a new [`Uint`] from the provided little endian bytes.
pub const fn from_le_slice(bytes: &[u8]) -> Self {
assert!(
- bytes.len() == Limb::BYTE_SIZE * LIMBS,
+ bytes.len() == Limb::BYTES * LIMBS,
"bytes are not the expected size"
);
let mut res = [Limb::ZERO; LIMBS];
- let mut buf = [0u8; Limb::BYTE_SIZE];
+ let mut buf = [0u8; Limb::BYTES];
let mut i = 0;
while i < LIMBS {
let mut j = 0;
- while j < Limb::BYTE_SIZE {
- buf[j] = bytes[i * Limb::BYTE_SIZE + j];
+ while j < Limb::BYTES {
+ buf[j] = bytes[i * Limb::BYTES + j];
j += 1;
}
res[i] = Limb(Word::from_le_bytes(buf));
i += 1;
}
- UInt::new(res)
+ Uint::new(res)
}
- /// Create a new [`UInt`] from the provided little endian hex string.
+ /// Create a new [`Uint`] from the provided little endian hex string.
pub const fn from_le_hex(hex: &str) -> Self {
let bytes = hex.as_bytes();
assert!(
- bytes.len() == Limb::BYTE_SIZE * LIMBS * 2,
+ bytes.len() == Limb::BYTES * LIMBS * 2,
"bytes are not the expected size"
);
let mut res = [Limb::ZERO; LIMBS];
- let mut buf = [0u8; Limb::BYTE_SIZE];
+ let mut buf = [0u8; Limb::BYTES];
let mut i = 0;
while i < LIMBS {
let mut j = 0;
- while j < Limb::BYTE_SIZE {
- let offset = (i * Limb::BYTE_SIZE + j) * 2;
+ while j < Limb::BYTES {
+ let offset = (i * Limb::BYTES + j) * 2;
buf[j] = decode_hex_byte([bytes[offset], bytes[offset + 1]]);
j += 1;
}
@@ -109,39 +109,37 @@ impl<const LIMBS: usize> UInt<LIMBS> {
i += 1;
}
- UInt::new(res)
+ Uint::new(res)
}
- /// Serialize this [`UInt`] as big-endian, writing it into the provided
+ /// Serialize this [`Uint`] as big-endian, writing it into the provided
/// byte slice.
#[inline]
- #[cfg_attr(docsrs, doc(cfg(feature = "generic-array")))]
pub(crate) fn write_be_bytes(&self, out: &mut [u8]) {
- debug_assert_eq!(out.len(), Limb::BYTE_SIZE * LIMBS);
+ debug_assert_eq!(out.len(), Limb::BYTES * LIMBS);
for (src, dst) in self
.limbs
.iter()
.rev()
.cloned()
- .zip(out.chunks_exact_mut(Limb::BYTE_SIZE))
+ .zip(out.chunks_exact_mut(Limb::BYTES))
{
dst.copy_from_slice(&src.to_be_bytes());
}
}
- /// Serialize this [`UInt`] as little-endian, writing it into the provided
+ /// Serialize this [`Uint`] as little-endian, writing it into the provided
/// byte slice.
#[inline]
- #[cfg_attr(docsrs, doc(cfg(feature = "generic-array")))]
pub(crate) fn write_le_bytes(&self, out: &mut [u8]) {
- debug_assert_eq!(out.len(), Limb::BYTE_SIZE * LIMBS);
+ debug_assert_eq!(out.len(), Limb::BYTES * LIMBS);
for (src, dst) in self
.limbs
.iter()
.cloned()
- .zip(out.chunks_exact_mut(Limb::BYTE_SIZE))
+ .zip(out.chunks_exact_mut(Limb::BYTES))
{
dst.copy_from_slice(&src.to_le_bytes());
}
@@ -183,26 +181,26 @@ mod tests {
use {crate::U128, alloc::format};
#[cfg(target_pointer_width = "32")]
- use crate::U64 as UIntEx;
+ use crate::U64 as UintEx;
#[cfg(target_pointer_width = "64")]
- use crate::U128 as UIntEx;
+ use crate::U128 as UintEx;
#[test]
#[cfg(target_pointer_width = "32")]
fn from_be_slice() {
let bytes = hex!("0011223344556677");
- let n = UIntEx::from_be_slice(&bytes);
- assert_eq!(n.limbs(), &[Limb(0x44556677), Limb(0x00112233)]);
+ let n = UintEx::from_be_slice(&bytes);
+ assert_eq!(n.as_limbs(), &[Limb(0x44556677), Limb(0x00112233)]);
}
#[test]
#[cfg(target_pointer_width = "64")]
fn from_be_slice() {
let bytes = hex!("00112233445566778899aabbccddeeff");
- let n = UIntEx::from_be_slice(&bytes);
+ let n = UintEx::from_be_slice(&bytes);
assert_eq!(
- n.limbs(),
+ n.as_limbs(),
&[Limb(0x8899aabbccddeeff), Limb(0x0011223344556677)]
);
}
@@ -211,17 +209,17 @@ mod tests {
#[cfg(target_pointer_width = "32")]
fn from_le_slice() {
let bytes = hex!("7766554433221100");
- let n = UIntEx::from_le_slice(&bytes);
- assert_eq!(n.limbs(), &[Limb(0x44556677), Limb(0x00112233)]);
+ let n = UintEx::from_le_slice(&bytes);
+ assert_eq!(n.as_limbs(), &[Limb(0x44556677), Limb(0x00112233)]);
}
#[test]
#[cfg(target_pointer_width = "64")]
fn from_le_slice() {
let bytes = hex!("ffeeddccbbaa99887766554433221100");
- let n = UIntEx::from_le_slice(&bytes);
+ let n = UintEx::from_le_slice(&bytes);
assert_eq!(
- n.limbs(),
+ n.as_limbs(),
&[Limb(0x8899aabbccddeeff), Limb(0x0011223344556677)]
);
}
@@ -229,16 +227,16 @@ mod tests {
#[test]
#[cfg(target_pointer_width = "32")]
fn from_be_hex() {
- let n = UIntEx::from_be_hex("0011223344556677");
- assert_eq!(n.limbs(), &[Limb(0x44556677), Limb(0x00112233)]);
+ let n = UintEx::from_be_hex("0011223344556677");
+ assert_eq!(n.as_limbs(), &[Limb(0x44556677), Limb(0x00112233)]);
}
#[test]
#[cfg(target_pointer_width = "64")]
fn from_be_hex() {
- let n = UIntEx::from_be_hex("00112233445566778899aabbccddeeff");
+ let n = UintEx::from_be_hex("00112233445566778899aabbccddeeff");
assert_eq!(
- n.limbs(),
+ n.as_limbs(),
&[Limb(0x8899aabbccddeeff), Limb(0x0011223344556677)]
);
}
@@ -246,16 +244,16 @@ mod tests {
#[test]
#[cfg(target_pointer_width = "32")]
fn from_le_hex() {
- let n = UIntEx::from_le_hex("7766554433221100");
- assert_eq!(n.limbs(), &[Limb(0x44556677), Limb(0x00112233)]);
+ let n = UintEx::from_le_hex("7766554433221100");
+ assert_eq!(n.as_limbs(), &[Limb(0x44556677), Limb(0x00112233)]);
}
#[test]
#[cfg(target_pointer_width = "64")]
fn from_le_hex() {
- let n = UIntEx::from_le_hex("ffeeddccbbaa99887766554433221100");
+ let n = UintEx::from_le_hex("ffeeddccbbaa99887766554433221100");
assert_eq!(
- n.limbs(),
+ n.as_limbs(),
&[Limb(0x8899aabbccddeeff), Limb(0x0011223344556677)]
);
}
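
A small sketch of the big-endian constructors exercised by these tests (the renamed `Limb::BYTES` constant itself is internal; only the public constructors appear here):

use crypto_bigint::U128;

fn main() {
    // The hex string must be exactly 2 * Limb::BYTES * LIMBS characters and the
    // byte slice exactly Limb::BYTES * LIMBS bytes, or the constructors panic.
    let a = U128::from_be_hex("00112233445566778899aabbccddeeff");
    let b = U128::from_be_slice(&[
        0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77,
        0x88, 0x99, 0xaa, 0xbb, 0xcc, 0xdd, 0xee, 0xff,
    ]);
    assert_eq!(a, b);
}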
diff --git a/vendor/crypto-bigint/src/uint/encoding/der.rs b/vendor/crypto-bigint/src/uint/encoding/der.rs
index cf1b9c31e..dcd766b2b 100644
--- a/vendor/crypto-bigint/src/uint/encoding/der.rs
+++ b/vendor/crypto-bigint/src/uint/encoding/der.rs
@@ -1,69 +1,64 @@
-//! Support for decoding/encoding [`UInt`] as an ASN.1 DER `INTEGER`.
+//! Support for decoding/encoding [`Uint`] as an ASN.1 DER `INTEGER`.
-use crate::{generic_array::GenericArray, ArrayEncoding, UInt};
+use crate::{generic_array::GenericArray, ArrayEncoding, Uint};
use ::der::{
- asn1::{AnyRef, UIntRef},
+ asn1::{AnyRef, UintRef},
DecodeValue, EncodeValue, FixedTag, Length, Tag,
};
-#[cfg_attr(docsrs, doc(cfg(feature = "der")))]
-impl<'a, const LIMBS: usize> TryFrom<AnyRef<'a>> for UInt<LIMBS>
+impl<'a, const LIMBS: usize> TryFrom<AnyRef<'a>> for Uint<LIMBS>
where
- UInt<LIMBS>: ArrayEncoding,
+ Uint<LIMBS>: ArrayEncoding,
{
type Error = der::Error;
- fn try_from(any: AnyRef<'a>) -> der::Result<UInt<LIMBS>> {
- UIntRef::try_from(any)?.try_into()
+ fn try_from(any: AnyRef<'a>) -> der::Result<Uint<LIMBS>> {
+ UintRef::try_from(any)?.try_into()
}
}
-#[cfg_attr(docsrs, doc(cfg(feature = "der")))]
-impl<'a, const LIMBS: usize> TryFrom<UIntRef<'a>> for UInt<LIMBS>
+impl<'a, const LIMBS: usize> TryFrom<UintRef<'a>> for Uint<LIMBS>
where
- UInt<LIMBS>: ArrayEncoding,
+ Uint<LIMBS>: ArrayEncoding,
{
type Error = der::Error;
- fn try_from(bytes: UIntRef<'a>) -> der::Result<UInt<LIMBS>> {
+ fn try_from(bytes: UintRef<'a>) -> der::Result<Uint<LIMBS>> {
let mut array = GenericArray::default();
let offset = array.len().saturating_sub(bytes.len().try_into()?);
array[offset..].copy_from_slice(bytes.as_bytes());
- Ok(UInt::from_be_byte_array(array))
+ Ok(Uint::from_be_byte_array(array))
}
}
-#[cfg_attr(docsrs, doc(cfg(feature = "der")))]
-impl<'a, const LIMBS: usize> DecodeValue<'a> for UInt<LIMBS>
+impl<'a, const LIMBS: usize> DecodeValue<'a> for Uint<LIMBS>
where
- UInt<LIMBS>: ArrayEncoding,
+ Uint<LIMBS>: ArrayEncoding,
{
fn decode_value<R: der::Reader<'a>>(reader: &mut R, header: der::Header) -> der::Result<Self> {
- UIntRef::decode_value(reader, header)?.try_into()
+ UintRef::decode_value(reader, header)?.try_into()
}
}
-#[cfg_attr(docsrs, doc(cfg(feature = "der")))]
-impl<const LIMBS: usize> EncodeValue for UInt<LIMBS>
+impl<const LIMBS: usize> EncodeValue for Uint<LIMBS>
where
- UInt<LIMBS>: ArrayEncoding,
+ Uint<LIMBS>: ArrayEncoding,
{
fn value_len(&self) -> der::Result<Length> {
// TODO(tarcieri): more efficient length calculation
let array = self.to_be_byte_array();
- UIntRef::new(&array)?.value_len()
+ UintRef::new(&array)?.value_len()
}
- fn encode_value(&self, encoder: &mut dyn der::Writer) -> der::Result<()> {
+ fn encode_value(&self, encoder: &mut impl der::Writer) -> der::Result<()> {
let array = self.to_be_byte_array();
- UIntRef::new(&array)?.encode_value(encoder)
+ UintRef::new(&array)?.encode_value(encoder)
}
}
-#[cfg_attr(docsrs, doc(cfg(feature = "der")))]
-impl<const LIMBS: usize> FixedTag for UInt<LIMBS>
+impl<const LIMBS: usize> FixedTag for Uint<LIMBS>
where
- UInt<LIMBS>: ArrayEncoding,
+ Uint<LIMBS>: ArrayEncoding,
{
const TAG: Tag = Tag::Integer;
}
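
A hedged round-trip sketch of the DER support above; it assumes the `der` and `generic-array` features are enabled and that the `der` crate's blanket `Encode`/`Decode` impls apply to `EncodeValue + FixedTag` types, as they normally do:

use crypto_bigint::U64;
use der::{Decode, Encode};

fn main() -> der::Result<()> {
    let x = U64::from(0x1234_5678u64);
    let bytes = x.to_der()?; // ASN.1 DER INTEGER encoding
    let y = U64::from_der(&bytes)?;
    assert_eq!(x, y);
    Ok(())
}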
diff --git a/vendor/crypto-bigint/src/uint/encoding/rlp.rs b/vendor/crypto-bigint/src/uint/encoding/rlp.rs
index 8a10170d5..0f85bc350 100644
--- a/vendor/crypto-bigint/src/uint/encoding/rlp.rs
+++ b/vendor/crypto-bigint/src/uint/encoding/rlp.rs
@@ -1,10 +1,9 @@
//! Recursive Length Prefix (RLP) encoding support.
-use crate::{Encoding, UInt};
+use crate::{Encoding, Uint};
use rlp::{DecoderError, Rlp, RlpStream};
-#[cfg_attr(docsrs, doc(cfg(feature = "rlp")))]
-impl<const LIMBS: usize> rlp::Encodable for UInt<LIMBS>
+impl<const LIMBS: usize> rlp::Encodable for Uint<LIMBS>
where
Self: Encoding,
{
@@ -20,8 +19,7 @@ where
}
}
-#[cfg_attr(docsrs, doc(cfg(feature = "rlp")))]
-impl<const LIMBS: usize> rlp::Decodable for UInt<LIMBS>
+impl<const LIMBS: usize> rlp::Decodable for Uint<LIMBS>
where
Self: Encoding,
<Self as Encoding>::Repr: Default,
diff --git a/vendor/crypto-bigint/src/uint/from.rs b/vendor/crypto-bigint/src/uint/from.rs
index daa5b7092..ac02f2ef2 100644
--- a/vendor/crypto-bigint/src/uint/from.rs
+++ b/vendor/crypto-bigint/src/uint/from.rs
@@ -1,9 +1,9 @@
-//! `From`-like conversions for [`UInt`].
+//! `From`-like conversions for [`Uint`].
-use crate::{Limb, UInt, WideWord, Word, U128, U64};
+use crate::{Limb, Uint, WideWord, Word, U128, U64};
-impl<const LIMBS: usize> UInt<LIMBS> {
- /// Create a [`UInt`] from a `u8` (const-friendly)
+impl<const LIMBS: usize> Uint<LIMBS> {
+ /// Create a [`Uint`] from a `u8` (const-friendly)
// TODO(tarcieri): replace with `const impl From<u8>` when stable
pub const fn from_u8(n: u8) -> Self {
assert!(LIMBS >= 1, "number of limbs must be greater than zero");
@@ -12,7 +12,7 @@ impl<const LIMBS: usize> UInt<LIMBS> {
Self { limbs }
}
- /// Create a [`UInt`] from a `u16` (const-friendly)
+ /// Create a [`Uint`] from a `u16` (const-friendly)
// TODO(tarcieri): replace with `const impl From<u16>` when stable
pub const fn from_u16(n: u16) -> Self {
assert!(LIMBS >= 1, "number of limbs must be greater than zero");
@@ -21,7 +21,7 @@ impl<const LIMBS: usize> UInt<LIMBS> {
Self { limbs }
}
- /// Create a [`UInt`] from a `u32` (const-friendly)
+ /// Create a [`Uint`] from a `u32` (const-friendly)
// TODO(tarcieri): replace with `const impl From<u32>` when stable
#[allow(trivial_numeric_casts)]
pub const fn from_u32(n: u32) -> Self {
@@ -31,7 +31,7 @@ impl<const LIMBS: usize> UInt<LIMBS> {
Self { limbs }
}
- /// Create a [`UInt`] from a `u64` (const-friendly)
+ /// Create a [`Uint`] from a `u64` (const-friendly)
// TODO(tarcieri): replace with `const impl From<u64>` when stable
#[cfg(target_pointer_width = "32")]
pub const fn from_u64(n: u64) -> Self {
@@ -42,7 +42,7 @@ impl<const LIMBS: usize> UInt<LIMBS> {
Self { limbs }
}
- /// Create a [`UInt`] from a `u64` (const-friendly)
+ /// Create a [`Uint`] from a `u64` (const-friendly)
// TODO(tarcieri): replace with `const impl From<u64>` when stable
#[cfg(target_pointer_width = "64")]
pub const fn from_u64(n: u64) -> Self {
@@ -52,11 +52,11 @@ impl<const LIMBS: usize> UInt<LIMBS> {
Self { limbs }
}
- /// Create a [`UInt`] from a `u128` (const-friendly)
+ /// Create a [`Uint`] from a `u128` (const-friendly)
// TODO(tarcieri): replace with `const impl From<u128>` when stable
pub const fn from_u128(n: u128) -> Self {
assert!(
- LIMBS >= (128 / Limb::BIT_SIZE),
+ LIMBS >= (128 / Limb::BITS),
"number of limbs must be greater than zero"
);
@@ -80,7 +80,7 @@ impl<const LIMBS: usize> UInt<LIMBS> {
Self { limbs }
}
- /// Create a [`UInt`] from a `Word` (const-friendly)
+ /// Create a [`Uint`] from a `Word` (const-friendly)
// TODO(tarcieri): replace with `const impl From<Word>` when stable
pub const fn from_word(n: Word) -> Self {
assert!(LIMBS >= 1, "number of limbs must be greater than zero");
@@ -89,18 +89,18 @@ impl<const LIMBS: usize> UInt<LIMBS> {
Self { limbs }
}
- /// Create a [`UInt`] from a `WideWord` (const-friendly)
+ /// Create a [`Uint`] from a `WideWord` (const-friendly)
// TODO(tarcieri): replace with `const impl From<WideWord>` when stable
pub const fn from_wide_word(n: WideWord) -> Self {
assert!(LIMBS >= 2, "number of limbs must be two or greater");
let mut limbs = [Limb::ZERO; LIMBS];
limbs[0].0 = n as Word;
- limbs[1].0 = (n >> Limb::BIT_SIZE) as Word;
+ limbs[1].0 = (n >> Limb::BITS) as Word;
Self { limbs }
}
}
-impl<const LIMBS: usize> From<u8> for UInt<LIMBS> {
+impl<const LIMBS: usize> From<u8> for Uint<LIMBS> {
fn from(n: u8) -> Self {
// TODO(tarcieri): const where clause when possible
debug_assert!(LIMBS > 0, "limbs must be non-zero");
@@ -108,7 +108,7 @@ impl<const LIMBS: usize> From<u8> for UInt<LIMBS> {
}
}
-impl<const LIMBS: usize> From<u16> for UInt<LIMBS> {
+impl<const LIMBS: usize> From<u16> for Uint<LIMBS> {
fn from(n: u16) -> Self {
// TODO(tarcieri): const where clause when possible
debug_assert!(LIMBS > 0, "limbs must be non-zero");
@@ -116,7 +116,7 @@ impl<const LIMBS: usize> From<u16> for UInt<LIMBS> {
}
}
-impl<const LIMBS: usize> From<u32> for UInt<LIMBS> {
+impl<const LIMBS: usize> From<u32> for Uint<LIMBS> {
fn from(n: u32) -> Self {
// TODO(tarcieri): const where clause when possible
debug_assert!(LIMBS > 0, "limbs must be non-zero");
@@ -124,24 +124,23 @@ impl<const LIMBS: usize> From<u32> for UInt<LIMBS> {
}
}
-impl<const LIMBS: usize> From<u64> for UInt<LIMBS> {
+impl<const LIMBS: usize> From<u64> for Uint<LIMBS> {
fn from(n: u64) -> Self {
// TODO(tarcieri): const where clause when possible
- debug_assert!(LIMBS >= (64 / Limb::BIT_SIZE), "not enough limbs");
+ debug_assert!(LIMBS >= (64 / Limb::BITS), "not enough limbs");
Self::from_u64(n)
}
}
-impl<const LIMBS: usize> From<u128> for UInt<LIMBS> {
+impl<const LIMBS: usize> From<u128> for Uint<LIMBS> {
fn from(n: u128) -> Self {
// TODO(tarcieri): const where clause when possible
- debug_assert!(LIMBS >= (128 / Limb::BIT_SIZE), "not enough limbs");
+ debug_assert!(LIMBS >= (128 / Limb::BITS), "not enough limbs");
Self::from_u128(n)
}
}
#[cfg(target_pointer_width = "32")]
-#[cfg_attr(docsrs, doc(cfg(target_pointer_width = "32")))]
impl From<U64> for u64 {
fn from(n: U64) -> u64 {
(n.limbs[0].0 as u64) | ((n.limbs[1].0 as u64) << 32)
@@ -149,7 +148,6 @@ impl From<U64> for u64 {
}
#[cfg(target_pointer_width = "64")]
-#[cfg_attr(docsrs, doc(cfg(target_pointer_width = "64")))]
impl From<U64> for u64 {
fn from(n: U64) -> u64 {
n.limbs[0].into()
@@ -163,31 +161,31 @@ impl From<U128> for u128 {
}
}
-impl<const LIMBS: usize> From<[Word; LIMBS]> for UInt<LIMBS> {
+impl<const LIMBS: usize> From<[Word; LIMBS]> for Uint<LIMBS> {
fn from(arr: [Word; LIMBS]) -> Self {
Self::from_words(arr)
}
}
-impl<const LIMBS: usize> From<UInt<LIMBS>> for [Word; LIMBS] {
- fn from(n: UInt<LIMBS>) -> [Word; LIMBS] {
+impl<const LIMBS: usize> From<Uint<LIMBS>> for [Word; LIMBS] {
+ fn from(n: Uint<LIMBS>) -> [Word; LIMBS] {
*n.as_ref()
}
}
-impl<const LIMBS: usize> From<[Limb; LIMBS]> for UInt<LIMBS> {
+impl<const LIMBS: usize> From<[Limb; LIMBS]> for Uint<LIMBS> {
fn from(limbs: [Limb; LIMBS]) -> Self {
Self { limbs }
}
}
-impl<const LIMBS: usize> From<UInt<LIMBS>> for [Limb; LIMBS] {
- fn from(n: UInt<LIMBS>) -> [Limb; LIMBS] {
+impl<const LIMBS: usize> From<Uint<LIMBS>> for [Limb; LIMBS] {
+ fn from(n: Uint<LIMBS>) -> [Limb; LIMBS] {
n.limbs
}
}
-impl<const LIMBS: usize> From<Limb> for UInt<LIMBS> {
+impl<const LIMBS: usize> From<Limb> for Uint<LIMBS> {
fn from(limb: Limb) -> Self {
limb.0.into()
}
@@ -198,40 +196,40 @@ mod tests {
use crate::{Limb, Word, U128};
#[cfg(target_pointer_width = "32")]
- use crate::U64 as UIntEx;
+ use crate::U64 as UintEx;
#[cfg(target_pointer_width = "64")]
- use crate::U128 as UIntEx;
+ use crate::U128 as UintEx;
#[test]
fn from_u8() {
- let n = UIntEx::from(42u8);
- assert_eq!(n.limbs(), &[Limb(42), Limb(0)]);
+ let n = UintEx::from(42u8);
+ assert_eq!(n.as_limbs(), &[Limb(42), Limb(0)]);
}
#[test]
fn from_u16() {
- let n = UIntEx::from(42u16);
- assert_eq!(n.limbs(), &[Limb(42), Limb(0)]);
+ let n = UintEx::from(42u16);
+ assert_eq!(n.as_limbs(), &[Limb(42), Limb(0)]);
}
#[test]
fn from_u64() {
- let n = UIntEx::from(42u64);
- assert_eq!(n.limbs(), &[Limb(42), Limb(0)]);
+ let n = UintEx::from(42u64);
+ assert_eq!(n.as_limbs(), &[Limb(42), Limb(0)]);
}
#[test]
fn from_u128() {
let n = U128::from(42u128);
- assert_eq!(&n.limbs()[..2], &[Limb(42), Limb(0)]);
+ assert_eq!(&n.as_limbs()[..2], &[Limb(42), Limb(0)]);
assert_eq!(u128::from(n), 42u128);
}
#[test]
fn array_round_trip() {
let arr1 = [1, 2];
- let n = UIntEx::from(arr1);
+ let n = UintEx::from(arr1);
let arr2: [Word; 2] = n.into();
assert_eq!(arr1, arr2);
}
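
For completeness, a tiny sketch of the primitive conversions these impls provide:

use crypto_bigint::{U128, U64};

fn main() {
    // Widening conversions from primitives.
    let a = U128::from(42u64);
    let b: U128 = 42u128.into();
    assert_eq!(a, b);

    // Converting back to a primitive is only provided for the matching fixed-size aliases.
    let small = U64::from(7u64);
    let as_u64: u64 = small.into();
    assert_eq!(as_u64, 7);
}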
diff --git a/vendor/crypto-bigint/src/uint/inv_mod.rs b/vendor/crypto-bigint/src/uint/inv_mod.rs
index a11408564..ef3c161f5 100644
--- a/vendor/crypto-bigint/src/uint/inv_mod.rs
+++ b/vendor/crypto-bigint/src/uint/inv_mod.rs
@@ -1,7 +1,7 @@
-use super::UInt;
-use crate::Limb;
+use super::Uint;
+use crate::{CtChoice, Limb};
-impl<const LIMBS: usize> UInt<LIMBS> {
+impl<const LIMBS: usize> Uint<LIMBS> {
/// Computes 1/`self` mod 2^k as specified in Algorithm 4 from
/// A Secure Algorithm for Inversion Modulo 2k by
/// Sadiel de la Fe and Carles Ferrer. See
@@ -20,43 +20,182 @@ impl<const LIMBS: usize> UInt<LIMBS> {
x = x.bitor(&x_i.shl_vartime(i));
let t = b.wrapping_sub(self);
- b = Self::ct_select(b, t, j.wrapping_neg()).shr_vartime(1);
+ b = Self::ct_select(&b, &t, CtChoice::from_lsb(j)).shr_vartime(1);
i += 1;
}
x
}
+
+ /// Computes the multiplicative inverse of `self` mod `modulus`, where `modulus` is odd.
+ /// In other words, `self^-1 mod modulus`.
+ /// `bits` and `modulus_bits` are the bounds on the bit size
+ /// of `self` and `modulus`, respectively
+ /// (the inversion speed will be proportional to `bits + modulus_bits`).
+ /// The second element of the tuple is the truthy value if an inverse exists,
+ /// otherwise it is a falsy value.
+ ///
+ /// **Note:** variable time in `bits` and `modulus_bits`.
+ ///
+ /// The algorithm is the same as in GMP 6.2.1's `mpn_sec_invert`.
+ pub const fn inv_odd_mod_bounded(
+ &self,
+ modulus: &Self,
+ bits: usize,
+ modulus_bits: usize,
+ ) -> (Self, CtChoice) {
+ debug_assert!(modulus.ct_is_odd().is_true_vartime());
+
+ let mut a = *self;
+
+ let mut u = Uint::ONE;
+ let mut v = Uint::ZERO;
+
+ let mut b = *modulus;
+
+ // `bit_size` can be anything >= `self.bits()` + `modulus.bits()`; we use the minimum.
+ let bit_size = bits + modulus_bits;
+
+ let mut m1hp = *modulus;
+ let (m1hp_new, carry) = m1hp.shr_1();
+ debug_assert!(carry.is_true_vartime());
+ m1hp = m1hp_new.wrapping_add(&Uint::ONE);
+
+ let mut i = 0;
+ while i < bit_size {
+ debug_assert!(b.ct_is_odd().is_true_vartime());
+
+ let self_odd = a.ct_is_odd();
+
+ // Set `self -= b` if `self` is odd.
+ let (new_a, swap) = a.conditional_wrapping_sub(&b, self_odd);
+ // Set `b += self` if `swap` is true.
+ b = Uint::ct_select(&b, &b.wrapping_add(&new_a), swap);
+ // Negate `self` if `swap` is true.
+ a = new_a.conditional_wrapping_neg(swap);
+
+ let (new_u, new_v) = Uint::ct_swap(&u, &v, swap);
+ let (new_u, cy) = new_u.conditional_wrapping_sub(&new_v, self_odd);
+ let (new_u, cyy) = new_u.conditional_wrapping_add(modulus, cy);
+ debug_assert!(cy.is_true_vartime() == cyy.is_true_vartime());
+
+ let (new_a, overflow) = a.shr_1();
+ debug_assert!(!overflow.is_true_vartime());
+ let (new_u, cy) = new_u.shr_1();
+ let (new_u, cy) = new_u.conditional_wrapping_add(&m1hp, cy);
+ debug_assert!(!cy.is_true_vartime());
+
+ a = new_a;
+ u = new_u;
+ v = new_v;
+
+ i += 1;
+ }
+
+ debug_assert!(!a.ct_is_nonzero().is_true_vartime());
+
+ (v, Uint::ct_eq(&b, &Uint::ONE))
+ }
+
+ /// Computes the multiplicative inverse of `self` mod `modulus`, where `modulus` is odd.
+ /// Returns `(inverse, truthy_value)` if an inverse exists, otherwise `(undefined, falsy_value)`.
+ pub const fn inv_odd_mod(&self, modulus: &Self) -> (Self, CtChoice) {
+ self.inv_odd_mod_bounded(modulus, Uint::<LIMBS>::BITS, Uint::<LIMBS>::BITS)
+ }
}
#[cfg(test)]
mod tests {
- use crate::U256;
+ use crate::{U1024, U256, U64};
#[test]
fn inv_mod2k() {
- let v = U256::from_be_slice(&[
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe,
- 0xff, 0xff, 0xfc, 0x2f,
- ]);
- let e = U256::from_be_slice(&[
- 0x36, 0x42, 0xe6, 0xfa, 0xea, 0xac, 0x7c, 0x66, 0x63, 0xb9, 0x3d, 0x3d, 0x6a, 0x0d,
- 0x48, 0x9e, 0x43, 0x4d, 0xdc, 0x01, 0x23, 0xdb, 0x5f, 0xa6, 0x27, 0xc7, 0xf6, 0xe2,
- 0x2d, 0xda, 0xca, 0xcf,
- ]);
+ let v =
+ U256::from_be_hex("fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f");
+ let e =
+ U256::from_be_hex("3642e6faeaac7c6663b93d3d6a0d489e434ddc0123db5fa627c7f6e22ddacacf");
let a = v.inv_mod2k(256);
assert_eq!(e, a);
- let v = U256::from_be_slice(&[
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xfe, 0xba, 0xae, 0xdc, 0xe6, 0xaf, 0x48, 0xa0, 0x3b, 0xbf, 0xd2, 0x5e, 0x8c,
- 0xd0, 0x36, 0x41, 0x41,
- ]);
- let e = U256::from_be_slice(&[
- 0x26, 0x17, 0x76, 0xf2, 0x9b, 0x6b, 0x10, 0x6c, 0x76, 0x80, 0xcf, 0x3e, 0xd8, 0x30,
- 0x54, 0xa1, 0xaf, 0x5a, 0xe5, 0x37, 0xcb, 0x46, 0x13, 0xdb, 0xb4, 0xf2, 0x00, 0x99,
- 0xaa, 0x77, 0x4e, 0xc1,
- ]);
+ let v =
+ U256::from_be_hex("fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364141");
+ let e =
+ U256::from_be_hex("261776f29b6b106c7680cf3ed83054a1af5ae537cb4613dbb4f20099aa774ec1");
let a = v.inv_mod2k(256);
assert_eq!(e, a);
}
+
+ #[test]
+ fn test_invert() {
+ let a = U1024::from_be_hex(concat![
+ "000225E99153B467A5B451979A3F451DAEF3BF8D6C6521D2FA24BBB17F29544E",
+ "347A412B065B75A351EA9719E2430D2477B11CC9CF9C1AD6EDEE26CB15F463F8",
+ "BCC72EF87EA30288E95A48AA792226CEC959DCB0672D8F9D80A54CBBEA85CAD8",
+ "382EC224DEB2F5784E62D0CC2F81C2E6AD14EBABE646D6764B30C32B87688985"
+ ]);
+ let m = U1024::from_be_hex(concat![
+ "D509E7854ABDC81921F669F1DC6F61359523F3949803E58ED4EA8BC16483DC6F",
+ "37BFE27A9AC9EEA2969B357ABC5C0EE214BE16A7D4C58FC620D5B5A20AFF001A",
+ "D198D3155E5799DC4EA76652D64983A7E130B5EACEBAC768D28D589C36EC749C",
+ "558D0B64E37CD0775C0D0104AE7D98BA23C815185DD43CD8B16292FD94156767"
+ ]);
+
+ let (res, is_some) = a.inv_odd_mod(&m);
+
+ let expected = U1024::from_be_hex(concat![
+ "B03623284B0EBABCABD5C5881893320281460C0A8E7BF4BFDCFFCBCCBF436A55",
+ "D364235C8171E46C7D21AAD0680676E57274A8FDA6D12768EF961CACDD2DAE57",
+ "88D93DA5EB8EDC391EE3726CDCF4613C539F7D23E8702200CB31B5ED5B06E5CA",
+ "3E520968399B4017BF98A864FABA2B647EFC4998B56774D4F2CB026BC024A336"
+ ]);
+ assert!(is_some.is_true_vartime());
+ assert_eq!(res, expected);
+ }
+
+ #[test]
+ fn test_invert_bounded() {
+ let a = U1024::from_be_hex(concat![
+ "0000000000000000000000000000000000000000000000000000000000000000",
+ "347A412B065B75A351EA9719E2430D2477B11CC9CF9C1AD6EDEE26CB15F463F8",
+ "BCC72EF87EA30288E95A48AA792226CEC959DCB0672D8F9D80A54CBBEA85CAD8",
+ "382EC224DEB2F5784E62D0CC2F81C2E6AD14EBABE646D6764B30C32B87688985"
+ ]);
+ let m = U1024::from_be_hex(concat![
+ "0000000000000000000000000000000000000000000000000000000000000000",
+ "0000000000000000000000000000000000000000000000000000000000000000",
+ "D198D3155E5799DC4EA76652D64983A7E130B5EACEBAC768D28D589C36EC749C",
+ "558D0B64E37CD0775C0D0104AE7D98BA23C815185DD43CD8B16292FD94156767"
+ ]);
+
+ let (res, is_some) = a.inv_odd_mod_bounded(&m, 768, 512);
+
+ let expected = U1024::from_be_hex(concat![
+ "0000000000000000000000000000000000000000000000000000000000000000",
+ "0000000000000000000000000000000000000000000000000000000000000000",
+ "0DCC94E2FE509E6EBBA0825645A38E73EF85D5927C79C1AD8FFE7C8DF9A822FA",
+ "09EB396A21B1EF05CBE51E1A8EF284EF01EBDD36A9A4EA17039D8EEFDD934768"
+ ]);
+ assert!(is_some.is_true_vartime());
+ assert_eq!(res, expected);
+ }
+
+ #[test]
+ fn test_invert_small() {
+ let a = U64::from(3u64);
+ let m = U64::from(13u64);
+
+ let (res, is_some) = a.inv_odd_mod(&m);
+
+ assert!(is_some.is_true_vartime());
+ assert_eq!(U64::from(9u64), res);
+ }
+
+ #[test]
+ fn test_no_inverse_small() {
+ let a = U64::from(14u64);
+ let m = U64::from(49u64);
+
+ let (_res, is_some) = a.inv_odd_mod(&m);
+
+ assert!(!is_some.is_true_vartime());
+ }
}
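
A worked example mirroring the small-inverse test above, since 3 * 9 = 27 = 2 * 13 + 1 (it assumes `CtChoice::is_true_vartime` is reachable from the caller, as it is in the crate's own tests):

use crypto_bigint::U64;

fn main() {
    let a = U64::from(3u64);
    let m = U64::from(13u64);

    let (inv, exists) = a.inv_odd_mod(&m);
    assert!(exists.is_true_vartime());
    assert_eq!(inv, U64::from(9u64));
}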
diff --git a/vendor/crypto-bigint/src/uint/modular.rs b/vendor/crypto-bigint/src/uint/modular.rs
new file mode 100644
index 000000000..ff20f443d
--- /dev/null
+++ b/vendor/crypto-bigint/src/uint/modular.rs
@@ -0,0 +1,163 @@
+mod reduction;
+
+/// Implements `Residue`s, supporting modular arithmetic with a constant modulus.
+pub mod constant_mod;
+/// Implements `DynResidue`s, supporting modular arithmetic with a modulus set at runtime.
+pub mod runtime_mod;
+
+mod add;
+mod inv;
+mod mul;
+mod pow;
+mod sub;
+
+pub use reduction::montgomery_reduction;
+
+/// A generalization for numbers kept in optimized representations (e.g. Montgomery)
+/// that can be converted back to the original form.
+pub trait Retrieve {
+ /// The original type.
+ type Output;
+
+ /// Convert the number back from the optimized representation.
+ fn retrieve(&self) -> Self::Output;
+}
+
+#[cfg(test)]
+mod tests {
+ use crate::{
+ const_residue, impl_modulus,
+ modular::{
+ constant_mod::Residue, constant_mod::ResidueParams, reduction::montgomery_reduction,
+ },
+ NonZero, Uint, U256, U64,
+ };
+
+ impl_modulus!(
+ Modulus1,
+ U256,
+ "73eda753299d7d483339d80809a1d80553bda402fffe5bfeffffffff00000001"
+ );
+
+ #[test]
+ fn test_montgomery_params() {
+ assert_eq!(
+ Modulus1::R,
+ U256::from_be_hex("1824b159acc5056f998c4fefecbc4ff55884b7fa0003480200000001fffffffe")
+ );
+ assert_eq!(
+ Modulus1::R2,
+ U256::from_be_hex("0748d9d99f59ff1105d314967254398f2b6cedcb87925c23c999e990f3f29c6d")
+ );
+ assert_eq!(
+ Modulus1::MOD_NEG_INV,
+ U64::from_be_hex("fffffffeffffffff").limbs[0]
+ );
+ }
+
+ impl_modulus!(
+ Modulus2,
+ U256,
+ "ffffffff00000000ffffffffffffffffbce6faada7179e84f3b9cac2fc632551"
+ );
+
+ #[test]
+ fn test_reducing_r() {
+ // Divide the value R by R, which should equal 1
+ assert_eq!(
+ montgomery_reduction::<{ Modulus2::LIMBS }>(
+ &(Modulus2::R, Uint::ZERO),
+ &Modulus2::MODULUS,
+ Modulus2::MOD_NEG_INV
+ ),
+ Uint::ONE
+ );
+ }
+
+ #[test]
+ fn test_reducing_r2() {
+ // Divide the value R^2 by R, which should equal R
+ assert_eq!(
+ montgomery_reduction::<{ Modulus2::LIMBS }>(
+ &(Modulus2::R2, Uint::ZERO),
+ &Modulus2::MODULUS,
+ Modulus2::MOD_NEG_INV
+ ),
+ Modulus2::R
+ );
+ }
+
+ #[test]
+ fn test_reducing_r2_wide() {
+ // Divide the value R^2 by R, which should equal R
+ let (hi, lo) = Modulus2::R.square().split();
+ assert_eq!(
+ montgomery_reduction::<{ Modulus2::LIMBS }>(
+ &(lo, hi),
+ &Modulus2::MODULUS,
+ Modulus2::MOD_NEG_INV
+ ),
+ Modulus2::R
+ );
+ }
+
+ #[test]
+ fn test_reducing_xr_wide() {
+ // Reducing xR should return x
+ let x =
+ U256::from_be_hex("44acf6b7e36c1342c2c5897204fe09504e1e2efb1a900377dbc4e7a6a133ec56");
+ let product = x.mul_wide(&Modulus2::R);
+ assert_eq!(
+ montgomery_reduction::<{ Modulus2::LIMBS }>(
+ &product,
+ &Modulus2::MODULUS,
+ Modulus2::MOD_NEG_INV
+ ),
+ x
+ );
+ }
+
+ #[test]
+ fn test_reducing_xr2_wide() {
+ // Reducing xR^2 should return xR
+ let x =
+ U256::from_be_hex("44acf6b7e36c1342c2c5897204fe09504e1e2efb1a900377dbc4e7a6a133ec56");
+ let product = x.mul_wide(&Modulus2::R2);
+
+ // Computing xR mod modulus without Montgomery reduction
+ let (lo, hi) = x.mul_wide(&Modulus2::R);
+ let c = hi.concat(&lo);
+ let red = c.rem(&NonZero::new(U256::ZERO.concat(&Modulus2::MODULUS)).unwrap());
+ let (hi, lo) = red.split();
+ assert_eq!(hi, Uint::ZERO);
+
+ assert_eq!(
+ montgomery_reduction::<{ Modulus2::LIMBS }>(
+ &product,
+ &Modulus2::MODULUS,
+ Modulus2::MOD_NEG_INV
+ ),
+ lo
+ );
+ }
+
+ #[test]
+ fn test_new_retrieve() {
+ let x =
+ U256::from_be_hex("44acf6b7e36c1342c2c5897204fe09504e1e2efb1a900377dbc4e7a6a133ec56");
+ let x_mod = Residue::<Modulus2, { Modulus2::LIMBS }>::new(&x);
+
+ // Confirm that creating a Residue and then retrieving the value yields the original
+ assert_eq!(x, x_mod.retrieve());
+ }
+
+ #[test]
+ fn test_residue_macro() {
+ let x =
+ U256::from_be_hex("44acf6b7e36c1342c2c5897204fe09504e1e2efb1a900377dbc4e7a6a133ec56");
+ assert_eq!(
+ Residue::<Modulus2, { Modulus2::LIMBS }>::new(&x),
+ const_residue!(x, Modulus2)
+ );
+ }
+}
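
A compact sketch of the constant-modulus API these tests exercise (it assumes the `impl_modulus!`/`const_residue!` macros and the operator impls from `const_mul.rs` are exported as shown in this patch; the modulus 13 is chosen only for illustration):

use crypto_bigint::{const_residue, impl_modulus, modular::constant_mod::ResidueParams, U64};

impl_modulus!(SmallMod, U64, "000000000000000d"); // the odd modulus 13

fn main() {
    let x = U64::from(9u64);
    let y = U64::from(7u64);

    let xm = const_residue!(x, SmallMod); // into Montgomery form
    let ym = const_residue!(y, SmallMod);

    // 9 * 7 = 63 = 4 * 13 + 11
    assert_eq!((xm * ym).retrieve(), U64::from(11u64));
}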
diff --git a/vendor/crypto-bigint/src/uint/modular/add.rs b/vendor/crypto-bigint/src/uint/modular/add.rs
new file mode 100644
index 000000000..f4d0f79d3
--- /dev/null
+++ b/vendor/crypto-bigint/src/uint/modular/add.rs
@@ -0,0 +1,9 @@
+use crate::Uint;
+
+pub(crate) const fn add_montgomery_form<const LIMBS: usize>(
+ a: &Uint<LIMBS>,
+ b: &Uint<LIMBS>,
+ modulus: &Uint<LIMBS>,
+) -> Uint<LIMBS> {
+ a.add_mod(b, modulus)
+}
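
The delegation to `add_mod` works because the Montgomery map x -> x·R mod m is additive, so no conversion in or out of Montgomery form is needed around an addition:

    a·R + b·R ≡ (a + b)·R (mod m)

Subtraction in Montgomery form works the same way for the same reason.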
diff --git a/vendor/crypto-bigint/src/uint/modular/constant_mod.rs b/vendor/crypto-bigint/src/uint/modular/constant_mod.rs
new file mode 100644
index 000000000..f94e4c475
--- /dev/null
+++ b/vendor/crypto-bigint/src/uint/modular/constant_mod.rs
@@ -0,0 +1,189 @@
+use core::{fmt::Debug, marker::PhantomData};
+
+use subtle::{Choice, ConditionallySelectable, ConstantTimeEq};
+
+use crate::{Limb, Uint, Zero};
+
+use super::{reduction::montgomery_reduction, Retrieve};
+
+#[cfg(feature = "rand_core")]
+use crate::{rand_core::CryptoRngCore, NonZero, Random, RandomMod};
+
+#[cfg(feature = "serde")]
+use {
+ crate::Encoding,
+ serdect::serde::de::Error,
+ serdect::serde::{Deserialize, Deserializer, Serialize, Serializer},
+};
+
+/// Additions between residues with a constant modulus
+mod const_add;
+/// Multiplicative inverses of residues with a constant modulus
+mod const_inv;
+/// Multiplications between residues with a constant modulus
+mod const_mul;
+/// Negations of residues with a constant modulus
+mod const_neg;
+/// Exponentiation of residues with a constant modulus
+mod const_pow;
+/// Subtractions between residues with a constant modulus
+mod const_sub;
+
+/// Macros to remove the boilerplate code when dealing with constant moduli.
+#[macro_use]
+mod macros;
+
+pub use macros::*;
+
+/// The parameters to efficiently go to and from the Montgomery form for a given odd modulus. An easy way to generate these parameters is using the `impl_modulus!` macro. These parameters are constant, so they cannot be set at runtime.
+///
+/// Unfortunately, `LIMBS` must be generic for now until const generics are stabilized.
+pub trait ResidueParams<const LIMBS: usize>:
+ Copy + Debug + Default + Eq + Send + Sync + 'static
+{
+ /// Number of limbs required to encode a residue
+ const LIMBS: usize;
+
+ /// The constant modulus
+ const MODULUS: Uint<LIMBS>;
+ /// Parameter used in Montgomery reduction
+ const R: Uint<LIMBS>;
+ /// R^2, used to move into Montgomery form
+ const R2: Uint<LIMBS>;
+ /// R^3, used to perform a multiplicative inverse
+ const R3: Uint<LIMBS>;
+    /// The lowest limb of -(MODULUS^-1) mod R
+ // We only need the LSB because during reduction this value is multiplied modulo 2**Limb::BITS.
+ const MOD_NEG_INV: Limb;
+}
+
+#[derive(Debug, Clone, Copy, PartialEq, Eq)]
+/// A residue mod `MOD`, represented using `LIMBS` limbs. The modulus of this residue is constant, so it cannot be set at runtime.
+pub struct Residue<MOD, const LIMBS: usize>
+where
+ MOD: ResidueParams<LIMBS>,
+{
+ montgomery_form: Uint<LIMBS>,
+ phantom: PhantomData<MOD>,
+}
+
+impl<MOD: ResidueParams<LIMBS>, const LIMBS: usize> Residue<MOD, LIMBS> {
+ /// The representation of 0 mod `MOD`.
+ pub const ZERO: Self = Self {
+ montgomery_form: Uint::<LIMBS>::ZERO,
+ phantom: PhantomData,
+ };
+
+ /// The representation of 1 mod `MOD`.
+ pub const ONE: Self = Self {
+ montgomery_form: MOD::R,
+ phantom: PhantomData,
+ };
+
+ /// Instantiates a new `Residue` that represents this `integer` mod `MOD`.
+ pub const fn new(integer: &Uint<LIMBS>) -> Self {
+ let product = integer.mul_wide(&MOD::R2);
+ let montgomery_form =
+ montgomery_reduction::<LIMBS>(&product, &MOD::MODULUS, MOD::MOD_NEG_INV);
+
+ Self {
+ montgomery_form,
+ phantom: PhantomData,
+ }
+ }
+
+ /// Retrieves the integer currently encoded in this `Residue`, guaranteed to be reduced.
+ pub const fn retrieve(&self) -> Uint<LIMBS> {
+ montgomery_reduction::<LIMBS>(
+ &(self.montgomery_form, Uint::ZERO),
+ &MOD::MODULUS,
+ MOD::MOD_NEG_INV,
+ )
+ }
+}
+
+impl<MOD: ResidueParams<LIMBS> + Copy, const LIMBS: usize> ConditionallySelectable
+ for Residue<MOD, LIMBS>
+{
+ fn conditional_select(a: &Self, b: &Self, choice: Choice) -> Self {
+ Residue {
+ montgomery_form: Uint::conditional_select(
+ &a.montgomery_form,
+ &b.montgomery_form,
+ choice,
+ ),
+ phantom: PhantomData,
+ }
+ }
+}
+
+impl<MOD: ResidueParams<LIMBS>, const LIMBS: usize> ConstantTimeEq for Residue<MOD, LIMBS> {
+ fn ct_eq(&self, other: &Self) -> Choice {
+ ConstantTimeEq::ct_eq(&self.montgomery_form, &other.montgomery_form)
+ }
+}
+
+impl<MOD: ResidueParams<LIMBS>, const LIMBS: usize> Default for Residue<MOD, LIMBS> {
+ fn default() -> Self {
+ Self::ZERO
+ }
+}
+
+impl<MOD: ResidueParams<LIMBS>, const LIMBS: usize> Zero for Residue<MOD, LIMBS> {
+ const ZERO: Self = Self::ZERO;
+}
+
+#[cfg(feature = "rand_core")]
+impl<MOD, const LIMBS: usize> Random for Residue<MOD, LIMBS>
+where
+ MOD: ResidueParams<LIMBS>,
+{
+ #[inline]
+ fn random(rng: &mut impl CryptoRngCore) -> Self {
+ Self::new(&Uint::random_mod(rng, &NonZero::from_uint(MOD::MODULUS)))
+ }
+}
+
+impl<MOD: ResidueParams<LIMBS>, const LIMBS: usize> Retrieve for Residue<MOD, LIMBS> {
+ type Output = Uint<LIMBS>;
+ fn retrieve(&self) -> Self::Output {
+ self.retrieve()
+ }
+}
+
+#[cfg(feature = "serde")]
+impl<'de, MOD, const LIMBS: usize> Deserialize<'de> for Residue<MOD, LIMBS>
+where
+ MOD: ResidueParams<LIMBS>,
+ Uint<LIMBS>: Encoding,
+{
+ fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
+ where
+ D: Deserializer<'de>,
+ {
+ Uint::<LIMBS>::deserialize(deserializer).and_then(|montgomery_form| {
+ if Uint::ct_lt(&montgomery_form, &MOD::MODULUS).into() {
+ Ok(Self {
+ montgomery_form,
+ phantom: PhantomData,
+ })
+ } else {
+ Err(D::Error::custom("montgomery form must be reduced"))
+ }
+ })
+ }
+}
+
+#[cfg(feature = "serde")]
+impl<MOD, const LIMBS: usize> Serialize for Residue<MOD, LIMBS>
+where
+ MOD: ResidueParams<LIMBS>,
+ Uint<LIMBS>: Encoding,
+{
+ fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
+ where
+ S: Serializer,
+ {
+ self.montgomery_form.serialize(serializer)
+ }
+}
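
For reference, the constants required by `ResidueParams` satisfy the following relations, writing W for `Limb::BITS` (a summary in the notation used elsewhere in this module):

    R           = 2^(W * LIMBS) mod MODULUS
    R2          = R^2 mod MODULUS
    R3          = R^3 mod MODULUS
    MOD_NEG_INV = -(MODULUS^-1) mod 2^W

`Residue::new` converts an integer x into Montgomery form by Montgomery-reducing the wide product x * R2, which yields x * R mod MODULUS; `retrieve` reduces (x * R, 0), dividing by R once more and returning the plain integer.
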
diff --git a/vendor/crypto-bigint/src/uint/modular/constant_mod/const_add.rs b/vendor/crypto-bigint/src/uint/modular/constant_mod/const_add.rs
new file mode 100644
index 000000000..82eb8826c
--- /dev/null
+++ b/vendor/crypto-bigint/src/uint/modular/constant_mod/const_add.rs
@@ -0,0 +1,98 @@
+use core::ops::{Add, AddAssign};
+
+use crate::modular::add::add_montgomery_form;
+
+use super::{Residue, ResidueParams};
+
+impl<MOD: ResidueParams<LIMBS>, const LIMBS: usize> Residue<MOD, LIMBS> {
+ /// Adds `rhs`.
+ pub const fn add(&self, rhs: &Residue<MOD, LIMBS>) -> Self {
+ Self {
+ montgomery_form: add_montgomery_form(
+ &self.montgomery_form,
+ &rhs.montgomery_form,
+ &MOD::MODULUS,
+ ),
+ phantom: core::marker::PhantomData,
+ }
+ }
+}
+
+impl<MOD: ResidueParams<LIMBS>, const LIMBS: usize> Add<&Residue<MOD, LIMBS>>
+ for &Residue<MOD, LIMBS>
+{
+ type Output = Residue<MOD, LIMBS>;
+ fn add(self, rhs: &Residue<MOD, LIMBS>) -> Residue<MOD, LIMBS> {
+ self.add(rhs)
+ }
+}
+
+impl<MOD: ResidueParams<LIMBS>, const LIMBS: usize> Add<Residue<MOD, LIMBS>>
+ for &Residue<MOD, LIMBS>
+{
+ type Output = Residue<MOD, LIMBS>;
+ #[allow(clippy::op_ref)]
+ fn add(self, rhs: Residue<MOD, LIMBS>) -> Residue<MOD, LIMBS> {
+ self + &rhs
+ }
+}
+
+impl<MOD: ResidueParams<LIMBS>, const LIMBS: usize> Add<&Residue<MOD, LIMBS>>
+ for Residue<MOD, LIMBS>
+{
+ type Output = Residue<MOD, LIMBS>;
+ #[allow(clippy::op_ref)]
+ fn add(self, rhs: &Residue<MOD, LIMBS>) -> Residue<MOD, LIMBS> {
+ &self + rhs
+ }
+}
+
+impl<MOD: ResidueParams<LIMBS>, const LIMBS: usize> Add<Residue<MOD, LIMBS>>
+ for Residue<MOD, LIMBS>
+{
+ type Output = Residue<MOD, LIMBS>;
+ fn add(self, rhs: Residue<MOD, LIMBS>) -> Residue<MOD, LIMBS> {
+ &self + &rhs
+ }
+}
+
+impl<MOD: ResidueParams<LIMBS>, const LIMBS: usize> AddAssign<&Self> for Residue<MOD, LIMBS> {
+ fn add_assign(&mut self, rhs: &Self) {
+ *self = *self + rhs;
+ }
+}
+
+impl<MOD: ResidueParams<LIMBS>, const LIMBS: usize> AddAssign<Self> for Residue<MOD, LIMBS> {
+ fn add_assign(&mut self, rhs: Self) {
+ *self += &rhs;
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use crate::{const_residue, impl_modulus, modular::constant_mod::ResidueParams, U256};
+
+ impl_modulus!(
+ Modulus,
+ U256,
+ "ffffffff00000000ffffffffffffffffbce6faada7179e84f3b9cac2fc632551"
+ );
+
+ #[test]
+ fn add_overflow() {
+ let x =
+ U256::from_be_hex("44acf6b7e36c1342c2c5897204fe09504e1e2efb1a900377dbc4e7a6a133ec56");
+ let mut x_mod = const_residue!(x, Modulus);
+
+ let y =
+ U256::from_be_hex("d5777c45019673125ad240f83094d4252d829516fac8601ed01979ec1ec1a251");
+ let y_mod = const_residue!(y, Modulus);
+
+ x_mod += &y_mod;
+
+ let expected =
+ U256::from_be_hex("1a2472fde50286541d97ca6a3592dd75beb9c9646e40c511b82496cfc3926956");
+
+ assert_eq!(expected, x_mod.retrieve());
+ }
+}
diff --git a/vendor/crypto-bigint/src/uint/modular/constant_mod/const_inv.rs b/vendor/crypto-bigint/src/uint/modular/constant_mod/const_inv.rs
new file mode 100644
index 000000000..5c8da7b88
--- /dev/null
+++ b/vendor/crypto-bigint/src/uint/modular/constant_mod/const_inv.rs
@@ -0,0 +1,69 @@
+use core::marker::PhantomData;
+
+use subtle::CtOption;
+
+use crate::{modular::inv::inv_montgomery_form, traits::Invert, CtChoice, NonZero};
+
+use super::{Residue, ResidueParams};
+
+impl<MOD: ResidueParams<LIMBS>, const LIMBS: usize> Residue<MOD, LIMBS> {
+ /// Computes the residue `self^-1` representing the multiplicative inverse of `self`.
+ /// I.e. `self * self^-1 = 1`.
+ /// If the number was invertible, the second element of the tuple is the truthy value,
+ /// otherwise it is the falsy value (in which case the first element's value is unspecified).
+ pub const fn invert(&self) -> (Self, CtChoice) {
+ let (montgomery_form, is_some) = inv_montgomery_form(
+ &self.montgomery_form,
+ &MOD::MODULUS,
+ &MOD::R3,
+ MOD::MOD_NEG_INV,
+ );
+
+ let value = Self {
+ montgomery_form,
+ phantom: PhantomData,
+ };
+
+ (value, is_some)
+ }
+}
+
+impl<MOD: ResidueParams<LIMBS>, const LIMBS: usize> Invert for Residue<MOD, LIMBS> {
+ type Output = CtOption<Self>;
+ fn invert(&self) -> Self::Output {
+ let (value, is_some) = self.invert();
+ CtOption::new(value, is_some.into())
+ }
+}
+
+impl<MOD: ResidueParams<LIMBS>, const LIMBS: usize> Invert for NonZero<Residue<MOD, LIMBS>> {
+ type Output = Self;
+ fn invert(&self) -> Self::Output {
+ // Always succeeds for a non-zero argument
+ let (value, _is_some) = self.as_ref().invert();
+ NonZero::new(value).unwrap()
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use crate::{const_residue, impl_modulus, modular::constant_mod::ResidueParams, U256};
+
+ impl_modulus!(
+ Modulus,
+ U256,
+ "15477BCCEFE197328255BFA79A1217899016D927EF460F4FF404029D24FA4409"
+ );
+
+ #[test]
+ fn test_self_inverse() {
+ let x =
+ U256::from_be_hex("77117F1273373C26C700D076B3F780074D03339F56DD0EFB60E7F58441FD3685");
+ let x_mod = const_residue!(x, Modulus);
+
+ let (inv, _is_some) = x_mod.invert();
+ let res = &x_mod * &inv;
+
+ assert_eq!(res.retrieve(), U256::ONE);
+ }
+}
diff --git a/vendor/crypto-bigint/src/uint/modular/constant_mod/const_mul.rs b/vendor/crypto-bigint/src/uint/modular/constant_mod/const_mul.rs
new file mode 100644
index 000000000..3bce1848a
--- /dev/null
+++ b/vendor/crypto-bigint/src/uint/modular/constant_mod/const_mul.rs
@@ -0,0 +1,94 @@
+use core::{
+ marker::PhantomData,
+ ops::{Mul, MulAssign},
+};
+
+use crate::{
+ modular::mul::{mul_montgomery_form, square_montgomery_form},
+ traits::Square,
+};
+
+use super::{Residue, ResidueParams};
+
+impl<MOD: ResidueParams<LIMBS>, const LIMBS: usize> Residue<MOD, LIMBS> {
+ /// Multiplies by `rhs`.
+ pub const fn mul(&self, rhs: &Self) -> Self {
+ Self {
+ montgomery_form: mul_montgomery_form(
+ &self.montgomery_form,
+ &rhs.montgomery_form,
+ &MOD::MODULUS,
+ MOD::MOD_NEG_INV,
+ ),
+ phantom: PhantomData,
+ }
+ }
+
+ /// Computes the (reduced) square of a residue.
+ pub const fn square(&self) -> Self {
+ Self {
+ montgomery_form: square_montgomery_form(
+ &self.montgomery_form,
+ &MOD::MODULUS,
+ MOD::MOD_NEG_INV,
+ ),
+ phantom: PhantomData,
+ }
+ }
+}
+
+impl<MOD: ResidueParams<LIMBS>, const LIMBS: usize> Mul<&Residue<MOD, LIMBS>>
+ for &Residue<MOD, LIMBS>
+{
+ type Output = Residue<MOD, LIMBS>;
+ fn mul(self, rhs: &Residue<MOD, LIMBS>) -> Residue<MOD, LIMBS> {
+ self.mul(rhs)
+ }
+}
+
+impl<MOD: ResidueParams<LIMBS>, const LIMBS: usize> Mul<Residue<MOD, LIMBS>>
+ for &Residue<MOD, LIMBS>
+{
+ type Output = Residue<MOD, LIMBS>;
+ #[allow(clippy::op_ref)]
+ fn mul(self, rhs: Residue<MOD, LIMBS>) -> Residue<MOD, LIMBS> {
+ self * &rhs
+ }
+}
+
+impl<MOD: ResidueParams<LIMBS>, const LIMBS: usize> Mul<&Residue<MOD, LIMBS>>
+ for Residue<MOD, LIMBS>
+{
+ type Output = Residue<MOD, LIMBS>;
+ #[allow(clippy::op_ref)]
+ fn mul(self, rhs: &Residue<MOD, LIMBS>) -> Residue<MOD, LIMBS> {
+ &self * rhs
+ }
+}
+
+impl<MOD: ResidueParams<LIMBS>, const LIMBS: usize> Mul<Residue<MOD, LIMBS>>
+ for Residue<MOD, LIMBS>
+{
+ type Output = Residue<MOD, LIMBS>;
+ fn mul(self, rhs: Residue<MOD, LIMBS>) -> Residue<MOD, LIMBS> {
+ &self * &rhs
+ }
+}
+
+impl<MOD: ResidueParams<LIMBS>, const LIMBS: usize> MulAssign<&Self> for Residue<MOD, LIMBS> {
+ fn mul_assign(&mut self, rhs: &Residue<MOD, LIMBS>) {
+ *self = *self * rhs;
+ }
+}
+
+impl<MOD: ResidueParams<LIMBS>, const LIMBS: usize> MulAssign<Self> for Residue<MOD, LIMBS> {
+ fn mul_assign(&mut self, rhs: Self) {
+ *self *= &rhs;
+ }
+}
+
+impl<MOD: ResidueParams<LIMBS>, const LIMBS: usize> Square for Residue<MOD, LIMBS> {
+ fn square(&self) -> Self {
+ Residue::square(self)
+ }
+}
diff --git a/vendor/crypto-bigint/src/uint/modular/constant_mod/const_neg.rs b/vendor/crypto-bigint/src/uint/modular/constant_mod/const_neg.rs
new file mode 100644
index 000000000..1981f5a20
--- /dev/null
+++ b/vendor/crypto-bigint/src/uint/modular/constant_mod/const_neg.rs
@@ -0,0 +1,48 @@
+use core::ops::Neg;
+
+use super::{Residue, ResidueParams};
+
+impl<MOD: ResidueParams<LIMBS>, const LIMBS: usize> Residue<MOD, LIMBS> {
+ /// Negates the number.
+ pub const fn neg(&self) -> Self {
+ Self::ZERO.sub(self)
+ }
+}
+
+impl<MOD: ResidueParams<LIMBS>, const LIMBS: usize> Neg for Residue<MOD, LIMBS> {
+ type Output = Self;
+ fn neg(self) -> Self {
+ Residue::neg(&self)
+ }
+}
+
+impl<MOD: ResidueParams<LIMBS>, const LIMBS: usize> Neg for &Residue<MOD, LIMBS> {
+ type Output = Residue<MOD, LIMBS>;
+ fn neg(self) -> Residue<MOD, LIMBS> {
+ Residue::neg(self)
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use crate::{const_residue, impl_modulus, modular::constant_mod::ResidueParams, U256};
+
+ impl_modulus!(
+ Modulus,
+ U256,
+ "15477BCCEFE197328255BFA79A1217899016D927EF460F4FF404029D24FA4409"
+ );
+
+ #[test]
+ fn test_negate() {
+ let x =
+ U256::from_be_hex("77117F1273373C26C700D076B3F780074D03339F56DD0EFB60E7F58441FD3685");
+ let x_mod = const_residue!(x, Modulus);
+
+ let res = -x_mod;
+ let expected =
+ U256::from_be_hex("089B67BB2C124F084701AD76E8750D321385E35044C74CE457301A2A9BE061B1");
+
+ assert_eq!(res.retrieve(), expected);
+ }
+}
diff --git a/vendor/crypto-bigint/src/uint/modular/constant_mod/const_pow.rs b/vendor/crypto-bigint/src/uint/modular/constant_mod/const_pow.rs
new file mode 100644
index 000000000..60455b05b
--- /dev/null
+++ b/vendor/crypto-bigint/src/uint/modular/constant_mod/const_pow.rs
@@ -0,0 +1,98 @@
+use crate::{modular::pow::pow_montgomery_form, PowBoundedExp, Uint};
+
+use super::{Residue, ResidueParams};
+
+impl<MOD: ResidueParams<LIMBS>, const LIMBS: usize> Residue<MOD, LIMBS> {
+ /// Raises to the `exponent` power.
+ pub const fn pow(&self, exponent: &Uint<LIMBS>) -> Residue<MOD, LIMBS> {
+ self.pow_bounded_exp(exponent, Uint::<LIMBS>::BITS)
+ }
+
+ /// Raises to the `exponent` power,
+ /// with `exponent_bits` representing the number of (least significant) bits
+ /// to take into account for the exponent.
+ ///
+ /// NOTE: `exponent_bits` may be leaked in the time pattern.
+ pub const fn pow_bounded_exp(
+ &self,
+ exponent: &Uint<LIMBS>,
+ exponent_bits: usize,
+ ) -> Residue<MOD, LIMBS> {
+ Self {
+ montgomery_form: pow_montgomery_form(
+ &self.montgomery_form,
+ exponent,
+ exponent_bits,
+ &MOD::MODULUS,
+ &MOD::R,
+ MOD::MOD_NEG_INV,
+ ),
+ phantom: core::marker::PhantomData,
+ }
+ }
+}
+
+impl<MOD: ResidueParams<LIMBS>, const LIMBS: usize> PowBoundedExp<Uint<LIMBS>>
+ for Residue<MOD, LIMBS>
+{
+ fn pow_bounded_exp(&self, exponent: &Uint<LIMBS>, exponent_bits: usize) -> Self {
+ self.pow_bounded_exp(exponent, exponent_bits)
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use crate::{const_residue, impl_modulus, modular::constant_mod::ResidueParams, U256};
+
+ impl_modulus!(
+ Modulus,
+ U256,
+ "9CC24C5DF431A864188AB905AC751B727C9447A8E99E6366E1AD78A21E8D882B"
+ );
+
+ #[test]
+ fn test_powmod_small_base() {
+ let base = U256::from(105u64);
+ let base_mod = const_residue!(base, Modulus);
+
+ let exponent =
+ U256::from_be_hex("77117F1273373C26C700D076B3F780074D03339F56DD0EFB60E7F58441FD3685");
+
+ let res = base_mod.pow(&exponent);
+
+ let expected =
+ U256::from_be_hex("7B2CD7BDDD96C271E6F232F2F415BB03FE2A90BD6CCCEA5E94F1BFD064993766");
+ assert_eq!(res.retrieve(), expected);
+ }
+
+ #[test]
+ fn test_powmod_small_exponent() {
+ let base =
+ U256::from_be_hex("3435D18AA8313EBBE4D20002922225B53F75DC4453BB3EEC0378646F79B524A4");
+ let base_mod = const_residue!(base, Modulus);
+
+ let exponent = U256::from(105u64);
+
+ let res = base_mod.pow(&exponent);
+
+ let expected =
+ U256::from_be_hex("89E2A4E99F649A5AE2C18068148C355CA927B34A3245C938178ED00D6EF218AA");
+ assert_eq!(res.retrieve(), expected);
+ }
+
+ #[test]
+ fn test_powmod() {
+ let base =
+ U256::from_be_hex("3435D18AA8313EBBE4D20002922225B53F75DC4453BB3EEC0378646F79B524A4");
+ let base_mod = const_residue!(base, Modulus);
+
+ let exponent =
+ U256::from_be_hex("77117F1273373C26C700D076B3F780074D03339F56DD0EFB60E7F58441FD3685");
+
+ let res = base_mod.pow(&exponent);
+
+ let expected =
+ U256::from_be_hex("3681BC0FEA2E5D394EB178155A127B0FD2EF405486D354251C385BDD51B9D421");
+ assert_eq!(res.retrieve(), expected);
+ }
+}
diff --git a/vendor/crypto-bigint/src/uint/modular/constant_mod/const_sub.rs b/vendor/crypto-bigint/src/uint/modular/constant_mod/const_sub.rs
new file mode 100644
index 000000000..f65061146
--- /dev/null
+++ b/vendor/crypto-bigint/src/uint/modular/constant_mod/const_sub.rs
@@ -0,0 +1,98 @@
+use core::ops::{Sub, SubAssign};
+
+use crate::modular::sub::sub_montgomery_form;
+
+use super::{Residue, ResidueParams};
+
+impl<MOD: ResidueParams<LIMBS>, const LIMBS: usize> Residue<MOD, LIMBS> {
+ /// Subtracts `rhs`.
+ pub const fn sub(&self, rhs: &Self) -> Self {
+ Self {
+ montgomery_form: sub_montgomery_form(
+ &self.montgomery_form,
+ &rhs.montgomery_form,
+ &MOD::MODULUS,
+ ),
+ phantom: core::marker::PhantomData,
+ }
+ }
+}
+
+impl<MOD: ResidueParams<LIMBS>, const LIMBS: usize> Sub<&Residue<MOD, LIMBS>>
+ for &Residue<MOD, LIMBS>
+{
+ type Output = Residue<MOD, LIMBS>;
+ fn sub(self, rhs: &Residue<MOD, LIMBS>) -> Residue<MOD, LIMBS> {
+ self.sub(rhs)
+ }
+}
+
+impl<MOD: ResidueParams<LIMBS>, const LIMBS: usize> Sub<Residue<MOD, LIMBS>>
+ for &Residue<MOD, LIMBS>
+{
+ type Output = Residue<MOD, LIMBS>;
+ #[allow(clippy::op_ref)]
+ fn sub(self, rhs: Residue<MOD, LIMBS>) -> Residue<MOD, LIMBS> {
+ self - &rhs
+ }
+}
+
+impl<MOD: ResidueParams<LIMBS>, const LIMBS: usize> Sub<&Residue<MOD, LIMBS>>
+ for Residue<MOD, LIMBS>
+{
+ type Output = Residue<MOD, LIMBS>;
+ #[allow(clippy::op_ref)]
+ fn sub(self, rhs: &Residue<MOD, LIMBS>) -> Residue<MOD, LIMBS> {
+ &self - rhs
+ }
+}
+
+impl<MOD: ResidueParams<LIMBS>, const LIMBS: usize> Sub<Residue<MOD, LIMBS>>
+ for Residue<MOD, LIMBS>
+{
+ type Output = Residue<MOD, LIMBS>;
+ fn sub(self, rhs: Residue<MOD, LIMBS>) -> Residue<MOD, LIMBS> {
+ &self - &rhs
+ }
+}
+
+impl<MOD: ResidueParams<LIMBS>, const LIMBS: usize> SubAssign<&Self> for Residue<MOD, LIMBS> {
+ fn sub_assign(&mut self, rhs: &Self) {
+ *self = *self - rhs;
+ }
+}
+
+impl<MOD: ResidueParams<LIMBS>, const LIMBS: usize> SubAssign<Self> for Residue<MOD, LIMBS> {
+ fn sub_assign(&mut self, rhs: Self) {
+ *self -= &rhs;
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use crate::{const_residue, impl_modulus, modular::constant_mod::ResidueParams, U256};
+
+ impl_modulus!(
+ Modulus,
+ U256,
+ "ffffffff00000000ffffffffffffffffbce6faada7179e84f3b9cac2fc632551"
+ );
+
+ #[test]
+ fn sub_overflow() {
+ let x =
+ U256::from_be_hex("44acf6b7e36c1342c2c5897204fe09504e1e2efb1a900377dbc4e7a6a133ec56");
+ let mut x_mod = const_residue!(x, Modulus);
+
+ let y =
+ U256::from_be_hex("d5777c45019673125ad240f83094d4252d829516fac8601ed01979ec1ec1a251");
+ let y_mod = const_residue!(y, Modulus);
+
+ x_mod -= &y_mod;
+
+ let expected =
+ U256::from_be_hex("6f357a71e1d5a03167f34879d469352add829491c6df41ddff65387d7ed56f56");
+
+ assert_eq!(expected, x_mod.retrieve());
+ }
+}
diff --git a/vendor/crypto-bigint/src/uint/modular/constant_mod/macros.rs b/vendor/crypto-bigint/src/uint/modular/constant_mod/macros.rs
new file mode 100644
index 000000000..1583ca529
--- /dev/null
+++ b/vendor/crypto-bigint/src/uint/modular/constant_mod/macros.rs
@@ -0,0 +1,50 @@
+// TODO: Use `adt_const_params` once stabilized to make a `Residue` generic around a modulus rather than having to implement a ZST + trait
+#[macro_export]
+/// Implements a modulus with the given name, type, and value (in that order). Note that `crypto_bigint::traits::Encoding` must be in scope for the macro to work.
+/// For example, `impl_modulus!(MyModulus, U256, "73eda753299d7d483339d80809a1d80553bda402fffe5bfeffffffff00000001");` implements a 256-bit modulus named `MyModulus`.
+macro_rules! impl_modulus {
+ ($name:ident, $uint_type:ty, $value:expr) => {
+ #[derive(Clone, Copy, Debug, Default, Eq, PartialEq)]
+ pub struct $name {}
+ impl<const DLIMBS: usize>
+ $crate::modular::constant_mod::ResidueParams<{ $crate::nlimbs!(<$uint_type>::BITS) }>
+ for $name
+ where
+ $crate::Uint<{ $crate::nlimbs!(<$uint_type>::BITS) }>:
+ $crate::Concat<Output = $crate::Uint<DLIMBS>>,
+ {
+ const LIMBS: usize = { $crate::nlimbs!(<$uint_type>::BITS) };
+ const MODULUS: $crate::Uint<{ $crate::nlimbs!(<$uint_type>::BITS) }> =
+ <$uint_type>::from_be_hex($value);
+ const R: $crate::Uint<{ $crate::nlimbs!(<$uint_type>::BITS) }> = $crate::Uint::MAX
+ .const_rem(&Self::MODULUS)
+ .0
+ .wrapping_add(&$crate::Uint::ONE);
+ const R2: $crate::Uint<{ $crate::nlimbs!(<$uint_type>::BITS) }> =
+ $crate::Uint::const_rem_wide(Self::R.square_wide(), &Self::MODULUS).0;
+ const MOD_NEG_INV: $crate::Limb = $crate::Limb(
+ $crate::Word::MIN.wrapping_sub(
+ Self::MODULUS
+ .inv_mod2k($crate::Word::BITS as usize)
+ .as_limbs()[0]
+ .0,
+ ),
+ );
+ const R3: $crate::Uint<{ $crate::nlimbs!(<$uint_type>::BITS) }> =
+ $crate::modular::montgomery_reduction(
+ &Self::R2.square_wide(),
+ &Self::MODULUS,
+ Self::MOD_NEG_INV,
+ );
+ }
+ };
+}
+
+#[macro_export]
+/// Creates a `Residue` with the given value for a specific modulus.
+/// For example, `const_residue!(x, MyModulus);` creates a `Residue` for the value bound to `x` mod `MyModulus`; the first argument must be a variable.
+macro_rules! const_residue {
+ ($variable:ident, $modulus:ident) => {
+ $crate::modular::constant_mod::Residue::<$modulus, { $modulus::LIMBS }>::new(&$variable)
+ };
+}
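
Putting the two macros together, a minimal usage sketch looks like the following (assuming the crate is pulled in as an external dependency named `crypto_bigint`; the modulus value is the same one used by the tests elsewhere in this module):

```rust
use crypto_bigint::{const_residue, impl_modulus, modular::constant_mod::ResidueParams, U256};

impl_modulus!(
    MyModulus,
    U256,
    "ffffffff00000000ffffffffffffffffbce6faada7179e84f3b9cac2fc632551"
);

fn demo() -> U256 {
    // `const_residue!` expects an identifier, so bind the value first.
    let x = U256::from(105u64);
    let x_mod = const_residue!(x, MyModulus);

    // All arithmetic happens in Montgomery form; `retrieve` converts back.
    let y = (x_mod + x_mod) * x_mod;
    y.retrieve() // (105 + 105) * 105 mod MyModulus
}
```
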
diff --git a/vendor/crypto-bigint/src/uint/modular/inv.rs b/vendor/crypto-bigint/src/uint/modular/inv.rs
new file mode 100644
index 000000000..408c03fb8
--- /dev/null
+++ b/vendor/crypto-bigint/src/uint/modular/inv.rs
@@ -0,0 +1,14 @@
+use crate::{modular::reduction::montgomery_reduction, CtChoice, Limb, Uint};
+
+pub const fn inv_montgomery_form<const LIMBS: usize>(
+ x: &Uint<LIMBS>,
+ modulus: &Uint<LIMBS>,
+ r3: &Uint<LIMBS>,
+ mod_neg_inv: Limb,
+) -> (Uint<LIMBS>, CtChoice) {
+ let (inverse, is_some) = x.inv_odd_mod(modulus);
+ (
+ montgomery_reduction(&inverse.mul_wide(r3), modulus, mod_neg_inv),
+ is_some,
+ )
+}
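
The `r3` factor compensates for the Montgomery encoding of the input: `x` encodes some value a as a * R mod m, so `inv_odd_mod` returns (a * R)^-1 = a^-1 * R^-1 mod m, which is two factors of R short of the Montgomery form a^-1 * R. Multiplying by R^3 and Montgomery-reducing (one division by R) restores it:

    (a^-1 * R^-1) * R^3 / R = a^-1 * R  (mod m)
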
diff --git a/vendor/crypto-bigint/src/uint/modular/mul.rs b/vendor/crypto-bigint/src/uint/modular/mul.rs
new file mode 100644
index 000000000..b84ceb5ca
--- /dev/null
+++ b/vendor/crypto-bigint/src/uint/modular/mul.rs
@@ -0,0 +1,22 @@
+use crate::{Limb, Uint};
+
+use super::reduction::montgomery_reduction;
+
+pub(crate) const fn mul_montgomery_form<const LIMBS: usize>(
+ a: &Uint<LIMBS>,
+ b: &Uint<LIMBS>,
+ modulus: &Uint<LIMBS>,
+ mod_neg_inv: Limb,
+) -> Uint<LIMBS> {
+ let product = a.mul_wide(b);
+ montgomery_reduction::<LIMBS>(&product, modulus, mod_neg_inv)
+}
+
+pub(crate) const fn square_montgomery_form<const LIMBS: usize>(
+ a: &Uint<LIMBS>,
+ modulus: &Uint<LIMBS>,
+ mod_neg_inv: Limb,
+) -> Uint<LIMBS> {
+ let product = a.square_wide();
+ montgomery_reduction::<LIMBS>(&product, modulus, mod_neg_inv)
+}
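
Both helpers rely on the standard Montgomery identity: for operands already in Montgomery form,

    (a * R) * (b * R) / R = (a * b) * R  (mod m)

so one wide product followed by one reduction keeps the result in Montgomery form.
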
diff --git a/vendor/crypto-bigint/src/uint/modular/pow.rs b/vendor/crypto-bigint/src/uint/modular/pow.rs
new file mode 100644
index 000000000..5ab1fd63d
--- /dev/null
+++ b/vendor/crypto-bigint/src/uint/modular/pow.rs
@@ -0,0 +1,79 @@
+use crate::{Limb, Uint, Word};
+
+use super::mul::{mul_montgomery_form, square_montgomery_form};
+
+/// Performs modular exponentiation using a fixed-window method with constant-time table lookups.
+/// `exponent_bits` represents the number of bits to take into account for the exponent.
+///
+/// NOTE: this value is leaked in the time pattern.
+pub const fn pow_montgomery_form<const LIMBS: usize>(
+ x: &Uint<LIMBS>,
+ exponent: &Uint<LIMBS>,
+ exponent_bits: usize,
+ modulus: &Uint<LIMBS>,
+ r: &Uint<LIMBS>,
+ mod_neg_inv: Limb,
+) -> Uint<LIMBS> {
+ if exponent_bits == 0 {
+ return *r; // 1 in Montgomery form
+ }
+
+ const WINDOW: usize = 4;
+ const WINDOW_MASK: Word = (1 << WINDOW) - 1;
+
+ // powers[i] contains x^i
+ let mut powers = [*r; 1 << WINDOW];
+ powers[1] = *x;
+ let mut i = 2;
+ while i < powers.len() {
+ powers[i] = mul_montgomery_form(&powers[i - 1], x, modulus, mod_neg_inv);
+ i += 1;
+ }
+
+ let starting_limb = (exponent_bits - 1) / Limb::BITS;
+ let starting_bit_in_limb = (exponent_bits - 1) % Limb::BITS;
+ let starting_window = starting_bit_in_limb / WINDOW;
+ let starting_window_mask = (1 << (starting_bit_in_limb % WINDOW + 1)) - 1;
+
+ let mut z = *r; // 1 in Montgomery form
+
+ let mut limb_num = starting_limb + 1;
+ while limb_num > 0 {
+ limb_num -= 1;
+ let w = exponent.as_limbs()[limb_num].0;
+
+ let mut window_num = if limb_num == starting_limb {
+ starting_window + 1
+ } else {
+ Limb::BITS / WINDOW
+ };
+ while window_num > 0 {
+ window_num -= 1;
+
+ let mut idx = (w >> (window_num * WINDOW)) & WINDOW_MASK;
+
+ if limb_num == starting_limb && window_num == starting_window {
+ idx &= starting_window_mask;
+ } else {
+ let mut i = 0;
+ while i < WINDOW {
+ i += 1;
+ z = square_montgomery_form(&z, modulus, mod_neg_inv);
+ }
+ }
+
+ // Constant-time lookup in the array of powers
+ let mut power = powers[0];
+ let mut i = 1;
+ while i < 1 << WINDOW {
+ let choice = Limb::ct_eq(Limb(i as Word), Limb(idx));
+ power = Uint::<LIMBS>::ct_select(&power, &powers[i], choice);
+ i += 1;
+ }
+
+ z = mul_montgomery_form(&z, &power, modulus, mod_neg_inv);
+ }
+ }
+
+ z
+}
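
In other words, this is a left-to-right fixed-window exponentiation: the exponent is consumed in 4-bit windows d, most significant window first, and each window updates the accumulator as

    z <- z^16 * powers[d]    (all values in Montgomery form)

with `powers[d]` fetched by scanning the whole 16-entry table so that the selected index is not revealed through memory access patterns. The very first window skips the squarings, since the accumulator still holds 1.
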
diff --git a/vendor/crypto-bigint/src/uint/modular/reduction.rs b/vendor/crypto-bigint/src/uint/modular/reduction.rs
new file mode 100644
index 000000000..d3543bf97
--- /dev/null
+++ b/vendor/crypto-bigint/src/uint/modular/reduction.rs
@@ -0,0 +1,56 @@
+use crate::{CtChoice, Limb, Uint, WideWord, Word};
+
+/// Algorithm 14.32 in Handbook of Applied Cryptography <https://cacr.uwaterloo.ca/hac/about/chap14.pdf>
+pub const fn montgomery_reduction<const LIMBS: usize>(
+ lower_upper: &(Uint<LIMBS>, Uint<LIMBS>),
+ modulus: &Uint<LIMBS>,
+ mod_neg_inv: Limb,
+) -> Uint<LIMBS> {
+ let (mut lower, mut upper) = *lower_upper;
+
+ let mut meta_carry: WideWord = 0;
+
+ let mut i = 0;
+ while i < LIMBS {
+ let u = (lower.limbs[i].0.wrapping_mul(mod_neg_inv.0)) as WideWord;
+
+ let new_limb =
+ (u * modulus.limbs[0].0 as WideWord).wrapping_add(lower.limbs[i].0 as WideWord);
+ let mut carry = new_limb >> Word::BITS;
+
+ let mut j = 1;
+ while j < (LIMBS - i) {
+ let new_limb = (u * modulus.limbs[j].0 as WideWord)
+ .wrapping_add(lower.limbs[i + j].0 as WideWord)
+ .wrapping_add(carry);
+ carry = new_limb >> Word::BITS;
+ lower.limbs[i + j] = Limb(new_limb as Word);
+
+ j += 1;
+ }
+ while j < LIMBS {
+ let new_limb = (u * modulus.limbs[j].0 as WideWord)
+ .wrapping_add(upper.limbs[i + j - LIMBS].0 as WideWord)
+ .wrapping_add(carry);
+ carry = new_limb >> Word::BITS;
+ upper.limbs[i + j - LIMBS] = Limb(new_limb as Word);
+
+ j += 1;
+ }
+
+ let new_sum = (upper.limbs[i].0 as WideWord)
+ .wrapping_add(carry)
+ .wrapping_add(meta_carry);
+ meta_carry = new_sum >> Word::BITS;
+ upper.limbs[i] = Limb(new_sum as Word);
+
+ i += 1;
+ }
+
+ // Division is simply taking the upper half of the limbs
+ // Final reduction (at this point, the value is at most 2 * modulus)
+ let must_reduce = CtChoice::from_lsb(meta_carry as Word).or(Uint::ct_gt(modulus, &upper).not());
+ upper = upper.wrapping_sub(&Uint::ct_select(&Uint::ZERO, modulus, must_reduce));
+
+ upper
+}
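
To make the per-limb step easier to follow, here is a self-contained, single-limb sketch of the same REDC idea with R = 2^64, written against plain `u64`/`u128` arithmetic. The helper names are made up for illustration and are not part of this crate; the sketch assumes an odd modulus m < 2^63 so that no intermediate result overflows `u128`.

```rust
/// -m^-1 mod 2^64 for odd `m`, via Newton-Hensel iteration
/// (each step doubles the number of correct low bits).
fn neg_inv_mod_r(m: u64) -> u64 {
    let mut inv: u64 = 1;
    for _ in 0..6 {
        inv = inv.wrapping_mul(2u64.wrapping_sub(m.wrapping_mul(inv)));
    }
    inv.wrapping_neg()
}

/// REDC: computes t / 2^64 mod m for t < m * 2^64 (Algorithm 14.32 with one limb).
fn redc(t: u128, m: u64, m_neg_inv: u64) -> u64 {
    // u = t * (-m^-1) mod 2^64, so t + u * m is divisible by 2^64.
    let u = (t as u64).wrapping_mul(m_neg_inv);
    let s = t + (u as u128) * (m as u128); // < 2 * m * 2^64, no overflow for m < 2^63
    let r = (s >> 64) as u64;              // exact division by 2^64
    if r >= m { r - m } else { r }         // final conditional subtraction
}

fn main() {
    let m: u64 = 0x7fff_ffff_ffff_ffe7; // arbitrary odd modulus below 2^63
    let m_neg_inv = neg_inv_mod_r(m);

    // Round trip: reducing x * R gives back x.
    let x: u64 = 12345;
    let x_mont = (((x as u128) << 64) % (m as u128)) as u64; // x * R mod m
    assert_eq!(redc(x_mont as u128, m, m_neg_inv), x % m);

    // Multiplication in Montgomery form: redc((a*R) * (b*R)) = a * b * R mod m.
    let (a, b) = (7u64, 11u64);
    let a_mont = (((a as u128) << 64) % (m as u128)) as u64;
    let b_mont = (((b as u128) << 64) % (m as u128)) as u64;
    let ab_mont = redc((a_mont as u128) * (b_mont as u128), m, m_neg_inv);
    assert_eq!(redc(ab_mont as u128, m, m_neg_inv), (a * b) % m);
}
```
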
diff --git a/vendor/crypto-bigint/src/uint/modular/runtime_mod.rs b/vendor/crypto-bigint/src/uint/modular/runtime_mod.rs
new file mode 100644
index 000000000..72f1094cf
--- /dev/null
+++ b/vendor/crypto-bigint/src/uint/modular/runtime_mod.rs
@@ -0,0 +1,107 @@
+use crate::{Limb, Uint, Word};
+
+use super::{reduction::montgomery_reduction, Retrieve};
+
+/// Additions between residues with a modulus set at runtime
+mod runtime_add;
+/// Multiplicative inverses of residues with a modulus set at runtime
+mod runtime_inv;
+/// Multiplications between residues with a modulus set at runtime
+mod runtime_mul;
+/// Negations of residues with a modulus set at runtime
+mod runtime_neg;
+/// Exponentiation of residues with a modulus set at runtime
+mod runtime_pow;
+/// Subtractions between residues with a modulus set at runtime
+mod runtime_sub;
+
+/// The parameters to efficiently go to and from the Montgomery form for a modulus provided at runtime.
+#[derive(Debug, Clone, Copy, PartialEq, Eq)]
+pub struct DynResidueParams<const LIMBS: usize> {
+ // The constant modulus
+ modulus: Uint<LIMBS>,
+ // Parameter used in Montgomery reduction
+ r: Uint<LIMBS>,
+ // R^2, used to move into Montgomery form
+ r2: Uint<LIMBS>,
+ // R^3, used to compute the multiplicative inverse
+ r3: Uint<LIMBS>,
+    // The lowest limb of -(MODULUS^-1) mod R
+ // We only need the LSB because during reduction this value is multiplied modulo 2**Limb::BITS.
+ mod_neg_inv: Limb,
+}
+
+impl<const LIMBS: usize> DynResidueParams<LIMBS> {
+    /// Instantiates a new set of `DynResidueParams` representing the given odd `modulus`.
+ pub fn new(modulus: &Uint<LIMBS>) -> Self {
+ let r = Uint::MAX.const_rem(modulus).0.wrapping_add(&Uint::ONE);
+ let r2 = Uint::const_rem_wide(r.square_wide(), modulus).0;
+ let mod_neg_inv =
+ Limb(Word::MIN.wrapping_sub(modulus.inv_mod2k(Word::BITS as usize).limbs[0].0));
+ let r3 = montgomery_reduction(&r2.square_wide(), modulus, mod_neg_inv);
+
+ Self {
+ modulus: *modulus,
+ r,
+ r2,
+ r3,
+ mod_neg_inv,
+ }
+ }
+}
+
+/// A residue represented using `LIMBS` limbs. The odd modulus of this residue is set at runtime.
+#[derive(Debug, Clone, Copy, PartialEq, Eq)]
+pub struct DynResidue<const LIMBS: usize> {
+ montgomery_form: Uint<LIMBS>,
+ residue_params: DynResidueParams<LIMBS>,
+}
+
+impl<const LIMBS: usize> DynResidue<LIMBS> {
+    /// Instantiates a new `DynResidue` that represents this `integer` mod the modulus given in `residue_params`.
+ pub const fn new(integer: &Uint<LIMBS>, residue_params: DynResidueParams<LIMBS>) -> Self {
+ let product = integer.mul_wide(&residue_params.r2);
+ let montgomery_form = montgomery_reduction(
+ &product,
+ &residue_params.modulus,
+ residue_params.mod_neg_inv,
+ );
+
+ Self {
+ montgomery_form,
+ residue_params,
+ }
+ }
+
+    /// Retrieves the integer currently encoded in this `DynResidue`, guaranteed to be reduced.
+ pub const fn retrieve(&self) -> Uint<LIMBS> {
+ montgomery_reduction(
+ &(self.montgomery_form, Uint::ZERO),
+ &self.residue_params.modulus,
+ self.residue_params.mod_neg_inv,
+ )
+ }
+
+    /// Instantiates a new `DynResidue` that represents zero.
+ pub const fn zero(residue_params: DynResidueParams<LIMBS>) -> Self {
+ Self {
+ montgomery_form: Uint::<LIMBS>::ZERO,
+ residue_params,
+ }
+ }
+
+    /// Instantiates a new `DynResidue` that represents 1.
+ pub const fn one(residue_params: DynResidueParams<LIMBS>) -> Self {
+ Self {
+ montgomery_form: residue_params.r,
+ residue_params,
+ }
+ }
+}
+
+impl<const LIMBS: usize> Retrieve for DynResidue<LIMBS> {
+ type Output = Uint<LIMBS>;
+ fn retrieve(&self) -> Self::Output {
+ self.retrieve()
+ }
+}
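
A minimal usage sketch for the runtime-modulus variant (assuming the crate is used as an external dependency named `crypto_bigint`, and that `modulus` is odd as Montgomery form requires):

```rust
use crypto_bigint::{
    modular::runtime_mod::{DynResidue, DynResidueParams},
    U256,
};

fn mul_add_mod(a: &U256, b: &U256, modulus: &U256) -> U256 {
    // The Montgomery parameters are computed once per modulus and
    // carried inside every `DynResidue` built from them.
    let params = DynResidueParams::new(modulus);

    let a_mod = DynResidue::new(a, params);
    let b_mod = DynResidue::new(b, params);

    // (a * b + a) mod modulus, computed in Montgomery form throughout.
    (a_mod * b_mod + a_mod).retrieve()
}
```
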
diff --git a/vendor/crypto-bigint/src/uint/modular/runtime_mod/runtime_add.rs b/vendor/crypto-bigint/src/uint/modular/runtime_mod/runtime_add.rs
new file mode 100644
index 000000000..eb4708603
--- /dev/null
+++ b/vendor/crypto-bigint/src/uint/modular/runtime_mod/runtime_add.rs
@@ -0,0 +1,92 @@
+use core::ops::{Add, AddAssign};
+
+use crate::modular::add::add_montgomery_form;
+
+use super::DynResidue;
+
+impl<const LIMBS: usize> DynResidue<LIMBS> {
+ /// Adds `rhs`.
+ pub const fn add(&self, rhs: &Self) -> Self {
+ Self {
+ montgomery_form: add_montgomery_form(
+ &self.montgomery_form,
+ &rhs.montgomery_form,
+ &self.residue_params.modulus,
+ ),
+ residue_params: self.residue_params,
+ }
+ }
+}
+
+impl<const LIMBS: usize> Add<&DynResidue<LIMBS>> for &DynResidue<LIMBS> {
+ type Output = DynResidue<LIMBS>;
+ fn add(self, rhs: &DynResidue<LIMBS>) -> DynResidue<LIMBS> {
+ debug_assert_eq!(self.residue_params, rhs.residue_params);
+ self.add(rhs)
+ }
+}
+
+impl<const LIMBS: usize> Add<DynResidue<LIMBS>> for &DynResidue<LIMBS> {
+ type Output = DynResidue<LIMBS>;
+ #[allow(clippy::op_ref)]
+ fn add(self, rhs: DynResidue<LIMBS>) -> DynResidue<LIMBS> {
+ self + &rhs
+ }
+}
+
+impl<const LIMBS: usize> Add<&DynResidue<LIMBS>> for DynResidue<LIMBS> {
+ type Output = DynResidue<LIMBS>;
+ #[allow(clippy::op_ref)]
+ fn add(self, rhs: &DynResidue<LIMBS>) -> DynResidue<LIMBS> {
+ &self + rhs
+ }
+}
+
+impl<const LIMBS: usize> Add<DynResidue<LIMBS>> for DynResidue<LIMBS> {
+ type Output = DynResidue<LIMBS>;
+ fn add(self, rhs: DynResidue<LIMBS>) -> DynResidue<LIMBS> {
+ &self + &rhs
+ }
+}
+
+impl<const LIMBS: usize> AddAssign<&DynResidue<LIMBS>> for DynResidue<LIMBS> {
+ fn add_assign(&mut self, rhs: &DynResidue<LIMBS>) {
+ *self = *self + rhs;
+ }
+}
+
+impl<const LIMBS: usize> AddAssign<DynResidue<LIMBS>> for DynResidue<LIMBS> {
+ fn add_assign(&mut self, rhs: DynResidue<LIMBS>) {
+ *self += &rhs;
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use crate::{
+ modular::runtime_mod::{DynResidue, DynResidueParams},
+ U256,
+ };
+
+ #[test]
+ fn add_overflow() {
+ let params = DynResidueParams::new(&U256::from_be_hex(
+ "ffffffff00000000ffffffffffffffffbce6faada7179e84f3b9cac2fc632551",
+ ));
+
+ let x =
+ U256::from_be_hex("44acf6b7e36c1342c2c5897204fe09504e1e2efb1a900377dbc4e7a6a133ec56");
+ let mut x_mod = DynResidue::new(&x, params);
+
+ let y =
+ U256::from_be_hex("d5777c45019673125ad240f83094d4252d829516fac8601ed01979ec1ec1a251");
+ let y_mod = DynResidue::new(&y, params);
+
+ x_mod += &y_mod;
+
+ let expected =
+ U256::from_be_hex("1a2472fde50286541d97ca6a3592dd75beb9c9646e40c511b82496cfc3926956");
+
+ assert_eq!(expected, x_mod.retrieve());
+ }
+}
diff --git a/vendor/crypto-bigint/src/uint/modular/runtime_mod/runtime_inv.rs b/vendor/crypto-bigint/src/uint/modular/runtime_mod/runtime_inv.rs
new file mode 100644
index 000000000..5e639d439
--- /dev/null
+++ b/vendor/crypto-bigint/src/uint/modular/runtime_mod/runtime_inv.rs
@@ -0,0 +1,35 @@
+use subtle::CtOption;
+
+use crate::{modular::inv::inv_montgomery_form, traits::Invert, CtChoice};
+
+use super::DynResidue;
+
+impl<const LIMBS: usize> DynResidue<LIMBS> {
+ /// Computes the residue `self^-1` representing the multiplicative inverse of `self`.
+ /// I.e. `self * self^-1 = 1`.
+ /// If the number was invertible, the second element of the tuple is the truthy value,
+ /// otherwise it is the falsy value (in which case the first element's value is unspecified).
+ pub const fn invert(&self) -> (Self, CtChoice) {
+ let (montgomery_form, is_some) = inv_montgomery_form(
+ &self.montgomery_form,
+ &self.residue_params.modulus,
+ &self.residue_params.r3,
+ self.residue_params.mod_neg_inv,
+ );
+
+ let value = Self {
+ montgomery_form,
+ residue_params: self.residue_params,
+ };
+
+ (value, is_some)
+ }
+}
+
+impl<const LIMBS: usize> Invert for DynResidue<LIMBS> {
+ type Output = CtOption<Self>;
+ fn invert(&self) -> Self::Output {
+ let (value, is_some) = self.invert();
+ CtOption::new(value, is_some.into())
+ }
+}
diff --git a/vendor/crypto-bigint/src/uint/modular/runtime_mod/runtime_mul.rs b/vendor/crypto-bigint/src/uint/modular/runtime_mod/runtime_mul.rs
new file mode 100644
index 000000000..30c4b9c04
--- /dev/null
+++ b/vendor/crypto-bigint/src/uint/modular/runtime_mod/runtime_mul.rs
@@ -0,0 +1,84 @@
+use core::ops::{Mul, MulAssign};
+
+use crate::{
+ modular::mul::{mul_montgomery_form, square_montgomery_form},
+ traits::Square,
+};
+
+use super::DynResidue;
+
+impl<const LIMBS: usize> DynResidue<LIMBS> {
+ /// Multiplies by `rhs`.
+ pub const fn mul(&self, rhs: &Self) -> Self {
+ Self {
+ montgomery_form: mul_montgomery_form(
+ &self.montgomery_form,
+ &rhs.montgomery_form,
+ &self.residue_params.modulus,
+ self.residue_params.mod_neg_inv,
+ ),
+ residue_params: self.residue_params,
+ }
+ }
+
+ /// Computes the (reduced) square of a residue.
+ pub const fn square(&self) -> Self {
+ Self {
+ montgomery_form: square_montgomery_form(
+ &self.montgomery_form,
+ &self.residue_params.modulus,
+ self.residue_params.mod_neg_inv,
+ ),
+ residue_params: self.residue_params,
+ }
+ }
+}
+
+impl<const LIMBS: usize> Mul<&DynResidue<LIMBS>> for &DynResidue<LIMBS> {
+ type Output = DynResidue<LIMBS>;
+ fn mul(self, rhs: &DynResidue<LIMBS>) -> DynResidue<LIMBS> {
+ debug_assert_eq!(self.residue_params, rhs.residue_params);
+ self.mul(rhs)
+ }
+}
+
+impl<const LIMBS: usize> Mul<DynResidue<LIMBS>> for &DynResidue<LIMBS> {
+ type Output = DynResidue<LIMBS>;
+ #[allow(clippy::op_ref)]
+ fn mul(self, rhs: DynResidue<LIMBS>) -> DynResidue<LIMBS> {
+ self * &rhs
+ }
+}
+
+impl<const LIMBS: usize> Mul<&DynResidue<LIMBS>> for DynResidue<LIMBS> {
+ type Output = DynResidue<LIMBS>;
+ #[allow(clippy::op_ref)]
+ fn mul(self, rhs: &DynResidue<LIMBS>) -> DynResidue<LIMBS> {
+ &self * rhs
+ }
+}
+
+impl<const LIMBS: usize> Mul<DynResidue<LIMBS>> for DynResidue<LIMBS> {
+ type Output = DynResidue<LIMBS>;
+ fn mul(self, rhs: DynResidue<LIMBS>) -> DynResidue<LIMBS> {
+ &self * &rhs
+ }
+}
+
+impl<const LIMBS: usize> MulAssign<&DynResidue<LIMBS>> for DynResidue<LIMBS> {
+ fn mul_assign(&mut self, rhs: &DynResidue<LIMBS>) {
+ *self = *self * rhs;
+ }
+}
+
+impl<const LIMBS: usize> MulAssign<DynResidue<LIMBS>> for DynResidue<LIMBS> {
+ fn mul_assign(&mut self, rhs: DynResidue<LIMBS>) {
+ *self *= &rhs;
+ }
+}
+
+impl<const LIMBS: usize> Square for DynResidue<LIMBS> {
+ fn square(&self) -> Self {
+ DynResidue::square(self)
+ }
+}
diff --git a/vendor/crypto-bigint/src/uint/modular/runtime_mod/runtime_neg.rs b/vendor/crypto-bigint/src/uint/modular/runtime_mod/runtime_neg.rs
new file mode 100644
index 000000000..fca1ff875
--- /dev/null
+++ b/vendor/crypto-bigint/src/uint/modular/runtime_mod/runtime_neg.rs
@@ -0,0 +1,24 @@
+use core::ops::Neg;
+
+use super::DynResidue;
+
+impl<const LIMBS: usize> DynResidue<LIMBS> {
+ /// Negates the number.
+ pub const fn neg(&self) -> Self {
+ Self::zero(self.residue_params).sub(self)
+ }
+}
+
+impl<const LIMBS: usize> Neg for DynResidue<LIMBS> {
+ type Output = Self;
+ fn neg(self) -> Self {
+ DynResidue::neg(&self)
+ }
+}
+
+impl<const LIMBS: usize> Neg for &DynResidue<LIMBS> {
+ type Output = DynResidue<LIMBS>;
+ fn neg(self) -> DynResidue<LIMBS> {
+ DynResidue::neg(self)
+ }
+}
diff --git a/vendor/crypto-bigint/src/uint/modular/runtime_mod/runtime_pow.rs b/vendor/crypto-bigint/src/uint/modular/runtime_mod/runtime_pow.rs
new file mode 100644
index 000000000..270f4c01c
--- /dev/null
+++ b/vendor/crypto-bigint/src/uint/modular/runtime_mod/runtime_pow.rs
@@ -0,0 +1,35 @@
+use crate::{modular::pow::pow_montgomery_form, PowBoundedExp, Uint};
+
+use super::DynResidue;
+
+impl<const LIMBS: usize> DynResidue<LIMBS> {
+ /// Raises to the `exponent` power.
+ pub const fn pow(&self, exponent: &Uint<LIMBS>) -> DynResidue<LIMBS> {
+ self.pow_bounded_exp(exponent, Uint::<LIMBS>::BITS)
+ }
+
+ /// Raises to the `exponent` power,
+ /// with `exponent_bits` representing the number of (least significant) bits
+ /// to take into account for the exponent.
+ ///
+ /// NOTE: `exponent_bits` may be leaked in the time pattern.
+ pub const fn pow_bounded_exp(&self, exponent: &Uint<LIMBS>, exponent_bits: usize) -> Self {
+ Self {
+ montgomery_form: pow_montgomery_form(
+ &self.montgomery_form,
+ exponent,
+ exponent_bits,
+ &self.residue_params.modulus,
+ &self.residue_params.r,
+ self.residue_params.mod_neg_inv,
+ ),
+ residue_params: self.residue_params,
+ }
+ }
+}
+
+impl<const LIMBS: usize> PowBoundedExp<Uint<LIMBS>> for DynResidue<LIMBS> {
+ fn pow_bounded_exp(&self, exponent: &Uint<LIMBS>, exponent_bits: usize) -> Self {
+ self.pow_bounded_exp(exponent, exponent_bits)
+ }
+}
diff --git a/vendor/crypto-bigint/src/uint/modular/runtime_mod/runtime_sub.rs b/vendor/crypto-bigint/src/uint/modular/runtime_mod/runtime_sub.rs
new file mode 100644
index 000000000..dd6fd84c8
--- /dev/null
+++ b/vendor/crypto-bigint/src/uint/modular/runtime_mod/runtime_sub.rs
@@ -0,0 +1,92 @@
+use core::ops::{Sub, SubAssign};
+
+use crate::modular::sub::sub_montgomery_form;
+
+use super::DynResidue;
+
+impl<const LIMBS: usize> DynResidue<LIMBS> {
+ /// Subtracts `rhs`.
+ pub const fn sub(&self, rhs: &Self) -> Self {
+ Self {
+ montgomery_form: sub_montgomery_form(
+ &self.montgomery_form,
+ &rhs.montgomery_form,
+ &self.residue_params.modulus,
+ ),
+ residue_params: self.residue_params,
+ }
+ }
+}
+
+impl<const LIMBS: usize> Sub<&DynResidue<LIMBS>> for &DynResidue<LIMBS> {
+ type Output = DynResidue<LIMBS>;
+ fn sub(self, rhs: &DynResidue<LIMBS>) -> DynResidue<LIMBS> {
+ debug_assert_eq!(self.residue_params, rhs.residue_params);
+ self.sub(rhs)
+ }
+}
+
+impl<const LIMBS: usize> Sub<DynResidue<LIMBS>> for &DynResidue<LIMBS> {
+ type Output = DynResidue<LIMBS>;
+ #[allow(clippy::op_ref)]
+ fn sub(self, rhs: DynResidue<LIMBS>) -> DynResidue<LIMBS> {
+ self - &rhs
+ }
+}
+
+impl<const LIMBS: usize> Sub<&DynResidue<LIMBS>> for DynResidue<LIMBS> {
+ type Output = DynResidue<LIMBS>;
+ #[allow(clippy::op_ref)]
+ fn sub(self, rhs: &DynResidue<LIMBS>) -> DynResidue<LIMBS> {
+ &self - rhs
+ }
+}
+
+impl<const LIMBS: usize> Sub<DynResidue<LIMBS>> for DynResidue<LIMBS> {
+ type Output = DynResidue<LIMBS>;
+ fn sub(self, rhs: DynResidue<LIMBS>) -> DynResidue<LIMBS> {
+ &self - &rhs
+ }
+}
+
+impl<const LIMBS: usize> SubAssign<&DynResidue<LIMBS>> for DynResidue<LIMBS> {
+ fn sub_assign(&mut self, rhs: &DynResidue<LIMBS>) {
+ *self = *self - rhs;
+ }
+}
+
+impl<const LIMBS: usize> SubAssign<DynResidue<LIMBS>> for DynResidue<LIMBS> {
+ fn sub_assign(&mut self, rhs: DynResidue<LIMBS>) {
+ *self -= &rhs;
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use crate::{
+ modular::runtime_mod::{DynResidue, DynResidueParams},
+ U256,
+ };
+
+ #[test]
+ fn sub_overflow() {
+ let params = DynResidueParams::new(&U256::from_be_hex(
+ "ffffffff00000000ffffffffffffffffbce6faada7179e84f3b9cac2fc632551",
+ ));
+
+ let x =
+ U256::from_be_hex("44acf6b7e36c1342c2c5897204fe09504e1e2efb1a900377dbc4e7a6a133ec56");
+ let mut x_mod = DynResidue::new(&x, params);
+
+ let y =
+ U256::from_be_hex("d5777c45019673125ad240f83094d4252d829516fac8601ed01979ec1ec1a251");
+ let y_mod = DynResidue::new(&y, params);
+
+ x_mod -= &y_mod;
+
+ let expected =
+ U256::from_be_hex("6f357a71e1d5a03167f34879d469352add829491c6df41ddff65387d7ed56f56");
+
+ assert_eq!(expected, x_mod.retrieve());
+ }
+}
diff --git a/vendor/crypto-bigint/src/uint/modular/sub.rs b/vendor/crypto-bigint/src/uint/modular/sub.rs
new file mode 100644
index 000000000..9c4717033
--- /dev/null
+++ b/vendor/crypto-bigint/src/uint/modular/sub.rs
@@ -0,0 +1,9 @@
+use crate::Uint;
+
+pub(crate) const fn sub_montgomery_form<const LIMBS: usize>(
+ a: &Uint<LIMBS>,
+ b: &Uint<LIMBS>,
+ modulus: &Uint<LIMBS>,
+) -> Uint<LIMBS> {
+ a.sub_mod(b, modulus)
+}
diff --git a/vendor/crypto-bigint/src/uint/mul.rs b/vendor/crypto-bigint/src/uint/mul.rs
index ecb32fd10..fcfc756aa 100644
--- a/vendor/crypto-bigint/src/uint/mul.rs
+++ b/vendor/crypto-bigint/src/uint/mul.rs
@@ -1,10 +1,10 @@
-//! [`UInt`] addition operations.
+//! [`Uint`] multiplication operations.
-use crate::{Checked, CheckedMul, Concat, Limb, UInt, Wrapping, Zero};
+use crate::{Checked, CheckedMul, Concat, Limb, Uint, WideWord, Word, Wrapping, Zero};
use core::ops::{Mul, MulAssign};
use subtle::CtOption;
-impl<const LIMBS: usize> UInt<LIMBS> {
+impl<const LIMBS: usize> Uint<LIMBS> {
/// Compute "wide" multiplication, with a product twice the size of the input.
///
/// Returns a tuple containing the `(lo, hi)` components of the product.
@@ -75,17 +75,91 @@ impl<const LIMBS: usize> UInt<LIMBS> {
self.mul_wide(rhs).0
}
- /// Square self, returning a "wide" result.
+ /// Square self, returning a concatenated "wide" result.
pub fn square(&self) -> <Self as Concat>::Output
where
Self: Concat,
{
- let (lo, hi) = self.mul_wide(self);
+ let (lo, hi) = self.square_wide();
hi.concat(&lo)
}
+
+ /// Square self, returning a "wide" result in two parts as (lo, hi).
+ pub const fn square_wide(&self) -> (Self, Self) {
+ // Translated from https://github.com/ucbrise/jedi-pairing/blob/c4bf151/include/core/bigint.hpp#L410
+ //
+ // Permission to relicense the resulting translation as Apache 2.0 + MIT was given
+ // by the original author Sam Kumar: https://github.com/RustCrypto/crypto-bigint/pull/133#discussion_r1056870411
+ let mut lo = Self::ZERO;
+ let mut hi = Self::ZERO;
+
+ // Schoolbook multiplication, but only considering half of the multiplication grid
+ let mut i = 1;
+ while i < LIMBS {
+ let mut j = 0;
+ let mut carry = Limb::ZERO;
+
+ while j < i {
+ let k = i + j;
+
+ if k >= LIMBS {
+ let (n, c) = hi.limbs[k - LIMBS].mac(self.limbs[i], self.limbs[j], carry);
+ hi.limbs[k - LIMBS] = n;
+ carry = c;
+ } else {
+ let (n, c) = lo.limbs[k].mac(self.limbs[i], self.limbs[j], carry);
+ lo.limbs[k] = n;
+ carry = c;
+ }
+
+ j += 1;
+ }
+
+ if (2 * i) < LIMBS {
+ lo.limbs[2 * i] = carry;
+ } else {
+ hi.limbs[2 * i - LIMBS] = carry;
+ }
+
+ i += 1;
+ }
+
+        // Double the current result; this accounts for the other half of the multiplication grid.
+ // TODO: The top word is empty so we can also use a special purpose shl.
+ (lo, hi) = Self::shl_vartime_wide((lo, hi), 1);
+
+        // Handle the diagonal of the multiplication grid, which completes the full product.
+ let mut carry = Limb::ZERO;
+ let mut i = 0;
+ while i < LIMBS {
+ if (i * 2) < LIMBS {
+ let (n, c) = lo.limbs[i * 2].mac(self.limbs[i], self.limbs[i], carry);
+ lo.limbs[i * 2] = n;
+ carry = c;
+ } else {
+ let (n, c) = hi.limbs[i * 2 - LIMBS].mac(self.limbs[i], self.limbs[i], carry);
+ hi.limbs[i * 2 - LIMBS] = n;
+ carry = c;
+ }
+
+ if (i * 2 + 1) < LIMBS {
+ let n = lo.limbs[i * 2 + 1].0 as WideWord + carry.0 as WideWord;
+ lo.limbs[i * 2 + 1] = Limb(n as Word);
+ carry = Limb((n >> Word::BITS) as Word);
+ } else {
+ let n = hi.limbs[i * 2 + 1 - LIMBS].0 as WideWord + carry.0 as WideWord;
+ hi.limbs[i * 2 + 1 - LIMBS] = Limb(n as Word);
+ carry = Limb((n >> Word::BITS) as Word);
+ }
+
+ i += 1;
+ }
+
+ (lo, hi)
+ }
}
-impl<const LIMBS: usize> CheckedMul<&UInt<LIMBS>> for UInt<LIMBS> {
+impl<const LIMBS: usize> CheckedMul<&Uint<LIMBS>> for Uint<LIMBS> {
type Output = Self;
fn checked_mul(&self, rhs: &Self) -> CtOption<Self> {
@@ -94,89 +168,89 @@ impl<const LIMBS: usize> CheckedMul<&UInt<LIMBS>> for UInt<LIMBS> {
}
}
-impl<const LIMBS: usize> Mul for Wrapping<UInt<LIMBS>> {
+impl<const LIMBS: usize> Mul for Wrapping<Uint<LIMBS>> {
type Output = Self;
- fn mul(self, rhs: Self) -> Wrapping<UInt<LIMBS>> {
+ fn mul(self, rhs: Self) -> Wrapping<Uint<LIMBS>> {
Wrapping(self.0.wrapping_mul(&rhs.0))
}
}
-impl<const LIMBS: usize> Mul<&Wrapping<UInt<LIMBS>>> for Wrapping<UInt<LIMBS>> {
- type Output = Wrapping<UInt<LIMBS>>;
+impl<const LIMBS: usize> Mul<&Wrapping<Uint<LIMBS>>> for Wrapping<Uint<LIMBS>> {
+ type Output = Wrapping<Uint<LIMBS>>;
- fn mul(self, rhs: &Wrapping<UInt<LIMBS>>) -> Wrapping<UInt<LIMBS>> {
+ fn mul(self, rhs: &Wrapping<Uint<LIMBS>>) -> Wrapping<Uint<LIMBS>> {
Wrapping(self.0.wrapping_mul(&rhs.0))
}
}
-impl<const LIMBS: usize> Mul<Wrapping<UInt<LIMBS>>> for &Wrapping<UInt<LIMBS>> {
- type Output = Wrapping<UInt<LIMBS>>;
+impl<const LIMBS: usize> Mul<Wrapping<Uint<LIMBS>>> for &Wrapping<Uint<LIMBS>> {
+ type Output = Wrapping<Uint<LIMBS>>;
- fn mul(self, rhs: Wrapping<UInt<LIMBS>>) -> Wrapping<UInt<LIMBS>> {
+ fn mul(self, rhs: Wrapping<Uint<LIMBS>>) -> Wrapping<Uint<LIMBS>> {
Wrapping(self.0.wrapping_mul(&rhs.0))
}
}
-impl<const LIMBS: usize> Mul<&Wrapping<UInt<LIMBS>>> for &Wrapping<UInt<LIMBS>> {
- type Output = Wrapping<UInt<LIMBS>>;
+impl<const LIMBS: usize> Mul<&Wrapping<Uint<LIMBS>>> for &Wrapping<Uint<LIMBS>> {
+ type Output = Wrapping<Uint<LIMBS>>;
- fn mul(self, rhs: &Wrapping<UInt<LIMBS>>) -> Wrapping<UInt<LIMBS>> {
+ fn mul(self, rhs: &Wrapping<Uint<LIMBS>>) -> Wrapping<Uint<LIMBS>> {
Wrapping(self.0.wrapping_mul(&rhs.0))
}
}
-impl<const LIMBS: usize> MulAssign for Wrapping<UInt<LIMBS>> {
+impl<const LIMBS: usize> MulAssign for Wrapping<Uint<LIMBS>> {
fn mul_assign(&mut self, other: Self) {
*self = *self * other;
}
}
-impl<const LIMBS: usize> MulAssign<&Wrapping<UInt<LIMBS>>> for Wrapping<UInt<LIMBS>> {
+impl<const LIMBS: usize> MulAssign<&Wrapping<Uint<LIMBS>>> for Wrapping<Uint<LIMBS>> {
fn mul_assign(&mut self, other: &Self) {
*self = *self * other;
}
}
-impl<const LIMBS: usize> Mul for Checked<UInt<LIMBS>> {
+impl<const LIMBS: usize> Mul for Checked<Uint<LIMBS>> {
type Output = Self;
- fn mul(self, rhs: Self) -> Checked<UInt<LIMBS>> {
+ fn mul(self, rhs: Self) -> Checked<Uint<LIMBS>> {
Checked(self.0.and_then(|a| rhs.0.and_then(|b| a.checked_mul(&b))))
}
}
-impl<const LIMBS: usize> Mul<&Checked<UInt<LIMBS>>> for Checked<UInt<LIMBS>> {
- type Output = Checked<UInt<LIMBS>>;
+impl<const LIMBS: usize> Mul<&Checked<Uint<LIMBS>>> for Checked<Uint<LIMBS>> {
+ type Output = Checked<Uint<LIMBS>>;
- fn mul(self, rhs: &Checked<UInt<LIMBS>>) -> Checked<UInt<LIMBS>> {
+ fn mul(self, rhs: &Checked<Uint<LIMBS>>) -> Checked<Uint<LIMBS>> {
Checked(self.0.and_then(|a| rhs.0.and_then(|b| a.checked_mul(&b))))
}
}
-impl<const LIMBS: usize> Mul<Checked<UInt<LIMBS>>> for &Checked<UInt<LIMBS>> {
- type Output = Checked<UInt<LIMBS>>;
+impl<const LIMBS: usize> Mul<Checked<Uint<LIMBS>>> for &Checked<Uint<LIMBS>> {
+ type Output = Checked<Uint<LIMBS>>;
- fn mul(self, rhs: Checked<UInt<LIMBS>>) -> Checked<UInt<LIMBS>> {
+ fn mul(self, rhs: Checked<Uint<LIMBS>>) -> Checked<Uint<LIMBS>> {
Checked(self.0.and_then(|a| rhs.0.and_then(|b| a.checked_mul(&b))))
}
}
-impl<const LIMBS: usize> Mul<&Checked<UInt<LIMBS>>> for &Checked<UInt<LIMBS>> {
- type Output = Checked<UInt<LIMBS>>;
+impl<const LIMBS: usize> Mul<&Checked<Uint<LIMBS>>> for &Checked<Uint<LIMBS>> {
+ type Output = Checked<Uint<LIMBS>>;
- fn mul(self, rhs: &Checked<UInt<LIMBS>>) -> Checked<UInt<LIMBS>> {
+ fn mul(self, rhs: &Checked<Uint<LIMBS>>) -> Checked<Uint<LIMBS>> {
Checked(self.0.and_then(|a| rhs.0.and_then(|b| a.checked_mul(&b))))
}
}
-impl<const LIMBS: usize> MulAssign for Checked<UInt<LIMBS>> {
+impl<const LIMBS: usize> MulAssign for Checked<Uint<LIMBS>> {
fn mul_assign(&mut self, other: Self) {
*self = *self * other;
}
}
-impl<const LIMBS: usize> MulAssign<&Checked<UInt<LIMBS>>> for Checked<UInt<LIMBS>> {
+impl<const LIMBS: usize> MulAssign<&Checked<Uint<LIMBS>>> for Checked<Uint<LIMBS>> {
fn mul_assign(&mut self, other: &Self) {
*self = *self * other;
}
@@ -184,7 +258,7 @@ impl<const LIMBS: usize> MulAssign<&Checked<UInt<LIMBS>>> for Checked<UInt<LIMBS
#[cfg(test)]
mod tests {
- use crate::{CheckedMul, Zero, U64};
+ use crate::{CheckedMul, Zero, U256, U64};
#[test]
fn mul_wide_zero_and_one() {
@@ -196,7 +270,7 @@ mod tests {
#[test]
fn mul_wide_lo_only() {
- let primes: &[u32] = &[3, 5, 17, 256, 65537];
+ let primes: &[u32] = &[3, 5, 17, 257, 65537];
for &a_int in primes {
for &b_int in primes {
@@ -243,4 +317,12 @@ mod tests {
assert_eq!(lo, U64::from_u64(1));
assert_eq!(hi, U64::from_u64(0xffff_ffff_ffff_fffe));
}
+
+ #[test]
+ fn square_larger() {
+ let n = U256::MAX;
+ let (hi, lo) = n.square().split();
+ assert_eq!(lo, U256::ONE);
+ assert_eq!(hi, U256::MAX.wrapping_sub(&U256::ONE));
+ }
}
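
The `square_wide` routine added above exploits the symmetry of squaring: writing x = sum_i x_i * B^i with B = 2^Limb::BITS,

    x^2 = 2 * (sum over i > j of x_i * x_j * B^(i+j)) + sum_i x_i^2 * B^(2*i)

so only the strictly lower-triangular products are accumulated in the first pass, the partial result is then doubled with a one-bit left shift, and the diagonal terms are added last.
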
diff --git a/vendor/crypto-bigint/src/uint/mul_mod.rs b/vendor/crypto-bigint/src/uint/mul_mod.rs
index 1e9c053ea..0916ede44 100644
--- a/vendor/crypto-bigint/src/uint/mul_mod.rs
+++ b/vendor/crypto-bigint/src/uint/mul_mod.rs
@@ -1,15 +1,15 @@
-//! [`UInt`] multiplication modulus operations.
+//! [`Uint`] multiplication modulus operations.
-use crate::{Limb, UInt, WideWord, Word};
+use crate::{Limb, Uint, WideWord, Word};
-impl<const LIMBS: usize> UInt<LIMBS> {
+impl<const LIMBS: usize> Uint<LIMBS> {
/// Computes `self * rhs mod p` in constant time for the special modulus
/// `p = MAX+1-c` where `c` is small enough to fit in a single [`Limb`].
/// For the modulus reduction, this function implements Algorithm 14.47 from
/// the "Handbook of Applied Cryptography", by A. Menezes, P. van Oorschot,
/// and S. Vanstone, CRC Press, 1996.
pub const fn mul_mod_special(&self, rhs: &Self, c: Limb) -> Self {
- // We implicitly assume `LIMBS > 0`, because `UInt<0>` doesn't compile.
+ // We implicitly assume `LIMBS > 0`, because `Uint<0>` doesn't compile.
// Still the case `LIMBS == 1` needs special handling.
if LIMBS == 1 {
let prod = self.limbs[0].0 as WideWord * rhs.limbs[0].0 as WideWord;
@@ -20,7 +20,7 @@ impl<const LIMBS: usize> UInt<LIMBS> {
let (lo, hi) = self.mul_wide(rhs);
// Now use Algorithm 14.47 for the reduction
- let (lo, carry) = mac_by_limb(lo, hi, c, Limb::ZERO);
+ let (lo, carry) = mac_by_limb(&lo, &hi, c, Limb::ZERO);
let (lo, carry) = {
let rhs = (carry.0 + 1) as WideWord * c.0 as WideWord;
@@ -38,12 +38,14 @@ impl<const LIMBS: usize> UInt<LIMBS> {
/// Computes `a + (b * c) + carry`, returning the result along with the new carry.
const fn mac_by_limb<const LIMBS: usize>(
- mut a: UInt<LIMBS>,
- b: UInt<LIMBS>,
+ a: &Uint<LIMBS>,
+ b: &Uint<LIMBS>,
c: Limb,
- mut carry: Limb,
-) -> (UInt<LIMBS>, Limb) {
+ carry: Limb,
+) -> (Uint<LIMBS>, Limb) {
let mut i = 0;
+ let mut a = *a;
+ let mut carry = carry;
while i < LIMBS {
let (n, c) = a.limbs[i].mac(b.limbs[i], c, carry);
@@ -57,7 +59,7 @@ const fn mac_by_limb<const LIMBS: usize>(
#[cfg(all(test, feature = "rand"))]
mod tests {
- use crate::{Limb, NonZero, Random, RandomMod, UInt};
+ use crate::{Limb, NonZero, Random, RandomMod, Uint};
use rand_core::SeedableRng;
macro_rules! test_mul_mod_special {
@@ -71,19 +73,19 @@ mod tests {
];
for special in &moduli {
- let p = &NonZero::new(UInt::ZERO.wrapping_sub(&UInt::from_word(special.0)))
+ let p = &NonZero::new(Uint::ZERO.wrapping_sub(&Uint::from_word(special.0)))
.unwrap();
- let minus_one = p.wrapping_sub(&UInt::ONE);
+ let minus_one = p.wrapping_sub(&Uint::ONE);
let base_cases = [
- (UInt::ZERO, UInt::ZERO, UInt::ZERO),
- (UInt::ONE, UInt::ZERO, UInt::ZERO),
- (UInt::ZERO, UInt::ONE, UInt::ZERO),
- (UInt::ONE, UInt::ONE, UInt::ONE),
- (minus_one, minus_one, UInt::ONE),
- (minus_one, UInt::ONE, minus_one),
- (UInt::ONE, minus_one, minus_one),
+ (Uint::ZERO, Uint::ZERO, Uint::ZERO),
+ (Uint::ONE, Uint::ZERO, Uint::ZERO),
+ (Uint::ZERO, Uint::ONE, Uint::ZERO),
+ (Uint::ONE, Uint::ONE, Uint::ONE),
+ (minus_one, minus_one, Uint::ONE),
+ (minus_one, Uint::ONE, minus_one),
+ (Uint::ONE, minus_one, minus_one),
];
for (a, b, c) in &base_cases {
let x = a.mul_mod_special(&b, *special.as_ref());
@@ -91,21 +93,21 @@ mod tests {
}
for _i in 0..100 {
- let a = UInt::<$size>::random_mod(&mut rng, p);
- let b = UInt::<$size>::random_mod(&mut rng, p);
+ let a = Uint::<$size>::random_mod(&mut rng, p);
+ let b = Uint::<$size>::random_mod(&mut rng, p);
let c = a.mul_mod_special(&b, *special.as_ref());
assert!(c < **p, "not reduced: {} >= {} ", c, p);
let expected = {
let (lo, hi) = a.mul_wide(&b);
- let mut prod = UInt::<{ 2 * $size }>::ZERO;
+ let mut prod = Uint::<{ 2 * $size }>::ZERO;
prod.limbs[..$size].clone_from_slice(&lo.limbs);
prod.limbs[$size..].clone_from_slice(&hi.limbs);
- let mut modulus = UInt::ZERO;
+ let mut modulus = Uint::ZERO;
modulus.limbs[..$size].clone_from_slice(&p.as_ref().limbs);
- let reduced = prod.reduce(&modulus).unwrap();
- let mut expected = UInt::ZERO;
+ let reduced = prod.rem(&NonZero::new(modulus).unwrap());
+ let mut expected = Uint::ZERO;
expected.limbs[..].clone_from_slice(&reduced.limbs[..$size]);
expected
};
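
For the special modulus handled here, p = 2^k - c with k = `Uint::<LIMBS>::BITS`, the reduction in `mul_mod_special` rests on the identity

    hi * 2^k + lo  ≡  hi * c + lo  (mod 2^k - c)

applied to the wide product: the high half is folded back into the low half by a multiply-accumulate with the single limb `c`, and the remaining carry is absorbed in the same way.
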
diff --git a/vendor/crypto-bigint/src/uint/neg.rs b/vendor/crypto-bigint/src/uint/neg.rs
new file mode 100644
index 000000000..5cdba201b
--- /dev/null
+++ b/vendor/crypto-bigint/src/uint/neg.rs
@@ -0,0 +1,22 @@
+use core::ops::Neg;
+
+use crate::{CtChoice, Uint, Wrapping};
+
+impl<const LIMBS: usize> Neg for Wrapping<Uint<LIMBS>> {
+ type Output = Self;
+
+ fn neg(self) -> Self::Output {
+ let shifted = Wrapping(self.0.shl_vartime(1));
+ self - shifted
+ }
+}
+
+impl<const LIMBS: usize> Uint<LIMBS> {
+    /// Negates `self` in a wrapping fashion if `choice` is truthy; otherwise returns `self` unchanged.
+ pub(crate) const fn conditional_wrapping_neg(&self, choice: CtChoice) -> Uint<LIMBS> {
+ let (shifted, _) = self.shl_1();
+ let negated_self = self.wrapping_sub(&shifted);
+
+ Uint::ct_select(self, &negated_self, choice)
+ }
+}
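Note: both negation paths in the new neg.rs rely on the identity x - (x << 1) = x - 2x = -x in wrapping (mod 2^k) arithmetic. A minimal u64 sketch of the same trick, purely for illustration:

// Wrapping negation expressed the same way as the new `Neg for Wrapping<Uint>`
// impl: x - (x << 1) = x - 2x = -x modulo 2^64.
fn wrapping_neg_via_shift(x: u64) -> u64 {
    x.wrapping_sub(x.wrapping_shl(1))
}

fn main() {
    for x in [0u64, 1, 5, u64::MAX, 1 << 63] {
        assert_eq!(wrapping_neg_via_shift(x), x.wrapping_neg());
    }
}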
diff --git a/vendor/crypto-bigint/src/uint/neg_mod.rs b/vendor/crypto-bigint/src/uint/neg_mod.rs
index 0a1dc033a..aaed2768f 100644
--- a/vendor/crypto-bigint/src/uint/neg_mod.rs
+++ b/vendor/crypto-bigint/src/uint/neg_mod.rs
@@ -1,8 +1,8 @@
-//! [`UInt`] negation modulus operations.
+//! [`Uint`] negation modulus operations.
-use crate::{Limb, NegMod, UInt};
+use crate::{Limb, NegMod, Uint};
-impl<const LIMBS: usize> UInt<LIMBS> {
+impl<const LIMBS: usize> Uint<LIMBS> {
/// Computes `-a mod p` in constant time.
/// Assumes `self` is in `[0, p)`.
pub const fn neg_mod(&self, p: &Self) -> Self {
@@ -12,7 +12,7 @@ impl<const LIMBS: usize> UInt<LIMBS> {
while i < LIMBS {
// Set ret to 0 if the original value was 0, in which
// case ret would be p.
- ret.limbs[i].0 &= z;
+ ret.limbs[i].0 = z.if_true(ret.limbs[i].0);
i += 1;
}
ret
@@ -25,7 +25,7 @@ impl<const LIMBS: usize> UInt<LIMBS> {
}
}
-impl<const LIMBS: usize> NegMod for UInt<LIMBS> {
+impl<const LIMBS: usize> NegMod for Uint<LIMBS> {
type Output = Self;
fn neg_mod(&self, p: &Self) -> Self {
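Note: neg_mod computes p - self and then masks the result back to zero when self was zero, since p - 0 = p would not be reduced. A branching u64 sketch of the same rule is below; the real code stays constant-time by applying the `z` mask via `Limb::if_true` instead of branching.

/// -a mod p for a value already reduced mod p: p - a, except -0 mod p = 0 rather than p.
fn neg_mod(a: u64, p: u64) -> u64 {
    assert!(a < p, "input must already be reduced mod p");
    if a == 0 { 0 } else { p - a }
}

fn main() {
    let p = 13;
    assert_eq!(neg_mod(0, p), 0);
    assert_eq!(neg_mod(1, p), 12);
    assert_eq!((5 + neg_mod(5, p)) % p, 0);
}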
diff --git a/vendor/crypto-bigint/src/uint/rand.rs b/vendor/crypto-bigint/src/uint/rand.rs
index df551c71b..e283e2246 100644
--- a/vendor/crypto-bigint/src/uint/rand.rs
+++ b/vendor/crypto-bigint/src/uint/rand.rs
@@ -1,14 +1,13 @@
//! Random number generator support
-use super::UInt;
+use super::Uint;
use crate::{Limb, NonZero, Random, RandomMod};
-use rand_core::{CryptoRng, RngCore};
+use rand_core::CryptoRngCore;
use subtle::ConstantTimeLess;
-#[cfg_attr(docsrs, doc(cfg(feature = "rand_core")))]
-impl<const LIMBS: usize> Random for UInt<LIMBS> {
- /// Generate a cryptographically secure random [`UInt`].
- fn random(mut rng: impl CryptoRng + RngCore) -> Self {
+impl<const LIMBS: usize> Random for Uint<LIMBS> {
+ /// Generate a cryptographically secure random [`Uint`].
+ fn random(mut rng: &mut impl CryptoRngCore) -> Self {
let mut limbs = [Limb::ZERO; LIMBS];
for limb in &mut limbs {
@@ -19,44 +18,30 @@ impl<const LIMBS: usize> Random for UInt<LIMBS> {
}
}
-#[cfg_attr(docsrs, doc(cfg(feature = "rand_core")))]
-impl<const LIMBS: usize> RandomMod for UInt<LIMBS> {
- /// Generate a cryptographically secure random [`UInt`] which is less than
+impl<const LIMBS: usize> RandomMod for Uint<LIMBS> {
+ /// Generate a cryptographically secure random [`Uint`] which is less than
/// a given `modulus`.
///
/// This function uses rejection sampling, a method which produces an
/// unbiased distribution of in-range values provided the underlying
- /// [`CryptoRng`] is unbiased, but runs in variable-time.
+ /// CSRNG is unbiased, but runs in variable-time.
///
/// The variable-time nature of the algorithm should not pose a security
/// issue so long as the underlying random number generator is truly a
- /// [`CryptoRng`], where previous outputs are unrelated to subsequent
+ /// CSRNG, where previous outputs are unrelated to subsequent
/// outputs and do not reveal information about the RNG's internal state.
- fn random_mod(mut rng: impl CryptoRng + RngCore, modulus: &NonZero<Self>) -> Self {
+ fn random_mod(mut rng: &mut impl CryptoRngCore, modulus: &NonZero<Self>) -> Self {
let mut n = Self::ZERO;
- // TODO(tarcieri): use `div_ceil` when available
- // See: https://github.com/rust-lang/rust/issues/88581
- let mut n_limbs = modulus.bits_vartime() / Limb::BIT_SIZE;
- if n_limbs < LIMBS {
- n_limbs += 1;
- }
-
- // Compute the highest limb of `modulus` as a `NonZero`.
- // Add one to ensure `Limb::random_mod` returns values inclusive of this limb.
- let modulus_hi =
- NonZero::new(modulus.limbs[n_limbs.saturating_sub(1)].saturating_add(Limb::ONE))
- .unwrap(); // Always at least one due to `saturating_add`
+ let n_bits = modulus.as_ref().bits_vartime();
+ let n_limbs = (n_bits + Limb::BITS - 1) / Limb::BITS;
+ let mask = Limb::MAX >> (Limb::BITS * n_limbs - n_bits);
loop {
for i in 0..n_limbs {
- n.limbs[i] = if (i + 1 == n_limbs) && (*modulus_hi != Limb::MAX) {
- // Highest limb
- Limb::random_mod(&mut rng, &modulus_hi)
- } else {
- Limb::random(&mut rng)
- }
+ n.limbs[i] = Limb::random(&mut rng);
}
+ n.limbs[n_limbs - 1] = n.limbs[n_limbs - 1] & mask;
if n.ct_lt(modulus).into() {
return n;
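Note: the new random_mod replaces per-limb bounding with a simpler scheme: fill `n_limbs` limbs with random data, mask the top limb down to the modulus' bit length, and reject candidates that are still >= modulus. A self-contained u64 sketch of that rejection loop follows; the xorshift generator exists only to keep the example runnable and is emphatically not a CSPRNG.

/// Minimal xorshift64 generator, *not* cryptographically secure; used only to
/// make the rejection-sampling loop below self-contained.
struct XorShift64(u64);

impl XorShift64 {
    fn next_u64(&mut self) -> u64 {
        let mut x = self.0;
        x ^= x << 13;
        x ^= x >> 7;
        x ^= x << 17;
        self.0 = x;
        x
    }
}

/// Unbiased sampling in `[0, modulus)`: mask candidates down to the modulus'
/// bit length and reject out-of-range draws.
fn random_mod(rng: &mut XorShift64, modulus: u64) -> u64 {
    assert!(modulus > 0);
    let n_bits = 64 - modulus.leading_zeros();
    let mask = if n_bits == 64 { u64::MAX } else { (1u64 << n_bits) - 1 };
    loop {
        let candidate = rng.next_u64() & mask;
        if candidate < modulus {
            return candidate;
        }
    }
}

fn main() {
    let mut rng = XorShift64(0x1234_5678_9abc_def0);
    for _ in 0..1000 {
        assert!(random_mod(&mut rng, 1000) < 1000);
    }
}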
diff --git a/vendor/crypto-bigint/src/uint/resize.rs b/vendor/crypto-bigint/src/uint/resize.rs
index 5a5ec7eef..2c80b895b 100644
--- a/vendor/crypto-bigint/src/uint/resize.rs
+++ b/vendor/crypto-bigint/src/uint/resize.rs
@@ -1,12 +1,12 @@
-use super::UInt;
+use super::Uint;
-impl<const LIMBS: usize> UInt<LIMBS> {
- /// Construct a `UInt<T>` from the unsigned integer value,
+impl<const LIMBS: usize> Uint<LIMBS> {
+ /// Construct a `Uint<T>` from the unsigned integer value,
/// truncating the upper bits if the value is too large to be
/// represented.
#[inline(always)]
- pub const fn resize<const T: usize>(&self) -> UInt<T> {
- let mut res = UInt::ZERO;
+ pub const fn resize<const T: usize>(&self) -> Uint<T> {
+ let mut res = Uint::ZERO;
let mut i = 0;
let dim = if T < LIMBS { T } else { LIMBS };
while i < dim {
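Note: resize copies the overlapping limbs and leaves the rest zero, so it either truncates or zero-extends depending on the target size. An illustrative u64-limb sketch (names are not the crate's API):

/// Copy `src` limbs into a destination of a (possibly) different size,
/// truncating high limbs or zero-extending as needed.
fn resize<const FROM: usize, const TO: usize>(src: [u64; FROM]) -> [u64; TO] {
    let mut dst = [0u64; TO];
    let dim = if TO < FROM { TO } else { FROM };
    dst[..dim].copy_from_slice(&src[..dim]);
    dst
}

fn main() {
    // Zero-extend two limbs to four, then truncate back down.
    let wide: [u64; 4] = resize([1, 2]);
    assert_eq!(wide, [1, 2, 0, 0]);
    let narrow: [u64; 2] = resize(wide);
    assert_eq!(narrow, [1, 2]);
}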
diff --git a/vendor/crypto-bigint/src/uint/shl.rs b/vendor/crypto-bigint/src/uint/shl.rs
index 9d4669130..eb8c713c9 100644
--- a/vendor/crypto-bigint/src/uint/shl.rs
+++ b/vendor/crypto-bigint/src/uint/shl.rs
@@ -1,9 +1,65 @@
-//! [`UInt`] bitwise left shift operations.
+//! [`Uint`] bitwise left shift operations.
-use crate::{Limb, UInt, Word};
+use crate::{limb::HI_BIT, CtChoice, Limb, Uint, Word};
use core::ops::{Shl, ShlAssign};
-impl<const LIMBS: usize> UInt<LIMBS> {
+impl<const LIMBS: usize> Uint<LIMBS> {
+ /// Computes `self << 1` in constant-time, returning the overflowing bit as a `CtChoice`.
+ pub(crate) const fn shl_1(&self) -> (Self, CtChoice) {
+ let mut shifted_bits = [0; LIMBS];
+ let mut i = 0;
+ while i < LIMBS {
+ shifted_bits[i] = self.limbs[i].0 << 1;
+ i += 1;
+ }
+
+ let mut carry_bits = [0; LIMBS];
+ let mut i = 0;
+ while i < LIMBS {
+ carry_bits[i] = self.limbs[i].0 >> HI_BIT;
+ i += 1;
+ }
+
+ let mut limbs = [Limb(0); LIMBS];
+
+ limbs[0] = Limb(shifted_bits[0]);
+ let mut i = 1;
+ while i < LIMBS {
+ limbs[i] = Limb(shifted_bits[i] | carry_bits[i - 1]);
+ i += 1;
+ }
+
+ (Uint::new(limbs), CtChoice::from_lsb(carry_bits[LIMBS - 1]))
+ }
+
+ /// Computes `self << n` where `0 <= n < Limb::BITS`,
+ /// returning the result and the carry.
+ #[inline(always)]
+ pub(crate) const fn shl_limb(&self, n: usize) -> (Self, Limb) {
+ let mut limbs = [Limb::ZERO; LIMBS];
+
+ let nz = Limb(n as Word).ct_is_nonzero();
+ let lshift = n as Word;
+ let rshift = Limb::ct_select(Limb::ZERO, Limb((Limb::BITS - n) as Word), nz).0;
+ let carry = Limb::ct_select(
+ Limb::ZERO,
+ Limb(self.limbs[LIMBS - 1].0.wrapping_shr(Word::BITS - n as u32)),
+ nz,
+ );
+
+ let mut i = LIMBS - 1;
+ while i > 0 {
+ let mut limb = self.limbs[i].0 << lshift;
+ let hi = self.limbs[i - 1].0 >> rshift;
+ limb |= nz.if_true(hi);
+ limbs[i] = Limb(limb);
+ i -= 1
+ }
+ limbs[0] = Limb(self.limbs[0].0 << lshift);
+
+ (Uint::<LIMBS>::new(limbs), carry)
+ }
+
/// Computes `self << shift`.
///
/// NOTE: this operation is variable time with respect to `n` *ONLY*.
@@ -14,55 +70,69 @@ impl<const LIMBS: usize> UInt<LIMBS> {
pub const fn shl_vartime(&self, n: usize) -> Self {
let mut limbs = [Limb::ZERO; LIMBS];
- if n >= Limb::BIT_SIZE * LIMBS {
+ if n >= Limb::BITS * LIMBS {
return Self { limbs };
}
- let shift_num = n / Limb::BIT_SIZE;
- let rem = n % Limb::BIT_SIZE;
- let nz = Limb(rem as Word).is_nonzero();
- let lshift_rem = rem as Word;
- let rshift_rem = Limb::ct_select(Limb::ZERO, Limb((Limb::BIT_SIZE - rem) as Word), nz).0;
+ let shift_num = n / Limb::BITS;
+ let rem = n % Limb::BITS;
- let mut i = LIMBS - 1;
+ let mut i = LIMBS;
while i > shift_num {
- let mut limb = self.limbs[i - shift_num].0 << lshift_rem;
- let hi = self.limbs[i - shift_num - 1].0 >> rshift_rem;
- limb |= hi & nz;
- limbs[i] = Limb(limb);
- i -= 1
+ i -= 1;
+ limbs[i] = self.limbs[i - shift_num];
}
- limbs[shift_num] = Limb(self.limbs[0].0 << lshift_rem);
- Self { limbs }
+ let (new_lower, _carry) = (Self { limbs }).shl_limb(rem);
+ new_lower
+ }
+
+ /// Computes a left shift on a wide input as `(lo, hi)`.
+ ///
+ /// NOTE: this operation is variable time with respect to `n` *ONLY*.
+ ///
+ /// When used with a fixed `n`, this function is constant-time with respect
+ /// to `self`.
+ #[inline(always)]
+ pub const fn shl_vartime_wide(lower_upper: (Self, Self), n: usize) -> (Self, Self) {
+ let (lower, mut upper) = lower_upper;
+ let new_lower = lower.shl_vartime(n);
+ upper = upper.shl_vartime(n);
+ if n >= Self::BITS {
+ upper = upper.bitor(&lower.shl_vartime(n - Self::BITS));
+ } else {
+ upper = upper.bitor(&lower.shr_vartime(Self::BITS - n));
+ }
+
+ (new_lower, upper)
}
}
-impl<const LIMBS: usize> Shl<usize> for UInt<LIMBS> {
- type Output = UInt<LIMBS>;
+impl<const LIMBS: usize> Shl<usize> for Uint<LIMBS> {
+ type Output = Uint<LIMBS>;
/// NOTE: this operation is variable time with respect to `rhs` *ONLY*.
///
/// When used with a fixed `rhs`, this function is constant-time with respect
/// to `self`.
- fn shl(self, rhs: usize) -> UInt<LIMBS> {
+ fn shl(self, rhs: usize) -> Uint<LIMBS> {
self.shl_vartime(rhs)
}
}
-impl<const LIMBS: usize> Shl<usize> for &UInt<LIMBS> {
- type Output = UInt<LIMBS>;
+impl<const LIMBS: usize> Shl<usize> for &Uint<LIMBS> {
+ type Output = Uint<LIMBS>;
/// NOTE: this operation is variable time with respect to `rhs` *ONLY*.
///
/// When used with a fixed `rhs`, this function is constant-time with respect
/// to `self`.
- fn shl(self, rhs: usize) -> UInt<LIMBS> {
+ fn shl(self, rhs: usize) -> Uint<LIMBS> {
self.shl_vartime(rhs)
}
}
-impl<const LIMBS: usize> ShlAssign<usize> for UInt<LIMBS> {
+impl<const LIMBS: usize> ShlAssign<usize> for Uint<LIMBS> {
/// NOTE: this operation is variable time with respect to `rhs` *ONLY*.
///
/// When used with a fixed `rhs`, this function is constant-time with respect
@@ -74,7 +144,7 @@ impl<const LIMBS: usize> ShlAssign<usize> for UInt<LIMBS> {
#[cfg(test)]
mod tests {
- use crate::U256;
+ use crate::{Limb, Uint, U128, U256};
const N: U256 =
U256::from_be_hex("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364141");
@@ -131,4 +201,28 @@ mod tests {
fn shl64() {
assert_eq!(N << 64, SIXTY_FOUR);
}
+
+ #[test]
+ fn shl_wide_1_1_128() {
+ assert_eq!(
+ Uint::shl_vartime_wide((U128::ONE, U128::ONE), 128),
+ (U128::ZERO, U128::ONE)
+ );
+ }
+
+ #[test]
+ fn shl_wide_max_0_1() {
+ assert_eq!(
+ Uint::shl_vartime_wide((U128::MAX, U128::ZERO), 1),
+ (U128::MAX.sbb(&U128::ONE, Limb::ZERO).0, U128::ONE)
+ );
+ }
+
+ #[test]
+ fn shl_wide_max_max_256() {
+ assert_eq!(
+ Uint::shl_vartime_wide((U128::MAX, U128::MAX), 256),
+ (U128::ZERO, U128::ZERO)
+ );
+ }
}
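Note: shl_vartime_wide shifts a double-width value held as a (lo, hi) pair, moving bits from the low half into the high half and producing zero once the shift reaches the full width. The u64/u128 sketch below mirrors the new tests' expectations at a smaller size; it is an illustration, not the crate's implementation.

/// Left-shift a wide value held as (lo, hi) u64 halves; shifts of 128 bits
/// or more produce zero.
fn shl_wide((lo, hi): (u64, u64), n: u32) -> (u64, u64) {
    let wide = ((hi as u128) << 64) | lo as u128;
    let shifted = if n >= 128 { 0 } else { wide << n };
    (shifted as u64, (shifted >> 64) as u64)
}

fn main() {
    // Shifting by the width of one half moves the low half into the high half.
    assert_eq!(shl_wide((1, 1), 64), (0, 1));
    // Shifting u64::MAX left by one carries its top bit into the high half.
    assert_eq!(shl_wide((u64::MAX, 0), 1), (u64::MAX - 1, 1));
    // Shifting by the full width (or more) clears everything.
    assert_eq!(shl_wide((u64::MAX, u64::MAX), 128), (0, 0));
}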
diff --git a/vendor/crypto-bigint/src/uint/shr.rs b/vendor/crypto-bigint/src/uint/shr.rs
index 54375ae72..071788fb4 100644
--- a/vendor/crypto-bigint/src/uint/shr.rs
+++ b/vendor/crypto-bigint/src/uint/shr.rs
@@ -1,10 +1,42 @@
-//! [`UInt`] bitwise right shift operations.
+//! [`Uint`] bitwise right shift operations.
-use super::UInt;
-use crate::Limb;
+use super::Uint;
+use crate::{limb::HI_BIT, CtChoice, Limb};
use core::ops::{Shr, ShrAssign};
-impl<const LIMBS: usize> UInt<LIMBS> {
+impl<const LIMBS: usize> Uint<LIMBS> {
+ /// Computes `self >> 1` in constant-time, returning the overflowing bit as a `CtChoice`.
+ pub(crate) const fn shr_1(&self) -> (Self, CtChoice) {
+ let mut shifted_bits = [0; LIMBS];
+ let mut i = 0;
+ while i < LIMBS {
+ shifted_bits[i] = self.limbs[i].0 >> 1;
+ i += 1;
+ }
+
+ let mut carry_bits = [0; LIMBS];
+ let mut i = 0;
+ while i < LIMBS {
+ carry_bits[i] = self.limbs[i].0 << HI_BIT;
+ i += 1;
+ }
+
+ let mut limbs = [Limb(0); LIMBS];
+
+ let mut i = 0;
+ while i < (LIMBS - 1) {
+ limbs[i] = Limb(shifted_bits[i] | carry_bits[i + 1]);
+ i += 1;
+ }
+ limbs[LIMBS - 1] = Limb(shifted_bits[LIMBS - 1]);
+
+ debug_assert!(carry_bits[LIMBS - 1] == 0 || carry_bits[LIMBS - 1] == (1 << HI_BIT));
+ (
+ Uint::new(limbs),
+ CtChoice::from_lsb(carry_bits[0] >> HI_BIT),
+ )
+ }
+
/// Computes `self >> n`.
///
/// NOTE: this operation is variable time with respect to `n` *ONLY*.
@@ -13,11 +45,11 @@ impl<const LIMBS: usize> UInt<LIMBS> {
/// to `self`.
#[inline(always)]
pub const fn shr_vartime(&self, shift: usize) -> Self {
- let full_shifts = shift / Limb::BIT_SIZE;
- let small_shift = shift & (Limb::BIT_SIZE - 1);
+ let full_shifts = shift / Limb::BITS;
+ let small_shift = shift & (Limb::BITS - 1);
let mut limbs = [Limb::ZERO; LIMBS];
- if shift > Limb::BIT_SIZE * LIMBS {
+ if shift > Limb::BITS * LIMBS {
return Self { limbs };
}
@@ -34,7 +66,7 @@ impl<const LIMBS: usize> UInt<LIMBS> {
let mut lo = self.limbs[i + full_shifts].0 >> small_shift;
if i < (LIMBS - 1) - full_shifts {
- lo |= self.limbs[i + full_shifts + 1].0 << (Limb::BIT_SIZE - small_shift);
+ lo |= self.limbs[i + full_shifts + 1].0 << (Limb::BITS - small_shift);
}
limbs[i] = Limb(lo);
@@ -44,33 +76,53 @@ impl<const LIMBS: usize> UInt<LIMBS> {
Self { limbs }
}
+
+ /// Computes a right shift on a wide input as `(lo, hi)`.
+ ///
+ /// NOTE: this operation is variable time with respect to `n` *ONLY*.
+ ///
+ /// When used with a fixed `n`, this function is constant-time with respect
+ /// to `self`.
+ #[inline(always)]
+ pub const fn shr_vartime_wide(lower_upper: (Self, Self), n: usize) -> (Self, Self) {
+ let (mut lower, upper) = lower_upper;
+ let new_upper = upper.shr_vartime(n);
+ lower = lower.shr_vartime(n);
+ if n >= Self::BITS {
+ lower = lower.bitor(&upper.shr_vartime(n - Self::BITS));
+ } else {
+ lower = lower.bitor(&upper.shl_vartime(Self::BITS - n));
+ }
+
+ (lower, new_upper)
+ }
}
-impl<const LIMBS: usize> Shr<usize> for UInt<LIMBS> {
- type Output = UInt<LIMBS>;
+impl<const LIMBS: usize> Shr<usize> for Uint<LIMBS> {
+ type Output = Uint<LIMBS>;
/// NOTE: this operation is variable time with respect to `rhs` *ONLY*.
///
/// When used with a fixed `rhs`, this function is constant-time with respect
/// to `self`.
- fn shr(self, rhs: usize) -> UInt<LIMBS> {
+ fn shr(self, rhs: usize) -> Uint<LIMBS> {
self.shr_vartime(rhs)
}
}
-impl<const LIMBS: usize> Shr<usize> for &UInt<LIMBS> {
- type Output = UInt<LIMBS>;
+impl<const LIMBS: usize> Shr<usize> for &Uint<LIMBS> {
+ type Output = Uint<LIMBS>;
/// NOTE: this operation is variable time with respect to `rhs` *ONLY*.
///
/// When used with a fixed `rhs`, this function is constant-time with respect
/// to `self`.
- fn shr(self, rhs: usize) -> UInt<LIMBS> {
+ fn shr(self, rhs: usize) -> Uint<LIMBS> {
self.shr_vartime(rhs)
}
}
-impl<const LIMBS: usize> ShrAssign<usize> for UInt<LIMBS> {
+impl<const LIMBS: usize> ShrAssign<usize> for Uint<LIMBS> {
fn shr_assign(&mut self, rhs: usize) {
*self = self.shr_vartime(rhs);
}
@@ -78,7 +130,7 @@ impl<const LIMBS: usize> ShrAssign<usize> for UInt<LIMBS> {
#[cfg(test)]
mod tests {
- use crate::U256;
+ use crate::{Uint, U128, U256};
const N: U256 =
U256::from_be_hex("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364141");
@@ -90,4 +142,28 @@ mod tests {
fn shr1() {
assert_eq!(N >> 1, N_2);
}
+
+ #[test]
+ fn shr_wide_1_1_128() {
+ assert_eq!(
+ Uint::shr_vartime_wide((U128::ONE, U128::ONE), 128),
+ (U128::ONE, U128::ZERO)
+ );
+ }
+
+ #[test]
+ fn shr_wide_0_max_1() {
+ assert_eq!(
+ Uint::shr_vartime_wide((U128::ZERO, U128::MAX), 1),
+ (U128::ONE << 127, U128::MAX >> 1)
+ );
+ }
+
+ #[test]
+ fn shr_wide_max_max_256() {
+ assert_eq!(
+ Uint::shr_vartime_wide((U128::MAX, U128::MAX), 256),
+ (U128::ZERO, U128::ZERO)
+ );
+ }
}
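Note: shr_vartime_wide is the mirror image: bits move from the high half into the low half, and shifts of the full width or more clear both halves. The same kind of u64/u128 sketch, again for illustration only:

/// Right-shift a wide value held as (lo, hi) u64 halves; shifts of 128 bits
/// or more produce zero.
fn shr_wide((lo, hi): (u64, u64), n: u32) -> (u64, u64) {
    let wide = ((hi as u128) << 64) | lo as u128;
    let shifted = if n >= 128 { 0 } else { wide >> n };
    (shifted as u64, (shifted >> 64) as u64)
}

fn main() {
    // Shifting by the width of one half moves the high half into the low half.
    assert_eq!(shr_wide((1, 1), 64), (1, 0));
    // The low bit of the high half becomes the top bit of the low half.
    assert_eq!(shr_wide((0, u64::MAX), 1), (1 << 63, u64::MAX >> 1));
    // Shifting by the full width (or more) clears everything.
    assert_eq!(shr_wide((u64::MAX, u64::MAX), 256 as u32), (0, 0));
}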
diff --git a/vendor/crypto-bigint/src/uint/split.rs b/vendor/crypto-bigint/src/uint/split.rs
index ecff9d6d8..b681a6154 100644
--- a/vendor/crypto-bigint/src/uint/split.rs
+++ b/vendor/crypto-bigint/src/uint/split.rs
@@ -5,7 +5,7 @@ macro_rules! impl_split {
impl $name {
/// Split this number in half, returning its high and low components
/// respectively.
- pub const fn split(&self) -> (UInt<{nlimbs!($bits) / 2}>, UInt<{nlimbs!($bits) / 2}>) {
+ pub const fn split(&self) -> (Uint<{nlimbs!($bits) / 2}>, Uint<{nlimbs!($bits) / 2}>) {
let mut lo = [Limb::ZERO; nlimbs!($bits) / 2];
let mut hi = [Limb::ZERO; nlimbs!($bits) / 2];
let mut i = 0;
@@ -24,20 +24,20 @@ macro_rules! impl_split {
j += 1;
}
- (UInt { limbs: hi }, UInt { limbs: lo })
+ (Uint { limbs: hi }, Uint { limbs: lo })
}
}
impl Split for $name {
- type Output = UInt<{nlimbs!($bits) / 2}>;
+ type Output = Uint<{nlimbs!($bits) / 2}>;
fn split(&self) -> (Self::Output, Self::Output) {
self.split()
}
}
- impl From<$name> for (UInt<{nlimbs!($bits) / 2}>, UInt<{nlimbs!($bits) / 2}>) {
- fn from(num: $name) -> (UInt<{nlimbs!($bits) / 2}>, UInt<{nlimbs!($bits) / 2}>) {
+ impl From<$name> for (Uint<{nlimbs!($bits) / 2}>, Uint<{nlimbs!($bits) / 2}>) {
+ fn from(num: $name) -> (Uint<{nlimbs!($bits) / 2}>, Uint<{nlimbs!($bits) / 2}>) {
num.split()
}
}
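Note: the `impl_split!` macro generates a split that returns the (hi, lo) halves of a double-width integer. A u128 -> (u64, u64) sketch of that shape, for illustration:

/// Split a double-width value into its (hi, lo) halves.
fn split(x: u128) -> (u64, u64) {
    ((x >> 64) as u64, x as u64)
}

fn main() {
    let x = (7u128 << 64) | 42;
    assert_eq!(split(x), (7, 42));
}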
diff --git a/vendor/crypto-bigint/src/uint/sqrt.rs b/vendor/crypto-bigint/src/uint/sqrt.rs
index 4a9f26a61..56815e2de 100644
--- a/vendor/crypto-bigint/src/uint/sqrt.rs
+++ b/vendor/crypto-bigint/src/uint/sqrt.rs
@@ -1,10 +1,10 @@
-//! [`UInt`] square root operations.
+//! [`Uint`] square root operations.
-use super::UInt;
+use super::Uint;
use crate::{Limb, Word};
use subtle::{ConstantTimeEq, CtOption};
-impl<const LIMBS: usize> UInt<LIMBS> {
+impl<const LIMBS: usize> Uint<LIMBS> {
/// Computes √(`self`)
/// Uses Brent & Zimmermann, Modern Computer Arithmetic, v0.5.9, Algorithm 1.13
///
@@ -21,13 +21,12 @@ impl<const LIMBS: usize> UInt<LIMBS> {
// If guess increased, the initial guess was low.
// Repeat until reverse course.
- while guess.ct_cmp(&xn) == -1 {
+ while Uint::ct_lt(&guess, &xn).is_true_vartime() {
// Sometimes an increase is too far, especially with large
// powers, and then takes a long time to walk back. The upper
// bound is based on bit size, so saturate on that.
- let res = Limb::ct_cmp(Limb(xn.bits_vartime() as Word), Limb(max_bits as Word)) - 1;
- let le = Limb::is_nonzero(Limb(res as Word));
- guess = Self::ct_select(cap, xn, le);
+ let le = Limb::ct_le(Limb(xn.bits_vartime() as Word), Limb(max_bits as Word));
+ guess = Self::ct_select(&cap, &xn, le);
xn = {
let q = self.wrapping_div(&guess);
let t = guess.wrapping_add(&q);
@@ -36,7 +35,7 @@ impl<const LIMBS: usize> UInt<LIMBS> {
}
// Repeat while guess decreases.
- while guess.ct_cmp(&xn) == 1 && xn.ct_is_nonzero() == Word::MAX {
+ while Uint::ct_gt(&guess, &xn).is_true_vartime() && xn.ct_is_nonzero().is_true_vartime() {
guess = xn;
xn = {
let q = self.wrapping_div(&guess);
@@ -45,7 +44,7 @@ impl<const LIMBS: usize> UInt<LIMBS> {
};
}
- Self::ct_select(Self::ZERO, guess, self.ct_is_nonzero())
+ Self::ct_select(&Self::ZERO, &guess, self.ct_is_nonzero())
}
/// Wrapped sqrt is just normal √(`self`)
@@ -60,7 +59,7 @@ impl<const LIMBS: usize> UInt<LIMBS> {
pub fn checked_sqrt(&self) -> CtOption<Self> {
let r = self.sqrt();
let s = r.wrapping_mul(&r);
- CtOption::new(r, self.ct_eq(&s))
+ CtOption::new(r, ConstantTimeEq::ct_eq(self, &s))
}
}
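Note: sqrt still follows Brent & Zimmermann Algorithm 1.13: iterate x_{n+1} = (x_n + a / x_n) / 2 from an over-estimate until the value stops decreasing, which converges to floor(sqrt(a)). A standalone u64 version of that iteration, without the constant-time selection the crate performs:

/// Integer square root via Newton's iteration from an over-estimate.
fn isqrt(a: u64) -> u64 {
    if a == 0 {
        return 0;
    }
    // 2^ceil(bits/2) is always >= sqrt(a), so it is a safe starting guess.
    let bits = 64 - a.leading_zeros();
    let mut guess = 1u64 << ((bits + 1) / 2);
    let mut xn = (guess + a / guess) / 2;
    // The sequence decreases monotonically from the over-estimate and
    // stops at floor(sqrt(a)).
    while xn < guess {
        guess = xn;
        xn = (guess + a / guess) / 2;
    }
    guess
}

fn main() {
    assert_eq!(isqrt(0), 0);
    assert_eq!(isqrt(1), 1);
    assert_eq!(isqrt(15), 3);
    assert_eq!(isqrt(16), 4);
    assert_eq!(isqrt(u64::MAX), (1u64 << 32) - 1);
}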
diff --git a/vendor/crypto-bigint/src/uint/sub.rs b/vendor/crypto-bigint/src/uint/sub.rs
index 102f6b978..c39e54922 100644
--- a/vendor/crypto-bigint/src/uint/sub.rs
+++ b/vendor/crypto-bigint/src/uint/sub.rs
@@ -1,11 +1,11 @@
-//! [`UInt`] addition operations.
+//! [`Uint`] subtraction operations.
-use super::UInt;
-use crate::{Checked, CheckedSub, Limb, Wrapping, Zero};
+use super::Uint;
+use crate::{Checked, CheckedSub, CtChoice, Limb, Wrapping, Zero};
use core::ops::{Sub, SubAssign};
use subtle::CtOption;
-impl<const LIMBS: usize> UInt<LIMBS> {
+impl<const LIMBS: usize> Uint<LIMBS> {
/// Computes `a - (b + borrow)`, returning the result along with the new borrow.
#[inline(always)]
pub const fn sbb(&self, rhs: &Self, mut borrow: Limb) -> (Self, Limb) {
@@ -38,9 +38,21 @@ impl<const LIMBS: usize> UInt<LIMBS> {
pub const fn wrapping_sub(&self, rhs: &Self) -> Self {
self.sbb(rhs, Limb::ZERO).0
}
+
+ /// Perform wrapping subtraction of `rhs` when `choice` is truthy, returning a truthy
+ /// `CtChoice` as the second element of the tuple if an underflow has occurred.
+ pub(crate) const fn conditional_wrapping_sub(
+ &self,
+ rhs: &Self,
+ choice: CtChoice,
+ ) -> (Self, CtChoice) {
+ let actual_rhs = Uint::ct_select(&Uint::ZERO, rhs, choice);
+ let (res, borrow) = self.sbb(&actual_rhs, Limb::ZERO);
+ (res, CtChoice::from_mask(borrow.0))
+ }
}
-impl<const LIMBS: usize> CheckedSub<&UInt<LIMBS>> for UInt<LIMBS> {
+impl<const LIMBS: usize> CheckedSub<&Uint<LIMBS>> for Uint<LIMBS> {
type Output = Self;
fn checked_sub(&self, rhs: &Self) -> CtOption<Self> {
@@ -49,54 +61,54 @@ impl<const LIMBS: usize> CheckedSub<&UInt<LIMBS>> for UInt<LIMBS> {
}
}
-impl<const LIMBS: usize> Sub for Wrapping<UInt<LIMBS>> {
+impl<const LIMBS: usize> Sub for Wrapping<Uint<LIMBS>> {
type Output = Self;
- fn sub(self, rhs: Self) -> Wrapping<UInt<LIMBS>> {
+ fn sub(self, rhs: Self) -> Wrapping<Uint<LIMBS>> {
Wrapping(self.0.wrapping_sub(&rhs.0))
}
}
-impl<const LIMBS: usize> Sub<&Wrapping<UInt<LIMBS>>> for Wrapping<UInt<LIMBS>> {
- type Output = Wrapping<UInt<LIMBS>>;
+impl<const LIMBS: usize> Sub<&Wrapping<Uint<LIMBS>>> for Wrapping<Uint<LIMBS>> {
+ type Output = Wrapping<Uint<LIMBS>>;
- fn sub(self, rhs: &Wrapping<UInt<LIMBS>>) -> Wrapping<UInt<LIMBS>> {
+ fn sub(self, rhs: &Wrapping<Uint<LIMBS>>) -> Wrapping<Uint<LIMBS>> {
Wrapping(self.0.wrapping_sub(&rhs.0))
}
}
-impl<const LIMBS: usize> Sub<Wrapping<UInt<LIMBS>>> for &Wrapping<UInt<LIMBS>> {
- type Output = Wrapping<UInt<LIMBS>>;
+impl<const LIMBS: usize> Sub<Wrapping<Uint<LIMBS>>> for &Wrapping<Uint<LIMBS>> {
+ type Output = Wrapping<Uint<LIMBS>>;
- fn sub(self, rhs: Wrapping<UInt<LIMBS>>) -> Wrapping<UInt<LIMBS>> {
+ fn sub(self, rhs: Wrapping<Uint<LIMBS>>) -> Wrapping<Uint<LIMBS>> {
Wrapping(self.0.wrapping_sub(&rhs.0))
}
}
-impl<const LIMBS: usize> Sub<&Wrapping<UInt<LIMBS>>> for &Wrapping<UInt<LIMBS>> {
- type Output = Wrapping<UInt<LIMBS>>;
+impl<const LIMBS: usize> Sub<&Wrapping<Uint<LIMBS>>> for &Wrapping<Uint<LIMBS>> {
+ type Output = Wrapping<Uint<LIMBS>>;
- fn sub(self, rhs: &Wrapping<UInt<LIMBS>>) -> Wrapping<UInt<LIMBS>> {
+ fn sub(self, rhs: &Wrapping<Uint<LIMBS>>) -> Wrapping<Uint<LIMBS>> {
Wrapping(self.0.wrapping_sub(&rhs.0))
}
}
-impl<const LIMBS: usize> SubAssign for Wrapping<UInt<LIMBS>> {
+impl<const LIMBS: usize> SubAssign for Wrapping<Uint<LIMBS>> {
fn sub_assign(&mut self, other: Self) {
*self = *self - other;
}
}
-impl<const LIMBS: usize> SubAssign<&Wrapping<UInt<LIMBS>>> for Wrapping<UInt<LIMBS>> {
+impl<const LIMBS: usize> SubAssign<&Wrapping<Uint<LIMBS>>> for Wrapping<Uint<LIMBS>> {
fn sub_assign(&mut self, other: &Self) {
*self = *self - other;
}
}
-impl<const LIMBS: usize> Sub for Checked<UInt<LIMBS>> {
+impl<const LIMBS: usize> Sub for Checked<Uint<LIMBS>> {
type Output = Self;
- fn sub(self, rhs: Self) -> Checked<UInt<LIMBS>> {
+ fn sub(self, rhs: Self) -> Checked<Uint<LIMBS>> {
Checked(
self.0
.and_then(|lhs| rhs.0.and_then(|rhs| lhs.checked_sub(&rhs))),
@@ -104,10 +116,10 @@ impl<const LIMBS: usize> Sub for Checked<UInt<LIMBS>> {
}
}
-impl<const LIMBS: usize> Sub<&Checked<UInt<LIMBS>>> for Checked<UInt<LIMBS>> {
- type Output = Checked<UInt<LIMBS>>;
+impl<const LIMBS: usize> Sub<&Checked<Uint<LIMBS>>> for Checked<Uint<LIMBS>> {
+ type Output = Checked<Uint<LIMBS>>;
- fn sub(self, rhs: &Checked<UInt<LIMBS>>) -> Checked<UInt<LIMBS>> {
+ fn sub(self, rhs: &Checked<Uint<LIMBS>>) -> Checked<Uint<LIMBS>> {
Checked(
self.0
.and_then(|lhs| rhs.0.and_then(|rhs| lhs.checked_sub(&rhs))),
@@ -115,10 +127,10 @@ impl<const LIMBS: usize> Sub<&Checked<UInt<LIMBS>>> for Checked<UInt<LIMBS>> {
}
}
-impl<const LIMBS: usize> Sub<Checked<UInt<LIMBS>>> for &Checked<UInt<LIMBS>> {
- type Output = Checked<UInt<LIMBS>>;
+impl<const LIMBS: usize> Sub<Checked<Uint<LIMBS>>> for &Checked<Uint<LIMBS>> {
+ type Output = Checked<Uint<LIMBS>>;
- fn sub(self, rhs: Checked<UInt<LIMBS>>) -> Checked<UInt<LIMBS>> {
+ fn sub(self, rhs: Checked<Uint<LIMBS>>) -> Checked<Uint<LIMBS>> {
Checked(
self.0
.and_then(|lhs| rhs.0.and_then(|rhs| lhs.checked_sub(&rhs))),
@@ -126,10 +138,10 @@ impl<const LIMBS: usize> Sub<Checked<UInt<LIMBS>>> for &Checked<UInt<LIMBS>> {
}
}
-impl<const LIMBS: usize> Sub<&Checked<UInt<LIMBS>>> for &Checked<UInt<LIMBS>> {
- type Output = Checked<UInt<LIMBS>>;
+impl<const LIMBS: usize> Sub<&Checked<Uint<LIMBS>>> for &Checked<Uint<LIMBS>> {
+ type Output = Checked<Uint<LIMBS>>;
- fn sub(self, rhs: &Checked<UInt<LIMBS>>) -> Checked<UInt<LIMBS>> {
+ fn sub(self, rhs: &Checked<Uint<LIMBS>>) -> Checked<Uint<LIMBS>> {
Checked(
self.0
.and_then(|lhs| rhs.0.and_then(|rhs| lhs.checked_sub(&rhs))),
@@ -137,13 +149,13 @@ impl<const LIMBS: usize> Sub<&Checked<UInt<LIMBS>>> for &Checked<UInt<LIMBS>> {
}
}
-impl<const LIMBS: usize> SubAssign for Checked<UInt<LIMBS>> {
+impl<const LIMBS: usize> SubAssign for Checked<Uint<LIMBS>> {
fn sub_assign(&mut self, other: Self) {
*self = *self - other;
}
}
-impl<const LIMBS: usize> SubAssign<&Checked<UInt<LIMBS>>> for Checked<UInt<LIMBS>> {
+impl<const LIMBS: usize> SubAssign<&Checked<Uint<LIMBS>>> for Checked<Uint<LIMBS>> {
fn sub_assign(&mut self, other: &Self) {
*self = *self - other;
}
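Note: sbb chains a borrow through the limbs, and the new conditional_wrapping_sub reports that borrow as a CtChoice so callers can detect underflow. The u64-limb sketch below shows the borrow chain; it tracks the borrow as a bool for clarity, whereas the crate carries it as a limb-wide mask.

/// Multi-limb subtract-with-borrow: each limb subtracts the corresponding
/// `rhs` limb plus the incoming borrow.
fn sbb<const LIMBS: usize>(
    lhs: [u64; LIMBS],
    rhs: [u64; LIMBS],
    mut borrow: bool,
) -> ([u64; LIMBS], bool) {
    let mut out = [0u64; LIMBS];
    for i in 0..LIMBS {
        let (r, b1) = lhs[i].overflowing_sub(rhs[i]);
        let (r, b2) = r.overflowing_sub(borrow as u64);
        out[i] = r;
        borrow = b1 | b2;
    }
    (out, borrow)
}

fn main() {
    // (2^64 - 1) - 1: no borrow out.
    assert_eq!(sbb([u64::MAX, 0], [1, 0], false), ([u64::MAX - 1, 0], false));
    // 0 - 1 wraps and reports a borrow, i.e. an underflow occurred.
    assert_eq!(sbb([0, 0], [1, 0], false), ([u64::MAX, u64::MAX], true));
}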
diff --git a/vendor/crypto-bigint/src/uint/sub_mod.rs b/vendor/crypto-bigint/src/uint/sub_mod.rs
index f699f66eb..728a92760 100644
--- a/vendor/crypto-bigint/src/uint/sub_mod.rs
+++ b/vendor/crypto-bigint/src/uint/sub_mod.rs
@@ -1,12 +1,12 @@
-//! [`UInt`] subtraction modulus operations.
+//! [`Uint`] subtraction modulus operations.
-use crate::{Limb, SubMod, UInt};
+use crate::{Limb, SubMod, Uint};
-impl<const LIMBS: usize> UInt<LIMBS> {
+impl<const LIMBS: usize> Uint<LIMBS> {
/// Computes `self - rhs mod p` in constant time.
///
/// Assumes `self - rhs` as unbounded signed integer is in `[-p, p)`.
- pub const fn sub_mod(&self, rhs: &UInt<LIMBS>, p: &UInt<LIMBS>) -> UInt<LIMBS> {
+ pub const fn sub_mod(&self, rhs: &Uint<LIMBS>, p: &Uint<LIMBS>) -> Uint<LIMBS> {
let (mut out, borrow) = self.sbb(rhs, Limb::ZERO);
// If underflow occurred on the final limb, borrow = 0xfff...fff, otherwise
@@ -35,12 +35,12 @@ impl<const LIMBS: usize> UInt<LIMBS> {
// the underflow. This cannot underflow due to the assumption
// `self - rhs >= -p`.
let l = borrow.0 & c.0;
- let (out, _) = out.sbb(&UInt::from_word(l), Limb::ZERO);
+ let (out, _) = out.sbb(&Uint::from_word(l), Limb::ZERO);
out
}
}
-impl<const LIMBS: usize> SubMod for UInt<LIMBS> {
+impl<const LIMBS: usize> SubMod for Uint<LIMBS> {
type Output = Self;
fn sub_mod(&self, rhs: &Self, p: &Self) -> Self {
@@ -52,7 +52,7 @@ impl<const LIMBS: usize> SubMod for UInt<LIMBS> {
#[cfg(all(test, feature = "rand"))]
mod tests {
- use crate::{Limb, NonZero, Random, RandomMod, UInt};
+ use crate::{Limb, NonZero, Random, RandomMod, Uint};
use rand_core::SeedableRng;
macro_rules! test_sub_mod {
@@ -61,8 +61,8 @@ mod tests {
fn $test_name() {
let mut rng = rand_chacha::ChaCha8Rng::seed_from_u64(1);
let moduli = [
- NonZero::<UInt<$size>>::random(&mut rng),
- NonZero::<UInt<$size>>::random(&mut rng),
+ NonZero::<Uint<$size>>::random(&mut rng),
+ NonZero::<Uint<$size>>::random(&mut rng),
];
for p in &moduli {
@@ -72,8 +72,8 @@ mod tests {
(0, 0, 0u64.into()),
];
for (a, b, c) in &base_cases {
- let a: UInt<$size> = (*a).into();
- let b: UInt<$size> = (*b).into();
+ let a: Uint<$size> = (*a).into();
+ let b: Uint<$size> = (*b).into();
let x = a.sub_mod(&b, p);
assert_eq!(*c, x, "{} - {} mod {} = {} != {}", a, b, p, x, c);
@@ -81,8 +81,8 @@ mod tests {
if $size > 1 {
for _i in 0..100 {
- let a: UInt<$size> = Limb::random(&mut rng).into();
- let b: UInt<$size> = Limb::random(&mut rng).into();
+ let a: Uint<$size> = Limb::random(&mut rng).into();
+ let b: Uint<$size> = Limb::random(&mut rng).into();
let (a, b) = if a < b { (b, a) } else { (a, b) };
let c = a.sub_mod(&b, p);
@@ -92,8 +92,8 @@ mod tests {
}
for _i in 0..100 {
- let a = UInt::<$size>::random_mod(&mut rng, p);
- let b = UInt::<$size>::random_mod(&mut rng, p);
+ let a = Uint::<$size>::random_mod(&mut rng, p);
+ let b = Uint::<$size>::random_mod(&mut rng, p);
let c = a.sub_mod(&b, p);
assert!(c < **p, "not reduced: {} >= {} ", c, p);
@@ -119,17 +119,17 @@ mod tests {
];
for special in &moduli {
- let p = &NonZero::new(UInt::ZERO.wrapping_sub(&UInt::from_word(special.0)))
+ let p = &NonZero::new(Uint::ZERO.wrapping_sub(&Uint::from_word(special.0)))
.unwrap();
- let minus_one = p.wrapping_sub(&UInt::ONE);
+ let minus_one = p.wrapping_sub(&Uint::ONE);
let base_cases = [
- (UInt::ZERO, UInt::ZERO, UInt::ZERO),
- (UInt::ONE, UInt::ZERO, UInt::ONE),
- (UInt::ZERO, UInt::ONE, minus_one),
- (minus_one, minus_one, UInt::ZERO),
- (UInt::ZERO, minus_one, UInt::ONE),
+ (Uint::ZERO, Uint::ZERO, Uint::ZERO),
+ (Uint::ONE, Uint::ZERO, Uint::ONE),
+ (Uint::ZERO, Uint::ONE, minus_one),
+ (minus_one, minus_one, Uint::ZERO),
+ (Uint::ZERO, minus_one, Uint::ONE),
];
for (a, b, c) in &base_cases {
let x = a.sub_mod_special(&b, *special.as_ref());
@@ -137,8 +137,8 @@ mod tests {
}
for _i in 0..100 {
- let a = UInt::<$size>::random_mod(&mut rng, p);
- let b = UInt::<$size>::random_mod(&mut rng, p);
+ let a = Uint::<$size>::random_mod(&mut rng, p);
+ let b = Uint::<$size>::random_mod(&mut rng, p);
let c = a.sub_mod_special(&b, *special.as_ref());
assert!(c < **p, "not reduced: {} >= {} ", c, p);