author    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-04 12:47:55 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-04 12:47:55 +0000
commit    2aadc03ef15cb5ca5cc2af8a7c08e070742f0ac4 (patch)
tree      033cc839730fda84ff08db877037977be94e5e3a /vendor/group/src
parent    Initial commit. (diff)
Adding upstream version 0.70.1+ds1. (upstream/0.70.1+ds1, upstream)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'vendor/group/src')
-rw-r--r--  vendor/group/src/cofactor.rs    100
-rw-r--r--  vendor/group/src/lib.rs         174
-rw-r--r--  vendor/group/src/prime.rs        50
-rw-r--r--  vendor/group/src/tests/mod.rs   448
-rw-r--r--  vendor/group/src/wnaf.rs        506
5 files changed, 1278 insertions, 0 deletions
diff --git a/vendor/group/src/cofactor.rs b/vendor/group/src/cofactor.rs
new file mode 100644
index 0000000..84bfe0a
--- /dev/null
+++ b/vendor/group/src/cofactor.rs
@@ -0,0 +1,100 @@
+use core::fmt;
+use core::ops::{Mul, Neg};
+use ff::PrimeField;
+use subtle::{Choice, CtOption};
+
+use crate::{prime::PrimeGroup, Curve, Group, GroupEncoding, GroupOps, GroupOpsOwned};
+
+/// This trait represents an element of a cryptographic group with a large prime-order
+/// subgroup and a comparatively-small cofactor.
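+///
+/// As a minimal usage sketch (the `validate` helper below is hypothetical, not part
+/// of this crate's API), untrusted input can be mapped into the prime-order subgroup:
+///
+/// ```
+/// use group::cofactor::CofactorGroup;
+/// use subtle::CtOption;
+///
+/// /// Rejects points outside the prime-order subgroup, in constant time.
+/// fn validate<G: CofactorGroup>(p: G) -> CtOption<G::Subgroup> {
+///     p.into_subgroup()
+/// }
+/// ```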
+pub trait CofactorGroup:
+ Group
+ + GroupEncoding
+ + GroupOps<<Self as CofactorGroup>::Subgroup>
+ + GroupOpsOwned<<Self as CofactorGroup>::Subgroup>
+{
+ /// The large prime-order subgroup in which cryptographic operations are performed.
+ /// If `Self` implements `PrimeGroup`, then `Self::Subgroup` may be `Self`.
+ type Subgroup: PrimeGroup<Scalar = Self::Scalar> + Into<Self>;
+
+ /// Maps `self` to the prime-order subgroup by multiplying this element by some
+ /// `k`-multiple of the cofactor.
+ ///
+    /// The value `k` does not vary between inputs for a given implementation, but may
+    /// vary between different implementations of `CofactorGroup` because some groups
+    /// have more efficient methods of clearing the cofactor when `k` is allowed to be
+    /// different from `1`.
+ ///
+ /// If `Self` implements [`PrimeGroup`], this returns `self`.
+ fn clear_cofactor(&self) -> Self::Subgroup;
+
+ /// Returns `self` if it is contained in the prime-order subgroup.
+ ///
+ /// If `Self` implements [`PrimeGroup`], this returns `Some(self)`.
+ fn into_subgroup(self) -> CtOption<Self::Subgroup>;
+
+ /// Determines if this element is of small order.
+ ///
+    /// Returns:
+    /// - `true` if `self` lies in the small-order torsion subgroup.
+    /// - `false` otherwise.
+ fn is_small_order(&self) -> Choice {
+ self.clear_cofactor().is_identity()
+ }
+
+ /// Determines if this element is "torsion free", i.e., is contained in the
+ /// prime-order subgroup.
+ ///
+    /// Returns:
+    /// - `true` if `self` has trivial torsion and lies in the prime-order subgroup.
+    /// - `false` if `self` has a non-zero torsion component and lies outside the
+    /// prime-order subgroup.
+ fn is_torsion_free(&self) -> Choice;
+}
+
+/// Efficient representation of an elliptic curve point in a group with a small
+/// cofactor; membership in the prime-order subgroup is not guaranteed.
+pub trait CofactorCurve:
+ Curve<AffineRepr = <Self as CofactorCurve>::Affine> + CofactorGroup
+{
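+    /// The affine representation of this elliptic curve.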
+ type Affine: CofactorCurveAffine<Curve = Self, Scalar = Self::Scalar>
+ + Mul<Self::Scalar, Output = Self>
+ + for<'r> Mul<&'r Self::Scalar, Output = Self>;
+}
+
+/// Affine representation of an elliptic curve point in a group with a small
+/// cofactor; membership in the prime-order subgroup is not guaranteed.
+pub trait CofactorCurveAffine:
+ GroupEncoding
+ + Copy
+ + Clone
+ + Sized
+ + Send
+ + Sync
+ + fmt::Debug
+ + PartialEq
+ + Eq
+ + 'static
+ + Neg<Output = Self>
+ + Mul<<Self as CofactorCurveAffine>::Scalar, Output = <Self as CofactorCurveAffine>::Curve>
+ + for<'r> Mul<
+ &'r <Self as CofactorCurveAffine>::Scalar,
+ Output = <Self as CofactorCurveAffine>::Curve,
+ >
+{
+    /// The scalar field of this curve.
+    type Scalar: PrimeField;
+    /// The projective representation of points on this curve.
+    type Curve: CofactorCurve<Affine = Self, Scalar = Self::Scalar>;
+
+ /// Returns the additive identity.
+ fn identity() -> Self;
+
+ /// Returns a fixed generator of unknown exponent.
+ fn generator() -> Self;
+
+ /// Determines if this point represents the point at infinity; the
+ /// additive identity.
+ fn is_identity(&self) -> Choice;
+
+ /// Converts this element to its curve representation.
+ fn to_curve(&self) -> Self::Curve;
+}
diff --git a/vendor/group/src/lib.rs b/vendor/group/src/lib.rs
new file mode 100644
index 0000000..27ed5c9
--- /dev/null
+++ b/vendor/group/src/lib.rs
@@ -0,0 +1,174 @@
+#![no_std]
+// Catch documentation errors caused by code changes.
+#![deny(rustdoc::broken_intra_doc_links)]
+
+#[cfg(feature = "alloc")]
+#[macro_use]
+extern crate alloc;
+
+// Re-export ff to make version-matching easier.
+pub use ff;
+
+use core::fmt;
+use core::iter::Sum;
+use core::ops::{Add, AddAssign, Mul, MulAssign, Neg, Sub, SubAssign};
+use ff::PrimeField;
+use rand_core::RngCore;
+use subtle::{Choice, CtOption};
+
+pub mod cofactor;
+pub mod prime;
+#[cfg(feature = "tests")]
+pub mod tests;
+
+#[cfg(feature = "alloc")]
+mod wnaf;
+#[cfg(feature = "alloc")]
+pub use self::wnaf::{Wnaf, WnafBase, WnafGroup, WnafScalar};
+
+/// A helper trait for types with a group operation.
+pub trait GroupOps<Rhs = Self, Output = Self>:
+ Add<Rhs, Output = Output> + Sub<Rhs, Output = Output> + AddAssign<Rhs> + SubAssign<Rhs>
+{
+}
+
+impl<T, Rhs, Output> GroupOps<Rhs, Output> for T where
+ T: Add<Rhs, Output = Output> + Sub<Rhs, Output = Output> + AddAssign<Rhs> + SubAssign<Rhs>
+{
+}
+
+/// A helper trait for references with a group operation.
+pub trait GroupOpsOwned<Rhs = Self, Output = Self>: for<'r> GroupOps<&'r Rhs, Output> {}
+impl<T, Rhs, Output> GroupOpsOwned<Rhs, Output> for T where T: for<'r> GroupOps<&'r Rhs, Output> {}
+
+/// A helper trait for types implementing group scalar multiplication.
+pub trait ScalarMul<Rhs, Output = Self>: Mul<Rhs, Output = Output> + MulAssign<Rhs> {}
+
+impl<T, Rhs, Output> ScalarMul<Rhs, Output> for T where T: Mul<Rhs, Output = Output> + MulAssign<Rhs>
+{}
+
+/// A helper trait for references implementing group scalar multiplication.
+pub trait ScalarMulOwned<Rhs, Output = Self>: for<'r> ScalarMul<&'r Rhs, Output> {}
+impl<T, Rhs, Output> ScalarMulOwned<Rhs, Output> for T where T: for<'r> ScalarMul<&'r Rhs, Output> {}
+
+/// This trait represents an element of a cryptographic group.
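+///
+/// As an illustrative sketch (the `double_and_add` helper is hypothetical), generic
+/// code can be written against these bounds alone:
+///
+/// ```
+/// use group::Group;
+///
+/// /// Computes 2p + q using only the `Group` API.
+/// fn double_and_add<G: Group>(p: G, q: G) -> G {
+///     p.double() + q
+/// }
+/// ```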
+pub trait Group:
+ Clone
+ + Copy
+ + fmt::Debug
+ + Eq
+ + Sized
+ + Send
+ + Sync
+ + 'static
+ + Sum
+ + for<'a> Sum<&'a Self>
+ + Neg<Output = Self>
+ + GroupOps
+ + GroupOpsOwned
+ + ScalarMul<<Self as Group>::Scalar>
+ + ScalarMulOwned<<Self as Group>::Scalar>
+{
+    /// Scalars modulo the order of this group's prime-order subgroup; these form
+    /// the group's scalar field.
+ type Scalar: PrimeField;
+
+ /// Returns an element chosen uniformly at random from the non-identity elements of
+ /// this group.
+ ///
+ /// This function is non-deterministic, and samples from the user-provided RNG.
+ fn random(rng: impl RngCore) -> Self;
+
+ /// Returns the additive identity, also known as the "neutral element".
+ fn identity() -> Self;
+
+ /// Returns a fixed generator of the prime-order subgroup.
+ fn generator() -> Self;
+
+ /// Determines if this point is the identity.
+ fn is_identity(&self) -> Choice;
+
+ /// Doubles this element.
+ #[must_use]
+ fn double(&self) -> Self;
+}
+
+/// Efficient representation of an elliptic curve point.
+pub trait Curve:
+ Group + GroupOps<<Self as Curve>::AffineRepr> + GroupOpsOwned<<Self as Curve>::AffineRepr>
+{
+ /// The affine representation for this elliptic curve.
+ type AffineRepr;
+
+ /// Converts a batch of projective elements into affine elements. This function will
+ /// panic if `p.len() != q.len()`.
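+    ///
+    /// A usage sketch, assuming a concrete `G: Curve` whose affine representation
+    /// is `Default + Clone` (neither bound is required by this trait):
+    ///
+    /// ```ignore
+    /// let mut affine = vec![G::AffineRepr::default(); points.len()];
+    /// G::batch_normalize(&points, &mut affine);
+    /// ```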
+ fn batch_normalize(p: &[Self], q: &mut [Self::AffineRepr]) {
+ assert_eq!(p.len(), q.len());
+
+ for (p, q) in p.iter().zip(q.iter_mut()) {
+ *q = p.to_affine();
+ }
+ }
+
+ /// Converts this element into its affine representation.
+ fn to_affine(&self) -> Self::AffineRepr;
+}
+
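+/// A byte encoding for group elements.
+///
+/// As a round-trip sketch (the `round_trip` helper is hypothetical), any implementer
+/// can be re-parsed from its own canonical encoding:
+///
+/// ```
+/// use group::GroupEncoding;
+/// use subtle::CtOption;
+///
+/// /// Serializes an element and deserializes it again.
+/// fn round_trip<G: GroupEncoding>(g: &G) -> CtOption<G> {
+///     G::from_bytes(&g.to_bytes())
+/// }
+/// ```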
+pub trait GroupEncoding: Sized {
+ /// The encoding of group elements.
+ ///
+ /// The `Default` implementation is not required to return a valid point encoding. The
+ /// bound is present to enable encodings to be constructed generically:
+ /// ```
+ /// # use group::GroupEncoding;
+ /// # use subtle::CtOption;
+ /// # struct G;
+ /// # impl GroupEncoding for G {
+ /// # type Repr = [u8; 0];
+ /// # fn from_bytes(bytes: &Self::Repr) -> CtOption<Self> { unimplemented!() }
+ /// # fn from_bytes_unchecked(bytes: &Self::Repr) -> CtOption<Self> { unimplemented!() }
+ /// # fn to_bytes(&self) -> Self::Repr { unimplemented!() }
+ /// # }
+ /// # let buf = &[0u8; 0][..];
+ /// let mut encoding = <G as GroupEncoding>::Repr::default();
+ /// encoding.as_mut().copy_from_slice(buf);
+ /// ```
+ ///
+ /// It is recommended that the default should be the all-zeroes encoding.
+ type Repr: Copy + Default + Send + Sync + 'static + AsRef<[u8]> + AsMut<[u8]>;
+
+ /// Attempts to deserialize a group element from its encoding.
+ fn from_bytes(bytes: &Self::Repr) -> CtOption<Self>;
+
+ /// Attempts to deserialize a group element, not checking if the element is valid.
+ ///
+ /// **This is dangerous to call unless you trust the bytes you are reading; otherwise,
+ /// API invariants may be broken.** Please consider using
+ /// [`GroupEncoding::from_bytes`] instead.
+ fn from_bytes_unchecked(bytes: &Self::Repr) -> CtOption<Self>;
+
+ /// Converts this element into its byte encoding. This may or may not support
+ /// encoding the identity.
+ // TODO: Figure out how to handle identity encoding generically.
+ fn to_bytes(&self) -> Self::Repr;
+}
+
+/// Affine representation of a point on an elliptic curve that has a defined uncompressed
+/// encoding.
+pub trait UncompressedEncoding: Sized {
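+    /// The byte encoding of group elements in their uncompressed form.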
+ type Uncompressed: Default + AsRef<[u8]> + AsMut<[u8]>;
+
+ /// Attempts to deserialize an element from its uncompressed encoding.
+ fn from_uncompressed(bytes: &Self::Uncompressed) -> CtOption<Self>;
+
+ /// Attempts to deserialize an uncompressed element, not checking if the element is in
+ /// the correct subgroup.
+ ///
+ /// **This is dangerous to call unless you trust the bytes you are reading; otherwise,
+ /// API invariants may be broken.** Please consider using
+ /// [`UncompressedEncoding::from_uncompressed`] instead.
+ fn from_uncompressed_unchecked(bytes: &Self::Uncompressed) -> CtOption<Self>;
+
+ /// Converts this element into its uncompressed encoding, so long as it's not
+ /// the point at infinity.
+ fn to_uncompressed(&self) -> Self::Uncompressed;
+}
diff --git a/vendor/group/src/prime.rs b/vendor/group/src/prime.rs
new file mode 100644
index 0000000..174888e
--- /dev/null
+++ b/vendor/group/src/prime.rs
@@ -0,0 +1,50 @@
+use core::fmt;
+use core::ops::{Mul, Neg};
+use ff::PrimeField;
+use subtle::Choice;
+
+use crate::{Curve, Group, GroupEncoding};
+
+/// This trait represents an element of a prime-order cryptographic group.
+pub trait PrimeGroup: Group + GroupEncoding {}
+
+/// Efficient representation of an elliptic curve point guaranteed to be
+/// in the correct prime-order subgroup.
+pub trait PrimeCurve: Curve<AffineRepr = <Self as PrimeCurve>::Affine> + PrimeGroup {
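+    /// The affine representation of this elliptic curve.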
+ type Affine: PrimeCurveAffine<Curve = Self, Scalar = Self::Scalar>
+ + Mul<Self::Scalar, Output = Self>
+ + for<'r> Mul<&'r Self::Scalar, Output = Self>;
+}
+
+/// Affine representation of an elliptic curve point guaranteed to be
+/// in the correct prime-order subgroup.
+pub trait PrimeCurveAffine:
+    GroupEncoding
+    + Copy
+    + Clone
+    + Sized
+    + Send
+    + Sync
+    + fmt::Debug
+    + PartialEq
+    + Eq
+    + 'static
+    + Neg<Output = Self>
+    + Mul<<Self as PrimeCurveAffine>::Scalar, Output = <Self as PrimeCurveAffine>::Curve>
+    + for<'r> Mul<
+        &'r <Self as PrimeCurveAffine>::Scalar,
+        Output = <Self as PrimeCurveAffine>::Curve,
+    >
+{
+    /// The scalar field of this curve.
+    type Scalar: PrimeField;
+    /// The projective representation of points on this curve.
+    type Curve: PrimeCurve<Affine = Self, Scalar = Self::Scalar>;
+
+ /// Returns the additive identity.
+ fn identity() -> Self;
+
+ /// Returns a fixed generator of unknown exponent.
+ fn generator() -> Self;
+
+ /// Determines if this point represents the point at infinity; the
+ /// additive identity.
+ fn is_identity(&self) -> Choice;
+
+ /// Converts this element to its curve representation.
+ fn to_curve(&self) -> Self::Curve;
+}
diff --git a/vendor/group/src/tests/mod.rs b/vendor/group/src/tests/mod.rs
new file mode 100644
index 0000000..ff79a9b
--- /dev/null
+++ b/vendor/group/src/tests/mod.rs
@@ -0,0 +1,448 @@
+use alloc::vec::Vec;
+use core::ops::{Mul, Neg};
+use ff::{Field, PrimeField};
+use rand::SeedableRng;
+use rand_xorshift::XorShiftRng;
+
+use crate::{
+ prime::{PrimeCurve, PrimeCurveAffine},
+ wnaf::WnafGroup,
+ GroupEncoding, UncompressedEncoding,
+};
+
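+/// Runs a standard suite of edge-case and consistency checks against the curve `G`,
+/// using a fixed RNG seed so that failures are reproducible.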
+pub fn curve_tests<G: PrimeCurve>() {
+ let mut rng = XorShiftRng::from_seed([
+ 0x59, 0x62, 0xbe, 0x5d, 0x76, 0x3d, 0x31, 0x8d, 0x17, 0xdb, 0x37, 0x32, 0x54, 0x06, 0xbc,
+ 0xe5,
+ ]);
+
+ // Negation edge case with identity.
+ {
+ let z = G::identity().neg();
+ assert!(bool::from(z.is_identity()));
+ }
+
+ // Doubling edge case with identity.
+ {
+ let z = G::identity().double();
+ assert!(bool::from(z.is_identity()));
+ }
+
+ // Addition edge cases with identity
+ {
+ let mut r = G::random(&mut rng);
+ let rcopy = r;
+ r.add_assign(&G::identity());
+ assert_eq!(r, rcopy);
+ r.add_assign(&G::Affine::identity());
+ assert_eq!(r, rcopy);
+
+ let mut z = G::identity();
+ z.add_assign(&G::identity());
+ assert!(bool::from(z.is_identity()));
+ z.add_assign(&G::Affine::identity());
+ assert!(bool::from(z.is_identity()));
+
+ let mut z2 = z;
+ z2.add_assign(&r);
+
+ z.add_assign(&r.to_affine());
+
+ assert_eq!(z, z2);
+ assert_eq!(z, r);
+ }
+
+ // Transformations
+ {
+ let a = G::random(&mut rng);
+ let b = a.to_affine().to_curve();
+ let c = a.to_affine().to_curve().to_affine().to_curve();
+ assert_eq!(a, b);
+ assert_eq!(b, c);
+ }
+
+ random_addition_tests::<G>();
+ random_multiplication_tests::<G>();
+ random_doubling_tests::<G>();
+ random_negation_tests::<G>();
+ random_transformation_tests::<G>();
+ random_compressed_encoding_tests::<G>();
+}
+
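+/// Exercises the low-level wNAF helpers across window sizes, plus every `Wnaf`
+/// base/scalar/shared configuration, checking each result against plain scalar
+/// multiplication.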
+pub fn random_wnaf_tests<G: WnafGroup>() {
+ use crate::wnaf::*;
+
+ let mut rng = XorShiftRng::from_seed([
+ 0x59, 0x62, 0xbe, 0x5d, 0x76, 0x3d, 0x31, 0x8d, 0x17, 0xdb, 0x37, 0x32, 0x54, 0x06, 0xbc,
+ 0xe5,
+ ]);
+
+ {
+ let mut table = vec![];
+ let mut wnaf = vec![];
+
+ for w in 2..14 {
+ for _ in 0..100 {
+ let g = G::random(&mut rng);
+ let s = G::Scalar::random(&mut rng);
+ let mut g1 = g;
+ g1.mul_assign(s);
+
+ wnaf_table(&mut table, g, w);
+ wnaf_form(&mut wnaf, s.to_repr(), w);
+ let g2 = wnaf_exp(&table, &wnaf);
+
+ assert_eq!(g1, g2);
+ }
+ }
+ }
+
+ {
+ fn only_compiles_if_send<S: Send>(_: &S) {}
+
+ for _ in 0..100 {
+ let g = G::random(&mut rng);
+ let s = G::Scalar::random(&mut rng);
+ let mut g1 = g;
+ g1.mul_assign(s);
+
+ let g2 = {
+ let mut wnaf = Wnaf::new();
+ wnaf.base(g, 1).scalar(&s)
+ };
+ let g3 = {
+ let mut wnaf = Wnaf::new();
+ wnaf.scalar(&s).base(g)
+ };
+ let g4 = {
+ let mut wnaf = Wnaf::new();
+ let mut shared = wnaf.base(g, 1).shared();
+
+ only_compiles_if_send(&shared);
+
+ shared.scalar(&s)
+ };
+ let g5 = {
+ let mut wnaf = Wnaf::new();
+ let mut shared = wnaf.scalar(&s).shared();
+
+ only_compiles_if_send(&shared);
+
+ shared.base(g)
+ };
+
+ let g6 = {
+ let mut wnaf = Wnaf::new();
+ {
+ // Populate the vectors.
+ wnaf.base(G::random(&mut rng), 1)
+ .scalar(&G::Scalar::random(&mut rng));
+ }
+ wnaf.base(g, 1).scalar(&s)
+ };
+ let g7 = {
+ let mut wnaf = Wnaf::new();
+ {
+ // Populate the vectors.
+ wnaf.base(G::random(&mut rng), 1)
+ .scalar(&G::Scalar::random(&mut rng));
+ }
+ wnaf.scalar(&s).base(g)
+ };
+ let g8 = {
+ let mut wnaf = Wnaf::new();
+ {
+ // Populate the vectors.
+ wnaf.base(G::random(&mut rng), 1)
+ .scalar(&G::Scalar::random(&mut rng));
+ }
+ let mut shared = wnaf.base(g, 1).shared();
+
+ only_compiles_if_send(&shared);
+
+ shared.scalar(&s)
+ };
+ let g9 = {
+ let mut wnaf = Wnaf::new();
+ {
+ // Populate the vectors.
+ wnaf.base(G::random(&mut rng), 1)
+ .scalar(&G::Scalar::random(&mut rng));
+ }
+ let mut shared = wnaf.scalar(&s).shared();
+
+ only_compiles_if_send(&shared);
+
+ shared.base(g)
+ };
+
+ assert_eq!(g1, g2);
+ assert_eq!(g1, g3);
+ assert_eq!(g1, g4);
+ assert_eq!(g1, g5);
+ assert_eq!(g1, g6);
+ assert_eq!(g1, g7);
+ assert_eq!(g1, g8);
+ assert_eq!(g1, g9);
+ }
+ }
+}
+
+fn random_negation_tests<G: PrimeCurve>() {
+ let mut rng = XorShiftRng::from_seed([
+ 0x59, 0x62, 0xbe, 0x5d, 0x76, 0x3d, 0x31, 0x8d, 0x17, 0xdb, 0x37, 0x32, 0x54, 0x06, 0xbc,
+ 0xe5,
+ ]);
+
+ for _ in 0..1000 {
+ let r = G::random(&mut rng);
+
+ let s = G::Scalar::random(&mut rng);
+ let sneg = s.neg();
+
+ let mut t1 = r;
+ t1.mul_assign(s);
+
+ let mut t2 = r;
+ t2.mul_assign(sneg);
+
+ let mut t3 = t1;
+ t3.add_assign(&t2);
+ assert!(bool::from(t3.is_identity()));
+
+ let mut t4 = t1;
+ t4.add_assign(&t2.to_affine());
+ assert!(bool::from(t4.is_identity()));
+
+ assert_eq!(t1.neg(), t2);
+ }
+}
+
+fn random_doubling_tests<G: PrimeCurve>() {
+ let mut rng = XorShiftRng::from_seed([
+ 0x59, 0x62, 0xbe, 0x5d, 0x76, 0x3d, 0x31, 0x8d, 0x17, 0xdb, 0x37, 0x32, 0x54, 0x06, 0xbc,
+ 0xe5,
+ ]);
+
+ for _ in 0..1000 {
+ let mut a = G::random(&mut rng);
+ let mut b = G::random(&mut rng);
+
+ // 2(a + b)
+ let tmp1 = (a + b).double();
+
+ // 2a + 2b
+ a = a.double();
+ b = b.double();
+
+ let mut tmp2 = a;
+ tmp2.add_assign(&b);
+
+ let mut tmp3 = a;
+ tmp3.add_assign(&b.to_affine());
+
+ assert_eq!(tmp1, tmp2);
+ assert_eq!(tmp1, tmp3);
+ }
+}
+
+fn random_multiplication_tests<G: PrimeCurve>() {
+ let mut rng = XorShiftRng::from_seed([
+ 0x59, 0x62, 0xbe, 0x5d, 0x76, 0x3d, 0x31, 0x8d, 0x17, 0xdb, 0x37, 0x32, 0x54, 0x06, 0xbc,
+ 0xe5,
+ ]);
+
+ for _ in 0..1000 {
+ let mut a = G::random(&mut rng);
+ let mut b = G::random(&mut rng);
+ let a_affine = a.to_affine();
+ let b_affine = b.to_affine();
+
+ let s = G::Scalar::random(&mut rng);
+
+ // s ( a + b )
+ let mut tmp1 = a;
+ tmp1.add_assign(&b);
+ tmp1.mul_assign(s);
+
+ // sa + sb
+ a.mul_assign(s);
+ b.mul_assign(s);
+
+ let mut tmp2 = a;
+ tmp2.add_assign(&b);
+
+ // Affine multiplication
+ let mut tmp3 = Mul::<G::Scalar>::mul(a_affine, s);
+ tmp3.add_assign(Mul::<G::Scalar>::mul(b_affine, s));
+
+ assert_eq!(tmp1, tmp2);
+ assert_eq!(tmp1, tmp3);
+ }
+}
+
+fn random_addition_tests<G: PrimeCurve>() {
+ let mut rng = XorShiftRng::from_seed([
+ 0x59, 0x62, 0xbe, 0x5d, 0x76, 0x3d, 0x31, 0x8d, 0x17, 0xdb, 0x37, 0x32, 0x54, 0x06, 0xbc,
+ 0xe5,
+ ]);
+
+ for _ in 0..1000 {
+ let a = G::random(&mut rng);
+ let b = G::random(&mut rng);
+ let c = G::random(&mut rng);
+ let a_affine = a.to_affine();
+ let b_affine = b.to_affine();
+ let c_affine = c.to_affine();
+
+ // a + a should equal the doubling
+ {
+ let mut aplusa = a;
+ aplusa.add_assign(&a);
+
+ let mut aplusamixed = a;
+ aplusamixed.add_assign(&a.to_affine());
+
+ let adouble = a.double();
+
+ assert_eq!(aplusa, adouble);
+ assert_eq!(aplusa, aplusamixed);
+ }
+
+ let mut tmp = vec![G::identity(); 6];
+
+ // (a + b) + c
+ tmp[0] = a;
+ tmp[0].add_assign(&b);
+ tmp[0].add_assign(&c);
+
+ // a + (b + c)
+ tmp[1] = b;
+ tmp[1].add_assign(&c);
+ tmp[1].add_assign(&a);
+
+ // (a + c) + b
+ tmp[2] = a;
+ tmp[2].add_assign(&c);
+ tmp[2].add_assign(&b);
+
+ // Mixed addition
+
+ // (a + b) + c
+ tmp[3] = a_affine.to_curve();
+ tmp[3].add_assign(&b_affine);
+ tmp[3].add_assign(&c_affine);
+
+ // a + (b + c)
+ tmp[4] = b_affine.to_curve();
+ tmp[4].add_assign(&c_affine);
+ tmp[4].add_assign(&a_affine);
+
+ // (a + c) + b
+ tmp[5] = a_affine.to_curve();
+ tmp[5].add_assign(&c_affine);
+ tmp[5].add_assign(&b_affine);
+
+ // Comparisons
+ for i in 0..6 {
+ for j in 0..6 {
+ assert_eq!(tmp[i], tmp[j]);
+ assert_eq!(tmp[i].to_affine(), tmp[j].to_affine());
+ }
+
+ assert!(tmp[i] != a);
+ assert!(tmp[i] != b);
+ assert!(tmp[i] != c);
+
+ assert!(a != tmp[i]);
+ assert!(b != tmp[i]);
+ assert!(c != tmp[i]);
+ }
+ }
+}
+
+fn random_transformation_tests<G: PrimeCurve>() {
+ let mut rng = XorShiftRng::from_seed([
+ 0x59, 0x62, 0xbe, 0x5d, 0x76, 0x3d, 0x31, 0x8d, 0x17, 0xdb, 0x37, 0x32, 0x54, 0x06, 0xbc,
+ 0xe5,
+ ]);
+
+ for _ in 0..1000 {
+ let g = G::random(&mut rng);
+ let g_affine = g.to_affine();
+ let g_projective = g_affine.to_curve();
+ assert_eq!(g, g_projective);
+ }
+
+ // Batch normalization
+ for _ in 0..10 {
+ let mut v = (0..1000).map(|_| G::random(&mut rng)).collect::<Vec<_>>();
+
+ use rand::distributions::{Distribution, Uniform};
+ let between = Uniform::new(0, 1000);
+ // Sprinkle in some normalized points
+ for _ in 0..5 {
+ v[between.sample(&mut rng)] = G::identity();
+ }
+ for _ in 0..5 {
+ let s = between.sample(&mut rng);
+ v[s] = v[s].to_affine().to_curve();
+ }
+
+ let expected_v = v.iter().map(|v| v.to_affine()).collect::<Vec<_>>();
+
+ let mut normalized = vec![G::Affine::identity(); v.len()];
+ G::batch_normalize(&v, &mut normalized);
+
+ assert_eq!(normalized, expected_v);
+ }
+}
+
+fn random_compressed_encoding_tests<G: PrimeCurve>() {
+ let mut rng = XorShiftRng::from_seed([
+ 0x59, 0x62, 0xbe, 0x5d, 0x76, 0x3d, 0x31, 0x8d, 0x17, 0xdb, 0x37, 0x32, 0x54, 0x06, 0xbc,
+ 0xe5,
+ ]);
+
+ assert_eq!(
+ G::Affine::from_bytes(&G::Affine::identity().to_bytes()).unwrap(),
+ G::Affine::identity()
+ );
+
+ for _ in 0..1000 {
+ let mut r = G::random(&mut rng).to_affine();
+
+ let compressed = r.to_bytes();
+ let de_compressed = G::Affine::from_bytes(&compressed).unwrap();
+ assert_eq!(de_compressed, r);
+
+ r = r.neg();
+
+ let compressed = r.to_bytes();
+ let de_compressed = G::Affine::from_bytes(&compressed).unwrap();
+ assert_eq!(de_compressed, r);
+ }
+}
+
+pub fn random_uncompressed_encoding_tests<G: PrimeCurve>()
+where
+ <G as PrimeCurve>::Affine: UncompressedEncoding,
+{
+ let mut rng = XorShiftRng::from_seed([
+ 0x59, 0x62, 0xbe, 0x5d, 0x76, 0x3d, 0x31, 0x8d, 0x17, 0xdb, 0x37, 0x32, 0x54, 0x06, 0xbc,
+ 0xe5,
+ ]);
+
+ assert_eq!(
+ G::Affine::from_uncompressed(&G::Affine::identity().to_uncompressed()).unwrap(),
+ G::Affine::identity()
+ );
+
+ for _ in 0..1000 {
+ let r = G::random(&mut rng).to_affine();
+
+ let uncompressed = r.to_uncompressed();
+ let de_uncompressed = G::Affine::from_uncompressed(&uncompressed).unwrap();
+ assert_eq!(de_uncompressed, r);
+ }
+}
diff --git a/vendor/group/src/wnaf.rs b/vendor/group/src/wnaf.rs
new file mode 100644
index 0000000..175d676
--- /dev/null
+++ b/vendor/group/src/wnaf.rs
@@ -0,0 +1,506 @@
+use alloc::vec::Vec;
+use core::iter;
+use core::marker::PhantomData;
+use core::ops::Mul;
+
+use ff::PrimeField;
+
+use super::Group;
+
+/// Extension trait on a [`Group`] that provides helpers used by [`Wnaf`].
+pub trait WnafGroup: Group {
+ /// Recommends a wNAF window size given the number of scalars you intend to multiply
+ /// a base by. Always returns a number between 2 and 22, inclusive.
+ fn recommended_wnaf_for_num_scalars(num_scalars: usize) -> usize;
+}
+
+/// Replaces the contents of `table` with a w-NAF window table for the given window size.
+pub(crate) fn wnaf_table<G: Group>(table: &mut Vec<G>, mut base: G, window: usize) {
+ table.truncate(0);
+ table.reserve(1 << (window - 1));
+
+ let dbl = base.double();
+
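+    // The table holds the odd multiples: base, 3*base, 5*base, ..., (2^window - 1)*base.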
+ for _ in 0..(1 << (window - 1)) {
+ table.push(base);
+ base.add_assign(&dbl);
+ }
+}
+
+/// This struct represents a view of a sequence of bytes as a sequence of
+/// `u64` limbs in little-endian byte order. It maintains a current index, and
+/// allows access to the limb at that index and the one following it. Bytes
+/// beyond the end of the original buffer are treated as zero.
+struct LimbBuffer<'a> {
+ buf: &'a [u8],
+ cur_idx: usize,
+ cur_limb: u64,
+ next_limb: u64,
+}
+
+impl<'a> LimbBuffer<'a> {
+ fn new(buf: &'a [u8]) -> Self {
+ let mut ret = Self {
+ buf,
+ cur_idx: 0,
+ cur_limb: 0,
+ next_limb: 0,
+ };
+
+ // Initialise the limb buffers.
+ ret.increment_limb();
+ ret.increment_limb();
+ ret.cur_idx = 0usize;
+
+ ret
+ }
+
+ fn increment_limb(&mut self) {
+ self.cur_idx += 1;
+ self.cur_limb = self.next_limb;
+ match self.buf.len() {
+ // There are no more bytes in the buffer; zero-extend.
+ 0 => self.next_limb = 0,
+
+ // There are fewer bytes in the buffer than a u64 limb; zero-extend.
+ x @ 1..=7 => {
+ let mut next_limb = [0; 8];
+ next_limb[..x].copy_from_slice(self.buf);
+ self.next_limb = u64::from_le_bytes(next_limb);
+ self.buf = &[];
+ }
+
+ // There are at least eight bytes in the buffer; read the next u64 limb.
+ _ => {
+ let (next_limb, rest) = self.buf.split_at(8);
+ self.next_limb = u64::from_le_bytes(next_limb.try_into().unwrap());
+ self.buf = rest;
+ }
+ }
+ }
+
+ fn get(&mut self, idx: usize) -> (u64, u64) {
+ assert!([self.cur_idx, self.cur_idx + 1].contains(&idx));
+ if idx > self.cur_idx {
+ self.increment_limb();
+ }
+ (self.cur_limb, self.next_limb)
+ }
+}
+
+/// Replaces the contents of `wnaf` with the w-NAF representation of a little-endian
+/// scalar.
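+///
+/// Worked example (window = 2, so nonzero digits are odd and lie in {-1, +1}): the
+/// scalar 7 yields the digits [-1, 0, 0, 1, 0, ...]. The lowest window holds
+/// 7 mod 4 = 3, which is rewritten as the digit -1 plus a carry; the carry
+/// propagates and lands as the +1 digit at position 3, recovering 7 = -1 + 8.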
+pub(crate) fn wnaf_form<S: AsRef<[u8]>>(wnaf: &mut Vec<i64>, c: S, window: usize) {
+ // Required by the NAF definition
+ debug_assert!(window >= 2);
+ // Required so that the NAF digits fit in i64
+ debug_assert!(window <= 64);
+
+ let bit_len = c.as_ref().len() * 8;
+
+ wnaf.truncate(0);
+ wnaf.reserve(bit_len);
+
+ // Initialise the current and next limb buffers.
+ let mut limbs = LimbBuffer::new(c.as_ref());
+
+ let width = 1u64 << window;
+ let window_mask = width - 1;
+
+ let mut pos = 0;
+ let mut carry = 0;
+ while pos < bit_len {
+ // Construct a buffer of bits of the scalar, starting at bit `pos`
+ let u64_idx = pos / 64;
+ let bit_idx = pos % 64;
+ let (cur_u64, next_u64) = limbs.get(u64_idx);
+ let bit_buf = if bit_idx + window < 64 {
+ // This window's bits are contained in a single u64
+ cur_u64 >> bit_idx
+ } else {
+ // Combine the current u64's bits with the bits from the next u64
+ (cur_u64 >> bit_idx) | (next_u64 << (64 - bit_idx))
+ };
+
+ // Add the carry into the current window
+ let window_val = carry + (bit_buf & window_mask);
+
+ if window_val & 1 == 0 {
+ // If the window value is even, preserve the carry and emit 0.
+ // Why is the carry preserved?
+ // If carry == 0 and window_val & 1 == 0, then the next carry should be 0
+ // If carry == 1 and window_val & 1 == 0, then bit_buf & 1 == 1 so the next carry should be 1
+ wnaf.push(0);
+ pos += 1;
+ } else {
+ wnaf.push(if window_val < width / 2 {
+ carry = 0;
+ window_val as i64
+ } else {
+ carry = 1;
+ (window_val as i64).wrapping_sub(width as i64)
+ });
+ wnaf.extend(iter::repeat(0).take(window - 1));
+ pos += window;
+ }
+ }
+}
+
+/// Performs w-NAF exponentiation with the provided window table and w-NAF form scalar.
+///
+/// This function must be provided a `table` and `wnaf` that were constructed with
+/// the same window size; otherwise, it may panic or produce invalid results.
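+///
+/// Continuing the window-2 example from `wnaf_form`: the digits of 7 are consumed
+/// most-significant first, computing ((B doubled three times) - B) = 8B - B = 7B.
+/// The index `table[n / 2]` works because the table stores only odd multiples of
+/// the base.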
+pub(crate) fn wnaf_exp<G: Group>(table: &[G], wnaf: &[i64]) -> G {
+ let mut result = G::identity();
+
+ let mut found_one = false;
+
+ for n in wnaf.iter().rev() {
+ if found_one {
+ result = result.double();
+ }
+
+ if *n != 0 {
+ found_one = true;
+
+ if *n > 0 {
+ result += &table[(n / 2) as usize];
+ } else {
+ result -= &table[((-n) / 2) as usize];
+ }
+ }
+ }
+
+ result
+}
+
+/// A "w-ary non-adjacent form" scalar multiplication (also known as exponentiation)
+/// context.
+///
+/// # Examples
+///
+/// This struct can be used to implement several patterns:
+///
+/// ## One base, one scalar
+///
+/// For this pattern, you can use a transient `Wnaf` context:
+///
+/// ```ignore
+/// use group::Wnaf;
+///
+/// let result = Wnaf::new().scalar(&scalar).base(base);
+/// ```
+///
+/// ## Many bases, one scalar
+///
+/// For this pattern, you create a `Wnaf` context, load the scalar into it, and then
+/// process each base in turn:
+///
+/// ```ignore
+/// use group::Wnaf;
+///
+/// let mut wnaf = Wnaf::new();
+/// let mut wnaf_scalar = wnaf.scalar(&scalar);
+/// let results: Vec<_> = bases
+/// .into_iter()
+/// .map(|base| wnaf_scalar.base(base))
+/// .collect();
+/// ```
+///
+/// ## One base, many scalars
+///
+/// For this pattern, you create a `Wnaf` context, load the base into it, and then process
+/// each scalar in turn:
+///
+/// ```ignore
+/// use group::Wnaf;
+///
+/// let mut wnaf = Wnaf::new();
+/// let mut wnaf_base = wnaf.base(base, scalars.len());
+/// let results: Vec<_> = scalars
+/// .iter()
+/// .map(|scalar| wnaf_base.scalar(scalar))
+/// .collect();
+/// ```
+///
+/// ## Many bases, many scalars
+///
+/// Say you have `n` bases and `m` scalars, and want to produce `n * m` results. For this
+/// pattern, you need to cache the w-NAF tables for the bases and then compute the w-NAF
+/// form of the scalars on the fly for every base, or vice versa:
+///
+/// ```ignore
+/// use group::Wnaf;
+///
+/// let mut wnaf_contexts: Vec<_> = (0..bases.len()).map(|_| Wnaf::new()).collect();
+/// let mut wnaf_bases: Vec<_> = wnaf_contexts
+/// .iter_mut()
+/// .zip(bases)
+/// .map(|(wnaf, base)| wnaf.base(base, scalars.len()))
+/// .collect();
+/// let results: Vec<_> = wnaf_bases
+/// .iter()
+/// .flat_map(|wnaf_base| scalars.iter().map(|scalar| wnaf_base.scalar(scalar)))
+/// .collect();
+/// ```
+///
+/// Alternatively, use the [`WnafBase`] and [`WnafScalar`] types, which enable the various
+/// tables and w-NAF forms to be cached individually per base and scalar. These types can
+/// then be directly multiplied without any additional runtime work, at the cost of fixing
+/// a specific window size (rather than choosing the window size dynamically).
+#[derive(Debug)]
+pub struct Wnaf<W, B, S> {
+ base: B,
+ scalar: S,
+ window_size: W,
+}
+
+impl<G: Group> Wnaf<(), Vec<G>, Vec<i64>> {
+ /// Construct a new wNAF context without allocating.
+ pub fn new() -> Self {
+ Wnaf {
+ base: vec![],
+ scalar: vec![],
+ window_size: (),
+ }
+ }
+}
+
+#[cfg(feature = "wnaf-memuse")]
+impl<G: Group + memuse::DynamicUsage> memuse::DynamicUsage for Wnaf<(), Vec<G>, Vec<i64>> {
+ fn dynamic_usage(&self) -> usize {
+ self.base.dynamic_usage() + self.scalar.dynamic_usage()
+ }
+
+ fn dynamic_usage_bounds(&self) -> (usize, Option<usize>) {
+ let (base_lower, base_upper) = self.base.dynamic_usage_bounds();
+ let (scalar_lower, scalar_upper) = self.scalar.dynamic_usage_bounds();
+
+ (
+ base_lower + scalar_lower,
+ base_upper.zip(scalar_upper).map(|(a, b)| a + b),
+ )
+ }
+}
+
+impl<G: WnafGroup> Wnaf<(), Vec<G>, Vec<i64>> {
+ /// Given a base and a number of scalars, compute a window table and return a `Wnaf` object that
+ /// can perform exponentiations with `.scalar(..)`.
+ pub fn base(&mut self, base: G, num_scalars: usize) -> Wnaf<usize, &[G], &mut Vec<i64>> {
+ // Compute the appropriate window size based on the number of scalars.
+ let window_size = G::recommended_wnaf_for_num_scalars(num_scalars);
+
+ // Compute a wNAF table for the provided base and window size.
+ wnaf_table(&mut self.base, base, window_size);
+
+ // Return a Wnaf object that immutably borrows the computed base storage location,
+ // but mutably borrows the scalar storage location.
+ Wnaf {
+ base: &self.base[..],
+ scalar: &mut self.scalar,
+ window_size,
+ }
+ }
+
+ /// Given a scalar, compute its wNAF representation and return a `Wnaf` object that can perform
+ /// exponentiations with `.base(..)`.
+ pub fn scalar(&mut self, scalar: &<G as Group>::Scalar) -> Wnaf<usize, &mut Vec<G>, &[i64]> {
+ // We hard-code a window size of 4.
+ let window_size = 4;
+
+ // Compute the wNAF form of the scalar.
+ wnaf_form(&mut self.scalar, scalar.to_repr(), window_size);
+
+ // Return a Wnaf object that mutably borrows the base storage location, but
+ // immutably borrows the computed wNAF form scalar location.
+ Wnaf {
+ base: &mut self.base,
+ scalar: &self.scalar[..],
+ window_size,
+ }
+ }
+}
+
+impl<'a, G: Group> Wnaf<usize, &'a [G], &'a mut Vec<i64>> {
+ /// Constructs new space for the scalar representation while borrowing
+ /// the computed window table, for sending the window table across threads.
+ pub fn shared(&self) -> Wnaf<usize, &'a [G], Vec<i64>> {
+ Wnaf {
+ base: self.base,
+ scalar: vec![],
+ window_size: self.window_size,
+ }
+ }
+}
+
+#[cfg(feature = "wnaf-memuse")]
+impl<'a, G: Group> memuse::DynamicUsage for Wnaf<usize, &'a [G], Vec<i64>> {
+ fn dynamic_usage(&self) -> usize {
+ // The heap memory for the window table is counted in the parent `Wnaf`.
+ self.scalar.dynamic_usage()
+ }
+
+ fn dynamic_usage_bounds(&self) -> (usize, Option<usize>) {
+ self.scalar.dynamic_usage_bounds()
+ }
+}
+
+impl<'a, G: Group> Wnaf<usize, &'a mut Vec<G>, &'a [i64]> {
+ /// Constructs new space for the window table while borrowing
+ /// the computed scalar representation, for sending the scalar representation
+ /// across threads.
+ pub fn shared(&self) -> Wnaf<usize, Vec<G>, &'a [i64]> {
+ Wnaf {
+ base: vec![],
+ scalar: self.scalar,
+ window_size: self.window_size,
+ }
+ }
+}
+
+#[cfg(feature = "wnaf-memuse")]
+impl<'a, G: Group + memuse::DynamicUsage> memuse::DynamicUsage for Wnaf<usize, Vec<G>, &'a [i64]> {
+ fn dynamic_usage(&self) -> usize {
+ // The heap memory for the scalar representation is counted in the parent `Wnaf`.
+ self.base.dynamic_usage()
+ }
+
+ fn dynamic_usage_bounds(&self) -> (usize, Option<usize>) {
+ self.base.dynamic_usage_bounds()
+ }
+}
+
+impl<B, S: AsRef<[i64]>> Wnaf<usize, B, S> {
+ /// Performs exponentiation given a base.
+ pub fn base<G: Group>(&mut self, base: G) -> G
+ where
+ B: AsMut<Vec<G>>,
+ {
+ wnaf_table(self.base.as_mut(), base, self.window_size);
+ wnaf_exp(self.base.as_mut(), self.scalar.as_ref())
+ }
+}
+
+impl<B, S: AsMut<Vec<i64>>> Wnaf<usize, B, S> {
+ /// Performs exponentiation given a scalar.
+ pub fn scalar<G: Group>(&mut self, scalar: &<G as Group>::Scalar) -> G
+ where
+ B: AsRef<[G]>,
+ {
+ wnaf_form(self.scalar.as_mut(), scalar.to_repr(), self.window_size);
+ wnaf_exp(self.base.as_ref(), self.scalar.as_mut())
+ }
+}
+
+/// A "w-ary non-adjacent form" scalar, that uses precomputation to improve the speed of
+/// scalar multiplication.
+///
+/// # Examples
+///
+/// See [`WnafBase`] for usage examples.
+#[derive(Clone, Debug)]
+pub struct WnafScalar<F: PrimeField, const WINDOW_SIZE: usize> {
+ wnaf: Vec<i64>,
+ field: PhantomData<F>,
+}
+
+#[cfg(feature = "wnaf-memuse")]
+impl<F: PrimeField, const WINDOW_SIZE: usize> memuse::DynamicUsage for WnafScalar<F, WINDOW_SIZE> {
+ fn dynamic_usage(&self) -> usize {
+ self.wnaf.dynamic_usage()
+ }
+
+ fn dynamic_usage_bounds(&self) -> (usize, Option<usize>) {
+ self.wnaf.dynamic_usage_bounds()
+ }
+}
+
+impl<F: PrimeField, const WINDOW_SIZE: usize> WnafScalar<F, WINDOW_SIZE> {
+ /// Computes the w-NAF representation of the given scalar with the specified
+ /// `WINDOW_SIZE`.
+ pub fn new(scalar: &F) -> Self {
+ let mut wnaf = vec![];
+
+ // Compute the w-NAF form of the scalar.
+ wnaf_form(&mut wnaf, scalar.to_repr(), WINDOW_SIZE);
+
+ WnafScalar {
+ wnaf,
+            field: PhantomData,
+ }
+ }
+}
+
+/// A fixed window table for a group element, precomputed to improve the speed of scalar
+/// multiplication.
+///
+/// This struct is designed for usage patterns that have long-term cached bases and/or
+/// scalars, or [Cartesian products] of bases and scalars. The [`Wnaf`] API enables one or
+/// the other to be cached, but requires either the base window tables or the scalar w-NAF
+/// forms to be computed repeatedly on the fly, which can become a significant performance
+/// issue for some use cases.
+///
+/// `WnafBase` and [`WnafScalar`] enable an alternative trade-off: by fixing the window
+/// size at compile time, the precomputations are guaranteed to only occur once per base
+/// and once per scalar. Users should select their window size based on how long the bases
+/// are expected to live; a larger window size will consume more memory and take longer to
+/// precompute, but result in faster scalar multiplications.
+///
+/// [Cartesian products]: https://en.wikipedia.org/wiki/Cartesian_product
+///
+/// # Examples
+///
+/// ```ignore
+/// use group::{WnafBase, WnafScalar};
+///
+/// let wnaf_bases: Vec<_> = bases.into_iter().map(WnafBase::<_, 4>::new).collect();
+/// let wnaf_scalars: Vec<_> = scalars.iter().map(WnafScalar::new).collect();
+/// let results: Vec<_> = wnaf_bases
+/// .iter()
+/// .flat_map(|base| wnaf_scalars.iter().map(|scalar| base * scalar))
+/// .collect();
+/// ```
+///
+/// Note that this pattern requires specifying a fixed window size (unlike previous
+/// patterns that picked a suitable window size internally). This is necessary to ensure
+/// in the type system that the base and scalar `Wnaf`s were computed with the same window
+/// size, allowing the result to be computed infallibly.
+#[derive(Clone, Debug)]
+pub struct WnafBase<G: Group, const WINDOW_SIZE: usize> {
+ table: Vec<G>,
+}
+
+#[cfg(feature = "wnaf-memuse")]
+impl<G: Group + memuse::DynamicUsage, const WINDOW_SIZE: usize> memuse::DynamicUsage
+ for WnafBase<G, WINDOW_SIZE>
+{
+ fn dynamic_usage(&self) -> usize {
+ self.table.dynamic_usage()
+ }
+
+ fn dynamic_usage_bounds(&self) -> (usize, Option<usize>) {
+ self.table.dynamic_usage_bounds()
+ }
+}
+
+impl<G: Group, const WINDOW_SIZE: usize> WnafBase<G, WINDOW_SIZE> {
+ /// Computes a window table for the given base with the specified `WINDOW_SIZE`.
+ pub fn new(base: G) -> Self {
+ let mut table = vec![];
+
+ // Compute a window table for the provided base and window size.
+ wnaf_table(&mut table, base, WINDOW_SIZE);
+
+ WnafBase { table }
+ }
+}
+
+impl<G: Group, const WINDOW_SIZE: usize> Mul<&WnafScalar<G::Scalar, WINDOW_SIZE>>
+ for &WnafBase<G, WINDOW_SIZE>
+{
+ type Output = G;
+
+ fn mul(self, rhs: &WnafScalar<G::Scalar, WINDOW_SIZE>) -> Self::Output {
+ wnaf_exp(&self.table, &rhs.wnaf)
+ }
+}