Diffstat (limited to 'vendor/rand_core/src')
-rw-r--r--   vendor/rand_core/src/block.rs   134
-rw-r--r--   vendor/rand_core/src/error.rs     2
-rw-r--r--   vendor/rand_core/src/impls.rs    83
-rw-r--r--   vendor/rand_core/src/lib.rs      46
4 files changed, 215 insertions, 50 deletions
diff --git a/vendor/rand_core/src/block.rs b/vendor/rand_core/src/block.rs
index 005d071fb..d311b68cf 100644
--- a/vendor/rand_core/src/block.rs
+++ b/vendor/rand_core/src/block.rs
@@ -95,7 +95,7 @@ pub trait BlockRngCore {
/// [`fill_bytes`] / [`try_fill_bytes`] is called on a large array. These methods
/// also handle the bookkeeping of when to generate a new batch of values.
///
-/// No whole generated `u32` values are thown away and all values are consumed
+/// No whole generated `u32` values are thrown away and all values are consumed
/// in-order. [`next_u32`] simply takes the next available `u32` value.
/// [`next_u64`] is implemented by combining two `u32` values, least
/// significant first. [`fill_bytes`] and [`try_fill_bytes`] consume a whole
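
To make the combining rule above concrete, here is a small sketch (illustrative only, not part of this diff) of how two consecutive `u32` outputs form a `u64`, least significant half first:

```rust
// Illustration of the documented rule: next_u64 = two u32 outputs,
// least significant first.
fn combine(lo: u32, hi: u32) -> u64 {
    (u64::from(hi) << 32) | u64::from(lo)
}

fn main() {
    assert_eq!(combine(0xdead_beef, 0x0123_4567), 0x0123_4567_dead_beef);
}
```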
@@ -114,6 +114,12 @@ pub trait BlockRngCore {
/// [`try_fill_bytes`]: RngCore::try_fill_bytes
#[derive(Clone)]
#[cfg_attr(feature = "serde1", derive(Serialize, Deserialize))]
+#[cfg_attr(
+    feature = "serde1",
+    serde(
+        bound = "for<'x> R: Serialize + Deserialize<'x> + Sized, for<'x> R::Results: Serialize + Deserialize<'x>"
+    )
+)]
 pub struct BlockRng<R: BlockRngCore + ?Sized> {
     results: R::Results,
     index: usize,
@@ -346,27 +352,21 @@ where
{
#[inline]
fn next_u32(&mut self) -> u32 {
- let mut index = self.index * 2 - self.half_used as usize;
- if index >= self.results.as_ref().len() * 2 {
+ let mut index = self.index - self.half_used as usize;
+ if index >= self.results.as_ref().len() {
self.core.generate(&mut self.results);
self.index = 0;
+ index = 0;
// `self.half_used` is by definition `false`
self.half_used = false;
- index = 0;
}
+ let shift = 32 * (self.half_used as usize);
+
self.half_used = !self.half_used;
self.index += self.half_used as usize;
- // Index as if this is a u32 slice.
- unsafe {
- let results = &*(self.results.as_ref() as *const [u64] as *const [u32]);
- if cfg!(target_endian = "little") {
- *results.get_unchecked(index)
- } else {
- *results.get_unchecked(index ^ 1)
- }
- }
+ (self.results.as_ref()[index] >> shift) as u32
}
#[inline]
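
The rewritten `next_u32` above replaces an unsafe `&[u64]`-to-`&[u32]` pointer cast with a plain shift, which behaves identically on little- and big-endian targets. A minimal model of the new indexing scheme (illustrative values; the regeneration branch is omitted):

```rust
// Each u64 word yields two u32 halves, low half first; `half_used`
// records whether the low half of the current word is consumed.
fn main() {
    let results: [u64; 2] = [0x1111_2222_3333_4444, 0x5555_6666_7777_8888];
    let mut index = 0usize;
    let mut half_used = false;
    let mut next_u32 = || {
        let i = index - half_used as usize;
        let shift = 32 * (half_used as usize);
        half_used = !half_used;
        index += half_used as usize;
        (results[i] >> shift) as u32
    };
    assert_eq!(next_u32(), 0x3333_4444); // low half of word 0
    assert_eq!(next_u32(), 0x1111_2222); // high half of word 0
    assert_eq!(next_u32(), 0x7777_8888); // low half of word 1
}
```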
@@ -429,3 +429,111 @@ impl<R: BlockRngCore + SeedableRng> SeedableRng for BlockRng64<R> {
 }

 impl<R: BlockRngCore + CryptoRng> CryptoRng for BlockRng64<R> {}
+
+#[cfg(test)]
+mod test {
+    use crate::{SeedableRng, RngCore};
+    use crate::block::{BlockRng, BlockRng64, BlockRngCore};
+
+    #[derive(Debug, Clone)]
+    struct DummyRng {
+        counter: u32,
+    }
+
+    impl BlockRngCore for DummyRng {
+        type Item = u32;
+
+        type Results = [u32; 16];
+
+        fn generate(&mut self, results: &mut Self::Results) {
+            for r in results {
+                *r = self.counter;
+                self.counter = self.counter.wrapping_add(3511615421);
+            }
+        }
+    }
+
+    impl SeedableRng for DummyRng {
+        type Seed = [u8; 4];
+
+        fn from_seed(seed: Self::Seed) -> Self {
+            DummyRng { counter: u32::from_le_bytes(seed) }
+        }
+    }
+
+    #[test]
+    fn blockrng_next_u32_vs_next_u64() {
+        let mut rng1 = BlockRng::<DummyRng>::from_seed([1, 2, 3, 4]);
+        let mut rng2 = rng1.clone();
+        let mut rng3 = rng1.clone();
+
+        let mut a = [0; 16];
+        (&mut a[..4]).copy_from_slice(&rng1.next_u32().to_le_bytes());
+        (&mut a[4..12]).copy_from_slice(&rng1.next_u64().to_le_bytes());
+        (&mut a[12..]).copy_from_slice(&rng1.next_u32().to_le_bytes());
+
+        let mut b = [0; 16];
+        (&mut b[..4]).copy_from_slice(&rng2.next_u32().to_le_bytes());
+        (&mut b[4..8]).copy_from_slice(&rng2.next_u32().to_le_bytes());
+        (&mut b[8..]).copy_from_slice(&rng2.next_u64().to_le_bytes());
+        assert_eq!(a, b);
+
+        let mut c = [0; 16];
+        (&mut c[..8]).copy_from_slice(&rng3.next_u64().to_le_bytes());
+        (&mut c[8..12]).copy_from_slice(&rng3.next_u32().to_le_bytes());
+        (&mut c[12..]).copy_from_slice(&rng3.next_u32().to_le_bytes());
+        assert_eq!(a, c);
+    }
+
+    #[derive(Debug, Clone)]
+    struct DummyRng64 {
+        counter: u64,
+    }
+
+    impl BlockRngCore for DummyRng64 {
+        type Item = u64;
+
+        type Results = [u64; 8];
+
+        fn generate(&mut self, results: &mut Self::Results) {
+            for r in results {
+                *r = self.counter;
+                self.counter = self.counter.wrapping_add(2781463553396133981);
+            }
+        }
+    }
+
+    impl SeedableRng for DummyRng64 {
+        type Seed = [u8; 8];
+
+        fn from_seed(seed: Self::Seed) -> Self {
+            DummyRng64 { counter: u64::from_le_bytes(seed) }
+        }
+    }
+
+    #[test]
+    fn blockrng64_next_u32_vs_next_u64() {
+        let mut rng1 = BlockRng64::<DummyRng64>::from_seed([1, 2, 3, 4, 5, 6, 7, 8]);
+        let mut rng2 = rng1.clone();
+        let mut rng3 = rng1.clone();
+
+        let mut a = [0; 16];
+        (&mut a[..4]).copy_from_slice(&rng1.next_u32().to_le_bytes());
+        (&mut a[4..12]).copy_from_slice(&rng1.next_u64().to_le_bytes());
+        (&mut a[12..]).copy_from_slice(&rng1.next_u32().to_le_bytes());
+
+        let mut b = [0; 16];
+        (&mut b[..4]).copy_from_slice(&rng2.next_u32().to_le_bytes());
+        (&mut b[4..8]).copy_from_slice(&rng2.next_u32().to_le_bytes());
+        (&mut b[8..]).copy_from_slice(&rng2.next_u64().to_le_bytes());
+        assert_ne!(a, b);
+        assert_eq!(&a[..4], &b[..4]);
+        assert_eq!(&a[4..12], &b[8..]);
+
+        let mut c = [0; 16];
+        (&mut c[..8]).copy_from_slice(&rng3.next_u64().to_le_bytes());
+        (&mut c[8..12]).copy_from_slice(&rng3.next_u32().to_le_bytes());
+        (&mut c[12..]).copy_from_slice(&rng3.next_u32().to_le_bytes());
+        assert_eq!(b, c);
+    }
+}
diff --git a/vendor/rand_core/src/error.rs b/vendor/rand_core/src/error.rs
index a64c430da..411896f2c 100644
--- a/vendor/rand_core/src/error.rs
+++ b/vendor/rand_core/src/error.rs
@@ -82,7 +82,7 @@ impl Error {
///
/// This method is identical to `std::io::Error::raw_os_error()`, except
/// that it works in `no_std` contexts. If this method returns `None`, the
- /// error value can still be formatted via the `Diplay` implementation.
+ /// error value can still be formatted via the `Display` implementation.
#[inline]
pub fn raw_os_error(&self) -> Option<i32> {
#[cfg(feature = "std")]
diff --git a/vendor/rand_core/src/impls.rs b/vendor/rand_core/src/impls.rs
index 2588a72ea..4b7688c5c 100644
--- a/vendor/rand_core/src/impls.rs
+++ b/vendor/rand_core/src/impls.rs
@@ -52,36 +52,59 @@ pub fn fill_bytes_via_next<R: RngCore + ?Sized>(rng: &mut R, dest: &mut [u8]) {
     }
 }

-macro_rules! fill_via_chunks {
-    ($src:expr, $dst:expr, $ty:ty) => {{
-        const SIZE: usize = core::mem::size_of::<$ty>();
-        let chunk_size_u8 = min($src.len() * SIZE, $dst.len());
-        let chunk_size = (chunk_size_u8 + SIZE - 1) / SIZE;
-
-        // The following can be replaced with safe code, but unfortunately it's
-        // ca. 8% slower.
-        if cfg!(target_endian = "little") {
-            unsafe {
-                core::ptr::copy_nonoverlapping(
-                    $src.as_ptr() as *const u8,
-                    $dst.as_mut_ptr(),
-                    chunk_size_u8);
-            }
-        } else {
-            for (&n, chunk) in $src.iter().zip($dst.chunks_mut(SIZE)) {
-                let tmp = n.to_le();
-                let src_ptr = &tmp as *const $ty as *const u8;
-                unsafe {
-                    core::ptr::copy_nonoverlapping(
-                        src_ptr,
-                        chunk.as_mut_ptr(),
-                        chunk.len());
-                }
-            }
+trait Observable: Copy {
+    type Bytes: AsRef<[u8]>;
+    fn to_le_bytes(self) -> Self::Bytes;
+
+    // Contract: observing self is memory-safe (implies no uninitialised padding)
+    fn as_byte_slice(x: &[Self]) -> &[u8];
+}
+impl Observable for u32 {
+    type Bytes = [u8; 4];
+    fn to_le_bytes(self) -> Self::Bytes {
+        self.to_le_bytes()
+    }
+    fn as_byte_slice(x: &[Self]) -> &[u8] {
+        let ptr = x.as_ptr() as *const u8;
+        let len = x.len() * core::mem::size_of::<Self>();
+        unsafe { core::slice::from_raw_parts(ptr, len) }
+    }
+}
+impl Observable for u64 {
+    type Bytes = [u8; 8];
+    fn to_le_bytes(self) -> Self::Bytes {
+        self.to_le_bytes()
+    }
+    fn as_byte_slice(x: &[Self]) -> &[u8] {
+        let ptr = x.as_ptr() as *const u8;
+        let len = x.len() * core::mem::size_of::<Self>();
+        unsafe { core::slice::from_raw_parts(ptr, len) }
+    }
+}
+
+fn fill_via_chunks<T: Observable>(src: &[T], dest: &mut [u8]) -> (usize, usize) {
+    let size = core::mem::size_of::<T>();
+    let byte_len = min(src.len() * size, dest.len());
+    let num_chunks = (byte_len + size - 1) / size;
+
+    if cfg!(target_endian = "little") {
+        // On LE we can do a simple copy, which is 25-50% faster:
+        dest[..byte_len].copy_from_slice(&T::as_byte_slice(&src[..num_chunks])[..byte_len]);
+    } else {
+        // This code is valid on all arches, but slower than the above:
+        let mut i = 0;
+        let mut iter = dest[..byte_len].chunks_exact_mut(size);
+        for chunk in &mut iter {
+            chunk.copy_from_slice(src[i].to_le_bytes().as_ref());
+            i += 1;
         }
+        let chunk = iter.into_remainder();
+        if !chunk.is_empty() {
+            chunk.copy_from_slice(&src[i].to_le_bytes().as_ref()[..chunk.len()]);
+        }
+    }
-        (chunk_size, chunk_size_u8)
-    }};
+    (num_chunks, byte_len)
 }
/// Implement `fill_bytes` by reading chunks from the output buffer of a block
@@ -115,7 +138,7 @@ macro_rules! fill_via_chunks {
/// }
/// ```
 pub fn fill_via_u32_chunks(src: &[u32], dest: &mut [u8]) -> (usize, usize) {
-    fill_via_chunks!(src, dest, u32)
+    fill_via_chunks(src, dest)
 }
/// Implement `fill_bytes` by reading chunks from the output buffer of a block
@@ -129,7 +152,7 @@ pub fn fill_via_u32_chunks(src: &[u32], dest: &mut [u8]) -> (usize, usize) {
///
/// See `fill_via_u32_chunks` for an example.
 pub fn fill_via_u64_chunks(src: &[u64], dest: &mut [u8]) -> (usize, usize) {
-    fill_via_chunks!(src, dest, u64)
+    fill_via_chunks(src, dest)
 }
/// Implement `next_u32` via `fill_bytes`, little-endian order.
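
A usage sketch for the `(consumed, filled)` contract of these helpers (values chosen for illustration; uses the public `rand_core::impls` API as of this version):

```rust
use rand_core::impls::fill_via_u32_chunks;

fn main() {
    // Four u32 words, but only 10 output bytes requested:
    let src = [0x0403_0201u32, 0x0807_0605, 0x0c0b_0a09, 0x100f_0e0d];
    let mut dest = [0u8; 10];
    let (consumed, filled) = fill_via_u32_chunks(&src, &mut dest);
    // Three words consumed (the third only partially), 10 bytes filled.
    assert_eq!((consumed, filled), (3, 10));
    // Output is little-endian on every platform:
    assert_eq!(dest, [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]);
}
```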
diff --git a/vendor/rand_core/src/lib.rs b/vendor/rand_core/src/lib.rs
index 7e847ae49..1234a566c 100644
--- a/vendor/rand_core/src/lib.rs
+++ b/vendor/rand_core/src/lib.rs
@@ -76,12 +76,17 @@ pub mod le;
/// [`next_u32`] or [`next_u64`] since the latter methods are almost always used
/// with algorithmic generators (PRNGs), which are normally infallible.
///
+/// Implementers should produce bits uniformly. Pathological RNGs (e.g. always
+/// returning the same value, or never setting certain bits) can break rejection
+/// sampling used by random distributions, and also break other RNGs when
+/// seeding them via [`SeedableRng::from_rng`].
+///
/// Algorithmic generators implementing [`SeedableRng`] should normally have
/// *portable, reproducible* output, i.e. fix Endianness when converting values
/// to avoid platform differences, and avoid making any changes which affect
/// output (except by communicating that the release has breaking changes).
///
-/// Typically implementators will implement only one of the methods available
+/// Typically an RNG will implement only one of the methods available
/// in this trait directly, then use the helper functions from the
/// [`impls`] module to implement the other methods.
///
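
A sketch of that pattern with a toy (decidedly non-random) generator, as in the crate-level documentation: implement one method directly and derive the rest from the `impls` helpers:

```rust
use rand_core::{impls, Error, RngCore};

// Toy generator for illustration only.
struct CountingRng(u64);

impl RngCore for CountingRng {
    fn next_u32(&mut self) -> u32 {
        self.next_u64() as u32
    }

    fn next_u64(&mut self) -> u64 {
        self.0 = self.0.wrapping_add(1);
        self.0
    }

    fn fill_bytes(&mut self, dest: &mut [u8]) {
        impls::fill_bytes_via_next(self, dest)
    }

    fn try_fill_bytes(&mut self, dest: &mut [u8]) -> Result<(), Error> {
        self.fill_bytes(dest);
        Ok(())
    }
}
```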
@@ -191,7 +196,7 @@ pub trait RngCore {
/// Some generators may satisfy an additional property, however this is not
/// required by this trait: if the CSPRNG's state is revealed, it should not be
/// computationally-feasible to reconstruct output prior to this. Some other
-/// generators allow backwards-computation and are consided *reversible*.
+/// generators allow backwards-computation and are considered *reversible*.
///
/// Note that this trait is provided for guidance only and cannot guarantee
/// suitability for cryptographic applications. In general it should only be
@@ -203,6 +208,35 @@ pub trait RngCore {
/// [`BlockRngCore`]: block::BlockRngCore
pub trait CryptoRng {}
+/// An extension trait that is automatically implemented for any type
+/// implementing [`RngCore`] and [`CryptoRng`].
+///
+/// It may be used as a trait object, and supports upcasting to [`RngCore`] via
+/// the [`CryptoRngCore::as_rngcore`] method.
+///
+/// # Example
+///
+/// ```
+/// use rand_core::CryptoRngCore;
+///
+/// #[allow(unused)]
+/// fn make_token(rng: &mut dyn CryptoRngCore) -> [u8; 32] {
+///     let mut buf = [0u8; 32];
+///     rng.fill_bytes(&mut buf);
+///     buf
+/// }
+/// ```
+pub trait CryptoRngCore: CryptoRng + RngCore {
+    /// Upcast to an [`RngCore`] trait object.
+    fn as_rngcore(&mut self) -> &mut dyn RngCore;
+}
+
+impl<T: CryptoRng + RngCore> CryptoRngCore for T {
+    fn as_rngcore(&mut self) -> &mut dyn RngCore {
+        self
+    }
+}
+
/// A random number generator that can be explicitly seeded.
///
/// This trait encapsulates the low-level functionality common to all
@@ -210,7 +244,7 @@ pub trait CryptoRng {}
///
/// [`rand`]: https://docs.rs/rand
pub trait SeedableRng: Sized {
- /// Seed type, which is restricted to types mutably-dereferencable as `u8`
+ /// Seed type, which is restricted to types mutably-dereferenceable as `u8`
/// arrays (we recommend `[u8; N]` for some `N`).
///
/// It is recommended to seed PRNGs with a seed of at least circa 100 bits,
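
A sketch of the seeding pattern this section describes, reusing the toy `CountingRng` from the earlier sketch; decoding the seed with a fixed byte order keeps output portable across platforms:

```rust
use rand_core::SeedableRng;

// (CountingRng as defined in the RngCore sketch above.)
impl SeedableRng for CountingRng {
    type Seed = [u8; 8];

    fn from_seed(seed: Self::Seed) -> Self {
        // Fixed little-endian decoding: the same seed reproduces the
        // same stream regardless of platform endianness.
        CountingRng(u64::from_le_bytes(seed))
    }
}
```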
@@ -443,10 +477,10 @@ impl std::io::Read for dyn RngCore {
     }
 }
-// Implement `CryptoRng` for references to an `CryptoRng`.
+// Implement `CryptoRng` for references to a `CryptoRng`.
impl<'a, R: CryptoRng + ?Sized> CryptoRng for &'a mut R {}
-// Implement `CryptoRng` for boxed references to an `CryptoRng`.
+// Implement `CryptoRng` for boxed references to a `CryptoRng`.
#[cfg(feature = "alloc")]
impl<R: CryptoRng + ?Sized> CryptoRng for Box<R> {}
@@ -480,7 +514,7 @@ mod test {
         // This is the binomial distribution B(64, 0.5), so chance of
         // weight < 20 is binocdf(19, 64, 0.5) = 7.8e-4, and same for
         // weight > 44.
-        assert!(weight >= 20 && weight <= 44);
+        assert!((20..=44).contains(&weight));
         for (i2, r2) in results.iter().enumerate() {
             if i1 == i2 {