path: root/vendor/rand_core
author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-17 12:19:13 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-17 12:19:13 +0000
commit     218caa410aa38c29984be31a5229b9fa717560ee (patch)
tree       c54bd55eeb6e4c508940a30e94c0032fbd45d677 /vendor/rand_core
parent     Releasing progress-linux version 1.67.1+dfsg1-1~progress7.99u1. (diff)
download   rustc-218caa410aa38c29984be31a5229b9fa717560ee.tar.xz
           rustc-218caa410aa38c29984be31a5229b9fa717560ee.zip
Merging upstream version 1.68.2+dfsg1.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'vendor/rand_core')
-rw-r--r--  vendor/rand_core/.cargo-checksum.json |   2
-rw-r--r--  vendor/rand_core/CHANGELOG.md         |  10
-rw-r--r--  vendor/rand_core/Cargo.toml           |  43
-rw-r--r--  vendor/rand_core/LICENSE-APACHE       |  14
-rw-r--r--  vendor/rand_core/src/block.rs         | 134
-rw-r--r--  vendor/rand_core/src/error.rs         |   2
-rw-r--r--  vendor/rand_core/src/impls.rs         |  83
-rw-r--r--  vendor/rand_core/src/lib.rs           |  46
8 files changed, 257 insertions, 77 deletions
diff --git a/vendor/rand_core/.cargo-checksum.json b/vendor/rand_core/.cargo-checksum.json
index 140f5b6ca..067463e3e 100644
--- a/vendor/rand_core/.cargo-checksum.json
+++ b/vendor/rand_core/.cargo-checksum.json
@@ -1 +1 @@
-{"files":{"CHANGELOG.md":"bf45fe5169dfbec63635b1fc4b6d51f3ec06fbc034ede0791c16a5878db2444f","COPYRIGHT":"90eb64f0279b0d9432accfa6023ff803bc4965212383697eee27a0f426d5f8d5","Cargo.toml":"a319b64f513a62f2250165a4f4d578d04b6af127ce067328f0a9373b0ff5a1aa","LICENSE-APACHE":"aaff376532ea30a0cd5330b9502ad4a4c8bf769c539c87ffe78819d188a18ebf","LICENSE-MIT":"209fbbe0ad52d9235e37badf9cadfe4dbdc87203179c0899e738b39ade42177b","README.md":"bb3bd3831adc9eaabbcea108ab7f02f5837e9d2f81e872ffd7d340ad466df4de","src/block.rs":"09c479be71069f757e7a03d20078d26ac6acfe922a3207359a9c6ff48ccbcc65","src/error.rs":"0b6d5a188a256fa367dfa368e7dd846b58977a957489c03902307eb78ce4c9e8","src/impls.rs":"d6a97255d92c06bdd1a54590648bbe4cfb41111ac9e761496baf6b7eb5e92f6a","src/le.rs":"f302239d09cc8d915aa8d4fe46c32c8981900cb20d42b12eef9c34e2e820bc88","src/lib.rs":"40476377ed3da9e3589f058aaf8ec27ffeb12b0b179240a31259dc7f816fce5f","src/os.rs":"47849479e19c43dd01259eb14be7b4daf8474d23567dc32a1547c10b108b7069"},"package":"34cf66eb183df1c5876e2dcf6b13d57340741e8dc255b48e40a26de954d06ae7"} \ No newline at end of file
+{"files":{"CHANGELOG.md":"a33a4318dd822ef757ec79c5e6027d50d9feee5c40f85d9aadf0f598eb7ef1b7","COPYRIGHT":"90eb64f0279b0d9432accfa6023ff803bc4965212383697eee27a0f426d5f8d5","Cargo.toml":"4a6762f8364d1a60f1436f4ba7d710de22465722340d2028101fcbd68e89396f","LICENSE-APACHE":"6df43f6f4b5d4587f3d8d71e45532c688fd168afa5fe89d571cb32fa09c4ef51","LICENSE-MIT":"209fbbe0ad52d9235e37badf9cadfe4dbdc87203179c0899e738b39ade42177b","README.md":"bb3bd3831adc9eaabbcea108ab7f02f5837e9d2f81e872ffd7d340ad466df4de","src/block.rs":"c0b606dc404a1f4b25eebf388e9c0da583ee571214cdcb0bac1b592450d6b4fa","src/error.rs":"c34af905a9ffbae65970f508a9e74480b56747128d05ad350475150898fc6452","src/impls.rs":"b861532f8a3500de6bd0e926b3677a15261df4b12d253e4a8fd6acc5e64f1d36","src/le.rs":"f302239d09cc8d915aa8d4fe46c32c8981900cb20d42b12eef9c34e2e820bc88","src/lib.rs":"df270489b859465fce144098d5e709b318d9f3b703a92f4a28e5f74334119107","src/os.rs":"47849479e19c43dd01259eb14be7b4daf8474d23567dc32a1547c10b108b7069"},"package":"ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c"} \ No newline at end of file
diff --git a/vendor/rand_core/CHANGELOG.md b/vendor/rand_core/CHANGELOG.md
index 23d1fa5a1..75fcbc667 100644
--- a/vendor/rand_core/CHANGELOG.md
+++ b/vendor/rand_core/CHANGELOG.md
@@ -4,6 +4,16 @@ All notable changes to this project will be documented in this file.
The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/)
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
+## [0.6.4] - 2022-09-15
+- Fix unsoundness in `<BlockRng64 as RngCore>::next_u32` (#1160)
+- Reduce use of `unsafe` and improve gen_bytes performance (#1180)
+- Add `CryptoRngCore` trait (#1187, #1230)
+
+## [0.6.3] - 2021-06-15
+### Changed
+- Improved bound for `serde` impls on `BlockRng` (#1130)
+- Minor doc additions (#1118)
+
## [0.6.2] - 2021-02-12
### Fixed
- Fixed assertions in `le::read_u32_into` and `le::read_u64_into` which could
diff --git a/vendor/rand_core/Cargo.toml b/vendor/rand_core/Cargo.toml
index 51b8385ea..fd8c96d9b 100644
--- a/vendor/rand_core/Cargo.toml
+++ b/vendor/rand_core/Cargo.toml
@@ -3,32 +3,47 @@
# When uploading crates to the registry Cargo will automatically
# "normalize" Cargo.toml files for maximal compatibility
# with all versions of Cargo and also rewrite `path` dependencies
-# to registry (e.g., crates.io) dependencies
+# to registry (e.g., crates.io) dependencies.
#
-# If you believe there's an error in this file please file an
-# issue against the rust-lang/cargo repository. If you're
-# editing this file be aware that the upstream Cargo.toml
-# will likely look very different (and much more reasonable)
+# If you are reading this file be aware that the original Cargo.toml
+# will likely look very different (and much more reasonable).
+# See Cargo.toml.orig for the original contents.
[package]
edition = "2018"
name = "rand_core"
-version = "0.6.2"
-authors = ["The Rand Project Developers", "The Rust Project Developers"]
-description = "Core random number generator traits and tools for implementation.\n"
+version = "0.6.4"
+authors = [
+ "The Rand Project Developers",
+ "The Rust Project Developers",
+]
+description = """
+Core random number generator traits and tools for implementation.
+"""
homepage = "https://rust-random.github.io/book"
documentation = "https://docs.rs/rand_core"
readme = "README.md"
-keywords = ["random", "rng"]
-categories = ["algorithms", "no-std"]
+keywords = [
+ "random",
+ "rng",
+]
+categories = [
+ "algorithms",
+ "no-std",
+]
license = "MIT OR Apache-2.0"
repository = "https://github.com/rust-random/rand"
+
[package.metadata.docs.rs]
all-features = true
-rustdoc-args = ["--cfg", "doc_cfg"]
+rustdoc-args = [
+ "--cfg",
+ "doc_cfg",
+]
[package.metadata.playground]
all-features = true
+
[dependencies.getrandom]
version = "0.2"
optional = true
@@ -41,4 +56,8 @@ optional = true
[features]
alloc = []
serde1 = ["serde"]
-std = ["alloc", "getrandom", "getrandom/std"]
+std = [
+ "alloc",
+ "getrandom",
+ "getrandom/std",
+]
diff --git a/vendor/rand_core/LICENSE-APACHE b/vendor/rand_core/LICENSE-APACHE
index 17d74680f..455787c23 100644
--- a/vendor/rand_core/LICENSE-APACHE
+++ b/vendor/rand_core/LICENSE-APACHE
@@ -185,17 +185,3 @@ APPENDIX: How to apply the Apache License to your work.
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
-
-Copyright [yyyy] [name of copyright owner]
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- https://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
diff --git a/vendor/rand_core/src/block.rs b/vendor/rand_core/src/block.rs
index 005d071fb..d311b68cf 100644
--- a/vendor/rand_core/src/block.rs
+++ b/vendor/rand_core/src/block.rs
@@ -95,7 +95,7 @@ pub trait BlockRngCore {
/// [`fill_bytes`] / [`try_fill_bytes`] is called on a large array. These methods
/// also handle the bookkeeping of when to generate a new batch of values.
///
-/// No whole generated `u32` values are thown away and all values are consumed
+/// No whole generated `u32` values are thrown away and all values are consumed
/// in-order. [`next_u32`] simply takes the next available `u32` value.
/// [`next_u64`] is implemented by combining two `u32` values, least
/// significant first. [`fill_bytes`] and [`try_fill_bytes`] consume a whole
@@ -114,6 +114,12 @@ pub trait BlockRngCore {
/// [`try_fill_bytes`]: RngCore::try_fill_bytes
#[derive(Clone)]
#[cfg_attr(feature = "serde1", derive(Serialize, Deserialize))]
+#[cfg_attr(
+ feature = "serde1",
+ serde(
+ bound = "for<'x> R: Serialize + Deserialize<'x> + Sized, for<'x> R::Results: Serialize + Deserialize<'x>"
+ )
+)]
pub struct BlockRng<R: BlockRngCore + ?Sized> {
results: R::Results,
index: usize,
@@ -346,27 +352,21 @@ where
{
#[inline]
fn next_u32(&mut self) -> u32 {
- let mut index = self.index * 2 - self.half_used as usize;
- if index >= self.results.as_ref().len() * 2 {
+ let mut index = self.index - self.half_used as usize;
+ if index >= self.results.as_ref().len() {
self.core.generate(&mut self.results);
self.index = 0;
+ index = 0;
// `self.half_used` is by definition `false`
self.half_used = false;
- index = 0;
}
+ let shift = 32 * (self.half_used as usize);
+
self.half_used = !self.half_used;
self.index += self.half_used as usize;
- // Index as if this is a u32 slice.
- unsafe {
- let results = &*(self.results.as_ref() as *const [u64] as *const [u32]);
- if cfg!(target_endian = "little") {
- *results.get_unchecked(index)
- } else {
- *results.get_unchecked(index ^ 1)
- }
- }
+ (self.results.as_ref()[index] >> shift) as u32
}
#[inline]
@@ -429,3 +429,111 @@ impl<R: BlockRngCore + SeedableRng> SeedableRng for BlockRng64<R> {
}
impl<R: BlockRngCore + CryptoRng> CryptoRng for BlockRng<R> {}
+
+#[cfg(test)]
+mod test {
+ use crate::{SeedableRng, RngCore};
+ use crate::block::{BlockRng, BlockRng64, BlockRngCore};
+
+ #[derive(Debug, Clone)]
+ struct DummyRng {
+ counter: u32,
+ }
+
+ impl BlockRngCore for DummyRng {
+ type Item = u32;
+
+ type Results = [u32; 16];
+
+ fn generate(&mut self, results: &mut Self::Results) {
+ for r in results {
+ *r = self.counter;
+ self.counter = self.counter.wrapping_add(3511615421);
+ }
+ }
+ }
+
+ impl SeedableRng for DummyRng {
+ type Seed = [u8; 4];
+
+ fn from_seed(seed: Self::Seed) -> Self {
+ DummyRng { counter: u32::from_le_bytes(seed) }
+ }
+ }
+
+ #[test]
+ fn blockrng_next_u32_vs_next_u64() {
+ let mut rng1 = BlockRng::<DummyRng>::from_seed([1, 2, 3, 4]);
+ let mut rng2 = rng1.clone();
+ let mut rng3 = rng1.clone();
+
+ let mut a = [0; 16];
+ (&mut a[..4]).copy_from_slice(&rng1.next_u32().to_le_bytes());
+ (&mut a[4..12]).copy_from_slice(&rng1.next_u64().to_le_bytes());
+ (&mut a[12..]).copy_from_slice(&rng1.next_u32().to_le_bytes());
+
+ let mut b = [0; 16];
+ (&mut b[..4]).copy_from_slice(&rng2.next_u32().to_le_bytes());
+ (&mut b[4..8]).copy_from_slice(&rng2.next_u32().to_le_bytes());
+ (&mut b[8..]).copy_from_slice(&rng2.next_u64().to_le_bytes());
+ assert_eq!(a, b);
+
+ let mut c = [0; 16];
+ (&mut c[..8]).copy_from_slice(&rng3.next_u64().to_le_bytes());
+ (&mut c[8..12]).copy_from_slice(&rng3.next_u32().to_le_bytes());
+ (&mut c[12..]).copy_from_slice(&rng3.next_u32().to_le_bytes());
+ assert_eq!(a, c);
+ }
+
+ #[derive(Debug, Clone)]
+ struct DummyRng64 {
+ counter: u64,
+ }
+
+ impl BlockRngCore for DummyRng64 {
+ type Item = u64;
+
+ type Results = [u64; 8];
+
+ fn generate(&mut self, results: &mut Self::Results) {
+ for r in results {
+ *r = self.counter;
+ self.counter = self.counter.wrapping_add(2781463553396133981);
+ }
+ }
+ }
+
+ impl SeedableRng for DummyRng64 {
+ type Seed = [u8; 8];
+
+ fn from_seed(seed: Self::Seed) -> Self {
+ DummyRng64 { counter: u64::from_le_bytes(seed) }
+ }
+ }
+
+ #[test]
+ fn blockrng64_next_u32_vs_next_u64() {
+ let mut rng1 = BlockRng64::<DummyRng64>::from_seed([1, 2, 3, 4, 5, 6, 7, 8]);
+ let mut rng2 = rng1.clone();
+ let mut rng3 = rng1.clone();
+
+ let mut a = [0; 16];
+ (&mut a[..4]).copy_from_slice(&rng1.next_u32().to_le_bytes());
+ (&mut a[4..12]).copy_from_slice(&rng1.next_u64().to_le_bytes());
+ (&mut a[12..]).copy_from_slice(&rng1.next_u32().to_le_bytes());
+
+ let mut b = [0; 16];
+ (&mut b[..4]).copy_from_slice(&rng2.next_u32().to_le_bytes());
+ (&mut b[4..8]).copy_from_slice(&rng2.next_u32().to_le_bytes());
+ (&mut b[8..]).copy_from_slice(&rng2.next_u64().to_le_bytes());
+ assert_ne!(a, b);
+ assert_eq!(&a[..4], &b[..4]);
+ assert_eq!(&a[4..12], &b[8..]);
+
+ let mut c = [0; 16];
+ (&mut c[..8]).copy_from_slice(&rng3.next_u64().to_le_bytes());
+ (&mut c[8..12]).copy_from_slice(&rng3.next_u32().to_le_bytes());
+ (&mut c[12..]).copy_from_slice(&rng3.next_u32().to_le_bytes());
+ assert_eq!(b, c);
+ }
+}
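Note on the block.rs hunk above: the 0.6.4 fix replaces the unchecked reinterpretation of the `u64` results buffer as a `[u32]` with direct indexing plus a 32-bit shift, consuming each word least-significant half first. The following standalone sketch reproduces that consumption scheme outside the crate; `HalfWordCursor` and `regenerate` are illustrative names, not part of rand_core.

```rust
// Minimal sketch of the half-word consumption used by the patched
// BlockRng64::next_u32 above. HalfWordCursor is a made-up stand-in type.
struct HalfWordCursor {
    results: [u64; 4], // stand-in for R::Results
    index: usize,
    half_used: bool,
}

impl HalfWordCursor {
    fn regenerate(&mut self) {
        // A real BlockRng64 would call self.core.generate(&mut self.results).
        for (i, r) in self.results.iter_mut().enumerate() {
            *r = 0x1111_1111_1111_1111u64.wrapping_mul(i as u64 + 1);
        }
    }

    fn next_u32(&mut self) -> u32 {
        let mut index = self.index - self.half_used as usize;
        if index >= self.results.len() {
            self.regenerate();
            self.index = 0;
            index = 0;
            self.half_used = false;
        }
        // Select the low or high half of the current u64 via a shift,
        // instead of casting the buffer to *const [u32] as the old code did.
        let shift = 32 * (self.half_used as usize);
        self.half_used = !self.half_used;
        self.index += self.half_used as usize;
        (self.results[index] >> shift) as u32
    }
}

fn main() {
    // Start "exhausted" so the first call refills the buffer, as BlockRng64 does.
    let mut c = HalfWordCursor { results: [0; 4], index: 4, half_used: false };
    let lo = c.next_u32();
    let hi = c.next_u32();
    // Two consecutive u32 values are the halves of results[0], least significant first.
    assert_eq!(((hi as u64) << 32) | lo as u64, c.results[0]);
    println!("{:#018x} -> {:#010x}, {:#010x}", c.results[0], lo, hi);
}
```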
diff --git a/vendor/rand_core/src/error.rs b/vendor/rand_core/src/error.rs
index a64c430da..411896f2c 100644
--- a/vendor/rand_core/src/error.rs
+++ b/vendor/rand_core/src/error.rs
@@ -82,7 +82,7 @@ impl Error {
///
/// This method is identical to `std::io::Error::raw_os_error()`, except
/// that it works in `no_std` contexts. If this method returns `None`, the
- /// error value can still be formatted via the `Diplay` implementation.
+ /// error value can still be formatted via the `Display` implementation.
#[inline]
pub fn raw_os_error(&self) -> Option<i32> {
#[cfg(feature = "std")]
diff --git a/vendor/rand_core/src/impls.rs b/vendor/rand_core/src/impls.rs
index 2588a72ea..4b7688c5c 100644
--- a/vendor/rand_core/src/impls.rs
+++ b/vendor/rand_core/src/impls.rs
@@ -52,36 +52,59 @@ pub fn fill_bytes_via_next<R: RngCore + ?Sized>(rng: &mut R, dest: &mut [u8]) {
}
}
-macro_rules! fill_via_chunks {
- ($src:expr, $dst:expr, $ty:ty) => {{
- const SIZE: usize = core::mem::size_of::<$ty>();
- let chunk_size_u8 = min($src.len() * SIZE, $dst.len());
- let chunk_size = (chunk_size_u8 + SIZE - 1) / SIZE;
-
- // The following can be replaced with safe code, but unfortunately it's
- // ca. 8% slower.
- if cfg!(target_endian = "little") {
- unsafe {
- core::ptr::copy_nonoverlapping(
- $src.as_ptr() as *const u8,
- $dst.as_mut_ptr(),
- chunk_size_u8);
- }
- } else {
- for (&n, chunk) in $src.iter().zip($dst.chunks_mut(SIZE)) {
- let tmp = n.to_le();
- let src_ptr = &tmp as *const $ty as *const u8;
- unsafe {
- core::ptr::copy_nonoverlapping(
- src_ptr,
- chunk.as_mut_ptr(),
- chunk.len());
- }
- }
+trait Observable: Copy {
+ type Bytes: AsRef<[u8]>;
+ fn to_le_bytes(self) -> Self::Bytes;
+
+ // Contract: observing self is memory-safe (implies no uninitialised padding)
+ fn as_byte_slice(x: &[Self]) -> &[u8];
+}
+impl Observable for u32 {
+ type Bytes = [u8; 4];
+ fn to_le_bytes(self) -> Self::Bytes {
+ self.to_le_bytes()
+ }
+ fn as_byte_slice(x: &[Self]) -> &[u8] {
+ let ptr = x.as_ptr() as *const u8;
+ let len = x.len() * core::mem::size_of::<Self>();
+ unsafe { core::slice::from_raw_parts(ptr, len) }
+ }
+}
+impl Observable for u64 {
+ type Bytes = [u8; 8];
+ fn to_le_bytes(self) -> Self::Bytes {
+ self.to_le_bytes()
+ }
+ fn as_byte_slice(x: &[Self]) -> &[u8] {
+ let ptr = x.as_ptr() as *const u8;
+ let len = x.len() * core::mem::size_of::<Self>();
+ unsafe { core::slice::from_raw_parts(ptr, len) }
+ }
+}
+
+fn fill_via_chunks<T: Observable>(src: &[T], dest: &mut [u8]) -> (usize, usize) {
+ let size = core::mem::size_of::<T>();
+ let byte_len = min(src.len() * size, dest.len());
+ let num_chunks = (byte_len + size - 1) / size;
+
+ if cfg!(target_endian = "little") {
+ // On LE we can do a simple copy, which is 25-50% faster:
+ dest[..byte_len].copy_from_slice(&T::as_byte_slice(&src[..num_chunks])[..byte_len]);
+ } else {
+ // This code is valid on all arches, but slower than the above:
+ let mut i = 0;
+ let mut iter = dest[..byte_len].chunks_exact_mut(size);
+ for chunk in &mut iter {
+ chunk.copy_from_slice(src[i].to_le_bytes().as_ref());
+ i += 1;
}
+ let chunk = iter.into_remainder();
+ if !chunk.is_empty() {
+ chunk.copy_from_slice(&src[i].to_le_bytes().as_ref()[..chunk.len()]);
+ }
+ }
- (chunk_size, chunk_size_u8)
- }};
+ (num_chunks, byte_len)
}
/// Implement `fill_bytes` by reading chunks from the output buffer of a block
@@ -115,7 +138,7 @@ macro_rules! fill_via_chunks {
/// }
/// ```
pub fn fill_via_u32_chunks(src: &[u32], dest: &mut [u8]) -> (usize, usize) {
- fill_via_chunks!(src, dest, u32)
+ fill_via_chunks(src, dest)
}
/// Implement `fill_bytes` by reading chunks from the output buffer of a block
@@ -129,7 +152,7 @@ pub fn fill_via_u32_chunks(src: &[u32], dest: &mut [u8]) -> (usize, usize) {
///
/// See `fill_via_u32_chunks` for an example.
pub fn fill_via_u64_chunks(src: &[u64], dest: &mut [u8]) -> (usize, usize) {
- fill_via_chunks!(src, dest, u64)
+ fill_via_chunks(src, dest)
}
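The portable branch of the new `fill_via_chunks` above copies each word's little-endian bytes with `chunks_exact_mut` and finishes a partially consumed word via `into_remainder`. A standalone sketch of that pattern follows; `copy_words_le` is an illustrative name, not a rand_core API.

```rust
// Endian-portable copy of u32 words into a byte buffer, mirroring the
// non-little-endian path of fill_via_chunks above. Returns the number of
// words consumed and bytes written, like fill_via_u32_chunks.
fn copy_words_le(src: &[u32], dest: &mut [u8]) -> (usize, usize) {
    let size = core::mem::size_of::<u32>();
    let byte_len = src.len().saturating_mul(size).min(dest.len());
    let num_chunks = (byte_len + size - 1) / size; // words fully or partially used

    let mut i = 0;
    let mut iter = dest[..byte_len].chunks_exact_mut(size);
    for chunk in &mut iter {
        chunk.copy_from_slice(&src[i].to_le_bytes());
        i += 1;
    }
    let tail = iter.into_remainder();
    if !tail.is_empty() {
        // Partially consumed word: take only as many LE bytes as fit.
        tail.copy_from_slice(&src[i].to_le_bytes()[..tail.len()]);
    }
    (num_chunks, byte_len)
}

fn main() {
    let words = [0x0403_0201u32, 0x0807_0605, 0x0c0b_0a09];
    let mut out = [0u8; 10]; // deliberately not a multiple of 4
    let (consumed, filled) = copy_words_le(&words, &mut out);
    assert_eq!((consumed, filled), (3, 10));
    assert_eq!(out, [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]);
    println!("consumed {consumed} words, filled {filled} bytes: {out:?}");
}
```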
/// Implement `next_u32` via `fill_bytes`, little-endian order.
diff --git a/vendor/rand_core/src/lib.rs b/vendor/rand_core/src/lib.rs
index 7e847ae49..1234a566c 100644
--- a/vendor/rand_core/src/lib.rs
+++ b/vendor/rand_core/src/lib.rs
@@ -76,12 +76,17 @@ pub mod le;
/// [`next_u32`] or [`next_u64`] since the latter methods are almost always used
/// with algorithmic generators (PRNGs), which are normally infallible.
///
+/// Implementers should produce bits uniformly. Pathological RNGs (e.g. always
+/// returning the same value, or never setting certain bits) can break rejection
+/// sampling used by random distributions, and also break other RNGs when
+/// seeding them via [`SeedableRng::from_rng`].
+///
/// Algorithmic generators implementing [`SeedableRng`] should normally have
/// *portable, reproducible* output, i.e. fix Endianness when converting values
/// to avoid platform differences, and avoid making any changes which affect
/// output (except by communicating that the release has breaking changes).
///
-/// Typically implementators will implement only one of the methods available
+/// Typically an RNG will implement only one of the methods available
/// in this trait directly, then use the helper functions from the
/// [`impls`] module to implement the other methods.
///
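The reworded doc above ("Typically an RNG will implement only one of the methods ... then use the helper functions from the impls module") refers to `rand_core::impls`. A minimal sketch of that pattern, closely following the toy counter example used in the rand project documentation (`CountingRng` is a deterministic counter, not a usable RNG):

```rust
use rand_core::{impls, Error, RngCore};

// Toy generator: next_u64 is implemented directly, the remaining RngCore
// methods are derived from it via the helpers in rand_core::impls.
struct CountingRng(u64);

impl RngCore for CountingRng {
    fn next_u32(&mut self) -> u32 {
        self.next_u64() as u32
    }

    fn next_u64(&mut self) -> u64 {
        self.0 = self.0.wrapping_add(1);
        self.0
    }

    fn fill_bytes(&mut self, dest: &mut [u8]) {
        impls::fill_bytes_via_next(self, dest)
    }

    fn try_fill_bytes(&mut self, dest: &mut [u8]) -> Result<(), Error> {
        self.fill_bytes(dest);
        Ok(())
    }
}

fn main() {
    let mut rng = CountingRng(0);
    let mut buf = [0u8; 8];
    rng.fill_bytes(&mut buf);
    println!("{buf:?}, next_u64 = {}", rng.next_u64());
}
```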
@@ -191,7 +196,7 @@ pub trait RngCore {
/// Some generators may satisfy an additional property, however this is not
/// required by this trait: if the CSPRNG's state is revealed, it should not be
/// computationally-feasible to reconstruct output prior to this. Some other
-/// generators allow backwards-computation and are consided *reversible*.
+/// generators allow backwards-computation and are considered *reversible*.
///
/// Note that this trait is provided for guidance only and cannot guarantee
/// suitability for cryptographic applications. In general it should only be
@@ -203,6 +208,35 @@ pub trait RngCore {
/// [`BlockRngCore`]: block::BlockRngCore
pub trait CryptoRng {}
+/// An extension trait that is automatically implemented for any type
+/// implementing [`RngCore`] and [`CryptoRng`].
+///
+/// It may be used as a trait object, and supports upcasting to [`RngCore`] via
+/// the [`CryptoRngCore::as_rngcore`] method.
+///
+/// # Example
+///
+/// ```
+/// use rand_core::CryptoRngCore;
+///
+/// #[allow(unused)]
+/// fn make_token(rng: &mut dyn CryptoRngCore) -> [u8; 32] {
+/// let mut buf = [0u8; 32];
+/// rng.fill_bytes(&mut buf);
+/// buf
+/// }
+/// ```
+pub trait CryptoRngCore: CryptoRng + RngCore {
+ /// Upcast to an [`RngCore`] trait object.
+ fn as_rngcore(&mut self) -> &mut dyn RngCore;
+}
+
+impl<T: CryptoRng + RngCore> CryptoRngCore for T {
+ fn as_rngcore(&mut self) -> &mut dyn RngCore {
+ self
+ }
+}
+
/// A random number generator that can be explicitly seeded.
///
/// This trait encapsulates the low-level functionality common to all
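For context on the `CryptoRngCore` addition above: the trait is blanket-implemented for any `CryptoRng + RngCore` type, works as a trait object, and `as_rngcore` upcasts it where only `RngCore` is needed. A hedged usage sketch, assuming the `getrandom` feature (for `OsRng`); `fill_with_plain_rng` is a made-up helper, not a rand_core API.

```rust
use rand_core::{CryptoRngCore, OsRng, RngCore};

// Code that does not care about cryptographic strength takes plain RngCore.
fn fill_with_plain_rng(rng: &mut dyn RngCore, buf: &mut [u8]) {
    rng.fill_bytes(buf);
}

// Mirrors the make_token example in the new doc comment above, but forwards
// to RngCore-only code via the as_rngcore upcast.
fn make_token(rng: &mut dyn CryptoRngCore) -> [u8; 32] {
    let mut buf = [0u8; 32];
    fill_with_plain_rng(rng.as_rngcore(), &mut buf);
    buf
}

fn main() {
    // OsRng: CryptoRng + RngCore, so it coerces to &mut dyn CryptoRngCore.
    let token = make_token(&mut OsRng);
    println!("generated {} token bytes", token.len());
}
```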
@@ -210,7 +244,7 @@ pub trait CryptoRng {}
///
/// [`rand`]: https://docs.rs/rand
pub trait SeedableRng: Sized {
- /// Seed type, which is restricted to types mutably-dereferencable as `u8`
+ /// Seed type, which is restricted to types mutably-dereferenceable as `u8`
/// arrays (we recommend `[u8; N]` for some `N`).
///
/// It is recommended to seed PRNGs with a seed of at least circa 100 bits,
@@ -443,10 +477,10 @@ impl std::io::Read for dyn RngCore {
}
}
-// Implement `CryptoRng` for references to an `CryptoRng`.
+// Implement `CryptoRng` for references to a `CryptoRng`.
impl<'a, R: CryptoRng + ?Sized> CryptoRng for &'a mut R {}
-// Implement `CryptoRng` for boxed references to an `CryptoRng`.
+// Implement `CryptoRng` for boxed references to a `CryptoRng`.
#[cfg(feature = "alloc")]
impl<R: CryptoRng + ?Sized> CryptoRng for Box<R> {}
@@ -480,7 +514,7 @@ mod test {
// This is the binomial distribution B(64, 0.5), so chance of
// weight < 20 is binocdf(19, 64, 0.5) = 7.8e-4, and same for
// weight > 44.
- assert!(weight >= 20 && weight <= 44);
+ assert!((20..=44).contains(&weight));
for (i2, r2) in results.iter().enumerate() {
if i1 == i2 {