author     Daniel Baumann <daniel.baumann@progress-linux.org>   2024-04-19 00:47:55 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>   2024-04-19 00:47:55 +0000
commit     26a029d407be480d791972afb5975cf62c9360a6 (patch)
tree       f435a8308119effd964b339f76abb83a57c29483 /third_party/rust/rand/src/rngs/adapter
parent     Initial commit. (diff)
Adding upstream version 124.0.1. (upstream/124.0.1)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'third_party/rust/rand/src/rngs/adapter')
-rw-r--r--  third_party/rust/rand/src/rngs/adapter/mod.rs          16
-rw-r--r--  third_party/rust/rand/src/rngs/adapter/read.rs        150
-rw-r--r--  third_party/rust/rand/src/rngs/adapter/reseeding.rs   386
3 files changed, 552 insertions, 0 deletions
diff --git a/third_party/rust/rand/src/rngs/adapter/mod.rs b/third_party/rust/rand/src/rngs/adapter/mod.rs
new file mode 100644
index 0000000000..bd1d294323
--- /dev/null
+++ b/third_party/rust/rand/src/rngs/adapter/mod.rs
@@ -0,0 +1,16 @@
+// Copyright 2018 Developers of the Rand project.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! Wrappers / adapters forming RNGs
+
+mod read;
+mod reseeding;
+
+#[allow(deprecated)]
+pub use self::read::{ReadError, ReadRng};
+pub use self::reseeding::ReseedingRng;
diff --git a/third_party/rust/rand/src/rngs/adapter/read.rs b/third_party/rust/rand/src/rngs/adapter/read.rs
new file mode 100644
index 0000000000..25a9ca7fca
--- /dev/null
+++ b/third_party/rust/rand/src/rngs/adapter/read.rs
@@ -0,0 +1,150 @@
+// Copyright 2018 Developers of the Rand project.
+// Copyright 2013 The Rust Project Developers.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! A wrapper around any Read to treat it as an RNG.
+
+#![allow(deprecated)]
+
+use std::fmt;
+use std::io::Read;
+
+use rand_core::{impls, Error, RngCore};
+
+
+/// An RNG that reads random bytes straight from any type supporting
+/// [`std::io::Read`], for example files.
+///
+/// This will work best with an infinite reader, but that is not required.
+///
+/// This can be used with `/dev/urandom` on Unix but it is recommended to use
+/// [`OsRng`] instead.
+///
+/// # Panics
+///
+/// `ReadRng` uses [`std::io::Read::read_exact`], which retries on interrupts.
+/// All other errors from the underlying reader, including when it does not
+/// have enough data, will only be reported through [`try_fill_bytes`].
+/// The other [`RngCore`] methods will panic in case of an error.
+///
+/// [`OsRng`]: crate::rngs::OsRng
+/// [`try_fill_bytes`]: RngCore::try_fill_bytes
+#[derive(Debug)]
+#[deprecated(since="0.8.4", note="removal due to lack of usage")]
+pub struct ReadRng<R> {
+ reader: R,
+}
+
+impl<R: Read> ReadRng<R> {
+ /// Create a new `ReadRng` from a `Read`.
+ pub fn new(r: R) -> ReadRng<R> {
+ ReadRng { reader: r }
+ }
+}
+
+impl<R: Read> RngCore for ReadRng<R> {
+ fn next_u32(&mut self) -> u32 {
+ impls::next_u32_via_fill(self)
+ }
+
+ fn next_u64(&mut self) -> u64 {
+ impls::next_u64_via_fill(self)
+ }
+
+ fn fill_bytes(&mut self, dest: &mut [u8]) {
+ self.try_fill_bytes(dest).unwrap_or_else(|err| {
+ panic!(
+ "reading random bytes from Read implementation failed; error: {}",
+ err
+ )
+ });
+ }
+
+ fn try_fill_bytes(&mut self, dest: &mut [u8]) -> Result<(), Error> {
+ if dest.is_empty() {
+ return Ok(());
+ }
+ // Use `Read::read_exact`, which retries on `ErrorKind::Interrupted`.
+ self.reader
+ .read_exact(dest)
+ .map_err(|e| Error::new(ReadError(e)))
+ }
+}
+
+/// `ReadRng` error type
+#[derive(Debug)]
+#[deprecated(since="0.8.4")]
+pub struct ReadError(std::io::Error);
+
+impl fmt::Display for ReadError {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ write!(f, "ReadError: {}", self.0)
+ }
+}
+
+impl std::error::Error for ReadError {
+ fn source(&self) -> Option<&(dyn std::error::Error + 'static)> {
+ Some(&self.0)
+ }
+}
+
+
+#[cfg(test)]
+mod test {
+ use std::println;
+
+ use super::ReadRng;
+ use crate::RngCore;
+
+ #[test]
+ fn test_reader_rng_u64() {
+ // transmute from the target to avoid endianness concerns.
+ #[rustfmt::skip]
+ let v = [0u8, 0, 0, 0, 0, 0, 0, 1,
+ 0, 4, 0, 0, 3, 0, 0, 2,
+ 5, 0, 0, 0, 0, 0, 0, 0];
+ let mut rng = ReadRng::new(&v[..]);
+
+ assert_eq!(rng.next_u64(), 1 << 56);
+ assert_eq!(rng.next_u64(), (2 << 56) + (3 << 32) + (4 << 8));
+ assert_eq!(rng.next_u64(), 5);
+ }
+
+ #[test]
+ fn test_reader_rng_u32() {
+ let v = [0u8, 0, 0, 1, 0, 0, 2, 0, 3, 0, 0, 0];
+ let mut rng = ReadRng::new(&v[..]);
+
+ assert_eq!(rng.next_u32(), 1 << 24);
+ assert_eq!(rng.next_u32(), 2 << 16);
+ assert_eq!(rng.next_u32(), 3);
+ }
+
+ #[test]
+ fn test_reader_rng_fill_bytes() {
+ let v = [1u8, 2, 3, 4, 5, 6, 7, 8];
+ let mut w = [0u8; 8];
+
+ let mut rng = ReadRng::new(&v[..]);
+ rng.fill_bytes(&mut w);
+
+ assert!(v == w);
+ }
+
+ #[test]
+ fn test_reader_rng_insufficient_bytes() {
+ let v = [1u8, 2, 3, 4, 5, 6, 7, 8];
+ let mut w = [0u8; 9];
+
+ let mut rng = ReadRng::new(&v[..]);
+
+ let result = rng.try_fill_bytes(&mut w);
+ assert!(result.is_err());
+ println!("Error: {}", result.unwrap_err());
+ }
+}
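
A minimal usage sketch of the `ReadRng` adapter added above, assuming the deprecated type is still re-exported from `rand::rngs::adapter` as in this rand 0.8 snapshot. An in-memory byte slice implements `std::io::Read`, so it stands in here for a file or `/dev/urandom`:

#![allow(deprecated)] // ReadRng is deprecated since rand 0.8.4 but still exported
use rand::rngs::adapter::ReadRng;
use rand::RngCore;

fn main() {
    // 16 bytes of input; a File or /dev/urandom handle would behave the same way.
    let data = [1u8, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16];
    let mut rng = ReadRng::new(&data[..]);

    // Infallible API: panics if the reader cannot supply enough bytes.
    let x = rng.next_u64(); // consumes the first 8 bytes
    println!("{:016x}", x);

    // Fallible API: the underlying io::Error is reported instead of panicking.
    let mut buf = [0u8; 32]; // larger than the 8 bytes that remain
    assert!(rng.try_fill_bytes(&mut buf).is_err());
}
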
diff --git a/third_party/rust/rand/src/rngs/adapter/reseeding.rs b/third_party/rust/rand/src/rngs/adapter/reseeding.rs
new file mode 100644
index 0000000000..ae3fcbb2fc
--- /dev/null
+++ b/third_party/rust/rand/src/rngs/adapter/reseeding.rs
@@ -0,0 +1,386 @@
+// Copyright 2018 Developers of the Rand project.
+// Copyright 2013 The Rust Project Developers.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! A wrapper around another PRNG that reseeds it after it
+//! generates a certain number of random bytes.
+
+use core::mem::size_of;
+
+use rand_core::block::{BlockRng, BlockRngCore};
+use rand_core::{CryptoRng, Error, RngCore, SeedableRng};
+
+/// A wrapper around any PRNG that implements [`BlockRngCore`], that adds the
+/// ability to reseed it.
+///
+/// `ReseedingRng` reseeds the underlying PRNG in the following cases:
+///
+/// - On a manual call to [`reseed()`].
+/// - After `clone()`, the clone will be reseeded on first use.
+/// - When a process is forked on UNIX, the RNGs in both the parent and child
+/// processes will be reseeded just before the next call to
+/// [`BlockRngCore::generate`], i.e. "soon". For ChaCha and Hc128 this is a
+/// maximum of fifteen `u32` values before reseeding.
+/// - After the PRNG has generated a configurable number of random bytes.
+///
+/// # When should reseeding after a fixed number of generated bytes be used?
+///
+/// Reseeding after a fixed number of generated bytes is never strictly
+/// *necessary*. Cryptographic PRNGs don't have a limited number of bytes they
+/// can output, or at least not a limit reachable in any practical way. There is
+/// no such thing as 'running out of entropy'.
+///
+/// Occasionally reseeding can be seen as some form of 'security in depth'. Even
+/// if in the future a cryptographic weakness is found in the CSPRNG being used,
+/// or a flaw in the implementation, occasionally reseeding should make
+/// exploiting it much more difficult or even impossible.
+///
+/// Use [`ReseedingRng::new`] with a `threshold` of `0` to disable reseeding
+/// after a fixed number of generated bytes.
+///
+/// # Limitations
+///
+/// It is recommended that a `ReseedingRng` (including `ThreadRng`) not be used
+/// from a fork handler.
+/// Use `OsRng` or `getrandom`, or defer your use of the RNG until later.
+///
+/// # Error handling
+///
+/// Although unlikely, reseeding the wrapped PRNG can fail. `ReseedingRng` will
+/// never panic; instead it tries to handle the error intelligently, through some
+/// combination of retrying and delaying the reseed until later.
+/// If handling the source error fails, `ReseedingRng` will continue generating
+/// data from the wrapped PRNG without reseeding.
+///
+/// Manually calling [`reseed()`] bypasses this retry and delay logic and
+/// reports the error directly.
+///
+/// # Example
+///
+/// ```
+/// use rand::prelude::*;
+/// use rand_chacha::ChaCha20Core; // Internal part of ChaChaRng that
+/// // implements BlockRngCore
+/// use rand::rngs::OsRng;
+/// use rand::rngs::adapter::ReseedingRng;
+///
+/// let prng = ChaCha20Core::from_entropy();
+/// let mut reseeding_rng = ReseedingRng::new(prng, 0, OsRng);
+///
+/// println!("{}", reseeding_rng.gen::<u64>());
+///
+/// let mut cloned_rng = reseeding_rng.clone();
+/// assert!(reseeding_rng.gen::<u64>() != cloned_rng.gen::<u64>());
+/// ```
+///
+/// [`BlockRngCore`]: rand_core::block::BlockRngCore
+/// [`ReseedingRng::new`]: ReseedingRng::new
+/// [`reseed()`]: ReseedingRng::reseed
+#[derive(Debug)]
+pub struct ReseedingRng<R, Rsdr>(BlockRng<ReseedingCore<R, Rsdr>>)
+where
+ R: BlockRngCore + SeedableRng,
+ Rsdr: RngCore;
+
+impl<R, Rsdr> ReseedingRng<R, Rsdr>
+where
+ R: BlockRngCore + SeedableRng,
+ Rsdr: RngCore,
+{
+ /// Create a new `ReseedingRng` from an existing PRNG, combined with an RNG
+ /// to use as reseeder.
+ ///
+ /// `threshold` sets the number of generated bytes after which to reseed the
+ /// PRNG. Set it to zero to never reseed based on the number of generated
+ /// values.
+ pub fn new(rng: R, threshold: u64, reseeder: Rsdr) -> Self {
+ ReseedingRng(BlockRng::new(ReseedingCore::new(rng, threshold, reseeder)))
+ }
+
+ /// Reseed the internal PRNG.
+ pub fn reseed(&mut self) -> Result<(), Error> {
+ self.0.core.reseed()
+ }
+}
+
+// TODO: this should be implemented for any type where the inner type
+// implements RngCore, but we can't specify that because ReseedingCore is private
+impl<R, Rsdr: RngCore> RngCore for ReseedingRng<R, Rsdr>
+where
+ R: BlockRngCore<Item = u32> + SeedableRng,
+ <R as BlockRngCore>::Results: AsRef<[u32]> + AsMut<[u32]>,
+{
+ #[inline(always)]
+ fn next_u32(&mut self) -> u32 {
+ self.0.next_u32()
+ }
+
+ #[inline(always)]
+ fn next_u64(&mut self) -> u64 {
+ self.0.next_u64()
+ }
+
+ fn fill_bytes(&mut self, dest: &mut [u8]) {
+ self.0.fill_bytes(dest)
+ }
+
+ fn try_fill_bytes(&mut self, dest: &mut [u8]) -> Result<(), Error> {
+ self.0.try_fill_bytes(dest)
+ }
+}
+
+impl<R, Rsdr> Clone for ReseedingRng<R, Rsdr>
+where
+ R: BlockRngCore + SeedableRng + Clone,
+ Rsdr: RngCore + Clone,
+{
+ fn clone(&self) -> ReseedingRng<R, Rsdr> {
+ // Recreating `BlockRng` seems easier than cloning it and resetting
+ // the index.
+ ReseedingRng(BlockRng::new(self.0.core.clone()))
+ }
+}
+
+impl<R, Rsdr> CryptoRng for ReseedingRng<R, Rsdr>
+where
+ R: BlockRngCore + SeedableRng + CryptoRng,
+ Rsdr: RngCore + CryptoRng,
+{
+}
+
+#[derive(Debug)]
+struct ReseedingCore<R, Rsdr> {
+ inner: R,
+ reseeder: Rsdr,
+ threshold: i64,
+ bytes_until_reseed: i64,
+ fork_counter: usize,
+}
+
+impl<R, Rsdr> BlockRngCore for ReseedingCore<R, Rsdr>
+where
+ R: BlockRngCore + SeedableRng,
+ Rsdr: RngCore,
+{
+ type Item = <R as BlockRngCore>::Item;
+ type Results = <R as BlockRngCore>::Results;
+
+ fn generate(&mut self, results: &mut Self::Results) {
+ let global_fork_counter = fork::get_fork_counter();
+ if self.bytes_until_reseed <= 0 || self.is_forked(global_fork_counter) {
+ // For better performance we do not simply call `reseed` here and then
+ // continue with the rest of the function; instead we return directly
+ // from a separate, non-inlined function.
+ return self.reseed_and_generate(results, global_fork_counter);
+ }
+ let num_bytes = results.as_ref().len() * size_of::<Self::Item>();
+ self.bytes_until_reseed -= num_bytes as i64;
+ self.inner.generate(results);
+ }
+}
+
+impl<R, Rsdr> ReseedingCore<R, Rsdr>
+where
+ R: BlockRngCore + SeedableRng,
+ Rsdr: RngCore,
+{
+ /// Create a new `ReseedingCore`.
+ fn new(rng: R, threshold: u64, reseeder: Rsdr) -> Self {
+ use ::core::i64::MAX;
+ fork::register_fork_handler();
+
+ // Because generating more than `i64::MAX` bytes takes centuries on
+ // current hardware, we just clamp the threshold to that value.
+ // A threshold of 0, which indicates no limit, is also mapped to that
+ // value.
+ let threshold = if threshold == 0 {
+ MAX
+ } else if threshold <= MAX as u64 {
+ threshold as i64
+ } else {
+ MAX
+ };
+
+ ReseedingCore {
+ inner: rng,
+ reseeder,
+ threshold: threshold as i64,
+ bytes_until_reseed: threshold as i64,
+ fork_counter: 0,
+ }
+ }
+
+ /// Reseed the internal PRNG.
+ fn reseed(&mut self) -> Result<(), Error> {
+ R::from_rng(&mut self.reseeder).map(|result| {
+ self.bytes_until_reseed = self.threshold;
+ self.inner = result
+ })
+ }
+
+ fn is_forked(&self, global_fork_counter: usize) -> bool {
+ // In theory, on 32-bit platforms, it is possible for
+ // `global_fork_counter` to wrap around after ~4e9 forks.
+ //
+ // This check will detect a fork in the normal case where
+ // `fork_counter < global_fork_counter`, and also when the difference
+ // between both is greater than `isize::MAX` (wrapped around).
+ //
+ // It will still fail to detect a fork if there have been more than
+ // `isize::MAX` forks, without any reseed in between. Seems unlikely
+ // enough.
+ (self.fork_counter.wrapping_sub(global_fork_counter) as isize) < 0
+ }
+
+ #[inline(never)]
+ fn reseed_and_generate(
+ &mut self, results: &mut <Self as BlockRngCore>::Results, global_fork_counter: usize,
+ ) {
+ #![allow(clippy::if_same_then_else)] // false positive
+ if self.is_forked(global_fork_counter) {
+ info!("Fork detected, reseeding RNG");
+ } else {
+ trace!("Reseeding RNG (periodic reseed)");
+ }
+
+ let num_bytes = results.as_ref().len() * size_of::<<R as BlockRngCore>::Item>();
+
+ if let Err(e) = self.reseed() {
+ warn!("Reseeding RNG failed: {}", e);
+ let _ = e;
+ }
+ self.fork_counter = global_fork_counter;
+
+ self.bytes_until_reseed = self.threshold - num_bytes as i64;
+ self.inner.generate(results);
+ }
+}
+
+impl<R, Rsdr> Clone for ReseedingCore<R, Rsdr>
+where
+ R: BlockRngCore + SeedableRng + Clone,
+ Rsdr: RngCore + Clone,
+{
+ fn clone(&self) -> ReseedingCore<R, Rsdr> {
+ ReseedingCore {
+ inner: self.inner.clone(),
+ reseeder: self.reseeder.clone(),
+ threshold: self.threshold,
+ bytes_until_reseed: 0, // reseed clone on first use
+ fork_counter: self.fork_counter,
+ }
+ }
+}
+
+impl<R, Rsdr> CryptoRng for ReseedingCore<R, Rsdr>
+where
+ R: BlockRngCore + SeedableRng + CryptoRng,
+ Rsdr: RngCore + CryptoRng,
+{
+}
+
+
+#[cfg(all(unix, not(target_os = "emscripten")))]
+mod fork {
+ use core::sync::atomic::{AtomicUsize, Ordering};
+ use std::sync::Once;
+
+ // Fork protection
+ //
+ // We implement fork protection on Unix using `pthread_atfork`.
+ // When the process is forked, we increment `RESEEDING_RNG_FORK_COUNTER`.
+ // Every `ReseedingRng` stores the last known value of the static in
+ // `fork_counter`. If the cached `fork_counter` is less than
+ // `RESEEDING_RNG_FORK_COUNTER`, it is time to reseed this RNG.
+ //
+ // If reseeding fails, we don't deal with this by setting a delay, but just
+ // don't update `fork_counter`, so a reseed is attempted as soon as
+ // possible.
+
+ static RESEEDING_RNG_FORK_COUNTER: AtomicUsize = AtomicUsize::new(0);
+
+ pub fn get_fork_counter() -> usize {
+ RESEEDING_RNG_FORK_COUNTER.load(Ordering::Relaxed)
+ }
+
+ extern "C" fn fork_handler() {
+ // Note: fetch_add is defined to wrap on overflow
+ // (which is what we want).
+ RESEEDING_RNG_FORK_COUNTER.fetch_add(1, Ordering::Relaxed);
+ }
+
+ pub fn register_fork_handler() {
+ static REGISTER: Once = Once::new();
+ REGISTER.call_once(|| {
+ // Bump the counter before and after forking (see #1169):
+ let ret = unsafe { libc::pthread_atfork(
+ Some(fork_handler),
+ Some(fork_handler),
+ Some(fork_handler),
+ ) };
+ if ret != 0 {
+ panic!("libc::pthread_atfork failed with code {}", ret);
+ }
+ });
+ }
+}
+
+#[cfg(not(all(unix, not(target_os = "emscripten"))))]
+mod fork {
+ pub fn get_fork_counter() -> usize {
+ 0
+ }
+ pub fn register_fork_handler() {}
+}
+
+
+#[cfg(feature = "std_rng")]
+#[cfg(test)]
+mod test {
+ use super::ReseedingRng;
+ use crate::rngs::mock::StepRng;
+ use crate::rngs::std::Core;
+ use crate::{Rng, SeedableRng};
+
+ #[test]
+ fn test_reseeding() {
+ let mut zero = StepRng::new(0, 0);
+ let rng = Core::from_rng(&mut zero).unwrap();
+ let thresh = 1; // reseed every time the buffer is exhausted
+ let mut reseeding = ReseedingRng::new(rng, thresh, zero);
+
+ // RNG buffer size is [u32; 64]
+ // Debug is only implemented up to length 32 so use two arrays
+ let mut buf = ([0u32; 32], [0u32; 32]);
+ reseeding.fill(&mut buf.0);
+ reseeding.fill(&mut buf.1);
+ let seq = buf;
+ for _ in 0..10 {
+ reseeding.fill(&mut buf.0);
+ reseeding.fill(&mut buf.1);
+ assert_eq!(buf, seq);
+ }
+ }
+
+ #[test]
+ fn test_clone_reseeding() {
+ #![allow(clippy::redundant_clone)]
+
+ let mut zero = StepRng::new(0, 0);
+ let rng = Core::from_rng(&mut zero).unwrap();
+ let mut rng1 = ReseedingRng::new(rng, 32 * 4, zero);
+
+ let first: u32 = rng1.gen();
+ for _ in 0..10 {
+ let _ = rng1.gen::<u32>();
+ }
+
+ let mut rng2 = rng1.clone();
+ assert_eq!(first, rng2.gen::<u32>());
+ }
+}
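
A rough usage sketch of `ReseedingRng`, mirroring the doc example above but with a non-zero byte threshold and a manual reseed. It assumes the `rand_chacha` crate for the block core and the default `getrandom`-backed `OsRng` as the reseeder:

use rand::prelude::*;
use rand::rngs::adapter::ReseedingRng;
use rand::rngs::OsRng;
use rand_chacha::ChaCha20Core; // the BlockRngCore behind ChaCha20Rng

fn main() {
    let prng = ChaCha20Core::from_entropy();
    // Reseed automatically from OsRng after every 64 KiB of generated output.
    let mut rng = ReseedingRng::new(prng, 64 * 1024, OsRng);

    let value: u64 = rng.gen();
    println!("{}", value);

    // A reseed can also be forced; unlike the automatic path, any error is
    // returned to the caller instead of being retried or deferred.
    rng.reseed().expect("reseeding from OsRng failed");
}
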
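The wrap-around check inside `is_forked` can also be illustrated in isolation. This standalone sketch (the helper name `counter_advanced` is invented for the example) shows why interpreting the wrapping difference of the two counters as a signed value still detects an advanced global counter after it overflows:

// True when `global` has advanced past `local`, even if the global fork
// counter has wrapped around usize::MAX in the meantime.
fn counter_advanced(local: usize, global: usize) -> bool {
    (local.wrapping_sub(global) as isize) < 0
}

fn main() {
    assert!(counter_advanced(5, 6));          // normal case: one fork since the last reseed
    assert!(!counter_advanced(6, 6));         // no fork observed
    assert!(counter_advanced(usize::MAX, 3)); // the global counter wrapped past zero
}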