Diffstat (limited to 'library/std/src/sync')
 library/std/src/sync/mpsc/mpsc_queue/tests.rs |  2
 library/std/src/sync/mpsc/spsc_queue/tests.rs |  5
 library/std/src/sync/mpsc/sync_tests.rs       | 21
 library/std/src/sync/mpsc/tests.rs            | 12
 library/std/src/sync/mutex.rs                 |  1
 library/std/src/sync/once_lock.rs             | 55
 library/std/src/sync/rwlock.rs                |  2
 library/std/src/sync/rwlock/tests.rs          |  2
 8 files changed, 27 insertions(+), 73 deletions(-)
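
Every test change below follows one pattern: stress-test iteration counts are scaled down under Miri, whose interpreter runs these loops orders of magnitude slower than a native build. The `cfg!(miri)` macro expands to a compile-time boolean, so the reduced count costs nothing in normal test runs. A minimal standalone sketch of the pattern, mirroring the patched `stress` test in mpsc/tests.rs:

use std::sync::mpsc::channel;
use std::thread;

#[test]
fn stress() {
    // Under Miri, `cfg!(miri)` is `true` and the loop shrinks from
    // 10000 iterations to 100; native test runs keep the full count.
    let count = if cfg!(miri) { 100 } else { 10000 };
    let (tx, rx) = channel::<i32>();
    let t = thread::spawn(move || {
        for _ in 0..count {
            tx.send(1).unwrap();
        }
    });
    for _ in 0..count {
        assert_eq!(rx.recv().unwrap(), 1);
    }
    t.join().expect("thread panicked");
}
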
diff --git a/library/std/src/sync/mpsc/mpsc_queue/tests.rs b/library/std/src/sync/mpsc/mpsc_queue/tests.rs
index 9f4f31ed0..34b2a9a98 100644
--- a/library/std/src/sync/mpsc/mpsc_queue/tests.rs
+++ b/library/std/src/sync/mpsc/mpsc_queue/tests.rs
@@ -13,7 +13,7 @@ fn test_full() {
#[test]
fn test() {
let nthreads = 8;
- let nmsgs = 1000;
+ let nmsgs = if cfg!(miri) { 100 } else { 1000 };
let q = Queue::new();
match q.pop() {
Empty => {}
diff --git a/library/std/src/sync/mpsc/spsc_queue/tests.rs b/library/std/src/sync/mpsc/spsc_queue/tests.rs
index 467ef3dbd..eb6d5c2cf 100644
--- a/library/std/src/sync/mpsc/spsc_queue/tests.rs
+++ b/library/std/src/sync/mpsc/spsc_queue/tests.rs
@@ -77,12 +77,13 @@ fn stress() {
}
unsafe fn stress_bound(bound: usize) {
+ let count = if cfg!(miri) { 1000 } else { 100000 };
let q = Arc::new(Queue::with_additions(bound, (), ()));
let (tx, rx) = channel();
let q2 = q.clone();
let _t = thread::spawn(move || {
- for _ in 0..100000 {
+ for _ in 0..count {
loop {
match q2.pop() {
Some(1) => break,
@@ -93,7 +94,7 @@ fn stress() {
}
tx.send(()).unwrap();
});
- for _ in 0..100000 {
+ for _ in 0..count {
q.push(1);
}
rx.recv().unwrap();
diff --git a/library/std/src/sync/mpsc/sync_tests.rs b/library/std/src/sync/mpsc/sync_tests.rs
index e58649bab..63c794369 100644
--- a/library/std/src/sync/mpsc/sync_tests.rs
+++ b/library/std/src/sync/mpsc/sync_tests.rs
@@ -113,23 +113,25 @@ fn chan_gone_concurrent() {
#[test]
fn stress() {
+ let count = if cfg!(miri) { 100 } else { 10000 };
let (tx, rx) = sync_channel::<i32>(0);
thread::spawn(move || {
- for _ in 0..10000 {
+ for _ in 0..count {
tx.send(1).unwrap();
}
});
- for _ in 0..10000 {
+ for _ in 0..count {
assert_eq!(rx.recv().unwrap(), 1);
}
}
#[test]
fn stress_recv_timeout_two_threads() {
+ let count = if cfg!(miri) { 100 } else { 10000 };
let (tx, rx) = sync_channel::<i32>(0);
thread::spawn(move || {
- for _ in 0..10000 {
+ for _ in 0..count {
tx.send(1).unwrap();
}
});
@@ -146,12 +148,12 @@ fn stress_recv_timeout_two_threads() {
}
}
- assert_eq!(recv_count, 10000);
+ assert_eq!(recv_count, count);
}
#[test]
fn stress_recv_timeout_shared() {
- const AMT: u32 = 1000;
+ const AMT: u32 = if cfg!(miri) { 100 } else { 1000 };
const NTHREADS: u32 = 8;
let (tx, rx) = sync_channel::<i32>(0);
let (dtx, drx) = sync_channel::<()>(0);
@@ -191,7 +193,7 @@ fn stress_recv_timeout_shared() {
#[test]
fn stress_shared() {
- const AMT: u32 = 1000;
+ const AMT: u32 = if cfg!(miri) { 100 } else { 1000 };
const NTHREADS: u32 = 8;
let (tx, rx) = sync_channel::<i32>(0);
let (dtx, drx) = sync_channel::<()>(0);
@@ -438,12 +440,13 @@ fn stream_send_recv_stress() {
#[test]
fn recv_a_lot() {
+ let count = if cfg!(miri) { 1000 } else { 10000 };
// Regression test that we don't run out of stack in scheduler context
- let (tx, rx) = sync_channel(10000);
- for _ in 0..10000 {
+ let (tx, rx) = sync_channel(count);
+ for _ in 0..count {
tx.send(()).unwrap();
}
- for _ in 0..10000 {
+ for _ in 0..count {
rx.recv().unwrap();
}
}
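
The sync_tests.rs hunks also apply the pattern in `const` position. This works because `cfg!(miri)` expands to a literal `true` or `false` and `if` is permitted in constant evaluation, so `AMT` remains a genuine compile-time constant. A small sketch, assuming nothing beyond the standard library:

// `cfg!(miri)` is a compile-time boolean literal, so this `if` is
// resolved during constant evaluation and AMT stays a `const`.
const AMT: u32 = if cfg!(miri) { 100 } else { 1000 };
const NTHREADS: u32 = 8;
// Derived constants still fold at compile time.
const TOTAL: u32 = AMT * NTHREADS;

fn main() {
    println!("each of {NTHREADS} threads sends {AMT} messages ({TOTAL} total)");
}
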
diff --git a/library/std/src/sync/mpsc/tests.rs b/library/std/src/sync/mpsc/tests.rs
index 4deb3e596..f6d0796f6 100644
--- a/library/std/src/sync/mpsc/tests.rs
+++ b/library/std/src/sync/mpsc/tests.rs
@@ -120,13 +120,14 @@ fn chan_gone_concurrent() {
#[test]
fn stress() {
+ let count = if cfg!(miri) { 100 } else { 10000 };
let (tx, rx) = channel::<i32>();
let t = thread::spawn(move || {
- for _ in 0..10000 {
+ for _ in 0..count {
tx.send(1).unwrap();
}
});
- for _ in 0..10000 {
+ for _ in 0..count {
assert_eq!(rx.recv().unwrap(), 1);
}
t.join().ok().expect("thread panicked");
@@ -134,7 +135,7 @@ fn stress() {
#[test]
fn stress_shared() {
- const AMT: u32 = 10000;
+ const AMT: u32 = if cfg!(miri) { 100 } else { 10000 };
const NTHREADS: u32 = 8;
let (tx, rx) = channel::<i32>();
@@ -504,12 +505,13 @@ fn very_long_recv_timeout_wont_panic() {
#[test]
fn recv_a_lot() {
+ let count = if cfg!(miri) { 1000 } else { 10000 };
// Regression test that we don't run out of stack in scheduler context
let (tx, rx) = channel();
- for _ in 0..10000 {
+ for _ in 0..count {
tx.send(()).unwrap();
}
- for _ in 0..10000 {
+ for _ in 0..count {
rx.recv().unwrap();
}
}
diff --git a/library/std/src/sync/mutex.rs b/library/std/src/sync/mutex.rs
index e0d13cd64..de851c8fb 100644
--- a/library/std/src/sync/mutex.rs
+++ b/library/std/src/sync/mutex.rs
@@ -192,6 +192,7 @@ unsafe impl<T: ?Sized + Send> Sync for Mutex<T> {}
and cause Futures to not implement `Send`"]
#[stable(feature = "rust1", since = "1.0.0")]
#[clippy::has_significant_drop]
+#[cfg_attr(not(test), rustc_diagnostic_item = "MutexGuard")]
pub struct MutexGuard<'a, T: ?Sized + 'a> {
lock: &'a Mutex<T>,
poison: poison::Guard,
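
The new attribute registers `MutexGuard` as a rustc diagnostic item: a stable name that lets the compiler and tools such as Clippy recognize the type without hard-coding its path. The `cfg_attr(not(test), ...)` wrapper keeps the item from being defined twice when std is itself compiled as a test crate. As an illustration only, a Clippy-style check for the guard type might look roughly like this; `is_type_diagnostic_item` is a real `clippy_utils` helper, but the surrounding function is hypothetical:

use clippy_utils::ty::is_type_diagnostic_item;
use rustc_lint::LateContext;
use rustc_middle::ty::Ty;
use rustc_span::sym;

// Hypothetical helper: returns true when `ty` is std's MutexGuard,
// identified via the diagnostic item added in this diff rather than
// via a hard-coded `std::sync::MutexGuard` path.
fn is_mutex_guard<'tcx>(cx: &LateContext<'tcx>, ty: Ty<'tcx>) -> bool {
    is_type_diagnostic_item(cx, ty, sym::MutexGuard)
}
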
diff --git a/library/std/src/sync/once_lock.rs b/library/std/src/sync/once_lock.rs
index 813516040..37413ec62 100644
--- a/library/std/src/sync/once_lock.rs
+++ b/library/std/src/sync/once_lock.rs
@@ -3,7 +3,6 @@ use crate::fmt;
use crate::marker::PhantomData;
use crate::mem::MaybeUninit;
use crate::panic::{RefUnwindSafe, UnwindSafe};
-use crate::pin::Pin;
use crate::sync::Once;
/// A synchronization primitive which can be written to only once.
@@ -223,60 +222,6 @@ impl<T> OnceLock<T> {
Ok(unsafe { self.get_unchecked() })
}
- /// Internal-only API that gets the contents of the cell, initializing it
- /// in two steps with `f` and `g` if the cell was empty.
- ///
- /// `f` is called to construct the value, which is then moved into the cell
- /// and given as a (pinned) mutable reference to `g` to finish
- /// initialization.
- ///
- /// This allows `g` to inspect and manipulate the value after it has been
- /// moved into its final place in the cell, but before the cell is
- /// considered initialized.
- ///
- /// # Panics
- ///
- /// If `f` or `g` panics, the panic is propagated to the caller, and the
- /// cell remains uninitialized.
- ///
- /// With the current implementation, if `g` panics, the value from `f` will
- /// not be dropped. This should probably be fixed if this is ever used for
- /// a type where this matters.
- ///
- /// It is an error to reentrantly initialize the cell from `f`. The exact
- /// outcome is unspecified. Current implementation deadlocks, but this may
- /// be changed to a panic in the future.
- pub(crate) fn get_or_init_pin<F, G>(self: Pin<&Self>, f: F, g: G) -> Pin<&T>
- where
- F: FnOnce() -> T,
- G: FnOnce(Pin<&mut T>),
- {
- if let Some(value) = self.get_ref().get() {
- // SAFETY: The inner value was already initialized, and will not be
- // moved anymore.
- return unsafe { Pin::new_unchecked(value) };
- }
-
- let slot = &self.value;
-
- // Ignore poisoning from other threads
- // If another thread panics, then we'll be able to run our closure
- self.once.call_once_force(|_| {
- let value = f();
- // SAFETY: We use the Once (self.once) to guarantee unique access
- // to the UnsafeCell (slot).
- let value: &mut T = unsafe { (&mut *slot.get()).write(value) };
- // SAFETY: The value has been written to its final place in
- // self.value. We will not move it anymore, which we promise here
- // with a Pin<&mut T>.
- g(unsafe { Pin::new_unchecked(value) });
- });
-
- // SAFETY: The inner value has been initialized, and will not be moved
- // anymore.
- unsafe { Pin::new_unchecked(self.get_ref().get_unchecked()) }
- }
-
/// Consumes the `OnceLock`, returning the wrapped value. Returns
/// `None` if the cell was empty.
///
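
With the internal `get_or_init_pin` removed, one-shot initialization goes through `OnceLock`'s public surface. For reference, a minimal sketch of `get_or_init`, which gives the same call-once guarantee without the two-step pinned initialization; the static and its contents here are illustrative:

use std::sync::OnceLock;

static CONFIG: OnceLock<String> = OnceLock::new();

fn config() -> &'static str {
    // The closure runs at most once even if several threads race here;
    // later callers get a reference to the already-initialized value.
    CONFIG.get_or_init(|| String::from("default configuration"))
}
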
diff --git a/library/std/src/sync/rwlock.rs b/library/std/src/sync/rwlock.rs
index 6e4a2cfc8..9ab781561 100644
--- a/library/std/src/sync/rwlock.rs
+++ b/library/std/src/sync/rwlock.rs
@@ -101,6 +101,7 @@ unsafe impl<T: ?Sized + Send + Sync> Sync for RwLock<T> {}
and cause Futures to not implement `Send`"]
#[stable(feature = "rust1", since = "1.0.0")]
#[clippy::has_significant_drop]
+#[cfg_attr(not(test), rustc_diagnostic_item = "RwLockReadGuard")]
pub struct RwLockReadGuard<'a, T: ?Sized + 'a> {
// NB: we use a pointer instead of `&'a T` to avoid `noalias` violations, because a
// `Ref` argument doesn't hold immutability for its whole scope, only until it drops.
@@ -130,6 +131,7 @@ unsafe impl<T: ?Sized + Sync> Sync for RwLockReadGuard<'_, T> {}
and cause Futures to not implement `Send`"]
#[stable(feature = "rust1", since = "1.0.0")]
#[clippy::has_significant_drop]
+#[cfg_attr(not(test), rustc_diagnostic_item = "RwLockWriteGuard")]
pub struct RwLockWriteGuard<'a, T: ?Sized + 'a> {
lock: &'a RwLock<T>,
poison: poison::Guard,
diff --git a/library/std/src/sync/rwlock/tests.rs b/library/std/src/sync/rwlock/tests.rs
index 08255c985..b5b3ad989 100644
--- a/library/std/src/sync/rwlock/tests.rs
+++ b/library/std/src/sync/rwlock/tests.rs
@@ -19,7 +19,7 @@ fn smoke() {
#[test]
fn frob() {
const N: u32 = 10;
- const M: usize = 1000;
+ const M: usize = if cfg!(miri) { 100 } else { 1000 };
let r = Arc::new(RwLock::new(()));
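
The hunk cuts off before the body of `frob`. As an assumed shape only, the kind of reader/writer stress loop such a test performs looks roughly like this; the thread count and read/write mix are illustrative, not taken from the diff:

use std::sync::{Arc, RwLock};
use std::thread;

fn main() {
    const N: u32 = 10;
    const M: usize = if cfg!(miri) { 100 } else { 1000 };
    let r = Arc::new(RwLock::new(()));
    let mut handles = Vec::new();
    for _ in 0..N {
        let r = Arc::clone(&r);
        handles.push(thread::spawn(move || {
            for i in 0..M {
                // Mix exclusive and shared acquisitions; each guard is
                // dropped immediately so other threads can proceed.
                if i % 5 == 0 {
                    drop(r.write().unwrap());
                } else {
                    drop(r.read().unwrap());
                }
            }
        }));
    }
    for h in handles {
        h.join().unwrap();
    }
}
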