Diffstat (limited to 'library/std/src/sync')
-rw-r--r--  library/std/src/sync/mpsc/mpsc_queue/tests.rs |   2
-rw-r--r--  library/std/src/sync/mpsc/spsc_queue/tests.rs |   5
-rw-r--r--  library/std/src/sync/mpsc/stream.rs           |   2
-rw-r--r--  library/std/src/sync/mpsc/sync_tests.rs       |  21
-rw-r--r--  library/std/src/sync/mpsc/tests.rs            |  12
-rw-r--r--  library/std/src/sync/mutex.rs                 |   1
-rw-r--r--  library/std/src/sync/once.rs                  | 312
-rw-r--r--  library/std/src/sync/once_lock.rs             |  55
-rw-r--r--  library/std/src/sync/rwlock.rs                |  66
-rw-r--r--  library/std/src/sync/rwlock/tests.rs          |   2
10 files changed, 84 insertions(+), 394 deletions(-)
diff --git a/library/std/src/sync/mpsc/mpsc_queue/tests.rs b/library/std/src/sync/mpsc/mpsc_queue/tests.rs
index 9f4f31ed0..34b2a9a98 100644
--- a/library/std/src/sync/mpsc/mpsc_queue/tests.rs
+++ b/library/std/src/sync/mpsc/mpsc_queue/tests.rs
@@ -13,7 +13,7 @@ fn test_full() {
#[test]
fn test() {
let nthreads = 8;
- let nmsgs = 1000;
+ let nmsgs = if cfg!(miri) { 100 } else { 1000 };
let q = Queue::new();
match q.pop() {
Empty => {}
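
Note: the test changes throughout this commit apply one pattern: keep the full workload for native runs, but shrink iteration counts when the suite is executed under Miri, whose interpreter is far slower. A minimal sketch of that pattern, with a made-up do_work stand-in for whatever a given stress test exercises:

fn do_work(i: u32) {
    assert!(i < u32::MAX);
}

#[test]
fn stress_scaled_for_miri() {
    // cfg!(miri) is true only when the code is built for the Miri interpreter,
    // so native test runs keep the original workload.
    let iterations = if cfg!(miri) { 100 } else { 10_000 };
    for i in 0..iterations {
        do_work(i);
    }
}
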
diff --git a/library/std/src/sync/mpsc/spsc_queue/tests.rs b/library/std/src/sync/mpsc/spsc_queue/tests.rs
index 467ef3dbd..eb6d5c2cf 100644
--- a/library/std/src/sync/mpsc/spsc_queue/tests.rs
+++ b/library/std/src/sync/mpsc/spsc_queue/tests.rs
@@ -77,12 +77,13 @@ fn stress() {
}
unsafe fn stress_bound(bound: usize) {
+ let count = if cfg!(miri) { 1000 } else { 100000 };
let q = Arc::new(Queue::with_additions(bound, (), ()));
let (tx, rx) = channel();
let q2 = q.clone();
let _t = thread::spawn(move || {
- for _ in 0..100000 {
+ for _ in 0..count {
loop {
match q2.pop() {
Some(1) => break,
@@ -93,7 +94,7 @@ fn stress() {
}
tx.send(()).unwrap();
});
- for _ in 0..100000 {
+ for _ in 0..count {
q.push(1);
}
rx.recv().unwrap();
diff --git a/library/std/src/sync/mpsc/stream.rs b/library/std/src/sync/mpsc/stream.rs
index 4c3812c79..4592e9141 100644
--- a/library/std/src/sync/mpsc/stream.rs
+++ b/library/std/src/sync/mpsc/stream.rs
@@ -114,7 +114,7 @@ impl<T> Packet<T> {
match self.queue.producer_addition().cnt.fetch_add(1, Ordering::SeqCst) {
// As described in the mod's doc comment, -1 == wakeup
-1 => UpWoke(self.take_to_wake()),
- // As as described before, SPSC queues must be >= -2
+ // As described before, SPSC queues must be >= -2
-2 => UpSuccess,
// Be sure to preserve the disconnected state, and the return value
diff --git a/library/std/src/sync/mpsc/sync_tests.rs b/library/std/src/sync/mpsc/sync_tests.rs
index e58649bab..63c794369 100644
--- a/library/std/src/sync/mpsc/sync_tests.rs
+++ b/library/std/src/sync/mpsc/sync_tests.rs
@@ -113,23 +113,25 @@ fn chan_gone_concurrent() {
#[test]
fn stress() {
+ let count = if cfg!(miri) { 100 } else { 10000 };
let (tx, rx) = sync_channel::<i32>(0);
thread::spawn(move || {
- for _ in 0..10000 {
+ for _ in 0..count {
tx.send(1).unwrap();
}
});
- for _ in 0..10000 {
+ for _ in 0..count {
assert_eq!(rx.recv().unwrap(), 1);
}
}
#[test]
fn stress_recv_timeout_two_threads() {
+ let count = if cfg!(miri) { 100 } else { 10000 };
let (tx, rx) = sync_channel::<i32>(0);
thread::spawn(move || {
- for _ in 0..10000 {
+ for _ in 0..count {
tx.send(1).unwrap();
}
});
@@ -146,12 +148,12 @@ fn stress_recv_timeout_two_threads() {
}
}
- assert_eq!(recv_count, 10000);
+ assert_eq!(recv_count, count);
}
#[test]
fn stress_recv_timeout_shared() {
- const AMT: u32 = 1000;
+ const AMT: u32 = if cfg!(miri) { 100 } else { 1000 };
const NTHREADS: u32 = 8;
let (tx, rx) = sync_channel::<i32>(0);
let (dtx, drx) = sync_channel::<()>(0);
@@ -191,7 +193,7 @@ fn stress_recv_timeout_shared() {
#[test]
fn stress_shared() {
- const AMT: u32 = 1000;
+ const AMT: u32 = if cfg!(miri) { 100 } else { 1000 };
const NTHREADS: u32 = 8;
let (tx, rx) = sync_channel::<i32>(0);
let (dtx, drx) = sync_channel::<()>(0);
@@ -438,12 +440,13 @@ fn stream_send_recv_stress() {
#[test]
fn recv_a_lot() {
+ let count = if cfg!(miri) { 1000 } else { 10000 };
// Regression test that we don't run out of stack in scheduler context
- let (tx, rx) = sync_channel(10000);
- for _ in 0..10000 {
+ let (tx, rx) = sync_channel(count);
+ for _ in 0..count {
tx.send(()).unwrap();
}
- for _ in 0..10000 {
+ for _ in 0..count {
rx.recv().unwrap();
}
}
diff --git a/library/std/src/sync/mpsc/tests.rs b/library/std/src/sync/mpsc/tests.rs
index 4deb3e596..f6d0796f6 100644
--- a/library/std/src/sync/mpsc/tests.rs
+++ b/library/std/src/sync/mpsc/tests.rs
@@ -120,13 +120,14 @@ fn chan_gone_concurrent() {
#[test]
fn stress() {
+ let count = if cfg!(miri) { 100 } else { 10000 };
let (tx, rx) = channel::<i32>();
let t = thread::spawn(move || {
- for _ in 0..10000 {
+ for _ in 0..count {
tx.send(1).unwrap();
}
});
- for _ in 0..10000 {
+ for _ in 0..count {
assert_eq!(rx.recv().unwrap(), 1);
}
t.join().ok().expect("thread panicked");
@@ -134,7 +135,7 @@ fn stress() {
#[test]
fn stress_shared() {
- const AMT: u32 = 10000;
+ const AMT: u32 = if cfg!(miri) { 100 } else { 10000 };
const NTHREADS: u32 = 8;
let (tx, rx) = channel::<i32>();
@@ -504,12 +505,13 @@ fn very_long_recv_timeout_wont_panic() {
#[test]
fn recv_a_lot() {
+ let count = if cfg!(miri) { 1000 } else { 10000 };
// Regression test that we don't run out of stack in scheduler context
let (tx, rx) = channel();
- for _ in 0..10000 {
+ for _ in 0..count {
tx.send(()).unwrap();
}
- for _ in 0..10000 {
+ for _ in 0..count {
rx.recv().unwrap();
}
}
diff --git a/library/std/src/sync/mutex.rs b/library/std/src/sync/mutex.rs
index e0d13cd64..de851c8fb 100644
--- a/library/std/src/sync/mutex.rs
+++ b/library/std/src/sync/mutex.rs
@@ -192,6 +192,7 @@ unsafe impl<T: ?Sized + Send> Sync for Mutex<T> {}
and cause Futures to not implement `Send`"]
#[stable(feature = "rust1", since = "1.0.0")]
#[clippy::has_significant_drop]
+#[cfg_attr(not(test), rustc_diagnostic_item = "MutexGuard")]
pub struct MutexGuard<'a, T: ?Sized + 'a> {
lock: &'a Mutex<T>,
poison: poison::Guard,
diff --git a/library/std/src/sync/once.rs b/library/std/src/sync/once.rs
index a7feea588..0f25417d6 100644
--- a/library/std/src/sync/once.rs
+++ b/library/std/src/sync/once.rs
@@ -3,99 +3,12 @@
//! This primitive is meant to be used to run one-time initialization. An
//! example use case would be for initializing an FFI library.
-// A "once" is a relatively simple primitive, and it's also typically provided
-// by the OS as well (see `pthread_once` or `InitOnceExecuteOnce`). The OS
-// primitives, however, tend to have surprising restrictions, such as the Unix
-// one doesn't allow an argument to be passed to the function.
-//
-// As a result, we end up implementing it ourselves in the standard library.
-// This also gives us the opportunity to optimize the implementation a bit which
-// should help the fast path on call sites. Consequently, let's explain how this
-// primitive works now!
-//
-// So to recap, the guarantees of a Once are that it will call the
-// initialization closure at most once, and it will never return until the one
-// that's running has finished running. This means that we need some form of
-// blocking here while the custom callback is running at the very least.
-// Additionally, we add on the restriction of **poisoning**. Whenever an
-// initialization closure panics, the Once enters a "poisoned" state which means
-// that all future calls will immediately panic as well.
-//
-// So to implement this, one might first reach for a `Mutex`, but those cannot
-// be put into a `static`. It also gets a lot harder with poisoning to figure
-// out when the mutex needs to be deallocated because it's not after the closure
-// finishes, but after the first successful closure finishes.
-//
-// All in all, this is instead implemented with atomics and lock-free
-// operations! Whee! Each `Once` has one word of atomic state, and this state is
- // CAS'd on to determine what to do. There are four possible states of a `Once`:
-//
-// * Incomplete - no initialization has run yet, and no thread is currently
-// using the Once.
-// * Poisoned - some thread has previously attempted to initialize the Once, but
-// it panicked, so the Once is now poisoned. There are no other
-// threads currently accessing this Once.
-// * Running - some thread is currently attempting to run initialization. It may
-// succeed, so all future threads need to wait for it to finish.
-// Note that this state is accompanied with a payload, described
-// below.
-// * Complete - initialization has completed and all future calls should finish
-// immediately.
-//
-// With 4 states we need 2 bits to encode this, and we use the remaining bits
-// in the word we have allocated as a queue of threads waiting for the thread
-// responsible for entering the RUNNING state. This queue is just a linked list
-// of Waiter nodes which is monotonically increasing in size. Each node is
-// allocated on the stack, and whenever the running closure finishes it will
-// consume the entire queue and notify all waiters they should try again.
-//
-// You'll find a few more details in the implementation, but that's the gist of
-// it!
-//
-// Atomic orderings:
-// When running `Once` we deal with multiple atomics:
-// `Once.state_and_queue` and an unknown number of `Waiter.signaled`.
-// * `state_and_queue` is used (1) as a state flag, (2) for synchronizing the
-// result of the `Once`, and (3) for synchronizing `Waiter` nodes.
-// - At the end of the `call_inner` function we have to make sure the result
-// of the `Once` is acquired. So every load which can be the only one to
-// load COMPLETED must have at least Acquire ordering, which means all
-// three of them.
-// - `WaiterQueue::Drop` is the only place that may store COMPLETED, and
-// must do so with Release ordering to make the result available.
-// - `wait` inserts `Waiter` nodes as a pointer in `state_and_queue`, and
-// needs to make the nodes available with Release ordering. The load in
-// its `compare_exchange` can be Relaxed because it only has to compare
-// the atomic, not to read other data.
-// - `WaiterQueue::Drop` must see the `Waiter` nodes, so it must load
-// `state_and_queue` with Acquire ordering.
-// - There is just one store where `state_and_queue` is used only as a
-// state flag, without having to synchronize data: switching the state
-// from INCOMPLETE to RUNNING in `call_inner`. This store can be Relaxed,
-// but the read has to be Acquire because of the requirements mentioned
-// above.
-// * `Waiter.signaled` is both used as a flag, and to protect a field with
-// interior mutability in `Waiter`. `Waiter.thread` is changed in
-// `WaiterQueue::Drop` which then sets `signaled` with Release ordering.
-// After `wait` loads `signaled` with Acquire and sees it is true, it needs to
-// see the changes to drop the `Waiter` struct correctly.
-// * There is one place where the two atomics `Once.state_and_queue` and
-// `Waiter.signaled` come together, and might be reordered by the compiler or
-// processor. Because both use Acquire ordering such a reordering is not
-// allowed, so no need for SeqCst.
-
#[cfg(all(test, not(target_os = "emscripten")))]
mod tests;
-use crate::cell::Cell;
use crate::fmt;
-use crate::marker;
use crate::panic::{RefUnwindSafe, UnwindSafe};
-use crate::ptr;
-use crate::sync::atomic::{AtomicBool, AtomicPtr, Ordering};
-use crate::thread::{self, Thread};
-
-type Masked = ();
+use crate::sys_common::once as sys;
/// A synchronization primitive which can be used to run a one-time global
/// initialization. Useful for one-time initialization for FFI or related
@@ -114,19 +27,9 @@ type Masked = ();
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub struct Once {
- // `state_and_queue` is actually a pointer to a `Waiter` with extra state
- // bits, so we add the `PhantomData` appropriately.
- state_and_queue: AtomicPtr<Masked>,
- _marker: marker::PhantomData<*const Waiter>,
+ inner: sys::Once,
}
-// The `PhantomData` of a raw pointer removes these two auto traits, but we
-// enforce both below in the implementation so this should be safe to add.
-#[stable(feature = "rust1", since = "1.0.0")]
-unsafe impl Sync for Once {}
-#[stable(feature = "rust1", since = "1.0.0")]
-unsafe impl Send for Once {}
-
#[stable(feature = "sync_once_unwind_safe", since = "1.59.0")]
impl UnwindSafe for Once {}
@@ -136,10 +39,8 @@ impl RefUnwindSafe for Once {}
/// State yielded to [`Once::call_once_force()`]’s closure parameter. The state
/// can be used to query the poison status of the [`Once`].
#[stable(feature = "once_poison", since = "1.51.0")]
-#[derive(Debug)]
pub struct OnceState {
- poisoned: bool,
- set_state_on_drop_to: Cell<*mut Masked>,
+ pub(crate) inner: sys::OnceState,
}
/// Initialization value for static [`Once`] values.
@@ -159,38 +60,6 @@ pub struct OnceState {
)]
pub const ONCE_INIT: Once = Once::new();
-// Four states that a Once can be in, encoded into the lower bits of
-// `state_and_queue` in the Once structure.
-const INCOMPLETE: usize = 0x0;
-const POISONED: usize = 0x1;
-const RUNNING: usize = 0x2;
-const COMPLETE: usize = 0x3;
-
-// Mask to learn about the state. All other bits are the queue of waiters if
-// this is in the RUNNING state.
-const STATE_MASK: usize = 0x3;
-
-// Representation of a node in the linked list of waiters, used while in the
-// RUNNING state.
-// Note: `Waiter` can't hold a mutable pointer to the next thread, because then
-// `wait` would both hand out a mutable reference to its `Waiter` node, and keep
-// a shared reference to check `signaled`. Instead we hold shared references and
-// use interior mutability.
-#[repr(align(4))] // Ensure the two lower bits are free to use as state bits.
-struct Waiter {
- thread: Cell<Option<Thread>>,
- signaled: AtomicBool,
- next: *const Waiter,
-}
-
-// Head of a linked list of waiters.
-// Every node is a struct on the stack of a waiting thread.
-// Will wake up the waiters when it gets dropped, i.e. also on panic.
-struct WaiterQueue<'a> {
- state_and_queue: &'a AtomicPtr<Masked>,
- set_state_on_drop_to: *mut Masked,
-}
-
impl Once {
/// Creates a new `Once` value.
#[inline]
@@ -198,10 +67,7 @@ impl Once {
#[rustc_const_stable(feature = "const_once_new", since = "1.32.0")]
#[must_use]
pub const fn new() -> Once {
- Once {
- state_and_queue: AtomicPtr::new(ptr::invalid_mut(INCOMPLETE)),
- _marker: marker::PhantomData,
- }
+ Once { inner: sys::Once::new() }
}
/// Performs an initialization routine once and only once. The given closure
@@ -261,6 +127,7 @@ impl Once {
/// This is similar to [poisoning with mutexes][poison].
///
/// [poison]: struct.Mutex.html#poisoning
+ #[inline]
#[stable(feature = "rust1", since = "1.0.0")]
#[track_caller]
pub fn call_once<F>(&self, f: F)
@@ -268,12 +135,12 @@ impl Once {
F: FnOnce(),
{
// Fast path check
- if self.is_completed() {
+ if self.inner.is_completed() {
return;
}
let mut f = Some(f);
- self.call_inner(false, &mut |_| f.take().unwrap()());
+ self.inner.call(false, &mut |_| f.take().unwrap()());
}
/// Performs the same function as [`call_once()`] except ignores poisoning.
@@ -320,18 +187,19 @@ impl Once {
/// // once any success happens, we stop propagating the poison
/// INIT.call_once(|| {});
/// ```
+ #[inline]
#[stable(feature = "once_poison", since = "1.51.0")]
pub fn call_once_force<F>(&self, f: F)
where
F: FnOnce(&OnceState),
{
// Fast path check
- if self.is_completed() {
+ if self.inner.is_completed() {
return;
}
let mut f = Some(f);
- self.call_inner(true, &mut |p| f.take().unwrap()(p));
+ self.inner.call(true, &mut |p| f.take().unwrap()(p));
}
/// Returns `true` if some [`call_once()`] call has completed
@@ -378,119 +246,7 @@ impl Once {
#[stable(feature = "once_is_completed", since = "1.43.0")]
#[inline]
pub fn is_completed(&self) -> bool {
- // An `Acquire` load is enough because that makes all the initialization
- // operations visible to us, and, this being a fast path, weaker
- // ordering helps with performance. This `Acquire` synchronizes with
- // `Release` operations on the slow path.
- self.state_and_queue.load(Ordering::Acquire).addr() == COMPLETE
- }
-
- // This is a non-generic function to reduce the monomorphization cost of
- // using `call_once` (this isn't exactly a trivial or small implementation).
- //
- // Additionally, this is tagged with `#[cold]` as it should indeed be cold
- // and it helps let LLVM know that calls to this function should be off the
- // fast path. Essentially, this should help generate more straight line code
- // in LLVM.
- //
- // Finally, this takes an `FnMut` instead of a `FnOnce` because there's
- // currently no way to take an `FnOnce` and call it via virtual dispatch
- // without some allocation overhead.
- #[cold]
- #[track_caller]
- fn call_inner(&self, ignore_poisoning: bool, init: &mut dyn FnMut(&OnceState)) {
- let mut state_and_queue = self.state_and_queue.load(Ordering::Acquire);
- loop {
- match state_and_queue.addr() {
- COMPLETE => break,
- POISONED if !ignore_poisoning => {
- // Panic to propagate the poison.
- panic!("Once instance has previously been poisoned");
- }
- POISONED | INCOMPLETE => {
- // Try to register this thread as the one RUNNING.
- let exchange_result = self.state_and_queue.compare_exchange(
- state_and_queue,
- ptr::invalid_mut(RUNNING),
- Ordering::Acquire,
- Ordering::Acquire,
- );
- if let Err(old) = exchange_result {
- state_and_queue = old;
- continue;
- }
- // `waiter_queue` will manage other waiting threads, and
- // wake them up on drop.
- let mut waiter_queue = WaiterQueue {
- state_and_queue: &self.state_and_queue,
- set_state_on_drop_to: ptr::invalid_mut(POISONED),
- };
- // Run the initialization function, letting it know if we're
- // poisoned or not.
- let init_state = OnceState {
- poisoned: state_and_queue.addr() == POISONED,
- set_state_on_drop_to: Cell::new(ptr::invalid_mut(COMPLETE)),
- };
- init(&init_state);
- waiter_queue.set_state_on_drop_to = init_state.set_state_on_drop_to.get();
- break;
- }
- _ => {
- // All other values must be RUNNING with possibly a
- // pointer to the waiter queue in the more significant bits.
- assert!(state_and_queue.addr() & STATE_MASK == RUNNING);
- wait(&self.state_and_queue, state_and_queue);
- state_and_queue = self.state_and_queue.load(Ordering::Acquire);
- }
- }
- }
- }
-}
-
-fn wait(state_and_queue: &AtomicPtr<Masked>, mut current_state: *mut Masked) {
- // Note: the following code was carefully written to avoid creating a
- // mutable reference to `node` that gets aliased.
- loop {
- // Don't queue this thread if the status is no longer running,
- // otherwise we will not be woken up.
- if current_state.addr() & STATE_MASK != RUNNING {
- return;
- }
-
- // Create the node for our current thread.
- let node = Waiter {
- thread: Cell::new(Some(thread::current())),
- signaled: AtomicBool::new(false),
- next: current_state.with_addr(current_state.addr() & !STATE_MASK) as *const Waiter,
- };
- let me = &node as *const Waiter as *const Masked as *mut Masked;
-
- // Try to slide in the node at the head of the linked list, making sure
- // that another thread didn't just replace the head of the linked list.
- let exchange_result = state_and_queue.compare_exchange(
- current_state,
- me.with_addr(me.addr() | RUNNING),
- Ordering::Release,
- Ordering::Relaxed,
- );
- if let Err(old) = exchange_result {
- current_state = old;
- continue;
- }
-
- // We have enqueued ourselves, now lets wait.
- // It is important not to return before being signaled, otherwise we
- // would drop our `Waiter` node and leave a hole in the linked list
- // (and a dangling reference). Guard against spurious wakeups by
- // reparking ourselves until we are signaled.
- while !node.signaled.load(Ordering::Acquire) {
- // If the managing thread happens to signal and unpark us before we
- // can park ourselves, the result could be this thread never gets
- // unparked. Luckily `park` comes with the guarantee that if it got
- // an `unpark` just before on an unparked thread it does not park.
- thread::park();
- }
- break;
+ self.inner.is_completed()
}
}
@@ -501,37 +257,6 @@ impl fmt::Debug for Once {
}
}
-impl Drop for WaiterQueue<'_> {
- fn drop(&mut self) {
- // Swap out our state with however we finished.
- let state_and_queue =
- self.state_and_queue.swap(self.set_state_on_drop_to, Ordering::AcqRel);
-
- // We should only ever see an old state which was RUNNING.
- assert_eq!(state_and_queue.addr() & STATE_MASK, RUNNING);
-
- // Walk the entire linked list of waiters and wake them up (in lifo
- // order, last to register is first to wake up).
- unsafe {
- // Right after setting `node.signaled = true` the other thread may
- // free `node` if there happens to be a spurious wakeup.
- // So we have to take out the `thread` field and copy the pointer to
- // `next` first.
- let mut queue =
- state_and_queue.with_addr(state_and_queue.addr() & !STATE_MASK) as *const Waiter;
- while !queue.is_null() {
- let next = (*queue).next;
- let thread = (*queue).thread.take().unwrap();
- (*queue).signaled.store(true, Ordering::Release);
- // ^- FIXME (maybe): This is another case of issue #55005
- // `store()` has a potentially dangling ref to `signaled`.
- queue = next;
- thread.unpark();
- }
- }
- }
-}
-
impl OnceState {
/// Returns `true` if the associated [`Once`] was poisoned prior to the
/// invocation of the closure passed to [`Once::call_once_force()`].
@@ -568,13 +293,22 @@ impl OnceState {
/// assert!(!state.is_poisoned());
/// });
#[stable(feature = "once_poison", since = "1.51.0")]
+ #[inline]
pub fn is_poisoned(&self) -> bool {
- self.poisoned
+ self.inner.is_poisoned()
}
/// Poison the associated [`Once`] without explicitly panicking.
- // NOTE: This is currently only exposed for the `lazy` module
+ // NOTE: This is currently only exposed for `OnceLock`.
+ #[inline]
pub(crate) fn poison(&self) {
- self.set_state_on_drop_to.set(ptr::invalid_mut(POISONED));
+ self.inner.poison();
+ }
+}
+
+#[stable(feature = "std_debug", since = "1.16.0")]
+impl fmt::Debug for OnceState {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("OnceState").field("poisoned", &self.is_poisoned()).finish()
}
}
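
Note: the long comment removed from once.rs above described how the previous lock-free implementation packed everything into one atomic word: the two low bits encode one of four states, and while RUNNING the remaining bits hold the address of the head of the stack-allocated waiter list. A small illustrative sketch of that encoding, reusing the constants from the removed code (the decode helper is hypothetical, not part of std):

// State values from the removed implementation. The low two bits of the
// state word select the state; the rest is the waiter-queue pointer.
const INCOMPLETE: usize = 0x0;
const POISONED: usize = 0x1;
const RUNNING: usize = 0x2;
const COMPLETE: usize = 0x3;
const STATE_MASK: usize = 0x3;

// Hypothetical helper: split a state word into (state bits, queue address).
fn decode(state_and_queue: usize) -> (usize, usize) {
    (state_and_queue & STATE_MASK, state_and_queue & !STATE_MASK)
}

fn main() {
    // A word whose low bits say RUNNING and whose upper bits point at a waiter node.
    let word = 0x7fff_1000usize | RUNNING;
    assert_eq!(decode(word), (RUNNING, 0x7fff_1000));
    let _ = (INCOMPLETE, POISONED, COMPLETE); // silence unused-constant warnings
}
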
diff --git a/library/std/src/sync/once_lock.rs b/library/std/src/sync/once_lock.rs
index 813516040..37413ec62 100644
--- a/library/std/src/sync/once_lock.rs
+++ b/library/std/src/sync/once_lock.rs
@@ -3,7 +3,6 @@ use crate::fmt;
use crate::marker::PhantomData;
use crate::mem::MaybeUninit;
use crate::panic::{RefUnwindSafe, UnwindSafe};
-use crate::pin::Pin;
use crate::sync::Once;
/// A synchronization primitive which can be written to only once.
@@ -223,60 +222,6 @@ impl<T> OnceLock<T> {
Ok(unsafe { self.get_unchecked() })
}
- /// Internal-only API that gets the contents of the cell, initializing it
- /// in two steps with `f` and `g` if the cell was empty.
- ///
- /// `f` is called to construct the value, which is then moved into the cell
- /// and given as a (pinned) mutable reference to `g` to finish
- /// initialization.
- ///
- /// This allows `g` to inspect and manipulate the value after it has been
- /// moved into its final place in the cell, but before the cell is
- /// considered initialized.
- ///
- /// # Panics
- ///
- /// If `f` or `g` panics, the panic is propagated to the caller, and the
- /// cell remains uninitialized.
- ///
- /// With the current implementation, if `g` panics, the value from `f` will
- /// not be dropped. This should probably be fixed if this is ever used for
- /// a type where this matters.
- ///
- /// It is an error to reentrantly initialize the cell from `f`. The exact
- /// outcome is unspecified. Current implementation deadlocks, but this may
- /// be changed to a panic in the future.
- pub(crate) fn get_or_init_pin<F, G>(self: Pin<&Self>, f: F, g: G) -> Pin<&T>
- where
- F: FnOnce() -> T,
- G: FnOnce(Pin<&mut T>),
- {
- if let Some(value) = self.get_ref().get() {
- // SAFETY: The inner value was already initialized, and will not be
- // moved anymore.
- return unsafe { Pin::new_unchecked(value) };
- }
-
- let slot = &self.value;
-
- // Ignore poisoning from other threads
- // If another thread panics, then we'll be able to run our closure
- self.once.call_once_force(|_| {
- let value = f();
- // SAFETY: We use the Once (self.once) to guarantee unique access
- // to the UnsafeCell (slot).
- let value: &mut T = unsafe { (&mut *slot.get()).write(value) };
- // SAFETY: The value has been written to its final place in
- // self.value. We do not move it anymore, which we promise here
- // with a Pin<&mut T>.
- g(unsafe { Pin::new_unchecked(value) });
- });
-
- // SAFETY: The inner value has been initialized, and will not be moved
- // anymore.
- unsafe { Pin::new_unchecked(self.get_ref().get_unchecked()) }
- }
-
/// Consumes the `OnceLock`, returning the wrapped value. Returns
/// `None` if the cell was empty.
///
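
Note: the removed get_or_init_pin was an internal-only, two-step (construct, then finish in place) initializer; the public OnceLock surface keeps the simpler one-step form. A short usage sketch of that public API (get_or_init was still gated behind the once_cell feature when this change landed and was stabilized in later releases):

use std::sync::OnceLock;

static CONFIG: OnceLock<String> = OnceLock::new();

fn config() -> &'static str {
    // The closure runs at most once, even if several threads race here;
    // later callers simply get the already-initialized value.
    CONFIG.get_or_init(|| "default configuration".to_string())
}

fn main() {
    assert_eq!(config(), "default configuration");
    assert_eq!(CONFIG.get().map(String::as_str), Some("default configuration"));
}
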
diff --git a/library/std/src/sync/rwlock.rs b/library/std/src/sync/rwlock.rs
index 6e4a2cfc8..8b3877607 100644
--- a/library/std/src/sync/rwlock.rs
+++ b/library/std/src/sync/rwlock.rs
@@ -76,6 +76,7 @@ use crate::sys_common::rwlock as sys;
///
/// [`Mutex`]: super::Mutex
#[stable(feature = "rust1", since = "1.0.0")]
+#[cfg_attr(not(test), rustc_diagnostic_item = "RwLock")]
pub struct RwLock<T: ?Sized> {
inner: sys::MovableRwLock,
poison: poison::Flag,
@@ -101,6 +102,7 @@ unsafe impl<T: ?Sized + Send + Sync> Sync for RwLock<T> {}
and cause Futures to not implement `Send`"]
#[stable(feature = "rust1", since = "1.0.0")]
#[clippy::has_significant_drop]
+#[cfg_attr(not(test), rustc_diagnostic_item = "RwLockReadGuard")]
pub struct RwLockReadGuard<'a, T: ?Sized + 'a> {
// NB: we use a pointer instead of `&'a T` to avoid `noalias` violations, because a
// `Ref` argument doesn't hold immutability for its whole scope, only until it drops.
@@ -130,6 +132,7 @@ unsafe impl<T: ?Sized + Sync> Sync for RwLockReadGuard<'_, T> {}
and cause Futures to not implement `Send`"]
#[stable(feature = "rust1", since = "1.0.0")]
#[clippy::has_significant_drop]
+#[cfg_attr(not(test), rustc_diagnostic_item = "RwLockWriteGuard")]
pub struct RwLockWriteGuard<'a, T: ?Sized + 'a> {
lock: &'a RwLock<T>,
poison: poison::Guard,
@@ -164,7 +167,7 @@ impl<T> RwLock<T> {
}
impl<T: ?Sized> RwLock<T> {
- /// Locks this rwlock with shared read access, blocking the current thread
+ /// Locks this `RwLock` with shared read access, blocking the current thread
/// until it can be acquired.
///
/// The calling thread will be blocked until there are no more writers which
@@ -178,9 +181,10 @@ impl<T: ?Sized> RwLock<T> {
///
/// # Errors
///
- /// This function will return an error if the RwLock is poisoned. An RwLock
- /// is poisoned whenever a writer panics while holding an exclusive lock.
- /// The failure will occur immediately after the lock has been acquired.
+ /// This function will return an error if the `RwLock` is poisoned. An
+ /// `RwLock` is poisoned whenever a writer panics while holding an exclusive
+ /// lock. The failure will occur immediately after the lock has been
+ /// acquired.
///
/// # Panics
///
@@ -212,7 +216,7 @@ impl<T: ?Sized> RwLock<T> {
}
}
- /// Attempts to acquire this rwlock with shared read access.
+ /// Attempts to acquire this `RwLock` with shared read access.
///
/// If the access could not be granted at this time, then `Err` is returned.
/// Otherwise, an RAII guard is returned which will release the shared access
@@ -225,13 +229,13 @@ impl<T: ?Sized> RwLock<T> {
///
/// # Errors
///
- /// This function will return the [`Poisoned`] error if the RwLock is poisoned.
- /// An RwLock is poisoned whenever a writer panics while holding an exclusive
- /// lock. `Poisoned` will only be returned if the lock would have otherwise been
- /// acquired.
+ /// This function will return the [`Poisoned`] error if the `RwLock` is
+ /// poisoned. An `RwLock` is poisoned whenever a writer panics while holding
+ /// an exclusive lock. `Poisoned` will only be returned if the lock would
+ /// have otherwise been acquired.
///
- /// This function will return the [`WouldBlock`] error if the RwLock could not
- /// be acquired because it was already locked exclusively.
+ /// This function will return the [`WouldBlock`] error if the `RwLock` could
+ /// not be acquired because it was already locked exclusively.
///
/// [`Poisoned`]: TryLockError::Poisoned
/// [`WouldBlock`]: TryLockError::WouldBlock
@@ -260,20 +264,20 @@ impl<T: ?Sized> RwLock<T> {
}
}
- /// Locks this rwlock with exclusive write access, blocking the current
+ /// Locks this `RwLock` with exclusive write access, blocking the current
/// thread until it can be acquired.
///
/// This function will not return while other writers or other readers
/// currently have access to the lock.
///
- /// Returns an RAII guard which will drop the write access of this rwlock
+ /// Returns an RAII guard which will drop the write access of this `RwLock`
/// when dropped.
///
/// # Errors
///
- /// This function will return an error if the RwLock is poisoned. An RwLock
- /// is poisoned whenever a writer panics while holding an exclusive lock.
- /// An error will be returned when the lock is acquired.
+ /// This function will return an error if the `RwLock` is poisoned. An
+ /// `RwLock` is poisoned whenever a writer panics while holding an exclusive
+ /// lock. An error will be returned when the lock is acquired.
///
/// # Panics
///
@@ -300,7 +304,7 @@ impl<T: ?Sized> RwLock<T> {
}
}
- /// Attempts to lock this rwlock with exclusive write access.
+ /// Attempts to lock this `RwLock` with exclusive write access.
///
/// If the lock could not be acquired at this time, then `Err` is returned.
/// Otherwise, an RAII guard is returned which will release the lock when
@@ -313,13 +317,13 @@ impl<T: ?Sized> RwLock<T> {
///
/// # Errors
///
- /// This function will return the [`Poisoned`] error if the RwLock is
- /// poisoned. An RwLock is poisoned whenever a writer panics while holding
- /// an exclusive lock. `Poisoned` will only be returned if the lock would have
- /// otherwise been acquired.
+ /// This function will return the [`Poisoned`] error if the `RwLock` is
+ /// poisoned. An `RwLock` is poisoned whenever a writer panics while holding
+ /// an exclusive lock. `Poisoned` will only be returned if the lock would
+ /// have otherwise been acquired.
///
- /// This function will return the [`WouldBlock`] error if the RwLock could not
- /// be acquired because it was already locked exclusively.
+ /// This function will return the [`WouldBlock`] error if the `RwLock` could
+ /// not be acquired because it was already locked exclusively.
///
/// [`Poisoned`]: TryLockError::Poisoned
/// [`WouldBlock`]: TryLockError::WouldBlock
@@ -419,10 +423,10 @@ impl<T: ?Sized> RwLock<T> {
///
/// # Errors
///
- /// This function will return an error if the RwLock is poisoned. An RwLock
- /// is poisoned whenever a writer panics while holding an exclusive lock. An
- /// error will only be returned if the lock would have otherwise been
- /// acquired.
+ /// This function will return an error if the `RwLock` is poisoned. An
+ /// `RwLock` is poisoned whenever a writer panics while holding an exclusive
+ /// lock. An error will only be returned if the lock would have otherwise
+ /// been acquired.
///
/// # Examples
///
@@ -452,10 +456,10 @@ impl<T: ?Sized> RwLock<T> {
///
/// # Errors
///
- /// This function will return an error if the RwLock is poisoned. An RwLock
- /// is poisoned whenever a writer panics while holding an exclusive lock. An
- /// error will only be returned if the lock would have otherwise been
- /// acquired.
+ /// This function will return an error if the `RwLock` is poisoned. An
+ /// `RwLock` is poisoned whenever a writer panics while holding an exclusive
+ /// lock. An error will only be returned if the lock would have otherwise
+ /// been acquired.
///
/// # Examples
///
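
Note: the reworded docs above spell out the two failure modes of the try_ methods: Poisoned when a writer panicked while holding the lock, and WouldBlock when the lock is currently held exclusively. A small self-contained example handling both (recovering the guard from a poisoned lock via into_inner is a policy choice here, not something the docs require):

use std::sync::{RwLock, TryLockError};

fn read_len(lock: &RwLock<Vec<i32>>) -> Option<usize> {
    match lock.try_read() {
        Ok(guard) => Some(guard.len()),
        // A writer panicked while holding the lock; choose to read the data anyway.
        Err(TryLockError::Poisoned(poisoned)) => Some(poisoned.into_inner().len()),
        // The lock is currently held exclusively; do not block, just report nothing.
        Err(TryLockError::WouldBlock) => None,
    }
}

fn main() {
    let lock = RwLock::new(vec![1, 2, 3]);
    assert_eq!(read_len(&lock), Some(3));

    let _writer = lock.write().unwrap();
    // With a writer active, try_read reports WouldBlock instead of blocking.
    assert_eq!(read_len(&lock), None);
}
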
diff --git a/library/std/src/sync/rwlock/tests.rs b/library/std/src/sync/rwlock/tests.rs
index 08255c985..b5b3ad989 100644
--- a/library/std/src/sync/rwlock/tests.rs
+++ b/library/std/src/sync/rwlock/tests.rs
@@ -19,7 +19,7 @@ fn smoke() {
#[test]
fn frob() {
const N: u32 = 10;
- const M: usize = 1000;
+ const M: usize = if cfg!(miri) { 100 } else { 1000 };
let r = Arc::new(RwLock::new(()));