author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-17 12:06:37 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-17 12:06:37 +0000
commit     246f239d9f40f633160f0c18f87a20922d4e77bb (patch)
tree       5a88572663584b3d4d28e5a20e10abab1be40884 /vendor/once_cell/src
parent     Releasing progress-linux version 1.64.0+dfsg1-1~progress7.99u1. (diff)
Merging debian version 1.65.0+dfsg1-2.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'vendor/once_cell/src')
-rw-r--r--  vendor/once_cell/src/imp_std.rs | 93
 1 file changed, 72 insertions(+), 21 deletions(-)
diff --git a/vendor/once_cell/src/imp_std.rs b/vendor/once_cell/src/imp_std.rs
index 2bbe5b76e..4d5b5fd32 100644
--- a/vendor/once_cell/src/imp_std.rs
+++ b/vendor/once_cell/src/imp_std.rs
@@ -8,7 +8,7 @@ use std::{
hint::unreachable_unchecked,
marker::PhantomData,
panic::{RefUnwindSafe, UnwindSafe},
- sync::atomic::{AtomicBool, AtomicUsize, Ordering},
+ sync::atomic::{AtomicBool, AtomicPtr, Ordering},
thread::{self, Thread},
};
@@ -24,7 +24,7 @@ pub(crate) struct OnceCell<T> {
//
// State is encoded in two low bits. Only `INCOMPLETE` and `RUNNING` states
// allow waiters.
- queue: AtomicUsize,
+ queue: AtomicPtr<Waiter>,
_marker: PhantomData<*mut Waiter>,
value: UnsafeCell<Option<T>>,
}
@@ -43,7 +43,7 @@ impl<T: UnwindSafe> UnwindSafe for OnceCell<T> {}
impl<T> OnceCell<T> {
pub(crate) const fn new() -> OnceCell<T> {
OnceCell {
- queue: AtomicUsize::new(INCOMPLETE),
+ queue: AtomicPtr::new(INCOMPLETE_PTR),
_marker: PhantomData,
value: UnsafeCell::new(None),
}
@@ -51,7 +51,7 @@ impl<T> OnceCell<T> {
pub(crate) const fn with_value(value: T) -> OnceCell<T> {
OnceCell {
- queue: AtomicUsize::new(COMPLETE),
+ queue: AtomicPtr::new(COMPLETE_PTR),
_marker: PhantomData,
value: UnsafeCell::new(Some(value)),
}
@@ -64,7 +64,7 @@ impl<T> OnceCell<T> {
// operations visible to us, and, this being a fast path, weaker
// ordering helps with performance. This `Acquire` synchronizes with
// `SeqCst` operations on the slow path.
- self.queue.load(Ordering::Acquire) == COMPLETE
+ self.queue.load(Ordering::Acquire) == COMPLETE_PTR
}
/// Safety: synchronizes with store to value via SeqCst read from state,
@@ -145,6 +145,8 @@ impl<T> OnceCell<T> {
const INCOMPLETE: usize = 0x0;
const RUNNING: usize = 0x1;
const COMPLETE: usize = 0x2;
+const INCOMPLETE_PTR: *mut Waiter = INCOMPLETE as *mut Waiter;
+const COMPLETE_PTR: *mut Waiter = COMPLETE as *mut Waiter;
// Mask to learn about the state. All other bits are the queue of waiters if
// this is in the RUNNING state.
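
[Editor's sketch, not part of the patch: the tagging scheme that STATE_MASK serves is unchanged by this diff. The two low bits of the queue word hold the state, and when the state is RUNNING the remaining bits are the address of the first Waiter. A minimal self-contained illustration of that encoding, using plain integer casts for brevity where the patched code goes through the `strict` helpers introduced further down:]

    const STATE_MASK: usize = 0x3; // low two bits: INCOMPLETE (0x0), RUNNING (0x1), COMPLETE (0x2)
    const RUNNING: usize = 0x1;

    #[repr(align(4))] // alignment of at least 4 keeps the low two bits of any Node address zero
    struct Node {
        next: *mut Node,
    }

    // Read the state bits out of a tagged pointer.
    fn state_of(tagged: *mut Node) -> usize {
        (tagged as usize) & STATE_MASK
    }

    // Combine a node address with the RUNNING tag, as the queue does when pushing a waiter.
    fn tag_running(node: *mut Node) -> *mut Node {
        (((node as usize) & !STATE_MASK) | RUNNING) as *mut Node
    }

    // Strip the tag to recover a dereferenceable node pointer.
    fn untag(tagged: *mut Node) -> *mut Node {
        ((tagged as usize) & !STATE_MASK) as *mut Node
    }

    fn main() {
        let mut head = Node { next: std::ptr::null_mut() };
        let tagged = tag_running(&mut head as *mut Node);
        assert_eq!(state_of(tagged), RUNNING);
        assert_eq!(untag(tagged), &mut head as *mut Node);
    }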
@@ -156,23 +158,24 @@ const STATE_MASK: usize = 0x3;
struct Waiter {
thread: Cell<Option<Thread>>,
signaled: AtomicBool,
- next: *const Waiter,
+ next: *mut Waiter,
}
/// Drains and notifies the queue of waiters on drop.
struct Guard<'a> {
- queue: &'a AtomicUsize,
- new_queue: usize,
+ queue: &'a AtomicPtr<Waiter>,
+ new_queue: *mut Waiter,
}
impl Drop for Guard<'_> {
fn drop(&mut self) {
let queue = self.queue.swap(self.new_queue, Ordering::AcqRel);
- assert_eq!(queue & STATE_MASK, RUNNING);
+ let state = strict::addr(queue) & STATE_MASK;
+ assert_eq!(state, RUNNING);
unsafe {
- let mut waiter = (queue & !STATE_MASK) as *const Waiter;
+ let mut waiter = strict::map_addr(queue, |q| q & !STATE_MASK);
while !waiter.is_null() {
let next = (*waiter).next;
let thread = (*waiter).thread.take().unwrap();
@@ -191,17 +194,17 @@ impl Drop for Guard<'_> {
//
// Note: this is intentionally monomorphic
#[inline(never)]
-fn initialize_or_wait(queue: &AtomicUsize, mut init: Option<&mut dyn FnMut() -> bool>) {
+fn initialize_or_wait(queue: &AtomicPtr<Waiter>, mut init: Option<&mut dyn FnMut() -> bool>) {
let mut curr_queue = queue.load(Ordering::Acquire);
loop {
- let curr_state = curr_queue & STATE_MASK;
+ let curr_state = strict::addr(curr_queue) & STATE_MASK;
match (curr_state, &mut init) {
(COMPLETE, _) => return,
(INCOMPLETE, Some(init)) => {
let exchange = queue.compare_exchange(
curr_queue,
- (curr_queue & !STATE_MASK) | RUNNING,
+ strict::map_addr(curr_queue, |q| (q & !STATE_MASK) | RUNNING),
Ordering::Acquire,
Ordering::Acquire,
);
@@ -209,9 +212,9 @@ fn initialize_or_wait(queue: &AtomicUsize, mut init: Option<&mut dyn FnMut() ->
curr_queue = new_queue;
continue;
}
- let mut guard = Guard { queue, new_queue: INCOMPLETE };
+ let mut guard = Guard { queue, new_queue: INCOMPLETE_PTR };
if init() {
- guard.new_queue = COMPLETE;
+ guard.new_queue = COMPLETE_PTR;
}
return;
}
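
[Editor's sketch, not part of the patch: the `Guard` constructed in this hunk is the panic-safety mechanism. It starts out pointing back at INCOMPLETE_PTR and is upgraded to COMPLETE_PTR only after `init()` succeeds, so its `Drop` impl (previous hunk) publishes the right state and wakes waiters even if `init()` unwinds. A reduced illustration of the same drop-guard idiom, stripped of the waiter queue and using a plain AtomicUsize for the state:]

    use std::sync::atomic::{AtomicUsize, Ordering};

    const INCOMPLETE: usize = 0x0;
    const RUNNING: usize = 0x1;
    const COMPLETE: usize = 0x2;

    struct ResetOnDrop<'a> {
        state: &'a AtomicUsize,
        on_drop: usize,
    }

    impl Drop for ResetOnDrop<'_> {
        fn drop(&mut self) {
            // Runs on success and on panic alike, so the cell can never be left RUNNING.
            self.state.store(self.on_drop, Ordering::Release);
        }
    }

    fn run_init(state: &AtomicUsize, init: impl FnOnce() -> bool) {
        if state
            .compare_exchange(INCOMPLETE, RUNNING, Ordering::Acquire, Ordering::Acquire)
            .is_ok()
        {
            // Pessimistically plan to roll back to INCOMPLETE; upgrade to COMPLETE
            // only once `init` reports success.
            let mut guard = ResetOnDrop { state, on_drop: INCOMPLETE };
            if init() {
                guard.on_drop = COMPLETE;
            }
            // `guard` drops here and publishes the final state.
        }
    }

    fn main() {
        let state = AtomicUsize::new(INCOMPLETE);
        run_init(&state, || true);
        assert_eq!(state.load(Ordering::Acquire), COMPLETE);
    }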
@@ -224,24 +227,24 @@ fn initialize_or_wait(queue: &AtomicUsize, mut init: Option<&mut dyn FnMut() ->
}
}
-fn wait(queue: &AtomicUsize, mut curr_queue: usize) {
- let curr_state = curr_queue & STATE_MASK;
+fn wait(queue: &AtomicPtr<Waiter>, mut curr_queue: *mut Waiter) {
+ let curr_state = strict::addr(curr_queue) & STATE_MASK;
loop {
let node = Waiter {
thread: Cell::new(Some(thread::current())),
signaled: AtomicBool::new(false),
- next: (curr_queue & !STATE_MASK) as *const Waiter,
+ next: strict::map_addr(curr_queue, |q| q & !STATE_MASK),
};
- let me = &node as *const Waiter as usize;
+ let me = &node as *const Waiter as *mut Waiter;
let exchange = queue.compare_exchange(
curr_queue,
- me | curr_state,
+ strict::map_addr(me, |q| q | curr_state),
Ordering::Release,
Ordering::Relaxed,
);
if let Err(new_queue) = exchange {
- if new_queue & STATE_MASK != curr_state {
+ if strict::addr(new_queue) & STATE_MASK != curr_state {
return;
}
curr_queue = new_queue;
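
[Editor's sketch, not part of the patch: the remainder of `wait` is elided by the diff context; in upstream once_cell it is the park loop implied by the `thread` and `signaled` fields, with `Guard::drop` on the other side flipping `signaled` and unparking each queued thread. A small stand-alone illustration of that handshake; the real code reaches the waiter through the raw pointers queued above rather than a shared reference:]

    use std::cell::Cell;
    use std::sync::atomic::{AtomicBool, Ordering};
    use std::thread::{self, Thread};

    struct Waiter {
        thread: Cell<Option<Thread>>,
        signaled: AtomicBool,
    }

    // Waiting side: park until the initializing thread signals us. Spurious
    // wakeups from `park` are fine because `signaled` is re-checked each time.
    fn block_on(node: &Waiter) {
        while !node.signaled.load(Ordering::Acquire) {
            thread::park();
        }
    }

    // Notifying side: take everything needed from the waiter, then set the
    // flag before unparking so the wakeup cannot be missed.
    fn notify(node: &Waiter) {
        let thread = node.thread.take().expect("waiter recorded its thread");
        node.signaled.store(true, Ordering::Release);
        thread.unpark();
    }

    fn main() {
        let w = Waiter {
            thread: Cell::new(Some(thread::current())),
            signaled: AtomicBool::new(false),
        };
        // Single-threaded demo: signal first, so the "wait" returns immediately.
        notify(&w);
        block_on(&w);
    }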
@@ -255,6 +258,54 @@ fn wait(queue: &AtomicUsize, mut curr_queue: usize) {
}
}
+// Polyfill of strict provenance from https://crates.io/crates/sptr.
+//
+// Use free-standing function rather than a trait to keep things simple and
+// avoid any potential conflicts with a future stable std API.
+mod strict {
+ #[must_use]
+ #[inline]
+ pub(crate) fn addr<T>(ptr: *mut T) -> usize
+ where
+ T: Sized,
+ {
+ // FIXME(strict_provenance_magic): I am magic and should be a compiler intrinsic.
+ // SAFETY: Pointer-to-integer transmutes are valid (if you are okay with losing the
+ // provenance).
+ unsafe { core::mem::transmute(ptr) }
+ }
+
+ #[must_use]
+ #[inline]
+ pub(crate) fn with_addr<T>(ptr: *mut T, addr: usize) -> *mut T
+ where
+ T: Sized,
+ {
+ // FIXME(strict_provenance_magic): I am magic and should be a compiler intrinsic.
+ //
+// In the meantime, this operation is defined to be "as if" it was
+ // a wrapping_offset, so we can emulate it as such. This should properly
+ // restore pointer provenance even under today's compiler.
+ let self_addr = self::addr(ptr) as isize;
+ let dest_addr = addr as isize;
+ let offset = dest_addr.wrapping_sub(self_addr);
+
+// This is the canonical desugaring of this operation,
+ // but `pointer::cast` was only stabilized in 1.38.
+ // self.cast::<u8>().wrapping_offset(offset).cast::<T>()
+ (ptr as *mut u8).wrapping_offset(offset) as *mut T
+ }
+
+ #[must_use]
+ #[inline]
+ pub(crate) fn map_addr<T>(ptr: *mut T, f: impl FnOnce(usize) -> usize) -> *mut T
+ where
+ T: Sized,
+ {
+ self::with_addr(ptr, f(addr(ptr)))
+ }
+}
+
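
[Editor's sketch, not part of the patch: a small illustrative use of the polyfill, assuming it sits in the same file so `strict` is visible. The address bits are modified through `map_addr`, so the resulting pointer keeps the provenance of the original allocation, which a plain usize-to-pointer round trip would lose under strict-provenance tooling such as Miri:]

    // Tag a pointer's low bit and clear it again without ever leaving
    // pointer land, so the final dereference remains well-defined.
    fn tag_roundtrip_demo() {
        let mut slot: u32 = 7;
        let p: *mut u32 = &mut slot;

        let tagged = strict::map_addr(p, |a| a | 0x1);        // set the low tag bit
        let untagged = strict::map_addr(tagged, |a| a & !0x1); // clear it again

        assert_eq!(strict::addr(tagged) & 0x1, 0x1);
        // Sound: `untagged` still carries the provenance of `&mut slot`.
        assert_eq!(unsafe { *untagged }, 7);
    }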
// These tests are snatched from std as well.
#[cfg(test)]
mod tests {