Diffstat:
 library/core/src/sync/atomic.rs    | 134
 library/core/src/sync/exclusive.rs |   9
 2 files changed, 59 insertions(+), 84 deletions(-)
diff --git a/library/core/src/sync/atomic.rs b/library/core/src/sync/atomic.rs
index 5e2e0c4d8..edc68d6fa 100644
--- a/library/core/src/sync/atomic.rs
+++ b/library/core/src/sync/atomic.rs
@@ -294,7 +294,7 @@ impl AtomicBool {
/// ```
/// use std::sync::atomic::AtomicBool;
///
- /// let atomic_true = AtomicBool::new(true);
+ /// let atomic_true = AtomicBool::new(true);
/// let atomic_false = AtomicBool::new(false);
/// ```
#[inline]
@@ -955,6 +955,14 @@ impl AtomicBool {
/// **Note:** This method is only available on platforms that support atomic
/// operations on `u8`.
///
+ /// # Considerations
+ ///
+ /// This method is not magic; it is not provided by the hardware.
+ /// It is implemented in terms of [`AtomicBool::compare_exchange_weak`], and suffers from the same drawbacks.
+ /// In particular, this method will not circumvent the [ABA Problem].
+ ///
+ /// [ABA Problem]: https://en.wikipedia.org/wiki/ABA_problem
+ ///
/// # Examples
///
/// ```rust
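
For reference, a minimal sketch of how `AtomicBool::fetch_update`, the method whose docs gain the Considerations note above, is typically used (the values are illustrative, not part of this diff):

```rust
use std::sync::atomic::{AtomicBool, Ordering};

let flag = AtomicBool::new(false);

// fetch_update retries a compare_exchange_weak loop until the closure's
// result is stored, or returns Err if the closure yields None.
let prev = flag.fetch_update(Ordering::SeqCst, Ordering::SeqCst, |x| Some(!x));
assert_eq!(prev, Ok(false));
assert_eq!(flag.load(Ordering::SeqCst), true);
```
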
@@ -1171,7 +1179,7 @@ impl<T> AtomicPtr<T> {
/// use std::sync::atomic::{AtomicPtr, Ordering};
///
/// let ptr = &mut 5;
- /// let some_ptr = AtomicPtr::new(ptr);
+ /// let some_ptr = AtomicPtr::new(ptr);
///
/// let value = some_ptr.load(Ordering::Relaxed);
/// ```
@@ -1198,7 +1206,7 @@ impl<T> AtomicPtr<T> {
/// use std::sync::atomic::{AtomicPtr, Ordering};
///
/// let ptr = &mut 5;
- /// let some_ptr = AtomicPtr::new(ptr);
+ /// let some_ptr = AtomicPtr::new(ptr);
///
/// let other_ptr = &mut 10;
///
@@ -1230,7 +1238,7 @@ impl<T> AtomicPtr<T> {
/// use std::sync::atomic::{AtomicPtr, Ordering};
///
/// let ptr = &mut 5;
- /// let some_ptr = AtomicPtr::new(ptr);
+ /// let some_ptr = AtomicPtr::new(ptr);
///
/// let other_ptr = &mut 10;
///
@@ -1282,9 +1290,9 @@ impl<T> AtomicPtr<T> {
/// use std::sync::atomic::{AtomicPtr, Ordering};
///
/// let ptr = &mut 5;
- /// let some_ptr = AtomicPtr::new(ptr);
+ /// let some_ptr = AtomicPtr::new(ptr);
///
- /// let other_ptr = &mut 10;
+ /// let other_ptr = &mut 10;
///
/// let value = some_ptr.compare_and_swap(ptr, other_ptr, Ordering::Relaxed);
/// ```
@@ -1325,9 +1333,9 @@ impl<T> AtomicPtr<T> {
/// use std::sync::atomic::{AtomicPtr, Ordering};
///
/// let ptr = &mut 5;
- /// let some_ptr = AtomicPtr::new(ptr);
+ /// let some_ptr = AtomicPtr::new(ptr);
///
- /// let other_ptr = &mut 10;
+ /// let other_ptr = &mut 10;
///
/// let value = some_ptr.compare_exchange(ptr, other_ptr,
/// Ordering::SeqCst, Ordering::Relaxed);
@@ -1422,6 +1430,14 @@ impl<T> AtomicPtr<T> {
/// **Note:** This method is only available on platforms that support atomic
/// operations on pointers.
///
+ /// # Considerations
+ ///
+ /// This method is not magic; it is not provided by the hardware.
+ /// It is implemented in terms of [`AtomicPtr::compare_exchange_weak`], and suffers from the same drawbacks.
+ /// In particular, this method will not circumvent the [ABA Problem].
+ ///
+ /// [ABA Problem]: https://en.wikipedia.org/wiki/ABA_problem
+ ///
/// # Examples
///
/// ```rust
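
The Considerations note above says `AtomicPtr::fetch_update` is built on `compare_exchange_weak`. A rough sketch of such a loop, with an illustrative helper name (`update_ptr`) and simplified orderings, not the library's actual internals:

```rust
use std::sync::atomic::{AtomicPtr, Ordering};

// Roughly what a fetch_update-style helper does: load, apply the closure,
// and retry compare_exchange_weak until the swap sticks. The loop only
// checks pointer equality, which is why the ABA problem still applies.
fn update_ptr<T>(
    atom: &AtomicPtr<T>,
    mut f: impl FnMut(*mut T) -> Option<*mut T>,
) -> Result<*mut T, *mut T> {
    let mut prev = atom.load(Ordering::Relaxed);
    while let Some(next) = f(prev) {
        match atom.compare_exchange_weak(prev, next, Ordering::SeqCst, Ordering::Relaxed) {
            Ok(old) => return Ok(old),
            Err(observed) => prev = observed,
        }
    }
    Err(prev)
}

let data = &mut 5;
let atom = AtomicPtr::new(data);
let replacement = &mut 10;
let new_ptr: *mut i32 = replacement;
let _ = update_ptr(&atom, |_| Some(new_ptr));
```
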
@@ -1554,8 +1570,8 @@ impl<T> AtomicPtr<T> {
/// Offsets the pointer's address by adding `val` *bytes*, returning the
/// previous pointer.
///
- /// This is equivalent to using [`wrapping_add`] and [`cast`] to atomically
- /// perform `ptr = ptr.cast::<u8>().wrapping_add(val).cast::<T>()`.
+ /// This is equivalent to using [`wrapping_byte_add`] to atomically
+ /// perform `ptr = ptr.wrapping_byte_add(val)`.
///
/// `fetch_byte_add` takes an [`Ordering`] argument which describes the
/// memory ordering of this operation. All ordering modes are possible. Note
@@ -1565,8 +1581,7 @@ impl<T> AtomicPtr<T> {
/// **Note**: This method is only available on platforms that support atomic
/// operations on [`AtomicPtr`].
///
- /// [`wrapping_add`]: pointer::wrapping_add
- /// [`cast`]: pointer::cast
+ /// [`wrapping_byte_add`]: pointer::wrapping_byte_add
///
/// # Examples
///
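
For reference, a small sketch of `fetch_byte_add` in terms of addresses; both the method and `addr` are nightly features at this point (`strict_provenance_atomic_ptr`, `strict_provenance`), so this is illustrative only:

```rust
#![feature(strict_provenance_atomic_ptr, strict_provenance)]
use core::sync::atomic::{AtomicPtr, Ordering};

// Start from a null pointer and bump its *address* by one byte; the pointee
// type (i64, 8 bytes) is irrelevant because the offset is in bytes.
let atom = AtomicPtr::<i64>::new(core::ptr::null_mut());
assert_eq!(atom.fetch_byte_add(1, Ordering::Relaxed).addr(), 0);
assert_eq!(atom.load(Ordering::Relaxed).addr(), 1);
```
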
@@ -1584,23 +1599,15 @@ impl<T> AtomicPtr<T> {
#[unstable(feature = "strict_provenance_atomic_ptr", issue = "99108")]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
pub fn fetch_byte_add(&self, val: usize, order: Ordering) -> *mut T {
- #[cfg(not(bootstrap))]
- // SAFETY: data races are prevented by atomic intrinsics.
- unsafe {
- atomic_add(self.p.get(), core::ptr::invalid_mut(val), order).cast()
- }
- #[cfg(bootstrap)]
// SAFETY: data races are prevented by atomic intrinsics.
- unsafe {
- atomic_add(self.p.get().cast::<usize>(), val, order) as *mut T
- }
+ unsafe { atomic_add(self.p.get(), core::ptr::invalid_mut(val), order).cast() }
}
/// Offsets the pointer's address by subtracting `val` *bytes*, returning the
/// previous pointer.
///
- /// This is equivalent to using [`wrapping_sub`] and [`cast`] to atomically
- /// perform `ptr = ptr.cast::<u8>().wrapping_sub(val).cast::<T>()`.
+ /// This is equivalent to using [`wrapping_byte_sub`] to atomically
+ /// perform `ptr = ptr.wrapping_byte_sub(val)`.
///
/// `fetch_byte_sub` takes an [`Ordering`] argument which describes the
/// memory ordering of this operation. All ordering modes are possible. Note
@@ -1610,8 +1617,7 @@ impl<T> AtomicPtr<T> {
/// **Note**: This method is only available on platforms that support atomic
/// operations on [`AtomicPtr`].
///
- /// [`wrapping_sub`]: pointer::wrapping_sub
- /// [`cast`]: pointer::cast
+ /// [`wrapping_byte_sub`]: pointer::wrapping_byte_sub
///
/// # Examples
///
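
And the mirror-image sketch for `fetch_byte_sub`, again assuming the nightly strict-provenance features:

```rust
#![feature(strict_provenance_atomic_ptr, strict_provenance)]
use core::sync::atomic::{AtomicPtr, Ordering};

// Start at address 1 (an invalid, never-dereferenced pointer) and walk the
// address back down to 0, one byte at a time.
let atom = AtomicPtr::<i64>::new(core::ptr::invalid_mut(1));
assert_eq!(atom.fetch_byte_sub(1, Ordering::Relaxed).addr(), 1);
assert_eq!(atom.load(Ordering::Relaxed).addr(), 0);
```
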
@@ -1628,24 +1634,16 @@ impl<T> AtomicPtr<T> {
#[unstable(feature = "strict_provenance_atomic_ptr", issue = "99108")]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
pub fn fetch_byte_sub(&self, val: usize, order: Ordering) -> *mut T {
- #[cfg(not(bootstrap))]
// SAFETY: data races are prevented by atomic intrinsics.
- unsafe {
- atomic_sub(self.p.get(), core::ptr::invalid_mut(val), order).cast()
- }
- #[cfg(bootstrap)]
- // SAFETY: data races are prevented by atomic intrinsics.
- unsafe {
- atomic_sub(self.p.get().cast::<usize>(), val, order) as *mut T
- }
+ unsafe { atomic_sub(self.p.get(), core::ptr::invalid_mut(val), order).cast() }
}
/// Performs a bitwise "or" operation on the address of the current pointer,
/// and the argument `val`, and stores a pointer with provenance of the
/// current pointer and the resulting address.
///
- /// This is equivalent equivalent to using [`map_addr`] to atomically
- /// perform `ptr = ptr.map_addr(|a| a | val)`. This can be used in tagged
+ /// This is equivalent to using [`map_addr`] to atomically perform
+ /// `ptr = ptr.map_addr(|a| a | val)`. This can be used in tagged
/// pointer schemes to atomically set tag bits.
///
/// **Caveat**: This operation returns the previous value. To compute the
@@ -1687,24 +1685,16 @@ impl<T> AtomicPtr<T> {
#[unstable(feature = "strict_provenance_atomic_ptr", issue = "99108")]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
pub fn fetch_or(&self, val: usize, order: Ordering) -> *mut T {
- #[cfg(not(bootstrap))]
- // SAFETY: data races are prevented by atomic intrinsics.
- unsafe {
- atomic_or(self.p.get(), core::ptr::invalid_mut(val), order).cast()
- }
- #[cfg(bootstrap)]
// SAFETY: data races are prevented by atomic intrinsics.
- unsafe {
- atomic_or(self.p.get().cast::<usize>(), val, order) as *mut T
- }
+ unsafe { atomic_or(self.p.get(), core::ptr::invalid_mut(val), order).cast() }
}
/// Performs a bitwise "and" operation on the address of the current
/// pointer, and the argument `val`, and stores a pointer with provenance of
/// the current pointer and the resulting address.
///
- /// This is equivalent equivalent to using [`map_addr`] to atomically
- /// perform `ptr = ptr.map_addr(|a| a & val)`. This can be used in tagged
+ /// This is equivalent to using [`map_addr`] to atomically perform
+ /// `ptr = ptr.map_addr(|a| a & val)`. This can be used in tagged
/// pointer schemes to atomically unset tag bits.
///
/// **Caveat**: This operation returns the previous value. To compute the
@@ -1745,24 +1735,16 @@ impl<T> AtomicPtr<T> {
#[unstable(feature = "strict_provenance_atomic_ptr", issue = "99108")]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
pub fn fetch_and(&self, val: usize, order: Ordering) -> *mut T {
- #[cfg(not(bootstrap))]
- // SAFETY: data races are prevented by atomic intrinsics.
- unsafe {
- atomic_and(self.p.get(), core::ptr::invalid_mut(val), order).cast()
- }
- #[cfg(bootstrap)]
// SAFETY: data races are prevented by atomic intrinsics.
- unsafe {
- atomic_and(self.p.get().cast::<usize>(), val, order) as *mut T
- }
+ unsafe { atomic_and(self.p.get(), core::ptr::invalid_mut(val), order).cast() }
}
/// Performs a bitwise "xor" operation on the address of the current
/// pointer, and the argument `val`, and stores a pointer with provenance of
/// the current pointer and the resulting address.
///
- /// This is equivalent equivalent to using [`map_addr`] to atomically
- /// perform `ptr = ptr.map_addr(|a| a ^ val)`. This can be used in tagged
+ /// This is equivalent to using [`map_addr`] to atomically perform
+ /// `ptr = ptr.map_addr(|a| a ^ val)`. This can be used in tagged
/// pointer schemes to atomically toggle tag bits.
///
/// **Caveat**: This operation returns the previous value. To compute the
@@ -1801,16 +1783,8 @@ impl<T> AtomicPtr<T> {
#[unstable(feature = "strict_provenance_atomic_ptr", issue = "99108")]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
pub fn fetch_xor(&self, val: usize, order: Ordering) -> *mut T {
- #[cfg(not(bootstrap))]
- // SAFETY: data races are prevented by atomic intrinsics.
- unsafe {
- atomic_xor(self.p.get(), core::ptr::invalid_mut(val), order).cast()
- }
- #[cfg(bootstrap)]
// SAFETY: data races are prevented by atomic intrinsics.
- unsafe {
- atomic_xor(self.p.get().cast::<usize>(), val, order) as *mut T
- }
+ unsafe { atomic_xor(self.p.get(), core::ptr::invalid_mut(val), order).cast() }
}
}
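
The `fetch_or`/`fetch_and`/`fetch_xor` docs above all mention tagged-pointer schemes; a small sketch of that pattern on nightly (the tag value is illustrative, and only the lowest bit is used so any allocation with alignment of at least 2 works):

```rust
#![feature(strict_provenance_atomic_ptr, strict_provenance)]
use std::sync::atomic::{AtomicPtr, Ordering};

// An aligned allocation leaves the lowest address bit free for a tag.
let data = Box::into_raw(Box::new(0u64));
let atom = AtomicPtr::new(data);

atom.fetch_or(1, Ordering::Relaxed); // set the tag bit
assert_eq!(atom.load(Ordering::Relaxed).addr() & 1, 1);

atom.fetch_xor(1, Ordering::Relaxed); // toggle it off again
atom.fetch_and(!1, Ordering::Relaxed); // already clear, stays clear
assert_eq!(atom.load(Ordering::Relaxed), data);

// The docs above guarantee the result keeps the original pointer's provenance,
// so the untagged pointer can be used to free the allocation.
unsafe { drop(Box::from_raw(atom.load(Ordering::Relaxed))) };
```
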
@@ -2552,6 +2526,16 @@ macro_rules! atomic_int {
/// **Note**: This method is only available on platforms that support atomic operations on
#[doc = concat!("[`", $s_int_type, "`].")]
///
+ /// # Considerations
+ ///
+ /// This method is not magic; it is not provided by the hardware.
+ /// It is implemented in terms of
+ #[doc = concat!("[`", stringify!($atomic_type), "::compare_exchange_weak`],")]
+ /// and suffers from the same drawbacks.
+ /// In particular, this method will not circumvent the [ABA Problem].
+ ///
+ /// [ABA Problem]: https://en.wikipedia.org/wiki/ABA_problem
+ ///
/// # Examples
///
/// ```rust
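
The same Considerations note is generated for every integer atomic's `fetch_update`; for reference, a sketch with `AtomicU32` and a saturating increment (values illustrative):

```rust
use std::sync::atomic::{AtomicU32, Ordering};

let counter = AtomicU32::new(u32::MAX - 1);

// A saturating increment: the closure is retried (via compare_exchange_weak)
// if another thread changes the value between the load and the swap.
let saturating_inc = |x: u32| Some(x.saturating_add(1));
assert_eq!(
    counter.fetch_update(Ordering::SeqCst, Ordering::SeqCst, saturating_inc),
    Ok(u32::MAX - 1)
);
assert_eq!(
    counter.fetch_update(Ordering::SeqCst, Ordering::SeqCst, saturating_inc),
    Ok(u32::MAX)
);
assert_eq!(counter.load(Ordering::SeqCst), u32::MAX);
```
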
@@ -3073,30 +3057,22 @@ unsafe fn atomic_compare_exchange<T: Copy>(
let (val, ok) = unsafe {
match (success, failure) {
(Relaxed, Relaxed) => intrinsics::atomic_cxchg_relaxed_relaxed(dst, old, new),
- #[cfg(not(bootstrap))]
(Relaxed, Acquire) => intrinsics::atomic_cxchg_relaxed_acquire(dst, old, new),
- #[cfg(not(bootstrap))]
(Relaxed, SeqCst) => intrinsics::atomic_cxchg_relaxed_seqcst(dst, old, new),
(Acquire, Relaxed) => intrinsics::atomic_cxchg_acquire_relaxed(dst, old, new),
(Acquire, Acquire) => intrinsics::atomic_cxchg_acquire_acquire(dst, old, new),
- #[cfg(not(bootstrap))]
(Acquire, SeqCst) => intrinsics::atomic_cxchg_acquire_seqcst(dst, old, new),
(Release, Relaxed) => intrinsics::atomic_cxchg_release_relaxed(dst, old, new),
- #[cfg(not(bootstrap))]
(Release, Acquire) => intrinsics::atomic_cxchg_release_acquire(dst, old, new),
- #[cfg(not(bootstrap))]
(Release, SeqCst) => intrinsics::atomic_cxchg_release_seqcst(dst, old, new),
(AcqRel, Relaxed) => intrinsics::atomic_cxchg_acqrel_relaxed(dst, old, new),
(AcqRel, Acquire) => intrinsics::atomic_cxchg_acqrel_acquire(dst, old, new),
- #[cfg(not(bootstrap))]
(AcqRel, SeqCst) => intrinsics::atomic_cxchg_acqrel_seqcst(dst, old, new),
(SeqCst, Relaxed) => intrinsics::atomic_cxchg_seqcst_relaxed(dst, old, new),
(SeqCst, Acquire) => intrinsics::atomic_cxchg_seqcst_acquire(dst, old, new),
(SeqCst, SeqCst) => intrinsics::atomic_cxchg_seqcst_seqcst(dst, old, new),
(_, AcqRel) => panic!("there is no such thing as an acquire-release failure ordering"),
(_, Release) => panic!("there is no such thing as a release failure ordering"),
- #[cfg(bootstrap)]
- _ => panic!("a failure ordering can't be stronger than a success ordering"),
}
};
if ok { Ok(val) } else { Err(val) }
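
The arms un-gated above cover the failure orderings that older compilers rejected as stronger than the success ordering, and the removed fallback arm was the panic enforcing that old rule under the bootstrap compiler. For reference, one such combination in use (values illustrative):

```rust
use std::sync::atomic::{AtomicUsize, Ordering};

let a = AtomicUsize::new(5);

// Success uses Relaxed, failure uses Acquire: one of the pairs un-gated above.
let r = a.compare_exchange(5, 10, Ordering::Relaxed, Ordering::Acquire);
assert_eq!(r, Ok(5));
assert_eq!(a.load(Ordering::Relaxed), 10);

// Release and AcqRel remain invalid as *failure* orderings and still panic.
```
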
@@ -3116,30 +3092,22 @@ unsafe fn atomic_compare_exchange_weak<T: Copy>(
let (val, ok) = unsafe {
match (success, failure) {
(Relaxed, Relaxed) => intrinsics::atomic_cxchgweak_relaxed_relaxed(dst, old, new),
- #[cfg(not(bootstrap))]
(Relaxed, Acquire) => intrinsics::atomic_cxchgweak_relaxed_acquire(dst, old, new),
- #[cfg(not(bootstrap))]
(Relaxed, SeqCst) => intrinsics::atomic_cxchgweak_relaxed_seqcst(dst, old, new),
(Acquire, Relaxed) => intrinsics::atomic_cxchgweak_acquire_relaxed(dst, old, new),
(Acquire, Acquire) => intrinsics::atomic_cxchgweak_acquire_acquire(dst, old, new),
- #[cfg(not(bootstrap))]
(Acquire, SeqCst) => intrinsics::atomic_cxchgweak_acquire_seqcst(dst, old, new),
(Release, Relaxed) => intrinsics::atomic_cxchgweak_release_relaxed(dst, old, new),
- #[cfg(not(bootstrap))]
(Release, Acquire) => intrinsics::atomic_cxchgweak_release_acquire(dst, old, new),
- #[cfg(not(bootstrap))]
(Release, SeqCst) => intrinsics::atomic_cxchgweak_release_seqcst(dst, old, new),
(AcqRel, Relaxed) => intrinsics::atomic_cxchgweak_acqrel_relaxed(dst, old, new),
(AcqRel, Acquire) => intrinsics::atomic_cxchgweak_acqrel_acquire(dst, old, new),
- #[cfg(not(bootstrap))]
(AcqRel, SeqCst) => intrinsics::atomic_cxchgweak_acqrel_seqcst(dst, old, new),
(SeqCst, Relaxed) => intrinsics::atomic_cxchgweak_seqcst_relaxed(dst, old, new),
(SeqCst, Acquire) => intrinsics::atomic_cxchgweak_seqcst_acquire(dst, old, new),
(SeqCst, SeqCst) => intrinsics::atomic_cxchgweak_seqcst_seqcst(dst, old, new),
(_, AcqRel) => panic!("there is no such thing as an acquire-release failure ordering"),
(_, Release) => panic!("there is no such thing as a release failure ordering"),
- #[cfg(bootstrap)]
- _ => panic!("a failure ordering can't be stronger than a success ordering"),
}
};
if ok { Ok(val) } else { Err(val) }
diff --git a/library/core/src/sync/exclusive.rs b/library/core/src/sync/exclusive.rs
index a7519ab5a..c65c27500 100644
--- a/library/core/src/sync/exclusive.rs
+++ b/library/core/src/sync/exclusive.rs
@@ -100,6 +100,7 @@ impl<T: Sized> Exclusive<T> {
/// Wrap a value in an `Exclusive`
#[unstable(feature = "exclusive_wrapper", issue = "98407")]
#[must_use]
+ #[inline]
pub const fn new(t: T) -> Self {
Self { inner: t }
}
@@ -107,6 +108,7 @@ impl<T: Sized> Exclusive<T> {
/// Unwrap the value contained in the `Exclusive`
#[unstable(feature = "exclusive_wrapper", issue = "98407")]
#[must_use]
+ #[inline]
pub const fn into_inner(self) -> T {
self.inner
}
@@ -116,6 +118,7 @@ impl<T: ?Sized> Exclusive<T> {
/// Get exclusive access to the underlying value.
#[unstable(feature = "exclusive_wrapper", issue = "98407")]
#[must_use]
+ #[inline]
pub const fn get_mut(&mut self) -> &mut T {
&mut self.inner
}
@@ -128,6 +131,7 @@ impl<T: ?Sized> Exclusive<T> {
/// produce _pinned_ access to the underlying value.
#[unstable(feature = "exclusive_wrapper", issue = "98407")]
#[must_use]
+ #[inline]
pub const fn get_pin_mut(self: Pin<&mut Self>) -> Pin<&mut T> {
// SAFETY: `Exclusive` can only produce `&mut T` if itself is unpinned
// `Pin::map_unchecked_mut` is not const, so we do this conversion manually
@@ -139,6 +143,7 @@ impl<T: ?Sized> Exclusive<T> {
/// building an `Exclusive` with [`Exclusive::new`].
#[unstable(feature = "exclusive_wrapper", issue = "98407")]
#[must_use]
+ #[inline]
pub const fn from_mut(r: &'_ mut T) -> &'_ mut Exclusive<T> {
// SAFETY: repr is ≥ C, so refs have the same layout; and `Exclusive` properties are `&mut`-agnostic
unsafe { &mut *(r as *mut T as *mut Exclusive<T>) }
@@ -149,6 +154,7 @@ impl<T: ?Sized> Exclusive<T> {
/// building an `Exclusive` with [`Exclusive::new`].
#[unstable(feature = "exclusive_wrapper", issue = "98407")]
#[must_use]
+ #[inline]
pub const fn from_pin_mut(r: Pin<&'_ mut T>) -> Pin<&'_ mut Exclusive<T>> {
// SAFETY: `Exclusive` can only produce `&mut T` if itself is unpinned
// `Pin::map_unchecked_mut` is not const, so we do this conversion manually
@@ -158,6 +164,7 @@ impl<T: ?Sized> Exclusive<T> {
#[unstable(feature = "exclusive_wrapper", issue = "98407")]
impl<T> From<T> for Exclusive<T> {
+ #[inline]
fn from(t: T) -> Self {
Self::new(t)
}
@@ -166,7 +173,7 @@ impl<T> From<T> for Exclusive<T> {
#[unstable(feature = "exclusive_wrapper", issue = "98407")]
impl<T: Future + ?Sized> Future for Exclusive<T> {
type Output = T::Output;
-
+ #[inline]
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
self.get_pin_mut().poll(cx)
}
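
For context, the `Exclusive` items gaining `#[inline]` above belong to the nightly `exclusive_wrapper` API; a small sketch of what the wrapper is for (the `assert_sync` helper is illustrative):

```rust
#![feature(exclusive_wrapper)]
use std::cell::Cell;
use std::sync::Exclusive;

// Cell<i32> is !Sync, but Exclusive<Cell<i32>> is Sync: the wrapper only ever
// hands out the inner value through &mut self, so shared references are inert.
fn assert_sync<T: Sync>(_: &T) {}

let mut wrapped = Exclusive::new(Cell::new(0));
assert_sync(&wrapped);

wrapped.get_mut().set(42);
assert_eq!(Exclusive::into_inner(wrapped).get(), 42);
```
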