author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-21 11:44:51 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-21 11:44:51 +0000
commit     9e3c08db40b8916968b9f30096c7be3f00ce9647 (patch)
tree       a68f146d7fa01f0134297619fbe7e33db084e0aa /third_party/rust/atomic/src
parent     Initial commit. (diff)
Adding upstream version 1:115.7.0.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'third_party/rust/atomic/src')
-rw-r--r--  third_party/rust/atomic/src/fallback.rs  196
-rw-r--r--  third_party/rust/atomic/src/lib.rs       735
-rw-r--r--  third_party/rust/atomic/src/ops.rs       666
3 files changed, 1597 insertions(+), 0 deletions(-)
diff --git a/third_party/rust/atomic/src/fallback.rs b/third_party/rust/atomic/src/fallback.rs
new file mode 100644
index 0000000000..8b7e86119b
--- /dev/null
+++ b/third_party/rust/atomic/src/fallback.rs
@@ -0,0 +1,196 @@
+// Copyright 2016 Amanieu d'Antras
+//
+// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
+// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
+// http://opensource.org/licenses/MIT>, at your option. This file may not be
+// copied, modified, or distributed except according to those terms.
+
+use core::cmp;
+use core::mem;
+use core::num::Wrapping;
+use core::ops;
+use core::ptr;
+use core::slice;
+use core::sync::atomic::{self, AtomicUsize, Ordering};
+
+// We use an AtomicUsize instead of an AtomicBool because it performs better
+// on architectures that don't have byte-sized atomics.
+//
+// We give each spinlock its own cache line to avoid false sharing.
+#[repr(align(64))]
+struct SpinLock(AtomicUsize);
+
+impl SpinLock {
+ fn lock(&self) {
+ while self
+ .0
+ .compare_exchange_weak(0, 1, Ordering::Acquire, Ordering::Relaxed)
+ .is_err()
+ {
+ while self.0.load(Ordering::Relaxed) != 0 {
+ atomic::spin_loop_hint();
+ }
+ }
+ }
+
+ fn unlock(&self) {
+ self.0.store(0, Ordering::Release);
+ }
+}
+
+// A big array of spinlocks which we use to guard atomic accesses. A spinlock is
+// chosen based on a hash of the address of the atomic object, which helps to
+// reduce contention compared to a single global lock.
+macro_rules! array {
+ (@accum (0, $($_es:expr),*) -> ($($body:tt)*))
+ => {array!(@as_expr [$($body)*])};
+ (@accum (1, $($es:expr),*) -> ($($body:tt)*))
+ => {array!(@accum (0, $($es),*) -> ($($body)* $($es,)*))};
+ (@accum (2, $($es:expr),*) -> ($($body:tt)*))
+ => {array!(@accum (0, $($es),*) -> ($($body)* $($es,)* $($es,)*))};
+ (@accum (4, $($es:expr),*) -> ($($body:tt)*))
+ => {array!(@accum (2, $($es,)* $($es),*) -> ($($body)*))};
+ (@accum (8, $($es:expr),*) -> ($($body:tt)*))
+ => {array!(@accum (4, $($es,)* $($es),*) -> ($($body)*))};
+ (@accum (16, $($es:expr),*) -> ($($body:tt)*))
+ => {array!(@accum (8, $($es,)* $($es),*) -> ($($body)*))};
+ (@accum (32, $($es:expr),*) -> ($($body:tt)*))
+ => {array!(@accum (16, $($es,)* $($es),*) -> ($($body)*))};
+ (@accum (64, $($es:expr),*) -> ($($body:tt)*))
+ => {array!(@accum (32, $($es,)* $($es),*) -> ($($body)*))};
+
+ (@as_expr $e:expr) => {$e};
+
+ [$e:expr; $n:tt] => { array!(@accum ($n, $e) -> ()) };
+}
+static SPINLOCKS: [SpinLock; 64] = array![SpinLock(AtomicUsize::new(0)); 64];
+
+// Spinlock pointer hashing function from compiler-rt
+#[inline]
+fn lock_for_addr(addr: usize) -> &'static SpinLock {
+ // Disregard the lowest 4 bits. We want all values that may be part of the
+ // same memory operation to hash to the same value and therefore use the same
+ // lock.
+ let mut hash = addr >> 4;
+ // Use the next bits as the basis for the hash
+ let low = hash & (SPINLOCKS.len() - 1);
+ // Now use the high(er) set of bits to perturb the hash, so that we don't
+ // get collisions from atomic fields in a single object
+ hash >>= 16;
+ hash ^= low;
+ // Return a pointer to the lock to use
+ &SPINLOCKS[hash & (SPINLOCKS.len() - 1)]
+}
+
+#[inline]
+fn lock(addr: usize) -> LockGuard {
+ let lock = lock_for_addr(addr);
+ lock.lock();
+ LockGuard(lock)
+}
+
+struct LockGuard(&'static SpinLock);
+impl Drop for LockGuard {
+ #[inline]
+ fn drop(&mut self) {
+ self.0.unlock();
+ }
+}
+
+#[inline]
+pub unsafe fn atomic_load<T>(dst: *mut T) -> T {
+ let _l = lock(dst as usize);
+ ptr::read(dst)
+}
+
+#[inline]
+pub unsafe fn atomic_store<T>(dst: *mut T, val: T) {
+ let _l = lock(dst as usize);
+ ptr::write(dst, val);
+}
+
+#[inline]
+pub unsafe fn atomic_swap<T>(dst: *mut T, val: T) -> T {
+ let _l = lock(dst as usize);
+ ptr::replace(dst, val)
+}
+
+#[inline]
+pub unsafe fn atomic_compare_exchange<T>(dst: *mut T, current: T, new: T) -> Result<T, T> {
+ let _l = lock(dst as usize);
+ let result = ptr::read(dst);
+ // compare_exchange compares with memcmp instead of Eq
+ let a = slice::from_raw_parts(&result as *const _ as *const u8, mem::size_of_val(&result));
+ let b = slice::from_raw_parts(
+ &current as *const _ as *const u8,
+ mem::size_of_val(&current),
+ );
+ if a == b {
+ ptr::write(dst, new);
+ Ok(result)
+ } else {
+ Err(result)
+ }
+}
+
+#[inline]
+pub unsafe fn atomic_add<T: Copy>(dst: *mut T, val: T) -> T
+where
+ Wrapping<T>: ops::Add<Output = Wrapping<T>>,
+{
+ let _l = lock(dst as usize);
+ let result = ptr::read(dst);
+ ptr::write(dst, (Wrapping(result) + Wrapping(val)).0);
+ result
+}
+
+#[inline]
+pub unsafe fn atomic_sub<T: Copy>(dst: *mut T, val: T) -> T
+where
+ Wrapping<T>: ops::Sub<Output = Wrapping<T>>,
+{
+ let _l = lock(dst as usize);
+ let result = ptr::read(dst);
+ ptr::write(dst, (Wrapping(result) - Wrapping(val)).0);
+ result
+}
+
+#[inline]
+pub unsafe fn atomic_and<T: Copy + ops::BitAnd<Output = T>>(dst: *mut T, val: T) -> T {
+ let _l = lock(dst as usize);
+ let result = ptr::read(dst);
+ ptr::write(dst, result & val);
+ result
+}
+
+#[inline]
+pub unsafe fn atomic_or<T: Copy + ops::BitOr<Output = T>>(dst: *mut T, val: T) -> T {
+ let _l = lock(dst as usize);
+ let result = ptr::read(dst);
+ ptr::write(dst, result | val);
+ result
+}
+
+#[inline]
+pub unsafe fn atomic_xor<T: Copy + ops::BitXor<Output = T>>(dst: *mut T, val: T) -> T {
+ let _l = lock(dst as usize);
+ let result = ptr::read(dst);
+ ptr::write(dst, result ^ val);
+ result
+}
+
+#[inline]
+pub unsafe fn atomic_min<T: Copy + cmp::Ord>(dst: *mut T, val: T) -> T {
+ let _l = lock(dst as usize);
+ let result = ptr::read(dst);
+ ptr::write(dst, cmp::min(result, val));
+ result
+}
+
+#[inline]
+pub unsafe fn atomic_max<T: Copy + cmp::Ord>(dst: *mut T, val: T) -> T {
+ let _l = lock(dst as usize);
+ let result = ptr::read(dst);
+ ptr::write(dst, cmp::max(result, val));
+ result
+}
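
The fallback path above emulates atomicity for oversized or misaligned types by taking one of 64 cache-line-padded spinlocks, chosen from a hash of the object's address so that nearby fields rarely contend on the same lock. Below is a minimal standalone sketch of that lock-striping idea, assuming a 64-entry table; the constant and function names are illustrative and not part of the vendored crate.

// Sketch of the address-to-stripe hashing used by fallback.rs (illustrative names).
const NUM_LOCKS: usize = 64;

fn lock_index(addr: usize) -> usize {
    // Drop the low 4 bits so accesses within one small object share a stripe,
    // then fold in higher bits to spread distinct objects across the table.
    let hash = addr >> 4;
    let low = hash & (NUM_LOCKS - 1);
    ((hash >> 16) ^ low) & (NUM_LOCKS - 1)
}

fn main() {
    let x = 0u64;
    let y = [0u8; 32];
    // Different objects usually, though not always, land on different stripes.
    println!("x -> {}, y -> {}",
             lock_index(&x as *const _ as usize),
             lock_index(&y as *const _ as usize));
}
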
diff --git a/third_party/rust/atomic/src/lib.rs b/third_party/rust/atomic/src/lib.rs
new file mode 100644
index 0000000000..d6b19a651b
--- /dev/null
+++ b/third_party/rust/atomic/src/lib.rs
@@ -0,0 +1,735 @@
+// Copyright 2016 Amanieu d'Antras
+//
+// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
+// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
+// http://opensource.org/licenses/MIT>, at your option. This file may not be
+// copied, modified, or distributed except according to those terms.
+
+//! Generic `Atomic<T>` wrapper type
+//!
+//! Atomic types provide primitive shared-memory communication between
+//! threads, and are the building blocks of other concurrent types.
+//!
+//! This library defines a generic atomic wrapper type `Atomic<T>` for all
+//! `T: Copy` types.
+//! Atomic types present operations that, when used correctly, synchronize
+//! updates between threads.
+//!
+//! Each method takes an `Ordering` which represents the strength of
+//! the memory barrier for that operation. These orderings are the
+//! same as [LLVM atomic orderings][1].
+//!
+//! [1]: http://llvm.org/docs/LangRef.html#memory-model-for-concurrent-operations
+//!
+//! Atomic variables are safe to share between threads (they implement `Sync`)
+//! but they do not themselves provide the mechanism for sharing. The most
+//! common way to share an atomic variable is to put it into an `Arc` (an
+//! atomically-reference-counted shared pointer).
+//!
+//! Most atomic types may be stored in static variables, initialized using
+//! the `const fn` constructors (only available on nightly). Atomic statics
+//! are often used for lazy global initialization.
+
+#![warn(missing_docs)]
+#![no_std]
+#![cfg_attr(
+ feature = "nightly", feature(const_fn, cfg_target_has_atomic, atomic_min_max)
+)]
+
+#[cfg(any(test, feature = "std"))]
+#[macro_use]
+extern crate std;
+
+// Re-export some useful definitions from libcore
+pub use core::sync::atomic::{fence, Ordering};
+
+use core::cell::UnsafeCell;
+use core::fmt;
+
+#[cfg(feature = "std")]
+use std::panic::RefUnwindSafe;
+
+mod fallback;
+mod ops;
+
+/// A generic atomic wrapper type which allows an object to be safely shared
+/// between threads.
+pub struct Atomic<T: Copy> {
+ v: UnsafeCell<T>,
+}
+
+// Atomic<T> is only Sync if T is Send
+unsafe impl<T: Copy + Send> Sync for Atomic<T> {}
+
+// Given that atomicity is guaranteed, Atomic<T> is RefUnwindSafe if T is
+//
+// This is trivially correct for native lock-free atomic types. For those whose
+// atomicity is emulated using a spinlock, it is still correct because the
+// `Atomic` API does not allow doing any panic-inducing operation after writing
+// to the target object.
+#[cfg(feature = "std")]
+impl<T: Copy + RefUnwindSafe> RefUnwindSafe for Atomic<T> {}
+
+impl<T: Copy + Default> Default for Atomic<T> {
+ #[inline]
+ fn default() -> Self {
+ Self::new(Default::default())
+ }
+}
+
+impl<T: Copy + fmt::Debug> fmt::Debug for Atomic<T> {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ f.debug_tuple("Atomic")
+ .field(&self.load(Ordering::SeqCst))
+ .finish()
+ }
+}
+
+impl<T: Copy> Atomic<T> {
+ /// Creates a new `Atomic`.
+ #[inline]
+ #[cfg(feature = "nightly")]
+ pub const fn new(v: T) -> Atomic<T> {
+ Atomic {
+ v: UnsafeCell::new(v),
+ }
+ }
+
+ /// Creates a new `Atomic`.
+ #[inline]
+ #[cfg(not(feature = "nightly"))]
+ pub fn new(v: T) -> Atomic<T> {
+ Atomic {
+ v: UnsafeCell::new(v),
+ }
+ }
+
+ /// Checks if `Atomic` objects of this type are lock-free.
+ ///
+ /// If an `Atomic` is not lock-free then it may be implemented using locks
+ /// internally, which makes it unsuitable for some situations (such as
+ /// communicating with a signal handler).
+ #[inline]
+ #[cfg(feature = "nightly")]
+ pub const fn is_lock_free() -> bool {
+ ops::atomic_is_lock_free::<T>()
+ }
+
+ /// Checks if `Atomic` objects of this type are lock-free.
+ ///
+ /// If an `Atomic` is not lock-free then it may be implemented using locks
+ /// internally, which makes it unsuitable for some situations (such as
+ /// communicating with a signal handler).
+ #[inline]
+ #[cfg(not(feature = "nightly"))]
+ pub fn is_lock_free() -> bool {
+ ops::atomic_is_lock_free::<T>()
+ }
+
+ /// Returns a mutable reference to the underlying type.
+ ///
+ /// This is safe because the mutable reference guarantees that no other threads are
+ /// concurrently accessing the atomic data.
+ #[inline]
+ pub fn get_mut(&mut self) -> &mut T {
+ unsafe { &mut *self.v.get() }
+ }
+
+ /// Consumes the atomic and returns the contained value.
+ ///
+ /// This is safe because passing `self` by value guarantees that no other threads are
+ /// concurrently accessing the atomic data.
+ #[inline]
+ pub fn into_inner(self) -> T {
+ self.v.into_inner()
+ }
+
+ /// Loads a value from the `Atomic`.
+ ///
+ /// `load` takes an `Ordering` argument which describes the memory ordering
+ /// of this operation.
+ ///
+ /// # Panics
+ ///
+ /// Panics if `order` is `Release` or `AcqRel`.
+ #[inline]
+ pub fn load(&self, order: Ordering) -> T {
+ unsafe { ops::atomic_load(self.v.get(), order) }
+ }
+
+ /// Stores a value into the `Atomic`.
+ ///
+ /// `store` takes an `Ordering` argument which describes the memory ordering
+ /// of this operation.
+ ///
+ /// # Panics
+ ///
+ /// Panics if `order` is `Acquire` or `AcqRel`.
+ #[inline]
+ pub fn store(&self, val: T, order: Ordering) {
+ unsafe {
+ ops::atomic_store(self.v.get(), val, order);
+ }
+ }
+
+ /// Stores a value into the `Atomic`, returning the old value.
+ ///
+ /// `swap` takes an `Ordering` argument which describes the memory ordering
+ /// of this operation.
+ #[inline]
+ pub fn swap(&self, val: T, order: Ordering) -> T {
+ unsafe { ops::atomic_swap(self.v.get(), val, order) }
+ }
+
+ /// Stores a value into the `Atomic` if the current value is the same as the
+ /// `current` value.
+ ///
+ /// The return value is a result indicating whether the new value was
+ /// written and containing the previous value. On success this value is
+ /// guaranteed to be equal to `current`.
+ ///
+ /// `compare_exchange` takes two `Ordering` arguments to describe the memory
+ /// ordering of this operation. The first describes the required ordering if
+ /// the operation succeeds while the second describes the required ordering
+ /// when the operation fails. The failure ordering can't be `Release` or
+ /// `AcqRel` and must be equivalent to or weaker than the success ordering.
+ #[inline]
+ pub fn compare_exchange(
+ &self,
+ current: T,
+ new: T,
+ success: Ordering,
+ failure: Ordering,
+ ) -> Result<T, T> {
+ unsafe { ops::atomic_compare_exchange(self.v.get(), current, new, success, failure) }
+ }
+
+ /// Stores a value into the `Atomic` if the current value is the same as the
+ /// `current` value.
+ ///
+ /// Unlike `compare_exchange`, this function is allowed to spuriously fail
+ /// even when the comparison succeeds, which can result in more efficient
+ /// code on some platforms. The return value is a result indicating whether
+ /// the new value was written and containing the previous value.
+ ///
+ /// `compare_exchange_weak` takes two `Ordering` arguments to describe the memory
+ /// ordering of this operation. The first describes the required ordering if
+ /// the operation succeeds while the second describes the required ordering
+ /// when the operation fails. The failure ordering can't be `Release` or
+ /// `AcqRel` and must be equivalent to or weaker than the success ordering.
+ #[inline]
+ pub fn compare_exchange_weak(
+ &self,
+ current: T,
+ new: T,
+ success: Ordering,
+ failure: Ordering,
+ ) -> Result<T, T> {
+ unsafe { ops::atomic_compare_exchange_weak(self.v.get(), current, new, success, failure) }
+ }
+}
+
+impl Atomic<bool> {
+ /// Logical "and" with a boolean value.
+ ///
+ /// Performs a logical "and" operation on the current value and the argument
+ /// `val`, and sets the new value to the result.
+ ///
+ /// Returns the previous value.
+ #[inline]
+ pub fn fetch_and(&self, val: bool, order: Ordering) -> bool {
+ unsafe { ops::atomic_and(self.v.get(), val, order) }
+ }
+
+ /// Logical "or" with a boolean value.
+ ///
+ /// Performs a logical "or" operation on the current value and the argument
+ /// `val`, and sets the new value to the result.
+ ///
+ /// Returns the previous value.
+ #[inline]
+ pub fn fetch_or(&self, val: bool, order: Ordering) -> bool {
+ unsafe { ops::atomic_or(self.v.get(), val, order) }
+ }
+
+ /// Logical "xor" with a boolean value.
+ ///
+ /// Performs a logical "xor" operation on the current value and the argument
+ /// `val`, and sets the new value to the result.
+ ///
+ /// Returns the previous value.
+ #[inline]
+ pub fn fetch_xor(&self, val: bool, order: Ordering) -> bool {
+ unsafe { ops::atomic_xor(self.v.get(), val, order) }
+ }
+}
+
+macro_rules! atomic_ops_common {
+ ($($t:ty)*) => ($(
+ impl Atomic<$t> {
+ /// Add to the current value, returning the previous value.
+ #[inline]
+ pub fn fetch_add(&self, val: $t, order: Ordering) -> $t {
+ unsafe { ops::atomic_add(self.v.get(), val, order) }
+ }
+
+ /// Subtract from the current value, returning the previous value.
+ #[inline]
+ pub fn fetch_sub(&self, val: $t, order: Ordering) -> $t {
+ unsafe { ops::atomic_sub(self.v.get(), val, order) }
+ }
+
+ /// Bitwise and with the current value, returning the previous value.
+ #[inline]
+ pub fn fetch_and(&self, val: $t, order: Ordering) -> $t {
+ unsafe { ops::atomic_and(self.v.get(), val, order) }
+ }
+
+ /// Bitwise or with the current value, returning the previous value.
+ #[inline]
+ pub fn fetch_or(&self, val: $t, order: Ordering) -> $t {
+ unsafe { ops::atomic_or(self.v.get(), val, order) }
+ }
+
+ /// Bitwise xor with the current value, returning the previous value.
+ #[inline]
+ pub fn fetch_xor(&self, val: $t, order: Ordering) -> $t {
+ unsafe { ops::atomic_xor(self.v.get(), val, order) }
+ }
+ }
+ )*);
+}
+macro_rules! atomic_ops_signed {
+ ($($t:ty)*) => (
+ atomic_ops_common!{ $($t)* }
+ $(
+ impl Atomic<$t> {
+ /// Minimum with the current value.
+ #[inline]
+ pub fn fetch_min(&self, val: $t, order: Ordering) -> $t {
+ unsafe { ops::atomic_min(self.v.get(), val, order) }
+ }
+
+ /// Maximum with the current value.
+ #[inline]
+ pub fn fetch_max(&self, val: $t, order: Ordering) -> $t {
+ unsafe { ops::atomic_max(self.v.get(), val, order) }
+ }
+ }
+ )*
+ );
+}
+macro_rules! atomic_ops_unsigned {
+ ($($t:ty)*) => (
+ atomic_ops_common!{ $($t)* }
+ $(
+ impl Atomic<$t> {
+ /// Minimum with the current value.
+ #[inline]
+ pub fn fetch_min(&self, val: $t, order: Ordering) -> $t {
+ unsafe { ops::atomic_umin(self.v.get(), val, order) }
+ }
+
+ /// Maximum with the current value.
+ #[inline]
+ pub fn fetch_max(&self, val: $t, order: Ordering) -> $t {
+ unsafe { ops::atomic_umax(self.v.get(), val, order) }
+ }
+ }
+ )*
+ );
+}
+atomic_ops_signed!{ i8 i16 i32 i64 isize i128 }
+atomic_ops_unsigned!{ u8 u16 u32 u64 usize u128 }
+
+#[cfg(test)]
+mod tests {
+ use core::mem;
+ use Atomic;
+ use Ordering::*;
+
+ #[derive(Copy, Clone, Eq, PartialEq, Debug, Default)]
+ struct Foo(u8, u8);
+ #[derive(Copy, Clone, Eq, PartialEq, Debug, Default)]
+ struct Bar(u64, u64);
+ #[derive(Copy, Clone, Eq, PartialEq, Debug, Default)]
+ struct Quux(u32);
+
+ #[test]
+ fn atomic_bool() {
+ let a = Atomic::new(false);
+ assert_eq!(Atomic::<bool>::is_lock_free(), cfg!(feature = "nightly"));
+ assert_eq!(format!("{:?}", a), "Atomic(false)");
+ assert_eq!(a.load(SeqCst), false);
+ a.store(true, SeqCst);
+ assert_eq!(a.swap(false, SeqCst), true);
+ assert_eq!(a.compare_exchange(true, false, SeqCst, SeqCst), Err(false));
+ assert_eq!(a.compare_exchange(false, true, SeqCst, SeqCst), Ok(false));
+ assert_eq!(a.fetch_and(false, SeqCst), true);
+ assert_eq!(a.fetch_or(true, SeqCst), false);
+ assert_eq!(a.fetch_xor(false, SeqCst), true);
+ assert_eq!(a.load(SeqCst), true);
+ }
+
+ #[test]
+ fn atomic_i8() {
+ let a = Atomic::new(0i8);
+ assert_eq!(
+ Atomic::<i8>::is_lock_free(),
+ cfg!(any(
+ target_pointer_width = "8",
+ all(feature = "nightly", target_has_atomic = "8")
+ ))
+ );
+ assert_eq!(format!("{:?}", a), "Atomic(0)");
+ assert_eq!(a.load(SeqCst), 0);
+ a.store(1, SeqCst);
+ assert_eq!(a.swap(2, SeqCst), 1);
+ assert_eq!(a.compare_exchange(5, 45, SeqCst, SeqCst), Err(2));
+ assert_eq!(a.compare_exchange(2, 3, SeqCst, SeqCst), Ok(2));
+ assert_eq!(a.fetch_add(123, SeqCst), 3);
+ // Make sure overflows are handled correctly
+ assert_eq!(a.fetch_sub(-56, SeqCst), 126);
+ assert_eq!(a.fetch_and(7, SeqCst), -74);
+ assert_eq!(a.fetch_or(64, SeqCst), 6);
+ assert_eq!(a.fetch_xor(1, SeqCst), 70);
+ assert_eq!(a.fetch_min(30, SeqCst), 71);
+ assert_eq!(a.fetch_max(-25, SeqCst), 30);
+ assert_eq!(a.load(SeqCst), 30);
+ }
+
+ #[test]
+ fn atomic_i16() {
+ let a = Atomic::new(0i16);
+ assert_eq!(
+ Atomic::<i16>::is_lock_free(),
+ cfg!(any(
+ target_pointer_width = "16",
+ all(feature = "nightly", target_has_atomic = "16")
+ ))
+ );
+ assert_eq!(format!("{:?}", a), "Atomic(0)");
+ assert_eq!(a.load(SeqCst), 0);
+ a.store(1, SeqCst);
+ assert_eq!(a.swap(2, SeqCst), 1);
+ assert_eq!(a.compare_exchange(5, 45, SeqCst, SeqCst), Err(2));
+ assert_eq!(a.compare_exchange(2, 3, SeqCst, SeqCst), Ok(2));
+ assert_eq!(a.fetch_add(123, SeqCst), 3);
+ assert_eq!(a.fetch_sub(-56, SeqCst), 126);
+ assert_eq!(a.fetch_and(7, SeqCst), 182);
+ assert_eq!(a.fetch_or(64, SeqCst), 6);
+ assert_eq!(a.fetch_xor(1, SeqCst), 70);
+ assert_eq!(a.fetch_min(30, SeqCst), 71);
+ assert_eq!(a.fetch_max(-25, SeqCst), 30);
+ assert_eq!(a.load(SeqCst), 30);
+ }
+
+ #[test]
+ fn atomic_i32() {
+ let a = Atomic::new(0i32);
+ assert_eq!(
+ Atomic::<i32>::is_lock_free(),
+ cfg!(any(
+ target_pointer_width = "32",
+ all(feature = "nightly", target_has_atomic = "32")
+ ))
+ );
+ assert_eq!(format!("{:?}", a), "Atomic(0)");
+ assert_eq!(a.load(SeqCst), 0);
+ a.store(1, SeqCst);
+ assert_eq!(a.swap(2, SeqCst), 1);
+ assert_eq!(a.compare_exchange(5, 45, SeqCst, SeqCst), Err(2));
+ assert_eq!(a.compare_exchange(2, 3, SeqCst, SeqCst), Ok(2));
+ assert_eq!(a.fetch_add(123, SeqCst), 3);
+ assert_eq!(a.fetch_sub(-56, SeqCst), 126);
+ assert_eq!(a.fetch_and(7, SeqCst), 182);
+ assert_eq!(a.fetch_or(64, SeqCst), 6);
+ assert_eq!(a.fetch_xor(1, SeqCst), 70);
+ assert_eq!(a.fetch_min(30, SeqCst), 71);
+ assert_eq!(a.fetch_max(-25, SeqCst), 30);
+ assert_eq!(a.load(SeqCst), 30);
+ }
+
+ #[test]
+ fn atomic_i64() {
+ let a = Atomic::new(0i64);
+ assert_eq!(
+ Atomic::<i64>::is_lock_free(),
+ cfg!(any(
+ target_pointer_width = "64",
+ all(feature = "nightly", target_has_atomic = "64")
+ )) && mem::align_of::<i64>() == 8
+ );
+ assert_eq!(format!("{:?}", a), "Atomic(0)");
+ assert_eq!(a.load(SeqCst), 0);
+ a.store(1, SeqCst);
+ assert_eq!(a.swap(2, SeqCst), 1);
+ assert_eq!(a.compare_exchange(5, 45, SeqCst, SeqCst), Err(2));
+ assert_eq!(a.compare_exchange(2, 3, SeqCst, SeqCst), Ok(2));
+ assert_eq!(a.fetch_add(123, SeqCst), 3);
+ assert_eq!(a.fetch_sub(-56, SeqCst), 126);
+ assert_eq!(a.fetch_and(7, SeqCst), 182);
+ assert_eq!(a.fetch_or(64, SeqCst), 6);
+ assert_eq!(a.fetch_xor(1, SeqCst), 70);
+ assert_eq!(a.fetch_min(30, SeqCst), 71);
+ assert_eq!(a.fetch_max(-25, SeqCst), 30);
+ assert_eq!(a.load(SeqCst), 30);
+ }
+
+ #[test]
+ fn atomic_i128() {
+ let a = Atomic::new(0i128);
+ assert_eq!(
+ Atomic::<i128>::is_lock_free(),
+ cfg!(any(
+ target_pointer_width = "128",
+ all(feature = "nightly", target_has_atomic = "128")
+ ))
+ );
+ assert_eq!(format!("{:?}", a), "Atomic(0)");
+ assert_eq!(a.load(SeqCst), 0);
+ a.store(1, SeqCst);
+ assert_eq!(a.swap(2, SeqCst), 1);
+ assert_eq!(a.compare_exchange(5, 45, SeqCst, SeqCst), Err(2));
+ assert_eq!(a.compare_exchange(2, 3, SeqCst, SeqCst), Ok(2));
+ assert_eq!(a.fetch_add(123, SeqCst), 3);
+ assert_eq!(a.fetch_sub(-56, SeqCst), 126);
+ assert_eq!(a.fetch_and(7, SeqCst), 182);
+ assert_eq!(a.fetch_or(64, SeqCst), 6);
+ assert_eq!(a.fetch_xor(1, SeqCst), 70);
+ assert_eq!(a.fetch_min(30, SeqCst), 71);
+ assert_eq!(a.fetch_max(-25, SeqCst), 30);
+ assert_eq!(a.load(SeqCst), 30);
+ }
+
+ #[test]
+ fn atomic_isize() {
+ let a = Atomic::new(0isize);
+ assert!(Atomic::<isize>::is_lock_free());
+ assert_eq!(format!("{:?}", a), "Atomic(0)");
+ assert_eq!(a.load(SeqCst), 0);
+ a.store(1, SeqCst);
+ assert_eq!(a.swap(2, SeqCst), 1);
+ assert_eq!(a.compare_exchange(5, 45, SeqCst, SeqCst), Err(2));
+ assert_eq!(a.compare_exchange(2, 3, SeqCst, SeqCst), Ok(2));
+ assert_eq!(a.fetch_add(123, SeqCst), 3);
+ assert_eq!(a.fetch_sub(-56, SeqCst), 126);
+ assert_eq!(a.fetch_and(7, SeqCst), 182);
+ assert_eq!(a.fetch_or(64, SeqCst), 6);
+ assert_eq!(a.fetch_xor(1, SeqCst), 70);
+ assert_eq!(a.fetch_min(30, SeqCst), 71);
+ assert_eq!(a.fetch_max(-25, SeqCst), 30);
+ assert_eq!(a.load(SeqCst), 30);
+ }
+
+ #[test]
+ fn atomic_u8() {
+ let a = Atomic::new(0u8);
+ assert_eq!(
+ Atomic::<u8>::is_lock_free(),
+ cfg!(any(
+ target_pointer_width = "8",
+ all(feature = "nightly", target_has_atomic = "8")
+ ))
+ );
+ assert_eq!(format!("{:?}", a), "Atomic(0)");
+ assert_eq!(a.load(SeqCst), 0);
+ a.store(1, SeqCst);
+ assert_eq!(a.swap(2, SeqCst), 1);
+ assert_eq!(a.compare_exchange(5, 45, SeqCst, SeqCst), Err(2));
+ assert_eq!(a.compare_exchange(2, 3, SeqCst, SeqCst), Ok(2));
+ assert_eq!(a.fetch_add(123, SeqCst), 3);
+ assert_eq!(a.fetch_sub(56, SeqCst), 126);
+ assert_eq!(a.fetch_and(7, SeqCst), 70);
+ assert_eq!(a.fetch_or(64, SeqCst), 6);
+ assert_eq!(a.fetch_xor(1, SeqCst), 70);
+ assert_eq!(a.fetch_min(30, SeqCst), 71);
+ assert_eq!(a.fetch_max(25, SeqCst), 30);
+ assert_eq!(a.load(SeqCst), 30);
+ }
+
+ #[test]
+ fn atomic_u16() {
+ let a = Atomic::new(0u16);
+ assert_eq!(
+ Atomic::<u16>::is_lock_free(),
+ cfg!(any(
+ target_pointer_width = "16",
+ all(feature = "nightly", target_has_atomic = "16")
+ ))
+ );
+ assert_eq!(format!("{:?}", a), "Atomic(0)");
+ assert_eq!(a.load(SeqCst), 0);
+ a.store(1, SeqCst);
+ assert_eq!(a.swap(2, SeqCst), 1);
+ assert_eq!(a.compare_exchange(5, 45, SeqCst, SeqCst), Err(2));
+ assert_eq!(a.compare_exchange(2, 3, SeqCst, SeqCst), Ok(2));
+ assert_eq!(a.fetch_add(123, SeqCst), 3);
+ assert_eq!(a.fetch_sub(56, SeqCst), 126);
+ assert_eq!(a.fetch_and(7, SeqCst), 70);
+ assert_eq!(a.fetch_or(64, SeqCst), 6);
+ assert_eq!(a.fetch_xor(1, SeqCst), 70);
+ assert_eq!(a.fetch_min(30, SeqCst), 71);
+ assert_eq!(a.fetch_max(25, SeqCst), 30);
+ assert_eq!(a.load(SeqCst), 30);
+ }
+
+ #[test]
+ fn atomic_u32() {
+ let a = Atomic::new(0u32);
+ assert_eq!(
+ Atomic::<u32>::is_lock_free(),
+ cfg!(any(
+ target_pointer_width = "32",
+ all(feature = "nightly", target_has_atomic = "32")
+ ))
+ );
+ assert_eq!(format!("{:?}", a), "Atomic(0)");
+ assert_eq!(a.load(SeqCst), 0);
+ a.store(1, SeqCst);
+ assert_eq!(a.swap(2, SeqCst), 1);
+ assert_eq!(a.compare_exchange(5, 45, SeqCst, SeqCst), Err(2));
+ assert_eq!(a.compare_exchange(2, 3, SeqCst, SeqCst), Ok(2));
+ assert_eq!(a.fetch_add(123, SeqCst), 3);
+ assert_eq!(a.fetch_sub(56, SeqCst), 126);
+ assert_eq!(a.fetch_and(7, SeqCst), 70);
+ assert_eq!(a.fetch_or(64, SeqCst), 6);
+ assert_eq!(a.fetch_xor(1, SeqCst), 70);
+ assert_eq!(a.fetch_min(30, SeqCst), 71);
+ assert_eq!(a.fetch_max(25, SeqCst), 30);
+ assert_eq!(a.load(SeqCst), 30);
+ }
+
+ #[test]
+ fn atomic_u64() {
+ let a = Atomic::new(0u64);
+ assert_eq!(
+ Atomic::<u64>::is_lock_free(),
+ cfg!(any(
+ target_pointer_width = "64",
+ all(feature = "nightly", target_has_atomic = "64")
+ )) && mem::align_of::<u64>() == 8
+ );
+ assert_eq!(format!("{:?}", a), "Atomic(0)");
+ assert_eq!(a.load(SeqCst), 0);
+ a.store(1, SeqCst);
+ assert_eq!(a.swap(2, SeqCst), 1);
+ assert_eq!(a.compare_exchange(5, 45, SeqCst, SeqCst), Err(2));
+ assert_eq!(a.compare_exchange(2, 3, SeqCst, SeqCst), Ok(2));
+ assert_eq!(a.fetch_add(123, SeqCst), 3);
+ assert_eq!(a.fetch_sub(56, SeqCst), 126);
+ assert_eq!(a.fetch_and(7, SeqCst), 70);
+ assert_eq!(a.fetch_or(64, SeqCst), 6);
+ assert_eq!(a.fetch_xor(1, SeqCst), 70);
+ assert_eq!(a.fetch_min(30, SeqCst), 71);
+ assert_eq!(a.fetch_max(25, SeqCst), 30);
+ assert_eq!(a.load(SeqCst), 30);
+ }
+
+ #[test]
+ fn atomic_u128() {
+ let a = Atomic::new(0u128);
+ assert_eq!(
+ Atomic::<u128>::is_lock_free(),
+ cfg!(any(
+ target_pointer_width = "128",
+ all(feature = "nightly", target_has_atomic = "128")
+ ))
+ );
+ assert_eq!(format!("{:?}", a), "Atomic(0)");
+ assert_eq!(a.load(SeqCst), 0);
+ a.store(1, SeqCst);
+ assert_eq!(a.swap(2, SeqCst), 1);
+ assert_eq!(a.compare_exchange(5, 45, SeqCst, SeqCst), Err(2));
+ assert_eq!(a.compare_exchange(2, 3, SeqCst, SeqCst), Ok(2));
+ assert_eq!(a.fetch_add(123, SeqCst), 3);
+ assert_eq!(a.fetch_sub(56, SeqCst), 126);
+ assert_eq!(a.fetch_and(7, SeqCst), 70);
+ assert_eq!(a.fetch_or(64, SeqCst), 6);
+ assert_eq!(a.fetch_xor(1, SeqCst), 70);
+ assert_eq!(a.fetch_min(30, SeqCst), 71);
+ assert_eq!(a.fetch_max(25, SeqCst), 30);
+ assert_eq!(a.load(SeqCst), 30);
+ }
+
+ #[test]
+ fn atomic_usize() {
+ let a = Atomic::new(0usize);
+ assert!(Atomic::<usize>::is_lock_free());
+ assert_eq!(format!("{:?}", a), "Atomic(0)");
+ assert_eq!(a.load(SeqCst), 0);
+ a.store(1, SeqCst);
+ assert_eq!(a.swap(2, SeqCst), 1);
+ assert_eq!(a.compare_exchange(5, 45, SeqCst, SeqCst), Err(2));
+ assert_eq!(a.compare_exchange(2, 3, SeqCst, SeqCst), Ok(2));
+ assert_eq!(a.fetch_add(123, SeqCst), 3);
+ assert_eq!(a.fetch_sub(56, SeqCst), 126);
+ assert_eq!(a.fetch_and(7, SeqCst), 70);
+ assert_eq!(a.fetch_or(64, SeqCst), 6);
+ assert_eq!(a.fetch_xor(1, SeqCst), 70);
+ assert_eq!(a.fetch_min(30, SeqCst), 71);
+ assert_eq!(a.fetch_max(25, SeqCst), 30);
+ assert_eq!(a.load(SeqCst), 30);
+ }
+
+ #[test]
+ fn atomic_foo() {
+ let a = Atomic::default();
+ assert_eq!(Atomic::<Foo>::is_lock_free(), false);
+ assert_eq!(format!("{:?}", a), "Atomic(Foo(0, 0))");
+ assert_eq!(a.load(SeqCst), Foo(0, 0));
+ a.store(Foo(1, 1), SeqCst);
+ assert_eq!(a.swap(Foo(2, 2), SeqCst), Foo(1, 1));
+ assert_eq!(
+ a.compare_exchange(Foo(5, 5), Foo(45, 45), SeqCst, SeqCst),
+ Err(Foo(2, 2))
+ );
+ assert_eq!(
+ a.compare_exchange(Foo(2, 2), Foo(3, 3), SeqCst, SeqCst),
+ Ok(Foo(2, 2))
+ );
+ assert_eq!(a.load(SeqCst), Foo(3, 3));
+ }
+
+ #[test]
+ fn atomic_bar() {
+ let a = Atomic::default();
+ assert_eq!(Atomic::<Bar>::is_lock_free(), false);
+ assert_eq!(format!("{:?}", a), "Atomic(Bar(0, 0))");
+ assert_eq!(a.load(SeqCst), Bar(0, 0));
+ a.store(Bar(1, 1), SeqCst);
+ assert_eq!(a.swap(Bar(2, 2), SeqCst), Bar(1, 1));
+ assert_eq!(
+ a.compare_exchange(Bar(5, 5), Bar(45, 45), SeqCst, SeqCst),
+ Err(Bar(2, 2))
+ );
+ assert_eq!(
+ a.compare_exchange(Bar(2, 2), Bar(3, 3), SeqCst, SeqCst),
+ Ok(Bar(2, 2))
+ );
+ assert_eq!(a.load(SeqCst), Bar(3, 3));
+ }
+
+ #[test]
+ fn atomic_quxx() {
+ let a = Atomic::default();
+ assert_eq!(
+ Atomic::<Quux>::is_lock_free(),
+ cfg!(any(feature = "nightly", target_pointer_width = "32"))
+ );
+ assert_eq!(format!("{:?}", a), "Atomic(Quux(0))");
+ assert_eq!(a.load(SeqCst), Quux(0));
+ a.store(Quux(1), SeqCst);
+ assert_eq!(a.swap(Quux(2), SeqCst), Quux(1));
+ assert_eq!(
+ a.compare_exchange(Quux(5), Quux(45), SeqCst, SeqCst),
+ Err(Quux(2))
+ );
+ assert_eq!(
+ a.compare_exchange(Quux(2), Quux(3), SeqCst, SeqCst),
+ Ok(Quux(2))
+ );
+ assert_eq!(a.load(SeqCst), Quux(3));
+ }
+}
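
To make the public API above concrete, here is a hedged usage sketch of `Atomic<T>` that mirrors the crate docs and unit tests: a counter shared through an `Arc`, plus a `compare_exchange` whose return value carries the previous value in both the `Ok` and `Err` cases. It assumes the crate is available under its upstream name `atomic` and adds no behaviour beyond what the source defines.

// Usage sketch of the Atomic<T> wrapper (assumes the `atomic` crate as a dependency).
extern crate atomic;

use atomic::{Atomic, Ordering};
use std::sync::Arc;
use std::thread;

fn main() {
    // Any Copy type works; small primitives are lock-free on most targets.
    let counter = Arc::new(Atomic::new(0u32));

    let handles: Vec<_> = (0..4)
        .map(|_| {
            let counter = Arc::clone(&counter);
            thread::spawn(move || {
                for _ in 0..1000 {
                    counter.fetch_add(1, Ordering::SeqCst);
                }
            })
        })
        .collect();
    for h in handles {
        h.join().unwrap();
    }
    assert_eq!(counter.load(Ordering::SeqCst), 4000);

    // compare_exchange returns the previous value in both the Ok and Err cases.
    let flag = Atomic::new(false);
    assert_eq!(
        flag.compare_exchange(false, true, Ordering::SeqCst, Ordering::SeqCst),
        Ok(false)
    );
}
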
diff --git a/third_party/rust/atomic/src/ops.rs b/third_party/rust/atomic/src/ops.rs
new file mode 100644
index 0000000000..3626c9ed9a
--- /dev/null
+++ b/third_party/rust/atomic/src/ops.rs
@@ -0,0 +1,666 @@
+// Copyright 2016 Amanieu d'Antras
+//
+// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
+// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
+// http://opensource.org/licenses/MIT>, at your option. This file may not be
+// copied, modified, or distributed except according to those terms.
+
+use core::cmp;
+use core::mem;
+use core::num::Wrapping;
+use core::ops;
+use core::sync::atomic::Ordering;
+use fallback;
+
+#[cfg(all(feature = "nightly", target_has_atomic = "8"))]
+use core::sync::atomic::{ AtomicI8, AtomicU8 };
+#[cfg(all(feature = "nightly", target_has_atomic = "16"))]
+use core::sync::atomic::{ AtomicI16, AtomicU16 };
+#[cfg(all(feature = "nightly", target_has_atomic = "32"))]
+use core::sync::atomic::{ AtomicI32, AtomicU32 };
+#[cfg(all(feature = "nightly", target_has_atomic = "64"))]
+use core::sync::atomic::{ AtomicI64, AtomicU64 };
+
+#[cfg(not(feature = "nightly"))]
+use core::sync::atomic::AtomicUsize;
+#[cfg(not(feature = "nightly"))]
+const SIZEOF_USIZE: usize = mem::size_of::<usize>();
+#[cfg(not(feature = "nightly"))]
+const ALIGNOF_USIZE: usize = mem::align_of::<usize>();
+
+#[cfg(feature = "nightly")]
+#[inline]
+pub const fn atomic_is_lock_free<T>() -> bool {
+ let size = mem::size_of::<T>();
+ // FIXME: switch to … && … && … once that operator is supported in const functions
+ (1 == size.count_ones()) & (8 >= size) & (mem::align_of::<T>() >= size)
+}
+
+#[cfg(not(feature = "nightly"))]
+#[inline]
+pub fn atomic_is_lock_free<T>() -> bool {
+ let size = mem::size_of::<T>();
+ 1 == size.count_ones() && SIZEOF_USIZE >= size && mem::align_of::<T>() >= ALIGNOF_USIZE
+}
+
+#[inline]
+pub unsafe fn atomic_load<T>(dst: *mut T, order: Ordering) -> T {
+ match mem::size_of::<T>() {
+ #[cfg(all(feature = "nightly", target_has_atomic = "8"))]
+ 1 if mem::align_of::<T>() >= 1 =>
+ {
+ mem::transmute_copy(&(*(dst as *const AtomicU8)).load(order))
+ }
+ #[cfg(all(feature = "nightly", target_has_atomic = "16"))]
+ 2 if mem::align_of::<T>() >= 2 =>
+ {
+ mem::transmute_copy(&(*(dst as *const AtomicU16)).load(order))
+ }
+ #[cfg(all(feature = "nightly", target_has_atomic = "32"))]
+ 4 if mem::align_of::<T>() >= 4 =>
+ {
+ mem::transmute_copy(&(*(dst as *const AtomicU32)).load(order))
+ }
+ #[cfg(all(feature = "nightly", target_has_atomic = "64"))]
+ 8 if mem::align_of::<T>() >= 8 =>
+ {
+ mem::transmute_copy(&(*(dst as *const AtomicU64)).load(order))
+ }
+ #[cfg(not(feature = "nightly"))]
+ SIZEOF_USIZE if mem::align_of::<T>() >= ALIGNOF_USIZE =>
+ {
+ mem::transmute_copy(&(*(dst as *const AtomicUsize)).load(order))
+ }
+ _ => fallback::atomic_load(dst),
+ }
+}
+
+#[inline]
+pub unsafe fn atomic_store<T>(dst: *mut T, val: T, order: Ordering) {
+ match mem::size_of::<T>() {
+ #[cfg(all(feature = "nightly", target_has_atomic = "8"))]
+ 1 if mem::align_of::<T>() >= 1 =>
+ {
+ (*(dst as *const AtomicU8)).store(mem::transmute_copy(&val), order)
+ }
+ #[cfg(all(feature = "nightly", target_has_atomic = "16"))]
+ 2 if mem::align_of::<T>() >= 2 =>
+ {
+ (*(dst as *const AtomicU16)).store(mem::transmute_copy(&val), order)
+ }
+ #[cfg(all(feature = "nightly", target_has_atomic = "32"))]
+ 4 if mem::align_of::<T>() >= 4 =>
+ {
+ (*(dst as *const AtomicU32)).store(mem::transmute_copy(&val), order)
+ }
+ #[cfg(all(feature = "nightly", target_has_atomic = "64"))]
+ 8 if mem::align_of::<T>() >= 8 =>
+ {
+ (*(dst as *const AtomicU64)).store(mem::transmute_copy(&val), order)
+ }
+ #[cfg(not(feature = "nightly"))]
+ SIZEOF_USIZE if mem::align_of::<T>() >= ALIGNOF_USIZE =>
+ {
+ (*(dst as *const AtomicUsize)).store(mem::transmute_copy(&val), order)
+ }
+ _ => fallback::atomic_store(dst, val),
+ }
+}
+
+#[inline]
+pub unsafe fn atomic_swap<T>(dst: *mut T, val: T, order: Ordering) -> T {
+ match mem::size_of::<T>() {
+ #[cfg(all(feature = "nightly", target_has_atomic = "8"))]
+ 1 if mem::align_of::<T>() >= 1 =>
+ {
+ mem::transmute_copy(&(*(dst as *const AtomicU8)).swap(mem::transmute_copy(&val), order))
+ }
+ #[cfg(all(feature = "nightly", target_has_atomic = "16"))]
+ 2 if mem::align_of::<T>() >= 2 =>
+ {
+ mem::transmute_copy(
+ &(*(dst as *const AtomicU16)).swap(mem::transmute_copy(&val), order),
+ )
+ }
+ #[cfg(all(feature = "nightly", target_has_atomic = "32"))]
+ 4 if mem::align_of::<T>() >= 4 =>
+ {
+ mem::transmute_copy(
+ &(*(dst as *const AtomicU32)).swap(mem::transmute_copy(&val), order),
+ )
+ }
+ #[cfg(all(feature = "nightly", target_has_atomic = "64"))]
+ 8 if mem::align_of::<T>() >= 8 =>
+ {
+ mem::transmute_copy(
+ &(*(dst as *const AtomicU64)).swap(mem::transmute_copy(&val), order),
+ )
+ }
+ #[cfg(not(feature = "nightly"))]
+ SIZEOF_USIZE if mem::align_of::<T>() >= ALIGNOF_USIZE =>
+ {
+ mem::transmute_copy(
+ &(*(dst as *const AtomicUsize)).swap(mem::transmute_copy(&val), order),
+ )
+ }
+ _ => fallback::atomic_swap(dst, val),
+ }
+}
+
+#[inline]
+unsafe fn map_result<T, U>(r: Result<T, T>) -> Result<U, U> {
+ match r {
+ Ok(x) => Ok(mem::transmute_copy(&x)),
+ Err(x) => Err(mem::transmute_copy(&x)),
+ }
+}
+
+#[inline]
+pub unsafe fn atomic_compare_exchange<T>(
+ dst: *mut T,
+ current: T,
+ new: T,
+ success: Ordering,
+ failure: Ordering,
+) -> Result<T, T> {
+ match mem::size_of::<T>() {
+ #[cfg(all(feature = "nightly", target_has_atomic = "8"))]
+ 1 if mem::align_of::<T>() >= 1 =>
+ {
+ map_result((*(dst as *const AtomicU8)).compare_exchange(
+ mem::transmute_copy(&current),
+ mem::transmute_copy(&new),
+ success,
+ failure,
+ ))
+ }
+ #[cfg(all(feature = "nightly", target_has_atomic = "16"))]
+ 2 if mem::align_of::<T>() >= 2 =>
+ {
+ map_result((*(dst as *const AtomicU16)).compare_exchange(
+ mem::transmute_copy(&current),
+ mem::transmute_copy(&new),
+ success,
+ failure,
+ ))
+ }
+ #[cfg(all(feature = "nightly", target_has_atomic = "32"))]
+ 4 if mem::align_of::<T>() >= 4 =>
+ {
+ map_result((*(dst as *const AtomicU32)).compare_exchange(
+ mem::transmute_copy(&current),
+ mem::transmute_copy(&new),
+ success,
+ failure,
+ ))
+ }
+ #[cfg(all(feature = "nightly", target_has_atomic = "64"))]
+ 8 if mem::align_of::<T>() >= 8 =>
+ {
+ map_result((*(dst as *const AtomicU64)).compare_exchange(
+ mem::transmute_copy(&current),
+ mem::transmute_copy(&new),
+ success,
+ failure,
+ ))
+ }
+ #[cfg(not(feature = "nightly"))]
+ SIZEOF_USIZE if mem::align_of::<T>() >= ALIGNOF_USIZE =>
+ {
+ map_result((*(dst as *const AtomicUsize)).compare_exchange(
+ mem::transmute_copy(&current),
+ mem::transmute_copy(&new),
+ success,
+ failure,
+ ))
+ }
+ _ => fallback::atomic_compare_exchange(dst, current, new),
+ }
+}
+
+#[inline]
+pub unsafe fn atomic_compare_exchange_weak<T>(
+ dst: *mut T,
+ current: T,
+ new: T,
+ success: Ordering,
+ failure: Ordering,
+) -> Result<T, T> {
+ match mem::size_of::<T>() {
+ #[cfg(all(feature = "nightly", target_has_atomic = "8"))]
+ 1 if mem::align_of::<T>() >= 1 =>
+ {
+ map_result((*(dst as *const AtomicU8)).compare_exchange_weak(
+ mem::transmute_copy(&current),
+ mem::transmute_copy(&new),
+ success,
+ failure,
+ ))
+ }
+ #[cfg(all(feature = "nightly", target_has_atomic = "16"))]
+ 2 if mem::align_of::<T>() >= 2 =>
+ {
+ map_result((*(dst as *const AtomicU16)).compare_exchange_weak(
+ mem::transmute_copy(&current),
+ mem::transmute_copy(&new),
+ success,
+ failure,
+ ))
+ }
+ #[cfg(all(feature = "nightly", target_has_atomic = "32"))]
+ 4 if mem::align_of::<T>() >= 4 =>
+ {
+ map_result((*(dst as *const AtomicU32)).compare_exchange_weak(
+ mem::transmute_copy(&current),
+ mem::transmute_copy(&new),
+ success,
+ failure,
+ ))
+ }
+ #[cfg(all(feature = "nightly", target_has_atomic = "64"))]
+ 8 if mem::align_of::<T>() >= 8 =>
+ {
+ map_result((*(dst as *const AtomicU64)).compare_exchange_weak(
+ mem::transmute_copy(&current),
+ mem::transmute_copy(&new),
+ success,
+ failure,
+ ))
+ }
+ #[cfg(not(feature = "nightly"))]
+ SIZEOF_USIZE if mem::align_of::<T>() >= ALIGNOF_USIZE =>
+ {
+ map_result((*(dst as *const AtomicUsize)).compare_exchange_weak(
+ mem::transmute_copy(&current),
+ mem::transmute_copy(&new),
+ success,
+ failure,
+ ))
+ }
+ _ => fallback::atomic_compare_exchange(dst, current, new),
+ }
+}
+
+#[inline]
+pub unsafe fn atomic_add<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T
+where
+ Wrapping<T>: ops::Add<Output = Wrapping<T>>,
+{
+ match mem::size_of::<T>() {
+ #[cfg(all(feature = "nightly", target_has_atomic = "8"))]
+ 1 if mem::align_of::<T>() >= 1 =>
+ {
+ mem::transmute_copy(
+ &(*(dst as *const AtomicU8)).fetch_add(mem::transmute_copy(&val), order),
+ )
+ }
+ #[cfg(all(feature = "nightly", target_has_atomic = "16"))]
+ 2 if mem::align_of::<T>() >= 2 =>
+ {
+ mem::transmute_copy(
+ &(*(dst as *const AtomicU16)).fetch_add(mem::transmute_copy(&val), order),
+ )
+ }
+ #[cfg(all(feature = "nightly", target_has_atomic = "32"))]
+ 4 if mem::align_of::<T>() >= 4 =>
+ {
+ mem::transmute_copy(
+ &(*(dst as *const AtomicU32)).fetch_add(mem::transmute_copy(&val), order),
+ )
+ }
+ #[cfg(all(feature = "nightly", target_has_atomic = "64"))]
+ 8 if mem::align_of::<T>() >= 8 =>
+ {
+ mem::transmute_copy(
+ &(*(dst as *const AtomicU64)).fetch_add(mem::transmute_copy(&val), order),
+ )
+ }
+ #[cfg(not(feature = "nightly"))]
+ SIZEOF_USIZE if mem::align_of::<T>() >= ALIGNOF_USIZE =>
+ {
+ mem::transmute_copy(
+ &(*(dst as *const AtomicUsize)).fetch_add(mem::transmute_copy(&val), order),
+ )
+ }
+ _ => fallback::atomic_add(dst, val),
+ }
+}
+
+#[inline]
+pub unsafe fn atomic_sub<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T
+where
+ Wrapping<T>: ops::Sub<Output = Wrapping<T>>,
+{
+ match mem::size_of::<T>() {
+ #[cfg(all(feature = "nightly", target_has_atomic = "8"))]
+ 1 if mem::align_of::<T>() >= 1 =>
+ {
+ mem::transmute_copy(
+ &(*(dst as *const AtomicU8)).fetch_sub(mem::transmute_copy(&val), order),
+ )
+ }
+ #[cfg(all(feature = "nightly", target_has_atomic = "16"))]
+ 2 if mem::align_of::<T>() >= 2 =>
+ {
+ mem::transmute_copy(
+ &(*(dst as *const AtomicU16)).fetch_sub(mem::transmute_copy(&val), order),
+ )
+ }
+ #[cfg(all(feature = "nightly", target_has_atomic = "32"))]
+ 4 if mem::align_of::<T>() >= 4 =>
+ {
+ mem::transmute_copy(
+ &(*(dst as *const AtomicU32)).fetch_sub(mem::transmute_copy(&val), order),
+ )
+ }
+ #[cfg(all(feature = "nightly", target_has_atomic = "64"))]
+ 8 if mem::align_of::<T>() >= 8 =>
+ {
+ mem::transmute_copy(
+ &(*(dst as *const AtomicU64)).fetch_sub(mem::transmute_copy(&val), order),
+ )
+ }
+ #[cfg(not(feature = "nightly"))]
+ SIZEOF_USIZE if mem::align_of::<T>() >= ALIGNOF_USIZE =>
+ {
+ mem::transmute_copy(
+ &(*(dst as *const AtomicUsize)).fetch_sub(mem::transmute_copy(&val), order),
+ )
+ }
+ _ => fallback::atomic_sub(dst, val),
+ }
+}
+
+#[inline]
+pub unsafe fn atomic_and<T: Copy + ops::BitAnd<Output = T>>(
+ dst: *mut T,
+ val: T,
+ order: Ordering,
+) -> T {
+ match mem::size_of::<T>() {
+ #[cfg(all(feature = "nightly", target_has_atomic = "8"))]
+ 1 if mem::align_of::<T>() >= 1 =>
+ {
+ mem::transmute_copy(
+ &(*(dst as *const AtomicU8)).fetch_and(mem::transmute_copy(&val), order),
+ )
+ }
+ #[cfg(all(feature = "nightly", target_has_atomic = "16"))]
+ 2 if mem::align_of::<T>() >= 2 =>
+ {
+ mem::transmute_copy(
+ &(*(dst as *const AtomicU16)).fetch_and(mem::transmute_copy(&val), order),
+ )
+ }
+ #[cfg(all(feature = "nightly", target_has_atomic = "32"))]
+ 4 if mem::align_of::<T>() >= 4 =>
+ {
+ mem::transmute_copy(
+ &(*(dst as *const AtomicU32)).fetch_and(mem::transmute_copy(&val), order),
+ )
+ }
+ #[cfg(all(feature = "nightly", target_has_atomic = "64"))]
+ 8 if mem::align_of::<T>() >= 8 =>
+ {
+ mem::transmute_copy(
+ &(*(dst as *const AtomicU64)).fetch_and(mem::transmute_copy(&val), order),
+ )
+ }
+ #[cfg(not(feature = "nightly"))]
+ SIZEOF_USIZE if mem::align_of::<T>() >= ALIGNOF_USIZE =>
+ {
+ mem::transmute_copy(
+ &(*(dst as *const AtomicUsize)).fetch_and(mem::transmute_copy(&val), order),
+ )
+ }
+ _ => fallback::atomic_and(dst, val),
+ }
+}
+
+#[inline]
+pub unsafe fn atomic_or<T: Copy + ops::BitOr<Output = T>>(
+ dst: *mut T,
+ val: T,
+ order: Ordering,
+) -> T {
+ match mem::size_of::<T>() {
+ #[cfg(all(feature = "nightly", target_has_atomic = "8"))]
+ 1 if mem::align_of::<T>() >= 1 =>
+ {
+ mem::transmute_copy(
+ &(*(dst as *const AtomicU8)).fetch_or(mem::transmute_copy(&val), order),
+ )
+ }
+ #[cfg(all(feature = "nightly", target_has_atomic = "16"))]
+ 2 if mem::align_of::<T>() >= 2 =>
+ {
+ mem::transmute_copy(
+ &(*(dst as *const AtomicU16)).fetch_or(mem::transmute_copy(&val), order),
+ )
+ }
+ #[cfg(all(feature = "nightly", target_has_atomic = "32"))]
+ 4 if mem::align_of::<T>() >= 4 =>
+ {
+ mem::transmute_copy(
+ &(*(dst as *const AtomicU32)).fetch_or(mem::transmute_copy(&val), order),
+ )
+ }
+ #[cfg(all(feature = "nightly", target_has_atomic = "64"))]
+ 8 if mem::align_of::<T>() >= 8 =>
+ {
+ mem::transmute_copy(
+ &(*(dst as *const AtomicU64)).fetch_or(mem::transmute_copy(&val), order),
+ )
+ }
+ #[cfg(not(feature = "nightly"))]
+ SIZEOF_USIZE if mem::align_of::<T>() >= ALIGNOF_USIZE =>
+ {
+ mem::transmute_copy(
+ &(*(dst as *const AtomicUsize)).fetch_or(mem::transmute_copy(&val), order),
+ )
+ }
+ _ => fallback::atomic_or(dst, val),
+ }
+}
+
+#[inline]
+pub unsafe fn atomic_xor<T: Copy + ops::BitXor<Output = T>>(
+ dst: *mut T,
+ val: T,
+ order: Ordering,
+) -> T {
+ match mem::size_of::<T>() {
+ #[cfg(all(feature = "nightly", target_has_atomic = "8"))]
+ 1 if mem::align_of::<T>() >= 1 =>
+ {
+ mem::transmute_copy(
+ &(*(dst as *const AtomicU8)).fetch_xor(mem::transmute_copy(&val), order),
+ )
+ }
+ #[cfg(all(feature = "nightly", target_has_atomic = "16"))]
+ 2 if mem::align_of::<T>() >= 2 =>
+ {
+ mem::transmute_copy(
+ &(*(dst as *const AtomicU16)).fetch_xor(mem::transmute_copy(&val), order),
+ )
+ }
+ #[cfg(all(feature = "nightly", target_has_atomic = "32"))]
+ 4 if mem::align_of::<T>() >= 4 =>
+ {
+ mem::transmute_copy(
+ &(*(dst as *const AtomicU32)).fetch_xor(mem::transmute_copy(&val), order),
+ )
+ }
+ #[cfg(all(feature = "nightly", target_has_atomic = "64"))]
+ 8 if mem::align_of::<T>() >= 8 =>
+ {
+ mem::transmute_copy(
+ &(*(dst as *const AtomicU64)).fetch_xor(mem::transmute_copy(&val), order),
+ )
+ }
+ #[cfg(not(feature = "nightly"))]
+ SIZEOF_USIZE if mem::align_of::<T>() >= ALIGNOF_USIZE =>
+ {
+ mem::transmute_copy(
+ &(*(dst as *const AtomicUsize)).fetch_xor(mem::transmute_copy(&val), order),
+ )
+ }
+ _ => fallback::atomic_xor(dst, val),
+ }
+}
+
+#[inline]
+pub unsafe fn atomic_min<T: Copy + cmp::Ord>(dst: *mut T, val: T, order: Ordering) -> T {
+ // Silence warning, fetch_min is not stable yet
+ #[cfg(not(feature = "nightly"))]
+ let _ = order;
+
+ match mem::size_of::<T>() {
+ #[cfg(all(feature = "nightly", target_has_atomic = "8"))]
+ 1 if mem::align_of::<T>() >= 1 =>
+ {
+ mem::transmute_copy(
+ &(*(dst as *const AtomicI8)).fetch_min(mem::transmute_copy(&val), order),
+ )
+ }
+ #[cfg(all(feature = "nightly", target_has_atomic = "16"))]
+ 2 if mem::align_of::<T>() >= 2 =>
+ {
+ mem::transmute_copy(
+ &(*(dst as *const AtomicI16)).fetch_min(mem::transmute_copy(&val), order),
+ )
+ }
+ #[cfg(all(feature = "nightly", target_has_atomic = "32"))]
+ 4 if mem::align_of::<T>() >= 4 =>
+ {
+ mem::transmute_copy(
+ &(*(dst as *const AtomicI32)).fetch_min(mem::transmute_copy(&val), order),
+ )
+ }
+ #[cfg(all(feature = "nightly", target_has_atomic = "64"))]
+ 8 if mem::align_of::<T>() >= 8 =>
+ {
+ mem::transmute_copy(
+ &(*(dst as *const AtomicI64)).fetch_min(mem::transmute_copy(&val), order),
+ )
+ }
+ _ => fallback::atomic_min(dst, val),
+ }
+}
+
+#[inline]
+pub unsafe fn atomic_max<T: Copy + cmp::Ord>(dst: *mut T, val: T, order: Ordering) -> T {
+ // Silence warning, fetch_max is not stable yet
+ #[cfg(not(feature = "nightly"))]
+ let _ = order;
+
+ match mem::size_of::<T>() {
+ #[cfg(all(feature = "nightly", target_has_atomic = "8"))]
+ 1 if mem::align_of::<T>() >= 1 =>
+ {
+ mem::transmute_copy(
+ &(*(dst as *const AtomicI8)).fetch_max(mem::transmute_copy(&val), order),
+ )
+ }
+ #[cfg(all(feature = "nightly", target_has_atomic = "16"))]
+ 2 if mem::align_of::<T>() >= 2 =>
+ {
+ mem::transmute_copy(
+ &(*(dst as *const AtomicI16)).fetch_max(mem::transmute_copy(&val), order),
+ )
+ }
+ #[cfg(all(feature = "nightly", target_has_atomic = "32"))]
+ 4 if mem::align_of::<T>() >= 4 =>
+ {
+ mem::transmute_copy(
+ &(*(dst as *const AtomicI32)).fetch_max(mem::transmute_copy(&val), order),
+ )
+ }
+ #[cfg(all(feature = "nightly", target_has_atomic = "64"))]
+ 8 if mem::align_of::<T>() >= 8 =>
+ {
+ mem::transmute_copy(
+ &(*(dst as *const AtomicI64)).fetch_max(mem::transmute_copy(&val), order),
+ )
+ }
+ _ => fallback::atomic_max(dst, val),
+ }
+}
+
+#[inline]
+pub unsafe fn atomic_umin<T: Copy + cmp::Ord>(dst: *mut T, val: T, order: Ordering) -> T {
+ // Silence warning, fetch_min is not stable yet
+ #[cfg(not(feature = "nightly"))]
+ let _ = order;
+
+ match mem::size_of::<T>() {
+ #[cfg(all(feature = "nightly", target_has_atomic = "8"))]
+ 1 if mem::align_of::<T>() >= 1 =>
+ {
+ mem::transmute_copy(
+ &(*(dst as *const AtomicU8)).fetch_min(mem::transmute_copy(&val), order),
+ )
+ }
+ #[cfg(all(feature = "nightly", target_has_atomic = "16"))]
+ 2 if mem::align_of::<T>() >= 2 =>
+ {
+ mem::transmute_copy(
+ &(*(dst as *const AtomicU16)).fetch_min(mem::transmute_copy(&val), order),
+ )
+ }
+ #[cfg(all(feature = "nightly", target_has_atomic = "32"))]
+ 4 if mem::align_of::<T>() >= 4 =>
+ {
+ mem::transmute_copy(
+ &(*(dst as *const AtomicU32)).fetch_min(mem::transmute_copy(&val), order),
+ )
+ }
+ #[cfg(all(feature = "nightly", target_has_atomic = "64"))]
+ 8 if mem::align_of::<T>() >= 8 =>
+ {
+ mem::transmute_copy(
+ &(*(dst as *const AtomicU64)).fetch_min(mem::transmute_copy(&val), order),
+ )
+ }
+ _ => fallback::atomic_min(dst, val),
+ }
+}
+
+#[inline]
+pub unsafe fn atomic_umax<T: Copy + cmp::Ord>(dst: *mut T, val: T, order: Ordering) -> T {
+ // Silence warning, fetch_max is not stable yet
+ #[cfg(not(feature = "nightly"))]
+ let _ = order;
+
+ match mem::size_of::<T>() {
+ #[cfg(all(feature = "nightly", target_has_atomic = "8"))]
+ 1 if mem::align_of::<T>() >= 1 =>
+ {
+ mem::transmute_copy(
+ &(*(dst as *const AtomicU8)).fetch_max(mem::transmute_copy(&val), order),
+ )
+ }
+ #[cfg(all(feature = "nightly", target_has_atomic = "16"))]
+ 2 if mem::align_of::<T>() >= 2 =>
+ {
+ mem::transmute_copy(
+ &(*(dst as *const AtomicU16)).fetch_max(mem::transmute_copy(&val), order),
+ )
+ }
+ #[cfg(all(feature = "nightly", target_has_atomic = "32"))]
+ 4 if mem::align_of::<T>() >= 4 =>
+ {
+ mem::transmute_copy(
+ &(*(dst as *const AtomicU32)).fetch_max(mem::transmute_copy(&val), order),
+ )
+ }
+ #[cfg(all(feature = "nightly", target_has_atomic = "64"))]
+ 8 if mem::align_of::<T>() >= 8 =>
+ {
+ mem::transmute_copy(
+ &(*(dst as *const AtomicU64)).fetch_max(mem::transmute_copy(&val), order),
+ )
+ }
+ _ => fallback::atomic_max(dst, val),
+ }
+}
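
The dispatch pattern used throughout ops.rs hinges on one predicate: a type gets a native fixed-width atomic only when its size is a power of two no larger than the widest supported atomic and its alignment is at least that size; everything else routes to the spinlock table in fallback.rs. A standalone sketch of that check follows, using the 8-byte cap of the nightly path and an illustrative function name; it is not the crate's API.

use std::mem;

// Illustrative re-statement of the lock-free dispatch predicate (not the crate's API).
fn is_lock_free_candidate<T>() -> bool {
    let size = mem::size_of::<T>();
    size.count_ones() == 1 && size <= 8 && mem::align_of::<T>() >= size
}

fn main() {
    assert!(is_lock_free_candidate::<u32>());
    // u64 qualifies only where its alignment is 8, matching the align_of checks in the tests.
    assert_eq!(is_lock_free_candidate::<u64>(), mem::align_of::<u64>() >= 8);
    // Odd-sized or oversized types fall back to the spinlock table in fallback.rs.
    assert!(!is_lock_free_candidate::<[u8; 3]>());
    assert!(!is_lock_free_candidate::<[u8; 16]>());
}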