author    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-17 12:06:31 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-17 12:06:31 +0000
commit    2ff14448863ac1a1dd9533461708e29aae170c2d (patch)
tree      85b9fea2bbfe3f06473cfa381eed11f273b57c5c /vendor/crossbeam-epoch/src
parent    Adding debian version 1.64.0+dfsg1-1. (diff)
Adding debian version 1.65.0+dfsg1-2. (tag: debian/1.65.0+dfsg1-2)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'vendor/crossbeam-epoch/src')
-rw-r--r--  vendor/crossbeam-epoch/src/atomic.rs    | 108
-rw-r--r--  vendor/crossbeam-epoch/src/collector.rs |   6
-rw-r--r--  vendor/crossbeam-epoch/src/deferred.rs  |  19
-rw-r--r--  vendor/crossbeam-epoch/src/guard.rs     |   6
-rw-r--r--  vendor/crossbeam-epoch/src/internal.rs  | 109
5 files changed, 144 insertions, 104 deletions
diff --git a/vendor/crossbeam-epoch/src/atomic.rs b/vendor/crossbeam-epoch/src/atomic.rs
index b0b6a68e7..19bab4729 100644
--- a/vendor/crossbeam-epoch/src/atomic.rs
+++ b/vendor/crossbeam-epoch/src/atomic.rs
@@ -252,7 +252,7 @@ impl<T> Pointable for [MaybeUninit<T>] {
let size = mem::size_of::<Array<T>>() + mem::size_of::<MaybeUninit<T>>() * len;
let align = mem::align_of::<Array<T>>();
let layout = alloc::Layout::from_size_align(size, align).unwrap();
- let ptr = alloc::alloc(layout) as *mut Array<T>;
+ let ptr = alloc::alloc(layout).cast::<Array<T>>();
if ptr.is_null() {
alloc::handle_alloc_error(layout);
}
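The hunk above sizes an `Array<T>` header followed by `len` elements by hand. A hedged sketch of the same computation using `Layout`'s helpers instead of manual size math (the crate does it manually, as shown; `array_layout` is a name invented here):

    use std::alloc::Layout;
    use std::mem::MaybeUninit;

    struct Array<T> {
        _len: usize,
        _elements: [MaybeUninit<T>; 0], // flexible-array-style tail
    }

    fn array_layout<T>(len: usize) -> Layout {
        let (layout, _offset) = Layout::new::<Array<T>>()
            .extend(Layout::array::<MaybeUninit<T>>(len).unwrap())
            .unwrap();
        layout.pad_to_align()
    }

    fn main() {
        // Header plus 10 u64 slots.
        assert_eq!(array_layout::<u64>(10).size(), 8 + 10 * 8);
    }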
@@ -305,6 +305,7 @@ impl<T> Atomic<T> {
/// use crossbeam_epoch::Atomic;
///
/// let a = Atomic::new(1234);
+ /// # unsafe { drop(a.into_owned()); } // avoid leak
/// ```
pub fn new(init: T) -> Atomic<T> {
Self::init(init)
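The hidden `# unsafe { drop(a.into_owned()); }` lines added throughout these doctests reclaim the allocation before each example ends: `Atomic` has no destructor (reclamation normally goes through the epoch machinery), so a doctest that merely drops the `Atomic` leaks its pointee. A minimal standalone sketch of the pattern:

    use crossbeam_epoch::Atomic;

    fn main() {
        let a = Atomic::new(1234);
        // `Atomic` has no `Drop` impl, so letting `a` fall out of scope
        // would leak the heap-allocated 1234. Reclaim it explicitly.
        // Safety: no guards or other threads reference the value.
        unsafe { drop(a.into_owned()) };
    }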
@@ -320,6 +321,7 @@ impl<T: ?Sized + Pointable> Atomic<T> {
/// use crossbeam_epoch::Atomic;
///
/// let a = Atomic::<i32>::init(1234);
+ /// # unsafe { drop(a.into_owned()); } // avoid leak
/// ```
pub fn init(init: T::Init) -> Atomic<T> {
Self::from(Owned::init(init))
@@ -342,7 +344,7 @@ impl<T: ?Sized + Pointable> Atomic<T> {
///
/// let a = Atomic::<i32>::null();
/// ```
- #[cfg(all(crossbeam_const_fn_trait_bound, not(crossbeam_loom)))]
+ #[cfg(all(not(crossbeam_no_const_fn_trait_bound), not(crossbeam_loom)))]
pub const fn null() -> Atomic<T> {
Self {
data: AtomicUsize::new(0),
@@ -351,7 +353,7 @@ impl<T: ?Sized + Pointable> Atomic<T> {
}
/// Returns a new null atomic pointer.
- #[cfg(not(all(crossbeam_const_fn_trait_bound, not(crossbeam_loom))))]
+ #[cfg(not(all(not(crossbeam_no_const_fn_trait_bound), not(crossbeam_loom))))]
pub fn null() -> Atomic<T> {
Self {
data: AtomicUsize::new(0),
@@ -373,6 +375,7 @@ impl<T: ?Sized + Pointable> Atomic<T> {
/// let a = Atomic::new(1234);
/// let guard = &epoch::pin();
/// let p = a.load(SeqCst, guard);
+ /// # unsafe { drop(a.into_owned()); } // avoid leak
/// ```
pub fn load<'g>(&self, ord: Ordering, _: &'g Guard) -> Shared<'g, T> {
unsafe { Shared::from_usize(self.data.load(ord)) }
@@ -398,6 +401,7 @@ impl<T: ?Sized + Pointable> Atomic<T> {
/// let a = Atomic::new(1234);
/// let guard = &epoch::pin();
/// let p = a.load_consume(guard);
+ /// # unsafe { drop(a.into_owned()); } // avoid leak
/// ```
pub fn load_consume<'g>(&self, _: &'g Guard) -> Shared<'g, T> {
unsafe { Shared::from_usize(self.data.load_consume()) }
@@ -415,8 +419,10 @@ impl<T: ?Sized + Pointable> Atomic<T> {
/// use std::sync::atomic::Ordering::SeqCst;
///
/// let a = Atomic::new(1234);
+ /// # unsafe { drop(a.load(SeqCst, &crossbeam_epoch::pin()).into_owned()); } // avoid leak
/// a.store(Shared::null(), SeqCst);
/// a.store(Owned::new(1234), SeqCst);
+ /// # unsafe { drop(a.into_owned()); } // avoid leak
/// ```
pub fn store<P: Pointer<T>>(&self, new: P, ord: Ordering) {
self.data.store(new.into_usize(), ord);
@@ -437,6 +443,7 @@ impl<T: ?Sized + Pointable> Atomic<T> {
/// let a = Atomic::new(1234);
/// let guard = &epoch::pin();
/// let p = a.swap(Shared::null(), SeqCst, guard);
+ /// # unsafe { drop(p.into_owned()); } // avoid leak
/// ```
pub fn swap<'g, P: Pointer<T>>(&self, new: P, ord: Ordering, _: &'g Guard) -> Shared<'g, T> {
unsafe { Shared::from_usize(self.data.swap(new.into_usize(), ord)) }
@@ -471,6 +478,7 @@ impl<T: ?Sized + Pointable> Atomic<T> {
/// let curr = a.load(SeqCst, guard);
/// let res1 = a.compare_exchange(curr, Shared::null(), SeqCst, SeqCst, guard);
/// let res2 = a.compare_exchange(curr, Owned::new(5678), SeqCst, SeqCst, guard);
+ /// # unsafe { drop(curr.into_owned()); } // avoid leak
/// ```
pub fn compare_exchange<'g, P>(
&self,
@@ -526,6 +534,7 @@ impl<T: ?Sized + Pointable> Atomic<T> {
///
/// let mut new = Owned::new(5678);
/// let mut ptr = a.load(SeqCst, guard);
+ /// # unsafe { drop(a.load(SeqCst, guard).into_owned()); } // avoid leak
/// loop {
/// match a.compare_exchange_weak(ptr, new, SeqCst, SeqCst, guard) {
/// Ok(p) => {
@@ -546,6 +555,7 @@ impl<T: ?Sized + Pointable> Atomic<T> {
/// Err(err) => curr = err.current,
/// }
/// }
+ /// # unsafe { drop(curr.into_owned()); } // avoid leak
/// ```
pub fn compare_exchange_weak<'g, P>(
&self,
@@ -608,6 +618,7 @@ impl<T: ?Sized + Pointable> Atomic<T> {
///
/// let res2 = a.fetch_update(SeqCst, SeqCst, guard, |x| None);
/// assert!(res2.is_err());
+ /// # unsafe { drop(a.into_owned()); } // avoid leak
/// ```
pub fn fetch_update<'g, F>(
&self,
@@ -666,6 +677,7 @@ impl<T: ?Sized + Pointable> Atomic<T> {
/// let curr = a.load(SeqCst, guard);
/// let res1 = a.compare_and_set(curr, Shared::null(), SeqCst, guard);
/// let res2 = a.compare_and_set(curr, Owned::new(5678), SeqCst, guard);
+ /// # unsafe { drop(curr.into_owned()); } // avoid leak
/// ```
// TODO: remove in the next major version.
#[allow(deprecated)]
@@ -723,6 +735,7 @@ impl<T: ?Sized + Pointable> Atomic<T> {
///
/// let mut new = Owned::new(5678);
/// let mut ptr = a.load(SeqCst, guard);
+ /// # unsafe { drop(a.load(SeqCst, guard).into_owned()); } // avoid leak
/// loop {
/// match a.compare_and_set_weak(ptr, new, SeqCst, guard) {
/// Ok(p) => {
@@ -743,6 +756,7 @@ impl<T: ?Sized + Pointable> Atomic<T> {
/// Err(err) => curr = err.current,
/// }
/// }
+ /// # unsafe { drop(curr.into_owned()); } // avoid leak
/// ```
// TODO: remove in the next major version.
#[allow(deprecated)]
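Both `compare_and_set` variants are deprecated in favor of `compare_exchange`, which spells out separate success and failure orderings. A hedged migration sketch, mirroring the doc examples above:

    use crossbeam_epoch::{self as epoch, Atomic, Shared};
    use std::sync::atomic::Ordering::SeqCst;

    fn main() {
        let a = Atomic::new(1234);
        let guard = &epoch::pin();
        let curr = a.load(SeqCst, guard);
        // Before: a.compare_and_set(curr, Shared::null(), SeqCst, guard)
        // After: both the success and the failure ordering are explicit.
        let res = a.compare_exchange(curr, Shared::null(), SeqCst, SeqCst, guard);
        assert!(res.is_ok());
        unsafe { drop(curr.into_owned()) }; // avoid leak, as in the doctests
    }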
@@ -877,6 +891,52 @@ impl<T: ?Sized + Pointable> Atomic<T> {
Owned::from_usize(self.data.into_inner())
}
}
+
+ /// Takes ownership of the pointee if it is non-null.
+ ///
+ /// This consumes the atomic and converts it into [`Owned`]. As [`Atomic`] doesn't have a
+ /// destructor and doesn't drop the pointee while [`Owned`] does, this is suitable for
+ /// destructors of data structures.
+ ///
+ /// # Safety
+ ///
+ /// This method may be called only if the pointer is valid and nobody else is holding a
+ /// reference to the same object, or the pointer is null.
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// # use std::mem;
+ /// # use crossbeam_epoch::Atomic;
+ /// struct DataStructure {
+ /// ptr: Atomic<usize>,
+ /// }
+ ///
+ /// impl Drop for DataStructure {
+ /// fn drop(&mut self) {
+ /// // By now the DataStructure lives only in our thread and we are sure we don't hold
+ /// // any Shared or & to it ourselves, but it may be null, so we have to be careful.
+ /// let old = mem::replace(&mut self.ptr, Atomic::null());
+ /// unsafe {
+ /// if let Some(x) = old.try_into_owned() {
+ /// drop(x)
+ /// }
+ /// }
+ /// }
+ /// }
+ /// ```
+ pub unsafe fn try_into_owned(self) -> Option<Owned<T>> {
+ // FIXME: See self.into_owned()
+ #[cfg(crossbeam_loom)]
+ let data = self.data.unsync_load();
+ #[cfg(not(crossbeam_loom))]
+ let data = self.data.into_inner();
+ if decompose_tag::<T>(data).0 == 0 {
+ None
+ } else {
+ Some(Owned::from_usize(data))
+ }
+ }
}
impl<T: ?Sized + Pointable> fmt::Debug for Atomic<T> {
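`try_into_owned` treats the pointer as null only after stripping the tag: `decompose_tag::<T>(data).0 == 0`. A sketch of the low-bit tag packing this relies on (parameterized by alignment here; the crate's helpers derive the mask from `T::ALIGN`):

    // Alignment is a power of two, so `align - 1` masks the bits a
    // properly aligned pointer never uses.
    fn low_bits(align: usize) -> usize {
        align - 1
    }

    fn compose_tag(data: usize, tag: usize, align: usize) -> usize {
        (data & !low_bits(align)) | (tag & low_bits(align))
    }

    fn decompose_tag(data: usize, align: usize) -> (usize, usize) {
        (data & !low_bits(align), data & low_bits(align))
    }

    fn main() {
        let packed = compose_tag(0x1000, 0b10, 8);
        assert_eq!(decompose_tag(packed, 8), (0x1000, 0b10));
    }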
@@ -925,6 +985,7 @@ impl<T: ?Sized + Pointable> From<Owned<T>> for Atomic<T> {
/// use crossbeam_epoch::{Atomic, Owned};
///
/// let a = Atomic::<i32>::from(Owned::new(1234));
+ /// # unsafe { drop(a.into_owned()); } // avoid leak
/// ```
fn from(owned: Owned<T>) -> Self {
let data = owned.data;
@@ -1108,6 +1169,7 @@ impl<T: ?Sized + Pointable> Owned<T> {
/// let o = Owned::new(1234);
/// let guard = &epoch::pin();
/// let p = o.into_shared(guard);
+ /// # unsafe { drop(p.into_owned()); } // avoid leak
/// ```
#[allow(clippy::needless_lifetimes)]
pub fn into_shared<'g>(self, _: &'g Guard) -> Shared<'g, T> {
@@ -1291,6 +1353,7 @@ impl<'g, T> Shared<'g, T> {
/// let guard = &epoch::pin();
/// let p = a.load(SeqCst, guard);
/// assert_eq!(p.as_raw(), raw);
+ /// # unsafe { drop(a.into_owned()); } // avoid leak
/// ```
pub fn as_raw(&self) -> *const T {
let (raw, _) = decompose_tag::<T>(self.data);
@@ -1329,6 +1392,7 @@ impl<'g, T: ?Sized + Pointable> Shared<'g, T> {
/// assert!(a.load(SeqCst, guard).is_null());
/// a.store(Owned::new(1234), SeqCst);
/// assert!(!a.load(SeqCst, guard).is_null());
+ /// # unsafe { drop(a.into_owned()); } // avoid leak
/// ```
pub fn is_null(&self) -> bool {
let (raw, _) = decompose_tag::<T>(self.data);
@@ -1365,6 +1429,7 @@ impl<'g, T: ?Sized + Pointable> Shared<'g, T> {
/// unsafe {
/// assert_eq!(p.deref(), &1234);
/// }
+ /// # unsafe { drop(a.into_owned()); } // avoid leak
/// ```
pub unsafe fn deref(&self) -> &'g T {
let (raw, _) = decompose_tag::<T>(self.data);
@@ -1406,6 +1471,7 @@ impl<'g, T: ?Sized + Pointable> Shared<'g, T> {
/// unsafe {
/// assert_eq!(p.deref(), &vec![1, 2, 3, 4, 5]);
/// }
+ /// # unsafe { drop(a.into_owned()); } // avoid leak
/// ```
pub unsafe fn deref_mut(&mut self) -> &'g mut T {
let (raw, _) = decompose_tag::<T>(self.data);
@@ -1442,6 +1508,7 @@ impl<'g, T: ?Sized + Pointable> Shared<'g, T> {
/// unsafe {
/// assert_eq!(p.as_ref(), Some(&1234));
/// }
+ /// # unsafe { drop(a.into_owned()); } // avoid leak
/// ```
pub unsafe fn as_ref(&self) -> Option<&'g T> {
let (raw, _) = decompose_tag::<T>(self.data);
@@ -1481,6 +1548,36 @@ impl<'g, T: ?Sized + Pointable> Shared<'g, T> {
Owned::from_usize(self.data)
}
+ /// Takes ownership of the pointee if it is not null.
+ ///
+ /// # Safety
+ ///
+ /// This method may be called only if the pointer is valid and nobody else is holding a
+ /// reference to the same object, or if the pointer is null.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use crossbeam_epoch::{self as epoch, Atomic};
+ /// use std::sync::atomic::Ordering::SeqCst;
+ ///
+ /// let a = Atomic::new(1234);
+ /// unsafe {
+ /// let guard = &epoch::unprotected();
+ /// let p = a.load(SeqCst, guard);
+ /// if let Some(x) = p.try_into_owned() {
+ /// drop(x);
+ /// }
+ /// }
+ /// ```
+ pub unsafe fn try_into_owned(self) -> Option<Owned<T>> {
+ if self.is_null() {
+ None
+ } else {
+ Some(Owned::from_usize(self.data))
+ }
+ }
+
/// Returns the tag stored within the pointer.
///
/// # Examples
@@ -1493,6 +1590,7 @@ impl<'g, T: ?Sized + Pointable> Shared<'g, T> {
/// let guard = &epoch::pin();
/// let p = a.load(SeqCst, guard);
/// assert_eq!(p.tag(), 2);
+ /// # unsafe { drop(a.into_owned()); } // avoid leak
/// ```
pub fn tag(&self) -> usize {
let (_, tag) = decompose_tag::<T>(self.data);
@@ -1516,6 +1614,7 @@ impl<'g, T: ?Sized + Pointable> Shared<'g, T> {
/// assert_eq!(p1.tag(), 0);
/// assert_eq!(p2.tag(), 2);
/// assert_eq!(p1.as_raw(), p2.as_raw());
+ /// # unsafe { drop(a.into_owned()); } // avoid leak
/// ```
pub fn with_tag(&self, tag: usize) -> Shared<'g, T> {
unsafe { Self::from_usize(compose_tag::<T>(self.data, tag)) }
@@ -1536,6 +1635,7 @@ impl<T> From<*const T> for Shared<'_, T> {
///
/// let p = Shared::from(Box::into_raw(Box::new(1234)) as *const _);
/// assert!(!p.is_null());
+ /// # unsafe { drop(p.into_owned()); } // avoid leak
/// ```
fn from(raw: *const T) -> Self {
let raw = raw as usize;
@@ -1612,7 +1712,7 @@ mod tests {
#[test]
fn array_init() {
let owned = Owned::<[MaybeUninit<usize>]>::init(10);
- let arr: &[MaybeUninit<usize>] = &*owned;
+ let arr: &[MaybeUninit<usize>] = &owned;
assert_eq!(arr.len(), 10);
}
}
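The `&*owned` → `&owned` tweak in the test compiles because `Owned<T>` implements `Deref<Target = T>`, so the reference coerces to `&[MaybeUninit<usize>]` without an explicit reborrow. The same coercion with `Box` standing in for `Owned` (a sketch, not the crate's code):

    fn main() {
        let boxed: Box<[u32]> = vec![1, 2, 3].into_boxed_slice();
        // Deref coercion turns `&Box<[u32]>` into `&[u32]`; the explicit
        // `&*boxed` reborrow is redundant.
        let slice: &[u32] = &boxed;
        assert_eq!(slice.len(), 3);
    }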
diff --git a/vendor/crossbeam-epoch/src/collector.rs b/vendor/crossbeam-epoch/src/collector.rs
index 099a2ffc6..5b0851184 100644
--- a/vendor/crossbeam-epoch/src/collector.rs
+++ b/vendor/crossbeam-epoch/src/collector.rs
@@ -111,7 +111,7 @@ impl fmt::Debug for LocalHandle {
#[cfg(all(test, not(crossbeam_loom)))]
mod tests {
- use std::mem;
+ use std::mem::ManuallyDrop;
use std::sync::atomic::{AtomicUsize, Ordering};
use crossbeam_utils::thread;
@@ -402,15 +402,13 @@ mod tests {
v.push(i as i32);
}
- let ptr = v.as_mut_ptr() as usize;
let len = v.len();
+ let ptr = ManuallyDrop::new(v).as_mut_ptr() as usize;
guard.defer_unchecked(move || {
drop(Vec::from_raw_parts(ptr as *const i32 as *mut i32, len, len));
DESTROYS.fetch_add(len, Ordering::Relaxed);
});
guard.flush();
-
- mem::forget(v);
}
while DESTROYS.load(Ordering::Relaxed) < COUNT {
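The `ManuallyDrop` rewrite disarms the vector's destructor before the raw pointer is taken, rather than extracting the pointer and only then calling `mem::forget` on the still-live vector. A minimal standalone sketch of the pattern (tracking capacity explicitly, for generality):

    use std::mem::ManuallyDrop;

    fn main() {
        let mut v = ManuallyDrop::new(vec![0i32, 1, 2]);
        // The destructor is already disarmed, so no later drop or move of
        // the vector can invalidate this pointer.
        let (ptr, len, cap) = (v.as_mut_ptr(), v.len(), v.capacity());
        // Reconstruct ownership exactly once, e.g. inside a deferred closure.
        let restored = unsafe { Vec::from_raw_parts(ptr, len, cap) };
        assert_eq!(restored, [0, 1, 2]);
    }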
diff --git a/vendor/crossbeam-epoch/src/deferred.rs b/vendor/crossbeam-epoch/src/deferred.rs
index c33d51502..2f3d79fdf 100644
--- a/vendor/crossbeam-epoch/src/deferred.rs
+++ b/vendor/crossbeam-epoch/src/deferred.rs
@@ -29,6 +29,15 @@ impl fmt::Debug for Deferred {
}
impl Deferred {
+ pub(crate) const NO_OP: Self = {
+ fn no_op_call(_raw: *mut u8) {}
+ Self {
+ call: no_op_call,
+ data: MaybeUninit::uninit(),
+ _marker: PhantomData,
+ }
+ };
+
/// Constructs a new `Deferred` from a `FnOnce()`.
pub(crate) fn new<F: FnOnce()>(f: F) -> Self {
let size = mem::size_of::<F>();
@@ -37,10 +46,10 @@ impl Deferred {
unsafe {
if size <= mem::size_of::<Data>() && align <= mem::align_of::<Data>() {
let mut data = MaybeUninit::<Data>::uninit();
- ptr::write(data.as_mut_ptr() as *mut F, f);
+ ptr::write(data.as_mut_ptr().cast::<F>(), f);
unsafe fn call<F: FnOnce()>(raw: *mut u8) {
- let f: F = ptr::read(raw as *mut F);
+ let f: F = ptr::read(raw.cast::<F>());
f();
}
@@ -52,12 +61,12 @@ impl Deferred {
} else {
let b: Box<F> = Box::new(f);
let mut data = MaybeUninit::<Data>::uninit();
- ptr::write(data.as_mut_ptr() as *mut Box<F>, b);
+ ptr::write(data.as_mut_ptr().cast::<Box<F>>(), b);
unsafe fn call<F: FnOnce()>(raw: *mut u8) {
// It's safe to cast `raw` from `*mut u8` to `*mut Box<F>`, because `raw` is
// originally derived from `*mut Box<F>`.
- let b: Box<F> = ptr::read(raw as *mut Box<F>);
+ let b: Box<F> = ptr::read(raw.cast::<Box<F>>());
(*b)();
}
@@ -74,7 +83,7 @@ impl Deferred {
#[inline]
pub(crate) fn call(mut self) {
let call = self.call;
- unsafe { call(self.data.as_mut_ptr() as *mut u8) };
+ unsafe { call(self.data.as_mut_ptr().cast::<u8>()) };
}
}
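For context on the casts being tightened above: `Deferred` type-erases a closure behind a `fn(*mut u8)` plus a small inline buffer, spilling to a `Box` when the closure is too big or over-aligned for the buffer. A compilable sketch of the boxed path only (buffer type and method name simplified; the real code adds the inline fast path and the `NO_OP` const introduced in this hunk):

    use std::mem::MaybeUninit;
    use std::ptr;

    type Data = [usize; 4]; // stand-in for the crate's buffer type

    struct Deferred {
        call: unsafe fn(*mut u8),
        data: MaybeUninit<Data>,
    }

    impl Deferred {
        fn new<F: FnOnce()>(f: F) -> Self {
            unsafe fn call<F: FnOnce()>(raw: *mut u8) {
                // `raw` was derived from a `*mut Box<F>`, so read the box back.
                let b: Box<F> = unsafe { ptr::read(raw.cast::<Box<F>>()) };
                (*b)();
            }
            let mut data = MaybeUninit::<Data>::uninit();
            // Always box here for brevity; the real code stores `F` inline
            // whenever it fits `Data`'s size and alignment.
            unsafe { ptr::write(data.as_mut_ptr().cast::<Box<F>>(), Box::new(f)) };
            Deferred { call: call::<F>, data }
        }

        fn run(mut self) {
            let call = self.call;
            unsafe { call(self.data.as_mut_ptr().cast::<u8>()) };
        }
    }

    fn main() {
        Deferred::new(|| println!("deferred")).run();
    }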
diff --git a/vendor/crossbeam-epoch/src/guard.rs b/vendor/crossbeam-epoch/src/guard.rs
index 6db3750b0..ba7fe1b11 100644
--- a/vendor/crossbeam-epoch/src/guard.rs
+++ b/vendor/crossbeam-epoch/src/guard.rs
@@ -46,6 +46,7 @@ use crate::internal::Local;
/// if let Some(num) = unsafe { p.as_ref() } {
/// println!("The number is {}.", num);
/// }
+/// # unsafe { drop(a.into_owned()); } // avoid leak
/// ```
///
/// # Multiple guards
@@ -184,6 +185,7 @@ impl Guard {
/// });
/// }
/// }
+ /// # unsafe { drop(a.into_owned()); } // avoid leak
/// ```
pub unsafe fn defer_unchecked<F, R>(&self, f: F)
where
@@ -263,6 +265,7 @@ impl Guard {
/// guard.defer_destroy(p);
/// }
/// }
+ /// # unsafe { drop(a.into_owned()); } // avoid leak
/// ```
pub unsafe fn defer_destroy<T>(&self, ptr: Shared<'_, T>) {
self.defer_unchecked(move || ptr.into_owned());
@@ -320,6 +323,7 @@ impl Guard {
/// let p = a.load(SeqCst, &guard);
/// assert_eq!(unsafe { p.as_ref() }, Some(&777));
/// }
+ /// # unsafe { drop(a.into_owned()); } // avoid leak
/// ```
pub fn repin(&mut self) {
if let Some(local) = unsafe { self.local.as_ref() } {
@@ -356,6 +360,7 @@ impl Guard {
/// let p = a.load(SeqCst, &guard);
/// assert_eq!(unsafe { p.as_ref() }, Some(&777));
/// }
+ /// # unsafe { drop(a.into_owned()); } // avoid leak
/// ```
pub fn repin_after<F, R>(&mut self, f: F) -> R
where
@@ -451,6 +456,7 @@ impl fmt::Debug for Guard {
///
/// // Dropping `dummy` doesn't affect the current thread - it's just a noop.
/// }
+/// # unsafe { drop(a.into_owned()); } // avoid leak
/// ```
///
/// The most common use of this function is when constructing or destructing a data structure.
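A sketch of that destructor use case, pairing `unprotected()` with the `try_into_owned` method added earlier in this diff (`Holder` is a made-up type for illustration):

    use crossbeam_epoch::{self as epoch, Atomic};
    use std::sync::atomic::Ordering::Relaxed;

    struct Holder {
        slot: Atomic<u64>,
    }

    impl Drop for Holder {
        fn drop(&mut self) {
            // Safety: `&mut self` guarantees exclusive access, so the dummy
            // unprotected guard suffices and nobody else can observe the
            // pointer we reclaim. The slot may be null; `try_into_owned`
            // handles that.
            unsafe {
                let guard = epoch::unprotected();
                let p = self.slot.load(Relaxed, guard);
                if let Some(owned) = p.try_into_owned() {
                    drop(owned);
                }
            }
        }
    }

    fn main() {
        drop(Holder { slot: Atomic::new(42u64) }); // frees the 42
    }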
diff --git a/vendor/crossbeam-epoch/src/internal.rs b/vendor/crossbeam-epoch/src/internal.rs
index de208b13e..00c66a40a 100644
--- a/vendor/crossbeam-epoch/src/internal.rs
+++ b/vendor/crossbeam-epoch/src/internal.rs
@@ -55,9 +55,10 @@ use crate::sync::list::{Entry, IsElement, IterError, List};
use crate::sync::queue::Queue;
/// Maximum number of objects a bag can contain.
-#[cfg(not(crossbeam_sanitize))]
-const MAX_OBJECTS: usize = 62;
-#[cfg(crossbeam_sanitize)]
+#[cfg(not(any(crossbeam_sanitize, miri)))]
+const MAX_OBJECTS: usize = 64;
+// Makes it more likely to trigger any potential data races.
+#[cfg(any(crossbeam_sanitize, miri))]
const MAX_OBJECTS: usize = 4;
/// A bag of deferred functions.
@@ -106,87 +107,11 @@ impl Bag {
}
impl Default for Bag {
- #[rustfmt::skip]
fn default() -> Self {
- // TODO: [no_op; MAX_OBJECTS] syntax blocked by https://github.com/rust-lang/rust/issues/49147
- #[cfg(not(crossbeam_sanitize))]
- return Bag {
+ Bag {
len: 0,
- deferreds: [
- Deferred::new(no_op_func),
- Deferred::new(no_op_func),
- Deferred::new(no_op_func),
- Deferred::new(no_op_func),
- Deferred::new(no_op_func),
- Deferred::new(no_op_func),
- Deferred::new(no_op_func),
- Deferred::new(no_op_func),
- Deferred::new(no_op_func),
- Deferred::new(no_op_func),
- Deferred::new(no_op_func),
- Deferred::new(no_op_func),
- Deferred::new(no_op_func),
- Deferred::new(no_op_func),
- Deferred::new(no_op_func),
- Deferred::new(no_op_func),
- Deferred::new(no_op_func),
- Deferred::new(no_op_func),
- Deferred::new(no_op_func),
- Deferred::new(no_op_func),
- Deferred::new(no_op_func),
- Deferred::new(no_op_func),
- Deferred::new(no_op_func),
- Deferred::new(no_op_func),
- Deferred::new(no_op_func),
- Deferred::new(no_op_func),
- Deferred::new(no_op_func),
- Deferred::new(no_op_func),
- Deferred::new(no_op_func),
- Deferred::new(no_op_func),
- Deferred::new(no_op_func),
- Deferred::new(no_op_func),
- Deferred::new(no_op_func),
- Deferred::new(no_op_func),
- Deferred::new(no_op_func),
- Deferred::new(no_op_func),
- Deferred::new(no_op_func),
- Deferred::new(no_op_func),
- Deferred::new(no_op_func),
- Deferred::new(no_op_func),
- Deferred::new(no_op_func),
- Deferred::new(no_op_func),
- Deferred::new(no_op_func),
- Deferred::new(no_op_func),
- Deferred::new(no_op_func),
- Deferred::new(no_op_func),
- Deferred::new(no_op_func),
- Deferred::new(no_op_func),
- Deferred::new(no_op_func),
- Deferred::new(no_op_func),
- Deferred::new(no_op_func),
- Deferred::new(no_op_func),
- Deferred::new(no_op_func),
- Deferred::new(no_op_func),
- Deferred::new(no_op_func),
- Deferred::new(no_op_func),
- Deferred::new(no_op_func),
- Deferred::new(no_op_func),
- Deferred::new(no_op_func),
- Deferred::new(no_op_func),
- Deferred::new(no_op_func),
- Deferred::new(no_op_func),
- ],
- };
- #[cfg(crossbeam_sanitize)]
- return Bag {
- len: 0,
- deferreds: [
- Deferred::new(no_op_func),
- Deferred::new(no_op_func),
- Deferred::new(no_op_func),
- Deferred::new(no_op_func),
- ],
- };
+ deferreds: [Deferred::NO_OP; MAX_OBJECTS],
+ }
}
}
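The wall of `Deferred::new(no_op_func)` entries existed because `[expr; N]` requires `Copy` for runtime operands; promoting the no-op to the associated const `Deferred::NO_OP` sidesteps that, since a `const` operand is instantiated afresh per element. A minimal sketch of the rule:

    struct NotCopy(u8);

    const ZERO: NotCopy = NotCopy(0);

    fn main() {
        // Allowed: the repeat operand is a `const` item, so this works even
        // though `NotCopy` is not `Copy`.
        let a = [ZERO; 4];
        // Rejected if uncommented: a runtime operand must be `Copy`.
        // let b = [NotCopy(0); 4];
        assert_eq!(a.len(), 4);
    }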
@@ -194,7 +119,7 @@ impl Drop for Bag {
fn drop(&mut self) {
// Call all deferred functions.
for deferred in &mut self.deferreds[..self.len] {
- let no_op = Deferred::new(no_op_func);
+ let no_op = Deferred::NO_OP;
let owned_deferred = mem::replace(deferred, no_op);
owned_deferred.call();
}
@@ -210,8 +135,6 @@ impl fmt::Debug for Bag {
}
}
-fn no_op_func() {}
-
/// A pair of an epoch and a bag.
#[derive(Default, Debug)]
struct SealedBag {
@@ -375,13 +298,14 @@ pub(crate) struct Local {
// Make sure `Local` is less than or equal to 2048 bytes.
// https://github.com/crossbeam-rs/crossbeam/issues/551
-#[cfg(not(crossbeam_sanitize))] // `crossbeam_sanitize` reduces the size of `Local`
+#[cfg(not(any(crossbeam_sanitize, miri)))] // `crossbeam_sanitize` and `miri` reduce the size of `Local`
#[test]
fn local_size() {
- assert!(
- core::mem::size_of::<Local>() <= 2048,
- "An allocation of `Local` should be <= 2048 bytes."
- );
+ // TODO: https://github.com/crossbeam-rs/crossbeam/issues/869
+ // assert!(
+ // core::mem::size_of::<Local>() <= 2048,
+ // "An allocation of `Local` should be <= 2048 bytes."
+ // );
}
impl Local {
@@ -468,7 +392,10 @@ impl Local {
// Now we must store `new_epoch` into `self.epoch` and execute a `SeqCst` fence.
// The fence makes sure that any future loads from `Atomic`s will not happen before
// this store.
- if cfg!(any(target_arch = "x86", target_arch = "x86_64")) {
+ if cfg!(all(
+ any(target_arch = "x86", target_arch = "x86_64"),
+ not(miri)
+ )) {
// HACK(stjepang): On x86 architectures there are two different ways of executing
// a `SeqCst` fence.
//
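The added `not(miri)` routes Miri to the plain `fence(SeqCst)` path, presumably because Miri does not credit the locked RMW with the fence effect real x86 hardware provides. A hedged sketch of the trick itself (`full_fence` is a hypothetical helper; the real code performs the RMW on its epoch slot):

    use std::sync::atomic::{fence, AtomicUsize, Ordering};

    fn full_fence(dummy: &AtomicUsize) {
        if cfg!(all(any(target_arch = "x86", target_arch = "x86_64"), not(miri))) {
            // On x86 a locked RMW is a full barrier and is often cheaper
            // than the `mfence` that `fence(SeqCst)` lowers to. The result
            // is irrelevant; only the barrier effect matters.
            let _ = dummy.compare_exchange(0, 0, Ordering::SeqCst, Ordering::SeqCst);
        } else {
            fence(Ordering::SeqCst);
        }
    }

    fn main() {
        full_fence(&AtomicUsize::new(0));
    }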