Diffstat (limited to 'library/alloc/src/sync.rs')
-rw-r--r-- | library/alloc/src/sync.rs | 60
1 file changed, 27 insertions, 33 deletions
diff --git a/library/alloc/src/sync.rs b/library/alloc/src/sync.rs
index fdd341a06..24849d52d 100644
--- a/library/alloc/src/sync.rs
+++ b/library/alloc/src/sync.rs
@@ -11,14 +11,13 @@
 use core::any::Any;
 use core::borrow;
 use core::cmp::Ordering;
-use core::convert::{From, TryFrom};
 use core::fmt;
 use core::hash::{Hash, Hasher};
 use core::hint;
 use core::intrinsics::abort;
 #[cfg(not(no_global_oom_handling))]
 use core::iter;
-use core::marker::{PhantomData, Unpin, Unsize};
+use core::marker::{PhantomData, Unsize};
 #[cfg(not(no_global_oom_handling))]
 use core::mem::size_of_val;
 use core::mem::{self, align_of_val_raw};
@@ -51,8 +50,16 @@ mod tests;
 ///
 /// Going above this limit will abort your program (although not
 /// necessarily) at _exactly_ `MAX_REFCOUNT + 1` references.
+/// Trying to go above it might call a `panic` (if not actually going above it).
+///
+/// This is a global invariant, and also applies when using a compare-exchange loop.
+///
+/// See comment in `Arc::clone`.
 const MAX_REFCOUNT: usize = (isize::MAX) as usize;
 
+/// The error in case either counter reaches above `MAX_REFCOUNT`, and we can `panic` safely.
+const INTERNAL_OVERFLOW_ERROR: &str = "Arc counter overflow";
+
 #[cfg(not(sanitize = "thread"))]
 macro_rules! acquire {
     ($x:expr) => {
@@ -180,8 +187,6 @@ macro_rules! acquire {
 /// [mutex]: ../../std/sync/struct.Mutex.html
 /// [rwlock]: ../../std/sync/struct.RwLock.html
 /// [atomic]: core::sync::atomic
-/// [`Send`]: core::marker::Send
-/// [`Sync`]: core::marker::Sync
 /// [deref]: core::ops::Deref
 /// [downgrade]: Arc::downgrade
 /// [upgrade]: Weak::upgrade
@@ -654,20 +659,17 @@ impl<T> Arc<T> {
     ///
     /// This will succeed even if there are outstanding weak references.
     ///
-    // FIXME: when `Arc::into_inner` is stabilized, add this paragraph:
-    /*
     /// It is strongly recommended to use [`Arc::into_inner`] instead if you don't
     /// want to keep the `Arc` in the [`Err`] case.
     /// Immediately dropping the [`Err`] payload, like in the expression
     /// `Arc::try_unwrap(this).ok()`, can still cause the strong count to
     /// drop to zero and the inner value of the `Arc` to be dropped:
-    /// For instance if two threads execute this expression in parallel, then
+    /// For instance if two threads each execute this expression in parallel, then
     /// there is a race condition. The threads could first both check whether they
     /// have the last clone of their `Arc` via `Arc::try_unwrap`, and then
     /// both drop their `Arc` in the call to [`ok`][`Result::ok`],
     /// taking the strong count from two down to zero.
     ///
-    */
     /// # Examples
     ///
     /// ```
@@ -711,20 +713,13 @@ impl<T> Arc<T> {
     /// This means in particular that the inner value is not dropped.
     ///
     /// The similar expression `Arc::try_unwrap(this).ok()` does not
-    /// offer such a guarantee. See the last example below.
-    //
-    // FIXME: when `Arc::into_inner` is stabilized, add this to end
-    // of the previous sentence:
-    /*
+    /// offer such a guarantee. See the last example below
     /// and the documentation of [`Arc::try_unwrap`].
-    */
     ///
     /// # Examples
     ///
     /// Minimal example demonstrating the guarantee that `Arc::into_inner` gives.
     /// ```
-    /// #![feature(arc_into_inner)]
-    ///
     /// use std::sync::Arc;
     ///
     /// let x = Arc::new(3);
@@ -748,8 +743,6 @@ impl<T> Arc<T> {
     ///
     /// A more practical example demonstrating the need for `Arc::into_inner`:
     /// ```
-    /// #![feature(arc_into_inner)]
-    ///
     /// use std::sync::Arc;
     ///
     /// // Definition of a simple singly linked list using `Arc`:
@@ -799,13 +792,8 @@ impl<T> Arc<T> {
     /// x_thread.join().unwrap();
     /// y_thread.join().unwrap();
     /// ```
-
-    // FIXME: when `Arc::into_inner` is stabilized, adjust above documentation
-    // and the documentation of `Arc::try_unwrap` according to the `FIXME`s. Also
-    // open an issue on rust-lang/rust-clippy, asking for a lint against
-    // `Arc::try_unwrap(...).ok()`.
     #[inline]
-    #[unstable(feature = "arc_into_inner", issue = "106894")]
+    #[stable(feature = "arc_into_inner", since = "1.70.0")]
     pub fn into_inner(this: Self) -> Option<T> {
         // Make sure that the ordinary `Drop` implementation isn’t called as well
         let mut this = mem::ManuallyDrop::new(this);
@@ -1104,6 +1092,9 @@ impl<T: ?Sized> Arc<T> {
                 continue;
             }
 
+            // We can't allow the refcount to increase much past `MAX_REFCOUNT`.
+            assert!(cur <= MAX_REFCOUNT, "{}", INTERNAL_OVERFLOW_ERROR);
+
             // NOTE: this code currently ignores the possibility of overflow
             // into usize::MAX; in general both Rc and Arc need to be adjusted
             // to deal with overflow.
@@ -1247,7 +1238,7 @@ impl<T: ?Sized> Arc<T> {
     #[inline]
     #[stable(feature = "arc_mutate_strong_count", since = "1.51.0")]
     pub unsafe fn decrement_strong_count(ptr: *const T) {
-        unsafe { mem::drop(Arc::from_raw(ptr)) };
+        unsafe { drop(Arc::from_raw(ptr)) };
     }
 
     #[inline]
@@ -1410,7 +1401,7 @@ impl<T> Arc<[T]> {
     ///
     /// Behavior is undefined should the size be wrong.
     #[cfg(not(no_global_oom_handling))]
-    unsafe fn from_iter_exact(iter: impl iter::Iterator<Item = T>, len: usize) -> Arc<[T]> {
+    unsafe fn from_iter_exact(iter: impl Iterator<Item = T>, len: usize) -> Arc<[T]> {
         // Panic guard while cloning T elements.
         // In the event of a panic, elements that have been written
         // into the new ArcInner will be dropped, then the memory freed.
@@ -1519,6 +1510,11 @@ impl<T: ?Sized> Clone for Arc<T> {
         // the worst already happened and we actually do overflow the `usize` counter. However, that
         // requires the counter to grow from `isize::MAX` to `usize::MAX` between the increment
         // above and the `abort` below, which seems exceedingly unlikely.
+        //
+        // This is a global invariant, and also applies when using a compare-exchange loop to increment
+        // counters in other methods.
+        // Otherwise, the counter could be brought to an almost-overflow using a compare-exchange loop,
+        // and then overflow using a few `fetch_add`s.
         if old_size > MAX_REFCOUNT {
             abort();
         }
@@ -2180,9 +2176,7 @@ impl<T: ?Sized> Weak<T> {
                     return None;
                 }
                 // See comments in `Arc::clone` for why we do this (for `mem::forget`).
-                if n > MAX_REFCOUNT {
-                    abort();
-                }
+                assert!(n <= MAX_REFCOUNT, "{}", INTERNAL_OVERFLOW_ERROR);
                 Some(n + 1)
             })
             .ok()
@@ -2461,10 +2455,10 @@ impl<T: ?Sized + PartialEq> PartialEq for Arc<T> {
 
     /// Inequality for two `Arc`s.
     ///
-    /// Two `Arc`s are unequal if their inner values are unequal.
+    /// Two `Arc`s are not equal if their inner values are not equal.
     ///
     /// If `T` also implements `Eq` (implying reflexivity of equality),
-    /// two `Arc`s that point to the same value are never unequal.
+    /// two `Arc`s that point to the same value are always equal.
     ///
     /// # Examples
     ///
@@ -2821,7 +2815,7 @@ impl<T, const N: usize> TryFrom<Arc<[T]>> for Arc<[T; N]> {
 
 #[cfg(not(no_global_oom_handling))]
 #[stable(feature = "shared_from_iter", since = "1.37.0")]
-impl<T> iter::FromIterator<T> for Arc<[T]> {
+impl<T> FromIterator<T> for Arc<[T]> {
     /// Takes each element in the `Iterator` and collects it into an `Arc<[T]>`.
     ///
     /// # Performance characteristics
@@ -2860,7 +2854,7 @@ impl<T> iter::FromIterator<T> for Arc<[T]> {
     /// let evens: Arc<[u8]> = (0..10).collect(); // Just a single allocation happens here.
     /// # assert_eq!(&*evens, &*(0..10).collect::<Vec<_>>());
     /// ```
-    fn from_iter<I: iter::IntoIterator<Item = T>>(iter: I) -> Self {
+    fn from_iter<I: IntoIterator<Item = T>>(iter: I) -> Self {
         ToArcSlice::to_arc_slice(iter.into_iter())
     }
 }
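The overflow checks touched by this diff all follow the invariant spelled out in the new comment in `Clone for Arc<T>`: a `fetch_add` path can only detect overflow after the increment has happened, so it must `abort` and rely on the headroom between `isize::MAX` and `usize::MAX`, while a compare-exchange loop (as in `Arc::downgrade` and `Weak::upgrade`) can reject the increment before storing it, so a plain `panic` via `assert!` is safe there. A minimal, self-contained sketch of that pattern, using a hypothetical `Counter` type rather than `Arc`'s real internals and simplified memory orderings:

```
use std::process::abort;
use std::sync::atomic::{AtomicUsize, Ordering::Relaxed};

const MAX_REFCOUNT: usize = isize::MAX as usize;
const INTERNAL_OVERFLOW_ERROR: &str = "Arc counter overflow";

/// Hypothetical stand-in for one of the counters in `ArcInner`.
struct Counter(AtomicUsize);

impl Counter {
    /// `fetch_add`-style increment, like `Arc::clone`: the check runs *after*
    /// the increment, so values slightly above `MAX_REFCOUNT` can briefly
    /// exist and the only safe response is to `abort`.
    fn increment_fetch_add(&self) {
        let old = self.0.fetch_add(1, Relaxed);
        if old > MAX_REFCOUNT {
            abort();
        }
    }

    /// Compare-exchange-style increment, like the loops this patch changes:
    /// the check runs *before* the store, so the counter never exceeds
    /// `MAX_REFCOUNT + 1` and a plain `panic` (via `assert!`) is fine.
    fn increment_checked(&self) {
        let mut cur = self.0.load(Relaxed);
        loop {
            assert!(cur <= MAX_REFCOUNT, "{}", INTERNAL_OVERFLOW_ERROR);
            match self.0.compare_exchange_weak(cur, cur + 1, Relaxed, Relaxed) {
                Ok(_) => return,
                Err(actual) => cur = actual,
            }
        }
    }
}

fn main() {
    let c = Counter(AtomicUsize::new(0));
    c.increment_fetch_add();
    c.increment_checked();
    assert_eq!(c.0.load(Relaxed), 2);
}
```

This is why the added comments call the limit a global invariant: if the compare-exchange loops did not check it, one of them could push a counter close to `usize::MAX`, leaving too little headroom for the post-increment `abort` in `clone` to catch the wraparound.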
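For the newly stabilized `Arc::into_inner` (stable since 1.70.0 per the attribute change above), the documented guarantee is that when every clone of an `Arc` has `Arc::into_inner` called on it, exactly one of the calls returns the inner value; the expanded `Arc::try_unwrap` docs explain why `Arc::try_unwrap(this).ok()` cannot promise this. A short usage sketch restating the minimal example from the documentation:

```
use std::sync::Arc;
use std::thread;

fn main() {
    let x = Arc::new(3);
    let y = Arc::clone(&x);

    // Both clones are consumed by `Arc::into_inner`, possibly concurrently.
    let x_thread = thread::spawn(move || Arc::into_inner(x));
    let y_thread = thread::spawn(move || Arc::into_inner(y));

    let x_inner = x_thread.join().unwrap();
    let y_inner = y_thread.join().unwrap();

    // Exactly one of the two calls receives the value. With
    // `Arc::try_unwrap(...).ok()` both threads could observe a strong count
    // of 2, return `None`, and the value would be dropped without either
    // thread getting it.
    assert!(matches!((x_inner, y_inner), (Some(3), None) | (None, Some(3))));
}
```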
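The `FromIterator` hunks only drop the redundant `iter::` path prefix, but the surrounding docs (partially visible above) describe two collection paths: an iterator whose exact length is trusted (`TrustedLen`) is written straight into a single allocation via `from_iter_exact`, while other iterators are first collected into a `Vec<T>`. A small usage sketch of both cases, assuming those documented performance characteristics:

```
use std::sync::Arc;

fn main() {
    // A range (and a `map` over it) has a trusted exact length, so collecting
    // can allocate the `Arc<[u8]>` once and fill it in place.
    let doubled: Arc<[u8]> = (0..10).map(|x| x * 2).collect();
    assert_eq!(&*doubled, &[0u8, 2, 4, 6, 8, 10, 12, 14, 16, 18]);

    // `filter` hides the final length, so this path collects into a `Vec<u8>`
    // first and then copies the elements into the shared allocation.
    let odds: Arc<[u8]> = (0u8..10).filter(|x| x % 2 == 1).collect();
    assert_eq!(&*odds, &[1u8, 3, 5, 7, 9]);
}
```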