From 698f8c2f01ea549d77d7dc3338a12e04c11057b9 Mon Sep 17 00:00:00 2001
From: Daniel Baumann
Date: Wed, 17 Apr 2024 14:02:58 +0200
Subject: Adding upstream version 1.64.0+dfsg1.

Signed-off-by: Daniel Baumann
---
 library/alloc/src/sync.rs | 2765 +++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 2765 insertions(+)
 create mode 100644 library/alloc/src/sync.rs

diff --git a/library/alloc/src/sync.rs b/library/alloc/src/sync.rs
new file mode 100644
index 000000000..4c03cc3ed
--- /dev/null
+++ b/library/alloc/src/sync.rs
@@ -0,0 +1,2765 @@
+#![stable(feature = "rust1", since = "1.0.0")]
+
+//! Thread-safe reference-counting pointers.
+//!
+//! See the [`Arc<T>`][Arc] documentation for more details.
+
+use core::any::Any;
+use core::borrow;
+use core::cmp::Ordering;
+use core::convert::{From, TryFrom};
+use core::fmt;
+use core::hash::{Hash, Hasher};
+use core::hint;
+use core::intrinsics::abort;
+#[cfg(not(no_global_oom_handling))]
+use core::iter;
+use core::marker::{PhantomData, Unpin, Unsize};
+#[cfg(not(no_global_oom_handling))]
+use core::mem::size_of_val;
+use core::mem::{self, align_of_val_raw};
+use core::ops::{CoerceUnsized, Deref, DispatchFromDyn, Receiver};
+use core::panic::{RefUnwindSafe, UnwindSafe};
+use core::pin::Pin;
+use core::ptr::{self, NonNull};
+#[cfg(not(no_global_oom_handling))]
+use core::slice::from_raw_parts_mut;
+use core::sync::atomic;
+use core::sync::atomic::Ordering::{Acquire, Relaxed, Release};
+
+#[cfg(not(no_global_oom_handling))]
+use crate::alloc::handle_alloc_error;
+#[cfg(not(no_global_oom_handling))]
+use crate::alloc::{box_free, WriteCloneIntoRaw};
+use crate::alloc::{AllocError, Allocator, Global, Layout};
+use crate::borrow::{Cow, ToOwned};
+use crate::boxed::Box;
+use crate::rc::is_dangling;
+#[cfg(not(no_global_oom_handling))]
+use crate::string::String;
+#[cfg(not(no_global_oom_handling))]
+use crate::vec::Vec;
+
+#[cfg(test)]
+mod tests;
+
+/// A soft limit on the amount of references that may be made to an `Arc`.
+///
+/// Going above this limit will abort your program (although not
+/// necessarily) at _exactly_ `MAX_REFCOUNT + 1` references.
+const MAX_REFCOUNT: usize = (isize::MAX) as usize;
+
+#[cfg(not(sanitize = "thread"))]
+macro_rules! acquire {
+    ($x:expr) => {
+        atomic::fence(Acquire)
+    };
+}
+
+// ThreadSanitizer does not support memory fences. To avoid false positive
+// reports in Arc / Weak implementation use atomic loads for synchronization
+// instead.
+#[cfg(sanitize = "thread")]
+macro_rules! acquire {
+    ($x:expr) => {
+        $x.load(Acquire)
+    };
+}
+
+/// A thread-safe reference-counting pointer. 'Arc' stands for 'Atomically
+/// Reference Counted'.
+///
+/// The type `Arc<T>` provides shared ownership of a value of type `T`,
+/// allocated in the heap. Invoking [`clone`][clone] on `Arc` produces
+/// a new `Arc` instance, which points to the same allocation on the heap as the
+/// source `Arc`, while increasing a reference count. When the last `Arc`
+/// pointer to a given allocation is destroyed, the value stored in that allocation (often
+/// referred to as "inner value") is also dropped.
+///
+/// Shared references in Rust disallow mutation by default, and `Arc` is no
+/// exception: you cannot generally obtain a mutable reference to something
+/// inside an `Arc`. If you need to mutate through an `Arc`, use
+/// [`Mutex`][mutex], [`RwLock`][rwlock], or one of the [`Atomic`][atomic]
+/// types.
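+///
+/// For example, a counter behind a [`Mutex`][mutex] can be shared between
+/// threads through an `Arc` and mutated from each of them:
+///
+/// ```
+/// use std::sync::{Arc, Mutex};
+/// use std::thread;
+///
+/// let counter = Arc::new(Mutex::new(0));
+/// let mut handles = Vec::new();
+///
+/// for _ in 0..5 {
+///     // Each thread gets its own `Arc` handle to the shared mutex.
+///     let counter = Arc::clone(&counter);
+///     handles.push(thread::spawn(move || {
+///         *counter.lock().unwrap() += 1;
+///     }));
+/// }
+///
+/// for handle in handles {
+///     handle.join().unwrap();
+/// }
+///
+/// assert_eq!(*counter.lock().unwrap(), 5);
+/// ```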
+///
+/// ## Thread Safety
+///
+/// Unlike [`Rc<T>`], `Arc<T>` uses atomic operations for its reference
+/// counting. This means that it is thread-safe. The disadvantage is that
+/// atomic operations are more expensive than ordinary memory accesses. If you
+/// are not sharing reference-counted allocations between threads, consider using
+/// [`Rc<T>`] for lower overhead. [`Rc<T>`] is a safe default, because the
+/// compiler will catch any attempt to send an [`Rc<T>`] between threads.
+/// However, a library might choose `Arc<T>` in order to give library consumers
+/// more flexibility.
+///
+/// `Arc<T>` will implement [`Send`] and [`Sync`] as long as the `T` implements
+/// [`Send`] and [`Sync`]. Why can't you put a non-thread-safe type `T` in an
+/// `Arc<T>` to make it thread-safe? This may be a bit counter-intuitive at
+/// first: after all, isn't the point of `Arc<T>` thread safety? The key is
+/// this: `Arc<T>` makes it thread safe to have multiple ownership of the same
+/// data, but it doesn't add thread safety to its data. Consider
+/// <code>Arc<[RefCell\<T>]></code>. [`RefCell<T>`] isn't [`Sync`], and if `Arc<T>` was always
+/// [`Send`], <code>Arc<[RefCell\<T>]></code> would be as well. But then we'd have a problem:
+/// [`RefCell<T>`] is not thread safe; it keeps track of the borrowing count using
+/// non-atomic operations.
+///
+/// In the end, this means that you may need to pair `Arc<T>` with some sort of
+/// [`std::sync`] type, usually [`Mutex<T>`][mutex].
+///
+/// ## Breaking cycles with `Weak`
+///
+/// The [`downgrade`][downgrade] method can be used to create a non-owning
+/// [`Weak`] pointer. A [`Weak`] pointer can be [`upgrade`][upgrade]d
+/// to an `Arc`, but this will return [`None`] if the value stored in the allocation has
+/// already been dropped. In other words, `Weak` pointers do not keep the value
+/// inside the allocation alive; however, they *do* keep the allocation
+/// (the backing store for the value) alive.
+///
+/// A cycle between `Arc` pointers will never be deallocated. For this reason,
+/// [`Weak`] is used to break cycles. For example, a tree could have
+/// strong `Arc` pointers from parent nodes to children, and [`Weak`]
+/// pointers from children back to their parents.
+///
+/// # Cloning references
+///
+/// Creating a new reference from an existing reference-counted pointer is done using the
+/// `Clone` trait implemented for [`Arc<T>`][Arc] and [`Weak<T>`][Weak].
+///
+/// ```
+/// use std::sync::Arc;
+/// let foo = Arc::new(vec![1.0, 2.0, 3.0]);
+/// // The two syntaxes below are equivalent.
+/// let a = foo.clone();
+/// let b = Arc::clone(&foo);
+/// // a, b, and foo are all Arcs that point to the same memory location
+/// ```
+///
+/// ## `Deref` behavior
+///
+/// `Arc<T>` automatically dereferences to `T` (via the [`Deref`][deref] trait),
+/// so you can call `T`'s methods on a value of type `Arc<T>`. To avoid name
+/// clashes with `T`'s methods, the methods of `Arc<T>` itself are associated
+/// functions, called using [fully qualified syntax]:
+///
+/// ```
+/// use std::sync::Arc;
+///
+/// let my_arc = Arc::new(());
+/// let my_weak = Arc::downgrade(&my_arc);
+/// ```
+///
+/// `Arc<T>`'s implementations of traits like `Clone` may also be called using
+/// fully qualified syntax. Some people prefer to use fully qualified syntax,
+/// while others prefer using method-call syntax.
+///
+/// ```
+/// use std::sync::Arc;
+///
+/// let arc = Arc::new(());
+/// // Method-call syntax
+/// let arc2 = arc.clone();
+/// // Fully qualified syntax
+/// let arc3 = Arc::clone(&arc);
+/// ```
+///
+/// [`Weak<T>`][Weak] does not auto-dereference to `T`, because the inner value may have
+/// already been dropped.
+///
+/// [`Rc<T>`]: crate::rc::Rc
+/// [clone]: Clone::clone
+/// [mutex]: ../../std/sync/struct.Mutex.html
+/// [rwlock]: ../../std/sync/struct.RwLock.html
+/// [atomic]: core::sync::atomic
+/// [`Send`]: core::marker::Send
+/// [`Sync`]: core::marker::Sync
+/// [deref]: core::ops::Deref
+/// [downgrade]: Arc::downgrade
+/// [upgrade]: Weak::upgrade
+/// [RefCell\<T>]: core::cell::RefCell
+/// [`RefCell<T>`]: core::cell::RefCell
+/// [`std::sync`]: ../../std/sync/index.html
+/// [`Arc::clone(&from)`]: Arc::clone
+/// [fully qualified syntax]: https://doc.rust-lang.org/book/ch19-03-advanced-traits.html#fully-qualified-syntax-for-disambiguation-calling-methods-with-the-same-name
+///
+/// # Examples
+///
+/// Sharing some immutable data between threads:
+///
+// Note that we **do not** run these tests here. The windows builders get super
+// unhappy if a thread outlives the main thread and then exits at the same time
+// (something deadlocks) so we just avoid this entirely by not running these
+// tests.
+/// ```no_run
+/// use std::sync::Arc;
+/// use std::thread;
+///
+/// let five = Arc::new(5);
+///
+/// for _ in 0..10 {
+///     let five = Arc::clone(&five);
+///
+///     thread::spawn(move || {
+///         println!("{five:?}");
+///     });
+/// }
+/// ```
+///
+/// Sharing a mutable [`AtomicUsize`]:
+///
+/// [`AtomicUsize`]: core::sync::atomic::AtomicUsize "sync::atomic::AtomicUsize"
+///
+/// ```no_run
+/// use std::sync::Arc;
+/// use std::sync::atomic::{AtomicUsize, Ordering};
+/// use std::thread;
+///
+/// let val = Arc::new(AtomicUsize::new(5));
+///
+/// for _ in 0..10 {
+///     let val = Arc::clone(&val);
+///
+///     thread::spawn(move || {
+///         let v = val.fetch_add(1, Ordering::SeqCst);
+///         println!("{v:?}");
+///     });
+/// }
+/// ```
+///
+/// See the [`rc` documentation][rc_examples] for more examples of reference
+/// counting in general.
+///
+/// [rc_examples]: crate::rc#examples
+#[cfg_attr(not(test), rustc_diagnostic_item = "Arc")]
+#[stable(feature = "rust1", since = "1.0.0")]
+pub struct Arc<T: ?Sized> {
+    ptr: NonNull<ArcInner<T>>,
+    phantom: PhantomData<ArcInner<T>>,
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+unsafe impl<T: ?Sized + Sync + Send> Send for Arc<T> {}
+#[stable(feature = "rust1", since = "1.0.0")]
+unsafe impl<T: ?Sized + Sync + Send> Sync for Arc<T> {}
+
+#[stable(feature = "catch_unwind", since = "1.9.0")]
+impl<T: RefUnwindSafe + ?Sized> UnwindSafe for Arc<T> {}
+
+#[unstable(feature = "coerce_unsized", issue = "27732")]
+impl<T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<Arc<U>> for Arc<T> {}
+
+#[unstable(feature = "dispatch_from_dyn", issue = "none")]
+impl<T: ?Sized + Unsize<U>, U: ?Sized> DispatchFromDyn<Arc<U>> for Arc<T> {}
+
+impl<T: ?Sized> Arc<T> {
+    unsafe fn from_inner(ptr: NonNull<ArcInner<T>>) -> Self {
+        Self { ptr, phantom: PhantomData }
+    }
+
+    unsafe fn from_ptr(ptr: *mut ArcInner<T>) -> Self {
+        unsafe { Self::from_inner(NonNull::new_unchecked(ptr)) }
+    }
+}
+
+/// `Weak<T>` is a version of [`Arc`] that holds a non-owning reference to the
+/// managed allocation. The allocation is accessed by calling [`upgrade`] on the `Weak`
+/// pointer, which returns an <code>[Option]<[Arc]\<T>></code>.
+///
+/// Since a `Weak` reference does not count towards ownership, it will not
+/// prevent the value stored in the allocation from being dropped, and `Weak` itself makes no
+/// guarantees about the value still being present.
+/// Thus it may return [`None`]
+/// when [`upgrade`]d. Note however that a `Weak` reference *does* prevent the allocation
+/// itself (the backing store) from being deallocated.
+///
+/// A `Weak` pointer is useful for keeping a temporary reference to the allocation
+/// managed by [`Arc`] without preventing its inner value from being dropped. It is also used to
+/// prevent circular references between [`Arc`] pointers, since mutual owning references
+/// would never allow either [`Arc`] to be dropped. For example, a tree could
+/// have strong [`Arc`] pointers from parent nodes to children, and `Weak`
+/// pointers from children back to their parents.
+///
+/// The typical way to obtain a `Weak` pointer is to call [`Arc::downgrade`].
+///
+/// [`upgrade`]: Weak::upgrade
+#[stable(feature = "arc_weak", since = "1.4.0")]
+pub struct Weak<T: ?Sized> {
+    // This is a `NonNull` to allow optimizing the size of this type in enums,
+    // but it is not necessarily a valid pointer.
+    // `Weak::new` sets this to `usize::MAX` so that it doesn’t need
+    // to allocate space on the heap. That's not a value a real pointer
+    // will ever have because RcBox has alignment at least 2.
+    // This is only possible when `T: Sized`; unsized `T` never dangle.
+    ptr: NonNull<ArcInner<T>>,
+}
+
+#[stable(feature = "arc_weak", since = "1.4.0")]
+unsafe impl<T: ?Sized + Sync + Send> Send for Weak<T> {}
+#[stable(feature = "arc_weak", since = "1.4.0")]
+unsafe impl<T: ?Sized + Sync + Send> Sync for Weak<T> {}
+
+#[unstable(feature = "coerce_unsized", issue = "27732")]
+impl<T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<Weak<U>> for Weak<T> {}
+#[unstable(feature = "dispatch_from_dyn", issue = "none")]
+impl<T: ?Sized + Unsize<U>, U: ?Sized> DispatchFromDyn<Weak<U>> for Weak<T> {}
+
+#[stable(feature = "arc_weak", since = "1.4.0")]
+impl<T: ?Sized> fmt::Debug for Weak<T> {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        write!(f, "(Weak)")
+    }
+}
+
+// This is repr(C) to future-proof against possible field-reordering, which
+// would interfere with otherwise safe [into|from]_raw() of transmutable
+// inner types.
+#[repr(C)]
+struct ArcInner<T: ?Sized> {
+    strong: atomic::AtomicUsize,
+
+    // the value usize::MAX acts as a sentinel for temporarily "locking" the
+    // ability to upgrade weak pointers or downgrade strong ones; this is used
+    // to avoid races in `make_mut` and `get_mut`.
+    weak: atomic::AtomicUsize,
+
+    data: T,
+}
+
+unsafe impl<T: ?Sized + Sync + Send> Send for ArcInner<T> {}
+unsafe impl<T: ?Sized + Sync + Send> Sync for ArcInner<T> {}
+
+impl<T> Arc<T> {
+    /// Constructs a new `Arc<T>`.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use std::sync::Arc;
+    ///
+    /// let five = Arc::new(5);
+    /// ```
+    #[cfg(not(no_global_oom_handling))]
+    #[inline]
+    #[stable(feature = "rust1", since = "1.0.0")]
+    pub fn new(data: T) -> Arc<T> {
+        // Start the weak pointer count as 1 which is the weak pointer that's
+        // held by all the strong pointers (kinda), see std/rc.rs for more info
+        let x: Box<_> = Box::new(ArcInner {
+            strong: atomic::AtomicUsize::new(1),
+            weak: atomic::AtomicUsize::new(1),
+            data,
+        });
+        unsafe { Self::from_inner(Box::leak(x).into()) }
+    }
+
+    /// Constructs a new `Arc<T>` while giving you a `Weak<T>` to the allocation,
+    /// to allow you to construct a `T` which holds a weak pointer to itself.
+    ///
+    /// Generally, a structure circularly referencing itself, either directly or
+    /// indirectly, should not hold a strong reference to itself to prevent a memory leak.
+    /// Using this function, you get access to the weak pointer during the
+    /// initialization of `T`, before the `Arc<T>` is created, such that you can
+    /// clone and store it inside the `T`.
+    ///
+    /// `new_cyclic` first allocates the managed allocation for the `Arc<T>`,
+    /// then calls your closure, giving it a `Weak<T>` to this allocation,
+    /// and only afterwards completes the construction of the `Arc<T>` by placing
+    /// the `T` returned from your closure into the allocation.
+    ///
+    /// Since the new `Arc<T>` is not fully-constructed until `Arc<T>::new_cyclic`
+    /// returns, calling [`upgrade`] on the weak reference inside your closure will
+    /// fail and result in a `None` value.
+    ///
+    /// # Panics
+    ///
+    /// If `data_fn` panics, the panic is propagated to the caller, and the
+    /// temporary [`Weak<T>`] is dropped normally.
+    ///
+    /// # Example
+    ///
+    /// ```
+    /// # #![allow(dead_code)]
+    /// use std::sync::{Arc, Weak};
+    ///
+    /// struct Gadget {
+    ///     me: Weak<Gadget>,
+    /// }
+    ///
+    /// impl Gadget {
+    ///     /// Construct a reference counted Gadget.
+    ///     fn new() -> Arc<Self> {
+    ///         // `me` is a `Weak<Gadget>` pointing at the new allocation of the
+    ///         // `Arc` we're constructing.
+    ///         Arc::new_cyclic(|me| {
+    ///             // Create the actual struct here.
+    ///             Gadget { me: me.clone() }
+    ///         })
+    ///     }
+    ///
+    ///     /// Return a reference counted pointer to Self.
+    ///     fn me(&self) -> Arc<Self> {
+    ///         self.me.upgrade().unwrap()
+    ///     }
+    /// }
+    /// ```
+    /// [`upgrade`]: Weak::upgrade
+    #[cfg(not(no_global_oom_handling))]
+    #[inline]
+    #[stable(feature = "arc_new_cyclic", since = "1.60.0")]
+    pub fn new_cyclic<F>(data_fn: F) -> Arc<T>
+    where
+        F: FnOnce(&Weak<T>) -> T,
+    {
+        // Construct the inner in the "uninitialized" state with a single
+        // weak reference.
+        let uninit_ptr: NonNull<_> = Box::leak(Box::new(ArcInner {
+            strong: atomic::AtomicUsize::new(0),
+            weak: atomic::AtomicUsize::new(1),
+            data: mem::MaybeUninit::<T>::uninit(),
+        }))
+        .into();
+        let init_ptr: NonNull<ArcInner<T>> = uninit_ptr.cast();
+
+        let weak = Weak { ptr: init_ptr };
+
+        // It's important we don't give up ownership of the weak pointer, or
+        // else the memory might be freed by the time `data_fn` returns. If
+        // we really wanted to pass ownership, we could create an additional
+        // weak pointer for ourselves, but this would result in additional
+        // updates to the weak reference count which might not be necessary
+        // otherwise.
+        let data = data_fn(&weak);
+
+        // Now we can properly initialize the inner value and turn our weak
+        // reference into a strong reference.
+        let strong = unsafe {
+            let inner = init_ptr.as_ptr();
+            ptr::write(ptr::addr_of_mut!((*inner).data), data);
+
+            // The above write to the data field must be visible to any threads which
+            // observe a non-zero strong count. Therefore we need at least "Release" ordering
+            // in order to synchronize with the `compare_exchange_weak` in `Weak::upgrade`.
+            //
+            // "Acquire" ordering is not required. When considering the possible behaviours
+            // of `data_fn` we only need to look at what it could do with a reference to a
+            // non-upgradeable `Weak`:
+            // - It can *clone* the `Weak`, increasing the weak reference count.
+            // - It can drop those clones, decreasing the weak reference count (but never to zero).
+            //
+            // These side effects do not impact us in any way, and no other side effects are
+            // possible with safe code alone.
+            let prev_value = (*inner).strong.fetch_add(1, Release);
+            debug_assert_eq!(prev_value, 0, "No prior strong references should exist");
+
+            Arc::from_inner(init_ptr)
+        };
+
+        // Strong references should collectively own a shared weak reference,
+        // so don't run the destructor for our old weak reference.
+ mem::forget(weak); + strong + } + + /// Constructs a new `Arc` with uninitialized contents. + /// + /// # Examples + /// + /// ``` + /// #![feature(new_uninit)] + /// #![feature(get_mut_unchecked)] + /// + /// use std::sync::Arc; + /// + /// let mut five = Arc::::new_uninit(); + /// + /// // Deferred initialization: + /// Arc::get_mut(&mut five).unwrap().write(5); + /// + /// let five = unsafe { five.assume_init() }; + /// + /// assert_eq!(*five, 5) + /// ``` + #[cfg(not(no_global_oom_handling))] + #[unstable(feature = "new_uninit", issue = "63291")] + #[must_use] + pub fn new_uninit() -> Arc> { + unsafe { + Arc::from_ptr(Arc::allocate_for_layout( + Layout::new::(), + |layout| Global.allocate(layout), + |mem| mem as *mut ArcInner>, + )) + } + } + + /// Constructs a new `Arc` with uninitialized contents, with the memory + /// being filled with `0` bytes. + /// + /// See [`MaybeUninit::zeroed`][zeroed] for examples of correct and incorrect usage + /// of this method. + /// + /// # Examples + /// + /// ``` + /// #![feature(new_uninit)] + /// + /// use std::sync::Arc; + /// + /// let zero = Arc::::new_zeroed(); + /// let zero = unsafe { zero.assume_init() }; + /// + /// assert_eq!(*zero, 0) + /// ``` + /// + /// [zeroed]: mem::MaybeUninit::zeroed + #[cfg(not(no_global_oom_handling))] + #[unstable(feature = "new_uninit", issue = "63291")] + #[must_use] + pub fn new_zeroed() -> Arc> { + unsafe { + Arc::from_ptr(Arc::allocate_for_layout( + Layout::new::(), + |layout| Global.allocate_zeroed(layout), + |mem| mem as *mut ArcInner>, + )) + } + } + + /// Constructs a new `Pin>`. If `T` does not implement `Unpin`, then + /// `data` will be pinned in memory and unable to be moved. + #[cfg(not(no_global_oom_handling))] + #[stable(feature = "pin", since = "1.33.0")] + #[must_use] + pub fn pin(data: T) -> Pin> { + unsafe { Pin::new_unchecked(Arc::new(data)) } + } + + /// Constructs a new `Pin>`, return an error if allocation fails. + #[unstable(feature = "allocator_api", issue = "32838")] + #[inline] + pub fn try_pin(data: T) -> Result>, AllocError> { + unsafe { Ok(Pin::new_unchecked(Arc::try_new(data)?)) } + } + + /// Constructs a new `Arc`, returning an error if allocation fails. + /// + /// # Examples + /// + /// ``` + /// #![feature(allocator_api)] + /// use std::sync::Arc; + /// + /// let five = Arc::try_new(5)?; + /// # Ok::<(), std::alloc::AllocError>(()) + /// ``` + #[unstable(feature = "allocator_api", issue = "32838")] + #[inline] + pub fn try_new(data: T) -> Result, AllocError> { + // Start the weak pointer count as 1 which is the weak pointer that's + // held by all the strong pointers (kinda), see std/rc.rs for more info + let x: Box<_> = Box::try_new(ArcInner { + strong: atomic::AtomicUsize::new(1), + weak: atomic::AtomicUsize::new(1), + data, + })?; + unsafe { Ok(Self::from_inner(Box::leak(x).into())) } + } + + /// Constructs a new `Arc` with uninitialized contents, returning an error + /// if allocation fails. 
+ /// + /// # Examples + /// + /// ``` + /// #![feature(new_uninit, allocator_api)] + /// #![feature(get_mut_unchecked)] + /// + /// use std::sync::Arc; + /// + /// let mut five = Arc::::try_new_uninit()?; + /// + /// // Deferred initialization: + /// Arc::get_mut(&mut five).unwrap().write(5); + /// + /// let five = unsafe { five.assume_init() }; + /// + /// assert_eq!(*five, 5); + /// # Ok::<(), std::alloc::AllocError>(()) + /// ``` + #[unstable(feature = "allocator_api", issue = "32838")] + // #[unstable(feature = "new_uninit", issue = "63291")] + pub fn try_new_uninit() -> Result>, AllocError> { + unsafe { + Ok(Arc::from_ptr(Arc::try_allocate_for_layout( + Layout::new::(), + |layout| Global.allocate(layout), + |mem| mem as *mut ArcInner>, + )?)) + } + } + + /// Constructs a new `Arc` with uninitialized contents, with the memory + /// being filled with `0` bytes, returning an error if allocation fails. + /// + /// See [`MaybeUninit::zeroed`][zeroed] for examples of correct and incorrect usage + /// of this method. + /// + /// # Examples + /// + /// ``` + /// #![feature(new_uninit, allocator_api)] + /// + /// use std::sync::Arc; + /// + /// let zero = Arc::::try_new_zeroed()?; + /// let zero = unsafe { zero.assume_init() }; + /// + /// assert_eq!(*zero, 0); + /// # Ok::<(), std::alloc::AllocError>(()) + /// ``` + /// + /// [zeroed]: mem::MaybeUninit::zeroed + #[unstable(feature = "allocator_api", issue = "32838")] + // #[unstable(feature = "new_uninit", issue = "63291")] + pub fn try_new_zeroed() -> Result>, AllocError> { + unsafe { + Ok(Arc::from_ptr(Arc::try_allocate_for_layout( + Layout::new::(), + |layout| Global.allocate_zeroed(layout), + |mem| mem as *mut ArcInner>, + )?)) + } + } + /// Returns the inner value, if the `Arc` has exactly one strong reference. + /// + /// Otherwise, an [`Err`] is returned with the same `Arc` that was + /// passed in. + /// + /// This will succeed even if there are outstanding weak references. + /// + /// # Examples + /// + /// ``` + /// use std::sync::Arc; + /// + /// let x = Arc::new(3); + /// assert_eq!(Arc::try_unwrap(x), Ok(3)); + /// + /// let x = Arc::new(4); + /// let _y = Arc::clone(&x); + /// assert_eq!(*Arc::try_unwrap(x).unwrap_err(), 4); + /// ``` + #[inline] + #[stable(feature = "arc_unique", since = "1.4.0")] + pub fn try_unwrap(this: Self) -> Result { + if this.inner().strong.compare_exchange(1, 0, Relaxed, Relaxed).is_err() { + return Err(this); + } + + acquire!(this.inner().strong); + + unsafe { + let elem = ptr::read(&this.ptr.as_ref().data); + + // Make a weak pointer to clean up the implicit strong-weak reference + let _weak = Weak { ptr: this.ptr }; + mem::forget(this); + + Ok(elem) + } + } +} + +impl Arc<[T]> { + /// Constructs a new atomically reference-counted slice with uninitialized contents. 
+ /// + /// # Examples + /// + /// ``` + /// #![feature(new_uninit)] + /// #![feature(get_mut_unchecked)] + /// + /// use std::sync::Arc; + /// + /// let mut values = Arc::<[u32]>::new_uninit_slice(3); + /// + /// // Deferred initialization: + /// let data = Arc::get_mut(&mut values).unwrap(); + /// data[0].write(1); + /// data[1].write(2); + /// data[2].write(3); + /// + /// let values = unsafe { values.assume_init() }; + /// + /// assert_eq!(*values, [1, 2, 3]) + /// ``` + #[cfg(not(no_global_oom_handling))] + #[unstable(feature = "new_uninit", issue = "63291")] + #[must_use] + pub fn new_uninit_slice(len: usize) -> Arc<[mem::MaybeUninit]> { + unsafe { Arc::from_ptr(Arc::allocate_for_slice(len)) } + } + + /// Constructs a new atomically reference-counted slice with uninitialized contents, with the memory being + /// filled with `0` bytes. + /// + /// See [`MaybeUninit::zeroed`][zeroed] for examples of correct and + /// incorrect usage of this method. + /// + /// # Examples + /// + /// ``` + /// #![feature(new_uninit)] + /// + /// use std::sync::Arc; + /// + /// let values = Arc::<[u32]>::new_zeroed_slice(3); + /// let values = unsafe { values.assume_init() }; + /// + /// assert_eq!(*values, [0, 0, 0]) + /// ``` + /// + /// [zeroed]: mem::MaybeUninit::zeroed + #[cfg(not(no_global_oom_handling))] + #[unstable(feature = "new_uninit", issue = "63291")] + #[must_use] + pub fn new_zeroed_slice(len: usize) -> Arc<[mem::MaybeUninit]> { + unsafe { + Arc::from_ptr(Arc::allocate_for_layout( + Layout::array::(len).unwrap(), + |layout| Global.allocate_zeroed(layout), + |mem| { + ptr::slice_from_raw_parts_mut(mem as *mut T, len) + as *mut ArcInner<[mem::MaybeUninit]> + }, + )) + } + } +} + +impl Arc> { + /// Converts to `Arc`. + /// + /// # Safety + /// + /// As with [`MaybeUninit::assume_init`], + /// it is up to the caller to guarantee that the inner value + /// really is in an initialized state. + /// Calling this when the content is not yet fully initialized + /// causes immediate undefined behavior. + /// + /// [`MaybeUninit::assume_init`]: mem::MaybeUninit::assume_init + /// + /// # Examples + /// + /// ``` + /// #![feature(new_uninit)] + /// #![feature(get_mut_unchecked)] + /// + /// use std::sync::Arc; + /// + /// let mut five = Arc::::new_uninit(); + /// + /// // Deferred initialization: + /// Arc::get_mut(&mut five).unwrap().write(5); + /// + /// let five = unsafe { five.assume_init() }; + /// + /// assert_eq!(*five, 5) + /// ``` + #[unstable(feature = "new_uninit", issue = "63291")] + #[must_use = "`self` will be dropped if the result is not used"] + #[inline] + pub unsafe fn assume_init(self) -> Arc { + unsafe { Arc::from_inner(mem::ManuallyDrop::new(self).ptr.cast()) } + } +} + +impl Arc<[mem::MaybeUninit]> { + /// Converts to `Arc<[T]>`. + /// + /// # Safety + /// + /// As with [`MaybeUninit::assume_init`], + /// it is up to the caller to guarantee that the inner value + /// really is in an initialized state. + /// Calling this when the content is not yet fully initialized + /// causes immediate undefined behavior. 
+ /// + /// [`MaybeUninit::assume_init`]: mem::MaybeUninit::assume_init + /// + /// # Examples + /// + /// ``` + /// #![feature(new_uninit)] + /// #![feature(get_mut_unchecked)] + /// + /// use std::sync::Arc; + /// + /// let mut values = Arc::<[u32]>::new_uninit_slice(3); + /// + /// // Deferred initialization: + /// let data = Arc::get_mut(&mut values).unwrap(); + /// data[0].write(1); + /// data[1].write(2); + /// data[2].write(3); + /// + /// let values = unsafe { values.assume_init() }; + /// + /// assert_eq!(*values, [1, 2, 3]) + /// ``` + #[unstable(feature = "new_uninit", issue = "63291")] + #[must_use = "`self` will be dropped if the result is not used"] + #[inline] + pub unsafe fn assume_init(self) -> Arc<[T]> { + unsafe { Arc::from_ptr(mem::ManuallyDrop::new(self).ptr.as_ptr() as _) } + } +} + +impl Arc { + /// Consumes the `Arc`, returning the wrapped pointer. + /// + /// To avoid a memory leak the pointer must be converted back to an `Arc` using + /// [`Arc::from_raw`]. + /// + /// # Examples + /// + /// ``` + /// use std::sync::Arc; + /// + /// let x = Arc::new("hello".to_owned()); + /// let x_ptr = Arc::into_raw(x); + /// assert_eq!(unsafe { &*x_ptr }, "hello"); + /// ``` + #[must_use = "losing the pointer will leak memory"] + #[stable(feature = "rc_raw", since = "1.17.0")] + pub fn into_raw(this: Self) -> *const T { + let ptr = Self::as_ptr(&this); + mem::forget(this); + ptr + } + + /// Provides a raw pointer to the data. + /// + /// The counts are not affected in any way and the `Arc` is not consumed. The pointer is valid for + /// as long as there are strong counts in the `Arc`. + /// + /// # Examples + /// + /// ``` + /// use std::sync::Arc; + /// + /// let x = Arc::new("hello".to_owned()); + /// let y = Arc::clone(&x); + /// let x_ptr = Arc::as_ptr(&x); + /// assert_eq!(x_ptr, Arc::as_ptr(&y)); + /// assert_eq!(unsafe { &*x_ptr }, "hello"); + /// ``` + #[must_use] + #[stable(feature = "rc_as_ptr", since = "1.45.0")] + pub fn as_ptr(this: &Self) -> *const T { + let ptr: *mut ArcInner = NonNull::as_ptr(this.ptr); + + // SAFETY: This cannot go through Deref::deref or RcBoxPtr::inner because + // this is required to retain raw/mut provenance such that e.g. `get_mut` can + // write through the pointer after the Rc is recovered through `from_raw`. + unsafe { ptr::addr_of_mut!((*ptr).data) } + } + + /// Constructs an `Arc` from a raw pointer. + /// + /// The raw pointer must have been previously returned by a call to + /// [`Arc::into_raw`][into_raw] where `U` must have the same size and + /// alignment as `T`. This is trivially true if `U` is `T`. + /// Note that if `U` is not `T` but has the same size and alignment, this is + /// basically like transmuting references of different types. See + /// [`mem::transmute`][transmute] for more information on what + /// restrictions apply in this case. + /// + /// The user of `from_raw` has to make sure a specific value of `T` is only + /// dropped once. + /// + /// This function is unsafe because improper use may lead to memory unsafety, + /// even if the returned `Arc` is never accessed. + /// + /// [into_raw]: Arc::into_raw + /// [transmute]: core::mem::transmute + /// + /// # Examples + /// + /// ``` + /// use std::sync::Arc; + /// + /// let x = Arc::new("hello".to_owned()); + /// let x_ptr = Arc::into_raw(x); + /// + /// unsafe { + /// // Convert back to an `Arc` to prevent leak. + /// let x = Arc::from_raw(x_ptr); + /// assert_eq!(&*x, "hello"); + /// + /// // Further calls to `Arc::from_raw(x_ptr)` would be memory-unsafe. 
+ /// } + /// + /// // The memory was freed when `x` went out of scope above, so `x_ptr` is now dangling! + /// ``` + #[stable(feature = "rc_raw", since = "1.17.0")] + pub unsafe fn from_raw(ptr: *const T) -> Self { + unsafe { + let offset = data_offset(ptr); + + // Reverse the offset to find the original ArcInner. + let arc_ptr = ptr.byte_sub(offset) as *mut ArcInner; + + Self::from_ptr(arc_ptr) + } + } + + /// Creates a new [`Weak`] pointer to this allocation. + /// + /// # Examples + /// + /// ``` + /// use std::sync::Arc; + /// + /// let five = Arc::new(5); + /// + /// let weak_five = Arc::downgrade(&five); + /// ``` + #[must_use = "this returns a new `Weak` pointer, \ + without modifying the original `Arc`"] + #[stable(feature = "arc_weak", since = "1.4.0")] + pub fn downgrade(this: &Self) -> Weak { + // This Relaxed is OK because we're checking the value in the CAS + // below. + let mut cur = this.inner().weak.load(Relaxed); + + loop { + // check if the weak counter is currently "locked"; if so, spin. + if cur == usize::MAX { + hint::spin_loop(); + cur = this.inner().weak.load(Relaxed); + continue; + } + + // NOTE: this code currently ignores the possibility of overflow + // into usize::MAX; in general both Rc and Arc need to be adjusted + // to deal with overflow. + + // Unlike with Clone(), we need this to be an Acquire read to + // synchronize with the write coming from `is_unique`, so that the + // events prior to that write happen before this read. + match this.inner().weak.compare_exchange_weak(cur, cur + 1, Acquire, Relaxed) { + Ok(_) => { + // Make sure we do not create a dangling Weak + debug_assert!(!is_dangling(this.ptr.as_ptr())); + return Weak { ptr: this.ptr }; + } + Err(old) => cur = old, + } + } + } + + /// Gets the number of [`Weak`] pointers to this allocation. + /// + /// # Safety + /// + /// This method by itself is safe, but using it correctly requires extra care. + /// Another thread can change the weak count at any time, + /// including potentially between calling this method and acting on the result. + /// + /// # Examples + /// + /// ``` + /// use std::sync::Arc; + /// + /// let five = Arc::new(5); + /// let _weak_five = Arc::downgrade(&five); + /// + /// // This assertion is deterministic because we haven't shared + /// // the `Arc` or `Weak` between threads. + /// assert_eq!(1, Arc::weak_count(&five)); + /// ``` + #[inline] + #[must_use] + #[stable(feature = "arc_counts", since = "1.15.0")] + pub fn weak_count(this: &Self) -> usize { + let cnt = this.inner().weak.load(Acquire); + // If the weak count is currently locked, the value of the + // count was 0 just before taking the lock. + if cnt == usize::MAX { 0 } else { cnt - 1 } + } + + /// Gets the number of strong (`Arc`) pointers to this allocation. + /// + /// # Safety + /// + /// This method by itself is safe, but using it correctly requires extra care. + /// Another thread can change the strong count at any time, + /// including potentially between calling this method and acting on the result. + /// + /// # Examples + /// + /// ``` + /// use std::sync::Arc; + /// + /// let five = Arc::new(5); + /// let _also_five = Arc::clone(&five); + /// + /// // This assertion is deterministic because we haven't shared + /// // the `Arc` between threads. 
+ /// assert_eq!(2, Arc::strong_count(&five)); + /// ``` + #[inline] + #[must_use] + #[stable(feature = "arc_counts", since = "1.15.0")] + pub fn strong_count(this: &Self) -> usize { + this.inner().strong.load(Acquire) + } + + /// Increments the strong reference count on the `Arc` associated with the + /// provided pointer by one. + /// + /// # Safety + /// + /// The pointer must have been obtained through `Arc::into_raw`, and the + /// associated `Arc` instance must be valid (i.e. the strong count must be at + /// least 1) for the duration of this method. + /// + /// # Examples + /// + /// ``` + /// use std::sync::Arc; + /// + /// let five = Arc::new(5); + /// + /// unsafe { + /// let ptr = Arc::into_raw(five); + /// Arc::increment_strong_count(ptr); + /// + /// // This assertion is deterministic because we haven't shared + /// // the `Arc` between threads. + /// let five = Arc::from_raw(ptr); + /// assert_eq!(2, Arc::strong_count(&five)); + /// } + /// ``` + #[inline] + #[stable(feature = "arc_mutate_strong_count", since = "1.51.0")] + pub unsafe fn increment_strong_count(ptr: *const T) { + // Retain Arc, but don't touch refcount by wrapping in ManuallyDrop + let arc = unsafe { mem::ManuallyDrop::new(Arc::::from_raw(ptr)) }; + // Now increase refcount, but don't drop new refcount either + let _arc_clone: mem::ManuallyDrop<_> = arc.clone(); + } + + /// Decrements the strong reference count on the `Arc` associated with the + /// provided pointer by one. + /// + /// # Safety + /// + /// The pointer must have been obtained through `Arc::into_raw`, and the + /// associated `Arc` instance must be valid (i.e. the strong count must be at + /// least 1) when invoking this method. This method can be used to release the final + /// `Arc` and backing storage, but **should not** be called after the final `Arc` has been + /// released. + /// + /// # Examples + /// + /// ``` + /// use std::sync::Arc; + /// + /// let five = Arc::new(5); + /// + /// unsafe { + /// let ptr = Arc::into_raw(five); + /// Arc::increment_strong_count(ptr); + /// + /// // Those assertions are deterministic because we haven't shared + /// // the `Arc` between threads. + /// let five = Arc::from_raw(ptr); + /// assert_eq!(2, Arc::strong_count(&five)); + /// Arc::decrement_strong_count(ptr); + /// assert_eq!(1, Arc::strong_count(&five)); + /// } + /// ``` + #[inline] + #[stable(feature = "arc_mutate_strong_count", since = "1.51.0")] + pub unsafe fn decrement_strong_count(ptr: *const T) { + unsafe { mem::drop(Arc::from_raw(ptr)) }; + } + + #[inline] + fn inner(&self) -> &ArcInner { + // This unsafety is ok because while this arc is alive we're guaranteed + // that the inner pointer is valid. Furthermore, we know that the + // `ArcInner` structure itself is `Sync` because the inner data is + // `Sync` as well, so we're ok loaning out an immutable pointer to these + // contents. + unsafe { self.ptr.as_ref() } + } + + // Non-inlined part of `drop`. + #[inline(never)] + unsafe fn drop_slow(&mut self) { + // Destroy the data at this time, even though we must not free the box + // allocation itself (there might still be weak pointers lying around). + unsafe { ptr::drop_in_place(Self::get_mut_unchecked(self)) }; + + // Drop the weak ref collectively held by all strong references + drop(Weak { ptr: self.ptr }); + } + + /// Returns `true` if the two `Arc`s point to the same allocation + /// (in a vein similar to [`ptr::eq`]). 
+ /// + /// # Examples + /// + /// ``` + /// use std::sync::Arc; + /// + /// let five = Arc::new(5); + /// let same_five = Arc::clone(&five); + /// let other_five = Arc::new(5); + /// + /// assert!(Arc::ptr_eq(&five, &same_five)); + /// assert!(!Arc::ptr_eq(&five, &other_five)); + /// ``` + /// + /// [`ptr::eq`]: core::ptr::eq "ptr::eq" + #[inline] + #[must_use] + #[stable(feature = "ptr_eq", since = "1.17.0")] + pub fn ptr_eq(this: &Self, other: &Self) -> bool { + this.ptr.as_ptr() == other.ptr.as_ptr() + } +} + +impl Arc { + /// Allocates an `ArcInner` with sufficient space for + /// a possibly-unsized inner value where the value has the layout provided. + /// + /// The function `mem_to_arcinner` is called with the data pointer + /// and must return back a (potentially fat)-pointer for the `ArcInner`. + #[cfg(not(no_global_oom_handling))] + unsafe fn allocate_for_layout( + value_layout: Layout, + allocate: impl FnOnce(Layout) -> Result, AllocError>, + mem_to_arcinner: impl FnOnce(*mut u8) -> *mut ArcInner, + ) -> *mut ArcInner { + // Calculate layout using the given value layout. + // Previously, layout was calculated on the expression + // `&*(ptr as *const ArcInner)`, but this created a misaligned + // reference (see #54908). + let layout = Layout::new::>().extend(value_layout).unwrap().0.pad_to_align(); + unsafe { + Arc::try_allocate_for_layout(value_layout, allocate, mem_to_arcinner) + .unwrap_or_else(|_| handle_alloc_error(layout)) + } + } + + /// Allocates an `ArcInner` with sufficient space for + /// a possibly-unsized inner value where the value has the layout provided, + /// returning an error if allocation fails. + /// + /// The function `mem_to_arcinner` is called with the data pointer + /// and must return back a (potentially fat)-pointer for the `ArcInner`. + unsafe fn try_allocate_for_layout( + value_layout: Layout, + allocate: impl FnOnce(Layout) -> Result, AllocError>, + mem_to_arcinner: impl FnOnce(*mut u8) -> *mut ArcInner, + ) -> Result<*mut ArcInner, AllocError> { + // Calculate layout using the given value layout. + // Previously, layout was calculated on the expression + // `&*(ptr as *const ArcInner)`, but this created a misaligned + // reference (see #54908). + let layout = Layout::new::>().extend(value_layout).unwrap().0.pad_to_align(); + + let ptr = allocate(layout)?; + + // Initialize the ArcInner + let inner = mem_to_arcinner(ptr.as_non_null_ptr().as_ptr()); + debug_assert_eq!(unsafe { Layout::for_value(&*inner) }, layout); + + unsafe { + ptr::write(&mut (*inner).strong, atomic::AtomicUsize::new(1)); + ptr::write(&mut (*inner).weak, atomic::AtomicUsize::new(1)); + } + + Ok(inner) + } + + /// Allocates an `ArcInner` with sufficient space for an unsized inner value. + #[cfg(not(no_global_oom_handling))] + unsafe fn allocate_for_ptr(ptr: *const T) -> *mut ArcInner { + // Allocate for the `ArcInner` using the given value. 
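+        // `with_metadata_of` keeps the address of the freshly allocated block but
+        // takes its pointer metadata (the slice length or trait-object vtable) from
+        // `ptr`, so the result is a well-formed wide pointer to the new `ArcInner`.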
+ unsafe { + Self::allocate_for_layout( + Layout::for_value(&*ptr), + |layout| Global.allocate(layout), + |mem| mem.with_metadata_of(ptr as *mut ArcInner), + ) + } + } + + #[cfg(not(no_global_oom_handling))] + fn from_box(v: Box) -> Arc { + unsafe { + let (box_unique, alloc) = Box::into_unique(v); + let bptr = box_unique.as_ptr(); + + let value_size = size_of_val(&*bptr); + let ptr = Self::allocate_for_ptr(bptr); + + // Copy value as bytes + ptr::copy_nonoverlapping( + bptr as *const T as *const u8, + &mut (*ptr).data as *mut _ as *mut u8, + value_size, + ); + + // Free the allocation without dropping its contents + box_free(box_unique, alloc); + + Self::from_ptr(ptr) + } + } +} + +impl Arc<[T]> { + /// Allocates an `ArcInner<[T]>` with the given length. + #[cfg(not(no_global_oom_handling))] + unsafe fn allocate_for_slice(len: usize) -> *mut ArcInner<[T]> { + unsafe { + Self::allocate_for_layout( + Layout::array::(len).unwrap(), + |layout| Global.allocate(layout), + |mem| ptr::slice_from_raw_parts_mut(mem as *mut T, len) as *mut ArcInner<[T]>, + ) + } + } + + /// Copy elements from slice into newly allocated Arc<\[T\]> + /// + /// Unsafe because the caller must either take ownership or bind `T: Copy`. + #[cfg(not(no_global_oom_handling))] + unsafe fn copy_from_slice(v: &[T]) -> Arc<[T]> { + unsafe { + let ptr = Self::allocate_for_slice(v.len()); + + ptr::copy_nonoverlapping(v.as_ptr(), &mut (*ptr).data as *mut [T] as *mut T, v.len()); + + Self::from_ptr(ptr) + } + } + + /// Constructs an `Arc<[T]>` from an iterator known to be of a certain size. + /// + /// Behavior is undefined should the size be wrong. + #[cfg(not(no_global_oom_handling))] + unsafe fn from_iter_exact(iter: impl iter::Iterator, len: usize) -> Arc<[T]> { + // Panic guard while cloning T elements. + // In the event of a panic, elements that have been written + // into the new ArcInner will be dropped, then the memory freed. + struct Guard { + mem: NonNull, + elems: *mut T, + layout: Layout, + n_elems: usize, + } + + impl Drop for Guard { + fn drop(&mut self) { + unsafe { + let slice = from_raw_parts_mut(self.elems, self.n_elems); + ptr::drop_in_place(slice); + + Global.deallocate(self.mem, self.layout); + } + } + } + + unsafe { + let ptr = Self::allocate_for_slice(len); + + let mem = ptr as *mut _ as *mut u8; + let layout = Layout::for_value(&*ptr); + + // Pointer to first element + let elems = &mut (*ptr).data as *mut [T] as *mut T; + + let mut guard = Guard { mem: NonNull::new_unchecked(mem), elems, layout, n_elems: 0 }; + + for (i, item) in iter.enumerate() { + ptr::write(elems.add(i), item); + guard.n_elems += 1; + } + + // All clear. Forget the guard so it doesn't free the new ArcInner. + mem::forget(guard); + + Self::from_ptr(ptr) + } + } +} + +/// Specialization trait used for `From<&[T]>`. +#[cfg(not(no_global_oom_handling))] +trait ArcFromSlice { + fn from_slice(slice: &[T]) -> Self; +} + +#[cfg(not(no_global_oom_handling))] +impl ArcFromSlice for Arc<[T]> { + #[inline] + default fn from_slice(v: &[T]) -> Self { + unsafe { Self::from_iter_exact(v.iter().cloned(), v.len()) } + } +} + +#[cfg(not(no_global_oom_handling))] +impl ArcFromSlice for Arc<[T]> { + #[inline] + fn from_slice(v: &[T]) -> Self { + unsafe { Arc::copy_from_slice(v) } + } +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl Clone for Arc { + /// Makes a clone of the `Arc` pointer. + /// + /// This creates another pointer to the same allocation, increasing the + /// strong reference count. 
+ /// + /// # Examples + /// + /// ``` + /// use std::sync::Arc; + /// + /// let five = Arc::new(5); + /// + /// let _ = Arc::clone(&five); + /// ``` + #[inline] + fn clone(&self) -> Arc { + // Using a relaxed ordering is alright here, as knowledge of the + // original reference prevents other threads from erroneously deleting + // the object. + // + // As explained in the [Boost documentation][1], Increasing the + // reference counter can always be done with memory_order_relaxed: New + // references to an object can only be formed from an existing + // reference, and passing an existing reference from one thread to + // another must already provide any required synchronization. + // + // [1]: (www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html) + let old_size = self.inner().strong.fetch_add(1, Relaxed); + + // However we need to guard against massive refcounts in case someone is `mem::forget`ing + // Arcs. If we don't do this the count can overflow and users will use-after free. This + // branch will never be taken in any realistic program. We abort because such a program is + // incredibly degenerate, and we don't care to support it. + // + // This check is not 100% water-proof: we error when the refcount grows beyond `isize::MAX`. + // But we do that check *after* having done the increment, so there is a chance here that + // the worst already happened and we actually do overflow the `usize` counter. However, that + // requires the counter to grow from `isize::MAX` to `usize::MAX` between the increment + // above and the `abort` below, which seems exceedingly unlikely. + if old_size > MAX_REFCOUNT { + abort(); + } + + unsafe { Self::from_inner(self.ptr) } + } +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl Deref for Arc { + type Target = T; + + #[inline] + fn deref(&self) -> &T { + &self.inner().data + } +} + +#[unstable(feature = "receiver_trait", issue = "none")] +impl Receiver for Arc {} + +impl Arc { + /// Makes a mutable reference into the given `Arc`. + /// + /// If there are other `Arc` pointers to the same allocation, then `make_mut` will + /// [`clone`] the inner value to a new allocation to ensure unique ownership. This is also + /// referred to as clone-on-write. + /// + /// However, if there are no other `Arc` pointers to this allocation, but some [`Weak`] + /// pointers, then the [`Weak`] pointers will be dissociated and the inner value will not + /// be cloned. + /// + /// See also [`get_mut`], which will fail rather than cloning the inner value + /// or dissociating [`Weak`] pointers. + /// + /// [`clone`]: Clone::clone + /// [`get_mut`]: Arc::get_mut + /// + /// # Examples + /// + /// ``` + /// use std::sync::Arc; + /// + /// let mut data = Arc::new(5); + /// + /// *Arc::make_mut(&mut data) += 1; // Won't clone anything + /// let mut other_data = Arc::clone(&data); // Won't clone inner data + /// *Arc::make_mut(&mut data) += 1; // Clones inner data + /// *Arc::make_mut(&mut data) += 1; // Won't clone anything + /// *Arc::make_mut(&mut other_data) *= 2; // Won't clone anything + /// + /// // Now `data` and `other_data` point to different allocations. 
+ /// assert_eq!(*data, 8); + /// assert_eq!(*other_data, 12); + /// ``` + /// + /// [`Weak`] pointers will be dissociated: + /// + /// ``` + /// use std::sync::Arc; + /// + /// let mut data = Arc::new(75); + /// let weak = Arc::downgrade(&data); + /// + /// assert!(75 == *data); + /// assert!(75 == *weak.upgrade().unwrap()); + /// + /// *Arc::make_mut(&mut data) += 1; + /// + /// assert!(76 == *data); + /// assert!(weak.upgrade().is_none()); + /// ``` + #[cfg(not(no_global_oom_handling))] + #[inline] + #[stable(feature = "arc_unique", since = "1.4.0")] + pub fn make_mut(this: &mut Self) -> &mut T { + // Note that we hold both a strong reference and a weak reference. + // Thus, releasing our strong reference only will not, by itself, cause + // the memory to be deallocated. + // + // Use Acquire to ensure that we see any writes to `weak` that happen + // before release writes (i.e., decrements) to `strong`. Since we hold a + // weak count, there's no chance the ArcInner itself could be + // deallocated. + if this.inner().strong.compare_exchange(1, 0, Acquire, Relaxed).is_err() { + // Another strong pointer exists, so we must clone. + // Pre-allocate memory to allow writing the cloned value directly. + let mut arc = Self::new_uninit(); + unsafe { + let data = Arc::get_mut_unchecked(&mut arc); + (**this).write_clone_into_raw(data.as_mut_ptr()); + *this = arc.assume_init(); + } + } else if this.inner().weak.load(Relaxed) != 1 { + // Relaxed suffices in the above because this is fundamentally an + // optimization: we are always racing with weak pointers being + // dropped. Worst case, we end up allocated a new Arc unnecessarily. + + // We removed the last strong ref, but there are additional weak + // refs remaining. We'll move the contents to a new Arc, and + // invalidate the other weak refs. + + // Note that it is not possible for the read of `weak` to yield + // usize::MAX (i.e., locked), since the weak count can only be + // locked by a thread with a strong reference. + + // Materialize our own implicit weak pointer, so that it can clean + // up the ArcInner as needed. + let _weak = Weak { ptr: this.ptr }; + + // Can just steal the data, all that's left is Weaks + let mut arc = Self::new_uninit(); + unsafe { + let data = Arc::get_mut_unchecked(&mut arc); + data.as_mut_ptr().copy_from_nonoverlapping(&**this, 1); + ptr::write(this, arc.assume_init()); + } + } else { + // We were the sole reference of either kind; bump back up the + // strong ref count. + this.inner().strong.store(1, Release); + } + + // As with `get_mut()`, the unsafety is ok because our reference was + // either unique to begin with, or became one upon cloning the contents. + unsafe { Self::get_mut_unchecked(this) } + } + + /// If we have the only reference to `T` then unwrap it. Otherwise, clone `T` and return the + /// clone. + /// + /// Assuming `arc_t` is of type `Arc`, this function is functionally equivalent to + /// `(*arc_t).clone()`, but will avoid cloning the inner value where possible. 
+ /// + /// # Examples + /// + /// ``` + /// #![feature(arc_unwrap_or_clone)] + /// # use std::{ptr, sync::Arc}; + /// let inner = String::from("test"); + /// let ptr = inner.as_ptr(); + /// + /// let arc = Arc::new(inner); + /// let inner = Arc::unwrap_or_clone(arc); + /// // The inner value was not cloned + /// assert!(ptr::eq(ptr, inner.as_ptr())); + /// + /// let arc = Arc::new(inner); + /// let arc2 = arc.clone(); + /// let inner = Arc::unwrap_or_clone(arc); + /// // Because there were 2 references, we had to clone the inner value. + /// assert!(!ptr::eq(ptr, inner.as_ptr())); + /// // `arc2` is the last reference, so when we unwrap it we get back + /// // the original `String`. + /// let inner = Arc::unwrap_or_clone(arc2); + /// assert!(ptr::eq(ptr, inner.as_ptr())); + /// ``` + #[inline] + #[unstable(feature = "arc_unwrap_or_clone", issue = "93610")] + pub fn unwrap_or_clone(this: Self) -> T { + Arc::try_unwrap(this).unwrap_or_else(|arc| (*arc).clone()) + } +} + +impl Arc { + /// Returns a mutable reference into the given `Arc`, if there are + /// no other `Arc` or [`Weak`] pointers to the same allocation. + /// + /// Returns [`None`] otherwise, because it is not safe to + /// mutate a shared value. + /// + /// See also [`make_mut`][make_mut], which will [`clone`][clone] + /// the inner value when there are other `Arc` pointers. + /// + /// [make_mut]: Arc::make_mut + /// [clone]: Clone::clone + /// + /// # Examples + /// + /// ``` + /// use std::sync::Arc; + /// + /// let mut x = Arc::new(3); + /// *Arc::get_mut(&mut x).unwrap() = 4; + /// assert_eq!(*x, 4); + /// + /// let _y = Arc::clone(&x); + /// assert!(Arc::get_mut(&mut x).is_none()); + /// ``` + #[inline] + #[stable(feature = "arc_unique", since = "1.4.0")] + pub fn get_mut(this: &mut Self) -> Option<&mut T> { + if this.is_unique() { + // This unsafety is ok because we're guaranteed that the pointer + // returned is the *only* pointer that will ever be returned to T. Our + // reference count is guaranteed to be 1 at this point, and we required + // the Arc itself to be `mut`, so we're returning the only possible + // reference to the inner data. + unsafe { Some(Arc::get_mut_unchecked(this)) } + } else { + None + } + } + + /// Returns a mutable reference into the given `Arc`, + /// without any check. + /// + /// See also [`get_mut`], which is safe and does appropriate checks. + /// + /// [`get_mut`]: Arc::get_mut + /// + /// # Safety + /// + /// Any other `Arc` or [`Weak`] pointers to the same allocation must not be dereferenced + /// for the duration of the returned borrow. + /// This is trivially the case if no such pointers exist, + /// for example immediately after `Arc::new`. + /// + /// # Examples + /// + /// ``` + /// #![feature(get_mut_unchecked)] + /// + /// use std::sync::Arc; + /// + /// let mut x = Arc::new(String::new()); + /// unsafe { + /// Arc::get_mut_unchecked(&mut x).push_str("foo") + /// } + /// assert_eq!(*x, "foo"); + /// ``` + #[inline] + #[unstable(feature = "get_mut_unchecked", issue = "63292")] + pub unsafe fn get_mut_unchecked(this: &mut Self) -> &mut T { + // We are careful to *not* create a reference covering the "count" fields, as + // this would alias with concurrent access to the reference counts (e.g. by `Weak`). + unsafe { &mut (*this.ptr.as_ptr()).data } + } + + /// Determine whether this is the unique reference (including weak refs) to + /// the underlying data. + /// + /// Note that this requires locking the weak ref count. 
+ fn is_unique(&mut self) -> bool { + // lock the weak pointer count if we appear to be the sole weak pointer + // holder. + // + // The acquire label here ensures a happens-before relationship with any + // writes to `strong` (in particular in `Weak::upgrade`) prior to decrements + // of the `weak` count (via `Weak::drop`, which uses release). If the upgraded + // weak ref was never dropped, the CAS here will fail so we do not care to synchronize. + if self.inner().weak.compare_exchange(1, usize::MAX, Acquire, Relaxed).is_ok() { + // This needs to be an `Acquire` to synchronize with the decrement of the `strong` + // counter in `drop` -- the only access that happens when any but the last reference + // is being dropped. + let unique = self.inner().strong.load(Acquire) == 1; + + // The release write here synchronizes with a read in `downgrade`, + // effectively preventing the above read of `strong` from happening + // after the write. + self.inner().weak.store(1, Release); // release the lock + unique + } else { + false + } + } +} + +#[stable(feature = "rust1", since = "1.0.0")] +unsafe impl<#[may_dangle] T: ?Sized> Drop for Arc { + /// Drops the `Arc`. + /// + /// This will decrement the strong reference count. If the strong reference + /// count reaches zero then the only other references (if any) are + /// [`Weak`], so we `drop` the inner value. + /// + /// # Examples + /// + /// ``` + /// use std::sync::Arc; + /// + /// struct Foo; + /// + /// impl Drop for Foo { + /// fn drop(&mut self) { + /// println!("dropped!"); + /// } + /// } + /// + /// let foo = Arc::new(Foo); + /// let foo2 = Arc::clone(&foo); + /// + /// drop(foo); // Doesn't print anything + /// drop(foo2); // Prints "dropped!" + /// ``` + #[inline] + fn drop(&mut self) { + // Because `fetch_sub` is already atomic, we do not need to synchronize + // with other threads unless we are going to delete the object. This + // same logic applies to the below `fetch_sub` to the `weak` count. + if self.inner().strong.fetch_sub(1, Release) != 1 { + return; + } + + // This fence is needed to prevent reordering of use of the data and + // deletion of the data. Because it is marked `Release`, the decreasing + // of the reference count synchronizes with this `Acquire` fence. This + // means that use of the data happens before decreasing the reference + // count, which happens before this fence, which happens before the + // deletion of the data. + // + // As explained in the [Boost documentation][1], + // + // > It is important to enforce any possible access to the object in one + // > thread (through an existing reference) to *happen before* deleting + // > the object in a different thread. This is achieved by a "release" + // > operation after dropping a reference (any access to the object + // > through this reference must obviously happened before), and an + // > "acquire" operation before deleting the object. + // + // In particular, while the contents of an Arc are usually immutable, it's + // possible to have interior writes to something like a Mutex. Since a + // Mutex is not acquired when it is deleted, we can't rely on its + // synchronization logic to make writes in thread A visible to a destructor + // running in thread B. + // + // Also note that the Acquire fence here could probably be replaced with an + // Acquire load, which could improve performance in highly-contended + // situations. See [2]. 
+ // + // [1]: (www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html) + // [2]: (https://github.com/rust-lang/rust/pull/41714) + acquire!(self.inner().strong); + + unsafe { + self.drop_slow(); + } + } +} + +impl Arc { + /// Attempt to downcast the `Arc` to a concrete type. + /// + /// # Examples + /// + /// ``` + /// use std::any::Any; + /// use std::sync::Arc; + /// + /// fn print_if_string(value: Arc) { + /// if let Ok(string) = value.downcast::() { + /// println!("String ({}): {}", string.len(), string); + /// } + /// } + /// + /// let my_string = "Hello World".to_string(); + /// print_if_string(Arc::new(my_string)); + /// print_if_string(Arc::new(0i8)); + /// ``` + #[inline] + #[stable(feature = "rc_downcast", since = "1.29.0")] + pub fn downcast(self) -> Result, Self> + where + T: Any + Send + Sync, + { + if (*self).is::() { + unsafe { + let ptr = self.ptr.cast::>(); + mem::forget(self); + Ok(Arc::from_inner(ptr)) + } + } else { + Err(self) + } + } + + /// Downcasts the `Arc` to a concrete type. + /// + /// For a safe alternative see [`downcast`]. + /// + /// # Examples + /// + /// ``` + /// #![feature(downcast_unchecked)] + /// + /// use std::any::Any; + /// use std::sync::Arc; + /// + /// let x: Arc = Arc::new(1_usize); + /// + /// unsafe { + /// assert_eq!(*x.downcast_unchecked::(), 1); + /// } + /// ``` + /// + /// # Safety + /// + /// The contained value must be of type `T`. Calling this method + /// with the incorrect type is *undefined behavior*. + /// + /// + /// [`downcast`]: Self::downcast + #[inline] + #[unstable(feature = "downcast_unchecked", issue = "90850")] + pub unsafe fn downcast_unchecked(self) -> Arc + where + T: Any + Send + Sync, + { + unsafe { + let ptr = self.ptr.cast::>(); + mem::forget(self); + Arc::from_inner(ptr) + } + } +} + +impl Weak { + /// Constructs a new `Weak`, without allocating any memory. + /// Calling [`upgrade`] on the return value always gives [`None`]. + /// + /// [`upgrade`]: Weak::upgrade + /// + /// # Examples + /// + /// ``` + /// use std::sync::Weak; + /// + /// let empty: Weak = Weak::new(); + /// assert!(empty.upgrade().is_none()); + /// ``` + #[stable(feature = "downgraded_weak", since = "1.10.0")] + #[rustc_const_unstable(feature = "const_weak_new", issue = "95091", reason = "recently added")] + #[must_use] + pub const fn new() -> Weak { + Weak { ptr: unsafe { NonNull::new_unchecked(ptr::invalid_mut::>(usize::MAX)) } } + } +} + +/// Helper type to allow accessing the reference counts without +/// making any assertions about the data field. +struct WeakInner<'a> { + weak: &'a atomic::AtomicUsize, + strong: &'a atomic::AtomicUsize, +} + +impl Weak { + /// Returns a raw pointer to the object `T` pointed to by this `Weak`. + /// + /// The pointer is valid only if there are some strong references. The pointer may be dangling, + /// unaligned or even [`null`] otherwise. + /// + /// # Examples + /// + /// ``` + /// use std::sync::Arc; + /// use std::ptr; + /// + /// let strong = Arc::new("hello".to_owned()); + /// let weak = Arc::downgrade(&strong); + /// // Both point to the same object + /// assert!(ptr::eq(&*strong, weak.as_ptr())); + /// // The strong here keeps it alive, so we can still access the object. + /// assert_eq!("hello", unsafe { &*weak.as_ptr() }); + /// + /// drop(strong); + /// // But not any more. We can do weak.as_ptr(), but accessing the pointer would lead to + /// // undefined behaviour. 
+    /// // assert_eq!("hello", unsafe { &*weak.as_ptr() });
+    /// ```
+    ///
+    /// [`null`]: core::ptr::null "ptr::null"
+    #[must_use]
+    #[stable(feature = "weak_into_raw", since = "1.45.0")]
+    pub fn as_ptr(&self) -> *const T {
+        let ptr: *mut ArcInner<T> = NonNull::as_ptr(self.ptr);
+
+        if is_dangling(ptr) {
+            // If the pointer is dangling, we return the sentinel directly. This cannot be
+            // a valid payload address, as the payload is at least as aligned as ArcInner (usize).
+            ptr as *const T
+        } else {
+            // SAFETY: if is_dangling returns false, then the pointer is dereferenceable.
+            // The payload may be dropped at this point, and we have to maintain provenance,
+            // so use raw pointer manipulation.
+            unsafe { ptr::addr_of_mut!((*ptr).data) }
+        }
+    }
+
+    /// Consumes the `Weak<T>` and turns it into a raw pointer.
+    ///
+    /// This converts the weak pointer into a raw pointer, while still preserving the ownership of
+    /// one weak reference (the weak count is not modified by this operation). It can be turned
+    /// back into the `Weak<T>` with [`from_raw`].
+    ///
+    /// The same restrictions of accessing the target of the pointer as with
+    /// [`as_ptr`] apply.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use std::sync::{Arc, Weak};
+    ///
+    /// let strong = Arc::new("hello".to_owned());
+    /// let weak = Arc::downgrade(&strong);
+    /// let raw = weak.into_raw();
+    ///
+    /// assert_eq!(1, Arc::weak_count(&strong));
+    /// assert_eq!("hello", unsafe { &*raw });
+    ///
+    /// drop(unsafe { Weak::from_raw(raw) });
+    /// assert_eq!(0, Arc::weak_count(&strong));
+    /// ```
+    ///
+    /// [`from_raw`]: Weak::from_raw
+    /// [`as_ptr`]: Weak::as_ptr
+    #[must_use = "`self` will be dropped if the result is not used"]
+    #[stable(feature = "weak_into_raw", since = "1.45.0")]
+    pub fn into_raw(self) -> *const T {
+        let result = self.as_ptr();
+        mem::forget(self);
+        result
+    }
+
+    /// Converts a raw pointer previously created by [`into_raw`] back into `Weak<T>`.
+    ///
+    /// This can be used to safely get a strong reference (by calling [`upgrade`]
+    /// later) or to deallocate the weak count by dropping the `Weak<T>`.
+    ///
+    /// It takes ownership of one weak reference (with the exception of pointers created by [`new`],
+    /// as these don't own anything; the method still works on them).
+    ///
+    /// # Safety
+    ///
+    /// The pointer must have originated from the [`into_raw`] and must still own its potential
+    /// weak reference.
+    ///
+    /// It is allowed for the strong count to be 0 at the time of calling this. Nevertheless, this
+    /// takes ownership of one weak reference currently represented as a raw pointer (the weak
+    /// count is not modified by this operation) and therefore it must be paired with a previous
+    /// call to [`into_raw`].
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use std::sync::{Arc, Weak};
+    ///
+    /// let strong = Arc::new("hello".to_owned());
+    ///
+    /// let raw_1 = Arc::downgrade(&strong).into_raw();
+    /// let raw_2 = Arc::downgrade(&strong).into_raw();
+    ///
+    /// assert_eq!(2, Arc::weak_count(&strong));
+    ///
+    /// assert_eq!("hello", &*unsafe { Weak::from_raw(raw_1) }.upgrade().unwrap());
+    /// assert_eq!(1, Arc::weak_count(&strong));
+    ///
+    /// drop(strong);
+    ///
+    /// // Decrement the last weak count.
+    /// assert!(unsafe { Weak::from_raw(raw_2) }.upgrade().is_none());
+    /// ```
+    ///
+    /// [`new`]: Weak::new
+    /// [`into_raw`]: Weak::into_raw
+    /// [`upgrade`]: Weak::upgrade
+    #[stable(feature = "weak_into_raw", since = "1.45.0")]
+    pub unsafe fn from_raw(ptr: *const T) -> Self {
+        // See Weak::as_ptr for context on how the input pointer is derived.
+
+        let ptr = if is_dangling(ptr as *mut T) {
+            // This is a dangling Weak.
+            ptr as *mut ArcInner<T>
+        } else {
+            // Otherwise, we're guaranteed the pointer came from a nondangling Weak.
+            // SAFETY: data_offset is safe to call, as ptr references a real (potentially dropped) T.
+            let offset = unsafe { data_offset(ptr) };
+            // Thus, we reverse the offset to get the whole ArcInner.
+            // SAFETY: the pointer originated from a Weak, so this offset is safe.
+            unsafe { ptr.byte_sub(offset) as *mut ArcInner<T> }
+        };
+
+        // SAFETY: we now have recovered the original Weak pointer, so can create the Weak.
+        Weak { ptr: unsafe { NonNull::new_unchecked(ptr) } }
+    }
+}
+
+impl<T: ?Sized> Weak<T> {
+    /// Attempts to upgrade the `Weak` pointer to an [`Arc`], delaying
+    /// dropping of the inner value if successful.
+    ///
+    /// Returns [`None`] if the inner value has since been dropped.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use std::sync::Arc;
+    ///
+    /// let five = Arc::new(5);
+    ///
+    /// let weak_five = Arc::downgrade(&five);
+    ///
+    /// let strong_five: Option<Arc<_>> = weak_five.upgrade();
+    /// assert!(strong_five.is_some());
+    ///
+    /// // Destroy all strong pointers.
+    /// drop(strong_five);
+    /// drop(five);
+    ///
+    /// assert!(weak_five.upgrade().is_none());
+    /// ```
+    #[must_use = "this returns a new `Arc`, \
+                  without modifying the original weak pointer"]
+    #[stable(feature = "arc_weak", since = "1.4.0")]
+    pub fn upgrade(&self) -> Option<Arc<T>> {
+        // We use a CAS loop to increment the strong count instead of a
+        // fetch_add as this function should never take the reference count
+        // from zero to one.
+        let inner = self.inner()?;
+
+        // Relaxed load because any write of 0 that we can observe
+        // leaves the field in a permanently zero state (so a
+        // "stale" read of 0 is fine), and any other value is
+        // confirmed via the CAS below.
+        let mut n = inner.strong.load(Relaxed);
+
+        loop {
+            if n == 0 {
+                return None;
+            }
+
+            // See comments in `Arc::clone` for why we do this (for `mem::forget`).
+            if n > MAX_REFCOUNT {
+                abort();
+            }
+
+            // Relaxed is fine for the failure case because we don't have any expectations about the new state.
+            // Acquire is necessary for the success case to synchronise with `Arc::new_cyclic`, when the inner
+            // value can be initialized after `Weak` references have already been created. In that case, we
+            // expect to observe the fully initialized value.
+            match inner.strong.compare_exchange_weak(n, n + 1, Acquire, Relaxed) {
+                Ok(_) => return Some(unsafe { Arc::from_inner(self.ptr) }), // null checked above
+                Err(old) => n = old,
+            }
+        }
+    }
+
+    /// Gets the number of strong (`Arc`) pointers pointing to this allocation.
+    ///
+    /// If `self` was created using [`Weak::new`], this will return 0.
+    #[must_use]
+    #[stable(feature = "weak_counts", since = "1.41.0")]
+    pub fn strong_count(&self) -> usize {
+        if let Some(inner) = self.inner() { inner.strong.load(Acquire) } else { 0 }
+    }
+
+    /// Gets an approximation of the number of `Weak` pointers pointing to this
+    /// allocation.
+    ///
+    /// If `self` was created using [`Weak::new`], or if there are no remaining
+    /// strong pointers, this will return 0.
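+    ///
+    /// # Examples
+    ///
+    /// A small illustration of the reported count (the implicit weak reference
+    /// collectively held by all `Arc` pointers is not included):
+    ///
+    /// ```
+    /// use std::sync::Arc;
+    ///
+    /// let five = Arc::new(5);
+    /// let weak_five = Arc::downgrade(&five);
+    /// assert_eq!(1, weak_five.weak_count());
+    ///
+    /// // Once all strong pointers are gone, the count reads as 0.
+    /// drop(five);
+    /// assert_eq!(0, weak_five.weak_count());
+    /// ```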
+    ///
+    /// # Accuracy
+    ///
+    /// Due to implementation details, the returned value can be off by 1 in
+    /// either direction when other threads are manipulating any `Arc`s or
+    /// `Weak`s pointing to the same allocation.
+    #[must_use]
+    #[stable(feature = "weak_counts", since = "1.41.0")]
+    pub fn weak_count(&self) -> usize {
+        self.inner()
+            .map(|inner| {
+                let weak = inner.weak.load(Acquire);
+                let strong = inner.strong.load(Acquire);
+                if strong == 0 {
+                    0
+                } else {
+                    // Since we observed that there was at least one strong pointer
+                    // after reading the weak count, we know that the implicit weak
+                    // reference (present whenever any strong references are alive)
+                    // was still around when we observed the weak count, and can
+                    // therefore safely subtract it.
+                    weak - 1
+                }
+            })
+            .unwrap_or(0)
+    }
+
+    /// Returns `None` when the pointer is dangling and there is no allocated `ArcInner`
+    /// (i.e., when this `Weak` was created by `Weak::new`).
+    #[inline]
+    fn inner(&self) -> Option<WeakInner<'_>> {
+        if is_dangling(self.ptr.as_ptr()) {
+            None
+        } else {
+            // We are careful to *not* create a reference covering the "data" field, as
+            // the field may be mutated concurrently (for example, if the last `Arc`
+            // is dropped, the data field will be dropped in-place).
+            Some(unsafe {
+                let ptr = self.ptr.as_ptr();
+                WeakInner { strong: &(*ptr).strong, weak: &(*ptr).weak }
+            })
+        }
+    }
+
+    /// Returns `true` if the two `Weak`s point to the same allocation (similar to
+    /// [`ptr::eq`]), or if both don't point to any allocation
+    /// (because they were created with `Weak::new()`).
+    ///
+    /// # Notes
+    ///
+    /// Since this compares pointers it means that two `Weak::new()` pointers will
+    /// equal each other, even though they don't point to any allocation.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use std::sync::Arc;
+    ///
+    /// let first_rc = Arc::new(5);
+    /// let first = Arc::downgrade(&first_rc);
+    /// let second = Arc::downgrade(&first_rc);
+    ///
+    /// assert!(first.ptr_eq(&second));
+    ///
+    /// let third_rc = Arc::new(5);
+    /// let third = Arc::downgrade(&third_rc);
+    ///
+    /// assert!(!first.ptr_eq(&third));
+    /// ```
+    ///
+    /// Comparing `Weak::new`.
+    ///
+    /// ```
+    /// use std::sync::{Arc, Weak};
+    ///
+    /// let first = Weak::new();
+    /// let second = Weak::new();
+    /// assert!(first.ptr_eq(&second));
+    ///
+    /// let third_rc = Arc::new(());
+    /// let third = Arc::downgrade(&third_rc);
+    /// assert!(!first.ptr_eq(&third));
+    /// ```
+    ///
+    /// [`ptr::eq`]: core::ptr::eq "ptr::eq"
+    #[inline]
+    #[must_use]
+    #[stable(feature = "weak_ptr_eq", since = "1.39.0")]
+    pub fn ptr_eq(&self, other: &Self) -> bool {
+        self.ptr.as_ptr() == other.ptr.as_ptr()
+    }
+}
+
+#[stable(feature = "arc_weak", since = "1.4.0")]
+impl<T: ?Sized> Clone for Weak<T> {
+    /// Makes a clone of the `Weak` pointer that points to the same allocation.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use std::sync::{Arc, Weak};
+    ///
+    /// let weak_five = Arc::downgrade(&Arc::new(5));
+    ///
+    /// let _ = Weak::clone(&weak_five);
+    /// ```
+    #[inline]
+    fn clone(&self) -> Weak<T> {
+        let inner = if let Some(inner) = self.inner() {
+            inner
+        } else {
+            return Weak { ptr: self.ptr };
+        };
+        // See comments in Arc::clone() for why this is relaxed. This can use a
+        // fetch_add (ignoring the lock) because the weak count is only locked
+        // when there are *no other* weak pointers in existence. (So we can't be
+        // running this code in that case).
+        let old_size = inner.weak.fetch_add(1, Relaxed);
+
+        // See comments in Arc::clone() for why we do this (for mem::forget).
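+        // In short: repeatedly leaking clones (e.g. via `mem::forget`) could
+        // otherwise overflow the counter, which could later permit a
+        // use-after-free, so aborting is the only sound response.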
+        if old_size > MAX_REFCOUNT {
+            abort();
+        }
+
+        Weak { ptr: self.ptr }
+    }
+}
+
+#[stable(feature = "downgraded_weak", since = "1.10.0")]
+impl<T> Default for Weak<T> {
+    /// Constructs a new `Weak<T>`, without allocating memory.
+    /// Calling [`upgrade`] on the return value always
+    /// gives [`None`].
+    ///
+    /// [`upgrade`]: Weak::upgrade
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use std::sync::Weak;
+    ///
+    /// let empty: Weak<i64> = Default::default();
+    /// assert!(empty.upgrade().is_none());
+    /// ```
+    fn default() -> Weak<T> {
+        Weak::new()
+    }
+}
+
+#[stable(feature = "arc_weak", since = "1.4.0")]
+unsafe impl<#[may_dangle] T: ?Sized> Drop for Weak<T> {
+    /// Drops the `Weak` pointer.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use std::sync::{Arc, Weak};
+    ///
+    /// struct Foo;
+    ///
+    /// impl Drop for Foo {
+    ///     fn drop(&mut self) {
+    ///         println!("dropped!");
+    ///     }
+    /// }
+    ///
+    /// let foo = Arc::new(Foo);
+    /// let weak_foo = Arc::downgrade(&foo);
+    /// let other_weak_foo = Weak::clone(&weak_foo);
+    ///
+    /// drop(weak_foo); // Doesn't print anything
+    /// drop(foo);      // Prints "dropped!"
+    ///
+    /// assert!(other_weak_foo.upgrade().is_none());
+    /// ```
+    fn drop(&mut self) {
+        // If we find out that we were the last weak pointer, then it's time to
+        // deallocate the data entirely. See the discussion in Arc::drop() about
+        // the memory orderings.
+        //
+        // It's not necessary to check for the locked state here, because the
+        // weak count can only be locked if there was precisely one weak ref,
+        // meaning that drop could only subsequently run ON that remaining weak
+        // ref, which can only happen after the lock is released.
+        let inner = if let Some(inner) = self.inner() { inner } else { return };
+
+        if inner.weak.fetch_sub(1, Release) == 1 {
+            acquire!(inner.weak);
+            unsafe { Global.deallocate(self.ptr.cast(), Layout::for_value_raw(self.ptr.as_ptr())) }
+        }
+    }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+trait ArcEqIdent<T: ?Sized + PartialEq> {
+    fn eq(&self, other: &Arc<T>) -> bool;
+    fn ne(&self, other: &Arc<T>) -> bool;
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: ?Sized + PartialEq> ArcEqIdent<T> for Arc<T> {
+    #[inline]
+    default fn eq(&self, other: &Arc<T>) -> bool {
+        **self == **other
+    }
+
+    #[inline]
+    default fn ne(&self, other: &Arc<T>) -> bool {
+        **self != **other
+    }
+}
+
+/// We're doing this specialization here, and not as a more general optimization on `&T`, because it
+/// would otherwise add a cost to all equality checks on refs. We assume that `Arc`s are used to
+/// store large values that are slow to clone but also heavy to check for equality, causing this
+/// cost to pay off more easily. It's also more likely to have two `Arc` clones that point to
+/// the same value than two `&T`s.
+///
+/// We can only do this when `T: Eq` as a `PartialEq` might be deliberately irreflexive.
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: ?Sized + crate::rc::MarkerEq> ArcEqIdent<T> for Arc<T> {
+    #[inline]
+    fn eq(&self, other: &Arc<T>) -> bool {
+        Arc::ptr_eq(self, other) || **self == **other
+    }
+
+    #[inline]
+    fn ne(&self, other: &Arc<T>) -> bool {
+        !Arc::ptr_eq(self, other) && **self != **other
+    }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: ?Sized + PartialEq> PartialEq for Arc<T> {
+    /// Equality for two `Arc`s.
+    ///
+    /// Two `Arc`s are equal if their inner values are equal, even if they are
+    /// stored in different allocations.
+    ///
+    /// If `T` also implements `Eq` (implying reflexivity of equality),
+    /// two `Arc`s that point to the same allocation are always equal.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use std::sync::Arc;
+    ///
+    /// let five = Arc::new(5);
+    ///
+    /// assert!(five == Arc::new(5));
+    /// ```
+    #[inline]
+    fn eq(&self, other: &Arc<T>) -> bool {
+        ArcEqIdent::eq(self, other)
+    }
+
+    /// Inequality for two `Arc`s.
+    ///
+    /// Two `Arc`s are unequal if their inner values are unequal.
+    ///
+    /// If `T` also implements `Eq` (implying reflexivity of equality),
+    /// two `Arc`s that point to the same value are never unequal.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use std::sync::Arc;
+    ///
+    /// let five = Arc::new(5);
+    ///
+    /// assert!(five != Arc::new(6));
+    /// ```
+    #[inline]
+    fn ne(&self, other: &Arc<T>) -> bool {
+        ArcEqIdent::ne(self, other)
+    }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: ?Sized + PartialOrd> PartialOrd for Arc<T> {
+    /// Partial comparison for two `Arc`s.
+    ///
+    /// The two are compared by calling `partial_cmp()` on their inner values.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use std::sync::Arc;
+    /// use std::cmp::Ordering;
+    ///
+    /// let five = Arc::new(5);
+    ///
+    /// assert_eq!(Some(Ordering::Less), five.partial_cmp(&Arc::new(6)));
+    /// ```
+    fn partial_cmp(&self, other: &Arc<T>) -> Option<Ordering> {
+        (**self).partial_cmp(&**other)
+    }
+
+    /// Less-than comparison for two `Arc`s.
+    ///
+    /// The two are compared by calling `<` on their inner values.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use std::sync::Arc;
+    ///
+    /// let five = Arc::new(5);
+    ///
+    /// assert!(five < Arc::new(6));
+    /// ```
+    fn lt(&self, other: &Arc<T>) -> bool {
+        *(*self) < *(*other)
+    }
+
+    /// 'Less than or equal to' comparison for two `Arc`s.
+    ///
+    /// The two are compared by calling `<=` on their inner values.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use std::sync::Arc;
+    ///
+    /// let five = Arc::new(5);
+    ///
+    /// assert!(five <= Arc::new(5));
+    /// ```
+    fn le(&self, other: &Arc<T>) -> bool {
+        *(*self) <= *(*other)
+    }
+
+    /// Greater-than comparison for two `Arc`s.
+    ///
+    /// The two are compared by calling `>` on their inner values.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use std::sync::Arc;
+    ///
+    /// let five = Arc::new(5);
+    ///
+    /// assert!(five > Arc::new(4));
+    /// ```
+    fn gt(&self, other: &Arc<T>) -> bool {
+        *(*self) > *(*other)
+    }
+
+    /// 'Greater than or equal to' comparison for two `Arc`s.
+    ///
+    /// The two are compared by calling `>=` on their inner values.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use std::sync::Arc;
+    ///
+    /// let five = Arc::new(5);
+    ///
+    /// assert!(five >= Arc::new(5));
+    /// ```
+    fn ge(&self, other: &Arc<T>) -> bool {
+        *(*self) >= *(*other)
+    }
+}
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: ?Sized + Ord> Ord for Arc<T> {
+    /// Comparison for two `Arc`s.
+    ///
+    /// The two are compared by calling `cmp()` on their inner values.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use std::sync::Arc;
+    /// use std::cmp::Ordering;
+    ///
+    /// let five = Arc::new(5);
+    ///
+    /// assert_eq!(Ordering::Less, five.cmp(&Arc::new(6)));
+    /// ```
+    fn cmp(&self, other: &Arc<T>) -> Ordering {
+        (**self).cmp(&**other)
+    }
+}
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: ?Sized + Eq> Eq for Arc<T> {}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: ?Sized + fmt::Display> fmt::Display for Arc<T> {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        fmt::Display::fmt(&**self, f)
+    }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: ?Sized + fmt::Debug> fmt::Debug for Arc<T> {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        fmt::Debug::fmt(&**self, f)
+    }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: ?Sized> fmt::Pointer for Arc<T> {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        fmt::Pointer::fmt(&(&**self as *const T), f)
+    }
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: Default> Default for Arc<T> {
+    /// Creates a new `Arc<T>`, with the `Default` value for `T`.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use std::sync::Arc;
+    ///
+    /// let x: Arc<i32> = Default::default();
+    /// assert_eq!(*x, 0);
+    /// ```
+    fn default() -> Arc<T> {
+        Arc::new(Default::default())
+    }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: ?Sized + Hash> Hash for Arc<T> {
+    fn hash<H: Hasher>(&self, state: &mut H) {
+        (**self).hash(state)
+    }
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "from_for_ptrs", since = "1.6.0")]
+impl<T> From<T> for Arc<T> {
+    /// Converts a `T` into an `Arc<T>`.
+    ///
+    /// The conversion moves the value into a
+    /// newly allocated `Arc`. It is equivalent to
+    /// calling `Arc::new(t)`.
+    ///
+    /// # Example
+    /// ```rust
+    /// # use std::sync::Arc;
+    /// let x = 5;
+    /// let arc = Arc::new(5);
+    ///
+    /// assert_eq!(Arc::from(x), arc);
+    /// ```
+    fn from(t: T) -> Self {
+        Arc::new(t)
+    }
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "shared_from_slice", since = "1.21.0")]
+impl<T: Clone> From<&[T]> for Arc<[T]> {
+    /// Allocate a reference-counted slice and fill it by cloning `v`'s items.
+    ///
+    /// # Example
+    ///
+    /// ```
+    /// # use std::sync::Arc;
+    /// let original: &[i32] = &[1, 2, 3];
+    /// let shared: Arc<[i32]> = Arc::from(original);
+    /// assert_eq!(&[1, 2, 3], &shared[..]);
+    /// ```
+    #[inline]
+    fn from(v: &[T]) -> Arc<[T]> {
+        <Self as ArcFromSlice<T>>::from_slice(v)
+    }
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "shared_from_slice", since = "1.21.0")]
+impl From<&str> for Arc<str> {
+    /// Allocate a reference-counted `str` and copy `v` into it.
+    ///
+    /// # Example
+    ///
+    /// ```
+    /// # use std::sync::Arc;
+    /// let shared: Arc<str> = Arc::from("eggplant");
+    /// assert_eq!("eggplant", &shared[..]);
+    /// ```
+    #[inline]
+    fn from(v: &str) -> Arc<str> {
+        let arc = Arc::<[u8]>::from(v.as_bytes());
+        unsafe { Arc::from_raw(Arc::into_raw(arc) as *const str) }
+    }
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "shared_from_slice", since = "1.21.0")]
+impl From<String> for Arc<str> {
+    /// Allocate a reference-counted `str` and copy `v` into it.
+    ///
+    /// # Example
+    ///
+    /// ```
+    /// # use std::sync::Arc;
+    /// let unique: String = "eggplant".to_owned();
+    /// let shared: Arc<str> = Arc::from(unique);
+    /// assert_eq!("eggplant", &shared[..]);
+    /// ```
+    #[inline]
+    fn from(v: String) -> Arc<str> {
+        Arc::from(&v[..])
+    }
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "shared_from_slice", since = "1.21.0")]
+impl<T: ?Sized> From<Box<T>> for Arc<T> {
+    /// Move a boxed object to a new, reference-counted allocation.
+    ///
+    /// # Example
+    ///
+    /// ```
+    /// # use std::sync::Arc;
+    /// let unique: Box<str> = Box::from("eggplant");
+    /// let shared: Arc<str> = Arc::from(unique);
+    /// assert_eq!("eggplant", &shared[..]);
+    /// ```
+    #[inline]
+    fn from(v: Box<T>) -> Arc<T> {
+        Arc::from_box(v)
+    }
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "shared_from_slice", since = "1.21.0")]
+impl<T> From<Vec<T>> for Arc<[T]> {
+    /// Allocate a reference-counted slice and move `v`'s items into it.
+    ///
+    /// # Example
+    ///
+    /// ```
+    /// # use std::sync::Arc;
+    /// let unique: Vec<i32> = vec![1, 2, 3];
+    /// let shared: Arc<[i32]> = Arc::from(unique);
+    /// assert_eq!(&[1, 2, 3], &shared[..]);
+    /// ```
+    #[inline]
+    fn from(mut v: Vec<T>) -> Arc<[T]> {
+        unsafe {
+            let arc = Arc::copy_from_slice(&v);
+
+            // Allow the Vec to free its memory, but not destroy its contents
+            v.set_len(0);
+
+            arc
+        }
+    }
+}
+
+#[stable(feature = "shared_from_cow", since = "1.45.0")]
+impl<'a, B> From<Cow<'a, B>> for Arc<B>
+where
+    B: ToOwned + ?Sized,
+    Arc<B>: From<&'a B> + From<B::Owned>,
+{
+    /// Create an atomically reference-counted pointer from
+    /// a clone-on-write pointer by copying its content.
+    ///
+    /// # Example
+    ///
+    /// ```rust
+    /// # use std::sync::Arc;
+    /// # use std::borrow::Cow;
+    /// let cow: Cow<str> = Cow::Borrowed("eggplant");
+    /// let shared: Arc<str> = Arc::from(cow);
+    /// assert_eq!("eggplant", &shared[..]);
+    /// ```
+    #[inline]
+    fn from(cow: Cow<'a, B>) -> Arc<B> {
+        match cow {
+            Cow::Borrowed(s) => Arc::from(s),
+            Cow::Owned(s) => Arc::from(s),
+        }
+    }
+}
+
+#[stable(feature = "shared_from_str", since = "1.62.0")]
+impl From<Arc<str>> for Arc<[u8]> {
+    /// Converts an atomically reference-counted string slice into a byte slice.
+    ///
+    /// # Example
+    ///
+    /// ```
+    /// # use std::sync::Arc;
+    /// let string: Arc<str> = Arc::from("eggplant");
+    /// let bytes: Arc<[u8]> = Arc::from(string);
+    /// assert_eq!("eggplant".as_bytes(), bytes.as_ref());
+    /// ```
+    #[inline]
+    fn from(rc: Arc<str>) -> Self {
+        // SAFETY: `str` has the same layout as `[u8]`.
+        unsafe { Arc::from_raw(Arc::into_raw(rc) as *const [u8]) }
+    }
+}
+
+#[stable(feature = "boxed_slice_try_from", since = "1.43.0")]
+impl<T, const N: usize> TryFrom<Arc<[T]>> for Arc<[T; N]> {
+    type Error = Arc<[T]>;
+
+    fn try_from(boxed_slice: Arc<[T]>) -> Result<Self, Self::Error> {
+        if boxed_slice.len() == N {
+            Ok(unsafe { Arc::from_raw(Arc::into_raw(boxed_slice) as *mut [T; N]) })
+        } else {
+            Err(boxed_slice)
+        }
+    }
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "shared_from_iter", since = "1.37.0")]
+impl<T> iter::FromIterator<T> for Arc<[T]> {
+    /// Takes each element in the `Iterator` and collects it into an `Arc<[T]>`.
+    ///
+    /// # Performance characteristics
+    ///
+    /// ## The general case
+    ///
+    /// In the general case, collecting into `Arc<[T]>` is done by first
+    /// collecting into a `Vec<T>`. That is, when writing the following:
+    ///
+    /// ```rust
+    /// # use std::sync::Arc;
+    /// let evens: Arc<[u8]> = (0..10).filter(|&x| x % 2 == 0).collect();
+    /// # assert_eq!(&*evens, &[0, 2, 4, 6, 8]);
+    /// ```
+    ///
+    /// this behaves as if we wrote:
+    ///
+    /// ```rust
+    /// # use std::sync::Arc;
+    /// let evens: Arc<[u8]> = (0..10).filter(|&x| x % 2 == 0)
+    ///     .collect::<Vec<_>>() // The first set of allocations happens here.
+    ///     .into(); // A second allocation for `Arc<[T]>` happens here.
+    /// # assert_eq!(&*evens, &[0, 2, 4, 6, 8]);
+    /// ```
+    ///
+    /// This will allocate as many times as needed for constructing the `Vec<T>`
+    /// and then it will allocate once for turning the `Vec<T>` into the `Arc<[T]>`.
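+    ///
+    /// As an illustrative workaround (not something this implementation does
+    /// for you), the repeated `Vec` growth, though not the final copy, can be
+    /// avoided by pre-sizing the `Vec` yourself:
+    ///
+    /// ```rust
+    /// # use std::sync::Arc;
+    /// let mut evens_vec: Vec<u8> = Vec::with_capacity(5);
+    /// evens_vec.extend((0..10).filter(|&x| x % 2 == 0));
+    /// let evens: Arc<[u8]> = evens_vec.into();
+    /// # assert_eq!(&*evens, &[0, 2, 4, 6, 8]);
+    /// ```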
+    ///
+    /// ## Iterators of known length
+    ///
+    /// When your `Iterator` implements `TrustedLen` and is of an exact size,
+    /// a single allocation will be made for the `Arc<[T]>`. For example:
+    ///
+    /// ```rust
+    /// # use std::sync::Arc;
+    /// let evens: Arc<[u8]> = (0..10).collect(); // Just a single allocation happens here.
+    /// # assert_eq!(&*evens, &*(0..10).collect::<Vec<u8>>());
+    /// ```
+    fn from_iter<I: iter::IntoIterator<Item = T>>(iter: I) -> Self {
+        ToArcSlice::to_arc_slice(iter.into_iter())
+    }
+}
+
+/// Specialization trait used for collecting into `Arc<[T]>`.
+trait ToArcSlice<T>: Iterator<Item = T> + Sized {
+    fn to_arc_slice(self) -> Arc<[T]>;
+}
+
+#[cfg(not(no_global_oom_handling))]
+impl<T, I: Iterator<Item = T>> ToArcSlice<T> for I {
+    default fn to_arc_slice(self) -> Arc<[T]> {
+        self.collect::<Vec<T>>().into()
+    }
+}
+
+#[cfg(not(no_global_oom_handling))]
+impl<T, I: iter::TrustedLen<Item = T>> ToArcSlice<T> for I {
+    fn to_arc_slice(self) -> Arc<[T]> {
+        // This is the case for a `TrustedLen` iterator.
+        let (low, high) = self.size_hint();
+        if let Some(high) = high {
+            debug_assert_eq!(
+                low,
+                high,
+                "TrustedLen iterator's size hint is not exact: {:?}",
+                (low, high)
+            );
+
+            unsafe {
+                // SAFETY: We need to ensure that the iterator has an exact length and we have.
+                Arc::from_iter_exact(self, low)
+            }
+        } else {
+            // TrustedLen contract guarantees that `upper_bound == None` implies an iterator
+            // length exceeding `usize::MAX`.
+            // The default implementation would collect into a vec which would panic.
+            // Thus we panic here immediately without invoking `Vec` code.
+            panic!("capacity overflow");
+        }
+    }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: ?Sized> borrow::Borrow<T> for Arc<T> {
+    fn borrow(&self) -> &T {
+        &**self
+    }
+}
+
+#[stable(since = "1.5.0", feature = "smart_ptr_as_ref")]
+impl<T: ?Sized> AsRef<T> for Arc<T> {
+    fn as_ref(&self) -> &T {
+        &**self
+    }
+}
+
+#[stable(feature = "pin", since = "1.33.0")]
+impl<T: ?Sized> Unpin for Arc<T> {}
+
+/// Get the offset within an `ArcInner` for the payload behind a pointer.
+///
+/// # Safety
+///
+/// The pointer must point to (and have valid metadata for) a previously
+/// valid instance of T, but the T is allowed to be dropped.
+unsafe fn data_offset<T: ?Sized>(ptr: *const T) -> usize {
+    // Align the unsized value to the end of the ArcInner.
+    // Because ArcInner is repr(C), it will always be the last field in memory.
+    // SAFETY: since the only unsized types possible are slices, trait objects,
+    // and extern types, the input safety requirement is currently enough to
+    // satisfy the requirements of align_of_val_raw; this is an implementation
+    // detail of the language that must not be relied upon outside of std.
+    unsafe { data_offset_align(align_of_val_raw(ptr)) }
+}
+
+#[inline]
+fn data_offset_align(align: usize) -> usize {
+    let layout = Layout::new::<ArcInner<()>>();
+    layout.size() + layout.padding_needed_for(align)
+}
-- 
cgit v1.2.3