#![stable(feature = "rust1", since = "1.0.0")] //! Thread-safe reference-counting pointers. //! //! See the [`Arc`][Arc] documentation for more details. //! //! **Note**: This module is only available on platforms that support atomic //! loads and stores of pointers. This may be detected at compile time using //! `#[cfg(target_has_atomic = "ptr")]`. use core::any::Any; use core::borrow; use core::cmp::Ordering; use core::fmt; use core::hash::{Hash, Hasher}; use core::hint; use core::intrinsics::abort; #[cfg(not(no_global_oom_handling))] use core::iter; use core::marker::{PhantomData, Unsize}; #[cfg(not(no_global_oom_handling))] use core::mem::size_of_val; use core::mem::{self, align_of_val_raw}; use core::ops::{CoerceUnsized, Deref, DispatchFromDyn, Receiver}; use core::panic::{RefUnwindSafe, UnwindSafe}; use core::pin::Pin; use core::ptr::{self, NonNull}; #[cfg(not(no_global_oom_handling))] use core::slice::from_raw_parts_mut; use core::sync::atomic; use core::sync::atomic::Ordering::{Acquire, Relaxed, Release}; #[cfg(not(no_global_oom_handling))] use crate::alloc::handle_alloc_error; #[cfg(not(no_global_oom_handling))] use crate::alloc::WriteCloneIntoRaw; use crate::alloc::{AllocError, Allocator, Global, Layout}; use crate::borrow::{Cow, ToOwned}; use crate::boxed::Box; use crate::rc::is_dangling; #[cfg(not(no_global_oom_handling))] use crate::string::String; #[cfg(not(no_global_oom_handling))] use crate::vec::Vec; #[cfg(test)] mod tests; /// A soft limit on the amount of references that may be made to an `Arc`. /// /// Going above this limit will abort your program (although not /// necessarily) at _exactly_ `MAX_REFCOUNT + 1` references. /// Trying to go above it might call a `panic` (if not actually going above it). /// /// This is a global invariant, and also applies when using a compare-exchange loop. /// /// See comment in `Arc::clone`. const MAX_REFCOUNT: usize = (isize::MAX) as usize; /// The error in case either counter reaches above `MAX_REFCOUNT`, and we can `panic` safely. const INTERNAL_OVERFLOW_ERROR: &str = "Arc counter overflow"; #[cfg(not(sanitize = "thread"))] macro_rules! acquire { ($x:expr) => { atomic::fence(Acquire) }; } // ThreadSanitizer does not support memory fences. To avoid false positive // reports in Arc / Weak implementation use atomic loads for synchronization // instead. #[cfg(sanitize = "thread")] macro_rules! acquire { ($x:expr) => { $x.load(Acquire) }; } /// A thread-safe reference-counting pointer. 'Arc' stands for 'Atomically /// Reference Counted'. /// /// The type `Arc` provides shared ownership of a value of type `T`, /// allocated in the heap. Invoking [`clone`][clone] on `Arc` produces /// a new `Arc` instance, which points to the same allocation on the heap as the /// source `Arc`, while increasing a reference count. When the last `Arc` /// pointer to a given allocation is destroyed, the value stored in that allocation (often /// referred to as "inner value") is also dropped. /// /// Shared references in Rust disallow mutation by default, and `Arc` is no /// exception: you cannot generally obtain a mutable reference to something /// inside an `Arc`. If you need to mutate through an `Arc`, use /// [`Mutex`][mutex], [`RwLock`][rwlock], or one of the [`Atomic`][atomic] /// types. /// /// **Note**: This type is only available on platforms that support atomic /// loads and stores of pointers, which includes all platforms that support /// the `std` crate but not all those which only support [`alloc`](crate). /// This may be detected at compile time using `#[cfg(target_has_atomic = "ptr")]`. /// /// ## Thread Safety /// /// Unlike [`Rc`], `Arc` uses atomic operations for its reference /// counting. This means that it is thread-safe. The disadvantage is that /// atomic operations are more expensive than ordinary memory accesses. If you /// are not sharing reference-counted allocations between threads, consider using /// [`Rc`] for lower overhead. [`Rc`] is a safe default, because the /// compiler will catch any attempt to send an [`Rc`] between threads. /// However, a library might choose `Arc` in order to give library consumers /// more flexibility. /// /// `Arc` will implement [`Send`] and [`Sync`] as long as the `T` implements /// [`Send`] and [`Sync`]. Why can't you put a non-thread-safe type `T` in an /// `Arc` to make it thread-safe? This may be a bit counter-intuitive at /// first: after all, isn't the point of `Arc` thread safety? The key is /// this: `Arc` makes it thread safe to have multiple ownership of the same /// data, but it doesn't add thread safety to its data. Consider /// Arc<[RefCell\]>. [`RefCell`] isn't [`Sync`], and if `Arc` was always /// [`Send`], Arc<[RefCell\]> would be as well. But then we'd have a problem: /// [`RefCell`] is not thread safe; it keeps track of the borrowing count using /// non-atomic operations. /// /// In the end, this means that you may need to pair `Arc` with some sort of /// [`std::sync`] type, usually [`Mutex`][mutex]. /// /// ## Breaking cycles with `Weak` /// /// The [`downgrade`][downgrade] method can be used to create a non-owning /// [`Weak`] pointer. A [`Weak`] pointer can be [`upgrade`][upgrade]d /// to an `Arc`, but this will return [`None`] if the value stored in the allocation has /// already been dropped. In other words, `Weak` pointers do not keep the value /// inside the allocation alive; however, they *do* keep the allocation /// (the backing store for the value) alive. /// /// A cycle between `Arc` pointers will never be deallocated. For this reason, /// [`Weak`] is used to break cycles. For example, a tree could have /// strong `Arc` pointers from parent nodes to children, and [`Weak`] /// pointers from children back to their parents. /// /// # Cloning references /// /// Creating a new reference from an existing reference-counted pointer is done using the /// `Clone` trait implemented for [`Arc`][Arc] and [`Weak`][Weak]. /// /// ``` /// use std::sync::Arc; /// let foo = Arc::new(vec![1.0, 2.0, 3.0]); /// // The two syntaxes below are equivalent. /// let a = foo.clone(); /// let b = Arc::clone(&foo); /// // a, b, and foo are all Arcs that point to the same memory location /// ``` /// /// ## `Deref` behavior /// /// `Arc` automatically dereferences to `T` (via the [`Deref`] trait), /// so you can call `T`'s methods on a value of type `Arc`. To avoid name /// clashes with `T`'s methods, the methods of `Arc` itself are associated /// functions, called using [fully qualified syntax]: /// /// ``` /// use std::sync::Arc; /// /// let my_arc = Arc::new(()); /// let my_weak = Arc::downgrade(&my_arc); /// ``` /// /// `Arc`'s implementations of traits like `Clone` may also be called using /// fully qualified syntax. Some people prefer to use fully qualified syntax, /// while others prefer using method-call syntax. /// /// ``` /// use std::sync::Arc; /// /// let arc = Arc::new(()); /// // Method-call syntax /// let arc2 = arc.clone(); /// // Fully qualified syntax /// let arc3 = Arc::clone(&arc); /// ``` /// /// [`Weak`][Weak] does not auto-dereference to `T`, because the inner value may have /// already been dropped. /// /// [`Rc`]: crate::rc::Rc /// [clone]: Clone::clone /// [mutex]: ../../std/sync/struct.Mutex.html /// [rwlock]: ../../std/sync/struct.RwLock.html /// [atomic]: core::sync::atomic /// [downgrade]: Arc::downgrade /// [upgrade]: Weak::upgrade /// [RefCell\]: core::cell::RefCell /// [`RefCell`]: core::cell::RefCell /// [`std::sync`]: ../../std/sync/index.html /// [`Arc::clone(&from)`]: Arc::clone /// [fully qualified syntax]: https://doc.rust-lang.org/book/ch19-03-advanced-traits.html#fully-qualified-syntax-for-disambiguation-calling-methods-with-the-same-name /// /// # Examples /// /// Sharing some immutable data between threads: /// // Note that we **do not** run these tests here. The windows builders get super // unhappy if a thread outlives the main thread and then exits at the same time // (something deadlocks) so we just avoid this entirely by not running these // tests. /// ```no_run /// use std::sync::Arc; /// use std::thread; /// /// let five = Arc::new(5); /// /// for _ in 0..10 { /// let five = Arc::clone(&five); /// /// thread::spawn(move || { /// println!("{five:?}"); /// }); /// } /// ``` /// /// Sharing a mutable [`AtomicUsize`]: /// /// [`AtomicUsize`]: core::sync::atomic::AtomicUsize "sync::atomic::AtomicUsize" /// /// ```no_run /// use std::sync::Arc; /// use std::sync::atomic::{AtomicUsize, Ordering}; /// use std::thread; /// /// let val = Arc::new(AtomicUsize::new(5)); /// /// for _ in 0..10 { /// let val = Arc::clone(&val); /// /// thread::spawn(move || { /// let v = val.fetch_add(1, Ordering::SeqCst); /// println!("{v:?}"); /// }); /// } /// ``` /// /// See the [`rc` documentation][rc_examples] for more examples of reference /// counting in general. /// /// [rc_examples]: crate::rc#examples #[cfg_attr(not(test), rustc_diagnostic_item = "Arc")] #[stable(feature = "rust1", since = "1.0.0")] pub struct Arc< T: ?Sized, #[unstable(feature = "allocator_api", issue = "32838")] A: Allocator = Global, > { ptr: NonNull>, phantom: PhantomData>, alloc: A, } #[stable(feature = "rust1", since = "1.0.0")] unsafe impl Send for Arc {} #[stable(feature = "rust1", since = "1.0.0")] unsafe impl Sync for Arc {} #[stable(feature = "catch_unwind", since = "1.9.0")] impl UnwindSafe for Arc {} #[unstable(feature = "coerce_unsized", issue = "18598")] impl, U: ?Sized, A: Allocator> CoerceUnsized> for Arc {} #[unstable(feature = "dispatch_from_dyn", issue = "none")] impl, U: ?Sized> DispatchFromDyn> for Arc {} impl Arc { unsafe fn from_inner(ptr: NonNull>) -> Self { unsafe { Self::from_inner_in(ptr, Global) } } unsafe fn from_ptr(ptr: *mut ArcInner) -> Self { unsafe { Self::from_ptr_in(ptr, Global) } } } impl Arc { #[inline] unsafe fn from_inner_in(ptr: NonNull>, alloc: A) -> Self { Self { ptr, phantom: PhantomData, alloc } } #[inline] unsafe fn from_ptr_in(ptr: *mut ArcInner, alloc: A) -> Self { unsafe { Self::from_inner_in(NonNull::new_unchecked(ptr), alloc) } } } /// `Weak` is a version of [`Arc`] that holds a non-owning reference to the /// managed allocation. The allocation is accessed by calling [`upgrade`] on the `Weak` /// pointer, which returns an [Option]<[Arc]\>. /// /// Since a `Weak` reference does not count towards ownership, it will not /// prevent the value stored in the allocation from being dropped, and `Weak` itself makes no /// guarantees about the value still being present. Thus it may return [`None`] /// when [`upgrade`]d. Note however that a `Weak` reference *does* prevent the allocation /// itself (the backing store) from being deallocated. /// /// A `Weak` pointer is useful for keeping a temporary reference to the allocation /// managed by [`Arc`] without preventing its inner value from being dropped. It is also used to /// prevent circular references between [`Arc`] pointers, since mutual owning references /// would never allow either [`Arc`] to be dropped. For example, a tree could /// have strong [`Arc`] pointers from parent nodes to children, and `Weak` /// pointers from children back to their parents. /// /// The typical way to obtain a `Weak` pointer is to call [`Arc::downgrade`]. /// /// [`upgrade`]: Weak::upgrade #[stable(feature = "arc_weak", since = "1.4.0")] pub struct Weak< T: ?Sized, #[unstable(feature = "allocator_api", issue = "32838")] A: Allocator = Global, > { // This is a `NonNull` to allow optimizing the size of this type in enums, // but it is not necessarily a valid pointer. // `Weak::new` sets this to `usize::MAX` so that it doesn’t need // to allocate space on the heap. That's not a value a real pointer // will ever have because RcBox has alignment at least 2. // This is only possible when `T: Sized`; unsized `T` never dangle. ptr: NonNull>, alloc: A, } #[stable(feature = "arc_weak", since = "1.4.0")] unsafe impl Send for Weak {} #[stable(feature = "arc_weak", since = "1.4.0")] unsafe impl Sync for Weak {} #[unstable(feature = "coerce_unsized", issue = "18598")] impl, U: ?Sized, A: Allocator> CoerceUnsized> for Weak {} #[unstable(feature = "dispatch_from_dyn", issue = "none")] impl, U: ?Sized> DispatchFromDyn> for Weak {} #[stable(feature = "arc_weak", since = "1.4.0")] impl fmt::Debug for Weak { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "(Weak)") } } // This is repr(C) to future-proof against possible field-reordering, which // would interfere with otherwise safe [into|from]_raw() of transmutable // inner types. #[repr(C)] struct ArcInner { strong: atomic::AtomicUsize, // the value usize::MAX acts as a sentinel for temporarily "locking" the // ability to upgrade weak pointers or downgrade strong ones; this is used // to avoid races in `make_mut` and `get_mut`. weak: atomic::AtomicUsize, data: T, } /// Calculate layout for `ArcInner` using the inner value's layout fn arcinner_layout_for_value_layout(layout: Layout) -> Layout { // Calculate layout using the given value layout. // Previously, layout was calculated on the expression // `&*(ptr as *const ArcInner)`, but this created a misaligned // reference (see #54908). Layout::new::>().extend(layout).unwrap().0.pad_to_align() } unsafe impl Send for ArcInner {} unsafe impl Sync for ArcInner {} impl Arc { /// Constructs a new `Arc`. /// /// # Examples /// /// ``` /// use std::sync::Arc; /// /// let five = Arc::new(5); /// ``` #[cfg(not(no_global_oom_handling))] #[inline] #[stable(feature = "rust1", since = "1.0.0")] pub fn new(data: T) -> Arc { // Start the weak pointer count as 1 which is the weak pointer that's // held by all the strong pointers (kinda), see std/rc.rs for more info let x: Box<_> = Box::new(ArcInner { strong: atomic::AtomicUsize::new(1), weak: atomic::AtomicUsize::new(1), data, }); unsafe { Self::from_inner(Box::leak(x).into()) } } /// Constructs a new `Arc` while giving you a `Weak` to the allocation, /// to allow you to construct a `T` which holds a weak pointer to itself. /// /// Generally, a structure circularly referencing itself, either directly or /// indirectly, should not hold a strong reference to itself to prevent a memory leak. /// Using this function, you get access to the weak pointer during the /// initialization of `T`, before the `Arc` is created, such that you can /// clone and store it inside the `T`. /// /// `new_cyclic` first allocates the managed allocation for the `Arc`, /// then calls your closure, giving it a `Weak` to this allocation, /// and only afterwards completes the construction of the `Arc` by placing /// the `T` returned from your closure into the allocation. /// /// Since the new `Arc` is not fully-constructed until `Arc::new_cyclic` /// returns, calling [`upgrade`] on the weak reference inside your closure will /// fail and result in a `None` value. /// /// # Panics /// /// If `data_fn` panics, the panic is propagated to the caller, and the /// temporary [`Weak`] is dropped normally. /// /// # Example /// /// ``` /// # #![allow(dead_code)] /// use std::sync::{Arc, Weak}; /// /// struct Gadget { /// me: Weak, /// } /// /// impl Gadget { /// /// Construct a reference counted Gadget. /// fn new() -> Arc { /// // `me` is a `Weak` pointing at the new allocation of the /// // `Arc` we're constructing. /// Arc::new_cyclic(|me| { /// // Create the actual struct here. /// Gadget { me: me.clone() } /// }) /// } /// /// /// Return a reference counted pointer to Self. /// fn me(&self) -> Arc { /// self.me.upgrade().unwrap() /// } /// } /// ``` /// [`upgrade`]: Weak::upgrade #[cfg(not(no_global_oom_handling))] #[inline] #[stable(feature = "arc_new_cyclic", since = "1.60.0")] pub fn new_cyclic(data_fn: F) -> Arc where F: FnOnce(&Weak) -> T, { // Construct the inner in the "uninitialized" state with a single // weak reference. let uninit_ptr: NonNull<_> = Box::leak(Box::new(ArcInner { strong: atomic::AtomicUsize::new(0), weak: atomic::AtomicUsize::new(1), data: mem::MaybeUninit::::uninit(), })) .into(); let init_ptr: NonNull> = uninit_ptr.cast(); let weak = Weak { ptr: init_ptr, alloc: Global }; // It's important we don't give up ownership of the weak pointer, or // else the memory might be freed by the time `data_fn` returns. If // we really wanted to pass ownership, we could create an additional // weak pointer for ourselves, but this would result in additional // updates to the weak reference count which might not be necessary // otherwise. let data = data_fn(&weak); // Now we can properly initialize the inner value and turn our weak // reference into a strong reference. let strong = unsafe { let inner = init_ptr.as_ptr(); ptr::write(ptr::addr_of_mut!((*inner).data), data); // The above write to the data field must be visible to any threads which // observe a non-zero strong count. Therefore we need at least "Release" ordering // in order to synchronize with the `compare_exchange_weak` in `Weak::upgrade`. // // "Acquire" ordering is not required. When considering the possible behaviours // of `data_fn` we only need to look at what it could do with a reference to a // non-upgradeable `Weak`: // - It can *clone* the `Weak`, increasing the weak reference count. // - It can drop those clones, decreasing the weak reference count (but never to zero). // // These side effects do not impact us in any way, and no other side effects are // possible with safe code alone. let prev_value = (*inner).strong.fetch_add(1, Release); debug_assert_eq!(prev_value, 0, "No prior strong references should exist"); Arc::from_inner(init_ptr) }; // Strong references should collectively own a shared weak reference, // so don't run the destructor for our old weak reference. mem::forget(weak); strong } /// Constructs a new `Arc` with uninitialized contents. /// /// # Examples /// /// ``` /// #![feature(new_uninit)] /// #![feature(get_mut_unchecked)] /// /// use std::sync::Arc; /// /// let mut five = Arc::::new_uninit(); /// /// // Deferred initialization: /// Arc::get_mut(&mut five).unwrap().write(5); /// /// let five = unsafe { five.assume_init() }; /// /// assert_eq!(*five, 5) /// ``` #[cfg(not(no_global_oom_handling))] #[inline] #[unstable(feature = "new_uninit", issue = "63291")] #[must_use] pub fn new_uninit() -> Arc> { unsafe { Arc::from_ptr(Arc::allocate_for_layout( Layout::new::(), |layout| Global.allocate(layout), <*mut u8>::cast, )) } } /// Constructs a new `Arc` with uninitialized contents, with the memory /// being filled with `0` bytes. /// /// See [`MaybeUninit::zeroed`][zeroed] for examples of correct and incorrect usage /// of this method. /// /// # Examples /// /// ``` /// #![feature(new_uninit)] /// /// use std::sync::Arc; /// /// let zero = Arc::::new_zeroed(); /// let zero = unsafe { zero.assume_init() }; /// /// assert_eq!(*zero, 0) /// ``` /// /// [zeroed]: mem::MaybeUninit::zeroed #[cfg(not(no_global_oom_handling))] #[inline] #[unstable(feature = "new_uninit", issue = "63291")] #[must_use] pub fn new_zeroed() -> Arc> { unsafe { Arc::from_ptr(Arc::allocate_for_layout( Layout::new::(), |layout| Global.allocate_zeroed(layout), <*mut u8>::cast, )) } } /// Constructs a new `Pin>`. If `T` does not implement `Unpin`, then /// `data` will be pinned in memory and unable to be moved. #[cfg(not(no_global_oom_handling))] #[stable(feature = "pin", since = "1.33.0")] #[must_use] pub fn pin(data: T) -> Pin> { unsafe { Pin::new_unchecked(Arc::new(data)) } } /// Constructs a new `Pin>`, return an error if allocation fails. #[unstable(feature = "allocator_api", issue = "32838")] #[inline] pub fn try_pin(data: T) -> Result>, AllocError> { unsafe { Ok(Pin::new_unchecked(Arc::try_new(data)?)) } } /// Constructs a new `Arc`, returning an error if allocation fails. /// /// # Examples /// /// ``` /// #![feature(allocator_api)] /// use std::sync::Arc; /// /// let five = Arc::try_new(5)?; /// # Ok::<(), std::alloc::AllocError>(()) /// ``` #[unstable(feature = "allocator_api", issue = "32838")] #[inline] pub fn try_new(data: T) -> Result, AllocError> { // Start the weak pointer count as 1 which is the weak pointer that's // held by all the strong pointers (kinda), see std/rc.rs for more info let x: Box<_> = Box::try_new(ArcInner { strong: atomic::AtomicUsize::new(1), weak: atomic::AtomicUsize::new(1), data, })?; unsafe { Ok(Self::from_inner(Box::leak(x).into())) } } /// Constructs a new `Arc` with uninitialized contents, returning an error /// if allocation fails. /// /// # Examples /// /// ``` /// #![feature(new_uninit, allocator_api)] /// #![feature(get_mut_unchecked)] /// /// use std::sync::Arc; /// /// let mut five = Arc::::try_new_uninit()?; /// /// // Deferred initialization: /// Arc::get_mut(&mut five).unwrap().write(5); /// /// let five = unsafe { five.assume_init() }; /// /// assert_eq!(*five, 5); /// # Ok::<(), std::alloc::AllocError>(()) /// ``` #[unstable(feature = "allocator_api", issue = "32838")] // #[unstable(feature = "new_uninit", issue = "63291")] pub fn try_new_uninit() -> Result>, AllocError> { unsafe { Ok(Arc::from_ptr(Arc::try_allocate_for_layout( Layout::new::(), |layout| Global.allocate(layout), <*mut u8>::cast, )?)) } } /// Constructs a new `Arc` with uninitialized contents, with the memory /// being filled with `0` bytes, returning an error if allocation fails. /// /// See [`MaybeUninit::zeroed`][zeroed] for examples of correct and incorrect usage /// of this method. /// /// # Examples /// /// ``` /// #![feature(new_uninit, allocator_api)] /// /// use std::sync::Arc; /// /// let zero = Arc::::try_new_zeroed()?; /// let zero = unsafe { zero.assume_init() }; /// /// assert_eq!(*zero, 0); /// # Ok::<(), std::alloc::AllocError>(()) /// ``` /// /// [zeroed]: mem::MaybeUninit::zeroed #[unstable(feature = "allocator_api", issue = "32838")] // #[unstable(feature = "new_uninit", issue = "63291")] pub fn try_new_zeroed() -> Result>, AllocError> { unsafe { Ok(Arc::from_ptr(Arc::try_allocate_for_layout( Layout::new::(), |layout| Global.allocate_zeroed(layout), <*mut u8>::cast, )?)) } } } impl Arc { /// Returns a reference to the underlying allocator. /// /// Note: this is an associated function, which means that you have /// to call it as `Arc::allocator(&a)` instead of `a.allocator()`. This /// is so that there is no conflict with a method on the inner type. #[inline] #[unstable(feature = "allocator_api", issue = "32838")] pub fn allocator(this: &Self) -> &A { &this.alloc } /// Constructs a new `Arc` in the provided allocator. /// /// # Examples /// /// ``` /// #![feature(allocator_api)] /// /// use std::sync::Arc; /// use std::alloc::System; /// /// let five = Arc::new_in(5, System); /// ``` #[inline] #[cfg(not(no_global_oom_handling))] #[unstable(feature = "allocator_api", issue = "32838")] pub fn new_in(data: T, alloc: A) -> Arc { // Start the weak pointer count as 1 which is the weak pointer that's // held by all the strong pointers (kinda), see std/rc.rs for more info let x = Box::new_in( ArcInner { strong: atomic::AtomicUsize::new(1), weak: atomic::AtomicUsize::new(1), data, }, alloc, ); let (ptr, alloc) = Box::into_unique(x); unsafe { Self::from_inner_in(ptr.into(), alloc) } } /// Constructs a new `Arc` with uninitialized contents in the provided allocator. /// /// # Examples /// /// ``` /// #![feature(new_uninit)] /// #![feature(get_mut_unchecked)] /// #![feature(allocator_api)] /// /// use std::sync::Arc; /// use std::alloc::System; /// /// let mut five = Arc::::new_uninit_in(System); /// /// let five = unsafe { /// // Deferred initialization: /// Arc::get_mut_unchecked(&mut five).as_mut_ptr().write(5); /// /// five.assume_init() /// }; /// /// assert_eq!(*five, 5) /// ``` #[cfg(not(no_global_oom_handling))] #[unstable(feature = "allocator_api", issue = "32838")] // #[unstable(feature = "new_uninit", issue = "63291")] #[inline] pub fn new_uninit_in(alloc: A) -> Arc, A> { unsafe { Arc::from_ptr_in( Arc::allocate_for_layout( Layout::new::(), |layout| alloc.allocate(layout), <*mut u8>::cast, ), alloc, ) } } /// Constructs a new `Arc` with uninitialized contents, with the memory /// being filled with `0` bytes, in the provided allocator. /// /// See [`MaybeUninit::zeroed`][zeroed] for examples of correct and incorrect usage /// of this method. /// /// # Examples /// /// ``` /// #![feature(new_uninit)] /// #![feature(allocator_api)] /// /// use std::sync::Arc; /// use std::alloc::System; /// /// let zero = Arc::::new_zeroed_in(System); /// let zero = unsafe { zero.assume_init() }; /// /// assert_eq!(*zero, 0) /// ``` /// /// [zeroed]: mem::MaybeUninit::zeroed #[cfg(not(no_global_oom_handling))] #[unstable(feature = "allocator_api", issue = "32838")] // #[unstable(feature = "new_uninit", issue = "63291")] #[inline] pub fn new_zeroed_in(alloc: A) -> Arc, A> { unsafe { Arc::from_ptr_in( Arc::allocate_for_layout( Layout::new::(), |layout| alloc.allocate_zeroed(layout), <*mut u8>::cast, ), alloc, ) } } /// Constructs a new `Pin>` in the provided allocator. If `T` does not implement `Unpin`, /// then `data` will be pinned in memory and unable to be moved. #[cfg(not(no_global_oom_handling))] #[unstable(feature = "allocator_api", issue = "32838")] #[inline] pub fn pin_in(data: T, alloc: A) -> Pin> { unsafe { Pin::new_unchecked(Arc::new_in(data, alloc)) } } /// Constructs a new `Pin>` in the provided allocator, return an error if allocation /// fails. #[inline] #[unstable(feature = "allocator_api", issue = "32838")] pub fn try_pin_in(data: T, alloc: A) -> Result>, AllocError> { unsafe { Ok(Pin::new_unchecked(Arc::try_new_in(data, alloc)?)) } } /// Constructs a new `Arc` in the provided allocator, returning an error if allocation fails. /// /// # Examples /// /// ``` /// #![feature(allocator_api)] /// /// use std::sync::Arc; /// use std::alloc::System; /// /// let five = Arc::try_new_in(5, System)?; /// # Ok::<(), std::alloc::AllocError>(()) /// ``` #[inline] #[unstable(feature = "allocator_api", issue = "32838")] #[inline] pub fn try_new_in(data: T, alloc: A) -> Result, AllocError> { // Start the weak pointer count as 1 which is the weak pointer that's // held by all the strong pointers (kinda), see std/rc.rs for more info let x = Box::try_new_in( ArcInner { strong: atomic::AtomicUsize::new(1), weak: atomic::AtomicUsize::new(1), data, }, alloc, )?; let (ptr, alloc) = Box::into_unique(x); Ok(unsafe { Self::from_inner_in(ptr.into(), alloc) }) } /// Constructs a new `Arc` with uninitialized contents, in the provided allocator, returning an /// error if allocation fails. /// /// # Examples /// /// ``` /// #![feature(new_uninit, allocator_api)] /// #![feature(get_mut_unchecked)] /// /// use std::sync::Arc; /// use std::alloc::System; /// /// let mut five = Arc::::try_new_uninit_in(System)?; /// /// let five = unsafe { /// // Deferred initialization: /// Arc::get_mut_unchecked(&mut five).as_mut_ptr().write(5); /// /// five.assume_init() /// }; /// /// assert_eq!(*five, 5); /// # Ok::<(), std::alloc::AllocError>(()) /// ``` #[unstable(feature = "allocator_api", issue = "32838")] // #[unstable(feature = "new_uninit", issue = "63291")] #[inline] pub fn try_new_uninit_in(alloc: A) -> Result, A>, AllocError> { unsafe { Ok(Arc::from_ptr_in( Arc::try_allocate_for_layout( Layout::new::(), |layout| alloc.allocate(layout), <*mut u8>::cast, )?, alloc, )) } } /// Constructs a new `Arc` with uninitialized contents, with the memory /// being filled with `0` bytes, in the provided allocator, returning an error if allocation /// fails. /// /// See [`MaybeUninit::zeroed`][zeroed] for examples of correct and incorrect usage /// of this method. /// /// # Examples /// /// ``` /// #![feature(new_uninit, allocator_api)] /// /// use std::sync::Arc; /// use std::alloc::System; /// /// let zero = Arc::::try_new_zeroed_in(System)?; /// let zero = unsafe { zero.assume_init() }; /// /// assert_eq!(*zero, 0); /// # Ok::<(), std::alloc::AllocError>(()) /// ``` /// /// [zeroed]: mem::MaybeUninit::zeroed #[unstable(feature = "allocator_api", issue = "32838")] // #[unstable(feature = "new_uninit", issue = "63291")] #[inline] pub fn try_new_zeroed_in(alloc: A) -> Result, A>, AllocError> { unsafe { Ok(Arc::from_ptr_in( Arc::try_allocate_for_layout( Layout::new::(), |layout| alloc.allocate_zeroed(layout), <*mut u8>::cast, )?, alloc, )) } } /// Returns the inner value, if the `Arc` has exactly one strong reference. /// /// Otherwise, an [`Err`] is returned with the same `Arc` that was /// passed in. /// /// This will succeed even if there are outstanding weak references. /// /// It is strongly recommended to use [`Arc::into_inner`] instead if you don't /// want to keep the `Arc` in the [`Err`] case. /// Immediately dropping the [`Err`] payload, like in the expression /// `Arc::try_unwrap(this).ok()`, can still cause the strong count to /// drop to zero and the inner value of the `Arc` to be dropped: /// For instance if two threads each execute this expression in parallel, then /// there is a race condition. The threads could first both check whether they /// have the last clone of their `Arc` via `Arc::try_unwrap`, and then /// both drop their `Arc` in the call to [`ok`][`Result::ok`], /// taking the strong count from two down to zero. /// /// # Examples /// /// ``` /// use std::sync::Arc; /// /// let x = Arc::new(3); /// assert_eq!(Arc::try_unwrap(x), Ok(3)); /// /// let x = Arc::new(4); /// let _y = Arc::clone(&x); /// assert_eq!(*Arc::try_unwrap(x).unwrap_err(), 4); /// ``` #[inline] #[stable(feature = "arc_unique", since = "1.4.0")] pub fn try_unwrap(this: Self) -> Result { if this.inner().strong.compare_exchange(1, 0, Relaxed, Relaxed).is_err() { return Err(this); } acquire!(this.inner().strong); unsafe { let elem = ptr::read(&this.ptr.as_ref().data); let alloc = ptr::read(&this.alloc); // copy the allocator // Make a weak pointer to clean up the implicit strong-weak reference let _weak = Weak { ptr: this.ptr, alloc }; mem::forget(this); Ok(elem) } } /// Returns the inner value, if the `Arc` has exactly one strong reference. /// /// Otherwise, [`None`] is returned and the `Arc` is dropped. /// /// This will succeed even if there are outstanding weak references. /// /// If `Arc::into_inner` is called on every clone of this `Arc`, /// it is guaranteed that exactly one of the calls returns the inner value. /// This means in particular that the inner value is not dropped. /// /// The similar expression `Arc::try_unwrap(this).ok()` does not /// offer such a guarantee. See the last example below /// and the documentation of [`Arc::try_unwrap`]. /// /// # Examples /// /// Minimal example demonstrating the guarantee that `Arc::into_inner` gives. /// ``` /// use std::sync::Arc; /// /// let x = Arc::new(3); /// let y = Arc::clone(&x); /// /// // Two threads calling `Arc::into_inner` on both clones of an `Arc`: /// let x_thread = std::thread::spawn(|| Arc::into_inner(x)); /// let y_thread = std::thread::spawn(|| Arc::into_inner(y)); /// /// let x_inner_value = x_thread.join().unwrap(); /// let y_inner_value = y_thread.join().unwrap(); /// /// // One of the threads is guaranteed to receive the inner value: /// assert!(matches!( /// (x_inner_value, y_inner_value), /// (None, Some(3)) | (Some(3), None) /// )); /// // The result could also be `(None, None)` if the threads called /// // `Arc::try_unwrap(x).ok()` and `Arc::try_unwrap(y).ok()` instead. /// ``` /// /// A more practical example demonstrating the need for `Arc::into_inner`: /// ``` /// use std::sync::Arc; /// /// // Definition of a simple singly linked list using `Arc`: /// #[derive(Clone)] /// struct LinkedList(Option>>); /// struct Node(T, Option>>); /// /// // Dropping a long `LinkedList` relying on the destructor of `Arc` /// // can cause a stack overflow. To prevent this, we can provide a /// // manual `Drop` implementation that does the destruction in a loop: /// impl Drop for LinkedList { /// fn drop(&mut self) { /// let mut link = self.0.take(); /// while let Some(arc_node) = link.take() { /// if let Some(Node(_value, next)) = Arc::into_inner(arc_node) { /// link = next; /// } /// } /// } /// } /// /// // Implementation of `new` and `push` omitted /// impl LinkedList { /// /* ... */ /// # fn new() -> Self { /// # LinkedList(None) /// # } /// # fn push(&mut self, x: T) { /// # self.0 = Some(Arc::new(Node(x, self.0.take()))); /// # } /// } /// /// // The following code could have still caused a stack overflow /// // despite the manual `Drop` impl if that `Drop` impl had used /// // `Arc::try_unwrap(arc).ok()` instead of `Arc::into_inner(arc)`. /// /// // Create a long list and clone it /// let mut x = LinkedList::new(); /// for i in 0..100000 { /// x.push(i); // Adds i to the front of x /// } /// let y = x.clone(); /// /// // Drop the clones in parallel /// let x_thread = std::thread::spawn(|| drop(x)); /// let y_thread = std::thread::spawn(|| drop(y)); /// x_thread.join().unwrap(); /// y_thread.join().unwrap(); /// ``` #[inline] #[stable(feature = "arc_into_inner", since = "1.70.0")] pub fn into_inner(this: Self) -> Option { // Make sure that the ordinary `Drop` implementation isn’t called as well let mut this = mem::ManuallyDrop::new(this); // Following the implementation of `drop` and `drop_slow` if this.inner().strong.fetch_sub(1, Release) != 1 { return None; } acquire!(this.inner().strong); // SAFETY: This mirrors the line // // unsafe { ptr::drop_in_place(Self::get_mut_unchecked(self)) }; // // in `drop_slow`. Instead of dropping the value behind the pointer, // it is read and eventually returned; `ptr::read` has the same // safety conditions as `ptr::drop_in_place`. let inner = unsafe { ptr::read(Self::get_mut_unchecked(&mut this)) }; let alloc = unsafe { ptr::read(&this.alloc) }; drop(Weak { ptr: this.ptr, alloc }); Some(inner) } } impl Arc<[T]> { /// Constructs a new atomically reference-counted slice with uninitialized contents. /// /// # Examples /// /// ``` /// #![feature(new_uninit)] /// #![feature(get_mut_unchecked)] /// /// use std::sync::Arc; /// /// let mut values = Arc::<[u32]>::new_uninit_slice(3); /// /// // Deferred initialization: /// let data = Arc::get_mut(&mut values).unwrap(); /// data[0].write(1); /// data[1].write(2); /// data[2].write(3); /// /// let values = unsafe { values.assume_init() }; /// /// assert_eq!(*values, [1, 2, 3]) /// ``` #[cfg(not(no_global_oom_handling))] #[inline] #[unstable(feature = "new_uninit", issue = "63291")] #[must_use] pub fn new_uninit_slice(len: usize) -> Arc<[mem::MaybeUninit]> { unsafe { Arc::from_ptr(Arc::allocate_for_slice(len)) } } /// Constructs a new atomically reference-counted slice with uninitialized contents, with the memory being /// filled with `0` bytes. /// /// See [`MaybeUninit::zeroed`][zeroed] for examples of correct and /// incorrect usage of this method. /// /// # Examples /// /// ``` /// #![feature(new_uninit)] /// /// use std::sync::Arc; /// /// let values = Arc::<[u32]>::new_zeroed_slice(3); /// let values = unsafe { values.assume_init() }; /// /// assert_eq!(*values, [0, 0, 0]) /// ``` /// /// [zeroed]: mem::MaybeUninit::zeroed #[cfg(not(no_global_oom_handling))] #[inline] #[unstable(feature = "new_uninit", issue = "63291")] #[must_use] pub fn new_zeroed_slice(len: usize) -> Arc<[mem::MaybeUninit]> { unsafe { Arc::from_ptr(Arc::allocate_for_layout( Layout::array::(len).unwrap(), |layout| Global.allocate_zeroed(layout), |mem| { ptr::slice_from_raw_parts_mut(mem as *mut T, len) as *mut ArcInner<[mem::MaybeUninit]> }, )) } } } impl Arc<[T], A> { /// Constructs a new atomically reference-counted slice with uninitialized contents in the /// provided allocator. /// /// # Examples /// /// ``` /// #![feature(new_uninit)] /// #![feature(get_mut_unchecked)] /// #![feature(allocator_api)] /// /// use std::sync::Arc; /// use std::alloc::System; /// /// let mut values = Arc::<[u32], _>::new_uninit_slice_in(3, System); /// /// let values = unsafe { /// // Deferred initialization: /// Arc::get_mut_unchecked(&mut values)[0].as_mut_ptr().write(1); /// Arc::get_mut_unchecked(&mut values)[1].as_mut_ptr().write(2); /// Arc::get_mut_unchecked(&mut values)[2].as_mut_ptr().write(3); /// /// values.assume_init() /// }; /// /// assert_eq!(*values, [1, 2, 3]) /// ``` #[cfg(not(no_global_oom_handling))] #[unstable(feature = "new_uninit", issue = "63291")] #[inline] pub fn new_uninit_slice_in(len: usize, alloc: A) -> Arc<[mem::MaybeUninit], A> { unsafe { Arc::from_ptr_in(Arc::allocate_for_slice_in(len, &alloc), alloc) } } /// Constructs a new atomically reference-counted slice with uninitialized contents, with the memory being /// filled with `0` bytes, in the provided allocator. /// /// See [`MaybeUninit::zeroed`][zeroed] for examples of correct and /// incorrect usage of this method. /// /// # Examples /// /// ``` /// #![feature(new_uninit)] /// #![feature(allocator_api)] /// /// use std::sync::Arc; /// use std::alloc::System; /// /// let values = Arc::<[u32], _>::new_zeroed_slice_in(3, System); /// let values = unsafe { values.assume_init() }; /// /// assert_eq!(*values, [0, 0, 0]) /// ``` /// /// [zeroed]: mem::MaybeUninit::zeroed #[cfg(not(no_global_oom_handling))] #[unstable(feature = "new_uninit", issue = "63291")] #[inline] pub fn new_zeroed_slice_in(len: usize, alloc: A) -> Arc<[mem::MaybeUninit], A> { unsafe { Arc::from_ptr_in( Arc::allocate_for_layout( Layout::array::(len).unwrap(), |layout| alloc.allocate_zeroed(layout), |mem| { ptr::slice_from_raw_parts_mut(mem.cast::(), len) as *mut ArcInner<[mem::MaybeUninit]> }, ), alloc, ) } } } impl Arc, A> { /// Converts to `Arc`. /// /// # Safety /// /// As with [`MaybeUninit::assume_init`], /// it is up to the caller to guarantee that the inner value /// really is in an initialized state. /// Calling this when the content is not yet fully initialized /// causes immediate undefined behavior. /// /// [`MaybeUninit::assume_init`]: mem::MaybeUninit::assume_init /// /// # Examples /// /// ``` /// #![feature(new_uninit)] /// #![feature(get_mut_unchecked)] /// /// use std::sync::Arc; /// /// let mut five = Arc::::new_uninit(); /// /// // Deferred initialization: /// Arc::get_mut(&mut five).unwrap().write(5); /// /// let five = unsafe { five.assume_init() }; /// /// assert_eq!(*five, 5) /// ``` #[unstable(feature = "new_uninit", issue = "63291")] #[must_use = "`self` will be dropped if the result is not used"] #[inline] pub unsafe fn assume_init(self) -> Arc where A: Clone, { let md_self = mem::ManuallyDrop::new(self); unsafe { Arc::from_inner_in(md_self.ptr.cast(), md_self.alloc.clone()) } } } impl Arc<[mem::MaybeUninit], A> { /// Converts to `Arc<[T]>`. /// /// # Safety /// /// As with [`MaybeUninit::assume_init`], /// it is up to the caller to guarantee that the inner value /// really is in an initialized state. /// Calling this when the content is not yet fully initialized /// causes immediate undefined behavior. /// /// [`MaybeUninit::assume_init`]: mem::MaybeUninit::assume_init /// /// # Examples /// /// ``` /// #![feature(new_uninit)] /// #![feature(get_mut_unchecked)] /// /// use std::sync::Arc; /// /// let mut values = Arc::<[u32]>::new_uninit_slice(3); /// /// // Deferred initialization: /// let data = Arc::get_mut(&mut values).unwrap(); /// data[0].write(1); /// data[1].write(2); /// data[2].write(3); /// /// let values = unsafe { values.assume_init() }; /// /// assert_eq!(*values, [1, 2, 3]) /// ``` #[unstable(feature = "new_uninit", issue = "63291")] #[must_use = "`self` will be dropped if the result is not used"] #[inline] pub unsafe fn assume_init(self) -> Arc<[T], A> where A: Clone, { let md_self = mem::ManuallyDrop::new(self); unsafe { Arc::from_ptr_in(md_self.ptr.as_ptr() as _, md_self.alloc.clone()) } } } impl Arc { /// Constructs an `Arc` from a raw pointer. /// /// The raw pointer must have been previously returned by a call to /// [`Arc::into_raw`][into_raw] where `U` must have the same size and /// alignment as `T`. This is trivially true if `U` is `T`. /// Note that if `U` is not `T` but has the same size and alignment, this is /// basically like transmuting references of different types. See /// [`mem::transmute`][transmute] for more information on what /// restrictions apply in this case. /// /// The user of `from_raw` has to make sure a specific value of `T` is only /// dropped once. /// /// This function is unsafe because improper use may lead to memory unsafety, /// even if the returned `Arc` is never accessed. /// /// [into_raw]: Arc::into_raw /// [transmute]: core::mem::transmute /// /// # Examples /// /// ``` /// use std::sync::Arc; /// /// let x = Arc::new("hello".to_owned()); /// let x_ptr = Arc::into_raw(x); /// /// unsafe { /// // Convert back to an `Arc` to prevent leak. /// let x = Arc::from_raw(x_ptr); /// assert_eq!(&*x, "hello"); /// /// // Further calls to `Arc::from_raw(x_ptr)` would be memory-unsafe. /// } /// /// // The memory was freed when `x` went out of scope above, so `x_ptr` is now dangling! /// ``` #[inline] #[stable(feature = "rc_raw", since = "1.17.0")] pub unsafe fn from_raw(ptr: *const T) -> Self { unsafe { Arc::from_raw_in(ptr, Global) } } /// Increments the strong reference count on the `Arc` associated with the /// provided pointer by one. /// /// # Safety /// /// The pointer must have been obtained through `Arc::into_raw`, and the /// associated `Arc` instance must be valid (i.e. the strong count must be at /// least 1) for the duration of this method. /// /// # Examples /// /// ``` /// use std::sync::Arc; /// /// let five = Arc::new(5); /// /// unsafe { /// let ptr = Arc::into_raw(five); /// Arc::increment_strong_count(ptr); /// /// // This assertion is deterministic because we haven't shared /// // the `Arc` between threads. /// let five = Arc::from_raw(ptr); /// assert_eq!(2, Arc::strong_count(&five)); /// } /// ``` #[inline] #[stable(feature = "arc_mutate_strong_count", since = "1.51.0")] pub unsafe fn increment_strong_count(ptr: *const T) { unsafe { Arc::increment_strong_count_in(ptr, Global) } } /// Decrements the strong reference count on the `Arc` associated with the /// provided pointer by one. /// /// # Safety /// /// The pointer must have been obtained through `Arc::into_raw`, and the /// associated `Arc` instance must be valid (i.e. the strong count must be at /// least 1) when invoking this method. This method can be used to release the final /// `Arc` and backing storage, but **should not** be called after the final `Arc` has been /// released. /// /// # Examples /// /// ``` /// use std::sync::Arc; /// /// let five = Arc::new(5); /// /// unsafe { /// let ptr = Arc::into_raw(five); /// Arc::increment_strong_count(ptr); /// /// // Those assertions are deterministic because we haven't shared /// // the `Arc` between threads. /// let five = Arc::from_raw(ptr); /// assert_eq!(2, Arc::strong_count(&five)); /// Arc::decrement_strong_count(ptr); /// assert_eq!(1, Arc::strong_count(&five)); /// } /// ``` #[inline] #[stable(feature = "arc_mutate_strong_count", since = "1.51.0")] pub unsafe fn decrement_strong_count(ptr: *const T) { unsafe { Arc::decrement_strong_count_in(ptr, Global) } } } impl Arc { /// Consumes the `Arc`, returning the wrapped pointer. /// /// To avoid a memory leak the pointer must be converted back to an `Arc` using /// [`Arc::from_raw`]. /// /// # Examples /// /// ``` /// use std::sync::Arc; /// /// let x = Arc::new("hello".to_owned()); /// let x_ptr = Arc::into_raw(x); /// assert_eq!(unsafe { &*x_ptr }, "hello"); /// ``` #[must_use = "losing the pointer will leak memory"] #[stable(feature = "rc_raw", since = "1.17.0")] #[cfg_attr(not(bootstrap), rustc_never_returns_null_ptr)] pub fn into_raw(this: Self) -> *const T { let ptr = Self::as_ptr(&this); mem::forget(this); ptr } /// Provides a raw pointer to the data. /// /// The counts are not affected in any way and the `Arc` is not consumed. The pointer is valid for /// as long as there are strong counts in the `Arc`. /// /// # Examples /// /// ``` /// use std::sync::Arc; /// /// let x = Arc::new("hello".to_owned()); /// let y = Arc::clone(&x); /// let x_ptr = Arc::as_ptr(&x); /// assert_eq!(x_ptr, Arc::as_ptr(&y)); /// assert_eq!(unsafe { &*x_ptr }, "hello"); /// ``` #[must_use] #[stable(feature = "rc_as_ptr", since = "1.45.0")] #[cfg_attr(not(bootstrap), rustc_never_returns_null_ptr)] pub fn as_ptr(this: &Self) -> *const T { let ptr: *mut ArcInner = NonNull::as_ptr(this.ptr); // SAFETY: This cannot go through Deref::deref or RcBoxPtr::inner because // this is required to retain raw/mut provenance such that e.g. `get_mut` can // write through the pointer after the Rc is recovered through `from_raw`. unsafe { ptr::addr_of_mut!((*ptr).data) } } /// Constructs an `Arc` from a raw pointer. /// /// The raw pointer must have been previously returned by a call to /// [`Arc::into_raw`][into_raw] where `U` must have the same size and /// alignment as `T`. This is trivially true if `U` is `T`. /// Note that if `U` is not `T` but has the same size and alignment, this is /// basically like transmuting references of different types. See /// [`mem::transmute`] for more information on what /// restrictions apply in this case. /// /// The raw pointer must point to a block of memory allocated by `alloc` /// /// The user of `from_raw` has to make sure a specific value of `T` is only /// dropped once. /// /// This function is unsafe because improper use may lead to memory unsafety, /// even if the returned `Arc` is never accessed. /// /// [into_raw]: Arc::into_raw /// /// # Examples /// /// ``` /// #![feature(allocator_api)] /// /// use std::sync::Arc; /// use std::alloc::System; /// /// let x = Arc::new_in("hello".to_owned(), System); /// let x_ptr = Arc::into_raw(x); /// /// unsafe { /// // Convert back to an `Arc` to prevent leak. /// let x = Arc::from_raw_in(x_ptr, System); /// assert_eq!(&*x, "hello"); /// /// // Further calls to `Arc::from_raw(x_ptr)` would be memory-unsafe. /// } /// /// // The memory was freed when `x` went out of scope above, so `x_ptr` is now dangling! /// ``` #[inline] #[unstable(feature = "allocator_api", issue = "32838")] pub unsafe fn from_raw_in(ptr: *const T, alloc: A) -> Self { unsafe { let offset = data_offset(ptr); // Reverse the offset to find the original ArcInner. let arc_ptr = ptr.byte_sub(offset) as *mut ArcInner; Self::from_ptr_in(arc_ptr, alloc) } } /// Creates a new [`Weak`] pointer to this allocation. /// /// # Examples /// /// ``` /// use std::sync::Arc; /// /// let five = Arc::new(5); /// /// let weak_five = Arc::downgrade(&five); /// ``` #[must_use = "this returns a new `Weak` pointer, \ without modifying the original `Arc`"] #[stable(feature = "arc_weak", since = "1.4.0")] pub fn downgrade(this: &Self) -> Weak where A: Clone, { // This Relaxed is OK because we're checking the value in the CAS // below. let mut cur = this.inner().weak.load(Relaxed); loop { // check if the weak counter is currently "locked"; if so, spin. if cur == usize::MAX { hint::spin_loop(); cur = this.inner().weak.load(Relaxed); continue; } // We can't allow the refcount to increase much past `MAX_REFCOUNT`. assert!(cur <= MAX_REFCOUNT, "{}", INTERNAL_OVERFLOW_ERROR); // NOTE: this code currently ignores the possibility of overflow // into usize::MAX; in general both Rc and Arc need to be adjusted // to deal with overflow. // Unlike with Clone(), we need this to be an Acquire read to // synchronize with the write coming from `is_unique`, so that the // events prior to that write happen before this read. match this.inner().weak.compare_exchange_weak(cur, cur + 1, Acquire, Relaxed) { Ok(_) => { // Make sure we do not create a dangling Weak debug_assert!(!is_dangling(this.ptr.as_ptr())); return Weak { ptr: this.ptr, alloc: this.alloc.clone() }; } Err(old) => cur = old, } } } /// Gets the number of [`Weak`] pointers to this allocation. /// /// # Safety /// /// This method by itself is safe, but using it correctly requires extra care. /// Another thread can change the weak count at any time, /// including potentially between calling this method and acting on the result. /// /// # Examples /// /// ``` /// use std::sync::Arc; /// /// let five = Arc::new(5); /// let _weak_five = Arc::downgrade(&five); /// /// // This assertion is deterministic because we haven't shared /// // the `Arc` or `Weak` between threads. /// assert_eq!(1, Arc::weak_count(&five)); /// ``` #[inline] #[must_use] #[stable(feature = "arc_counts", since = "1.15.0")] pub fn weak_count(this: &Self) -> usize { let cnt = this.inner().weak.load(Relaxed); // If the weak count is currently locked, the value of the // count was 0 just before taking the lock. if cnt == usize::MAX { 0 } else { cnt - 1 } } /// Gets the number of strong (`Arc`) pointers to this allocation. /// /// # Safety /// /// This method by itself is safe, but using it correctly requires extra care. /// Another thread can change the strong count at any time, /// including potentially between calling this method and acting on the result. /// /// # Examples /// /// ``` /// use std::sync::Arc; /// /// let five = Arc::new(5); /// let _also_five = Arc::clone(&five); /// /// // This assertion is deterministic because we haven't shared /// // the `Arc` between threads. /// assert_eq!(2, Arc::strong_count(&five)); /// ``` #[inline] #[must_use] #[stable(feature = "arc_counts", since = "1.15.0")] pub fn strong_count(this: &Self) -> usize { this.inner().strong.load(Relaxed) } /// Increments the strong reference count on the `Arc` associated with the /// provided pointer by one. /// /// # Safety /// /// The pointer must have been obtained through `Arc::into_raw`, and the /// associated `Arc` instance must be valid (i.e. the strong count must be at /// least 1) for the duration of this method,, and `ptr` must point to a block of memory /// allocated by `alloc`. /// /// # Examples /// /// ``` /// #![feature(allocator_api)] /// /// use std::sync::Arc; /// use std::alloc::System; /// /// let five = Arc::new_in(5, System); /// /// unsafe { /// let ptr = Arc::into_raw(five); /// Arc::increment_strong_count_in(ptr, System); /// /// // This assertion is deterministic because we haven't shared /// // the `Arc` between threads. /// let five = Arc::from_raw_in(ptr, System); /// assert_eq!(2, Arc::strong_count(&five)); /// } /// ``` #[inline] #[unstable(feature = "allocator_api", issue = "32838")] pub unsafe fn increment_strong_count_in(ptr: *const T, alloc: A) where A: Clone, { // Retain Arc, but don't touch refcount by wrapping in ManuallyDrop let arc = unsafe { mem::ManuallyDrop::new(Arc::from_raw_in(ptr, alloc)) }; // Now increase refcount, but don't drop new refcount either let _arc_clone: mem::ManuallyDrop<_> = arc.clone(); } /// Decrements the strong reference count on the `Arc` associated with the /// provided pointer by one. /// /// # Safety /// /// The pointer must have been obtained through `Arc::into_raw`, the /// associated `Arc` instance must be valid (i.e. the strong count must be at /// least 1) when invoking this method, and `ptr` must point to a block of memory /// allocated by `alloc`. This method can be used to release the final /// `Arc` and backing storage, but **should not** be called after the final `Arc` has been /// released. /// /// # Examples /// /// ``` /// #![feature(allocator_api)] /// /// use std::sync::Arc; /// use std::alloc::System; /// /// let five = Arc::new_in(5, System); /// /// unsafe { /// let ptr = Arc::into_raw(five); /// Arc::increment_strong_count_in(ptr, System); /// /// // Those assertions are deterministic because we haven't shared /// // the `Arc` between threads. /// let five = Arc::from_raw_in(ptr, System); /// assert_eq!(2, Arc::strong_count(&five)); /// Arc::decrement_strong_count_in(ptr, System); /// assert_eq!(1, Arc::strong_count(&five)); /// } /// ``` #[inline] #[unstable(feature = "allocator_api", issue = "32838")] pub unsafe fn decrement_strong_count_in(ptr: *const T, alloc: A) { unsafe { drop(Arc::from_raw_in(ptr, alloc)) }; } #[inline] fn inner(&self) -> &ArcInner { // This unsafety is ok because while this arc is alive we're guaranteed // that the inner pointer is valid. Furthermore, we know that the // `ArcInner` structure itself is `Sync` because the inner data is // `Sync` as well, so we're ok loaning out an immutable pointer to these // contents. unsafe { self.ptr.as_ref() } } // Non-inlined part of `drop`. #[inline(never)] unsafe fn drop_slow(&mut self) { // Destroy the data at this time, even though we must not free the box // allocation itself (there might still be weak pointers lying around). unsafe { ptr::drop_in_place(Self::get_mut_unchecked(self)) }; // Drop the weak ref collectively held by all strong references // Take a reference to `self.alloc` instead of cloning because 1. it'll // last long enough, and 2. you should be able to drop `Arc`s with // unclonable allocators drop(Weak { ptr: self.ptr, alloc: &self.alloc }); } /// Returns `true` if the two `Arc`s point to the same allocation in a vein similar to /// [`ptr::eq`]. This function ignores the metadata of `dyn Trait` pointers. /// /// # Examples /// /// ``` /// use std::sync::Arc; /// /// let five = Arc::new(5); /// let same_five = Arc::clone(&five); /// let other_five = Arc::new(5); /// /// assert!(Arc::ptr_eq(&five, &same_five)); /// assert!(!Arc::ptr_eq(&five, &other_five)); /// ``` /// /// [`ptr::eq`]: core::ptr::eq "ptr::eq" #[inline] #[must_use] #[stable(feature = "ptr_eq", since = "1.17.0")] pub fn ptr_eq(this: &Self, other: &Self) -> bool { this.ptr.as_ptr() as *const () == other.ptr.as_ptr() as *const () } } impl Arc { /// Allocates an `ArcInner` with sufficient space for /// a possibly-unsized inner value where the value has the layout provided. /// /// The function `mem_to_arcinner` is called with the data pointer /// and must return back a (potentially fat)-pointer for the `ArcInner`. #[cfg(not(no_global_oom_handling))] unsafe fn allocate_for_layout( value_layout: Layout, allocate: impl FnOnce(Layout) -> Result, AllocError>, mem_to_arcinner: impl FnOnce(*mut u8) -> *mut ArcInner, ) -> *mut ArcInner { let layout = arcinner_layout_for_value_layout(value_layout); let ptr = allocate(layout).unwrap_or_else(|_| handle_alloc_error(layout)); unsafe { Self::initialize_arcinner(ptr, layout, mem_to_arcinner) } } /// Allocates an `ArcInner` with sufficient space for /// a possibly-unsized inner value where the value has the layout provided, /// returning an error if allocation fails. /// /// The function `mem_to_arcinner` is called with the data pointer /// and must return back a (potentially fat)-pointer for the `ArcInner`. unsafe fn try_allocate_for_layout( value_layout: Layout, allocate: impl FnOnce(Layout) -> Result, AllocError>, mem_to_arcinner: impl FnOnce(*mut u8) -> *mut ArcInner, ) -> Result<*mut ArcInner, AllocError> { let layout = arcinner_layout_for_value_layout(value_layout); let ptr = allocate(layout)?; let inner = unsafe { Self::initialize_arcinner(ptr, layout, mem_to_arcinner) }; Ok(inner) } unsafe fn initialize_arcinner( ptr: NonNull<[u8]>, layout: Layout, mem_to_arcinner: impl FnOnce(*mut u8) -> *mut ArcInner, ) -> *mut ArcInner { let inner = mem_to_arcinner(ptr.as_non_null_ptr().as_ptr()); debug_assert_eq!(unsafe { Layout::for_value(&*inner) }, layout); unsafe { ptr::write(&mut (*inner).strong, atomic::AtomicUsize::new(1)); ptr::write(&mut (*inner).weak, atomic::AtomicUsize::new(1)); } inner } } impl Arc { /// Allocates an `ArcInner` with sufficient space for an unsized inner value. #[inline] #[cfg(not(no_global_oom_handling))] unsafe fn allocate_for_ptr_in(ptr: *const T, alloc: &A) -> *mut ArcInner { // Allocate for the `ArcInner` using the given value. unsafe { Arc::allocate_for_layout( Layout::for_value(&*ptr), |layout| alloc.allocate(layout), |mem| mem.with_metadata_of(ptr as *const ArcInner), ) } } #[cfg(not(no_global_oom_handling))] fn from_box_in(src: Box) -> Arc { unsafe { let value_size = size_of_val(&*src); let ptr = Self::allocate_for_ptr_in(&*src, Box::allocator(&src)); // Copy value as bytes ptr::copy_nonoverlapping( &*src as *const T as *const u8, &mut (*ptr).data as *mut _ as *mut u8, value_size, ); // Free the allocation without dropping its contents let (bptr, alloc) = Box::into_raw_with_allocator(src); let src = Box::from_raw(bptr as *mut mem::ManuallyDrop); drop(src); Self::from_ptr_in(ptr, alloc) } } } impl Arc<[T]> { /// Allocates an `ArcInner<[T]>` with the given length. #[cfg(not(no_global_oom_handling))] unsafe fn allocate_for_slice(len: usize) -> *mut ArcInner<[T]> { unsafe { Self::allocate_for_layout( Layout::array::(len).unwrap(), |layout| Global.allocate(layout), |mem| ptr::slice_from_raw_parts_mut(mem.cast::(), len) as *mut ArcInner<[T]>, ) } } /// Copy elements from slice into newly allocated `Arc<[T]>` /// /// Unsafe because the caller must either take ownership or bind `T: Copy`. #[cfg(not(no_global_oom_handling))] unsafe fn copy_from_slice(v: &[T]) -> Arc<[T]> { unsafe { let ptr = Self::allocate_for_slice(v.len()); ptr::copy_nonoverlapping(v.as_ptr(), &mut (*ptr).data as *mut [T] as *mut T, v.len()); Self::from_ptr(ptr) } } /// Constructs an `Arc<[T]>` from an iterator known to be of a certain size. /// /// Behavior is undefined should the size be wrong. #[cfg(not(no_global_oom_handling))] unsafe fn from_iter_exact(iter: impl Iterator, len: usize) -> Arc<[T]> { // Panic guard while cloning T elements. // In the event of a panic, elements that have been written // into the new ArcInner will be dropped, then the memory freed. struct Guard { mem: NonNull, elems: *mut T, layout: Layout, n_elems: usize, } impl Drop for Guard { fn drop(&mut self) { unsafe { let slice = from_raw_parts_mut(self.elems, self.n_elems); ptr::drop_in_place(slice); Global.deallocate(self.mem, self.layout); } } } unsafe { let ptr = Self::allocate_for_slice(len); let mem = ptr as *mut _ as *mut u8; let layout = Layout::for_value(&*ptr); // Pointer to first element let elems = &mut (*ptr).data as *mut [T] as *mut T; let mut guard = Guard { mem: NonNull::new_unchecked(mem), elems, layout, n_elems: 0 }; for (i, item) in iter.enumerate() { ptr::write(elems.add(i), item); guard.n_elems += 1; } // All clear. Forget the guard so it doesn't free the new ArcInner. mem::forget(guard); Self::from_ptr(ptr) } } } impl Arc<[T], A> { /// Allocates an `ArcInner<[T]>` with the given length. #[inline] #[cfg(not(no_global_oom_handling))] unsafe fn allocate_for_slice_in(len: usize, alloc: &A) -> *mut ArcInner<[T]> { unsafe { Arc::allocate_for_layout( Layout::array::(len).unwrap(), |layout| alloc.allocate(layout), |mem| ptr::slice_from_raw_parts_mut(mem.cast::(), len) as *mut ArcInner<[T]>, ) } } } /// Specialization trait used for `From<&[T]>`. #[cfg(not(no_global_oom_handling))] trait ArcFromSlice { fn from_slice(slice: &[T]) -> Self; } #[cfg(not(no_global_oom_handling))] impl ArcFromSlice for Arc<[T]> { #[inline] default fn from_slice(v: &[T]) -> Self { unsafe { Self::from_iter_exact(v.iter().cloned(), v.len()) } } } #[cfg(not(no_global_oom_handling))] impl ArcFromSlice for Arc<[T]> { #[inline] fn from_slice(v: &[T]) -> Self { unsafe { Arc::copy_from_slice(v) } } } #[stable(feature = "rust1", since = "1.0.0")] impl Clone for Arc { /// Makes a clone of the `Arc` pointer. /// /// This creates another pointer to the same allocation, increasing the /// strong reference count. /// /// # Examples /// /// ``` /// use std::sync::Arc; /// /// let five = Arc::new(5); /// /// let _ = Arc::clone(&five); /// ``` #[inline] fn clone(&self) -> Arc { // Using a relaxed ordering is alright here, as knowledge of the // original reference prevents other threads from erroneously deleting // the object. // // As explained in the [Boost documentation][1], Increasing the // reference counter can always be done with memory_order_relaxed: New // references to an object can only be formed from an existing // reference, and passing an existing reference from one thread to // another must already provide any required synchronization. // // [1]: (www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html) let old_size = self.inner().strong.fetch_add(1, Relaxed); // However we need to guard against massive refcounts in case someone is `mem::forget`ing // Arcs. If we don't do this the count can overflow and users will use-after free. This // branch will never be taken in any realistic program. We abort because such a program is // incredibly degenerate, and we don't care to support it. // // This check is not 100% water-proof: we error when the refcount grows beyond `isize::MAX`. // But we do that check *after* having done the increment, so there is a chance here that // the worst already happened and we actually do overflow the `usize` counter. However, that // requires the counter to grow from `isize::MAX` to `usize::MAX` between the increment // above and the `abort` below, which seems exceedingly unlikely. // // This is a global invariant, and also applies when using a compare-exchange loop to increment // counters in other methods. // Otherwise, the counter could be brought to an almost-overflow using a compare-exchange loop, // and then overflow using a few `fetch_add`s. if old_size > MAX_REFCOUNT { abort(); } unsafe { Self::from_inner_in(self.ptr, self.alloc.clone()) } } } #[stable(feature = "rust1", since = "1.0.0")] impl Deref for Arc { type Target = T; #[inline] fn deref(&self) -> &T { &self.inner().data } } #[unstable(feature = "receiver_trait", issue = "none")] impl Receiver for Arc {} impl Arc { /// Makes a mutable reference into the given `Arc`. /// /// If there are other `Arc` pointers to the same allocation, then `make_mut` will /// [`clone`] the inner value to a new allocation to ensure unique ownership. This is also /// referred to as clone-on-write. /// /// However, if there are no other `Arc` pointers to this allocation, but some [`Weak`] /// pointers, then the [`Weak`] pointers will be dissociated and the inner value will not /// be cloned. /// /// See also [`get_mut`], which will fail rather than cloning the inner value /// or dissociating [`Weak`] pointers. /// /// [`clone`]: Clone::clone /// [`get_mut`]: Arc::get_mut /// /// # Examples /// /// ``` /// use std::sync::Arc; /// /// let mut data = Arc::new(5); /// /// *Arc::make_mut(&mut data) += 1; // Won't clone anything /// let mut other_data = Arc::clone(&data); // Won't clone inner data /// *Arc::make_mut(&mut data) += 1; // Clones inner data /// *Arc::make_mut(&mut data) += 1; // Won't clone anything /// *Arc::make_mut(&mut other_data) *= 2; // Won't clone anything /// /// // Now `data` and `other_data` point to different allocations. /// assert_eq!(*data, 8); /// assert_eq!(*other_data, 12); /// ``` /// /// [`Weak`] pointers will be dissociated: /// /// ``` /// use std::sync::Arc; /// /// let mut data = Arc::new(75); /// let weak = Arc::downgrade(&data); /// /// assert!(75 == *data); /// assert!(75 == *weak.upgrade().unwrap()); /// /// *Arc::make_mut(&mut data) += 1; /// /// assert!(76 == *data); /// assert!(weak.upgrade().is_none()); /// ``` #[cfg(not(no_global_oom_handling))] #[inline] #[stable(feature = "arc_unique", since = "1.4.0")] pub fn make_mut(this: &mut Self) -> &mut T { // Note that we hold both a strong reference and a weak reference. // Thus, releasing our strong reference only will not, by itself, cause // the memory to be deallocated. // // Use Acquire to ensure that we see any writes to `weak` that happen // before release writes (i.e., decrements) to `strong`. Since we hold a // weak count, there's no chance the ArcInner itself could be // deallocated. if this.inner().strong.compare_exchange(1, 0, Acquire, Relaxed).is_err() { // Another strong pointer exists, so we must clone. // Pre-allocate memory to allow writing the cloned value directly. let mut arc = Self::new_uninit_in(this.alloc.clone()); unsafe { let data = Arc::get_mut_unchecked(&mut arc); (**this).write_clone_into_raw(data.as_mut_ptr()); *this = arc.assume_init(); } } else if this.inner().weak.load(Relaxed) != 1 { // Relaxed suffices in the above because this is fundamentally an // optimization: we are always racing with weak pointers being // dropped. Worst case, we end up allocated a new Arc unnecessarily. // We removed the last strong ref, but there are additional weak // refs remaining. We'll move the contents to a new Arc, and // invalidate the other weak refs. // Note that it is not possible for the read of `weak` to yield // usize::MAX (i.e., locked), since the weak count can only be // locked by a thread with a strong reference. // Materialize our own implicit weak pointer, so that it can clean // up the ArcInner as needed. let _weak = Weak { ptr: this.ptr, alloc: this.alloc.clone() }; // Can just steal the data, all that's left is Weaks let mut arc = Self::new_uninit_in(this.alloc.clone()); unsafe { let data = Arc::get_mut_unchecked(&mut arc); data.as_mut_ptr().copy_from_nonoverlapping(&**this, 1); ptr::write(this, arc.assume_init()); } } else { // We were the sole reference of either kind; bump back up the // strong ref count. this.inner().strong.store(1, Release); } // As with `get_mut()`, the unsafety is ok because our reference was // either unique to begin with, or became one upon cloning the contents. unsafe { Self::get_mut_unchecked(this) } } /// If we have the only reference to `T` then unwrap it. Otherwise, clone `T` and return the /// clone. /// /// Assuming `arc_t` is of type `Arc`, this function is functionally equivalent to /// `(*arc_t).clone()`, but will avoid cloning the inner value where possible. /// /// # Examples /// /// ``` /// #![feature(arc_unwrap_or_clone)] /// # use std::{ptr, sync::Arc}; /// let inner = String::from("test"); /// let ptr = inner.as_ptr(); /// /// let arc = Arc::new(inner); /// let inner = Arc::unwrap_or_clone(arc); /// // The inner value was not cloned /// assert!(ptr::eq(ptr, inner.as_ptr())); /// /// let arc = Arc::new(inner); /// let arc2 = arc.clone(); /// let inner = Arc::unwrap_or_clone(arc); /// // Because there were 2 references, we had to clone the inner value. /// assert!(!ptr::eq(ptr, inner.as_ptr())); /// // `arc2` is the last reference, so when we unwrap it we get back /// // the original `String`. /// let inner = Arc::unwrap_or_clone(arc2); /// assert!(ptr::eq(ptr, inner.as_ptr())); /// ``` #[inline] #[unstable(feature = "arc_unwrap_or_clone", issue = "93610")] pub fn unwrap_or_clone(this: Self) -> T { Arc::try_unwrap(this).unwrap_or_else(|arc| (*arc).clone()) } } impl Arc { /// Returns a mutable reference into the given `Arc`, if there are /// no other `Arc` or [`Weak`] pointers to the same allocation. /// /// Returns [`None`] otherwise, because it is not safe to /// mutate a shared value. /// /// See also [`make_mut`][make_mut], which will [`clone`][clone] /// the inner value when there are other `Arc` pointers. /// /// [make_mut]: Arc::make_mut /// [clone]: Clone::clone /// /// # Examples /// /// ``` /// use std::sync::Arc; /// /// let mut x = Arc::new(3); /// *Arc::get_mut(&mut x).unwrap() = 4; /// assert_eq!(*x, 4); /// /// let _y = Arc::clone(&x); /// assert!(Arc::get_mut(&mut x).is_none()); /// ``` #[inline] #[stable(feature = "arc_unique", since = "1.4.0")] pub fn get_mut(this: &mut Self) -> Option<&mut T> { if this.is_unique() { // This unsafety is ok because we're guaranteed that the pointer // returned is the *only* pointer that will ever be returned to T. Our // reference count is guaranteed to be 1 at this point, and we required // the Arc itself to be `mut`, so we're returning the only possible // reference to the inner data. unsafe { Some(Arc::get_mut_unchecked(this)) } } else { None } } /// Returns a mutable reference into the given `Arc`, /// without any check. /// /// See also [`get_mut`], which is safe and does appropriate checks. /// /// [`get_mut`]: Arc::get_mut /// /// # Safety /// /// If any other `Arc` or [`Weak`] pointers to the same allocation exist, then /// they must not be dereferenced or have active borrows for the duration /// of the returned borrow, and their inner type must be exactly the same as the /// inner type of this Rc (including lifetimes). This is trivially the case if no /// such pointers exist, for example immediately after `Arc::new`. /// /// # Examples /// /// ``` /// #![feature(get_mut_unchecked)] /// /// use std::sync::Arc; /// /// let mut x = Arc::new(String::new()); /// unsafe { /// Arc::get_mut_unchecked(&mut x).push_str("foo") /// } /// assert_eq!(*x, "foo"); /// ``` /// Other `Arc` pointers to the same allocation must be to the same type. /// ```no_run /// #![feature(get_mut_unchecked)] /// /// use std::sync::Arc; /// /// let x: Arc = Arc::from("Hello, world!"); /// let mut y: Arc<[u8]> = x.clone().into(); /// unsafe { /// // this is Undefined Behavior, because x's inner type is str, not [u8] /// Arc::get_mut_unchecked(&mut y).fill(0xff); // 0xff is invalid in UTF-8 /// } /// println!("{}", &*x); // Invalid UTF-8 in a str /// ``` /// Other `Arc` pointers to the same allocation must be to the exact same type, including lifetimes. /// ```no_run /// #![feature(get_mut_unchecked)] /// /// use std::sync::Arc; /// /// let x: Arc<&str> = Arc::new("Hello, world!"); /// { /// let s = String::from("Oh, no!"); /// let mut y: Arc<&str> = x.clone().into(); /// unsafe { /// // this is Undefined Behavior, because x's inner type /// // is &'long str, not &'short str /// *Arc::get_mut_unchecked(&mut y) = &s; /// } /// } /// println!("{}", &*x); // Use-after-free /// ``` #[inline] #[unstable(feature = "get_mut_unchecked", issue = "63292")] pub unsafe fn get_mut_unchecked(this: &mut Self) -> &mut T { // We are careful to *not* create a reference covering the "count" fields, as // this would alias with concurrent access to the reference counts (e.g. by `Weak`). unsafe { &mut (*this.ptr.as_ptr()).data } } /// Determine whether this is the unique reference (including weak refs) to /// the underlying data. /// /// Note that this requires locking the weak ref count. fn is_unique(&mut self) -> bool { // lock the weak pointer count if we appear to be the sole weak pointer // holder. // // The acquire label here ensures a happens-before relationship with any // writes to `strong` (in particular in `Weak::upgrade`) prior to decrements // of the `weak` count (via `Weak::drop`, which uses release). If the upgraded // weak ref was never dropped, the CAS here will fail so we do not care to synchronize. if self.inner().weak.compare_exchange(1, usize::MAX, Acquire, Relaxed).is_ok() { // This needs to be an `Acquire` to synchronize with the decrement of the `strong` // counter in `drop` -- the only access that happens when any but the last reference // is being dropped. let unique = self.inner().strong.load(Acquire) == 1; // The release write here synchronizes with a read in `downgrade`, // effectively preventing the above read of `strong` from happening // after the write. self.inner().weak.store(1, Release); // release the lock unique } else { false } } } #[stable(feature = "rust1", since = "1.0.0")] unsafe impl<#[may_dangle] T: ?Sized, A: Allocator> Drop for Arc { /// Drops the `Arc`. /// /// This will decrement the strong reference count. If the strong reference /// count reaches zero then the only other references (if any) are /// [`Weak`], so we `drop` the inner value. /// /// # Examples /// /// ``` /// use std::sync::Arc; /// /// struct Foo; /// /// impl Drop for Foo { /// fn drop(&mut self) { /// println!("dropped!"); /// } /// } /// /// let foo = Arc::new(Foo); /// let foo2 = Arc::clone(&foo); /// /// drop(foo); // Doesn't print anything /// drop(foo2); // Prints "dropped!" /// ``` #[inline] fn drop(&mut self) { // Because `fetch_sub` is already atomic, we do not need to synchronize // with other threads unless we are going to delete the object. This // same logic applies to the below `fetch_sub` to the `weak` count. if self.inner().strong.fetch_sub(1, Release) != 1 { return; } // This fence is needed to prevent reordering of use of the data and // deletion of the data. Because it is marked `Release`, the decreasing // of the reference count synchronizes with this `Acquire` fence. This // means that use of the data happens before decreasing the reference // count, which happens before this fence, which happens before the // deletion of the data. // // As explained in the [Boost documentation][1], // // > It is important to enforce any possible access to the object in one // > thread (through an existing reference) to *happen before* deleting // > the object in a different thread. This is achieved by a "release" // > operation after dropping a reference (any access to the object // > through this reference must obviously happened before), and an // > "acquire" operation before deleting the object. // // In particular, while the contents of an Arc are usually immutable, it's // possible to have interior writes to something like a Mutex. Since a // Mutex is not acquired when it is deleted, we can't rely on its // synchronization logic to make writes in thread A visible to a destructor // running in thread B. // // Also note that the Acquire fence here could probably be replaced with an // Acquire load, which could improve performance in highly-contended // situations. See [2]. // // [1]: (www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html) // [2]: (https://github.com/rust-lang/rust/pull/41714) acquire!(self.inner().strong); unsafe { self.drop_slow(); } } } impl Arc { /// Attempt to downcast the `Arc` to a concrete type. /// /// # Examples /// /// ``` /// use std::any::Any; /// use std::sync::Arc; /// /// fn print_if_string(value: Arc) { /// if let Ok(string) = value.downcast::() { /// println!("String ({}): {}", string.len(), string); /// } /// } /// /// let my_string = "Hello World".to_string(); /// print_if_string(Arc::new(my_string)); /// print_if_string(Arc::new(0i8)); /// ``` #[inline] #[stable(feature = "rc_downcast", since = "1.29.0")] pub fn downcast(self) -> Result, Self> where T: Any + Send + Sync, { if (*self).is::() { unsafe { let ptr = self.ptr.cast::>(); let alloc = self.alloc.clone(); mem::forget(self); Ok(Arc::from_inner_in(ptr, alloc)) } } else { Err(self) } } /// Downcasts the `Arc` to a concrete type. /// /// For a safe alternative see [`downcast`]. /// /// # Examples /// /// ``` /// #![feature(downcast_unchecked)] /// /// use std::any::Any; /// use std::sync::Arc; /// /// let x: Arc = Arc::new(1_usize); /// /// unsafe { /// assert_eq!(*x.downcast_unchecked::(), 1); /// } /// ``` /// /// # Safety /// /// The contained value must be of type `T`. Calling this method /// with the incorrect type is *undefined behavior*. /// /// /// [`downcast`]: Self::downcast #[inline] #[unstable(feature = "downcast_unchecked", issue = "90850")] pub unsafe fn downcast_unchecked(self) -> Arc where T: Any + Send + Sync, { unsafe { let ptr = self.ptr.cast::>(); let alloc = self.alloc.clone(); mem::forget(self); Arc::from_inner_in(ptr, alloc) } } } impl Weak { /// Constructs a new `Weak`, without allocating any memory. /// Calling [`upgrade`] on the return value always gives [`None`]. /// /// [`upgrade`]: Weak::upgrade /// /// # Examples /// /// ``` /// use std::sync::Weak; /// /// let empty: Weak = Weak::new(); /// assert!(empty.upgrade().is_none()); /// ``` #[inline] #[stable(feature = "downgraded_weak", since = "1.10.0")] #[rustc_const_stable(feature = "const_weak_new", since = "1.73.0")] #[must_use] pub const fn new() -> Weak { Weak { ptr: unsafe { NonNull::new_unchecked(ptr::invalid_mut::>(usize::MAX)) }, alloc: Global, } } } impl Weak { /// Constructs a new `Weak`, without allocating any memory, technically in the provided /// allocator. /// Calling [`upgrade`] on the return value always gives [`None`]. /// /// [`upgrade`]: Weak::upgrade /// /// # Examples /// /// ``` /// #![feature(allocator_api)] /// /// use std::sync::Weak; /// use std::alloc::System; /// /// let empty: Weak = Weak::new_in(System); /// assert!(empty.upgrade().is_none()); /// ``` #[inline] #[unstable(feature = "allocator_api", issue = "32838")] pub fn new_in(alloc: A) -> Weak { Weak { ptr: unsafe { NonNull::new_unchecked(ptr::invalid_mut::>(usize::MAX)) }, alloc, } } } /// Helper type to allow accessing the reference counts without /// making any assertions about the data field. struct WeakInner<'a> { weak: &'a atomic::AtomicUsize, strong: &'a atomic::AtomicUsize, } impl Weak { /// Converts a raw pointer previously created by [`into_raw`] back into `Weak`. /// /// This can be used to safely get a strong reference (by calling [`upgrade`] /// later) or to deallocate the weak count by dropping the `Weak`. /// /// It takes ownership of one weak reference (with the exception of pointers created by [`new`], /// as these don't own anything; the method still works on them). /// /// # Safety /// /// The pointer must have originated from the [`into_raw`] and must still own its potential /// weak reference. /// /// It is allowed for the strong count to be 0 at the time of calling this. Nevertheless, this /// takes ownership of one weak reference currently represented as a raw pointer (the weak /// count is not modified by this operation) and therefore it must be paired with a previous /// call to [`into_raw`]. /// # Examples /// /// ``` /// use std::sync::{Arc, Weak}; /// /// let strong = Arc::new("hello".to_owned()); /// /// let raw_1 = Arc::downgrade(&strong).into_raw(); /// let raw_2 = Arc::downgrade(&strong).into_raw(); /// /// assert_eq!(2, Arc::weak_count(&strong)); /// /// assert_eq!("hello", &*unsafe { Weak::from_raw(raw_1) }.upgrade().unwrap()); /// assert_eq!(1, Arc::weak_count(&strong)); /// /// drop(strong); /// /// // Decrement the last weak count. /// assert!(unsafe { Weak::from_raw(raw_2) }.upgrade().is_none()); /// ``` /// /// [`new`]: Weak::new /// [`into_raw`]: Weak::into_raw /// [`upgrade`]: Weak::upgrade #[inline] #[stable(feature = "weak_into_raw", since = "1.45.0")] pub unsafe fn from_raw(ptr: *const T) -> Self { unsafe { Weak::from_raw_in(ptr, Global) } } } impl Weak { /// Returns a raw pointer to the object `T` pointed to by this `Weak`. /// /// The pointer is valid only if there are some strong references. The pointer may be dangling, /// unaligned or even [`null`] otherwise. /// /// # Examples /// /// ``` /// use std::sync::Arc; /// use std::ptr; /// /// let strong = Arc::new("hello".to_owned()); /// let weak = Arc::downgrade(&strong); /// // Both point to the same object /// assert!(ptr::eq(&*strong, weak.as_ptr())); /// // The strong here keeps it alive, so we can still access the object. /// assert_eq!("hello", unsafe { &*weak.as_ptr() }); /// /// drop(strong); /// // But not any more. We can do weak.as_ptr(), but accessing the pointer would lead to /// // undefined behaviour. /// // assert_eq!("hello", unsafe { &*weak.as_ptr() }); /// ``` /// /// [`null`]: core::ptr::null "ptr::null" #[must_use] #[stable(feature = "weak_into_raw", since = "1.45.0")] pub fn as_ptr(&self) -> *const T { let ptr: *mut ArcInner = NonNull::as_ptr(self.ptr); if is_dangling(ptr) { // If the pointer is dangling, we return the sentinel directly. This cannot be // a valid payload address, as the payload is at least as aligned as ArcInner (usize). ptr as *const T } else { // SAFETY: if is_dangling returns false, then the pointer is dereferenceable. // The payload may be dropped at this point, and we have to maintain provenance, // so use raw pointer manipulation. unsafe { ptr::addr_of_mut!((*ptr).data) } } } /// Consumes the `Weak` and turns it into a raw pointer. /// /// This converts the weak pointer into a raw pointer, while still preserving the ownership of /// one weak reference (the weak count is not modified by this operation). It can be turned /// back into the `Weak` with [`from_raw`]. /// /// The same restrictions of accessing the target of the pointer as with /// [`as_ptr`] apply. /// /// # Examples /// /// ``` /// use std::sync::{Arc, Weak}; /// /// let strong = Arc::new("hello".to_owned()); /// let weak = Arc::downgrade(&strong); /// let raw = weak.into_raw(); /// /// assert_eq!(1, Arc::weak_count(&strong)); /// assert_eq!("hello", unsafe { &*raw }); /// /// drop(unsafe { Weak::from_raw(raw) }); /// assert_eq!(0, Arc::weak_count(&strong)); /// ``` /// /// [`from_raw`]: Weak::from_raw /// [`as_ptr`]: Weak::as_ptr #[must_use = "`self` will be dropped if the result is not used"] #[stable(feature = "weak_into_raw", since = "1.45.0")] pub fn into_raw(self) -> *const T { let result = self.as_ptr(); mem::forget(self); result } /// Converts a raw pointer previously created by [`into_raw`] back into `Weak` in the provided /// allocator. /// /// This can be used to safely get a strong reference (by calling [`upgrade`] /// later) or to deallocate the weak count by dropping the `Weak`. /// /// It takes ownership of one weak reference (with the exception of pointers created by [`new`], /// as these don't own anything; the method still works on them). /// /// # Safety /// /// The pointer must have originated from the [`into_raw`] and must still own its potential /// weak reference, and must point to a block of memory allocated by `alloc`. /// /// It is allowed for the strong count to be 0 at the time of calling this. Nevertheless, this /// takes ownership of one weak reference currently represented as a raw pointer (the weak /// count is not modified by this operation) and therefore it must be paired with a previous /// call to [`into_raw`]. /// # Examples /// /// ``` /// use std::sync::{Arc, Weak}; /// /// let strong = Arc::new("hello".to_owned()); /// /// let raw_1 = Arc::downgrade(&strong).into_raw(); /// let raw_2 = Arc::downgrade(&strong).into_raw(); /// /// assert_eq!(2, Arc::weak_count(&strong)); /// /// assert_eq!("hello", &*unsafe { Weak::from_raw(raw_1) }.upgrade().unwrap()); /// assert_eq!(1, Arc::weak_count(&strong)); /// /// drop(strong); /// /// // Decrement the last weak count. /// assert!(unsafe { Weak::from_raw(raw_2) }.upgrade().is_none()); /// ``` /// /// [`new`]: Weak::new /// [`into_raw`]: Weak::into_raw /// [`upgrade`]: Weak::upgrade #[inline] #[unstable(feature = "allocator_api", issue = "32838")] pub unsafe fn from_raw_in(ptr: *const T, alloc: A) -> Self { // See Weak::as_ptr for context on how the input pointer is derived. let ptr = if is_dangling(ptr as *mut T) { // This is a dangling Weak. ptr as *mut ArcInner } else { // Otherwise, we're guaranteed the pointer came from a nondangling Weak. // SAFETY: data_offset is safe to call, as ptr references a real (potentially dropped) T. let offset = unsafe { data_offset(ptr) }; // Thus, we reverse the offset to get the whole RcBox. // SAFETY: the pointer originated from a Weak, so this offset is safe. unsafe { ptr.byte_sub(offset) as *mut ArcInner } }; // SAFETY: we now have recovered the original Weak pointer, so can create the Weak. Weak { ptr: unsafe { NonNull::new_unchecked(ptr) }, alloc } } } impl Weak { /// Attempts to upgrade the `Weak` pointer to an [`Arc`], delaying /// dropping of the inner value if successful. /// /// Returns [`None`] if the inner value has since been dropped. /// /// # Examples /// /// ``` /// use std::sync::Arc; /// /// let five = Arc::new(5); /// /// let weak_five = Arc::downgrade(&five); /// /// let strong_five: Option> = weak_five.upgrade(); /// assert!(strong_five.is_some()); /// /// // Destroy all strong pointers. /// drop(strong_five); /// drop(five); /// /// assert!(weak_five.upgrade().is_none()); /// ``` #[must_use = "this returns a new `Arc`, \ without modifying the original weak pointer"] #[stable(feature = "arc_weak", since = "1.4.0")] pub fn upgrade(&self) -> Option> where A: Clone, { #[inline] fn checked_increment(n: usize) -> Option { // Any write of 0 we can observe leaves the field in permanently zero state. if n == 0 { return None; } // See comments in `Arc::clone` for why we do this (for `mem::forget`). assert!(n <= MAX_REFCOUNT, "{}", INTERNAL_OVERFLOW_ERROR); Some(n + 1) } // We use a CAS loop to increment the strong count instead of a // fetch_add as this function should never take the reference count // from zero to one. // // Relaxed is fine for the failure case because we don't have any expectations about the new state. // Acquire is necessary for the success case to synchronise with `Arc::new_cyclic`, when the inner // value can be initialized after `Weak` references have already been created. In that case, we // expect to observe the fully initialized value. if self.inner()?.strong.fetch_update(Acquire, Relaxed, checked_increment).is_ok() { // SAFETY: pointer is not null, verified in checked_increment unsafe { Some(Arc::from_inner_in(self.ptr, self.alloc.clone())) } } else { None } } /// Gets the number of strong (`Arc`) pointers pointing to this allocation. /// /// If `self` was created using [`Weak::new`], this will return 0. #[must_use] #[stable(feature = "weak_counts", since = "1.41.0")] pub fn strong_count(&self) -> usize { if let Some(inner) = self.inner() { inner.strong.load(Relaxed) } else { 0 } } /// Gets an approximation of the number of `Weak` pointers pointing to this /// allocation. /// /// If `self` was created using [`Weak::new`], or if there are no remaining /// strong pointers, this will return 0. /// /// # Accuracy /// /// Due to implementation details, the returned value can be off by 1 in /// either direction when other threads are manipulating any `Arc`s or /// `Weak`s pointing to the same allocation. #[must_use] #[stable(feature = "weak_counts", since = "1.41.0")] pub fn weak_count(&self) -> usize { if let Some(inner) = self.inner() { let weak = inner.weak.load(Acquire); let strong = inner.strong.load(Relaxed); if strong == 0 { 0 } else { // Since we observed that there was at least one strong pointer // after reading the weak count, we know that the implicit weak // reference (present whenever any strong references are alive) // was still around when we observed the weak count, and can // therefore safely subtract it. weak - 1 } } else { 0 } } /// Returns `None` when the pointer is dangling and there is no allocated `ArcInner`, /// (i.e., when this `Weak` was created by `Weak::new`). #[inline] fn inner(&self) -> Option> { if is_dangling(self.ptr.as_ptr()) { None } else { // We are careful to *not* create a reference covering the "data" field, as // the field may be mutated concurrently (for example, if the last `Arc` // is dropped, the data field will be dropped in-place). Some(unsafe { let ptr = self.ptr.as_ptr(); WeakInner { strong: &(*ptr).strong, weak: &(*ptr).weak } }) } } /// Returns `true` if the two `Weak`s point to the same allocation similar to [`ptr::eq`], or if /// both don't point to any allocation (because they were created with `Weak::new()`). However, /// this function ignores the metadata of `dyn Trait` pointers. /// /// # Notes /// /// Since this compares pointers it means that `Weak::new()` will equal each /// other, even though they don't point to any allocation. /// /// # Examples /// /// ``` /// use std::sync::Arc; /// /// let first_rc = Arc::new(5); /// let first = Arc::downgrade(&first_rc); /// let second = Arc::downgrade(&first_rc); /// /// assert!(first.ptr_eq(&second)); /// /// let third_rc = Arc::new(5); /// let third = Arc::downgrade(&third_rc); /// /// assert!(!first.ptr_eq(&third)); /// ``` /// /// Comparing `Weak::new`. /// /// ``` /// use std::sync::{Arc, Weak}; /// /// let first = Weak::new(); /// let second = Weak::new(); /// assert!(first.ptr_eq(&second)); /// /// let third_rc = Arc::new(()); /// let third = Arc::downgrade(&third_rc); /// assert!(!first.ptr_eq(&third)); /// ``` /// /// [`ptr::eq`]: core::ptr::eq "ptr::eq" #[inline] #[must_use] #[stable(feature = "weak_ptr_eq", since = "1.39.0")] pub fn ptr_eq(&self, other: &Self) -> bool { ptr::eq(self.ptr.as_ptr() as *const (), other.ptr.as_ptr() as *const ()) } } #[stable(feature = "arc_weak", since = "1.4.0")] impl Clone for Weak { /// Makes a clone of the `Weak` pointer that points to the same allocation. /// /// # Examples /// /// ``` /// use std::sync::{Arc, Weak}; /// /// let weak_five = Arc::downgrade(&Arc::new(5)); /// /// let _ = Weak::clone(&weak_five); /// ``` #[inline] fn clone(&self) -> Weak { let inner = if let Some(inner) = self.inner() { inner } else { return Weak { ptr: self.ptr, alloc: self.alloc.clone() }; }; // See comments in Arc::clone() for why this is relaxed. This can use a // fetch_add (ignoring the lock) because the weak count is only locked // where are *no other* weak pointers in existence. (So we can't be // running this code in that case). let old_size = inner.weak.fetch_add(1, Relaxed); // See comments in Arc::clone() for why we do this (for mem::forget). if old_size > MAX_REFCOUNT { abort(); } Weak { ptr: self.ptr, alloc: self.alloc.clone() } } } #[stable(feature = "downgraded_weak", since = "1.10.0")] impl Default for Weak { /// Constructs a new `Weak`, without allocating memory. /// Calling [`upgrade`] on the return value always /// gives [`None`]. /// /// [`upgrade`]: Weak::upgrade /// /// # Examples /// /// ``` /// use std::sync::Weak; /// /// let empty: Weak = Default::default(); /// assert!(empty.upgrade().is_none()); /// ``` fn default() -> Weak { Weak::new() } } #[stable(feature = "arc_weak", since = "1.4.0")] unsafe impl<#[may_dangle] T: ?Sized, A: Allocator> Drop for Weak { /// Drops the `Weak` pointer. /// /// # Examples /// /// ``` /// use std::sync::{Arc, Weak}; /// /// struct Foo; /// /// impl Drop for Foo { /// fn drop(&mut self) { /// println!("dropped!"); /// } /// } /// /// let foo = Arc::new(Foo); /// let weak_foo = Arc::downgrade(&foo); /// let other_weak_foo = Weak::clone(&weak_foo); /// /// drop(weak_foo); // Doesn't print anything /// drop(foo); // Prints "dropped!" /// /// assert!(other_weak_foo.upgrade().is_none()); /// ``` fn drop(&mut self) { // If we find out that we were the last weak pointer, then its time to // deallocate the data entirely. See the discussion in Arc::drop() about // the memory orderings // // It's not necessary to check for the locked state here, because the // weak count can only be locked if there was precisely one weak ref, // meaning that drop could only subsequently run ON that remaining weak // ref, which can only happen after the lock is released. let inner = if let Some(inner) = self.inner() { inner } else { return }; if inner.weak.fetch_sub(1, Release) == 1 { acquire!(inner.weak); unsafe { self.alloc.deallocate(self.ptr.cast(), Layout::for_value_raw(self.ptr.as_ptr())) } } } } #[stable(feature = "rust1", since = "1.0.0")] trait ArcEqIdent { fn eq(&self, other: &Arc) -> bool; fn ne(&self, other: &Arc) -> bool; } #[stable(feature = "rust1", since = "1.0.0")] impl ArcEqIdent for Arc { #[inline] default fn eq(&self, other: &Arc) -> bool { **self == **other } #[inline] default fn ne(&self, other: &Arc) -> bool { **self != **other } } /// We're doing this specialization here, and not as a more general optimization on `&T`, because it /// would otherwise add a cost to all equality checks on refs. We assume that `Arc`s are used to /// store large values, that are slow to clone, but also heavy to check for equality, causing this /// cost to pay off more easily. It's also more likely to have two `Arc` clones, that point to /// the same value, than two `&T`s. /// /// We can only do this when `T: Eq` as a `PartialEq` might be deliberately irreflexive. #[stable(feature = "rust1", since = "1.0.0")] impl ArcEqIdent for Arc { #[inline] fn eq(&self, other: &Arc) -> bool { Arc::ptr_eq(self, other) || **self == **other } #[inline] fn ne(&self, other: &Arc) -> bool { !Arc::ptr_eq(self, other) && **self != **other } } #[stable(feature = "rust1", since = "1.0.0")] impl PartialEq for Arc { /// Equality for two `Arc`s. /// /// Two `Arc`s are equal if their inner values are equal, even if they are /// stored in different allocation. /// /// If `T` also implements `Eq` (implying reflexivity of equality), /// two `Arc`s that point to the same allocation are always equal. /// /// # Examples /// /// ``` /// use std::sync::Arc; /// /// let five = Arc::new(5); /// /// assert!(five == Arc::new(5)); /// ``` #[inline] fn eq(&self, other: &Arc) -> bool { ArcEqIdent::eq(self, other) } /// Inequality for two `Arc`s. /// /// Two `Arc`s are not equal if their inner values are not equal. /// /// If `T` also implements `Eq` (implying reflexivity of equality), /// two `Arc`s that point to the same value are always equal. /// /// # Examples /// /// ``` /// use std::sync::Arc; /// /// let five = Arc::new(5); /// /// assert!(five != Arc::new(6)); /// ``` #[inline] fn ne(&self, other: &Arc) -> bool { ArcEqIdent::ne(self, other) } } #[stable(feature = "rust1", since = "1.0.0")] impl PartialOrd for Arc { /// Partial comparison for two `Arc`s. /// /// The two are compared by calling `partial_cmp()` on their inner values. /// /// # Examples /// /// ``` /// use std::sync::Arc; /// use std::cmp::Ordering; /// /// let five = Arc::new(5); /// /// assert_eq!(Some(Ordering::Less), five.partial_cmp(&Arc::new(6))); /// ``` fn partial_cmp(&self, other: &Arc) -> Option { (**self).partial_cmp(&**other) } /// Less-than comparison for two `Arc`s. /// /// The two are compared by calling `<` on their inner values. /// /// # Examples /// /// ``` /// use std::sync::Arc; /// /// let five = Arc::new(5); /// /// assert!(five < Arc::new(6)); /// ``` fn lt(&self, other: &Arc) -> bool { *(*self) < *(*other) } /// 'Less than or equal to' comparison for two `Arc`s. /// /// The two are compared by calling `<=` on their inner values. /// /// # Examples /// /// ``` /// use std::sync::Arc; /// /// let five = Arc::new(5); /// /// assert!(five <= Arc::new(5)); /// ``` fn le(&self, other: &Arc) -> bool { *(*self) <= *(*other) } /// Greater-than comparison for two `Arc`s. /// /// The two are compared by calling `>` on their inner values. /// /// # Examples /// /// ``` /// use std::sync::Arc; /// /// let five = Arc::new(5); /// /// assert!(five > Arc::new(4)); /// ``` fn gt(&self, other: &Arc) -> bool { *(*self) > *(*other) } /// 'Greater than or equal to' comparison for two `Arc`s. /// /// The two are compared by calling `>=` on their inner values. /// /// # Examples /// /// ``` /// use std::sync::Arc; /// /// let five = Arc::new(5); /// /// assert!(five >= Arc::new(5)); /// ``` fn ge(&self, other: &Arc) -> bool { *(*self) >= *(*other) } } #[stable(feature = "rust1", since = "1.0.0")] impl Ord for Arc { /// Comparison for two `Arc`s. /// /// The two are compared by calling `cmp()` on their inner values. /// /// # Examples /// /// ``` /// use std::sync::Arc; /// use std::cmp::Ordering; /// /// let five = Arc::new(5); /// /// assert_eq!(Ordering::Less, five.cmp(&Arc::new(6))); /// ``` fn cmp(&self, other: &Arc) -> Ordering { (**self).cmp(&**other) } } #[stable(feature = "rust1", since = "1.0.0")] impl Eq for Arc {} #[stable(feature = "rust1", since = "1.0.0")] impl fmt::Display for Arc { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { fmt::Display::fmt(&**self, f) } } #[stable(feature = "rust1", since = "1.0.0")] impl fmt::Debug for Arc { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { fmt::Debug::fmt(&**self, f) } } #[stable(feature = "rust1", since = "1.0.0")] impl fmt::Pointer for Arc { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { fmt::Pointer::fmt(&(&**self as *const T), f) } } #[cfg(not(no_global_oom_handling))] #[stable(feature = "rust1", since = "1.0.0")] impl Default for Arc { /// Creates a new `Arc`, with the `Default` value for `T`. /// /// # Examples /// /// ``` /// use std::sync::Arc; /// /// let x: Arc = Default::default(); /// assert_eq!(*x, 0); /// ``` fn default() -> Arc { Arc::new(Default::default()) } } #[stable(feature = "rust1", since = "1.0.0")] impl Hash for Arc { fn hash(&self, state: &mut H) { (**self).hash(state) } } #[cfg(not(no_global_oom_handling))] #[stable(feature = "from_for_ptrs", since = "1.6.0")] impl From for Arc { /// Converts a `T` into an `Arc` /// /// The conversion moves the value into a /// newly allocated `Arc`. It is equivalent to /// calling `Arc::new(t)`. /// /// # Example /// ```rust /// # use std::sync::Arc; /// let x = 5; /// let arc = Arc::new(5); /// /// assert_eq!(Arc::from(x), arc); /// ``` fn from(t: T) -> Self { Arc::new(t) } } #[cfg(not(no_global_oom_handling))] #[stable(feature = "shared_from_array", since = "1.74.0")] impl From<[T; N]> for Arc<[T]> { /// Converts a [`[T; N]`](prim@array) into an `Arc<[T]>`. /// /// The conversion moves the array into a newly allocated `Arc`. /// /// # Example /// /// ``` /// # use std::sync::Arc; /// let original: [i32; 3] = [1, 2, 3]; /// let shared: Arc<[i32]> = Arc::from(original); /// assert_eq!(&[1, 2, 3], &shared[..]); /// ``` #[inline] fn from(v: [T; N]) -> Arc<[T]> { Arc::<[T; N]>::from(v) } } #[cfg(not(no_global_oom_handling))] #[stable(feature = "shared_from_slice", since = "1.21.0")] impl From<&[T]> for Arc<[T]> { /// Allocate a reference-counted slice and fill it by cloning `v`'s items. /// /// # Example /// /// ``` /// # use std::sync::Arc; /// let original: &[i32] = &[1, 2, 3]; /// let shared: Arc<[i32]> = Arc::from(original); /// assert_eq!(&[1, 2, 3], &shared[..]); /// ``` #[inline] fn from(v: &[T]) -> Arc<[T]> { >::from_slice(v) } } #[cfg(not(no_global_oom_handling))] #[stable(feature = "shared_from_slice", since = "1.21.0")] impl From<&str> for Arc { /// Allocate a reference-counted `str` and copy `v` into it. /// /// # Example /// /// ``` /// # use std::sync::Arc; /// let shared: Arc = Arc::from("eggplant"); /// assert_eq!("eggplant", &shared[..]); /// ``` #[inline] fn from(v: &str) -> Arc { let arc = Arc::<[u8]>::from(v.as_bytes()); unsafe { Arc::from_raw(Arc::into_raw(arc) as *const str) } } } #[cfg(not(no_global_oom_handling))] #[stable(feature = "shared_from_slice", since = "1.21.0")] impl From for Arc { /// Allocate a reference-counted `str` and copy `v` into it. /// /// # Example /// /// ``` /// # use std::sync::Arc; /// let unique: String = "eggplant".to_owned(); /// let shared: Arc = Arc::from(unique); /// assert_eq!("eggplant", &shared[..]); /// ``` #[inline] fn from(v: String) -> Arc { Arc::from(&v[..]) } } #[cfg(not(no_global_oom_handling))] #[stable(feature = "shared_from_slice", since = "1.21.0")] impl From> for Arc { /// Move a boxed object to a new, reference-counted allocation. /// /// # Example /// /// ``` /// # use std::sync::Arc; /// let unique: Box = Box::from("eggplant"); /// let shared: Arc = Arc::from(unique); /// assert_eq!("eggplant", &shared[..]); /// ``` #[inline] fn from(v: Box) -> Arc { Arc::from_box_in(v) } } #[cfg(not(no_global_oom_handling))] #[stable(feature = "shared_from_slice", since = "1.21.0")] impl From> for Arc<[T], A> { /// Allocate a reference-counted slice and move `v`'s items into it. /// /// # Example /// /// ``` /// # use std::sync::Arc; /// let unique: Vec = vec![1, 2, 3]; /// let shared: Arc<[i32]> = Arc::from(unique); /// assert_eq!(&[1, 2, 3], &shared[..]); /// ``` #[inline] fn from(v: Vec) -> Arc<[T], A> { unsafe { let (vec_ptr, len, cap, alloc) = v.into_raw_parts_with_alloc(); let rc_ptr = Self::allocate_for_slice_in(len, &alloc); ptr::copy_nonoverlapping(vec_ptr, &mut (*rc_ptr).data as *mut [T] as *mut T, len); // Create a `Vec` with length 0, to deallocate the buffer // without dropping its contents or the allocator let _ = Vec::from_raw_parts_in(vec_ptr, 0, cap, &alloc); Self::from_ptr_in(rc_ptr, alloc) } } } #[stable(feature = "shared_from_cow", since = "1.45.0")] impl<'a, B> From> for Arc where B: ToOwned + ?Sized, Arc: From<&'a B> + From, { /// Create an atomically reference-counted pointer from /// a clone-on-write pointer by copying its content. /// /// # Example /// /// ```rust /// # use std::sync::Arc; /// # use std::borrow::Cow; /// let cow: Cow<'_, str> = Cow::Borrowed("eggplant"); /// let shared: Arc = Arc::from(cow); /// assert_eq!("eggplant", &shared[..]); /// ``` #[inline] fn from(cow: Cow<'a, B>) -> Arc { match cow { Cow::Borrowed(s) => Arc::from(s), Cow::Owned(s) => Arc::from(s), } } } #[stable(feature = "shared_from_str", since = "1.62.0")] impl From> for Arc<[u8]> { /// Converts an atomically reference-counted string slice into a byte slice. /// /// # Example /// /// ``` /// # use std::sync::Arc; /// let string: Arc = Arc::from("eggplant"); /// let bytes: Arc<[u8]> = Arc::from(string); /// assert_eq!("eggplant".as_bytes(), bytes.as_ref()); /// ``` #[inline] fn from(rc: Arc) -> Self { // SAFETY: `str` has the same layout as `[u8]`. unsafe { Arc::from_raw(Arc::into_raw(rc) as *const [u8]) } } } #[stable(feature = "boxed_slice_try_from", since = "1.43.0")] impl TryFrom> for Arc<[T; N], A> { type Error = Arc<[T], A>; fn try_from(boxed_slice: Arc<[T], A>) -> Result { if boxed_slice.len() == N { let alloc = boxed_slice.alloc.clone(); Ok(unsafe { Arc::from_raw_in(Arc::into_raw(boxed_slice) as *mut [T; N], alloc) }) } else { Err(boxed_slice) } } } #[cfg(not(no_global_oom_handling))] #[stable(feature = "shared_from_iter", since = "1.37.0")] impl FromIterator for Arc<[T]> { /// Takes each element in the `Iterator` and collects it into an `Arc<[T]>`. /// /// # Performance characteristics /// /// ## The general case /// /// In the general case, collecting into `Arc<[T]>` is done by first /// collecting into a `Vec`. That is, when writing the following: /// /// ```rust /// # use std::sync::Arc; /// let evens: Arc<[u8]> = (0..10).filter(|&x| x % 2 == 0).collect(); /// # assert_eq!(&*evens, &[0, 2, 4, 6, 8]); /// ``` /// /// this behaves as if we wrote: /// /// ```rust /// # use std::sync::Arc; /// let evens: Arc<[u8]> = (0..10).filter(|&x| x % 2 == 0) /// .collect::>() // The first set of allocations happens here. /// .into(); // A second allocation for `Arc<[T]>` happens here. /// # assert_eq!(&*evens, &[0, 2, 4, 6, 8]); /// ``` /// /// This will allocate as many times as needed for constructing the `Vec` /// and then it will allocate once for turning the `Vec` into the `Arc<[T]>`. /// /// ## Iterators of known length /// /// When your `Iterator` implements `TrustedLen` and is of an exact size, /// a single allocation will be made for the `Arc<[T]>`. For example: /// /// ```rust /// # use std::sync::Arc; /// let evens: Arc<[u8]> = (0..10).collect(); // Just a single allocation happens here. /// # assert_eq!(&*evens, &*(0..10).collect::>()); /// ``` fn from_iter>(iter: I) -> Self { ToArcSlice::to_arc_slice(iter.into_iter()) } } /// Specialization trait used for collecting into `Arc<[T]>`. trait ToArcSlice: Iterator + Sized { fn to_arc_slice(self) -> Arc<[T]>; } #[cfg(not(no_global_oom_handling))] impl> ToArcSlice for I { default fn to_arc_slice(self) -> Arc<[T]> { self.collect::>().into() } } #[cfg(not(no_global_oom_handling))] impl> ToArcSlice for I { fn to_arc_slice(self) -> Arc<[T]> { // This is the case for a `TrustedLen` iterator. let (low, high) = self.size_hint(); if let Some(high) = high { debug_assert_eq!( low, high, "TrustedLen iterator's size hint is not exact: {:?}", (low, high) ); unsafe { // SAFETY: We need to ensure that the iterator has an exact length and we have. Arc::from_iter_exact(self, low) } } else { // TrustedLen contract guarantees that `upper_bound == None` implies an iterator // length exceeding `usize::MAX`. // The default implementation would collect into a vec which would panic. // Thus we panic here immediately without invoking `Vec` code. panic!("capacity overflow"); } } } #[stable(feature = "rust1", since = "1.0.0")] impl borrow::Borrow for Arc { fn borrow(&self) -> &T { &**self } } #[stable(since = "1.5.0", feature = "smart_ptr_as_ref")] impl AsRef for Arc { fn as_ref(&self) -> &T { &**self } } #[stable(feature = "pin", since = "1.33.0")] impl Unpin for Arc {} /// Get the offset within an `ArcInner` for the payload behind a pointer. /// /// # Safety /// /// The pointer must point to (and have valid metadata for) a previously /// valid instance of T, but the T is allowed to be dropped. unsafe fn data_offset(ptr: *const T) -> usize { // Align the unsized value to the end of the ArcInner. // Because RcBox is repr(C), it will always be the last field in memory. // SAFETY: since the only unsized types possible are slices, trait objects, // and extern types, the input safety requirement is currently enough to // satisfy the requirements of align_of_val_raw; this is an implementation // detail of the language that must not be relied upon outside of std. unsafe { data_offset_align(align_of_val_raw(ptr)) } } #[inline] fn data_offset_align(align: usize) -> usize { let layout = Layout::new::>(); layout.size() + layout.padding_needed_for(align) } #[stable(feature = "arc_error", since = "1.52.0")] impl core::error::Error for Arc { #[allow(deprecated, deprecated_in_future)] fn description(&self) -> &str { core::error::Error::description(&**self) } #[allow(deprecated)] fn cause(&self) -> Option<&dyn core::error::Error> { core::error::Error::cause(&**self) } fn source(&self) -> Option<&(dyn core::error::Error + 'static)> { core::error::Error::source(&**self) } fn provide<'a>(&'a self, req: &mut core::error::Request<'a>) { core::error::Error::provide(&**self, req); } }