//! Vendored and stripped down version of triomphe
use std::{
    alloc::{self, Layout},
    cmp::Ordering,
    hash::{Hash, Hasher},
    marker::PhantomData,
    mem::{self, ManuallyDrop},
    ops::Deref,
    ptr,
    sync::atomic::{
        self,
        Ordering::{Acquire, Relaxed, Release},
    },
};

use memoffset::offset_of;

/// A soft limit on the number of references that may be made to an `Arc`.
///
/// Going above this limit will abort your program (although not
/// necessarily at _exactly_ `MAX_REFCOUNT + 1` references).
const MAX_REFCOUNT: usize = (isize::MAX) as usize;

/// The object allocated by an `Arc<T>`
#[repr(C)]
pub(crate) struct ArcInner<T: ?Sized> {
    pub(crate) count: atomic::AtomicUsize,
    pub(crate) data: T,
}

unsafe impl<T: ?Sized + Sync + Send> Send for ArcInner<T> {}
unsafe impl<T: ?Sized + Sync + Send> Sync for ArcInner<T> {}

/// An atomically reference counted shared pointer
///
/// See the documentation for [`Arc`] in the standard library. Unlike the
/// standard library `Arc`, this `Arc` does not support weak reference counting.
///
/// [`Arc`]: https://doc.rust-lang.org/stable/std/sync/struct.Arc.html
#[repr(transparent)]
pub(crate) struct Arc<T: ?Sized> {
    pub(crate) p: ptr::NonNull<ArcInner<T>>,
    pub(crate) phantom: PhantomData<T>,
}

unsafe impl<T: ?Sized + Sync + Send> Send for Arc<T> {}
unsafe impl<T: ?Sized + Sync + Send> Sync for Arc<T> {}

impl<T> Arc<T> {
    /// Reconstruct the `Arc<T>` from a raw pointer obtained from `into_raw()`
    ///
    /// Note: This raw pointer will be offset in the allocation and must be preceded
    /// by the atomic count.
    ///
    /// It is recommended to use OffsetArc for this
    #[inline]
    pub(crate) unsafe fn from_raw(ptr: *const T) -> Self {
        // To find the corresponding pointer to the `ArcInner` we need
        // to subtract the offset of the `data` field from the pointer.
        let ptr = (ptr as *const u8).sub(offset_of!(ArcInner<T>, data));
        Arc { p: ptr::NonNull::new_unchecked(ptr as *mut ArcInner<T>), phantom: PhantomData }
    }
}

impl<T: ?Sized> Arc<T> {
    #[inline]
    fn inner(&self) -> &ArcInner<T> {
        // This unsafety is ok because while this arc is alive we're guaranteed
        // that the inner pointer is valid. Furthermore, we know that the
        // `ArcInner` structure itself is `Sync` because the inner data is
        // `Sync` as well, so we're ok loaning out an immutable pointer to these
        // contents.
        unsafe { &*self.ptr() }
    }

    // Non-inlined part of `drop`. Just invokes the destructor.
    #[inline(never)]
    unsafe fn drop_slow(&mut self) {
        let _ = Box::from_raw(self.ptr());
    }

    /// Test pointer equality between the two Arcs, i.e. whether they point to
    /// the _same_ allocation
    #[inline]
    pub(crate) fn ptr_eq(this: &Self, other: &Self) -> bool {
        this.ptr() == other.ptr()
    }

    pub(crate) fn ptr(&self) -> *mut ArcInner<T> {
        self.p.as_ptr()
    }
}

impl<T: ?Sized> Clone for Arc<T> {
    #[inline]
    fn clone(&self) -> Self {
        // Using a relaxed ordering is alright here, as knowledge of the
        // original reference prevents other threads from erroneously deleting
        // the object.
        //
        // As explained in the [Boost documentation][1], Increasing the
        // reference counter can always be done with memory_order_relaxed: New
        // references to an object can only be formed from an existing
        // reference, and passing an existing reference from one thread to
        // another must already provide any required synchronization.
        //
        // [1]: (www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html)
        let old_size = self.inner().count.fetch_add(1, Relaxed);

        // However we need to guard against massive refcounts in case someone
        // is `mem::forget`ing Arcs. If we don't do this the count can overflow
        // and users will use-after free. We racily saturate to `isize::MAX` on
        // the assumption that there aren't ~2 billion threads incrementing
        // the reference count at once. This branch will never be taken in
        // any realistic program.
        //
        // We abort because such a program is incredibly degenerate, and we
        // don't care to support it.
        if old_size > MAX_REFCOUNT {
            std::process::abort();
        }

        unsafe { Arc { p: ptr::NonNull::new_unchecked(self.ptr()), phantom: PhantomData } }
    }
}

impl<T: ?Sized> Deref for Arc<T> {
    type Target = T;

    #[inline]
    fn deref(&self) -> &T {
        &self.inner().data
    }
}

impl<T: ?Sized> Arc<T> {
    /// Provides mutable access to the contents _if_ the `Arc` is uniquely owned.
    #[inline]
    pub(crate) fn get_mut(this: &mut Self) -> Option<&mut T> {
        if this.is_unique() {
            unsafe {
                // See make_mut() for documentation of the threadsafety here.
                Some(&mut (*this.ptr()).data)
            }
        } else {
            None
        }
    }

    /// Whether or not the `Arc` is uniquely owned (is the refcount 1?).
    pub(crate) fn is_unique(&self) -> bool {
        // See the extensive discussion in [1] for why this needs to be Acquire.
        //
        // [1] https://github.com/servo/servo/issues/21186
        self.inner().count.load(Acquire) == 1
    }
}

impl<T: ?Sized> Drop for Arc<T> {
    #[inline]
    fn drop(&mut self) {
        // Because `fetch_sub` is already atomic, we do not need to synchronize
        // with other threads unless we are going to delete the object.
        if self.inner().count.fetch_sub(1, Release) != 1 {
            return;
        }

        // FIXME(bholley): Use the updated comment when [2] is merged.
        //
        // This load is needed to prevent reordering of use of the data and
        // deletion of the data. Because it is marked `Release`, the decreasing
        // of the reference count synchronizes with this `Acquire` load. This
        // means that use of the data happens before decreasing the reference
        // count, which happens before this load, which happens before the
        // deletion of the data.
        //
        // As explained in the [Boost documentation][1],
        //
        // > It is important to enforce any possible access to the object in one
        // > thread (through an existing reference) to *happen before* deleting
        // > the object in a different thread. This is achieved by a "release"
        // > operation after dropping a reference (any access to the object
        // > through this reference must obviously happened before), and an
        // > "acquire" operation before deleting the object.
        //
        // [1]: (www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html)
        // [2]: https://github.com/rust-lang/rust/pull/41714
        self.inner().count.load(Acquire);

        unsafe {
            self.drop_slow();
        }
    }
}

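// A minimal sketch, not part of the vendored triomphe code: it exercises the
// Relaxed-increment / Release-decrement protocol documented above through the
// observable API (`clone`, `is_unique`, `get_mut`, `ptr_eq`). Since this
// stripped-down version has no plain `Arc::new`, construction goes through
// `ThinArc::from_header_and_iter` and `Arc::from_thin`, both defined further
// down in this file. The module name is illustrative only.
#[cfg(test)]
mod refcount_sketch {
    use super::*;

    #[test]
    fn clone_and_drop_adjust_the_count() {
        let thin = ThinArc::from_header_and_iter(0u32, [1u32, 2, 3].into_iter());
        let mut arc = Arc::from_thin(thin);
        assert!(arc.is_unique());

        // A clone bumps the count with a Relaxed `fetch_add`, so the original
        // is no longer unique and mutable access is refused.
        let second = arc.clone();
        assert!(Arc::ptr_eq(&arc, &second));
        assert!(!arc.is_unique());
        assert!(Arc::get_mut(&mut arc).is_none());

        // Dropping the clone releases its reference; the survivor is unique again.
        drop(second);
        assert!(arc.is_unique());
        assert!(Arc::get_mut(&mut arc).is_some());
    }
}
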
impl<T: ?Sized + PartialEq> PartialEq for Arc<T> {
    fn eq(&self, other: &Arc<T>) -> bool {
        Self::ptr_eq(self, other) || *(*self) == *(*other)
    }

    fn ne(&self, other: &Arc<T>) -> bool {
        !Self::ptr_eq(self, other) && *(*self) != *(*other)
    }
}

impl<T: ?Sized + PartialOrd> PartialOrd for Arc<T> {
    fn partial_cmp(&self, other: &Arc<T>) -> Option<Ordering> {
        (**self).partial_cmp(&**other)
    }

    fn lt(&self, other: &Arc<T>) -> bool {
        *(*self) < *(*other)
    }

    fn le(&self, other: &Arc<T>) -> bool {
        *(*self) <= *(*other)
    }

    fn gt(&self, other: &Arc<T>) -> bool {
        *(*self) > *(*other)
    }

    fn ge(&self, other: &Arc<T>) -> bool {
        *(*self) >= *(*other)
    }
}

impl<T: ?Sized + Ord> Ord for Arc<T> {
    fn cmp(&self, other: &Arc<T>) -> Ordering {
        (**self).cmp(&**other)
    }
}

impl<T: ?Sized + Eq> Eq for Arc<T> {}

impl<T: ?Sized + Hash> Hash for Arc<T> {
    fn hash<H: Hasher>(&self, state: &mut H) {
        (**self).hash(state)
    }
}

#[derive(Debug, Eq, PartialEq, Hash, PartialOrd)]
#[repr(C)]
pub(crate) struct HeaderSlice<H, T: ?Sized> {
    pub(crate) header: H,
    length: usize,
    slice: T,
}

impl<H, T> HeaderSlice<H, [T]> {
    pub(crate) fn slice(&self) -> &[T] {
        &self.slice
    }
}

impl<H, T> Deref for HeaderSlice<H, [T; 0]> {
    type Target = HeaderSlice<H, [T]>;

    fn deref(&self) -> &Self::Target {
        unsafe {
            let len = self.length;
            let fake_slice: *const [T] =
                ptr::slice_from_raw_parts(self as *const _ as *const T, len);
            &*(fake_slice as *const HeaderSlice<H, [T]>)
        }
    }
}

/// A "thin" `Arc` containing dynamically sized data
///
/// This is functionally equivalent to `Arc<(H, [T])>`
///
/// When you create an `Arc` containing a dynamically sized type
/// like `HeaderSlice<H, [T]>`, the `Arc` is represented on the stack
/// as a "fat pointer", where the length of the slice is stored
/// alongside the `Arc`'s pointer. In some situations you may wish to
/// have a thin pointer instead, perhaps for FFI compatibility
/// or space efficiency.
///
/// `ThinArc` solves this by storing the length in the allocation itself,
/// via `HeaderSlice<H, [T; 0]>`.
///
/// Note that we use `[T; 0]` in order to have the right alignment for `T`.
#[repr(transparent)]
pub(crate) struct ThinArc<H, T> {
    ptr: ptr::NonNull<ArcInner<HeaderSlice<H, [T; 0]>>>,
    phantom: PhantomData<(H, T)>,
}

unsafe impl<H: Sync + Send, T: Sync + Send> Send for ThinArc<H, T> {}
unsafe impl<H: Sync + Send, T: Sync + Send> Sync for ThinArc<H, T> {}

// Synthesize a fat pointer from a thin pointer.
fn thin_to_thick<H, T>(
    thin: *mut ArcInner<HeaderSlice<H, [T; 0]>>,
) -> *mut ArcInner<HeaderSlice<H, [T]>> {
    let len = unsafe { (*thin).data.length };
    let fake_slice: *mut [T] = ptr::slice_from_raw_parts_mut(thin as *mut T, len);
    // Transplants metadata.
    fake_slice as *mut ArcInner<HeaderSlice<H, [T]>>
}

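// A minimal sketch, not part of the vendored triomphe code: it checks the
// layout claim made in the `ThinArc` documentation above. `ThinArc` is a
// single (thin) pointer because the slice length lives in the allocation,
// while the equivalent `Arc<HeaderSlice<H, [T]>>` is a fat pointer carrying
// the length alongside it. The module name and the `u64`/`u8` parameters are
// illustrative only; the assertion assumes the usual platforms where a
// pointer is one `usize` wide.
#[cfg(test)]
mod thin_vs_fat_sketch {
    use super::*;
    use std::mem::size_of;

    #[test]
    fn thin_arc_is_one_pointer_wide() {
        assert_eq!(size_of::<ThinArc<u64, u8>>(), size_of::<usize>());
        assert_eq!(size_of::<Arc<HeaderSlice<u64, [u8]>>>(), 2 * size_of::<usize>());
    }
}
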
impl<H, T> ThinArc<H, T> {
    /// Temporarily converts `self` into a bona fide Arc and exposes it to the
    /// provided callback. The refcount is not modified.
    #[inline]
    pub(crate) fn with_arc<F, U>(&self, f: F) -> U
    where
        F: FnOnce(&Arc<HeaderSlice<H, [T]>>) -> U,
    {
        // Synthesize transient Arc, which never touches the refcount of the ArcInner.
        let transient = unsafe {
            ManuallyDrop::new(Arc {
                p: ptr::NonNull::new_unchecked(thin_to_thick(self.ptr.as_ptr())),
                phantom: PhantomData,
            })
        };

        // Expose the transient Arc to the callback, which may clone it if it wants.
        let result = f(&transient);

        // Forward the result.
        result
    }

    /// Creates a `ThinArc` for a HeaderSlice using the given header struct and
    /// iterator to generate the slice.
    pub(crate) fn from_header_and_iter<I>(header: H, mut items: I) -> Self
    where
        I: Iterator<Item = T> + ExactSizeIterator,
    {
        assert_ne!(mem::size_of::<T>(), 0, "Need to think about ZST");

        let num_items = items.len();

        // Offset of the start of the slice in the allocation.
        let inner_to_data_offset = offset_of!(ArcInner<HeaderSlice<H, [T; 0]>>, data);
        let data_to_slice_offset = offset_of!(HeaderSlice<H, [T; 0]>, slice);
        let slice_offset = inner_to_data_offset + data_to_slice_offset;

        // Compute the size of the real payload.
        let slice_size = mem::size_of::<T>().checked_mul(num_items).expect("size overflows");
        let usable_size = slice_offset.checked_add(slice_size).expect("size overflows");

        // Round up size to alignment.
        let align = mem::align_of::<ArcInner<HeaderSlice<H, [T; 0]>>>();
        let size = usable_size.wrapping_add(align - 1) & !(align - 1);
        assert!(size >= usable_size, "size overflows");
        let layout = Layout::from_size_align(size, align).expect("invalid layout");

        let ptr: *mut ArcInner<HeaderSlice<H, [T; 0]>>;
        unsafe {
            let buffer = alloc::alloc(layout);
            if buffer.is_null() {
                alloc::handle_alloc_error(layout);
            }

            // // Synthesize the fat pointer. We do this by claiming we have a direct
            // // pointer to a [T], and then changing the type of the borrow. The key
            // // point here is that the length portion of the fat pointer applies
            // // only to the number of elements in the dynamically-sized portion of
            // // the type, so the value will be the same whether it points to a [T]
            // // or something else with a [T] as its last member.
            // let fake_slice: &mut [T] = slice::from_raw_parts_mut(buffer as *mut T, num_items);
            // ptr = fake_slice as *mut [T] as *mut ArcInner<HeaderSlice<H, [T]>>;
            ptr = buffer as *mut _;

            let count = atomic::AtomicUsize::new(1);

            // Write the data.
            //
            // Note that any panics here (i.e. from the iterator) are safe, since
            // we'll just leak the uninitialized memory.
            ptr::write(ptr::addr_of_mut!((*ptr).count), count);
            ptr::write(ptr::addr_of_mut!((*ptr).data.header), header);
            ptr::write(ptr::addr_of_mut!((*ptr).data.length), num_items);
            if num_items != 0 {
                let mut current = ptr::addr_of_mut!((*ptr).data.slice) as *mut T;
                debug_assert_eq!(current as usize - buffer as usize, slice_offset);
                for _ in 0..num_items {
                    ptr::write(
                        current,
                        items.next().expect("ExactSizeIterator over-reported length"),
                    );
                    current = current.offset(1);
                }
                assert!(items.next().is_none(), "ExactSizeIterator under-reported length");

                // We should have consumed the buffer exactly.
                debug_assert_eq!(current as *mut u8, buffer.add(usable_size));
            }
            assert!(items.next().is_none(), "ExactSizeIterator under-reported length");
        }

        ThinArc { ptr: unsafe { ptr::NonNull::new_unchecked(ptr) }, phantom: PhantomData }
    }
}

impl<H, T> Deref for ThinArc<H, T> {
    type Target = HeaderSlice<H, [T]>;

    #[inline]
    fn deref(&self) -> &Self::Target {
        unsafe { &(*thin_to_thick(self.ptr.as_ptr())).data }
    }
}

impl<H, T> Clone for ThinArc<H, T> {
    #[inline]
    fn clone(&self) -> Self {
        ThinArc::with_arc(self, |a| Arc::into_thin(a.clone()))
    }
}

impl<H, T> Drop for ThinArc<H, T> {
    #[inline]
    fn drop(&mut self) {
        let _ = Arc::from_thin(ThinArc { ptr: self.ptr, phantom: PhantomData });
    }
}

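// A minimal sketch, not part of the vendored triomphe code: `Clone` for
// `ThinArc` goes through `with_arc` and `Arc::into_thin` as defined above, so
// a clone shares the allocation, and the count can be observed through the
// transient `Arc` that `with_arc` synthesizes. The module name is
// illustrative only.
#[cfg(test)]
mod thin_arc_clone_sketch {
    use super::*;

    #[test]
    fn cloning_a_thin_arc_shares_the_allocation() {
        let a = ThinArc::from_header_and_iter((), 0u32..3);
        let b = a.clone();

        // Both ThinArcs point at the same ArcInner, so neither is unique.
        assert!(a.with_arc(|x| b.with_arc(|y| Arc::ptr_eq(x, y))));
        assert!(a.with_arc(|arc| !arc.is_unique()));

        // Dropping the clone brings the count back down to one.
        drop(b);
        assert!(a.with_arc(|arc| arc.is_unique()));
    }
}
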
impl<H, T> Arc<HeaderSlice<H, [T]>> {
    /// Converts an `Arc` into a `ThinArc`. This consumes the `Arc`, so the refcount
    /// is not modified.
    #[inline]
    pub(crate) fn into_thin(a: Self) -> ThinArc<H, T> {
        assert_eq!(a.length, a.slice.len(), "Length needs to be correct for ThinArc to work");
        let fat_ptr: *mut ArcInner<HeaderSlice<H, [T]>> = a.ptr();
        mem::forget(a);
        let thin_ptr = fat_ptr as *mut [usize] as *mut usize;
        ThinArc {
            ptr: unsafe {
                ptr::NonNull::new_unchecked(thin_ptr as *mut ArcInner<HeaderSlice<H, [T; 0]>>)
            },
            phantom: PhantomData,
        }
    }

    /// Converts a `ThinArc` into an `Arc`. This consumes the `ThinArc`, so the refcount
    /// is not modified.
    #[inline]
    pub(crate) fn from_thin(a: ThinArc<H, T>) -> Self {
        let ptr = thin_to_thick(a.ptr.as_ptr());
        mem::forget(a);
        unsafe { Arc { p: ptr::NonNull::new_unchecked(ptr), phantom: PhantomData } }
    }
}

impl<H: PartialEq, T: PartialEq> PartialEq for ThinArc<H, T> {
    #[inline]
    fn eq(&self, other: &ThinArc<H, T>) -> bool {
        **self == **other
    }
}

impl<H: Eq, T: Eq> Eq for ThinArc<H, T> {}

impl<H: Hash, T: Hash> Hash for ThinArc<H, T> {
    fn hash<HSR: Hasher>(&self, state: &mut HSR) {
        (**self).hash(state)
    }
}

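// A minimal usage sketch, not part of the vendored triomphe code: build a
// `ThinArc` from a header and an iterator, read the header and slice back
// through `Deref`, and round-trip through `Arc::from_thin` / `Arc::into_thin`,
// neither of which touches the refcount. The module name and the sample
// header/slice values are illustrative only.
#[cfg(test)]
mod thin_arc_round_trip_sketch {
    use super::*;

    #[test]
    fn header_and_slice_survive_the_round_trip() {
        let thin = ThinArc::from_header_and_iter("header", 0u32..4);
        assert_eq!(thin.header, "header");
        assert_eq!(thin.slice(), &[0, 1, 2, 3]);

        // Temporarily view the ThinArc as a fat Arc without changing the count.
        let len = thin.with_arc(|arc| arc.slice().len());
        assert_eq!(len, 4);

        // `from_thin` and `into_thin` consume their argument, so the single
        // reference is simply handed back and forth.
        let fat = Arc::from_thin(thin);
        assert!(fat.is_unique());
        let thin_again = Arc::into_thin(fat);
        assert_eq!(thin_again.slice().len(), 4);
    }
}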