From dc0db358abe19481e475e10c32149b53370f1a1c Mon Sep 17 00:00:00 2001 From: Daniel Baumann Date: Thu, 30 May 2024 05:57:31 +0200 Subject: Merging upstream version 1.72.1+dfsg1. Signed-off-by: Daniel Baumann --- vendor/triomphe/src/arc.rs | 887 ++++++++++++++++++++++++++++++++ vendor/triomphe/src/arc_borrow.rs | 116 +++++ vendor/triomphe/src/arc_swap_support.rs | 42 ++ vendor/triomphe/src/arc_union.rs | 139 +++++ vendor/triomphe/src/header.rs | 378 ++++++++++++++ vendor/triomphe/src/lib.rs | 94 ++++ vendor/triomphe/src/offset_arc.rs | 134 +++++ vendor/triomphe/src/thin_arc.rs | 329 ++++++++++++ vendor/triomphe/src/unique_arc.rs | 257 +++++++++ 9 files changed, 2376 insertions(+) create mode 100644 vendor/triomphe/src/arc.rs create mode 100644 vendor/triomphe/src/arc_borrow.rs create mode 100644 vendor/triomphe/src/arc_swap_support.rs create mode 100644 vendor/triomphe/src/arc_union.rs create mode 100644 vendor/triomphe/src/header.rs create mode 100644 vendor/triomphe/src/lib.rs create mode 100644 vendor/triomphe/src/offset_arc.rs create mode 100644 vendor/triomphe/src/thin_arc.rs create mode 100644 vendor/triomphe/src/unique_arc.rs (limited to 'vendor/triomphe/src') diff --git a/vendor/triomphe/src/arc.rs b/vendor/triomphe/src/arc.rs new file mode 100644 index 000000000..6fe022c46 --- /dev/null +++ b/vendor/triomphe/src/arc.rs @@ -0,0 +1,887 @@ +use alloc::alloc::handle_alloc_error; +use alloc::boxed::Box; +use core::alloc::Layout; +use core::borrow; +use core::cmp::Ordering; +use core::convert::From; +use core::ffi::c_void; +use core::fmt; +use core::hash::{Hash, Hasher}; +use core::marker::PhantomData; +use core::mem::{ManuallyDrop, MaybeUninit}; +use core::ops::Deref; +use core::ptr::{self, NonNull}; +use core::sync::atomic; +use core::sync::atomic::Ordering::{Acquire, Relaxed, Release}; +use core::{isize, usize}; + +#[cfg(feature = "serde")] +use serde::{Deserialize, Serialize}; +#[cfg(feature = "stable_deref_trait")] +use stable_deref_trait::{CloneStableDeref, StableDeref}; + +use crate::{abort, ArcBorrow, HeaderSlice, OffsetArc, UniqueArc}; + +/// A soft limit on the amount of references that may be made to an `Arc`. +/// +/// Going above this limit will abort your program (although not +/// necessarily) at _exactly_ `MAX_REFCOUNT + 1` references. +const MAX_REFCOUNT: usize = (isize::MAX) as usize; + +/// The object allocated by an Arc +#[repr(C)] +pub(crate) struct ArcInner { + pub(crate) count: atomic::AtomicUsize, + pub(crate) data: T, +} + +unsafe impl Send for ArcInner {} +unsafe impl Sync for ArcInner {} + +/// An atomically reference counted shared pointer +/// +/// See the documentation for [`Arc`] in the standard library. Unlike the +/// standard library `Arc`, this `Arc` does not support weak reference counting. +/// +/// [`Arc`]: https://doc.rust-lang.org/stable/std/sync/struct.Arc.html +#[repr(transparent)] +pub struct Arc { + pub(crate) p: ptr::NonNull>, + pub(crate) phantom: PhantomData, +} + +unsafe impl Send for Arc {} +unsafe impl Sync for Arc {} + +impl Arc { + /// Construct an `Arc` + #[inline] + pub fn new(data: T) -> Self { + let ptr = Box::into_raw(Box::new(ArcInner { + count: atomic::AtomicUsize::new(1), + data, + })); + + unsafe { + Arc { + p: ptr::NonNull::new_unchecked(ptr), + phantom: PhantomData, + } + } + } + + /// Reconstruct the Arc from a raw pointer obtained from into_raw() + /// + /// Note: This raw pointer will be offset in the allocation and must be preceded + /// by the atomic count. 
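+    ///
+    /// For example, the round-trip this enables (a sketch using only the
+    /// `into_raw`/`from_raw` pair defined in this file):
+    ///
+    /// ```
+    /// use triomphe::Arc;
+    ///
+    /// let ptr = Arc::into_raw(Arc::new(7u32));
+    /// // `ptr` points at the data, not at the allocation start; `from_raw`
+    /// // recovers the `ArcInner` by subtracting the field offset.
+    /// let arc = unsafe { Arc::from_raw(ptr) };
+    /// assert_eq!(*arc, 7);
+    /// ```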
+ /// + /// It is recommended to use OffsetArc for this + #[inline] + pub unsafe fn from_raw(ptr: *const T) -> Self { + // FIXME: when `byte_sub` is stabilized, this can accept T: ?Sized. + + // To find the corresponding pointer to the `ArcInner` we need + // to subtract the offset of the `data` field from the pointer. + let ptr = (ptr as *const u8).sub(offset_of!(ArcInner, data)); + Arc::from_raw_inner(ptr as *mut ArcInner) + } + + /// Temporarily converts |self| into a bonafide OffsetArc and exposes it to the + /// provided callback. The refcount is not modified. + #[inline(always)] + pub fn with_raw_offset_arc(&self, f: F) -> U + where + F: FnOnce(&OffsetArc) -> U, + { + // Synthesize transient Arc, which never touches the refcount of the ArcInner. + // Store transient in `ManuallyDrop`, to leave the refcount untouched. + let transient = unsafe { ManuallyDrop::new(Arc::into_raw_offset(ptr::read(self))) }; + + // Expose the transient Arc to the callback, which may clone it if it wants. + f(&transient) + } + + /// Converts an `Arc` into a `OffsetArc`. This consumes the `Arc`, so the refcount + /// is not modified. + #[inline] + pub fn into_raw_offset(a: Self) -> OffsetArc { + unsafe { + OffsetArc { + ptr: ptr::NonNull::new_unchecked(Arc::into_raw(a) as *mut T), + phantom: PhantomData, + } + } + } + + /// Converts a `OffsetArc` into an `Arc`. This consumes the `OffsetArc`, so the refcount + /// is not modified. + #[inline] + pub fn from_raw_offset(a: OffsetArc) -> Self { + let a = ManuallyDrop::new(a); + let ptr = a.ptr.as_ptr(); + unsafe { Arc::from_raw(ptr) } + } + + /// Returns the inner value, if the [`Arc`] has exactly one strong reference. + /// + /// Otherwise, an [`Err`] is returned with the same [`Arc`] that was + /// passed in. + /// + /// # Examples + /// + /// ``` + /// use triomphe::Arc; + /// + /// let x = Arc::new(3); + /// assert_eq!(Arc::try_unwrap(x), Ok(3)); + /// + /// let x = Arc::new(4); + /// let _y = Arc::clone(&x); + /// assert_eq!(*Arc::try_unwrap(x).unwrap_err(), 4); + /// ``` + pub fn try_unwrap(this: Self) -> Result { + Self::try_unique(this).map(UniqueArc::into_inner) + } +} + +impl Arc { + /// Convert the Arc to a raw pointer, suitable for use across FFI + /// + /// Note: This returns a pointer to the data T, which is offset in the allocation. + /// + /// It is recommended to use OffsetArc for this. + #[inline] + pub fn into_raw(this: Self) -> *const T { + let this = ManuallyDrop::new(this); + this.as_ptr() + } + + /// Returns the raw pointer. + /// + /// Same as into_raw except `self` isn't consumed. + #[inline] + pub fn as_ptr(&self) -> *const T { + // SAFETY: This cannot go through a reference to `data`, because this method + // is used to implement `into_raw`. To reconstruct the full `Arc` from this + // pointer, it needs to maintain its full provenance, and not be reduced to + // just the contained `T`. + unsafe { ptr::addr_of_mut!((*self.ptr()).data) } + } + + /// Produce a pointer to the data that can be converted back + /// to an Arc. This is basically an `&Arc`, without the extra indirection. + /// It has the benefits of an `&T` but also knows about the underlying refcount + /// and can be converted into more `Arc`s if necessary. + #[inline] + pub fn borrow_arc(&self) -> ArcBorrow<'_, T> { + ArcBorrow(&**self) + } + + /// Returns the address on the heap of the Arc itself -- not the T within it -- for memory + /// reporting. 
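+    ///
+    /// A sketch of the guarantee this gives (exact addresses are
+    /// allocator-dependent):
+    ///
+    /// ```
+    /// use triomphe::Arc;
+    ///
+    /// let a = Arc::new(1u8);
+    /// let b = a.clone();
+    /// // Clones share one allocation, so both report the same address.
+    /// assert_eq!(a.heap_ptr(), b.heap_ptr());
+    /// ```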
+ pub fn heap_ptr(&self) -> *const c_void { + self.p.as_ptr() as *const ArcInner as *const c_void + } + + #[inline] + pub(super) fn into_raw_inner(this: Self) -> *mut ArcInner { + let this = ManuallyDrop::new(this); + this.ptr() + } + + /// Construct an `Arc` from an allocated `ArcInner`. + /// # Safety + /// The `ptr` must point to a valid instance, allocated by an `Arc`. The reference could will + /// not be modified. + pub(super) unsafe fn from_raw_inner(ptr: *mut ArcInner) -> Self { + Arc { + p: ptr::NonNull::new_unchecked(ptr), + phantom: PhantomData, + } + } + + #[inline] + pub(super) fn inner(&self) -> &ArcInner { + // This unsafety is ok because while this arc is alive we're guaranteed + // that the inner pointer is valid. Furthermore, we know that the + // `ArcInner` structure itself is `Sync` because the inner data is + // `Sync` as well, so we're ok loaning out an immutable pointer to these + // contents. + unsafe { &*self.ptr() } + } + + // Non-inlined part of `drop`. Just invokes the destructor. + #[inline(never)] + unsafe fn drop_slow(&mut self) { + let _ = Box::from_raw(self.ptr()); + } + + /// Test pointer equality between the two Arcs, i.e. they must be the _same_ + /// allocation + #[inline] + pub fn ptr_eq(this: &Self, other: &Self) -> bool { + this.ptr() == other.ptr() + } + + pub(crate) fn ptr(&self) -> *mut ArcInner { + self.p.as_ptr() + } + + /// Allocates an `ArcInner` with sufficient space for + /// a possibly-unsized inner value where the value has the layout provided. + /// + /// The function `mem_to_arcinner` is called with the data pointer + /// and must return back a (potentially fat)-pointer for the `ArcInner`. + /// + /// ## Safety + /// + /// `mem_to_arcinner` must return the same pointer, the only things that can change are + /// - its type + /// - its metadata + /// + /// `value_layout` must be correct for `T`. + #[allow(unused_unsafe)] + pub(super) unsafe fn allocate_for_layout( + value_layout: Layout, + mem_to_arcinner: impl FnOnce(*mut u8) -> *mut ArcInner, + ) -> NonNull> { + let layout = Layout::new::>() + .extend(value_layout) + .unwrap() + .0 + .pad_to_align(); + + // Safety: we propagate safety requirements to the caller + unsafe { + Arc::try_allocate_for_layout(value_layout, mem_to_arcinner) + .unwrap_or_else(|_| handle_alloc_error(layout)) + } + } + + /// Allocates an `ArcInner` with sufficient space for + /// a possibly-unsized inner value where the value has the layout provided, + /// returning an error if allocation fails. + /// + /// The function `mem_to_arcinner` is called with the data pointer + /// and must return back a (potentially fat)-pointer for the `ArcInner`. + /// + /// ## Safety + /// + /// `mem_to_arcinner` must return the same pointer, the only things that can change are + /// - its type + /// - its metadata + /// + /// `value_layout` must be correct for `T`. 
+ #[allow(unused_unsafe)] + unsafe fn try_allocate_for_layout( + value_layout: Layout, + mem_to_arcinner: impl FnOnce(*mut u8) -> *mut ArcInner, + ) -> Result>, ()> { + let layout = Layout::new::>() + .extend(value_layout) + .unwrap() + .0 + .pad_to_align(); + + let ptr = NonNull::new(alloc::alloc::alloc(layout)).ok_or(())?; + + // Initialize the ArcInner + let inner = mem_to_arcinner(ptr.as_ptr()); + debug_assert_eq!(unsafe { Layout::for_value(&*inner) }, layout); + + unsafe { + ptr::write(&mut (*inner).count, atomic::AtomicUsize::new(1)); + } + + // Safety: `ptr` is checked to be non-null, + // `inner` is the same as `ptr` (per the safety requirements of this function) + unsafe { Ok(NonNull::new_unchecked(inner)) } + } +} + +impl Arc> { + pub(super) fn allocate_for_header_and_slice( + len: usize, + ) -> NonNull>> { + let layout = Layout::new::() + .extend(Layout::array::(len).unwrap()) + .unwrap() + .0 + .pad_to_align(); + + unsafe { + // Safety: + // - the provided closure does not change the pointer (except for meta & type) + // - the provided layout is valid for `HeaderSlice` + Arc::allocate_for_layout(layout, |mem| { + // Synthesize the fat pointer. We do this by claiming we have a direct + // pointer to a [T], and then changing the type of the borrow. The key + // point here is that the length portion of the fat pointer applies + // only to the number of elements in the dynamically-sized portion of + // the type, so the value will be the same whether it points to a [T] + // or something else with a [T] as its last member. + let fake_slice = ptr::slice_from_raw_parts_mut(mem as *mut T, len); + fake_slice as *mut ArcInner> + }) + } + } +} + +impl Arc> { + /// Create an Arc contains an `MaybeUninit`. + pub fn new_uninit() -> Self { + Arc::new(MaybeUninit::::uninit()) + } + + /// Calls `MaybeUninit::write` on the value contained. + /// + /// ## Panics + /// + /// If the `Arc` is not unique. + #[deprecated( + since = "0.1.7", + note = "this function previously was UB and now panics for non-unique `Arc`s. Use `UniqueArc::write` instead." + )] + #[track_caller] + pub fn write(&mut self, val: T) -> &mut T { + UniqueArc::write(must_be_unique(self), val) + } + + /// Obtain a mutable pointer to the stored `MaybeUninit`. + pub fn as_mut_ptr(&mut self) -> *mut MaybeUninit { + unsafe { &mut (*self.ptr()).data } + } + + /// # Safety + /// + /// Must initialize all fields before calling this function. + #[inline] + pub unsafe fn assume_init(self) -> Arc { + Arc::from_raw_inner(ManuallyDrop::new(self).ptr().cast()) + } +} + +impl Arc<[MaybeUninit]> { + /// Create an Arc contains an array `[MaybeUninit]` of `len`. + pub fn new_uninit_slice(len: usize) -> Self { + UniqueArc::new_uninit_slice(len).shareable() + } + + /// Obtain a mutable slice to the stored `[MaybeUninit]`. + #[deprecated( + since = "0.1.8", + note = "this function previously was UB and now panics for non-unique `Arc`s. Use `UniqueArc` or `get_mut` instead." + )] + #[track_caller] + pub fn as_mut_slice(&mut self) -> &mut [MaybeUninit] { + must_be_unique(self) + } + + /// # Safety + /// + /// Must initialize all fields before calling this function. + #[inline] + pub unsafe fn assume_init(self) -> Arc<[T]> { + Arc::from_raw_inner(ManuallyDrop::new(self).ptr() as _) + } +} + +impl Clone for Arc { + #[inline] + fn clone(&self) -> Self { + // Using a relaxed ordering is alright here, as knowledge of the + // original reference prevents other threads from erroneously deleting + // the object. 
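+        //
+        // A sketch of the invariant that makes Relaxed sufficient here:
+        // every clone starts from a live handle, so the count observed by
+        // this fetch_add is at least one and cannot concurrently reach zero:
+        //
+        //     let a = Arc::new(0u8); // count == 1
+        //     let b = a.clone();     // count == 2 (this Relaxed fetch_add)
+        //     drop(b);               // count == 1 (Release fetch_sub in Drop)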
+ // + // As explained in the [Boost documentation][1], Increasing the + // reference counter can always be done with memory_order_relaxed: New + // references to an object can only be formed from an existing + // reference, and passing an existing reference from one thread to + // another must already provide any required synchronization. + // + // [1]: (www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html) + let old_size = self.inner().count.fetch_add(1, Relaxed); + + // However we need to guard against massive refcounts in case someone + // is `mem::forget`ing Arcs. If we don't do this the count can overflow + // and users will use-after free. We racily saturate to `isize::MAX` on + // the assumption that there aren't ~2 billion threads incrementing + // the reference count at once. This branch will never be taken in + // any realistic program. + // + // We abort because such a program is incredibly degenerate, and we + // don't care to support it. + if old_size > MAX_REFCOUNT { + abort(); + } + + unsafe { + Arc { + p: ptr::NonNull::new_unchecked(self.ptr()), + phantom: PhantomData, + } + } + } +} + +impl Deref for Arc { + type Target = T; + + #[inline] + fn deref(&self) -> &T { + &self.inner().data + } +} + +impl Arc { + /// Makes a mutable reference to the `Arc`, cloning if necessary + /// + /// This is functionally equivalent to [`Arc::make_mut`][mm] from the standard library. + /// + /// If this `Arc` is uniquely owned, `make_mut()` will provide a mutable + /// reference to the contents. If not, `make_mut()` will create a _new_ `Arc` + /// with a copy of the contents, update `this` to point to it, and provide + /// a mutable reference to its contents. + /// + /// This is useful for implementing copy-on-write schemes where you wish to + /// avoid copying things if your `Arc` is not shared. + /// + /// [mm]: https://doc.rust-lang.org/stable/std/sync/struct.Arc.html#method.make_mut + #[inline] + pub fn make_mut(this: &mut Self) -> &mut T { + if !this.is_unique() { + // Another pointer exists; clone + *this = Arc::new(T::clone(&this)); + } + + unsafe { + // This unsafety is ok because we're guaranteed that the pointer + // returned is the *only* pointer that will ever be returned to T. Our + // reference count is guaranteed to be 1 at this point, and we required + // the Arc itself to be `mut`, so we're returning the only possible + // reference to the inner data. + &mut (*this.ptr()).data + } + } + + /// Makes a `UniqueArc` from an `Arc`, cloning if necessary. + /// + /// If this `Arc` is uniquely owned, `make_unique()` will provide a `UniqueArc` + /// containing `this`. If not, `make_unique()` will create a _new_ `Arc` + /// with a copy of the contents, update `this` to point to it, and provide + /// a `UniqueArc` to it. + /// + /// This is useful for implementing copy-on-write schemes where you wish to + /// avoid copying things if your `Arc` is not shared. + #[inline] + pub fn make_unique(this: &mut Self) -> &mut UniqueArc { + if !this.is_unique() { + // Another pointer exists; clone + *this = Arc::new(T::clone(&this)); + } + + unsafe { + // Safety: this is either unique or just created (which is also unique) + UniqueArc::from_arc_ref(this) + } + } + + /// If we have the only reference to `T` then unwrap it. Otherwise, clone `T` and return the clone. + /// + /// Assuming `arc_t` is of type `Arc`, this function is functionally equivalent to `(*arc_t).clone()`, but will avoid cloning the inner value where possible. 
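+    ///
+    /// For example (a sketch of both paths):
+    ///
+    /// ```
+    /// use triomphe::Arc;
+    ///
+    /// let unique = Arc::new(String::from("hello"));
+    /// // Sole reference: the String is moved out rather than cloned.
+    /// assert_eq!(Arc::unwrap_or_clone(unique), "hello");
+    ///
+    /// let shared = Arc::new(String::from("hello"));
+    /// let other = shared.clone();
+    /// // Still shared: the inner value is cloned instead.
+    /// assert_eq!(Arc::unwrap_or_clone(shared), "hello");
+    /// assert_eq!(*other, "hello");
+    /// ```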
+ pub fn unwrap_or_clone(this: Arc) -> T { + Self::try_unwrap(this).unwrap_or_else(|this| T::clone(&this)) + } +} + +impl Arc { + /// Provides mutable access to the contents _if_ the `Arc` is uniquely owned. + #[inline] + pub fn get_mut(this: &mut Self) -> Option<&mut T> { + if this.is_unique() { + unsafe { + // See make_mut() for documentation of the threadsafety here. + Some(&mut (*this.ptr()).data) + } + } else { + None + } + } + + /// Provides unique access to the arc _if_ the `Arc` is uniquely owned. + pub fn get_unique(this: &mut Self) -> Option<&mut UniqueArc> { + Self::try_as_unique(this).ok() + } + + /// Whether or not the `Arc` is uniquely owned (is the refcount 1?). + pub fn is_unique(&self) -> bool { + // See the extensive discussion in [1] for why this needs to be Acquire. + // + // [1] https://github.com/servo/servo/issues/21186 + Self::count(self) == 1 + } + + /// Gets the number of [`Arc`] pointers to this allocation + pub fn count(this: &Self) -> usize { + this.inner().count.load(Acquire) + } + + /// Returns a [`UniqueArc`] if the [`Arc`] has exactly one strong reference. + /// + /// Otherwise, an [`Err`] is returned with the same [`Arc`] that was + /// passed in. + /// + /// # Examples + /// + /// ``` + /// use triomphe::{Arc, UniqueArc}; + /// + /// let x = Arc::new(3); + /// assert_eq!(UniqueArc::into_inner(Arc::try_unique(x).unwrap()), 3); + /// + /// let x = Arc::new(4); + /// let _y = Arc::clone(&x); + /// assert_eq!( + /// *Arc::try_unique(x).map(UniqueArc::into_inner).unwrap_err(), + /// 4, + /// ); + /// ``` + pub fn try_unique(this: Self) -> Result, Self> { + if this.is_unique() { + // Safety: The current arc is unique and making a `UniqueArc` + // from it is sound + unsafe { Ok(UniqueArc::from_arc(this)) } + } else { + Err(this) + } + } + + pub(crate) fn try_as_unique(this: &mut Self) -> Result<&mut UniqueArc, &mut Self> { + if this.is_unique() { + // Safety: The current arc is unique and making a `UniqueArc` + // from it is sound + unsafe { Ok(UniqueArc::from_arc_ref(this)) } + } else { + Err(this) + } + } +} + +impl Drop for Arc { + #[inline] + fn drop(&mut self) { + // Because `fetch_sub` is already atomic, we do not need to synchronize + // with other threads unless we are going to delete the object. + if self.inner().count.fetch_sub(1, Release) != 1 { + return; + } + + // FIXME(bholley): Use the updated comment when [2] is merged. + // + // This load is needed to prevent reordering of use of the data and + // deletion of the data. Because it is marked `Release`, the decreasing + // of the reference count synchronizes with this `Acquire` load. This + // means that use of the data happens before decreasing the reference + // count, which happens before this load, which happens before the + // deletion of the data. + // + // As explained in the [Boost documentation][1], + // + // > It is important to enforce any possible access to the object in one + // > thread (through an existing reference) to *happen before* deleting + // > the object in a different thread. This is achieved by a "release" + // > operation after dropping a reference (any access to the object + // > through this reference must obviously happened before), and an + // > "acquire" operation before deleting the object. 
+ // + // [1]: (www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html) + // [2]: https://github.com/rust-lang/rust/pull/41714 + self.inner().count.load(Acquire); + + unsafe { + self.drop_slow(); + } + } +} + +impl PartialEq for Arc { + fn eq(&self, other: &Arc) -> bool { + Self::ptr_eq(self, other) || *(*self) == *(*other) + } + + #[allow(clippy::partialeq_ne_impl)] + fn ne(&self, other: &Arc) -> bool { + !Self::ptr_eq(self, other) && *(*self) != *(*other) + } +} + +impl PartialOrd for Arc { + fn partial_cmp(&self, other: &Arc) -> Option { + (**self).partial_cmp(&**other) + } + + fn lt(&self, other: &Arc) -> bool { + *(*self) < *(*other) + } + + fn le(&self, other: &Arc) -> bool { + *(*self) <= *(*other) + } + + fn gt(&self, other: &Arc) -> bool { + *(*self) > *(*other) + } + + fn ge(&self, other: &Arc) -> bool { + *(*self) >= *(*other) + } +} + +impl Ord for Arc { + fn cmp(&self, other: &Arc) -> Ordering { + (**self).cmp(&**other) + } +} + +impl Eq for Arc {} + +impl fmt::Display for Arc { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fmt::Display::fmt(&**self, f) + } +} + +impl fmt::Debug for Arc { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fmt::Debug::fmt(&**self, f) + } +} + +impl fmt::Pointer for Arc { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fmt::Pointer::fmt(&self.ptr(), f) + } +} + +impl Default for Arc { + #[inline] + fn default() -> Arc { + Arc::new(Default::default()) + } +} + +impl Hash for Arc { + fn hash(&self, state: &mut H) { + (**self).hash(state) + } +} + +impl From for Arc { + #[inline] + fn from(t: T) -> Self { + Arc::new(t) + } +} + +impl borrow::Borrow for Arc { + #[inline] + fn borrow(&self) -> &T { + &**self + } +} + +impl AsRef for Arc { + #[inline] + fn as_ref(&self) -> &T { + &**self + } +} + +#[cfg(feature = "stable_deref_trait")] +unsafe impl StableDeref for Arc {} +#[cfg(feature = "stable_deref_trait")] +unsafe impl CloneStableDeref for Arc {} + +#[cfg(feature = "serde")] +impl<'de, T: Deserialize<'de>> Deserialize<'de> for Arc { + fn deserialize(deserializer: D) -> Result, D::Error> + where + D: ::serde::de::Deserializer<'de>, + { + T::deserialize(deserializer).map(Arc::new) + } +} + +#[cfg(feature = "serde")] +impl Serialize for Arc { + fn serialize(&self, serializer: S) -> Result + where + S: ::serde::ser::Serializer, + { + (**self).serialize(serializer) + } +} + +// Safety: +// This implementation must guarantee that it is sound to call replace_ptr with an unsized variant +// of the pointer retuned in `as_sized_ptr`. The basic property of Unsize coercion is that safety +// variants and layout is unaffected. The Arc does not rely on any other property of T. This makes +// any unsized ArcInner valid for being shared with the sized variant. +// This does _not_ mean that any T can be unsized into an U, but rather than if such unsizing is +// possible then it can be propagated into the Arc. +#[cfg(feature = "unsize")] +unsafe impl unsize::CoerciblePtr for Arc { + type Pointee = T; + type Output = Arc; + + fn as_sized_ptr(&mut self) -> *mut T { + // Returns a pointer to the complete inner. The unsizing itself won't care about the + // pointer value and promises not to offset it. + self.p.as_ptr() as *mut T + } + + unsafe fn replace_ptr(self, new: *mut U) -> Arc { + // Fix the provenance by ensuring that of `self` is used. 
+ let inner = ManuallyDrop::new(self); + let p = inner.p.as_ptr() as *mut T; + // Safety: This points to an ArcInner of the previous self and holds shared ownership since + // the old pointer never decremented the reference count. The caller upholds that `new` is + // an unsized version of the previous ArcInner. This assumes that unsizing to the fat + // pointer tag of an `ArcInner` and `U` is isomorphic under a direct pointer cast since + // in reality we unsized *mut T to *mut U at the address of the ArcInner. This is the case + // for all currently envisioned unsized types where the tag of T and ArcInner are simply + // the same. + Arc::from_raw_inner(p.replace_ptr(new) as *mut ArcInner) + } +} + +#[track_caller] +fn must_be_unique(arc: &mut Arc) -> &mut UniqueArc { + match Arc::try_as_unique(arc) { + Ok(unique) => unique, + Err(this) => panic!("`Arc` must be unique in order for this operation to be safe, there are currently {} copies", Arc::count(this)), + } +} + +#[cfg(test)] +mod tests { + use crate::arc::Arc; + use alloc::string::String; + use core::mem::MaybeUninit; + #[cfg(feature = "unsize")] + use unsize::{CoerceUnsize, Coercion}; + + #[test] + fn try_unwrap() { + let x = Arc::new(100usize); + let y = x.clone(); + + // The count should be two so `try_unwrap()` should fail + assert_eq!(Arc::count(&x), 2); + assert!(Arc::try_unwrap(x).is_err()); + + // Since `x` has now been dropped, the count should be 1 + // and `try_unwrap()` should succeed + assert_eq!(Arc::count(&y), 1); + assert_eq!(Arc::try_unwrap(y), Ok(100)); + } + + #[test] + #[cfg(feature = "unsize")] + fn coerce_to_slice() { + let x = Arc::new([0u8; 4]); + let y: Arc<[u8]> = x.clone().unsize(Coercion::to_slice()); + assert_eq!((*x).as_ptr(), (*y).as_ptr()); + } + + #[test] + #[cfg(feature = "unsize")] + fn coerce_to_dyn() { + let x: Arc<_> = Arc::new(|| 42u32); + let x: Arc<_> = x.unsize(Coercion::<_, dyn Fn() -> u32>::to_fn()); + assert_eq!((*x)(), 42); + } + + #[test] + #[allow(deprecated)] + fn maybeuninit() { + let mut arc: Arc> = Arc::new_uninit(); + arc.write(999); + + let arc = unsafe { arc.assume_init() }; + assert_eq!(*arc, 999); + } + + #[test] + #[allow(deprecated)] + #[should_panic = "`Arc` must be unique in order for this operation to be safe"] + fn maybeuninit_ub_to_proceed() { + let mut uninit = Arc::new_uninit(); + let clone = uninit.clone(); + + let x: &MaybeUninit = &*clone; + + // This write invalidates `x` reference + uninit.write(String::from("nonononono")); + + // Read invalidated reference to trigger UB + let _ = &*x; + } + + #[test] + #[allow(deprecated)] + #[should_panic = "`Arc` must be unique in order for this operation to be safe"] + fn maybeuninit_slice_ub_to_proceed() { + let mut uninit = Arc::new_uninit_slice(13); + let clone = uninit.clone(); + + let x: &[MaybeUninit] = &*clone; + + // This write invalidates `x` reference + uninit.as_mut_slice()[0].write(String::from("nonononono")); + + // Read invalidated reference to trigger UB + let _ = &*x; + } + + #[test] + fn maybeuninit_array() { + let mut arc: Arc<[MaybeUninit<_>]> = Arc::new_uninit_slice(5); + assert!(arc.is_unique()); + #[allow(deprecated)] + for (uninit, index) in arc.as_mut_slice().iter_mut().zip(0..5) { + let ptr = uninit.as_mut_ptr(); + unsafe { core::ptr::write(ptr, index) }; + } + + let arc = unsafe { arc.assume_init() }; + assert!(arc.is_unique()); + // Using clone to that the layout generated in new_uninit_slice is compatible + // with ArcInner. 
+ let arcs = [ + arc.clone(), + arc.clone(), + arc.clone(), + arc.clone(), + arc.clone(), + ]; + assert_eq!(6, Arc::count(&arc)); + // If the layout is not compatible, then the data might be corrupted. + assert_eq!(*arc, [0, 1, 2, 3, 4]); + + // Drop the arcs and check the count and the content to + // make sure it isn't corrupted. + drop(arcs); + assert!(arc.is_unique()); + assert_eq!(*arc, [0, 1, 2, 3, 4]); + } + + #[test] + fn roundtrip() { + let arc: Arc = Arc::new(0usize); + let ptr = Arc::into_raw(arc); + unsafe { + let _arc = Arc::from_raw(ptr); + } + } +} diff --git a/vendor/triomphe/src/arc_borrow.rs b/vendor/triomphe/src/arc_borrow.rs new file mode 100644 index 000000000..d53e1a5ea --- /dev/null +++ b/vendor/triomphe/src/arc_borrow.rs @@ -0,0 +1,116 @@ +use core::mem; +use core::mem::ManuallyDrop; +use core::ops::Deref; +use core::ptr; + +use super::Arc; + +/// A "borrowed `Arc`". This is a pointer to +/// a T that is known to have been allocated within an +/// `Arc`. +/// +/// This is equivalent in guarantees to `&Arc`, however it is +/// a bit more flexible. To obtain an `&Arc` you must have +/// an `Arc` instance somewhere pinned down until we're done with it. +/// It's also a direct pointer to `T`, so using this involves less pointer-chasing +/// +/// However, C++ code may hand us refcounted things as pointers to T directly, +/// so we have to conjure up a temporary `Arc` on the stack each time. The +/// same happens for when the object is managed by a `OffsetArc`. +/// +/// `ArcBorrow` lets us deal with borrows of known-refcounted objects +/// without needing to worry about where the `Arc` is. +#[derive(Debug, Eq, PartialEq)] +#[repr(transparent)] +pub struct ArcBorrow<'a, T: ?Sized + 'a>(pub(crate) &'a T); + +impl<'a, T> Copy for ArcBorrow<'a, T> {} +impl<'a, T> Clone for ArcBorrow<'a, T> { + #[inline] + fn clone(&self) -> Self { + *self + } +} + +impl<'a, T> ArcBorrow<'a, T> { + /// Clone this as an `Arc`. This bumps the refcount. + #[inline] + pub fn clone_arc(&self) -> Arc { + let arc = unsafe { Arc::from_raw(self.0) }; + // addref it! + mem::forget(arc.clone()); + arc + } + + /// For constructing from a reference known to be Arc-backed, + /// e.g. if we obtain such a reference over FFI + /// TODO: should from_ref be relaxed to unsized types? It can't be + /// converted back to an Arc right now for unsized types. + #[inline] + pub unsafe fn from_ref(r: &'a T) -> Self { + ArcBorrow(r) + } + + /// Compare two `ArcBorrow`s via pointer equality. Will only return + /// true if they come from the same allocation + #[inline] + pub fn ptr_eq(this: &Self, other: &Self) -> bool { + ptr::eq(this.0 as *const T, other.0 as *const T) + } + + /// Temporarily converts |self| into a bonafide Arc and exposes it to the + /// provided callback. The refcount is not modified. + #[inline] + pub fn with_arc(&self, f: F) -> U + where + F: FnOnce(&Arc) -> U, + T: 'static, + { + // Synthesize transient Arc, which never touches the refcount. + let transient = unsafe { ManuallyDrop::new(Arc::from_raw(self.0)) }; + + // Expose the transient Arc to the callback, which may clone it if it wants + // and forward the result to the user + f(&transient) + } + + /// Similar to deref, but uses the lifetime |a| rather than the lifetime of + /// self, which is incompatible with the signature of the Deref trait. 
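+    ///
+    /// For example (a sketch of why the longer lifetime matters):
+    ///
+    /// ```
+    /// use triomphe::Arc;
+    ///
+    /// let arc = Arc::new(String::from("hello"));
+    /// let r: &String = {
+    ///     let borrow = arc.borrow_arc();
+    ///     borrow.get() // ties the borrow to `arc`, not to `borrow`
+    /// };
+    /// assert_eq!(*r, "hello");
+    /// ```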
+    #[inline]
+    pub fn get(&self) -> &'a T {
+        self.0
+    }
+}
+
+impl<'a, T> Deref for ArcBorrow<'a, T> {
+    type Target = T;
+
+    #[inline]
+    fn deref(&self) -> &T {
+        self.0
+    }
+}
+
+// Safety:
+// This implementation must guarantee that it is sound to call replace_ptr with an unsized variant
+// of the pointer returned in `as_sized_ptr`. We leverage unsizing the contained reference. This
+// continues to point to the data of an ArcInner. The reference count remains untouched which is
+// correct since the number of owners did not change. This implies the returned instance fulfills
+// its safety invariants.
+#[cfg(feature = "unsize")]
+unsafe impl<'lt, T: 'lt, U: ?Sized + 'lt> unsize::CoerciblePtr<U> for ArcBorrow<'lt, T> {
+    type Pointee = T;
+    type Output = ArcBorrow<'lt, U>;
+
+    fn as_sized_ptr(&mut self) -> *mut T {
+        // Returns a pointer to the inner data. We do not need to care about any particular
+        // provenance here, only the pointer value, which we need to reconstruct the new pointer.
+        self.0 as *const T as *mut T
+    }
+
+    unsafe fn replace_ptr(self, new: *mut U) -> ArcBorrow<'lt, U> {
+        let inner = ManuallyDrop::new(self);
+        // Safety: backed by the same Arc that backed `self`.
+        ArcBorrow(inner.0.replace_ptr(new))
+    }
+}
diff --git a/vendor/triomphe/src/arc_swap_support.rs b/vendor/triomphe/src/arc_swap_support.rs
new file mode 100644
index 000000000..195854ed3
--- /dev/null
+++ b/vendor/triomphe/src/arc_swap_support.rs
@@ -0,0 +1,42 @@
+use arc_swap::RefCnt;
+
+use crate::{Arc, ThinArc};
+use core::ffi::c_void;
+
+unsafe impl<H, T> RefCnt for ThinArc<H, T> {
+    type Base = c_void;
+
+    #[inline]
+    fn into_ptr(me: Self) -> *mut Self::Base {
+        ThinArc::into_raw(me) as *mut _
+    }
+
+    #[inline]
+    fn as_ptr(me: &Self) -> *mut Self::Base {
+        ThinArc::as_ptr(me) as *mut _
+    }
+
+    #[inline]
+    unsafe fn from_ptr(ptr: *const Self::Base) -> Self {
+        ThinArc::from_raw(ptr)
+    }
+}
+
+unsafe impl<T> RefCnt for Arc<T> {
+    type Base = T;
+
+    #[inline]
+    fn into_ptr(me: Self) -> *mut Self::Base {
+        Arc::into_raw(me) as *mut _
+    }
+
+    #[inline]
+    fn as_ptr(me: &Self) -> *mut Self::Base {
+        Arc::as_ptr(me) as *mut _
+    }
+
+    #[inline]
+    unsafe fn from_ptr(ptr: *const Self::Base) -> Self {
+        Arc::from_raw(ptr)
+    }
+}
diff --git a/vendor/triomphe/src/arc_union.rs b/vendor/triomphe/src/arc_union.rs
new file mode 100644
index 000000000..6d5cddc8f
--- /dev/null
+++ b/vendor/triomphe/src/arc_union.rs
@@ -0,0 +1,139 @@
+use core::fmt;
+use core::marker::PhantomData;
+use core::ptr;
+use core::usize;
+
+use super::{Arc, ArcBorrow};
+
+/// A tagged union that can represent `Arc<A>` or `Arc<B>` while only consuming a
+/// single word. The type is also `NonNull`, and thus can be stored in an Option
+/// without increasing size.
+///
+/// This is functionally equivalent to
+/// `enum ArcUnion<A, B> { First(Arc<A>), Second(Arc<B>) }` but only takes
+/// up a single word of stack space.
+///
+/// This could probably be extended to support four types if necessary.
+pub struct ArcUnion<A, B> {
+    p: ptr::NonNull<()>,
+    phantom_a: PhantomData<A>,
+    phantom_b: PhantomData<B>,
+}
+
+unsafe impl<A: Send + Sync, B: Send + Sync> Send for ArcUnion<A, B> {}
+unsafe impl<A: Send + Sync, B: Send + Sync> Sync for ArcUnion<A, B> {}
+
+impl<A: PartialEq, B: PartialEq> PartialEq for ArcUnion<A, B> {
+    fn eq(&self, other: &Self) -> bool {
+        use crate::ArcUnionBorrow::*;
+        match (self.borrow(), other.borrow()) {
+            (First(x), First(y)) => x == y,
+            (Second(x), Second(y)) => x == y,
+            (_, _) => false,
+        }
+    }
+}
+
+/// This represents a borrow of an `ArcUnion`.
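+///
+/// For example (a sketch of dispatching on the tag bit):
+///
+/// ```
+/// use triomphe::{Arc, ArcUnion, ArcUnionBorrow};
+///
+/// let u: ArcUnion<String, u32> = ArcUnion::from_second(Arc::new(7));
+/// assert!(u.is_second());
+/// match u.borrow() {
+///     ArcUnionBorrow::First(s) => println!("first: {}", *s),
+///     ArcUnionBorrow::Second(n) => assert_eq!(*n, 7),
+/// }
+/// ```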
+#[derive(Debug)] +pub enum ArcUnionBorrow<'a, A: 'a, B: 'a> { + First(ArcBorrow<'a, A>), + Second(ArcBorrow<'a, B>), +} + +impl ArcUnion { + unsafe fn new(ptr: *mut ()) -> Self { + ArcUnion { + p: ptr::NonNull::new_unchecked(ptr), + phantom_a: PhantomData, + phantom_b: PhantomData, + } + } + + /// Returns true if the two values are pointer-equal. + #[inline] + pub fn ptr_eq(this: &Self, other: &Self) -> bool { + this.p == other.p + } + + /// Returns an enum representing a borrow of either A or B. + pub fn borrow(&self) -> ArcUnionBorrow { + if self.is_first() { + let ptr = self.p.as_ptr() as *const A; + let borrow = unsafe { ArcBorrow::from_ref(&*ptr) }; + ArcUnionBorrow::First(borrow) + } else { + let ptr = ((self.p.as_ptr() as usize) & !0x1) as *const B; + let borrow = unsafe { ArcBorrow::from_ref(&*ptr) }; + ArcUnionBorrow::Second(borrow) + } + } + + /// Creates an `ArcUnion` from an instance of the first type. + #[inline] + pub fn from_first(other: Arc) -> Self { + unsafe { Self::new(Arc::into_raw(other) as *mut _) } + } + + /// Creates an `ArcUnion` from an instance of the second type. + #[inline] + pub fn from_second(other: Arc) -> Self { + unsafe { Self::new(((Arc::into_raw(other) as usize) | 0x1) as *mut _) } + } + + /// Returns true if this `ArcUnion` contains the first type. + #[inline] + pub fn is_first(&self) -> bool { + self.p.as_ptr() as usize & 0x1 == 0 + } + + /// Returns true if this `ArcUnion` contains the second type. + #[inline] + pub fn is_second(&self) -> bool { + !self.is_first() + } + + /// Returns a borrow of the first type if applicable, otherwise `None`. + pub fn as_first(&self) -> Option> { + match self.borrow() { + ArcUnionBorrow::First(x) => Some(x), + ArcUnionBorrow::Second(_) => None, + } + } + + /// Returns a borrow of the second type if applicable, otherwise None. + pub fn as_second(&self) -> Option> { + match self.borrow() { + ArcUnionBorrow::First(_) => None, + ArcUnionBorrow::Second(x) => Some(x), + } + } +} + +impl Clone for ArcUnion { + fn clone(&self) -> Self { + match self.borrow() { + ArcUnionBorrow::First(x) => ArcUnion::from_first(x.clone_arc()), + ArcUnionBorrow::Second(x) => ArcUnion::from_second(x.clone_arc()), + } + } +} + +impl Drop for ArcUnion { + fn drop(&mut self) { + match self.borrow() { + ArcUnionBorrow::First(x) => unsafe { + let _ = Arc::from_raw(&*x); + }, + ArcUnionBorrow::Second(x) => unsafe { + let _ = Arc::from_raw(&*x); + }, + } + } +} + +impl fmt::Debug for ArcUnion { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fmt::Debug::fmt(&self.borrow(), f) + } +} diff --git a/vendor/triomphe/src/header.rs b/vendor/triomphe/src/header.rs new file mode 100644 index 000000000..e35ec48b0 --- /dev/null +++ b/vendor/triomphe/src/header.rs @@ -0,0 +1,378 @@ +use alloc::alloc::Layout; +use alloc::boxed::Box; +use alloc::string::String; +use alloc::vec::Vec; +use core::iter::{ExactSizeIterator, Iterator}; +use core::marker::PhantomData; +use core::mem::{self, ManuallyDrop}; +use core::ptr::{self, addr_of_mut}; +use core::usize; + +use super::{Arc, ArcInner}; + +/// Structure to allow Arc-managing some fixed-sized data and a variably-sized +/// slice in a single allocation. +#[derive(Debug, Eq, PartialEq, Hash, PartialOrd)] +#[repr(C)] +pub struct HeaderSlice { + /// The fixed-sized data. + pub header: H, + + /// The dynamically-sized data. + pub slice: T, +} + +impl Arc> { + /// Creates an Arc for a HeaderSlice using the given header struct and + /// iterator to generate the slice. The resulting Arc will be fat. 
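+    ///
+    /// For example:
+    ///
+    /// ```
+    /// use triomphe::{Arc, HeaderSlice};
+    ///
+    /// let arc: Arc<HeaderSlice<u32, [u16]>> =
+    ///     Arc::from_header_and_iter(9u32, IntoIterator::into_iter([1u16, 2, 3]));
+    /// assert_eq!(arc.header, 9);
+    /// assert_eq!(arc.slice, [1, 2, 3]);
+    /// ```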
+ pub fn from_header_and_iter(header: H, mut items: I) -> Self + where + I: Iterator + ExactSizeIterator, + { + assert_ne!(mem::size_of::(), 0, "Need to think about ZST"); + + let num_items = items.len(); + + let inner = Arc::allocate_for_header_and_slice(num_items); + + unsafe { + // Write the data. + // + // Note that any panics here (i.e. from the iterator) are safe, since + // we'll just leak the uninitialized memory. + ptr::write(&mut ((*inner.as_ptr()).data.header), header); + if num_items != 0 { + let mut current = (*inner.as_ptr()).data.slice.as_mut_ptr(); + for _ in 0..num_items { + ptr::write( + current, + items + .next() + .expect("ExactSizeIterator over-reported length"), + ); + current = current.offset(1); + } + assert!( + items.next().is_none(), + "ExactSizeIterator under-reported length" + ); + } + assert!( + items.next().is_none(), + "ExactSizeIterator under-reported length" + ); + } + + // Safety: ptr is valid & the inner structure is fully initialized + Arc { + p: inner, + phantom: PhantomData, + } + } + + /// Creates an Arc for a HeaderSlice using the given header struct and + /// iterator to generate the slice. The resulting Arc will be fat. + pub fn from_header_and_slice(header: H, items: &[T]) -> Self + where + T: Copy, + { + assert_ne!(mem::size_of::(), 0, "Need to think about ZST"); + + let num_items = items.len(); + + let inner = Arc::allocate_for_header_and_slice(num_items); + + unsafe { + // Write the data. + ptr::write(&mut ((*inner.as_ptr()).data.header), header); + let dst = (*inner.as_ptr()).data.slice.as_mut_ptr(); + ptr::copy_nonoverlapping(items.as_ptr(), dst, num_items); + } + + // Safety: ptr is valid & the inner structure is fully initialized + Arc { + p: inner, + phantom: PhantomData, + } + } + + /// Creates an Arc for a HeaderSlice using the given header struct and + /// vec to generate the slice. The resulting Arc will be fat. + pub fn from_header_and_vec(header: H, mut v: Vec) -> Self { + let len = v.len(); + + let inner = Arc::allocate_for_header_and_slice(len); + + unsafe { + // Safety: inner is a valid pointer, so this can't go out of bounds + let dst = addr_of_mut!((*inner.as_ptr()).data.header); + + // Safety: `dst` is valid for writes (just allocated) + ptr::write(dst, header); + } + + unsafe { + let src = v.as_mut_ptr(); + + // Safety: inner is a valid pointer, so this can't go out of bounds + let dst = addr_of_mut!((*inner.as_ptr()).data.slice) as *mut T; + + // Safety: + // - `src` is valid for reads for `len` (got from `Vec`) + // - `dst` is valid for writes for `len` (just allocated, with layout for appropriate slice) + // - `src` and `dst` don't overlap (separate allocations) + ptr::copy_nonoverlapping(src, dst, len); + + // Deallocate vec without dropping `T` + // + // Safety: 0..0 elements are always initialized, 0 <= cap for any cap + v.set_len(0); + } + + // Safety: ptr is valid & the inner structure is fully initialized + Arc { + p: inner, + phantom: PhantomData, + } + } +} + +impl Arc> { + /// Creates an Arc for a HeaderSlice using the given header struct and + /// a str slice to generate the slice. The resulting Arc will be fat. + pub fn from_header_and_str(header: H, string: &str) -> Self { + let bytes = Arc::from_header_and_slice(header, string.as_bytes()); + + // Safety: `ArcInner` and `HeaderSlice` are `repr(C)`, `str` has the same layout as `[u8]`, + // thus it's ok to "transmute" between `Arc>` and `Arc>`. + // + // `bytes` are a valid string since we've just got them from a valid `str`. 
+ unsafe { Arc::from_raw_inner(Arc::into_raw_inner(bytes) as _) } + } +} + +/// Header data with an inline length. Consumers that use HeaderWithLength as the +/// Header type in HeaderSlice can take advantage of ThinArc. +#[derive(Debug, Eq, PartialEq, Hash, PartialOrd)] +#[repr(C)] +pub struct HeaderWithLength { + /// The fixed-sized data. + pub header: H, + + /// The slice length. + pub length: usize, +} + +impl HeaderWithLength { + /// Creates a new HeaderWithLength. + #[inline] + pub fn new(header: H, length: usize) -> Self { + HeaderWithLength { header, length } + } +} + +impl From>> for Arc { + fn from(this: Arc>) -> Self { + debug_assert_eq!( + Layout::for_value::>(&this), + Layout::for_value::(&this.slice) + ); + + // Safety: `HeaderSlice<(), T>` and `T` has the same layout + unsafe { Arc::from_raw_inner(Arc::into_raw_inner(this) as _) } + } +} + +impl From> for Arc> { + fn from(this: Arc) -> Self { + // Safety: `T` and `HeaderSlice<(), T>` has the same layout + unsafe { Arc::from_raw_inner(Arc::into_raw_inner(this) as _) } + } +} + +impl From<&[T]> for Arc<[T]> { + fn from(slice: &[T]) -> Self { + Arc::from_header_and_slice((), slice).into() + } +} + +impl From<&str> for Arc { + fn from(s: &str) -> Self { + Arc::from_header_and_str((), s).into() + } +} + +impl From for Arc { + fn from(s: String) -> Self { + Self::from(&s[..]) + } +} + +// FIXME: once `pointer::with_metadata_of` is stable or +// implementable on stable without assuming ptr layout +// this will be able to accept `T: ?Sized`. +impl From> for Arc { + fn from(b: Box) -> Self { + let layout = Layout::for_value::(&b); + + // Safety: the closure only changes the type of the pointer + let inner = unsafe { Self::allocate_for_layout(layout, |mem| mem as *mut ArcInner) }; + + unsafe { + let src = Box::into_raw(b); + + // Safety: inner is a valid pointer, so this can't go out of bounds + let dst = addr_of_mut!((*inner.as_ptr()).data); + + // Safety: + // - `src` is valid for reads (got from `Box`) + // - `dst` is valid for writes (just allocated) + // - `src` and `dst` don't overlap (separate allocations) + ptr::copy_nonoverlapping(src, dst, 1); + + // Deallocate box without dropping `T` + // + // Safety: + // - `src` has been got from `Box::into_raw` + // - `ManuallyDrop` is guaranteed to have the same layout as `T` + Box::>::from_raw(src as _); + } + + Arc { + p: inner, + phantom: PhantomData, + } + } +} + +impl From> for Arc<[T]> { + fn from(v: Vec) -> Self { + Arc::from_header_and_vec((), v).into() + } +} + +pub(crate) type HeaderSliceWithLength = HeaderSlice, T>; + +#[cfg(test)] +mod tests { + use alloc::boxed::Box; + use alloc::string::String; + use alloc::vec; + use core::iter; + + use crate::{Arc, HeaderSlice}; + + #[test] + fn from_header_and_iter_smoke() { + let arc = Arc::from_header_and_iter( + (42u32, 17u8), + IntoIterator::into_iter([1u16, 2, 3, 4, 5, 6, 7]), + ); + + assert_eq!(arc.header, (42, 17)); + assert_eq!(arc.slice, [1, 2, 3, 4, 5, 6, 7]); + } + + #[test] + fn from_header_and_slice_smoke() { + let arc = Arc::from_header_and_slice((42u32, 17u8), &[1u16, 2, 3, 4, 5, 6, 7]); + + assert_eq!(arc.header, (42, 17)); + assert_eq!(arc.slice, [1u16, 2, 3, 4, 5, 6, 7]); + } + + #[test] + fn from_header_and_vec_smoke() { + let arc = Arc::from_header_and_vec((42u32, 17u8), vec![1u16, 2, 3, 4, 5, 6, 7]); + + assert_eq!(arc.header, (42, 17)); + assert_eq!(arc.slice, [1u16, 2, 3, 4, 5, 6, 7]); + } + + #[test] + fn from_header_and_iter_empty() { + let arc = Arc::from_header_and_iter((42u32, 17u8), iter::empty::()); + + 
assert_eq!(arc.header, (42, 17));
+        assert_eq!(arc.slice, []);
+    }
+
+    #[test]
+    fn from_header_and_slice_empty() {
+        let arc = Arc::from_header_and_slice((42u32, 17u8), &[1u16; 0]);
+
+        assert_eq!(arc.header, (42, 17));
+        assert_eq!(arc.slice, []);
+    }
+
+    #[test]
+    fn from_header_and_vec_empty() {
+        let arc = Arc::from_header_and_vec((42u32, 17u8), vec![1u16; 0]);
+
+        assert_eq!(arc.header, (42, 17));
+        assert_eq!(arc.slice, []);
+    }
+
+    #[test]
+    fn issue_13_empty() {
+        crate::Arc::from_header_and_iter((), iter::empty::<usize>());
+    }
+
+    #[test]
+    fn issue_13_consumption() {
+        let s: &[u8] = &[0u8; 255];
+        crate::Arc::from_header_and_iter((), s.iter().copied());
+    }
+
+    #[test]
+    fn from_header_and_str_smoke() {
+        let a = Arc::from_header_and_str(
+            42,
+            "The answer to the ultimate question of life, the universe, and everything",
+        );
+        assert_eq!(a.header, 42);
+        assert_eq!(
+            &a.slice,
+            "The answer to the ultimate question of life, the universe, and everything"
+        );
+
+        let empty = Arc::from_header_and_str((), "");
+        assert_eq!(empty.header, ());
+        assert_eq!(&empty.slice, "");
+    }
+
+    #[test]
+    fn erase_and_create_from_thin_air_header() {
+        let a: Arc<HeaderSlice<(), [u32]>> = Arc::from_header_and_slice((), &[12, 17, 16]);
+        let b: Arc<[u32]> = a.into();
+
+        assert_eq!(&*b, [12, 17, 16]);
+
+        let c: Arc<HeaderSlice<(), [u32]>> = b.into();
+
+        assert_eq!(&c.slice, [12, 17, 16]);
+        assert_eq!(c.header, ());
+    }
+
+    #[test]
+    fn from_box_and_vec() {
+        let b = Box::new(String::from("xxx"));
+        let b = Arc::<String>::from(b);
+        assert_eq!(&*b, "xxx");
+
+        let v = vec![String::from("1"), String::from("2"), String::from("3")];
+        let v = Arc::<[_]>::from(v);
+        assert_eq!(
+            &*v,
+            [String::from("1"), String::from("2"), String::from("3")]
+        );
+
+        let mut v = vec![String::from("1"), String::from("2"), String::from("3")];
+        v.reserve(10);
+        let v = Arc::<[_]>::from(v);
+        assert_eq!(
+            &*v,
+            [String::from("1"), String::from("2"), String::from("3")]
        );
+    }
+}
diff --git a/vendor/triomphe/src/lib.rs b/vendor/triomphe/src/lib.rs
new file mode 100644
index 000000000..13d568bda
--- /dev/null
+++ b/vendor/triomphe/src/lib.rs
@@ -0,0 +1,94 @@
+// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! Fork of Arc. This has the following advantages over std::sync::Arc:
+//!
+//! * `triomphe::Arc` doesn't support weak references: we save space by excluding the weak reference count, and we don't do extra read-modify-update operations to handle the possibility of weak references.
+//! * `triomphe::UniqueArc` allows one to construct a temporarily-mutable `Arc` which can be converted to a regular `triomphe::Arc` later
+//! * `triomphe::OffsetArc` can be used transparently from C++ code and is compatible with (and can be converted to/from) `triomphe::Arc`
+//! * `triomphe::ArcBorrow` is functionally similar to `&triomphe::Arc`, however in memory it's simply `&T`. This makes it more flexible for FFI; the source of the borrow need not be an `Arc` pinned on the stack (and can instead be a pointer from C++, or an `OffsetArc`). Additionally, this helps avoid pointer-chasing.
+//! * `triomphe::Arc` can be constructed for dynamically-sized types via `from_header_and_iter`
+//! * `triomphe::ThinArc` provides thin-pointer `Arc`s to dynamically sized types
+//! * `triomphe::ArcUnion` is a union of two `triomphe::Arc`s which fits inside one word of memory
+
+#![allow(missing_docs)]
+#![cfg_attr(not(feature = "std"), no_std)]
+
+extern crate alloc;
+#[cfg(feature = "std")]
+extern crate core;
+
+#[cfg(feature = "arc-swap")]
+extern crate arc_swap;
+#[cfg(feature = "serde")]
+extern crate serde;
+#[cfg(feature = "stable_deref_trait")]
+extern crate stable_deref_trait;
+#[cfg(feature = "unsize")]
+extern crate unsize;
+
+/// Calculates the offset of the specified field from the start of the named struct.
+/// This macro cannot be const until `feature(const_ptr_offset_from)` is stable.
+macro_rules! offset_of {
+    ($ty: path, $field: tt) => {{
+        // ensure the type is a named struct
+        // ensure the field exists and is accessible
+        let $ty { $field: _, .. };
+
+        let uninit = <::core::mem::MaybeUninit<$ty>>::uninit(); // const since 1.36
+
+        let base_ptr: *const $ty = uninit.as_ptr(); // const since 1.59
+
+        #[allow(unused_unsafe)]
+        let field_ptr = unsafe { ::core::ptr::addr_of!((*base_ptr).$field) }; // since 1.51
+
+        // // the const version requires feature(const_ptr_offset_from)
+        // // https://github.com/rust-lang/rust/issues/92980
+        // #[allow(unused_unsafe)]
+        // unsafe { (field_ptr as *const u8).offset_from(base_ptr as *const u8) as usize }
+
+        (field_ptr as usize) - (base_ptr as usize)
+    }};
+}
+
+mod arc;
+mod arc_borrow;
+#[cfg(feature = "arc-swap")]
+mod arc_swap_support;
+mod arc_union;
+mod header;
+mod offset_arc;
+mod thin_arc;
+mod unique_arc;
+
+pub use arc::*;
+pub use arc_borrow::*;
+pub use arc_union::*;
+pub use header::*;
+pub use offset_arc::*;
+pub use thin_arc::*;
+pub use unique_arc::*;
+
+#[cfg(feature = "std")]
+use std::process::abort;
+
+// `no_std`-compatible abort by forcing a panic while already panicking.
+#[cfg(not(feature = "std"))]
+#[cold]
+fn abort() -> ! {
+    struct PanicOnDrop;
+    impl Drop for PanicOnDrop {
+        fn drop(&mut self) {
+            panic!()
+        }
+    }
+    let _double_panicer = PanicOnDrop;
+    panic!();
+}
diff --git a/vendor/triomphe/src/offset_arc.rs b/vendor/triomphe/src/offset_arc.rs
new file mode 100644
index 000000000..9345416d4
--- /dev/null
+++ b/vendor/triomphe/src/offset_arc.rs
@@ -0,0 +1,134 @@
+use core::fmt;
+use core::marker::PhantomData;
+use core::mem::ManuallyDrop;
+use core::ops::Deref;
+use core::ptr;
+
+use super::{Arc, ArcBorrow};
+
+/// An `Arc`, except it holds a pointer to the T instead of to the
+/// entire ArcInner.
+///
+/// An `OffsetArc` has the same layout and ABI as a non-null
+/// `const T*` in C, and may be used in FFI function signatures.
+///
+/// ```text
+///  Arc<T>    OffsetArc<T>
+///   |          |
+///   v          v
+///  ---------------------
+/// | RefCount | T (data) | [ArcInner<T>]
+///  ---------------------
+/// ```
+///
+/// This means that this is a direct pointer to
+/// its contained data (and can be read from by both C++ and Rust),
+/// but we can also convert it to a "regular" `Arc` by removing the offset.
+///
+/// This is very useful if you have an Arc-containing struct shared between Rust and C++,
+/// and wish for C++ to be able to read the data behind the `Arc` without incurring
+/// an FFI call overhead.
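+///
+/// A sketch of moving between the two representations (neither
+/// conversion touches the refcount):
+///
+/// ```
+/// use triomphe::{Arc, OffsetArc};
+///
+/// let offset: OffsetArc<u32> = Arc::into_raw_offset(Arc::new(7));
+/// assert_eq!(*offset, 7); // Deref goes straight to the data
+/// let arc: Arc<u32> = Arc::from_raw_offset(offset);
+/// assert_eq!(*arc, 7);
+/// ```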
+#[derive(Eq)] +#[repr(transparent)] +pub struct OffsetArc { + pub(crate) ptr: ptr::NonNull, + pub(crate) phantom: PhantomData, +} + +unsafe impl Send for OffsetArc {} +unsafe impl Sync for OffsetArc {} + +impl Deref for OffsetArc { + type Target = T; + #[inline] + fn deref(&self) -> &Self::Target { + unsafe { &*self.ptr.as_ptr() } + } +} + +impl Clone for OffsetArc { + #[inline] + fn clone(&self) -> Self { + Arc::into_raw_offset(self.clone_arc()) + } +} + +impl Drop for OffsetArc { + fn drop(&mut self) { + let _ = Arc::from_raw_offset(OffsetArc { + ptr: self.ptr, + phantom: PhantomData, + }); + } +} + +impl fmt::Debug for OffsetArc { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fmt::Debug::fmt(&**self, f) + } +} + +impl PartialEq for OffsetArc { + fn eq(&self, other: &OffsetArc) -> bool { + *(*self) == *(*other) + } + + #[allow(clippy::partialeq_ne_impl)] + fn ne(&self, other: &OffsetArc) -> bool { + *(*self) != *(*other) + } +} + +impl OffsetArc { + /// Temporarily converts |self| into a bonafide Arc and exposes it to the + /// provided callback. The refcount is not modified. + #[inline] + pub fn with_arc(&self, f: F) -> U + where + F: FnOnce(&Arc) -> U, + { + // Synthesize transient Arc, which never touches the refcount of the ArcInner. + let transient = unsafe { ManuallyDrop::new(Arc::from_raw(self.ptr.as_ptr())) }; + + // Expose the transient Arc to the callback, which may clone it if it wants + // and forward the result to the user + f(&transient) + } + + /// If uniquely owned, provide a mutable reference + /// Else create a copy, and mutate that + /// + /// This is functionally the same thing as `Arc::make_mut` + #[inline] + pub fn make_mut(&mut self) -> &mut T + where + T: Clone, + { + unsafe { + // extract the OffsetArc as an owned variable + let this = ptr::read(self); + // treat it as a real Arc + let mut arc = Arc::from_raw_offset(this); + // obtain the mutable reference. Cast away the lifetime + // This may mutate `arc` + let ret = Arc::make_mut(&mut arc) as *mut _; + // Store the possibly-mutated arc back inside, after converting + // it to a OffsetArc again + ptr::write(self, Arc::into_raw_offset(arc)); + &mut *ret + } + } + + /// Clone it as an `Arc` + #[inline] + pub fn clone_arc(&self) -> Arc { + OffsetArc::with_arc(self, |a| a.clone()) + } + + /// Produce a pointer to the data that can be converted back + /// to an `Arc` + #[inline] + pub fn borrow_arc(&self) -> ArcBorrow<'_, T> { + ArcBorrow(&**self) + } +} diff --git a/vendor/triomphe/src/thin_arc.rs b/vendor/triomphe/src/thin_arc.rs new file mode 100644 index 000000000..e048468ad --- /dev/null +++ b/vendor/triomphe/src/thin_arc.rs @@ -0,0 +1,329 @@ +use core::ffi::c_void; +use core::fmt; +use core::hash::{Hash, Hasher}; +use core::iter::{ExactSizeIterator, Iterator}; +use core::marker::PhantomData; +use core::mem::ManuallyDrop; +use core::ops::Deref; +use core::ptr; +use core::usize; + +use super::{Arc, ArcInner, HeaderSliceWithLength, HeaderWithLength}; + +/// A "thin" `Arc` containing dynamically sized data +/// +/// This is functionally equivalent to `Arc<(H, [T])>` +/// +/// When you create an `Arc` containing a dynamically sized type +/// like `HeaderSlice`, the `Arc` is represented on the stack +/// as a "fat pointer", where the length of the slice is stored +/// alongside the `Arc`'s pointer. In some situations you may wish to +/// have a thin pointer instead, perhaps for FFI compatibility +/// or space efficiency. +/// +/// Note that we use `[T; 0]` in order to have the right alignment for `T`. 
+/// +/// `ThinArc` solves this by storing the length in the allocation itself, +/// via `HeaderSliceWithLength`. +#[repr(transparent)] +pub struct ThinArc { + ptr: ptr::NonNull>>, + phantom: PhantomData<(H, T)>, +} + +unsafe impl Send for ThinArc {} +unsafe impl Sync for ThinArc {} + +// Synthesize a fat pointer from a thin pointer. +// +// See the comment around the analogous operation in from_header_and_iter. +fn thin_to_thick( + thin: *mut ArcInner>, +) -> *mut ArcInner> { + let len = unsafe { (*thin).data.header.length }; + let fake_slice = ptr::slice_from_raw_parts_mut(thin as *mut T, len); + + fake_slice as *mut ArcInner> +} + +impl ThinArc { + /// Temporarily converts |self| into a bonafide Arc and exposes it to the + /// provided callback. The refcount is not modified. + #[inline] + pub fn with_arc(&self, f: F) -> U + where + F: FnOnce(&Arc>) -> U, + { + // Synthesize transient Arc, which never touches the refcount of the ArcInner. + let transient = unsafe { + ManuallyDrop::new(Arc { + p: ptr::NonNull::new_unchecked(thin_to_thick(self.ptr.as_ptr())), + phantom: PhantomData, + }) + }; + + // Expose the transient Arc to the callback, which may clone it if it wants + // and forward the result to the user + f(&transient) + } + + /// Creates a `ThinArc` for a HeaderSlice using the given header struct and + /// iterator to generate the slice. + pub fn from_header_and_iter(header: H, items: I) -> Self + where + I: Iterator + ExactSizeIterator, + { + let header = HeaderWithLength::new(header, items.len()); + Arc::into_thin(Arc::from_header_and_iter(header, items)) + } + + /// Creates a `ThinArc` for a HeaderSlice using the given header struct and + /// a slice to copy. + pub fn from_header_and_slice(header: H, items: &[T]) -> Self + where + T: Copy, + { + let header = HeaderWithLength::new(header, items.len()); + Arc::into_thin(Arc::from_header_and_slice(header, items)) + } + + /// Returns the address on the heap of the ThinArc itself -- not the T + /// within it -- for memory reporting. + #[inline] + pub fn ptr(&self) -> *const c_void { + self.ptr.as_ptr() as *const ArcInner as *const c_void + } + + /// Returns the address on the heap of the Arc itself -- not the T within it -- for memory + /// reporting. + #[inline] + pub fn heap_ptr(&self) -> *const c_void { + self.ptr() + } + + /// # Safety + /// + /// Constructs an ThinArc from a raw pointer. + /// + /// The raw pointer must have been previously returned by a call to + /// ThinArc::into_raw. + /// + /// The user of from_raw has to make sure a specific value of T is only dropped once. + /// + /// This function is unsafe because improper use may lead to memory unsafety, + /// even if the returned ThinArc is never accessed. + #[inline] + pub unsafe fn from_raw(ptr: *const c_void) -> Self { + Self { + ptr: ptr::NonNull::new_unchecked(ptr as *mut c_void).cast(), + phantom: PhantomData, + } + } + + /// Consume ThinArc and returned the wrapped pointer. + #[inline] + pub fn into_raw(self) -> *const c_void { + let this = ManuallyDrop::new(self); + this.ptr.cast().as_ptr() + } + + /// Provides a raw pointer to the data. + /// The counts are not affected in any way and the ThinArc is not consumed. + /// The pointer is valid for as long as there are strong counts in the ThinArc. 
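+    ///
+    /// For example (a sketch; exact addresses are allocator-dependent):
+    ///
+    /// ```
+    /// use triomphe::ThinArc;
+    ///
+    /// let a = ThinArc::from_header_and_slice(8u32, &[1u8, 2, 3]);
+    /// assert_eq!(a.header.header, 8);
+    /// assert_eq!(a.slice, [1, 2, 3]);
+    /// // The handle itself is a single word; `as_ptr` exposes the one
+    /// // heap allocation holding header, length, and slice.
+    /// let p = a.as_ptr();
+    /// assert!(!p.is_null());
+    /// ```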
+impl<H, T> Deref for ThinArc<H, T> {
+    type Target = HeaderSliceWithLength<H, [T]>;
+
+    #[inline]
+    fn deref(&self) -> &Self::Target {
+        unsafe { &(*thin_to_thick(self.ptr.as_ptr())).data }
+    }
+}
+
+impl<H, T> Clone for ThinArc<H, T> {
+    #[inline]
+    fn clone(&self) -> Self {
+        ThinArc::with_arc(self, |a| Arc::into_thin(a.clone()))
+    }
+}
+
+impl<H, T> Drop for ThinArc<H, T> {
+    #[inline]
+    fn drop(&mut self) {
+        let _ = Arc::from_thin(ThinArc {
+            ptr: self.ptr,
+            phantom: PhantomData,
+        });
+    }
+}
+
+impl<H, T> Arc<HeaderSliceWithLength<H, [T]>> {
+    /// Converts an `Arc` into a `ThinArc`. This consumes the `Arc`, so the refcount
+    /// is not modified.
+    #[inline]
+    pub fn into_thin(a: Self) -> ThinArc<H, T> {
+        let a = ManuallyDrop::new(a);
+        assert_eq!(
+            a.header.length,
+            a.slice.len(),
+            "Length needs to be correct for ThinArc to work"
+        );
+        let fat_ptr: *mut ArcInner<HeaderSliceWithLength<H, [T]>> = a.ptr();
+        let thin_ptr = fat_ptr as *mut [usize] as *mut usize;
+        ThinArc {
+            ptr: unsafe {
+                ptr::NonNull::new_unchecked(
+                    thin_ptr as *mut ArcInner<HeaderSliceWithLength<H, [T; 0]>>,
+                )
+            },
+            phantom: PhantomData,
+        }
+    }
+
+    /// Converts a `ThinArc` into an `Arc`. This consumes the `ThinArc`, so the refcount
+    /// is not modified.
+    #[inline]
+    pub fn from_thin(a: ThinArc<H, T>) -> Self {
+        let a = ManuallyDrop::new(a);
+        let ptr = thin_to_thick(a.ptr.as_ptr());
+        unsafe {
+            Arc {
+                p: ptr::NonNull::new_unchecked(ptr),
+                phantom: PhantomData,
+            }
+        }
+    }
+}
+
+impl<H: PartialEq, T: PartialEq> PartialEq for ThinArc<H, T> {
+    #[inline]
+    fn eq(&self, other: &ThinArc<H, T>) -> bool {
+        ThinArc::with_arc(self, |a| ThinArc::with_arc(other, |b| *a == *b))
+    }
+}
+
+impl<H: Eq, T: Eq> Eq for ThinArc<H, T> {}
+
+impl<H: Hash, T: Hash> Hash for ThinArc<H, T> {
+    fn hash<HSR: Hasher>(&self, state: &mut HSR) {
+        ThinArc::with_arc(self, |a| a.hash(state))
+    }
+}
+
+impl<H: fmt::Debug, T: fmt::Debug> fmt::Debug for ThinArc<H, T> {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        fmt::Debug::fmt(&**self, f)
+    }
+}
+
+impl<H, T> fmt::Pointer for ThinArc<H, T> {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        fmt::Pointer::fmt(&self.ptr(), f)
+    }
+}
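Editorial aside (not part of the patch): `into_raw`/`from_raw` are the intended FFI round trip; a hedged sketch, assuming the `triomphe` crate as a dependency.

```rust
use core::ffi::c_void;
use triomphe::ThinArc;

fn main() {
    let x = ThinArc::from_header_and_slice(0u64, &[10u32, 20]);

    // One machine word crosses the FFI boundary...
    let raw: *const c_void = x.into_raw();

    // ...and is reconstituted exactly once on the way back.
    // Safety: `raw` came from `into_raw` just above.
    let y = unsafe { ThinArc::<u64, u32>::from_raw(raw) };
    assert_eq!(y.slice, [10, 20]);
}
```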
+#[cfg(test)]
+mod tests {
+    use crate::{Arc, HeaderWithLength, ThinArc};
+    use alloc::vec;
+    use core::clone::Clone;
+    use core::ops::Drop;
+    use core::sync::atomic;
+    use core::sync::atomic::Ordering::{Acquire, SeqCst};
+
+    #[derive(PartialEq)]
+    struct Canary(*mut atomic::AtomicUsize);
+
+    impl Drop for Canary {
+        fn drop(&mut self) {
+            unsafe {
+                (*self.0).fetch_add(1, SeqCst);
+            }
+        }
+    }
+
+    #[test]
+    fn empty_thin() {
+        let header = HeaderWithLength::new(100u32, 0);
+        let x = Arc::from_header_and_iter(header, core::iter::empty::<i32>());
+        let y = Arc::into_thin(x.clone());
+        assert_eq!(y.header.header, 100);
+        assert!(y.slice.is_empty());
+        assert_eq!(x.header.header, 100);
+        assert!(x.slice.is_empty());
+    }
+
+    #[test]
+    fn thin_assert_padding() {
+        #[derive(Clone, Default)]
+        #[repr(C)]
+        struct Padded {
+            i: u16,
+        }
+
+        // The header will have more alignment than `Padded`
+        let header = HeaderWithLength::new(0i32, 2);
+        let items = vec![Padded { i: 0xdead }, Padded { i: 0xbeef }];
+        let a = ThinArc::from_header_and_iter(header, items.into_iter());
+        assert_eq!(a.slice.len(), 2);
+        assert_eq!(a.slice[0].i, 0xdead);
+        assert_eq!(a.slice[1].i, 0xbeef);
+    }
+
+    #[test]
+    #[allow(clippy::redundant_clone, clippy::eq_op)]
+    fn slices_and_thin() {
+        let mut canary = atomic::AtomicUsize::new(0);
+        let c = Canary(&mut canary as *mut atomic::AtomicUsize);
+        let v = vec![5, 6];
+        let header = HeaderWithLength::new(c, v.len());
+        {
+            let x = Arc::into_thin(Arc::from_header_and_slice(header, &v));
+            let y = ThinArc::with_arc(&x, |q| q.clone());
+            let _ = y.clone();
+            let _ = x == x;
+            Arc::from_thin(x.clone());
+        }
+        assert_eq!(canary.load(Acquire), 1);
+    }
+
+    #[test]
+    #[allow(clippy::redundant_clone, clippy::eq_op)]
+    fn iter_and_thin() {
+        let mut canary = atomic::AtomicUsize::new(0);
+        let c = Canary(&mut canary as *mut atomic::AtomicUsize);
+        let v = vec![5, 6];
+        let header = HeaderWithLength::new(c, v.len());
+        {
+            let x = Arc::into_thin(Arc::from_header_and_iter(header, v.into_iter()));
+            let y = ThinArc::with_arc(&x, |q| q.clone());
+            let _ = y.clone();
+            let _ = x == x;
+            Arc::from_thin(x.clone());
+        }
+        assert_eq!(canary.load(Acquire), 1);
+    }
+
+    #[test]
+    fn into_raw_and_from_raw() {
+        let mut canary = atomic::AtomicUsize::new(0);
+        let c = Canary(&mut canary as *mut atomic::AtomicUsize);
+        let v = vec![5, 6];
+        let header = HeaderWithLength::new(c, v.len());
+        {
+            type ThinArcCanary = ThinArc<Canary, u32>;
+            let x: ThinArcCanary = Arc::into_thin(Arc::from_header_and_iter(header, v.into_iter()));
+            let ptr = x.as_ptr();
+
+            assert_eq!(x.into_raw(), ptr);
+
+            let _x = unsafe { ThinArcCanary::from_raw(ptr) };
+        }
+        assert_eq!(canary.load(Acquire), 1);
+    }
+}
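Editorial aside (not part of the patch): the point of the thin representation is that `ThinArc` is a single word, unlike the fat `Arc` it wraps. A small check; this assumes `HeaderSliceWithLength` is re-exported at the crate root and the usual fat-pointer layout.

```rust
use core::mem::size_of;
use triomphe::{Arc, HeaderSliceWithLength, ThinArc};

fn main() {
    // Thin: one word, suitable for FFI struct fields.
    assert_eq!(size_of::<ThinArc<u32, u8>>(), size_of::<usize>());

    // Fat: pointer plus slice length, two words.
    assert_eq!(
        size_of::<Arc<HeaderSliceWithLength<u32, [u8]>>>(),
        2 * size_of::<usize>()
    );
}
```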
diff --git a/vendor/triomphe/src/unique_arc.rs b/vendor/triomphe/src/unique_arc.rs
new file mode 100644
index 000000000..79555fc27
--- /dev/null
+++ b/vendor/triomphe/src/unique_arc.rs
@@ -0,0 +1,257 @@
+use alloc::{alloc::Layout, boxed::Box};
+use core::convert::TryFrom;
+use core::marker::PhantomData;
+use core::mem::{ManuallyDrop, MaybeUninit};
+use core::ops::{Deref, DerefMut};
+use core::ptr::{self, NonNull};
+use core::sync::atomic::AtomicUsize;
+
+use crate::HeaderSlice;
+
+use super::{Arc, ArcInner};
+
+/// An `Arc` that is known to be uniquely owned
+///
+/// When `Arc`s are constructed, they are known to be
+/// uniquely owned. In such a case it is safe to mutate
+/// the contents of the `Arc`. Normally, one would just handle
+/// this by mutating the data on the stack before allocating the
+/// `Arc`; however, it's possible the data is large or unsized
+/// and you need to heap-allocate it earlier in such a way
+/// that it can be freely converted into a regular `Arc` once you're
+/// done.
+///
+/// `UniqueArc` exists for this purpose: when constructed it performs
+/// the same allocations necessary for an `Arc`, but it allows mutable access.
+/// Once the mutation is finished, you can call `.shareable()` and get a regular `Arc`
+/// out of it.
+///
+/// ```rust
+/// # use triomphe::UniqueArc;
+/// let data = [1, 2, 3, 4, 5];
+/// let mut x = UniqueArc::new(data);
+/// x[4] = 7; // mutate!
+/// let y = x.shareable(); // y is an Arc<T>
+/// ```
+#[repr(transparent)]
+pub struct UniqueArc<T: ?Sized>(Arc<T>);
+
+impl<T> UniqueArc<T> {
+    #[inline]
+    /// Construct a new UniqueArc
+    pub fn new(data: T) -> Self {
+        UniqueArc(Arc::new(data))
+    }
+
+    /// Construct an uninitialized arc
+    #[inline]
+    pub fn new_uninit() -> UniqueArc<MaybeUninit<T>> {
+        unsafe {
+            let layout = Layout::new::<ArcInner<MaybeUninit<T>>>();
+            let ptr = alloc::alloc::alloc(layout);
+            let mut p = NonNull::new(ptr)
+                .unwrap_or_else(|| alloc::alloc::handle_alloc_error(layout))
+                .cast::<ArcInner<MaybeUninit<T>>>();
+            ptr::write(&mut p.as_mut().count, AtomicUsize::new(1));
+
+            UniqueArc(Arc {
+                p,
+                phantom: PhantomData,
+            })
+        }
+    }
+
+    /// Gets the inner value of the unique arc
+    pub fn into_inner(this: Self) -> T {
+        // Wrap the Arc in a `ManuallyDrop` so that its drop routine never runs
+        let this = ManuallyDrop::new(this.0);
+        debug_assert!(
+            this.is_unique(),
+            "attempted to call `.into_inner()` on a `UniqueArc` with a non-zero ref count",
+        );
+
+        // Safety: We have exclusive access to the inner data and the
+        // arc will not perform its drop routine since we've
+        // wrapped it in a `ManuallyDrop`
+        unsafe { Box::from_raw(this.ptr()).data }
+    }
+}
+
+impl<T: ?Sized> UniqueArc<T> {
+    /// Convert to a shareable `Arc<T>` once we're done mutating it
+    #[inline]
+    pub fn shareable(self) -> Arc<T> {
+        self.0
+    }
+
+    /// Creates a new [`UniqueArc`] from the given [`Arc`].
+    ///
+    /// An unchecked alternative to `Arc::try_unique()`
+    ///
+    /// # Safety
+    ///
+    /// The given `Arc` must have a reference count of exactly one
+    pub(crate) unsafe fn from_arc(arc: Arc<T>) -> Self {
+        debug_assert_eq!(Arc::count(&arc), 1);
+        Self(arc)
+    }
+
+    /// Creates a new `&mut `[`UniqueArc`] from the given `&mut `[`Arc`].
+    ///
+    /// An unchecked alternative to `Arc::try_as_unique()`
+    ///
+    /// # Safety
+    ///
+    /// The given `Arc` must have a reference count of exactly one
+    pub(crate) unsafe fn from_arc_ref(arc: &mut Arc<T>) -> &mut Self {
+        debug_assert_eq!(Arc::count(&arc), 1);
+
+        // Safety: caller guarantees that `arc` is unique,
+        // `UniqueArc` is `repr(transparent)`
+        &mut *(arc as *mut Arc<T> as *mut UniqueArc<T>)
+    }
+}
+
+impl<T> UniqueArc<MaybeUninit<T>> {
+    /// Calls `MaybeUninit::write` on the contained value.
+    pub fn write(&mut self, val: T) -> &mut T {
+        unsafe {
+            // Casting *mut MaybeUninit<T> -> *mut T is always fine
+            let ptr = self.as_mut_ptr() as *mut T;
+
+            // Safety: We have exclusive access to the inner data
+            ptr.write(val);
+
+            // Safety: the pointer was just written to
+            &mut *ptr
+        }
+    }
+
+    /// Obtain a mutable pointer to the stored `MaybeUninit<T>`.
+    pub fn as_mut_ptr(&mut self) -> *mut MaybeUninit<T> {
+        unsafe { &mut (*self.0.ptr()).data }
+    }
+
+    /// Convert to an initialized Arc.
+    ///
+    /// # Safety
+    ///
+    /// This function is equivalent to `MaybeUninit::assume_init` and has the
+    /// same safety requirements. You are responsible for ensuring that the `T`
+    /// has actually been initialized before calling this method.
+    #[inline]
+    pub unsafe fn assume_init(this: Self) -> UniqueArc<T> {
+        UniqueArc(Arc {
+            p: ManuallyDrop::new(this).0.p.cast(),
+            phantom: PhantomData,
+        })
+    }
+}
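Editorial aside (not part of the patch): together, `new_uninit`/`write`/`assume_init` give a two-phase initialization pattern alongside the mutate-then-share flow from the type's doc comment. A minimal sketch, assuming the `triomphe` crate as a dependency.

```rust
use triomphe::{Arc, UniqueArc};

fn main() {
    // Mutate freely while provably unique...
    let mut x = UniqueArc::new([0u32; 4]);
    x[3] = 7;
    let shared: Arc<[u32; 4]> = x.shareable();
    assert_eq!(shared[3], 7);

    // ...or allocate first and initialize later.
    let mut u = UniqueArc::<u64>::new_uninit();
    u.write(42);
    // Safety: the value was initialized by `write` just above.
    let z = unsafe { UniqueArc::assume_init(u) };
    assert_eq!(*z, 42);
}
```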
+impl<T> UniqueArc<[MaybeUninit<T>]> {
+    /// Create an Arc that contains an array `[MaybeUninit<T>]` of length `len`.
+    pub fn new_uninit_slice(len: usize) -> Self {
+        let ptr: NonNull<ArcInner<HeaderSlice<(), [MaybeUninit<T>]>>> =
+            Arc::allocate_for_header_and_slice(len);
+
+        // Safety:
+        // - `ArcInner` is properly allocated and initialized.
+        // - `()` and `[MaybeUninit<T>]` do not require special initialization.
+        // - The `Arc` was just created and is therefore unique.
+        unsafe {
+            let arc: Arc<HeaderSlice<(), [MaybeUninit<T>]>> = Arc::from_raw_inner(ptr.as_ptr());
+            let arc: Arc<[MaybeUninit<T>]> = arc.into();
+            UniqueArc(arc)
+        }
+    }
+
+    /// # Safety
+    ///
+    /// Must initialize all fields before calling this function.
+    #[inline]
+    pub unsafe fn assume_init_slice(Self(this): Self) -> UniqueArc<[T]> {
+        UniqueArc(this.assume_init())
+    }
+}
+
+impl<T> TryFrom<Arc<T>> for UniqueArc<T> {
+    type Error = Arc<T>;
+
+    fn try_from(arc: Arc<T>) -> Result<Self, Self::Error> {
+        Arc::try_unique(arc)
+    }
+}
+
+impl<T: ?Sized> Deref for UniqueArc<T> {
+    type Target = T;
+
+    #[inline]
+    fn deref(&self) -> &T {
+        &*self.0
+    }
+}
+
+impl<T: ?Sized> DerefMut for UniqueArc<T> {
+    #[inline]
+    fn deref_mut(&mut self) -> &mut T {
+        // We know this to be uniquely owned
+        unsafe { &mut (*self.0.ptr()).data }
+    }
+}
+
+// Safety:
+// This leverages the correctness of Arc's CoerciblePtr impl. Additionally, we must ensure that
+// this can not be used to violate the safety invariants of UniqueArc, which require that we can
+// not duplicate the Arc, such that replace_ptr returns a valid instance. This holds since it
+// consumes a unique owner of the contained ArcInner.
+#[cfg(feature = "unsize")]
+unsafe impl<T, U: ?Sized> unsize::CoerciblePtr<U> for UniqueArc<T> {
+    type Pointee = T;
+    type Output = UniqueArc<U>;
+
+    fn as_sized_ptr(&mut self) -> *mut T {
+        // Dispatch to the contained field.
+        unsize::CoerciblePtr::<U>::as_sized_ptr(&mut self.0)
+    }
+
+    unsafe fn replace_ptr(self, new: *mut U) -> UniqueArc<U> {
+        // Dispatch to the contained field; work around the conflict between
+        // destructuring and Drop.
+        let inner = ManuallyDrop::new(self);
+        UniqueArc(ptr::read(&inner.0).replace_ptr(new))
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use crate::{Arc, UniqueArc};
+    use core::{convert::TryFrom, mem::MaybeUninit};
+
+    #[test]
+    fn unique_into_inner() {
+        let unique = UniqueArc::new(10u64);
+        assert_eq!(UniqueArc::into_inner(unique), 10);
+    }
+
+    #[test]
+    fn try_from_arc() {
+        let x = Arc::new(10_000);
+        let y = x.clone();
+
+        assert!(UniqueArc::try_from(x).is_err());
+        assert_eq!(
+            UniqueArc::into_inner(UniqueArc::try_from(y).unwrap()),
+            10_000,
+        );
+    }
+
+    #[test]
+    #[allow(deprecated)]
+    fn maybeuninit_smoke() {
+        let mut arc: UniqueArc<MaybeUninit<_>> = UniqueArc::new_uninit();
+        arc.write(999);
+
+        let arc = unsafe { UniqueArc::assume_init(arc) };
+        assert_eq!(*arc, 999);
+    }
+}
-- 
cgit v1.2.3