summaryrefslogtreecommitdiffstats
path: root/vendor/triomphe/src
diff options
context:
space:
mode:
authorDaniel Baumann <daniel.baumann@progress-linux.org>2024-05-30 03:57:31 +0000
committerDaniel Baumann <daniel.baumann@progress-linux.org>2024-05-30 03:57:31 +0000
commitdc0db358abe19481e475e10c32149b53370f1a1c (patch)
treeab8ce99c4b255ce46f99ef402c27916055b899ee /vendor/triomphe/src
parentReleasing progress-linux version 1.71.1+dfsg1-2~progress7.99u1. (diff)
downloadrustc-dc0db358abe19481e475e10c32149b53370f1a1c.tar.xz
rustc-dc0db358abe19481e475e10c32149b53370f1a1c.zip
Merging upstream version 1.72.1+dfsg1.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'vendor/triomphe/src')
-rw-r--r--vendor/triomphe/src/arc.rs887
-rw-r--r--vendor/triomphe/src/arc_borrow.rs116
-rw-r--r--vendor/triomphe/src/arc_swap_support.rs42
-rw-r--r--vendor/triomphe/src/arc_union.rs139
-rw-r--r--vendor/triomphe/src/header.rs378
-rw-r--r--vendor/triomphe/src/lib.rs94
-rw-r--r--vendor/triomphe/src/offset_arc.rs134
-rw-r--r--vendor/triomphe/src/thin_arc.rs329
-rw-r--r--vendor/triomphe/src/unique_arc.rs257
9 files changed, 2376 insertions, 0 deletions
diff --git a/vendor/triomphe/src/arc.rs b/vendor/triomphe/src/arc.rs
new file mode 100644
index 000000000..6fe022c46
--- /dev/null
+++ b/vendor/triomphe/src/arc.rs
@@ -0,0 +1,887 @@
+use alloc::alloc::handle_alloc_error;
+use alloc::boxed::Box;
+use core::alloc::Layout;
+use core::borrow;
+use core::cmp::Ordering;
+use core::convert::From;
+use core::ffi::c_void;
+use core::fmt;
+use core::hash::{Hash, Hasher};
+use core::marker::PhantomData;
+use core::mem::{ManuallyDrop, MaybeUninit};
+use core::ops::Deref;
+use core::ptr::{self, NonNull};
+use core::sync::atomic;
+use core::sync::atomic::Ordering::{Acquire, Relaxed, Release};
+use core::{isize, usize};
+
+#[cfg(feature = "serde")]
+use serde::{Deserialize, Serialize};
+#[cfg(feature = "stable_deref_trait")]
+use stable_deref_trait::{CloneStableDeref, StableDeref};
+
+use crate::{abort, ArcBorrow, HeaderSlice, OffsetArc, UniqueArc};
+
+/// A soft limit on the amount of references that may be made to an `Arc`.
+///
+/// Going above this limit will abort your program (although not
+/// necessarily) at _exactly_ `MAX_REFCOUNT + 1` references.
+const MAX_REFCOUNT: usize = (isize::MAX) as usize;
+
+/// The object allocated by an Arc<T>
+#[repr(C)]
+pub(crate) struct ArcInner<T: ?Sized> {
+ pub(crate) count: atomic::AtomicUsize,
+ pub(crate) data: T,
+}
+
+unsafe impl<T: ?Sized + Sync + Send> Send for ArcInner<T> {}
+unsafe impl<T: ?Sized + Sync + Send> Sync for ArcInner<T> {}
+
+/// An atomically reference counted shared pointer
+///
+/// See the documentation for [`Arc`] in the standard library. Unlike the
+/// standard library `Arc`, this `Arc` does not support weak reference counting.
+///
+/// [`Arc`]: https://doc.rust-lang.org/stable/std/sync/struct.Arc.html
+#[repr(transparent)]
+pub struct Arc<T: ?Sized> {
+ pub(crate) p: ptr::NonNull<ArcInner<T>>,
+ pub(crate) phantom: PhantomData<T>,
+}
+
+unsafe impl<T: ?Sized + Sync + Send> Send for Arc<T> {}
+unsafe impl<T: ?Sized + Sync + Send> Sync for Arc<T> {}
+
+impl<T> Arc<T> {
+ /// Construct an `Arc<T>`
+ #[inline]
+ pub fn new(data: T) -> Self {
+ let ptr = Box::into_raw(Box::new(ArcInner {
+ count: atomic::AtomicUsize::new(1),
+ data,
+ }));
+
+ unsafe {
+ Arc {
+ p: ptr::NonNull::new_unchecked(ptr),
+ phantom: PhantomData,
+ }
+ }
+ }
+
+ /// Reconstruct the Arc<T> from a raw pointer obtained from into_raw()
+ ///
+ /// Note: This raw pointer will be offset in the allocation and must be preceded
+ /// by the atomic count.
+ ///
+ /// It is recommended to use OffsetArc for this
+ #[inline]
+ pub unsafe fn from_raw(ptr: *const T) -> Self {
+ // FIXME: when `byte_sub` is stabilized, this can accept T: ?Sized.
+
+ // To find the corresponding pointer to the `ArcInner` we need
+ // to subtract the offset of the `data` field from the pointer.
+ let ptr = (ptr as *const u8).sub(offset_of!(ArcInner<T>, data));
+ Arc::from_raw_inner(ptr as *mut ArcInner<T>)
+ }
+
+ /// Temporarily converts |self| into a bonafide OffsetArc and exposes it to the
+ /// provided callback. The refcount is not modified.
+ #[inline(always)]
+ pub fn with_raw_offset_arc<F, U>(&self, f: F) -> U
+ where
+ F: FnOnce(&OffsetArc<T>) -> U,
+ {
+ // Synthesize transient Arc, which never touches the refcount of the ArcInner.
+ // Store transient in `ManuallyDrop`, to leave the refcount untouched.
+ let transient = unsafe { ManuallyDrop::new(Arc::into_raw_offset(ptr::read(self))) };
+
+ // Expose the transient Arc to the callback, which may clone it if it wants.
+ f(&transient)
+ }
+
+ /// Converts an `Arc` into a `OffsetArc`. This consumes the `Arc`, so the refcount
+ /// is not modified.
+ #[inline]
+ pub fn into_raw_offset(a: Self) -> OffsetArc<T> {
+ unsafe {
+ OffsetArc {
+ ptr: ptr::NonNull::new_unchecked(Arc::into_raw(a) as *mut T),
+ phantom: PhantomData,
+ }
+ }
+ }
+
+ /// Converts a `OffsetArc` into an `Arc`. This consumes the `OffsetArc`, so the refcount
+ /// is not modified.
+ #[inline]
+ pub fn from_raw_offset(a: OffsetArc<T>) -> Self {
+ let a = ManuallyDrop::new(a);
+ let ptr = a.ptr.as_ptr();
+ unsafe { Arc::from_raw(ptr) }
+ }
+
+ /// Returns the inner value, if the [`Arc`] has exactly one strong reference.
+ ///
+ /// Otherwise, an [`Err`] is returned with the same [`Arc`] that was
+ /// passed in.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use triomphe::Arc;
+ ///
+ /// let x = Arc::new(3);
+ /// assert_eq!(Arc::try_unwrap(x), Ok(3));
+ ///
+ /// let x = Arc::new(4);
+ /// let _y = Arc::clone(&x);
+ /// assert_eq!(*Arc::try_unwrap(x).unwrap_err(), 4);
+ /// ```
+ pub fn try_unwrap(this: Self) -> Result<T, Self> {
+ Self::try_unique(this).map(UniqueArc::into_inner)
+ }
+}
+
+impl<T: ?Sized> Arc<T> {
+ /// Convert the Arc<T> to a raw pointer, suitable for use across FFI
+ ///
+ /// Note: This returns a pointer to the data T, which is offset in the allocation.
+ ///
+ /// It is recommended to use OffsetArc for this.
+ #[inline]
+ pub fn into_raw(this: Self) -> *const T {
+ let this = ManuallyDrop::new(this);
+ this.as_ptr()
+ }
+
+ /// Returns the raw pointer.
+ ///
+ /// Same as into_raw except `self` isn't consumed.
+ #[inline]
+ pub fn as_ptr(&self) -> *const T {
+ // SAFETY: This cannot go through a reference to `data`, because this method
+ // is used to implement `into_raw`. To reconstruct the full `Arc` from this
+ // pointer, it needs to maintain its full provenance, and not be reduced to
+ // just the contained `T`.
+ unsafe { ptr::addr_of_mut!((*self.ptr()).data) }
+ }
+
+ /// Produce a pointer to the data that can be converted back
+ /// to an Arc. This is basically an `&Arc<T>`, without the extra indirection.
+ /// It has the benefits of an `&T` but also knows about the underlying refcount
+ /// and can be converted into more `Arc<T>`s if necessary.
+ #[inline]
+ pub fn borrow_arc(&self) -> ArcBorrow<'_, T> {
+ ArcBorrow(&**self)
+ }
+
+ /// Returns the address on the heap of the Arc itself -- not the T within it -- for memory
+ /// reporting.
+ pub fn heap_ptr(&self) -> *const c_void {
+ self.p.as_ptr() as *const ArcInner<T> as *const c_void
+ }
+
+ #[inline]
+ pub(super) fn into_raw_inner(this: Self) -> *mut ArcInner<T> {
+ let this = ManuallyDrop::new(this);
+ this.ptr()
+ }
+
+ /// Construct an `Arc` from an allocated `ArcInner`.
+ /// # Safety
+    /// The `ptr` must point to a valid instance, allocated by an `Arc`. The reference
+    /// count will not be modified.
+ pub(super) unsafe fn from_raw_inner(ptr: *mut ArcInner<T>) -> Self {
+ Arc {
+ p: ptr::NonNull::new_unchecked(ptr),
+ phantom: PhantomData,
+ }
+ }
+
+ #[inline]
+ pub(super) fn inner(&self) -> &ArcInner<T> {
+ // This unsafety is ok because while this arc is alive we're guaranteed
+ // that the inner pointer is valid. Furthermore, we know that the
+ // `ArcInner` structure itself is `Sync` because the inner data is
+ // `Sync` as well, so we're ok loaning out an immutable pointer to these
+ // contents.
+ unsafe { &*self.ptr() }
+ }
+
+ // Non-inlined part of `drop`. Just invokes the destructor.
+ #[inline(never)]
+ unsafe fn drop_slow(&mut self) {
+ let _ = Box::from_raw(self.ptr());
+ }
+
+ /// Test pointer equality between the two Arcs, i.e. they must be the _same_
+ /// allocation
+ #[inline]
+ pub fn ptr_eq(this: &Self, other: &Self) -> bool {
+ this.ptr() == other.ptr()
+ }
+
+ pub(crate) fn ptr(&self) -> *mut ArcInner<T> {
+ self.p.as_ptr()
+ }
+
+ /// Allocates an `ArcInner<T>` with sufficient space for
+ /// a possibly-unsized inner value where the value has the layout provided.
+ ///
+ /// The function `mem_to_arcinner` is called with the data pointer
+ /// and must return back a (potentially fat)-pointer for the `ArcInner<T>`.
+ ///
+ /// ## Safety
+ ///
+ /// `mem_to_arcinner` must return the same pointer, the only things that can change are
+ /// - its type
+ /// - its metadata
+ ///
+ /// `value_layout` must be correct for `T`.
+ #[allow(unused_unsafe)]
+ pub(super) unsafe fn allocate_for_layout(
+ value_layout: Layout,
+ mem_to_arcinner: impl FnOnce(*mut u8) -> *mut ArcInner<T>,
+ ) -> NonNull<ArcInner<T>> {
+ let layout = Layout::new::<ArcInner<()>>()
+ .extend(value_layout)
+ .unwrap()
+ .0
+ .pad_to_align();
+
+ // Safety: we propagate safety requirements to the caller
+ unsafe {
+ Arc::try_allocate_for_layout(value_layout, mem_to_arcinner)
+ .unwrap_or_else(|_| handle_alloc_error(layout))
+ }
+ }
+
+ /// Allocates an `ArcInner<T>` with sufficient space for
+ /// a possibly-unsized inner value where the value has the layout provided,
+ /// returning an error if allocation fails.
+ ///
+ /// The function `mem_to_arcinner` is called with the data pointer
+ /// and must return back a (potentially fat)-pointer for the `ArcInner<T>`.
+ ///
+ /// ## Safety
+ ///
+ /// `mem_to_arcinner` must return the same pointer, the only things that can change are
+ /// - its type
+ /// - its metadata
+ ///
+ /// `value_layout` must be correct for `T`.
+ #[allow(unused_unsafe)]
+ unsafe fn try_allocate_for_layout(
+ value_layout: Layout,
+ mem_to_arcinner: impl FnOnce(*mut u8) -> *mut ArcInner<T>,
+ ) -> Result<NonNull<ArcInner<T>>, ()> {
+ let layout = Layout::new::<ArcInner<()>>()
+ .extend(value_layout)
+ .unwrap()
+ .0
+ .pad_to_align();
+
+ let ptr = NonNull::new(alloc::alloc::alloc(layout)).ok_or(())?;
+
+ // Initialize the ArcInner
+ let inner = mem_to_arcinner(ptr.as_ptr());
+ debug_assert_eq!(unsafe { Layout::for_value(&*inner) }, layout);
+
+ unsafe {
+ ptr::write(&mut (*inner).count, atomic::AtomicUsize::new(1));
+ }
+
+ // Safety: `ptr` is checked to be non-null,
+ // `inner` is the same as `ptr` (per the safety requirements of this function)
+ unsafe { Ok(NonNull::new_unchecked(inner)) }
+ }
+}
+
+impl<H, T> Arc<HeaderSlice<H, [T]>> {
+ pub(super) fn allocate_for_header_and_slice(
+ len: usize,
+ ) -> NonNull<ArcInner<HeaderSlice<H, [T]>>> {
+ let layout = Layout::new::<H>()
+ .extend(Layout::array::<T>(len).unwrap())
+ .unwrap()
+ .0
+ .pad_to_align();
+
+ unsafe {
+ // Safety:
+ // - the provided closure does not change the pointer (except for meta & type)
+ // - the provided layout is valid for `HeaderSlice<H, [T]>`
+ Arc::allocate_for_layout(layout, |mem| {
+ // Synthesize the fat pointer. We do this by claiming we have a direct
+ // pointer to a [T], and then changing the type of the borrow. The key
+ // point here is that the length portion of the fat pointer applies
+ // only to the number of elements in the dynamically-sized portion of
+ // the type, so the value will be the same whether it points to a [T]
+ // or something else with a [T] as its last member.
+ let fake_slice = ptr::slice_from_raw_parts_mut(mem as *mut T, len);
+ fake_slice as *mut ArcInner<HeaderSlice<H, [T]>>
+ })
+ }
+ }
+}
+
+impl<T> Arc<MaybeUninit<T>> {
+    /// Create an `Arc` that contains a `MaybeUninit<T>`.
+ pub fn new_uninit() -> Self {
+ Arc::new(MaybeUninit::<T>::uninit())
+ }
+
+ /// Calls `MaybeUninit::write` on the value contained.
+ ///
+ /// ## Panics
+ ///
+ /// If the `Arc` is not unique.
+ #[deprecated(
+ since = "0.1.7",
+ note = "this function previously was UB and now panics for non-unique `Arc`s. Use `UniqueArc::write` instead."
+ )]
+ #[track_caller]
+ pub fn write(&mut self, val: T) -> &mut T {
+ UniqueArc::write(must_be_unique(self), val)
+ }
+
+ /// Obtain a mutable pointer to the stored `MaybeUninit<T>`.
+ pub fn as_mut_ptr(&mut self) -> *mut MaybeUninit<T> {
+ unsafe { &mut (*self.ptr()).data }
+ }
+
+ /// # Safety
+ ///
+ /// Must initialize all fields before calling this function.
+ #[inline]
+ pub unsafe fn assume_init(self) -> Arc<T> {
+ Arc::from_raw_inner(ManuallyDrop::new(self).ptr().cast())
+ }
+}
+
+impl<T> Arc<[MaybeUninit<T>]> {
+    /// Create an `Arc` that contains an array `[MaybeUninit<T>]` of length `len`.
+ pub fn new_uninit_slice(len: usize) -> Self {
+ UniqueArc::new_uninit_slice(len).shareable()
+ }
+
+ /// Obtain a mutable slice to the stored `[MaybeUninit<T>]`.
+ #[deprecated(
+ since = "0.1.8",
+ note = "this function previously was UB and now panics for non-unique `Arc`s. Use `UniqueArc` or `get_mut` instead."
+ )]
+ #[track_caller]
+ pub fn as_mut_slice(&mut self) -> &mut [MaybeUninit<T>] {
+ must_be_unique(self)
+ }
+
+ /// # Safety
+ ///
+ /// Must initialize all fields before calling this function.
+ #[inline]
+ pub unsafe fn assume_init(self) -> Arc<[T]> {
+ Arc::from_raw_inner(ManuallyDrop::new(self).ptr() as _)
+ }
+}
+
+impl<T: ?Sized> Clone for Arc<T> {
+ #[inline]
+ fn clone(&self) -> Self {
+ // Using a relaxed ordering is alright here, as knowledge of the
+ // original reference prevents other threads from erroneously deleting
+ // the object.
+ //
+ // As explained in the [Boost documentation][1], Increasing the
+ // reference counter can always be done with memory_order_relaxed: New
+ // references to an object can only be formed from an existing
+ // reference, and passing an existing reference from one thread to
+ // another must already provide any required synchronization.
+ //
+ // [1]: (www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html)
+ let old_size = self.inner().count.fetch_add(1, Relaxed);
+
+ // However we need to guard against massive refcounts in case someone
+ // is `mem::forget`ing Arcs. If we don't do this the count can overflow
+ // and users will use-after free. We racily saturate to `isize::MAX` on
+ // the assumption that there aren't ~2 billion threads incrementing
+ // the reference count at once. This branch will never be taken in
+ // any realistic program.
+ //
+ // We abort because such a program is incredibly degenerate, and we
+ // don't care to support it.
+ if old_size > MAX_REFCOUNT {
+ abort();
+ }
+
+ unsafe {
+ Arc {
+ p: ptr::NonNull::new_unchecked(self.ptr()),
+ phantom: PhantomData,
+ }
+ }
+ }
+}
+
+impl<T: ?Sized> Deref for Arc<T> {
+ type Target = T;
+
+ #[inline]
+ fn deref(&self) -> &T {
+ &self.inner().data
+ }
+}
+
+impl<T: Clone> Arc<T> {
+ /// Makes a mutable reference to the `Arc`, cloning if necessary
+ ///
+ /// This is functionally equivalent to [`Arc::make_mut`][mm] from the standard library.
+ ///
+ /// If this `Arc` is uniquely owned, `make_mut()` will provide a mutable
+ /// reference to the contents. If not, `make_mut()` will create a _new_ `Arc`
+ /// with a copy of the contents, update `this` to point to it, and provide
+ /// a mutable reference to its contents.
+ ///
+ /// This is useful for implementing copy-on-write schemes where you wish to
+ /// avoid copying things if your `Arc` is not shared.
+ ///
+ /// [mm]: https://doc.rust-lang.org/stable/std/sync/struct.Arc.html#method.make_mut
+ #[inline]
+ pub fn make_mut(this: &mut Self) -> &mut T {
+ if !this.is_unique() {
+ // Another pointer exists; clone
+ *this = Arc::new(T::clone(&this));
+ }
+
+ unsafe {
+ // This unsafety is ok because we're guaranteed that the pointer
+ // returned is the *only* pointer that will ever be returned to T. Our
+ // reference count is guaranteed to be 1 at this point, and we required
+ // the Arc itself to be `mut`, so we're returning the only possible
+ // reference to the inner data.
+ &mut (*this.ptr()).data
+ }
+ }
+
+ /// Makes a `UniqueArc` from an `Arc`, cloning if necessary.
+ ///
+ /// If this `Arc` is uniquely owned, `make_unique()` will provide a `UniqueArc`
+ /// containing `this`. If not, `make_unique()` will create a _new_ `Arc`
+ /// with a copy of the contents, update `this` to point to it, and provide
+ /// a `UniqueArc` to it.
+ ///
+ /// This is useful for implementing copy-on-write schemes where you wish to
+ /// avoid copying things if your `Arc` is not shared.
+ #[inline]
+ pub fn make_unique(this: &mut Self) -> &mut UniqueArc<T> {
+ if !this.is_unique() {
+ // Another pointer exists; clone
+ *this = Arc::new(T::clone(&this));
+ }
+
+ unsafe {
+ // Safety: this is either unique or just created (which is also unique)
+ UniqueArc::from_arc_ref(this)
+ }
+ }
+
+ /// If we have the only reference to `T` then unwrap it. Otherwise, clone `T` and return the clone.
+ ///
+ /// Assuming `arc_t` is of type `Arc<T>`, this function is functionally equivalent to `(*arc_t).clone()`, but will avoid cloning the inner value where possible.
+ pub fn unwrap_or_clone(this: Arc<T>) -> T {
+ Self::try_unwrap(this).unwrap_or_else(|this| T::clone(&this))
+ }
+}
+
+impl<T: ?Sized> Arc<T> {
+ /// Provides mutable access to the contents _if_ the `Arc` is uniquely owned.
+ #[inline]
+ pub fn get_mut(this: &mut Self) -> Option<&mut T> {
+ if this.is_unique() {
+ unsafe {
+ // See make_mut() for documentation of the threadsafety here.
+ Some(&mut (*this.ptr()).data)
+ }
+ } else {
+ None
+ }
+ }
+
+ /// Provides unique access to the arc _if_ the `Arc` is uniquely owned.
+ pub fn get_unique(this: &mut Self) -> Option<&mut UniqueArc<T>> {
+ Self::try_as_unique(this).ok()
+ }
+
+ /// Whether or not the `Arc` is uniquely owned (is the refcount 1?).
+ pub fn is_unique(&self) -> bool {
+ // See the extensive discussion in [1] for why this needs to be Acquire.
+ //
+ // [1] https://github.com/servo/servo/issues/21186
+ Self::count(self) == 1
+ }
+
+ /// Gets the number of [`Arc`] pointers to this allocation
+ pub fn count(this: &Self) -> usize {
+ this.inner().count.load(Acquire)
+ }
+
+ /// Returns a [`UniqueArc`] if the [`Arc`] has exactly one strong reference.
+ ///
+ /// Otherwise, an [`Err`] is returned with the same [`Arc`] that was
+ /// passed in.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use triomphe::{Arc, UniqueArc};
+ ///
+ /// let x = Arc::new(3);
+ /// assert_eq!(UniqueArc::into_inner(Arc::try_unique(x).unwrap()), 3);
+ ///
+ /// let x = Arc::new(4);
+ /// let _y = Arc::clone(&x);
+ /// assert_eq!(
+ /// *Arc::try_unique(x).map(UniqueArc::into_inner).unwrap_err(),
+ /// 4,
+ /// );
+ /// ```
+ pub fn try_unique(this: Self) -> Result<UniqueArc<T>, Self> {
+ if this.is_unique() {
+ // Safety: The current arc is unique and making a `UniqueArc`
+ // from it is sound
+ unsafe { Ok(UniqueArc::from_arc(this)) }
+ } else {
+ Err(this)
+ }
+ }
+
+ pub(crate) fn try_as_unique(this: &mut Self) -> Result<&mut UniqueArc<T>, &mut Self> {
+ if this.is_unique() {
+ // Safety: The current arc is unique and making a `UniqueArc`
+ // from it is sound
+ unsafe { Ok(UniqueArc::from_arc_ref(this)) }
+ } else {
+ Err(this)
+ }
+ }
+}
+
+impl<T: ?Sized> Drop for Arc<T> {
+ #[inline]
+ fn drop(&mut self) {
+ // Because `fetch_sub` is already atomic, we do not need to synchronize
+ // with other threads unless we are going to delete the object.
+ if self.inner().count.fetch_sub(1, Release) != 1 {
+ return;
+ }
+
+ // FIXME(bholley): Use the updated comment when [2] is merged.
+ //
+ // This load is needed to prevent reordering of use of the data and
+ // deletion of the data. Because it is marked `Release`, the decreasing
+ // of the reference count synchronizes with this `Acquire` load. This
+ // means that use of the data happens before decreasing the reference
+ // count, which happens before this load, which happens before the
+ // deletion of the data.
+ //
+ // As explained in the [Boost documentation][1],
+ //
+ // > It is important to enforce any possible access to the object in one
+ // > thread (through an existing reference) to *happen before* deleting
+ // > the object in a different thread. This is achieved by a "release"
+ // > operation after dropping a reference (any access to the object
+ // > through this reference must obviously happened before), and an
+ // > "acquire" operation before deleting the object.
+ //
+ // [1]: (www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html)
+ // [2]: https://github.com/rust-lang/rust/pull/41714
+ self.inner().count.load(Acquire);
+
+ unsafe {
+ self.drop_slow();
+ }
+ }
+}
+
+impl<T: ?Sized + PartialEq> PartialEq for Arc<T> {
+ fn eq(&self, other: &Arc<T>) -> bool {
+ Self::ptr_eq(self, other) || *(*self) == *(*other)
+ }
+
+ #[allow(clippy::partialeq_ne_impl)]
+ fn ne(&self, other: &Arc<T>) -> bool {
+ !Self::ptr_eq(self, other) && *(*self) != *(*other)
+ }
+}
+
+impl<T: ?Sized + PartialOrd> PartialOrd for Arc<T> {
+ fn partial_cmp(&self, other: &Arc<T>) -> Option<Ordering> {
+ (**self).partial_cmp(&**other)
+ }
+
+ fn lt(&self, other: &Arc<T>) -> bool {
+ *(*self) < *(*other)
+ }
+
+ fn le(&self, other: &Arc<T>) -> bool {
+ *(*self) <= *(*other)
+ }
+
+ fn gt(&self, other: &Arc<T>) -> bool {
+ *(*self) > *(*other)
+ }
+
+ fn ge(&self, other: &Arc<T>) -> bool {
+ *(*self) >= *(*other)
+ }
+}
+
+impl<T: ?Sized + Ord> Ord for Arc<T> {
+ fn cmp(&self, other: &Arc<T>) -> Ordering {
+ (**self).cmp(&**other)
+ }
+}
+
+impl<T: ?Sized + Eq> Eq for Arc<T> {}
+
+impl<T: ?Sized + fmt::Display> fmt::Display for Arc<T> {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ fmt::Display::fmt(&**self, f)
+ }
+}
+
+impl<T: ?Sized + fmt::Debug> fmt::Debug for Arc<T> {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ fmt::Debug::fmt(&**self, f)
+ }
+}
+
+impl<T: ?Sized> fmt::Pointer for Arc<T> {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ fmt::Pointer::fmt(&self.ptr(), f)
+ }
+}
+
+impl<T: Default> Default for Arc<T> {
+ #[inline]
+ fn default() -> Arc<T> {
+ Arc::new(Default::default())
+ }
+}
+
+impl<T: ?Sized + Hash> Hash for Arc<T> {
+ fn hash<H: Hasher>(&self, state: &mut H) {
+ (**self).hash(state)
+ }
+}
+
+impl<T> From<T> for Arc<T> {
+ #[inline]
+ fn from(t: T) -> Self {
+ Arc::new(t)
+ }
+}
+
+impl<T: ?Sized> borrow::Borrow<T> for Arc<T> {
+ #[inline]
+ fn borrow(&self) -> &T {
+ &**self
+ }
+}
+
+impl<T: ?Sized> AsRef<T> for Arc<T> {
+ #[inline]
+ fn as_ref(&self) -> &T {
+ &**self
+ }
+}
+
+#[cfg(feature = "stable_deref_trait")]
+unsafe impl<T: ?Sized> StableDeref for Arc<T> {}
+#[cfg(feature = "stable_deref_trait")]
+unsafe impl<T: ?Sized> CloneStableDeref for Arc<T> {}
+
+#[cfg(feature = "serde")]
+impl<'de, T: Deserialize<'de>> Deserialize<'de> for Arc<T> {
+ fn deserialize<D>(deserializer: D) -> Result<Arc<T>, D::Error>
+ where
+ D: ::serde::de::Deserializer<'de>,
+ {
+ T::deserialize(deserializer).map(Arc::new)
+ }
+}
+
+#[cfg(feature = "serde")]
+impl<T: Serialize> Serialize for Arc<T> {
+ fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
+ where
+ S: ::serde::ser::Serializer,
+ {
+ (**self).serialize(serializer)
+ }
+}
+
+// Safety:
+// This implementation must guarantee that it is sound to call replace_ptr with an unsized variant
+// of the pointer returned in `as_sized_ptr`. The basic property of Unsize coercion is that safety
+// invariants and layout are unaffected. The Arc does not rely on any other property of T. This makes
+// any unsized ArcInner valid for being shared with the sized variant.
+// This does _not_ mean that any T can be unsized into a U, but rather that if such unsizing is
+// possible then it can be propagated into the Arc<T>.
+#[cfg(feature = "unsize")]
+unsafe impl<T, U: ?Sized> unsize::CoerciblePtr<U> for Arc<T> {
+ type Pointee = T;
+ type Output = Arc<U>;
+
+ fn as_sized_ptr(&mut self) -> *mut T {
+ // Returns a pointer to the complete inner. The unsizing itself won't care about the
+ // pointer value and promises not to offset it.
+ self.p.as_ptr() as *mut T
+ }
+
+ unsafe fn replace_ptr(self, new: *mut U) -> Arc<U> {
+ // Fix the provenance by ensuring that of `self` is used.
+ let inner = ManuallyDrop::new(self);
+ let p = inner.p.as_ptr() as *mut T;
+ // Safety: This points to an ArcInner of the previous self and holds shared ownership since
+ // the old pointer never decremented the reference count. The caller upholds that `new` is
+ // an unsized version of the previous ArcInner. This assumes that unsizing to the fat
+ // pointer tag of an `ArcInner<U>` and `U` is isomorphic under a direct pointer cast since
+ // in reality we unsized *mut T to *mut U at the address of the ArcInner. This is the case
+ // for all currently envisioned unsized types where the tag of T and ArcInner<T> are simply
+ // the same.
+ Arc::from_raw_inner(p.replace_ptr(new) as *mut ArcInner<U>)
+ }
+}
+
+#[track_caller]
+fn must_be_unique<T: ?Sized>(arc: &mut Arc<T>) -> &mut UniqueArc<T> {
+ match Arc::try_as_unique(arc) {
+ Ok(unique) => unique,
+ Err(this) => panic!("`Arc` must be unique in order for this operation to be safe, there are currently {} copies", Arc::count(this)),
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use crate::arc::Arc;
+ use alloc::string::String;
+ use core::mem::MaybeUninit;
+ #[cfg(feature = "unsize")]
+ use unsize::{CoerceUnsize, Coercion};
+
+ #[test]
+ fn try_unwrap() {
+ let x = Arc::new(100usize);
+ let y = x.clone();
+
+ // The count should be two so `try_unwrap()` should fail
+ assert_eq!(Arc::count(&x), 2);
+ assert!(Arc::try_unwrap(x).is_err());
+
+ // Since `x` has now been dropped, the count should be 1
+ // and `try_unwrap()` should succeed
+ assert_eq!(Arc::count(&y), 1);
+ assert_eq!(Arc::try_unwrap(y), Ok(100));
+ }
+
+ #[test]
+ #[cfg(feature = "unsize")]
+ fn coerce_to_slice() {
+ let x = Arc::new([0u8; 4]);
+ let y: Arc<[u8]> = x.clone().unsize(Coercion::to_slice());
+ assert_eq!((*x).as_ptr(), (*y).as_ptr());
+ }
+
+ #[test]
+ #[cfg(feature = "unsize")]
+ fn coerce_to_dyn() {
+ let x: Arc<_> = Arc::new(|| 42u32);
+ let x: Arc<_> = x.unsize(Coercion::<_, dyn Fn() -> u32>::to_fn());
+ assert_eq!((*x)(), 42);
+ }
+
+ #[test]
+ #[allow(deprecated)]
+ fn maybeuninit() {
+ let mut arc: Arc<MaybeUninit<_>> = Arc::new_uninit();
+ arc.write(999);
+
+ let arc = unsafe { arc.assume_init() };
+ assert_eq!(*arc, 999);
+ }
+
+ #[test]
+ #[allow(deprecated)]
+ #[should_panic = "`Arc` must be unique in order for this operation to be safe"]
+ fn maybeuninit_ub_to_proceed() {
+ let mut uninit = Arc::new_uninit();
+ let clone = uninit.clone();
+
+ let x: &MaybeUninit<String> = &*clone;
+
+ // This write invalidates `x` reference
+ uninit.write(String::from("nonononono"));
+
+ // Read invalidated reference to trigger UB
+ let _ = &*x;
+ }
+
+ #[test]
+ #[allow(deprecated)]
+ #[should_panic = "`Arc` must be unique in order for this operation to be safe"]
+ fn maybeuninit_slice_ub_to_proceed() {
+ let mut uninit = Arc::new_uninit_slice(13);
+ let clone = uninit.clone();
+
+ let x: &[MaybeUninit<String>] = &*clone;
+
+ // This write invalidates `x` reference
+ uninit.as_mut_slice()[0].write(String::from("nonononono"));
+
+ // Read invalidated reference to trigger UB
+ let _ = &*x;
+ }
+
+ #[test]
+ fn maybeuninit_array() {
+ let mut arc: Arc<[MaybeUninit<_>]> = Arc::new_uninit_slice(5);
+ assert!(arc.is_unique());
+ #[allow(deprecated)]
+ for (uninit, index) in arc.as_mut_slice().iter_mut().zip(0..5) {
+ let ptr = uninit.as_mut_ptr();
+ unsafe { core::ptr::write(ptr, index) };
+ }
+
+ let arc = unsafe { arc.assume_init() };
+ assert!(arc.is_unique());
+        // Using clone to check that the layout generated in new_uninit_slice is compatible
+ // with ArcInner.
+ let arcs = [
+ arc.clone(),
+ arc.clone(),
+ arc.clone(),
+ arc.clone(),
+ arc.clone(),
+ ];
+ assert_eq!(6, Arc::count(&arc));
+ // If the layout is not compatible, then the data might be corrupted.
+ assert_eq!(*arc, [0, 1, 2, 3, 4]);
+
+ // Drop the arcs and check the count and the content to
+ // make sure it isn't corrupted.
+ drop(arcs);
+ assert!(arc.is_unique());
+ assert_eq!(*arc, [0, 1, 2, 3, 4]);
+ }
+
+ #[test]
+ fn roundtrip() {
+ let arc: Arc<usize> = Arc::new(0usize);
+ let ptr = Arc::into_raw(arc);
+ unsafe {
+ let _arc = Arc::from_raw(ptr);
+ }
+ }
+}
diff --git a/vendor/triomphe/src/arc_borrow.rs b/vendor/triomphe/src/arc_borrow.rs
new file mode 100644
index 000000000..d53e1a5ea
--- /dev/null
+++ b/vendor/triomphe/src/arc_borrow.rs
@@ -0,0 +1,116 @@
+use core::mem;
+use core::mem::ManuallyDrop;
+use core::ops::Deref;
+use core::ptr;
+
+use super::Arc;
+
+/// A "borrowed `Arc`". This is a pointer to
+/// a T that is known to have been allocated within an
+/// `Arc`.
+///
+/// This is equivalent in guarantees to `&Arc<T>`, however it is
+/// a bit more flexible. To obtain an `&Arc<T>` you must have
+/// an `Arc<T>` instance somewhere pinned down until we're done with it.
+/// It's also a direct pointer to `T`, so using this involves less pointer-chasing
+///
+/// However, C++ code may hand us refcounted things as pointers to T directly,
+/// so we have to conjure up a temporary `Arc` on the stack each time. The
+/// same happens for when the object is managed by a `OffsetArc`.
+///
+/// `ArcBorrow` lets us deal with borrows of known-refcounted objects
+/// without needing to worry about where the `Arc<T>` is.
+#[derive(Debug, Eq, PartialEq)]
+#[repr(transparent)]
+pub struct ArcBorrow<'a, T: ?Sized + 'a>(pub(crate) &'a T);
+
+impl<'a, T> Copy for ArcBorrow<'a, T> {}
+impl<'a, T> Clone for ArcBorrow<'a, T> {
+ #[inline]
+ fn clone(&self) -> Self {
+ *self
+ }
+}
+
+impl<'a, T> ArcBorrow<'a, T> {
+ /// Clone this as an `Arc<T>`. This bumps the refcount.
+ #[inline]
+ pub fn clone_arc(&self) -> Arc<T> {
+ let arc = unsafe { Arc::from_raw(self.0) };
+ // addref it!
+ mem::forget(arc.clone());
+ arc
+ }
+
+ /// For constructing from a reference known to be Arc-backed,
+ /// e.g. if we obtain such a reference over FFI
+ /// TODO: should from_ref be relaxed to unsized types? It can't be
+ /// converted back to an Arc right now for unsized types.
+ #[inline]
+ pub unsafe fn from_ref(r: &'a T) -> Self {
+ ArcBorrow(r)
+ }
+
+ /// Compare two `ArcBorrow`s via pointer equality. Will only return
+ /// true if they come from the same allocation
+ #[inline]
+ pub fn ptr_eq(this: &Self, other: &Self) -> bool {
+ ptr::eq(this.0 as *const T, other.0 as *const T)
+ }
+
+ /// Temporarily converts |self| into a bonafide Arc and exposes it to the
+ /// provided callback. The refcount is not modified.
+ #[inline]
+ pub fn with_arc<F, U>(&self, f: F) -> U
+ where
+ F: FnOnce(&Arc<T>) -> U,
+ T: 'static,
+ {
+ // Synthesize transient Arc, which never touches the refcount.
+ let transient = unsafe { ManuallyDrop::new(Arc::from_raw(self.0)) };
+
+ // Expose the transient Arc to the callback, which may clone it if it wants
+ // and forward the result to the user
+ f(&transient)
+ }
+
+ /// Similar to deref, but uses the lifetime |a| rather than the lifetime of
+ /// self, which is incompatible with the signature of the Deref trait.
+ #[inline]
+ pub fn get(&self) -> &'a T {
+ self.0
+ }
+}
+
+impl<'a, T> Deref for ArcBorrow<'a, T> {
+ type Target = T;
+
+ #[inline]
+ fn deref(&self) -> &T {
+ self.0
+ }
+}
+
+// Safety:
+// This implementation must guarantee that it is sound to call replace_ptr with an unsized variant
+// of the pointer returned in `as_sized_ptr`. We leverage unsizing the contained reference. This
+// continues to point to the data of an ArcInner. The reference count remains untouched which is
+// correct since the number of owners did not change. This implies the returned instance fulfills
+// its safety invariants.
+#[cfg(feature = "unsize")]
+unsafe impl<'lt, T: 'lt, U: ?Sized + 'lt> unsize::CoerciblePtr<U> for ArcBorrow<'lt, T> {
+ type Pointee = T;
+ type Output = ArcBorrow<'lt, U>;
+
+ fn as_sized_ptr(&mut self) -> *mut T {
+ // Returns a pointer to the inner data. We do not need to care about any particular
+ // provenance here, only the pointer value, which we need to reconstruct the new pointer.
+ self.0 as *const T as *mut T
+ }
+
+ unsafe fn replace_ptr(self, new: *mut U) -> ArcBorrow<'lt, U> {
+ let inner = ManuallyDrop::new(self);
+ // Safety: backed by the same Arc that backed `self`.
+ ArcBorrow(inner.0.replace_ptr(new))
+ }
+}
diff --git a/vendor/triomphe/src/arc_swap_support.rs b/vendor/triomphe/src/arc_swap_support.rs
new file mode 100644
index 000000000..195854ed3
--- /dev/null
+++ b/vendor/triomphe/src/arc_swap_support.rs
@@ -0,0 +1,42 @@
+use arc_swap::RefCnt;
+
+use crate::{Arc, ThinArc};
+use core::ffi::c_void;
+
+// Safety: `into_ptr`/`from_ptr` round-trip through ThinArc's raw-pointer API
+// without touching the refcount, and `as_ptr` observes the pointer without
+// consuming `me`, as the arc-swap `RefCnt` contract requires.
+unsafe impl<H, T> RefCnt for ThinArc<H, T> {
+    type Base = c_void;
+
+    #[inline]
+    fn into_ptr(me: Self) -> *mut Self::Base {
+        ThinArc::into_raw(me) as *mut _
+    }
+
+    #[inline]
+    fn as_ptr(me: &Self) -> *mut Self::Base {
+        ThinArc::as_ptr(me) as *mut _
+    }
+
+    #[inline]
+    unsafe fn from_ptr(ptr: *const Self::Base) -> Self {
+        ThinArc::from_raw(ptr)
+    }
+}
+
+// Safety: mirrors the ThinArc impl above; `into_raw`/`from_raw` transfer
+// ownership of one reference, and `as_ptr` does not affect the refcount.
+unsafe impl<T> RefCnt for Arc<T> {
+    type Base = T;
+
+    #[inline]
+    fn into_ptr(me: Self) -> *mut Self::Base {
+        Arc::into_raw(me) as *mut _
+    }
+
+    #[inline]
+    fn as_ptr(me: &Self) -> *mut Self::Base {
+        Arc::as_ptr(me) as *mut _
+    }
+
+    #[inline]
+    unsafe fn from_ptr(ptr: *const Self::Base) -> Self {
+        Arc::from_raw(ptr)
+    }
+}
diff --git a/vendor/triomphe/src/arc_union.rs b/vendor/triomphe/src/arc_union.rs
new file mode 100644
index 000000000..6d5cddc8f
--- /dev/null
+++ b/vendor/triomphe/src/arc_union.rs
@@ -0,0 +1,139 @@
+use core::fmt;
+use core::marker::PhantomData;
+use core::ptr;
+use core::usize;
+
+use super::{Arc, ArcBorrow};
+
+/// A tagged union that can represent `Arc<A>` or `Arc<B>` while only consuming a
+/// single word. The type is also `NonNull`, and thus can be stored in an Option
+/// without increasing size.
+///
+/// This is functionally equivalent to
+/// `enum ArcUnion<A, B> { First(Arc<A>), Second(Arc<B>) }` but only takes
+/// up a single word of stack space.
+///
+/// The variant is encoded in the least-significant bit of the pointer
+/// (clear = `A`, set = `B`); see `from_first`/`from_second`/`is_first`.
+///
+/// This could probably be extended to support four types if necessary.
+pub struct ArcUnion<A, B> {
+    // Tagged pointer to the ArcInner's data; the low bit is the variant tag.
+    p: ptr::NonNull<()>,
+    phantom_a: PhantomData<A>,
+    phantom_b: PhantomData<B>,
+}
+
+// Safety: an ArcUnion owns a reference to either an A or a B, so it may cross
+// threads exactly when both payload types are Send + Sync.
+unsafe impl<A: Sync + Send, B: Send + Sync> Send for ArcUnion<A, B> {}
+unsafe impl<A: Sync + Send, B: Send + Sync> Sync for ArcUnion<A, B> {}
+
+impl<A: PartialEq, B: PartialEq> PartialEq for ArcUnion<A, B> {
+    /// Two unions compare equal when they hold the same variant and the
+    /// pointed-to payloads compare equal.
+    fn eq(&self, other: &Self) -> bool {
+        match (self.borrow(), other.borrow()) {
+            (crate::ArcUnionBorrow::First(a), crate::ArcUnionBorrow::First(b)) => a == b,
+            (crate::ArcUnionBorrow::Second(a), crate::ArcUnionBorrow::Second(b)) => a == b,
+            _ => false,
+        }
+    }
+}
+
+/// This represents a borrow of an `ArcUnion`, resolved to one concrete
+/// variant. Obtained via `ArcUnion::borrow`; does not touch the refcount.
+#[derive(Debug)]
+pub enum ArcUnionBorrow<'a, A: 'a, B: 'a> {
+    First(ArcBorrow<'a, A>),
+    Second(ArcBorrow<'a, B>),
+}
+
+impl<A, B> ArcUnion<A, B> {
+    /// Wraps a (possibly tag-bit-marked) raw pointer.
+    ///
+    /// Safety: `ptr` must be non-null and must be the result of
+    /// `Arc::into_raw`, optionally with the low bit set to mark variant B.
+    unsafe fn new(ptr: *mut ()) -> Self {
+        ArcUnion {
+            p: ptr::NonNull::new_unchecked(ptr),
+            phantom_a: PhantomData,
+            phantom_b: PhantomData,
+        }
+    }
+
+    /// Returns true if the two values are pointer-equal.
+    #[inline]
+    pub fn ptr_eq(this: &Self, other: &Self) -> bool {
+        this.p == other.p
+    }
+
+    /// Returns an enum representing a borrow of either A or B.
+    pub fn borrow(&self) -> ArcUnionBorrow<A, B> {
+        if self.is_first() {
+            // Tag bit clear: pointer value is already the data address.
+            let ptr = self.p.as_ptr() as *const A;
+            let borrow = unsafe { ArcBorrow::from_ref(&*ptr) };
+            ArcUnionBorrow::First(borrow)
+        } else {
+            // Mask off the tag bit to recover the real data address.
+            let ptr = ((self.p.as_ptr() as usize) & !0x1) as *const B;
+            let borrow = unsafe { ArcBorrow::from_ref(&*ptr) };
+            ArcUnionBorrow::Second(borrow)
+        }
+    }
+
+    /// Creates an `ArcUnion` from an instance of the first type.
+    #[inline]
+    pub fn from_first(other: Arc<A>) -> Self {
+        // The tag bit is left clear, marking the first variant.
+        unsafe { Self::new(Arc::into_raw(other) as *mut _) }
+    }
+
+    /// Creates an `ArcUnion` from an instance of the second type.
+    #[inline]
+    pub fn from_second(other: Arc<B>) -> Self {
+        // Setting the low bit marks the second variant; this presumes the
+        // data pointer is at least 2-byte aligned so the bit is free.
+        unsafe { Self::new(((Arc::into_raw(other) as usize) | 0x1) as *mut _) }
+    }
+
+    /// Returns true if this `ArcUnion` contains the first type.
+    #[inline]
+    pub fn is_first(&self) -> bool {
+        self.p.as_ptr() as usize & 0x1 == 0
+    }
+
+    /// Returns true if this `ArcUnion` contains the second type.
+    #[inline]
+    pub fn is_second(&self) -> bool {
+        !self.is_first()
+    }
+
+    /// Returns a borrow of the first type if applicable, otherwise `None`.
+    pub fn as_first(&self) -> Option<ArcBorrow<A>> {
+        match self.borrow() {
+            ArcUnionBorrow::First(x) => Some(x),
+            ArcUnionBorrow::Second(_) => None,
+        }
+    }
+
+    /// Returns a borrow of the second type if applicable, otherwise None.
+    pub fn as_second(&self) -> Option<ArcBorrow<B>> {
+        match self.borrow() {
+            ArcUnionBorrow::First(_) => None,
+            ArcUnionBorrow::Second(x) => Some(x),
+        }
+    }
+}
+
+impl<A, B> Clone for ArcUnion<A, B> {
+    fn clone(&self) -> Self {
+        // `clone_arc` bumps the refcount of the live variant; re-tagging via
+        // from_first/from_second rebuilds the union around the new reference.
+        match self.borrow() {
+            ArcUnionBorrow::First(x) => ArcUnion::from_first(x.clone_arc()),
+            ArcUnionBorrow::Second(x) => ArcUnion::from_second(x.clone_arc()),
+        }
+    }
+}
+
+impl<A, B> Drop for ArcUnion<A, B> {
+    fn drop(&mut self) {
+        // Reconstruct a real Arc from the (untagged) data pointer and let it
+        // drop, releasing the reference this union owned. `&*x` derefs the
+        // ArcBorrow to the `&T` that `Arc::from_raw` expects.
+        match self.borrow() {
+            ArcUnionBorrow::First(x) => unsafe {
+                let _ = Arc::from_raw(&*x);
+            },
+            ArcUnionBorrow::Second(x) => unsafe {
+                let _ = Arc::from_raw(&*x);
+            },
+        }
+    }
+}
+
+// Debug by delegating to the resolved ArcUnionBorrow variant.
+impl<A: fmt::Debug, B: fmt::Debug> fmt::Debug for ArcUnion<A, B> {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        fmt::Debug::fmt(&self.borrow(), f)
+    }
+}
diff --git a/vendor/triomphe/src/header.rs b/vendor/triomphe/src/header.rs
new file mode 100644
index 000000000..e35ec48b0
--- /dev/null
+++ b/vendor/triomphe/src/header.rs
@@ -0,0 +1,378 @@
+use alloc::alloc::Layout;
+use alloc::boxed::Box;
+use alloc::string::String;
+use alloc::vec::Vec;
+use core::iter::{ExactSizeIterator, Iterator};
+use core::marker::PhantomData;
+use core::mem::{self, ManuallyDrop};
+use core::ptr::{self, addr_of_mut};
+use core::usize;
+
+use super::{Arc, ArcInner};
+
+/// Structure to allow Arc-managing some fixed-sized data and a variably-sized
+/// slice in a single allocation.
+///
+/// `repr(C)` pins the header-before-slice layout, which the unsafe
+/// conversions in this module (e.g. `from_header_and_str`) rely on.
+#[derive(Debug, Eq, PartialEq, Hash, PartialOrd)]
+#[repr(C)]
+pub struct HeaderSlice<H, T: ?Sized> {
+    /// The fixed-sized data.
+    pub header: H,
+
+    /// The dynamically-sized data.
+    pub slice: T,
+}
+
+impl<H, T> Arc<HeaderSlice<H, [T]>> {
+    /// Creates an Arc for a HeaderSlice using the given header struct and
+    /// iterator to generate the slice. The resulting Arc will be fat.
+    ///
+    /// Panics if `items` yields a different number of elements than its
+    /// `ExactSizeIterator::len` promised.
+    pub fn from_header_and_iter<I>(header: H, mut items: I) -> Self
+    where
+        I: Iterator<Item = T> + ExactSizeIterator,
+    {
+        assert_ne!(mem::size_of::<T>(), 0, "Need to think about ZST");
+
+        let num_items = items.len();
+
+        let inner = Arc::allocate_for_header_and_slice(num_items);
+
+        unsafe {
+            // Write the data.
+            //
+            // Note that any panics here (i.e. from the iterator) are safe, since
+            // we'll just leak the uninitialized memory.
+            ptr::write(&mut ((*inner.as_ptr()).data.header), header);
+            if num_items != 0 {
+                let mut current = (*inner.as_ptr()).data.slice.as_mut_ptr();
+                for _ in 0..num_items {
+                    ptr::write(
+                        current,
+                        items
+                            .next()
+                            .expect("ExactSizeIterator over-reported length"),
+                    );
+                    current = current.offset(1);
+                }
+            }
+            // Check exhaustion exactly once, after the loop. (Checking it a
+            // second time would call `next()` on an already-exhausted
+            // iterator, which non-fused iterators need not tolerate.)
+            assert!(
+                items.next().is_none(),
+                "ExactSizeIterator under-reported length"
+            );
+        }
+
+        // Safety: ptr is valid & the inner structure is fully initialized
+        Arc {
+            p: inner,
+            phantom: PhantomData,
+        }
+    }
+
+    /// Creates an Arc for a HeaderSlice using the given header struct and
+    /// a slice to copy into it. The resulting Arc will be fat.
+    pub fn from_header_and_slice(header: H, items: &[T]) -> Self
+    where
+        T: Copy,
+    {
+        assert_ne!(mem::size_of::<T>(), 0, "Need to think about ZST");
+
+        let num_items = items.len();
+
+        let inner = Arc::allocate_for_header_and_slice(num_items);
+
+        unsafe {
+            // Write the data. `T: Copy` makes the bitwise copy sufficient.
+            ptr::write(&mut ((*inner.as_ptr()).data.header), header);
+            let dst = (*inner.as_ptr()).data.slice.as_mut_ptr();
+            ptr::copy_nonoverlapping(items.as_ptr(), dst, num_items);
+        }
+
+        // Safety: ptr is valid & the inner structure is fully initialized
+        Arc {
+            p: inner,
+            phantom: PhantomData,
+        }
+    }
+
+    /// Creates an Arc for a HeaderSlice using the given header struct and
+    /// vec to generate the slice. The resulting Arc will be fat.
+    pub fn from_header_and_vec(header: H, mut v: Vec<T>) -> Self {
+        let len = v.len();
+
+        let inner = Arc::allocate_for_header_and_slice(len);
+
+        unsafe {
+            // Safety: inner is a valid pointer, so this can't go out of bounds
+            let dst = addr_of_mut!((*inner.as_ptr()).data.header);
+
+            // Safety: `dst` is valid for writes (just allocated)
+            ptr::write(dst, header);
+        }
+
+        unsafe {
+            let src = v.as_mut_ptr();
+
+            // Safety: inner is a valid pointer, so this can't go out of bounds
+            let dst = addr_of_mut!((*inner.as_ptr()).data.slice) as *mut T;
+
+            // Safety:
+            // - `src` is valid for reads for `len` (got from `Vec`)
+            // - `dst` is valid for writes for `len` (just allocated, with layout for appropriate slice)
+            // - `src` and `dst` don't overlap (separate allocations)
+            ptr::copy_nonoverlapping(src, dst, len);
+
+            // Deallocate vec without dropping `T`, since ownership of every
+            // element was just moved into the new allocation.
+            //
+            // Safety: 0..0 elements are always initialized, 0 <= cap for any cap
+            v.set_len(0);
+        }
+
+        // Safety: ptr is valid & the inner structure is fully initialized
+        Arc {
+            p: inner,
+            phantom: PhantomData,
+        }
+    }
+}
+
+impl<H> Arc<HeaderSlice<H, str>> {
+    /// Creates an Arc for a HeaderSlice using the given header struct and
+    /// a str slice to generate the slice. The resulting Arc will be fat.
+    pub fn from_header_and_str(header: H, string: &str) -> Self {
+        // Build as `[u8]` first, then reinterpret the allocation as `str`.
+        let bytes = Arc::from_header_and_slice(header, string.as_bytes());
+
+        // Safety: `ArcInner` and `HeaderSlice` are `repr(C)`, `str` has the same layout as `[u8]`,
+        // thus it's ok to "transmute" between `Arc<HeaderSlice<H, [u8]>>` and `Arc<HeaderSlice<H, str>>`.
+        //
+        // `bytes` are a valid string since we've just got them from a valid `str`.
+        unsafe { Arc::from_raw_inner(Arc::into_raw_inner(bytes) as _) }
+    }
+}
+
+/// Header data with an inline length. Consumers that use HeaderWithLength as the
+/// Header type in HeaderSlice can take advantage of ThinArc, which reads the
+/// in-allocation `length` instead of carrying fat-pointer metadata.
+#[derive(Debug, Eq, PartialEq, Hash, PartialOrd)]
+#[repr(C)]
+pub struct HeaderWithLength<H> {
+    /// The fixed-sized data.
+    pub header: H,
+
+    /// The slice length.
+    pub length: usize,
+}
+
+impl<H> HeaderWithLength<H> {
+    /// Bundles a `header` together with an explicit slice `length`.
+    #[inline]
+    pub fn new(header: H, length: usize) -> Self {
+        Self { header, length }
+    }
+}
+
+// Strip a zero-sized `()` header: the two layouts are identical, so the
+// allocation can be reinterpreted in place.
+impl<T: ?Sized> From<Arc<HeaderSlice<(), T>>> for Arc<T> {
+    fn from(this: Arc<HeaderSlice<(), T>>) -> Self {
+        debug_assert_eq!(
+            Layout::for_value::<HeaderSlice<(), T>>(&this),
+            Layout::for_value::<T>(&this.slice)
+        );
+
+        // Safety: `HeaderSlice<(), T>` and `T` has the same layout
+        unsafe { Arc::from_raw_inner(Arc::into_raw_inner(this) as _) }
+    }
+}
+
+// Inverse of the impl above: wrap a bare `Arc<T>` in a zero-sized `()` header.
+impl<T: ?Sized> From<Arc<T>> for Arc<HeaderSlice<(), T>> {
+    fn from(this: Arc<T>) -> Self {
+        // Safety: `T` and `HeaderSlice<(), T>` has the same layout
+        unsafe { Arc::from_raw_inner(Arc::into_raw_inner(this) as _) }
+    }
+}
+
+// Copy a slice into a fresh Arc<[T]> via a throwaway `()` header.
+impl<T: Copy> From<&[T]> for Arc<[T]> {
+    fn from(slice: &[T]) -> Self {
+        Arc::from_header_and_slice((), slice).into()
+    }
+}
+
+// Copy a string slice into a fresh Arc<str> via a throwaway `()` header.
+impl From<&str> for Arc<str> {
+    fn from(s: &str) -> Self {
+        Arc::from_header_and_str((), s).into()
+    }
+}
+
+/// Converts an owned `String` into `Arc<str>` by copying its contents.
+/// Note: the String's buffer is not reused; the bytes are copied.
+impl From<String> for Arc<str> {
+    fn from(s: String) -> Self {
+        Self::from(s.as_str())
+    }
+}
+
+// FIXME: once `pointer::with_metadata_of` is stable or
+// implementable on stable without assuming ptr layout
+// this will be able to accept `T: ?Sized`.
+impl<T> From<Box<T>> for Arc<T> {
+    /// Moves the boxed value into a new Arc allocation, then frees the Box's
+    /// storage without running `T`'s destructor (ownership was transferred).
+    fn from(b: Box<T>) -> Self {
+        let layout = Layout::for_value::<T>(&b);
+
+        // Safety: the closure only changes the type of the pointer
+        let inner = unsafe { Self::allocate_for_layout(layout, |mem| mem as *mut ArcInner<T>) };
+
+        unsafe {
+            let src = Box::into_raw(b);
+
+            // Safety: inner is a valid pointer, so this can't go out of bounds
+            let dst = addr_of_mut!((*inner.as_ptr()).data);
+
+            // Safety:
+            // - `src` is valid for reads (got from `Box`)
+            // - `dst` is valid for writes (just allocated)
+            // - `src` and `dst` don't overlap (separate allocations)
+            ptr::copy_nonoverlapping(src, dst, 1);
+
+            // Deallocate box without dropping `T` (the rebuilt Box is dropped
+            // immediately, freeing only the heap storage).
+            //
+            // Safety:
+            // - `src` has been got from `Box::into_raw`
+            // - `ManuallyDrop<T>` is guaranteed to have the same layout as `T`
+            Box::<ManuallyDrop<T>>::from_raw(src as _);
+        }
+
+        Arc {
+            p: inner,
+            phantom: PhantomData,
+        }
+    }
+}
+
+// Move a Vec's elements into a fresh Arc<[T]> via a throwaway `()` header.
+impl<T> From<Vec<T>> for Arc<[T]> {
+    fn from(v: Vec<T>) -> Self {
+        Arc::from_header_and_vec((), v).into()
+    }
+}
+
+pub(crate) type HeaderSliceWithLength<H, T> = HeaderSlice<HeaderWithLength<H>, T>;
+
+#[cfg(test)]
+mod tests {
+    // Sanity tests for the `from_header_and_*` constructors and the
+    // header-erasing `From` conversions above.
+    use alloc::boxed::Box;
+    use alloc::string::String;
+    use alloc::vec;
+    use core::iter;
+
+    use crate::{Arc, HeaderSlice};
+
+    #[test]
+    fn from_header_and_iter_smoke() {
+        let arc = Arc::from_header_and_iter(
+            (42u32, 17u8),
+            IntoIterator::into_iter([1u16, 2, 3, 4, 5, 6, 7]),
+        );
+
+        assert_eq!(arc.header, (42, 17));
+        assert_eq!(arc.slice, [1, 2, 3, 4, 5, 6, 7]);
+    }
+
+    #[test]
+    fn from_header_and_slice_smoke() {
+        let arc = Arc::from_header_and_slice((42u32, 17u8), &[1u16, 2, 3, 4, 5, 6, 7]);
+
+        assert_eq!(arc.header, (42, 17));
+        assert_eq!(arc.slice, [1u16, 2, 3, 4, 5, 6, 7]);
+    }
+
+    #[test]
+    fn from_header_and_vec_smoke() {
+        let arc = Arc::from_header_and_vec((42u32, 17u8), vec![1u16, 2, 3, 4, 5, 6, 7]);
+
+        assert_eq!(arc.header, (42, 17));
+        assert_eq!(arc.slice, [1u16, 2, 3, 4, 5, 6, 7]);
+    }
+
+    #[test]
+    fn from_header_and_iter_empty() {
+        let arc = Arc::from_header_and_iter((42u32, 17u8), iter::empty::<u16>());
+
+        assert_eq!(arc.header, (42, 17));
+        assert_eq!(arc.slice, []);
+    }
+
+    #[test]
+    fn from_header_and_slice_empty() {
+        let arc = Arc::from_header_and_slice((42u32, 17u8), &[1u16; 0]);
+
+        assert_eq!(arc.header, (42, 17));
+        assert_eq!(arc.slice, []);
+    }
+
+    #[test]
+    fn from_header_and_vec_empty() {
+        let arc = Arc::from_header_and_vec((42u32, 17u8), vec![1u16; 0]);
+
+        assert_eq!(arc.header, (42, 17));
+        assert_eq!(arc.slice, []);
+    }
+
+    // Regression tests for upstream issue #13 (empty/consumed iterators).
+    #[test]
+    fn issue_13_empty() {
+        crate::Arc::from_header_and_iter((), iter::empty::<usize>());
+    }
+
+    #[test]
+    fn issue_13_consumption() {
+        let s: &[u8] = &[0u8; 255];
+        crate::Arc::from_header_and_iter((), s.iter().copied());
+    }
+
+    #[test]
+    fn from_header_and_str_smoke() {
+        let a = Arc::from_header_and_str(
+            42,
+            "The answer to the ultimate question of life, the universe, and everything",
+        );
+        assert_eq!(a.header, 42);
+        assert_eq!(
+            &a.slice,
+            "The answer to the ultimate question of life, the universe, and everything"
+        );
+
+        let empty = Arc::from_header_and_str((), "");
+        assert_eq!(empty.header, ());
+        assert_eq!(&empty.slice, "");
+    }
+
+    #[test]
+    fn erase_and_create_from_thin_air_header() {
+        let a: Arc<HeaderSlice<(), [u32]>> = Arc::from_header_and_slice((), &[12, 17, 16]);
+        let b: Arc<[u32]> = a.into();
+
+        assert_eq!(&*b, [12, 17, 16]);
+
+        let c: Arc<HeaderSlice<(), [u32]>> = b.into();
+
+        assert_eq!(&c.slice, [12, 17, 16]);
+        assert_eq!(c.header, ());
+    }
+
+    #[test]
+    fn from_box_and_vec() {
+        let b = Box::new(String::from("xxx"));
+        let b = Arc::<String>::from(b);
+        assert_eq!(&*b, "xxx");
+
+        let v = vec![String::from("1"), String::from("2"), String::from("3")];
+        let v = Arc::<[_]>::from(v);
+        assert_eq!(
+            &*v,
+            [String::from("1"), String::from("2"), String::from("3")]
+        );
+
+        // Spare capacity in the Vec must not confuse the conversion.
+        let mut v = vec![String::from("1"), String::from("2"), String::from("3")];
+        v.reserve(10);
+        let v = Arc::<[_]>::from(v);
+        assert_eq!(
+            &*v,
+            [String::from("1"), String::from("2"), String::from("3")]
+        );
+    }
+}
diff --git a/vendor/triomphe/src/lib.rs b/vendor/triomphe/src/lib.rs
new file mode 100644
index 000000000..13d568bda
--- /dev/null
+++ b/vendor/triomphe/src/lib.rs
@@ -0,0 +1,94 @@
+// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! Fork of Arc. This has the following advantages over std::sync::Arc:
+//!
+//! * `triomphe::Arc` doesn't support weak references: we save space by excluding the weak reference count, and we don't do extra read-modify-update operations to handle the possibility of weak references.
+//! * `triomphe::UniqueArc` allows one to construct a temporarily-mutable `Arc` which can be converted to a regular `triomphe::Arc` later
+//! * `triomphe::OffsetArc` can be used transparently from C++ code and is compatible with (and can be converted to/from) `triomphe::Arc`
+//! * `triomphe::ArcBorrow` is functionally similar to `&triomphe::Arc<T>`, however in memory it's simply `&T`. This makes it more flexible for FFI; the source of the borrow need not be an `Arc` pinned on the stack (and can instead be a pointer from C++, or an `OffsetArc`). Additionally, this helps avoid pointer-chasing.
+//! * `triomphe::Arc` can be constructed for dynamically-sized types via `from_header_and_iter`
+//! * `triomphe::ThinArc` provides thin-pointer `Arc`s to dynamically sized types
+//! * `triomphe::ArcUnion` is a union of two `triomphe::Arc`s which fits inside one word of memory
+
+#![allow(missing_docs)]
+#![cfg_attr(not(feature = "std"), no_std)]
+
+extern crate alloc;
+#[cfg(feature = "std")]
+extern crate core;
+
+#[cfg(feature = "arc-swap")]
+extern crate arc_swap;
+#[cfg(feature = "serde")]
+extern crate serde;
+#[cfg(feature = "stable_deref_trait")]
+extern crate stable_deref_trait;
+#[cfg(feature = "unsize")]
+extern crate unsize;
+
+/// Calculates the offset of the specified field from the start of the named struct.
+/// This macro is impossible to be const until feature(const_ptr_offset_from) is stable.
+///
+/// Uses `addr_of!` on an uninitialized value so no reference to
+/// uninitialized memory is ever created (which would be UB).
+macro_rules! offset_of {
+    ($ty: path, $field: tt) => {{
+        // ensure the type is a named struct
+        // ensure the field exists and is accessible
+        let $ty { $field: _, .. };
+
+        let uninit = <::core::mem::MaybeUninit<$ty>>::uninit(); // const since 1.36
+
+        let base_ptr: *const $ty = uninit.as_ptr(); // const since 1.59
+
+        #[allow(unused_unsafe)]
+        let field_ptr = unsafe { ::core::ptr::addr_of!((*base_ptr).$field) }; // since 1.51
+
+        // // the const version requires feature(const_ptr_offset_from)
+        // // https://github.com/rust-lang/rust/issues/92980
+        // #[allow(unused_unsafe)]
+        // unsafe { (field_ptr as *const u8).offset_from(base_ptr as *const u8) as usize }
+
+        (field_ptr as usize) - (base_ptr as usize)
+    }};
+}
+
+mod arc;
+mod arc_borrow;
+#[cfg(feature = "arc-swap")]
+mod arc_swap_support;
+mod arc_union;
+mod header;
+mod offset_arc;
+mod thin_arc;
+mod unique_arc;
+
+pub use arc::*;
+pub use arc_borrow::*;
+pub use arc_union::*;
+pub use header::*;
+pub use offset_arc::*;
+pub use thin_arc::*;
+pub use unique_arc::*;
+
+#[cfg(feature = "std")]
+use std::process::abort;
+
+// `no_std`-compatible abort by forcing a panic while already panicing:
+// a panic during unwind escalates to a process abort, with no std needed.
+#[cfg(not(feature = "std"))]
+#[cold]
+fn abort() -> ! {
+    struct PanicOnDrop;
+    impl Drop for PanicOnDrop {
+        fn drop(&mut self) {
+            panic!()
+        }
+    }
+    // Dropped during the unwind of the panic below, triggering the double panic.
+    let _double_panicer = PanicOnDrop;
+    panic!();
+}
diff --git a/vendor/triomphe/src/offset_arc.rs b/vendor/triomphe/src/offset_arc.rs
new file mode 100644
index 000000000..9345416d4
--- /dev/null
+++ b/vendor/triomphe/src/offset_arc.rs
@@ -0,0 +1,134 @@
+use core::fmt;
+use core::marker::PhantomData;
+use core::mem::ManuallyDrop;
+use core::ops::Deref;
+use core::ptr;
+
+use super::{Arc, ArcBorrow};
+
+/// An `Arc`, except it holds a pointer to the T instead of to the
+/// entire ArcInner.
+///
+/// An `OffsetArc<T>` has the same layout and ABI as a non-null
+/// `const T*` in C, and may be used in FFI function signatures.
+///
+/// ```text
+/// Arc<T> OffsetArc<T>
+/// | |
+/// v v
+/// ---------------------
+/// | RefCount | T (data) | [ArcInner<T>]
+/// ---------------------
+/// ```
+///
+/// This means that this is a direct pointer to
+/// its contained data (and can be read from by both C++ and Rust),
+/// but we can also convert it to a "regular" `Arc<T>` by removing the offset.
+///
+/// This is very useful if you have an Arc-containing struct shared between Rust and C++,
+/// and wish for C++ to be able to read the data behind the `Arc` without incurring
+/// an FFI call overhead.
+#[derive(Eq)]
+#[repr(transparent)]
+pub struct OffsetArc<T> {
+    // Points directly at the `T` inside an ArcInner (i.e. past the refcount
+    // word -- see the diagram above), never at a free-standing `T`.
+    pub(crate) ptr: ptr::NonNull<T>,
+    pub(crate) phantom: PhantomData<T>,
+}
+
+// Safety: OffsetArc is an Arc in disguise (same owned reference), so the
+// same `T: Sync + Send` bounds apply.
+unsafe impl<T: Sync + Send> Send for OffsetArc<T> {}
+unsafe impl<T: Sync + Send> Sync for OffsetArc<T> {}
+
+// The stored pointer already addresses the data, so deref is a plain read.
+impl<T> Deref for OffsetArc<T> {
+    type Target = T;
+    #[inline]
+    fn deref(&self) -> &Self::Target {
+        unsafe { &*self.ptr.as_ptr() }
+    }
+}
+
+impl<T> Clone for OffsetArc<T> {
+    #[inline]
+    fn clone(&self) -> Self {
+        // clone_arc bumps the refcount; converting back keeps the offset form.
+        Arc::into_raw_offset(self.clone_arc())
+    }
+}
+
+impl<T> Drop for OffsetArc<T> {
+    fn drop(&mut self) {
+        // Rebuild a full Arc from this offset pointer and let it drop,
+        // releasing the reference this OffsetArc owned.
+        let _ = Arc::from_raw_offset(OffsetArc {
+            ptr: self.ptr,
+            phantom: PhantomData,
+        });
+    }
+}
+
+// Debug by delegating to the pointed-to value.
+impl<T: fmt::Debug> fmt::Debug for OffsetArc<T> {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        fmt::Debug::fmt(&**self, f)
+    }
+}
+
+impl<T: PartialEq> PartialEq for OffsetArc<T> {
+    /// Compares the pointed-to values, not the pointers.
+    fn eq(&self, other: &OffsetArc<T>) -> bool {
+        **self == **other
+    }
+
+    // `ne` is forwarded explicitly so a custom `T::ne` is honored.
+    #[allow(clippy::partialeq_ne_impl)]
+    fn ne(&self, other: &OffsetArc<T>) -> bool {
+        **self != **other
+    }
+}
+
+impl<T> OffsetArc<T> {
+    /// Temporarily converts `self` into a bonafide Arc and exposes it to the
+    /// provided callback. The refcount is not modified.
+    #[inline]
+    pub fn with_arc<F, U>(&self, f: F) -> U
+    where
+        F: FnOnce(&Arc<T>) -> U,
+    {
+        // Synthesize transient Arc, which never touches the refcount of the
+        // ArcInner: `ManuallyDrop` suppresses its destructor.
+        let transient = unsafe { ManuallyDrop::new(Arc::from_raw(self.ptr.as_ptr())) };
+
+        // Expose the transient Arc to the callback, which may clone it if it wants
+        // and forward the result to the user
+        f(&transient)
+    }
+
+    /// If uniquely owned, provide a mutable reference
+    /// Else create a copy, and mutate that
+    ///
+    /// This is functionally the same thing as `Arc::make_mut`
+    #[inline]
+    pub fn make_mut(&mut self) -> &mut T
+    where
+        T: Clone,
+    {
+        unsafe {
+            // extract the OffsetArc as an owned variable (bitwise move;
+            // `self` is temporarily logically uninitialized)
+            let this = ptr::read(self);
+            // treat it as a real Arc
+            let mut arc = Arc::from_raw_offset(this);
+            // obtain the mutable reference. Cast away the lifetime
+            // This may mutate `arc`
+            let ret = Arc::make_mut(&mut arc) as *mut _;
+            // Store the possibly-mutated arc back inside, after converting
+            // it to a OffsetArc again (restores `self` before anyone can
+            // observe the moved-from state)
+            ptr::write(self, Arc::into_raw_offset(arc));
+            &mut *ret
+        }
+    }
+
+    /// Clone it as an `Arc`
+    #[inline]
+    pub fn clone_arc(&self) -> Arc<T> {
+        OffsetArc::with_arc(self, |a| a.clone())
+    }
+
+    /// Produce a pointer to the data that can be converted back
+    /// to an `Arc`
+    #[inline]
+    pub fn borrow_arc(&self) -> ArcBorrow<'_, T> {
+        ArcBorrow(&**self)
+    }
+}
diff --git a/vendor/triomphe/src/thin_arc.rs b/vendor/triomphe/src/thin_arc.rs
new file mode 100644
index 000000000..e048468ad
--- /dev/null
+++ b/vendor/triomphe/src/thin_arc.rs
@@ -0,0 +1,329 @@
+use core::ffi::c_void;
+use core::fmt;
+use core::hash::{Hash, Hasher};
+use core::iter::{ExactSizeIterator, Iterator};
+use core::marker::PhantomData;
+use core::mem::ManuallyDrop;
+use core::ops::Deref;
+use core::ptr;
+use core::usize;
+
+use super::{Arc, ArcInner, HeaderSliceWithLength, HeaderWithLength};
+
+/// A "thin" `Arc` containing dynamically sized data
+///
+/// This is functionally equivalent to `Arc<(H, [T])>`
+///
+/// When you create an `Arc` containing a dynamically sized type
+/// like `HeaderSlice<H, [T]>`, the `Arc` is represented on the stack
+/// as a "fat pointer", where the length of the slice is stored
+/// alongside the `Arc`'s pointer. In some situations you may wish to
+/// have a thin pointer instead, perhaps for FFI compatibility
+/// or space efficiency.
+///
+/// Note that we use `[T; 0]` in order to have the right alignment for `T`.
+///
+/// `ThinArc` solves this by storing the length in the allocation itself,
+/// via `HeaderSliceWithLength`.
+#[repr(transparent)]
+pub struct ThinArc<H, T> {
+    // Thin pointer to the allocation; the slice length lives inside the
+    // allocation (HeaderWithLength), and `[T; 0]` supplies T's alignment.
+    ptr: ptr::NonNull<ArcInner<HeaderSliceWithLength<H, [T; 0]>>>,
+    phantom: PhantomData<(H, T)>,
+}
+
+// Safety: ThinArc owns one Arc reference to (H, [T]); the usual Arc-style
+// `Sync + Send` bounds on both payload types apply.
+unsafe impl<H: Sync + Send, T: Sync + Send> Send for ThinArc<H, T> {}
+unsafe impl<H: Sync + Send, T: Sync + Send> Sync for ThinArc<H, T> {}
+
+// Synthesize a fat pointer from a thin pointer.
+//
+// See the comment around the analogous operation in from_header_and_iter.
+fn thin_to_thick<H, T>(
+    thin: *mut ArcInner<HeaderSliceWithLength<H, [T; 0]>>,
+) -> *mut ArcInner<HeaderSliceWithLength<H, [T]>> {
+    // The length stored inside the allocation becomes the fat pointer's
+    // metadata; only the metadata of `fake_slice` is meaningful here.
+    let len = unsafe { (*thin).data.header.length };
+    let fake_slice = ptr::slice_from_raw_parts_mut(thin as *mut T, len);
+
+    fake_slice as *mut ArcInner<HeaderSliceWithLength<H, [T]>>
+}
+
+impl<H, T> ThinArc<H, T> {
+    /// Temporarily converts `self` into a bonafide Arc and exposes it to the
+    /// provided callback. The refcount is not modified.
+    #[inline]
+    pub fn with_arc<F, U>(&self, f: F) -> U
+    where
+        F: FnOnce(&Arc<HeaderSliceWithLength<H, [T]>>) -> U,
+    {
+        // Synthesize transient Arc, which never touches the refcount of the
+        // ArcInner: `ManuallyDrop` suppresses its destructor.
+        let transient = unsafe {
+            ManuallyDrop::new(Arc {
+                p: ptr::NonNull::new_unchecked(thin_to_thick(self.ptr.as_ptr())),
+                phantom: PhantomData,
+            })
+        };
+
+        // Expose the transient Arc to the callback, which may clone it if it wants
+        // and forward the result to the user
+        f(&transient)
+    }
+
+    /// Creates a `ThinArc` for a HeaderSlice using the given header struct and
+    /// iterator to generate the slice.
+    pub fn from_header_and_iter<I>(header: H, items: I) -> Self
+    where
+        I: Iterator<Item = T> + ExactSizeIterator,
+    {
+        // The in-allocation length must match the slice; into_thin asserts it.
+        let header = HeaderWithLength::new(header, items.len());
+        Arc::into_thin(Arc::from_header_and_iter(header, items))
+    }
+
+    /// Creates a `ThinArc` for a HeaderSlice using the given header struct and
+    /// a slice to copy.
+    pub fn from_header_and_slice(header: H, items: &[T]) -> Self
+    where
+        T: Copy,
+    {
+        let header = HeaderWithLength::new(header, items.len());
+        Arc::into_thin(Arc::from_header_and_slice(header, items))
+    }
+
+    /// Returns the address on the heap of the ThinArc itself -- not the T
+    /// within it -- for memory reporting.
+    #[inline]
+    pub fn ptr(&self) -> *const c_void {
+        self.ptr.as_ptr() as *const ArcInner<T> as *const c_void
+    }
+
+    /// Returns the address on the heap of the Arc itself -- not the T within it -- for memory
+    /// reporting.
+    #[inline]
+    pub fn heap_ptr(&self) -> *const c_void {
+        self.ptr()
+    }
+
+    /// Constructs a ThinArc from a raw pointer.
+    ///
+    /// # Safety
+    ///
+    /// The raw pointer must have been previously returned by a call to
+    /// ThinArc::into_raw.
+    ///
+    /// The user of from_raw has to make sure a specific value of T is only dropped once.
+    ///
+    /// This function is unsafe because improper use may lead to memory unsafety,
+    /// even if the returned ThinArc is never accessed.
+    #[inline]
+    pub unsafe fn from_raw(ptr: *const c_void) -> Self {
+        Self {
+            ptr: ptr::NonNull::new_unchecked(ptr as *mut c_void).cast(),
+            phantom: PhantomData,
+        }
+    }
+
+    /// Consume ThinArc and returned the wrapped pointer.
+    #[inline]
+    pub fn into_raw(self) -> *const c_void {
+        // ManuallyDrop keeps the owned reference alive for the caller.
+        let this = ManuallyDrop::new(self);
+        this.ptr.cast().as_ptr()
+    }
+
+    /// Provides a raw pointer to the data.
+    /// The counts are not affected in any way and the ThinArc is not consumed.
+    /// The pointer is valid for as long as there are strong counts in the ThinArc.
+    #[inline]
+    pub fn as_ptr(&self) -> *const c_void {
+        self.ptr()
+    }
+}
+
+// Deref reconstructs the fat pointer on every access (reads the stored length).
+impl<H, T> Deref for ThinArc<H, T> {
+    type Target = HeaderSliceWithLength<H, [T]>;
+
+    #[inline]
+    fn deref(&self) -> &Self::Target {
+        unsafe { &(*thin_to_thick(self.ptr.as_ptr())).data }
+    }
+}
+
+impl<H, T> Clone for ThinArc<H, T> {
+    #[inline]
+    fn clone(&self) -> Self {
+        // Clone through a transient fat Arc (bumps refcount), then re-thin it.
+        ThinArc::with_arc(self, |a| Arc::into_thin(a.clone()))
+    }
+}
+
+impl<H, T> Drop for ThinArc<H, T> {
+    #[inline]
+    fn drop(&mut self) {
+        // Rebuild the fat Arc and let its Drop release the reference.
+        let _ = Arc::from_thin(ThinArc {
+            ptr: self.ptr,
+            phantom: PhantomData,
+        });
+    }
+}
+
+impl<H, T> Arc<HeaderSliceWithLength<H, [T]>> {
+    /// Converts an `Arc` into a `ThinArc`. This consumes the `Arc`, so the refcount
+    /// is not modified.
+    ///
+    /// Panics if the in-allocation `header.length` disagrees with the actual
+    /// slice length (Deref on the ThinArc would otherwise be unsound).
+    #[inline]
+    pub fn into_thin(a: Self) -> ThinArc<H, T> {
+        let a = ManuallyDrop::new(a);
+        assert_eq!(
+            a.header.length,
+            a.slice.len(),
+            "Length needs to be correct for ThinArc to work"
+        );
+        let fat_ptr: *mut ArcInner<HeaderSliceWithLength<H, [T]>> = a.ptr();
+        // Discard the fat pointer's length metadata, keeping only the address.
+        let thin_ptr = fat_ptr as *mut [usize] as *mut usize;
+        ThinArc {
+            ptr: unsafe {
+                ptr::NonNull::new_unchecked(
+                    thin_ptr as *mut ArcInner<HeaderSliceWithLength<H, [T; 0]>>,
+                )
+            },
+            phantom: PhantomData,
+        }
+    }
+
+    /// Converts a `ThinArc` into an `Arc`. This consumes the `ThinArc`, so the refcount
+    /// is not modified.
+    #[inline]
+    pub fn from_thin(a: ThinArc<H, T>) -> Self {
+        // ManuallyDrop: the owned reference moves into the returned Arc.
+        let a = ManuallyDrop::new(a);
+        let ptr = thin_to_thick(a.ptr.as_ptr());
+        unsafe {
+            Arc {
+                p: ptr::NonNull::new_unchecked(ptr),
+                phantom: PhantomData,
+            }
+        }
+    }
+}
+
+// Structural equality over (header, slice) via transient fat Arcs.
+impl<H: PartialEq, T: PartialEq> PartialEq for ThinArc<H, T> {
+    #[inline]
+    fn eq(&self, other: &ThinArc<H, T>) -> bool {
+        ThinArc::with_arc(self, |a| ThinArc::with_arc(other, |b| *a == *b))
+    }
+}
+
+impl<H: Eq, T: Eq> Eq for ThinArc<H, T> {}
+
+// Hashes the pointed-to (header, slice) contents, consistent with PartialEq.
+impl<H: Hash, T: Hash> Hash for ThinArc<H, T> {
+    fn hash<HSR: Hasher>(&self, state: &mut HSR) {
+        ThinArc::with_arc(self, |a| a.hash(state))
+    }
+}
+
+// Debug by delegating to the dereferenced HeaderSliceWithLength.
+impl<H: fmt::Debug, T: fmt::Debug> fmt::Debug for ThinArc<H, T> {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        fmt::Debug::fmt(&**self, f)
+    }
+}
+
+// Pointer formatting shows the heap address of the allocation.
+impl<H, T> fmt::Pointer for ThinArc<H, T> {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        fmt::Pointer::fmt(&self.ptr(), f)
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use crate::{Arc, HeaderWithLength, ThinArc};
+    use alloc::vec;
+    use core::clone::Clone;
+    use core::ops::Drop;
+    use core::sync::atomic;
+    use core::sync::atomic::Ordering::{Acquire, SeqCst};
+
+    // Counts its drops through the pointed-to atomic, so the tests can
+    // verify the header is dropped exactly once.
+    #[derive(PartialEq)]
+    struct Canary(*mut atomic::AtomicUsize);
+
+    impl Drop for Canary {
+        fn drop(&mut self) {
+            unsafe {
+                (*self.0).fetch_add(1, SeqCst);
+            }
+        }
+    }
+
+    #[test]
+    fn empty_thin() {
+        let header = HeaderWithLength::new(100u32, 0);
+        let x = Arc::from_header_and_iter(header, core::iter::empty::<i32>());
+        let y = Arc::into_thin(x.clone());
+        assert_eq!(y.header.header, 100);
+        assert!(y.slice.is_empty());
+        assert_eq!(x.header.header, 100);
+        assert!(x.slice.is_empty());
+    }
+
+    #[test]
+    fn thin_assert_padding() {
+        #[derive(Clone, Default)]
+        #[repr(C)]
+        struct Padded {
+            i: u16,
+        }
+
+        // The header will have more alignment than `Padded`
+        let header = HeaderWithLength::new(0i32, 2);
+        let items = vec![Padded { i: 0xdead }, Padded { i: 0xbeef }];
+        let a = ThinArc::from_header_and_iter(header, items.into_iter());
+        assert_eq!(a.slice.len(), 2);
+        assert_eq!(a.slice[0].i, 0xdead);
+        assert_eq!(a.slice[1].i, 0xbeef);
+    }
+
+    #[test]
+    #[allow(clippy::redundant_clone, clippy::eq_op)]
+    fn slices_and_thin() {
+        let mut canary = atomic::AtomicUsize::new(0);
+        let c = Canary(&mut canary as *mut atomic::AtomicUsize);
+        let v = vec![5, 6];
+        let header = HeaderWithLength::new(c, v.len());
+        {
+            let x = Arc::into_thin(Arc::from_header_and_slice(header, &v));
+            let y = ThinArc::with_arc(&x, |q| q.clone());
+            let _ = y.clone();
+            let _ = x == x;
+            Arc::from_thin(x.clone());
+        }
+        // All clones dropped above; the canary must have fired exactly once.
+        assert_eq!(canary.load(Acquire), 1);
+    }
+
+    #[test]
+    #[allow(clippy::redundant_clone, clippy::eq_op)]
+    fn iter_and_thin() {
+        let mut canary = atomic::AtomicUsize::new(0);
+        let c = Canary(&mut canary as *mut atomic::AtomicUsize);
+        let v = vec![5, 6];
+        let header = HeaderWithLength::new(c, v.len());
+        {
+            let x = Arc::into_thin(Arc::from_header_and_iter(header, v.into_iter()));
+            let y = ThinArc::with_arc(&x, |q| q.clone());
+            let _ = y.clone();
+            let _ = x == x;
+            Arc::from_thin(x.clone());
+        }
+        assert_eq!(canary.load(Acquire), 1);
+    }
+
+    #[test]
+    fn into_raw_and_from_raw() {
+        let mut canary = atomic::AtomicUsize::new(0);
+        let c = Canary(&mut canary as *mut atomic::AtomicUsize);
+        let v = vec![5, 6];
+        let header = HeaderWithLength::new(c, v.len());
+        {
+            type ThinArcCanary = ThinArc<Canary, u32>;
+            let x: ThinArcCanary = Arc::into_thin(Arc::from_header_and_iter(header, v.into_iter()));
+            let ptr = x.as_ptr();
+
+            assert_eq!(x.into_raw(), ptr);
+
+            let _x = unsafe { ThinArcCanary::from_raw(ptr) };
+        }
+        assert_eq!(canary.load(Acquire), 1);
+    }
+}
diff --git a/vendor/triomphe/src/unique_arc.rs b/vendor/triomphe/src/unique_arc.rs
new file mode 100644
index 000000000..79555fc27
--- /dev/null
+++ b/vendor/triomphe/src/unique_arc.rs
@@ -0,0 +1,257 @@
+use alloc::{alloc::Layout, boxed::Box};
+use core::convert::TryFrom;
+use core::marker::PhantomData;
+use core::mem::{ManuallyDrop, MaybeUninit};
+use core::ops::{Deref, DerefMut};
+use core::ptr::{self, NonNull};
+use core::sync::atomic::AtomicUsize;
+
+use crate::HeaderSlice;
+
+use super::{Arc, ArcInner};
+
+/// An `Arc` that is known to be uniquely owned
+///
+/// At the moment an `Arc` is constructed it is necessarily uniquely
+/// owned, so it is safe to mutate its contents. Normally one mutates
+/// the data on the stack before allocating the `Arc`, but sometimes the
+/// data is large or unsized and must be heap-allocated earlier, in a
+/// form that can later be converted into a regular `Arc` for free.
+///
+/// `UniqueArc` exists for this purpose: when constructed it performs
+/// the same allocations necessary for an `Arc`, however it allows mutable access.
+/// Once the mutation is finished, you can call `.shareable()` and get a regular `Arc`
+/// out of it.
+///
+/// ```rust
+/// # use triomphe::UniqueArc;
+/// let data = [1, 2, 3, 4, 5];
+/// let mut x = UniqueArc::new(data);
+/// x[4] = 7; // mutate!
+/// let y = x.shareable(); // y is an Arc<T>
+/// ```
+// `repr(transparent)` guarantees the same layout as the wrapped `Arc<T>`;
+// `from_arc_ref` below relies on this for its reference cast.
+#[repr(transparent)]
+pub struct UniqueArc<T: ?Sized>(Arc<T>);
+
+impl<T> UniqueArc<T> {
+    #[inline]
+    /// Construct a new UniqueArc
+    pub fn new(data: T) -> Self {
+        UniqueArc(Arc::new(data))
+    }
+
+    /// Construct an uninitialized arc
+    ///
+    /// The returned `UniqueArc<MaybeUninit<T>>` owns a live allocation
+    /// whose refcount is 1 but whose payload has not been initialized;
+    /// use `write` followed by `assume_init` to finish construction.
+    #[inline]
+    pub fn new_uninit() -> UniqueArc<MaybeUninit<T>> {
+        unsafe {
+            let layout = Layout::new::<ArcInner<MaybeUninit<T>>>();
+            let ptr = alloc::alloc::alloc(layout);
+            let p = NonNull::new(ptr)
+                .unwrap_or_else(|| alloc::alloc::handle_alloc_error(layout))
+                .cast::<ArcInner<MaybeUninit<T>>>();
+            // Initialize the refcount through a raw field projection rather
+            // than via `p.as_mut()`: materializing a `&mut ArcInner<..>`
+            // pointing at still-uninitialized memory is undefined behavior.
+            // The `data` field is `MaybeUninit<T>` and needs no write here.
+            ptr::write(ptr::addr_of_mut!((*p.as_ptr()).count), AtomicUsize::new(1));
+
+            UniqueArc(Arc {
+                p,
+                phantom: PhantomData,
+            })
+        }
+    }
+
+    /// Gets the inner value of the unique arc
+    pub fn into_inner(this: Self) -> T {
+        // Wrap the Arc in a `ManuallyDrop` so that its drop routine never runs
+        let this = ManuallyDrop::new(this.0);
+        debug_assert!(
+            this.is_unique(),
+            "attempted to call `.into_inner()` on a `UniqueArc` with a non-zero ref count",
+        );
+
+        // Safety: We have exclusive access to the inner data and the
+        // arc will not perform its drop routine since we've
+        // wrapped it in a `ManuallyDrop`
+        unsafe { Box::from_raw(this.ptr()).data }
+    }
+}
+
+impl<T: ?Sized> UniqueArc<T> {
+ /// Convert to a shareable Arc<T> once we're done mutating it
+ #[inline]
+ pub fn shareable(self) -> Arc<T> {
+ // Simply unwraps the newtype; the refcount stays 1 and no allocation
+ // or copy takes place.
+ self.0
+ }
+
+ /// Creates a new [`UniqueArc`] from the given [`Arc`].
+ ///
+ /// An unchecked alternative to `Arc::try_unique()`
+ ///
+ /// # Safety
+ ///
+ /// The given `Arc` must have a reference count of exactly one
+ ///
+ pub(crate) unsafe fn from_arc(arc: Arc<T>) -> Self {
+ // Only checked in debug builds; release builds trust the caller.
+ debug_assert_eq!(Arc::count(&arc), 1);
+ Self(arc)
+ }
+
+ /// Creates a new `&mut `[`UniqueArc`] from the given `&mut `[`Arc`].
+ ///
+ /// An unchecked alternative to `Arc::try_as_unique()`
+ ///
+ /// # Safety
+ ///
+ /// The given `Arc` must have a reference count of exactly one
+ pub(crate) unsafe fn from_arc_ref(arc: &mut Arc<T>) -> &mut Self {
+ debug_assert_eq!(Arc::count(&arc), 1);
+
+ // Safety: caller guarantees that `arc` is unique,
+ // `UniqueArc` is `repr(transparent)` over `Arc<T>`, so the two
+ // references have identical layout and the cast is sound.
+ &mut *(arc as *mut Arc<T> as *mut UniqueArc<T>)
+ }
+}
+
+impl<T> UniqueArc<MaybeUninit<T>> {
+ /// Calls `MaybeUninit::write` on the contained value.
+ pub fn write(&mut self, val: T) -> &mut T {
+ unsafe {
+ // Casting *mut MaybeUninit<T> -> *mut T is always fine
+ let ptr = self.as_mut_ptr() as *mut T;
+
+ // Safety: We have exclusive access to the inner data
+ ptr.write(val);
+
+ // Safety: the pointer was just written to
+ &mut *ptr
+ }
+ }
+
+ /// Obtain a mutable pointer to the stored `MaybeUninit<T>`.
+ pub fn as_mut_ptr(&mut self) -> *mut MaybeUninit<T> {
+ // Uniqueness of the UniqueArc makes this mutable projection sound.
+ unsafe { &mut (*self.0.ptr()).data }
+ }
+
+ /// Convert to an initialized Arc.
+ ///
+ /// # Safety
+ ///
+ /// This function is equivalent to `MaybeUninit::assume_init` and has the
+ /// same safety requirements. You are responsible for ensuring that the `T`
+ /// has actually been initialized before calling this method.
+ #[inline]
+ pub unsafe fn assume_init(this: Self) -> UniqueArc<T> {
+ // `ManuallyDrop` suppresses the drop of `this`, so ownership of the
+ // allocation transfers to the new UniqueArc via the pointer cast
+ // (MaybeUninit<T> and T share layout) without a double free.
+ UniqueArc(Arc {
+ p: ManuallyDrop::new(this).0.p.cast(),
+ phantom: PhantomData,
+ })
+ }
+}
+
+impl<T> UniqueArc<[MaybeUninit<T>]> {
+ /// Create an Arc contains an array `[MaybeUninit<T>]` of `len`.
+ pub fn new_uninit_slice(len: usize) -> Self {
+ // Allocate via the header-slice machinery with a zero-sized `()`
+ // header, then erase the header from the type.
+ let ptr: NonNull<ArcInner<HeaderSlice<(), [MaybeUninit<T>]>>> =
+ Arc::allocate_for_header_and_slice(len);
+
+ // Safety:
+ // - `ArcInner` is properly allocated and initialized.
+ // - `()` and `[MaybeUninit<T>]` do not require special initialization
+ // - The `Arc` is just created and so -- unique.
+ unsafe {
+ let arc: Arc<HeaderSlice<(), [MaybeUninit<T>]>> = Arc::from_raw_inner(ptr.as_ptr());
+ let arc: Arc<[MaybeUninit<T>]> = arc.into();
+ UniqueArc(arc)
+ }
+ }
+
+ /// # Safety
+ ///
+ /// Must initialize all fields before calling this function.
+ #[inline]
+ pub unsafe fn assume_init_slice(Self(this): Self) -> UniqueArc<[T]> {
+ // Delegates to the inner Arc's assume_init — presumably the slice
+ // counterpart defined in arc.rs; verify against that impl.
+ UniqueArc(this.assume_init())
+ }
+}
+
+impl<T: ?Sized> TryFrom<Arc<T>> for UniqueArc<T> {
+    // On failure the caller gets the untouched `Arc` back.
+    type Error = Arc<T>;
+
+    /// Succeeds only when `value` is the sole handle to its allocation;
+    /// otherwise the unchanged `Arc` is returned as the error.
+    fn try_from(value: Arc<T>) -> Result<Self, Self::Error> {
+        Arc::try_unique(value)
+    }
+}
+
+impl<T: ?Sized> Deref for UniqueArc<T> {
+    type Target = T;
+
+    /// Borrows the payload by delegating to the wrapped `Arc`'s `Deref`.
+    #[inline]
+    fn deref(&self) -> &T {
+        self.0.deref()
+    }
+}
+
+impl<T: ?Sized> DerefMut for UniqueArc<T> {
+ #[inline]
+ fn deref_mut(&mut self) -> &mut T {
+ // Safety: a UniqueArc is by construction the only handle to its
+ // ArcInner, so handing out a mutable borrow of the payload cannot
+ // alias any other live reference.
+ unsafe { &mut (*self.0.ptr()).data }
+ }
+}
+
+// Safety:
+// This leverages the correctness of Arc's CoerciblePtr impl. Additionally, we must ensure that
+// this can not be used to violate the safety invariants of UniqueArc, which require that we can not
+// duplicate the Arc, such that replace_ptr returns a valid instance. This holds since it consumes
+// a unique owner of the contained ArcInner.
+#[cfg(feature = "unsize")]
+unsafe impl<T, U: ?Sized> unsize::CoerciblePtr<U> for UniqueArc<T> {
+ type Pointee = T;
+ type Output = UniqueArc<U>;
+
+ fn as_sized_ptr(&mut self) -> *mut T {
+ // Dispatch to the contained field.
+ unsize::CoerciblePtr::<U>::as_sized_ptr(&mut self.0)
+ }
+
+ unsafe fn replace_ptr(self, new: *mut U) -> UniqueArc<U> {
+ // Dispatch to the contained field, work around conflict of destructuring and Drop.
+ // `ManuallyDrop` suppresses this value's drop; `ptr::read` then moves
+ // the inner Arc out so it can be consumed by `replace_ptr` exactly once.
+ let inner = ManuallyDrop::new(self);
+ UniqueArc(ptr::read(&inner.0).replace_ptr(new))
+ }
+}
+
+#[cfg(test)]
+mod tests {
+    use crate::{Arc, UniqueArc};
+    use core::{convert::TryFrom, mem::MaybeUninit};
+
+    /// A freshly built `UniqueArc` must give back exactly the value it
+    /// was constructed with.
+    #[test]
+    fn unique_into_inner() {
+        let owned = UniqueArc::new(10u64);
+        assert_eq!(UniqueArc::into_inner(owned), 10);
+    }
+
+    /// Conversion from `Arc` succeeds only for the sole remaining handle.
+    #[test]
+    fn try_from_arc() {
+        let first = Arc::new(10_000);
+        let second = first.clone();
+
+        // Two live handles: conversion must fail (and consumes `first`).
+        assert!(UniqueArc::try_from(first).is_err());
+        // `second` is now the only handle, so conversion must succeed.
+        assert_eq!(
+            UniqueArc::into_inner(UniqueArc::try_from(second).unwrap()),
+            10_000,
+        );
+    }
+
+    /// Writing through an uninitialized `UniqueArc` and then assuming
+    /// init must observe the written value.
+    #[test]
+    #[allow(deprecated)]
+    fn maybeuninit_smoke() {
+        let mut uninit: UniqueArc<MaybeUninit<_>> = UniqueArc::new_uninit();
+        uninit.write(999);
+
+        let ready = unsafe { UniqueArc::assume_init(uninit) };
+        assert_eq!(*ready, 999);
+    }
+}