Diffstat (limited to 'vendor/triomphe')
-rw-r--r--  vendor/triomphe/.cargo-checksum.json                   |   2
-rw-r--r--  vendor/triomphe/Cargo.toml                             |   2
-rw-r--r--  vendor/triomphe/LICENSE-APACHE                         |   4
-rw-r--r--  vendor/triomphe/README.md                              |  16
-rw-r--r--  vendor/triomphe/src/arc.rs                             | 166
-rw-r--r--  vendor/triomphe/src/arc_borrow.rs                      |   2
-rw-r--r--  vendor/triomphe/src/header.rs                          |  19
-rw-r--r--  vendor/triomphe/src/iterator_as_exact_size_iterator.rs |  47
-rw-r--r--  vendor/triomphe/src/lib.rs                             |   3
-rw-r--r--  vendor/triomphe/src/offset_arc.rs                      |  20
-rw-r--r--  vendor/triomphe/src/thin_arc.rs                        | 103
-rw-r--r--  vendor/triomphe/src/unique_arc.rs                      |  25
12 files changed, 375 insertions, 34 deletions
diff --git a/vendor/triomphe/.cargo-checksum.json b/vendor/triomphe/.cargo-checksum.json
index 712085ef9..f296bf8d4 100644
--- a/vendor/triomphe/.cargo-checksum.json
+++ b/vendor/triomphe/.cargo-checksum.json
@@ -1 +1 @@
-{"files":{"Cargo.toml":"c202bbb3cde95a6a58173272683abee0935898336a6933ad70d7b672ecb983b5","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"15656cc11a8331f28c0986b8ab97220d3e76f98e60ed388b5ffad37dfac4710c","README.md":"b165c87151b13a2a0b639843da6f933249fe6814cd6698259172b6a27b3844a8","src/arc.rs":"dbbd13ac6d47ce2aa11a4b1af6bb858ce9bc077679d3709565d0e710a795d8b8","src/arc_borrow.rs":"df8b6ed97b41839b5be92bec8ff7b40085ae6d9d014f5571024e8bf2595a1170","src/arc_swap_support.rs":"54213ddea959fec13f1af4efb94271a9b85c531c06649899ddf8d6b939615d05","src/arc_union.rs":"b623adb5bf6f0911b5230658ad4e054d76569dc0e75fbccf41fa666112775766","src/header.rs":"051a5537d7f5aceafcb02397caf9f8a53e7e43741a41e934156f9c388bff2a7c","src/lib.rs":"657017eacbbcc9b8720ca9b278342bbfbdc6c6e25e1c6015e70fc87668cb8bb2","src/offset_arc.rs":"ebedbcc3dd46dfebcfb2976cc37252340548819b8c8c513aa4c4b2d4187c917c","src/thin_arc.rs":"dc9ad742fc4226c9f8f9b7e6ea8fb8a4aa7ea369d2723f6dcfa9bd46b4b34b0b","src/unique_arc.rs":"73308e959d0966cacd75aa2b2043a093fcb9d1b494aae9014d48c64fd1f81c8b"},"package":"f1ee9bd9239c339d714d657fac840c6d2a4f9c45f4f9ec7b0975113458be78db"}
\ No newline at end of file
+{"files":{"Cargo.toml":"7c92b709076ccf08efbafadc9f2ce7c5acef36de19de097bf924b8ac732475df","LICENSE-APACHE":"a6b7260485462860838b76ad1846d21f1605e6d239141c3aa949a601d992676c","LICENSE-MIT":"15656cc11a8331f28c0986b8ab97220d3e76f98e60ed388b5ffad37dfac4710c","README.md":"31f2d36aba753938cca35a0770f87b14215d6b367a1cd173f183d49242890258","src/arc.rs":"f257f159041c7e7b1bdde3942eea81e364a3a7fb4a1a38f771e8df79a266788d","src/arc_borrow.rs":"a560a837d4976a5e388d2ee49b751a0077607e29fb063fffa7cccbe8fa28d1d6","src/arc_swap_support.rs":"54213ddea959fec13f1af4efb94271a9b85c531c06649899ddf8d6b939615d05","src/arc_union.rs":"b623adb5bf6f0911b5230658ad4e054d76569dc0e75fbccf41fa666112775766","src/header.rs":"0a7ead5c03d3ebf3fe20fde20eced35489f3d8c53c179a53169e68c0090b6bdb","src/iterator_as_exact_size_iterator.rs":"cae199c33fb172eb63455c0290a70ac72a7a9f6d26cf30d691560470b08524a4","src/lib.rs":"66c33fbe65220852ca04347b36da3040411d39a03ca5bec6001f5358cf56af72","src/offset_arc.rs":"86ea6733383d9024bae4e8383ed7c2dcf835615b3bd17f763148e08000cd93ef","src/thin_arc.rs":"bd9887b6810321e42e4f9cda6bf7c88131ca44e46e62f988b9f611c9aa5a772a","src/unique_arc.rs":"0731c864a37f89f4708fda530572ba6f563da56ad1b70a2f54dc5baa434e1a0d"},"package":"d0c5a71827ac326072b6405552093e2ad2accd25a32fd78d4edc82d98c7f2409"}
\ No newline at end of file
diff --git a/vendor/triomphe/Cargo.toml b/vendor/triomphe/Cargo.toml
index 5cb758241..73a44bbf2 100644
--- a/vendor/triomphe/Cargo.toml
+++ b/vendor/triomphe/Cargo.toml
@@ -11,7 +11,7 @@
 
 [package]
 name = "triomphe"
-version = "0.1.8"
+version = "0.1.10"
 authors = ["The Servo Project Developers"]
 description = "A fork of std::sync::Arc with some extra functionality and without weak references (originally servo_arc)"
 readme = "README.md"
diff --git a/vendor/triomphe/LICENSE-APACHE b/vendor/triomphe/LICENSE-APACHE
index 16fe87b06..c28ea2ac4 100644
--- a/vendor/triomphe/LICENSE-APACHE
+++ b/vendor/triomphe/LICENSE-APACHE
@@ -180,7 +180,7 @@ APPENDIX: How to apply the Apache License to your work.
       To apply the Apache License to your work, attach the following
       boilerplate notice, with the fields enclosed by brackets "[]"
       replaced with your own identifying information. (Don't include
-      the brackets!) The text should be enclosed in the appropriate
+      the brackets!) The text should be enclosed in the appropriate
       comment syntax for the file format. We also recommend that a
       file or class name and description of purpose be included on the
       same "printed page" as the copyright notice for easier
@@ -192,7 +192,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
 You may obtain a copy of the License at
 
-    http://www.apache.org/licenses/LICENSE-2.0
+    http://www.apache.org/licenses/LICENSE-2.0
 
 Unless required by applicable law or agreed to in writing, software
 distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/vendor/triomphe/README.md b/vendor/triomphe/README.md
index c7615a843..59f1637b2 100644
--- a/vendor/triomphe/README.md
+++ b/vendor/triomphe/README.md
@@ -2,12 +2,12 @@
 
 Fork of Arc. This has the following advantages over std::sync::Arc:
 
- * `triomphe::Arc` doesn't support weak references: we save space by excluding the weak reference count, and we don't do extra read-modify-update operations to handle the possibility of weak references.
- * `triomphe::UniqueArc` allows one to construct a temporarily-mutable `Arc` which can be converted to a regular `triomphe::Arc` later
- * `triomphe::OffsetArc` can be used transparently from C++ code and is compatible with (and can be converted to/from) `triomphe::Arc`
- * `triomphe::ArcBorrow` is functionally similar to `&triomphe::Arc<T>`, however in memory it's simply `&T`. This makes it more flexible for FFI; the source of the borrow need not be an `Arc` pinned on the stack (and can instead be a pointer from C++, or an `OffsetArc`). Additionally, this helps avoid pointer-chasing.
- * `triomphe::Arc` has can be constructed for dynamically-sized types via `from_header_and_iter`
- * `triomphe::ThinArc` provides thin-pointer `Arc`s to dynamically sized types
- * `triomphe::ArcUnion` is union of two `triomphe:Arc`s which fits inside one word of memory
+* `triomphe::Arc` doesn't support weak references: we save space by excluding the weak reference count, and we don't do extra read-modify-update operations to handle the possibility of weak references.
+* `triomphe::UniqueArc` allows one to construct a temporarily-mutable `Arc` which can be converted to a regular `triomphe::Arc` later
+* `triomphe::OffsetArc` can be used transparently from C++ code and is compatible with (and can be converted to/from) `triomphe::Arc`
+* `triomphe::ArcBorrow` is functionally similar to `&triomphe::Arc<T>`, however in memory it's simply `&T`. This makes it more flexible for FFI; the source of the borrow need not be an `Arc` pinned on the stack (and can instead be a pointer from C++, or an `OffsetArc`). Additionally, this helps avoid pointer-chasing.
+* `triomphe::Arc` has can be constructed for dynamically-sized types via `from_header_and_iter`
+* `triomphe::ThinArc` provides thin-pointer `Arc`s to dynamically sized types
+* `triomphe::ArcUnion` is union of two `triomphe:Arc`s which fits inside one word of memory
 
-This crate is a version of `servo_arc` meant for general community use.
\ No newline at end of file
+This crate is a version of `servo_arc` meant for general community use.
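The README bullets above summarize the crate's pointer types. As a rough usage sketch (not part of the vendored sources; it assumes triomphe 0.1.10's public API as exercised by the tests in the diffs below, including the public `header`/`slice` fields of `HeaderSlice`):

```rust
use triomphe::{Arc, ThinArc, UniqueArc};

fn main() {
    // Arc without weak references: only a strong count is stored.
    let a = Arc::new(String::from("hello"));
    let b = a.clone();
    assert_eq!(2, Arc::count(&b));

    // A fixed-size header plus a dynamically sized slice in one allocation.
    let hs = Arc::from_header_and_slice(7u32, &b"abc"[..]);
    assert_eq!(hs.header, 7);
    assert_eq!(&hs.slice, &b"abc"[..]);

    // ThinArc stores the slice length inline, so the handle is a thin pointer.
    let thin = ThinArc::from_header_and_slice(7u32, &b"abc"[..]);
    assert_eq!(thin.slice.len(), 3);

    // UniqueArc: mutate while provably unique, then freeze into a shared Arc.
    let mut unique: UniqueArc<[u32]> = (1u32..=3).collect();
    unique[0] = 10;
    let shared: Arc<[u32]> = unique.shareable();
    assert_eq!(&*shared, &[10, 2, 3]);
}
```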
diff --git a/vendor/triomphe/src/arc.rs b/vendor/triomphe/src/arc.rs
index 6fe022c46..97cca6338 100644
--- a/vendor/triomphe/src/arc.rs
+++ b/vendor/triomphe/src/arc.rs
@@ -7,6 +7,7 @@ use core::convert::From;
 use core::ffi::c_void;
 use core::fmt;
 use core::hash::{Hash, Hasher};
+use core::iter::FromIterator;
 use core::marker::PhantomData;
 use core::mem::{ManuallyDrop, MaybeUninit};
 use core::ops::Deref;
@@ -28,7 +29,7 @@ use crate::{abort, ArcBorrow, HeaderSlice, OffsetArc, UniqueArc};
 /// necessarily) at _exactly_ `MAX_REFCOUNT + 1` references.
 const MAX_REFCOUNT: usize = (isize::MAX) as usize;
 
-/// The object allocated by an Arc<T>
+/// The object allocated by an `Arc<T>`
 #[repr(C)]
 pub(crate) struct ArcInner<T: ?Sized> {
     pub(crate) count: atomic::AtomicUsize,
@@ -70,12 +71,16 @@ impl<T> Arc<T> {
         }
     }
 
-    /// Reconstruct the Arc<T> from a raw pointer obtained from into_raw()
+    /// Reconstruct the `Arc<T>` from a raw pointer obtained from into_raw()
     ///
     /// Note: This raw pointer will be offset in the allocation and must be preceded
    /// by the atomic count.
     ///
     /// It is recommended to use OffsetArc for this
+    ///
+    /// # Safety
+    /// - The given pointer must be a valid pointer to `T` that came from [`Arc::into_raw`].
+    /// - After `from_raw`, the pointer must not be accessed.
     #[inline]
     pub unsafe fn from_raw(ptr: *const T) -> Self {
         // FIXME: when `byte_sub` is stabilized, this can accept T: ?Sized.
@@ -144,8 +149,31 @@ impl<T> Arc<T> {
     }
 }
 
+impl<T> Arc<[T]> {
+    /// Reconstruct the `Arc<[T]>` from a raw pointer obtained from `into_raw()`.
+    ///
+    /// [`Arc::from_raw`] should accept unsized types, but this is not trivial to do correctly
+    /// until the feature [`pointer_bytes_offsets`](https://github.com/rust-lang/rust/issues/96283)
+    /// is stabilized. This is stopgap solution for slices.
+    ///
+    /// # Safety
+    /// - The given pointer must be a valid pointer to `[T]` that came from [`Arc::into_raw`].
+    /// - After `from_raw_slice`, the pointer must not be accessed.
+    pub unsafe fn from_raw_slice(ptr: *const [T]) -> Self {
+        let len = (*ptr).len();
+        // Assuming the offset of `T` in `ArcInner<T>` is the same
+        // as as offset of `[T]` in `ArcInner<[T]>`.
+        // (`offset_of!` macro requires `Sized`.)
+        let arc_inner_ptr = (ptr as *const u8).sub(offset_of!(ArcInner<T>, data));
+        // Synthesize the fat pointer: the pointer metadata for `Arc<[T]>`
+        // is the same as the pointer metadata for `[T]`: the length.
+        let fake_slice = ptr::slice_from_raw_parts_mut(arc_inner_ptr as *mut T, len);
+        Arc::from_raw_inner(fake_slice as *mut ArcInner<[T]>)
+    }
+}
+
 impl<T: ?Sized> Arc<T> {
-    /// Convert the Arc<T> to a raw pointer, suitable for use across FFI
+    /// Convert the `Arc<T>` to a raw pointer, suitable for use across FFI
     ///
     /// Note: This returns a pointer to the data T, which is offset in the allocation.
     ///
@@ -451,7 +479,7 @@ impl<T: Clone> Arc<T> {
     pub fn make_mut(this: &mut Self) -> &mut T {
         if !this.is_unique() {
             // Another pointer exists; clone
-            *this = Arc::new(T::clone(&this));
+            *this = Arc::new(T::clone(this));
         }
 
         unsafe {
@@ -477,7 +505,7 @@ impl<T: Clone> Arc<T> {
     pub fn make_unique(this: &mut Self) -> &mut UniqueArc<T> {
         if !this.is_unique() {
             // Another pointer exists; clone
-            *this = Arc::new(T::clone(&this));
+            *this = Arc::new(T::clone(this));
         }
 
         unsafe {
@@ -683,17 +711,23 @@ impl<T> From<T> for Arc<T> {
     }
 }
 
+impl<A> FromIterator<A> for Arc<[A]> {
+    fn from_iter<T: IntoIterator<Item = A>>(iter: T) -> Self {
+        UniqueArc::from_iter(iter).shareable()
+    }
+}
+
 impl<T: ?Sized> borrow::Borrow<T> for Arc<T> {
     #[inline]
     fn borrow(&self) -> &T {
-        &**self
+        self
     }
 }
 
 impl<T: ?Sized> AsRef<T> for Arc<T> {
     #[inline]
     fn as_ref(&self) -> &T {
-        &**self
+        self
     }
 }
 
@@ -766,7 +800,10 @@ fn must_be_unique<T: ?Sized>(arc: &mut Arc<T>) -> &mut UniqueArc<T> {
 #[cfg(test)]
 mod tests {
     use crate::arc::Arc;
+    use alloc::borrow::ToOwned;
     use alloc::string::String;
+    use alloc::vec::Vec;
+    use core::iter::FromIterator;
     use core::mem::MaybeUninit;
     #[cfg(feature = "unsize")]
     use unsize::{CoerceUnsize, Coercion};
@@ -884,4 +921,119 @@ mod tests {
             let _arc = Arc::from_raw(ptr);
         }
     }
+
+    #[test]
+    fn from_iterator_exact_size() {
+        let arc = Arc::from_iter(Vec::from_iter(["ololo".to_owned(), "trololo".to_owned()]));
+        assert_eq!(1, Arc::count(&arc));
+        assert_eq!(["ololo".to_owned(), "trololo".to_owned()], *arc);
+    }
+
+    #[test]
+    fn from_iterator_unknown_size() {
+        let arc = Arc::from_iter(
+            Vec::from_iter(["ololo".to_owned(), "trololo".to_owned()])
+                .into_iter()
+                // Filter is opaque to iterators, so the resulting iterator
+                // will report lower bound of 0.
+                .filter(|_| true),
+        );
+        assert_eq!(1, Arc::count(&arc));
+        assert_eq!(["ololo".to_owned(), "trololo".to_owned()], *arc);
+    }
+
+    #[test]
+    fn roundtrip_slice() {
+        let arc = Arc::from(Vec::from_iter([17, 19]));
+        let ptr = Arc::into_raw(arc);
+        let arc = unsafe { Arc::from_raw_slice(ptr) };
+        assert_eq!([17, 19], *arc);
+        assert_eq!(1, Arc::count(&arc));
+    }
+
+    #[test]
+    fn arc_eq_and_cmp() {
+        [
+            [("*", &b"AB"[..]), ("*", &b"ab"[..])],
+            [("*", &b"AB"[..]), ("*", &b"a"[..])],
+            [("*", &b"A"[..]), ("*", &b"ab"[..])],
+            [("A", &b"*"[..]), ("a", &b"*"[..])],
+            [("a", &b"*"[..]), ("A", &b"*"[..])],
+            [("AB", &b"*"[..]), ("a", &b"*"[..])],
+            [("A", &b"*"[..]), ("ab", &b"*"[..])],
+        ]
+        .iter()
+        .for_each(|[lt @ (lh, ls), rt @ (rh, rs)]| {
+            let l = Arc::from_header_and_slice(lh, ls);
+            let r = Arc::from_header_and_slice(rh, rs);
+
+            assert_eq!(l, l);
+            assert_eq!(r, r);
+
+            assert_ne!(l, r);
+            assert_ne!(r, l);
+
+            assert_eq!(l <= l, lt <= lt, "{lt:?} <= {lt:?}");
+            assert_eq!(l >= l, lt >= lt, "{lt:?} >= {lt:?}");
+
+            assert_eq!(l < l, lt < lt, "{lt:?} < {lt:?}");
+            assert_eq!(l > l, lt > lt, "{lt:?} > {lt:?}");
+
+            assert_eq!(r <= r, rt <= rt, "{rt:?} <= {rt:?}");
+            assert_eq!(r >= r, rt >= rt, "{rt:?} >= {rt:?}");
+
+            assert_eq!(r < r, rt < rt, "{rt:?} < {rt:?}");
+            assert_eq!(r > r, rt > rt, "{rt:?} > {rt:?}");
+
+            assert_eq!(l < r, lt < rt, "{lt:?} < {rt:?}");
+            assert_eq!(r > l, rt > lt, "{rt:?} > {lt:?}");
+        })
+    }
+
+    #[test]
+    fn arc_eq_and_partial_cmp() {
+        [
+            [(0.0, &[0.0, 0.0][..]), (1.0, &[0.0, 0.0][..])],
+            [(1.0, &[0.0, 0.0][..]), (0.0, &[0.0, 0.0][..])],
+            [(0.0, &[0.0][..]), (0.0, &[0.0, 0.0][..])],
+            [(0.0, &[0.0, 0.0][..]), (0.0, &[0.0][..])],
+            [(0.0, &[1.0, 2.0][..]), (0.0, &[10.0, 20.0][..])],
+        ]
+        .iter()
+        .for_each(|[lt @ (lh, ls), rt @ (rh, rs)]| {
+            let l = Arc::from_header_and_slice(lh, ls);
+            let r = Arc::from_header_and_slice(rh, rs);
+
+            assert_eq!(l, l);
+            assert_eq!(r, r);
+
+            assert_ne!(l, r);
+            assert_ne!(r, l);
+
+            assert_eq!(l <= l, lt <= lt, "{lt:?} <= {lt:?}");
+            assert_eq!(l >= l, lt >= lt, "{lt:?} >= {lt:?}");
+
+            assert_eq!(l < l, lt < lt, "{lt:?} < {lt:?}");
+            assert_eq!(l > l, lt > lt, "{lt:?} > {lt:?}");
+
+            assert_eq!(r <= r, rt <= rt, "{rt:?} <= {rt:?}");
+            assert_eq!(r >= r, rt >= rt, "{rt:?} >= {rt:?}");
+
+            assert_eq!(r < r, rt < rt, "{rt:?} < {rt:?}");
+            assert_eq!(r > r, rt > rt, "{rt:?} > {rt:?}");
+
+            assert_eq!(l < r, lt < rt, "{lt:?} < {rt:?}");
+            assert_eq!(r > l, rt > lt, "{rt:?} > {lt:?}");
+        })
+    }
+
+    #[allow(dead_code)]
+    const fn is_partial_ord<T: ?Sized + PartialOrd>() {}
+
+    #[allow(dead_code)]
+    const fn is_ord<T: ?Sized + Ord>() {}
+
+    // compile-time check that PartialOrd/Ord is correctly derived
+    const _: () = is_partial_ord::<Arc<f64>>();
+    const _: () = is_ord::<Arc<u64>>();
 }
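Taken together, the new `FromIterator for Arc<[A]>` impl and `Arc::from_raw_slice` combine as in the `from_iterator_*` and `roundtrip_slice` tests above. A minimal standalone sketch (illustrative, not vendored code):

```rust
use triomphe::Arc;

fn main() {
    // Collecting into Arc<[T]> goes through UniqueArc::from_iter and takes the
    // single-allocation path when the iterator's size hint is exact.
    let arc: Arc<[u32]> = (0..4).collect();
    assert_eq!(&*arc, &[0, 1, 2, 3]);

    // into_raw() yields a fat *const [u32]; from_raw_slice() is the unsized
    // counterpart of from_raw() until pointer_bytes_offsets is stabilized.
    let ptr: *const [u32] = Arc::into_raw(arc);
    let arc = unsafe { Arc::from_raw_slice(ptr) };
    assert_eq!(1, Arc::count(&arc));
}
```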
diff --git a/vendor/triomphe/src/arc_borrow.rs b/vendor/triomphe/src/arc_borrow.rs
index d53e1a5ea..4e0e77feb 100644
--- a/vendor/triomphe/src/arc_borrow.rs
+++ b/vendor/triomphe/src/arc_borrow.rs
@@ -46,6 +46,8 @@ impl<'a, T> ArcBorrow<'a, T> {
     /// e.g. if we obtain such a reference over FFI
     /// TODO: should from_ref be relaxed to unsized types? It can't be
     /// converted back to an Arc right now for unsized types.
+    /// # Safety
+    /// - The reference to `T` must have come from a Triomphe Arc, UniqueArc, or ArcBorrow.
     #[inline]
     pub unsafe fn from_ref(r: &'a T) -> Self {
         ArcBorrow(r)
diff --git a/vendor/triomphe/src/header.rs b/vendor/triomphe/src/header.rs
index e35ec48b0..403f0ba83 100644
--- a/vendor/triomphe/src/header.rs
+++ b/vendor/triomphe/src/header.rs
@@ -2,6 +2,7 @@ use alloc::alloc::Layout;
 use alloc::boxed::Box;
 use alloc::string::String;
 use alloc::vec::Vec;
+use core::cmp::Ordering;
 use core::iter::{ExactSizeIterator, Iterator};
 use core::marker::PhantomData;
 use core::mem::{self, ManuallyDrop};
@@ -12,7 +13,7 @@ use super::{Arc, ArcInner};
 
 /// Structure to allow Arc-managing some fixed-sized data and a variably-sized
 /// slice in a single allocation.
-#[derive(Debug, Eq, PartialEq, Hash, PartialOrd)]
+#[derive(Debug, Eq, PartialEq, Hash, PartialOrd, Ord)]
 #[repr(C)]
 pub struct HeaderSlice<H, T: ?Sized> {
     /// The fixed-sized data.
@@ -153,7 +154,7 @@ impl<H> Arc<HeaderSlice<H, str>> {
 
 /// Header data with an inline length. Consumers that use HeaderWithLength as the
 /// Header type in HeaderSlice can take advantage of ThinArc.
-#[derive(Debug, Eq, PartialEq, Hash, PartialOrd)]
+#[derive(Debug, Eq, PartialEq, Hash)]
 #[repr(C)]
 pub struct HeaderWithLength<H> {
     /// The fixed-sized data.
@@ -235,7 +236,7 @@ impl<T> From<Box<T>> for Arc<T> {
             // Safety:
             // - `src` has been got from `Box::into_raw`
             // - `ManuallyDrop<T>` is guaranteed to have the same layout as `T`
-            Box::<ManuallyDrop<T>>::from_raw(src as _);
+            drop(Box::<ManuallyDrop<T>>::from_raw(src as _));
         }
 
         Arc {
@@ -253,6 +254,18 @@ impl<T> From<Vec<T>> for Arc<[T]> {
 
 pub(crate) type HeaderSliceWithLength<H, T> = HeaderSlice<HeaderWithLength<H>, T>;
 
+impl<H: PartialOrd, T: ?Sized + PartialOrd> PartialOrd for HeaderSliceWithLength<H, T> {
+    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
+        (&self.header.header, &self.slice).partial_cmp(&(&other.header.header, &other.slice))
+    }
+}
+
+impl<H: Ord, T: ?Sized + Ord> Ord for HeaderSliceWithLength<H, T> {
+    fn cmp(&self, other: &Self) -> Ordering {
+        (&self.header.header, &self.slice).cmp(&(&other.header.header, &other.slice))
+    }
+}
+
 #[cfg(test)]
 mod tests {
     use alloc::boxed::Box;
diff --git a/vendor/triomphe/src/iterator_as_exact_size_iterator.rs b/vendor/triomphe/src/iterator_as_exact_size_iterator.rs
new file mode 100644
index 000000000..12936f7c9
--- /dev/null
+++ b/vendor/triomphe/src/iterator_as_exact_size_iterator.rs
@@ -0,0 +1,47 @@
+/// Wrap an iterator and implement `ExactSizeIterator`
+/// assuming the underlying iterator reports lower bound equal to upper bound.
+///
+/// It does not check the size is reported correctly (except in debug mode).
+pub(crate) struct IteratorAsExactSizeIterator<I> {
+    iter: I,
+}
+
+impl<I: Iterator> IteratorAsExactSizeIterator<I> {
+    #[inline]
+    pub(crate) fn new(iter: I) -> Self {
+        let (lower, upper) = iter.size_hint();
+        debug_assert_eq!(
+            Some(lower),
+            upper,
+            "IteratorAsExactSizeIterator requires size hint lower == upper"
+        );
+        IteratorAsExactSizeIterator { iter }
+    }
+}
+
+impl<I: Iterator> Iterator for IteratorAsExactSizeIterator<I> {
+    type Item = I::Item;
+
+    #[inline]
+    fn next(&mut self) -> Option<Self::Item> {
+        self.iter.next()
+    }
+
+    #[inline]
+    fn size_hint(&self) -> (usize, Option<usize>) {
+        self.iter.size_hint()
+    }
+}
+
+impl<I: Iterator> ExactSizeIterator for IteratorAsExactSizeIterator<I> {
+    #[inline]
+    fn len(&self) -> usize {
+        let (lower, upper) = self.iter.size_hint();
+        debug_assert_eq!(
+            Some(lower),
+            upper,
+            "IteratorAsExactSizeIterator requires size hint lower == upper"
+        );
+        lower
+    }
+}
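`IteratorAsExactSizeIterator` is `pub(crate)`, so it is not reachable from outside the crate; the sketch below only illustrates the size-hint property it relies on — a lower bound equal to the upper bound — and why an adapter such as `filter` forces the fallback path mentioned in the `from_iterator_unknown_size` test (illustrative, standard-library behavior only):

```rust
fn main() {
    let v = vec![1, 2, 3];

    // A slice iterator reports an exact size: lower == upper.
    assert_eq!((3, Some(3)), v.iter().size_hint());

    // Filter is opaque to size hints: the lower bound collapses to 0, so
    // UniqueArc::from_iter cannot take the single-pass allocation path and
    // collects into a Vec first instead.
    assert_eq!((0, Some(3)), v.iter().filter(|_| true).size_hint());
}
```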
diff --git a/vendor/triomphe/src/lib.rs b/vendor/triomphe/src/lib.rs
index 13d568bda..7b3886cd8 100644
--- a/vendor/triomphe/src/lib.rs
+++ b/vendor/triomphe/src/lib.rs
@@ -64,6 +64,7 @@ mod arc_borrow;
 mod arc_swap_support;
 mod arc_union;
 mod header;
+mod iterator_as_exact_size_iterator;
 mod offset_arc;
 mod thin_arc;
 mod unique_arc;
@@ -79,7 +80,7 @@ pub use unique_arc::*;
 #[cfg(feature = "std")]
 use std::process::abort;
 
-// `no_std`-compatible abort by forcing a panic while already panicing.
+// `no_std`-compatible abort by forcing a panic while already panicking.
 #[cfg(not(feature = "std"))]
 #[cold]
 fn abort() -> ! {
diff --git a/vendor/triomphe/src/offset_arc.rs b/vendor/triomphe/src/offset_arc.rs
index 9345416d4..4d7707e77 100644
--- a/vendor/triomphe/src/offset_arc.rs
+++ b/vendor/triomphe/src/offset_arc.rs
@@ -105,16 +105,20 @@ impl<T> OffsetArc<T> {
         T: Clone,
     {
         unsafe {
-            // extract the OffsetArc as an owned variable
+            // extract the OffsetArc as an owned variable. This does not modify
+            // the refcount and we should be careful to not drop `this`
             let this = ptr::read(self);
-            // treat it as a real Arc
-            let mut arc = Arc::from_raw_offset(this);
-            // obtain the mutable reference. Cast away the lifetime
-            // This may mutate `arc`
-            let ret = Arc::make_mut(&mut arc) as *mut _;
+            // treat it as a real Arc, but wrapped in a ManuallyDrop
+            // in case `Arc::make_mut()` panics in the clone impl
+            let mut arc = ManuallyDrop::new(Arc::from_raw_offset(this));
+            // obtain the mutable reference. Cast away the lifetime since
+            // we have the right lifetime bounds in the parameters.
+            // This may mutate `arc`.
+            let ret = Arc::make_mut(&mut *arc) as *mut _;
             // Store the possibly-mutated arc back inside, after converting
-            // it to a OffsetArc again
-            ptr::write(self, Arc::into_raw_offset(arc));
+            // it to a OffsetArc again. Release the ManuallyDrop.
+            // This also does not modify the refcount or call drop on self
+            ptr::write(self, Arc::into_raw_offset(ManuallyDrop::into_inner(arc)));
             &mut *ret
         }
     }
diff --git a/vendor/triomphe/src/thin_arc.rs b/vendor/triomphe/src/thin_arc.rs
index e048468ad..19bb39924 100644
--- a/vendor/triomphe/src/thin_arc.rs
+++ b/vendor/triomphe/src/thin_arc.rs
@@ -1,3 +1,4 @@
+use core::cmp::Ordering;
 use core::ffi::c_void;
 use core::fmt;
 use core::hash::{Hash, Hasher};
@@ -209,6 +210,20 @@ impl<H: PartialEq, T: PartialEq> PartialEq for ThinArc<H, T> {
 
 impl<H: Eq, T: Eq> Eq for ThinArc<H, T> {}
 
+impl<H: PartialOrd, T: PartialOrd> PartialOrd for ThinArc<H, T> {
+    #[inline]
+    fn partial_cmp(&self, other: &ThinArc<H, T>) -> Option<Ordering> {
+        ThinArc::with_arc(self, |a| ThinArc::with_arc(other, |b| a.partial_cmp(b)))
+    }
+}
+
+impl<H: Ord, T: Ord> Ord for ThinArc<H, T> {
+    #[inline]
+    fn cmp(&self, other: &ThinArc<H, T>) -> Ordering {
+        ThinArc::with_arc(self, |a| ThinArc::with_arc(other, |b| a.cmp(b)))
+    }
+}
+
 impl<H: Hash, T: Hash> Hash for ThinArc<H, T> {
     fn hash<HSR: Hasher>(&self, state: &mut HSR) {
         ThinArc::with_arc(self, |a| a.hash(state))
@@ -326,4 +341,92 @@ mod tests {
         }
         assert_eq!(canary.load(Acquire), 1);
     }
+
+    #[test]
+    fn thin_eq_and_cmp() {
+        [
+            [("*", &b"AB"[..]), ("*", &b"ab"[..])],
+            [("*", &b"AB"[..]), ("*", &b"a"[..])],
+            [("*", &b"A"[..]), ("*", &b"ab"[..])],
+            [("A", &b"*"[..]), ("a", &b"*"[..])],
+            [("a", &b"*"[..]), ("A", &b"*"[..])],
+            [("AB", &b"*"[..]), ("a", &b"*"[..])],
+            [("A", &b"*"[..]), ("ab", &b"*"[..])],
+        ]
+        .iter()
+        .for_each(|[lt @ (lh, ls), rt @ (rh, rs)]| {
+            let l = ThinArc::from_header_and_slice(lh, ls);
+            let r = ThinArc::from_header_and_slice(rh, rs);
+
+            assert_eq!(l, l);
+            assert_eq!(r, r);
+
+            assert_ne!(l, r);
+            assert_ne!(r, l);
+
+            assert_eq!(l <= l, lt <= lt, "{lt:?} <= {lt:?}");
+            assert_eq!(l >= l, lt >= lt, "{lt:?} >= {lt:?}");
+
+            assert_eq!(l < l, lt < lt, "{lt:?} < {lt:?}");
+            assert_eq!(l > l, lt > lt, "{lt:?} > {lt:?}");
+
+            assert_eq!(r <= r, rt <= rt, "{rt:?} <= {rt:?}");
+            assert_eq!(r >= r, rt >= rt, "{rt:?} >= {rt:?}");
+
+            assert_eq!(r < r, rt < rt, "{rt:?} < {rt:?}");
+            assert_eq!(r > r, rt > rt, "{rt:?} > {rt:?}");
+
+            assert_eq!(l < r, lt < rt, "{lt:?} < {rt:?}");
+            assert_eq!(r > l, rt > lt, "{rt:?} > {lt:?}");
+        })
+    }
+
+    #[test]
+    fn thin_eq_and_partial_cmp() {
+        [
+            [(0.0, &[0.0, 0.0][..]), (1.0, &[0.0, 0.0][..])],
+            [(1.0, &[0.0, 0.0][..]), (0.0, &[0.0, 0.0][..])],
+            [(0.0, &[0.0][..]), (0.0, &[0.0, 0.0][..])],
+            [(0.0, &[0.0, 0.0][..]), (0.0, &[0.0][..])],
+            [(0.0, &[1.0, 2.0][..]), (0.0, &[10.0, 20.0][..])],
+        ]
+        .iter()
+        .for_each(|[lt @ (lh, ls), rt @ (rh, rs)]| {
+            let l = ThinArc::from_header_and_slice(lh, ls);
+            let r = ThinArc::from_header_and_slice(rh, rs);
+
+            assert_eq!(l, l);
+            assert_eq!(r, r);
+
+            assert_ne!(l, r);
+            assert_ne!(r, l);
+
+            assert_eq!(l <= l, lt <= lt, "{lt:?} <= {lt:?}");
+            assert_eq!(l >= l, lt >= lt, "{lt:?} >= {lt:?}");
+
+            assert_eq!(l < l, lt < lt, "{lt:?} < {lt:?}");
+            assert_eq!(l > l, lt > lt, "{lt:?} > {lt:?}");
+
+            assert_eq!(r <= r, rt <= rt, "{rt:?} <= {rt:?}");
+            assert_eq!(r >= r, rt >= rt, "{rt:?} >= {rt:?}");
+
+            assert_eq!(r < r, rt < rt, "{rt:?} < {rt:?}");
+            assert_eq!(r > r, rt > rt, "{rt:?} > {rt:?}");
+
+            assert_eq!(l < r, lt < rt, "{lt:?} < {rt:?}");
+            assert_eq!(r > l, rt > lt, "{rt:?} > {lt:?}");
+        })
+    }
+
+    #[allow(dead_code)]
+    const fn is_partial_ord<T: ?Sized + PartialOrd>() {}
+
+    #[allow(dead_code)]
+    const fn is_ord<T: ?Sized + Ord>() {}
+
+    // compile-time check that PartialOrd/Ord is correctly derived
+    const _: () = is_partial_ord::<ThinArc<f64, f64>>();
+    const _: () = is_partial_ord::<ThinArc<f64, u64>>();
+    const _: () = is_partial_ord::<ThinArc<u64, f64>>();
+    const _: () = is_ord::<ThinArc<u64, u64>>();
 }
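The new `PartialOrd`/`Ord` impls for `ThinArc` delegate to the underlying header-and-slice data through `with_arc`, comparing `(header, slice)` lexicographically, as the `thin_eq_and_cmp` test exercises. A small sketch of what that enables (illustrative, not vendored code):

```rust
use triomphe::ThinArc;

fn main() {
    // Same header, so ordering falls through to the slice: b"ab" < b"b".
    let a = ThinArc::from_header_and_slice("A", &b"ab"[..]);
    let b = ThinArc::from_header_and_slice("A", &b"b"[..]);
    assert!(a < b);
    assert_eq!(a, ThinArc::from_header_and_slice("A", &b"ab"[..]));

    // Ord makes ThinArc usable with sort(), BTreeMap keys, and the like.
    let mut v = vec![b.clone(), a.clone()];
    v.sort();
    assert!(v[0] == a && v[1] == b);
}
```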
lt, "{lt:?} <= {lt:?}"); + assert_eq!(l >= l, lt >= lt, "{lt:?} >= {lt:?}"); + + assert_eq!(l < l, lt < lt, "{lt:?} < {lt:?}"); + assert_eq!(l > l, lt > lt, "{lt:?} > {lt:?}"); + + assert_eq!(r <= r, rt <= rt, "{rt:?} <= {rt:?}"); + assert_eq!(r >= r, rt >= rt, "{rt:?} >= {rt:?}"); + + assert_eq!(r < r, rt < rt, "{rt:?} < {rt:?}"); + assert_eq!(r > r, rt > rt, "{rt:?} > {rt:?}"); + + assert_eq!(l < r, lt < rt, "{lt:?} < {rt:?}"); + assert_eq!(r > l, rt > lt, "{rt:?} > {lt:?}"); + }) + } + + #[allow(dead_code)] + const fn is_partial_ord<T: ?Sized + PartialOrd>() {} + + #[allow(dead_code)] + const fn is_ord<T: ?Sized + Ord>() {} + + // compile-time check that PartialOrd/Ord is correctly derived + const _: () = is_partial_ord::<ThinArc<f64, f64>>(); + const _: () = is_partial_ord::<ThinArc<f64, u64>>(); + const _: () = is_partial_ord::<ThinArc<u64, f64>>(); + const _: () = is_ord::<ThinArc<u64, u64>>(); } diff --git a/vendor/triomphe/src/unique_arc.rs b/vendor/triomphe/src/unique_arc.rs index 79555fc27..8ace71308 100644 --- a/vendor/triomphe/src/unique_arc.rs +++ b/vendor/triomphe/src/unique_arc.rs @@ -1,11 +1,14 @@ +use alloc::vec::Vec; use alloc::{alloc::Layout, boxed::Box}; use core::convert::TryFrom; +use core::iter::FromIterator; use core::marker::PhantomData; use core::mem::{ManuallyDrop, MaybeUninit}; use core::ops::{Deref, DerefMut}; use core::ptr::{self, NonNull}; use core::sync::atomic::AtomicUsize; +use crate::iterator_as_exact_size_iterator::IteratorAsExactSizeIterator; use crate::HeaderSlice; use super::{Arc, ArcInner}; @@ -78,7 +81,7 @@ impl<T> UniqueArc<T> { } impl<T: ?Sized> UniqueArc<T> { - /// Convert to a shareable Arc<T> once we're done mutating it + /// Convert to a shareable `Arc<T>` once we're done mutating it #[inline] pub fn shareable(self) -> Arc<T> { self.0 @@ -105,7 +108,7 @@ impl<T: ?Sized> UniqueArc<T> { /// /// The given `Arc` must have a reference count of exactly one pub(crate) unsafe fn from_arc_ref(arc: &mut Arc<T>) -> &mut Self { - debug_assert_eq!(Arc::count(&arc), 1); + debug_assert_eq!(Arc::count(arc), 1); // Safety: caller guarantees that `arc` is unique, // `UniqueArc` is `repr(transparent)` @@ -188,7 +191,7 @@ impl<T: ?Sized> Deref for UniqueArc<T> { #[inline] fn deref(&self) -> &T { - &*self.0 + &self.0 } } @@ -200,6 +203,22 @@ impl<T: ?Sized> DerefMut for UniqueArc<T> { } } +impl<A> FromIterator<A> for UniqueArc<[A]> { + fn from_iter<T: IntoIterator<Item = A>>(iter: T) -> Self { + let iter = iter.into_iter(); + let (lower, upper) = iter.size_hint(); + let arc: Arc<[A]> = if Some(lower) == upper { + let iter = IteratorAsExactSizeIterator::new(iter); + Arc::from_header_and_iter((), iter).into() + } else { + let vec = iter.collect::<Vec<_>>(); + Arc::from(vec) + }; + // Safety: We just created an `Arc`, so it's unique. + unsafe { UniqueArc::from_arc(arc) } + } +} + // Safety: // This leverages the correctness of Arc's CoerciblePtr impl. Additionally, we must ensure that // this can not be used to violate the safety invariants of UniqueArc, which require that we can not |