path: root/vendor/thread_local/src
author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-17 12:02:58 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-17 12:02:58 +0000
commit     698f8c2f01ea549d77d7dc3338a12e04c11057b9 (patch)
tree       173a775858bd501c378080a10dca74132f05bc50 /vendor/thread_local/src
parent     Initial commit. (diff)
download   rustc-698f8c2f01ea549d77d7dc3338a12e04c11057b9.tar.xz
           rustc-698f8c2f01ea549d77d7dc3338a12e04c11057b9.zip
Adding upstream version 1.64.0+dfsg1. (tag: upstream/1.64.0+dfsg1)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'vendor/thread_local/src')
-rw-r--r--  vendor/thread_local/src/cached.rs        161
-rw-r--r--  vendor/thread_local/src/lib.rs           638
-rw-r--r--  vendor/thread_local/src/thread_id.rs     127
-rw-r--r--  vendor/thread_local/src/unreachable.rs    57
4 files changed, 983 insertions, 0 deletions
diff --git a/vendor/thread_local/src/cached.rs b/vendor/thread_local/src/cached.rs
new file mode 100644
index 000000000..16f6516de
--- /dev/null
+++ b/vendor/thread_local/src/cached.rs
@@ -0,0 +1,161 @@
+#![allow(deprecated)]
+
+use super::{IntoIter, IterMut, ThreadLocal};
+use std::fmt;
+use std::panic::UnwindSafe;
+use std::usize;
+
+/// Wrapper around [`ThreadLocal`].
+///
+/// This used to add a fast path for a single thread; that has since been
+/// obsoleted by performance improvements to [`ThreadLocal`] itself.
+#[deprecated(since = "1.1.0", note = "Use `ThreadLocal` instead")]
+pub struct CachedThreadLocal<T: Send> {
+ inner: ThreadLocal<T>,
+}
+
+impl<T: Send> Default for CachedThreadLocal<T> {
+ fn default() -> CachedThreadLocal<T> {
+ CachedThreadLocal::new()
+ }
+}
+
+impl<T: Send> CachedThreadLocal<T> {
+ /// Creates a new empty `CachedThreadLocal`.
+ #[inline]
+ pub fn new() -> CachedThreadLocal<T> {
+ CachedThreadLocal {
+ inner: ThreadLocal::new(),
+ }
+ }
+
+ /// Returns the element for the current thread, if it exists.
+ #[inline]
+ pub fn get(&self) -> Option<&T> {
+ self.inner.get()
+ }
+
+ /// Returns the element for the current thread, or creates it if it doesn't
+ /// exist.
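+    ///
+    /// A minimal usage sketch (this type simply delegates to [`ThreadLocal`]):
+    ///
+    /// ```rust
+    /// use thread_local::CachedThreadLocal;
+    /// let tls: CachedThreadLocal<u32> = CachedThreadLocal::new();
+    /// assert_eq!(*tls.get_or(|| 7), 7);
+    /// ```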
+ #[inline]
+ pub fn get_or<F>(&self, create: F) -> &T
+ where
+ F: FnOnce() -> T,
+ {
+ self.inner.get_or(create)
+ }
+
+ /// Returns the element for the current thread, or creates it if it doesn't
+ /// exist. If `create` fails, that error is returned and no element is
+ /// added.
+ #[inline]
+ pub fn get_or_try<F, E>(&self, create: F) -> Result<&T, E>
+ where
+ F: FnOnce() -> Result<T, E>,
+ {
+ self.inner.get_or_try(create)
+ }
+
+ /// Returns a mutable iterator over the local values of all threads.
+ ///
+ /// Since this call borrows the `ThreadLocal` mutably, this operation can
+ /// be done safely---the mutable borrow statically guarantees no other
+ /// threads are currently accessing their associated values.
+ #[inline]
+ pub fn iter_mut(&mut self) -> CachedIterMut<T> {
+ CachedIterMut {
+ inner: self.inner.iter_mut(),
+ }
+ }
+
+ /// Removes all thread-specific values from the `ThreadLocal`, effectively
+    /// resetting it to its original state.
+ ///
+ /// Since this call borrows the `ThreadLocal` mutably, this operation can
+ /// be done safely---the mutable borrow statically guarantees no other
+ /// threads are currently accessing their associated values.
+ #[inline]
+ pub fn clear(&mut self) {
+ self.inner.clear();
+ }
+}
+
+impl<T: Send> IntoIterator for CachedThreadLocal<T> {
+ type Item = T;
+ type IntoIter = CachedIntoIter<T>;
+
+ fn into_iter(self) -> CachedIntoIter<T> {
+ CachedIntoIter {
+ inner: self.inner.into_iter(),
+ }
+ }
+}
+
+impl<'a, T: Send + 'a> IntoIterator for &'a mut CachedThreadLocal<T> {
+ type Item = &'a mut T;
+ type IntoIter = CachedIterMut<'a, T>;
+
+ fn into_iter(self) -> CachedIterMut<'a, T> {
+ self.iter_mut()
+ }
+}
+
+impl<T: Send + Default> CachedThreadLocal<T> {
+ /// Returns the element for the current thread, or creates a default one if
+ /// it doesn't exist.
+ pub fn get_or_default(&self) -> &T {
+ self.get_or(T::default)
+ }
+}
+
+impl<T: Send + fmt::Debug> fmt::Debug for CachedThreadLocal<T> {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ write!(f, "ThreadLocal {{ local_data: {:?} }}", self.get())
+ }
+}
+
+impl<T: Send + UnwindSafe> UnwindSafe for CachedThreadLocal<T> {}
+
+/// Mutable iterator over the contents of a `CachedThreadLocal`.
+#[deprecated(since = "1.1.0", note = "Use `IterMut` instead")]
+pub struct CachedIterMut<'a, T: Send + 'a> {
+ inner: IterMut<'a, T>,
+}
+
+impl<'a, T: Send + 'a> Iterator for CachedIterMut<'a, T> {
+ type Item = &'a mut T;
+
+ #[inline]
+ fn next(&mut self) -> Option<&'a mut T> {
+ self.inner.next()
+ }
+
+ #[inline]
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ self.inner.size_hint()
+ }
+}
+
+impl<'a, T: Send + 'a> ExactSizeIterator for CachedIterMut<'a, T> {}
+
+/// An iterator that moves out of a `CachedThreadLocal`.
+#[deprecated(since = "1.1.0", note = "Use `IntoIter` instead")]
+pub struct CachedIntoIter<T: Send> {
+ inner: IntoIter<T>,
+}
+
+impl<T: Send> Iterator for CachedIntoIter<T> {
+ type Item = T;
+
+ #[inline]
+ fn next(&mut self) -> Option<T> {
+ self.inner.next()
+ }
+
+ #[inline]
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ self.inner.size_hint()
+ }
+}
+
+impl<T: Send> ExactSizeIterator for CachedIntoIter<T> {}
diff --git a/vendor/thread_local/src/lib.rs b/vendor/thread_local/src/lib.rs
new file mode 100644
index 000000000..33b79d6a5
--- /dev/null
+++ b/vendor/thread_local/src/lib.rs
@@ -0,0 +1,638 @@
+// Copyright 2017 Amanieu d'Antras
+//
+// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
+// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
+// http://opensource.org/licenses/MIT>, at your option. This file may not be
+// copied, modified, or distributed except according to those terms.
+
+//! Per-object thread-local storage
+//!
+//! This library provides the `ThreadLocal` type which allows a separate copy of
+//! an object to be used for each thread. This allows for per-object
+//! thread-local storage, unlike the standard library's `thread_local!` macro
+//! which only allows static thread-local storage.
+//!
+//! Per-thread objects are not destroyed when a thread exits. Instead, objects
+//! are only destroyed when the `ThreadLocal` containing them is destroyed.
+//!
+//! You can also iterate over the thread-local values of all threads in a
+//! `ThreadLocal` object using the `iter_mut` and `into_iter` methods. This can
+//! only be done if you have mutable access to the `ThreadLocal` object, which
+//! guarantees that you are the only thread currently accessing it.
+//!
+//! Note that since thread IDs are recycled when a thread exits, it is possible
+//! for one thread to retrieve the object of another thread. Since this can only
+//! occur after a thread has exited, this does not lead to any race conditions.
+//!
+//! # Examples
+//!
+//! Basic usage of `ThreadLocal`:
+//!
+//! ```rust
+//! use thread_local::ThreadLocal;
+//! let tls: ThreadLocal<u32> = ThreadLocal::new();
+//! assert_eq!(tls.get(), None);
+//! assert_eq!(tls.get_or(|| 5), &5);
+//! assert_eq!(tls.get(), Some(&5));
+//! ```
+//!
+//! Combining thread-local values into a single result:
+//!
+//! ```rust
+//! use thread_local::ThreadLocal;
+//! use std::sync::Arc;
+//! use std::cell::Cell;
+//! use std::thread;
+//!
+//! let tls = Arc::new(ThreadLocal::new());
+//!
+//! // Create a bunch of threads to do stuff
+//! for _ in 0..5 {
+//! let tls2 = tls.clone();
+//! thread::spawn(move || {
+//! // Increment a counter to count some event...
+//! let cell = tls2.get_or(|| Cell::new(0));
+//! cell.set(cell.get() + 1);
+//! }).join().unwrap();
+//! }
+//!
+//! // Once all threads are done, collect the counter values and return the
+//! // sum of all thread-local counter values.
+//! let tls = Arc::try_unwrap(tls).unwrap();
+//! let total = tls.into_iter().fold(0, |x, y| x + y.get());
+//! assert_eq!(total, 5);
+//! ```
+
+#![warn(missing_docs)]
+#![allow(clippy::mutex_atomic)]
+
+mod cached;
+mod thread_id;
+mod unreachable;
+
+#[allow(deprecated)]
+pub use cached::{CachedIntoIter, CachedIterMut, CachedThreadLocal};
+
+use std::cell::UnsafeCell;
+use std::fmt;
+use std::iter::FusedIterator;
+use std::mem;
+use std::mem::MaybeUninit;
+use std::panic::UnwindSafe;
+use std::ptr;
+use std::sync::atomic::{AtomicBool, AtomicPtr, AtomicUsize, Ordering};
+use std::sync::Mutex;
+use thread_id::Thread;
+use unreachable::UncheckedResultExt;
+
+// Use usize::BITS once it has stabilized and the MSRV has been bumped.
+#[cfg(target_pointer_width = "16")]
+const POINTER_WIDTH: u8 = 16;
+#[cfg(target_pointer_width = "32")]
+const POINTER_WIDTH: u8 = 32;
+#[cfg(target_pointer_width = "64")]
+const POINTER_WIDTH: u8 = 64;
+
+/// The total number of buckets stored in each thread local.
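+/// With 64-bit pointers this is 65 buckets of sizes 1, 1, 2, 4, ..., 2^63,
+/// which together cover every possible `usize` thread ID.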
+const BUCKETS: usize = (POINTER_WIDTH + 1) as usize;
+
+/// Thread-local variable wrapper
+///
+/// See the [module-level documentation](index.html) for more.
+pub struct ThreadLocal<T: Send> {
+ /// The buckets in the thread local. The nth bucket contains `2^(n-1)`
+ /// elements. Each bucket is lazily allocated.
+ buckets: [AtomicPtr<Entry<T>>; BUCKETS],
+
+ /// The number of values in the thread local. This can be less than the real number of values,
+ /// but is never more.
+ values: AtomicUsize,
+
+ /// Lock used to guard against concurrent modifications. This is taken when
+ /// there is a possibility of allocating a new bucket, which only occurs
+ /// when inserting values.
+ lock: Mutex<()>,
+}
+
+struct Entry<T> {
+ present: AtomicBool,
+ value: UnsafeCell<MaybeUninit<T>>,
+}
+
+impl<T> Drop for Entry<T> {
+ fn drop(&mut self) {
+ unsafe {
+ if *self.present.get_mut() {
+ ptr::drop_in_place((*self.value.get()).as_mut_ptr());
+ }
+ }
+ }
+}
+
+// ThreadLocal is always Sync, even if T isn't
+unsafe impl<T: Send> Sync for ThreadLocal<T> {}
+
+impl<T: Send> Default for ThreadLocal<T> {
+ fn default() -> ThreadLocal<T> {
+ ThreadLocal::new()
+ }
+}
+
+impl<T: Send> Drop for ThreadLocal<T> {
+ fn drop(&mut self) {
+ let mut bucket_size = 1;
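+        // Buckets 0 and 1 each hold a single entry; every later bucket doubles
+        // in size (1, 1, 2, 4, ...), mirroring the layout in `Thread::new`.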
+
+ // Free each non-null bucket
+ for (i, bucket) in self.buckets.iter_mut().enumerate() {
+ let bucket_ptr = *bucket.get_mut();
+
+ let this_bucket_size = bucket_size;
+ if i != 0 {
+ bucket_size <<= 1;
+ }
+
+ if bucket_ptr.is_null() {
+ continue;
+ }
+
+ unsafe { Box::from_raw(std::slice::from_raw_parts_mut(bucket_ptr, this_bucket_size)) };
+ }
+ }
+}
+
+impl<T: Send> ThreadLocal<T> {
+ /// Creates a new empty `ThreadLocal`.
+ pub fn new() -> ThreadLocal<T> {
+ Self::with_capacity(2)
+ }
+
+    /// Creates a new `ThreadLocal` with an initial capacity. If no more than
+    /// `capacity` threads access the thread local, it will never reallocate.
+    /// The capacity may be rounded up to the nearest power of two.
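+    ///
+    /// A minimal sketch (the exact rounding is an implementation detail):
+    ///
+    /// ```rust
+    /// use thread_local::ThreadLocal;
+    /// // Room for 16 threads before any further bucket must be allocated.
+    /// let tls: ThreadLocal<u32> = ThreadLocal::with_capacity(16);
+    /// assert!(tls.get().is_none());
+    /// ```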
+ pub fn with_capacity(capacity: usize) -> ThreadLocal<T> {
+ let allocated_buckets = capacity
+ .checked_sub(1)
+ .map(|c| usize::from(POINTER_WIDTH) - (c.leading_zeros() as usize) + 1)
+ .unwrap_or(0);
+
+ let mut buckets = [ptr::null_mut(); BUCKETS];
+ let mut bucket_size = 1;
+ for (i, bucket) in buckets[..allocated_buckets].iter_mut().enumerate() {
+ *bucket = allocate_bucket::<T>(bucket_size);
+
+ if i != 0 {
+ bucket_size <<= 1;
+ }
+ }
+
+ ThreadLocal {
+ // Safety: AtomicPtr has the same representation as a pointer and arrays have the same
+ // representation as a sequence of their inner type.
+ buckets: unsafe { mem::transmute(buckets) },
+ values: AtomicUsize::new(0),
+ lock: Mutex::new(()),
+ }
+ }
+
+ /// Returns the element for the current thread, if it exists.
+ pub fn get(&self) -> Option<&T> {
+ let thread = thread_id::get();
+ self.get_inner(thread)
+ }
+
+ /// Returns the element for the current thread, or creates it if it doesn't
+ /// exist.
+ pub fn get_or<F>(&self, create: F) -> &T
+ where
+ F: FnOnce() -> T,
+ {
+ unsafe {
+ self.get_or_try(|| Ok::<T, ()>(create()))
+ .unchecked_unwrap_ok()
+ }
+ }
+
+ /// Returns the element for the current thread, or creates it if it doesn't
+ /// exist. If `create` fails, that error is returned and no element is
+ /// added.
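+    ///
+    /// A sketch with a fallible initializer (the parse here is illustrative):
+    ///
+    /// ```rust
+    /// use thread_local::ThreadLocal;
+    /// let tls: ThreadLocal<u32> = ThreadLocal::new();
+    /// assert_eq!(tls.get_or_try(|| "7".parse::<u32>()), Ok(&7));
+    /// ```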
+ pub fn get_or_try<F, E>(&self, create: F) -> Result<&T, E>
+ where
+ F: FnOnce() -> Result<T, E>,
+ {
+ let thread = thread_id::get();
+ match self.get_inner(thread) {
+ Some(x) => Ok(x),
+ None => Ok(self.insert(thread, create()?)),
+ }
+ }
+
+ fn get_inner(&self, thread: Thread) -> Option<&T> {
+ let bucket_ptr =
+ unsafe { self.buckets.get_unchecked(thread.bucket) }.load(Ordering::Acquire);
+ if bucket_ptr.is_null() {
+ return None;
+ }
+ unsafe {
+ let entry = &*bucket_ptr.add(thread.index);
+ // Read without atomic operations as only this thread can set the value.
+ if (&entry.present as *const _ as *const bool).read() {
+ Some(&*(&*entry.value.get()).as_ptr())
+ } else {
+ None
+ }
+ }
+ }
+
+ #[cold]
+ fn insert(&self, thread: Thread, data: T) -> &T {
+ // Lock the Mutex to ensure only a single thread is allocating buckets at once
+ let _guard = self.lock.lock().unwrap();
+
+ let bucket_atomic_ptr = unsafe { self.buckets.get_unchecked(thread.bucket) };
+
+ let bucket_ptr: *const _ = bucket_atomic_ptr.load(Ordering::Acquire);
+ let bucket_ptr = if bucket_ptr.is_null() {
+ // Allocate a new bucket
+ let bucket_ptr = allocate_bucket(thread.bucket_size);
+ bucket_atomic_ptr.store(bucket_ptr, Ordering::Release);
+ bucket_ptr
+ } else {
+ bucket_ptr
+ };
+
+ drop(_guard);
+
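+        // The Release store of `present` below pairs with the Acquire load in
+        // `get_inner`, so the value write is visible before the flag is seen.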
+ // Insert the new element into the bucket
+ let entry = unsafe { &*bucket_ptr.add(thread.index) };
+ let value_ptr = entry.value.get();
+ unsafe { value_ptr.write(MaybeUninit::new(data)) };
+ entry.present.store(true, Ordering::Release);
+
+ self.values.fetch_add(1, Ordering::Release);
+
+ unsafe { &*(&*value_ptr).as_ptr() }
+ }
+
+ /// Returns an iterator over the local values of all threads in unspecified
+ /// order.
+ ///
+ /// This call can be done safely, as `T` is required to implement [`Sync`].
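+    ///
+    /// A minimal sketch:
+    ///
+    /// ```rust
+    /// use thread_local::ThreadLocal;
+    /// let tls: ThreadLocal<u32> = ThreadLocal::new();
+    /// tls.get_or(|| 1);
+    /// let sum: u32 = tls.iter().sum();
+    /// assert_eq!(sum, 1);
+    /// ```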
+ pub fn iter(&self) -> Iter<'_, T>
+ where
+ T: Sync,
+ {
+ Iter {
+ thread_local: self,
+ raw: RawIter::new(),
+ }
+ }
+
+ /// Returns a mutable iterator over the local values of all threads in
+ /// unspecified order.
+ ///
+ /// Since this call borrows the `ThreadLocal` mutably, this operation can
+ /// be done safely---the mutable borrow statically guarantees no other
+ /// threads are currently accessing their associated values.
+ pub fn iter_mut(&mut self) -> IterMut<T> {
+ IterMut {
+ thread_local: self,
+ raw: RawIter::new(),
+ }
+ }
+
+ /// Removes all thread-specific values from the `ThreadLocal`, effectively
+    /// resetting it to its original state.
+ ///
+ /// Since this call borrows the `ThreadLocal` mutably, this operation can
+ /// be done safely---the mutable borrow statically guarantees no other
+ /// threads are currently accessing their associated values.
+ pub fn clear(&mut self) {
+ *self = ThreadLocal::new();
+ }
+}
+
+impl<T: Send> IntoIterator for ThreadLocal<T> {
+ type Item = T;
+ type IntoIter = IntoIter<T>;
+
+ fn into_iter(self) -> IntoIter<T> {
+ IntoIter {
+ thread_local: self,
+ raw: RawIter::new(),
+ }
+ }
+}
+
+impl<'a, T: Send + Sync> IntoIterator for &'a ThreadLocal<T> {
+ type Item = &'a T;
+ type IntoIter = Iter<'a, T>;
+
+ fn into_iter(self) -> Self::IntoIter {
+ self.iter()
+ }
+}
+
+impl<'a, T: Send> IntoIterator for &'a mut ThreadLocal<T> {
+ type Item = &'a mut T;
+ type IntoIter = IterMut<'a, T>;
+
+ fn into_iter(self) -> IterMut<'a, T> {
+ self.iter_mut()
+ }
+}
+
+impl<T: Send + Default> ThreadLocal<T> {
+ /// Returns the element for the current thread, or creates a default one if
+ /// it doesn't exist.
+ pub fn get_or_default(&self) -> &T {
+ self.get_or(Default::default)
+ }
+}
+
+impl<T: Send + fmt::Debug> fmt::Debug for ThreadLocal<T> {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ write!(f, "ThreadLocal {{ local_data: {:?} }}", self.get())
+ }
+}
+
+impl<T: Send + UnwindSafe> UnwindSafe for ThreadLocal<T> {}
+
+#[derive(Debug)]
+struct RawIter {
+ yielded: usize,
+ bucket: usize,
+ bucket_size: usize,
+ index: usize,
+}
+impl RawIter {
+ #[inline]
+ fn new() -> Self {
+ Self {
+ yielded: 0,
+ bucket: 0,
+ bucket_size: 1,
+ index: 0,
+ }
+ }
+
+ fn next<'a, T: Send + Sync>(&mut self, thread_local: &'a ThreadLocal<T>) -> Option<&'a T> {
+ while self.bucket < BUCKETS {
+ let bucket = unsafe { thread_local.buckets.get_unchecked(self.bucket) };
+ let bucket = bucket.load(Ordering::Acquire);
+
+ if !bucket.is_null() {
+ while self.index < self.bucket_size {
+ let entry = unsafe { &*bucket.add(self.index) };
+ self.index += 1;
+ if entry.present.load(Ordering::Acquire) {
+ self.yielded += 1;
+ return Some(unsafe { &*(&*entry.value.get()).as_ptr() });
+ }
+ }
+ }
+
+ self.next_bucket();
+ }
+ None
+ }
+ fn next_mut<'a, T: Send>(
+ &mut self,
+ thread_local: &'a mut ThreadLocal<T>,
+ ) -> Option<&'a mut Entry<T>> {
+ if *thread_local.values.get_mut() == self.yielded {
+ return None;
+ }
+
+ loop {
+ let bucket = unsafe { thread_local.buckets.get_unchecked_mut(self.bucket) };
+ let bucket = *bucket.get_mut();
+
+ if !bucket.is_null() {
+ while self.index < self.bucket_size {
+ let entry = unsafe { &mut *bucket.add(self.index) };
+ self.index += 1;
+ if *entry.present.get_mut() {
+ self.yielded += 1;
+ return Some(entry);
+ }
+ }
+ }
+
+ self.next_bucket();
+ }
+ }
+
+ #[inline]
+ fn next_bucket(&mut self) {
+ if self.bucket != 0 {
+ self.bucket_size <<= 1;
+ }
+ self.bucket += 1;
+ self.index = 0;
+ }
+
+ fn size_hint<T: Send>(&self, thread_local: &ThreadLocal<T>) -> (usize, Option<usize>) {
+ let total = thread_local.values.load(Ordering::Acquire);
+ (total - self.yielded, None)
+ }
+ fn size_hint_frozen<T: Send>(&self, thread_local: &ThreadLocal<T>) -> (usize, Option<usize>) {
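+        // Callers (`IterMut` and `IntoIter`) have exclusive access to the
+        // `ThreadLocal`, so `values` cannot change concurrently and a plain
+        // non-atomic read is sound.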
+ let total = unsafe { *(&thread_local.values as *const AtomicUsize as *const usize) };
+ let remaining = total - self.yielded;
+ (remaining, Some(remaining))
+ }
+}
+
+/// Iterator over the contents of a `ThreadLocal`.
+#[derive(Debug)]
+pub struct Iter<'a, T: Send + Sync> {
+ thread_local: &'a ThreadLocal<T>,
+ raw: RawIter,
+}
+
+impl<'a, T: Send + Sync> Iterator for Iter<'a, T> {
+ type Item = &'a T;
+ fn next(&mut self) -> Option<Self::Item> {
+ self.raw.next(self.thread_local)
+ }
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ self.raw.size_hint(self.thread_local)
+ }
+}
+impl<T: Send + Sync> FusedIterator for Iter<'_, T> {}
+
+/// Mutable iterator over the contents of a `ThreadLocal`.
+pub struct IterMut<'a, T: Send> {
+ thread_local: &'a mut ThreadLocal<T>,
+ raw: RawIter,
+}
+
+impl<'a, T: Send> Iterator for IterMut<'a, T> {
+ type Item = &'a mut T;
+ fn next(&mut self) -> Option<&'a mut T> {
+ self.raw
+ .next_mut(self.thread_local)
+ .map(|entry| unsafe { &mut *(&mut *entry.value.get()).as_mut_ptr() })
+ }
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ self.raw.size_hint_frozen(self.thread_local)
+ }
+}
+
+impl<T: Send> ExactSizeIterator for IterMut<'_, T> {}
+impl<T: Send> FusedIterator for IterMut<'_, T> {}
+
+// Manual impl so we don't call Debug on the ThreadLocal, as doing so would create a reference to
+// this thread's value that potentially aliases with a mutable reference we have given out.
+impl<'a, T: Send + fmt::Debug> fmt::Debug for IterMut<'a, T> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("IterMut").field("raw", &self.raw).finish()
+ }
+}
+
+/// An iterator that moves out of a `ThreadLocal`.
+#[derive(Debug)]
+pub struct IntoIter<T: Send> {
+ thread_local: ThreadLocal<T>,
+ raw: RawIter,
+}
+
+impl<T: Send> Iterator for IntoIter<T> {
+ type Item = T;
+ fn next(&mut self) -> Option<T> {
+ self.raw.next_mut(&mut self.thread_local).map(|entry| {
+ *entry.present.get_mut() = false;
+ unsafe {
+ std::mem::replace(&mut *entry.value.get(), MaybeUninit::uninit()).assume_init()
+ }
+ })
+ }
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ self.raw.size_hint_frozen(&self.thread_local)
+ }
+}
+
+impl<T: Send> ExactSizeIterator for IntoIter<T> {}
+impl<T: Send> FusedIterator for IntoIter<T> {}
+
+fn allocate_bucket<T>(size: usize) -> *mut Entry<T> {
+ Box::into_raw(
+ (0..size)
+ .map(|_| Entry::<T> {
+ present: AtomicBool::new(false),
+ value: UnsafeCell::new(MaybeUninit::uninit()),
+ })
+ .collect(),
+ ) as *mut _
+}
+
+#[cfg(test)]
+mod tests {
+ use super::ThreadLocal;
+ use std::cell::RefCell;
+ use std::sync::atomic::AtomicUsize;
+ use std::sync::atomic::Ordering::Relaxed;
+ use std::sync::Arc;
+ use std::thread;
+
+ fn make_create() -> Arc<dyn Fn() -> usize + Send + Sync> {
+ let count = AtomicUsize::new(0);
+ Arc::new(move || count.fetch_add(1, Relaxed))
+ }
+
+ #[test]
+ fn same_thread() {
+ let create = make_create();
+ let mut tls = ThreadLocal::new();
+ assert_eq!(None, tls.get());
+ assert_eq!("ThreadLocal { local_data: None }", format!("{:?}", &tls));
+ assert_eq!(0, *tls.get_or(|| create()));
+ assert_eq!(Some(&0), tls.get());
+ assert_eq!(0, *tls.get_or(|| create()));
+ assert_eq!(Some(&0), tls.get());
+ assert_eq!(0, *tls.get_or(|| create()));
+ assert_eq!(Some(&0), tls.get());
+ assert_eq!("ThreadLocal { local_data: Some(0) }", format!("{:?}", &tls));
+ tls.clear();
+ assert_eq!(None, tls.get());
+ }
+
+ #[test]
+ fn different_thread() {
+ let create = make_create();
+ let tls = Arc::new(ThreadLocal::new());
+ assert_eq!(None, tls.get());
+ assert_eq!(0, *tls.get_or(|| create()));
+ assert_eq!(Some(&0), tls.get());
+
+ let tls2 = tls.clone();
+ let create2 = create.clone();
+ thread::spawn(move || {
+ assert_eq!(None, tls2.get());
+ assert_eq!(1, *tls2.get_or(|| create2()));
+ assert_eq!(Some(&1), tls2.get());
+ })
+ .join()
+ .unwrap();
+
+ assert_eq!(Some(&0), tls.get());
+ assert_eq!(0, *tls.get_or(|| create()));
+ }
+
+ #[test]
+ fn iter() {
+ let tls = Arc::new(ThreadLocal::new());
+ tls.get_or(|| Box::new(1));
+
+ let tls2 = tls.clone();
+ thread::spawn(move || {
+ tls2.get_or(|| Box::new(2));
+ let tls3 = tls2.clone();
+ thread::spawn(move || {
+ tls3.get_or(|| Box::new(3));
+ })
+ .join()
+ .unwrap();
+ drop(tls2);
+ })
+ .join()
+ .unwrap();
+
+ let mut tls = Arc::try_unwrap(tls).unwrap();
+
+ let mut v = tls.iter().map(|x| **x).collect::<Vec<i32>>();
+ v.sort_unstable();
+ assert_eq!(vec![1, 2, 3], v);
+
+ let mut v = tls.iter_mut().map(|x| **x).collect::<Vec<i32>>();
+ v.sort_unstable();
+ assert_eq!(vec![1, 2, 3], v);
+
+ let mut v = tls.into_iter().map(|x| *x).collect::<Vec<i32>>();
+ v.sort_unstable();
+ assert_eq!(vec![1, 2, 3], v);
+ }
+
+ #[test]
+ fn test_drop() {
+ let local = ThreadLocal::new();
+ struct Dropped(Arc<AtomicUsize>);
+ impl Drop for Dropped {
+ fn drop(&mut self) {
+ self.0.fetch_add(1, Relaxed);
+ }
+ }
+
+ let dropped = Arc::new(AtomicUsize::new(0));
+ local.get_or(|| Dropped(dropped.clone()));
+ assert_eq!(dropped.load(Relaxed), 0);
+ drop(local);
+ assert_eq!(dropped.load(Relaxed), 1);
+ }
+
+ #[test]
+ fn is_sync() {
+ fn foo<T: Sync>() {}
+ foo::<ThreadLocal<String>>();
+ foo::<ThreadLocal<RefCell<String>>>();
+ }
+}
diff --git a/vendor/thread_local/src/thread_id.rs b/vendor/thread_local/src/thread_id.rs
new file mode 100644
index 000000000..6eb0f616f
--- /dev/null
+++ b/vendor/thread_local/src/thread_id.rs
@@ -0,0 +1,127 @@
+// Copyright 2017 Amanieu d'Antras
+//
+// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
+// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
+// http://opensource.org/licenses/MIT>, at your option. This file may not be
+// copied, modified, or distributed except according to those terms.
+
+use crate::POINTER_WIDTH;
+use once_cell::sync::Lazy;
+use std::cmp::Reverse;
+use std::collections::BinaryHeap;
+use std::sync::Mutex;
+use std::usize;
+
+/// Thread ID manager which allocates thread IDs. It attempts to aggressively
+/// reuse thread IDs where possible to avoid cases where a ThreadLocal grows
+/// indefinitely when it is used by many short-lived threads.
+struct ThreadIdManager {
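+    /// The next never-before-used ID to hand out once the free list is empty.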
+ free_from: usize,
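+    /// Freed IDs, kept as a min-heap (`Reverse` flips `BinaryHeap`'s max-heap
+    /// ordering) so the smallest available ID is always reused first.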
+ free_list: BinaryHeap<Reverse<usize>>,
+}
+impl ThreadIdManager {
+ fn new() -> ThreadIdManager {
+ ThreadIdManager {
+ free_from: 0,
+ free_list: BinaryHeap::new(),
+ }
+ }
+ fn alloc(&mut self) -> usize {
+ if let Some(id) = self.free_list.pop() {
+ id.0
+ } else {
+ let id = self.free_from;
+ self.free_from = self
+ .free_from
+ .checked_add(1)
+ .expect("Ran out of thread IDs");
+ id
+ }
+ }
+ fn free(&mut self, id: usize) {
+ self.free_list.push(Reverse(id));
+ }
+}
+static THREAD_ID_MANAGER: Lazy<Mutex<ThreadIdManager>> =
+ Lazy::new(|| Mutex::new(ThreadIdManager::new()));
+
+/// Data which is unique to the current thread while it is running.
+/// A thread ID may be reused after a thread exits.
+#[derive(Clone, Copy)]
+pub(crate) struct Thread {
+ /// The thread ID obtained from the thread ID manager.
+ pub(crate) id: usize,
+ /// The bucket this thread's local storage will be in.
+ pub(crate) bucket: usize,
+ /// The size of the bucket this thread's local storage will be in.
+ pub(crate) bucket_size: usize,
+ /// The index into the bucket this thread's local storage is in.
+ pub(crate) index: usize,
+}
+impl Thread {
+ fn new(id: usize) -> Thread {
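+        // `bucket` is the bit length of `id` (0 for id 0), so IDs in
+        // [2^(n-1), 2^n) share bucket n. `index` clears the top set bit,
+        // giving the position within that bucket.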
+ let bucket = usize::from(POINTER_WIDTH) - id.leading_zeros() as usize;
+ let bucket_size = 1 << bucket.saturating_sub(1);
+ let index = if id != 0 { id ^ bucket_size } else { 0 };
+
+ Thread {
+ id,
+ bucket,
+ bucket_size,
+ index,
+ }
+ }
+}
+
+/// Wrapper around `Thread` that allocates and deallocates the ID.
+struct ThreadHolder(Thread);
+impl ThreadHolder {
+ fn new() -> ThreadHolder {
+ ThreadHolder(Thread::new(THREAD_ID_MANAGER.lock().unwrap().alloc()))
+ }
+}
+impl Drop for ThreadHolder {
+ fn drop(&mut self) {
+ THREAD_ID_MANAGER.lock().unwrap().free(self.0.id);
+ }
+}
+
+thread_local!(static THREAD_HOLDER: ThreadHolder = ThreadHolder::new());
+
+/// Get the current thread.
+pub(crate) fn get() -> Thread {
+ THREAD_HOLDER.with(|holder| holder.0)
+}
+
+#[test]
+fn test_thread() {
+ let thread = Thread::new(0);
+ assert_eq!(thread.id, 0);
+ assert_eq!(thread.bucket, 0);
+ assert_eq!(thread.bucket_size, 1);
+ assert_eq!(thread.index, 0);
+
+ let thread = Thread::new(1);
+ assert_eq!(thread.id, 1);
+ assert_eq!(thread.bucket, 1);
+ assert_eq!(thread.bucket_size, 1);
+ assert_eq!(thread.index, 0);
+
+ let thread = Thread::new(2);
+ assert_eq!(thread.id, 2);
+ assert_eq!(thread.bucket, 2);
+ assert_eq!(thread.bucket_size, 2);
+ assert_eq!(thread.index, 0);
+
+ let thread = Thread::new(3);
+ assert_eq!(thread.id, 3);
+ assert_eq!(thread.bucket, 2);
+ assert_eq!(thread.bucket_size, 2);
+ assert_eq!(thread.index, 1);
+
+ let thread = Thread::new(19);
+ assert_eq!(thread.id, 19);
+ assert_eq!(thread.bucket, 5);
+ assert_eq!(thread.bucket_size, 16);
+ assert_eq!(thread.index, 3);
+}
diff --git a/vendor/thread_local/src/unreachable.rs b/vendor/thread_local/src/unreachable.rs
new file mode 100644
index 000000000..db4d831ac
--- /dev/null
+++ b/vendor/thread_local/src/unreachable.rs
@@ -0,0 +1,57 @@
+// Copyright 2017 Amanieu d'Antras
+//
+// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
+// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
+// http://opensource.org/licenses/MIT>, at your option. This file may not be
+// copied, modified, or distributed except according to those terms.
+
+use std::hint::unreachable_unchecked;
+
+/// An extension trait for `Option<T>` providing unchecked unwrapping methods.
+pub trait UncheckedOptionExt<T> {
+ /// Get the value out of this Option without checking for None.
+ unsafe fn unchecked_unwrap(self) -> T;
+
+    /// Assert to the optimizer that this `Option` is `None`.
+ unsafe fn unchecked_unwrap_none(self);
+}
+
+/// An extension trait for `Result<T, E>` providing unchecked unwrapping methods.
+pub trait UncheckedResultExt<T, E> {
+ /// Get the value out of this Result without checking for Err.
+ unsafe fn unchecked_unwrap_ok(self) -> T;
+
+ /// Get the error out of this Result without checking for Ok.
+ unsafe fn unchecked_unwrap_err(self) -> E;
+}
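+
+// `ThreadLocal::get_or` relies on this trait: it wraps an infallible closure
+// as `Ok::<T, ()>(create())` and calls `unchecked_unwrap_ok`, so the `Err`
+// arm is statically unreachable.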
+
+impl<T> UncheckedOptionExt<T> for Option<T> {
+ unsafe fn unchecked_unwrap(self) -> T {
+ match self {
+ Some(x) => x,
+ None => unreachable_unchecked(),
+ }
+ }
+
+ unsafe fn unchecked_unwrap_none(self) {
+ if self.is_some() {
+ unreachable_unchecked()
+ }
+ }
+}
+
+impl<T, E> UncheckedResultExt<T, E> for Result<T, E> {
+ unsafe fn unchecked_unwrap_ok(self) -> T {
+ match self {
+ Ok(x) => x,
+ Err(_) => unreachable_unchecked(),
+ }
+ }
+
+ unsafe fn unchecked_unwrap_err(self) -> E {
+ match self {
+ Ok(_) => unreachable_unchecked(),
+ Err(e) => e,
+ }
+ }
+}