author    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-18 02:49:42 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-18 02:49:42 +0000
commit    837b550238aa671a591ccf282dddeab29cadb206 (patch)
tree      914b6b8862bace72bd3245ca184d374b08d8a672 /vendor/thread_local/src
parent    Adding debian version 1.70.0+dfsg2-1. (diff)
download  rustc-837b550238aa671a591ccf282dddeab29cadb206.tar.xz
          rustc-837b550238aa671a591ccf282dddeab29cadb206.zip
Merging upstream version 1.71.1+dfsg1.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'vendor/thread_local/src')
-rw-r--r--  vendor/thread_local/src/lib.rs       |  56
-rw-r--r--  vendor/thread_local/src/thread_id.rs | 112
2 files changed, 128 insertions(+), 40 deletions(-)
diff --git a/vendor/thread_local/src/lib.rs b/vendor/thread_local/src/lib.rs
index 33b79d6a5..12d25f6c3 100644
--- a/vendor/thread_local/src/lib.rs
+++ b/vendor/thread_local/src/lib.rs
@@ -65,6 +65,7 @@
#![warn(missing_docs)]
#![allow(clippy::mutex_atomic)]
+#![cfg_attr(feature = "nightly", feature(thread_local))]
mod cached;
mod thread_id;
@@ -81,7 +82,6 @@ use std::mem::MaybeUninit;
use std::panic::UnwindSafe;
use std::ptr;
use std::sync::atomic::{AtomicBool, AtomicPtr, AtomicUsize, Ordering};
-use std::sync::Mutex;
use thread_id::Thread;
use unreachable::UncheckedResultExt;
@@ -107,11 +107,6 @@ pub struct ThreadLocal<T: Send> {
/// The number of values in the thread local. This can be less than the real number of values,
/// but is never more.
values: AtomicUsize,
-
- /// Lock used to guard against concurrent modifications. This is taken when
- /// there is a possibility of allocating a new bucket, which only occurs
- /// when inserting values.
- lock: Mutex<()>,
}
struct Entry<T> {
@@ -155,7 +150,7 @@ impl<T: Send> Drop for ThreadLocal<T> {
continue;
}
- unsafe { Box::from_raw(std::slice::from_raw_parts_mut(bucket_ptr, this_bucket_size)) };
+ unsafe { deallocate_bucket(bucket_ptr, this_bucket_size) };
}
}
}
@@ -190,14 +185,12 @@ impl<T: Send> ThreadLocal<T> {
// representation as a sequence of their inner type.
buckets: unsafe { mem::transmute(buckets) },
values: AtomicUsize::new(0),
- lock: Mutex::new(()),
}
}
/// Returns the element for the current thread, if it exists.
pub fn get(&self) -> Option<&T> {
- let thread = thread_id::get();
- self.get_inner(thread)
+ self.get_inner(thread_id::get())
}
/// Returns the element for the current thread, or creates it if it doesn't
@@ -220,10 +213,11 @@ impl<T: Send> ThreadLocal<T> {
F: FnOnce() -> Result<T, E>,
{
let thread = thread_id::get();
- match self.get_inner(thread) {
- Some(x) => Ok(x),
- None => Ok(self.insert(thread, create()?)),
+ if let Some(val) = self.get_inner(thread) {
+ return Ok(val);
}
+
+ Ok(self.insert(create()?))
}
fn get_inner(&self, thread: Thread) -> Option<&T> {
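As a usage note for the get_or_try hunk above: the closure only runs when the calling thread has no value yet, so the cold-path insert is a one-time cost per thread. A minimal sketch (the u32 payload and () error type are arbitrary choices for illustration):

    use thread_local::ThreadLocal;

    fn main() {
        let tls: ThreadLocal<u32> = ThreadLocal::new();

        // First call on this thread inserts via the cold path; subsequent
        // calls return the cached reference without running the closure.
        let v: Result<&u32, ()> = tls.get_or_try(|| Ok(42));
        assert_eq!(v, Ok(&42));
        assert_eq!(tls.get(), Some(&42));
    }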
@@ -244,24 +238,34 @@ impl<T: Send> ThreadLocal<T> {
}
#[cold]
- fn insert(&self, thread: Thread, data: T) -> &T {
- // Lock the Mutex to ensure only a single thread is allocating buckets at once
- let _guard = self.lock.lock().unwrap();
-
+ fn insert(&self, data: T) -> &T {
+ let thread = thread_id::get();
let bucket_atomic_ptr = unsafe { self.buckets.get_unchecked(thread.bucket) };
-
let bucket_ptr: *const _ = bucket_atomic_ptr.load(Ordering::Acquire);
+
+ // If the bucket doesn't already exist, we need to allocate it
let bucket_ptr = if bucket_ptr.is_null() {
- // Allocate a new bucket
- let bucket_ptr = allocate_bucket(thread.bucket_size);
- bucket_atomic_ptr.store(bucket_ptr, Ordering::Release);
- bucket_ptr
+ let new_bucket = allocate_bucket(thread.bucket_size);
+
+ match bucket_atomic_ptr.compare_exchange(
+ ptr::null_mut(),
+ new_bucket,
+ Ordering::AcqRel,
+ Ordering::Acquire,
+ ) {
+ Ok(_) => new_bucket,
+ // If the bucket value changed (from null), that means
+ // another thread stored a new bucket before we could,
+ // and we can free our bucket and use that one instead
+ Err(bucket_ptr) => {
+ unsafe { deallocate_bucket(new_bucket, thread.bucket_size) }
+ bucket_ptr
+ }
+ }
} else {
bucket_ptr
};
- drop(_guard);
-
// Insert the new element into the bucket
let entry = unsafe { &*bucket_ptr.add(thread.index) };
let value_ptr = entry.value.get();
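The hunk above is the heart of the change: instead of serializing all inserts behind a Mutex, each thread speculatively allocates a bucket and publishes it with a single compare_exchange, freeing its own copy if another thread won the race. A standalone sketch of the same pattern against a plain AtomicPtr slot (not the crate's code; the u32 payload is arbitrary):

    use std::ptr;
    use std::sync::atomic::{AtomicPtr, Ordering};

    fn get_or_init(slot: &AtomicPtr<u32>) -> &u32 {
        let p = slot.load(Ordering::Acquire);
        if !p.is_null() {
            return unsafe { &*p };
        }
        // Speculatively allocate before attempting to publish.
        let new = Box::into_raw(Box::new(42));
        match slot.compare_exchange(
            ptr::null_mut(),
            new,
            Ordering::AcqRel,
            Ordering::Acquire,
        ) {
            // We won the race: our allocation is now the shared value.
            Ok(_) => unsafe { &*new },
            // We lost: free our copy and use the one that got published.
            Err(existing) => {
                unsafe { drop(Box::from_raw(new)) };
                unsafe { &*existing }
            }
        }
    }

    fn main() {
        let slot = AtomicPtr::new(ptr::null_mut());
        assert_eq!(*get_or_init(&slot), 42);
        // The published Box is deliberately leaked when `slot` is dropped.
    }

The AcqRel/Acquire ordering pair mirrors the diff: a successful publish releases the new bucket's memory, while the failure ordering (and the initial load) acquires whatever another thread published.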
@@ -525,6 +529,10 @@ fn allocate_bucket<T>(size: usize) -> *mut Entry<T> {
) as *mut _
}
+unsafe fn deallocate_bucket<T>(bucket: *mut Entry<T>, size: usize) {
+ let _ = Box::from_raw(std::slice::from_raw_parts_mut(bucket, size));
+}
+
#[cfg(test)]
mod tests {
use super::ThreadLocal;
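The new deallocate_bucket has to mirror allocate_bucket exactly: a bucket is created by leaking a boxed slice, so reclaiming it means rebuilding a Box<[T]> with the same element type and length. A minimal sketch of that pairing, independent of the crate's Entry<T> layout (allocate/deallocate are illustrative names, not the crate's API):

    // Create a bucket by leaking a boxed slice of `size` elements.
    fn allocate<T: Default>(size: usize) -> *mut T {
        let boxed: Box<[T]> = (0..size).map(|_| T::default()).collect();
        Box::into_raw(boxed) as *mut T
    }

    // Safety: `bucket` must come from `allocate::<T>` with the same `size`,
    // and must not be used again afterwards.
    unsafe fn deallocate<T>(bucket: *mut T, size: usize) {
        let _ = Box::from_raw(std::slice::from_raw_parts_mut(bucket, size));
    }

    fn main() {
        let p = allocate::<u64>(8);
        unsafe { deallocate(p, 8) };
    }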
diff --git a/vendor/thread_local/src/thread_id.rs b/vendor/thread_local/src/thread_id.rs
index 6eb0f616f..aa4f2d632 100644
--- a/vendor/thread_local/src/thread_id.rs
+++ b/vendor/thread_local/src/thread_id.rs
@@ -7,6 +7,7 @@
use crate::POINTER_WIDTH;
use once_cell::sync::Lazy;
+use std::cell::Cell;
use std::cmp::Reverse;
use std::collections::BinaryHeap;
use std::sync::Mutex;
@@ -73,24 +74,103 @@ impl Thread {
}
}
-/// Wrapper around `Thread` that allocates and deallocates the ID.
-struct ThreadHolder(Thread);
-impl ThreadHolder {
- fn new() -> ThreadHolder {
- ThreadHolder(Thread::new(THREAD_ID_MANAGER.lock().unwrap().alloc()))
- }
-}
-impl Drop for ThreadHolder {
- fn drop(&mut self) {
- THREAD_ID_MANAGER.lock().unwrap().free(self.0.id);
- }
-}
+cfg_if::cfg_if! {
+ if #[cfg(feature = "nightly")] {
+ // This is split into 2 thread-local variables so that we can check whether the
+ // thread is initialized without having to register a thread-local destructor.
+ //
+ // This makes the fast path smaller.
+ #[thread_local]
+ static mut THREAD: Option<Thread> = None;
+ thread_local! { static THREAD_GUARD: ThreadGuard = const { ThreadGuard { id: Cell::new(0) } }; }
+
+ // Guard to ensure the thread ID is released on thread exit.
+ struct ThreadGuard {
+ // We keep a copy of the thread ID in the ThreadGuard: we can't
+ // reliably access THREAD in our Drop impl due to the unpredictable
+ // order of TLS destructors.
+ id: Cell<usize>,
+ }
-thread_local!(static THREAD_HOLDER: ThreadHolder = ThreadHolder::new());
+ impl Drop for ThreadGuard {
+ fn drop(&mut self) {
+ // Release the thread ID. Any further accesses to the thread ID
+ // will go through get_slow which will either panic or
+ // initialize a new ThreadGuard.
+ unsafe {
+ THREAD = None;
+ }
+ THREAD_ID_MANAGER.lock().unwrap().free(self.id.get());
+ }
+ }
-/// Get the current thread.
-pub(crate) fn get() -> Thread {
- THREAD_HOLDER.with(|holder| holder.0)
+ /// Returns a thread ID for the current thread, allocating one if needed.
+ #[inline]
+ pub(crate) fn get() -> Thread {
+ if let Some(thread) = unsafe { THREAD } {
+ thread
+ } else {
+ get_slow()
+ }
+ }
+
+ /// Out-of-line slow path for allocating a thread ID.
+ #[cold]
+ fn get_slow() -> Thread {
+ let new = Thread::new(THREAD_ID_MANAGER.lock().unwrap().alloc());
+ unsafe {
+ THREAD = Some(new);
+ }
+ THREAD_GUARD.with(|guard| guard.id.set(new.id));
+ new
+ }
+ } else {
+ // This is split into 2 thread-local variables so that we can check whether the
+ // thread is initialized without having to register a thread-local destructor.
+ //
+ // This makes the fast path smaller.
+ thread_local! { static THREAD: Cell<Option<Thread>> = const { Cell::new(None) }; }
+ thread_local! { static THREAD_GUARD: ThreadGuard = const { ThreadGuard { id: Cell::new(0) } }; }
+
+ // Guard to ensure the thread ID is released on thread exit.
+ struct ThreadGuard {
+ // We keep a copy of the thread ID in the ThreadGuard: we can't
+ // reliably access THREAD in our Drop impl due to the unpredictable
+ // order of TLS destructors.
+ id: Cell<usize>,
+ }
+
+ impl Drop for ThreadGuard {
+ fn drop(&mut self) {
+ // Release the thread ID. Any further accesses to the thread ID
+ // will go through get_slow which will either panic or
+ // initialize a new ThreadGuard.
+ let _ = THREAD.try_with(|thread| thread.set(None));
+ THREAD_ID_MANAGER.lock().unwrap().free(self.id.get());
+ }
+ }
+
+ /// Returns a thread ID for the current thread, allocating one if needed.
+ #[inline]
+ pub(crate) fn get() -> Thread {
+ THREAD.with(|thread| {
+ if let Some(thread) = thread.get() {
+ thread
+ } else {
+ get_slow(thread)
+ }
+ })
+ }
+
+ /// Out-of-line slow path for allocating a thread ID.
+ #[cold]
+ fn get_slow(thread: &Cell<Option<Thread>>) -> Thread {
+ let new = Thread::new(THREAD_ID_MANAGER.lock().unwrap().alloc());
+ thread.set(Some(new));
+ THREAD_GUARD.with(|guard| guard.id.set(new.id));
+ new
+ }
+ }
}
#[test]
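Both branches of the cfg_if above use the same two-variable pattern: a destructor-free thread-local for the fast path (a #[thread_local] static on nightly, a Cell on stable) plus a separate guard whose Drop impl returns the ID to the global manager on thread exit. A standalone sketch of the stable variant, with a toy allocator standing in for THREAD_ID_MANAGER's BinaryHeap-based free list:

    use std::cell::Cell;
    use std::sync::Mutex;

    // Toy ID manager: hands out fresh IDs and reuses returned ones.
    static FREE_IDS: Mutex<Vec<usize>> = Mutex::new(Vec::new());
    static NEXT_ID: Mutex<usize> = Mutex::new(0);

    fn alloc_id() -> usize {
        if let Some(id) = FREE_IDS.lock().unwrap().pop() {
            return id;
        }
        let mut next = NEXT_ID.lock().unwrap();
        let id = *next;
        *next += 1;
        id
    }

    struct Guard {
        id: Cell<usize>,
    }

    impl Drop for Guard {
        fn drop(&mut self) {
            // Runs at thread exit: return this thread's ID to the pool.
            FREE_IDS.lock().unwrap().push(self.id.get());
        }
    }

    thread_local! {
        // Fast path: Cell<Option<usize>> has no Drop, so no TLS
        // destructor is ever registered for it.
        static ID: Cell<Option<usize>> = const { Cell::new(None) };
        // Touched only on the slow path; first access arms the Drop above.
        static GUARD: Guard = const { Guard { id: Cell::new(0) } };
    }

    fn current_id() -> usize {
        ID.with(|id| {
            id.get().unwrap_or_else(|| {
                let new = alloc_id();
                id.set(Some(new));
                GUARD.with(|g| g.id.set(new));
                new
            })
        })
    }

    fn main() {
        let a = current_id();
        assert_eq!(a, current_id()); // cached fast path

        let b = std::thread::spawn(current_id).join().unwrap();
        assert_ne!(a, b);
        // b was pushed back to FREE_IDS when the spawned thread exited,
        // so the next new thread may observe the same ID again.
    }

Keeping the fast-path variable destructor-free is what lets get stay a bare TLS load; only the first call per thread pays for locking the manager and registering the guard.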