Diffstat (limited to 'compiler/rustc_data_structures/src/sync')
-rw-r--r--  compiler/rustc_data_structures/src/sync/freeze.rs        200
-rw-r--r--  compiler/rustc_data_structures/src/sync/lock.rs          275
-rw-r--r--  compiler/rustc_data_structures/src/sync/parallel.rs      188
-rw-r--r--  compiler/rustc_data_structures/src/sync/worker_local.rs   20
4 files changed, 674 insertions, 9 deletions
diff --git a/compiler/rustc_data_structures/src/sync/freeze.rs b/compiler/rustc_data_structures/src/sync/freeze.rs
new file mode 100644
index 000000000..466c44f59
--- /dev/null
+++ b/compiler/rustc_data_structures/src/sync/freeze.rs
@@ -0,0 +1,200 @@
+use crate::sync::{AtomicBool, ReadGuard, RwLock, WriteGuard};
+#[cfg(parallel_compiler)]
+use crate::sync::{DynSend, DynSync};
+use std::{
+ cell::UnsafeCell,
+ intrinsics::likely,
+ marker::PhantomData,
+ ops::{Deref, DerefMut},
+ ptr::NonNull,
+ sync::atomic::Ordering,
+};
+
+/// A type which allows mutation using a lock until
+/// the value is frozen and can be accessed lock-free.
+///
+/// Unlike `RwLock`, it can be used to prevent mutation past a point.
+#[derive(Default)]
+pub struct FreezeLock<T> {
+ data: UnsafeCell<T>,
+ frozen: AtomicBool,
+
+ /// This lock protects writes to the `data` and `frozen` fields.
+ lock: RwLock<()>,
+}
+
+#[cfg(parallel_compiler)]
+unsafe impl<T: DynSync + DynSend> DynSync for FreezeLock<T> {}
+
+impl<T> FreezeLock<T> {
+ #[inline]
+ pub fn new(value: T) -> Self {
+ Self::with(value, false)
+ }
+
+ #[inline]
+ pub fn frozen(value: T) -> Self {
+ Self::with(value, true)
+ }
+
+ #[inline]
+ pub fn with(value: T, frozen: bool) -> Self {
+ Self {
+ data: UnsafeCell::new(value),
+ frozen: AtomicBool::new(frozen),
+ lock: RwLock::new(()),
+ }
+ }
+
+ /// Clones the inner value along with the frozen state.
+ #[inline]
+ pub fn clone(&self) -> Self
+ where
+ T: Clone,
+ {
+ let lock = self.read();
+ Self::with(lock.clone(), self.is_frozen())
+ }
+
+ #[inline]
+ pub fn is_frozen(&self) -> bool {
+ self.frozen.load(Ordering::Acquire)
+ }
+
+ /// Get the inner value if frozen.
+ #[inline]
+ pub fn get(&self) -> Option<&T> {
+ if likely(self.frozen.load(Ordering::Acquire)) {
+ // SAFETY: This is frozen so the data cannot be modified.
+ unsafe { Some(&*self.data.get()) }
+ } else {
+ None
+ }
+ }
+
+ #[inline]
+ pub fn read(&self) -> FreezeReadGuard<'_, T> {
+ FreezeReadGuard {
+ _lock_guard: if self.frozen.load(Ordering::Acquire) {
+ None
+ } else {
+ Some(self.lock.read())
+ },
+ data: unsafe { NonNull::new_unchecked(self.data.get()) },
+ }
+ }
+
+ #[inline]
+ pub fn borrow(&self) -> FreezeReadGuard<'_, T> {
+ self.read()
+ }
+
+ #[inline]
+ #[track_caller]
+ pub fn write(&self) -> FreezeWriteGuard<'_, T> {
+ self.try_write().expect("still mutable")
+ }
+
+ #[inline]
+ pub fn try_write(&self) -> Option<FreezeWriteGuard<'_, T>> {
+ let _lock_guard = self.lock.write();
+ // Use relaxed ordering since we're holding the write lock.
+ if self.frozen.load(Ordering::Relaxed) {
+ None
+ } else {
+ Some(FreezeWriteGuard {
+ _lock_guard,
+ data: unsafe { NonNull::new_unchecked(self.data.get()) },
+ frozen: &self.frozen,
+ marker: PhantomData,
+ })
+ }
+ }
+
+ #[inline]
+ pub fn freeze(&self) -> &T {
+ if !self.frozen.load(Ordering::Acquire) {
+ // Take the write lock to ensure there are no concurrent writes and that the `Release` store below publishes the latest write.
+ let _lock = self.lock.write();
+ self.frozen.store(true, Ordering::Release);
+ }
+
+ // SAFETY: This is frozen so the data cannot be modified and shared access is sound.
+ unsafe { &*self.data.get() }
+ }
+}
+
+/// A guard holding shared access to a `FreezeLock` which is in a locked state or frozen.
+#[must_use = "if unused the FreezeLock may immediately unlock"]
+pub struct FreezeReadGuard<'a, T: ?Sized> {
+ _lock_guard: Option<ReadGuard<'a, ()>>,
+ data: NonNull<T>,
+}
+
+impl<'a, T: ?Sized + 'a> Deref for FreezeReadGuard<'a, T> {
+ type Target = T;
+ #[inline]
+ fn deref(&self) -> &T {
+ // SAFETY: If the lock is not frozen, `_lock_guard` holds the lock to the `UnsafeCell` so
+ // this has shared access until the `FreezeReadGuard` is dropped. If the lock is frozen,
+ // the data cannot be modified and shared access is sound.
+ unsafe { &*self.data.as_ptr() }
+ }
+}
+
+impl<'a, T: ?Sized> FreezeReadGuard<'a, T> {
+ #[inline]
+ pub fn map<U: ?Sized>(this: Self, f: impl FnOnce(&T) -> &U) -> FreezeReadGuard<'a, U> {
+ FreezeReadGuard { data: NonNull::from(f(&*this)), _lock_guard: this._lock_guard }
+ }
+}
+
+/// A guard holding mutable access to a `FreezeLock` which is in a locked state.
+#[must_use = "if unused the FreezeLock may immediately unlock"]
+pub struct FreezeWriteGuard<'a, T: ?Sized> {
+ _lock_guard: WriteGuard<'a, ()>,
+ frozen: &'a AtomicBool,
+ data: NonNull<T>,
+ marker: PhantomData<&'a mut T>,
+}
+
+impl<'a, T> FreezeWriteGuard<'a, T> {
+ pub fn freeze(self) -> &'a T {
+ self.frozen.store(true, Ordering::Release);
+
+ // SAFETY: This is frozen so the data cannot be modified and shared access is sound.
+ unsafe { &*self.data.as_ptr() }
+ }
+}
+
+impl<'a, T: ?Sized> FreezeWriteGuard<'a, T> {
+ #[inline]
+ pub fn map<U: ?Sized>(
+ mut this: Self,
+ f: impl FnOnce(&mut T) -> &mut U,
+ ) -> FreezeWriteGuard<'a, U> {
+ FreezeWriteGuard {
+ data: NonNull::from(f(&mut *this)),
+ _lock_guard: this._lock_guard,
+ frozen: this.frozen,
+ marker: PhantomData,
+ }
+ }
+}
+
+impl<'a, T: ?Sized + 'a> Deref for FreezeWriteGuard<'a, T> {
+ type Target = T;
+ #[inline]
+ fn deref(&self) -> &T {
+ // SAFETY: `self._lock_guard` holds the lock to the `UnsafeCell` so this has shared access.
+ unsafe { &*self.data.as_ptr() }
+ }
+}
+
+impl<'a, T: ?Sized + 'a> DerefMut for FreezeWriteGuard<'a, T> {
+ #[inline]
+ fn deref_mut(&mut self) -> &mut T {
+ // SAFETY: `self._lock_guard` holds the lock to the `UnsafeCell` so this has mutable access.
+ unsafe { &mut *self.data.as_ptr() }
+ }
+}
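
To make the intended lifecycle concrete — mutate under the write lock, then freeze for lock-free reads — here is a minimal usage sketch (not part of the patch; it assumes `FreezeLock` is re-exported from `rustc_data_structures::sync`):

use rustc_data_structures::sync::FreezeLock;

fn main() {
    let lock = FreezeLock::new(Vec::new());

    // While unfrozen, mutation goes through the write lock.
    lock.write().push(1);
    lock.write().push(2);

    // `get` returns None until the value is frozen.
    assert!(lock.get().is_none());

    // Freezing publishes the value; subsequent reads skip the lock entirely.
    let frozen: &Vec<i32> = lock.freeze();
    assert_eq!(frozen, &vec![1, 2]);

    // Once frozen, further writes are rejected.
    assert!(lock.try_write().is_none());
}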
diff --git a/compiler/rustc_data_structures/src/sync/lock.rs b/compiler/rustc_data_structures/src/sync/lock.rs
new file mode 100644
index 000000000..339aebbf8
--- /dev/null
+++ b/compiler/rustc_data_structures/src/sync/lock.rs
@@ -0,0 +1,275 @@
+//! This module implements a lock which only uses synchronization if `might_be_dyn_thread_safe` is true.
+//! It implements `DynSend` and `DynSync` instead of the typical `Send` and `Sync` traits.
+//!
+//! When `cfg(parallel_compiler)` is not set, the lock is instead a wrapper around `RefCell`.
+
+#![allow(dead_code)]
+
+use std::fmt;
+
+#[cfg(parallel_compiler)]
+pub use maybe_sync::*;
+#[cfg(not(parallel_compiler))]
+pub use no_sync::*;
+
+#[derive(Clone, Copy, PartialEq)]
+pub enum Mode {
+ NoSync,
+ Sync,
+}
+
+mod maybe_sync {
+ use super::Mode;
+ use crate::sync::mode;
+ #[cfg(parallel_compiler)]
+ use crate::sync::{DynSend, DynSync};
+ use parking_lot::lock_api::RawMutex as _;
+ use parking_lot::RawMutex;
+ use std::cell::Cell;
+ use std::cell::UnsafeCell;
+ use std::intrinsics::unlikely;
+ use std::marker::PhantomData;
+ use std::mem::ManuallyDrop;
+ use std::ops::{Deref, DerefMut};
+
+ /// A guard holding mutable access to a `Lock` which is in a locked state.
+ #[must_use = "if unused the Lock will immediately unlock"]
+ pub struct LockGuard<'a, T> {
+ lock: &'a Lock<T>,
+ marker: PhantomData<&'a mut T>,
+
+ /// The synchronization mode of the lock. This is explicitly passed to let LLVM relate it
+ /// to the original lock operation.
+ mode: Mode,
+ }
+
+ impl<'a, T: 'a> Deref for LockGuard<'a, T> {
+ type Target = T;
+ #[inline]
+ fn deref(&self) -> &T {
+ // SAFETY: We have shared access to the mutable access owned by this type,
+ // so we can give out a shared reference.
+ unsafe { &*self.lock.data.get() }
+ }
+ }
+
+ impl<'a, T: 'a> DerefMut for LockGuard<'a, T> {
+ #[inline]
+ fn deref_mut(&mut self) -> &mut T {
+ // SAFETY: We have mutable access to the data so we can give out a mutable reference.
+ unsafe { &mut *self.lock.data.get() }
+ }
+ }
+
+ impl<'a, T: 'a> Drop for LockGuard<'a, T> {
+ #[inline]
+ fn drop(&mut self) {
+ // SAFETY (union access): We get `self.mode` from the lock operation so it is consistent
+ // with the `lock.mode` state. This means we access the right union fields.
+ match self.mode {
+ Mode::NoSync => {
+ let cell = unsafe { &self.lock.mode_union.no_sync };
+ debug_assert_eq!(cell.get(), true);
+ cell.set(false);
+ }
+ // SAFETY (unlock): We know that the lock is locked as this type is a proof of that.
+ Mode::Sync => unsafe { self.lock.mode_union.sync.unlock() },
+ }
+ }
+ }
+
+ union ModeUnion {
+ /// Indicates if the cell is locked. Only used if `Lock.mode` is `NoSync`.
+ no_sync: ManuallyDrop<Cell<bool>>,
+
+ /// A lock implementation that's only used if `Lock.mode` is `Sync`.
+ sync: ManuallyDrop<RawMutex>,
+ }
+
+ /// The value representing a locked state for the `Cell`.
+ const LOCKED: bool = true;
+
+ /// A lock which only uses synchronization if `might_be_dyn_thread_safe` is true.
+ /// It implements `DynSend` and `DynSync` instead of the typical `Send` and `Sync`.
+ pub struct Lock<T> {
+ /// Indicates if synchronization is used via `mode_union.sync` if it's `Sync`, or if a
+ /// non-thread-safe cell is used via `mode_union.no_sync` if it's `NoSync`.
+ /// This is set on initialization and never changed.
+ mode: Mode,
+
+ mode_union: ModeUnion,
+ data: UnsafeCell<T>,
+ }
+
+ impl<T> Lock<T> {
+ #[inline(always)]
+ pub fn new(inner: T) -> Self {
+ let (mode, mode_union) = if unlikely(mode::might_be_dyn_thread_safe()) {
+ // Create the lock with synchronization enabled using the `RawMutex` type.
+ (Mode::Sync, ModeUnion { sync: ManuallyDrop::new(RawMutex::INIT) })
+ } else {
+ // Create the lock with synchronization disabled.
+ (Mode::NoSync, ModeUnion { no_sync: ManuallyDrop::new(Cell::new(!LOCKED)) })
+ };
+ Lock { mode, mode_union, data: UnsafeCell::new(inner) }
+ }
+
+ #[inline(always)]
+ pub fn into_inner(self) -> T {
+ self.data.into_inner()
+ }
+
+ #[inline(always)]
+ pub fn get_mut(&mut self) -> &mut T {
+ self.data.get_mut()
+ }
+
+ #[inline(always)]
+ pub fn try_lock(&self) -> Option<LockGuard<'_, T>> {
+ let mode = self.mode;
+ // SAFETY: This is safe since the union fields are used in accordance with `self.mode`.
+ match mode {
+ Mode::NoSync => {
+ let cell = unsafe { &self.mode_union.no_sync };
+ let was_unlocked = cell.get() != LOCKED;
+ if was_unlocked {
+ cell.set(LOCKED);
+ }
+ was_unlocked
+ }
+ Mode::Sync => unsafe { self.mode_union.sync.try_lock() },
+ }
+ .then(|| LockGuard { lock: self, marker: PhantomData, mode })
+ }
+
+ /// This acquires the lock assuming synchronization is in a specific mode.
+ ///
+ /// # Safety
+ ///
+ /// This method must only be called with `Mode::Sync` if `might_be_dyn_thread_safe` was
+ /// true on lock creation.
+ #[inline(always)]
+ #[track_caller]
+ pub unsafe fn lock_assume(&self, mode: Mode) -> LockGuard<'_, T> {
+ #[inline(never)]
+ #[track_caller]
+ #[cold]
+ fn lock_held() -> ! {
+ panic!("lock was already held")
+ }
+
+ // SAFETY: This is safe since the union fields are used in accordance with `mode`
+ // which also must match `self.mode` due to the safety precondition.
+ unsafe {
+ match mode {
+ Mode::NoSync => {
+ if unlikely(self.mode_union.no_sync.replace(LOCKED) == LOCKED) {
+ lock_held()
+ }
+ }
+ Mode::Sync => self.mode_union.sync.lock(),
+ }
+ }
+ LockGuard { lock: self, marker: PhantomData, mode }
+ }
+
+ #[inline(always)]
+ #[track_caller]
+ pub fn lock(&self) -> LockGuard<'_, T> {
+ unsafe { self.lock_assume(self.mode) }
+ }
+ }
+
+ #[cfg(parallel_compiler)]
+ unsafe impl<T: DynSend> DynSend for Lock<T> {}
+ #[cfg(parallel_compiler)]
+ unsafe impl<T: DynSend> DynSync for Lock<T> {}
+}
+
+mod no_sync {
+ use super::Mode;
+ use std::cell::RefCell;
+
+ pub use std::cell::RefMut as LockGuard;
+
+ pub struct Lock<T>(RefCell<T>);
+
+ impl<T> Lock<T> {
+ #[inline(always)]
+ pub fn new(inner: T) -> Self {
+ Lock(RefCell::new(inner))
+ }
+
+ #[inline(always)]
+ pub fn into_inner(self) -> T {
+ self.0.into_inner()
+ }
+
+ #[inline(always)]
+ pub fn get_mut(&mut self) -> &mut T {
+ self.0.get_mut()
+ }
+
+ #[inline(always)]
+ pub fn try_lock(&self) -> Option<LockGuard<'_, T>> {
+ self.0.try_borrow_mut().ok()
+ }
+
+ #[inline(always)]
+ #[track_caller]
+ // This is unsafe to match the API for the `parallel_compiler` case.
+ pub unsafe fn lock_assume(&self, _mode: Mode) -> LockGuard<'_, T> {
+ self.0.borrow_mut()
+ }
+
+ #[inline(always)]
+ #[track_caller]
+ pub fn lock(&self) -> LockGuard<'_, T> {
+ self.0.borrow_mut()
+ }
+ }
+}
+
+impl<T> Lock<T> {
+ #[inline(always)]
+ #[track_caller]
+ pub fn with_lock<F: FnOnce(&mut T) -> R, R>(&self, f: F) -> R {
+ f(&mut *self.lock())
+ }
+
+ #[inline(always)]
+ #[track_caller]
+ pub fn borrow(&self) -> LockGuard<'_, T> {
+ self.lock()
+ }
+
+ #[inline(always)]
+ #[track_caller]
+ pub fn borrow_mut(&self) -> LockGuard<'_, T> {
+ self.lock()
+ }
+}
+
+impl<T: Default> Default for Lock<T> {
+ #[inline]
+ fn default() -> Self {
+ Lock::new(T::default())
+ }
+}
+
+impl<T: fmt::Debug> fmt::Debug for Lock<T> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ match self.try_lock() {
+ Some(guard) => f.debug_struct("Lock").field("data", &&*guard).finish(),
+ None => {
+ struct LockedPlaceholder;
+ impl fmt::Debug for LockedPlaceholder {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.write_str("<locked>")
+ }
+ }
+
+ f.debug_struct("Lock").field("data", &LockedPlaceholder).finish()
+ }
+ }
+ }
+}
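
For reference, a small sketch of how this `Lock` is meant to be used (not part of the patch; the same call sites work under both cfgs, only the synchronization strategy differs):

use rustc_data_structures::sync::Lock;

fn main() {
    let counter = Lock::new(0usize);

    // `lock` panics (NoSync) or blocks (Sync) if the lock is already held.
    *counter.lock() += 1;

    // `with_lock` scopes the guard to a closure.
    counter.with_lock(|c| *c += 1);

    // `try_lock` returns None instead of panicking or blocking.
    if let Some(mut guard) = counter.try_lock() {
        *guard += 1;
    }

    assert_eq!(counter.into_inner(), 3);
}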
diff --git a/compiler/rustc_data_structures/src/sync/parallel.rs b/compiler/rustc_data_structures/src/sync/parallel.rs
new file mode 100644
index 000000000..1944ddfb7
--- /dev/null
+++ b/compiler/rustc_data_structures/src/sync/parallel.rs
@@ -0,0 +1,188 @@
+//! This module defines parallel operations that are implemented in
+//! one way for the serial compiler, and another way for the parallel compiler.
+
+#![allow(dead_code)]
+
+use parking_lot::Mutex;
+use std::any::Any;
+use std::panic::{catch_unwind, resume_unwind, AssertUnwindSafe};
+
+#[cfg(not(parallel_compiler))]
+pub use disabled::*;
+#[cfg(parallel_compiler)]
+pub use enabled::*;
+
+/// A guard used to hold panics that occur during a parallel section to later be unwound.
+/// This is used for the parallel compiler to prevent fatal errors from non-deterministically
+/// hiding errors by ensuring that everything in the section has completed executing before
+/// continuing with unwinding. It's also used for the non-parallel code to ensure error message
+/// output matches the parallel compiler for testing purposes.
+pub struct ParallelGuard {
+ panic: Mutex<Option<Box<dyn Any + Send + 'static>>>,
+}
+
+impl ParallelGuard {
+ pub fn run<R>(&self, f: impl FnOnce() -> R) -> Option<R> {
+ catch_unwind(AssertUnwindSafe(f))
+ .map_err(|err| {
+ *self.panic.lock() = Some(err);
+ })
+ .ok()
+ }
+}
+
+/// This gives access to a fresh parallel guard in the closure and will unwind any panics
+/// caught in it after the closure returns.
+#[inline]
+pub fn parallel_guard<R>(f: impl FnOnce(&ParallelGuard) -> R) -> R {
+ let guard = ParallelGuard { panic: Mutex::new(None) };
+ let ret = f(&guard);
+ if let Some(panic) = guard.panic.into_inner() {
+ resume_unwind(panic);
+ }
+ ret
+}
+
+mod disabled {
+ use crate::sync::parallel_guard;
+
+ #[macro_export]
+ #[cfg(not(parallel_compiler))]
+ macro_rules! parallel {
+ ($($blocks:block),*) => {{
+ $crate::sync::parallel_guard(|guard| {
+ $(guard.run(|| $blocks);)*
+ });
+ }}
+ }
+
+ pub fn join<A, B, RA, RB>(oper_a: A, oper_b: B) -> (RA, RB)
+ where
+ A: FnOnce() -> RA,
+ B: FnOnce() -> RB,
+ {
+ let (a, b) = parallel_guard(|guard| {
+ let a = guard.run(oper_a);
+ let b = guard.run(oper_b);
+ (a, b)
+ });
+ (a.unwrap(), b.unwrap())
+ }
+
+ pub fn par_for_each_in<T: IntoIterator>(t: T, mut for_each: impl FnMut(T::Item)) {
+ parallel_guard(|guard| {
+ t.into_iter().for_each(|i| {
+ guard.run(|| for_each(i));
+ });
+ })
+ }
+
+ pub fn par_map<T: IntoIterator, R, C: FromIterator<R>>(
+ t: T,
+ mut map: impl FnMut(<<T as IntoIterator>::IntoIter as Iterator>::Item) -> R,
+ ) -> C {
+ parallel_guard(|guard| t.into_iter().filter_map(|i| guard.run(|| map(i))).collect())
+ }
+}
+
+#[cfg(parallel_compiler)]
+mod enabled {
+ use crate::sync::{mode, parallel_guard, DynSend, DynSync, FromDyn};
+
+ /// Runs a list of blocks in parallel. The first block is executed immediately on
+ /// the current thread. Use that for the longest running block.
+ #[macro_export]
+ macro_rules! parallel {
+ (impl $fblock:block [$($c:expr,)*] [$block:expr $(, $rest:expr)*]) => {
+ parallel!(impl $fblock [$block, $($c,)*] [$($rest),*])
+ };
+ (impl $fblock:block [$($blocks:expr,)*] []) => {
+ ::rustc_data_structures::sync::scope(|s| {
+ $(let block = rustc_data_structures::sync::FromDyn::from(|| $blocks);
+ s.spawn(move |_| block.into_inner()());)*
+ (|| $fblock)();
+ });
+ };
+ ($fblock:block, $($blocks:block),*) => {
+ if rustc_data_structures::sync::is_dyn_thread_safe() {
+ // Reverse the order of the later blocks since Rayon executes them in reverse order
+ // when using a single thread. This ensures the execution order matches that
+ // of a single threaded rustc.
+ parallel!(impl $fblock [] [$($blocks),*]);
+ } else {
+ $crate::sync::parallel_guard(|guard| {
+ guard.run(|| $fblock);
+ $(guard.run(|| $blocks);)*
+ });
+ }
+ };
+ }
+
+ // This function only works when `mode::is_dyn_thread_safe()`.
+ pub fn scope<'scope, OP, R>(op: OP) -> R
+ where
+ OP: FnOnce(&rayon::Scope<'scope>) -> R + DynSend,
+ R: DynSend,
+ {
+ let op = FromDyn::from(op);
+ rayon::scope(|s| FromDyn::from(op.into_inner()(s))).into_inner()
+ }
+
+ #[inline]
+ pub fn join<A, B, RA: DynSend, RB: DynSend>(oper_a: A, oper_b: B) -> (RA, RB)
+ where
+ A: FnOnce() -> RA + DynSend,
+ B: FnOnce() -> RB + DynSend,
+ {
+ if mode::is_dyn_thread_safe() {
+ let oper_a = FromDyn::from(oper_a);
+ let oper_b = FromDyn::from(oper_b);
+ let (a, b) = rayon::join(
+ move || FromDyn::from(oper_a.into_inner()()),
+ move || FromDyn::from(oper_b.into_inner()()),
+ );
+ (a.into_inner(), b.into_inner())
+ } else {
+ super::disabled::join(oper_a, oper_b)
+ }
+ }
+
+ use rayon::iter::{FromParallelIterator, IntoParallelIterator, ParallelIterator};
+
+ pub fn par_for_each_in<I, T: IntoIterator<Item = I> + IntoParallelIterator<Item = I>>(
+ t: T,
+ for_each: impl Fn(I) + DynSync + DynSend,
+ ) {
+ parallel_guard(|guard| {
+ if mode::is_dyn_thread_safe() {
+ let for_each = FromDyn::from(for_each);
+ t.into_par_iter().for_each(|i| {
+ guard.run(|| for_each(i));
+ });
+ } else {
+ t.into_iter().for_each(|i| {
+ guard.run(|| for_each(i));
+ });
+ }
+ });
+ }
+
+ pub fn par_map<
+ I,
+ T: IntoIterator<Item = I> + IntoParallelIterator<Item = I>,
+ R: std::marker::Send,
+ C: FromIterator<R> + FromParallelIterator<R>,
+ >(
+ t: T,
+ map: impl Fn(I) -> R + DynSync + DynSend,
+ ) -> C {
+ parallel_guard(|guard| {
+ if mode::is_dyn_thread_safe() {
+ let map = FromDyn::from(map);
+ t.into_par_iter().filter_map(|i| guard.run(|| map(i))).collect()
+ } else {
+ t.into_iter().filter_map(|i| guard.run(|| map(i))).collect()
+ }
+ })
+ }
+}
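
As a usage sketch for the helpers above (not part of the patch; `join` and `par_map` are assumed to be re-exported from `rustc_data_structures::sync`):

use rustc_data_structures::sync::{join, par_map};

fn main() {
    // In the serial path both closures still run to completion even if the
    // first panics; the panic is rethrown afterwards via the ParallelGuard.
    let (a, b) = join(|| 1 + 1, || 2 + 2);
    assert_eq!((a, b), (2, 4));

    // When `is_dyn_thread_safe()` this maps through Rayon; otherwise it runs
    // serially in iteration order.
    let squares: Vec<u32> = par_map(0..4u32, |i| i * i);
    assert_eq!(squares, vec![0, 1, 4, 9]);
}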
diff --git a/compiler/rustc_data_structures/src/sync/worker_local.rs b/compiler/rustc_data_structures/src/sync/worker_local.rs
index 8c84daf4f..ffafdba13 100644
--- a/compiler/rustc_data_structures/src/sync/worker_local.rs
+++ b/compiler/rustc_data_structures/src/sync/worker_local.rs
@@ -1,4 +1,4 @@
-use crate::sync::Lock;
+use parking_lot::Mutex;
use std::cell::Cell;
use std::cell::OnceCell;
use std::ops::Deref;
@@ -6,7 +6,7 @@ use std::ptr;
use std::sync::Arc;
#[cfg(parallel_compiler)]
-use {crate::cold_path, crate::sync::CacheAligned};
+use {crate::outline, crate::sync::CacheAligned};
/// A pointer to the `RegistryData` which uniquely identifies a registry.
/// This identifier can be reused if the registry gets freed.
@@ -25,17 +25,13 @@ impl RegistryId {
fn verify(self) -> usize {
let (id, index) = THREAD_DATA.with(|data| (data.registry_id.get(), data.index.get()));
- if id == self {
- index
- } else {
- cold_path(|| panic!("Unable to verify registry association"))
- }
+ if id == self { index } else { outline(|| panic!("Unable to verify registry association")) }
}
}
struct RegistryData {
thread_limit: usize,
- threads: Lock<usize>,
+ threads: Mutex<usize>,
}
/// Represents a list of threads which can access worker locals.
@@ -65,7 +61,7 @@ thread_local! {
impl Registry {
/// Creates a registry which can hold up to `thread_limit` threads.
pub fn new(thread_limit: usize) -> Self {
- Registry(Arc::new(RegistryData { thread_limit, threads: Lock::new(0) }))
+ Registry(Arc::new(RegistryData { thread_limit, threads: Mutex::new(0) }))
}
/// Gets the registry associated with the current thread. Panics if there's no such registry.
@@ -171,3 +167,9 @@ impl<T> Deref for WorkerLocal<T> {
unsafe { &self.locals.get_unchecked(self.registry.id().verify()).0 }
}
}
+
+impl<T: Default> Default for WorkerLocal<T> {
+ fn default() -> Self {
+ WorkerLocal::new(|_| T::default())
+ }
+}
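
The new `Default` impl lets types embedding a `WorkerLocal` simply derive `Default`; a minimal sketch (not part of the patch; the `Interners` type is hypothetical, and on a parallel compiler the constructing thread must already be registered with a `Registry`):

use rustc_data_structures::sync::WorkerLocal;

#[derive(Default)]
struct Interners {
    // `Default` forwards to `WorkerLocal::new(|_| T::default())` as added above.
    strings: WorkerLocal<Vec<String>>,
}

fn main() {
    let interners = Interners::default();
    // `Deref` yields the current worker thread's copy of the value.
    assert!(interners.strings.is_empty());
}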