author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-30 18:31:44 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-30 18:31:44 +0000
commit     c23a457e72abe608715ac76f076f47dc42af07a5
tree       2772049aaf84b5c9d0ed12ec8d86812f7a7904b6
parent     Releasing progress-linux version 1.73.0+dfsg1-1~progress7.99u1.
Merging upstream version 1.74.1+dfsg1.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'compiler/rustc_data_structures')
 compiler/rustc_data_structures/src/flock/unix.rs               |  24
 compiler/rustc_data_structures/src/graph/dominators/mod.rs     |   6
 compiler/rustc_data_structures/src/graph/implementation/mod.rs |  23
 compiler/rustc_data_structures/src/lib.rs                      |  24
 compiler/rustc_data_structures/src/marker.rs                   |   2
 compiler/rustc_data_structures/src/memmap.rs                   |   7
 compiler/rustc_data_structures/src/profiling.rs                |   4
 compiler/rustc_data_structures/src/sharded.rs                  | 159
 compiler/rustc_data_structures/src/small_c_str.rs              |   6
 compiler/rustc_data_structures/src/sync.rs                     | 338
 compiler/rustc_data_structures/src/sync/freeze.rs              | 200
 compiler/rustc_data_structures/src/sync/lock.rs                | 275
 compiler/rustc_data_structures/src/sync/parallel.rs            | 188
 compiler/rustc_data_structures/src/sync/worker_local.rs        |  20
14 files changed, 875 insertions, 401 deletions
diff --git a/compiler/rustc_data_structures/src/flock/unix.rs b/compiler/rustc_data_structures/src/flock/unix.rs
index 4e5297d58..eff9e8f83 100644
--- a/compiler/rustc_data_structures/src/flock/unix.rs
+++ b/compiler/rustc_data_structures/src/flock/unix.rs
@@ -21,8 +21,16 @@ impl Lock {
         let lock_type = if exclusive { libc::F_WRLCK } else { libc::F_RDLCK };
 
         let mut flock: libc::flock = unsafe { mem::zeroed() };
-        flock.l_type = lock_type as libc::c_short;
-        flock.l_whence = libc::SEEK_SET as libc::c_short;
+        #[cfg(not(all(target_os = "hurd", target_arch = "x86")))]
+        {
+            flock.l_type = lock_type as libc::c_short;
+            flock.l_whence = libc::SEEK_SET as libc::c_short;
+        }
+        #[cfg(all(target_os = "hurd", target_arch = "x86"))]
+        {
+            flock.l_type = lock_type as libc::c_int;
+            flock.l_whence = libc::SEEK_SET as libc::c_int;
+        }
         flock.l_start = 0;
         flock.l_len = 0;
 
@@ -39,8 +47,16 @@ impl Lock {
 impl Drop for Lock {
     fn drop(&mut self) {
         let mut flock: libc::flock = unsafe { mem::zeroed() };
-        flock.l_type = libc::F_UNLCK as libc::c_short;
-        flock.l_whence = libc::SEEK_SET as libc::c_short;
+        #[cfg(not(all(target_os = "hurd", target_arch = "x86")))]
+        {
+            flock.l_type = libc::F_UNLCK as libc::c_short;
+            flock.l_whence = libc::SEEK_SET as libc::c_short;
+        }
+        #[cfg(all(target_os = "hurd", target_arch = "x86"))]
+        {
+            flock.l_type = libc::F_UNLCK as libc::c_int;
+            flock.l_whence = libc::SEEK_SET as libc::c_int;
+        }
         flock.l_start = 0;
         flock.l_len = 0;
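The hunk above exists because most libcs declare `flock.l_type` and `flock.l_whence` as `c_short`, while the Hurd/x86 libc declares them as `c_int`, so the casts must differ per target. A sketch of the same pattern using a `cfg`-selected type alias instead of duplicated blocks (the alias name is hypothetical, not part of this change):

    // Hypothetical alternative: pick the field type once with a cfg-gated alias.
    #[cfg(all(target_os = "hurd", target_arch = "x86"))]
    type FlockFieldTy = libc::c_int;
    #[cfg(not(all(target_os = "hurd", target_arch = "x86")))]
    type FlockFieldTy = libc::c_short;

    fn lock_type(exclusive: bool) -> FlockFieldTy {
        // The cast is written once against the alias rather than at each assignment.
        (if exclusive { libc::F_WRLCK } else { libc::F_RDLCK }) as FlockFieldTy
    }

The duplicated-block form used by the commit keeps the change local to the two assignment sites, at the cost of repeating the `cfg` attributes in `Lock::new` and `Drop::drop`.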
diff --git a/compiler/rustc_data_structures/src/graph/dominators/mod.rs b/compiler/rustc_data_structures/src/graph/dominators/mod.rs
index 85ef2de9b..4075481e5 100644
--- a/compiler/rustc_data_structures/src/graph/dominators/mod.rs
+++ b/compiler/rustc_data_structures/src/graph/dominators/mod.rs
@@ -51,7 +51,7 @@ pub fn dominators<G: ControlFlowGraph>(graph: &G) -> Dominators<G::Node> {
     // Traverse the graph, collecting a number of things:
     //
     // * Preorder mapping (to it, and back to the actual ordering)
-    // * Postorder mapping (used exclusively for rank_partial_cmp on the final product)
+    // * Postorder mapping (used exclusively for `cmp_in_dominator_order` on the final product)
     // * Parents for each vertex in the preorder tree
     //
     // These are all done here rather than through one of the 'standard'
@@ -342,8 +342,8 @@ impl<Node: Idx> Dominators<Node> {
     /// relationship, the dominator will always precede the dominated. (The relative ordering
     /// of two unrelated nodes will also be consistent, but otherwise the order has no
    /// meaning.) This method cannot be used to determine if either Node dominates the other.
-    pub fn rank_partial_cmp(&self, lhs: Node, rhs: Node) -> Option<Ordering> {
-        self.post_order_rank[rhs].partial_cmp(&self.post_order_rank[lhs])
+    pub fn cmp_in_dominator_order(&self, lhs: Node, rhs: Node) -> Ordering {
+        self.post_order_rank[rhs].cmp(&self.post_order_rank[lhs])
     }
 
     /// Returns true if `a` dominates `b`.
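`rank_partial_cmp` returned `Option<Ordering>` even though the underlying ranks always compare, so callers had to unwrap a comparison that could not fail; `cmp_in_dominator_order` makes the total order explicit. A hypothetical caller (the `Dominators` value and node list are stand-ins, not from this diff):

    // Sort nodes so that every dominator precedes the nodes it dominates.
    // `dominators` is a computed `Dominators<Node>`; `nodes` is a Vec<Node>.
    nodes.sort_by(|&a, &b| dominators.cmp_in_dominator_order(a, b));

Because `cmp` on the `post_order_rank` values is total, `sort_by` needs no unwrapping of a `partial_cmp` result.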
diff --git a/compiler/rustc_data_structures/src/graph/implementation/mod.rs b/compiler/rustc_data_structures/src/graph/implementation/mod.rs
index 9ff401c3c..3910c6fa4 100644
--- a/compiler/rustc_data_structures/src/graph/implementation/mod.rs
+++ b/compiler/rustc_data_structures/src/graph/implementation/mod.rs
@@ -20,7 +20,6 @@
 //! the field `next_edge`). Each of those fields is an array that should
 //! be indexed by the direction (see the type `Direction`).
 
-use crate::snapshot_vec::{SnapshotVec, SnapshotVecDelegate};
 use rustc_index::bit_set::BitSet;
 use std::fmt::Debug;
 
@@ -28,8 +27,8 @@ use std::fmt::Debug;
 mod tests;
 
 pub struct Graph<N, E> {
-    nodes: SnapshotVec<Node<N>>,
-    edges: SnapshotVec<Edge<E>>,
+    nodes: Vec<Node<N>>,
+    edges: Vec<Edge<E>>,
 }
 
 pub struct Node<N> {
@@ -45,20 +44,6 @@ pub struct Edge<E> {
     pub data: E,
 }
 
-impl<N> SnapshotVecDelegate for Node<N> {
-    type Value = Node<N>;
-    type Undo = ();
-
-    fn reverse(_: &mut Vec<Node<N>>, _: ()) {}
-}
-
-impl<N> SnapshotVecDelegate for Edge<N> {
-    type Value = Edge<N>;
-    type Undo = ();
-
-    fn reverse(_: &mut Vec<Edge<N>>, _: ()) {}
-}
-
 #[derive(Copy, Clone, PartialEq, Debug)]
 pub struct NodeIndex(pub usize);
 
@@ -86,11 +71,11 @@ impl NodeIndex {
 
 impl<N: Debug, E: Debug> Graph<N, E> {
     pub fn new() -> Graph<N, E> {
-        Graph { nodes: SnapshotVec::new(), edges: SnapshotVec::new() }
+        Graph { nodes: Vec::new(), edges: Vec::new() }
     }
 
     pub fn with_capacity(nodes: usize, edges: usize) -> Graph<N, E> {
-        Graph { nodes: SnapshotVec::with_capacity(nodes), edges: SnapshotVec::with_capacity(edges) }
+        Graph { nodes: Vec::with_capacity(nodes), edges: Vec::with_capacity(edges) }
     }
 
     // # Simple accessors
diff --git a/compiler/rustc_data_structures/src/lib.rs b/compiler/rustc_data_structures/src/lib.rs
index 337720897..461ec3a90 100644
--- a/compiler/rustc_data_structures/src/lib.rs
+++ b/compiler/rustc_data_structures/src/lib.rs
@@ -37,7 +37,7 @@
 #![allow(rustc::potential_query_instability)]
 #![deny(rustc::untranslatable_diagnostic)]
 #![deny(rustc::diagnostic_outside_of_impl)]
-#![cfg_attr(not(bootstrap), allow(internal_features))]
+#![allow(internal_features)]
 #![deny(unsafe_op_in_unsafe_fn)]
 
 #[macro_use]
@@ -47,11 +47,14 @@ extern crate cfg_if;
 #[macro_use]
 extern crate rustc_macros;
 
+use std::fmt;
+
 pub use rustc_index::static_assert_size;
 
+/// This calls the passed function while ensuring it won't be inlined into the caller.
 #[inline(never)]
 #[cold]
-pub fn cold_path<F: FnOnce() -> R, R>(f: F) -> R {
+pub fn outline<F: FnOnce() -> R, R>(f: F) -> R {
     f()
 }
 
@@ -126,6 +129,23 @@ impl<F: FnOnce()> Drop for OnDrop<F> {
     }
 }
 
+/// Turns a closure that takes an `&mut Formatter` into something that can be display-formatted.
+pub fn make_display(f: impl Fn(&mut fmt::Formatter<'_>) -> fmt::Result) -> impl fmt::Display {
+    struct Printer<F> {
+        f: F,
+    }
+    impl<F> fmt::Display for Printer<F>
+    where
+        F: Fn(&mut fmt::Formatter<'_>) -> fmt::Result,
+    {
+        fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+            (self.f)(fmt)
+        }
+    }
+
+    Printer { f }
+}
+
 // See comments in src/librustc_middle/lib.rs
 #[doc(hidden)]
 pub fn __noop_fix_for_27438() {}
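`make_display` avoids allocating an intermediate `String` when a value only needs to be formatted on demand. A minimal self-contained usage sketch (the surrounding `main` and values are illustrative; the helper is restated so the snippet compiles standalone):

    use std::fmt;

    // Same shape as the helper added above.
    fn make_display(f: impl Fn(&mut fmt::Formatter<'_>) -> fmt::Result) -> impl fmt::Display {
        struct Printer<F>(F);
        impl<F: Fn(&mut fmt::Formatter<'_>) -> fmt::Result> fmt::Display for Printer<F> {
            fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
                (self.0)(fmt)
            }
        }
        Printer(f)
    }

    fn main() {
        let count = 3;
        // The closure runs each time the value is formatted, not when it is built.
        let msg = make_display(move |f| write!(f, "{count} items remaining"));
        assert_eq!(msg.to_string(), "3 items remaining");
    }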
diff --git a/compiler/rustc_data_structures/src/marker.rs b/compiler/rustc_data_structures/src/marker.rs
index f8c06f9a8..b067f9d45 100644
--- a/compiler/rustc_data_structures/src/marker.rs
+++ b/compiler/rustc_data_structures/src/marker.rs
@@ -92,7 +92,6 @@ cfg_if!(
     [std::collections::BTreeMap<K, V, A> where K: DynSend, V: DynSend, A: std::alloc::Allocator + Clone + DynSend]
     [Vec<T, A> where T: DynSend, A: std::alloc::Allocator + DynSend]
     [Box<T, A> where T: ?Sized + DynSend, A: std::alloc::Allocator + DynSend]
-    [crate::sync::Lock<T> where T: DynSend]
     [crate::sync::RwLock<T> where T: DynSend]
     [crate::tagged_ptr::CopyTaggedPtr<P, T, CP> where P: Send + crate::tagged_ptr::Pointer, T: Send + crate::tagged_ptr::Tag, const CP: bool]
     [rustc_arena::TypedArena<T> where T: DynSend]
@@ -171,7 +170,6 @@ cfg_if!(
     [std::collections::BTreeMap<K, V, A> where K: DynSync, V: DynSync, A: std::alloc::Allocator + Clone + DynSync]
     [Vec<T, A> where T: DynSync, A: std::alloc::Allocator + DynSync]
     [Box<T, A> where T: ?Sized + DynSync, A: std::alloc::Allocator + DynSync]
-    [crate::sync::Lock<T> where T: DynSend]
     [crate::sync::RwLock<T> where T: DynSend + DynSync]
     [crate::sync::OneThread<T> where T]
     [crate::sync::WorkerLocal<T> where T: DynSend]
diff --git a/compiler/rustc_data_structures/src/memmap.rs b/compiler/rustc_data_structures/src/memmap.rs
index ca908671a..30403a614 100644
--- a/compiler/rustc_data_structures/src/memmap.rs
+++ b/compiler/rustc_data_structures/src/memmap.rs
@@ -11,9 +11,14 @@ pub struct Mmap(Vec<u8>);
 
 #[cfg(not(target_arch = "wasm32"))]
 impl Mmap {
+    /// # Safety
+    ///
+    /// The given file must not be mutated (i.e., not written, not truncated, ...) until the mapping is closed.
+    ///
+    /// However in practice most callers do not ensure this, so uses of this function are likely unsound.
     #[inline]
     pub unsafe fn map(file: File) -> io::Result<Self> {
-        // Safety: this is in fact not safe.
+        // Safety: the caller must ensure that this is safe.
         unsafe { memmap2::Mmap::map(&file).map(Mmap) }
     }
 }
diff --git a/compiler/rustc_data_structures/src/profiling.rs b/compiler/rustc_data_structures/src/profiling.rs
index 3c76c2b79..e688feb5f 100644
--- a/compiler/rustc_data_structures/src/profiling.rs
+++ b/compiler/rustc_data_structures/src/profiling.rs
@@ -81,8 +81,8 @@
 //!
 //! [mm]: https://github.com/rust-lang/measureme/
 
-use crate::cold_path;
 use crate::fx::FxHashMap;
+use crate::outline;
 
 use std::borrow::Borrow;
 use std::collections::hash_map::Entry;
@@ -697,7 +697,7 @@ impl<'a> TimingGuard<'a> {
     #[inline]
     pub fn finish_with_query_invocation_id(self, query_invocation_id: QueryInvocationId) {
         if let Some(guard) = self.0 {
-            cold_path(|| {
+            outline(|| {
                 let event_id = StringId::new_virtual(query_invocation_id.0);
                 let event_id = EventId::from_virtual(event_id);
                 guard.finish_with_override_event_id(event_id);
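The `cold_path` → `outline` rename reflects what the function actually guarantees: `#[inline(never)]` keeps the closure body out of the caller's machine code, and `#[cold]` tells the optimizer the call is unlikely. A usage sketch (the caller and its threshold are hypothetical; the helper itself is the one added in lib.rs above):

    #[inline(never)]
    #[cold]
    fn outline<F: FnOnce() -> R, R>(f: F) -> R {
        f()
    }

    fn record(len: usize) {
        if len > 4096 {
            // The formatting machinery stays out of the hot caller.
            outline(|| eprintln!("unusually large entry: {len} bytes"));
        }
    }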
diff --git a/compiler/rustc_data_structures/src/sharded.rs b/compiler/rustc_data_structures/src/sharded.rs
index 40cbf1495..29516fffd 100644
--- a/compiler/rustc_data_structures/src/sharded.rs
+++ b/compiler/rustc_data_structures/src/sharded.rs
@@ -1,31 +1,29 @@
 use crate::fx::{FxHashMap, FxHasher};
 #[cfg(parallel_compiler)]
-use crate::sync::is_dyn_thread_safe;
-use crate::sync::{CacheAligned, Lock, LockGuard};
+use crate::sync::{is_dyn_thread_safe, CacheAligned};
+use crate::sync::{Lock, LockGuard, Mode};
+#[cfg(parallel_compiler)]
+use itertools::Either;
 use std::borrow::Borrow;
 use std::collections::hash_map::RawEntryMut;
 use std::hash::{Hash, Hasher};
+use std::iter;
 use std::mem;
 
-#[cfg(parallel_compiler)]
 // 32 shards is sufficient to reduce contention on an 8-core Ryzen 7 1700,
 // but this should be tested on higher core count CPUs. How the `Sharded` type gets used
 // may also affect the ideal number of shards.
 const SHARD_BITS: usize = 5;
 
-#[cfg(not(parallel_compiler))]
-const SHARD_BITS: usize = 0;
-
-pub const SHARDS: usize = 1 << SHARD_BITS;
+#[cfg(parallel_compiler)]
+const SHARDS: usize = 1 << SHARD_BITS;
 
 /// An array of cache-line aligned inner locked structures with convenience methods.
-pub struct Sharded<T> {
-    /// This mask is used to ensure that accesses are inbounds of `shards`.
-    /// When dynamic thread safety is off, this field is set to 0 causing only
-    /// a single shard to be used for greater cache efficiency.
+/// A single field is used when the compiler uses only one thread.
+pub enum Sharded<T> {
+    Single(Lock<T>),
     #[cfg(parallel_compiler)]
-    mask: usize,
-    shards: [CacheAligned<Lock<T>>; SHARDS],
+    Shards(Box<[CacheAligned<Lock<T>>; SHARDS]>),
 }
 
 impl<T: Default> Default for Sharded<T> {
@@ -38,62 +36,133 @@ impl<T> Sharded<T> {
     #[inline]
     pub fn new(mut value: impl FnMut() -> T) -> Self {
-        Sharded {
-            #[cfg(parallel_compiler)]
-            mask: if is_dyn_thread_safe() { SHARDS - 1 } else { 0 },
-            shards: [(); SHARDS].map(|()| CacheAligned(Lock::new(value()))),
+        #[cfg(parallel_compiler)]
+        if is_dyn_thread_safe() {
+            return Sharded::Shards(Box::new(
+                [(); SHARDS].map(|()| CacheAligned(Lock::new(value()))),
+            ));
         }
+
+        Sharded::Single(Lock::new(value()))
     }
 
-    #[inline(always)]
-    fn mask(&self) -> usize {
-        #[cfg(parallel_compiler)]
-        {
-            if SHARDS == 1 { 0 } else { self.mask }
-        }
-        #[cfg(not(parallel_compiler))]
-        {
-            0
+    /// The shard is selected by hashing `val` with `FxHasher`.
+    #[inline]
+    pub fn get_shard_by_value<K: Hash + ?Sized>(&self, _val: &K) -> &Lock<T> {
+        match self {
+            Self::Single(single) => &single,
+            #[cfg(parallel_compiler)]
+            Self::Shards(..) => self.get_shard_by_hash(make_hash(_val)),
         }
     }
 
-    #[inline(always)]
-    fn count(&self) -> usize {
-        // `self.mask` is always one below the used shard count
-        self.mask() + 1
+    #[inline]
+    pub fn get_shard_by_hash(&self, hash: u64) -> &Lock<T> {
+        self.get_shard_by_index(get_shard_hash(hash))
+    }
+
+    #[inline]
+    pub fn get_shard_by_index(&self, _i: usize) -> &Lock<T> {
+        match self {
+            Self::Single(single) => &single,
+            #[cfg(parallel_compiler)]
+            Self::Shards(shards) => {
+                // SAFETY: The index gets ANDed with the shard mask, ensuring it is always inbounds.
+                unsafe { &shards.get_unchecked(_i & (SHARDS - 1)).0 }
+            }
+        }
     }
 
     /// The shard is selected by hashing `val` with `FxHasher`.
     #[inline]
-    pub fn get_shard_by_value<K: Hash + ?Sized>(&self, val: &K) -> &Lock<T> {
-        self.get_shard_by_hash(if SHARDS == 1 { 0 } else { make_hash(val) })
+    #[track_caller]
+    pub fn lock_shard_by_value<K: Hash + ?Sized>(&self, _val: &K) -> LockGuard<'_, T> {
+        match self {
+            Self::Single(single) => {
+                // Syncronization is disabled so use the `lock_assume_no_sync` method optimized
+                // for that case.
+
+                // SAFETY: We know `is_dyn_thread_safe` was false when creating the lock thus
+                // `might_be_dyn_thread_safe` was also false.
+                unsafe { single.lock_assume(Mode::NoSync) }
+            }
+            #[cfg(parallel_compiler)]
+            Self::Shards(..) => self.lock_shard_by_hash(make_hash(_val)),
+        }
+    }
+
+    #[inline]
+    #[track_caller]
+    pub fn lock_shard_by_hash(&self, hash: u64) -> LockGuard<'_, T> {
+        self.lock_shard_by_index(get_shard_hash(hash))
     }
 
     #[inline]
-    pub fn get_shard_by_hash(&self, hash: u64) -> &Lock<T> {
-        self.get_shard_by_index(get_shard_hash(hash))
+    #[track_caller]
+    pub fn lock_shard_by_index(&self, _i: usize) -> LockGuard<'_, T> {
+        match self {
+            Self::Single(single) => {
+                // Syncronization is disabled so use the `lock_assume_no_sync` method optimized
+                // for that case.
+
+                // SAFETY: We know `is_dyn_thread_safe` was false when creating the lock thus
+                // `might_be_dyn_thread_safe` was also false.
+                unsafe { single.lock_assume(Mode::NoSync) }
+            }
+            #[cfg(parallel_compiler)]
+            Self::Shards(shards) => {
+                // Syncronization is enabled so use the `lock_assume_sync` method optimized
+                // for that case.
+
+                // SAFETY (get_unchecked): The index gets ANDed with the shard mask, ensuring it is
+                // always inbounds.
+                // SAFETY (lock_assume_sync): We know `is_dyn_thread_safe` was true when creating
+                // the lock thus `might_be_dyn_thread_safe` was also true.
+                unsafe { shards.get_unchecked(_i & (SHARDS - 1)).0.lock_assume(Mode::Sync) }
+            }
+        }
     }
 
     #[inline]
-    pub fn get_shard_by_index(&self, i: usize) -> &Lock<T> {
-        // SAFETY: The index get ANDed with the mask, ensuring it is always inbounds.
-        unsafe { &self.shards.get_unchecked(i & self.mask()).0 }
+    pub fn lock_shards(&self) -> impl Iterator<Item = LockGuard<'_, T>> {
+        match self {
+            #[cfg(not(parallel_compiler))]
+            Self::Single(single) => iter::once(single.lock()),
+            #[cfg(parallel_compiler)]
+            Self::Single(single) => Either::Left(iter::once(single.lock())),
+            #[cfg(parallel_compiler)]
+            Self::Shards(shards) => Either::Right(shards.iter().map(|shard| shard.0.lock())),
+        }
     }
 
-    pub fn lock_shards(&self) -> Vec<LockGuard<'_, T>> {
-        (0..self.count()).map(|i| self.get_shard_by_index(i).lock()).collect()
+    #[inline]
+    pub fn try_lock_shards(&self) -> impl Iterator<Item = Option<LockGuard<'_, T>>> {
+        match self {
+            #[cfg(not(parallel_compiler))]
+            Self::Single(single) => iter::once(single.try_lock()),
+            #[cfg(parallel_compiler)]
+            Self::Single(single) => Either::Left(iter::once(single.try_lock())),
+            #[cfg(parallel_compiler)]
+            Self::Shards(shards) => Either::Right(shards.iter().map(|shard| shard.0.try_lock())),
+        }
     }
+}
 
-    pub fn try_lock_shards(&self) -> Option<Vec<LockGuard<'_, T>>> {
-        (0..self.count()).map(|i| self.get_shard_by_index(i).try_lock()).collect()
+#[inline]
+pub fn shards() -> usize {
+    #[cfg(parallel_compiler)]
+    if is_dyn_thread_safe() {
+        return SHARDS;
     }
+
+    1
 }
 
 pub type ShardedHashMap<K, V> = Sharded<FxHashMap<K, V>>;
 
 impl<K: Eq, V> ShardedHashMap<K, V> {
     pub fn len(&self) -> usize {
-        self.lock_shards().iter().map(|shard| shard.len()).sum()
+        self.lock_shards().map(|shard| shard.len()).sum()
     }
 }
 
@@ -105,7 +174,7 @@ impl<K: Eq + Hash + Copy> ShardedHashMap<K, ()> {
         Q: Hash + Eq,
     {
         let hash = make_hash(value);
-        let mut shard = self.get_shard_by_hash(hash).lock();
+        let mut shard = self.lock_shard_by_hash(hash);
         let entry = shard.raw_entry_mut().from_key_hashed_nocheck(hash, value);
 
         match entry {
@@ -125,7 +194,7 @@ impl<K: Eq + Hash + Copy> ShardedHashMap<K, ()> {
         Q: Hash + Eq,
     {
         let hash = make_hash(&value);
-        let mut shard = self.get_shard_by_hash(hash).lock();
+        let mut shard = self.lock_shard_by_hash(hash);
        let entry = shard.raw_entry_mut().from_key_hashed_nocheck(hash, &value);
 
         match entry {
@@ -147,7 +216,7 @@ pub trait IntoPointer {
 impl<K: Eq + Hash + Copy + IntoPointer> ShardedHashMap<K, ()> {
     pub fn contains_pointer_to<T: Hash + IntoPointer>(&self, value: &T) -> bool {
         let hash = make_hash(&value);
-        let shard = self.get_shard_by_hash(hash).lock();
+        let shard = self.lock_shard_by_hash(hash);
         let value = value.into_pointer();
         shard.raw_entry().from_hash(hash, |entry| entry.into_pointer() == value).is_some()
     }
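`get_shard_hash`, called by `get_shard_by_hash` and `lock_shard_by_hash` above, is not part of this hunk. A sketch consistent with the retained `std::mem` import and the `SHARD_BITS` constant, assuming the selector takes a high-order slice of the hash (hashbrown consumes the top 7 bits and the low bits, so a middle-high slice avoids reusing them):

    const SHARD_BITS: usize = 5;

    // Assumed helper, not shown in this diff.
    fn get_shard_hash(hash: u64) -> usize {
        let hash_len = std::mem::size_of::<usize>();
        // Skip the top 7 bits (used by hashbrown) and take the next SHARD_BITS bits.
        (hash >> (hash_len * 8 - 7 - SHARD_BITS)) as usize
    }

The caller then masks the result with `SHARDS - 1`, which is why `get_shard_by_index` can use `get_unchecked` safely.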
diff --git a/compiler/rustc_data_structures/src/small_c_str.rs b/compiler/rustc_data_structures/src/small_c_str.rs
index 719e4e3d9..349fd7f97 100644
--- a/compiler/rustc_data_structures/src/small_c_str.rs
+++ b/compiler/rustc_data_structures/src/small_c_str.rs
@@ -79,3 +79,9 @@ impl<'a> FromIterator<&'a str> for SmallCStr {
         Self { data }
     }
 }
+
+impl From<&ffi::CStr> for SmallCStr {
+    fn from(s: &ffi::CStr) -> Self {
+        Self { data: SmallVec::from_slice(s.to_bytes()) }
+    }
+}
diff --git a/compiler/rustc_data_structures/src/sync.rs b/compiler/rustc_data_structures/src/sync.rs
index 25a082373..cca043ba0 100644
--- a/compiler/rustc_data_structures/src/sync.rs
+++ b/compiler/rustc_data_structures/src/sync.rs
@@ -26,7 +26,8 @@
 //! | `AtomicU64`             | `Cell<u64>`         | `atomic::AtomicU64`     |
 //! | `AtomicUsize`           | `Cell<usize>`       | `atomic::AtomicUsize`   |
 //! |                         |                     |                         |
-//! | `Lock<T>`               | `RefCell<T>`        | `parking_lot::Mutex<T>` |
+//! | `Lock<T>`               | `RefCell<T>`        | `RefCell<T>` or         |
+//! |                         |                     | `parking_lot::Mutex<T>` |
 //! | `RwLock<T>`             | `RefCell<T>`        | `parking_lot::RwLock<T>`|
 //! | `MTLock<T>` [^1]        | `T`                 | `Lock<T>`               |
 //! | `MTLockRef<'a, T>` [^2] | `&'a mut MTLock<T>` | `&'a MTLock<T>`         |
@@ -43,11 +44,18 @@ pub use crate::marker::*;
 use std::collections::HashMap;
 use std::hash::{BuildHasher, Hash};
 use std::ops::{Deref, DerefMut};
-use std::panic::{catch_unwind, resume_unwind, AssertUnwindSafe};
+
+mod lock;
+pub use lock::{Lock, LockGuard, Mode};
 
 mod worker_local;
 pub use worker_local::{Registry, WorkerLocal};
 
+mod parallel;
+#[cfg(parallel_compiler)]
+pub use parallel::scope;
+pub use parallel::{join, par_for_each_in, par_map, parallel_guard};
+
 pub use std::sync::atomic::Ordering;
 pub use std::sync::atomic::Ordering::SeqCst;
 
@@ -55,6 +63,9 @@ pub use vec::{AppendOnlyIndexVec, AppendOnlyVec};
 
 mod vec;
 
+mod freeze;
+pub use freeze::{FreezeLock, FreezeReadGuard, FreezeWriteGuard};
+
 mod mode {
     use super::Ordering;
     use std::sync::atomic::AtomicU8;
@@ -75,6 +86,12 @@ mod mode {
         }
     }
 
+    // Whether thread safety might be enabled.
+    #[inline]
+    pub fn might_be_dyn_thread_safe() -> bool {
+        DYN_THREAD_SAFE_MODE.load(Ordering::Relaxed) != DYN_NOT_THREAD_SAFE
+    }
+
     // Only set by the `-Z threads` compile option
     pub fn set_dyn_thread_safe_mode(mode: bool) {
         let set: u8 = if mode { DYN_THREAD_SAFE } else { DYN_NOT_THREAD_SAFE };
@@ -94,14 +111,15 @@ pub use mode::{is_dyn_thread_safe, set_dyn_thread_safe_mode};
 
 cfg_if! {
     if #[cfg(not(parallel_compiler))] {
+        use std::ops::Add;
+        use std::cell::Cell;
+
         pub unsafe auto trait Send {}
         pub unsafe auto trait Sync {}
 
         unsafe impl<T> Send for T {}
         unsafe impl<T> Sync for T {}
 
-        use std::ops::Add;
-
         /// This is a single threaded variant of `AtomicU64`, `AtomicUsize`, etc.
         /// It has explicit ordering arguments and is only intended for use with
         /// the native atomic types.
@@ -182,88 +200,17 @@ cfg_if! {
         pub type AtomicU32 = Atomic<u32>;
         pub type AtomicU64 = Atomic<u64>;
 
-        pub fn join<A, B, RA, RB>(oper_a: A, oper_b: B) -> (RA, RB)
-            where A: FnOnce() -> RA,
-                  B: FnOnce() -> RB
-        {
-            (oper_a(), oper_b())
-        }
-
-        #[macro_export]
-        macro_rules! parallel {
-            ($($blocks:block),*) => {
-                // We catch panics here ensuring that all the blocks execute.
-                // This makes behavior consistent with the parallel compiler.
-                let mut panic = None;
-                $(
-                    if let Err(p) = ::std::panic::catch_unwind(
-                        ::std::panic::AssertUnwindSafe(|| $blocks)
-                    ) {
-                        if panic.is_none() {
-                            panic = Some(p);
-                        }
-                    }
-                )*
-                if let Some(panic) = panic {
-                    ::std::panic::resume_unwind(panic);
-                }
-            }
-        }
-
-        pub fn par_for_each_in<T: IntoIterator>(t: T, mut for_each: impl FnMut(T::Item) + Sync + Send) {
-            // We catch panics here ensuring that all the loop iterations execute.
-            // This makes behavior consistent with the parallel compiler.
-            let mut panic = None;
-            t.into_iter().for_each(|i| {
-                if let Err(p) = catch_unwind(AssertUnwindSafe(|| for_each(i))) {
-                    if panic.is_none() {
-                        panic = Some(p);
-                    }
-                }
-            });
-            if let Some(panic) = panic {
-                resume_unwind(panic);
-            }
-        }
-
-        pub fn par_map<T: IntoIterator, R, C: FromIterator<R>>(
-            t: T,
-            mut map: impl FnMut(<<T as IntoIterator>::IntoIter as Iterator>::Item) -> R,
-        ) -> C {
-            // We catch panics here ensuring that all the loop iterations execute.
-            let mut panic = None;
-            let r = t.into_iter().filter_map(|i| {
-                match catch_unwind(AssertUnwindSafe(|| map(i))) {
-                    Ok(r) => Some(r),
-                    Err(p) => {
-                        if panic.is_none() {
-                            panic = Some(p);
-                        }
-                        None
-                    }
-                }
-            }).collect();
-            if let Some(panic) = panic {
-                resume_unwind(panic);
-            }
-            r
-        }
-
         pub use std::rc::Rc as Lrc;
         pub use std::rc::Weak as Weak;
         pub use std::cell::Ref as ReadGuard;
         pub use std::cell::Ref as MappedReadGuard;
         pub use std::cell::RefMut as WriteGuard;
         pub use std::cell::RefMut as MappedWriteGuard;
-        pub use std::cell::RefMut as LockGuard;
         pub use std::cell::RefMut as MappedLockGuard;
 
-        pub use std::cell::OnceCell;
+        pub use std::cell::OnceCell as OnceLock;
 
         use std::cell::RefCell as InnerRwLock;
-        use std::cell::RefCell as InnerLock;
-
-        use std::cell::Cell;
 
         pub type MTLockRef<'a, T> = &'a mut MTLock<T>;
@@ -313,10 +260,9 @@ cfg_if! {
         pub use parking_lot::RwLockWriteGuard as WriteGuard;
         pub use parking_lot::MappedRwLockWriteGuard as MappedWriteGuard;
 
-        pub use parking_lot::MutexGuard as LockGuard;
         pub use parking_lot::MappedMutexGuard as MappedLockGuard;
 
-        pub use std::sync::OnceLock as OnceCell;
+        pub use std::sync::OnceLock;
 
         pub use std::sync::atomic::{AtomicBool, AtomicUsize, AtomicU32, AtomicU64};
 
@@ -355,171 +301,10 @@ cfg_if! {
             }
         }
 
-        use parking_lot::Mutex as InnerLock;
         use parking_lot::RwLock as InnerRwLock;
 
         use std::thread;
 
-        #[inline]
-        pub fn join<A, B, RA: DynSend, RB: DynSend>(oper_a: A, oper_b: B) -> (RA, RB)
-        where
-            A: FnOnce() -> RA + DynSend,
-            B: FnOnce() -> RB + DynSend,
-        {
-            if mode::is_dyn_thread_safe() {
-                let oper_a = FromDyn::from(oper_a);
-                let oper_b = FromDyn::from(oper_b);
-                let (a, b) = rayon::join(move || FromDyn::from(oper_a.into_inner()()), move || FromDyn::from(oper_b.into_inner()()));
-                (a.into_inner(), b.into_inner())
-            } else {
-                (oper_a(), oper_b())
-            }
-        }
-
-        // This function only works when `mode::is_dyn_thread_safe()`.
-        pub fn scope<'scope, OP, R>(op: OP) -> R
-        where
-            OP: FnOnce(&rayon::Scope<'scope>) -> R + DynSend,
-            R: DynSend,
-        {
-            let op = FromDyn::from(op);
-            rayon::scope(|s| FromDyn::from(op.into_inner()(s))).into_inner()
-        }
-
-        /// Runs a list of blocks in parallel. The first block is executed immediately on
-        /// the current thread. Use that for the longest running block.
-        #[macro_export]
-        macro_rules! parallel {
-            (impl $fblock:block [$($c:expr,)*] [$block:expr $(, $rest:expr)*]) => {
-                parallel!(impl $fblock [$block, $($c,)*] [$($rest),*])
-            };
-            (impl $fblock:block [$($blocks:expr,)*] []) => {
-                ::rustc_data_structures::sync::scope(|s| {
-                    $(let block = rustc_data_structures::sync::FromDyn::from(|| $blocks);
-                    s.spawn(move |_| block.into_inner()());)*
-                    (|| $fblock)();
-                });
-            };
-            ($fblock:block, $($blocks:block),*) => {
-                if rustc_data_structures::sync::is_dyn_thread_safe() {
-                    // Reverse the order of the later blocks since Rayon executes them in reverse order
-                    // when using a single thread. This ensures the execution order matches that
-                    // of a single threaded rustc.
-                    parallel!(impl $fblock [] [$($blocks),*]);
-                } else {
-                    // We catch panics here ensuring that all the blocks execute.
-                    // This makes behavior consistent with the parallel compiler.
-                    let mut panic = None;
-                    if let Err(p) = ::std::panic::catch_unwind(
-                        ::std::panic::AssertUnwindSafe(|| $fblock)
-                    ) {
-                        if panic.is_none() {
-                            panic = Some(p);
-                        }
-                    }
-                    $(
-                        if let Err(p) = ::std::panic::catch_unwind(
-                            ::std::panic::AssertUnwindSafe(|| $blocks)
-                        ) {
-                            if panic.is_none() {
-                                panic = Some(p);
-                            }
-                        }
-                    )*
-                    if let Some(panic) = panic {
-                        ::std::panic::resume_unwind(panic);
-                    }
-                }
-            };
-        }
-
-        use rayon::iter::{FromParallelIterator, IntoParallelIterator, ParallelIterator};
-
-        pub fn par_for_each_in<I, T: IntoIterator<Item = I> + IntoParallelIterator<Item = I>>(
-            t: T,
-            for_each: impl Fn(I) + DynSync + DynSend
-        ) {
-            if mode::is_dyn_thread_safe() {
-                let for_each = FromDyn::from(for_each);
-                let panic: Lock<Option<_>> = Lock::new(None);
-                t.into_par_iter().for_each(|i| if let Err(p) = catch_unwind(AssertUnwindSafe(|| for_each(i))) {
-                    let mut l = panic.lock();
-                    if l.is_none() {
-                        *l = Some(p)
-                    }
-                });
-
-                if let Some(panic) = panic.into_inner() {
-                    resume_unwind(panic);
-                }
-            } else {
-                // We catch panics here ensuring that all the loop iterations execute.
-                // This makes behavior consistent with the parallel compiler.
-                let mut panic = None;
-                t.into_iter().for_each(|i| {
-                    if let Err(p) = catch_unwind(AssertUnwindSafe(|| for_each(i))) {
-                        if panic.is_none() {
-                            panic = Some(p);
-                        }
-                    }
-                });
-                if let Some(panic) = panic {
-                    resume_unwind(panic);
-                }
-            }
-        }
-
-        pub fn par_map<
-            I,
-            T: IntoIterator<Item = I> + IntoParallelIterator<Item = I>,
-            R: std::marker::Send,
-            C: FromIterator<R> + FromParallelIterator<R>
-        >(
-            t: T,
-            map: impl Fn(I) -> R + DynSync + DynSend
-        ) -> C {
-            if mode::is_dyn_thread_safe() {
-                let panic: Lock<Option<_>> = Lock::new(None);
-                let map = FromDyn::from(map);
-                // We catch panics here ensuring that all the loop iterations execute.
-                let r = t.into_par_iter().filter_map(|i| {
-                    match catch_unwind(AssertUnwindSafe(|| map(i))) {
-                        Ok(r) => Some(r),
-                        Err(p) => {
-                            let mut l = panic.lock();
-                            if l.is_none() {
-                                *l = Some(p);
-                            }
-                            None
-                        },
-                    }
-                }).collect();
-
-                if let Some(panic) = panic.into_inner() {
-                    resume_unwind(panic);
-                }
-                r
-            } else {
-                // We catch panics here ensuring that all the loop iterations execute.
-                let mut panic = None;
-                let r = t.into_iter().filter_map(|i| {
-                    match catch_unwind(AssertUnwindSafe(|| map(i))) {
-                        Ok(r) => Some(r),
-                        Err(p) => {
-                            if panic.is_none() {
-                                panic = Some(p);
-                            }
-                            None
-                        }
-                    }
-                }).collect();
-                if let Some(panic) = panic {
-                    resume_unwind(panic);
-                }
-                r
-            }
-        }
-
         /// This makes locks panic if they are already held.
         /// It is only useful when you are running in a single thread
         const ERROR_CHECKING: bool = false;
@@ -542,81 +327,6 @@ impl<K: Eq + Hash, V: Eq, S: BuildHasher> HashMapExt<K, V> for HashMap<K, V, S>
     }
 }
 
-#[derive(Debug)]
-pub struct Lock<T>(InnerLock<T>);
-
-impl<T> Lock<T> {
-    #[inline(always)]
-    pub fn new(inner: T) -> Self {
-        Lock(InnerLock::new(inner))
-    }
-
-    #[inline(always)]
-    pub fn into_inner(self) -> T {
-        self.0.into_inner()
-    }
-
-    #[inline(always)]
-    pub fn get_mut(&mut self) -> &mut T {
-        self.0.get_mut()
-    }
-
-    #[cfg(parallel_compiler)]
-    #[inline(always)]
-    pub fn try_lock(&self) -> Option<LockGuard<'_, T>> {
-        self.0.try_lock()
-    }
-
-    #[cfg(not(parallel_compiler))]
-    #[inline(always)]
-    pub fn try_lock(&self) -> Option<LockGuard<'_, T>> {
-        self.0.try_borrow_mut().ok()
-    }
-
-    #[cfg(parallel_compiler)]
-    #[inline(always)]
-    #[track_caller]
-    pub fn lock(&self) -> LockGuard<'_, T> {
-        if ERROR_CHECKING {
-            self.0.try_lock().expect("lock was already held")
-        } else {
-            self.0.lock()
-        }
-    }
-
-    #[cfg(not(parallel_compiler))]
-    #[inline(always)]
-    #[track_caller]
-    pub fn lock(&self) -> LockGuard<'_, T> {
-        self.0.borrow_mut()
-    }
-
-    #[inline(always)]
-    #[track_caller]
-    pub fn with_lock<F: FnOnce(&mut T) -> R, R>(&self, f: F) -> R {
-        f(&mut *self.lock())
-    }
-
-    #[inline(always)]
-    #[track_caller]
-    pub fn borrow(&self) -> LockGuard<'_, T> {
-        self.lock()
-    }
-
-    #[inline(always)]
-    #[track_caller]
-    pub fn borrow_mut(&self) -> LockGuard<'_, T> {
-        self.lock()
-    }
-}
-
-impl<T: Default> Default for Lock<T> {
-    #[inline]
-    fn default() -> Self {
-        Lock::new(T::default())
-    }
-}
-
 #[derive(Debug, Default)]
 pub struct RwLock<T>(InnerRwLock<T>);
diff --git a/compiler/rustc_data_structures/src/sync/freeze.rs b/compiler/rustc_data_structures/src/sync/freeze.rs
new file mode 100644
index 000000000..466c44f59
--- /dev/null
+++ b/compiler/rustc_data_structures/src/sync/freeze.rs
@@ -0,0 +1,200 @@
+use crate::sync::{AtomicBool, ReadGuard, RwLock, WriteGuard};
+#[cfg(parallel_compiler)]
+use crate::sync::{DynSend, DynSync};
+use std::{
+    cell::UnsafeCell,
+    intrinsics::likely,
+    marker::PhantomData,
+    ops::{Deref, DerefMut},
+    ptr::NonNull,
+    sync::atomic::Ordering,
+};
+
+/// A type which allows mutation using a lock until
+/// the value is frozen and can be accessed lock-free.
+///
+/// Unlike `RwLock`, it can be used to prevent mutation past a point.
+#[derive(Default)]
+pub struct FreezeLock<T> {
+    data: UnsafeCell<T>,
+    frozen: AtomicBool,
+
+    /// This lock protects writes to the `data` and `frozen` fields.
+    lock: RwLock<()>,
+}
+
+#[cfg(parallel_compiler)]
+unsafe impl<T: DynSync + DynSend> DynSync for FreezeLock<T> {}
+
+impl<T> FreezeLock<T> {
+    #[inline]
+    pub fn new(value: T) -> Self {
+        Self::with(value, false)
+    }
+
+    #[inline]
+    pub fn frozen(value: T) -> Self {
+        Self::with(value, true)
+    }
+
+    #[inline]
+    pub fn with(value: T, frozen: bool) -> Self {
+        Self {
+            data: UnsafeCell::new(value),
+            frozen: AtomicBool::new(frozen),
+            lock: RwLock::new(()),
+        }
+    }
+
+    /// Clones the inner value along with the frozen state.
+    #[inline]
+    pub fn clone(&self) -> Self
+    where
+        T: Clone,
+    {
+        let lock = self.read();
+        Self::with(lock.clone(), self.is_frozen())
+    }
+
+    #[inline]
+    pub fn is_frozen(&self) -> bool {
+        self.frozen.load(Ordering::Acquire)
+    }
+
+    /// Get the inner value if frozen.
+    #[inline]
+    pub fn get(&self) -> Option<&T> {
+        if likely(self.frozen.load(Ordering::Acquire)) {
+            // SAFETY: This is frozen so the data cannot be modified.
+            unsafe { Some(&*self.data.get()) }
+        } else {
+            None
+        }
+    }
+
+    #[inline]
+    pub fn read(&self) -> FreezeReadGuard<'_, T> {
+        FreezeReadGuard {
+            _lock_guard: if self.frozen.load(Ordering::Acquire) {
+                None
+            } else {
+                Some(self.lock.read())
+            },
+            data: unsafe { NonNull::new_unchecked(self.data.get()) },
+        }
+    }
+
+    #[inline]
+    pub fn borrow(&self) -> FreezeReadGuard<'_, T> {
+        self.read()
+    }
+
+    #[inline]
+    #[track_caller]
+    pub fn write(&self) -> FreezeWriteGuard<'_, T> {
+        self.try_write().expect("still mutable")
+    }
+
+    #[inline]
+    pub fn try_write(&self) -> Option<FreezeWriteGuard<'_, T>> {
+        let _lock_guard = self.lock.write();
+        // Use relaxed ordering since we're in the write lock.
+        if self.frozen.load(Ordering::Relaxed) {
+            None
+        } else {
+            Some(FreezeWriteGuard {
+                _lock_guard,
+                data: unsafe { NonNull::new_unchecked(self.data.get()) },
+                frozen: &self.frozen,
+                marker: PhantomData,
+            })
+        }
+    }
+
+    #[inline]
+    pub fn freeze(&self) -> &T {
+        if !self.frozen.load(Ordering::Acquire) {
+            // Get the lock to ensure no concurrent writes and that we release the latest write.
+            let _lock = self.lock.write();
+            self.frozen.store(true, Ordering::Release);
+        }
+
+        // SAFETY: This is frozen so the data cannot be modified and shared access is sound.
+        unsafe { &*self.data.get() }
+    }
+}
+
+/// A guard holding shared access to a `FreezeLock` which is in a locked state or frozen.
+#[must_use = "if unused the FreezeLock may immediately unlock"]
+pub struct FreezeReadGuard<'a, T: ?Sized> {
+    _lock_guard: Option<ReadGuard<'a, ()>>,
+    data: NonNull<T>,
+}
+
+impl<'a, T: ?Sized + 'a> Deref for FreezeReadGuard<'a, T> {
+    type Target = T;
+    #[inline]
+    fn deref(&self) -> &T {
+        // SAFETY: If the lock is not frozen, `_lock_guard` holds the lock to the `UnsafeCell` so
+        // this has shared access until the `FreezeReadGuard` is dropped. If the lock is frozen,
+        // the data cannot be modified and shared access is sound.
+        unsafe { &*self.data.as_ptr() }
+    }
+}
+
+impl<'a, T: ?Sized> FreezeReadGuard<'a, T> {
+    #[inline]
+    pub fn map<U: ?Sized>(this: Self, f: impl FnOnce(&T) -> &U) -> FreezeReadGuard<'a, U> {
+        FreezeReadGuard { data: NonNull::from(f(&*this)), _lock_guard: this._lock_guard }
+    }
+}
+
+/// A guard holding mutable access to a `FreezeLock` which is in a locked state or frozen.
+#[must_use = "if unused the FreezeLock may immediately unlock"]
+pub struct FreezeWriteGuard<'a, T: ?Sized> {
+    _lock_guard: WriteGuard<'a, ()>,
+    frozen: &'a AtomicBool,
+    data: NonNull<T>,
+    marker: PhantomData<&'a mut T>,
+}
+
+impl<'a, T> FreezeWriteGuard<'a, T> {
+    pub fn freeze(self) -> &'a T {
+        self.frozen.store(true, Ordering::Release);
+
+        // SAFETY: This is frozen so the data cannot be modified and shared access is sound.
+        unsafe { &*self.data.as_ptr() }
+    }
+}
+
+impl<'a, T: ?Sized> FreezeWriteGuard<'a, T> {
+    #[inline]
+    pub fn map<U: ?Sized>(
+        mut this: Self,
+        f: impl FnOnce(&mut T) -> &mut U,
+    ) -> FreezeWriteGuard<'a, U> {
+        FreezeWriteGuard {
+            data: NonNull::from(f(&mut *this)),
+            _lock_guard: this._lock_guard,
+            frozen: this.frozen,
+            marker: PhantomData,
+        }
+    }
+}
+
+impl<'a, T: ?Sized + 'a> Deref for FreezeWriteGuard<'a, T> {
+    type Target = T;
+    #[inline]
+    fn deref(&self) -> &T {
+        // SAFETY: `self._lock_guard` holds the lock to the `UnsafeCell` so this has shared access.
+        unsafe { &*self.data.as_ptr() }
+    }
+}
+
+impl<'a, T: ?Sized + 'a> DerefMut for FreezeWriteGuard<'a, T> {
+    #[inline]
+    fn deref_mut(&mut self) -> &mut T {
+        // SAFETY: `self._lock_guard` holds the lock to the `UnsafeCell` so this has mutable access.
+        unsafe { &mut *self.data.as_ptr() }
+    }
+}
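`FreezeLock` targets data with a two-phase life: mutable while being built, then immutable and readable concurrently with no lock traffic. A usage sketch built only from the API above (variable names illustrative):

    // Build phase: writes go through the internal RwLock.
    let cache = FreezeLock::new(Vec::new());
    cache.write().push("entry");

    // Freeze: no further writes are possible; `try_write` now returns None.
    let frozen: &Vec<&str> = cache.freeze();

    // Read phase: `get` and `read` skip the lock entirely once frozen.
    assert_eq!(cache.get().unwrap().len(), 1);
    assert_eq!(frozen[0], "entry");

The `Acquire`/`Release` pairing on `frozen` is what makes the lock-free read sound: a reader that observes `frozen == true` also observes every write made before the freeze.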
diff --git a/compiler/rustc_data_structures/src/sync/lock.rs b/compiler/rustc_data_structures/src/sync/lock.rs
new file mode 100644
index 000000000..339aebbf8
--- /dev/null
+++ b/compiler/rustc_data_structures/src/sync/lock.rs
@@ -0,0 +1,275 @@
+//! This module implements a lock which only uses synchronization if `might_be_dyn_thread_safe` is true.
+//! It implements `DynSend` and `DynSync` instead of the typical `Send` and `Sync` traits.
+//!
+//! When `cfg(parallel_compiler)` is not set, the lock is instead a wrapper around `RefCell`.
+
+#![allow(dead_code)]
+
+use std::fmt;
+
+#[cfg(parallel_compiler)]
+pub use maybe_sync::*;
+#[cfg(not(parallel_compiler))]
+pub use no_sync::*;
+
+#[derive(Clone, Copy, PartialEq)]
+pub enum Mode {
+    NoSync,
+    Sync,
+}
+
+mod maybe_sync {
+    use super::Mode;
+    use crate::sync::mode;
+    #[cfg(parallel_compiler)]
+    use crate::sync::{DynSend, DynSync};
+    use parking_lot::lock_api::RawMutex as _;
+    use parking_lot::RawMutex;
+    use std::cell::Cell;
+    use std::cell::UnsafeCell;
+    use std::intrinsics::unlikely;
+    use std::marker::PhantomData;
+    use std::mem::ManuallyDrop;
+    use std::ops::{Deref, DerefMut};
+
+    /// A guard holding mutable access to a `Lock` which is in a locked state.
+    #[must_use = "if unused the Lock will immediately unlock"]
+    pub struct LockGuard<'a, T> {
+        lock: &'a Lock<T>,
+        marker: PhantomData<&'a mut T>,
+
+        /// The syncronization mode of the lock. This is explicitly passed to let LLVM relate it
+        /// to the original lock operation.
+        mode: Mode,
+    }
+
+    impl<'a, T: 'a> Deref for LockGuard<'a, T> {
+        type Target = T;
+        #[inline]
+        fn deref(&self) -> &T {
+            // SAFETY: We have shared access to the mutable access owned by this type,
+            // so we can give out a shared reference.
+            unsafe { &*self.lock.data.get() }
+        }
+    }
+
+    impl<'a, T: 'a> DerefMut for LockGuard<'a, T> {
+        #[inline]
+        fn deref_mut(&mut self) -> &mut T {
+            // SAFETY: We have mutable access to the data so we can give out a mutable reference.
+            unsafe { &mut *self.lock.data.get() }
+        }
+    }
+
+    impl<'a, T: 'a> Drop for LockGuard<'a, T> {
+        #[inline]
+        fn drop(&mut self) {
+            // SAFETY (union access): We get `self.mode` from the lock operation so it is consistent
+            // with the `lock.mode` state. This means we access the right union fields.
+            match self.mode {
+                Mode::NoSync => {
+                    let cell = unsafe { &self.lock.mode_union.no_sync };
+                    debug_assert_eq!(cell.get(), true);
+                    cell.set(false);
+                }
+                // SAFETY (unlock): We know that the lock is locked as this type is a proof of that.
+                Mode::Sync => unsafe { self.lock.mode_union.sync.unlock() },
+            }
+        }
+    }
+
+    union ModeUnion {
+        /// Indicates if the cell is locked. Only used if `Lock.mode` is `NoSync`.
+        no_sync: ManuallyDrop<Cell<bool>>,
+
+        /// A lock implementation that's only used if `Lock.mode` is `Sync`.
+        sync: ManuallyDrop<RawMutex>,
+    }
+
+    /// The value representing a locked state for the `Cell`.
+    const LOCKED: bool = true;
+
+    /// A lock which only uses synchronization if `might_be_dyn_thread_safe` is true.
+    /// It implements `DynSend` and `DynSync` instead of the typical `Send` and `Sync`.
+    pub struct Lock<T> {
+        /// Indicates if synchronization is used via `mode_union.sync` if it's `Sync`, or if a
+        /// not thread safe cell is used via `mode_union.no_sync` if it's `NoSync`.
+        /// This is set on initialization and never changed.
+        mode: Mode,
+
+        mode_union: ModeUnion,
+        data: UnsafeCell<T>,
+    }
+
+    impl<T> Lock<T> {
+        #[inline(always)]
+        pub fn new(inner: T) -> Self {
+            let (mode, mode_union) = if unlikely(mode::might_be_dyn_thread_safe()) {
+                // Create the lock with synchronization enabled using the `RawMutex` type.
+                (Mode::Sync, ModeUnion { sync: ManuallyDrop::new(RawMutex::INIT) })
+            } else {
+                // Create the lock with synchronization disabled.
+                (Mode::NoSync, ModeUnion { no_sync: ManuallyDrop::new(Cell::new(!LOCKED)) })
+            };
+            Lock { mode, mode_union, data: UnsafeCell::new(inner) }
+        }
+
+        #[inline(always)]
+        pub fn into_inner(self) -> T {
+            self.data.into_inner()
+        }
+
+        #[inline(always)]
+        pub fn get_mut(&mut self) -> &mut T {
+            self.data.get_mut()
+        }
+
+        #[inline(always)]
+        pub fn try_lock(&self) -> Option<LockGuard<'_, T>> {
+            let mode = self.mode;
+            // SAFETY: This is safe since the union fields are used in accordance with `self.mode`.
+            match mode {
+                Mode::NoSync => {
+                    let cell = unsafe { &self.mode_union.no_sync };
+                    let was_unlocked = cell.get() != LOCKED;
+                    if was_unlocked {
+                        cell.set(LOCKED);
+                    }
+                    was_unlocked
+                }
+                Mode::Sync => unsafe { self.mode_union.sync.try_lock() },
+            }
+            .then(|| LockGuard { lock: self, marker: PhantomData, mode })
+        }
+
+        /// This acquires the lock assuming syncronization is in a specific mode.
+        ///
+        /// Safety
+        /// This method must only be called with `Mode::Sync` if `might_be_dyn_thread_safe` was
+        /// true on lock creation.
+        #[inline(always)]
+        #[track_caller]
+        pub unsafe fn lock_assume(&self, mode: Mode) -> LockGuard<'_, T> {
+            #[inline(never)]
+            #[track_caller]
+            #[cold]
+            fn lock_held() -> ! {
+                panic!("lock was already held")
+            }
+
+            // SAFETY: This is safe since the union fields are used in accordance with `mode`
+            // which also must match `self.mode` due to the safety precondition.
+            unsafe {
+                match mode {
+                    Mode::NoSync => {
+                        if unlikely(self.mode_union.no_sync.replace(LOCKED) == LOCKED) {
+                            lock_held()
+                        }
+                    }
+                    Mode::Sync => self.mode_union.sync.lock(),
+                }
+            }
+            LockGuard { lock: self, marker: PhantomData, mode }
+        }
+
+        #[inline(always)]
+        #[track_caller]
+        pub fn lock(&self) -> LockGuard<'_, T> {
+            unsafe { self.lock_assume(self.mode) }
+        }
+    }
+
+    #[cfg(parallel_compiler)]
+    unsafe impl<T: DynSend> DynSend for Lock<T> {}
+    #[cfg(parallel_compiler)]
+    unsafe impl<T: DynSend> DynSync for Lock<T> {}
+}
+
+mod no_sync {
+    use super::Mode;
+    use std::cell::RefCell;
+
+    pub use std::cell::RefMut as LockGuard;
+
+    pub struct Lock<T>(RefCell<T>);
+
+    impl<T> Lock<T> {
+        #[inline(always)]
+        pub fn new(inner: T) -> Self {
+            Lock(RefCell::new(inner))
+        }
+
+        #[inline(always)]
+        pub fn into_inner(self) -> T {
+            self.0.into_inner()
+        }
+
+        #[inline(always)]
+        pub fn get_mut(&mut self) -> &mut T {
+            self.0.get_mut()
+        }
+
+        #[inline(always)]
+        pub fn try_lock(&self) -> Option<LockGuard<'_, T>> {
+            self.0.try_borrow_mut().ok()
+        }
+
+        #[inline(always)]
+        #[track_caller]
+        // This is unsafe to match the API for the `parallel_compiler` case.
+        pub unsafe fn lock_assume(&self, _mode: Mode) -> LockGuard<'_, T> {
+            self.0.borrow_mut()
+        }
+
+        #[inline(always)]
+        #[track_caller]
+        pub fn lock(&self) -> LockGuard<'_, T> {
+            self.0.borrow_mut()
+        }
+    }
+}
+
+impl<T> Lock<T> {
+    #[inline(always)]
+    #[track_caller]
+    pub fn with_lock<F: FnOnce(&mut T) -> R, R>(&self, f: F) -> R {
+        f(&mut *self.lock())
+    }
+
+    #[inline(always)]
+    #[track_caller]
+    pub fn borrow(&self) -> LockGuard<'_, T> {
+        self.lock()
+    }
+
+    #[inline(always)]
+    #[track_caller]
+    pub fn borrow_mut(&self) -> LockGuard<'_, T> {
+        self.lock()
+    }
+}
+
+impl<T: Default> Default for Lock<T> {
+    #[inline]
+    fn default() -> Self {
+        Lock::new(T::default())
+    }
+}
+
+impl<T: fmt::Debug> fmt::Debug for Lock<T> {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        match self.try_lock() {
+            Some(guard) => f.debug_struct("Lock").field("data", &&*guard).finish(),
+            None => {
+                struct LockedPlaceholder;
+                impl fmt::Debug for LockedPlaceholder {
+                    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+                        f.write_str("<locked>")
+                    }
+                }
+
+                f.debug_struct("Lock").field("data", &LockedPlaceholder).finish()
+            }
+        }
+    }
+}
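From the caller's side the new `Lock` is a drop-in replacement for the old one; the mode dispatch is internal. A usage sketch using only methods defined above:

    // The lock decides at construction time, based on `might_be_dyn_thread_safe`,
    // whether it is a plain Cell-guarded cell or a real parking_lot RawMutex.
    let counter = Lock::new(0u64);
    *counter.lock() += 1;
    counter.with_lock(|n| *n += 1);
    assert_eq!(counter.into_inner(), 2);

Storing the chosen `Mode` both in the lock and in each `LockGuard` lets `Drop` take the matching union arm without re-reading `self.lock.mode`; as the comment above notes, this helps LLVM tie the unlock back to the original lock operation.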
diff --git a/compiler/rustc_data_structures/src/sync/parallel.rs b/compiler/rustc_data_structures/src/sync/parallel.rs
new file mode 100644
index 000000000..1944ddfb7
--- /dev/null
+++ b/compiler/rustc_data_structures/src/sync/parallel.rs
@@ -0,0 +1,188 @@
+//! This module defines parallel operations that are implemented in
+//! one way for the serial compiler, and another way the parallel compiler.
+
+#![allow(dead_code)]
+
+use parking_lot::Mutex;
+use std::any::Any;
+use std::panic::{catch_unwind, resume_unwind, AssertUnwindSafe};
+
+#[cfg(not(parallel_compiler))]
+pub use disabled::*;
+#[cfg(parallel_compiler)]
+pub use enabled::*;
+
+/// A guard used to hold panics that occur during a parallel section to later by unwound.
+/// This is used for the parallel compiler to prevent fatal errors from non-deterministically
+/// hiding errors by ensuring that everything in the section has completed executing before
+/// continuing with unwinding. It's also used for the non-parallel code to ensure error message
+/// output match the parallel compiler for testing purposes.
+pub struct ParallelGuard {
+    panic: Mutex<Option<Box<dyn Any + Send + 'static>>>,
+}
+
+impl ParallelGuard {
+    pub fn run<R>(&self, f: impl FnOnce() -> R) -> Option<R> {
+        catch_unwind(AssertUnwindSafe(f))
+            .map_err(|err| {
+                *self.panic.lock() = Some(err);
+            })
+            .ok()
+    }
+}
+
+/// This gives access to a fresh parallel guard in the closure and will unwind any panics
+/// caught in it after the closure returns.
+#[inline]
+pub fn parallel_guard<R>(f: impl FnOnce(&ParallelGuard) -> R) -> R {
+    let guard = ParallelGuard { panic: Mutex::new(None) };
+    let ret = f(&guard);
+    if let Some(panic) = guard.panic.into_inner() {
+        resume_unwind(panic);
+    }
+    ret
+}
+
+mod disabled {
+    use crate::sync::parallel_guard;
+
+    #[macro_export]
+    #[cfg(not(parallel_compiler))]
+    macro_rules! parallel {
+        ($($blocks:block),*) => {{
+            $crate::sync::parallel_guard(|guard| {
+                $(guard.run(|| $blocks);)*
+            });
+        }}
+    }
+
+    pub fn join<A, B, RA, RB>(oper_a: A, oper_b: B) -> (RA, RB)
+    where
+        A: FnOnce() -> RA,
+        B: FnOnce() -> RB,
+    {
+        let (a, b) = parallel_guard(|guard| {
+            let a = guard.run(oper_a);
+            let b = guard.run(oper_b);
+            (a, b)
+        });
+        (a.unwrap(), b.unwrap())
+    }
+
+    pub fn par_for_each_in<T: IntoIterator>(t: T, mut for_each: impl FnMut(T::Item)) {
+        parallel_guard(|guard| {
+            t.into_iter().for_each(|i| {
+                guard.run(|| for_each(i));
+            });
+        })
+    }
+
+    pub fn par_map<T: IntoIterator, R, C: FromIterator<R>>(
+        t: T,
+        mut map: impl FnMut(<<T as IntoIterator>::IntoIter as Iterator>::Item) -> R,
+    ) -> C {
+        parallel_guard(|guard| t.into_iter().filter_map(|i| guard.run(|| map(i))).collect())
+    }
+}
+
+#[cfg(parallel_compiler)]
+mod enabled {
+    use crate::sync::{mode, parallel_guard, DynSend, DynSync, FromDyn};
+
+    /// Runs a list of blocks in parallel. The first block is executed immediately on
+    /// the current thread. Use that for the longest running block.
+    #[macro_export]
+    macro_rules! parallel {
+        (impl $fblock:block [$($c:expr,)*] [$block:expr $(, $rest:expr)*]) => {
+            parallel!(impl $fblock [$block, $($c,)*] [$($rest),*])
+        };
+        (impl $fblock:block [$($blocks:expr,)*] []) => {
+            ::rustc_data_structures::sync::scope(|s| {
+                $(let block = rustc_data_structures::sync::FromDyn::from(|| $blocks);
+                s.spawn(move |_| block.into_inner()());)*
+                (|| $fblock)();
+            });
+        };
+        ($fblock:block, $($blocks:block),*) => {
+            if rustc_data_structures::sync::is_dyn_thread_safe() {
+                // Reverse the order of the later blocks since Rayon executes them in reverse order
+                // when using a single thread. This ensures the execution order matches that
+                // of a single threaded rustc.
+                parallel!(impl $fblock [] [$($blocks),*]);
+            } else {
+                $crate::sync::parallel_guard(|guard| {
+                    guard.run(|| $fblock);
+                    $(guard.run(|| $blocks);)*
+                });
+            }
+        };
+    }
+
+    // This function only works when `mode::is_dyn_thread_safe()`.
+    pub fn scope<'scope, OP, R>(op: OP) -> R
+    where
+        OP: FnOnce(&rayon::Scope<'scope>) -> R + DynSend,
+        R: DynSend,
+    {
+        let op = FromDyn::from(op);
+        rayon::scope(|s| FromDyn::from(op.into_inner()(s))).into_inner()
+    }
+
+    #[inline]
+    pub fn join<A, B, RA: DynSend, RB: DynSend>(oper_a: A, oper_b: B) -> (RA, RB)
+    where
+        A: FnOnce() -> RA + DynSend,
+        B: FnOnce() -> RB + DynSend,
+    {
+        if mode::is_dyn_thread_safe() {
+            let oper_a = FromDyn::from(oper_a);
+            let oper_b = FromDyn::from(oper_b);
+            let (a, b) = rayon::join(
+                move || FromDyn::from(oper_a.into_inner()()),
+                move || FromDyn::from(oper_b.into_inner()()),
+            );
+            (a.into_inner(), b.into_inner())
+        } else {
+            super::disabled::join(oper_a, oper_b)
+        }
+    }
+
+    use rayon::iter::{FromParallelIterator, IntoParallelIterator, ParallelIterator};
+
+    pub fn par_for_each_in<I, T: IntoIterator<Item = I> + IntoParallelIterator<Item = I>>(
+        t: T,
+        for_each: impl Fn(I) + DynSync + DynSend,
+    ) {
+        parallel_guard(|guard| {
+            if mode::is_dyn_thread_safe() {
+                let for_each = FromDyn::from(for_each);
+                t.into_par_iter().for_each(|i| {
+                    guard.run(|| for_each(i));
+                });
+            } else {
+                t.into_iter().for_each(|i| {
+                    guard.run(|| for_each(i));
+                });
+            }
+        });
+    }
+
+    pub fn par_map<
+        I,
+        T: IntoIterator<Item = I> + IntoParallelIterator<Item = I>,
+        R: std::marker::Send,
+        C: FromIterator<R> + FromParallelIterator<R>,
+    >(
+        t: T,
+        map: impl Fn(I) -> R + DynSync + DynSend,
+    ) -> C {
+        parallel_guard(|guard| {
+            if mode::is_dyn_thread_safe() {
+                let map = FromDyn::from(map);
+                t.into_par_iter().filter_map(|i| guard.run(|| map(i))).collect()
+            } else {
+                t.into_iter().filter_map(|i| guard.run(|| map(i))).collect()
+            }
+        })
+    }
+}
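Both the `disabled` and `enabled` halves now funnel through `parallel_guard`, so a panic in one item no longer skips the remaining items; the first captured panic is re-raised only after the whole section finishes. A sketch of that behavior on the serial path (values illustrative):

    // All four closures run; the panic from i == 2 is held by the guard.
    // After the closure returns, parallel_guard re-raises it, so the call
    // as a whole still panics -- but only after every item has executed.
    let _: Vec<i32> = parallel_guard(|guard| {
        (0..4)
            .filter_map(|i| {
                guard.run(|| {
                    assert!(i != 2, "boom"); // captured by guard.run, not propagated yet
                    i * 10
                })
            })
            .collect()
    });

This replaces the hand-rolled `catch_unwind`/`resume_unwind` bookkeeping that was deleted from sync.rs above with one shared implementation.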
diff --git a/compiler/rustc_data_structures/src/sync/worker_local.rs b/compiler/rustc_data_structures/src/sync/worker_local.rs
index 8c84daf4f..ffafdba13 100644
--- a/compiler/rustc_data_structures/src/sync/worker_local.rs
+++ b/compiler/rustc_data_structures/src/sync/worker_local.rs
@@ -1,4 +1,4 @@
-use crate::sync::Lock;
+use parking_lot::Mutex;
 use std::cell::Cell;
 use std::cell::OnceCell;
 use std::ops::Deref;
@@ -6,7 +6,7 @@ use std::ptr;
 use std::sync::Arc;
 
 #[cfg(parallel_compiler)]
-use {crate::cold_path, crate::sync::CacheAligned};
+use {crate::outline, crate::sync::CacheAligned};
 
 /// A pointer to the `RegistryData` which uniquely identifies a registry.
 /// This identifier can be reused if the registry gets freed.
@@ -25,17 +25,13 @@ impl RegistryId {
     fn verify(self) -> usize {
         let (id, index) = THREAD_DATA.with(|data| (data.registry_id.get(), data.index.get()));
 
-        if id == self {
-            index
-        } else {
-            cold_path(|| panic!("Unable to verify registry association"))
-        }
+        if id == self { index } else { outline(|| panic!("Unable to verify registry association")) }
     }
 }
 
 struct RegistryData {
     thread_limit: usize,
-    threads: Lock<usize>,
+    threads: Mutex<usize>,
 }
 
 /// Represents a list of threads which can access worker locals.
@@ -65,7 +61,7 @@ thread_local! {
 impl Registry {
     /// Creates a registry which can hold up to `thread_limit` threads.
     pub fn new(thread_limit: usize) -> Self {
-        Registry(Arc::new(RegistryData { thread_limit, threads: Lock::new(0) }))
+        Registry(Arc::new(RegistryData { thread_limit, threads: Mutex::new(0) }))
    }
 
     /// Gets the registry associated with the current thread. Panics if there's no such registry.
@@ -171,3 +167,9 @@ impl<T> Deref for WorkerLocal<T> {
         unsafe { &self.locals.get_unchecked(self.registry.id().verify()).0 }
     }
 }
+
+impl<T: Default> Default for WorkerLocal<T> {
+    fn default() -> Self {
+        WorkerLocal::new(|_| T::default())
+    }
+}
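The new `Default` impl builds a `WorkerLocal` by calling `T::default()` for every worker slot, which lets containing structs simply derive `Default`. A sketch (the struct is hypothetical, not from this diff):

    #[derive(Default)]
    struct QueryCaches {
        // Each worker thread gets its own buffer; no locking on access.
        per_worker: WorkerLocal<Vec<u8>>,
    }

Note that `WorkerLocal::new` passes the worker index to its closure (`|_| T::default()` ignores it), so per-worker values can still be seeded differently when needed.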