Diffstat (limited to 'compiler/rustc_data_structures/src')
-rw-r--r--  compiler/rustc_data_structures/src/flock.rs                    |  13
-rw-r--r--  compiler/rustc_data_structures/src/functor.rs                  | 116
-rw-r--r--  compiler/rustc_data_structures/src/graph/dominators/mod.rs     | 101
-rw-r--r--  compiler/rustc_data_structures/src/graph/dominators/tests.rs   |  45
-rw-r--r--  compiler/rustc_data_structures/src/lib.rs                      |  43
-rw-r--r--  compiler/rustc_data_structures/src/marker.rs                   |  44
-rw-r--r--  compiler/rustc_data_structures/src/profiling.rs                |  13
-rw-r--r--  compiler/rustc_data_structures/src/sync.rs                     |  19
-rw-r--r--  compiler/rustc_data_structures/src/sync/parallel.rs            |  67
9 files changed, 217 insertions(+), 244 deletions(-)
diff --git a/compiler/rustc_data_structures/src/flock.rs b/compiler/rustc_data_structures/src/flock.rs
index efdb44248..008565e4c 100644
--- a/compiler/rustc_data_structures/src/flock.rs
+++ b/compiler/rustc_data_structures/src/flock.rs
@@ -4,17 +4,20 @@
//! green/native threading. This is just a bare-bones enough solution for
//! librustdoc, it is not production quality at all.
-cfg_if! {
- if #[cfg(target_os = "linux")] {
+cfg_match! {
+ cfg(target_os = "linux") => {
mod linux;
use linux as imp;
- } else if #[cfg(unix)] {
+ }
+ cfg(unix) => {
mod unix;
use unix as imp;
- } else if #[cfg(windows)] {
+ }
+ cfg(windows) => {
mod windows;
use self::windows as imp;
- } else {
+ }
+ _ => {
mod unsupported;
use unsupported as imp;
}
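
The hunk above migrates flock.rs from the external `cfg_if!` macro to the unstable built-in `cfg_match!`. A minimal sketch of the same platform-selection pattern, assuming a nightly toolchain with `#![feature(cfg_match)]` and the `cfg(...) =>` arm syntax used at the time of this change:

    #![feature(cfg_match)]

    // Exactly one arm survives compilation; the others are discarded,
    // mirroring how flock.rs picks its platform-specific module.
    cfg_match! {
        cfg(target_os = "linux") => {
            fn platform() -> &'static str { "linux" }
        }
        cfg(windows) => {
            fn platform() -> &'static str { "windows" }
        }
        _ => {
            fn platform() -> &'static str { "other" }
        }
    }

    fn main() {
        println!("compiled for: {}", platform());
    }
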
diff --git a/compiler/rustc_data_structures/src/functor.rs b/compiler/rustc_data_structures/src/functor.rs
deleted file mode 100644
index e3fcaccb1..000000000
--- a/compiler/rustc_data_structures/src/functor.rs
+++ /dev/null
@@ -1,116 +0,0 @@
-use rustc_index::{Idx, IndexVec};
-use std::{mem, rc::Rc, sync::Arc};
-
-pub trait IdFunctor: Sized {
- type Inner;
-
- fn try_map_id<F, E>(self, f: F) -> Result<Self, E>
- where
- F: FnMut(Self::Inner) -> Result<Self::Inner, E>;
-}
-
-impl<T> IdFunctor for Box<T> {
- type Inner = T;
-
- #[inline]
- fn try_map_id<F, E>(self, mut f: F) -> Result<Self, E>
- where
- F: FnMut(Self::Inner) -> Result<Self::Inner, E>,
- {
- let raw = Box::into_raw(self);
- Ok(unsafe {
- // SAFETY: The raw pointer points to a valid value of type `T`.
- let value = raw.read();
- // SAFETY: Converts `Box<T>` to `Box<MaybeUninit<T>>` which is the
- // inverse of `Box::assume_init()` and should be safe.
- let raw: Box<mem::MaybeUninit<T>> = Box::from_raw(raw.cast());
- // SAFETY: Write the mapped value back into the `Box`.
- Box::write(raw, f(value)?)
- })
- }
-}
-
-impl<T> IdFunctor for Vec<T> {
- type Inner = T;
-
- #[inline]
- fn try_map_id<F, E>(self, f: F) -> Result<Self, E>
- where
- F: FnMut(Self::Inner) -> Result<Self::Inner, E>,
- {
- self.into_iter().map(f).collect()
- }
-}
-
-impl<T> IdFunctor for Box<[T]> {
- type Inner = T;
-
- #[inline]
- fn try_map_id<F, E>(self, f: F) -> Result<Self, E>
- where
- F: FnMut(Self::Inner) -> Result<Self::Inner, E>,
- {
- Vec::from(self).try_map_id(f).map(Into::into)
- }
-}
-
-impl<I: Idx, T> IdFunctor for IndexVec<I, T> {
- type Inner = T;
-
- #[inline]
- fn try_map_id<F, E>(self, f: F) -> Result<Self, E>
- where
- F: FnMut(Self::Inner) -> Result<Self::Inner, E>,
- {
- self.raw.try_map_id(f).map(IndexVec::from_raw)
- }
-}
-
-macro_rules! rc {
- ($($rc:ident),+) => {$(
- impl<T: Clone> IdFunctor for $rc<T> {
- type Inner = T;
-
- #[inline]
- fn try_map_id<F, E>(mut self, mut f: F) -> Result<Self, E>
- where
- F: FnMut(Self::Inner) -> Result<Self::Inner, E>,
- {
- // We merely want to replace the contained `T`, if at all possible,
- // so that we don't needlessly allocate a new `$rc` or indeed clone
- // the contained type.
- unsafe {
- // First step is to ensure that we have a unique reference to
- // the contained type, which `$rc::make_mut` will accomplish (by
- // allocating a new `$rc` and cloning the `T` only if required).
- // This is done *before* casting to `$rc<ManuallyDrop<T>>` so that
- // panicking during `make_mut` does not leak the `T`.
- $rc::make_mut(&mut self);
-
- // Casting to `$rc<ManuallyDrop<T>>` is safe because `ManuallyDrop`
- // is `repr(transparent)`.
- let ptr = $rc::into_raw(self).cast::<mem::ManuallyDrop<T>>();
- let mut unique = $rc::from_raw(ptr);
-
- // Call to `$rc::make_mut` above guarantees that `unique` is the
- // sole reference to the contained value, so we can avoid doing
- // a checked `get_mut` here.
- let slot = $rc::get_mut_unchecked(&mut unique);
-
- // Semantically move the contained type out from `unique`, fold
- // it, then move the folded value back into `unique`. Should
- // folding fail, `ManuallyDrop` ensures that the "moved-out"
- // value is not re-dropped.
- let owned = mem::ManuallyDrop::take(slot);
- let folded = f(owned)?;
- *slot = mem::ManuallyDrop::new(folded);
-
- // Cast back to `$rc<T>`.
- Ok($rc::from_raw($rc::into_raw(unique).cast()))
- }
- }
- }
- )+};
-}
-
-rc! { Rc, Arc }
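
The deleted functor.rs above mapped the value inside a `Box`, `Vec`, `Rc`, or `Arc` "in place": for `Rc`/`Arc` it used `make_mut`, a `ManuallyDrop` cast, and `get_mut_unchecked` so the contained `T` could be moved out, folded, and moved back without an extra allocation. A safe, illustrative sketch of the same idea for `Arc` (a hypothetical helper, not the removed API; it clones `T` when the value is shared instead of taking the unsafe in-place path):

    use std::sync::Arc;

    // Map the value behind an `Arc` with a fallible closure. When this is the
    // only handle, the value is moved out and the Arc rebuilt; otherwise the
    // contents are cloned first, so other handles keep seeing the old value.
    fn try_map_arc<T: Clone, E>(
        arc: Arc<T>,
        f: impl FnOnce(T) -> Result<T, E>,
    ) -> Result<Arc<T>, E> {
        match Arc::try_unwrap(arc) {
            Ok(value) => f(value).map(Arc::new),
            Err(shared) => f((*shared).clone()).map(Arc::new),
        }
    }
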
diff --git a/compiler/rustc_data_structures/src/graph/dominators/mod.rs b/compiler/rustc_data_structures/src/graph/dominators/mod.rs
index 4075481e5..5dd414cfd 100644
--- a/compiler/rustc_data_structures/src/graph/dominators/mod.rs
+++ b/compiler/rustc_data_structures/src/graph/dominators/mod.rs
@@ -26,7 +26,42 @@ rustc_index::newtype_index! {
struct PreorderIndex {}
}
-pub fn dominators<G: ControlFlowGraph>(graph: &G) -> Dominators<G::Node> {
+#[derive(Clone, Debug)]
+pub struct Dominators<Node: Idx> {
+ kind: Kind<Node>,
+}
+
+#[derive(Clone, Debug)]
+enum Kind<Node: Idx> {
+ /// A representation optimized for small path graphs.
+ Path,
+ General(Inner<Node>),
+}
+
+pub fn dominators<G: ControlFlowGraph>(g: &G) -> Dominators<G::Node> {
+ // We often encounter MIR bodies with 1 or 2 basic blocks. Special case the dominators
+ // computation and representation for those cases.
+ if is_small_path_graph(g) {
+ Dominators { kind: Kind::Path }
+ } else {
+ Dominators { kind: Kind::General(dominators_impl(g)) }
+ }
+}
+
+fn is_small_path_graph<G: ControlFlowGraph>(g: &G) -> bool {
+ if g.start_node().index() != 0 {
+ return false;
+ }
+ if g.num_nodes() == 1 {
+ return true;
+ }
+ if g.num_nodes() == 2 {
+ return g.successors(g.start_node()).any(|n| n.index() == 1);
+ }
+ false
+}
+
+fn dominators_impl<G: ControlFlowGraph>(graph: &G) -> Inner<G::Node> {
// compute the post order index (rank) for each node
let mut post_order_rank = IndexVec::from_elem_n(0, graph.num_nodes());
@@ -245,7 +280,7 @@ pub fn dominators<G: ControlFlowGraph>(graph: &G) -> Dominators<G::Node> {
let time = compute_access_time(start_node, &immediate_dominators);
- Dominators { start_node, post_order_rank, immediate_dominators, time }
+ Inner { post_order_rank, immediate_dominators, time }
}
/// Evaluate the link-eval virtual forest, providing the currently minimum semi
@@ -310,12 +345,11 @@ fn compress(
/// Tracks the list of dominators for each node.
#[derive(Clone, Debug)]
-pub struct Dominators<N: Idx> {
- start_node: N,
+struct Inner<N: Idx> {
post_order_rank: IndexVec<N, usize>,
// Even though we track only the immediate dominator of each node, it's
// possible to get its full list of dominators by looking up the dominator
- // of each dominator. (See the `impl Iterator for Iter` definition).
+ // of each dominator.
immediate_dominators: IndexVec<N, Option<N>>,
time: IndexVec<N, Time>,
}
@@ -323,19 +357,24 @@ pub struct Dominators<N: Idx> {
impl<Node: Idx> Dominators<Node> {
/// Returns true if node is reachable from the start node.
pub fn is_reachable(&self, node: Node) -> bool {
- node == self.start_node || self.immediate_dominators[node].is_some()
+ match &self.kind {
+ Kind::Path => true,
+ Kind::General(g) => g.time[node].start != 0,
+ }
}
/// Returns the immediate dominator of node, if any.
pub fn immediate_dominator(&self, node: Node) -> Option<Node> {
- self.immediate_dominators[node]
- }
-
- /// Provides an iterator over each dominator up the CFG, for the given Node.
- /// See the `impl Iterator for Iter` definition to understand how this works.
- pub fn dominators(&self, node: Node) -> Iter<'_, Node> {
- assert!(self.is_reachable(node), "node {node:?} is not reachable");
- Iter { dom_tree: self, node: Some(node) }
+ match &self.kind {
+ Kind::Path => {
+ if 0 < node.index() {
+ Some(Node::new(node.index() - 1))
+ } else {
+ None
+ }
+ }
+ Kind::General(g) => g.immediate_dominators[node],
+ }
}
/// Provide deterministic ordering of nodes such that, if any two nodes have a dominator
@@ -343,7 +382,10 @@ impl<Node: Idx> Dominators<Node> {
/// of two unrelated nodes will also be consistent, but otherwise the order has no
/// meaning.) This method cannot be used to determine if either Node dominates the other.
pub fn cmp_in_dominator_order(&self, lhs: Node, rhs: Node) -> Ordering {
- self.post_order_rank[rhs].cmp(&self.post_order_rank[lhs])
+ match &self.kind {
+ Kind::Path => lhs.index().cmp(&rhs.index()),
+ Kind::General(g) => g.post_order_rank[rhs].cmp(&g.post_order_rank[lhs]),
+ }
}
/// Returns true if `a` dominates `b`.
@@ -352,27 +394,14 @@ impl<Node: Idx> Dominators<Node> {
///
/// Panics if `b` is unreachable.
pub fn dominates(&self, a: Node, b: Node) -> bool {
- let a = self.time[a];
- let b = self.time[b];
- assert!(b.start != 0, "node {b:?} is not reachable");
- a.start <= b.start && b.finish <= a.finish
- }
-}
-
-pub struct Iter<'dom, Node: Idx> {
- dom_tree: &'dom Dominators<Node>,
- node: Option<Node>,
-}
-
-impl<'dom, Node: Idx> Iterator for Iter<'dom, Node> {
- type Item = Node;
-
- fn next(&mut self) -> Option<Self::Item> {
- if let Some(node) = self.node {
- self.node = self.dom_tree.immediate_dominator(node);
- Some(node)
- } else {
- None
+ match &self.kind {
+ Kind::Path => a.index() <= b.index(),
+ Kind::General(g) => {
+ let a = g.time[a];
+ let b = g.time[b];
+ assert!(b.start != 0, "node {b:?} is not reachable");
+ a.start <= b.start && b.finish <= a.finish
+ }
}
}
}
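
The `Kind::Path` representation added above relies on a simple invariant: in a straight-line graph 0 → 1 → … → n starting at node 0, every node is reachable and its immediate dominator is the node with the previous index, so no general dominator computation is needed. A minimal sketch of that rule (a hypothetical free function, with plain indices standing in for `Node`):

    // Immediate dominator in a path graph rooted at node 0:
    // node i is immediately dominated by node i - 1; the root has none.
    fn path_immediate_dominator(node: usize) -> Option<usize> {
        node.checked_sub(1)
    }

    fn main() {
        assert_eq!(path_immediate_dominator(0), None);
        assert_eq!(path_immediate_dominator(1), Some(0));
        assert_eq!(path_immediate_dominator(3), Some(2));
    }

`dominates(a, b)` and `cmp_in_dominator_order(lhs, rhs)` reduce to plain index comparisons for the same reason, which is exactly what the `Kind::Path` arms above do.
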
diff --git a/compiler/rustc_data_structures/src/graph/dominators/tests.rs b/compiler/rustc_data_structures/src/graph/dominators/tests.rs
index 5472bb808..39725ba43 100644
--- a/compiler/rustc_data_structures/src/graph/dominators/tests.rs
+++ b/compiler/rustc_data_structures/src/graph/dominators/tests.rs
@@ -6,12 +6,11 @@ use super::super::tests::TestGraph;
fn diamond() {
let graph = TestGraph::new(0, &[(0, 1), (0, 2), (1, 3), (2, 3)]);
- let dominators = dominators(&graph);
- let immediate_dominators = &dominators.immediate_dominators;
- assert_eq!(immediate_dominators[0], None);
- assert_eq!(immediate_dominators[1], Some(0));
- assert_eq!(immediate_dominators[2], Some(0));
- assert_eq!(immediate_dominators[3], Some(0));
+ let d = dominators(&graph);
+ assert_eq!(d.immediate_dominator(0), None);
+ assert_eq!(d.immediate_dominator(1), Some(0));
+ assert_eq!(d.immediate_dominator(2), Some(0));
+ assert_eq!(d.immediate_dominator(3), Some(0));
}
#[test]
@@ -22,15 +21,14 @@ fn paper() {
&[(6, 5), (6, 4), (5, 1), (4, 2), (4, 3), (1, 2), (2, 3), (3, 2), (2, 1)],
);
- let dominators = dominators(&graph);
- let immediate_dominators = &dominators.immediate_dominators;
- assert_eq!(immediate_dominators[0], None); // <-- note that 0 is not in graph
- assert_eq!(immediate_dominators[1], Some(6));
- assert_eq!(immediate_dominators[2], Some(6));
- assert_eq!(immediate_dominators[3], Some(6));
- assert_eq!(immediate_dominators[4], Some(6));
- assert_eq!(immediate_dominators[5], Some(6));
- assert_eq!(immediate_dominators[6], None);
+ let d = dominators(&graph);
+ assert_eq!(d.immediate_dominator(0), None); // <-- note that 0 is not in graph
+ assert_eq!(d.immediate_dominator(1), Some(6));
+ assert_eq!(d.immediate_dominator(2), Some(6));
+ assert_eq!(d.immediate_dominator(3), Some(6));
+ assert_eq!(d.immediate_dominator(4), Some(6));
+ assert_eq!(d.immediate_dominator(5), Some(6));
+ assert_eq!(d.immediate_dominator(6), None);
}
#[test]
@@ -47,11 +45,11 @@ fn paper_slt() {
#[test]
fn immediate_dominator() {
let graph = TestGraph::new(1, &[(1, 2), (2, 3)]);
- let dominators = dominators(&graph);
- assert_eq!(dominators.immediate_dominator(0), None);
- assert_eq!(dominators.immediate_dominator(1), None);
- assert_eq!(dominators.immediate_dominator(2), Some(1));
- assert_eq!(dominators.immediate_dominator(3), Some(2));
+ let d = dominators(&graph);
+ assert_eq!(d.immediate_dominator(0), None);
+ assert_eq!(d.immediate_dominator(1), None);
+ assert_eq!(d.immediate_dominator(2), Some(1));
+ assert_eq!(d.immediate_dominator(3), Some(2));
}
#[test]
@@ -75,8 +73,7 @@ fn transitive_dominator() {
],
);
- let dom_tree = dominators(&graph);
- let immediate_dominators = &dom_tree.immediate_dominators;
- assert_eq!(immediate_dominators[2], Some(0));
- assert_eq!(immediate_dominators[3], Some(0)); // This used to return Some(1).
+ let d = dominators(&graph);
+ assert_eq!(d.immediate_dominator(2), Some(0));
+ assert_eq!(d.immediate_dominator(3), Some(0)); // This used to return Some(1).
}
diff --git a/compiler/rustc_data_structures/src/lib.rs b/compiler/rustc_data_structures/src/lib.rs
index 461ec3a90..d09c026c4 100644
--- a/compiler/rustc_data_structures/src/lib.rs
+++ b/compiler/rustc_data_structures/src/lib.rs
@@ -6,45 +6,44 @@
//!
//! This API is completely unstable and subject to change.
+// tidy-alphabetical-start
+#![allow(internal_features)]
+#![allow(rustc::default_hash_types)]
+#![allow(rustc::potential_query_instability)]
+#![cfg_attr(not(bootstrap), doc(rust_logo))]
+#![cfg_attr(not(bootstrap), feature(rustdoc_internals))]
+#![deny(rustc::diagnostic_outside_of_impl)]
+#![deny(rustc::untranslatable_diagnostic)]
+#![deny(unsafe_op_in_unsafe_fn)]
#![doc(html_root_url = "https://doc.rust-lang.org/nightly/nightly-rustc/")]
+#![feature(allocator_api)]
#![feature(array_windows)]
-#![feature(associated_type_bounds)]
#![feature(auto_traits)]
#![feature(cell_leak)]
+#![feature(cfg_match)]
#![feature(core_intrinsics)]
#![feature(extend_one)]
#![feature(hash_raw_entry)]
#![feature(hasher_prefixfree_extras)]
+#![feature(lazy_cell)]
+#![feature(lint_reasons)]
+#![feature(macro_metavar_expr)]
#![feature(maybe_uninit_uninit_array)]
#![feature(min_specialization)]
+#![feature(negative_impls)]
#![feature(never_type)]
-#![feature(type_alias_impl_trait)]
-#![feature(new_uninit)]
-#![feature(lazy_cell)]
+#![feature(ptr_alignment_type)]
#![feature(rustc_attrs)]
-#![feature(negative_impls)]
+#![feature(strict_provenance)]
#![feature(test)]
#![feature(thread_id_value)]
-#![feature(vec_into_raw_parts)]
-#![feature(allocator_api)]
-#![feature(get_mut_unchecked)]
-#![feature(lint_reasons)]
+#![feature(type_alias_impl_trait)]
#![feature(unwrap_infallible)]
-#![feature(strict_provenance)]
-#![feature(ptr_alignment_type)]
-#![feature(macro_metavar_expr)]
-#![allow(rustc::default_hash_types)]
-#![allow(rustc::potential_query_instability)]
-#![deny(rustc::untranslatable_diagnostic)]
-#![deny(rustc::diagnostic_outside_of_impl)]
-#![allow(internal_features)]
-#![deny(unsafe_op_in_unsafe_fn)]
+// tidy-alphabetical-end
#[macro_use]
extern crate tracing;
#[macro_use]
-extern crate cfg_if;
-#[macro_use]
extern crate rustc_macros;
use std::fmt;
@@ -63,7 +62,6 @@ pub mod binary_search_util;
pub mod captures;
pub mod flat_map_in_place;
pub mod flock;
-pub mod functor;
pub mod fx;
pub mod graph;
pub mod intern;
@@ -129,6 +127,9 @@ impl<F: FnOnce()> Drop for OnDrop<F> {
}
}
+/// This is a marker for a fatal compiler error used with `resume_unwind`.
+pub struct FatalErrorMarker;
+
/// Turns a closure that takes an `&mut Formatter` into something that can be display-formatted.
pub fn make_display(f: impl Fn(&mut fmt::Formatter<'_>) -> fmt::Result) -> impl fmt::Display {
struct Printer<F> {
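
`FatalErrorMarker` is meant to travel as a `resume_unwind` payload so callers can tell "a fatal error was already reported" apart from an ordinary panic (the parallel helpers further down check for it). A minimal std-only sketch of that pattern; the struct name mirrors the one added above, everything else is illustrative:

    use std::panic::{catch_unwind, resume_unwind};

    struct FatalErrorMarker;

    fn main() {
        // Simulate a task that signals a fatal-but-already-reported error.
        let result = catch_unwind(|| {
            resume_unwind(Box::new(FatalErrorMarker));
        });

        match result {
            // Fatal error: the diagnostic was already emitted, stay quiet.
            Err(payload) if payload.is::<FatalErrorMarker>() => {
                eprintln!("fatal error already reported; unwinding quietly");
            }
            // Any other panic keeps propagating.
            Err(payload) => resume_unwind(payload),
            Ok(()) => {}
        }
    }
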
diff --git a/compiler/rustc_data_structures/src/marker.rs b/compiler/rustc_data_structures/src/marker.rs
index b067f9d45..266e54604 100644
--- a/compiler/rustc_data_structures/src/marker.rs
+++ b/compiler/rustc_data_structures/src/marker.rs
@@ -1,11 +1,12 @@
-cfg_if!(
- if #[cfg(not(parallel_compiler))] {
+cfg_match! {
+ cfg(not(parallel_compiler)) => {
pub auto trait DynSend {}
pub auto trait DynSync {}
impl<T> DynSend for T {}
impl<T> DynSync for T {}
- } else {
+ }
+ _ => {
#[rustc_on_unimplemented(
message = "`{Self}` doesn't implement `DynSend`. \
Add it to `rustc_data_structures::marker` or use `IntoDynSyncSend` if it's already `Send`"
@@ -48,13 +49,10 @@ cfg_if!(
[std::io::StdoutLock<'_>]
[std::io::StderrLock<'_>]
);
- cfg_if!(
- // Consistent with `std`
- // `os_imp::Env` is `!Send` in these platforms
- if #[cfg(any(unix, target_os = "hermit", target_os = "wasi", target_os = "solid_asp3"))] {
- impl !DynSend for std::env::VarsOs {}
- }
- );
+
+ #[cfg(any(unix, target_os = "hermit", target_os = "wasi", target_os = "solid_asp3"))]
+ // Consistent with `std`, `os_imp::Env` is `!Send` on these platforms
+ impl !DynSend for std::env::VarsOs {}
macro_rules! already_send {
($([$ty: ty])*) => {
@@ -123,13 +121,10 @@ cfg_if!(
[std::sync::mpsc::Receiver<T> where T]
[std::sync::mpsc::Sender<T> where T]
);
- cfg_if!(
- // Consistent with `std`
- // `os_imp::Env` is `!Sync` in these platforms
- if #[cfg(any(unix, target_os = "hermit", target_os = "wasi", target_os = "solid_asp3"))] {
- impl !DynSync for std::env::VarsOs {}
- }
- );
+
+ #[cfg(any(unix, target_os = "hermit", target_os = "wasi", target_os = "solid_asp3"))]
+ // Consistent with `std`, `os_imp::Env` is `!Sync` on these platforms
+ impl !DynSync for std::env::VarsOs {}
macro_rules! already_sync {
($([$ty: ty])*) => {
@@ -143,7 +138,6 @@ cfg_if!(
[std::sync::atomic::AtomicUsize]
[std::sync::atomic::AtomicU8]
[std::sync::atomic::AtomicU32]
- [std::sync::atomic::AtomicU64]
[std::backtrace::Backtrace]
[std::io::Error]
[std::fs::File]
@@ -153,6 +147,18 @@ cfg_if!(
[crate::owned_slice::OwnedSlice]
);
+ // PowerPC and MIPS platforms with 32-bit pointers do not
+ // have an `AtomicU64` type.
+ #[cfg(not(any(target_arch = "powerpc", target_arch = "mips")))]
+ already_sync!(
+ [std::sync::atomic::AtomicU64]
+ );
+
+ #[cfg(any(target_arch = "powerpc", target_arch = "mips"))]
+ already_sync!(
+ [portable_atomic::AtomicU64]
+ );
+
macro_rules! impl_dyn_sync {
($($($attr: meta)* [$ty: ty where $($generics2: tt)*])*) => {
$(unsafe impl<$($generics2)*> DynSync for $ty {})*
@@ -183,7 +189,7 @@ cfg_if!(
[thin_vec::ThinVec<T> where T: DynSync]
);
}
-);
+}
pub fn assert_dyn_sync<T: ?Sized + DynSync>() {}
pub fn assert_dyn_send<T: ?Sized + DynSend>() {}
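
Both `DynSend` and `DynSync` follow the same shape: in the non-parallel build they are auto traits implemented for everything, while the parallel build keeps the auto traits but carves out negative impls for the few types that must not migrate across threads. A minimal nightly-only sketch of that shape (a hypothetical stand-alone version, not the real macro-generated impls):

    #![feature(auto_traits, negative_impls)]

    // Auto trait: every type gets it unless explicitly opted out.
    pub auto trait DynSend {}

    // Opt out a type that std also treats as `!Send` on some platforms.
    impl !DynSend for std::env::VarsOs {}

    pub fn assert_dyn_send<T: ?Sized + DynSend>() {}

    fn main() {
        assert_dyn_send::<Vec<u8>>();
        // assert_dyn_send::<std::env::VarsOs>(); // would fail to compile
    }
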
diff --git a/compiler/rustc_data_structures/src/profiling.rs b/compiler/rustc_data_structures/src/profiling.rs
index e688feb5f..ef7375a73 100644
--- a/compiler/rustc_data_structures/src/profiling.rs
+++ b/compiler/rustc_data_structures/src/profiling.rs
@@ -859,8 +859,8 @@ fn get_thread_id() -> u32 {
}
// Memory reporting
-cfg_if! {
- if #[cfg(windows)] {
+cfg_match! {
+ cfg(windows) => {
pub fn get_resident_set_size() -> Option<usize> {
use std::mem;
@@ -885,7 +885,8 @@ cfg_if! {
Some(pmc.WorkingSetSize)
}
- } else if #[cfg(target_os = "macos")] {
+ }
+ cfg(target_os = "macos") => {
pub fn get_resident_set_size() -> Option<usize> {
use libc::{c_int, c_void, getpid, proc_pidinfo, proc_taskinfo, PROC_PIDTASKINFO};
use std::mem;
@@ -903,7 +904,8 @@ cfg_if! {
}
}
}
- } else if #[cfg(unix)] {
+ }
+ cfg(unix) => {
pub fn get_resident_set_size() -> Option<usize> {
let field = 1;
let contents = fs::read("/proc/self/statm").ok()?;
@@ -912,7 +914,8 @@ cfg_if! {
let npages = s.parse::<usize>().ok()?;
Some(npages * 4096)
}
- } else {
+ }
+ _ => {
pub fn get_resident_set_size() -> Option<usize> {
None
}
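
The unix branch above derives the resident set size from `/proc/self/statm`: the file lists sizes in pages, field index 1 (`let field = 1`) is the resident set, and the code multiplies by an assumed 4096-byte page. A standalone sketch of that parsing, under the same page-size assumption:

    use std::fs;

    // Resident set size in bytes, read from /proc/self/statm (Linux only).
    // Field 1 is resident pages; 4096 bytes per page is assumed here,
    // matching the code above rather than querying the real page size.
    fn resident_set_size_bytes() -> Option<usize> {
        let contents = fs::read_to_string("/proc/self/statm").ok()?;
        let pages = contents.split_whitespace().nth(1)?.parse::<usize>().ok()?;
        Some(pages * 4096)
    }

    fn main() {
        println!("rss: {:?} bytes", resident_set_size_bytes());
    }
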
diff --git a/compiler/rustc_data_structures/src/sync.rs b/compiler/rustc_data_structures/src/sync.rs
index cca043ba0..43221d70e 100644
--- a/compiler/rustc_data_structures/src/sync.rs
+++ b/compiler/rustc_data_structures/src/sync.rs
@@ -54,7 +54,7 @@ pub use worker_local::{Registry, WorkerLocal};
mod parallel;
#[cfg(parallel_compiler)]
pub use parallel::scope;
-pub use parallel::{join, par_for_each_in, par_map, parallel_guard};
+pub use parallel::{join, par_for_each_in, par_map, parallel_guard, try_par_for_each_in};
pub use std::sync::atomic::Ordering;
pub use std::sync::atomic::Ordering::SeqCst;
@@ -109,8 +109,8 @@ mod mode {
pub use mode::{is_dyn_thread_safe, set_dyn_thread_safe_mode};
-cfg_if! {
- if #[cfg(not(parallel_compiler))] {
+cfg_match! {
+ cfg(not(parallel_compiler)) => {
use std::ops::Add;
use std::cell::Cell;
@@ -251,7 +251,8 @@ cfg_if! {
MTLock(self.0.clone())
}
}
- } else {
+ }
+ _ => {
pub use std::marker::Send as Send;
pub use std::marker::Sync as Sync;
@@ -264,7 +265,15 @@ cfg_if! {
pub use std::sync::OnceLock;
- pub use std::sync::atomic::{AtomicBool, AtomicUsize, AtomicU32, AtomicU64};
+ pub use std::sync::atomic::{AtomicBool, AtomicUsize, AtomicU32};
+
+ // PowerPC and MIPS platforms with 32-bit pointers do not
+ // have an `AtomicU64` type.
+ #[cfg(not(any(target_arch = "powerpc", target_arch = "mips")))]
+ pub use std::sync::atomic::AtomicU64;
+
+ #[cfg(any(target_arch = "powerpc", target_arch = "mips"))]
+ pub use portable_atomic::AtomicU64;
pub use std::sync::Arc as Lrc;
pub use std::sync::Weak as Weak;
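
The `AtomicU64` re-export above keeps one name for two implementations: `std`'s native type on targets with 64-bit atomics, and the `portable_atomic` crate's software fallback on 32-bit PowerPC and MIPS. A minimal sketch of the pattern, assuming `portable-atomic` is declared as a dependency for those targets:

    use std::sync::atomic::Ordering;

    // Whichever side of the cfg is taken, callers see a single `AtomicU64`.
    #[cfg(not(any(target_arch = "powerpc", target_arch = "mips")))]
    pub use std::sync::atomic::AtomicU64;
    #[cfg(any(target_arch = "powerpc", target_arch = "mips"))]
    pub use portable_atomic::AtomicU64;

    pub fn bump(counter: &AtomicU64) -> u64 {
        counter.fetch_add(1, Ordering::Relaxed)
    }
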
diff --git a/compiler/rustc_data_structures/src/sync/parallel.rs b/compiler/rustc_data_structures/src/sync/parallel.rs
index 1944ddfb7..7783de57f 100644
--- a/compiler/rustc_data_structures/src/sync/parallel.rs
+++ b/compiler/rustc_data_structures/src/sync/parallel.rs
@@ -3,6 +3,8 @@
#![allow(dead_code)]
+use crate::sync::IntoDynSyncSend;
+use crate::FatalErrorMarker;
use parking_lot::Mutex;
use std::any::Any;
use std::panic::{catch_unwind, resume_unwind, AssertUnwindSafe};
@@ -18,14 +20,17 @@ pub use enabled::*;
/// continuing with unwinding. It's also used for the non-parallel code to ensure error message
/// output match the parallel compiler for testing purposes.
pub struct ParallelGuard {
- panic: Mutex<Option<Box<dyn Any + Send + 'static>>>,
+ panic: Mutex<Option<IntoDynSyncSend<Box<dyn Any + Send + 'static>>>>,
}
impl ParallelGuard {
pub fn run<R>(&self, f: impl FnOnce() -> R) -> Option<R> {
catch_unwind(AssertUnwindSafe(f))
.map_err(|err| {
- *self.panic.lock() = Some(err);
+ let mut panic = self.panic.lock();
+ if panic.is_none() || !(*err).is::<FatalErrorMarker>() {
+ *panic = Some(IntoDynSyncSend(err));
+ }
})
.ok()
}
@@ -37,7 +42,7 @@ impl ParallelGuard {
pub fn parallel_guard<R>(f: impl FnOnce(&ParallelGuard) -> R) -> R {
let guard = ParallelGuard { panic: Mutex::new(None) };
let ret = f(&guard);
- if let Some(panic) = guard.panic.into_inner() {
+ if let Some(IntoDynSyncSend(panic)) = guard.panic.into_inner() {
resume_unwind(panic);
}
ret
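
The pattern above defers panics: each unit of work runs inside `ParallelGuard::run`, the first interesting payload is stashed, and `parallel_guard` re-raises it only after all sibling work has been attempted, so one failing task no longer cuts the others short. A minimal std-only sketch of that shape (the real code uses `parking_lot::Mutex`, wraps the payload in `IntoDynSyncSend`, and adds precedence logic around `FatalErrorMarker` payloads):

    use std::any::Any;
    use std::panic::{catch_unwind, resume_unwind, AssertUnwindSafe};
    use std::sync::Mutex;

    struct Guard {
        panic: Mutex<Option<Box<dyn Any + Send + 'static>>>,
    }

    impl Guard {
        // Run one unit of work; on panic, remember the first payload and keep going.
        fn run<R>(&self, f: impl FnOnce() -> R) -> Option<R> {
            catch_unwind(AssertUnwindSafe(f))
                .map_err(|err| {
                    let mut slot = self.panic.lock().unwrap();
                    if slot.is_none() {
                        *slot = Some(err);
                    }
                })
                .ok()
        }
    }

    // Run all the work, then replay the remembered panic (if any) at the end.
    fn guarded<R>(f: impl FnOnce(&Guard) -> R) -> R {
        let guard = Guard { panic: Mutex::new(None) };
        let ret = f(&guard);
        if let Some(payload) = guard.panic.into_inner().unwrap() {
            resume_unwind(payload);
        }
        ret
    }
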
@@ -77,6 +82,15 @@ mod disabled {
})
}
+ pub fn try_par_for_each_in<T: IntoIterator, E>(
+ t: T,
+ mut for_each: impl FnMut(T::Item) -> Result<(), E>,
+ ) -> Result<(), E> {
+ parallel_guard(|guard| {
+ t.into_iter().filter_map(|i| guard.run(|| for_each(i))).fold(Ok(()), Result::and)
+ })
+ }
+
pub fn par_map<T: IntoIterator, R, C: FromIterator<R>>(
t: T,
mut map: impl FnMut(<<T as IntoIterator>::IntoIter as Iterator>::Item) -> R,
@@ -97,14 +111,20 @@ mod enabled {
parallel!(impl $fblock [$block, $($c,)*] [$($rest),*])
};
(impl $fblock:block [$($blocks:expr,)*] []) => {
- ::rustc_data_structures::sync::scope(|s| {
- $(let block = rustc_data_structures::sync::FromDyn::from(|| $blocks);
- s.spawn(move |_| block.into_inner()());)*
- (|| $fblock)();
+ $crate::sync::parallel_guard(|guard| {
+ $crate::sync::scope(|s| {
+ $(
+ let block = $crate::sync::FromDyn::from(|| $blocks);
+ s.spawn(move |_| {
+ guard.run(move || block.into_inner()());
+ });
+ )*
+ guard.run(|| $fblock);
+ });
});
};
($fblock:block, $($blocks:block),*) => {
- if rustc_data_structures::sync::is_dyn_thread_safe() {
+ if $crate::sync::is_dyn_thread_safe() {
// Reverse the order of the later blocks since Rayon executes them in reverse order
// when using a single thread. This ensures the execution order matches that
// of a single threaded rustc.
@@ -137,11 +157,13 @@ mod enabled {
if mode::is_dyn_thread_safe() {
let oper_a = FromDyn::from(oper_a);
let oper_b = FromDyn::from(oper_b);
- let (a, b) = rayon::join(
- move || FromDyn::from(oper_a.into_inner()()),
- move || FromDyn::from(oper_b.into_inner()()),
- );
- (a.into_inner(), b.into_inner())
+ let (a, b) = parallel_guard(|guard| {
+ rayon::join(
+ move || guard.run(move || FromDyn::from(oper_a.into_inner()())),
+ move || guard.run(move || FromDyn::from(oper_b.into_inner()())),
+ )
+ });
+ (a.unwrap().into_inner(), b.unwrap().into_inner())
} else {
super::disabled::join(oper_a, oper_b)
}
@@ -167,6 +189,25 @@ mod enabled {
});
}
+ pub fn try_par_for_each_in<
+ T: IntoIterator + IntoParallelIterator<Item = <T as IntoIterator>::Item>,
+ E: Send,
+ >(
+ t: T,
+ for_each: impl Fn(<T as IntoIterator>::Item) -> Result<(), E> + DynSync + DynSend,
+ ) -> Result<(), E> {
+ parallel_guard(|guard| {
+ if mode::is_dyn_thread_safe() {
+ let for_each = FromDyn::from(for_each);
+ t.into_par_iter()
+ .filter_map(|i| guard.run(|| for_each(i)))
+ .reduce(|| Ok(()), Result::and)
+ } else {
+ t.into_iter().filter_map(|i| guard.run(|| for_each(i))).fold(Ok(()), Result::and)
+ }
+ })
+ }
+
pub fn par_map<
I,
T: IntoIterator<Item = I> + IntoParallelIterator<Item = I>,