path: root/compiler/rustc_query_system
author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-30 18:31:44 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-30 18:31:44 +0000
commit     c23a457e72abe608715ac76f076f47dc42af07a5 (patch)
tree       2772049aaf84b5c9d0ed12ec8d86812f7a7904b6 /compiler/rustc_query_system
parent     Releasing progress-linux version 1.73.0+dfsg1-1~progress7.99u1. (diff)
Merging upstream version 1.74.1+dfsg1.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'compiler/rustc_query_system')
-rw-r--r--  compiler/rustc_query_system/src/dep_graph/debug.rs        14
-rw-r--r--  compiler/rustc_query_system/src/dep_graph/dep_node.rs     92
-rw-r--r--  compiler/rustc_query_system/src/dep_graph/edges.rs        73
-rw-r--r--  compiler/rustc_query_system/src/dep_graph/graph.rs       202
-rw-r--r--  compiler/rustc_query_system/src/dep_graph/mod.rs          80
-rw-r--r--  compiler/rustc_query_system/src/dep_graph/query.rs        22
-rw-r--r--  compiler/rustc_query_system/src/dep_graph/serialized.rs  412
-rw-r--r--  compiler/rustc_query_system/src/ich/impls_syntax.rs       49
-rw-r--r--  compiler/rustc_query_system/src/lib.rs                     1
-rw-r--r--  compiler/rustc_query_system/src/query/caches.rs           26
-rw-r--r--  compiler/rustc_query_system/src/query/config.rs           12
-rw-r--r--  compiler/rustc_query_system/src/query/job.rs             122
-rw-r--r--  compiler/rustc_query_system/src/query/mod.rs              10
-rw-r--r--  compiler/rustc_query_system/src/query/plumbing.rs        167
-rw-r--r--  compiler/rustc_query_system/src/values.rs                 12
15 files changed, 817 insertions, 477 deletions
diff --git a/compiler/rustc_query_system/src/dep_graph/debug.rs b/compiler/rustc_query_system/src/dep_graph/debug.rs
index c2c9600f5..103a6c01b 100644
--- a/compiler/rustc_query_system/src/dep_graph/debug.rs
+++ b/compiler/rustc_query_system/src/dep_graph/debug.rs
@@ -1,6 +1,6 @@
//! Code for debugging the dep-graph.
-use super::{DepKind, DepNode, DepNodeIndex};
+use super::{DepNode, DepNodeIndex};
use rustc_data_structures::fx::FxHashMap;
use rustc_data_structures::sync::Lock;
use std::error::Error;
@@ -28,7 +28,7 @@ impl DepNodeFilter {
}
/// Tests whether `node` meets the filter, returning true if so.
- pub fn test<K: DepKind>(&self, node: &DepNode<K>) -> bool {
+ pub fn test(&self, node: &DepNode) -> bool {
let debug_str = format!("{node:?}");
self.text.split('&').map(|s| s.trim()).all(|f| debug_str.contains(f))
}
@@ -36,14 +36,14 @@ impl DepNodeFilter {
/// A filter like `F -> G` where `F` and `G` are valid dep-node
/// filters. This can be used to test the source/target independently.
-pub struct EdgeFilter<K: DepKind> {
+pub struct EdgeFilter {
pub source: DepNodeFilter,
pub target: DepNodeFilter,
- pub index_to_node: Lock<FxHashMap<DepNodeIndex, DepNode<K>>>,
+ pub index_to_node: Lock<FxHashMap<DepNodeIndex, DepNode>>,
}
-impl<K: DepKind> EdgeFilter<K> {
- pub fn new(test: &str) -> Result<EdgeFilter<K>, Box<dyn Error>> {
+impl EdgeFilter {
+ pub fn new(test: &str) -> Result<EdgeFilter, Box<dyn Error>> {
let parts: Vec<_> = test.split("->").collect();
if parts.len() != 2 {
Err(format!("expected a filter like `a&b -> c&d`, not `{test}`").into())
@@ -57,7 +57,7 @@ impl<K: DepKind> EdgeFilter<K> {
}
#[cfg(debug_assertions)]
- pub fn test(&self, source: &DepNode<K>, target: &DepNode<K>) -> bool {
+ pub fn test(&self, source: &DepNode, target: &DepNode) -> bool {
self.source.test(source) && self.target.test(target)
}
}
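
The only change in debug.rs is dropping the `DepKind` type parameter; the filter semantics are untouched. A minimal standalone sketch of that matching logic (the debug strings here are made up for illustration):

// Mirrors DepNodeFilter::test above: every '&'-separated fragment of the
// filter must appear in the node's Debug output. EdgeFilter::new splits a
// string like "a&b -> c&d" on "->" into a source filter and a target filter.
fn node_matches(filter: &str, debug_str: &str) -> bool {
    filter.split('&').map(str::trim).all(|f| debug_str.contains(f))
}

fn main() {
    let node = "DepNode { kind: typeck, hash: 0xabcd }"; // hypothetical Debug output
    assert!(node_matches("typeck & abcd", node));
    assert!(!node_matches("typeck & layout", node));
}
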
diff --git a/compiler/rustc_query_system/src/dep_graph/dep_node.rs b/compiler/rustc_query_system/src/dep_graph/dep_node.rs
index 39a4cb1b1..17f96896a 100644
--- a/compiler/rustc_query_system/src/dep_graph/dep_node.rs
+++ b/compiler/rustc_query_system/src/dep_graph/dep_node.rs
@@ -42,36 +42,84 @@
//! `DefId` it was computed from. In other cases, too much information gets
//! lost during fingerprint computation.
-use super::{DepContext, DepKind, FingerprintStyle};
+use super::{DepContext, FingerprintStyle};
use crate::ich::StableHashingContext;
use rustc_data_structures::fingerprint::{Fingerprint, PackedFingerprint};
use rustc_data_structures::stable_hasher::{HashStable, StableHasher, StableOrd, ToStableHashKey};
+use rustc_data_structures::AtomicRef;
use rustc_hir::definitions::DefPathHash;
use std::fmt;
use std::hash::Hash;
-#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Encodable, Decodable)]
-pub struct DepNode<K> {
- pub kind: K,
+/// This serves as an index into arrays built by `make_dep_kind_array`.
+#[derive(Clone, Copy, PartialEq, Eq, Hash)]
+pub struct DepKind {
+ variant: u16,
+}
+
+impl DepKind {
+ #[inline]
+ pub const fn new(variant: u16) -> Self {
+ Self { variant }
+ }
+
+ #[inline]
+ pub const fn as_inner(&self) -> u16 {
+ self.variant
+ }
+
+ #[inline]
+ pub const fn as_usize(&self) -> usize {
+ self.variant as usize
+ }
+}
+
+static_assert_size!(DepKind, 2);
+
+pub fn default_dep_kind_debug(kind: DepKind, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("DepKind").field("variant", &kind.variant).finish()
+}
+
+pub static DEP_KIND_DEBUG: AtomicRef<fn(DepKind, &mut fmt::Formatter<'_>) -> fmt::Result> =
+ AtomicRef::new(&(default_dep_kind_debug as fn(_, &mut fmt::Formatter<'_>) -> _));
+
+impl fmt::Debug for DepKind {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ (*DEP_KIND_DEBUG)(*self, f)
+ }
+}
+
+#[derive(Clone, Copy, PartialEq, Eq, Hash)]
+pub struct DepNode {
+ pub kind: DepKind,
pub hash: PackedFingerprint,
}
-impl<K: DepKind> DepNode<K> {
+// We keep a lot of `DepNode`s in memory during compilation. It's not
+// required that their size stay the same, but we don't want to change
+// it inadvertently. This assert just ensures we're aware of any change.
+#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
+static_assert_size!(DepNode, 18);
+
+#[cfg(not(any(target_arch = "x86", target_arch = "x86_64")))]
+static_assert_size!(DepNode, 24);
+
+impl DepNode {
/// Creates a new, parameterless DepNode. This method will assert
/// that the DepNode corresponding to the given DepKind actually
/// does not require any parameters.
- pub fn new_no_params<Tcx>(tcx: Tcx, kind: K) -> DepNode<K>
+ pub fn new_no_params<Tcx>(tcx: Tcx, kind: DepKind) -> DepNode
where
- Tcx: super::DepContext<DepKind = K>,
+ Tcx: super::DepContext,
{
debug_assert_eq!(tcx.fingerprint_style(kind), FingerprintStyle::Unit);
DepNode { kind, hash: Fingerprint::ZERO.into() }
}
- pub fn construct<Tcx, Key>(tcx: Tcx, kind: K, arg: &Key) -> DepNode<K>
+ pub fn construct<Tcx, Key>(tcx: Tcx, kind: DepKind, arg: &Key) -> DepNode
where
- Tcx: super::DepContext<DepKind = K>,
+ Tcx: super::DepContext,
Key: DepNodeParams<Tcx>,
{
let hash = arg.to_fingerprint(tcx);
@@ -93,18 +141,25 @@ impl<K: DepKind> DepNode<K> {
/// Construct a DepNode from the given DepKind and DefPathHash. This
/// method will assert that the given DepKind actually requires a
/// single DefId/DefPathHash parameter.
- pub fn from_def_path_hash<Tcx>(tcx: Tcx, def_path_hash: DefPathHash, kind: K) -> Self
+ pub fn from_def_path_hash<Tcx>(tcx: Tcx, def_path_hash: DefPathHash, kind: DepKind) -> Self
where
- Tcx: super::DepContext<DepKind = K>,
+ Tcx: super::DepContext,
{
debug_assert!(tcx.fingerprint_style(kind) == FingerprintStyle::DefPathHash);
DepNode { kind, hash: def_path_hash.0.into() }
}
}
-impl<K: DepKind> fmt::Debug for DepNode<K> {
+pub fn default_dep_node_debug(node: DepNode, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("DepNode").field("kind", &node.kind).field("hash", &node.hash).finish()
+}
+
+pub static DEP_NODE_DEBUG: AtomicRef<fn(DepNode, &mut fmt::Formatter<'_>) -> fmt::Result> =
+ AtomicRef::new(&(default_dep_node_debug as fn(_, &mut fmt::Formatter<'_>) -> _));
+
+impl fmt::Debug for DepNode {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- K::debug_node(self, f)
+ (*DEP_NODE_DEBUG)(*self, f)
}
}
@@ -129,7 +184,7 @@ pub trait DepNodeParams<Tcx: DepContext>: fmt::Debug + Sized {
/// `fingerprint_style()` is not `FingerprintStyle::Opaque`.
/// It is always valid to return `None` here, in which case incremental
/// compilation will treat the query as having changed instead of forcing it.
- fn recover(tcx: Tcx, dep_node: &DepNode<Tcx::DepKind>) -> Option<Self>;
+ fn recover(tcx: Tcx, dep_node: &DepNode) -> Option<Self>;
}
impl<Tcx: DepContext, T> DepNodeParams<Tcx> for T
@@ -156,7 +211,7 @@ where
}
#[inline(always)]
- default fn recover(_: Tcx, _: &DepNode<Tcx::DepKind>) -> Option<Self> {
+ default fn recover(_: Tcx, _: &DepNode) -> Option<Self> {
None
}
}
@@ -216,10 +271,13 @@ pub struct DepKindStruct<Tcx: DepContext> {
/// with kind `MirValidated`, we know that the GUID/fingerprint of the `DepNode`
/// is actually a `DefPathHash`, and can therefore just look up the corresponding
/// `DefId` in `tcx.def_path_hash_to_def_id`.
- pub force_from_dep_node: Option<fn(tcx: Tcx, dep_node: DepNode<Tcx::DepKind>) -> bool>,
+ pub force_from_dep_node: Option<fn(tcx: Tcx, dep_node: DepNode) -> bool>,
/// Invoke a query to put the on-disk cached value in memory.
- pub try_load_from_on_disk_cache: Option<fn(Tcx, DepNode<Tcx::DepKind>)>,
+ pub try_load_from_on_disk_cache: Option<fn(Tcx, DepNode)>,
+
+ /// The name of this dep kind.
+ pub name: &'static &'static str,
}
/// A "work product" corresponds to a `.o` (or other) file that we
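
A standalone sketch of where the 18-byte assert above comes from, assuming (as the arch-specific asserts suggest) that the 16-byte `PackedFingerprint` is byte-aligned on x86/x86_64 and 8-byte-aligned elsewhere; these mirror types are illustrative, not the real definitions:

// 16-byte fingerprint with alignment 1, as presumed on x86/x86_64.
#[repr(C, packed)]
struct PackedMirror([u64; 2]);

// 16-byte fingerprint with natural 8-byte alignment, as presumed elsewhere.
#[repr(C)]
struct AlignedMirror([u64; 2]);

#[repr(C)]
struct DepNodePacked { kind: u16, hash: PackedMirror } // 2 + 16 = 18 bytes

#[repr(C)]
struct DepNodeAligned { kind: u16, hash: AlignedMirror } // 2 + 6 padding + 16 = 24 bytes

fn main() {
    assert_eq!(std::mem::size_of::<DepNodePacked>(), 18);
    assert_eq!(std::mem::size_of::<DepNodeAligned>(), 24);
}
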
diff --git a/compiler/rustc_query_system/src/dep_graph/edges.rs b/compiler/rustc_query_system/src/dep_graph/edges.rs
new file mode 100644
index 000000000..6ba3924f6
--- /dev/null
+++ b/compiler/rustc_query_system/src/dep_graph/edges.rs
@@ -0,0 +1,73 @@
+use crate::dep_graph::DepNodeIndex;
+use smallvec::SmallVec;
+use std::hash::{Hash, Hasher};
+use std::iter::Extend;
+use std::ops::Deref;
+
+#[derive(Default, Debug)]
+pub struct EdgesVec {
+ max: u32,
+ edges: SmallVec<[DepNodeIndex; EdgesVec::INLINE_CAPACITY]>,
+}
+
+impl Hash for EdgesVec {
+ #[inline]
+ fn hash<H: Hasher>(&self, hasher: &mut H) {
+ Hash::hash(&self.edges, hasher)
+ }
+}
+
+impl EdgesVec {
+ pub const INLINE_CAPACITY: usize = 8;
+
+ #[inline]
+ pub fn new() -> Self {
+ Self::default()
+ }
+
+ #[inline]
+ pub fn push(&mut self, edge: DepNodeIndex) {
+ self.max = self.max.max(edge.as_u32());
+ self.edges.push(edge);
+ }
+
+ #[inline]
+ pub fn max_index(&self) -> u32 {
+ self.max
+ }
+}
+
+impl Deref for EdgesVec {
+ type Target = [DepNodeIndex];
+
+ #[inline]
+ fn deref(&self) -> &Self::Target {
+ self.edges.as_slice()
+ }
+}
+
+impl FromIterator<DepNodeIndex> for EdgesVec {
+ #[inline]
+ fn from_iter<T>(iter: T) -> Self
+ where
+ T: IntoIterator<Item = DepNodeIndex>,
+ {
+ let mut vec = EdgesVec::new();
+ for index in iter {
+ vec.push(index)
+ }
+ vec
+ }
+}
+
+impl Extend<DepNodeIndex> for EdgesVec {
+ #[inline]
+ fn extend<T>(&mut self, iter: T)
+ where
+ T: IntoIterator<Item = DepNodeIndex>,
+ {
+ for elem in iter {
+ self.push(elem);
+ }
+ }
+}
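
`EdgesVec` replaces the plain `SmallVec` used before (see graph.rs below) and additionally tracks a running maximum. That maximum is what lets the serializer pick one byte width for a node's whole edge list (see `SerializedNodeHeader::new` in serialized.rs further down). A small sketch of that arithmetic:

fn main() {
    // Hypothetical EdgesVec::max_index() value: 70_000 needs 17 bits.
    let max_index: u32 = 70_000;
    let free_bytes = max_index.leading_zeros() as usize / 8;
    // One of the four index bytes is always zero, so 3 bytes per edge suffice.
    let bytes_per_index = (4 - free_bytes).max(1);
    assert_eq!(bytes_per_index, 3);
}
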
diff --git a/compiler/rustc_query_system/src/dep_graph/graph.rs b/compiler/rustc_query_system/src/dep_graph/graph.rs
index 30422ea11..c7e92d7b2 100644
--- a/compiler/rustc_query_system/src/dep_graph/graph.rs
+++ b/compiler/rustc_query_system/src/dep_graph/graph.rs
@@ -8,7 +8,6 @@ use rustc_data_structures::sync::{AtomicU32, AtomicU64, Lock, Lrc, Ordering};
use rustc_data_structures::unord::UnordMap;
use rustc_index::IndexVec;
use rustc_serialize::opaque::{FileEncodeResult, FileEncoder};
-use smallvec::{smallvec, SmallVec};
use std::assert_matches::assert_matches;
use std::collections::hash_map::Entry;
use std::fmt::Debug;
@@ -18,7 +17,8 @@ use std::sync::atomic::Ordering::Relaxed;
use super::query::DepGraphQuery;
use super::serialized::{GraphEncoder, SerializedDepGraph, SerializedDepNodeIndex};
-use super::{DepContext, DepKind, DepNode, HasDepContext, WorkProductId};
+use super::{DepContext, DepKind, DepNode, Deps, HasDepContext, WorkProductId};
+use crate::dep_graph::EdgesVec;
use crate::ich::StableHashingContext;
use crate::query::{QueryContext, QuerySideEffects};
@@ -26,8 +26,8 @@ use crate::query::{QueryContext, QuerySideEffects};
use {super::debug::EdgeFilter, std::env};
#[derive(Clone)]
-pub struct DepGraph<K: DepKind> {
- data: Option<Lrc<DepGraphData<K>>>,
+pub struct DepGraph<D: Deps> {
+ data: Option<Lrc<DepGraphData<D>>>,
/// This field is used for assigning DepNodeIndices when running in
/// non-incremental mode. Even in non-incremental mode we make sure that
@@ -74,16 +74,16 @@ impl DepNodeColor {
}
}
-pub struct DepGraphData<K: DepKind> {
+pub struct DepGraphData<D: Deps> {
/// The new encoding of the dependency graph, optimized for red/green
/// tracking. The `current` field is the dependency graph of only the
/// current compilation session: We don't merge the previous dep-graph into
the current one anymore, but we do reference shared data to save space.
- current: CurrentDepGraph<K>,
+ current: CurrentDepGraph<D>,
/// The dep-graph from the previous compilation session. It contains all
/// nodes and edges as well as all fingerprints of nodes that have them.
- previous: SerializedDepGraph<K>,
+ previous: SerializedDepGraph,
colors: DepNodeColorMap,
@@ -95,12 +95,12 @@ pub struct DepGraphData<K: DepKind> {
/// this map. We can later look for and extract that data.
previous_work_products: WorkProductMap,
- dep_node_debug: Lock<FxHashMap<DepNode<K>, String>>,
+ dep_node_debug: Lock<FxHashMap<DepNode, String>>,
/// Used by incremental compilation tests to assert that
/// a particular query result was decoded from disk
/// (not just marked green)
- debug_loaded_from_disk: Lock<FxHashSet<DepNode<K>>>,
+ debug_loaded_from_disk: Lock<FxHashSet<DepNode>>,
}
pub fn hash_result<R>(hcx: &mut StableHashingContext<'_>, result: &R) -> Fingerprint
@@ -112,15 +112,15 @@ where
stable_hasher.finish()
}
-impl<K: DepKind> DepGraph<K> {
+impl<D: Deps> DepGraph<D> {
pub fn new(
profiler: &SelfProfilerRef,
- prev_graph: SerializedDepGraph<K>,
+ prev_graph: SerializedDepGraph,
prev_work_products: WorkProductMap,
encoder: FileEncoder,
record_graph: bool,
record_stats: bool,
- ) -> DepGraph<K> {
+ ) -> DepGraph<D> {
let prev_graph_node_count = prev_graph.node_count();
let current = CurrentDepGraph::new(
@@ -136,8 +136,8 @@ impl<K: DepKind> DepGraph<K> {
// Instantiate a dependency-less node only once for anonymous queries.
let _green_node_index = current.intern_new_node(
profiler,
- DepNode { kind: DepKind::NULL, hash: current.anon_id_seed.into() },
- smallvec![],
+ DepNode { kind: D::DEP_KIND_NULL, hash: current.anon_id_seed.into() },
+ EdgesVec::new(),
Fingerprint::ZERO,
);
assert_eq!(_green_node_index, DepNodeIndex::SINGLETON_DEPENDENCYLESS_ANON_NODE);
@@ -146,8 +146,8 @@ impl<K: DepKind> DepGraph<K> {
let (red_node_index, red_node_prev_index_and_color) = current.intern_node(
profiler,
&prev_graph,
- DepNode { kind: DepKind::RED, hash: Fingerprint::ZERO.into() },
- smallvec![],
+ DepNode { kind: D::DEP_KIND_RED, hash: Fingerprint::ZERO.into() },
+ EdgesVec::new(),
None,
false,
);
@@ -181,12 +181,12 @@ impl<K: DepKind> DepGraph<K> {
}
}
- pub fn new_disabled() -> DepGraph<K> {
+ pub fn new_disabled() -> DepGraph<D> {
DepGraph { data: None, virtual_dep_node_index: Lrc::new(AtomicU32::new(0)) }
}
#[inline]
- pub fn data(&self) -> Option<&DepGraphData<K>> {
+ pub fn data(&self) -> Option<&DepGraphData<D>> {
self.data.as_deref()
}
@@ -196,7 +196,7 @@ impl<K: DepKind> DepGraph<K> {
self.data.is_some()
}
- pub fn with_query(&self, f: impl Fn(&DepGraphQuery<K>)) {
+ pub fn with_query(&self, f: impl Fn(&DepGraphQuery)) {
if let Some(data) = &self.data {
data.current.encoder.borrow().with_query(f)
}
@@ -204,7 +204,7 @@ impl<K: DepKind> DepGraph<K> {
pub fn assert_ignored(&self) {
if let Some(..) = self.data {
- K::read_deps(|task_deps| {
+ D::read_deps(|task_deps| {
assert_matches!(
task_deps,
TaskDepsRef::Ignore,
@@ -218,7 +218,7 @@ impl<K: DepKind> DepGraph<K> {
where
OP: FnOnce() -> R,
{
- K::with_deps(TaskDepsRef::Ignore, op)
+ D::with_deps(TaskDepsRef::Ignore, op)
}
/// Used to wrap the deserialization of a query result from disk,
@@ -271,13 +271,13 @@ impl<K: DepKind> DepGraph<K> {
where
OP: FnOnce() -> R,
{
- K::with_deps(TaskDepsRef::Forbid, op)
+ D::with_deps(TaskDepsRef::Forbid, op)
}
#[inline(always)]
- pub fn with_task<Ctxt: HasDepContext<DepKind = K>, A: Debug, R>(
+ pub fn with_task<Ctxt: HasDepContext<Deps = D>, A: Debug, R>(
&self,
- key: DepNode<K>,
+ key: DepNode,
cx: Ctxt,
arg: A,
task: fn(Ctxt, A) -> R,
@@ -289,10 +289,10 @@ impl<K: DepKind> DepGraph<K> {
}
}
- pub fn with_anon_task<Tcx: DepContext<DepKind = K>, OP, R>(
+ pub fn with_anon_task<Tcx: DepContext<Deps = D>, OP, R>(
&self,
cx: Tcx,
- dep_kind: K,
+ dep_kind: DepKind,
op: OP,
) -> (R, DepNodeIndex)
where
@@ -305,7 +305,7 @@ impl<K: DepKind> DepGraph<K> {
}
}
-impl<K: DepKind> DepGraphData<K> {
+impl<D: Deps> DepGraphData<D> {
/// Starts a new dep-graph task. Dep-graph tasks are specified
/// using a free function (`task`) and **not** a closure -- this
/// is intentional because we want to exercise tight control over
@@ -334,9 +334,9 @@ impl<K: DepKind> DepGraphData<K> {
///
/// [rustc dev guide]: https://rustc-dev-guide.rust-lang.org/incremental-compilation.html
#[inline(always)]
- pub fn with_task<Ctxt: HasDepContext<DepKind = K>, A: Debug, R>(
+ pub fn with_task<Ctxt: HasDepContext<Deps = D>, A: Debug, R>(
&self,
- key: DepNode<K>,
+ key: DepNode,
cx: Ctxt,
arg: A,
task: fn(Ctxt, A) -> R,
@@ -354,14 +354,14 @@ impl<K: DepKind> DepGraphData<K> {
- dep-node: {key:?}"
);
- let with_deps = |task_deps| K::with_deps(task_deps, || task(cx, arg));
+ let with_deps = |task_deps| D::with_deps(task_deps, || task(cx, arg));
let (result, edges) = if cx.dep_context().is_eval_always(key.kind) {
- (with_deps(TaskDepsRef::EvalAlways), smallvec![])
+ (with_deps(TaskDepsRef::EvalAlways), EdgesVec::new())
} else {
let task_deps = Lock::new(TaskDeps {
#[cfg(debug_assertions)]
node: Some(key),
- reads: SmallVec::new(),
+ reads: EdgesVec::new(),
read_set: Default::default(),
phantom_data: PhantomData,
});
@@ -402,10 +402,10 @@ impl<K: DepKind> DepGraphData<K> {
/// Executes something within an "anonymous" task, that is, a task the
/// `DepNode` of which is determined by the list of inputs it read from.
- pub fn with_anon_task<Tcx: DepContext<DepKind = K>, OP, R>(
+ pub fn with_anon_task<Tcx: DepContext<Deps = D>, OP, R>(
&self,
cx: Tcx,
- dep_kind: K,
+ dep_kind: DepKind,
op: OP,
) -> (R, DepNodeIndex)
where
@@ -414,7 +414,7 @@ impl<K: DepKind> DepGraphData<K> {
debug_assert!(!cx.is_eval_always(dep_kind));
let task_deps = Lock::new(TaskDeps::default());
- let result = K::with_deps(TaskDepsRef::Allow(&task_deps), op);
+ let result = D::with_deps(TaskDepsRef::Allow(&task_deps), op);
let task_deps = task_deps.into_inner();
let task_deps = task_deps.reads;
@@ -461,11 +461,11 @@ impl<K: DepKind> DepGraphData<K> {
}
}
-impl<K: DepKind> DepGraph<K> {
+impl<D: Deps> DepGraph<D> {
#[inline]
pub fn read_index(&self, dep_node_index: DepNodeIndex) {
if let Some(ref data) = self.data {
- K::read_deps(|task_deps| {
+ D::read_deps(|task_deps| {
let mut task_deps = match task_deps {
TaskDepsRef::Allow(deps) => deps.lock(),
TaskDepsRef::EvalAlways => {
@@ -486,14 +486,14 @@ impl<K: DepKind> DepGraph<K> {
// As long as we only have a low number of reads we can avoid doing a hash
// insert and potentially allocating/reallocating the hashmap
- let new_read = if task_deps.reads.len() < TASK_DEPS_READS_CAP {
+ let new_read = if task_deps.reads.len() < EdgesVec::INLINE_CAPACITY {
task_deps.reads.iter().all(|other| *other != dep_node_index)
} else {
task_deps.read_set.insert(dep_node_index)
};
if new_read {
task_deps.reads.push(dep_node_index);
- if task_deps.reads.len() == TASK_DEPS_READS_CAP {
+ if task_deps.reads.len() == EdgesVec::INLINE_CAPACITY {
// Fill `read_set` with what we have so far so we can use the hashset
// next time
task_deps.read_set.extend(task_deps.reads.iter().copied());
@@ -532,9 +532,9 @@ impl<K: DepKind> DepGraph<K> {
/// FIXME: If the code is changed enough for this node to be marked before requiring the
/// caller's node, we suppose that those changes will be enough to mark this node red and
/// force a recomputation using the "normal" way.
- pub fn with_feed_task<Ctxt: DepContext<DepKind = K>, A: Debug, R: Debug>(
+ pub fn with_feed_task<Ctxt: DepContext<Deps = D>, A: Debug, R: Debug>(
&self,
- node: DepNode<K>,
+ node: DepNode,
cx: Ctxt,
key: A,
result: &R,
@@ -572,8 +572,8 @@ impl<K: DepKind> DepGraph<K> {
}
}
- let mut edges = SmallVec::new();
- K::read_deps(|task_deps| match task_deps {
+ let mut edges = EdgesVec::new();
+ D::read_deps(|task_deps| match task_deps {
TaskDepsRef::Allow(deps) => edges.extend(deps.lock().reads.iter().copied()),
TaskDepsRef::EvalAlways => {
edges.push(DepNodeIndex::FOREVER_RED_NODE);
@@ -623,27 +623,22 @@ impl<K: DepKind> DepGraph<K> {
}
}
-impl<K: DepKind> DepGraphData<K> {
+impl<D: Deps> DepGraphData<D> {
#[inline]
- pub fn dep_node_index_of_opt(&self, dep_node: &DepNode<K>) -> Option<DepNodeIndex> {
+ pub fn dep_node_index_of_opt(&self, dep_node: &DepNode) -> Option<DepNodeIndex> {
if let Some(prev_index) = self.previous.node_to_index_opt(dep_node) {
self.current.prev_index_to_index.lock()[prev_index]
} else {
- self.current
- .new_node_to_index
- .get_shard_by_value(dep_node)
- .lock()
- .get(dep_node)
- .copied()
+ self.current.new_node_to_index.lock_shard_by_value(dep_node).get(dep_node).copied()
}
}
#[inline]
- pub fn dep_node_exists(&self, dep_node: &DepNode<K>) -> bool {
+ pub fn dep_node_exists(&self, dep_node: &DepNode) -> bool {
self.dep_node_index_of_opt(dep_node).is_some()
}
- fn node_color(&self, dep_node: &DepNode<K>) -> Option<DepNodeColor> {
+ fn node_color(&self, dep_node: &DepNode) -> Option<DepNodeColor> {
if let Some(prev_index) = self.previous.node_to_index_opt(dep_node) {
self.colors.get(prev_index)
} else {
@@ -665,18 +660,18 @@ impl<K: DepKind> DepGraphData<K> {
}
#[inline]
- pub fn prev_node_of(&self, prev_index: SerializedDepNodeIndex) -> DepNode<K> {
+ pub fn prev_node_of(&self, prev_index: SerializedDepNodeIndex) -> DepNode {
self.previous.index_to_node(prev_index)
}
- pub fn mark_debug_loaded_from_disk(&self, dep_node: DepNode<K>) {
+ pub fn mark_debug_loaded_from_disk(&self, dep_node: DepNode) {
self.debug_loaded_from_disk.lock().insert(dep_node);
}
}
-impl<K: DepKind> DepGraph<K> {
+impl<D: Deps> DepGraph<D> {
#[inline]
- pub fn dep_node_exists(&self, dep_node: &DepNode<K>) -> bool {
+ pub fn dep_node_exists(&self, dep_node: &DepNode) -> bool {
self.data.as_ref().is_some_and(|data| data.dep_node_exists(dep_node))
}
@@ -692,12 +687,12 @@ impl<K: DepKind> DepGraph<K> {
&self.data.as_ref().unwrap().previous_work_products
}
- pub fn debug_was_loaded_from_disk(&self, dep_node: DepNode<K>) -> bool {
+ pub fn debug_was_loaded_from_disk(&self, dep_node: DepNode) -> bool {
self.data.as_ref().unwrap().debug_loaded_from_disk.lock().contains(&dep_node)
}
#[inline(always)]
- pub fn register_dep_node_debug_str<F>(&self, dep_node: DepNode<K>, debug_str_gen: F)
+ pub fn register_dep_node_debug_str<F>(&self, dep_node: DepNode, debug_str_gen: F)
where
F: FnOnce() -> String,
{
@@ -710,11 +705,11 @@ impl<K: DepKind> DepGraph<K> {
dep_node_debug.borrow_mut().insert(dep_node, debug_str);
}
- pub fn dep_node_debug_str(&self, dep_node: DepNode<K>) -> Option<String> {
+ pub fn dep_node_debug_str(&self, dep_node: DepNode) -> Option<String> {
self.data.as_ref()?.dep_node_debug.borrow().get(&dep_node).cloned()
}
- fn node_color(&self, dep_node: &DepNode<K>) -> Option<DepNodeColor> {
+ fn node_color(&self, dep_node: &DepNode) -> Option<DepNodeColor> {
if let Some(ref data) = self.data {
return data.node_color(dep_node);
}
@@ -722,25 +717,25 @@ impl<K: DepKind> DepGraph<K> {
None
}
- pub fn try_mark_green<Qcx: QueryContext<DepKind = K>>(
+ pub fn try_mark_green<Qcx: QueryContext<Deps = D>>(
&self,
qcx: Qcx,
- dep_node: &DepNode<K>,
+ dep_node: &DepNode,
) -> Option<(SerializedDepNodeIndex, DepNodeIndex)> {
self.data().and_then(|data| data.try_mark_green(qcx, dep_node))
}
}
-impl<K: DepKind> DepGraphData<K> {
+impl<D: Deps> DepGraphData<D> {
/// Try to mark a node index for the node dep_node.
///
/// A node will have an index, when it's already been marked green, or when we can mark it
/// green. This function will mark the current task as a reader of the specified node, when
/// a node index can be found for that node.
- pub fn try_mark_green<Qcx: QueryContext<DepKind = K>>(
+ pub fn try_mark_green<Qcx: QueryContext<Deps = D>>(
&self,
qcx: Qcx,
- dep_node: &DepNode<K>,
+ dep_node: &DepNode,
) -> Option<(SerializedDepNodeIndex, DepNodeIndex)> {
debug_assert!(!qcx.dep_context().is_eval_always(dep_node.kind));
@@ -762,11 +757,11 @@ impl<K: DepKind> DepGraphData<K> {
}
#[instrument(skip(self, qcx, parent_dep_node_index, frame), level = "debug")]
- fn try_mark_parent_green<Qcx: QueryContext<DepKind = K>>(
+ fn try_mark_parent_green<Qcx: QueryContext<Deps = D>>(
&self,
qcx: Qcx,
parent_dep_node_index: SerializedDepNodeIndex,
- dep_node: &DepNode<K>,
+ dep_node: &DepNode,
frame: Option<&MarkFrame<'_>>,
) -> Option<()> {
let dep_dep_node_color = self.colors.get(parent_dep_node_index);
@@ -850,11 +845,11 @@ impl<K: DepKind> DepGraphData<K> {
/// Try to mark a dep-node which existed in the previous compilation session as green.
#[instrument(skip(self, qcx, prev_dep_node_index, frame), level = "debug")]
- fn try_mark_previous_green<Qcx: QueryContext<DepKind = K>>(
+ fn try_mark_previous_green<Qcx: QueryContext<Deps = D>>(
&self,
qcx: Qcx,
prev_dep_node_index: SerializedDepNodeIndex,
- dep_node: &DepNode<K>,
+ dep_node: &DepNode,
frame: Option<&MarkFrame<'_>>,
) -> Option<DepNodeIndex> {
let frame = MarkFrame { index: prev_dep_node_index, parent: frame };
@@ -872,7 +867,7 @@ impl<K: DepKind> DepGraphData<K> {
let prev_deps = self.previous.edge_targets_from(prev_dep_node_index);
- for &dep_dep_node_index in prev_deps {
+ for dep_dep_node_index in prev_deps {
self.try_mark_parent_green(qcx, dep_dep_node_index, dep_node, Some(&frame))?;
}
@@ -921,7 +916,7 @@ impl<K: DepKind> DepGraphData<K> {
/// This may be called concurrently on multiple threads for the same dep node.
#[cold]
#[inline(never)]
- fn emit_side_effects<Qcx: QueryContext<DepKind = K>>(
+ fn emit_side_effects<Qcx: QueryContext<Deps = D>>(
&self,
qcx: Qcx,
dep_node_index: DepNodeIndex,
@@ -945,16 +940,16 @@ impl<K: DepKind> DepGraphData<K> {
}
}
-impl<K: DepKind> DepGraph<K> {
+impl<D: Deps> DepGraph<D> {
/// Returns true if the given node has been marked as red during the
/// current compilation session. Used in various assertions
- pub fn is_red(&self, dep_node: &DepNode<K>) -> bool {
+ pub fn is_red(&self, dep_node: &DepNode) -> bool {
self.node_color(dep_node) == Some(DepNodeColor::Red)
}
/// Returns true if the given node has been marked as green during the
/// current compilation session. Used in various assertions
- pub fn is_green(&self, dep_node: &DepNode<K>) -> bool {
+ pub fn is_green(&self, dep_node: &DepNode) -> bool {
self.node_color(dep_node).is_some_and(|c| c.is_green())
}
@@ -966,7 +961,7 @@ impl<K: DepKind> DepGraph<K> {
///
/// This method will only load queries that will end up in the disk cache.
/// Other queries will not be executed.
- pub fn exec_cache_promotions<Tcx: DepContext<DepKind = K>>(&self, tcx: Tcx) {
+ pub fn exec_cache_promotions<Tcx: DepContext>(&self, tcx: Tcx) {
let _prof_timer = tcx.profiler().generic_activity("incr_comp_query_cache_promotion");
let data = self.data.as_ref().unwrap();
@@ -1081,9 +1076,9 @@ rustc_index::newtype_index! {
/// `new_node_to_index` and `data`, or `prev_index_to_index` and `data`. When
/// manipulating both, we acquire `new_node_to_index` or `prev_index_to_index`
/// first, and `data` second.
-pub(super) struct CurrentDepGraph<K: DepKind> {
- encoder: Steal<GraphEncoder<K>>,
- new_node_to_index: Sharded<FxHashMap<DepNode<K>, DepNodeIndex>>,
+pub(super) struct CurrentDepGraph<D: Deps> {
+ encoder: Steal<GraphEncoder<D>>,
+ new_node_to_index: Sharded<FxHashMap<DepNode, DepNodeIndex>>,
prev_index_to_index: Lock<IndexVec<SerializedDepNodeIndex, Option<DepNodeIndex>>>,
/// This is used to verify that fingerprints do not change between the creation of a node
@@ -1094,7 +1089,7 @@ pub(super) struct CurrentDepGraph<K: DepKind> {
/// Used to trap when a specific edge is added to the graph.
/// This is used for debug purposes and is only active with `debug_assertions`.
#[cfg(debug_assertions)]
- forbidden_edge: Option<EdgeFilter<K>>,
+ forbidden_edge: Option<EdgeFilter>,
/// Anonymous `DepNode`s are nodes whose IDs we compute from the list of
/// their edges. This has the beneficial side-effect that multiple anonymous
@@ -1121,14 +1116,14 @@ pub(super) struct CurrentDepGraph<K: DepKind> {
node_intern_event_id: Option<EventId>,
}
-impl<K: DepKind> CurrentDepGraph<K> {
+impl<D: Deps> CurrentDepGraph<D> {
fn new(
profiler: &SelfProfilerRef,
prev_graph_node_count: usize,
encoder: FileEncoder,
record_graph: bool,
record_stats: bool,
- ) -> CurrentDepGraph<K> {
+ ) -> Self {
use std::time::{SystemTime, UNIX_EPOCH};
let duration = SystemTime::now().duration_since(UNIX_EPOCH).unwrap();
@@ -1166,7 +1161,7 @@ impl<K: DepKind> CurrentDepGraph<K> {
)),
new_node_to_index: Sharded::new(|| {
FxHashMap::with_capacity_and_hasher(
- new_node_count_estimate / sharded::SHARDS,
+ new_node_count_estimate / sharded::shards(),
Default::default(),
)
}),
@@ -1183,7 +1178,7 @@ impl<K: DepKind> CurrentDepGraph<K> {
}
#[cfg(debug_assertions)]
- fn record_edge(&self, dep_node_index: DepNodeIndex, key: DepNode<K>, fingerprint: Fingerprint) {
+ fn record_edge(&self, dep_node_index: DepNodeIndex, key: DepNode, fingerprint: Fingerprint) {
if let Some(forbidden_edge) = &self.forbidden_edge {
forbidden_edge.index_to_node.lock().insert(dep_node_index, key);
}
@@ -1197,12 +1192,11 @@ impl<K: DepKind> CurrentDepGraph<K> {
fn intern_new_node(
&self,
profiler: &SelfProfilerRef,
- key: DepNode<K>,
+ key: DepNode,
edges: EdgesVec,
current_fingerprint: Fingerprint,
) -> DepNodeIndex {
- let dep_node_index = match self.new_node_to_index.get_shard_by_value(&key).lock().entry(key)
- {
+ let dep_node_index = match self.new_node_to_index.lock_shard_by_value(&key).entry(key) {
Entry::Occupied(entry) => *entry.get(),
Entry::Vacant(entry) => {
let dep_node_index =
@@ -1221,8 +1215,8 @@ impl<K: DepKind> CurrentDepGraph<K> {
fn intern_node(
&self,
profiler: &SelfProfilerRef,
- prev_graph: &SerializedDepGraph<K>,
- key: DepNode<K>,
+ prev_graph: &SerializedDepGraph,
+ key: DepNode,
edges: EdgesVec,
fingerprint: Option<Fingerprint>,
print_status: bool,
@@ -1295,7 +1289,7 @@ impl<K: DepKind> CurrentDepGraph<K> {
fn promote_node_and_deps_to_current(
&self,
profiler: &SelfProfilerRef,
- prev_graph: &SerializedDepGraph<K>,
+ prev_graph: &SerializedDepGraph,
prev_index: SerializedDepNodeIndex,
) -> DepNodeIndex {
self.debug_assert_not_in_new_nodes(prev_graph, prev_index);
@@ -1308,8 +1302,7 @@ impl<K: DepKind> CurrentDepGraph<K> {
let key = prev_graph.index_to_node(prev_index);
let edges = prev_graph
.edge_targets_from(prev_index)
- .iter()
- .map(|i| prev_index_to_index[*i].unwrap())
+ .map(|i| prev_index_to_index[i].unwrap())
.collect();
let fingerprint = prev_graph.fingerprint_by_index(prev_index);
let dep_node_index = self.encoder.borrow().send(profiler, key, fingerprint, edges);
@@ -1324,27 +1317,23 @@ impl<K: DepKind> CurrentDepGraph<K> {
#[inline]
fn debug_assert_not_in_new_nodes(
&self,
- prev_graph: &SerializedDepGraph<K>,
+ prev_graph: &SerializedDepGraph,
prev_index: SerializedDepNodeIndex,
) {
let node = &prev_graph.index_to_node(prev_index);
debug_assert!(
- !self.new_node_to_index.get_shard_by_value(node).lock().contains_key(node),
+ !self.new_node_to_index.lock_shard_by_value(node).contains_key(node),
"node from previous graph present in new node collection"
);
}
}
-/// The capacity of the `reads` field `SmallVec`
-const TASK_DEPS_READS_CAP: usize = 8;
-type EdgesVec = SmallVec<[DepNodeIndex; TASK_DEPS_READS_CAP]>;
-
#[derive(Debug, Clone, Copy)]
-pub enum TaskDepsRef<'a, K: DepKind> {
+pub enum TaskDepsRef<'a> {
/// New dependencies can be added to the
/// `TaskDeps`. This is used when executing a 'normal' query
/// (no `eval_always` modifier)
- Allow(&'a Lock<TaskDeps<K>>),
+ Allow(&'a Lock<TaskDeps>),
/// This is used when executing an `eval_always` query. We don't
/// need to track dependencies for a query that's always
/// re-executed -- but we need to know that this is an `eval_always`
@@ -1361,15 +1350,15 @@ pub enum TaskDepsRef<'a, K: DepKind> {
}
#[derive(Debug)]
-pub struct TaskDeps<K: DepKind> {
+pub struct TaskDeps {
#[cfg(debug_assertions)]
- node: Option<DepNode<K>>,
+ node: Option<DepNode>,
reads: EdgesVec,
read_set: FxHashSet<DepNodeIndex>,
- phantom_data: PhantomData<DepNode<K>>,
+ phantom_data: PhantomData<DepNode>,
}
-impl<K: DepKind> Default for TaskDeps<K> {
+impl Default for TaskDeps {
fn default() -> Self {
Self {
#[cfg(debug_assertions)]
@@ -1421,10 +1410,7 @@ impl DepNodeColorMap {
#[inline(never)]
#[cold]
-pub(crate) fn print_markframe_trace<K: DepKind>(
- graph: &DepGraph<K>,
- frame: Option<&MarkFrame<'_>>,
-) {
+pub(crate) fn print_markframe_trace<D: Deps>(graph: &DepGraph<D>, frame: Option<&MarkFrame<'_>>) {
let data = graph.data.as_ref().unwrap();
eprintln!("there was a panic while trying to force a dep node");
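
The `read_index` path above keeps its deduplication strategy, just renamed around `EdgesVec`: a linear scan while the read list still fits inline, and a hash set once it spills past `EdgesVec::INLINE_CAPACITY`. A standalone sketch of that strategy:

use std::collections::HashSet;

const INLINE_CAPACITY: usize = 8; // mirrors EdgesVec::INLINE_CAPACITY

#[derive(Default)]
struct Reads {
    list: Vec<u32>,    // stands in for task_deps.reads
    set: HashSet<u32>, // stands in for task_deps.read_set
}

impl Reads {
    fn record(&mut self, index: u32) {
        // Cheap linear scan while the list is small, hash lookup afterwards.
        let new_read = if self.list.len() < INLINE_CAPACITY {
            self.list.iter().all(|&other| other != index)
        } else {
            self.set.insert(index)
        };
        if new_read {
            self.list.push(index);
            if self.list.len() == INLINE_CAPACITY {
                // Seed the set with everything seen so far.
                self.set.extend(self.list.iter().copied());
            }
        }
    }
}

fn main() {
    let mut reads = Reads::default();
    for i in [1, 2, 2, 3, 1] {
        reads.record(i);
    }
    assert_eq!(reads.list, vec![1, 2, 3]);
}
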
diff --git a/compiler/rustc_query_system/src/dep_graph/mod.rs b/compiler/rustc_query_system/src/dep_graph/mod.rs
index 0fd9e35d6..624ae680a 100644
--- a/compiler/rustc_query_system/src/dep_graph/mod.rs
+++ b/compiler/rustc_query_system/src/dep_graph/mod.rs
@@ -1,10 +1,12 @@
pub mod debug;
-mod dep_node;
+pub mod dep_node;
+mod edges;
mod graph;
mod query;
mod serialized;
-pub use dep_node::{DepKindStruct, DepNode, DepNodeParams, WorkProductId};
+pub use dep_node::{DepKind, DepKindStruct, DepNode, DepNodeParams, WorkProductId};
+pub use edges::EdgesVec;
pub use graph::{
hash_result, DepGraph, DepGraphData, DepNodeColor, DepNodeIndex, TaskDeps, TaskDepsRef,
WorkProduct, WorkProductMap,
@@ -14,22 +16,20 @@ pub use serialized::{SerializedDepGraph, SerializedDepNodeIndex};
use crate::ich::StableHashingContext;
use rustc_data_structures::profiling::SelfProfilerRef;
-use rustc_serialize::{opaque::FileEncoder, Encodable};
use rustc_session::Session;
-use std::hash::Hash;
-use std::{fmt, panic};
+use std::panic;
use self::graph::{print_markframe_trace, MarkFrame};
pub trait DepContext: Copy {
- type DepKind: self::DepKind;
+ type Deps: Deps;
/// Create a hashing context for hashing new results.
fn with_stable_hashing_context<R>(self, f: impl FnOnce(StableHashingContext<'_>) -> R) -> R;
/// Access the DepGraph.
- fn dep_graph(&self) -> &DepGraph<Self::DepKind>;
+ fn dep_graph(&self) -> &DepGraph<Self::Deps>;
/// Access the profiler.
fn profiler(&self) -> &SelfProfilerRef;
@@ -37,10 +37,10 @@ pub trait DepContext: Copy {
/// Access the compiler session.
fn sess(&self) -> &Session;
- fn dep_kind_info(&self, dep_node: Self::DepKind) -> &DepKindStruct<Self>;
+ fn dep_kind_info(&self, dep_node: DepKind) -> &DepKindStruct<Self>;
#[inline(always)]
- fn fingerprint_style(self, kind: Self::DepKind) -> FingerprintStyle {
+ fn fingerprint_style(self, kind: DepKind) -> FingerprintStyle {
let data = self.dep_kind_info(kind);
if data.is_anon {
return FingerprintStyle::Opaque;
@@ -50,18 +50,14 @@ pub trait DepContext: Copy {
#[inline(always)]
/// Return whether this kind always require evaluation.
- fn is_eval_always(self, kind: Self::DepKind) -> bool {
+ fn is_eval_always(self, kind: DepKind) -> bool {
self.dep_kind_info(kind).is_eval_always
}
/// Try to force a dep node to execute and see if it's green.
#[inline]
#[instrument(skip(self, frame), level = "debug")]
- fn try_force_from_dep_node(
- self,
- dep_node: DepNode<Self::DepKind>,
- frame: Option<&MarkFrame<'_>>,
- ) -> bool {
+ fn try_force_from_dep_node(self, dep_node: DepNode, frame: Option<&MarkFrame<'_>>) -> bool {
let cb = self.dep_kind_info(dep_node.kind);
if let Some(f) = cb.force_from_dep_node {
if let Err(value) = panic::catch_unwind(panic::AssertUnwindSafe(|| {
@@ -79,7 +75,7 @@ pub trait DepContext: Copy {
}
/// Load data from the on-disk cache.
- fn try_load_from_on_disk_cache(self, dep_node: DepNode<Self::DepKind>) {
+ fn try_load_from_on_disk_cache(self, dep_node: DepNode) {
let cb = self.dep_kind_info(dep_node.kind);
if let Some(f) = cb.try_load_from_on_disk_cache {
f(self, dep_node)
@@ -87,15 +83,37 @@ pub trait DepContext: Copy {
}
}
+pub trait Deps {
+ /// Execute the operation with provided dependencies.
+ fn with_deps<OP, R>(deps: TaskDepsRef<'_>, op: OP) -> R
+ where
+ OP: FnOnce() -> R;
+
+ /// Access dependencies from current implicit context.
+ fn read_deps<OP>(op: OP)
+ where
+ OP: for<'a> FnOnce(TaskDepsRef<'a>);
+
+ /// We use this for most things when incr. comp. is turned off.
+ const DEP_KIND_NULL: DepKind;
+
+ /// We use this to create a forever-red node.
+ const DEP_KIND_RED: DepKind;
+
+ /// This is the highest value a `DepKind` can have. It's used during encoding to
+ /// pack information into the unused bits.
+ const DEP_KIND_MAX: u16;
+}
+
pub trait HasDepContext: Copy {
- type DepKind: self::DepKind;
- type DepContext: self::DepContext<DepKind = Self::DepKind>;
+ type Deps: self::Deps;
+ type DepContext: self::DepContext<Deps = Self::Deps>;
fn dep_context(&self) -> &Self::DepContext;
}
impl<T: DepContext> HasDepContext for T {
- type DepKind = T::DepKind;
+ type Deps = T::Deps;
type DepContext = Self;
fn dep_context(&self) -> &Self::DepContext {
@@ -104,7 +122,7 @@ impl<T: DepContext> HasDepContext for T {
}
impl<T: HasDepContext, Q: Copy> HasDepContext for (T, Q) {
- type DepKind = T::DepKind;
+ type Deps = T::Deps;
type DepContext = T::DepContext;
fn dep_context(&self) -> &Self::DepContext {
@@ -136,25 +154,3 @@ impl FingerprintStyle {
}
}
}
-
-/// Describe the different families of dependency nodes.
-pub trait DepKind: Copy + fmt::Debug + Eq + Hash + Send + Encodable<FileEncoder> + 'static {
- /// DepKind to use when incr. comp. is turned off.
- const NULL: Self;
-
- /// DepKind to use to create the initial forever-red node.
- const RED: Self;
-
- /// Implementation of `std::fmt::Debug` for `DepNode`.
- fn debug_node(node: &DepNode<Self>, f: &mut fmt::Formatter<'_>) -> fmt::Result;
-
- /// Execute the operation with provided dependencies.
- fn with_deps<OP, R>(deps: TaskDepsRef<'_, Self>, op: OP) -> R
- where
- OP: FnOnce() -> R;
-
- /// Access dependencies from current implicit context.
- fn read_deps<OP>(op: OP)
- where
- OP: for<'a> FnOnce(TaskDepsRef<'a, Self>);
-}
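
The new `DEP_KIND_MAX` constant exists because `SerializedNodeHeader` (in serialized.rs below) sizes its bit fields from it: the kind gets as many bits as the largest discriminant needs, two bits hold the edge byte width, and whatever remains can store a short edge-list length inline. A sketch of that budget with a hypothetical `DEP_KIND_MAX` of 300:

fn main() {
    let dep_kind_max: u16 = 300; // hypothetical; the real value is the highest DepKind variant
    let total_bits = 16; // size_of::<DepKind>() * 8
    let kind_bits = total_bits - dep_kind_max.leading_zeros() as usize;
    let width_bits = 2; // DEP_NODE_WIDTH_BITS
    let len_bits = total_bits - kind_bits - width_bits;
    // Edge lists with up to this many edges store their length in the header itself.
    let max_inline_len = (u16::MAX as usize >> (total_bits - len_bits)) - 1;
    assert_eq!((kind_bits, len_bits, max_inline_len), (9, 5, 30));
}
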
diff --git a/compiler/rustc_query_system/src/dep_graph/query.rs b/compiler/rustc_query_system/src/dep_graph/query.rs
index 5cbc6bf8f..5969e5fbe 100644
--- a/compiler/rustc_query_system/src/dep_graph/query.rs
+++ b/compiler/rustc_query_system/src/dep_graph/query.rs
@@ -2,16 +2,16 @@ use rustc_data_structures::fx::FxHashMap;
use rustc_data_structures::graph::implementation::{Direction, Graph, NodeIndex, INCOMING};
use rustc_index::IndexVec;
-use super::{DepKind, DepNode, DepNodeIndex};
+use super::{DepNode, DepNodeIndex};
-pub struct DepGraphQuery<K> {
- pub graph: Graph<DepNode<K>, ()>,
- pub indices: FxHashMap<DepNode<K>, NodeIndex>,
+pub struct DepGraphQuery {
+ pub graph: Graph<DepNode, ()>,
+ pub indices: FxHashMap<DepNode, NodeIndex>,
pub dep_index_to_index: IndexVec<DepNodeIndex, Option<NodeIndex>>,
}
-impl<K: DepKind> DepGraphQuery<K> {
- pub fn new(prev_node_count: usize) -> DepGraphQuery<K> {
+impl DepGraphQuery {
+ pub fn new(prev_node_count: usize) -> DepGraphQuery {
let node_count = prev_node_count + prev_node_count / 4;
let edge_count = 6 * node_count;
@@ -22,7 +22,7 @@ impl<K: DepKind> DepGraphQuery<K> {
DepGraphQuery { graph, indices, dep_index_to_index }
}
- pub fn push(&mut self, index: DepNodeIndex, node: DepNode<K>, edges: &[DepNodeIndex]) {
+ pub fn push(&mut self, index: DepNodeIndex, node: DepNode, edges: &[DepNodeIndex]) {
let source = self.graph.add_node(node);
self.dep_index_to_index.insert(index, source);
self.indices.insert(node, source);
@@ -37,11 +37,11 @@ impl<K: DepKind> DepGraphQuery<K> {
}
}
- pub fn nodes(&self) -> Vec<&DepNode<K>> {
+ pub fn nodes(&self) -> Vec<&DepNode> {
self.graph.all_nodes().iter().map(|n| &n.data).collect()
}
- pub fn edges(&self) -> Vec<(&DepNode<K>, &DepNode<K>)> {
+ pub fn edges(&self) -> Vec<(&DepNode, &DepNode)> {
self.graph
.all_edges()
.iter()
@@ -50,7 +50,7 @@ impl<K: DepKind> DepGraphQuery<K> {
.collect()
}
- fn reachable_nodes(&self, node: &DepNode<K>, direction: Direction) -> Vec<&DepNode<K>> {
+ fn reachable_nodes(&self, node: &DepNode, direction: Direction) -> Vec<&DepNode> {
if let Some(&index) = self.indices.get(node) {
self.graph.depth_traverse(index, direction).map(|s| self.graph.node_data(s)).collect()
} else {
@@ -59,7 +59,7 @@ impl<K: DepKind> DepGraphQuery<K> {
}
/// All nodes that can reach `node`.
- pub fn transitive_predecessors(&self, node: &DepNode<K>) -> Vec<&DepNode<K>> {
+ pub fn transitive_predecessors(&self, node: &DepNode) -> Vec<&DepNode> {
self.reachable_nodes(node, INCOMING)
}
}
diff --git a/compiler/rustc_query_system/src/dep_graph/serialized.rs b/compiler/rustc_query_system/src/dep_graph/serialized.rs
index edddfda62..fcf46be6e 100644
--- a/compiler/rustc_query_system/src/dep_graph/serialized.rs
+++ b/compiler/rustc_query_system/src/dep_graph/serialized.rs
@@ -1,6 +1,6 @@
//! The data that we will serialize and deserialize.
//!
-//! The dep-graph is serialized as a sequence of NodeInfo, with the dependencies
+//! Notionally, the dep-graph is a sequence of NodeInfo with the dependencies
//! specified inline. The total number of nodes and edges are stored as the last
//! 16 bytes of the file, so we can find them easily at decoding time.
//!
@@ -11,17 +11,44 @@
//! sequence of NodeInfos to the different arrays in SerializedDepGraph. Since the
//! node and edge count are stored at the end of the file, all the arrays can be
//! pre-allocated with the right length.
+//!
+//! The encoding of the dep-graph is generally designed around the fact that fixed-size
+//! reads of encoded data are generally faster than variable-sized reads. Ergo we adopt
+//! essentially the same varint encoding scheme used in the rmeta format; the edge lists
+//! for each node on the graph store a 2-bit integer which is the number of bytes per edge
+//! index in that node's edge list. We effectively ignore that an edge index of 0 could be
+//! encoded with 0 bytes in order to not require 3 bits to store the byte width of the edges.
+//! The overhead of calculating the correct byte width for each edge is mitigated by
+//! building edge lists with [`EdgesVec`] which keeps a running max of the edges in a node.
+//!
+//! When we decode this data, we do not immediately create [`SerializedDepNodeIndex`] and
+//! instead keep the data in its denser serialized form which lets us turn our on-disk size
+//! efficiency directly into a peak memory reduction. When we convert these encoded-in-memory
+//! values into their fully-deserialized type, we use a fixed-size read of the encoded array
+//! then mask off any errant bytes we read. The array of edge index bytes is padded to permit this.
+//!
+//! We also encode and decode the entire rest of each node using [`SerializedNodeHeader`]
+//! to let this encoding and decoding be done in one fixed-size operation. These headers contain
+//! two [`Fingerprint`]s, the serialized [`DepKind`], the number of edge indices in the node,
+//! and the number of bytes used to encode each edge index for this node. The
+//! [`DepKind`], number of edges, and bytes per edge are all bit-packed together, if they fit.
+//! If the number of edges in this node does not fit in the bits available in the header, we
+//! store it directly after the header with leb128.
use super::query::DepGraphQuery;
-use super::{DepKind, DepNode, DepNodeIndex};
+use super::{DepKind, DepNode, DepNodeIndex, Deps};
+use crate::dep_graph::EdgesVec;
use rustc_data_structures::fingerprint::Fingerprint;
+use rustc_data_structures::fingerprint::PackedFingerprint;
use rustc_data_structures::fx::FxHashMap;
use rustc_data_structures::profiling::SelfProfilerRef;
use rustc_data_structures::sync::Lock;
+use rustc_data_structures::unhash::UnhashMap;
use rustc_index::{Idx, IndexVec};
use rustc_serialize::opaque::{FileEncodeResult, FileEncoder, IntEncodedWithFixedSize, MemDecoder};
-use rustc_serialize::{Decodable, Decoder, Encodable};
-use smallvec::SmallVec;
+use rustc_serialize::{Decodable, Decoder, Encodable, Encoder};
+use std::iter;
+use std::marker::PhantomData;
// The maximum value of `SerializedDepNodeIndex` leaves the upper two bits
// unused so that we can store multiple index types in `CompressedHybridIndex`,
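
A standalone sketch of the scheme described in the new module docs above, assuming 4-byte indices: each node's edges are written with the smallest byte width that fits that node's largest index, the buffer is padded so the final index can still be fetched with one fixed-size read, and decoding masks off the extra bytes. The names here are illustrative, not the crate's API.

const DEP_NODE_SIZE: usize = 4; // size of an index
const DEP_NODE_PAD: usize = DEP_NODE_SIZE - 1;

// Encode one node's edge list with the smallest byte width that fits its
// largest index (the running max that EdgesVec tracks).
fn encode(edges: &[u32]) -> (Vec<u8>, usize) {
    let max = edges.iter().copied().max().unwrap_or(0);
    let bytes_per_index = (DEP_NODE_SIZE - max.leading_zeros() as usize / 8).max(1);
    let mut data = Vec::new();
    for &edge in edges {
        data.extend_from_slice(&edge.to_le_bytes()[..bytes_per_index]);
    }
    data.extend_from_slice(&[0u8; DEP_NODE_PAD]); // padding for the last fixed-size read
    (data, bytes_per_index)
}

// Decode with one 4-byte read per edge, masking off the bytes that belong to
// the next edge (or to the padding).
fn decode(data: &[u8], bytes_per_index: usize, len: usize) -> Vec<u32> {
    let mask = u32::MAX >> (32 - 8 * bytes_per_index as u32);
    let mut raw = data;
    (0..len)
        .map(|_| {
            let word = u32::from_le_bytes(raw[..DEP_NODE_SIZE].try_into().unwrap());
            raw = &raw[bytes_per_index..];
            word & mask
        })
        .collect()
}

fn main() {
    let edges = [3u32, 259, 70_000];
    let (data, width) = encode(&edges);
    assert_eq!(width, 3); // 70_000 needs 3 bytes, so every edge of this node uses 3
    assert_eq!(decode(&data, width, edges.len()), edges.to_vec());
}
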
@@ -31,26 +58,37 @@ rustc_index::newtype_index! {
pub struct SerializedDepNodeIndex {}
}
+const DEP_NODE_SIZE: usize = std::mem::size_of::<SerializedDepNodeIndex>();
+/// Amount of padding we need to add to the edge list data so that we can retrieve every
+/// SerializedDepNodeIndex with a fixed-size read then mask.
+const DEP_NODE_PAD: usize = DEP_NODE_SIZE - 1;
+/// Number of bits we need to store the number of used bytes in a SerializedDepNodeIndex.
+/// Note that wherever we encode byte widths like this we actually store the number of bytes used
+/// minus 1; for a 4-byte value we technically would have 5 widths to store, but using one byte to
+/// store zeroes (which are relatively rare) is a decent tradeoff to save a bit in our bitfields.
+const DEP_NODE_WIDTH_BITS: usize = DEP_NODE_SIZE / 2;
+
/// Data for use when recompiling the **current crate**.
#[derive(Debug)]
-pub struct SerializedDepGraph<K: DepKind> {
+pub struct SerializedDepGraph {
/// The set of all DepNodes in the graph
- nodes: IndexVec<SerializedDepNodeIndex, DepNode<K>>,
+ nodes: IndexVec<SerializedDepNodeIndex, DepNode>,
/// The set of all Fingerprints in the graph. Each Fingerprint corresponds to
/// the DepNode at the same index in the nodes vector.
fingerprints: IndexVec<SerializedDepNodeIndex, Fingerprint>,
/// For each DepNode, stores the list of edges originating from that
/// DepNode. Encoded as a [start, end) pair indexing into edge_list_data,
/// which holds the actual DepNodeIndices of the target nodes.
- edge_list_indices: IndexVec<SerializedDepNodeIndex, (u32, u32)>,
- /// A flattened list of all edge targets in the graph. Edge sources are
- /// implicit in edge_list_indices.
- edge_list_data: Vec<SerializedDepNodeIndex>,
- /// Reciprocal map to `nodes`.
- index: FxHashMap<DepNode<K>, SerializedDepNodeIndex>,
+ edge_list_indices: IndexVec<SerializedDepNodeIndex, EdgeHeader>,
+ /// A flattened list of all edge targets in the graph, stored in the same
+ /// varint encoding that we use on disk. Edge sources are implicit in edge_list_indices.
+ edge_list_data: Vec<u8>,
+ /// Stores a map from fingerprints to nodes per dep node kind.
+ /// This is the reciprocal of `nodes`.
+ index: Vec<UnhashMap<PackedFingerprint, SerializedDepNodeIndex>>,
}
-impl<K: DepKind> Default for SerializedDepGraph<K> {
+impl Default for SerializedDepGraph {
fn default() -> Self {
SerializedDepGraph {
nodes: Default::default(),
@@ -62,21 +100,47 @@ impl<K: DepKind> Default for SerializedDepGraph<K> {
}
}
-impl<K: DepKind> SerializedDepGraph<K> {
+impl SerializedDepGraph {
#[inline]
- pub fn edge_targets_from(&self, source: SerializedDepNodeIndex) -> &[SerializedDepNodeIndex] {
- let targets = self.edge_list_indices[source];
- &self.edge_list_data[targets.0 as usize..targets.1 as usize]
+ pub fn edge_targets_from(
+ &self,
+ source: SerializedDepNodeIndex,
+ ) -> impl Iterator<Item = SerializedDepNodeIndex> + '_ {
+ let header = self.edge_list_indices[source];
+ let mut raw = &self.edge_list_data[header.start()..];
+ // Figure out where the edge list for `source` ends by getting the start index of the next
+ // edge list, or the end of the array if this is the last edge list.
+ let end = self
+ .edge_list_indices
+ .get(source + 1)
+ .map(|h| h.start())
+ .unwrap_or_else(|| self.edge_list_data.len() - DEP_NODE_PAD);
+
+ // The number of edges for this node is implicitly stored in the combination of the byte
+ // width and the length.
+ let bytes_per_index = header.bytes_per_index();
+ let len = (end - header.start()) / bytes_per_index;
+
+ // LLVM doesn't hoist EdgeHeader::mask so we do it ourselves.
+ let mask = header.mask();
+ (0..len).map(move |_| {
+ // Doing this slicing in this order ensures that the first bounds check suffices for
+ // all the others.
+ let index = &raw[..DEP_NODE_SIZE];
+ raw = &raw[bytes_per_index..];
+ let index = u32::from_le_bytes(index.try_into().unwrap()) & mask;
+ SerializedDepNodeIndex::from_u32(index)
+ })
}
#[inline]
- pub fn index_to_node(&self, dep_node_index: SerializedDepNodeIndex) -> DepNode<K> {
+ pub fn index_to_node(&self, dep_node_index: SerializedDepNodeIndex) -> DepNode {
self.nodes[dep_node_index]
}
#[inline]
- pub fn node_to_index_opt(&self, dep_node: &DepNode<K>) -> Option<SerializedDepNodeIndex> {
- self.index.get(dep_node).cloned()
+ pub fn node_to_index_opt(&self, dep_node: &DepNode) -> Option<SerializedDepNodeIndex> {
+ self.index.get(dep_node.kind.as_usize())?.get(&dep_node.hash).cloned()
}
#[inline]
@@ -84,16 +148,45 @@ impl<K: DepKind> SerializedDepGraph<K> {
self.fingerprints[dep_node_index]
}
+ #[inline]
pub fn node_count(&self) -> usize {
- self.index.len()
+ self.nodes.len()
+ }
+}
+
+/// A packed representation of an edge's start index and byte width.
+///
+/// This is packed by stealing 2 bits from the start index, which means we only accommodate edge
+/// data arrays up to a quarter of our address space. Which seems fine.
+#[derive(Debug, Clone, Copy)]
+struct EdgeHeader {
+ repr: usize,
+}
+
+impl EdgeHeader {
+ #[inline]
+ fn start(self) -> usize {
+ self.repr >> DEP_NODE_WIDTH_BITS
+ }
+
+ #[inline]
+ fn bytes_per_index(self) -> usize {
+ (self.repr & mask(DEP_NODE_WIDTH_BITS)) + 1
}
+
+ #[inline]
+ fn mask(self) -> u32 {
+ mask(self.bytes_per_index() * 8) as u32
+ }
+}
+
+fn mask(bits: usize) -> usize {
+ usize::MAX >> ((std::mem::size_of::<usize>() * 8) - bits)
}
-impl<'a, K: DepKind + Decodable<MemDecoder<'a>>> Decodable<MemDecoder<'a>>
- for SerializedDepGraph<K>
-{
+impl SerializedDepGraph {
#[instrument(level = "debug", skip(d))]
- fn decode(d: &mut MemDecoder<'a>) -> SerializedDepGraph<K> {
+ pub fn decode<D: Deps>(d: &mut MemDecoder<'_>) -> SerializedDepGraph {
// The last 16 bytes are the node count and edge count.
debug!("position: {:?}", d.position());
let (node_count, edge_count) =
@@ -107,76 +200,261 @@ impl<'a, K: DepKind + Decodable<MemDecoder<'a>>> Decodable<MemDecoder<'a>>
debug!(?node_count, ?edge_count);
+ let graph_bytes = d.len() - (2 * IntEncodedWithFixedSize::ENCODED_SIZE) - d.position();
+
let mut nodes = IndexVec::with_capacity(node_count);
let mut fingerprints = IndexVec::with_capacity(node_count);
let mut edge_list_indices = IndexVec::with_capacity(node_count);
- let mut edge_list_data = Vec::with_capacity(edge_count);
+ // This estimation assumes that all of the encoded bytes are for the edge lists or for the
+ // fixed-size node headers. But that's not necessarily true; if any edge list has a length
+ // that spills out of the size we can bit-pack into SerializedNodeHeader then some of the
+ // total serialized size is also used by leb128-encoded edge list lengths. Neglecting that
+ // contribution to graph_bytes means our estimation of the bytes needed for edge_list_data
+ // slightly overshoots. But it cannot overshoot by much; consider that the worst case is
+ // for a node with length 64, which means the spilled 1-byte leb128 length is 1 byte of at
+ // least (34 byte header + 1 byte len + 64 bytes edge data), which is ~1%. A 2-byte leb128
+ // length is about the same fractional overhead and it amortizes for yet greater lengths.
+ let mut edge_list_data = Vec::with_capacity(
+ graph_bytes - node_count * std::mem::size_of::<SerializedNodeHeader<D>>(),
+ );
for _index in 0..node_count {
- let dep_node: DepNode<K> = Decodable::decode(d);
- let _i: SerializedDepNodeIndex = nodes.push(dep_node);
+ // Decode the header for this node; the header packs together as many of the fixed-size
+ // fields as possible to limit the number of times we update decoder state.
+ let node_header =
+ SerializedNodeHeader::<D> { bytes: d.read_array(), _marker: PhantomData };
+
+ let _i: SerializedDepNodeIndex = nodes.push(node_header.node());
debug_assert_eq!(_i.index(), _index);
- let fingerprint: Fingerprint = Decodable::decode(d);
- let _i: SerializedDepNodeIndex = fingerprints.push(fingerprint);
+ let _i: SerializedDepNodeIndex = fingerprints.push(node_header.fingerprint());
debug_assert_eq!(_i.index(), _index);
- // Deserialize edges -- sequence of DepNodeIndex
- let len = d.read_usize();
- let start = edge_list_data.len().try_into().unwrap();
- for _ in 0..len {
- let edge = Decodable::decode(d);
- edge_list_data.push(edge);
- }
- let end = edge_list_data.len().try_into().unwrap();
- let _i: SerializedDepNodeIndex = edge_list_indices.push((start, end));
+ // If the length of this node's edge list is small, the length is stored in the header.
+ // If it is not, we fall back to another decoder call.
+ let num_edges = node_header.len().unwrap_or_else(|| d.read_usize());
+
+ // The edge index list uses the same varint strategy as rmeta tables; we select the
+ // byte width per edge list, not per element. This lets us read the whole edge
+ // list for a node with one decoder call and also use the on-disk format in memory.
+ let edges_len_bytes = node_header.bytes_per_index() * num_edges;
+ // The in-memory structure for the edges list stores the byte width of the edges on
+ // this node with the offset into the global edge data array.
+ let edges_header = node_header.edges_header(&edge_list_data);
+
+ edge_list_data.extend(d.read_raw_bytes(edges_len_bytes));
+
+ let _i: SerializedDepNodeIndex = edge_list_indices.push(edges_header);
debug_assert_eq!(_i.index(), _index);
}
- let index: FxHashMap<_, _> =
- nodes.iter_enumerated().map(|(idx, &dep_node)| (dep_node, idx)).collect();
+ // When we access the edge list data, we do a fixed-size read from the edge list data then
+ // mask off the bytes that aren't for that edge index, so the last read may dangle off the
+ // end of the array. This padding ensures it doesn't.
+ edge_list_data.extend(&[0u8; DEP_NODE_PAD]);
+
+ // Read the number of nodes of each dep kind and use it to create a hash map with a suitable size.
+ let mut index: Vec<_> = (0..(D::DEP_KIND_MAX + 1))
+ .map(|_| UnhashMap::with_capacity_and_hasher(d.read_u32() as usize, Default::default()))
+ .collect();
+
+ for (idx, node) in nodes.iter_enumerated() {
+ index[node.kind.as_usize()].insert(node.hash, idx);
+ }
SerializedDepGraph { nodes, fingerprints, edge_list_indices, edge_list_data, index }
}
}
-#[derive(Debug, Encodable, Decodable)]
-pub struct NodeInfo<K: DepKind> {
- node: DepNode<K>,
+/// A packed representation of all the fixed-size fields in a `NodeInfo`.
+///
+/// This stores in one byte array:
+/// * The `Fingerprint` in the `NodeInfo`
+/// * The `Fingerprint` in `DepNode` that is in this `NodeInfo`
+/// * The `DepKind`'s discriminant (a u16, but not all bits are used...)
+/// * The byte width of the encoded edges for this node
+/// * In whatever bits remain, the length of the edge list for this node, if it fits
+struct SerializedNodeHeader<D> {
+ // 2 bytes for the DepNode
+ // 16 for Fingerprint in DepNode
+ // 16 for Fingerprint in NodeInfo
+ bytes: [u8; 34],
+ _marker: PhantomData<D>,
+}
+
+// The fields of a `SerializedNodeHeader`. This struct is an implementation detail and exists only
+// to make the implementation of `SerializedNodeHeader` simpler.
+struct Unpacked {
+ len: Option<usize>,
+ bytes_per_index: usize,
+ kind: DepKind,
+ hash: PackedFingerprint,
fingerprint: Fingerprint,
- edges: SmallVec<[DepNodeIndex; 8]>,
}
-struct Stat<K: DepKind> {
- kind: K,
+// Bit fields, where
+// M: bits used to store the length of a node's edge list
+// N: bits used to store the byte width of elements of the edge list
+// are
+// 0..M length of the edge list
+// M..M+N bytes per index
+// M+N..16 kind
+impl<D: Deps> SerializedNodeHeader<D> {
+ const TOTAL_BITS: usize = std::mem::size_of::<DepKind>() * 8;
+ const LEN_BITS: usize = Self::TOTAL_BITS - Self::KIND_BITS - Self::WIDTH_BITS;
+ const WIDTH_BITS: usize = DEP_NODE_WIDTH_BITS;
+ const KIND_BITS: usize = Self::TOTAL_BITS - D::DEP_KIND_MAX.leading_zeros() as usize;
+ const MAX_INLINE_LEN: usize = (u16::MAX as usize >> (Self::TOTAL_BITS - Self::LEN_BITS)) - 1;
+
+ #[inline]
+ fn new(node_info: &NodeInfo) -> Self {
+ debug_assert_eq!(Self::TOTAL_BITS, Self::LEN_BITS + Self::WIDTH_BITS + Self::KIND_BITS);
+
+ let NodeInfo { node, fingerprint, edges } = node_info;
+
+ let mut head = node.kind.as_inner();
+
+ let free_bytes = edges.max_index().leading_zeros() as usize / 8;
+ let bytes_per_index = (DEP_NODE_SIZE - free_bytes).saturating_sub(1);
+ head |= (bytes_per_index as u16) << Self::KIND_BITS;
+
+ // Encode the number of edges + 1 so that we can reserve 0 to indicate that the len doesn't fit
+ // in this bitfield.
+ if edges.len() <= Self::MAX_INLINE_LEN {
+ head |= (edges.len() as u16 + 1) << (Self::KIND_BITS + Self::WIDTH_BITS);
+ }
+
+ let hash: Fingerprint = node.hash.into();
+
+ // Using half-open ranges ensures an unconditional panic if we get the magic numbers wrong.
+ let mut bytes = [0u8; 34];
+ bytes[..2].copy_from_slice(&head.to_le_bytes());
+ bytes[2..18].copy_from_slice(&hash.to_le_bytes());
+ bytes[18..].copy_from_slice(&fingerprint.to_le_bytes());
+
+ #[cfg(debug_assertions)]
+ {
+ let res = Self { bytes, _marker: PhantomData };
+ assert_eq!(node_info.fingerprint, res.fingerprint());
+ assert_eq!(node_info.node, res.node());
+ if let Some(len) = res.len() {
+ assert_eq!(node_info.edges.len(), len);
+ }
+ }
+ Self { bytes, _marker: PhantomData }
+ }
+
+ #[inline]
+ fn unpack(&self) -> Unpacked {
+ let head = u16::from_le_bytes(self.bytes[..2].try_into().unwrap());
+ let hash = self.bytes[2..18].try_into().unwrap();
+ let fingerprint = self.bytes[18..].try_into().unwrap();
+
+ let kind = head & mask(Self::KIND_BITS) as u16;
+ let bytes_per_index = (head >> Self::KIND_BITS) & mask(Self::WIDTH_BITS) as u16;
+ let len = (head as usize) >> (Self::WIDTH_BITS + Self::KIND_BITS);
+
+ Unpacked {
+ len: len.checked_sub(1),
+ bytes_per_index: bytes_per_index as usize + 1,
+ kind: DepKind::new(kind),
+ hash: Fingerprint::from_le_bytes(hash).into(),
+ fingerprint: Fingerprint::from_le_bytes(fingerprint),
+ }
+ }
+
+ #[inline]
+ fn len(&self) -> Option<usize> {
+ self.unpack().len
+ }
+
+ #[inline]
+ fn bytes_per_index(&self) -> usize {
+ self.unpack().bytes_per_index
+ }
+
+ #[inline]
+ fn fingerprint(&self) -> Fingerprint {
+ self.unpack().fingerprint
+ }
+
+ #[inline]
+ fn node(&self) -> DepNode {
+ let Unpacked { kind, hash, .. } = self.unpack();
+ DepNode { kind, hash }
+ }
+
+ #[inline]
+ fn edges_header(&self, edge_list_data: &[u8]) -> EdgeHeader {
+ EdgeHeader {
+ repr: (edge_list_data.len() << DEP_NODE_WIDTH_BITS) | (self.bytes_per_index() - 1),
+ }
+ }
+}
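
To make the header layout above concrete, here is a small self-contained sketch that packs and unpacks a 16-bit header with the kind in the low bits, the index byte width next, and the optional inline length on top. The constants are illustrative only (the real KIND_BITS is derived from D::DEP_KIND_MAX), and pack/unpack are hypothetical helpers, not the compiler's API.

    // Illustrative bit budget only; the real widths are computed from the DepKind count.
    const KIND_BITS: u16 = 8;
    const WIDTH_BITS: u16 = 2; // enough to encode 1..=4 bytes per index
    const LEN_BITS: u16 = 16 - KIND_BITS - WIDTH_BITS;

    fn pack(kind: u16, bytes_per_index: u16, len: Option<u16>) -> u16 {
        debug_assert!(kind < (1u16 << KIND_BITS));
        let mut head = kind;
        // The stored width is `bytes_per_index - 1`, so 1..=4 fits in two bits.
        head |= (bytes_per_index - 1) << KIND_BITS;
        // Store `len + 1` so that 0 means "the length did not fit inline".
        if let Some(len) = len {
            debug_assert!((len + 1) < (1u16 << LEN_BITS));
            head |= (len + 1) << (KIND_BITS + WIDTH_BITS);
        }
        head
    }

    fn unpack(head: u16) -> (u16, u16, Option<u16>) {
        let kind = head & ((1u16 << KIND_BITS) - 1);
        let bytes_per_index = ((head >> KIND_BITS) & ((1u16 << WIDTH_BITS) - 1)) + 1;
        let len = (head >> (KIND_BITS + WIDTH_BITS)).checked_sub(1);
        (kind, bytes_per_index, len)
    }

Round-trip example: pack(3, 2, Some(5)) unpacks to (3, 2, Some(5)), while a node whose edge count exceeds the inline budget is stored with a zero length field and unpacks to None.
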
+
+#[derive(Debug)]
+struct NodeInfo {
+ node: DepNode,
+ fingerprint: Fingerprint,
+ edges: EdgesVec,
+}
+
+impl NodeInfo {
+ fn encode<D: Deps>(&self, e: &mut FileEncoder) {
+ let header = SerializedNodeHeader::<D>::new(self);
+ e.write_array(header.bytes);
+
+ if header.len().is_none() {
+ e.emit_usize(self.edges.len());
+ }
+
+ let bytes_per_index = header.bytes_per_index();
+ for node_index in self.edges.iter() {
+ e.write_with(|dest| {
+ *dest = node_index.as_u32().to_le_bytes();
+ bytes_per_index
+ });
+ }
+ }
+}
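
A rough sketch of the variable-width edge encoding performed by encode above, under the assumption that indices are u32 and written little-endian; bytes_per_index and encode_indices are hypothetical names for illustration, not the compiler's helpers.

    // Pick the smallest byte width that can hold the largest edge index (at least one byte).
    fn bytes_per_index(max_index: u32) -> usize {
        (4 - max_index.leading_zeros() as usize / 8).max(1)
    }

    // Emit only the low `width` bytes of each little-endian u32 edge index.
    fn encode_indices(out: &mut Vec<u8>, edges: &[u32], width: usize) {
        for &edge in edges {
            out.extend_from_slice(&edge.to_le_bytes()[..width]);
        }
    }

For the edge list [3, 70_000], bytes_per_index returns 3, so the two indices occupy six bytes instead of eight.
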
+
+struct Stat {
+ kind: DepKind,
node_counter: u64,
edge_counter: u64,
}
-struct EncoderState<K: DepKind> {
+struct EncoderState<D: Deps> {
encoder: FileEncoder,
total_node_count: usize,
total_edge_count: usize,
- stats: Option<FxHashMap<K, Stat<K>>>,
+ stats: Option<FxHashMap<DepKind, Stat>>,
+
+ /// Stores the number of times we've encoded each dep kind.
+ kind_stats: Vec<u32>,
+ marker: PhantomData<D>,
}
-impl<K: DepKind> EncoderState<K> {
+impl<D: Deps> EncoderState<D> {
fn new(encoder: FileEncoder, record_stats: bool) -> Self {
Self {
encoder,
total_edge_count: 0,
total_node_count: 0,
stats: record_stats.then(FxHashMap::default),
+ kind_stats: iter::repeat(0).take(D::DEP_KIND_MAX as usize + 1).collect(),
+ marker: PhantomData,
}
}
fn encode_node(
&mut self,
- node: &NodeInfo<K>,
- record_graph: &Option<Lock<DepGraphQuery<K>>>,
+ node: &NodeInfo,
+ record_graph: &Option<Lock<DepGraphQuery>>,
) -> DepNodeIndex {
let index = DepNodeIndex::new(self.total_node_count);
self.total_node_count += 1;
+ self.kind_stats[node.node.kind.as_usize()] += 1;
let edge_count = node.edges.len();
self.total_edge_count += edge_count;
@@ -197,16 +475,28 @@ impl<K: DepKind> EncoderState<K> {
}
let encoder = &mut self.encoder;
- node.encode(encoder);
+ node.encode::<D>(encoder);
index
}
fn finish(self, profiler: &SelfProfilerRef) -> FileEncodeResult {
- let Self { mut encoder, total_node_count, total_edge_count, stats: _ } = self;
+ let Self {
+ mut encoder,
+ total_node_count,
+ total_edge_count,
+ stats: _,
+ kind_stats,
+ marker: _,
+ } = self;
let node_count = total_node_count.try_into().unwrap();
let edge_count = total_edge_count.try_into().unwrap();
+ // Encode the number of nodes of each dep kind encountered
+ for count in kind_stats.iter() {
+ count.encode(&mut encoder);
+ }
+
debug!(?node_count, ?edge_count);
debug!("position: {:?}", encoder.position());
IntEncodedWithFixedSize(node_count).encode(&mut encoder);
@@ -223,12 +513,12 @@ impl<K: DepKind> EncoderState<K> {
}
}
-pub struct GraphEncoder<K: DepKind> {
- status: Lock<EncoderState<K>>,
- record_graph: Option<Lock<DepGraphQuery<K>>>,
+pub struct GraphEncoder<D: Deps> {
+ status: Lock<EncoderState<D>>,
+ record_graph: Option<Lock<DepGraphQuery>>,
}
-impl<K: DepKind + Encodable<FileEncoder>> GraphEncoder<K> {
+impl<D: Deps> GraphEncoder<D> {
pub fn new(
encoder: FileEncoder,
prev_node_count: usize,
@@ -240,7 +530,7 @@ impl<K: DepKind + Encodable<FileEncoder>> GraphEncoder<K> {
GraphEncoder { status, record_graph }
}
- pub(crate) fn with_query(&self, f: impl Fn(&DepGraphQuery<K>)) {
+ pub(crate) fn with_query(&self, f: impl Fn(&DepGraphQuery)) {
if let Some(record_graph) = &self.record_graph {
f(&record_graph.lock())
}
@@ -301,9 +591,9 @@ impl<K: DepKind + Encodable<FileEncoder>> GraphEncoder<K> {
pub(crate) fn send(
&self,
profiler: &SelfProfilerRef,
- node: DepNode<K>,
+ node: DepNode,
fingerprint: Fingerprint,
- edges: SmallVec<[DepNodeIndex; 8]>,
+ edges: EdgesVec,
) -> DepNodeIndex {
let _prof_timer = profiler.generic_activity("incr_comp_encode_dep_graph");
let node = NodeInfo { node, fingerprint, edges };
diff --git a/compiler/rustc_query_system/src/ich/impls_syntax.rs b/compiler/rustc_query_system/src/ich/impls_syntax.rs
index e673d5b8c..b2177be0e 100644
--- a/compiler/rustc_query_system/src/ich/impls_syntax.rs
+++ b/compiler/rustc_query_system/src/ich/impls_syntax.rs
@@ -5,7 +5,7 @@ use crate::ich::StableHashingContext;
use rustc_ast as ast;
use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
-use rustc_span::{BytePos, NormalizedPos, SourceFile};
+use rustc_span::SourceFile;
use std::assert_matches::assert_matches;
use smallvec::SmallVec;
@@ -67,8 +67,8 @@ impl<'a> HashStable<StableHashingContext<'a>> for SourceFile {
src: _,
ref src_hash,
external_src: _,
- start_pos,
- end_pos: _,
+ start_pos: _,
+ source_len: _,
lines: _,
ref multibyte_chars,
ref non_narrow_chars,
@@ -79,62 +79,37 @@ impl<'a> HashStable<StableHashingContext<'a>> for SourceFile {
src_hash.hash_stable(hcx, hasher);
- // We are always in `Lines` form by the time we reach here.
- assert!(self.lines.borrow().is_lines());
- self.lines(|lines| {
+ {
+ // We are always in `Lines` form by the time we reach here.
+ assert!(self.lines.read().is_lines());
+ let lines = self.lines();
// We only hash the relative position within this source_file
lines.len().hash_stable(hcx, hasher);
for &line in lines.iter() {
- stable_byte_pos(line, start_pos).hash_stable(hcx, hasher);
+ line.hash_stable(hcx, hasher);
}
- });
+ }
// We only hash the relative position within this source_file
multibyte_chars.len().hash_stable(hcx, hasher);
for &char_pos in multibyte_chars.iter() {
- stable_multibyte_char(char_pos, start_pos).hash_stable(hcx, hasher);
+ char_pos.hash_stable(hcx, hasher);
}
non_narrow_chars.len().hash_stable(hcx, hasher);
for &char_pos in non_narrow_chars.iter() {
- stable_non_narrow_char(char_pos, start_pos).hash_stable(hcx, hasher);
+ char_pos.hash_stable(hcx, hasher);
}
normalized_pos.len().hash_stable(hcx, hasher);
for &char_pos in normalized_pos.iter() {
- stable_normalized_pos(char_pos, start_pos).hash_stable(hcx, hasher);
+ char_pos.hash_stable(hcx, hasher);
}
cnum.hash_stable(hcx, hasher);
}
}
-fn stable_byte_pos(pos: BytePos, source_file_start: BytePos) -> u32 {
- pos.0 - source_file_start.0
-}
-
-fn stable_multibyte_char(mbc: rustc_span::MultiByteChar, source_file_start: BytePos) -> (u32, u32) {
- let rustc_span::MultiByteChar { pos, bytes } = mbc;
-
- (pos.0 - source_file_start.0, bytes as u32)
-}
-
-fn stable_non_narrow_char(
- swc: rustc_span::NonNarrowChar,
- source_file_start: BytePos,
-) -> (u32, u32) {
- let pos = swc.pos();
- let width = swc.width();
-
- (pos.0 - source_file_start.0, width as u32)
-}
-
-fn stable_normalized_pos(np: NormalizedPos, source_file_start: BytePos) -> (u32, u32) {
- let NormalizedPos { pos, diff } = np;
-
- (pos.0 - source_file_start.0, diff)
-}
-
impl<'tcx> HashStable<StableHashingContext<'tcx>> for rustc_feature::Features {
fn hash_stable(&self, hcx: &mut StableHashingContext<'tcx>, hasher: &mut StableHasher) {
// Unfortunately we cannot exhaustively list fields here, since the
diff --git a/compiler/rustc_query_system/src/lib.rs b/compiler/rustc_query_system/src/lib.rs
index 8c9e9cfad..1944ac443 100644
--- a/compiler/rustc_query_system/src/lib.rs
+++ b/compiler/rustc_query_system/src/lib.rs
@@ -4,6 +4,7 @@
#![feature(min_specialization)]
#![feature(extern_types)]
#![feature(let_chains)]
+#![feature(inline_const)]
#![allow(rustc::potential_query_instability)]
#![deny(rustc::untranslatable_diagnostic)]
#![deny(rustc::diagnostic_outside_of_impl)]
diff --git a/compiler/rustc_query_system/src/query/caches.rs b/compiler/rustc_query_system/src/query/caches.rs
index 4ba9d53a9..0240f012d 100644
--- a/compiler/rustc_query_system/src/query/caches.rs
+++ b/compiler/rustc_query_system/src/query/caches.rs
@@ -2,7 +2,7 @@ use crate::dep_graph::DepNodeIndex;
use rustc_data_structures::fx::FxHashMap;
use rustc_data_structures::sharded::{self, Sharded};
-use rustc_data_structures::sync::Lock;
+use rustc_data_structures::sync::OnceLock;
use rustc_index::{Idx, IndexVec};
use std::fmt::Debug;
use std::hash::Hash;
@@ -55,7 +55,7 @@ where
#[inline(always)]
fn lookup(&self, key: &K) -> Option<(V, DepNodeIndex)> {
let key_hash = sharded::make_hash(key);
- let lock = self.cache.get_shard_by_hash(key_hash).lock();
+ let lock = self.cache.lock_shard_by_hash(key_hash);
let result = lock.raw_entry().from_key_hashed_nocheck(key_hash, key);
if let Some((_, value)) = result { Some(*value) } else { None }
@@ -63,15 +63,14 @@ where
#[inline]
fn complete(&self, key: K, value: V, index: DepNodeIndex) {
- let mut lock = self.cache.get_shard_by_value(&key).lock();
+ let mut lock = self.cache.lock_shard_by_value(&key);
// We may be overwriting another value. This is all right, since the dep-graph
// will check that the fingerprint matches.
lock.insert(key, (value, index));
}
fn iter(&self, f: &mut dyn FnMut(&Self::Key, &Self::Value, DepNodeIndex)) {
- let shards = self.cache.lock_shards();
- for shard in shards.iter() {
+ for shard in self.cache.lock_shards() {
for (k, v) in shard.iter() {
f(k, &v.0, v.1);
}
@@ -88,12 +87,12 @@ impl<'tcx, V: 'tcx> CacheSelector<'tcx, V> for SingleCacheSelector {
}
pub struct SingleCache<V> {
- cache: Lock<Option<(V, DepNodeIndex)>>,
+ cache: OnceLock<(V, DepNodeIndex)>,
}
impl<V> Default for SingleCache<V> {
fn default() -> Self {
- SingleCache { cache: Lock::new(None) }
+ SingleCache { cache: OnceLock::new() }
}
}
@@ -106,16 +105,16 @@ where
#[inline(always)]
fn lookup(&self, _key: &()) -> Option<(V, DepNodeIndex)> {
- *self.cache.lock()
+ self.cache.get().copied()
}
#[inline]
fn complete(&self, _key: (), value: V, index: DepNodeIndex) {
- *self.cache.lock() = Some((value, index));
+ self.cache.set((value, index)).ok();
}
fn iter(&self, f: &mut dyn FnMut(&Self::Key, &Self::Value, DepNodeIndex)) {
- if let Some(value) = self.cache.lock().as_ref() {
+ if let Some(value) = self.cache.get() {
f(&(), &value.0, value.1)
}
}
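
The Lock<Option<..>> to OnceLock change above can be mimicked with the standard library. The sketch below uses std::sync::OnceLock and a plain u32 in place of DepNodeIndex; it illustrates the write-once pattern only and is not the compiler's SingleCache.

    use std::sync::OnceLock;

    // A single-key query cache: the value is written at most once and read without locking.
    struct SingleValueCache<V> {
        cache: OnceLock<(V, u32)>, // u32 stands in for DepNodeIndex
    }

    impl<V: Copy> SingleValueCache<V> {
        fn lookup(&self) -> Option<(V, u32)> {
            self.cache.get().copied()
        }

        fn complete(&self, value: V, index: u32) {
            // The first writer wins; a racing second write is simply dropped,
            // mirroring the `set(...).ok()` call in the diff.
            let _ = self.cache.set((value, index));
        }
    }

Reads never take a lock, and losing a write race is harmless here because any overwritten value would have had to agree with the cached one anyway.
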
@@ -149,19 +148,18 @@ where
#[inline(always)]
fn lookup(&self, key: &K) -> Option<(V, DepNodeIndex)> {
- let lock = self.cache.get_shard_by_hash(key.index() as u64).lock();
+ let lock = self.cache.lock_shard_by_hash(key.index() as u64);
if let Some(Some(value)) = lock.get(*key) { Some(*value) } else { None }
}
#[inline]
fn complete(&self, key: K, value: V, index: DepNodeIndex) {
- let mut lock = self.cache.get_shard_by_hash(key.index() as u64).lock();
+ let mut lock = self.cache.lock_shard_by_hash(key.index() as u64);
lock.insert(key, (value, index));
}
fn iter(&self, f: &mut dyn FnMut(&Self::Key, &Self::Value, DepNodeIndex)) {
- let shards = self.cache.lock_shards();
- for shard in shards.iter() {
+ for shard in self.cache.lock_shards() {
for (k, v) in shard.iter_enumerated() {
if let Some(v) = v {
f(&k, &v.0, v.1);
diff --git a/compiler/rustc_query_system/src/query/config.rs b/compiler/rustc_query_system/src/query/config.rs
index 7e47d7012..c025fac26 100644
--- a/compiler/rustc_query_system/src/query/config.rs
+++ b/compiler/rustc_query_system/src/query/config.rs
@@ -1,6 +1,6 @@
//! Query configuration and description traits.
-use crate::dep_graph::{DepNode, DepNodeParams, SerializedDepNodeIndex};
+use crate::dep_graph::{DepKind, DepNode, DepNodeParams, SerializedDepNodeIndex};
use crate::error::HandleCycleError;
use crate::ich::StableHashingContext;
use crate::query::caches::QueryCache;
@@ -8,6 +8,7 @@ use crate::query::DepNodeIndex;
use crate::query::{QueryContext, QueryInfo, QueryState};
use rustc_data_structures::fingerprint::Fingerprint;
+use rustc_span::ErrorGuaranteed;
use std::fmt::Debug;
use std::hash::Hash;
@@ -26,7 +27,7 @@ pub trait QueryConfig<Qcx: QueryContext>: Copy {
fn format_value(self) -> fn(&Self::Value) -> String;
// Don't use this method to access query results, instead use the methods on TyCtxt
- fn query_state<'a>(self, tcx: Qcx) -> &'a QueryState<Self::Key, Qcx::DepKind>
+ fn query_state<'a>(self, tcx: Qcx) -> &'a QueryState<Self::Key>
where
Qcx: 'a;
@@ -56,7 +57,8 @@ pub trait QueryConfig<Qcx: QueryContext>: Copy {
fn value_from_cycle_error(
self,
tcx: Qcx::DepContext,
- cycle: &[QueryInfo<Qcx::DepKind>],
+ cycle: &[QueryInfo],
+ guar: ErrorGuaranteed,
) -> Self::Value;
fn anon(self) -> bool;
@@ -64,12 +66,12 @@ pub trait QueryConfig<Qcx: QueryContext>: Copy {
fn depth_limit(self) -> bool;
fn feedable(self) -> bool;
- fn dep_kind(self) -> Qcx::DepKind;
+ fn dep_kind(self) -> DepKind;
fn handle_cycle_error(self) -> HandleCycleError;
fn hash_result(self) -> HashResult<Self::Value>;
// Just here for convenience and checking that the key matches the kind, don't override this.
- fn construct_dep_node(self, tcx: Qcx::DepContext, key: &Self::Key) -> DepNode<Qcx::DepKind> {
+ fn construct_dep_node(self, tcx: Qcx::DepContext, key: &Self::Key) -> DepNode {
DepNode::construct(tcx, self.dep_kind(), key)
}
}
diff --git a/compiler/rustc_query_system/src/query/job.rs b/compiler/rustc_query_system/src/query/job.rs
index 1b1248924..f2c1f84fc 100644
--- a/compiler/rustc_query_system/src/query/job.rs
+++ b/compiler/rustc_query_system/src/query/job.rs
@@ -1,9 +1,8 @@
-use crate::dep_graph::DepKind;
+use crate::dep_graph::DepContext;
use crate::error::CycleStack;
use crate::query::plumbing::CycleError;
+use crate::query::DepKind;
use crate::query::{QueryContext, QueryStackFrame};
-use core::marker::PhantomData;
-
use rustc_data_structures::fx::FxHashMap;
use rustc_errors::{
Diagnostic, DiagnosticBuilder, ErrorGuaranteed, Handler, IntoDiagnostic, Level,
@@ -30,48 +29,48 @@ use {
/// Represents a span and a query key.
#[derive(Clone, Debug)]
-pub struct QueryInfo<D: DepKind> {
+pub struct QueryInfo {
/// The span corresponding to the reason for which this query was required.
pub span: Span,
- pub query: QueryStackFrame<D>,
+ pub query: QueryStackFrame,
}
-pub type QueryMap<D> = FxHashMap<QueryJobId, QueryJobInfo<D>>;
+pub type QueryMap = FxHashMap<QueryJobId, QueryJobInfo>;
/// A value uniquely identifying an active query job.
#[derive(Copy, Clone, Eq, PartialEq, Hash)]
pub struct QueryJobId(pub NonZeroU64);
impl QueryJobId {
- fn query<D: DepKind>(self, map: &QueryMap<D>) -> QueryStackFrame<D> {
+ fn query(self, map: &QueryMap) -> QueryStackFrame {
map.get(&self).unwrap().query.clone()
}
#[cfg(parallel_compiler)]
- fn span<D: DepKind>(self, map: &QueryMap<D>) -> Span {
+ fn span(self, map: &QueryMap) -> Span {
map.get(&self).unwrap().job.span
}
#[cfg(parallel_compiler)]
- fn parent<D: DepKind>(self, map: &QueryMap<D>) -> Option<QueryJobId> {
+ fn parent(self, map: &QueryMap) -> Option<QueryJobId> {
map.get(&self).unwrap().job.parent
}
#[cfg(parallel_compiler)]
- fn latch<D: DepKind>(self, map: &QueryMap<D>) -> Option<&QueryLatch<D>> {
+ fn latch(self, map: &QueryMap) -> Option<&QueryLatch> {
map.get(&self).unwrap().job.latch.as_ref()
}
}
#[derive(Clone)]
-pub struct QueryJobInfo<D: DepKind> {
- pub query: QueryStackFrame<D>,
- pub job: QueryJob<D>,
+pub struct QueryJobInfo {
+ pub query: QueryStackFrame,
+ pub job: QueryJob,
}
/// Represents an active query job.
#[derive(Clone)]
-pub struct QueryJob<D: DepKind> {
+pub struct QueryJob {
pub id: QueryJobId,
/// The span corresponding to the reason for which this query was required.
@@ -82,11 +81,10 @@ pub struct QueryJob<D: DepKind> {
/// The latch that is used to wait on this job.
#[cfg(parallel_compiler)]
- latch: Option<QueryLatch<D>>,
- spooky: core::marker::PhantomData<D>,
+ latch: Option<QueryLatch>,
}
-impl<D: DepKind> QueryJob<D> {
+impl QueryJob {
/// Creates a new query job.
#[inline]
pub fn new(id: QueryJobId, span: Span, parent: Option<QueryJobId>) -> Self {
@@ -96,12 +94,11 @@ impl<D: DepKind> QueryJob<D> {
parent,
#[cfg(parallel_compiler)]
latch: None,
- spooky: PhantomData,
}
}
#[cfg(parallel_compiler)]
- pub(super) fn latch(&mut self) -> QueryLatch<D> {
+ pub(super) fn latch(&mut self) -> QueryLatch {
if self.latch.is_none() {
self.latch = Some(QueryLatch::new());
}
@@ -124,13 +121,12 @@ impl<D: DepKind> QueryJob<D> {
}
impl QueryJobId {
- #[cfg(not(parallel_compiler))]
- pub(super) fn find_cycle_in_stack<D: DepKind>(
+ pub(super) fn find_cycle_in_stack(
&self,
- query_map: QueryMap<D>,
+ query_map: QueryMap,
current_job: &Option<QueryJobId>,
span: Span,
- ) -> CycleError<D> {
+ ) -> CycleError {
// Find the waitee amongst `current_job` parents
let mut cycle = Vec::new();
let mut current_job = Option::clone(current_job);
@@ -164,18 +160,18 @@ impl QueryJobId {
#[cold]
#[inline(never)]
- pub fn try_find_layout_root<D: DepKind>(
+ pub fn try_find_layout_root(
&self,
- query_map: QueryMap<D>,
- ) -> Option<(QueryJobInfo<D>, usize)> {
+ query_map: QueryMap,
+ layout_of_kind: DepKind,
+ ) -> Option<(QueryJobInfo, usize)> {
let mut last_layout = None;
let mut current_id = Some(*self);
let mut depth = 0;
while let Some(id) = current_id {
let info = query_map.get(&id).unwrap();
- // FIXME: This string comparison should probably not be done.
- if format!("{:?}", info.query.dep_kind) == "layout_of" {
+ if info.query.dep_kind == layout_of_kind {
depth += 1;
last_layout = Some((info.clone(), depth));
}
@@ -186,15 +182,15 @@ impl QueryJobId {
}
#[cfg(parallel_compiler)]
-struct QueryWaiter<D: DepKind> {
+struct QueryWaiter {
query: Option<QueryJobId>,
condvar: Condvar,
span: Span,
- cycle: Mutex<Option<CycleError<D>>>,
+ cycle: Mutex<Option<CycleError>>,
}
#[cfg(parallel_compiler)]
-impl<D: DepKind> QueryWaiter<D> {
+impl QueryWaiter {
fn notify(&self, registry: &rayon_core::Registry) {
rayon_core::mark_unblocked(registry);
self.condvar.notify_one();
@@ -202,19 +198,19 @@ impl<D: DepKind> QueryWaiter<D> {
}
#[cfg(parallel_compiler)]
-struct QueryLatchInfo<D: DepKind> {
+struct QueryLatchInfo {
complete: bool,
- waiters: Vec<Arc<QueryWaiter<D>>>,
+ waiters: Vec<Arc<QueryWaiter>>,
}
#[cfg(parallel_compiler)]
#[derive(Clone)]
-pub(super) struct QueryLatch<D: DepKind> {
- info: Arc<Mutex<QueryLatchInfo<D>>>,
+pub(super) struct QueryLatch {
+ info: Arc<Mutex<QueryLatchInfo>>,
}
#[cfg(parallel_compiler)]
-impl<D: DepKind> QueryLatch<D> {
+impl QueryLatch {
fn new() -> Self {
QueryLatch {
info: Arc::new(Mutex::new(QueryLatchInfo { complete: false, waiters: Vec::new() })),
@@ -222,11 +218,7 @@ impl<D: DepKind> QueryLatch<D> {
}
/// Waits for the query job to complete.
- pub(super) fn wait_on(
- &self,
- query: Option<QueryJobId>,
- span: Span,
- ) -> Result<(), CycleError<D>> {
+ pub(super) fn wait_on(&self, query: Option<QueryJobId>, span: Span) -> Result<(), CycleError> {
let waiter =
Arc::new(QueryWaiter { query, span, cycle: Mutex::new(None), condvar: Condvar::new() });
self.wait_on_inner(&waiter);
@@ -241,7 +233,7 @@ impl<D: DepKind> QueryLatch<D> {
}
/// Awaits the caller on this latch by blocking the current thread.
- fn wait_on_inner(&self, waiter: &Arc<QueryWaiter<D>>) {
+ fn wait_on_inner(&self, waiter: &Arc<QueryWaiter>) {
let mut info = self.info.lock();
if !info.complete {
// We push the waiter on to the `waiters` list. It can be accessed inside
@@ -275,7 +267,7 @@ impl<D: DepKind> QueryLatch<D> {
/// Removes a single waiter from the list of waiters.
/// This is used to break query cycles.
- fn extract_waiter(&self, waiter: usize) -> Arc<QueryWaiter<D>> {
+ fn extract_waiter(&self, waiter: usize) -> Arc<QueryWaiter> {
let mut info = self.info.lock();
debug_assert!(!info.complete);
// Remove the waiter from the list of waiters
@@ -297,14 +289,9 @@ type Waiter = (QueryJobId, usize);
/// required information to resume the waiter.
/// If all `visit` calls return None, this function also returns None.
#[cfg(parallel_compiler)]
-fn visit_waiters<F, D>(
- query_map: &QueryMap<D>,
- query: QueryJobId,
- mut visit: F,
-) -> Option<Option<Waiter>>
+fn visit_waiters<F>(query_map: &QueryMap, query: QueryJobId, mut visit: F) -> Option<Option<Waiter>>
where
F: FnMut(Span, QueryJobId) -> Option<Option<Waiter>>,
- D: DepKind,
{
// Visit the parent query which is a non-resumable waiter since it's on the same stack
if let Some(parent) = query.parent(query_map) {
@@ -333,8 +320,8 @@ where
/// If a cycle is detected, this initial value is replaced with the span causing
/// the cycle.
#[cfg(parallel_compiler)]
-fn cycle_check<D: DepKind>(
- query_map: &QueryMap<D>,
+fn cycle_check(
+ query_map: &QueryMap,
query: QueryJobId,
span: Span,
stack: &mut Vec<(Span, QueryJobId)>,
@@ -374,8 +361,8 @@ fn cycle_check<D: DepKind>(
/// from `query` without going through any of the queries in `visited`.
/// This is achieved with a depth first search.
#[cfg(parallel_compiler)]
-fn connected_to_root<D: DepKind>(
- query_map: &QueryMap<D>,
+fn connected_to_root(
+ query_map: &QueryMap,
query: QueryJobId,
visited: &mut FxHashSet<QueryJobId>,
) -> bool {
@@ -397,10 +384,9 @@ fn connected_to_root<D: DepKind>(
// Deterministically pick a query from a list
#[cfg(parallel_compiler)]
-fn pick_query<'a, T, F, D>(query_map: &QueryMap<D>, queries: &'a [T], f: F) -> &'a T
+fn pick_query<'a, T, F>(query_map: &QueryMap, queries: &'a [T], f: F) -> &'a T
where
F: Fn(&T) -> (Span, QueryJobId),
- D: DepKind,
{
// Deterministically pick an entry point
// FIXME: Sort this instead
@@ -424,10 +410,10 @@ where
/// If a cycle was not found, the starting query is removed from `jobs` and
/// the function returns false.
#[cfg(parallel_compiler)]
-fn remove_cycle<D: DepKind>(
- query_map: &QueryMap<D>,
+fn remove_cycle(
+ query_map: &QueryMap,
jobs: &mut Vec<QueryJobId>,
- wakelist: &mut Vec<Arc<QueryWaiter<D>>>,
+ wakelist: &mut Vec<Arc<QueryWaiter>>,
) -> bool {
let mut visited = FxHashSet::default();
let mut stack = Vec::new();
@@ -529,7 +515,7 @@ fn remove_cycle<D: DepKind>(
/// There may be multiple cycles involved in a deadlock, so this searches
/// all active queries for cycles before finally resuming all the waiters at once.
#[cfg(parallel_compiler)]
-pub fn deadlock<D: DepKind>(query_map: QueryMap<D>, registry: &rayon_core::Registry) {
+pub fn deadlock(query_map: QueryMap, registry: &rayon_core::Registry) {
let on_panic = defer(|| {
eprintln!("deadlock handler panicked, aborting process");
process::abort();
@@ -553,7 +539,9 @@ pub fn deadlock<D: DepKind>(query_map: QueryMap<D>, registry: &rayon_core::Regis
// which in turn will wait on X causing a deadlock. We have a false dependency from
// X to Y due to Rayon waiting and a true dependency from Y to X. The algorithm here
// only considers the true dependency and won't detect a cycle.
- assert!(found_cycle);
+ if !found_cycle {
+ panic!("deadlock detected");
+ }
// FIXME: Ensure this won't cause a deadlock before we return
for waiter in wakelist.into_iter() {
@@ -565,9 +553,9 @@ pub fn deadlock<D: DepKind>(query_map: QueryMap<D>, registry: &rayon_core::Regis
#[inline(never)]
#[cold]
-pub(crate) fn report_cycle<'a, D: DepKind>(
+pub(crate) fn report_cycle<'a>(
sess: &'a Session,
- CycleError { usage, cycle: stack }: &CycleError<D>,
+ CycleError { usage, cycle: stack }: &CycleError,
) -> DiagnosticBuilder<'a, ErrorGuaranteed> {
assert!(!stack.is_empty());
@@ -592,9 +580,7 @@ pub(crate) fn report_cycle<'a, D: DepKind>(
});
}
- let alias = if stack
- .iter()
- .all(|entry| matches!(entry.query.def_kind, Some(DefKind::TyAlias { .. })))
+ let alias = if stack.iter().all(|entry| matches!(entry.query.def_kind, Some(DefKind::TyAlias)))
{
Some(crate::error::Alias::Ty)
} else if stack.iter().all(|entry| entry.query.def_kind == Some(DefKind::TraitAlias)) {
@@ -654,8 +640,10 @@ pub fn print_query_stack<Qcx: QueryContext>(
if let Some(ref mut file) = file {
let _ = writeln!(
file,
- "#{} [{:?}] {}",
- count_total, query_info.query.dep_kind, query_info.query.description
+ "#{} [{}] {}",
+ count_total,
+ qcx.dep_context().dep_kind_info(query_info.query.dep_kind).name,
+ query_info.query.description
);
}
diff --git a/compiler/rustc_query_system/src/query/mod.rs b/compiler/rustc_query_system/src/query/mod.rs
index f7619d75b..05dee9f12 100644
--- a/compiler/rustc_query_system/src/query/mod.rs
+++ b/compiler/rustc_query_system/src/query/mod.rs
@@ -28,27 +28,27 @@ use thin_vec::ThinVec;
///
/// This is mostly used in case of cycles for error reporting.
#[derive(Clone, Debug)]
-pub struct QueryStackFrame<D: DepKind> {
+pub struct QueryStackFrame {
pub description: String,
span: Option<Span>,
pub def_id: Option<DefId>,
pub def_kind: Option<DefKind>,
pub ty_adt_id: Option<DefId>,
- pub dep_kind: D,
+ pub dep_kind: DepKind,
/// This hash is used to deterministically pick
/// a query to remove cycles in the parallel compiler.
#[cfg(parallel_compiler)]
hash: Hash64,
}
-impl<D: DepKind> QueryStackFrame<D> {
+impl QueryStackFrame {
#[inline]
pub fn new(
description: String,
span: Option<Span>,
def_id: Option<DefId>,
def_kind: Option<DefKind>,
- dep_kind: D,
+ dep_kind: DepKind,
ty_adt_id: Option<DefId>,
_hash: impl FnOnce() -> Hash64,
) -> Self {
@@ -106,7 +106,7 @@ pub trait QueryContext: HasDepContext {
/// Get the query information from the TLS context.
fn current_query_job(self) -> Option<QueryJobId>;
- fn try_collect_active_jobs(self) -> Option<QueryMap<Self::DepKind>>;
+ fn try_collect_active_jobs(self) -> Option<QueryMap>;
/// Load side effects associated to the node in the previous session.
fn load_side_effects(self, prev_dep_node_index: SerializedDepNodeIndex) -> QuerySideEffects;
diff --git a/compiler/rustc_query_system/src/query/plumbing.rs b/compiler/rustc_query_system/src/query/plumbing.rs
index 4adb4eb74..ae8414ebb 100644
--- a/compiler/rustc_query_system/src/query/plumbing.rs
+++ b/compiler/rustc_query_system/src/query/plumbing.rs
@@ -2,8 +2,8 @@
//! generate the actual methods on tcx which find and execute the provider,
//! manage the caches, and so forth.
-use crate::dep_graph::{DepContext, DepKind, DepNode, DepNodeIndex, DepNodeParams};
-use crate::dep_graph::{DepGraphData, HasDepContext};
+use crate::dep_graph::DepGraphData;
+use crate::dep_graph::{DepContext, DepNode, DepNodeIndex, DepNodeParams};
use crate::ich::StableHashingContext;
use crate::query::caches::QueryCache;
#[cfg(parallel_compiler)]
@@ -14,10 +14,11 @@ use crate::query::{QueryContext, QueryMap, QuerySideEffects, QueryStackFrame};
use crate::HandleCycleError;
use rustc_data_structures::fingerprint::Fingerprint;
use rustc_data_structures::fx::FxHashMap;
+use rustc_data_structures::sharded::Sharded;
use rustc_data_structures::stack::ensure_sufficient_stack;
use rustc_data_structures::sync::Lock;
#[cfg(parallel_compiler)]
-use rustc_data_structures::{cold_path, sharded::Sharded};
+use rustc_data_structures::{outline, sync};
use rustc_errors::{DiagnosticBuilder, ErrorGuaranteed, FatalError};
use rustc_span::{Span, DUMMY_SP};
use std::cell::Cell;
@@ -29,68 +30,40 @@ use thin_vec::ThinVec;
use super::QueryConfig;
-pub struct QueryState<K, D: DepKind> {
- #[cfg(parallel_compiler)]
- active: Sharded<FxHashMap<K, QueryResult<D>>>,
- #[cfg(not(parallel_compiler))]
- active: Lock<FxHashMap<K, QueryResult<D>>>,
+pub struct QueryState<K> {
+ active: Sharded<FxHashMap<K, QueryResult>>,
}
/// Indicates the state of a query for a given key in a query map.
-enum QueryResult<D: DepKind> {
+enum QueryResult {
/// An already executing query. The query job can be used to await for its completion.
- Started(QueryJob<D>),
+ Started(QueryJob),
/// The query panicked. Queries trying to wait on this will raise a fatal error which will
/// silently panic.
Poisoned,
}
-impl<K, D> QueryState<K, D>
+impl<K> QueryState<K>
where
K: Eq + Hash + Copy + Debug,
- D: DepKind,
{
pub fn all_inactive(&self) -> bool {
- #[cfg(parallel_compiler)]
- {
- let shards = self.active.lock_shards();
- shards.iter().all(|shard| shard.is_empty())
- }
- #[cfg(not(parallel_compiler))]
- {
- self.active.lock().is_empty()
- }
+ self.active.lock_shards().all(|shard| shard.is_empty())
}
pub fn try_collect_active_jobs<Qcx: Copy>(
&self,
qcx: Qcx,
- make_query: fn(Qcx, K) -> QueryStackFrame<D>,
- jobs: &mut QueryMap<D>,
+ make_query: fn(Qcx, K) -> QueryStackFrame,
+ jobs: &mut QueryMap,
) -> Option<()> {
let mut active = Vec::new();
- #[cfg(parallel_compiler)]
- {
- // We use try_lock_shards here since we are called from the
- // deadlock handler, and this shouldn't be locked.
- let shards = self.active.try_lock_shards()?;
- for shard in shards.iter() {
- for (k, v) in shard.iter() {
- if let QueryResult::Started(ref job) = *v {
- active.push((*k, job.clone()));
- }
- }
- }
- }
- #[cfg(not(parallel_compiler))]
- {
- // We use try_lock here since we are called from the
- // deadlock handler, and this shouldn't be locked.
- // (FIXME: Is this relevant for non-parallel compilers? It doesn't
- // really hurt much.)
- for (k, v) in self.active.try_lock()?.iter() {
+ // We use try_lock_shards here since we are called from the
+ // deadlock handler, and this shouldn't be locked.
+ for shard in self.active.try_lock_shards() {
+ for (k, v) in shard?.iter() {
if let QueryResult::Started(ref job) = *v {
active.push((*k, job.clone()));
}
@@ -108,25 +81,25 @@ where
}
}
-impl<K, D: DepKind> Default for QueryState<K, D> {
- fn default() -> QueryState<K, D> {
+impl<K> Default for QueryState<K> {
+ fn default() -> QueryState<K> {
QueryState { active: Default::default() }
}
}
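
The lock_shard_by_value and lock_shards calls this patch switches to can be pictured with a toy sharded map: hash the key, pick one of several mutex-protected shards, and lock only that shard. The type and method names below are stand-ins built on std Mutex and HashMap, not rustc's Sharded type.

    use std::collections::hash_map::RandomState;
    use std::collections::HashMap;
    use std::hash::{BuildHasher, Hash, Hasher};
    use std::sync::{Mutex, MutexGuard};

    // A toy sharded map: per-key operations lock only the shard the key hashes to.
    struct ShardedMap<K, V> {
        shards: Vec<Mutex<HashMap<K, V>>>,
        hasher: RandomState,
    }

    impl<K: Eq + Hash, V> ShardedMap<K, V> {
        fn new(shard_count: usize) -> Self {
            Self {
                shards: (0..shard_count).map(|_| Mutex::new(HashMap::new())).collect(),
                hasher: RandomState::new(),
            }
        }

        // Lock the single shard responsible for `key`.
        fn lock_shard_by_value(&self, key: &K) -> MutexGuard<'_, HashMap<K, V>> {
            let mut h = self.hasher.build_hasher();
            key.hash(&mut h);
            let idx = h.finish() as usize % self.shards.len();
            self.shards[idx].lock().unwrap()
        }

        // Check every shard, locking them one at a time.
        fn all_inactive(&self) -> bool {
            self.shards.iter().all(|shard| shard.lock().unwrap().is_empty())
        }
    }

Splitting the map this way keeps unrelated keys from contending on a single lock, which is the point of the Sharded-based QueryState in the diff.
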
/// A type representing the responsibility to execute the job in the `job` field.
/// This will poison the relevant query if dropped.
-struct JobOwner<'tcx, K, D: DepKind>
+struct JobOwner<'tcx, K>
where
K: Eq + Hash + Copy,
{
- state: &'tcx QueryState<K, D>,
+ state: &'tcx QueryState<K>,
key: K,
}
#[cold]
#[inline(never)]
-fn mk_cycle<Q, Qcx>(query: Q, qcx: Qcx, cycle_error: CycleError<Qcx::DepKind>) -> Q::Value
+fn mk_cycle<Q, Qcx>(query: Q, qcx: Qcx, cycle_error: CycleError) -> Q::Value
where
Q: QueryConfig<Qcx>,
Qcx: QueryContext,
@@ -138,7 +111,7 @@ where
fn handle_cycle_error<Q, Qcx>(
query: Q,
qcx: Qcx,
- cycle_error: &CycleError<Qcx::DepKind>,
+ cycle_error: &CycleError,
mut error: DiagnosticBuilder<'_, ErrorGuaranteed>,
) -> Q::Value
where
@@ -148,8 +121,8 @@ where
use HandleCycleError::*;
match query.handle_cycle_error() {
Error => {
- error.emit();
- query.value_from_cycle_error(*qcx.dep_context(), &cycle_error.cycle)
+ let guar = error.emit();
+ query.value_from_cycle_error(*qcx.dep_context(), &cycle_error.cycle, guar)
}
Fatal => {
error.emit();
@@ -157,13 +130,13 @@ where
unreachable!()
}
DelayBug => {
- error.delay_as_bug();
- query.value_from_cycle_error(*qcx.dep_context(), &cycle_error.cycle)
+ let guar = error.delay_as_bug();
+ query.value_from_cycle_error(*qcx.dep_context(), &cycle_error.cycle, guar)
}
}
}
-impl<'tcx, K, D: DepKind> JobOwner<'tcx, K, D>
+impl<'tcx, K> JobOwner<'tcx, K>
where
K: Eq + Hash + Copy,
{
@@ -184,10 +157,7 @@ where
cache.complete(key, result, dep_node_index);
let job = {
- #[cfg(parallel_compiler)]
- let mut lock = state.active.get_shard_by_value(&key).lock();
- #[cfg(not(parallel_compiler))]
- let mut lock = state.active.lock();
+ let mut lock = state.active.lock_shard_by_value(&key);
match lock.remove(&key).unwrap() {
QueryResult::Started(job) => job,
QueryResult::Poisoned => panic!(),
@@ -198,10 +168,9 @@ where
}
}
-impl<'tcx, K, D> Drop for JobOwner<'tcx, K, D>
+impl<'tcx, K> Drop for JobOwner<'tcx, K>
where
K: Eq + Hash + Copy,
- D: DepKind,
{
#[inline(never)]
#[cold]
@@ -209,10 +178,7 @@ where
// Poison the query so jobs waiting on it panic.
let state = self.state;
let job = {
- #[cfg(parallel_compiler)]
- let mut shard = state.active.get_shard_by_value(&self.key).lock();
- #[cfg(not(parallel_compiler))]
- let mut shard = state.active.lock();
+ let mut shard = state.active.lock_shard_by_value(&self.key);
let job = match shard.remove(&self.key).unwrap() {
QueryResult::Started(job) => job,
QueryResult::Poisoned => panic!(),
@@ -227,10 +193,10 @@ where
}
#[derive(Clone)]
-pub(crate) struct CycleError<D: DepKind> {
+pub(crate) struct CycleError {
/// The query and related span that uses the cycle.
- pub usage: Option<(Span, QueryStackFrame<D>)>,
- pub cycle: Vec<QueryInfo<D>>,
+ pub usage: Option<(Span, QueryStackFrame)>,
+ pub cycle: Vec<QueryInfo>,
}
/// Checks if the query is already computed and in the cache.
@@ -255,7 +221,6 @@ where
#[cold]
#[inline(never)]
-#[cfg(not(parallel_compiler))]
fn cycle_error<Q, Qcx>(
query: Q,
qcx: Qcx,
@@ -281,7 +246,7 @@ fn wait_for_query<Q, Qcx>(
qcx: Qcx,
span: Span,
key: Q::Key,
- latch: QueryLatch<Qcx::DepKind>,
+ latch: QueryLatch,
current: Option<QueryJobId>,
) -> (Q::Value, Option<DepNodeIndex>)
where
@@ -300,7 +265,18 @@ where
match result {
Ok(()) => {
let Some((v, index)) = query.query_cache(qcx).lookup(&key) else {
- cold_path(|| panic!("value must be in cache after waiting"))
+ outline(|| {
+ // We didn't find the query result in the query cache. Check if it was
+ // poisoned due to a panic instead.
+ let lock = query.query_state(qcx).active.get_shard_by_value(&key).lock();
+ match lock.get(&key) {
+ // The query we waited on panicked. Continue unwinding here.
+ Some(QueryResult::Poisoned) => FatalError.raise(),
+ _ => panic!(
+ "query result must in the cache or the query must be poisoned after a wait"
+ ),
+ }
+ })
};
qcx.dep_context().profiler().query_cache_hit(index.into());
@@ -318,17 +294,14 @@ fn try_execute_query<Q, Qcx, const INCR: bool>(
qcx: Qcx,
span: Span,
key: Q::Key,
- dep_node: Option<DepNode<Qcx::DepKind>>,
+ dep_node: Option<DepNode>,
) -> (Q::Value, Option<DepNodeIndex>)
where
Q: QueryConfig<Qcx>,
Qcx: QueryContext,
{
let state = query.query_state(qcx);
- #[cfg(parallel_compiler)]
- let mut state_lock = state.active.get_shard_by_value(&key).lock();
- #[cfg(not(parallel_compiler))]
- let mut state_lock = state.active.lock();
+ let mut state_lock = state.active.lock_shard_by_value(&key);
// For the parallel compiler we need to check both the query cache and query state structures
// while holding the state lock to ensure that 1) the query has not yet completed and 2) the
@@ -360,8 +333,18 @@ where
}
Entry::Occupied(mut entry) => {
match entry.get_mut() {
- #[cfg(not(parallel_compiler))]
QueryResult::Started(job) => {
+ #[cfg(parallel_compiler)]
+ if sync::is_dyn_thread_safe() {
+ // Get the latch out
+ let latch = job.latch();
+ drop(state_lock);
+
+ // Only call `wait_for_query` if we're using a Rayon thread pool
+ // as it will attempt to mark the worker thread as blocked.
+ return wait_for_query(query, qcx, span, key, latch, current_job_id);
+ }
+
let id = job.id;
drop(state_lock);
@@ -369,14 +352,6 @@ where
// so we just return the error.
cycle_error(query, qcx, id, span)
}
- #[cfg(parallel_compiler)]
- QueryResult::Started(job) => {
- // Get the latch out
- let latch = job.latch();
- drop(state_lock);
-
- wait_for_query(query, qcx, span, key, latch, current_job_id)
- }
QueryResult::Poisoned => FatalError.raise(),
}
}
@@ -387,10 +362,10 @@ where
fn execute_job<Q, Qcx, const INCR: bool>(
query: Q,
qcx: Qcx,
- state: &QueryState<Q::Key, Qcx::DepKind>,
+ state: &QueryState<Q::Key>,
key: Q::Key,
id: QueryJobId,
- dep_node: Option<DepNode<Qcx::DepKind>>,
+ dep_node: Option<DepNode>,
) -> (Q::Value, Option<DepNodeIndex>)
where
Q: QueryConfig<Qcx>,
@@ -497,9 +472,9 @@ where
fn execute_job_incr<Q, Qcx>(
query: Q,
qcx: Qcx,
- dep_graph_data: &DepGraphData<Qcx::DepKind>,
+ dep_graph_data: &DepGraphData<Qcx::Deps>,
key: Q::Key,
- mut dep_node_opt: Option<DepNode<Qcx::DepKind>>,
+ mut dep_node_opt: Option<DepNode>,
job_id: QueryJobId,
) -> (Q::Value, DepNodeIndex)
where
@@ -563,10 +538,10 @@ where
#[inline(always)]
fn try_load_from_disk_and_cache_in_memory<Q, Qcx>(
query: Q,
- dep_graph_data: &DepGraphData<Qcx::DepKind>,
+ dep_graph_data: &DepGraphData<Qcx::Deps>,
qcx: Qcx,
key: &Q::Key,
- dep_node: &DepNode<Qcx::DepKind>,
+ dep_node: &DepNode,
) -> Option<(Q::Value, DepNodeIndex)>
where
Q: QueryConfig<Qcx>,
@@ -660,7 +635,7 @@ where
#[instrument(skip(tcx, dep_graph_data, result, hash_result, format_value), level = "debug")]
pub(crate) fn incremental_verify_ich<Tcx, V>(
tcx: Tcx,
- dep_graph_data: &DepGraphData<Tcx::DepKind>,
+ dep_graph_data: &DepGraphData<Tcx::Deps>,
result: &V,
prev_index: SerializedDepNodeIndex,
hash_result: Option<fn(&mut StableHashingContext<'_>, &V) -> Fingerprint>,
@@ -753,7 +728,7 @@ fn ensure_must_run<Q, Qcx>(
qcx: Qcx,
key: &Q::Key,
check_cache: bool,
-) -> (bool, Option<DepNode<Qcx::DepKind>>)
+) -> (bool, Option<DepNode>)
where
Q: QueryConfig<Qcx>,
Qcx: QueryContext,
@@ -844,12 +819,8 @@ where
Some(result)
}
-pub fn force_query<Q, Qcx>(
- query: Q,
- qcx: Qcx,
- key: Q::Key,
- dep_node: DepNode<<Qcx as HasDepContext>::DepKind>,
-) where
+pub fn force_query<Q, Qcx>(query: Q, qcx: Qcx, key: Q::Key, dep_node: DepNode)
+where
Q: QueryConfig<Qcx>,
Qcx: QueryContext,
{
diff --git a/compiler/rustc_query_system/src/values.rs b/compiler/rustc_query_system/src/values.rs
index ce551078c..8848fda9d 100644
--- a/compiler/rustc_query_system/src/values.rs
+++ b/compiler/rustc_query_system/src/values.rs
@@ -1,12 +1,14 @@
-use crate::dep_graph::{DepContext, DepKind};
+use rustc_span::ErrorGuaranteed;
+
+use crate::dep_graph::DepContext;
use crate::query::QueryInfo;
-pub trait Value<Tcx: DepContext, D: DepKind>: Sized {
- fn from_cycle_error(tcx: Tcx, cycle: &[QueryInfo<D>]) -> Self;
+pub trait Value<Tcx: DepContext>: Sized {
+ fn from_cycle_error(tcx: Tcx, cycle: &[QueryInfo], guar: ErrorGuaranteed) -> Self;
}
-impl<Tcx: DepContext, T, D: DepKind> Value<Tcx, D> for T {
- default fn from_cycle_error(tcx: Tcx, cycle: &[QueryInfo<D>]) -> T {
+impl<Tcx: DepContext, T> Value<Tcx> for T {
+ default fn from_cycle_error(tcx: Tcx, cycle: &[QueryInfo], _guar: ErrorGuaranteed) -> T {
tcx.sess().abort_if_errors();
// Ideally we would use `bug!` here. But bug! is only defined in rustc_middle, and it's
// non-trivial to define it earlier.