path: root/compiler/rustc_query_system/src
Diffstat (limited to 'compiler/rustc_query_system/src')
-rw-r--r--  compiler/rustc_query_system/src/error.rs             | 80
-rw-r--r--  compiler/rustc_query_system/src/ich/hcx.rs           | 25
-rw-r--r--  compiler/rustc_query_system/src/ich/impls_hir.rs     | 24
-rw-r--r--  compiler/rustc_query_system/src/ich/impls_syntax.rs  |  8
-rw-r--r--  compiler/rustc_query_system/src/lib.rs               |  9
-rw-r--r--  compiler/rustc_query_system/src/query/config.rs      | 20
-rw-r--r--  compiler/rustc_query_system/src/query/job.rs         | 69
-rw-r--r--  compiler/rustc_query_system/src/query/mod.rs         | 10
-rw-r--r--  compiler/rustc_query_system/src/query/plumbing.rs    | 88
-rw-r--r--  compiler/rustc_query_system/src/values.rs            | 14
10 files changed, 222 insertions, 125 deletions
diff --git a/compiler/rustc_query_system/src/error.rs b/compiler/rustc_query_system/src/error.rs
new file mode 100644
index 000000000..3fb06cbed
--- /dev/null
+++ b/compiler/rustc_query_system/src/error.rs
@@ -0,0 +1,80 @@
+use rustc_errors::AddSubdiagnostic;
+use rustc_span::Span;
+
+pub struct CycleStack {
+ pub span: Span,
+ pub desc: String,
+}
+
+impl AddSubdiagnostic for CycleStack {
+ fn add_to_diagnostic(self, diag: &mut rustc_errors::Diagnostic) {
+ diag.span_note(self.span, &format!("...which requires {}...", self.desc));
+ }
+}
+
+#[derive(Copy, Clone)]
+pub enum HandleCycleError {
+ Error,
+ Fatal,
+ DelayBug,
+}
+
+#[derive(SessionSubdiagnostic)]
+pub enum StackCount {
+ #[note(query_system::cycle_stack_single)]
+ Single,
+ #[note(query_system::cycle_stack_multiple)]
+ Multiple,
+}
+
+#[derive(SessionSubdiagnostic)]
+pub enum Alias {
+ #[note(query_system::cycle_recursive_ty_alias)]
+ #[help(query_system::cycle_recursive_ty_alias_help1)]
+ #[help(query_system::cycle_recursive_ty_alias_help2)]
+ Ty,
+ #[note(query_system::cycle_recursive_trait_alias)]
+ Trait,
+}
+
+#[derive(SessionSubdiagnostic)]
+#[note(query_system::cycle_usage)]
+pub struct CycleUsage {
+ #[primary_span]
+ pub span: Span,
+ pub usage: String,
+}
+
+#[derive(SessionDiagnostic)]
+#[diag(query_system::cycle, code = "E0391")]
+pub struct Cycle {
+ #[primary_span]
+ pub span: Span,
+ pub stack_bottom: String,
+ #[subdiagnostic]
+ pub cycle_stack: Vec<CycleStack>,
+ #[subdiagnostic]
+ pub stack_count: StackCount,
+ #[subdiagnostic]
+ pub alias: Option<Alias>,
+ #[subdiagnostic]
+ pub cycle_usage: Option<CycleUsage>,
+}
+
+#[derive(SessionDiagnostic)]
+#[diag(query_system::reentrant)]
+pub struct Reentrant;
+
+#[derive(SessionDiagnostic)]
+#[diag(query_system::increment_compilation)]
+#[help]
+#[note(query_system::increment_compilation_note1)]
+#[note(query_system::increment_compilation_note2)]
+pub struct IncrementCompilation {
+ pub run_cmd: String,
+ pub dep_node: String,
+}
+
+#[derive(SessionDiagnostic)]
+#[diag(query_system::query_overflow)]
+pub struct QueryOverflow;
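
The new error.rs moves cycle reporting onto derive-based diagnostic structs; each #[diag]/#[note]/#[help] attribute names a Fluent message slug under query_system. A minimal usage sketch, assuming rustc_session's Session is in scope (the calls mirror the ones appearing later in this diff: emit_err, emit_fatal, and into_diagnostic):

    fn emit_examples(sess: &rustc_session::Session, cycle: Cycle) {
        // Emit a plain structured error (Reentrant is raised this way in plumbing.rs below).
        sess.emit_err(Reentrant);
        // Fatal diagnostics abort; QueryOverflow is emitted with emit_fatal in query/mod.rs.
        // sess.emit_fatal(QueryOverflow);
        // report_cycle() builds the diagnostic without emitting it:
        let _diag = cycle.into_diagnostic(&sess.parse_sess.span_diagnostic);
    }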
diff --git a/compiler/rustc_query_system/src/ich/hcx.rs b/compiler/rustc_query_system/src/ich/hcx.rs
index 217fac341..a09b8ca30 100644
--- a/compiler/rustc_query_system/src/ich/hcx.rs
+++ b/compiler/rustc_query_system/src/ich/hcx.rs
@@ -40,11 +40,8 @@ pub struct StableHashingContext<'a> {
#[derive(Clone, Copy)]
pub(super) enum BodyResolver<'tcx> {
Forbidden,
- Traverse {
- hash_bodies: bool,
- owner: LocalDefId,
- bodies: &'tcx SortedMap<hir::ItemLocalId, &'tcx hir::Body<'tcx>>,
- },
+ Ignore,
+ Traverse { owner: LocalDefId, bodies: &'tcx SortedMap<hir::ItemLocalId, &'tcx hir::Body<'tcx>> },
}
impl<'a> StableHashingContext<'a> {
@@ -98,32 +95,20 @@ impl<'a> StableHashingContext<'a> {
Self::new_with_or_without_spans(sess, definitions, cstore, source_span, always_ignore_spans)
}
- /// Allow hashing
#[inline]
- pub fn while_hashing_hir_bodies(&mut self, hb: bool, f: impl FnOnce(&mut Self)) {
- let prev = match &mut self.body_resolver {
- BodyResolver::Forbidden => panic!("Hashing HIR bodies is forbidden."),
- BodyResolver::Traverse { ref mut hash_bodies, .. } => {
- std::mem::replace(hash_bodies, hb)
- }
- };
- f(self);
- match &mut self.body_resolver {
- BodyResolver::Forbidden => unreachable!(),
- BodyResolver::Traverse { ref mut hash_bodies, .. } => *hash_bodies = prev,
- }
+ pub fn without_hir_bodies(&mut self, f: impl FnOnce(&mut StableHashingContext<'_>)) {
+ f(&mut StableHashingContext { body_resolver: BodyResolver::Ignore, ..self.clone() });
}
#[inline]
pub fn with_hir_bodies(
&mut self,
- hash_bodies: bool,
owner: LocalDefId,
bodies: &SortedMap<hir::ItemLocalId, &hir::Body<'_>>,
f: impl FnOnce(&mut StableHashingContext<'_>),
) {
f(&mut StableHashingContext {
- body_resolver: BodyResolver::Traverse { hash_bodies, owner, bodies },
+ body_resolver: BodyResolver::Traverse { owner, bodies },
..self.clone()
});
}
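
The boolean hash_bodies toggle becomes a third BodyResolver state, so callers now pick an entry point instead of flipping a flag. A hypothetical caller sketch (owner_node and the hashers are invented for illustration):

    // Hash an owner's HIR twice: once traversing nested bodies, once ignoring them.
    let mut full_hasher = StableHasher::new();
    hcx.with_hir_bodies(owner, bodies, |hcx| owner_node.hash_stable(hcx, &mut full_hasher));

    let mut shallow_hasher = StableHasher::new();
    hcx.without_hir_bodies(|hcx| owner_node.hash_stable(hcx, &mut shallow_hasher));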
diff --git a/compiler/rustc_query_system/src/ich/impls_hir.rs b/compiler/rustc_query_system/src/ich/impls_hir.rs
index 3390ed9eb..aa008d404 100644
--- a/compiler/rustc_query_system/src/ich/impls_hir.rs
+++ b/compiler/rustc_query_system/src/ich/impls_hir.rs
@@ -12,31 +12,11 @@ impl<'ctx> rustc_hir::HashStableContext for StableHashingContext<'ctx> {
let hcx = self;
match hcx.body_resolver {
BodyResolver::Forbidden => panic!("Hashing HIR bodies is forbidden."),
- BodyResolver::Traverse { hash_bodies: false, .. } => {}
- BodyResolver::Traverse { hash_bodies: true, owner, bodies } => {
+ BodyResolver::Ignore => {}
+ BodyResolver::Traverse { owner, bodies } => {
assert_eq!(id.hir_id.owner, owner);
bodies[&id.hir_id.local_id].hash_stable(hcx, hasher);
}
}
}
-
- fn hash_hir_expr(&mut self, expr: &hir::Expr<'_>, hasher: &mut StableHasher) {
- self.while_hashing_hir_bodies(true, |hcx| {
- let hir::Expr { hir_id, ref span, ref kind } = *expr;
-
- hir_id.hash_stable(hcx, hasher);
- span.hash_stable(hcx, hasher);
- kind.hash_stable(hcx, hasher);
- })
- }
-
- fn hash_hir_ty(&mut self, ty: &hir::Ty<'_>, hasher: &mut StableHasher) {
- self.while_hashing_hir_bodies(true, |hcx| {
- let hir::Ty { hir_id, ref kind, ref span } = *ty;
-
- hir_id.hash_stable(hcx, hasher);
- kind.hash_stable(hcx, hasher);
- span.hash_stable(hcx, hasher);
- })
- }
}
diff --git a/compiler/rustc_query_system/src/ich/impls_syntax.rs b/compiler/rustc_query_system/src/ich/impls_syntax.rs
index 1fa085926..0bc811eb0 100644
--- a/compiler/rustc_query_system/src/ich/impls_syntax.rs
+++ b/compiler/rustc_query_system/src/ich/impls_syntax.rs
@@ -42,12 +42,12 @@ impl<'ctx> rustc_ast::HashStableContext for StableHashingContext<'ctx> {
debug_assert!(!attr.is_doc_comment());
let ast::Attribute { kind, id: _, style, span } = attr;
- if let ast::AttrKind::Normal(item, tokens) = kind {
- item.hash_stable(self, hasher);
+ if let ast::AttrKind::Normal(normal) = kind {
+ normal.item.hash_stable(self, hasher);
style.hash_stable(self, hasher);
span.hash_stable(self, hasher);
assert_matches!(
- tokens.as_ref(),
+ normal.tokens.as_ref(),
None,
"Tokens should have been removed during lowering!"
);
@@ -148,3 +148,5 @@ impl<'tcx> HashStable<StableHashingContext<'tcx>> for rustc_feature::Features {
});
}
}
+
+impl<'ctx> rustc_type_ir::HashStableContext for StableHashingContext<'ctx> {}
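
The attribute-hashing change tracks an upstream AST refactor: AttrKind::Normal now wraps a single value carrying both the attribute item and its captured tokens. A sketch of the new matching shape (field names as used above):

    match &attr.kind {
        ast::AttrKind::Normal(normal) => {
            let _item = &normal.item;    // the attribute path and arguments
            let _toks = &normal.tokens;  // Option<_>; expected to be None after lowering
        }
        ast::AttrKind::DocComment(..) => {}
    }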
diff --git a/compiler/rustc_query_system/src/lib.rs b/compiler/rustc_query_system/src/lib.rs
index 68284dcaa..f92c3831f 100644
--- a/compiler/rustc_query_system/src/lib.rs
+++ b/compiler/rustc_query_system/src/lib.rs
@@ -1,10 +1,12 @@
#![feature(assert_matches)]
#![feature(core_intrinsics)]
#![feature(hash_raw_entry)]
-#![feature(let_else)]
+#![cfg_attr(bootstrap, feature(let_else))]
#![feature(min_specialization)]
#![feature(extern_types)]
#![allow(rustc::potential_query_instability)]
+// #![deny(rustc::untranslatable_diagnostic)]
+#![deny(rustc::diagnostic_outside_of_impl)]
#[macro_use]
extern crate tracing;
@@ -15,5 +17,10 @@ extern crate rustc_macros;
pub mod cache;
pub mod dep_graph;
+mod error;
pub mod ich;
pub mod query;
+mod values;
+
+pub use error::HandleCycleError;
+pub use values::Value;
diff --git a/compiler/rustc_query_system/src/query/config.rs b/compiler/rustc_query_system/src/query/config.rs
index 964914a13..c4549cc9e 100644
--- a/compiler/rustc_query_system/src/query/config.rs
+++ b/compiler/rustc_query_system/src/query/config.rs
@@ -2,12 +2,12 @@
use crate::dep_graph::DepNode;
use crate::dep_graph::SerializedDepNodeIndex;
+use crate::error::HandleCycleError;
use crate::ich::StableHashingContext;
use crate::query::caches::QueryCache;
use crate::query::{QueryContext, QueryState};
use rustc_data_structures::fingerprint::Fingerprint;
-use rustc_errors::{DiagnosticBuilder, ErrorGuaranteed};
use std::fmt::Debug;
use std::hash::Hash;
@@ -19,15 +19,17 @@ pub trait QueryConfig {
type Stored: Clone;
}
+#[derive(Copy, Clone)]
pub struct QueryVTable<CTX: QueryContext, K, V> {
pub anon: bool,
pub dep_kind: CTX::DepKind,
pub eval_always: bool,
- pub cache_on_disk: bool,
+ pub depth_limit: bool,
pub compute: fn(CTX::DepContext, K) -> V,
pub hash_result: Option<fn(&mut StableHashingContext<'_>, &V) -> Fingerprint>,
- pub handle_cycle_error: fn(CTX, DiagnosticBuilder<'_, ErrorGuaranteed>) -> V,
+ pub handle_cycle_error: HandleCycleError,
+ // NOTE: this is also `None` if `cache_on_disk()` returns false, not just if it's unsupported by the query
pub try_load_from_disk: Option<fn(CTX, SerializedDepNodeIndex) -> Option<V>>,
}
@@ -42,18 +44,9 @@ impl<CTX: QueryContext, K, V> QueryVTable<CTX, K, V> {
pub(crate) fn compute(&self, tcx: CTX::DepContext, key: K) -> V {
(self.compute)(tcx, key)
}
-
- pub(crate) fn try_load_from_disk(&self, tcx: CTX, index: SerializedDepNodeIndex) -> Option<V> {
- self.try_load_from_disk
- .expect("QueryDescription::load_from_disk() called for an unsupported query.")(
- tcx, index,
- )
- }
}
pub trait QueryDescription<CTX: QueryContext>: QueryConfig {
- const TRY_LOAD_FROM_DISK: Option<fn(CTX, SerializedDepNodeIndex) -> Option<Self::Value>>;
-
type Cache: QueryCache<Key = Self::Key, Stored = Self::Stored, Value = Self::Value>;
fn describe(tcx: CTX, key: Self::Key) -> String;
@@ -72,4 +65,7 @@ pub trait QueryDescription<CTX: QueryContext>: QueryConfig {
fn make_vtable(tcx: CTX, key: &Self::Key) -> QueryVTable<CTX, Self::Key, Self::Value>;
fn cache_on_disk(tcx: CTX::DepContext, key: &Self::Key) -> bool;
+
+ // Don't use this method to compute query results, instead use the methods on TyCtxt
+ fn execute_query(tcx: CTX::DepContext, k: Self::Key) -> Self::Stored;
}
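
With the vtable now storing a HandleCycleError value, a depth_limit flag, and a pre-resolved try_load_from_disk Option, a make_vtable implementation would fill it roughly as below. This is a hedged sketch with invented provider names; the real vtables are generated by the query macros outside this crate:

    QueryVTable {
        anon: false,
        dep_kind: DepKind::example_query,            // invented variant name
        eval_always: false,
        depth_limit: false,
        compute: compute_example_query,              // fn(CTX::DepContext, Key) -> Value
        hash_result: Some(hash_result_example),
        handle_cycle_error: HandleCycleError::Error,
        // None both for queries that never cache and when cache_on_disk(key) says no:
        try_load_from_disk: if Self::cache_on_disk(*tcx.dep_context(), key) {
            Some(try_load_example)
        } else {
            None
        },
    }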
diff --git a/compiler/rustc_query_system/src/query/job.rs b/compiler/rustc_query_system/src/query/job.rs
index 6d2aff381..45b4079fb 100644
--- a/compiler/rustc_query_system/src/query/job.rs
+++ b/compiler/rustc_query_system/src/query/job.rs
@@ -1,12 +1,11 @@
+use crate::error::CycleStack;
use crate::query::plumbing::CycleError;
use crate::query::{QueryContext, QueryStackFrame};
-use rustc_hir::def::DefKind;
use rustc_data_structures::fx::FxHashMap;
-use rustc_errors::{
- struct_span_err, Diagnostic, DiagnosticBuilder, ErrorGuaranteed, Handler, Level,
-};
-use rustc_session::Session;
+use rustc_errors::{Diagnostic, DiagnosticBuilder, ErrorGuaranteed, Handler, Level};
+use rustc_hir::def::DefKind;
+use rustc_session::{Session, SessionDiagnostic};
use rustc_span::Span;
use std::hash::Hash;
@@ -536,46 +535,44 @@ pub(crate) fn report_cycle<'a>(
assert!(!stack.is_empty());
let span = stack[0].query.default_span(stack[1 % stack.len()].span);
- let mut err =
- struct_span_err!(sess, span, E0391, "cycle detected when {}", stack[0].query.description);
+
+ let mut cycle_stack = Vec::new();
+
+ use crate::error::StackCount;
+ let stack_count = if stack.len() == 1 { StackCount::Single } else { StackCount::Multiple };
for i in 1..stack.len() {
let query = &stack[i].query;
let span = query.default_span(stack[(i + 1) % stack.len()].span);
- err.span_note(span, &format!("...which requires {}...", query.description));
- }
-
- if stack.len() == 1 {
- err.note(&format!("...which immediately requires {} again", stack[0].query.description));
- } else {
- err.note(&format!(
- "...which again requires {}, completing the cycle",
- stack[0].query.description
- ));
- }
-
- if stack.iter().all(|entry| {
- entry
- .query
- .def_kind
- .map_or(false, |def_kind| matches!(def_kind, DefKind::TyAlias | DefKind::TraitAlias))
- }) {
- if stack.iter().all(|entry| {
- entry.query.def_kind.map_or(false, |def_kind| matches!(def_kind, DefKind::TyAlias))
- }) {
- err.note("type aliases cannot be recursive");
- err.help("consider using a struct, enum, or union instead to break the cycle");
- err.help("see <https://doc.rust-lang.org/reference/types.html#recursive-types> for more information");
- } else {
- err.note("trait aliases cannot be recursive");
- }
+ cycle_stack.push(CycleStack { span, desc: query.description.to_owned() });
}
+ let mut cycle_usage = None;
if let Some((span, query)) = usage {
- err.span_note(query.default_span(span), &format!("cycle used when {}", query.description));
+ cycle_usage = Some(crate::error::CycleUsage {
+ span: query.default_span(span),
+ usage: query.description,
+ });
}
- err
+ let alias = if stack.iter().all(|entry| entry.query.def_kind == Some(DefKind::TyAlias)) {
+ Some(crate::error::Alias::Ty)
+ } else if stack.iter().all(|entry| entry.query.def_kind == Some(DefKind::TraitAlias)) {
+ Some(crate::error::Alias::Trait)
+ } else {
+ None
+ };
+
+ let cycle_diag = crate::error::Cycle {
+ span,
+ cycle_stack,
+ stack_bottom: stack[0].query.description.to_owned(),
+ alias,
+ cycle_usage: cycle_usage,
+ stack_count,
+ };
+
+ cycle_diag.into_diagnostic(&sess.parse_sess.span_diagnostic)
}
pub fn print_query_stack<CTX: QueryContext>(
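
For concreteness, a hedged worked example of what the rewritten report_cycle builds (query names invented): for a two-frame cycle A -> B -> A, the fields come out roughly as:

    Cycle {
        span,                                  // span of the cycle head, A
        stack_bottom: String::from("computing A"),
        cycle_stack: vec![CycleStack { span: b_span, desc: String::from("computing B") }],
        stack_count: StackCount::Multiple,     // stack.len() == 2
        alias: None,                           // frames are not all type/trait aliases
        cycle_usage: None,                     // no usage site was recorded
    }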
diff --git a/compiler/rustc_query_system/src/query/mod.rs b/compiler/rustc_query_system/src/query/mod.rs
index fb2258434..0b07bb64b 100644
--- a/compiler/rustc_query_system/src/query/mod.rs
+++ b/compiler/rustc_query_system/src/query/mod.rs
@@ -14,13 +14,12 @@ pub use self::caches::{
mod config;
pub use self::config::{QueryConfig, QueryDescription, QueryVTable};
-use crate::dep_graph::{DepNodeIndex, HasDepContext, SerializedDepNodeIndex};
-
+use crate::dep_graph::{DepContext, DepNodeIndex, HasDepContext, SerializedDepNodeIndex};
use rustc_data_structures::sync::Lock;
-use rustc_data_structures::thin_vec::ThinVec;
use rustc_errors::Diagnostic;
use rustc_hir::def::DefKind;
use rustc_span::Span;
+use thin_vec::ThinVec;
/// Description of a frame in the query stack.
///
@@ -119,7 +118,12 @@ pub trait QueryContext: HasDepContext {
fn start_query<R>(
&self,
token: QueryJobId,
+ depth_limit: bool,
diagnostics: Option<&Lock<ThinVec<Diagnostic>>>,
compute: impl FnOnce() -> R,
) -> R;
+
+ fn depth_limit_error(&self) {
+ self.dep_context().sess().emit_fatal(crate::error::QueryOverflow);
+ }
}
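
start_query gains a depth_limit flag, and depth_limit_error supplies the shared fatal diagnostic. A sketch of how an implementer might honor the flag (the depth check is an assumption; the real logic lives in the query-context implementation, not in this crate):

    fn start_query<R>(
        &self,
        token: QueryJobId,
        depth_limit: bool,
        diagnostics: Option<&Lock<ThinVec<Diagnostic>>>,
        compute: impl FnOnce() -> R,
    ) -> R {
        if depth_limit && self.query_depth_exceeded() {   // hypothetical helper
            self.depth_limit_error();                     // emits the fatal QueryOverflow above
        }
        // ...install `token` and `diagnostics` in the thread-local query state, then:
        compute()
    }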
diff --git a/compiler/rustc_query_system/src/query/plumbing.rs b/compiler/rustc_query_system/src/query/plumbing.rs
index 5e8ea07d0..8179a674a 100644
--- a/compiler/rustc_query_system/src/query/plumbing.rs
+++ b/compiler/rustc_query_system/src/query/plumbing.rs
@@ -7,6 +7,8 @@ use crate::query::caches::QueryCache;
use crate::query::config::{QueryDescription, QueryVTable};
use crate::query::job::{report_cycle, QueryInfo, QueryJob, QueryJobId, QueryJobInfo};
use crate::query::{QueryContext, QueryMap, QuerySideEffects, QueryStackFrame};
+use crate::values::Value;
+use crate::HandleCycleError;
use rustc_data_structures::fingerprint::Fingerprint;
use rustc_data_structures::fx::FxHashMap;
#[cfg(parallel_compiler)]
@@ -14,7 +16,6 @@ use rustc_data_structures::profiling::TimingGuard;
#[cfg(parallel_compiler)]
use rustc_data_structures::sharded::Sharded;
use rustc_data_structures::sync::Lock;
-use rustc_data_structures::thin_vec::ThinVec;
use rustc_errors::{DiagnosticBuilder, ErrorGuaranteed, FatalError};
use rustc_session::Session;
use rustc_span::{Span, DUMMY_SP};
@@ -24,6 +25,7 @@ use std::fmt::Debug;
use std::hash::Hash;
use std::mem;
use std::ptr;
+use thin_vec::ThinVec;
pub struct QueryState<K> {
#[cfg(parallel_compiler)]
@@ -118,19 +120,46 @@ where
fn mk_cycle<CTX, V, R>(
tcx: CTX,
error: CycleError,
- handle_cycle_error: fn(CTX, DiagnosticBuilder<'_, ErrorGuaranteed>) -> V,
+ handler: HandleCycleError,
cache: &dyn crate::query::QueryStorage<Value = V, Stored = R>,
) -> R
where
CTX: QueryContext,
- V: std::fmt::Debug,
+ V: std::fmt::Debug + Value<CTX::DepContext>,
R: Clone,
{
let error = report_cycle(tcx.dep_context().sess(), error);
- let value = handle_cycle_error(tcx, error);
+ let value = handle_cycle_error(*tcx.dep_context(), error, handler);
cache.store_nocache(value)
}
+fn handle_cycle_error<CTX, V>(
+ tcx: CTX,
+ mut error: DiagnosticBuilder<'_, ErrorGuaranteed>,
+ handler: HandleCycleError,
+) -> V
+where
+ CTX: DepContext,
+ V: Value<CTX>,
+{
+ use HandleCycleError::*;
+ match handler {
+ Error => {
+ error.emit();
+ Value::from_cycle_error(tcx)
+ }
+ Fatal => {
+ error.emit();
+ tcx.sess().abort_if_errors();
+ unreachable!()
+ }
+ DelayBug => {
+ error.delay_as_bug();
+ Value::from_cycle_error(tcx)
+ }
+ }
+}
+
impl<'tcx, K> JobOwner<'tcx, K>
where
K: Eq + Hash + Clone,
@@ -336,6 +365,7 @@ fn try_execute_query<CTX, C>(
where
C: QueryCache,
C::Key: Clone + DepNodeParams<CTX::DepContext>,
+ C::Value: Value<CTX::DepContext>,
CTX: QueryContext,
{
match JobOwner::<'_, C::Key>::try_start(&tcx, state, span, key.clone()) {
@@ -381,7 +411,9 @@ where
// Fast path for when incr. comp. is off.
if !dep_graph.is_fully_enabled() {
let prof_timer = tcx.dep_context().profiler().query_provider();
- let result = tcx.start_query(job_id, None, || query.compute(*tcx.dep_context(), key));
+ let result = tcx.start_query(job_id, query.depth_limit, None, || {
+ query.compute(*tcx.dep_context(), key)
+ });
let dep_node_index = dep_graph.next_virtual_depnode_index();
prof_timer.finish_with_query_invocation_id(dep_node_index.into());
return (result, dep_node_index);
@@ -394,7 +426,7 @@ where
// The diagnostics for this query will be promoted to the current session during
// `try_mark_green()`, so we can ignore them here.
- if let Some(ret) = tcx.start_query(job_id, None, || {
+ if let Some(ret) = tcx.start_query(job_id, false, None, || {
try_load_from_disk_and_cache_in_memory(tcx, &key, &dep_node, query)
}) {
return ret;
@@ -404,18 +436,20 @@ where
let prof_timer = tcx.dep_context().profiler().query_provider();
let diagnostics = Lock::new(ThinVec::new());
- let (result, dep_node_index) = tcx.start_query(job_id, Some(&diagnostics), || {
- if query.anon {
- return dep_graph.with_anon_task(*tcx.dep_context(), query.dep_kind, || {
- query.compute(*tcx.dep_context(), key)
- });
- }
+ let (result, dep_node_index) =
+ tcx.start_query(job_id, query.depth_limit, Some(&diagnostics), || {
+ if query.anon {
+ return dep_graph.with_anon_task(*tcx.dep_context(), query.dep_kind, || {
+ query.compute(*tcx.dep_context(), key)
+ });
+ }
- // `to_dep_node` is expensive for some `DepKind`s.
- let dep_node = dep_node_opt.unwrap_or_else(|| query.to_dep_node(*tcx.dep_context(), &key));
+ // `to_dep_node` is expensive for some `DepKind`s.
+ let dep_node =
+ dep_node_opt.unwrap_or_else(|| query.to_dep_node(*tcx.dep_context(), &key));
- dep_graph.with_task(dep_node, *tcx.dep_context(), key, query.compute, query.hash_result)
- });
+ dep_graph.with_task(dep_node, *tcx.dep_context(), key, query.compute, query.hash_result)
+ });
prof_timer.finish_with_query_invocation_id(dep_node_index.into());
@@ -454,14 +488,14 @@ where
// First we try to load the result from the on-disk cache.
// Some things are never cached on disk.
- if query.cache_on_disk {
+ if let Some(try_load_from_disk) = query.try_load_from_disk {
let prof_timer = tcx.dep_context().profiler().incr_cache_loading();
// The call to `with_query_deserialization` enforces that no new `DepNodes`
// are created during deserialization. See the docs of that method for more
// details.
- let result = dep_graph
- .with_query_deserialization(|| query.try_load_from_disk(tcx, prev_dep_node_index));
+ let result =
+ dep_graph.with_query_deserialization(|| try_load_from_disk(tcx, prev_dep_node_index));
prof_timer.finish_with_query_invocation_id(dep_node_index.into());
@@ -614,16 +648,12 @@ fn incremental_verify_ich_cold(sess: &Session, dep_node: DebugArg<'_>, result: D
let old_in_panic = INSIDE_VERIFY_PANIC.with(|in_panic| in_panic.replace(true));
if old_in_panic {
- sess.struct_err(
- "internal compiler error: re-entrant incremental verify failure, suppressing message",
- )
- .emit();
+ sess.emit_err(crate::error::Reentrant);
} else {
- sess.struct_err(&format!("internal compiler error: encountered incremental compilation error with {:?}", dep_node))
- .help(&format!("This is a known issue with the compiler. Run {} to allow your project to compile", run_cmd))
- .note("Please follow the instructions below to create a bug report with the provided information")
- .note("See <https://github.com/rust-lang/rust/issues/84970> for more information")
- .emit();
+ sess.emit_err(crate::error::IncrementCompilation {
+ run_cmd,
+ dep_node: format!("{:?}", dep_node),
+ });
panic!("Found unstable fingerprints for {:?}: {:?}", dep_node, result);
}
@@ -686,6 +716,7 @@ pub fn get_query<Q, CTX>(tcx: CTX, span: Span, key: Q::Key, mode: QueryMode) ->
where
Q: QueryDescription<CTX>,
Q::Key: DepNodeParams<CTX::DepContext>,
+ Q::Value: Value<CTX::DepContext>,
CTX: QueryContext,
{
let query = Q::make_vtable(tcx, &key);
@@ -718,6 +749,7 @@ pub fn force_query<Q, CTX>(tcx: CTX, key: Q::Key, dep_node: DepNode<CTX::DepKind
where
Q: QueryDescription<CTX>,
Q::Key: DepNodeParams<CTX::DepContext>,
+ Q::Value: Value<CTX::DepContext>,
CTX: QueryContext,
{
// We may be concurrently trying both execute and force a query.
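
Since cycle handling is now data rather than a per-query closure, each query presumably picks a variant from its modifiers. A hedged sketch of that mapping (modifier names fatal_cycle / cycle_delay_bug assumed from rustc's query macro; the translation itself happens outside this crate):

    let handle_cycle_error = if modifiers.fatal_cycle {
        HandleCycleError::Fatal
    } else if modifiers.cycle_delay_bug {
        HandleCycleError::DelayBug
    } else {
        HandleCycleError::Error
    };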
diff --git a/compiler/rustc_query_system/src/values.rs b/compiler/rustc_query_system/src/values.rs
new file mode 100644
index 000000000..aeef66f86
--- /dev/null
+++ b/compiler/rustc_query_system/src/values.rs
@@ -0,0 +1,14 @@
+use crate::dep_graph::DepContext;
+
+pub trait Value<CTX: DepContext>: Sized {
+ fn from_cycle_error(tcx: CTX) -> Self;
+}
+
+impl<CTX: DepContext, T> Value<CTX> for T {
+ default fn from_cycle_error(tcx: CTX) -> T {
+ tcx.sess().abort_if_errors();
+ // Ideally we would use `bug!` here. But bug! is only defined in rustc_middle, and it's
+ // non-trivial to define it earlier.
+ panic!("Value::from_cycle_error called without errors");
+ }
+}
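
The blanket impl uses a default fn so that, under min_specialization, downstream crates can override the cycle fallback for specific types instead of aborting. A hedged sketch of such an override for a hypothetical type:

    // Hypothetical type that can produce a usable placeholder after a cycle error.
    struct CachedLayout;

    impl<CTX: DepContext> Value<CTX> for CachedLayout {
        fn from_cycle_error(_tcx: CTX) -> Self {
            // A real override would typically return a dummy/error value (after the cycle
            // diagnostic has been emitted) rather than hitting the aborting default above.
            CachedLayout
        }
    }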