Diffstat (limited to 'compiler/rustc_query_system/src/query')
-rw-r--r--  compiler/rustc_query_system/src/query/job.rs        31
-rw-r--r--  compiler/rustc_query_system/src/query/mod.rs          2
-rw-r--r--  compiler/rustc_query_system/src/query/plumbing.rs    23
3 files changed, 29 insertions, 27 deletions
diff --git a/compiler/rustc_query_system/src/query/job.rs b/compiler/rustc_query_system/src/query/job.rs
index f2c1f84fc..2a34ffb75 100644
--- a/compiler/rustc_query_system/src/query/job.rs
+++ b/compiler/rustc_query_system/src/query/job.rs
@@ -5,7 +5,7 @@ use crate::query::DepKind;
use crate::query::{QueryContext, QueryStackFrame};
use rustc_data_structures::fx::FxHashMap;
use rustc_errors::{
- Diagnostic, DiagnosticBuilder, ErrorGuaranteed, Handler, IntoDiagnostic, Level,
+ DiagCtxt, Diagnostic, DiagnosticBuilder, ErrorGuaranteed, IntoDiagnostic, Level,
};
use rustc_hir::def::DefKind;
use rustc_session::Session;
@@ -18,7 +18,6 @@ use std::num::NonZeroU64;
#[cfg(parallel_compiler)]
use {
parking_lot::{Condvar, Mutex},
- rayon_core,
rustc_data_structures::fx::FxHashSet,
rustc_data_structures::{defer, jobserver},
rustc_span::DUMMY_SP,
@@ -38,7 +37,7 @@ pub struct QueryInfo {
pub type QueryMap = FxHashMap<QueryJobId, QueryJobInfo>;
/// A value uniquely identifying an active query job.
-#[derive(Copy, Clone, Eq, PartialEq, Hash)]
+#[derive(Copy, Clone, Eq, PartialEq, Hash, Debug)]
pub struct QueryJobId(pub NonZeroU64);
impl QueryJobId {
@@ -62,14 +61,14 @@ impl QueryJobId {
}
}
-#[derive(Clone)]
+#[derive(Clone, Debug)]
pub struct QueryJobInfo {
pub query: QueryStackFrame,
pub job: QueryJob,
}
/// Represents an active query job.
-#[derive(Clone)]
+#[derive(Clone, Debug)]
pub struct QueryJob {
pub id: QueryJobId,
@@ -182,6 +181,7 @@ impl QueryJobId {
}
#[cfg(parallel_compiler)]
+#[derive(Debug)]
struct QueryWaiter {
query: Option<QueryJobId>,
condvar: Condvar,
@@ -198,13 +198,14 @@ impl QueryWaiter {
}
#[cfg(parallel_compiler)]
+#[derive(Debug)]
struct QueryLatchInfo {
complete: bool,
waiters: Vec<Arc<QueryWaiter>>,
}
#[cfg(parallel_compiler)]
-#[derive(Clone)]
+#[derive(Clone, Debug)]
pub(super) struct QueryLatch {
info: Arc<Mutex<QueryLatchInfo>>,
}
@@ -540,7 +541,11 @@ pub fn deadlock(query_map: QueryMap, registry: &rayon_core::Registry) {
// X to Y due to Rayon waiting and a true dependency from Y to X. The algorithm here
// only considers the true dependency and won't detect a cycle.
if !found_cycle {
- panic!("deadlock detected");
+ if query_map.len() == 0 {
+ panic!("deadlock detected without any query!")
+ } else {
+ panic!("deadlock detected! current query map:\n{:#?}", query_map);
+ }
}
// FIXME: Ensure this won't cause a deadlock before we return
@@ -599,28 +604,28 @@ pub(crate) fn report_cycle<'a>(
note_span: (),
};
- cycle_diag.into_diagnostic(&sess.parse_sess.span_diagnostic)
+ cycle_diag.into_diagnostic(sess.dcx())
}
pub fn print_query_stack<Qcx: QueryContext>(
qcx: Qcx,
mut current_query: Option<QueryJobId>,
- handler: &Handler,
+ dcx: &DiagCtxt,
num_frames: Option<usize>,
mut file: Option<std::fs::File>,
) -> usize {
// Be careful relying on global state here: this code is called from
- // a panic hook, which means that the global `Handler` may be in a weird
+ // a panic hook, which means that the global `DiagCtxt` may be in a weird
// state if it was responsible for triggering the panic.
let mut count_printed = 0;
let mut count_total = 0;
- let query_map = qcx.try_collect_active_jobs();
+ let query_map = qcx.collect_active_jobs();
if let Some(ref mut file) = file {
let _ = writeln!(file, "\n\nquery stack during panic:");
}
while let Some(query) = current_query {
- let Some(query_info) = query_map.as_ref().and_then(|map| map.get(&query)) else {
+ let Some(query_info) = query_map.get(&query) else {
break;
};
if Some(count_printed) < num_frames || num_frames.is_none() {
@@ -633,7 +638,7 @@ pub fn print_query_stack<Qcx: QueryContext>(
),
);
diag.span = query_info.job.span.into();
- handler.force_print_diagnostic(diag);
+ dcx.force_print_diagnostic(diag);
count_printed += 1;
}
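
The `Debug` derives added throughout job.rs exist so the reworked deadlock panic can dump the entire query map with `{:#?}`. Below is a minimal, self-contained sketch of that pattern outside rustc; the types are simplified stand-ins for `QueryJobId`/`QueryJobInfo` (the real definitions carry more fields), not the actual rustc code.

    use std::collections::HashMap;
    use std::num::NonZeroU64;

    // Simplified stand-ins for rustc's QueryJobId / QueryJobInfo.
    #[derive(Copy, Clone, Eq, PartialEq, Hash, Debug)]
    struct QueryJobId(NonZeroU64);

    #[derive(Clone, Debug)]
    struct QueryJobInfo {
        query: String, // the real field is a QueryStackFrame
    }

    fn deadlock(query_map: HashMap<QueryJobId, QueryJobInfo>, found_cycle: bool) {
        if !found_cycle {
            // Mirrors the new panic: distinguish an empty map from one worth
            // dumping, and rely on the Debug derives for the `{:#?}` output.
            if query_map.is_empty() {
                panic!("deadlock detected without any query!")
            } else {
                panic!("deadlock detected! current query map:\n{query_map:#?}");
            }
        }
    }

    fn main() {
        let id = QueryJobId(NonZeroU64::new(1).unwrap());
        let mut map = HashMap::new();
        map.insert(id, QueryJobInfo { query: "typeck(foo)".to_string() });
        deadlock(map, true); // pass `false` here to see the pretty-printed map in the panic
    }
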
diff --git a/compiler/rustc_query_system/src/query/mod.rs b/compiler/rustc_query_system/src/query/mod.rs
index 05dee9f12..96a0c7a03 100644
--- a/compiler/rustc_query_system/src/query/mod.rs
+++ b/compiler/rustc_query_system/src/query/mod.rs
@@ -106,7 +106,7 @@ pub trait QueryContext: HasDepContext {
/// Get the query information from the TLS context.
fn current_query_job(self) -> Option<QueryJobId>;
- fn try_collect_active_jobs(self) -> Option<QueryMap>;
+ fn collect_active_jobs(self) -> QueryMap;
/// Load side effects associated to the node in the previous session.
fn load_side_effects(self, prev_dep_node_index: SerializedDepNodeIndex) -> QuerySideEffects;
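
Changing `try_collect_active_jobs(self) -> Option<QueryMap>` into `collect_active_jobs(self) -> QueryMap` is what lets the call sites in job.rs and plumbing.rs drop their `Option` plumbing (`.unwrap()`, `as_ref().and_then(...)`). A rough sketch of that caller-side simplification, using a made-up `QueryMap` alias in place of rustc's `FxHashMap<QueryJobId, QueryJobInfo>`:

    use std::collections::HashMap;

    // Stand-in for rustc's QueryMap; the value type is simplified to String.
    type QueryMap = HashMap<u64, String>;

    // Before: collection could fail, so every caller threaded an Option through.
    fn lookup_fallible(map: Option<&QueryMap>, id: u64) -> Option<&String> {
        map.and_then(|m| m.get(&id))
    }

    // After: collection always yields a (possibly empty) map, so callers index it
    // directly, as print_query_stack now does with `query_map.get(&query)`.
    fn lookup_infallible(map: &QueryMap, id: u64) -> Option<&String> {
        map.get(&id)
    }

    fn main() {
        let map: QueryMap = HashMap::from([(1, "typeck".to_string())]);
        assert_eq!(lookup_fallible(Some(&map), 1), lookup_infallible(&map, 1));
    }
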
diff --git a/compiler/rustc_query_system/src/query/plumbing.rs b/compiler/rustc_query_system/src/query/plumbing.rs
index 1f3403d09..41638b38c 100644
--- a/compiler/rustc_query_system/src/query/plumbing.rs
+++ b/compiler/rustc_query_system/src/query/plumbing.rs
@@ -138,7 +138,7 @@ where
&& let Some(span) = root.query.span
{
error.stash(span, StashKey::Cycle);
- qcx.dep_context().sess().delay_span_bug(span, "delayed cycle error")
+ qcx.dep_context().sess().span_delayed_bug(span, "delayed cycle error")
} else {
error.emit()
};
@@ -203,7 +203,7 @@ where
}
}
-#[derive(Clone)]
+#[derive(Clone, Debug)]
pub(crate) struct CycleError {
/// The query and related span that uses the cycle.
pub usage: Option<(Span, QueryStackFrame)>,
@@ -220,7 +220,7 @@ where
C: QueryCache,
Tcx: DepContext,
{
- match cache.lookup(&key) {
+ match cache.lookup(key) {
Some((value, index)) => {
tcx.profiler().query_cache_hit(index.into());
tcx.dep_graph().read_index(index);
@@ -242,11 +242,8 @@ where
Q: QueryConfig<Qcx>,
Qcx: QueryContext,
{
- let error = try_execute.find_cycle_in_stack(
- qcx.try_collect_active_jobs().unwrap(),
- &qcx.current_query_job(),
- span,
- );
+ let error =
+ try_execute.find_cycle_in_stack(qcx.collect_active_jobs(), &qcx.current_query_job(), span);
(mk_cycle(query, qcx, error), None)
}
@@ -424,7 +421,7 @@ where
// We have an inconsistency. This can happen if one of the two
// results is tainted by errors. In this case, delay a bug to
// ensure compilation is doomed.
- qcx.dep_context().sess().delay_span_bug(
+ qcx.dep_context().sess().span_delayed_bug(
DUMMY_SP,
format!(
"Computed query value for {:?}({:?}) is inconsistent with fed value,\n\
@@ -502,7 +499,7 @@ where
// The diagnostics for this query will be promoted to the current session during
// `try_mark_green()`, so we can ignore them here.
if let Some(ret) = qcx.start_query(job_id, false, None, || {
- try_load_from_disk_and_cache_in_memory(query, dep_graph_data, qcx, &key, &dep_node)
+ try_load_from_disk_and_cache_in_memory(query, dep_graph_data, qcx, &key, dep_node)
}) {
return ret;
}
@@ -563,7 +560,7 @@ where
// Note this function can be called concurrently from the same query
// We must ensure that this is handled correctly.
- let (prev_dep_node_index, dep_node_index) = dep_graph_data.try_mark_green(qcx, &dep_node)?;
+ let (prev_dep_node_index, dep_node_index) = dep_graph_data.try_mark_green(qcx, dep_node)?;
debug_assert!(dep_graph_data.is_index_green(prev_dep_node_index));
@@ -610,7 +607,7 @@ where
// Sanity check for the logic in `ensure`: if the node is green and the result loadable,
// we should actually be able to load it.
debug_assert!(
- !query.loadable_from_disk(qcx, &key, prev_dep_node_index),
+ !query.loadable_from_disk(qcx, key, prev_dep_node_index),
"missing on-disk cache entry for loadable {dep_node:?}"
);
@@ -667,7 +664,7 @@ pub(crate) fn incremental_verify_ich<Tcx, V>(
let old_hash = dep_graph_data.prev_fingerprint_of(prev_index);
if new_hash != old_hash {
- incremental_verify_ich_failed(tcx, prev_index, &|| format_value(&result));
+ incremental_verify_ich_failed(tcx, prev_index, &|| format_value(result));
}
}
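
Several plumbing.rs hunks also drop a redundant borrow at the call site (`cache.lookup(key)`, `try_mark_green(qcx, dep_node)`, `loadable_from_disk(qcx, key, ...)`): the argument already arrives as a reference, so writing `&key` again only produced a double reference that deref coercion had to flatten. A small sketch of that cleanup with a stand-in cache type, not the real `QueryCache` trait; the signatures below are assumptions for illustration only.

    use std::collections::HashMap;

    // Stand-in cache; rustc's QueryCache trait is more involved.
    struct Cache(HashMap<u64, String>);

    impl Cache {
        // The callee expects a borrowed key.
        fn lookup(&self, key: &u64) -> Option<&String> {
            self.0.get(key)
        }
    }

    // `key` already arrives as a reference, so the call passes it straight through.
    // Writing `cache.lookup(&key)` would build a `&&u64` and lean on deref coercion,
    // which is the shape of the `&key` -> `key` changes in the hunks above.
    fn try_get_cached(cache: &Cache, key: &u64) -> Option<String> {
        cache.lookup(key).cloned()
    }

    fn main() {
        let cache = Cache(HashMap::from([(7, "cached value".to_string())]));
        assert_eq!(try_get_cached(&cache, &7), Some("cached value".to_string()));
    }
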