author     | Daniel Baumann <daniel.baumann@progress-linux.org> | 2024-05-18 02:49:42 +0000
committer  | Daniel Baumann <daniel.baumann@progress-linux.org> | 2024-05-18 02:49:42 +0000
commit     | 837b550238aa671a591ccf282dddeab29cadb206 (patch)
tree       | 914b6b8862bace72bd3245ca184d374b08d8a672 /compiler/rustc_query_impl
parent     | Adding debian version 1.70.0+dfsg2-1. (diff)
download   | rustc-837b550238aa671a591ccf282dddeab29cadb206.tar.xz, rustc-837b550238aa671a591ccf282dddeab29cadb206.zip
Merging upstream version 1.71.1+dfsg1.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'compiler/rustc_query_impl')
-rw-r--r-- | compiler/rustc_query_impl/Cargo.toml | 2
-rw-r--r-- | compiler/rustc_query_impl/src/lib.rs | 213
-rw-r--r-- | compiler/rustc_query_impl/src/on_disk_cache.rs | 1093
-rw-r--r-- | compiler/rustc_query_impl/src/plumbing.rs | 670
-rw-r--r-- | compiler/rustc_query_impl/src/profiling_support.rs | 53 |
5 files changed, 506 insertions, 1525 deletions
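The substance of this merge, visible in the lib.rs hunk below, is that the per-query `QueryConfig` impls formerly expanded by `define_queries!` are replaced by a single `DynamicConfig` wrapper: each query's behaviour now lives in a `DynamicQuery` table of data and function pointers stored in the new `QuerySystem`, and its `QueryState`/`QueryCache` slots are reached through field offsets (hence the new `field-offset`/`memoffset` dependencies in Cargo.toml). The sketch below is illustrative only, not rustc code: the key/value types, the `QueryConfigLike` trait, and the example query are invented, and the real `DynamicQuery` carries many more hooks (dep kind, hashing, on-disk caching, cycle handling).

// Illustrative sketch of the dispatch pattern introduced in lib.rs below.
// All names other than DynamicQuery/DynamicConfig are made up for the example.

struct DynamicQuery<K, V> {
    name: &'static str,
    eval_always: bool,
    compute: fn(K) -> V,
    cache_on_disk: fn(&K) -> bool,
}

// Thin wrapper around a shared table; flags that must stay monomorphized are
// const generics, everything else is reached through the table.
struct DynamicConfig<'a, K, V, const ANON: bool> {
    dynamic: &'a DynamicQuery<K, V>,
}

// Manual Copy/Clone impls (as in the real diff) avoid the derive's K: Copy,
// V: Copy bounds, since only the reference is copied.
impl<'a, K, V, const ANON: bool> Copy for DynamicConfig<'a, K, V, ANON> {}
impl<'a, K, V, const ANON: bool> Clone for DynamicConfig<'a, K, V, ANON> {
    fn clone(&self) -> Self { *self }
}

trait QueryConfigLike: Copy {
    type Key;
    type Value;
    fn name(self) -> &'static str;
    fn anon(self) -> bool;
    fn compute(self, key: Self::Key) -> Self::Value;
    fn cache_on_disk(self, key: &Self::Key) -> bool;
}

impl<'a, K, V, const ANON: bool> QueryConfigLike for DynamicConfig<'a, K, V, ANON> {
    type Key = K;
    type Value = V;
    #[inline(always)]
    fn name(self) -> &'static str { self.dynamic.name }
    #[inline(always)]
    fn anon(self) -> bool { ANON } // resolved at compile time, no table read
    #[inline(always)]
    fn compute(self, key: K) -> V { (self.dynamic.compute)(key) }
    #[inline(always)]
    fn cache_on_disk(self, key: &K) -> bool { (self.dynamic.cache_on_disk)(key) }
}

// A hypothetical query: "length of a string key".
fn compute_len(key: &'static str) -> usize { key.len() }
fn always_cache(_key: &&'static str) -> bool { true }

fn main() {
    let table = DynamicQuery {
        name: "str_len",
        eval_always: false,
        compute: compute_len,
        cache_on_disk: always_cache,
    };
    let cfg: DynamicConfig<'_, &'static str, usize, false> = DynamicConfig { dynamic: &table };
    assert!(!cfg.anon());
    assert_eq!(cfg.compute("query"), 5);
    println!("{}: eval_always={} cached={}", cfg.name(), table.eval_always, cfg.cache_on_disk(&"query"));
}

Keeping only the performance-sensitive flags (anon, depth_limit, feedable in the real code) as const generics while routing everything else through the shared table is what allows the large generated `QueryConfig` impls in the old lib.rs to be deleted, as the rest of the diff shows.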
diff --git a/compiler/rustc_query_impl/Cargo.toml b/compiler/rustc_query_impl/Cargo.toml index b107a3f03..e59699346 100644 --- a/compiler/rustc_query_impl/Cargo.toml +++ b/compiler/rustc_query_impl/Cargo.toml @@ -7,6 +7,8 @@ edition = "2021" [dependencies] +memoffset = { version = "0.6.0", features = ["unstable_const"] } +field-offset = "0.3.5" measureme = "10.0.0" rustc_ast = { path = "../rustc_ast" } rustc_data_structures = { path = "../rustc_data_structures" } diff --git a/compiler/rustc_query_impl/src/lib.rs b/compiler/rustc_query_impl/src/lib.rs index 7001a1eed..4cf0f1305 100644 --- a/compiler/rustc_query_impl/src/lib.rs +++ b/compiler/rustc_query_impl/src/lib.rs @@ -3,60 +3,225 @@ #![doc(html_root_url = "https://doc.rust-lang.org/nightly/nightly-rustc/")] // this shouldn't be necessary, but the check for `&mut _` is too naive and denies returning a function pointer that takes a mut ref #![feature(const_mut_refs)] +#![feature(const_refs_to_cell)] #![feature(min_specialization)] #![feature(never_type)] #![feature(rustc_attrs)] #![recursion_limit = "256"] -#![allow(rustc::potential_query_instability)] +#![allow(rustc::potential_query_instability, unused_parens)] #![deny(rustc::untranslatable_diagnostic)] #![deny(rustc::diagnostic_outside_of_impl)] #[macro_use] -extern crate rustc_macros; -#[macro_use] extern crate rustc_middle; +use crate::plumbing::{__rust_begin_short_backtrace, encode_all_query_results, try_mark_green}; +use field_offset::offset_of; +use rustc_data_structures::stable_hasher::HashStable; use rustc_data_structures::sync::AtomicU64; use rustc_middle::arena::Arena; +use rustc_middle::dep_graph::DepNodeIndex; use rustc_middle::dep_graph::{self, DepKind, DepKindStruct}; use rustc_middle::query::erase::{erase, restore, Erase}; +use rustc_middle::query::on_disk_cache::{CacheEncoder, EncodedDepNodeIndex, OnDiskCache}; +use rustc_middle::query::plumbing::{ + DynamicQuery, QueryKeyStringCache, QuerySystem, QuerySystemFns, +}; use rustc_middle::query::AsLocalKey; -use rustc_middle::ty::query::{ - query_keys, query_provided, query_provided_to_value, query_storage, query_values, +use rustc_middle::query::{ + queries, DynamicQueries, ExternProviders, Providers, QueryCaches, QueryEngine, QueryStates, }; -use rustc_middle::ty::query::{ExternProviders, Providers, QueryEngine}; use rustc_middle::ty::TyCtxt; use rustc_query_system::dep_graph::SerializedDepNodeIndex; +use rustc_query_system::ich::StableHashingContext; +use rustc_query_system::query::{ + get_query_incr, get_query_non_incr, HashResult, QueryCache, QueryConfig, QueryInfo, QueryMap, + QueryMode, QueryState, +}; +use rustc_query_system::HandleCycleError; use rustc_query_system::Value; use rustc_span::Span; #[macro_use] mod plumbing; -pub use plumbing::QueryCtxt; -use rustc_query_system::query::*; -#[cfg(parallel_compiler)] -pub use rustc_query_system::query::{deadlock, QueryContext}; - -pub use rustc_query_system::query::QueryConfig; - -mod on_disk_cache; -pub use on_disk_cache::OnDiskCache; +pub use crate::plumbing::QueryCtxt; mod profiling_support; pub use self::profiling_support::alloc_self_profile_query_strings; -/// This is implemented per query and restoring query values from their erased state. 
-trait QueryConfigRestored<'tcx>: QueryConfig<QueryCtxt<'tcx>> + Default { - type RestoredValue; +struct DynamicConfig< + 'tcx, + C: QueryCache, + const ANON: bool, + const DEPTH_LIMIT: bool, + const FEEDABLE: bool, +> { + dynamic: &'tcx DynamicQuery<'tcx, C>, +} - fn restore(value: <Self as QueryConfig<QueryCtxt<'tcx>>>::Value) -> Self::RestoredValue; +impl<'tcx, C: QueryCache, const ANON: bool, const DEPTH_LIMIT: bool, const FEEDABLE: bool> Copy + for DynamicConfig<'tcx, C, ANON, DEPTH_LIMIT, FEEDABLE> +{ +} +impl<'tcx, C: QueryCache, const ANON: bool, const DEPTH_LIMIT: bool, const FEEDABLE: bool> Clone + for DynamicConfig<'tcx, C, ANON, DEPTH_LIMIT, FEEDABLE> +{ + fn clone(&self) -> Self { + DynamicConfig { dynamic: self.dynamic } + } } -rustc_query_append! { define_queries! } +impl<'tcx, C: QueryCache, const ANON: bool, const DEPTH_LIMIT: bool, const FEEDABLE: bool> + QueryConfig<QueryCtxt<'tcx>> for DynamicConfig<'tcx, C, ANON, DEPTH_LIMIT, FEEDABLE> +where + for<'a> C::Key: HashStable<StableHashingContext<'a>>, +{ + type Key = C::Key; + type Value = C::Value; + type Cache = C; + + #[inline(always)] + fn name(self) -> &'static str { + self.dynamic.name + } + + #[inline(always)] + fn cache_on_disk(self, tcx: TyCtxt<'tcx>, key: &Self::Key) -> bool { + (self.dynamic.cache_on_disk)(tcx, key) + } + + #[inline(always)] + fn query_state<'a>(self, qcx: QueryCtxt<'tcx>) -> &'a QueryState<Self::Key, DepKind> + where + QueryCtxt<'tcx>: 'a, + { + self.dynamic.query_state.apply(&qcx.tcx.query_system.states) + } + + #[inline(always)] + fn query_cache<'a>(self, qcx: QueryCtxt<'tcx>) -> &'a Self::Cache + where + 'tcx: 'a, + { + self.dynamic.query_cache.apply(&qcx.tcx.query_system.caches) + } + + #[inline(always)] + fn execute_query(self, tcx: TyCtxt<'tcx>, key: Self::Key) -> Self::Value { + (self.dynamic.execute_query)(tcx, key) + } + + #[inline(always)] + fn compute(self, qcx: QueryCtxt<'tcx>, key: Self::Key) -> Self::Value { + (self.dynamic.compute)(qcx.tcx, key) + } + + #[inline(always)] + fn try_load_from_disk( + self, + qcx: QueryCtxt<'tcx>, + key: &Self::Key, + prev_index: SerializedDepNodeIndex, + index: DepNodeIndex, + ) -> Option<Self::Value> { + if self.dynamic.can_load_from_disk { + (self.dynamic.try_load_from_disk)(qcx.tcx, key, prev_index, index) + } else { + None + } + } + + #[inline] + fn loadable_from_disk( + self, + qcx: QueryCtxt<'tcx>, + key: &Self::Key, + index: SerializedDepNodeIndex, + ) -> bool { + (self.dynamic.loadable_from_disk)(qcx.tcx, key, index) + } -impl<'tcx> Queries<'tcx> { - // Force codegen in the dyn-trait transformation in this crate. 
- pub fn as_dyn(&'tcx self) -> &'tcx dyn QueryEngine<'tcx> { - self + fn value_from_cycle_error( + self, + tcx: TyCtxt<'tcx>, + cycle: &[QueryInfo<DepKind>], + ) -> Self::Value { + (self.dynamic.value_from_cycle_error)(tcx, cycle) + } + + #[inline(always)] + fn format_value(self) -> fn(&Self::Value) -> String { + self.dynamic.format_value + } + + #[inline(always)] + fn anon(self) -> bool { + ANON + } + + #[inline(always)] + fn eval_always(self) -> bool { + self.dynamic.eval_always + } + + #[inline(always)] + fn depth_limit(self) -> bool { + DEPTH_LIMIT + } + + #[inline(always)] + fn feedable(self) -> bool { + FEEDABLE + } + + #[inline(always)] + fn dep_kind(self) -> DepKind { + self.dynamic.dep_kind + } + + #[inline(always)] + fn handle_cycle_error(self) -> HandleCycleError { + self.dynamic.handle_cycle_error + } + + #[inline(always)] + fn hash_result(self) -> HashResult<Self::Value> { + self.dynamic.hash_result + } +} + +/// This is implemented per query. It allows restoring query values from their erased state +/// and constructing a QueryConfig. +trait QueryConfigRestored<'tcx> { + type RestoredValue; + type Config: QueryConfig<QueryCtxt<'tcx>>; + + fn config(tcx: TyCtxt<'tcx>) -> Self::Config; + fn restore(value: <Self::Config as QueryConfig<QueryCtxt<'tcx>>>::Value) + -> Self::RestoredValue; +} + +pub fn query_system<'tcx>( + local_providers: Providers, + extern_providers: ExternProviders, + on_disk_cache: Option<OnDiskCache<'tcx>>, + incremental: bool, +) -> QuerySystem<'tcx> { + QuerySystem { + states: Default::default(), + arenas: Default::default(), + caches: Default::default(), + dynamic_queries: dynamic_queries(), + on_disk_cache, + fns: QuerySystemFns { + engine: engine(incremental), + local_providers, + extern_providers, + encode_query_results: encode_all_query_results, + try_mark_green: try_mark_green, + }, + jobs: AtomicU64::new(1), } } + +rustc_query_append! { define_queries! 
} diff --git a/compiler/rustc_query_impl/src/on_disk_cache.rs b/compiler/rustc_query_impl/src/on_disk_cache.rs deleted file mode 100644 index 30477c7bd..000000000 --- a/compiler/rustc_query_impl/src/on_disk_cache.rs +++ /dev/null @@ -1,1093 +0,0 @@ -use crate::QueryCtxt; -use rustc_data_structures::fx::{FxHashMap, FxIndexSet}; -use rustc_data_structures::memmap::Mmap; -use rustc_data_structures::sync::{HashMapExt, Lock, Lrc, RwLock}; -use rustc_data_structures::unhash::UnhashMap; -use rustc_data_structures::unord::UnordSet; -use rustc_hir::def_id::{CrateNum, DefId, DefIndex, LocalDefId, StableCrateId, LOCAL_CRATE}; -use rustc_hir::definitions::DefPathHash; -use rustc_index::vec::{Idx, IndexVec}; -use rustc_middle::dep_graph::{DepNodeIndex, SerializedDepNodeIndex}; -use rustc_middle::mir::interpret::{AllocDecodingSession, AllocDecodingState}; -use rustc_middle::mir::{self, interpret}; -use rustc_middle::ty::codec::{RefDecodable, TyDecoder, TyEncoder}; -use rustc_middle::ty::{self, Ty, TyCtxt}; -use rustc_query_system::dep_graph::DepContext; -use rustc_query_system::query::{QueryCache, QuerySideEffects}; -use rustc_serialize::{ - opaque::{FileEncodeResult, FileEncoder, IntEncodedWithFixedSize, MemDecoder}, - Decodable, Decoder, Encodable, Encoder, -}; -use rustc_session::Session; -use rustc_span::hygiene::{ - ExpnId, HygieneDecodeContext, HygieneEncodeContext, SyntaxContext, SyntaxContextData, -}; -use rustc_span::source_map::{SourceMap, StableSourceFileId}; -use rustc_span::{BytePos, ExpnData, ExpnHash, Pos, SourceFile, Span}; -use rustc_span::{CachingSourceMapView, Symbol}; -use std::collections::hash_map::Entry; -use std::io; -use std::mem; - -const TAG_FILE_FOOTER: u128 = 0xC0FFEE_C0FFEE_C0FFEE_C0FFEE_C0FFEE; - -// A normal span encoded with both location information and a `SyntaxContext` -const TAG_FULL_SPAN: u8 = 0; -// A partial span with no location information, encoded only with a `SyntaxContext` -const TAG_PARTIAL_SPAN: u8 = 1; -const TAG_RELATIVE_SPAN: u8 = 2; - -const TAG_SYNTAX_CONTEXT: u8 = 0; -const TAG_EXPN_DATA: u8 = 1; - -// Tags for encoding Symbol's -const SYMBOL_STR: u8 = 0; -const SYMBOL_OFFSET: u8 = 1; -const SYMBOL_PREINTERNED: u8 = 2; - -/// Provides an interface to incremental compilation data cached from the -/// previous compilation session. This data will eventually include the results -/// of a few selected queries (like `typeck` and `mir_optimized`) and -/// any side effects that have been emitted during a query. -pub struct OnDiskCache<'sess> { - // The complete cache data in serialized form. - serialized_data: RwLock<Option<Mmap>>, - - // Collects all `QuerySideEffects` created during the current compilation - // session. - current_side_effects: Lock<FxHashMap<DepNodeIndex, QuerySideEffects>>, - - source_map: &'sess SourceMap, - file_index_to_stable_id: FxHashMap<SourceFileIndex, EncodedSourceFileId>, - - // Caches that are populated lazily during decoding. - file_index_to_file: Lock<FxHashMap<SourceFileIndex, Lrc<SourceFile>>>, - - // A map from dep-node to the position of the cached query result in - // `serialized_data`. - query_result_index: FxHashMap<SerializedDepNodeIndex, AbsoluteBytePos>, - - // A map from dep-node to the position of any associated `QuerySideEffects` in - // `serialized_data`. - prev_side_effects_index: FxHashMap<SerializedDepNodeIndex, AbsoluteBytePos>, - - alloc_decoding_state: AllocDecodingState, - - // A map from syntax context ids to the position of their associated - // `SyntaxContextData`. 
We use a `u32` instead of a `SyntaxContext` - // to represent the fact that we are storing *encoded* ids. When we decode - // a `SyntaxContext`, a new id will be allocated from the global `HygieneData`, - // which will almost certainly be different than the serialized id. - syntax_contexts: FxHashMap<u32, AbsoluteBytePos>, - // A map from the `DefPathHash` of an `ExpnId` to the position - // of their associated `ExpnData`. Ideally, we would store a `DefId`, - // but we need to decode this before we've constructed a `TyCtxt` (which - // makes it difficult to decode a `DefId`). - - // Note that these `DefPathHashes` correspond to both local and foreign - // `ExpnData` (e.g `ExpnData.krate` may not be `LOCAL_CRATE`). Alternatively, - // we could look up the `ExpnData` from the metadata of foreign crates, - // but it seemed easier to have `OnDiskCache` be independent of the `CStore`. - expn_data: UnhashMap<ExpnHash, AbsoluteBytePos>, - // Additional information used when decoding hygiene data. - hygiene_context: HygieneDecodeContext, - // Maps `ExpnHash`es to their raw value from the *previous* - // compilation session. This is used as an initial 'guess' when - // we try to map an `ExpnHash` to its value in the current - // compilation session. - foreign_expn_data: UnhashMap<ExpnHash, u32>, -} - -// This type is used only for serialization and deserialization. -#[derive(Encodable, Decodable)] -struct Footer { - file_index_to_stable_id: FxHashMap<SourceFileIndex, EncodedSourceFileId>, - query_result_index: EncodedDepNodeIndex, - side_effects_index: EncodedDepNodeIndex, - // The location of all allocations. - interpret_alloc_index: Vec<u32>, - // See `OnDiskCache.syntax_contexts` - syntax_contexts: FxHashMap<u32, AbsoluteBytePos>, - // See `OnDiskCache.expn_data` - expn_data: UnhashMap<ExpnHash, AbsoluteBytePos>, - foreign_expn_data: UnhashMap<ExpnHash, u32>, -} - -pub type EncodedDepNodeIndex = Vec<(SerializedDepNodeIndex, AbsoluteBytePos)>; - -#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug, Encodable, Decodable)] -struct SourceFileIndex(u32); - -#[derive(Copy, Clone, Debug, Hash, Eq, PartialEq, Encodable, Decodable)] -pub struct AbsoluteBytePos(u64); - -impl AbsoluteBytePos { - fn new(pos: usize) -> AbsoluteBytePos { - AbsoluteBytePos(pos.try_into().expect("Incremental cache file size overflowed u64.")) - } - - fn to_usize(self) -> usize { - self.0 as usize - } -} - -/// An `EncodedSourceFileId` is the same as a `StableSourceFileId` except that -/// the source crate is represented as a [StableCrateId] instead of as a -/// `CrateNum`. This way `EncodedSourceFileId` can be encoded and decoded -/// without any additional context, i.e. with a simple `opaque::Decoder` (which -/// is the only thing available when decoding the cache's [Footer]. 
-#[derive(Encodable, Decodable, Clone, Debug)] -struct EncodedSourceFileId { - file_name_hash: u64, - stable_crate_id: StableCrateId, -} - -impl EncodedSourceFileId { - fn translate(&self, tcx: TyCtxt<'_>) -> StableSourceFileId { - let cnum = tcx.stable_crate_id_to_crate_num(self.stable_crate_id); - StableSourceFileId { file_name_hash: self.file_name_hash, cnum } - } - - fn new(tcx: TyCtxt<'_>, file: &SourceFile) -> EncodedSourceFileId { - let source_file_id = StableSourceFileId::new(file); - EncodedSourceFileId { - file_name_hash: source_file_id.file_name_hash, - stable_crate_id: tcx.stable_crate_id(source_file_id.cnum), - } - } -} - -impl<'sess> rustc_middle::ty::OnDiskCache<'sess> for OnDiskCache<'sess> { - /// Creates a new `OnDiskCache` instance from the serialized data in `data`. - fn new(sess: &'sess Session, data: Mmap, start_pos: usize) -> Self { - debug_assert!(sess.opts.incremental.is_some()); - - // Wrap in a scope so we can borrow `data`. - let footer: Footer = { - let mut decoder = MemDecoder::new(&data, start_pos); - - // Decode the *position* of the footer, which can be found in the - // last 8 bytes of the file. - decoder.set_position(data.len() - IntEncodedWithFixedSize::ENCODED_SIZE); - let footer_pos = IntEncodedWithFixedSize::decode(&mut decoder).0 as usize; - - // Decode the file footer, which contains all the lookup tables, etc. - decoder.set_position(footer_pos); - - decode_tagged(&mut decoder, TAG_FILE_FOOTER) - }; - - Self { - serialized_data: RwLock::new(Some(data)), - file_index_to_stable_id: footer.file_index_to_stable_id, - file_index_to_file: Default::default(), - source_map: sess.source_map(), - current_side_effects: Default::default(), - query_result_index: footer.query_result_index.into_iter().collect(), - prev_side_effects_index: footer.side_effects_index.into_iter().collect(), - alloc_decoding_state: AllocDecodingState::new(footer.interpret_alloc_index), - syntax_contexts: footer.syntax_contexts, - expn_data: footer.expn_data, - foreign_expn_data: footer.foreign_expn_data, - hygiene_context: Default::default(), - } - } - - fn new_empty(source_map: &'sess SourceMap) -> Self { - Self { - serialized_data: RwLock::new(None), - file_index_to_stable_id: Default::default(), - file_index_to_file: Default::default(), - source_map, - current_side_effects: Default::default(), - query_result_index: Default::default(), - prev_side_effects_index: Default::default(), - alloc_decoding_state: AllocDecodingState::new(Vec::new()), - syntax_contexts: FxHashMap::default(), - expn_data: UnhashMap::default(), - foreign_expn_data: UnhashMap::default(), - hygiene_context: Default::default(), - } - } - - /// Execute all cache promotions and release the serialized backing Mmap. - /// - /// Cache promotions require invoking queries, which needs to read the serialized data. - /// In order to serialize the new on-disk cache, the former on-disk cache file needs to be - /// deleted, hence we won't be able to refer to its memmapped data. - fn drop_serialized_data(&self, tcx: TyCtxt<'_>) { - // Load everything into memory so we can write it out to the on-disk - // cache. The vast majority of cacheable query results should already - // be in memory, so this should be a cheap operation. 
- // Do this *before* we clone 'latest_foreign_def_path_hashes', since - // loading existing queries may cause us to create new DepNodes, which - // may in turn end up invoking `store_foreign_def_id_hash` - tcx.dep_graph.exec_cache_promotions(tcx); - - *self.serialized_data.write() = None; - } - - fn serialize(&self, tcx: TyCtxt<'_>, encoder: FileEncoder) -> FileEncodeResult { - // Serializing the `DepGraph` should not modify it. - tcx.dep_graph.with_ignore(|| { - // Allocate `SourceFileIndex`es. - let (file_to_file_index, file_index_to_stable_id) = { - let files = tcx.sess.source_map().files(); - let mut file_to_file_index = - FxHashMap::with_capacity_and_hasher(files.len(), Default::default()); - let mut file_index_to_stable_id = - FxHashMap::with_capacity_and_hasher(files.len(), Default::default()); - - for (index, file) in files.iter().enumerate() { - let index = SourceFileIndex(index as u32); - let file_ptr: *const SourceFile = &**file as *const _; - file_to_file_index.insert(file_ptr, index); - let source_file_id = EncodedSourceFileId::new(tcx, &file); - file_index_to_stable_id.insert(index, source_file_id); - } - - (file_to_file_index, file_index_to_stable_id) - }; - - let hygiene_encode_context = HygieneEncodeContext::default(); - - let mut encoder = CacheEncoder { - tcx, - encoder, - type_shorthands: Default::default(), - predicate_shorthands: Default::default(), - interpret_allocs: Default::default(), - source_map: CachingSourceMapView::new(tcx.sess.source_map()), - file_to_file_index, - hygiene_context: &hygiene_encode_context, - symbol_table: Default::default(), - }; - - // Encode query results. - let mut query_result_index = EncodedDepNodeIndex::new(); - - tcx.sess.time("encode_query_results", || { - let enc = &mut encoder; - let qri = &mut query_result_index; - QueryCtxt::from_tcx(tcx).encode_query_results(enc, qri); - }); - - // Encode side effects. - let side_effects_index: EncodedDepNodeIndex = self - .current_side_effects - .borrow() - .iter() - .map(|(dep_node_index, side_effects)| { - let pos = AbsoluteBytePos::new(encoder.position()); - let dep_node_index = SerializedDepNodeIndex::new(dep_node_index.index()); - encoder.encode_tagged(dep_node_index, side_effects); - - (dep_node_index, pos) - }) - .collect(); - - let interpret_alloc_index = { - let mut interpret_alloc_index = Vec::new(); - let mut n = 0; - loop { - let new_n = encoder.interpret_allocs.len(); - // If we have found new IDs, serialize those too. - if n == new_n { - // Otherwise, abort. - break; - } - interpret_alloc_index.reserve(new_n - n); - for idx in n..new_n { - let id = encoder.interpret_allocs[idx]; - let pos = encoder.position() as u32; - interpret_alloc_index.push(pos); - interpret::specialized_encode_alloc_id(&mut encoder, tcx, id); - } - n = new_n; - } - interpret_alloc_index - }; - - let mut syntax_contexts = FxHashMap::default(); - let mut expn_data = UnhashMap::default(); - let mut foreign_expn_data = UnhashMap::default(); - - // Encode all hygiene data (`SyntaxContextData` and `ExpnData`) from the current - // session. 
- - hygiene_encode_context.encode( - &mut encoder, - |encoder, index, ctxt_data| { - let pos = AbsoluteBytePos::new(encoder.position()); - encoder.encode_tagged(TAG_SYNTAX_CONTEXT, ctxt_data); - syntax_contexts.insert(index, pos); - }, - |encoder, expn_id, data, hash| { - if expn_id.krate == LOCAL_CRATE { - let pos = AbsoluteBytePos::new(encoder.position()); - encoder.encode_tagged(TAG_EXPN_DATA, data); - expn_data.insert(hash, pos); - } else { - foreign_expn_data.insert(hash, expn_id.local_id.as_u32()); - } - }, - ); - - // Encode the file footer. - let footer_pos = encoder.position() as u64; - encoder.encode_tagged( - TAG_FILE_FOOTER, - &Footer { - file_index_to_stable_id, - query_result_index, - side_effects_index, - interpret_alloc_index, - syntax_contexts, - expn_data, - foreign_expn_data, - }, - ); - - // Encode the position of the footer as the last 8 bytes of the - // file so we know where to look for it. - IntEncodedWithFixedSize(footer_pos).encode(&mut encoder.encoder); - - // DO NOT WRITE ANYTHING TO THE ENCODER AFTER THIS POINT! The address - // of the footer must be the last thing in the data stream. - - encoder.finish() - }) - } -} - -impl<'sess> OnDiskCache<'sess> { - pub fn as_dyn(&self) -> &dyn rustc_middle::ty::OnDiskCache<'sess> { - self as _ - } - - /// Loads a `QuerySideEffects` created during the previous compilation session. - pub fn load_side_effects( - &self, - tcx: TyCtxt<'_>, - dep_node_index: SerializedDepNodeIndex, - ) -> QuerySideEffects { - let side_effects: Option<QuerySideEffects> = - self.load_indexed(tcx, dep_node_index, &self.prev_side_effects_index); - - side_effects.unwrap_or_default() - } - - /// Stores a `QuerySideEffects` emitted during the current compilation session. - /// Anything stored like this will be available via `load_side_effects` in - /// the next compilation session. - #[inline(never)] - #[cold] - pub fn store_side_effects(&self, dep_node_index: DepNodeIndex, side_effects: QuerySideEffects) { - let mut current_side_effects = self.current_side_effects.borrow_mut(); - let prev = current_side_effects.insert(dep_node_index, side_effects); - debug_assert!(prev.is_none()); - } - - /// Return whether the cached query result can be decoded. - pub fn loadable_from_disk(&self, dep_node_index: SerializedDepNodeIndex) -> bool { - self.query_result_index.contains_key(&dep_node_index) - // with_decoder is infallible, so we can stop here - } - - /// Returns the cached query result if there is something in the cache for - /// the given `SerializedDepNodeIndex`; otherwise returns `None`. - pub fn try_load_query_result<'tcx, T>( - &self, - tcx: TyCtxt<'tcx>, - dep_node_index: SerializedDepNodeIndex, - ) -> Option<T> - where - T: for<'a> Decodable<CacheDecoder<'a, 'tcx>>, - { - let opt_value = self.load_indexed(tcx, dep_node_index, &self.query_result_index); - debug_assert_eq!(opt_value.is_some(), self.loadable_from_disk(dep_node_index)); - opt_value - } - - /// Stores side effect emitted during computation of an anonymous query. - /// Since many anonymous queries can share the same `DepNode`, we aggregate - /// them -- as opposed to regular queries where we assume that there is a - /// 1:1 relationship between query-key and `DepNode`. 
- #[inline(never)] - #[cold] - pub fn store_side_effects_for_anon_node( - &self, - dep_node_index: DepNodeIndex, - side_effects: QuerySideEffects, - ) { - let mut current_side_effects = self.current_side_effects.borrow_mut(); - - let x = current_side_effects.entry(dep_node_index).or_default(); - x.append(side_effects); - } - - fn load_indexed<'tcx, T>( - &self, - tcx: TyCtxt<'tcx>, - dep_node_index: SerializedDepNodeIndex, - index: &FxHashMap<SerializedDepNodeIndex, AbsoluteBytePos>, - ) -> Option<T> - where - T: for<'a> Decodable<CacheDecoder<'a, 'tcx>>, - { - let pos = index.get(&dep_node_index).cloned()?; - let value = self.with_decoder(tcx, pos, |decoder| decode_tagged(decoder, dep_node_index)); - Some(value) - } - - fn with_decoder<'a, 'tcx, T, F: for<'s> FnOnce(&mut CacheDecoder<'s, 'tcx>) -> T>( - &'sess self, - tcx: TyCtxt<'tcx>, - pos: AbsoluteBytePos, - f: F, - ) -> T - where - T: Decodable<CacheDecoder<'a, 'tcx>>, - { - let serialized_data = self.serialized_data.read(); - let mut decoder = CacheDecoder { - tcx, - opaque: MemDecoder::new(serialized_data.as_deref().unwrap_or(&[]), pos.to_usize()), - source_map: self.source_map, - file_index_to_file: &self.file_index_to_file, - file_index_to_stable_id: &self.file_index_to_stable_id, - alloc_decoding_session: self.alloc_decoding_state.new_decoding_session(), - syntax_contexts: &self.syntax_contexts, - expn_data: &self.expn_data, - foreign_expn_data: &self.foreign_expn_data, - hygiene_context: &self.hygiene_context, - }; - f(&mut decoder) - } -} - -//- DECODING ------------------------------------------------------------------- - -/// A decoder that can read from the incremental compilation cache. It is similar to the one -/// we use for crate metadata decoding in that it can rebase spans and eventually -/// will also handle things that contain `Ty` instances. -pub struct CacheDecoder<'a, 'tcx> { - tcx: TyCtxt<'tcx>, - opaque: MemDecoder<'a>, - source_map: &'a SourceMap, - file_index_to_file: &'a Lock<FxHashMap<SourceFileIndex, Lrc<SourceFile>>>, - file_index_to_stable_id: &'a FxHashMap<SourceFileIndex, EncodedSourceFileId>, - alloc_decoding_session: AllocDecodingSession<'a>, - syntax_contexts: &'a FxHashMap<u32, AbsoluteBytePos>, - expn_data: &'a UnhashMap<ExpnHash, AbsoluteBytePos>, - foreign_expn_data: &'a UnhashMap<ExpnHash, u32>, - hygiene_context: &'a HygieneDecodeContext, -} - -impl<'a, 'tcx> CacheDecoder<'a, 'tcx> { - fn file_index_to_file(&self, index: SourceFileIndex) -> Lrc<SourceFile> { - let CacheDecoder { - tcx, - ref file_index_to_file, - ref file_index_to_stable_id, - ref source_map, - .. - } = *self; - - file_index_to_file - .borrow_mut() - .entry(index) - .or_insert_with(|| { - let stable_id = file_index_to_stable_id[&index].translate(tcx); - - // If this `SourceFile` is from a foreign crate, then make sure - // that we've imported all of the source files from that crate. - // This has usually already been done during macro invocation. - // However, when encoding query results like `TypeckResults`, - // we might encode an `AdtDef` for a foreign type (because it - // was referenced in the body of the function). There is no guarantee - // that we will load the source files from that crate during macro - // expansion, so we use `import_source_files` to ensure that the foreign - // source files are actually imported before we call `source_file_by_stable_id`. 
- if stable_id.cnum != LOCAL_CRATE { - self.tcx.cstore_untracked().import_source_files(self.tcx.sess, stable_id.cnum); - } - - source_map - .source_file_by_stable_id(stable_id) - .expect("failed to lookup `SourceFile` in new context") - }) - .clone() - } -} - -trait DecoderWithPosition: Decoder { - fn position(&self) -> usize; -} - -impl<'a> DecoderWithPosition for MemDecoder<'a> { - fn position(&self) -> usize { - self.position() - } -} - -impl<'a, 'tcx> DecoderWithPosition for CacheDecoder<'a, 'tcx> { - fn position(&self) -> usize { - self.opaque.position() - } -} - -// Decodes something that was encoded with `encode_tagged()` and verify that the -// tag matches and the correct amount of bytes was read. -fn decode_tagged<D, T, V>(decoder: &mut D, expected_tag: T) -> V -where - T: Decodable<D> + Eq + std::fmt::Debug, - V: Decodable<D>, - D: DecoderWithPosition, -{ - let start_pos = decoder.position(); - - let actual_tag = T::decode(decoder); - assert_eq!(actual_tag, expected_tag); - let value = V::decode(decoder); - let end_pos = decoder.position(); - - let expected_len: u64 = Decodable::decode(decoder); - assert_eq!((end_pos - start_pos) as u64, expected_len); - - value -} - -impl<'a, 'tcx> TyDecoder for CacheDecoder<'a, 'tcx> { - type I = TyCtxt<'tcx>; - const CLEAR_CROSS_CRATE: bool = false; - - #[inline] - fn interner(&self) -> TyCtxt<'tcx> { - self.tcx - } - - #[inline] - fn position(&self) -> usize { - self.opaque.position() - } - - #[inline] - fn peek_byte(&self) -> u8 { - self.opaque.data[self.opaque.position()] - } - - fn cached_ty_for_shorthand<F>(&mut self, shorthand: usize, or_insert_with: F) -> Ty<'tcx> - where - F: FnOnce(&mut Self) -> Ty<'tcx>, - { - let tcx = self.tcx; - - let cache_key = ty::CReaderCacheKey { cnum: None, pos: shorthand }; - - if let Some(&ty) = tcx.ty_rcache.borrow().get(&cache_key) { - return ty; - } - - let ty = or_insert_with(self); - // This may overwrite the entry, but it should overwrite with the same value. - tcx.ty_rcache.borrow_mut().insert_same(cache_key, ty); - ty - } - - fn with_position<F, R>(&mut self, pos: usize, f: F) -> R - where - F: FnOnce(&mut Self) -> R, - { - debug_assert!(pos < self.opaque.data.len()); - - let new_opaque = MemDecoder::new(self.opaque.data, pos); - let old_opaque = mem::replace(&mut self.opaque, new_opaque); - let r = f(self); - self.opaque = old_opaque; - r - } - - fn decode_alloc_id(&mut self) -> interpret::AllocId { - let alloc_decoding_session = self.alloc_decoding_session; - alloc_decoding_session.decode_alloc_id(self) - } -} - -rustc_middle::implement_ty_decoder!(CacheDecoder<'a, 'tcx>); - -// This ensures that the `Decodable<opaque::Decoder>::decode` specialization for `Vec<u8>` is used -// when a `CacheDecoder` is passed to `Decodable::decode`. Unfortunately, we have to manually opt -// into specializations this way, given how `CacheDecoder` and the decoding traits currently work. -impl<'a, 'tcx> Decodable<CacheDecoder<'a, 'tcx>> for Vec<u8> { - fn decode(d: &mut CacheDecoder<'a, 'tcx>) -> Self { - Decodable::decode(&mut d.opaque) - } -} - -impl<'a, 'tcx> Decodable<CacheDecoder<'a, 'tcx>> for SyntaxContext { - fn decode(decoder: &mut CacheDecoder<'a, 'tcx>) -> Self { - let syntax_contexts = decoder.syntax_contexts; - rustc_span::hygiene::decode_syntax_context(decoder, decoder.hygiene_context, |this, id| { - // This closure is invoked if we haven't already decoded the data for the `SyntaxContext` we are deserializing. - // We look up the position of the associated `SyntaxData` and decode it. 
- let pos = syntax_contexts.get(&id).unwrap(); - this.with_position(pos.to_usize(), |decoder| { - let data: SyntaxContextData = decode_tagged(decoder, TAG_SYNTAX_CONTEXT); - data - }) - }) - } -} - -impl<'a, 'tcx> Decodable<CacheDecoder<'a, 'tcx>> for ExpnId { - fn decode(decoder: &mut CacheDecoder<'a, 'tcx>) -> Self { - let hash = ExpnHash::decode(decoder); - if hash.is_root() { - return ExpnId::root(); - } - - if let Some(expn_id) = ExpnId::from_hash(hash) { - return expn_id; - } - - let krate = decoder.tcx.stable_crate_id_to_crate_num(hash.stable_crate_id()); - - let expn_id = if krate == LOCAL_CRATE { - // We look up the position of the associated `ExpnData` and decode it. - let pos = decoder - .expn_data - .get(&hash) - .unwrap_or_else(|| panic!("Bad hash {:?} (map {:?})", hash, decoder.expn_data)); - - let data: ExpnData = decoder - .with_position(pos.to_usize(), |decoder| decode_tagged(decoder, TAG_EXPN_DATA)); - let expn_id = rustc_span::hygiene::register_local_expn_id(data, hash); - - #[cfg(debug_assertions)] - { - use rustc_data_structures::stable_hasher::{HashStable, StableHasher}; - let local_hash: u64 = decoder.tcx.with_stable_hashing_context(|mut hcx| { - let mut hasher = StableHasher::new(); - expn_id.expn_data().hash_stable(&mut hcx, &mut hasher); - hasher.finish() - }); - debug_assert_eq!(hash.local_hash(), local_hash); - } - - expn_id - } else { - let index_guess = decoder.foreign_expn_data[&hash]; - decoder.tcx.cstore_untracked().expn_hash_to_expn_id( - decoder.tcx.sess, - krate, - index_guess, - hash, - ) - }; - - debug_assert_eq!(expn_id.krate, krate); - expn_id - } -} - -impl<'a, 'tcx> Decodable<CacheDecoder<'a, 'tcx>> for Span { - fn decode(decoder: &mut CacheDecoder<'a, 'tcx>) -> Self { - let ctxt = SyntaxContext::decode(decoder); - let parent = Option::<LocalDefId>::decode(decoder); - let tag: u8 = Decodable::decode(decoder); - - if tag == TAG_PARTIAL_SPAN { - return Span::new(BytePos(0), BytePos(0), ctxt, parent); - } else if tag == TAG_RELATIVE_SPAN { - let dlo = u32::decode(decoder); - let dto = u32::decode(decoder); - - let enclosing = decoder.tcx.source_span_untracked(parent.unwrap()).data_untracked(); - let span = Span::new( - enclosing.lo + BytePos::from_u32(dlo), - enclosing.lo + BytePos::from_u32(dto), - ctxt, - parent, - ); - - return span; - } else { - debug_assert_eq!(tag, TAG_FULL_SPAN); - } - - let file_lo_index = SourceFileIndex::decode(decoder); - let line_lo = usize::decode(decoder); - let col_lo = BytePos::decode(decoder); - let len = BytePos::decode(decoder); - - let file_lo = decoder.file_index_to_file(file_lo_index); - let lo = file_lo.lines(|lines| lines[line_lo - 1] + col_lo); - let hi = lo + len; - - Span::new(lo, hi, ctxt, parent) - } -} - -// copy&paste impl from rustc_metadata -impl<'a, 'tcx> Decodable<CacheDecoder<'a, 'tcx>> for Symbol { - fn decode(d: &mut CacheDecoder<'a, 'tcx>) -> Self { - let tag = d.read_u8(); - - match tag { - SYMBOL_STR => { - let s = d.read_str(); - Symbol::intern(s) - } - SYMBOL_OFFSET => { - // read str offset - let pos = d.read_usize(); - let old_pos = d.opaque.position(); - - // move to str ofset and read - d.opaque.set_position(pos); - let s = d.read_str(); - let sym = Symbol::intern(s); - - // restore position - d.opaque.set_position(old_pos); - - sym - } - SYMBOL_PREINTERNED => { - let symbol_index = d.read_u32(); - Symbol::new_from_decoded(symbol_index) - } - _ => unreachable!(), - } - } -} - -impl<'a, 'tcx> Decodable<CacheDecoder<'a, 'tcx>> for CrateNum { - fn decode(d: &mut CacheDecoder<'a, 'tcx>) -> 
Self { - let stable_id = StableCrateId::decode(d); - let cnum = d.tcx.stable_crate_id_to_crate_num(stable_id); - cnum - } -} - -// This impl makes sure that we get a runtime error when we try decode a -// `DefIndex` that is not contained in a `DefId`. Such a case would be problematic -// because we would not know how to transform the `DefIndex` to the current -// context. -impl<'a, 'tcx> Decodable<CacheDecoder<'a, 'tcx>> for DefIndex { - fn decode(_d: &mut CacheDecoder<'a, 'tcx>) -> DefIndex { - panic!("trying to decode `DefIndex` outside the context of a `DefId`") - } -} - -// Both the `CrateNum` and the `DefIndex` of a `DefId` can change in between two -// compilation sessions. We use the `DefPathHash`, which is stable across -// sessions, to map the old `DefId` to the new one. -impl<'a, 'tcx> Decodable<CacheDecoder<'a, 'tcx>> for DefId { - fn decode(d: &mut CacheDecoder<'a, 'tcx>) -> Self { - // Load the `DefPathHash` which is was we encoded the `DefId` as. - let def_path_hash = DefPathHash::decode(d); - - // Using the `DefPathHash`, we can lookup the new `DefId`. - // Subtle: We only encode a `DefId` as part of a query result. - // If we get to this point, then all of the query inputs were green, - // which means that the definition with this hash is guaranteed to - // still exist in the current compilation session. - d.tcx.def_path_hash_to_def_id(def_path_hash, &mut || { - panic!("Failed to convert DefPathHash {def_path_hash:?}") - }) - } -} - -impl<'a, 'tcx> Decodable<CacheDecoder<'a, 'tcx>> for &'tcx UnordSet<LocalDefId> { - fn decode(d: &mut CacheDecoder<'a, 'tcx>) -> Self { - RefDecodable::decode(d) - } -} - -impl<'a, 'tcx> Decodable<CacheDecoder<'a, 'tcx>> for &'tcx FxHashMap<DefId, Ty<'tcx>> { - fn decode(d: &mut CacheDecoder<'a, 'tcx>) -> Self { - RefDecodable::decode(d) - } -} - -impl<'a, 'tcx> Decodable<CacheDecoder<'a, 'tcx>> - for &'tcx IndexVec<mir::Promoted, mir::Body<'tcx>> -{ - fn decode(d: &mut CacheDecoder<'a, 'tcx>) -> Self { - RefDecodable::decode(d) - } -} - -impl<'a, 'tcx> Decodable<CacheDecoder<'a, 'tcx>> for &'tcx [(ty::Predicate<'tcx>, Span)] { - fn decode(d: &mut CacheDecoder<'a, 'tcx>) -> Self { - RefDecodable::decode(d) - } -} - -impl<'a, 'tcx> Decodable<CacheDecoder<'a, 'tcx>> for &'tcx [(ty::Clause<'tcx>, Span)] { - fn decode(d: &mut CacheDecoder<'a, 'tcx>) -> Self { - RefDecodable::decode(d) - } -} - -impl<'a, 'tcx> Decodable<CacheDecoder<'a, 'tcx>> for &'tcx [rustc_ast::InlineAsmTemplatePiece] { - fn decode(d: &mut CacheDecoder<'a, 'tcx>) -> Self { - RefDecodable::decode(d) - } -} - -macro_rules! impl_ref_decoder { - (<$tcx:tt> $($ty:ty,)*) => { - $(impl<'a, $tcx> Decodable<CacheDecoder<'a, $tcx>> for &$tcx [$ty] { - fn decode(d: &mut CacheDecoder<'a, $tcx>) -> Self { - RefDecodable::decode(d) - } - })* - }; -} - -impl_ref_decoder! {<'tcx> - Span, - rustc_ast::Attribute, - rustc_span::symbol::Ident, - ty::Variance, - rustc_span::def_id::DefId, - rustc_span::def_id::LocalDefId, - (rustc_middle::middle::exported_symbols::ExportedSymbol<'tcx>, rustc_middle::middle::exported_symbols::SymbolExportInfo), - ty::DeducedParamAttrs, -} - -//- ENCODING ------------------------------------------------------------------- - -/// An encoder that can write to the incremental compilation cache. 
-pub struct CacheEncoder<'a, 'tcx> { - tcx: TyCtxt<'tcx>, - encoder: FileEncoder, - type_shorthands: FxHashMap<Ty<'tcx>, usize>, - predicate_shorthands: FxHashMap<ty::PredicateKind<'tcx>, usize>, - interpret_allocs: FxIndexSet<interpret::AllocId>, - source_map: CachingSourceMapView<'tcx>, - file_to_file_index: FxHashMap<*const SourceFile, SourceFileIndex>, - hygiene_context: &'a HygieneEncodeContext, - symbol_table: FxHashMap<Symbol, usize>, -} - -impl<'a, 'tcx> CacheEncoder<'a, 'tcx> { - fn source_file_index(&mut self, source_file: Lrc<SourceFile>) -> SourceFileIndex { - self.file_to_file_index[&(&*source_file as *const SourceFile)] - } - - /// Encode something with additional information that allows to do some - /// sanity checks when decoding the data again. This method will first - /// encode the specified tag, then the given value, then the number of - /// bytes taken up by tag and value. On decoding, we can then verify that - /// we get the expected tag and read the expected number of bytes. - fn encode_tagged<T: Encodable<Self>, V: Encodable<Self>>(&mut self, tag: T, value: &V) { - let start_pos = self.position(); - - tag.encode(self); - value.encode(self); - - let end_pos = self.position(); - ((end_pos - start_pos) as u64).encode(self); - } - - fn finish(self) -> Result<usize, io::Error> { - self.encoder.finish() - } -} - -impl<'a, 'tcx> Encodable<CacheEncoder<'a, 'tcx>> for SyntaxContext { - fn encode(&self, s: &mut CacheEncoder<'a, 'tcx>) { - rustc_span::hygiene::raw_encode_syntax_context(*self, s.hygiene_context, s); - } -} - -impl<'a, 'tcx> Encodable<CacheEncoder<'a, 'tcx>> for ExpnId { - fn encode(&self, s: &mut CacheEncoder<'a, 'tcx>) { - s.hygiene_context.schedule_expn_data_for_encoding(*self); - self.expn_hash().encode(s); - } -} - -impl<'a, 'tcx> Encodable<CacheEncoder<'a, 'tcx>> for Span { - fn encode(&self, s: &mut CacheEncoder<'a, 'tcx>) { - let span_data = self.data_untracked(); - span_data.ctxt.encode(s); - span_data.parent.encode(s); - - if span_data.is_dummy() { - return TAG_PARTIAL_SPAN.encode(s); - } - - if let Some(parent) = span_data.parent { - let enclosing = s.tcx.source_span(parent).data_untracked(); - if enclosing.contains(span_data) { - TAG_RELATIVE_SPAN.encode(s); - (span_data.lo - enclosing.lo).to_u32().encode(s); - (span_data.hi - enclosing.lo).to_u32().encode(s); - return; - } - } - - let pos = s.source_map.byte_pos_to_line_and_col(span_data.lo); - let partial_span = match &pos { - Some((file_lo, _, _)) => !file_lo.contains(span_data.hi), - None => true, - }; - - if partial_span { - return TAG_PARTIAL_SPAN.encode(s); - } - - let (file_lo, line_lo, col_lo) = pos.unwrap(); - - let len = span_data.hi - span_data.lo; - - let source_file_index = s.source_file_index(file_lo); - - TAG_FULL_SPAN.encode(s); - source_file_index.encode(s); - line_lo.encode(s); - col_lo.encode(s); - len.encode(s); - } -} - -// copy&paste impl from rustc_metadata -impl<'a, 'tcx> Encodable<CacheEncoder<'a, 'tcx>> for Symbol { - fn encode(&self, s: &mut CacheEncoder<'a, 'tcx>) { - // if symbol preinterned, emit tag and symbol index - if self.is_preinterned() { - s.encoder.emit_u8(SYMBOL_PREINTERNED); - s.encoder.emit_u32(self.as_u32()); - } else { - // otherwise write it as string or as offset to it - match s.symbol_table.entry(*self) { - Entry::Vacant(o) => { - s.encoder.emit_u8(SYMBOL_STR); - let pos = s.encoder.position(); - o.insert(pos); - s.emit_str(self.as_str()); - } - Entry::Occupied(o) => { - let x = *o.get(); - s.emit_u8(SYMBOL_OFFSET); - s.emit_usize(x); - } - } - } - } -} 
- -impl<'a, 'tcx> TyEncoder for CacheEncoder<'a, 'tcx> { - type I = TyCtxt<'tcx>; - const CLEAR_CROSS_CRATE: bool = false; - - fn position(&self) -> usize { - self.encoder.position() - } - fn type_shorthands(&mut self) -> &mut FxHashMap<Ty<'tcx>, usize> { - &mut self.type_shorthands - } - fn predicate_shorthands(&mut self) -> &mut FxHashMap<ty::PredicateKind<'tcx>, usize> { - &mut self.predicate_shorthands - } - fn encode_alloc_id(&mut self, alloc_id: &interpret::AllocId) { - let (index, _) = self.interpret_allocs.insert_full(*alloc_id); - - index.encode(self); - } -} - -impl<'a, 'tcx> Encodable<CacheEncoder<'a, 'tcx>> for CrateNum { - fn encode(&self, s: &mut CacheEncoder<'a, 'tcx>) { - s.tcx.stable_crate_id(*self).encode(s); - } -} - -impl<'a, 'tcx> Encodable<CacheEncoder<'a, 'tcx>> for DefId { - fn encode(&self, s: &mut CacheEncoder<'a, 'tcx>) { - s.tcx.def_path_hash(*self).encode(s); - } -} - -impl<'a, 'tcx> Encodable<CacheEncoder<'a, 'tcx>> for DefIndex { - fn encode(&self, _: &mut CacheEncoder<'a, 'tcx>) { - bug!("encoding `DefIndex` without context"); - } -} - -macro_rules! encoder_methods { - ($($name:ident($ty:ty);)*) => { - #[inline] - $(fn $name(&mut self, value: $ty) { - self.encoder.$name(value) - })* - } -} - -impl<'a, 'tcx> Encoder for CacheEncoder<'a, 'tcx> { - encoder_methods! { - emit_usize(usize); - emit_u128(u128); - emit_u64(u64); - emit_u32(u32); - emit_u16(u16); - emit_u8(u8); - - emit_isize(isize); - emit_i128(i128); - emit_i64(i64); - emit_i32(i32); - emit_i16(i16); - emit_i8(i8); - - emit_bool(bool); - emit_char(char); - emit_str(&str); - emit_raw_bytes(&[u8]); - } -} - -// This ensures that the `Encodable<opaque::FileEncoder>::encode` specialization for byte slices -// is used when a `CacheEncoder` having an `opaque::FileEncoder` is passed to `Encodable::encode`. -// Unfortunately, we have to manually opt into specializations this way, given how `CacheEncoder` -// and the encoding traits currently work. -impl<'a, 'tcx> Encodable<CacheEncoder<'a, 'tcx>> for [u8] { - fn encode(&self, e: &mut CacheEncoder<'a, 'tcx>) { - self.encode(&mut e.encoder); - } -} - -pub(crate) fn encode_query_results<'a, 'tcx, Q>( - query: Q, - qcx: QueryCtxt<'tcx>, - encoder: &mut CacheEncoder<'a, 'tcx>, - query_result_index: &mut EncodedDepNodeIndex, -) where - Q: super::QueryConfigRestored<'tcx>, - Q::RestoredValue: Encodable<CacheEncoder<'a, 'tcx>>, -{ - let _timer = qcx - .tcx - .profiler() - .verbose_generic_activity_with_arg("encode_query_results_for", query.name()); - - assert!(query.query_state(qcx).all_inactive()); - let cache = query.query_cache(qcx); - cache.iter(&mut |key, value, dep_node| { - if query.cache_on_disk(qcx.tcx, &key) { - let dep_node = SerializedDepNodeIndex::new(dep_node.index()); - - // Record position of the cache entry. - query_result_index.push((dep_node, AbsoluteBytePos::new(encoder.encoder.position()))); - - // Encode the type check tables with the `SerializedDepNodeIndex` - // as tag. - encoder.encode_tagged(dep_node, &Q::restore(*value)); - } - }); -} diff --git a/compiler/rustc_query_impl/src/plumbing.rs b/compiler/rustc_query_impl/src/plumbing.rs index afbead7d1..244f0e84b 100644 --- a/compiler/rustc_query_impl/src/plumbing.rs +++ b/compiler/rustc_query_impl/src/plumbing.rs @@ -2,35 +2,45 @@ //! generate the actual methods on tcx which find and execute the provider, //! manage the caches, and so forth. 
-use crate::on_disk_cache::{CacheDecoder, CacheEncoder, EncodedDepNodeIndex}; -use crate::profiling_support::QueryKeyStringCache; -use crate::{on_disk_cache, Queries}; -use rustc_data_structures::stable_hasher::{HashStable, StableHasher}; -use rustc_data_structures::sync::{AtomicU64, Lock}; -use rustc_errors::{Diagnostic, Handler}; +use crate::rustc_middle::dep_graph::DepContext; +use crate::rustc_middle::ty::TyEncoder; +use crate::QueryConfigRestored; +use rustc_data_structures::stable_hasher::{Hash64, HashStable, StableHasher}; +use rustc_data_structures::sync::Lock; +use rustc_errors::Diagnostic; +use rustc_index::Idx; use rustc_middle::dep_graph::{ self, DepKind, DepKindStruct, DepNode, DepNodeIndex, SerializedDepNodeIndex, }; +use rustc_middle::query::on_disk_cache::AbsoluteBytePos; +use rustc_middle::query::on_disk_cache::{CacheDecoder, CacheEncoder, EncodedDepNodeIndex}; use rustc_middle::query::Key; use rustc_middle::ty::tls::{self, ImplicitCtxt}; use rustc_middle::ty::{self, TyCtxt}; use rustc_query_system::dep_graph::{DepNodeParams, HasDepContext}; use rustc_query_system::ich::StableHashingContext; use rustc_query_system::query::{ - force_query, QueryConfig, QueryContext, QueryJobId, QueryMap, QuerySideEffects, QueryStackFrame, + force_query, QueryCache, QueryConfig, QueryContext, QueryJobId, QueryMap, QuerySideEffects, + QueryStackFrame, }; use rustc_query_system::{LayoutOfDepth, QueryOverflow}; use rustc_serialize::Decodable; +use rustc_serialize::Encodable; use rustc_session::Limit; use rustc_span::def_id::LOCAL_CRATE; -use std::any::Any; use std::num::NonZeroU64; use thin_vec::ThinVec; #[derive(Copy, Clone)] pub struct QueryCtxt<'tcx> { pub tcx: TyCtxt<'tcx>, - pub queries: &'tcx Queries<'tcx>, +} + +impl<'tcx> QueryCtxt<'tcx> { + #[inline] + pub fn new(tcx: TyCtxt<'tcx>) -> Self { + QueryCtxt { tcx } + } } impl<'tcx> std::ops::Deref for QueryCtxt<'tcx> { @@ -53,44 +63,56 @@ impl<'tcx> HasDepContext for QueryCtxt<'tcx> { } impl QueryContext for QueryCtxt<'_> { + #[inline] fn next_job_id(self) -> QueryJobId { QueryJobId( NonZeroU64::new( - self.queries.jobs.fetch_add(1, rustc_data_structures::sync::Ordering::Relaxed), + self.query_system.jobs.fetch_add(1, rustc_data_structures::sync::Ordering::Relaxed), ) .unwrap(), ) } + #[inline] fn current_query_job(self) -> Option<QueryJobId> { - tls::with_related_context(*self, |icx| icx.query) + tls::with_related_context(self.tcx, |icx| icx.query) } fn try_collect_active_jobs(self) -> Option<QueryMap<DepKind>> { - self.queries.try_collect_active_jobs(*self) + let mut jobs = QueryMap::default(); + + for collect in super::TRY_COLLECT_ACTIVE_JOBS.iter() { + collect(self.tcx, &mut jobs); + } + + Some(jobs) } // Interactions with on_disk_cache fn load_side_effects(self, prev_dep_node_index: SerializedDepNodeIndex) -> QuerySideEffects { - self.queries + self.query_system .on_disk_cache .as_ref() - .map(|c| c.load_side_effects(*self, prev_dep_node_index)) + .map(|c| c.load_side_effects(self.tcx, prev_dep_node_index)) .unwrap_or_default() } + #[inline(never)] + #[cold] fn store_side_effects(self, dep_node_index: DepNodeIndex, side_effects: QuerySideEffects) { - if let Some(c) = self.queries.on_disk_cache.as_ref() { + if let Some(c) = self.query_system.on_disk_cache.as_ref() { c.store_side_effects(dep_node_index, side_effects) } } + #[inline(never)] + #[cold] fn store_side_effects_for_anon_node( self, dep_node_index: DepNodeIndex, side_effects: QuerySideEffects, ) { - if let Some(c) = self.queries.on_disk_cache.as_ref() { + if let Some(c) = 
self.query_system.on_disk_cache.as_ref() { c.store_side_effects_for_anon_node(dep_node_index, side_effects) } } @@ -109,14 +131,14 @@ impl QueryContext for QueryCtxt<'_> { // The `TyCtxt` stored in TLS has the same global interner lifetime // as `self`, so we use `with_related_context` to relate the 'tcx lifetimes // when accessing the `ImplicitCtxt`. - tls::with_related_context(*self, move |current_icx| { + tls::with_related_context(self.tcx, move |current_icx| { if depth_limit && !self.recursion_limit().value_within_limit(current_icx.query_depth) { self.depth_limit_error(token); } // Update the `ImplicitCtxt` to point to our new query job. let new_icx = ImplicitCtxt { - tcx: *self, + tcx: self.tcx, query: Some(token), diagnostics, query_depth: current_icx.query_depth + depth_limit as usize, @@ -152,51 +174,18 @@ impl QueryContext for QueryCtxt<'_> { } } -impl<'tcx> QueryCtxt<'tcx> { - #[inline] - pub fn from_tcx(tcx: TyCtxt<'tcx>) -> Self { - let queries = tcx.queries.as_any(); - let queries = unsafe { - let queries = std::mem::transmute::<&dyn Any, &dyn Any>(queries); - let queries = queries.downcast_ref().unwrap(); - let queries = std::mem::transmute::<&Queries<'_>, &Queries<'_>>(queries); - queries - }; - QueryCtxt { tcx, queries } - } - - pub(crate) fn on_disk_cache(self) -> Option<&'tcx on_disk_cache::OnDiskCache<'tcx>> { - self.queries.on_disk_cache.as_ref() - } - - pub(super) fn encode_query_results( - self, - encoder: &mut CacheEncoder<'_, 'tcx>, - query_result_index: &mut EncodedDepNodeIndex, - ) { - for query in &self.queries.query_structs { - if let Some(encode) = query.encode_query_results { - encode(self, encoder, query_result_index); - } - } - } - - pub fn try_print_query_stack( - self, - query: Option<QueryJobId>, - handler: &Handler, - num_frames: Option<usize>, - ) -> usize { - rustc_query_system::query::print_query_stack(self, query, handler, num_frames) - } +pub(super) fn try_mark_green<'tcx>(tcx: TyCtxt<'tcx>, dep_node: &dep_graph::DepNode) -> bool { + tcx.dep_graph.try_mark_green(QueryCtxt::new(tcx), dep_node).is_some() } -#[derive(Clone, Copy)] -pub(crate) struct QueryStruct<'tcx> { - pub try_collect_active_jobs: fn(QueryCtxt<'tcx>, &mut QueryMap<DepKind>) -> Option<()>, - pub alloc_self_profile_query_strings: fn(TyCtxt<'tcx>, &mut QueryKeyStringCache), - pub encode_query_results: - Option<fn(QueryCtxt<'tcx>, &mut CacheEncoder<'_, 'tcx>, &mut EncodedDepNodeIndex)>, +pub(super) fn encode_all_query_results<'tcx>( + tcx: TyCtxt<'tcx>, + encoder: &mut CacheEncoder<'_, 'tcx>, + query_result_index: &mut EncodedDepNodeIndex, +) { + for encode in super::ENCODE_QUERY_RESULTS.iter().copied().filter_map(|e| e) { + encode(tcx, encoder, query_result_index); + } } macro_rules! handle_cycle_error { @@ -275,14 +264,14 @@ macro_rules! hash_result { } macro_rules! 
call_provider { - ([][$qcx:expr, $name:ident, $key:expr]) => {{ - ($qcx.queries.local_providers.$name)($qcx.tcx, $key) + ([][$tcx:expr, $name:ident, $key:expr]) => {{ + ($tcx.query_system.fns.local_providers.$name)($tcx, $key) }}; - ([(separate_provide_extern) $($rest:tt)*][$qcx:expr, $name:ident, $key:expr]) => {{ + ([(separate_provide_extern) $($rest:tt)*][$tcx:expr, $name:ident, $key:expr]) => {{ if let Some(key) = $key.as_local_key() { - ($qcx.queries.local_providers.$name)($qcx.tcx, key) + ($tcx.query_system.fns.local_providers.$name)($tcx, key) } else { - ($qcx.queries.extern_providers.$name)($qcx.tcx, $key) + ($tcx.query_system.fns.extern_providers.$name)($tcx, $key) } }}; ([$other:tt $($modifiers:tt)*][$($args:tt)*]) => { @@ -306,7 +295,7 @@ pub(crate) fn create_query_frame< 'tcx, K: Copy + Key + for<'a> HashStable<StableHashingContext<'a>>, >( - tcx: QueryCtxt<'tcx>, + tcx: TyCtxt<'tcx>, do_describe: fn(TyCtxt<'tcx>, K) -> String, key: K, kind: DepKind, @@ -318,7 +307,7 @@ pub(crate) fn create_query_frame< // Showing visible path instead of any path is not that important in production. ty::print::with_no_visible_paths!( // Force filename-line mode to avoid invoking `type_of` query. - ty::print::with_forced_impl_filename_line!(do_describe(tcx.tcx, key)) + ty::print::with_forced_impl_filename_line!(do_describe(tcx, key)) ) ); let description = @@ -328,7 +317,7 @@ pub(crate) fn create_query_frame< // so exit to avoid infinite recursion. None } else { - Some(key.default_span(*tcx)) + Some(key.default_span(tcx)) }; let def_id = key.key_as_def_id(); let def_kind = if kind == dep_graph::DepKind::opt_def_kind { @@ -342,7 +331,7 @@ pub(crate) fn create_query_frame< let mut hasher = StableHasher::new(); std::mem::discriminant(&kind).hash_stable(&mut hcx, &mut hasher); key.hash_stable(&mut hcx, &mut hasher); - hasher.finish::<u64>() + hasher.finish::<Hash64>() }) }; let ty_adt_id = key.ty_adt_id(); @@ -350,6 +339,34 @@ pub(crate) fn create_query_frame< QueryStackFrame::new(description, span, def_id, def_kind, kind, ty_adt_id, hash) } +pub(crate) fn encode_query_results<'a, 'tcx, Q>( + query: Q::Config, + qcx: QueryCtxt<'tcx>, + encoder: &mut CacheEncoder<'a, 'tcx>, + query_result_index: &mut EncodedDepNodeIndex, +) where + Q: super::QueryConfigRestored<'tcx>, + Q::RestoredValue: Encodable<CacheEncoder<'a, 'tcx>>, +{ + let _timer = + qcx.profiler().verbose_generic_activity_with_arg("encode_query_results_for", query.name()); + + assert!(query.query_state(qcx).all_inactive()); + let cache = query.query_cache(qcx); + cache.iter(&mut |key, value, dep_node| { + if query.cache_on_disk(qcx.tcx, &key) { + let dep_node = SerializedDepNodeIndex::new(dep_node.index()); + + // Record position of the cache entry. + query_result_index.push((dep_node, AbsoluteBytePos::new(encoder.position()))); + + // Encode the type check tables with the `SerializedDepNodeIndex` + // as tag. 
+ encoder.encode_tagged(dep_node, &Q::restore(*value)); + } + }); +} + fn try_load_from_on_disk_cache<'tcx, Q>(query: Q, tcx: TyCtxt<'tcx>, dep_node: DepNode) where Q: QueryConfig<QueryCtxt<'tcx>>, @@ -364,8 +381,8 @@ where } } -pub(crate) fn loadable_from_disk<'tcx>(tcx: QueryCtxt<'tcx>, id: SerializedDepNodeIndex) -> bool { - if let Some(cache) = tcx.on_disk_cache().as_ref() { +pub(crate) fn loadable_from_disk<'tcx>(tcx: TyCtxt<'tcx>, id: SerializedDepNodeIndex) -> bool { + if let Some(cache) = tcx.query_system.on_disk_cache.as_ref() { cache.loadable_from_disk(id) } else { false @@ -373,13 +390,27 @@ pub(crate) fn loadable_from_disk<'tcx>(tcx: QueryCtxt<'tcx>, id: SerializedDepNo } pub(crate) fn try_load_from_disk<'tcx, V>( - tcx: QueryCtxt<'tcx>, - id: SerializedDepNodeIndex, + tcx: TyCtxt<'tcx>, + prev_index: SerializedDepNodeIndex, + index: DepNodeIndex, ) -> Option<V> where V: for<'a> Decodable<CacheDecoder<'a, 'tcx>>, { - tcx.on_disk_cache().as_ref()?.try_load_query_result(*tcx, id) + let on_disk_cache = tcx.query_system.on_disk_cache.as_ref()?; + + let prof_timer = tcx.prof.incr_cache_loading(); + + // The call to `with_query_deserialization` enforces that no new `DepNodes` + // are created during deserialization. See the docs of that method for more + // details. + let value = tcx + .dep_graph + .with_query_deserialization(|| on_disk_cache.try_load_query_result(tcx, prev_index)); + + prof_timer.finish_with_query_invocation_id(index.into()); + + value } fn force_from_dep_node<'tcx, Q>(query: Q, tcx: TyCtxt<'tcx>, dep_node: DepNode) -> bool @@ -407,8 +438,7 @@ where if let Some(key) = Q::Key::recover(tcx, &dep_node) { #[cfg(debug_assertions)] let _guard = tracing::span!(tracing::Level::TRACE, stringify!($name), ?key).entered(); - let tcx = QueryCtxt::from_tcx(tcx); - force_query(query, tcx, key, dep_node); + force_query(query, QueryCtxt::new(tcx), key, dep_node); true } else { false @@ -417,10 +447,9 @@ where pub(crate) fn query_callback<'tcx, Q>(is_anon: bool, is_eval_always: bool) -> DepKindStruct<'tcx> where - Q: QueryConfig<QueryCtxt<'tcx>> + Default, - Q::Key: DepNodeParams<TyCtxt<'tcx>>, + Q: QueryConfigRestored<'tcx>, { - let fingerprint_style = Q::Key::fingerprint_style(); + let fingerprint_style = <Q::Config as QueryConfig<QueryCtxt<'tcx>>>::Key::fingerprint_style(); if is_anon || !fingerprint_style.reconstructible() { return DepKindStruct { @@ -436,13 +465,25 @@ where is_anon, is_eval_always, fingerprint_style, - force_from_dep_node: Some(|tcx, dep_node| force_from_dep_node(Q::default(), tcx, dep_node)), + force_from_dep_node: Some(|tcx, dep_node| { + force_from_dep_node(Q::config(tcx), tcx, dep_node) + }), try_load_from_on_disk_cache: Some(|tcx, dep_node| { - try_load_from_on_disk_cache(Q::default(), tcx, dep_node) + try_load_from_on_disk_cache(Q::config(tcx), tcx, dep_node) }), } } +macro_rules! item_if_cached { + ([] $tokens:tt) => {}; + ([(cache) $($rest:tt)*] { $($tokens:tt)* }) => { + $($tokens)* + }; + ([$other:tt $($modifiers:tt)*] $tokens:tt) => { + item_if_cached! { [$($modifiers)*] $tokens } + }; +} + macro_rules! expand_if_cached { ([], $tokens:expr) => {{ None @@ -455,168 +496,226 @@ macro_rules! 
expand_if_cached { }; } +/// Don't show the backtrace for query system by default +/// use `RUST_BACKTRACE=full` to show all the backtraces +#[inline(never)] +pub fn __rust_begin_short_backtrace<F, T>(f: F) -> T +where + F: FnOnce() -> T, +{ + let result = f(); + std::hint::black_box(()); + result +} + // NOTE: `$V` isn't used here, but we still need to match on it so it can be passed to other macros // invoked by `rustc_query_append`. macro_rules! define_queries { ( $($(#[$attr:meta])* [$($modifiers:tt)*] fn $name:ident($($K:tt)*) -> $V:ty,)*) => { - define_queries_struct! { - input: ($(([$($modifiers)*] [$($attr)*] [$name]))*) - } - #[allow(nonstandard_style)] - mod queries { + pub(crate) mod query_impl { $(pub mod $name { + use super::super::*; use std::marker::PhantomData; - $( - #[derive(Copy, Clone, Default)] - pub struct $name<'tcx> { - data: PhantomData<&'tcx ()> + pub mod get_query_incr { + use super::*; + + // Adding `__rust_end_short_backtrace` marker to backtraces so that we emit the frames + // when `RUST_BACKTRACE=1`, add a new mod with `$name` here is to allow duplicate naming + #[inline(never)] + pub fn __rust_end_short_backtrace<'tcx>( + tcx: TyCtxt<'tcx>, + span: Span, + key: queries::$name::Key<'tcx>, + mode: QueryMode, + ) -> Option<Erase<queries::$name::Value<'tcx>>> { + get_query_incr( + QueryType::config(tcx), + QueryCtxt::new(tcx), + span, + key, + mode + ) } - )* - } - - $(impl<'tcx> QueryConfig<QueryCtxt<'tcx>> for queries::$name<'tcx> { - type Key = query_keys::$name<'tcx>; - type Value = Erase<query_values::$name<'tcx>>; - - #[inline(always)] - fn name(self) -> &'static str { - stringify!($name) - } - - #[inline] - fn format_value(self) -> fn(&Self::Value) -> String { - |value| format!("{:?}", restore::<query_values::$name<'tcx>>(*value)) - } - - #[inline] - fn cache_on_disk(self, tcx: TyCtxt<'tcx>, key: &Self::Key) -> bool { - ::rustc_middle::query::cached::$name(tcx, key) - } - - type Cache = query_storage::$name<'tcx>; - - #[inline(always)] - fn query_state<'a>(self, tcx: QueryCtxt<'tcx>) -> &'a QueryState<Self::Key, crate::dep_graph::DepKind> - where QueryCtxt<'tcx>: 'a - { - &tcx.queries.$name } - #[inline(always)] - fn query_cache<'a>(self, tcx: QueryCtxt<'tcx>) -> &'a Self::Cache - where 'tcx:'a - { - &tcx.query_system.caches.$name + pub mod get_query_non_incr { + use super::*; + + #[inline(never)] + pub fn __rust_end_short_backtrace<'tcx>( + tcx: TyCtxt<'tcx>, + span: Span, + key: queries::$name::Key<'tcx>, + __mode: QueryMode, + ) -> Option<Erase<queries::$name::Value<'tcx>>> { + Some(get_query_non_incr( + QueryType::config(tcx), + QueryCtxt::new(tcx), + span, + key, + )) + } } - fn execute_query(self, tcx: TyCtxt<'tcx>, key: Self::Key) -> Self::Value { - erase(tcx.$name(key)) + pub fn dynamic_query<'tcx>() -> DynamicQuery<'tcx, queries::$name::Storage<'tcx>> { + DynamicQuery { + name: stringify!($name), + eval_always: is_eval_always!([$($modifiers)*]), + dep_kind: dep_graph::DepKind::$name, + handle_cycle_error: handle_cycle_error!([$($modifiers)*]), + query_state: offset_of!(QueryStates<'tcx> => $name), + query_cache: offset_of!(QueryCaches<'tcx> => $name), + cache_on_disk: |tcx, key| ::rustc_middle::query::cached::$name(tcx, key), + execute_query: |tcx, key| erase(tcx.$name(key)), + compute: |tcx, key| { + __rust_begin_short_backtrace(|| + queries::$name::provided_to_erased( + tcx, + call_provider!([$($modifiers)*][tcx, $name, key]) + ) + ) + }, + can_load_from_disk: should_ever_cache_on_disk!([$($modifiers)*] true false), + try_load_from_disk: 
should_ever_cache_on_disk!([$($modifiers)*] { + |tcx, key, prev_index, index| { + if ::rustc_middle::query::cached::$name(tcx, key) { + let value = $crate::plumbing::try_load_from_disk::< + queries::$name::ProvidedValue<'tcx> + >( + tcx, + prev_index, + index, + ); + value.map(|value| queries::$name::provided_to_erased(tcx, value)) + } else { + None + } + } + } { + |_tcx, _key, _prev_index, _index| None + }), + value_from_cycle_error: |tcx, cycle| { + let result: queries::$name::Value<'tcx> = Value::from_cycle_error(tcx, cycle); + erase(result) + }, + loadable_from_disk: |_tcx, _key, _index| { + should_ever_cache_on_disk!([$($modifiers)*] { + ::rustc_middle::query::cached::$name(_tcx, _key) && + $crate::plumbing::loadable_from_disk(_tcx, _index) + } { + false + }) + }, + hash_result: hash_result!([$($modifiers)*][queries::$name::Value<'tcx>]), + format_value: |value| format!("{:?}", restore::<queries::$name::Value<'tcx>>(*value)), + } } - #[inline] - #[allow(unused_variables)] - fn compute(self, qcx: QueryCtxt<'tcx>, key: Self::Key) -> Self::Value { - query_provided_to_value::$name( - qcx.tcx, - call_provider!([$($modifiers)*][qcx, $name, key]) - ) + #[derive(Copy, Clone, Default)] + pub struct QueryType<'tcx> { + data: PhantomData<&'tcx ()> } - #[inline] - fn try_load_from_disk( - self, - _qcx: QueryCtxt<'tcx>, - _key: &Self::Key - ) -> rustc_query_system::query::TryLoadFromDisk<QueryCtxt<'tcx>, Self::Value> { - should_ever_cache_on_disk!([$($modifiers)*] { - if ::rustc_middle::query::cached::$name(_qcx.tcx, _key) { - Some(|qcx: QueryCtxt<'tcx>, dep_node| { - let value = $crate::plumbing::try_load_from_disk::<query_provided::$name<'tcx>>( - qcx, - dep_node - ); - value.map(|value| query_provided_to_value::$name(qcx.tcx, value)) - }) - } else { - None + impl<'tcx> QueryConfigRestored<'tcx> for QueryType<'tcx> { + type RestoredValue = queries::$name::Value<'tcx>; + type Config = DynamicConfig< + 'tcx, + queries::$name::Storage<'tcx>, + { is_anon!([$($modifiers)*]) }, + { depth_limit!([$($modifiers)*]) }, + { feedable!([$($modifiers)*]) }, + >; + + #[inline(always)] + fn config(tcx: TyCtxt<'tcx>) -> Self::Config { + DynamicConfig { + dynamic: &tcx.query_system.dynamic_queries.$name, } - } { - None - }) - } - - #[inline] - fn loadable_from_disk( - self, - _qcx: QueryCtxt<'tcx>, - _key: &Self::Key, - _index: SerializedDepNodeIndex, - ) -> bool { - should_ever_cache_on_disk!([$($modifiers)*] { - self.cache_on_disk(_qcx.tcx, _key) && - $crate::plumbing::loadable_from_disk(_qcx, _index) - } { - false - }) - } + } - #[inline] - fn value_from_cycle_error( - self, - tcx: TyCtxt<'tcx>, - cycle: &[QueryInfo<DepKind>], - ) -> Self::Value { - let result: query_values::$name<'tcx> = Value::from_cycle_error(tcx, cycle); - erase(result) + #[inline(always)] + fn restore(value: <Self::Config as QueryConfig<QueryCtxt<'tcx>>>::Value) -> Self::RestoredValue { + restore::<queries::$name::Value<'tcx>>(value) + } } - #[inline(always)] - fn anon(self) -> bool { - is_anon!([$($modifiers)*]) + pub fn try_collect_active_jobs<'tcx>(tcx: TyCtxt<'tcx>, qmap: &mut QueryMap<DepKind>) { + let make_query = |tcx, key| { + let kind = rustc_middle::dep_graph::DepKind::$name; + let name = stringify!($name); + $crate::plumbing::create_query_frame(tcx, rustc_middle::query::descs::$name, key, kind, name) + }; + tcx.query_system.states.$name.try_collect_active_jobs( + tcx, + make_query, + qmap, + ).unwrap(); } - #[inline(always)] - fn eval_always(self) -> bool { - is_eval_always!([$($modifiers)*]) + pub fn 
alloc_self_profile_query_strings<'tcx>(tcx: TyCtxt<'tcx>, string_cache: &mut QueryKeyStringCache) { + $crate::profiling_support::alloc_self_profile_query_strings_for_query_cache( + tcx, + stringify!($name), + &tcx.query_system.caches.$name, + string_cache, + ) } - #[inline(always)] - fn depth_limit(self) -> bool { - depth_limit!([$($modifiers)*]) - } + item_if_cached! { [$($modifiers)*] { + pub fn encode_query_results<'tcx>( + tcx: TyCtxt<'tcx>, + encoder: &mut CacheEncoder<'_, 'tcx>, + query_result_index: &mut EncodedDepNodeIndex + ) { + $crate::plumbing::encode_query_results::<query_impl::$name::QueryType<'tcx>>( + query_impl::$name::QueryType::config(tcx), + QueryCtxt::new(tcx), + encoder, + query_result_index, + ) + } + }} + })*} - #[inline(always)] - fn feedable(self) -> bool { - feedable!([$($modifiers)*]) + pub(crate) fn engine(incremental: bool) -> QueryEngine { + if incremental { + QueryEngine { + $($name: query_impl::$name::get_query_incr::__rust_end_short_backtrace,)* + } + } else { + QueryEngine { + $($name: query_impl::$name::get_query_non_incr::__rust_end_short_backtrace,)* + } } + } - #[inline(always)] - fn dep_kind(self) -> rustc_middle::dep_graph::DepKind { - dep_graph::DepKind::$name + pub fn dynamic_queries<'tcx>() -> DynamicQueries<'tcx> { + DynamicQueries { + $( + $name: query_impl::$name::dynamic_query(), + )* } + } - #[inline(always)] - fn handle_cycle_error(self) -> rustc_query_system::HandleCycleError { - handle_cycle_error!([$($modifiers)*]) - } + // These arrays are used for iteration and can't be indexed by `DepKind`. - #[inline(always)] - fn hash_result(self) -> rustc_query_system::query::HashResult<Self::Value> { - hash_result!([$($modifiers)*][query_values::$name<'tcx>]) - } - })* + const TRY_COLLECT_ACTIVE_JOBS: &[for<'tcx> fn(TyCtxt<'tcx>, &mut QueryMap<DepKind>)] = + &[$(query_impl::$name::try_collect_active_jobs),*]; - $(impl<'tcx> QueryConfigRestored<'tcx> for queries::$name<'tcx> { - type RestoredValue = query_values::$name<'tcx>; + const ALLOC_SELF_PROFILE_QUERY_STRINGS: &[ + for<'tcx> fn(TyCtxt<'tcx>, &mut QueryKeyStringCache) + ] = &[$(query_impl::$name::alloc_self_profile_query_strings),*]; - #[inline(always)] - fn restore(value: <Self as QueryConfig<QueryCtxt<'tcx>>>::Value) -> Self::RestoredValue { - restore::<query_values::$name<'tcx>>(value) - } - })* + const ENCODE_QUERY_RESULTS: &[ + Option<for<'tcx> fn( + TyCtxt<'tcx>, + &mut CacheEncoder<'_, 'tcx>, + &mut EncodedDepNodeIndex) + > + ] = &[$(expand_if_cached!([$($modifiers)*], query_impl::$name::encode_query_results)),*]; #[allow(nonstandard_style)] mod query_callbacks { @@ -676,164 +775,15 @@ macro_rules! 
define_queries { } $(pub(crate) fn $name<'tcx>()-> DepKindStruct<'tcx> { - $crate::plumbing::query_callback::<queries::$name<'tcx>>( + $crate::plumbing::query_callback::<query_impl::$name::QueryType<'tcx>>( is_anon!([$($modifiers)*]), is_eval_always!([$($modifiers)*]), ) })* } - mod query_structs { - use rustc_middle::ty::TyCtxt; - use $crate::plumbing::{QueryStruct, QueryCtxt}; - use $crate::profiling_support::QueryKeyStringCache; - use rustc_query_system::query::QueryMap; - use rustc_middle::dep_graph::DepKind; - - pub(super) const fn dummy_query_struct<'tcx>() -> QueryStruct<'tcx> { - fn noop_try_collect_active_jobs(_: QueryCtxt<'_>, _: &mut QueryMap<DepKind>) -> Option<()> { - None - } - fn noop_alloc_self_profile_query_strings(_: TyCtxt<'_>, _: &mut QueryKeyStringCache) {} - - QueryStruct { - try_collect_active_jobs: noop_try_collect_active_jobs, - alloc_self_profile_query_strings: noop_alloc_self_profile_query_strings, - encode_query_results: None, - } - } - - pub(super) use dummy_query_struct as Null; - pub(super) use dummy_query_struct as Red; - pub(super) use dummy_query_struct as TraitSelect; - pub(super) use dummy_query_struct as CompileCodegenUnit; - pub(super) use dummy_query_struct as CompileMonoItem; - - $( - pub(super) const fn $name<'tcx>() -> QueryStruct<'tcx> { QueryStruct { - try_collect_active_jobs: |tcx, qmap| { - let make_query = |tcx, key| { - let kind = rustc_middle::dep_graph::DepKind::$name; - let name = stringify!($name); - $crate::plumbing::create_query_frame(tcx, rustc_middle::query::descs::$name, key, kind, name) - }; - tcx.queries.$name.try_collect_active_jobs( - tcx, - make_query, - qmap, - ) - }, - alloc_self_profile_query_strings: |tcx, string_cache| { - $crate::profiling_support::alloc_self_profile_query_strings_for_query_cache( - tcx, - stringify!($name), - &tcx.query_system.caches.$name, - string_cache, - ) - }, - encode_query_results: expand_if_cached!([$($modifiers)*], |qcx, encoder, query_result_index| - $crate::on_disk_cache::encode_query_results::<super::queries::$name<'tcx>>( - super::queries::$name::default(), - qcx, - encoder, - query_result_index, - ) - ), - }})* - } - pub fn query_callbacks<'tcx>(arena: &'tcx Arena<'tcx>) -> &'tcx [DepKindStruct<'tcx>] { arena.alloc_from_iter(make_dep_kind_array!(query_callbacks)) } } } - -use crate::{ExternProviders, OnDiskCache, Providers}; - -impl<'tcx> Queries<'tcx> { - pub fn new( - local_providers: Providers, - extern_providers: ExternProviders, - on_disk_cache: Option<OnDiskCache<'tcx>>, - ) -> Self { - use crate::query_structs; - Queries { - local_providers: Box::new(local_providers), - extern_providers: Box::new(extern_providers), - query_structs: make_dep_kind_array!(query_structs).to_vec(), - on_disk_cache, - jobs: AtomicU64::new(1), - ..Queries::default() - } - } -} - -macro_rules! 
define_queries_struct { - ( - input: ($(([$($modifiers:tt)*] [$($attr:tt)*] [$name:ident]))*)) => { - #[derive(Default)] - pub struct Queries<'tcx> { - local_providers: Box<Providers>, - extern_providers: Box<ExternProviders>, - query_structs: Vec<$crate::plumbing::QueryStruct<'tcx>>, - pub on_disk_cache: Option<OnDiskCache<'tcx>>, - jobs: AtomicU64, - - $( - $(#[$attr])* - $name: QueryState< - <queries::$name<'tcx> as QueryConfig<QueryCtxt<'tcx>>>::Key, - rustc_middle::dep_graph::DepKind, - >, - )* - } - - impl<'tcx> Queries<'tcx> { - pub(crate) fn try_collect_active_jobs( - &'tcx self, - tcx: TyCtxt<'tcx>, - ) -> Option<QueryMap<rustc_middle::dep_graph::DepKind>> { - let tcx = QueryCtxt { tcx, queries: self }; - let mut jobs = QueryMap::default(); - - for query in &self.query_structs { - (query.try_collect_active_jobs)(tcx, &mut jobs); - } - - Some(jobs) - } - } - - impl<'tcx> QueryEngine<'tcx> for Queries<'tcx> { - fn as_any(&'tcx self) -> &'tcx dyn std::any::Any { - let this = unsafe { std::mem::transmute::<&Queries<'_>, &Queries<'_>>(self) }; - this as _ - } - - fn try_mark_green(&'tcx self, tcx: TyCtxt<'tcx>, dep_node: &dep_graph::DepNode) -> bool { - let qcx = QueryCtxt { tcx, queries: self }; - tcx.dep_graph.try_mark_green(qcx, dep_node).is_some() - } - - $($(#[$attr])* - #[inline(always)] - #[tracing::instrument(level = "trace", skip(self, tcx))] - fn $name( - &'tcx self, - tcx: TyCtxt<'tcx>, - span: Span, - key: query_keys::$name<'tcx>, - mode: QueryMode, - ) -> Option<Erase<query_values::$name<'tcx>>> { - let qcx = QueryCtxt { tcx, queries: self }; - get_query( - queries::$name::default(), - qcx, - span, - key, - mode - ) - })* - } - }; -} diff --git a/compiler/rustc_query_impl/src/profiling_support.rs b/compiler/rustc_query_impl/src/profiling_support.rs index 4743170e9..fbc6db93e 100644 --- a/compiler/rustc_query_impl/src/profiling_support.rs +++ b/compiler/rustc_query_impl/src/profiling_support.rs @@ -1,24 +1,13 @@ -use crate::QueryCtxt; use measureme::{StringComponent, StringId}; -use rustc_data_structures::fx::FxHashMap; use rustc_data_structures::profiling::SelfProfiler; use rustc_hir::def_id::{CrateNum, DefId, DefIndex, LocalDefId, LOCAL_CRATE}; use rustc_hir::definitions::DefPathData; -use rustc_middle::ty::{TyCtxt, WithOptConstParam}; +use rustc_middle::query::plumbing::QueryKeyStringCache; +use rustc_middle::ty::TyCtxt; use rustc_query_system::query::QueryCache; use std::fmt::Debug; use std::io::Write; -pub(crate) struct QueryKeyStringCache { - def_id_cache: FxHashMap<DefId, StringId>, -} - -impl QueryKeyStringCache { - fn new() -> QueryKeyStringCache { - QueryKeyStringCache { def_id_cache: Default::default() } - } -} - struct QueryKeyStringBuilder<'p, 'tcx> { profiler: &'p SelfProfiler, tcx: TyCtxt<'tcx>, @@ -151,37 +140,6 @@ impl SpecIntoSelfProfilingString for LocalDefId { } } -impl<T: SpecIntoSelfProfilingString> SpecIntoSelfProfilingString for WithOptConstParam<T> { - fn spec_to_self_profile_string(&self, builder: &mut QueryKeyStringBuilder<'_, '_>) -> StringId { - // We print `WithOptConstParam` values as tuples to make them shorter - // and more readable, without losing information: - // - // "WithOptConstParam { did: foo::bar, const_param_did: Some(foo::baz) }" - // becomes "(foo::bar, foo::baz)" and - // "WithOptConstParam { did: foo::bar, const_param_did: None }" - // becomes "(foo::bar, _)". 
- - let did = StringComponent::Ref(self.did.to_self_profile_string(builder)); - - let const_param_did = if let Some(const_param_did) = self.const_param_did { - let const_param_did = builder.def_id_to_string_id(const_param_did); - StringComponent::Ref(const_param_did) - } else { - StringComponent::Value("_") - }; - - let components = [ - StringComponent::Value("("), - did, - StringComponent::Value(", "), - const_param_did, - StringComponent::Value(")"), - ]; - - builder.profiler.alloc_string(&components[..]) - } -} - impl<T0, T1> SpecIntoSelfProfilingString for (T0, T1) where T0: SpecIntoSelfProfilingString, @@ -231,7 +189,7 @@ pub(crate) fn alloc_self_profile_query_strings_for_query_cache<'tcx, C>( // locked while doing so. Instead we copy out the // `(query_key, dep_node_index)` pairs and release the lock again. let mut query_keys_and_indices = Vec::new(); - query_cache.iter(&mut |k, _, i| query_keys_and_indices.push((k.clone(), i))); + query_cache.iter(&mut |k, _, i| query_keys_and_indices.push((*k, i))); // Now actually allocate the strings. If allocating the strings // generates new entries in the query cache, we'll miss them but @@ -284,9 +242,8 @@ pub fn alloc_self_profile_query_strings(tcx: TyCtxt<'_>) { } let mut string_cache = QueryKeyStringCache::new(); - let queries = QueryCtxt::from_tcx(tcx); - for query in &queries.queries.query_structs { - (query.alloc_self_profile_query_strings)(tcx, &mut string_cache); + for alloc in super::ALLOC_SELF_PROFILE_QUERY_STRINGS.iter() { + alloc(tcx, &mut string_cache) } } |
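
Note on the `DynamicQuery` wiring introduced above: instead of generating a separate `QueryConfig` impl per query, `define_queries!` now fills in one `DynamicQuery` value per query and records, via `offset_of!`, where that query's state and cache live inside the shared `QueryStates`/`QueryCaches` structs. The sketch below is not part of the patch; it shows the `field-offset` pattern in isolation, with invented stand-in types (a `QueryStates` holding plain `u32` counters) and assuming the crate's public `offset_of!`/`FieldOffset` API.

use field_offset::{offset_of, FieldOffset};

// Invented stand-in for rustc's per-query state container.
#[derive(Default)]
struct QueryStates {
    type_of: u32,
    mir_built: u32,
}

// Invented, heavily trimmed stand-in for the patch's `DynamicQuery`.
struct DynamicQuery {
    name: &'static str,
    // Where this query's state lives inside the shared `QueryStates` value.
    query_state: FieldOffset<QueryStates, u32>,
}

// One shared, non-generic code path: resolve the per-query field through the
// stored offset instead of monomorphizing over a query type parameter.
fn bump(states: &mut QueryStates, query: &DynamicQuery) {
    *query.query_state.apply_mut(states) += 1;
}

fn main() {
    let mut states = QueryStates::default();
    let queries = [
        DynamicQuery { name: "type_of", query_state: offset_of!(QueryStates => type_of) },
        DynamicQuery { name: "mir_built", query_state: offset_of!(QueryStates => mir_built) },
    ];
    for query in &queries {
        bump(&mut states, query);
        println!("{} -> {}", query.name, query.query_state.apply(&states));
    }
}

Keeping the offset as plain data is what lets the `DynamicQueries` table built by the macro drive every query through the same non-generic plumbing.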
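
Note on `item_if_cached!` and `expand_if_cached!` above: both are tt-muncher macros that walk the bracketed modifier list attached to each query and expand their payload only when a given modifier, here `(cache)`, is present. A minimal, self-contained illustration follows; the macro body is copied from the hunk above, while the invocations and the emitted function are invented for the example.

macro_rules! item_if_cached {
    ([] $tokens:tt) => {};
    ([(cache) $($rest:tt)*] { $($tokens:tt)* }) => {
        $($tokens)*
    };
    ([$other:tt $($modifiers:tt)*] $tokens:tt) => {
        item_if_cached! { [$($modifiers)*] $tokens }
    };
}

// `(cache)` is among the modifiers, so the item is emitted...
item_if_cached! { [(anon) (cache)] { fn cached_only() -> &'static str { "cached" } } }
// ...while this invocation recurses down to the empty list and emits nothing.
item_if_cached! { [(anon)] { fn never_emitted() {} } }

fn main() {
    println!("{}", cached_only());
}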
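
Note on the `__rust_begin_short_backtrace`/`__rust_end_short_backtrace` pair above: as the hunk's comments say, the goal is to hide query-system frames in the default `RUST_BACKTRACE=1` output while keeping them under `RUST_BACKTRACE=full`. This appears to rely on std's short-backtrace printer matching these two symbol names and omitting the frames that fall between an inner `__rust_begin_short_backtrace` marker and the next outer `__rust_end_short_backtrace` marker. A standalone sketch of the pattern, with invented function names around the two markers (only the marker names matter):

use std::hint::black_box;

// Outer marker: frames further out than this (e.g. `main` here) stay visible.
#[inline(never)]
fn __rust_end_short_backtrace<T>(f: impl FnOnce() -> T) -> T {
    let result = f();
    // Keep this frame alive so the symbol shows up in the captured backtrace.
    black_box(());
    result
}

// Inner marker: frames between this and the outer marker get trimmed.
#[inline(never)]
fn __rust_begin_short_backtrace<T>(f: impl FnOnce() -> T) -> T {
    let result = f();
    black_box(());
    result
}

// Stand-in for query-system plumbing that should not clutter user backtraces.
fn plumbing_we_want_hidden() {
    __rust_begin_short_backtrace(|| panic!("provider failed"));
}

fn main() {
    // Run with RUST_BACKTRACE=1: the `plumbing_we_want_hidden` frame should be
    // omitted from the short backtrace; RUST_BACKTRACE=full prints everything.
    __rust_end_short_backtrace(plumbing_we_want_hidden);
}

This mirrors the patch: the per-query `__rust_end_short_backtrace` wrappers sit at the query entry points in `QueryEngine`, and `__rust_begin_short_backtrace` wraps the call into each provider, so only the provider frames survive in the short form.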