author:    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-17 12:02:58 +0000
committer: Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-17 12:02:58 +0000
commit:    698f8c2f01ea549d77d7dc3338a12e04c11057b9 (patch)
tree:      173a775858bd501c378080a10dca74132f05bc50 /compiler/rustc_monomorphize
parent:    Initial commit. (diff)
download:  rustc-698f8c2f01ea549d77d7dc3338a12e04c11057b9.tar.xz / rustc-698f8c2f01ea549d77d7dc3338a12e04c11057b9.zip
Adding upstream version 1.64.0+dfsg1. (tag: upstream/1.64.0+dfsg1)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'compiler/rustc_monomorphize'):

 compiler/rustc_monomorphize/Cargo.toml                  |   18
 compiler/rustc_monomorphize/src/collector.rs            | 1463
 compiler/rustc_monomorphize/src/lib.rs                  |   49
 compiler/rustc_monomorphize/src/partitioning/default.rs |  560
 compiler/rustc_monomorphize/src/partitioning/merging.rs |  111
 compiler/rustc_monomorphize/src/partitioning/mod.rs     |  515
 compiler/rustc_monomorphize/src/polymorphize.rs         |  385
 compiler/rustc_monomorphize/src/util.rs                 |   70

 8 files changed, 3171 insertions, 0 deletions
diff --git a/compiler/rustc_monomorphize/Cargo.toml b/compiler/rustc_monomorphize/Cargo.toml new file mode 100644 index 000000000..41ba4d4b6 --- /dev/null +++ b/compiler/rustc_monomorphize/Cargo.toml @@ -0,0 +1,18 @@ +[package] +name = "rustc_monomorphize" +version = "0.0.0" +edition = "2021" + +[lib] +doctest = false + +[dependencies] +smallvec = { version = "1.8.1", features = ["union", "may_dangle"] } +tracing = "0.1" +rustc_data_structures = { path = "../rustc_data_structures" } +rustc_hir = { path = "../rustc_hir" } +rustc_index = { path = "../rustc_index" } +rustc_middle = { path = "../rustc_middle" } +rustc_session = { path = "../rustc_session" } +rustc_span = { path = "../rustc_span" } +rustc_target = { path = "../rustc_target" } diff --git a/compiler/rustc_monomorphize/src/collector.rs b/compiler/rustc_monomorphize/src/collector.rs new file mode 100644 index 000000000..68b65658c --- /dev/null +++ b/compiler/rustc_monomorphize/src/collector.rs @@ -0,0 +1,1463 @@ +//! Mono Item Collection +//! ==================== +//! +//! This module is responsible for discovering all items that will contribute +//! to code generation of the crate. The important part here is that it not only +//! needs to find syntax-level items (functions, structs, etc) but also all +//! their monomorphized instantiations. Every non-generic, non-const function +//! maps to one LLVM artifact. Every generic function can produce +//! from zero to N artifacts, depending on the sets of type arguments it +//! is instantiated with. +//! This also applies to generic items from other crates: A generic definition +//! in crate X might produce monomorphizations that are compiled into crate Y. +//! We also have to collect these here. +//! +//! The following kinds of "mono items" are handled here: +//! +//! - Functions +//! - Methods +//! - Closures +//! - Statics +//! - Drop glue +//! +//! The following things also result in LLVM artifacts, but are not collected +//! here, since we instantiate them locally on demand when needed in a given +//! codegen unit: +//! +//! - Constants +//! - VTables +//! - Object Shims +//! +//! +//! General Algorithm +//! ----------------- +//! Let's define some terms first: +//! +//! - A "mono item" is something that results in a function or global in +//! the LLVM IR of a codegen unit. Mono items do not stand on their +//! own, they can reference other mono items. For example, if function +//! `foo()` calls function `bar()` then the mono item for `foo()` +//! references the mono item for function `bar()`. In general, the +//! definition for mono item A referencing a mono item B is that +//! the LLVM artifact produced for A references the LLVM artifact produced +//! for B. +//! +//! - Mono items and the references between them form a directed graph, +//! where the mono items are the nodes and references form the edges. +//! Let's call this graph the "mono item graph". +//! +//! - The mono item graph for a program contains all mono items +//! that are needed in order to produce the complete LLVM IR of the program. +//! +//! The purpose of the algorithm implemented in this module is to build the +//! mono item graph for the current crate. It runs in two phases: +//! +//! 1. Discover the roots of the graph by traversing the HIR of the crate. +//! 2. Starting from the roots, find neighboring nodes by inspecting the MIR +//! representation of the item corresponding to a given node, until no more +//! new nodes are found. +//! +//! ### Discovering roots +//! +//! 
The roots of the mono item graph correspond to the public non-generic +//! syntactic items in the source code. We find them by walking the HIR of the +//! crate, and whenever we hit upon a public function, method, or static item, +//! we create a mono item consisting of the item's `DefId` and, since we only +//! consider non-generic items, an empty type-substitution set. (In eager +//! collection mode, during incremental compilation, all non-generic functions +//! are considered as roots, as well as when the `-Clink-dead-code` option is +//! specified. Functions marked `#[no_mangle]` and functions called by inlinable +//! functions also always act as roots.) +//! +//! ### Finding neighbor nodes +//! Given a mono item node, we can discover neighbors by inspecting its +//! MIR. We walk the MIR and any time we hit upon something that signifies a +//! reference to another mono item, we have found a neighbor. Since the +//! mono item we are currently at is always monomorphic, we also know the +//! concrete type arguments of its neighbors, and so all neighbors again will be +//! monomorphic. The specific forms a reference to a neighboring node can take +//! in MIR are quite diverse. Here is an overview: +//! +//! #### Calling Functions/Methods +//! The most obvious form of one mono item referencing another is a +//! function or method call (represented by a CALL terminator in MIR). But +//! calls are not the only thing that might introduce a reference between two +//! function mono items, and as we will see below, they are just a +//! specialization of the form described next, and consequently will not get any +//! special treatment in the algorithm. +//! +//! #### Taking a reference to a function or method +//! A function does not need to actually be called in order to be a neighbor of +//! another function. It suffices to just take a reference in order to introduce +//! an edge. Consider the following example: +//! +//! ``` +//! # use core::fmt::Display; +//! fn print_val<T: Display>(x: T) { +//! println!("{}", x); +//! } +//! +//! fn call_fn(f: &dyn Fn(i32), x: i32) { +//! f(x); +//! } +//! +//! fn main() { +//! let print_i32 = print_val::<i32>; +//! call_fn(&print_i32, 0); +//! } +//! ``` +//! None of these functions' MIR will contain an explicit call to +//! `print_val::<i32>`. Nonetheless, in order to monomorphize this program, we need +//! an instance of this function. Thus, whenever we encounter a function or +//! method in operand position, we treat it as a neighbor of the current +//! mono item. Calls are just a special case of that. +//! +//! #### Closures +//! In a way, closures are a simple case. Since every closure object needs to be +//! constructed somewhere, we can reliably discover them by observing +//! `RValue::Aggregate` expressions with `AggregateKind::Closure`. This is also +//! true for closures inlined from other crates. +//! +//! #### Drop glue +//! Drop glue mono items are introduced by MIR drop-statements. The +//! generated mono item will again have drop-glue item neighbors if the +//! type to be dropped contains nested values that also need to be dropped. It +//! might also have a function item neighbor for the explicit `Drop::drop` +//! implementation of its type. +//! +//! #### Unsizing Casts +//! A subtle way of introducing neighbor edges is by casting to a trait object. +//! Since the resulting fat-pointer contains a reference to a vtable, we need to +//! instantiate all object-safe methods of the trait, as we need to store +//!
pointers to these functions even if they never get called anywhere. This can +//! be seen as a special case of taking a function reference. +//! +//! #### Boxes +//! Since `Box` expressions have special compiler support, calls to +//! `exchange_malloc()` and `box_free()` do not show up explicitly in MIR, even +//! though the compiler will generate them. We have to observe `Rvalue::Box` expressions +//! and Box-typed drop-statements for that purpose. +//! +//! +//! Interaction with Cross-Crate Inlining +//! ------------------------------------- +//! The binary of a crate will not only contain machine code for the items +//! defined in the source code of that crate. It will also contain monomorphic +//! instantiations of any extern generic functions and of functions marked with +//! `#[inline]`. +//! The collection algorithm handles this more or less transparently. If it is +//! about to create a mono item for something with an external `DefId`, +//! it will check whether the MIR for that item is available, and if so just +//! proceed normally. If the MIR is not available, it assumes that the item is +//! just linked to, and no node is created, which is exactly what we want, since +//! no machine code should be generated in the current crate for such an item. +//! +//! Eager and Lazy Collection Mode +//! ------------------------------ +//! Mono item collection can be performed in one of two modes: +//! +//! - Lazy mode means that items will only be instantiated when actually +//! referenced. The goal is to produce the least amount of machine code +//! possible. +//! +//! - Eager mode is meant to be used in conjunction with incremental compilation +//! where a stable set of mono items is more important than a minimal +//! one. Thus, eager mode will instantiate drop-glue for every droppable type +//! in the crate, even if no drop call for that type exists (yet). It will +//! also instantiate default implementations of trait methods, something that +//! otherwise is only done on demand. +//! +//! +//! Open Issues +//! ----------- +//! Some things are not yet fully implemented in the current version of this +//! module. +//! +//! ### Const Fns +//! Ideally, no mono item should be generated for const fns unless there +//! is a call to them that cannot be evaluated at compile time. At the moment, +//! however, this is not implemented: a mono item will be produced +//! regardless of whether it is actually needed or not.
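To make the edge kinds described above concrete, here is a small free-standing program (an illustrative example, not code from this crate) annotated with the neighbor edges a collection run would discover under the rules just sketched:

```
trait Speak {
    fn speak(&self) -> String;
}

struct Dog;

impl Speak for Dog {
    fn speak(&self) -> String {
        // The call below adds an edge to the monomorphized `to_string`
        // instance; whatever later drops the returned `String` also gains
        // a drop-glue neighbor for `String`.
        "woof".to_string()
    }
}

fn twice<T: Clone>(x: T) -> (T, T) {
    (x.clone(), x)
}

fn main() {
    // Direct call (a CALL terminator in MIR): edge to `twice::<i32>`.
    let _pair = twice(1_i32);

    // Function mentioned in operand position without being called:
    // still an edge, here to `twice::<u8>`.
    let _f: fn(u8) -> (u8, u8) = twice::<u8>;

    // Closure: discovered via an `Rvalue::Aggregate` with
    // `AggregateKind::Closure`.
    let add_one = |n: i32| n + 1;
    let _ = add_one(2);

    // Unsizing cast to a trait object: the vtable forces instantiation
    // of all object-safe methods, so `<Dog as Speak>::speak` and the
    // drop glue for `Dog` become neighbors even though `speak` is never
    // called here.
    let _obj: &dyn Speak = &Dog;
}
```

In lazy collection mode, everything above is reached from the `main` root through these edges; eager mode would additionally instantiate drop glue for every droppable type in the crate.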
+ +use rustc_data_structures::fx::{FxHashMap, FxHashSet}; +use rustc_data_structures::sync::{par_for_each_in, MTLock, MTRef}; +use rustc_hir as hir; +use rustc_hir::def::DefKind; +use rustc_hir::def_id::{DefId, DefIdMap, LocalDefId}; +use rustc_hir::lang_items::LangItem; +use rustc_index::bit_set::GrowableBitSet; +use rustc_middle::mir::interpret::{AllocId, ConstValue}; +use rustc_middle::mir::interpret::{ErrorHandled, GlobalAlloc, Scalar}; +use rustc_middle::mir::mono::{InstantiationMode, MonoItem}; +use rustc_middle::mir::visit::Visitor as MirVisitor; +use rustc_middle::mir::{self, Local, Location}; +use rustc_middle::ty::adjustment::{CustomCoerceUnsized, PointerCast}; +use rustc_middle::ty::print::with_no_trimmed_paths; +use rustc_middle::ty::subst::{GenericArgKind, InternalSubsts}; +use rustc_middle::ty::{ + self, GenericParamDefKind, Instance, Ty, TyCtxt, TypeFoldable, TypeVisitable, VtblEntry, +}; +use rustc_middle::{middle::codegen_fn_attrs::CodegenFnAttrFlags, mir::visit::TyContext}; +use rustc_session::config::EntryFnType; +use rustc_session::lint::builtin::LARGE_ASSIGNMENTS; +use rustc_session::Limit; +use rustc_span::source_map::{dummy_spanned, respan, Span, Spanned, DUMMY_SP}; +use rustc_target::abi::Size; +use std::iter; +use std::ops::Range; +use std::path::PathBuf; + +#[derive(PartialEq)] +pub enum MonoItemCollectionMode { + Eager, + Lazy, +} + +/// Maps every mono item to all mono items it references in its +/// body. +pub struct InliningMap<'tcx> { + // Maps a source mono item to the range of mono items + // accessed by it. + // The range selects elements within the `targets` vec. + index: FxHashMap<MonoItem<'tcx>, Range<usize>>, + targets: Vec<MonoItem<'tcx>>, + + // Contains one bit per mono item in the `targets` field. That bit + // is true if that mono item needs to be inlined into every CGU. + inlines: GrowableBitSet<usize>, +} + +/// Struct to store the mono items found during each collection step and +/// whether they should be inlined. We call `instantiation_mode` to get their +/// inlining status when inserting new elements, which avoids calling it in +/// `inlining_map.lock_mut()`. See the `collect_items_rec` implementation +/// below. +struct MonoItems<'tcx> { + // If this is false, we do not need to compute whether items + // will need to be inlined. + compute_inlining: bool, + + // The TyCtxt used to determine whether an item should + // be inlined. + tcx: TyCtxt<'tcx>, + + // The collected mono items. The bool field in each element + // indicates whether this element should be inlined.
+ items: Vec<(Spanned<MonoItem<'tcx>>, bool /*inlined*/)>, +} + +impl<'tcx> MonoItems<'tcx> { + #[inline] + fn push(&mut self, item: Spanned<MonoItem<'tcx>>) { + self.extend([item]); + } + + #[inline] + fn extend<T: IntoIterator<Item = Spanned<MonoItem<'tcx>>>>(&mut self, iter: T) { + self.items.extend(iter.into_iter().map(|mono_item| { + let inlined = if !self.compute_inlining { + false + } else { + mono_item.node.instantiation_mode(self.tcx) == InstantiationMode::LocalCopy + }; + (mono_item, inlined) + })) + } +} + +impl<'tcx> InliningMap<'tcx> { + fn new() -> InliningMap<'tcx> { + InliningMap { + index: FxHashMap::default(), + targets: Vec::new(), + inlines: GrowableBitSet::with_capacity(1024), + } + } + + fn record_accesses<'a>( + &mut self, + source: MonoItem<'tcx>, + new_targets: &'a [(Spanned<MonoItem<'tcx>>, bool)], + ) where + 'tcx: 'a, + { + let start_index = self.targets.len(); + let new_items_count = new_targets.len(); + let new_items_count_total = new_items_count + self.targets.len(); + + self.targets.reserve(new_items_count); + self.inlines.ensure(new_items_count_total); + + for (i, (Spanned { node: mono_item, .. }, inlined)) in new_targets.into_iter().enumerate() { + self.targets.push(*mono_item); + if *inlined { + self.inlines.insert(i + start_index); + } + } + + let end_index = self.targets.len(); + assert!(self.index.insert(source, start_index..end_index).is_none()); + } + + // Internally iterate over all items referenced by `source` which will be + // made available for inlining. + pub fn with_inlining_candidates<F>(&self, source: MonoItem<'tcx>, mut f: F) + where + F: FnMut(MonoItem<'tcx>), + { + if let Some(range) = self.index.get(&source) { + for (i, candidate) in self.targets[range.clone()].iter().enumerate() { + if self.inlines.contains(range.start + i) { + f(*candidate); + } + } + } + } + + // Internally iterate over all items and the things each accesses. + pub fn iter_accesses<F>(&self, mut f: F) + where + F: FnMut(MonoItem<'tcx>, &[MonoItem<'tcx>]), + { + for (&accessor, range) in &self.index { + f(accessor, &self.targets[range.clone()]) + } + } +} + +#[instrument(skip(tcx, mode), level = "debug")] +pub fn collect_crate_mono_items( + tcx: TyCtxt<'_>, + mode: MonoItemCollectionMode, +) -> (FxHashSet<MonoItem<'_>>, InliningMap<'_>) { + let _prof_timer = tcx.prof.generic_activity("monomorphization_collector"); + + let roots = + tcx.sess.time("monomorphization_collector_root_collections", || collect_roots(tcx, mode)); + + debug!("building mono item graph, beginning at roots"); + + let mut visited = MTLock::new(FxHashSet::default()); + let mut inlining_map = MTLock::new(InliningMap::new()); + let recursion_limit = tcx.recursion_limit(); + + { + let visited: MTRef<'_, _> = &mut visited; + let inlining_map: MTRef<'_, _> = &mut inlining_map; + + tcx.sess.time("monomorphization_collector_graph_walk", || { + par_for_each_in(roots, |root| { + let mut recursion_depths = DefIdMap::default(); + collect_items_rec( + tcx, + dummy_spanned(root), + visited, + &mut recursion_depths, + recursion_limit, + inlining_map, + ); + }); + }); + } + + (visited.into_inner(), inlining_map.into_inner()) +} + +// Find all non-generic items by walking the HIR. These items serve as roots to +// start monomorphizing from. 
+#[instrument(skip(tcx, mode), level = "debug")] +fn collect_roots(tcx: TyCtxt<'_>, mode: MonoItemCollectionMode) -> Vec<MonoItem<'_>> { + debug!("collecting roots"); + let mut roots = MonoItems { compute_inlining: false, tcx, items: Vec::new() }; + + { + let entry_fn = tcx.entry_fn(()); + + debug!("collect_roots: entry_fn = {:?}", entry_fn); + + let mut collector = RootCollector { tcx, mode, entry_fn, output: &mut roots }; + + let crate_items = tcx.hir_crate_items(()); + + for id in crate_items.items() { + collector.process_item(id); + } + + for id in crate_items.impl_items() { + collector.process_impl_item(id); + } + + collector.push_extra_entry_roots(); + } + + // We can only codegen items that are instantiable - items all of + // whose predicates hold. Luckily, items that aren't instantiable + // can't actually be used, so we can just skip codegenning them. + roots + .items + .into_iter() + .filter_map(|(Spanned { node: mono_item, .. }, _)| { + mono_item.is_instantiable(tcx).then_some(mono_item) + }) + .collect() +} + +/// Collect all monomorphized items reachable from `starting_point`, and emit a note diagnostic if a +/// post-monomorphization error is encountered during a collection step. +#[instrument(skip(tcx, visited, recursion_depths, recursion_limit, inlining_map), level = "debug")] +fn collect_items_rec<'tcx>( + tcx: TyCtxt<'tcx>, + starting_point: Spanned<MonoItem<'tcx>>, + visited: MTRef<'_, MTLock<FxHashSet<MonoItem<'tcx>>>>, + recursion_depths: &mut DefIdMap<usize>, + recursion_limit: Limit, + inlining_map: MTRef<'_, MTLock<InliningMap<'tcx>>>, +) { + if !visited.lock_mut().insert(starting_point.node) { + // We've been here already, no need to search again. + return; + } + debug!("BEGIN collect_items_rec({})", starting_point.node); + + let mut neighbors = MonoItems { compute_inlining: true, tcx, items: Vec::new() }; + let recursion_depth_reset; + + // + // Post-monomorphization errors MVP + // + // We can encounter errors while monomorphizing an item, but we don't have a good way of + // showing a complete stack of spans ultimately leading to collecting the erroneous one yet. + // (It's also currently unclear exactly which diagnostics and information would be interesting + // to report in such cases) + // + // This leads to suboptimal error reporting: a post-monomorphization error (PME) will be + // shown with just a spanned piece of code causing the error, without information on where + // it was called from. This is especially obscure if the erroneous mono item is in a + // dependency. See for example issue #85155, where, before minimization, a PME happened two + // crates downstream from libcore's stdarch, without a way to know which dependency was the + // cause. + // + // If such an error occurs in the current crate, its span will be enough to locate the + // source. If the cause is in another crate, the goal here is to quickly locate which mono + // item in the current crate is ultimately responsible for causing the error. + // + // To give at least _some_ context to the user: while collecting mono items, we check the + // error count. If it has changed, a PME occurred, and we trigger some diagnostics about the + // current step of mono items collection. + // + // FIXME: don't rely on global state, instead bubble up errors. Note: this is very hard to do.
+ let error_count = tcx.sess.diagnostic().err_count(); + + match starting_point.node { + MonoItem::Static(def_id) => { + let instance = Instance::mono(tcx, def_id); + + // Sanity check whether this ended up being collected accidentally + debug_assert!(should_codegen_locally(tcx, &instance)); + + let ty = instance.ty(tcx, ty::ParamEnv::reveal_all()); + visit_drop_use(tcx, ty, true, starting_point.span, &mut neighbors); + + recursion_depth_reset = None; + + if let Ok(alloc) = tcx.eval_static_initializer(def_id) { + for &id in alloc.inner().relocations().values() { + collect_miri(tcx, id, &mut neighbors); + } + } + } + MonoItem::Fn(instance) => { + // Sanity check whether this ended up being collected accidentally + debug_assert!(should_codegen_locally(tcx, &instance)); + + // Keep track of the monomorphization recursion depth + recursion_depth_reset = Some(check_recursion_limit( + tcx, + instance, + starting_point.span, + recursion_depths, + recursion_limit, + )); + check_type_length_limit(tcx, instance); + + rustc_data_structures::stack::ensure_sufficient_stack(|| { + collect_neighbours(tcx, instance, &mut neighbors); + }); + } + MonoItem::GlobalAsm(item_id) => { + recursion_depth_reset = None; + + let item = tcx.hir().item(item_id); + if let hir::ItemKind::GlobalAsm(asm) = item.kind { + for (op, op_sp) in asm.operands { + match op { + hir::InlineAsmOperand::Const { .. } => { + // Only constants which resolve to a plain integer + // are supported. Therefore the value should not + // depend on any other items. + } + hir::InlineAsmOperand::SymFn { anon_const } => { + let fn_ty = + tcx.typeck_body(anon_const.body).node_type(anon_const.hir_id); + visit_fn_use(tcx, fn_ty, false, *op_sp, &mut neighbors); + } + hir::InlineAsmOperand::SymStatic { path: _, def_id } => { + let instance = Instance::mono(tcx, *def_id); + if should_codegen_locally(tcx, &instance) { + trace!("collecting static {:?}", def_id); + neighbors.push(dummy_spanned(MonoItem::Static(*def_id))); + } + } + hir::InlineAsmOperand::In { .. } + | hir::InlineAsmOperand::Out { .. } + | hir::InlineAsmOperand::InOut { .. } + | hir::InlineAsmOperand::SplitInOut { .. } => { + span_bug!(*op_sp, "invalid operand type for global_asm!") + } + } + } + } else { + span_bug!(item.span, "Mismatch between hir::Item type and MonoItem type") + } + } + } + + // Check for PMEs and emit a diagnostic if one happened, to try to show relevant edges of the + // mono item graph. + if tcx.sess.diagnostic().err_count() > error_count + && starting_point.node.is_generic_fn() + && starting_point.node.is_user_defined() + { + let formatted_item = with_no_trimmed_paths!(starting_point.node.to_string()); + tcx.sess.span_note_without_error( + starting_point.span, + &format!("the above error was encountered while instantiating `{}`", formatted_item), + ); + } + inlining_map.lock_mut().record_accesses(starting_point.node, &neighbors.items); + + for (neighbour, _) in neighbors.items { + collect_items_rec(tcx, neighbour, visited, recursion_depths, recursion_limit, inlining_map); + } + + if let Some((def_id, depth)) = recursion_depth_reset { + recursion_depths.insert(def_id, depth); + } + + debug!("END collect_items_rec({})", starting_point.node); +} + +/// Format instance name that is already known to be too long for rustc. +/// Show only the first and last 32 characters to avoid blasting +/// the user's terminal with thousands of lines of type-name. +/// +/// If the type name is longer than before+after, it will be written to a file.
+fn shrunk_instance_name<'tcx>( + tcx: TyCtxt<'tcx>, + instance: &Instance<'tcx>, + before: usize, + after: usize, +) -> (String, Option<PathBuf>) { + let s = instance.to_string(); + + // Only use the shrunk version if it's really shorter. + // This also avoids the case where before and after slices overlap. + if s.chars().nth(before + after + 1).is_some() { + // An iterator of all byte positions including the end of the string. + let positions = || s.char_indices().map(|(i, _)| i).chain(iter::once(s.len())); + + let shrunk = format!( + "{before}...{after}", + before = &s[..positions().nth(before).unwrap_or(s.len())], + after = &s[positions().rev().nth(after).unwrap_or(0)..], + ); + + let path = tcx.output_filenames(()).temp_path_ext("long-type.txt", None); + let written_to_path = std::fs::write(&path, s).ok().map(|_| path); + + (shrunk, written_to_path) + } else { + (s, None) + } +} + +fn check_recursion_limit<'tcx>( + tcx: TyCtxt<'tcx>, + instance: Instance<'tcx>, + span: Span, + recursion_depths: &mut DefIdMap<usize>, + recursion_limit: Limit, +) -> (DefId, usize) { + let def_id = instance.def_id(); + let recursion_depth = recursion_depths.get(&def_id).cloned().unwrap_or(0); + debug!(" => recursion depth={}", recursion_depth); + + let adjusted_recursion_depth = if Some(def_id) == tcx.lang_items().drop_in_place_fn() { + // HACK: drop_in_place creates tight monomorphization loops. Give + // it more margin. + recursion_depth / 4 + } else { + recursion_depth + }; + + // Code that needs to instantiate the same function recursively + // more than the recursion limit is assumed to be causing an + // infinite expansion. + if !recursion_limit.value_within_limit(adjusted_recursion_depth) { + let (shrunk, written_to_path) = shrunk_instance_name(tcx, &instance, 32, 32); + let error = format!("reached the recursion limit while instantiating `{}`", shrunk); + let mut err = tcx.sess.struct_span_fatal(span, &error); + err.span_note( + tcx.def_span(def_id), + &format!("`{}` defined here", tcx.def_path_str(def_id)), + ); + if let Some(path) = written_to_path { + err.note(&format!("the full type name has been written to '{}'", path.display())); + } + err.emit() + } + + recursion_depths.insert(def_id, recursion_depth + 1); + + (def_id, recursion_depth) +} + +fn check_type_length_limit<'tcx>(tcx: TyCtxt<'tcx>, instance: Instance<'tcx>) { + let type_length = instance + .substs + .iter() + .flat_map(|arg| arg.walk()) + .filter(|arg| match arg.unpack() { + GenericArgKind::Type(_) | GenericArgKind::Const(_) => true, + GenericArgKind::Lifetime(_) => false, + }) + .count(); + debug!(" => type length={}", type_length); + + // Rust code can easily create exponentially-long types using only a + // polynomial recursion depth. Even with the default recursion + // depth, you can easily get cases that take >2^60 steps to run, + // which means that rustc basically hangs. + // + // Bail out in these cases to avoid that bad user experience. 
+ if !tcx.type_length_limit().value_within_limit(type_length) { + let (shrunk, written_to_path) = shrunk_instance_name(tcx, &instance, 32, 32); + let msg = format!("reached the type-length limit while instantiating `{}`", shrunk); + let mut diag = tcx.sess.struct_span_fatal(tcx.def_span(instance.def_id()), &msg); + if let Some(path) = written_to_path { + diag.note(&format!("the full type name has been written to '{}'", path.display())); + } + diag.help(&format!( + "consider adding a `#![type_length_limit=\"{}\"]` attribute to your crate", + type_length + )); + diag.emit() + } +} + +struct MirNeighborCollector<'a, 'tcx> { + tcx: TyCtxt<'tcx>, + body: &'a mir::Body<'tcx>, + output: &'a mut MonoItems<'tcx>, + instance: Instance<'tcx>, +} + +impl<'a, 'tcx> MirNeighborCollector<'a, 'tcx> { + pub fn monomorphize<T>(&self, value: T) -> T + where + T: TypeFoldable<'tcx>, + { + debug!("monomorphize: self.instance={:?}", self.instance); + self.instance.subst_mir_and_normalize_erasing_regions( + self.tcx, + ty::ParamEnv::reveal_all(), + value, + ) + } +} + +impl<'a, 'tcx> MirVisitor<'tcx> for MirNeighborCollector<'a, 'tcx> { + fn visit_rvalue(&mut self, rvalue: &mir::Rvalue<'tcx>, location: Location) { + debug!("visiting rvalue {:?}", *rvalue); + + let span = self.body.source_info(location).span; + + match *rvalue { + // When doing a cast from a regular pointer to a fat pointer, we + // have to instantiate all methods of the trait being cast to, so we + // can build the appropriate vtable. + mir::Rvalue::Cast( + mir::CastKind::Pointer(PointerCast::Unsize), + ref operand, + target_ty, + ) => { + let target_ty = self.monomorphize(target_ty); + let source_ty = operand.ty(self.body, self.tcx); + let source_ty = self.monomorphize(source_ty); + let (source_ty, target_ty) = + find_vtable_types_for_unsizing(self.tcx, source_ty, target_ty); + // This could also be a different Unsize instruction, like + // from a fixed-size array to a slice. But we are only + // interested in things that produce a vtable.
+ if target_ty.is_trait() && !source_ty.is_trait() { + create_mono_items_for_vtable_methods( + self.tcx, + target_ty, + source_ty, + span, + self.output, + ); + } + } + mir::Rvalue::Cast( + mir::CastKind::Pointer(PointerCast::ReifyFnPointer), + ref operand, + _, + ) => { + let fn_ty = operand.ty(self.body, self.tcx); + let fn_ty = self.monomorphize(fn_ty); + visit_fn_use(self.tcx, fn_ty, false, span, &mut self.output); + } + mir::Rvalue::Cast( + mir::CastKind::Pointer(PointerCast::ClosureFnPointer(_)), + ref operand, + _, + ) => { + let source_ty = operand.ty(self.body, self.tcx); + let source_ty = self.monomorphize(source_ty); + match *source_ty.kind() { + ty::Closure(def_id, substs) => { + let instance = Instance::resolve_closure( + self.tcx, + def_id, + substs, + ty::ClosureKind::FnOnce, + ) + .expect("failed to normalize and resolve closure during codegen"); + if should_codegen_locally(self.tcx, &instance) { + self.output.push(create_fn_mono_item(self.tcx, instance, span)); + } + } + _ => bug!(), + } + } + mir::Rvalue::ThreadLocalRef(def_id) => { + assert!(self.tcx.is_thread_local_static(def_id)); + let instance = Instance::mono(self.tcx, def_id); + if should_codegen_locally(self.tcx, &instance) { + trace!("collecting thread-local static {:?}", def_id); + self.output.push(respan(span, MonoItem::Static(def_id))); + } + } + _ => { /* not interesting */ } + } + + self.super_rvalue(rvalue, location); + } + + /// This does not walk the constant, as it has been handled entirely here and trying + /// to walk it would attempt to evaluate the `ty::Const` inside, which doesn't necessarily + /// work, as some constants cannot be represented in the type system. + #[instrument(skip(self), level = "debug")] + fn visit_constant(&mut self, constant: &mir::Constant<'tcx>, location: Location) { + let literal = self.monomorphize(constant.literal); + let val = match literal { + mir::ConstantKind::Val(val, _) => val, + mir::ConstantKind::Ty(ct) => match ct.kind() { + ty::ConstKind::Value(val) => self.tcx.valtree_to_const_val((ct.ty(), val)), + ty::ConstKind::Unevaluated(ct) => { + debug!(?ct); + let param_env = ty::ParamEnv::reveal_all(); + match self.tcx.const_eval_resolve(param_env, ct, None) { + // The `monomorphize` call should have evaluated that constant already. + Ok(val) => val, + Err(ErrorHandled::Reported(_) | ErrorHandled::Linted) => return, + Err(ErrorHandled::TooGeneric) => span_bug!( + self.body.source_info(location).span, + "collection encountered polymorphic constant: {:?}", + literal + ), + } + } + _ => return, + }, + }; + collect_const_value(self.tcx, val, self.output); + self.visit_ty(literal.ty(), TyContext::Location(location)); + } + + #[instrument(skip(self), level = "debug")] + fn visit_const(&mut self, constant: ty::Const<'tcx>, location: Location) { + debug!("visiting const {:?} @ {:?}", constant, location); + + let substituted_constant = self.monomorphize(constant); + let param_env = ty::ParamEnv::reveal_all(); + + match substituted_constant.kind() { + ty::ConstKind::Value(val) => { + let const_val = self.tcx.valtree_to_const_val((constant.ty(), val)); + collect_const_value(self.tcx, const_val, self.output) + } + ty::ConstKind::Unevaluated(unevaluated) => { + match self.tcx.const_eval_resolve(param_env, unevaluated, None) { + // The `monomorphize` call should have evaluated that constant already. 
+ Ok(val) => span_bug!( + self.body.source_info(location).span, + "collection encountered the unevaluated constant {} which evaluated to {:?}", + substituted_constant, + val + ), + Err(ErrorHandled::Reported(_) | ErrorHandled::Linted) => {} + Err(ErrorHandled::TooGeneric) => span_bug!( + self.body.source_info(location).span, + "collection encountered polymorphic constant: {}", + substituted_constant + ), + } + } + _ => {} + } + + self.super_const(constant); + } + + fn visit_terminator(&mut self, terminator: &mir::Terminator<'tcx>, location: Location) { + debug!("visiting terminator {:?} @ {:?}", terminator, location); + let source = self.body.source_info(location).span; + + let tcx = self.tcx; + match terminator.kind { + mir::TerminatorKind::Call { ref func, .. } => { + let callee_ty = func.ty(self.body, tcx); + let callee_ty = self.monomorphize(callee_ty); + visit_fn_use(self.tcx, callee_ty, true, source, &mut self.output); + } + mir::TerminatorKind::Drop { ref place, .. } + | mir::TerminatorKind::DropAndReplace { ref place, .. } => { + let ty = place.ty(self.body, self.tcx).ty; + let ty = self.monomorphize(ty); + visit_drop_use(self.tcx, ty, true, source, self.output); + } + mir::TerminatorKind::InlineAsm { ref operands, .. } => { + for op in operands { + match *op { + mir::InlineAsmOperand::SymFn { ref value } => { + let fn_ty = self.monomorphize(value.literal.ty()); + visit_fn_use(self.tcx, fn_ty, false, source, &mut self.output); + } + mir::InlineAsmOperand::SymStatic { def_id } => { + let instance = Instance::mono(self.tcx, def_id); + if should_codegen_locally(self.tcx, &instance) { + trace!("collecting asm sym static {:?}", def_id); + self.output.push(respan(source, MonoItem::Static(def_id))); + } + } + _ => {} + } + } + } + mir::TerminatorKind::Assert { ref msg, .. } => { + let lang_item = match msg { + mir::AssertKind::BoundsCheck { .. } => LangItem::PanicBoundsCheck, + _ => LangItem::Panic, + }; + let instance = Instance::mono(tcx, tcx.require_lang_item(lang_item, Some(source))); + if should_codegen_locally(tcx, &instance) { + self.output.push(create_fn_mono_item(tcx, instance, source)); + } + } + mir::TerminatorKind::Abort { .. } => { + let instance = Instance::mono( + tcx, + tcx.require_lang_item(LangItem::PanicNoUnwind, Some(source)), + ); + if should_codegen_locally(tcx, &instance) { + self.output.push(create_fn_mono_item(tcx, instance, source)); + } + } + mir::TerminatorKind::Goto { .. } + | mir::TerminatorKind::SwitchInt { .. } + | mir::TerminatorKind::Resume + | mir::TerminatorKind::Return + | mir::TerminatorKind::Unreachable => {} + mir::TerminatorKind::GeneratorDrop + | mir::TerminatorKind::Yield { .. } + | mir::TerminatorKind::FalseEdge { .. } + | mir::TerminatorKind::FalseUnwind { .. 
} => bug!(), + } + + self.super_terminator(terminator, location); + } + + fn visit_operand(&mut self, operand: &mir::Operand<'tcx>, location: Location) { + self.super_operand(operand, location); + let limit = self.tcx.move_size_limit().0; + if limit == 0 { + return; + } + let limit = Size::from_bytes(limit); + let ty = operand.ty(self.body, self.tcx); + let ty = self.monomorphize(ty); + let layout = self.tcx.layout_of(ty::ParamEnv::reveal_all().and(ty)); + if let Ok(layout) = layout { + if layout.size > limit { + debug!(?layout); + let source_info = self.body.source_info(location); + debug!(?source_info); + let lint_root = source_info.scope.lint_root(&self.body.source_scopes); + debug!(?lint_root); + let Some(lint_root) = lint_root else { + // This happens when the issue is in a function from a foreign crate that + // we monomorphized in the current crate. We can't get a `HirId` for things + // in other crates. + // FIXME: Find out where to report the lint on. Maybe simply crate-level lint root + // but correct span? This would make the lint at least accept crate-level lint attributes. + return; + }; + self.tcx.struct_span_lint_hir( + LARGE_ASSIGNMENTS, + lint_root, + source_info.span, + |lint| { + let mut err = lint.build(&format!("moving {} bytes", layout.size.bytes())); + err.span_label(source_info.span, "value moved from here"); + err.note(&format!(r#"The current maximum size is {}, but it can be customized with the move_size_limit attribute: `#![move_size_limit = "..."]`"#, limit.bytes())); + err.emit(); + }, + ); + } + } + } + + fn visit_local( + &mut self, + _place_local: Local, + _context: mir::visit::PlaceContext, + _location: Location, + ) { + } +} + +fn visit_drop_use<'tcx>( + tcx: TyCtxt<'tcx>, + ty: Ty<'tcx>, + is_direct_call: bool, + source: Span, + output: &mut MonoItems<'tcx>, +) { + let instance = Instance::resolve_drop_in_place(tcx, ty); + visit_instance_use(tcx, instance, is_direct_call, source, output); +} + +fn visit_fn_use<'tcx>( + tcx: TyCtxt<'tcx>, + ty: Ty<'tcx>, + is_direct_call: bool, + source: Span, + output: &mut MonoItems<'tcx>, +) { + if let ty::FnDef(def_id, substs) = *ty.kind() { + let instance = if is_direct_call { + ty::Instance::resolve(tcx, ty::ParamEnv::reveal_all(), def_id, substs).unwrap().unwrap() + } else { + ty::Instance::resolve_for_fn_ptr(tcx, ty::ParamEnv::reveal_all(), def_id, substs) + .unwrap() + }; + visit_instance_use(tcx, instance, is_direct_call, source, output); + } +} + +fn visit_instance_use<'tcx>( + tcx: TyCtxt<'tcx>, + instance: ty::Instance<'tcx>, + is_direct_call: bool, + source: Span, + output: &mut MonoItems<'tcx>, +) { + debug!("visit_item_use({:?}, is_direct_call={:?})", instance, is_direct_call); + if !should_codegen_locally(tcx, &instance) { + return; + } + + match instance.def { + ty::InstanceDef::Virtual(..) | ty::InstanceDef::Intrinsic(_) => { + if !is_direct_call { + bug!("{:?} being reified", instance); + } + } + ty::InstanceDef::DropGlue(_, None) => { + // Don't need to emit noop drop glue if we are calling directly. + if !is_direct_call { + output.push(create_fn_mono_item(tcx, instance, source)); + } + } + ty::InstanceDef::DropGlue(_, Some(_)) + | ty::InstanceDef::VTableShim(..) + | ty::InstanceDef::ReifyShim(..) + | ty::InstanceDef::ClosureOnceShim { .. } + | ty::InstanceDef::Item(..) + | ty::InstanceDef::FnPtrShim(..) + | ty::InstanceDef::CloneShim(..) 
=> { + output.push(create_fn_mono_item(tcx, instance, source)); + } + } +} + +/// Returns `true` if we should codegen an instance in the local crate, or returns `false` if we +/// can just link to the upstream crate and therefore don't need a mono item. +fn should_codegen_locally<'tcx>(tcx: TyCtxt<'tcx>, instance: &Instance<'tcx>) -> bool { + let Some(def_id) = instance.def.def_id_if_not_guaranteed_local_codegen() else { + return true; + }; + + if tcx.is_foreign_item(def_id) { + // Foreign items are always linked against, there's no way of instantiating them. + return false; + } + + if def_id.is_local() { + // Local items cannot be referred to locally without monomorphizing them locally. + return true; + } + + if tcx.is_reachable_non_generic(def_id) + || instance.polymorphize(tcx).upstream_monomorphization(tcx).is_some() + { + // We can link to the item in question, no instance needed in this crate. + return false; + } + + if !tcx.is_mir_available(def_id) { + bug!("no MIR available for {:?}", def_id); + } + + true +} + +/// For a given pair of source and target type that occur in an unsizing coercion, +/// this function finds the pair of types that determines the vtable linking +/// them. +/// +/// For example, the source type might be `&SomeStruct` and the target type +/// might be `&SomeTrait` in a cast like: +/// +/// let src: &SomeStruct = ...; +/// let target = src as &SomeTrait; +/// +/// Then the output of this function would be (SomeStruct, SomeTrait) since for +/// constructing the `target` fat-pointer we need the vtable for that pair. +/// +/// Things can get more complicated though because there's also the case where +/// the unsized type occurs as a field: +/// +/// ```rust +/// struct ComplexStruct<T: ?Sized> { +/// a: u32, +/// b: f64, +/// c: T +/// } +/// ``` +/// +/// In this case, if `T` is sized, `&ComplexStruct<T>` is a thin pointer. If `T` +/// is unsized, `&ComplexStruct<T>` is a fat pointer, and the vtable it points to is +/// for the pair of `T` (which is a trait) and the concrete type that `T` was +/// originally coerced from: +/// +/// let src: &ComplexStruct<SomeStruct> = ...; +/// let target = src as &ComplexStruct<SomeTrait>; +/// +/// Again, we want this `find_vtable_types_for_unsizing()` to provide the pair +/// `(SomeStruct, SomeTrait)`. +/// +/// Finally, there is also the case of custom unsizing coercions, e.g., for +/// smart pointers such as `Rc` and `Arc`. +fn find_vtable_types_for_unsizing<'tcx>( + tcx: TyCtxt<'tcx>, + source_ty: Ty<'tcx>, + target_ty: Ty<'tcx>, +) -> (Ty<'tcx>, Ty<'tcx>) { + let ptr_vtable = |inner_source: Ty<'tcx>, inner_target: Ty<'tcx>| { + let param_env = ty::ParamEnv::reveal_all(); + let type_has_metadata = |ty: Ty<'tcx>| -> bool { + if ty.is_sized(tcx.at(DUMMY_SP), param_env) { + return false; + } + let tail = tcx.struct_tail_erasing_lifetimes(ty, param_env); + match tail.kind() { + ty::Foreign(..) => false, + ty::Str | ty::Slice(..) | ty::Dynamic(..) => true, + _ => bug!("unexpected unsized tail: {:?}", tail), + } + }; + if type_has_metadata(inner_source) { + (inner_source, inner_target) + } else { + tcx.struct_lockstep_tails_erasing_lifetimes(inner_source, inner_target, param_env) + } + }; + + match (&source_ty.kind(), &target_ty.kind()) { + (&ty::Ref(_, a, _), &ty::Ref(_, b, _) | &ty::RawPtr(ty::TypeAndMut { ty: b, .. })) + | (&ty::RawPtr(ty::TypeAndMut { ty: a, .. }), &ty::RawPtr(ty::TypeAndMut { ty: b, ..
})) => { + ptr_vtable(*a, *b) + } + (&ty::Adt(def_a, _), &ty::Adt(def_b, _)) if def_a.is_box() && def_b.is_box() => { + ptr_vtable(source_ty.boxed_ty(), target_ty.boxed_ty()) + } + + (&ty::Adt(source_adt_def, source_substs), &ty::Adt(target_adt_def, target_substs)) => { + assert_eq!(source_adt_def, target_adt_def); + + let CustomCoerceUnsized::Struct(coerce_index) = + crate::custom_coerce_unsize_info(tcx, source_ty, target_ty); + + let source_fields = &source_adt_def.non_enum_variant().fields; + let target_fields = &target_adt_def.non_enum_variant().fields; + + assert!( + coerce_index < source_fields.len() && source_fields.len() == target_fields.len() + ); + + find_vtable_types_for_unsizing( + tcx, + source_fields[coerce_index].ty(tcx, source_substs), + target_fields[coerce_index].ty(tcx, target_substs), + ) + } + _ => bug!( + "find_vtable_types_for_unsizing: invalid coercion {:?} -> {:?}", + source_ty, + target_ty + ), + } +} + +#[instrument(skip(tcx), level = "debug")] +fn create_fn_mono_item<'tcx>( + tcx: TyCtxt<'tcx>, + instance: Instance<'tcx>, + source: Span, +) -> Spanned<MonoItem<'tcx>> { + debug!("create_fn_mono_item(instance={})", instance); + + let def_id = instance.def_id(); + if tcx.sess.opts.unstable_opts.profile_closures && def_id.is_local() && tcx.is_closure(def_id) { + crate::util::dump_closure_profile(tcx, instance); + } + + let respanned = respan(source, MonoItem::Fn(instance.polymorphize(tcx))); + debug!(?respanned); + + respanned +} + +/// Creates a `MonoItem` for each method that is referenced by the vtable for +/// the given trait/impl pair. +fn create_mono_items_for_vtable_methods<'tcx>( + tcx: TyCtxt<'tcx>, + trait_ty: Ty<'tcx>, + impl_ty: Ty<'tcx>, + source: Span, + output: &mut MonoItems<'tcx>, +) { + assert!(!trait_ty.has_escaping_bound_vars() && !impl_ty.has_escaping_bound_vars()); + + if let ty::Dynamic(ref trait_ty, ..) = trait_ty.kind() { + if let Some(principal) = trait_ty.principal() { + let poly_trait_ref = principal.with_self_ty(tcx, impl_ty); + assert!(!poly_trait_ref.has_escaping_bound_vars()); + + // Walk all methods of the trait, including those of its supertraits + let entries = tcx.vtable_entries(poly_trait_ref); + let methods = entries + .iter() + .filter_map(|entry| match entry { + VtblEntry::MetadataDropInPlace + | VtblEntry::MetadataSize + | VtblEntry::MetadataAlign + | VtblEntry::Vacant => None, + VtblEntry::TraitVPtr(_) => { + // all super trait items already covered, so skip them. + None + } + VtblEntry::Method(instance) => { + Some(*instance).filter(|instance| should_codegen_locally(tcx, instance)) + } + }) + .map(|item| create_fn_mono_item(tcx, item, source)); + output.extend(methods); + } + + // Also add the destructor. 
+ visit_drop_use(tcx, impl_ty, false, source, output); + } +} + +//=----------------------------------------------------------------------------- +// Root Collection +//=----------------------------------------------------------------------------- + +struct RootCollector<'a, 'tcx> { + tcx: TyCtxt<'tcx>, + mode: MonoItemCollectionMode, + output: &'a mut MonoItems<'tcx>, + entry_fn: Option<(DefId, EntryFnType)>, +} + +impl<'v> RootCollector<'_, 'v> { + fn process_item(&mut self, id: hir::ItemId) { + match self.tcx.def_kind(id.def_id) { + DefKind::Enum | DefKind::Struct | DefKind::Union => { + let item = self.tcx.hir().item(id); + match item.kind { + hir::ItemKind::Enum(_, ref generics) + | hir::ItemKind::Struct(_, ref generics) + | hir::ItemKind::Union(_, ref generics) => { + if generics.params.is_empty() { + if self.mode == MonoItemCollectionMode::Eager { + debug!( + "RootCollector: ADT drop-glue for {}", + self.tcx.def_path_str(item.def_id.to_def_id()) + ); + + let ty = + Instance::new(item.def_id.to_def_id(), InternalSubsts::empty()) + .ty(self.tcx, ty::ParamEnv::reveal_all()); + visit_drop_use(self.tcx, ty, true, DUMMY_SP, self.output); + } + } + } + _ => bug!(), + } + } + DefKind::GlobalAsm => { + debug!( + "RootCollector: ItemKind::GlobalAsm({})", + self.tcx.def_path_str(id.def_id.to_def_id()) + ); + self.output.push(dummy_spanned(MonoItem::GlobalAsm(id))); + } + DefKind::Static(..) => { + debug!( + "RootCollector: ItemKind::Static({})", + self.tcx.def_path_str(id.def_id.to_def_id()) + ); + self.output.push(dummy_spanned(MonoItem::Static(id.def_id.to_def_id()))); + } + DefKind::Const => { + // const items only generate mono items if they are + // actually used somewhere. Just declaring them is insufficient. + + // but even just declaring them must collect the items they refer to + if let Ok(val) = self.tcx.const_eval_poly(id.def_id.to_def_id()) { + collect_const_value(self.tcx, val, &mut self.output); + } + } + DefKind::Impl => { + if self.mode == MonoItemCollectionMode::Eager { + let item = self.tcx.hir().item(id); + create_mono_items_for_default_impls(self.tcx, item, self.output); + } + } + DefKind::Fn => { + self.push_if_root(id.def_id); + } + _ => {} + } + } + + fn process_impl_item(&mut self, id: hir::ImplItemId) { + if matches!(self.tcx.def_kind(id.def_id), DefKind::AssocFn) { + self.push_if_root(id.def_id); + } + } + + fn is_root(&self, def_id: LocalDefId) -> bool { + !item_requires_monomorphization(self.tcx, def_id) + && match self.mode { + MonoItemCollectionMode::Eager => true, + MonoItemCollectionMode::Lazy => { + self.entry_fn.and_then(|(id, _)| id.as_local()) == Some(def_id) + || self.tcx.is_reachable_non_generic(def_id) + || self + .tcx + .codegen_fn_attrs(def_id) + .flags + .contains(CodegenFnAttrFlags::RUSTC_STD_INTERNAL_SYMBOL) + } + } + } + + /// If `def_id` represents a root, pushes it onto the list of + /// outputs. (Note that all roots must be monomorphic.) + #[instrument(skip(self), level = "debug")] + fn push_if_root(&mut self, def_id: LocalDefId) { + if self.is_root(def_id) { + debug!("RootCollector::push_if_root: found root def_id={:?}", def_id); + + let instance = Instance::mono(self.tcx, def_id.to_def_id()); + self.output.push(create_fn_mono_item(self.tcx, instance, DUMMY_SP)); + } + } + + /// As a special case, when/if we encounter the + /// `main()` function, we also have to generate a + /// monomorphized copy of the start lang item based on + /// the return type of `main`. This is not needed when + /// the user writes their own `start` manually. 
+ fn push_extra_entry_roots(&mut self) { + let Some((main_def_id, EntryFnType::Main)) = self.entry_fn else { + return; + }; + + let start_def_id = match self.tcx.lang_items().require(LangItem::Start) { + Ok(s) => s, + Err(err) => self.tcx.sess.fatal(&err), + }; + let main_ret_ty = self.tcx.fn_sig(main_def_id).output(); + + // Given that `main()` has no arguments, + // then its return type cannot have + // late-bound regions, since late-bound + // regions must appear in the argument + // listing. + let main_ret_ty = self.tcx.normalize_erasing_regions( + ty::ParamEnv::reveal_all(), + main_ret_ty.no_bound_vars().unwrap(), + ); + + let start_instance = Instance::resolve( + self.tcx, + ty::ParamEnv::reveal_all(), + start_def_id, + self.tcx.intern_substs(&[main_ret_ty.into()]), + ) + .unwrap() + .unwrap(); + + self.output.push(create_fn_mono_item(self.tcx, start_instance, DUMMY_SP)); + } +} + +fn item_requires_monomorphization(tcx: TyCtxt<'_>, def_id: LocalDefId) -> bool { + let generics = tcx.generics_of(def_id); + generics.requires_monomorphization(tcx) +} + +fn create_mono_items_for_default_impls<'tcx>( + tcx: TyCtxt<'tcx>, + item: &'tcx hir::Item<'tcx>, + output: &mut MonoItems<'tcx>, +) { + match item.kind { + hir::ItemKind::Impl(ref impl_) => { + for param in impl_.generics.params { + match param.kind { + hir::GenericParamKind::Lifetime { .. } => {} + hir::GenericParamKind::Type { .. } | hir::GenericParamKind::Const { .. } => { + return; + } + } + } + + debug!( + "create_mono_items_for_default_impls(item={})", + tcx.def_path_str(item.def_id.to_def_id()) + ); + + if let Some(trait_ref) = tcx.impl_trait_ref(item.def_id) { + let param_env = ty::ParamEnv::reveal_all(); + let trait_ref = tcx.normalize_erasing_regions(param_env, trait_ref); + let overridden_methods = tcx.impl_item_implementor_ids(item.def_id); + for method in tcx.provided_trait_methods(trait_ref.def_id) { + if overridden_methods.contains_key(&method.def_id) { + continue; + } + + if tcx.generics_of(method.def_id).own_requires_monomorphization() { + continue; + } + + let substs = + InternalSubsts::for_item(tcx, method.def_id, |param, _| match param.kind { + GenericParamDefKind::Lifetime => tcx.lifetimes.re_erased.into(), + GenericParamDefKind::Type { .. } + | GenericParamDefKind::Const { .. } => { + trait_ref.substs[param.index as usize] + } + }); + let instance = ty::Instance::resolve(tcx, param_env, method.def_id, substs) + .unwrap() + .unwrap(); + + let mono_item = create_fn_mono_item(tcx, instance, DUMMY_SP); + if mono_item.node.is_instantiable(tcx) && should_codegen_locally(tcx, &instance) + { + output.push(mono_item); + } + } + } + } + _ => bug!(), + } +} + +/// Scans the miri alloc in order to find function calls, closures, and drop-glue. 
+fn collect_miri<'tcx>(tcx: TyCtxt<'tcx>, alloc_id: AllocId, output: &mut MonoItems<'tcx>) { + match tcx.global_alloc(alloc_id) { + GlobalAlloc::Static(def_id) => { + assert!(!tcx.is_thread_local_static(def_id)); + let instance = Instance::mono(tcx, def_id); + if should_codegen_locally(tcx, &instance) { + trace!("collecting static {:?}", def_id); + output.push(dummy_spanned(MonoItem::Static(def_id))); + } + } + GlobalAlloc::Memory(alloc) => { + trace!("collecting {:?} with {:#?}", alloc_id, alloc); + for &inner in alloc.inner().relocations().values() { + rustc_data_structures::stack::ensure_sufficient_stack(|| { + collect_miri(tcx, inner, output); + }); + } + } + GlobalAlloc::Function(fn_instance) => { + if should_codegen_locally(tcx, &fn_instance) { + trace!("collecting {:?} with {:#?}", alloc_id, fn_instance); + output.push(create_fn_mono_item(tcx, fn_instance, DUMMY_SP)); + } + } + GlobalAlloc::VTable(ty, trait_ref) => { + let alloc_id = tcx.vtable_allocation((ty, trait_ref)); + collect_miri(tcx, alloc_id, output) + } + } +} + +/// Scans the MIR in order to find function calls, closures, and drop-glue. +#[instrument(skip(tcx, output), level = "debug")] +fn collect_neighbours<'tcx>( + tcx: TyCtxt<'tcx>, + instance: Instance<'tcx>, + output: &mut MonoItems<'tcx>, +) { + let body = tcx.instance_mir(instance.def); + MirNeighborCollector { tcx, body: &body, output, instance }.visit_body(&body); +} + +#[instrument(skip(tcx, output), level = "debug")] +fn collect_const_value<'tcx>( + tcx: TyCtxt<'tcx>, + value: ConstValue<'tcx>, + output: &mut MonoItems<'tcx>, +) { + match value { + ConstValue::Scalar(Scalar::Ptr(ptr, _size)) => collect_miri(tcx, ptr.provenance, output), + ConstValue::Slice { data: alloc, start: _, end: _ } | ConstValue::ByRef { alloc, .. } => { + for &id in alloc.inner().relocations().values() { + collect_miri(tcx, id, output); + } + } + _ => {} + } +} diff --git a/compiler/rustc_monomorphize/src/lib.rs b/compiler/rustc_monomorphize/src/lib.rs new file mode 100644 index 000000000..ef4560b5e --- /dev/null +++ b/compiler/rustc_monomorphize/src/lib.rs @@ -0,0 +1,49 @@ +#![feature(array_windows)] +#![feature(control_flow_enum)] +#![feature(let_else)] +#![recursion_limit = "256"] +#![allow(rustc::potential_query_instability)] + +#[macro_use] +extern crate tracing; +#[macro_use] +extern crate rustc_middle; + +use rustc_hir::lang_items::LangItem; +use rustc_middle::traits; +use rustc_middle::ty::adjustment::CustomCoerceUnsized; +use rustc_middle::ty::query::Providers; +use rustc_middle::ty::{self, Ty, TyCtxt}; + +mod collector; +mod partitioning; +mod polymorphize; +mod util; + +fn custom_coerce_unsize_info<'tcx>( + tcx: TyCtxt<'tcx>, + source_ty: Ty<'tcx>, + target_ty: Ty<'tcx>, +) -> CustomCoerceUnsized { + let def_id = tcx.require_lang_item(LangItem::CoerceUnsized, None); + + let trait_ref = ty::Binder::dummy(ty::TraitRef { + def_id, + substs: tcx.mk_substs_trait(source_ty, &[target_ty.into()]), + }); + + match tcx.codegen_fulfill_obligation((ty::ParamEnv::reveal_all(), trait_ref)) { + Ok(traits::ImplSource::UserDefined(traits::ImplSourceUserDefinedData { + impl_def_id, + .. 
+ })) => tcx.coerce_unsized_info(impl_def_id).custom_kind.unwrap(), + impl_source => { + bug!("invalid `CoerceUnsized` impl_source: {:?}", impl_source); + } + } +} + +pub fn provide(providers: &mut Providers) { + partitioning::provide(providers); + polymorphize::provide(providers); +} diff --git a/compiler/rustc_monomorphize/src/partitioning/default.rs b/compiler/rustc_monomorphize/src/partitioning/default.rs new file mode 100644 index 000000000..15276569c --- /dev/null +++ b/compiler/rustc_monomorphize/src/partitioning/default.rs @@ -0,0 +1,560 @@ +use std::collections::hash_map::Entry; + +use rustc_data_structures::fx::{FxHashMap, FxHashSet}; +use rustc_hir::def::DefKind; +use rustc_hir::def_id::{DefId, LOCAL_CRATE}; +use rustc_hir::definitions::DefPathDataName; +use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrFlags; +use rustc_middle::middle::exported_symbols::{SymbolExportInfo, SymbolExportLevel}; +use rustc_middle::mir::mono::{CodegenUnit, CodegenUnitNameBuilder, Linkage, Visibility}; +use rustc_middle::mir::mono::{InstantiationMode, MonoItem}; +use rustc_middle::ty::print::characteristic_def_id_of_type; +use rustc_middle::ty::{self, visit::TypeVisitable, DefIdTree, InstanceDef, TyCtxt}; +use rustc_span::symbol::Symbol; + +use super::PartitioningCx; +use crate::collector::InliningMap; +use crate::partitioning::merging; +use crate::partitioning::{ + MonoItemPlacement, Partitioner, PostInliningPartitioning, PreInliningPartitioning, +}; + +pub struct DefaultPartitioning; + +impl<'tcx> Partitioner<'tcx> for DefaultPartitioning { + fn place_root_mono_items( + &mut self, + cx: &PartitioningCx<'_, 'tcx>, + mono_items: &mut dyn Iterator<Item = MonoItem<'tcx>>, + ) -> PreInliningPartitioning<'tcx> { + let mut roots = FxHashSet::default(); + let mut codegen_units = FxHashMap::default(); + let is_incremental_build = cx.tcx.sess.opts.incremental.is_some(); + let mut internalization_candidates = FxHashSet::default(); + + // Determine if monomorphizations instantiated in this crate will be made + // available to downstream crates. This depends on whether we are in + // share-generics mode and whether the current crate can even have + // downstream crates. + let export_generics = + cx.tcx.sess.opts.share_generics() && cx.tcx.local_crate_exports_generics(); + + let cgu_name_builder = &mut CodegenUnitNameBuilder::new(cx.tcx); + let cgu_name_cache = &mut FxHashMap::default(); + + for mono_item in mono_items { + match mono_item.instantiation_mode(cx.tcx) { + InstantiationMode::GloballyShared { .. 
} => {} + InstantiationMode::LocalCopy => continue, + } + + let characteristic_def_id = characteristic_def_id_of_mono_item(cx.tcx, mono_item); + let is_volatile = is_incremental_build && mono_item.is_generic_fn(); + + let codegen_unit_name = match characteristic_def_id { + Some(def_id) => compute_codegen_unit_name( + cx.tcx, + cgu_name_builder, + def_id, + is_volatile, + cgu_name_cache, + ), + None => fallback_cgu_name(cgu_name_builder), + }; + + let codegen_unit = codegen_units + .entry(codegen_unit_name) + .or_insert_with(|| CodegenUnit::new(codegen_unit_name)); + + let mut can_be_internalized = true; + let (linkage, visibility) = mono_item_linkage_and_visibility( + cx.tcx, + &mono_item, + &mut can_be_internalized, + export_generics, + ); + if visibility == Visibility::Hidden && can_be_internalized { + internalization_candidates.insert(mono_item); + } + + codegen_unit.items_mut().insert(mono_item, (linkage, visibility)); + roots.insert(mono_item); + } + + // Always ensure we have at least one CGU; otherwise, if we have a + // crate with just types (for example), we could wind up with no CGU. + if codegen_units.is_empty() { + let codegen_unit_name = fallback_cgu_name(cgu_name_builder); + codegen_units.insert(codegen_unit_name, CodegenUnit::new(codegen_unit_name)); + } + + PreInliningPartitioning { + codegen_units: codegen_units + .into_iter() + .map(|(_, codegen_unit)| codegen_unit) + .collect(), + roots, + internalization_candidates, + } + } + + fn merge_codegen_units( + &mut self, + cx: &PartitioningCx<'_, 'tcx>, + initial_partitioning: &mut PreInliningPartitioning<'tcx>, + ) { + merging::merge_codegen_units(cx, initial_partitioning); + } + + fn place_inlined_mono_items( + &mut self, + cx: &PartitioningCx<'_, 'tcx>, + initial_partitioning: PreInliningPartitioning<'tcx>, + ) -> PostInliningPartitioning<'tcx> { + let mut new_partitioning = Vec::new(); + let mut mono_item_placements = FxHashMap::default(); + + let PreInliningPartitioning { + codegen_units: initial_cgus, + roots, + internalization_candidates, + } = initial_partitioning; + + let single_codegen_unit = initial_cgus.len() == 1; + + for old_codegen_unit in initial_cgus { + // Collect all items that need to be available in this codegen unit. + let mut reachable = FxHashSet::default(); + for root in old_codegen_unit.items().keys() { + follow_inlining(*root, cx.inlining_map, &mut reachable); + } + + let mut new_codegen_unit = CodegenUnit::new(old_codegen_unit.name()); + + // Add all monomorphizations that are not already there. + for mono_item in reachable { + if let Some(linkage) = old_codegen_unit.items().get(&mono_item) { + // This is a root, just copy it over. + new_codegen_unit.items_mut().insert(mono_item, *linkage); + } else { + if roots.contains(&mono_item) { + bug!( + "GloballyShared mono-item inlined into other CGU: \ + {:?}", + mono_item + ); + } + + // This is a CGU-private copy. + new_codegen_unit + .items_mut() + .insert(mono_item, (Linkage::Internal, Visibility::Default)); + } + + if !single_codegen_unit { + // If there is more than one codegen unit, we need to keep track + // in which codegen units each monomorphization is placed. 
+                    match mono_item_placements.entry(mono_item) {
+                        Entry::Occupied(e) => {
+                            let placement = e.into_mut();
+                            debug_assert!(match *placement {
+                                MonoItemPlacement::SingleCgu { cgu_name } => {
+                                    cgu_name != new_codegen_unit.name()
+                                }
+                                MonoItemPlacement::MultipleCgus => true,
+                            });
+                            *placement = MonoItemPlacement::MultipleCgus;
+                        }
+                        Entry::Vacant(e) => {
+                            e.insert(MonoItemPlacement::SingleCgu {
+                                cgu_name: new_codegen_unit.name(),
+                            });
+                        }
+                    }
+                }
+            }
+
+            new_partitioning.push(new_codegen_unit);
+        }
+
+        return PostInliningPartitioning {
+            codegen_units: new_partitioning,
+            mono_item_placements,
+            internalization_candidates,
+        };
+
+        fn follow_inlining<'tcx>(
+            mono_item: MonoItem<'tcx>,
+            inlining_map: &InliningMap<'tcx>,
+            visited: &mut FxHashSet<MonoItem<'tcx>>,
+        ) {
+            if !visited.insert(mono_item) {
+                return;
+            }
+
+            inlining_map.with_inlining_candidates(mono_item, |target| {
+                follow_inlining(target, inlining_map, visited);
+            });
+        }
+    }
+
+    fn internalize_symbols(
+        &mut self,
+        cx: &PartitioningCx<'_, 'tcx>,
+        partitioning: &mut PostInliningPartitioning<'tcx>,
+    ) {
+        if partitioning.codegen_units.len() == 1 {
+            // Fast path for when there is only one codegen unit. In this case we
+            // can internalize all candidates, since there is nowhere else they
+            // could be accessed from.
+            for cgu in &mut partitioning.codegen_units {
+                for candidate in &partitioning.internalization_candidates {
+                    cgu.items_mut().insert(*candidate, (Linkage::Internal, Visibility::Default));
+                }
+            }
+
+            return;
+        }
+
+        // Build a map from every monomorphization to all the monomorphizations that
+        // reference it.
+        let mut accessor_map: FxHashMap<MonoItem<'tcx>, Vec<MonoItem<'tcx>>> = Default::default();
+        cx.inlining_map.iter_accesses(|accessor, accessees| {
+            for accessee in accessees {
+                accessor_map.entry(*accessee).or_default().push(accessor);
+            }
+        });
+
+        let mono_item_placements = &partitioning.mono_item_placements;
+
+        // For each internalization candidate in each codegen unit, check if it is
+        // accessed from outside its defining codegen unit.
+        for cgu in &mut partitioning.codegen_units {
+            let home_cgu = MonoItemPlacement::SingleCgu { cgu_name: cgu.name() };
+
+            for (accessee, linkage_and_visibility) in cgu.items_mut() {
+                if !partitioning.internalization_candidates.contains(accessee) {
+                    // This item is not a candidate for internalization, so skip it.
+                    continue;
+                }
+                debug_assert_eq!(mono_item_placements[accessee], home_cgu);
+
+                if let Some(accessors) = accessor_map.get(accessee) {
+                    if accessors
+                        .iter()
+                        .filter_map(|accessor| {
+                            // Some accessors might not have been
+                            // instantiated. We can safely ignore those.
+                            mono_item_placements.get(accessor)
+                        })
+                        .any(|placement| *placement != home_cgu)
+                    {
+                        // Found an accessor from another CGU, so skip to the next
+                        // item without marking this one as internal.
+                        continue;
+                    }
+                }
+
+                // If we got here, we did not find any accesses from other CGUs,
+                // so it's fine to make this monomorphization internal.
+                *linkage_and_visibility = (Linkage::Internal, Visibility::Default);
+            }
+        }
+    }
+}
+
+fn characteristic_def_id_of_mono_item<'tcx>(
+    tcx: TyCtxt<'tcx>,
+    mono_item: MonoItem<'tcx>,
+) -> Option<DefId> {
+    match mono_item {
+        MonoItem::Fn(instance) => {
+            let def_id = match instance.def {
+                ty::InstanceDef::Item(def) => def.did,
+                ty::InstanceDef::VTableShim(..)
+                | ty::InstanceDef::ReifyShim(..)
+                | ty::InstanceDef::FnPtrShim(..)
+                | ty::InstanceDef::ClosureOnceShim { .. }
+                | ty::InstanceDef::Intrinsic(..)
+                | ty::InstanceDef::DropGlue(..)
+                | ty::InstanceDef::Virtual(..)
+                | ty::InstanceDef::CloneShim(..) => return None,
+            };
+
+            // If this is a method, we want to put it into the same module as
+            // its self-type. If the self-type does not provide a characteristic
+            // DefId, we use the location of the impl after all.
+
+            if tcx.trait_of_item(def_id).is_some() {
+                let self_ty = instance.substs.type_at(0);
+                // This is a default implementation of a trait method.
+                return characteristic_def_id_of_type(self_ty).or(Some(def_id));
+            }
+
+            if let Some(impl_def_id) = tcx.impl_of_method(def_id) {
+                if tcx.sess.opts.incremental.is_some()
+                    && tcx.trait_id_of_impl(impl_def_id) == tcx.lang_items().drop_trait()
+                {
+                    // Put `Drop::drop` into the same cgu as `drop_in_place`
+                    // since `drop_in_place` is the only thing that can
+                    // call it.
+                    return None;
+                }
+
+                // When polymorphization is enabled, methods which do not depend on their own
+                // generic parameters, but whose impl block's self-type does, will fail to
+                // normalize.
+                if !tcx.sess.opts.unstable_opts.polymorphize || !instance.needs_subst() {
+                    // This is a method within an impl; find out what the self-type is:
+                    let impl_self_ty = tcx.subst_and_normalize_erasing_regions(
+                        instance.substs,
+                        ty::ParamEnv::reveal_all(),
+                        tcx.type_of(impl_def_id),
+                    );
+                    if let Some(def_id) = characteristic_def_id_of_type(impl_self_ty) {
+                        return Some(def_id);
+                    }
+                }
+            }
+
+            Some(def_id)
+        }
+        MonoItem::Static(def_id) => Some(def_id),
+        MonoItem::GlobalAsm(item_id) => Some(item_id.def_id.to_def_id()),
+    }
+}
+
+fn compute_codegen_unit_name(
+    tcx: TyCtxt<'_>,
+    name_builder: &mut CodegenUnitNameBuilder<'_>,
+    def_id: DefId,
+    volatile: bool,
+    cache: &mut CguNameCache,
+) -> Symbol {
+    // Find the innermost module that is not nested within a function.
+    let mut current_def_id = def_id;
+    let mut cgu_def_id = None;
+    // Walk backwards from the item we want to find the module for.
+    loop {
+        if current_def_id.is_crate_root() {
+            if cgu_def_id.is_none() {
+                // If we have not found a module yet, take the crate root.
+                cgu_def_id = Some(def_id.krate.as_def_id());
+            }
+            break;
+        } else if tcx.def_kind(current_def_id) == DefKind::Mod {
+            if cgu_def_id.is_none() {
+                cgu_def_id = Some(current_def_id);
+            }
+        } else {
+            // If we encounter something that is not a module, throw away
+            // any module that we've found so far because we now know that
+            // it is nested within something else.
+            cgu_def_id = None;
+        }
+
+        current_def_id = tcx.parent(current_def_id);
+    }
+
+    let cgu_def_id = cgu_def_id.unwrap();
+
+    *cache.entry((cgu_def_id, volatile)).or_insert_with(|| {
+        let def_path = tcx.def_path(cgu_def_id);
+
+        let components = def_path.data.iter().map(|part| match part.data.name() {
+            DefPathDataName::Named(name) => name,
+            DefPathDataName::Anon { .. } => unreachable!(),
+        });
+
+        let volatile_suffix = volatile.then_some("volatile");
+
+        name_builder.build_cgu_name(def_path.krate, components, volatile_suffix)
+    })
+}
+
+// Anything we can't find a proper codegen unit for goes into this.
+fn fallback_cgu_name(name_builder: &mut CodegenUnitNameBuilder<'_>) -> Symbol { + name_builder.build_cgu_name(LOCAL_CRATE, &["fallback"], Some("cgu")) +} + +fn mono_item_linkage_and_visibility<'tcx>( + tcx: TyCtxt<'tcx>, + mono_item: &MonoItem<'tcx>, + can_be_internalized: &mut bool, + export_generics: bool, +) -> (Linkage, Visibility) { + if let Some(explicit_linkage) = mono_item.explicit_linkage(tcx) { + return (explicit_linkage, Visibility::Default); + } + let vis = mono_item_visibility(tcx, mono_item, can_be_internalized, export_generics); + (Linkage::External, vis) +} + +type CguNameCache = FxHashMap<(DefId, bool), Symbol>; + +fn mono_item_visibility<'tcx>( + tcx: TyCtxt<'tcx>, + mono_item: &MonoItem<'tcx>, + can_be_internalized: &mut bool, + export_generics: bool, +) -> Visibility { + let instance = match mono_item { + // This is pretty complicated; see below. + MonoItem::Fn(instance) => instance, + + // Misc handling for generics and such, but otherwise: + MonoItem::Static(def_id) => { + return if tcx.is_reachable_non_generic(*def_id) { + *can_be_internalized = false; + default_visibility(tcx, *def_id, false) + } else { + Visibility::Hidden + }; + } + MonoItem::GlobalAsm(item_id) => { + return if tcx.is_reachable_non_generic(item_id.def_id) { + *can_be_internalized = false; + default_visibility(tcx, item_id.def_id.to_def_id(), false) + } else { + Visibility::Hidden + }; + } + }; + + let def_id = match instance.def { + InstanceDef::Item(def) => def.did, + InstanceDef::DropGlue(def_id, Some(_)) => def_id, + + // These are all compiler glue and such, never exported, always hidden. + InstanceDef::VTableShim(..) + | InstanceDef::ReifyShim(..) + | InstanceDef::FnPtrShim(..) + | InstanceDef::Virtual(..) + | InstanceDef::Intrinsic(..) + | InstanceDef::ClosureOnceShim { .. } + | InstanceDef::DropGlue(..) + | InstanceDef::CloneShim(..) => return Visibility::Hidden, + }; + + // The `start_fn` lang item is actually a monomorphized instance of a + // function in the standard library, used for the `main` function. We don't + // want to export it so we tag it with `Hidden` visibility but this symbol + // is only referenced from the actual `main` symbol which we unfortunately + // don't know anything about during partitioning/collection. As a result we + // forcibly keep this symbol out of the `internalization_candidates` set. + // + // FIXME: eventually we don't want to always force this symbol to have + // hidden visibility, it should indeed be a candidate for + // internalization, but we have to understand that it's referenced + // from the `main` symbol we'll generate later. + // + // This may be fixable with a new `InstanceDef` perhaps? Unsure! + if tcx.lang_items().start_fn() == Some(def_id) { + *can_be_internalized = false; + return Visibility::Hidden; + } + + let is_generic = instance.substs.non_erasable_generics().next().is_some(); + + // Upstream `DefId` instances get different handling than local ones. + let Some(def_id) = def_id.as_local() else { + return if export_generics && is_generic { + // If it is an upstream monomorphization and we export generics, we must make + // it available to downstream crates. + *can_be_internalized = false; + default_visibility(tcx, def_id, true) + } else { + Visibility::Hidden + }; + }; + + if is_generic { + if export_generics { + if tcx.is_unreachable_local_definition(def_id) { + // This instance cannot be used from another crate. + Visibility::Hidden + } else { + // This instance might be useful in a downstream crate. 
+                *can_be_internalized = false;
+                default_visibility(tcx, def_id.to_def_id(), true)
+            }
+        } else {
+            // Either we are not exporting generics, or the definition is not
+            // reachable from downstream crates; either way, we can internalize
+            // its instantiations.
+            Visibility::Hidden
+        }
+    } else {
+        // If this isn't a generic function, then we mark it `Default` if it
+        // is a reachable item, meaning that it's a symbol other crates may
+        // access when they link to us.
+        if tcx.is_reachable_non_generic(def_id.to_def_id()) {
+            *can_be_internalized = false;
+            debug_assert!(!is_generic);
+            return default_visibility(tcx, def_id.to_def_id(), false);
+        }
+
+        // If this isn't reachable then we're going to tag this with `Hidden`
+        // visibility. In some situations though we'll want to prevent this
+        // symbol from being internalized.
+        //
+        // There are two categories of items here:
+        //
+        // * First is weak lang items. These are basically mechanisms for
+        //   libcore to forward-reference symbols defined later in crates like
+        //   the standard library or `#[panic_handler]` definitions. The
+        //   definition of these weak lang items needs to be referenceable by
+        //   libcore, so we're no longer a candidate for internalization.
+        //   Removal of these functions can't be done by LLVM but rather must be
+        //   done by the linker as it's a non-local decision.
+        //
+        // * Second is "std internal symbols". Currently this is primarily used
+        //   for allocator symbols. Allocators are a little weird in their
+        //   implementation, but the idea is that the compiler, at the last
+        //   minute, defines an allocator with an injected object file. The
+        //   `alloc` crate references these symbols (`__rust_alloc`) and the
+        //   definition doesn't get hooked up until a linked crate artifact is
+        //   generated.
+        //
+        //   The symbols synthesized by the compiler (`__rust_alloc`) are thin
+        //   veneers around the actual implementation, some other symbol which
+        //   implements the same ABI. These symbols (things like `__rg_alloc`,
+        //   `__rdl_alloc`, `__rde_alloc`, etc.) are all tagged with "std
+        //   internal symbols".
+        //
+        //   The std-internal symbols here **should not show up in a dll as an
+        //   exported interface**, so they return `false` from
+        //   `is_reachable_non_generic` above and we'll give them `Hidden`
+        //   visibility below. Like the weak lang items, though, we can't let
+        //   LLVM internalize them, as the decision to omit them is left up to
+        //   the linker, so we prevent them from being internalized.
+        let attrs = tcx.codegen_fn_attrs(def_id);
+        if attrs.flags.contains(CodegenFnAttrFlags::RUSTC_STD_INTERNAL_SYMBOL) {
+            *can_be_internalized = false;
+        }
+
+        Visibility::Hidden
+    }
+}
+
+fn default_visibility(tcx: TyCtxt<'_>, id: DefId, is_generic: bool) -> Visibility {
+    if !tcx.sess.target.default_hidden_visibility {
+        return Visibility::Default;
+    }
+
+    // Generic functions never have export-level C.
+    if is_generic {
+        return Visibility::Hidden;
+    }
+
+    // Things with export level C don't get instantiated in
+    // downstream crates.
+    if !id.is_local() {
+        return Visibility::Hidden;
+    }
+
+    // C-export level items remain at `Default`, all other internal
+    // items become `Hidden`.
+    match tcx.reachable_non_generics(id.krate).get(&id) {
+        Some(SymbolExportInfo { level: SymbolExportLevel::C, ..
}) => Visibility::Default, + _ => Visibility::Hidden, + } +} diff --git a/compiler/rustc_monomorphize/src/partitioning/merging.rs b/compiler/rustc_monomorphize/src/partitioning/merging.rs new file mode 100644 index 000000000..02bb8dea0 --- /dev/null +++ b/compiler/rustc_monomorphize/src/partitioning/merging.rs @@ -0,0 +1,111 @@ +use std::cmp; + +use rustc_data_structures::fx::FxHashMap; +use rustc_hir::def_id::LOCAL_CRATE; +use rustc_middle::mir::mono::{CodegenUnit, CodegenUnitNameBuilder}; +use rustc_span::symbol::Symbol; + +use super::PartitioningCx; +use crate::partitioning::PreInliningPartitioning; + +pub fn merge_codegen_units<'tcx>( + cx: &PartitioningCx<'_, 'tcx>, + initial_partitioning: &mut PreInliningPartitioning<'tcx>, +) { + assert!(cx.target_cgu_count >= 1); + let codegen_units = &mut initial_partitioning.codegen_units; + + // Note that at this point in time the `codegen_units` here may not be in a + // deterministic order (but we know they're deterministically the same set). + // We want this merging to produce a deterministic ordering of codegen units + // from the input. + // + // Due to basically how we've implemented the merging below (merge the two + // smallest into each other) we're sure to start off with a deterministic + // order (sorted by name). This'll mean that if two cgus have the same size + // the stable sort below will keep everything nice and deterministic. + codegen_units.sort_by(|a, b| a.name().as_str().partial_cmp(b.name().as_str()).unwrap()); + + // This map keeps track of what got merged into what. + let mut cgu_contents: FxHashMap<Symbol, Vec<Symbol>> = + codegen_units.iter().map(|cgu| (cgu.name(), vec![cgu.name()])).collect(); + + // Merge the two smallest codegen units until the target size is reached. + while codegen_units.len() > cx.target_cgu_count { + // Sort small cgus to the back + codegen_units.sort_by_cached_key(|cgu| cmp::Reverse(cgu.size_estimate())); + let mut smallest = codegen_units.pop().unwrap(); + let second_smallest = codegen_units.last_mut().unwrap(); + + // Move the mono-items from `smallest` to `second_smallest` + second_smallest.modify_size_estimate(smallest.size_estimate()); + for (k, v) in smallest.items_mut().drain() { + second_smallest.items_mut().insert(k, v); + } + + // Record that `second_smallest` now contains all the stuff that was in + // `smallest` before. + let mut consumed_cgu_names = cgu_contents.remove(&smallest.name()).unwrap(); + cgu_contents.get_mut(&second_smallest.name()).unwrap().append(&mut consumed_cgu_names); + + debug!( + "CodegenUnit {} merged into CodegenUnit {}", + smallest.name(), + second_smallest.name() + ); + } + + let cgu_name_builder = &mut CodegenUnitNameBuilder::new(cx.tcx); + + if cx.tcx.sess.opts.incremental.is_some() { + // If we are doing incremental compilation, we want CGU names to + // reflect the path of the source level module they correspond to. + // For CGUs that contain the code of multiple modules because of the + // merging done above, we use a concatenation of the names of + // all contained CGUs. + let new_cgu_names: FxHashMap<Symbol, String> = cgu_contents + .into_iter() + // This `filter` makes sure we only update the name of CGUs that + // were actually modified by merging. + .filter(|(_, cgu_contents)| cgu_contents.len() > 1) + .map(|(current_cgu_name, cgu_contents)| { + let mut cgu_contents: Vec<&str> = cgu_contents.iter().map(|s| s.as_str()).collect(); + + // Sort the names, so things are deterministic and easy to + // predict. 
+
+                // We are sorting primitive &strs here so we can use unstable sort
+                cgu_contents.sort_unstable();
+
+                (current_cgu_name, cgu_contents.join("--"))
+            })
+            .collect();
+
+        for cgu in codegen_units.iter_mut() {
+            if let Some(new_cgu_name) = new_cgu_names.get(&cgu.name()) {
+                if cx.tcx.sess.opts.unstable_opts.human_readable_cgu_names {
+                    cgu.set_name(Symbol::intern(&new_cgu_name));
+                } else {
+                    // If we don't require CGU names to be human-readable, we
+                    // use a fixed length hash of the composite CGU name
+                    // instead.
+                    let new_cgu_name = CodegenUnit::mangle_name(&new_cgu_name);
+                    cgu.set_name(Symbol::intern(&new_cgu_name));
+                }
+            }
+        }
+    } else {
+        // If we are compiling non-incrementally we just generate simple CGU
+        // names containing an index.
+        for (index, cgu) in codegen_units.iter_mut().enumerate() {
+            cgu.set_name(numbered_codegen_unit_name(cgu_name_builder, index));
+        }
+    }
+}
+
+fn numbered_codegen_unit_name(
+    name_builder: &mut CodegenUnitNameBuilder<'_>,
+    index: usize,
+) -> Symbol {
+    name_builder.build_cgu_name_no_mangle(LOCAL_CRATE, &["cgu"], Some(index))
+}
diff --git a/compiler/rustc_monomorphize/src/partitioning/mod.rs b/compiler/rustc_monomorphize/src/partitioning/mod.rs
new file mode 100644
index 000000000..ff2d38693
--- /dev/null
+++ b/compiler/rustc_monomorphize/src/partitioning/mod.rs
@@ -0,0 +1,515 @@
+//! Partitioning Codegen Units for Incremental Compilation
+//! ======================================================
+//!
+//! The task of this module is to take the complete set of monomorphizations of
+//! a crate and produce a set of codegen units from it, where a codegen unit
+//! is a named set of (mono-item, linkage) pairs. That is, this module
+//! decides which monomorphization appears in which codegen units with which
+//! linkage. The following paragraphs describe some of the background on the
+//! partitioning scheme.
+//!
+//! The most important opportunity for saving on compilation time with
+//! incremental compilation is to avoid re-codegenning and re-optimizing code.
+//! Since the units of codegen and optimization for LLVM are "modules", or, as
+//! we call them, "codegen units", the particulars of how much time can be saved
+//! by incremental compilation are tightly linked to how the output program is
+//! partitioned into these codegen units prior to passing it to LLVM --
+//! especially because we have to treat codegen units as opaque entities once
+//! they are created: There is no way for us to incrementally update an existing
+//! LLVM module and so we have to build any such module from scratch if it was
+//! affected by some change in the source code.
+//!
+//! From that point of view it would make sense to maximize the number of
+//! codegen units by, for example, putting each function into its own module.
+//! That way, only the modules actually affected by some change would have to be
+//! re-compiled, minimizing the number of functions that could have been re-used
+//! but just happened to be located in a module that is re-compiled.
+//!
+//! However, since LLVM optimization does not work across module boundaries,
+//! using such a highly granular partitioning would lead to very slow runtime
+//! code since it would effectively prohibit inlining and other inter-procedural
+//! optimizations. We want to avoid that as much as possible.
+//!
+//! Thus we end up with a trade-off: The bigger the codegen units, the better
+//! LLVM's optimizer can do its work, but also the smaller the compilation time
+//! reduction we get from incremental compilation.
+//!
+//! Ideally, we would create a partitioning such that there are few big codegen
+//! units with few interdependencies between them. For now though, we use the
+//! following heuristic to determine the partitioning:
+//!
+//! - There are two codegen units for every source-level module:
+//!   - One for "stable", that is, non-generic, code
+//!   - One for more "volatile" code, i.e., monomorphized instances of functions
+//!     defined in that module
+//!
+//! In order to see why this heuristic makes sense, let's take a look at when a
+//! codegen unit can get invalidated:
+//!
+//! 1. The most straightforward case is when the BODY of a function or global
+//! changes. Then any codegen unit containing the code for that item has to be
+//! re-compiled. Note that this includes all codegen units where the function
+//! has been inlined.
+//!
+//! 2. The next case is when the SIGNATURE of a function or global changes. In
+//! this case, all codegen units containing a REFERENCE to that item have to be
+//! re-compiled. This is a superset of case 1.
+//!
+//! 3. The final and most subtle case is when a REFERENCE to a generic function
+//! is added or removed somewhere. Even though the definition of the function
+//! might be unchanged, a new REFERENCE might introduce a new monomorphized
+//! instance of this function which has to be placed and compiled somewhere.
+//! Conversely, when removing a REFERENCE, it might have been the last one with
+//! that particular set of generic arguments and thus we have to remove it.
+//!
+//! From the above we see that just using one codegen unit per source-level
+//! module is not such a good idea, since just adding a REFERENCE to some
+//! generic item somewhere else would invalidate everything within the module
+//! containing the generic item. The heuristic above reduces this detrimental
+//! side-effect of references a little by at least not touching the non-generic
+//! code of the module.
+//!
+//! A Note on Inlining
+//! ------------------
+//! As briefly mentioned above, in order for LLVM to be able to inline a
+//! function call, the body of the function has to be available in the LLVM
+//! module where the call is made. This has a few consequences for partitioning:
+//!
+//! - The partitioning algorithm has to take care of placing functions into all
+//!   codegen units where they should be available for inlining. It also has to
+//!   decide on the correct linkage for these functions.
+//!
+//! - The partitioning algorithm has to know which functions are likely to get
+//!   inlined, so it can distribute function instantiations accordingly. Since
+//!   there is no way of knowing for sure which functions LLVM will decide to
+//!   inline in the end, we apply a heuristic here: Only functions marked with
+//!   `#[inline]` are considered for inlining by the partitioner. The current
+//!   implementation will not try to determine if a function is likely to be
+//!   inlined by looking at the function's definition.
+//!
+//! Note though that as a side-effect of creating codegen units per
+//! source-level module, functions from the same module will be available for
+//! inlining, even when they are not marked `#[inline]`.
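+//!
+//! As a rough, hypothetical illustration (module, function, and CGU names are
+//! made up; real CGU names are usually mangled hashes), a module `foo` with a
+//! non-generic `bar()` and a generic `baz<T>()` could end up partitioned like
+//! this:
+//!
+//! ```text
+//! mycrate-foo           <- stable code: foo::bar()
+//! mycrate-foo.volatile  <- volatile code: foo::baz::<u32>(), foo::baz::<String>()
+//! ```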
+ +mod default; +mod merging; + +use rustc_data_structures::fx::{FxHashMap, FxHashSet}; +use rustc_data_structures::sync; +use rustc_hir::def_id::DefIdSet; +use rustc_middle::mir; +use rustc_middle::mir::mono::MonoItem; +use rustc_middle::mir::mono::{CodegenUnit, Linkage}; +use rustc_middle::ty::print::with_no_trimmed_paths; +use rustc_middle::ty::query::Providers; +use rustc_middle::ty::TyCtxt; +use rustc_span::symbol::Symbol; + +use crate::collector::InliningMap; +use crate::collector::{self, MonoItemCollectionMode}; + +pub struct PartitioningCx<'a, 'tcx> { + tcx: TyCtxt<'tcx>, + target_cgu_count: usize, + inlining_map: &'a InliningMap<'tcx>, +} + +trait Partitioner<'tcx> { + fn place_root_mono_items( + &mut self, + cx: &PartitioningCx<'_, 'tcx>, + mono_items: &mut dyn Iterator<Item = MonoItem<'tcx>>, + ) -> PreInliningPartitioning<'tcx>; + + fn merge_codegen_units( + &mut self, + cx: &PartitioningCx<'_, 'tcx>, + initial_partitioning: &mut PreInliningPartitioning<'tcx>, + ); + + fn place_inlined_mono_items( + &mut self, + cx: &PartitioningCx<'_, 'tcx>, + initial_partitioning: PreInliningPartitioning<'tcx>, + ) -> PostInliningPartitioning<'tcx>; + + fn internalize_symbols( + &mut self, + cx: &PartitioningCx<'_, 'tcx>, + partitioning: &mut PostInliningPartitioning<'tcx>, + ); +} + +fn get_partitioner<'tcx>(tcx: TyCtxt<'tcx>) -> Box<dyn Partitioner<'tcx>> { + let strategy = match &tcx.sess.opts.unstable_opts.cgu_partitioning_strategy { + None => "default", + Some(s) => &s[..], + }; + + match strategy { + "default" => Box::new(default::DefaultPartitioning), + _ => tcx.sess.fatal("unknown partitioning strategy"), + } +} + +pub fn partition<'tcx>( + tcx: TyCtxt<'tcx>, + mono_items: &mut dyn Iterator<Item = MonoItem<'tcx>>, + max_cgu_count: usize, + inlining_map: &InliningMap<'tcx>, +) -> Vec<CodegenUnit<'tcx>> { + let _prof_timer = tcx.prof.generic_activity("cgu_partitioning"); + + let mut partitioner = get_partitioner(tcx); + let cx = &PartitioningCx { tcx, target_cgu_count: max_cgu_count, inlining_map }; + // In the first step, we place all regular monomorphizations into their + // respective 'home' codegen unit. Regular monomorphizations are all + // functions and statics defined in the local crate. + let mut initial_partitioning = { + let _prof_timer = tcx.prof.generic_activity("cgu_partitioning_place_roots"); + partitioner.place_root_mono_items(cx, mono_items) + }; + + initial_partitioning.codegen_units.iter_mut().for_each(|cgu| cgu.estimate_size(tcx)); + + debug_dump(tcx, "INITIAL PARTITIONING:", initial_partitioning.codegen_units.iter()); + + // Merge until we have at most `max_cgu_count` codegen units. + { + let _prof_timer = tcx.prof.generic_activity("cgu_partitioning_merge_cgus"); + partitioner.merge_codegen_units(cx, &mut initial_partitioning); + debug_dump(tcx, "POST MERGING:", initial_partitioning.codegen_units.iter()); + } + + // In the next step, we use the inlining map to determine which additional + // monomorphizations have to go into each codegen unit. These additional + // monomorphizations can be drop-glue, functions from external crates, and + // local functions the definition of which is marked with `#[inline]`. 
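+    // (Hypothetical example: if a root function in CGU `A` calls a local
+    // `#[inline]` function `bar`, this step adds a CGU-private copy of `bar`
+    // to `A` with `Internal` linkage.)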
+ let mut post_inlining = { + let _prof_timer = tcx.prof.generic_activity("cgu_partitioning_place_inline_items"); + partitioner.place_inlined_mono_items(cx, initial_partitioning) + }; + + post_inlining.codegen_units.iter_mut().for_each(|cgu| cgu.estimate_size(tcx)); + + debug_dump(tcx, "POST INLINING:", post_inlining.codegen_units.iter()); + + // Next we try to make as many symbols "internal" as possible, so LLVM has + // more freedom to optimize. + if !tcx.sess.link_dead_code() { + let _prof_timer = tcx.prof.generic_activity("cgu_partitioning_internalize_symbols"); + partitioner.internalize_symbols(cx, &mut post_inlining); + } + + let instrument_dead_code = + tcx.sess.instrument_coverage() && !tcx.sess.instrument_coverage_except_unused_functions(); + + if instrument_dead_code { + assert!( + post_inlining.codegen_units.len() > 0, + "There must be at least one CGU that code coverage data can be generated in." + ); + + // Find the smallest CGU that has exported symbols and put the dead + // function stubs in that CGU. We look for exported symbols to increase + // the likelihood the linker won't throw away the dead functions. + // FIXME(#92165): In order to truly resolve this, we need to make sure + // the object file (CGU) containing the dead function stubs is included + // in the final binary. This will probably require forcing these + // function symbols to be included via `-u` or `/include` linker args. + let mut cgus: Vec<_> = post_inlining.codegen_units.iter_mut().collect(); + cgus.sort_by_key(|cgu| cgu.size_estimate()); + + let dead_code_cgu = + if let Some(cgu) = cgus.into_iter().rev().find(|cgu| { + cgu.items().iter().any(|(_, (linkage, _))| *linkage == Linkage::External) + }) { + cgu + } else { + // If there are no CGUs that have externally linked items, + // then we just pick the first CGU as a fallback. + &mut post_inlining.codegen_units[0] + }; + dead_code_cgu.make_code_coverage_dead_code_cgu(); + } + + // Finally, sort by codegen unit name, so that we get deterministic results. + let PostInliningPartitioning { + codegen_units: mut result, + mono_item_placements: _, + internalization_candidates: _, + } = post_inlining; + + result.sort_by(|a, b| a.name().as_str().partial_cmp(b.name().as_str()).unwrap()); + + result +} + +pub struct PreInliningPartitioning<'tcx> { + codegen_units: Vec<CodegenUnit<'tcx>>, + roots: FxHashSet<MonoItem<'tcx>>, + internalization_candidates: FxHashSet<MonoItem<'tcx>>, +} + +/// For symbol internalization, we need to know whether a symbol/mono-item is +/// accessed from outside the codegen unit it is defined in. This type is used +/// to keep track of that. 
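+///
+/// For example (hypothetical names): a monomorphization that only ever ends up
+/// in one codegen unit `A` is tracked as `SingleCgu { cgu_name: A }`, while one
+/// that is copied into both `A` and `B` is tracked as `MultipleCgus` and thus
+/// cannot be internalized.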
+#[derive(Clone, PartialEq, Eq, Debug)] +enum MonoItemPlacement { + SingleCgu { cgu_name: Symbol }, + MultipleCgus, +} + +struct PostInliningPartitioning<'tcx> { + codegen_units: Vec<CodegenUnit<'tcx>>, + mono_item_placements: FxHashMap<MonoItem<'tcx>, MonoItemPlacement>, + internalization_candidates: FxHashSet<MonoItem<'tcx>>, +} + +fn debug_dump<'a, 'tcx, I>(tcx: TyCtxt<'tcx>, label: &str, cgus: I) +where + I: Iterator<Item = &'a CodegenUnit<'tcx>>, + 'tcx: 'a, +{ + let dump = move || { + use std::fmt::Write; + + let s = &mut String::new(); + let _ = writeln!(s, "{}", label); + for cgu in cgus { + let _ = + writeln!(s, "CodegenUnit {} estimated size {} :", cgu.name(), cgu.size_estimate()); + + for (mono_item, linkage) in cgu.items() { + let symbol_name = mono_item.symbol_name(tcx).name; + let symbol_hash_start = symbol_name.rfind('h'); + let symbol_hash = symbol_hash_start.map_or("<no hash>", |i| &symbol_name[i..]); + + let _ = writeln!( + s, + " - {} [{:?}] [{}] estimated size {}", + mono_item, + linkage, + symbol_hash, + mono_item.size_estimate(tcx) + ); + } + + let _ = writeln!(s, ""); + } + + std::mem::take(s) + }; + + debug!("{}", dump()); +} + +#[inline(never)] // give this a place in the profiler +fn assert_symbols_are_distinct<'a, 'tcx, I>(tcx: TyCtxt<'tcx>, mono_items: I) +where + I: Iterator<Item = &'a MonoItem<'tcx>>, + 'tcx: 'a, +{ + let _prof_timer = tcx.prof.generic_activity("assert_symbols_are_distinct"); + + let mut symbols: Vec<_> = + mono_items.map(|mono_item| (mono_item, mono_item.symbol_name(tcx))).collect(); + + symbols.sort_by_key(|sym| sym.1); + + for &[(mono_item1, ref sym1), (mono_item2, ref sym2)] in symbols.array_windows() { + if sym1 == sym2 { + let span1 = mono_item1.local_span(tcx); + let span2 = mono_item2.local_span(tcx); + + // Deterministically select one of the spans for error reporting + let span = match (span1, span2) { + (Some(span1), Some(span2)) => { + Some(if span1.lo().0 > span2.lo().0 { span1 } else { span2 }) + } + (span1, span2) => span1.or(span2), + }; + + let error_message = format!("symbol `{}` is already defined", sym1); + + if let Some(span) = span { + tcx.sess.span_fatal(span, &error_message) + } else { + tcx.sess.fatal(&error_message) + } + } + } +} + +fn collect_and_partition_mono_items<'tcx>( + tcx: TyCtxt<'tcx>, + (): (), +) -> (&'tcx DefIdSet, &'tcx [CodegenUnit<'tcx>]) { + let collection_mode = match tcx.sess.opts.unstable_opts.print_mono_items { + Some(ref s) => { + let mode_string = s.to_lowercase(); + let mode_string = mode_string.trim(); + if mode_string == "eager" { + MonoItemCollectionMode::Eager + } else { + if mode_string != "lazy" { + let message = format!( + "Unknown codegen-item collection mode '{}'. 
\ + Falling back to 'lazy' mode.", + mode_string + ); + tcx.sess.warn(&message); + } + + MonoItemCollectionMode::Lazy + } + } + None => { + if tcx.sess.link_dead_code() { + MonoItemCollectionMode::Eager + } else { + MonoItemCollectionMode::Lazy + } + } + }; + + let (items, inlining_map) = collector::collect_crate_mono_items(tcx, collection_mode); + + tcx.sess.abort_if_errors(); + + let (codegen_units, _) = tcx.sess.time("partition_and_assert_distinct_symbols", || { + sync::join( + || { + let mut codegen_units = partition( + tcx, + &mut items.iter().cloned(), + tcx.sess.codegen_units(), + &inlining_map, + ); + codegen_units[0].make_primary(); + &*tcx.arena.alloc_from_iter(codegen_units) + }, + || assert_symbols_are_distinct(tcx, items.iter()), + ) + }); + + if tcx.prof.enabled() { + // Record CGU size estimates for self-profiling. + for cgu in codegen_units { + tcx.prof.artifact_size( + "codegen_unit_size_estimate", + cgu.name().as_str(), + cgu.size_estimate() as u64, + ); + } + } + + let mono_items: DefIdSet = items + .iter() + .filter_map(|mono_item| match *mono_item { + MonoItem::Fn(ref instance) => Some(instance.def_id()), + MonoItem::Static(def_id) => Some(def_id), + _ => None, + }) + .collect(); + + if tcx.sess.opts.unstable_opts.print_mono_items.is_some() { + let mut item_to_cgus: FxHashMap<_, Vec<_>> = Default::default(); + + for cgu in codegen_units { + for (&mono_item, &linkage) in cgu.items() { + item_to_cgus.entry(mono_item).or_default().push((cgu.name(), linkage)); + } + } + + let mut item_keys: Vec<_> = items + .iter() + .map(|i| { + let mut output = with_no_trimmed_paths!(i.to_string()); + output.push_str(" @@"); + let mut empty = Vec::new(); + let cgus = item_to_cgus.get_mut(i).unwrap_or(&mut empty); + cgus.sort_by_key(|(name, _)| *name); + cgus.dedup(); + for &(ref cgu_name, (linkage, _)) in cgus.iter() { + output.push(' '); + output.push_str(cgu_name.as_str()); + + let linkage_abbrev = match linkage { + Linkage::External => "External", + Linkage::AvailableExternally => "Available", + Linkage::LinkOnceAny => "OnceAny", + Linkage::LinkOnceODR => "OnceODR", + Linkage::WeakAny => "WeakAny", + Linkage::WeakODR => "WeakODR", + Linkage::Appending => "Appending", + Linkage::Internal => "Internal", + Linkage::Private => "Private", + Linkage::ExternalWeak => "ExternalWeak", + Linkage::Common => "Common", + }; + + output.push('['); + output.push_str(linkage_abbrev); + output.push(']'); + } + output + }) + .collect(); + + item_keys.sort(); + + for item in item_keys { + println!("MONO_ITEM {}", item); + } + } + + (tcx.arena.alloc(mono_items), codegen_units) +} + +fn codegened_and_inlined_items<'tcx>(tcx: TyCtxt<'tcx>, (): ()) -> &'tcx DefIdSet { + let (items, cgus) = tcx.collect_and_partition_mono_items(()); + let mut visited = DefIdSet::default(); + let mut result = items.clone(); + + for cgu in cgus { + for (item, _) in cgu.items() { + if let MonoItem::Fn(ref instance) = item { + let did = instance.def_id(); + if !visited.insert(did) { + continue; + } + let body = tcx.instance_mir(instance.def); + for block in body.basic_blocks() { + for statement in &block.statements { + let mir::StatementKind::Coverage(_) = statement.kind else { continue }; + let scope = statement.source_info.scope; + if let Some(inlined) = scope.inlined_instance(&body.source_scopes) { + result.insert(inlined.def_id()); + } + } + } + } + } + } + + tcx.arena.alloc(result) +} + +pub fn provide(providers: &mut Providers) { + providers.collect_and_partition_mono_items = collect_and_partition_mono_items; + 
providers.codegened_and_inlined_items = codegened_and_inlined_items;
+
+    providers.is_codegened_item = |tcx, def_id| {
+        let (all_mono_items, _) = tcx.collect_and_partition_mono_items(());
+        all_mono_items.contains(&def_id)
+    };
+
+    providers.codegen_unit = |tcx, name| {
+        let (_, all) = tcx.collect_and_partition_mono_items(());
+        all.iter()
+            .find(|cgu| cgu.name() == name)
+            .unwrap_or_else(|| panic!("failed to find cgu with name {:?}", name))
+    };
+}
diff --git a/compiler/rustc_monomorphize/src/polymorphize.rs b/compiler/rustc_monomorphize/src/polymorphize.rs
new file mode 100644
index 000000000..394843e51
--- /dev/null
+++ b/compiler/rustc_monomorphize/src/polymorphize.rs
@@ -0,0 +1,385 @@
+//! Polymorphization Analysis
+//! =========================
+//!
+//! This module implements an analysis of functions, methods and closures to determine which
+//! generic parameters are unused (and eventually, in what ways generic parameters are used - only
+//! for their size, offset of a field, etc.).
+
+use rustc_hir::{def::DefKind, def_id::DefId, ConstContext};
+use rustc_index::bit_set::FiniteBitSet;
+use rustc_middle::mir::{
+    visit::{TyContext, Visitor},
+    Local, LocalDecl, Location,
+};
+use rustc_middle::ty::{
+    self,
+    query::Providers,
+    subst::SubstsRef,
+    visit::{TypeSuperVisitable, TypeVisitable, TypeVisitor},
+    Const, Ty, TyCtxt,
+};
+use rustc_span::symbol::sym;
+use std::convert::TryInto;
+use std::ops::ControlFlow;
+
+/// Provide implementations of queries relating to polymorphization analysis.
+pub fn provide(providers: &mut Providers) {
+    providers.unused_generic_params = unused_generic_params;
+}
+
+/// Determine which generic parameters are used by the instance.
+///
+/// Returns a bitset where bits representing unused parameters are set (`is_empty` indicates all
+/// parameters are used).
+#[instrument(level = "debug", skip(tcx))]
+fn unused_generic_params<'tcx>(
+    tcx: TyCtxt<'tcx>,
+    instance: ty::InstanceDef<'tcx>,
+) -> FiniteBitSet<u32> {
+    if !tcx.sess.opts.unstable_opts.polymorphize {
+        // If polymorphization is disabled, then all parameters are used.
+        return FiniteBitSet::new_empty();
+    }
+
+    let def_id = instance.def_id();
+    // Exit early if this instance should not be polymorphized.
+    if !should_polymorphize(tcx, def_id, instance) {
+        return FiniteBitSet::new_empty();
+    }
+
+    let generics = tcx.generics_of(def_id);
+    debug!(?generics);
+
+    // Exit early when there are no parameters that could be unused.
+    if generics.count() == 0 {
+        return FiniteBitSet::new_empty();
+    }
+
+    // Create a bitset with the N rightmost bits set, one for each parameter
+    // (all parameters start out considered unused).
+    let generics_count: u32 =
+        generics.count().try_into().expect("more generic parameters than can fit into a `u32`");
+    let mut unused_parameters = FiniteBitSet::<u32>::new_empty();
+    unused_parameters.set_range(0..generics_count);
+    debug!(?unused_parameters, "(start)");
+
+    mark_used_by_default_parameters(tcx, def_id, generics, &mut unused_parameters);
+    debug!(?unused_parameters, "(after default)");
+
+    // Visit MIR and accumulate used generic parameters.
+    let body = match tcx.hir().body_const_context(def_id.expect_local()) {
+        // Const functions are actually called and should thus be considered for polymorphization
+        // via their runtime MIR.
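+        // (That is: for `const fn` and regular functions we analyze the
+        // optimized runtime MIR below, while for items in other const
+        // contexts, e.g. a `const` or `static` initializer, we analyze the
+        // CTFE MIR instead.)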
+        Some(ConstContext::ConstFn) | None => tcx.optimized_mir(def_id),
+        Some(_) => tcx.mir_for_ctfe(def_id),
+    };
+    let mut vis = MarkUsedGenericParams { tcx, def_id, unused_parameters: &mut unused_parameters };
+    vis.visit_body(body);
+    debug!(?unused_parameters, "(end)");
+
+    // Emit errors for debugging and testing if enabled.
+    if !unused_parameters.is_empty() {
+        emit_unused_generic_params_error(tcx, def_id, generics, &unused_parameters);
+    }
+
+    unused_parameters
+}
+
+/// Returns `true` if the instance should be polymorphized.
+fn should_polymorphize<'tcx>(
+    tcx: TyCtxt<'tcx>,
+    def_id: DefId,
+    instance: ty::InstanceDef<'tcx>,
+) -> bool {
+    // If an instance's MIR body is not polymorphic then the modified substitutions that are
+    // derived from polymorphization's result won't make any difference.
+    if !instance.has_polymorphic_mir_body() {
+        return false;
+    }
+
+    // Don't polymorphize intrinsics or virtual calls - calling `instance_mir` will panic.
+    if matches!(instance, ty::InstanceDef::Intrinsic(..) | ty::InstanceDef::Virtual(..)) {
+        return false;
+    }
+
+    // Polymorphization results are stored in cross-crate metadata only when there are unused
+    // parameters, so assume that non-local items must have only used parameters (else this query
+    // would not be invoked, and the cross-crate metadata used instead).
+    if !def_id.is_local() {
+        return false;
+    }
+
+    // Foreign items have no bodies to analyze.
+    if tcx.is_foreign_item(def_id) {
+        return false;
+    }
+
+    // Make sure there is MIR available.
+    match tcx.hir().body_const_context(def_id.expect_local()) {
+        Some(ConstContext::ConstFn) | None if !tcx.is_mir_available(def_id) => {
+            debug!("no mir available");
+            return false;
+        }
+        Some(_) if !tcx.is_ctfe_mir_available(def_id) => {
+            debug!("no ctfe mir available");
+            return false;
+        }
+        _ => true,
+    }
+}
+
+/// Some parameters are considered used-by-default, such as non-generic parameters and the dummy
+/// generic parameters from closures; this function marks them as used.
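+///
+/// For example (hypothetical): given `fn foo<'a, T>(..)`, the lifetime `'a` is
+/// marked used here, since lifetime parameters are always used-by-default,
+/// whereas `T` is only marked used if the MIR visitor later finds a use of it.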
+#[instrument(level = "debug", skip(tcx, def_id, generics, unused_parameters))] +fn mark_used_by_default_parameters<'tcx>( + tcx: TyCtxt<'tcx>, + def_id: DefId, + generics: &'tcx ty::Generics, + unused_parameters: &mut FiniteBitSet<u32>, +) { + match tcx.def_kind(def_id) { + DefKind::Closure | DefKind::Generator => { + for param in &generics.params { + debug!(?param, "(closure/gen)"); + unused_parameters.clear(param.index); + } + } + DefKind::Mod + | DefKind::Struct + | DefKind::Union + | DefKind::Enum + | DefKind::Variant + | DefKind::Trait + | DefKind::TyAlias + | DefKind::ForeignTy + | DefKind::TraitAlias + | DefKind::AssocTy + | DefKind::TyParam + | DefKind::Fn + | DefKind::Const + | DefKind::ConstParam + | DefKind::Static(_) + | DefKind::Ctor(_, _) + | DefKind::AssocFn + | DefKind::AssocConst + | DefKind::Macro(_) + | DefKind::ExternCrate + | DefKind::Use + | DefKind::ForeignMod + | DefKind::AnonConst + | DefKind::InlineConst + | DefKind::OpaqueTy + | DefKind::Field + | DefKind::LifetimeParam + | DefKind::GlobalAsm + | DefKind::Impl => { + for param in &generics.params { + debug!(?param, "(other)"); + if let ty::GenericParamDefKind::Lifetime = param.kind { + unused_parameters.clear(param.index); + } + } + } + } + + if let Some(parent) = generics.parent { + mark_used_by_default_parameters(tcx, parent, tcx.generics_of(parent), unused_parameters); + } +} + +/// Emit errors for the function annotated by `#[rustc_polymorphize_error]`, labelling each generic +/// parameter which was unused. +#[instrument(level = "debug", skip(tcx, generics))] +fn emit_unused_generic_params_error<'tcx>( + tcx: TyCtxt<'tcx>, + def_id: DefId, + generics: &'tcx ty::Generics, + unused_parameters: &FiniteBitSet<u32>, +) { + let base_def_id = tcx.typeck_root_def_id(def_id); + if !tcx.has_attr(base_def_id, sym::rustc_polymorphize_error) { + return; + } + + let fn_span = match tcx.opt_item_ident(def_id) { + Some(ident) => ident.span, + _ => tcx.def_span(def_id), + }; + + let mut err = tcx.sess.struct_span_err(fn_span, "item has unused generic parameters"); + + let mut next_generics = Some(generics); + while let Some(generics) = next_generics { + for param in &generics.params { + if unused_parameters.contains(param.index).unwrap_or(false) { + debug!(?param); + let def_span = tcx.def_span(param.def_id); + err.span_label(def_span, &format!("generic parameter `{}` is unused", param.name)); + } + } + + next_generics = generics.parent.map(|did| tcx.generics_of(did)); + } + + err.emit(); +} + +/// Visitor used to aggregate generic parameter uses. +struct MarkUsedGenericParams<'a, 'tcx> { + tcx: TyCtxt<'tcx>, + def_id: DefId, + unused_parameters: &'a mut FiniteBitSet<u32>, +} + +impl<'a, 'tcx> MarkUsedGenericParams<'a, 'tcx> { + /// Invoke `unused_generic_params` on a body contained within the current item (e.g. + /// a closure, generator or constant). 
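+    /// Only the substitutions of parameters that the child body itself uses are
+    /// visited, so a parent parameter that appears solely in the substitution of
+    /// an unused child parameter is not spuriously marked as used.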
+ #[instrument(level = "debug", skip(self, def_id, substs))] + fn visit_child_body(&mut self, def_id: DefId, substs: SubstsRef<'tcx>) { + let instance = ty::InstanceDef::Item(ty::WithOptConstParam::unknown(def_id)); + let unused = self.tcx.unused_generic_params(instance); + debug!(?self.unused_parameters, ?unused); + for (i, arg) in substs.iter().enumerate() { + let i = i.try_into().unwrap(); + if !unused.contains(i).unwrap_or(false) { + arg.visit_with(self); + } + } + debug!(?self.unused_parameters); + } +} + +impl<'a, 'tcx> Visitor<'tcx> for MarkUsedGenericParams<'a, 'tcx> { + #[instrument(level = "debug", skip(self, local))] + fn visit_local_decl(&mut self, local: Local, local_decl: &LocalDecl<'tcx>) { + if local == Local::from_usize(1) { + let def_kind = self.tcx.def_kind(self.def_id); + if matches!(def_kind, DefKind::Closure | DefKind::Generator) { + // Skip visiting the closure/generator that is currently being processed. This only + // happens because the first argument to the closure is a reference to itself and + // that will call `visit_substs`, resulting in each generic parameter captured being + // considered used by default. + debug!("skipping closure substs"); + return; + } + } + + self.super_local_decl(local, local_decl); + } + + fn visit_const(&mut self, c: Const<'tcx>, _: Location) { + c.visit_with(self); + } + + fn visit_ty(&mut self, ty: Ty<'tcx>, _: TyContext) { + ty.visit_with(self); + } +} + +impl<'a, 'tcx> TypeVisitor<'tcx> for MarkUsedGenericParams<'a, 'tcx> { + #[instrument(level = "debug", skip(self))] + fn visit_const(&mut self, c: Const<'tcx>) -> ControlFlow<Self::BreakTy> { + if !c.has_param_types_or_consts() { + return ControlFlow::CONTINUE; + } + + match c.kind() { + ty::ConstKind::Param(param) => { + debug!(?param); + self.unused_parameters.clear(param.index); + ControlFlow::CONTINUE + } + ty::ConstKind::Unevaluated(ty::Unevaluated { def, substs: _, promoted: Some(p)}) + // Avoid considering `T` unused when constants are of the form: + // `<Self as Foo<T>>::foo::promoted[p]` + if self.def_id == def.did && !self.tcx.generics_of(def.did).has_self => + { + // If there is a promoted, don't look at the substs - since it will always contain + // the generic parameters, instead, traverse the promoted MIR. + let promoted = self.tcx.promoted_mir(def.did); + self.visit_body(&promoted[p]); + ControlFlow::CONTINUE + } + ty::ConstKind::Unevaluated(uv) + if matches!(self.tcx.def_kind(uv.def.did), DefKind::AnonConst | DefKind::InlineConst) => + { + self.visit_child_body(uv.def.did, uv.substs); + ControlFlow::CONTINUE + } + _ => c.super_visit_with(self), + } + } + + #[instrument(level = "debug", skip(self))] + fn visit_ty(&mut self, ty: Ty<'tcx>) -> ControlFlow<Self::BreakTy> { + if !ty.has_param_types_or_consts() { + return ControlFlow::CONTINUE; + } + + match *ty.kind() { + ty::Closure(def_id, substs) | ty::Generator(def_id, substs, ..) => { + debug!(?def_id); + // Avoid cycle errors with generators. + if def_id == self.def_id { + return ControlFlow::CONTINUE; + } + + // Consider any generic parameters used by any closures/generators as used in the + // parent. + self.visit_child_body(def_id, substs); + ControlFlow::CONTINUE + } + ty::Param(param) => { + debug!(?param); + self.unused_parameters.clear(param.index); + ControlFlow::CONTINUE + } + _ => ty.super_visit_with(self), + } + } +} + +/// Visitor used to check if a generic parameter is used. 
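+/// The visitor walks a type or constant and breaks as soon as it encounters a
+/// generic parameter that is *not* in the given `unused_parameters` set, i.e.
+/// one that is considered used; if none is found it runs to completion.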
+struct HasUsedGenericParams<'a> {
+    unused_parameters: &'a FiniteBitSet<u32>,
+}
+
+impl<'a, 'tcx> TypeVisitor<'tcx> for HasUsedGenericParams<'a> {
+    type BreakTy = ();
+
+    #[instrument(level = "debug", skip(self))]
+    fn visit_const(&mut self, c: Const<'tcx>) -> ControlFlow<Self::BreakTy> {
+        if !c.has_param_types_or_consts() {
+            return ControlFlow::CONTINUE;
+        }
+
+        match c.kind() {
+            ty::ConstKind::Param(param) => {
+                if self.unused_parameters.contains(param.index).unwrap_or(false) {
+                    ControlFlow::CONTINUE
+                } else {
+                    ControlFlow::BREAK
+                }
+            }
+            _ => c.super_visit_with(self),
+        }
+    }
+
+    #[instrument(level = "debug", skip(self))]
+    fn visit_ty(&mut self, ty: Ty<'tcx>) -> ControlFlow<Self::BreakTy> {
+        if !ty.has_param_types_or_consts() {
+            return ControlFlow::CONTINUE;
+        }
+
+        match ty.kind() {
+            ty::Param(param) => {
+                if self.unused_parameters.contains(param.index).unwrap_or(false) {
+                    ControlFlow::CONTINUE
+                } else {
+                    ControlFlow::BREAK
+                }
+            }
+            _ => ty.super_visit_with(self),
+        }
+    }
+}
diff --git a/compiler/rustc_monomorphize/src/util.rs b/compiler/rustc_monomorphize/src/util.rs
new file mode 100644
index 000000000..847e64dc2
--- /dev/null
+++ b/compiler/rustc_monomorphize/src/util.rs
@@ -0,0 +1,70 @@
+use rustc_middle::ty::{self, ClosureSizeProfileData, Instance, TyCtxt};
+use std::fs::OpenOptions;
+use std::io::prelude::*;
+
+/// For a given closure, writes out the data for profiling the impact of RFC 2229 on
+/// closure size into a CSV.
+///
+/// During the same compile, all closures dump the information into the same file
+/// "closure_profile_XXXXX.csv", which is created in the directory where the compiler is invoked.
+pub(crate) fn dump_closure_profile<'tcx>(tcx: TyCtxt<'tcx>, closure_instance: Instance<'tcx>) {
+    let Ok(mut file) = OpenOptions::new()
+        .create(true)
+        .append(true)
+        .open(&format!("closure_profile_{}.csv", std::process::id()))
+    else {
+        eprintln!("Couldn't open file for writing closure profile");
+        return;
+    };
+
+    let closure_def_id = closure_instance.def_id().expect_local();
+    let typeck_results = tcx.typeck(closure_def_id);
+
+    if typeck_results.closure_size_eval.contains_key(&closure_def_id) {
+        let param_env = ty::ParamEnv::reveal_all();
+
+        let ClosureSizeProfileData { before_feature_tys, after_feature_tys } =
+            typeck_results.closure_size_eval[&closure_def_id];
+
+        let before_feature_tys = tcx.subst_and_normalize_erasing_regions(
+            closure_instance.substs,
+            param_env,
+            before_feature_tys,
+        );
+        let after_feature_tys = tcx.subst_and_normalize_erasing_regions(
+            closure_instance.substs,
+            param_env,
+            after_feature_tys,
+        );
+
+        let new_size = tcx
+            .layout_of(param_env.and(after_feature_tys))
+            .map(|l| format!("{:?}", l.size.bytes()))
+            .unwrap_or_else(|e| format!("Failed {:?}", e));
+
+        let old_size = tcx
+            .layout_of(param_env.and(before_feature_tys))
+            .map(|l| format!("{:?}", l.size.bytes()))
+            .unwrap_or_else(|e| format!("Failed {:?}", e));
+
+        let closure_span = tcx.def_span(closure_def_id);
+        let src_file = tcx.sess.source_map().span_to_filename(closure_span);
+        let line_nos = tcx
+            .sess
+            .source_map()
+            .span_to_lines(closure_span)
+            .map(|l| format!("{:?} {:?}", l.lines.first(), l.lines.last()))
+            .unwrap_or_else(|e| format!("{:?}", e));
+
+        if let Err(e) = writeln!(
+            file,
+            "{}, {}, {}, {:?}",
+            old_size,
+            new_size,
+            src_file.prefer_local(),
+            line_nos
+        ) {
+            eprintln!("Error writing to file: {}", e)
+        }
+    }
+}