Diffstat (limited to 'compiler/rustc_mir_transform/src')
-rw-r--r--  compiler/rustc_mir_transform/src/abort_unwinding_calls.rs | 1
-rw-r--r--  compiler/rustc_mir_transform/src/add_call_guards.rs | 1
-rw-r--r--  compiler/rustc_mir_transform/src/add_moves_for_packed_drops.rs | 1
-rw-r--r--  compiler/rustc_mir_transform/src/add_retag.rs | 1
-rw-r--r--  compiler/rustc_mir_transform/src/add_subtyping_projections.rs | 1
-rw-r--r--  compiler/rustc_mir_transform/src/check_alignment.rs | 1
-rw-r--r--  compiler/rustc_mir_transform/src/check_const_item_mutation.rs | 11
-rw-r--r--  compiler/rustc_mir_transform/src/check_packed_ref.rs | 2
-rw-r--r--  compiler/rustc_mir_transform/src/check_unsafety.rs | 43
-rw-r--r--  compiler/rustc_mir_transform/src/const_goto.rs | 1
-rw-r--r--  compiler/rustc_mir_transform/src/const_prop.rs | 14
-rw-r--r--  compiler/rustc_mir_transform/src/const_prop_lint.rs | 6
-rw-r--r--  compiler/rustc_mir_transform/src/copy_prop.rs | 5
-rw-r--r--  compiler/rustc_mir_transform/src/coroutine.rs | 370
-rw-r--r--  compiler/rustc_mir_transform/src/cost_checker.rs | 4
-rw-r--r--  compiler/rustc_mir_transform/src/coverage/counters.rs | 315
-rw-r--r--  compiler/rustc_mir_transform/src/coverage/graph.rs | 69
-rw-r--r--  compiler/rustc_mir_transform/src/coverage/mod.rs | 176
-rw-r--r--  compiler/rustc_mir_transform/src/coverage/query.rs | 4
-rw-r--r--  compiler/rustc_mir_transform/src/coverage/spans.rs | 203
-rw-r--r--  compiler/rustc_mir_transform/src/coverage/spans/from_mir.rs | 40
-rw-r--r--  compiler/rustc_mir_transform/src/coverage/test_macros/Cargo.toml | 7
-rw-r--r--  compiler/rustc_mir_transform/src/coverage/test_macros/src/lib.rs | 6
-rw-r--r--  compiler/rustc_mir_transform/src/coverage/tests.rs | 92
-rw-r--r--  compiler/rustc_mir_transform/src/cross_crate_inline.rs | 19
-rw-r--r--  compiler/rustc_mir_transform/src/ctfe_limit.rs | 2
-rw-r--r--  compiler/rustc_mir_transform/src/dataflow_const_prop.rs | 15
-rw-r--r--  compiler/rustc_mir_transform/src/deduplicate_blocks.rs | 2
-rw-r--r--  compiler/rustc_mir_transform/src/deref_separator.rs | 1
-rw-r--r--  compiler/rustc_mir_transform/src/elaborate_box_derefs.rs | 1
-rw-r--r--  compiler/rustc_mir_transform/src/elaborate_drops.rs | 60
-rw-r--r--  compiler/rustc_mir_transform/src/errors.rs | 155
-rw-r--r--  compiler/rustc_mir_transform/src/function_item_references.rs | 6
-rw-r--r--  compiler/rustc_mir_transform/src/gvn.rs | 29
-rw-r--r--  compiler/rustc_mir_transform/src/inline.rs | 300
-rw-r--r--  compiler/rustc_mir_transform/src/instsimplify.rs | 9
-rw-r--r--  compiler/rustc_mir_transform/src/jump_threading.rs | 17
-rw-r--r--  compiler/rustc_mir_transform/src/large_enums.rs | 1
-rw-r--r--  compiler/rustc_mir_transform/src/lib.rs | 13
-rw-r--r--  compiler/rustc_mir_transform/src/lower_intrinsics.rs | 33
-rw-r--r--  compiler/rustc_mir_transform/src/lower_slice_len.rs | 1
-rw-r--r--  compiler/rustc_mir_transform/src/match_branches.rs | 1
-rw-r--r--  compiler/rustc_mir_transform/src/multiple_return_terminators.rs | 2
-rw-r--r--  compiler/rustc_mir_transform/src/normalize_array_len.rs | 1
-rw-r--r--  compiler/rustc_mir_transform/src/pass_manager.rs | 6
-rw-r--r--  compiler/rustc_mir_transform/src/prettify.rs | 1
-rw-r--r--  compiler/rustc_mir_transform/src/ref_prop.rs | 1
-rw-r--r--  compiler/rustc_mir_transform/src/remove_noop_landing_pads.rs | 1
-rw-r--r--  compiler/rustc_mir_transform/src/remove_place_mention.rs | 1
-rw-r--r--  compiler/rustc_mir_transform/src/remove_storage_markers.rs | 1
-rw-r--r--  compiler/rustc_mir_transform/src/remove_uninit_drops.rs | 6
-rw-r--r--  compiler/rustc_mir_transform/src/remove_unneeded_drops.rs | 1
-rw-r--r--  compiler/rustc_mir_transform/src/remove_zsts.rs | 12
-rw-r--r--  compiler/rustc_mir_transform/src/reveal_all.rs | 1
-rw-r--r--  compiler/rustc_mir_transform/src/separate_const_switch.rs | 1
-rw-r--r--  compiler/rustc_mir_transform/src/shim.rs | 10
-rw-r--r--  compiler/rustc_mir_transform/src/simplify.rs | 3
-rw-r--r--  compiler/rustc_mir_transform/src/simplify_branches.rs | 1
-rw-r--r--  compiler/rustc_mir_transform/src/sroa.rs | 3
-rw-r--r--  compiler/rustc_mir_transform/src/ssa.rs | 55
-rw-r--r--  compiler/rustc_mir_transform/src/uninhabited_enum_branching.rs | 2
-rw-r--r--  compiler/rustc_mir_transform/src/unreachable_prop.rs | 1
62 files changed, 1098 insertions(+), 1051 deletions(-)
diff --git a/compiler/rustc_mir_transform/src/abort_unwinding_calls.rs b/compiler/rustc_mir_transform/src/abort_unwinding_calls.rs
index 2b3d423ea..dfc7a9891 100644
--- a/compiler/rustc_mir_transform/src/abort_unwinding_calls.rs
+++ b/compiler/rustc_mir_transform/src/abort_unwinding_calls.rs
@@ -1,4 +1,3 @@
-use crate::MirPass;
use rustc_ast::InlineAsmOptions;
use rustc_middle::mir::*;
use rustc_middle::ty::layout;
diff --git a/compiler/rustc_mir_transform/src/add_call_guards.rs b/compiler/rustc_mir_transform/src/add_call_guards.rs
index b814fbf32..a47c8d94b 100644
--- a/compiler/rustc_mir_transform/src/add_call_guards.rs
+++ b/compiler/rustc_mir_transform/src/add_call_guards.rs
@@ -1,4 +1,3 @@
-use crate::MirPass;
use rustc_index::{Idx, IndexVec};
use rustc_middle::mir::*;
use rustc_middle::ty::TyCtxt;
diff --git a/compiler/rustc_mir_transform/src/add_moves_for_packed_drops.rs b/compiler/rustc_mir_transform/src/add_moves_for_packed_drops.rs
index ef2a0c790..de6d20ae3 100644
--- a/compiler/rustc_mir_transform/src/add_moves_for_packed_drops.rs
+++ b/compiler/rustc_mir_transform/src/add_moves_for_packed_drops.rs
@@ -2,7 +2,6 @@ use rustc_middle::mir::*;
use rustc_middle::ty::TyCtxt;
use crate::util;
-use crate::MirPass;
use rustc_middle::mir::patch::MirPatch;
/// This pass moves values being dropped that are within a packed
diff --git a/compiler/rustc_mir_transform/src/add_retag.rs b/compiler/rustc_mir_transform/src/add_retag.rs
index 75473ca53..94077c630 100644
--- a/compiler/rustc_mir_transform/src/add_retag.rs
+++ b/compiler/rustc_mir_transform/src/add_retag.rs
@@ -4,7 +4,6 @@
//! of MIR building, and only after this pass we think of the program as having the
//! normal MIR semantics.
-use crate::MirPass;
use rustc_middle::mir::*;
use rustc_middle::ty::{self, Ty, TyCtxt};
diff --git a/compiler/rustc_mir_transform/src/add_subtyping_projections.rs b/compiler/rustc_mir_transform/src/add_subtyping_projections.rs
index e5be7c0ca..04204c68f 100644
--- a/compiler/rustc_mir_transform/src/add_subtyping_projections.rs
+++ b/compiler/rustc_mir_transform/src/add_subtyping_projections.rs
@@ -1,4 +1,3 @@
-use crate::MirPass;
use rustc_index::IndexVec;
use rustc_middle::mir::patch::MirPatch;
use rustc_middle::mir::visit::MutVisitor;
diff --git a/compiler/rustc_mir_transform/src/check_alignment.rs b/compiler/rustc_mir_transform/src/check_alignment.rs
index 42b2f1886..9eec724ef 100644
--- a/compiler/rustc_mir_transform/src/check_alignment.rs
+++ b/compiler/rustc_mir_transform/src/check_alignment.rs
@@ -1,4 +1,3 @@
-use crate::MirPass;
use rustc_hir::lang_items::LangItem;
use rustc_index::IndexVec;
use rustc_middle::mir::*;
diff --git a/compiler/rustc_mir_transform/src/check_const_item_mutation.rs b/compiler/rustc_mir_transform/src/check_const_item_mutation.rs
index 61bf530f1..3195cd362 100644
--- a/compiler/rustc_mir_transform/src/check_const_item_mutation.rs
+++ b/compiler/rustc_mir_transform/src/check_const_item_mutation.rs
@@ -13,7 +13,7 @@ pub struct CheckConstItemMutation;
impl<'tcx> MirLint<'tcx> for CheckConstItemMutation {
fn run_lint(&self, tcx: TyCtxt<'tcx>, body: &Body<'tcx>) {
let mut checker = ConstMutationChecker { body, tcx, target_local: None };
- checker.visit_body(&body);
+ checker.visit_body(body);
}
}
@@ -98,7 +98,7 @@ impl<'tcx> Visitor<'tcx> for ConstMutationChecker<'_, 'tcx> {
if !lhs.projection.is_empty() {
if let Some(def_id) = self.is_const_item_without_destructor(lhs.local)
&& let Some((lint_root, span, item)) =
- self.should_lint_const_item_usage(&lhs, def_id, loc)
+ self.should_lint_const_item_usage(lhs, def_id, loc)
{
self.tcx.emit_spanned_lint(
CONST_ITEM_MUTATION,
@@ -132,12 +132,7 @@ impl<'tcx> Visitor<'tcx> for ConstMutationChecker<'_, 'tcx> {
// the `self` parameter of a method call (as the terminator of our current
// BasicBlock). If so, we emit a more specific lint.
let method_did = self.target_local.and_then(|target_local| {
- rustc_middle::util::find_self_call(
- self.tcx,
- &self.body,
- target_local,
- loc.block,
- )
+ rustc_middle::util::find_self_call(self.tcx, self.body, target_local, loc.block)
});
let lint_loc =
if method_did.is_some() { self.body.terminator_loc(loc.block) } else { loc };
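
Note: this `visit_body(&body)` -> `visit_body(body)` cleanup recurs throughout the patch (see `check_packed_ref.rs`, `check_unsafety.rs`, and `const_prop.rs` below). Since `body` already has type `&Body<'tcx>`, taking another reference produced a `&&Body` that only compiled via deref coercion. A minimal standalone sketch of the coercion, using toy types rather than rustc's real `Visitor` trait:

    struct Body;

    trait Visitor {
        fn visit_body(&mut self, body: &Body);
    }

    struct Checker;

    impl Visitor for Checker {
        fn visit_body(&mut self, _body: &Body) {}
    }

    fn run_lint(body: &Body) {
        let mut checker = Checker;
        // `&body` here would be a `&&Body`, accepted only through deref
        // coercion; passing `body` directly avoids the redundant borrow.
        checker.visit_body(body);
    }

    fn main() {
        run_lint(&Body);
    }
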
diff --git a/compiler/rustc_mir_transform/src/check_packed_ref.rs b/compiler/rustc_mir_transform/src/check_packed_ref.rs
index 9ee0a7040..77bcba50a 100644
--- a/compiler/rustc_mir_transform/src/check_packed_ref.rs
+++ b/compiler/rustc_mir_transform/src/check_packed_ref.rs
@@ -12,7 +12,7 @@ impl<'tcx> MirLint<'tcx> for CheckPackedRef {
let param_env = tcx.param_env(body.source.def_id());
let source_info = SourceInfo::outermost(body.span);
let mut checker = PackedRefChecker { body, tcx, param_env, source_info };
- checker.visit_body(&body);
+ checker.visit_body(body);
}
}
diff --git a/compiler/rustc_mir_transform/src/check_unsafety.rs b/compiler/rustc_mir_transform/src/check_unsafety.rs
index 8872f9a97..f246de55c 100644
--- a/compiler/rustc_mir_transform/src/check_unsafety.rs
+++ b/compiler/rustc_mir_transform/src/check_unsafety.rs
@@ -242,7 +242,7 @@ impl<'tcx> Visitor<'tcx> for UnsafetyChecker<'_, 'tcx> {
let assigned_ty = place.ty(&self.body.local_decls, self.tcx).ty;
if assigned_ty.needs_drop(self.tcx, self.param_env) {
// This would be unsafe, but should be outright impossible since we reject such unions.
- self.tcx.sess.delay_span_bug(
+ self.tcx.sess.span_delayed_bug(
self.source_info.span,
format!("union fields that need dropping should be impossible: {assigned_ty}")
);
@@ -287,19 +287,20 @@ impl<'tcx> UnsafetyChecker<'_, 'tcx> {
.safety;
match safety {
// `unsafe` blocks are required in safe code
- Safety::Safe => violations.into_iter().for_each(|&violation| {
+ Safety::Safe => violations.into_iter().for_each(|violation| {
match violation.kind {
UnsafetyViolationKind::General => {}
UnsafetyViolationKind::UnsafeFn => {
bug!("`UnsafetyViolationKind::UnsafeFn` in a `Safe` context")
}
}
- if !self.violations.contains(&violation) {
- self.violations.push(violation)
+ if !self.violations.contains(violation) {
+ self.violations.push(violation.clone())
}
}),
// With the RFC 2585, no longer allow `unsafe` operations in `unsafe fn`s
- Safety::FnUnsafe => violations.into_iter().for_each(|&(mut violation)| {
+ Safety::FnUnsafe => violations.into_iter().for_each(|violation| {
+ let mut violation = violation.clone();
violation.kind = UnsafetyViolationKind::UnsafeFn;
if !self.violations.contains(&violation) {
self.violations.push(violation)
@@ -367,9 +368,22 @@ impl<'tcx> UnsafetyChecker<'_, 'tcx> {
// Is `callee_features` a subset of `calling_features`?
if !callee_features.iter().all(|feature| self_features.contains(feature)) {
+ let missing: Vec<_> = callee_features
+ .iter()
+ .copied()
+ .filter(|feature| !self_features.contains(feature))
+ .collect();
+ let build_enabled = self
+ .tcx
+ .sess
+ .target_features
+ .iter()
+ .copied()
+ .filter(|feature| missing.contains(feature))
+ .collect();
self.require_unsafe(
UnsafetyViolationKind::General,
- UnsafetyViolationDetails::CallToFunctionWith,
+ UnsafetyViolationDetails::CallToFunctionWith { missing, build_enabled },
)
}
}
@@ -385,7 +399,7 @@ pub(crate) fn provide(providers: &mut Providers) {
enum Context {
Safe,
/// in an `unsafe fn`
- UnsafeFn(HirId),
+ UnsafeFn,
/// in a *used* `unsafe` block
/// (i.e. a block without unused-unsafe warning)
UnsafeBlock(HirId),
@@ -407,7 +421,7 @@ impl<'tcx> intravisit::Visitor<'tcx> for UnusedUnsafeVisitor<'_, 'tcx> {
};
let unused_unsafe = match (self.context, used) {
(_, false) => UnusedUnsafe::Unused,
- (Context::Safe, true) | (Context::UnsafeFn(_), true) => {
+ (Context::Safe, true) | (Context::UnsafeFn, true) => {
let previous_context = self.context;
self.context = Context::UnsafeBlock(block.hir_id);
intravisit::walk_block(self, block);
@@ -452,9 +466,9 @@ fn check_unused_unsafe(
};
let body = tcx.hir().body(body_id);
- let hir_id = tcx.hir().local_def_id_to_hir_id(def_id);
+ let hir_id = tcx.local_def_id_to_hir_id(def_id);
let context = match tcx.hir().fn_sig_by_hir_id(hir_id) {
- Some(sig) if sig.header.unsafety == hir::Unsafety::Unsafe => Context::UnsafeFn(hir_id),
+ Some(sig) if sig.header.unsafety == hir::Unsafety::Unsafe => Context::UnsafeFn,
_ => Context::Safe,
};
@@ -494,7 +508,7 @@ fn unsafety_check_result(tcx: TyCtxt<'_>, def: LocalDefId) -> &UnsafetyCheckResu
let param_env = tcx.param_env(def);
let mut checker = UnsafetyChecker::new(body, def, tcx, param_env);
- checker.visit_body(&body);
+ checker.visit_body(body);
let unused_unsafes = (!tcx.is_typeck_child(def.to_def_id()))
.then(|| check_unused_unsafe(tcx, def, &checker.used_unsafe_blocks));
@@ -528,8 +542,9 @@ pub fn check_unsafety(tcx: TyCtxt<'_>, def_id: LocalDefId) {
// Only suggest wrapping the entire function body in an unsafe block once
let mut suggest_unsafe_block = true;
- for &UnsafetyViolation { source_info, lint_root, kind, details } in violations.iter() {
- let details = errors::RequiresUnsafeDetail { violation: details, span: source_info.span };
+ for &UnsafetyViolation { source_info, lint_root, kind, ref details } in violations.iter() {
+ let details =
+ errors::RequiresUnsafeDetail { violation: details.clone(), span: source_info.span };
match kind {
UnsafetyViolationKind::General => {
@@ -568,7 +583,7 @@ pub fn check_unsafety(tcx: TyCtxt<'_>, def_id: LocalDefId) {
errors::UnsafeOpInUnsafeFn {
details,
suggest_unsafe_block: suggest_unsafe_block.then(|| {
- let hir_id = tcx.hir().local_def_id_to_hir_id(def_id);
+ let hir_id = tcx.local_def_id_to_hir_id(def_id);
let fn_sig = tcx
.hir()
.fn_sig_by_hir_id(hir_id)
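
The `check_unsafety.rs` hunk above enriches the `CallToFunctionWith` violation with two lists: `missing`, the callee's target features that the caller does not enable, and `build_enabled`, the subset of those that the build as a whole enables. A standalone sketch of that set arithmetic, using plain string slices as toy inputs (the real code filters `Symbol`s from `tcx.sess.target_features`):

    fn feature_sets<'a>(
        callee: &[&'a str],
        caller: &[&'a str],
        build: &[&'a str],
    ) -> (Vec<&'a str>, Vec<&'a str>) {
        // Features the callee requires but the calling function lacks.
        let missing: Vec<&str> =
            callee.iter().copied().filter(|f| !caller.contains(f)).collect();
        // Of those, the ones already enabled globally for the build.
        let build_enabled: Vec<&str> =
            build.iter().copied().filter(|f| missing.contains(f)).collect();
        (missing, build_enabled)
    }

    fn main() {
        let (missing, enabled) = feature_sets(&["avx2", "fma"], &["fma"], &["avx2"]);
        assert_eq!(missing, ["avx2"]);
        assert_eq!(enabled, ["avx2"]);
    }
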
diff --git a/compiler/rustc_mir_transform/src/const_goto.rs b/compiler/rustc_mir_transform/src/const_goto.rs
index fd2d37dbe..388434607 100644
--- a/compiler/rustc_mir_transform/src/const_goto.rs
+++ b/compiler/rustc_mir_transform/src/const_goto.rs
@@ -17,7 +17,6 @@
//! }
//! ```
-use crate::MirPass;
use rustc_middle::mir::*;
use rustc_middle::ty::TyCtxt;
use rustc_middle::{mir::visit::Visitor, ty::ParamEnv};
diff --git a/compiler/rustc_mir_transform/src/const_prop.rs b/compiler/rustc_mir_transform/src/const_prop.rs
index f7f882310..e66d5e0a9 100644
--- a/compiler/rustc_mir_transform/src/const_prop.rs
+++ b/compiler/rustc_mir_transform/src/const_prop.rs
@@ -11,6 +11,7 @@ use rustc_middle::mir::visit::{
MutVisitor, MutatingUseContext, NonMutatingUseContext, PlaceContext, Visitor,
};
use rustc_middle::mir::*;
+use rustc_middle::query::TyCtxtAt;
use rustc_middle::ty::layout::{LayoutError, LayoutOf, LayoutOfHelpers, TyAndLayout};
use rustc_middle::ty::{self, GenericArgs, Instance, ParamEnv, Ty, TyCtxt, TypeVisitableExt};
use rustc_span::{def_id::DefId, Span};
@@ -18,7 +19,6 @@ use rustc_target::abi::{self, HasDataLayout, Size, TargetDataLayout};
use rustc_target::spec::abi::Abi as CallAbi;
use crate::dataflow_const_prop::Patch;
-use crate::MirPass;
use rustc_const_eval::interpret::{
self, compile_time_machine, AllocId, ConstAllocation, FnArg, Frame, ImmTy, Immediate, InterpCx,
InterpResult, MemoryKind, OpTy, PlaceTy, Pointer, Scalar, StackPopCleanup,
@@ -84,8 +84,7 @@ impl<'tcx> MirPass<'tcx> for ConstProp {
// FIXME(welseywiser) const prop doesn't work on coroutines because of query cycles
// computing their layout.
- let is_coroutine = def_kind == DefKind::Coroutine;
- if is_coroutine {
+ if tcx.is_coroutine(def_id.to_def_id()) {
trace!("ConstProp skipped for coroutine {:?}", def_id);
return;
}
@@ -221,7 +220,7 @@ impl<'mir, 'tcx> interpret::Machine<'mir, 'tcx> for ConstPropMachine<'mir, 'tcx>
}
fn before_access_global(
- _tcx: TyCtxt<'tcx>,
+ _tcx: TyCtxtAt<'tcx>,
_machine: &Self,
_alloc_id: AllocId,
alloc: ConstAllocation<'tcx>,
@@ -241,10 +240,7 @@ impl<'mir, 'tcx> interpret::Machine<'mir, 'tcx> for ConstPropMachine<'mir, 'tcx>
}
#[inline(always)]
- fn expose_ptr(
- _ecx: &mut InterpCx<'mir, 'tcx, Self>,
- _ptr: Pointer<AllocId>,
- ) -> InterpResult<'tcx> {
+ fn expose_ptr(_ecx: &mut InterpCx<'mir, 'tcx, Self>, _ptr: Pointer) -> InterpResult<'tcx> {
throw_machine_stop_str!("exposing pointers isn't supported in ConstProp")
}
@@ -607,7 +603,7 @@ impl CanConstProp {
for arg in body.args_iter() {
cpv.found_assignment.insert(arg);
}
- cpv.visit_body(&body);
+ cpv.visit_body(body);
cpv.can_const_prop
}
}
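
`before_access_global` now receives `TyCtxtAt<'tcx>` instead of `TyCtxt<'tcx>`. In `rustc_middle`, `TyCtxtAt` pairs the context with the span it was requested at and derefs to the plain `TyCtxt`, so callees can emit better-located diagnostics without changing every downstream call. A toy model of that wrapper pattern (not the real rustc definition):

    use std::ops::Deref;

    #[derive(Clone, Copy, Debug)]
    struct Span(u32);

    #[derive(Clone, Copy)]
    struct TyCtxt;

    impl TyCtxt {
        fn lookup(&self) -> &'static str {
            "some query result"
        }
    }

    // Context plus the span it was requested at; derefs to the plain context.
    #[derive(Clone, Copy)]
    struct TyCtxtAt {
        tcx: TyCtxt,
        span: Span,
    }

    impl Deref for TyCtxtAt {
        type Target = TyCtxt;
        fn deref(&self) -> &TyCtxt {
            &self.tcx
        }
    }

    fn main() {
        let at = TyCtxtAt { tcx: TyCtxt, span: Span(42) };
        // Existing `TyCtxt` methods keep working through deref.
        println!("{} (requested at {:?})", at.lookup(), at.span);
    }
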
diff --git a/compiler/rustc_mir_transform/src/const_prop_lint.rs b/compiler/rustc_mir_transform/src/const_prop_lint.rs
index a23ba9c4a..99eecb567 100644
--- a/compiler/rustc_mir_transform/src/const_prop_lint.rs
+++ b/compiler/rustc_mir_transform/src/const_prop_lint.rs
@@ -61,7 +61,7 @@ impl<'tcx> MirLint<'tcx> for ConstPropLint {
// FIXME(welseywiser) const prop doesn't work on coroutines because of query cycles
// computing their layout.
- if let DefKind::Coroutine = def_kind {
+ if tcx.is_coroutine(def_id.to_def_id()) {
trace!("ConstPropLint skipped for coroutine {:?}", def_id);
return;
}
@@ -453,7 +453,7 @@ impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> {
cond: &Operand<'tcx>,
location: Location,
) -> Option<!> {
- let value = &self.eval_operand(&cond, location)?;
+ let value = &self.eval_operand(cond, location)?;
trace!("assertion on {:?} should be {:?}", value, expected);
let expected = Scalar::from_bool(expected);
@@ -626,7 +626,7 @@ impl<'tcx> Visitor<'tcx> for ConstPropagator<'_, 'tcx> {
self.check_assertion(*expected, msg, cond, location);
}
TerminatorKind::SwitchInt { ref discr, ref targets } => {
- if let Some(ref value) = self.eval_operand(&discr, location)
+ if let Some(ref value) = self.eval_operand(discr, location)
&& let Some(value_const) =
self.use_ecx(location, |this| this.ecx.read_scalar(value))
&& let Ok(constant) = value_const.try_to_int()
diff --git a/compiler/rustc_mir_transform/src/copy_prop.rs b/compiler/rustc_mir_transform/src/copy_prop.rs
index f5db7ce97..0119b95cc 100644
--- a/compiler/rustc_mir_transform/src/copy_prop.rs
+++ b/compiler/rustc_mir_transform/src/copy_prop.rs
@@ -6,7 +6,6 @@ use rustc_middle::ty::TyCtxt;
use rustc_mir_dataflow::impls::borrowed_locals;
use crate::ssa::SsaLocals;
-use crate::MirPass;
/// Unify locals that copy each other.
///
@@ -50,7 +49,7 @@ fn propagate_ssa<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
Replacer {
tcx,
- copy_classes: &ssa.copy_classes(),
+ copy_classes: ssa.copy_classes(),
fully_moved,
borrowed_locals,
storage_to_remove,
@@ -124,7 +123,7 @@ impl<'tcx> MutVisitor<'tcx> for Replacer<'_, 'tcx> {
}
fn visit_place(&mut self, place: &mut Place<'tcx>, ctxt: PlaceContext, loc: Location) {
- if let Some(new_projection) = self.process_projection(&place.projection, loc) {
+ if let Some(new_projection) = self.process_projection(place.projection, loc) {
place.projection = self.tcx().mk_place_elems(&new_projection);
}
diff --git a/compiler/rustc_mir_transform/src/coroutine.rs b/compiler/rustc_mir_transform/src/coroutine.rs
index abaed103f..d7dd44af7 100644
--- a/compiler/rustc_mir_transform/src/coroutine.rs
+++ b/compiler/rustc_mir_transform/src/coroutine.rs
@@ -55,7 +55,6 @@ use crate::deref_separator::deref_finder;
use crate::errors;
use crate::pass_manager as pm;
use crate::simplify;
-use crate::MirPass;
use rustc_data_structures::fx::{FxHashMap, FxHashSet};
use rustc_errors::pluralize;
use rustc_hir as hir;
@@ -63,17 +62,16 @@ use rustc_hir::lang_items::LangItem;
use rustc_hir::CoroutineKind;
use rustc_index::bit_set::{BitMatrix, BitSet, GrowableBitSet};
use rustc_index::{Idx, IndexVec};
-use rustc_middle::mir::dump_mir;
use rustc_middle::mir::visit::{MutVisitor, PlaceContext, Visitor};
use rustc_middle::mir::*;
+use rustc_middle::ty::CoroutineArgs;
use rustc_middle::ty::InstanceDef;
-use rustc_middle::ty::{self, AdtDef, Ty, TyCtxt};
-use rustc_middle::ty::{CoroutineArgs, GenericArgsRef};
+use rustc_middle::ty::{self, Ty, TyCtxt};
use rustc_mir_dataflow::impls::{
MaybeBorrowedLocals, MaybeLiveLocals, MaybeRequiresStorage, MaybeStorageLive,
};
use rustc_mir_dataflow::storage::always_storage_live_locals;
-use rustc_mir_dataflow::{self, Analysis};
+use rustc_mir_dataflow::Analysis;
use rustc_span::def_id::{DefId, LocalDefId};
use rustc_span::symbol::sym;
use rustc_span::Span;
@@ -225,8 +223,6 @@ struct SuspensionPoint<'tcx> {
struct TransformVisitor<'tcx> {
tcx: TyCtxt<'tcx>,
coroutine_kind: hir::CoroutineKind,
- state_adt_ref: AdtDef<'tcx>,
- state_args: GenericArgsRef<'tcx>,
// The type of the discriminant in the coroutine struct
discr_ty: Ty<'tcx>,
@@ -245,22 +241,56 @@ struct TransformVisitor<'tcx> {
always_live_locals: BitSet<Local>,
// The original RETURN_PLACE local
- new_ret_local: Local,
+ old_ret_local: Local,
+
+ old_yield_ty: Ty<'tcx>,
+
+ old_ret_ty: Ty<'tcx>,
}
impl<'tcx> TransformVisitor<'tcx> {
fn insert_none_ret_block(&self, body: &mut Body<'tcx>) -> BasicBlock {
let block = BasicBlock::new(body.basic_blocks.len());
-
let source_info = SourceInfo::outermost(body.span);
- let (kind, idx) = self.coroutine_state_adt_and_variant_idx(true);
- assert_eq!(self.state_adt_ref.variant(idx).fields.len(), 0);
+ let none_value = match self.coroutine_kind {
+ CoroutineKind::Async(_) => span_bug!(body.span, "`Future`s are not fused inherently"),
+ CoroutineKind::Coroutine => span_bug!(body.span, "`Coroutine`s cannot be fused"),
+            // `gen` continues to return `None`
+ CoroutineKind::Gen(_) => {
+ let option_def_id = self.tcx.require_lang_item(LangItem::Option, None);
+ Rvalue::Aggregate(
+ Box::new(AggregateKind::Adt(
+ option_def_id,
+ VariantIdx::from_usize(0),
+ self.tcx.mk_args(&[self.old_yield_ty.into()]),
+ None,
+ None,
+ )),
+ IndexVec::new(),
+ )
+ }
+ // `async gen` continues to return `Poll::Ready(None)`
+ CoroutineKind::AsyncGen(_) => {
+ let ty::Adt(_poll_adt, args) = *self.old_yield_ty.kind() else { bug!() };
+ let ty::Adt(_option_adt, args) = *args.type_at(0).kind() else { bug!() };
+ let yield_ty = args.type_at(0);
+ Rvalue::Use(Operand::Constant(Box::new(ConstOperand {
+ span: source_info.span,
+ const_: Const::Unevaluated(
+ UnevaluatedConst::new(
+ self.tcx.require_lang_item(LangItem::AsyncGenFinished, None),
+ self.tcx.mk_args(&[yield_ty.into()]),
+ ),
+ self.old_yield_ty,
+ ),
+ user_ty: None,
+ })))
+ }
+ };
+
let statements = vec![Statement {
- kind: StatementKind::Assign(Box::new((
- Place::return_place(),
- Rvalue::Aggregate(Box::new(kind), IndexVec::new()),
- ))),
+ kind: StatementKind::Assign(Box::new((Place::return_place(), none_value))),
source_info,
}];
@@ -273,23 +303,6 @@ impl<'tcx> TransformVisitor<'tcx> {
block
}
- fn coroutine_state_adt_and_variant_idx(
- &self,
- is_return: bool,
- ) -> (AggregateKind<'tcx>, VariantIdx) {
- let idx = VariantIdx::new(match (is_return, self.coroutine_kind) {
- (true, hir::CoroutineKind::Coroutine) => 1, // CoroutineState::Complete
- (false, hir::CoroutineKind::Coroutine) => 0, // CoroutineState::Yielded
- (true, hir::CoroutineKind::Async(_)) => 0, // Poll::Ready
- (false, hir::CoroutineKind::Async(_)) => 1, // Poll::Pending
- (true, hir::CoroutineKind::Gen(_)) => 0, // Option::None
- (false, hir::CoroutineKind::Gen(_)) => 1, // Option::Some
- });
-
- let kind = AggregateKind::Adt(self.state_adt_ref.did(), idx, self.state_args, None, None);
- (kind, idx)
- }
-
// Make a `CoroutineState` or `Poll` variant assignment.
//
// `core::ops::CoroutineState` only has single element tuple variants,
@@ -302,51 +315,119 @@ impl<'tcx> TransformVisitor<'tcx> {
is_return: bool,
statements: &mut Vec<Statement<'tcx>>,
) {
- let (kind, idx) = self.coroutine_state_adt_and_variant_idx(is_return);
-
- match self.coroutine_kind {
- // `Poll::Pending`
+ let rvalue = match self.coroutine_kind {
CoroutineKind::Async(_) => {
- if !is_return {
- assert_eq!(self.state_adt_ref.variant(idx).fields.len(), 0);
-
- // FIXME(swatinem): assert that `val` is indeed unit?
- statements.push(Statement {
- kind: StatementKind::Assign(Box::new((
- Place::return_place(),
- Rvalue::Aggregate(Box::new(kind), IndexVec::new()),
- ))),
- source_info,
- });
- return;
+ let poll_def_id = self.tcx.require_lang_item(LangItem::Poll, None);
+ let args = self.tcx.mk_args(&[self.old_ret_ty.into()]);
+ if is_return {
+ // Poll::Ready(val)
+ Rvalue::Aggregate(
+ Box::new(AggregateKind::Adt(
+ poll_def_id,
+ VariantIdx::from_usize(0),
+ args,
+ None,
+ None,
+ )),
+ IndexVec::from_raw(vec![val]),
+ )
+ } else {
+ // Poll::Pending
+ Rvalue::Aggregate(
+ Box::new(AggregateKind::Adt(
+ poll_def_id,
+ VariantIdx::from_usize(1),
+ args,
+ None,
+ None,
+ )),
+ IndexVec::new(),
+ )
}
}
- // `Option::None`
CoroutineKind::Gen(_) => {
+ let option_def_id = self.tcx.require_lang_item(LangItem::Option, None);
+ let args = self.tcx.mk_args(&[self.old_yield_ty.into()]);
if is_return {
- assert_eq!(self.state_adt_ref.variant(idx).fields.len(), 0);
-
- statements.push(Statement {
- kind: StatementKind::Assign(Box::new((
- Place::return_place(),
- Rvalue::Aggregate(Box::new(kind), IndexVec::new()),
- ))),
- source_info,
- });
- return;
+ // None
+ Rvalue::Aggregate(
+ Box::new(AggregateKind::Adt(
+ option_def_id,
+ VariantIdx::from_usize(0),
+ args,
+ None,
+ None,
+ )),
+ IndexVec::new(),
+ )
+ } else {
+ // Some(val)
+ Rvalue::Aggregate(
+ Box::new(AggregateKind::Adt(
+ option_def_id,
+ VariantIdx::from_usize(1),
+ args,
+ None,
+ None,
+ )),
+ IndexVec::from_raw(vec![val]),
+ )
}
}
- CoroutineKind::Coroutine => {}
- }
-
- // else: `Poll::Ready(x)`, `CoroutineState::Yielded(x)`, `CoroutineState::Complete(x)`, or `Option::Some(x)`
- assert_eq!(self.state_adt_ref.variant(idx).fields.len(), 1);
+ CoroutineKind::AsyncGen(_) => {
+ if is_return {
+ let ty::Adt(_poll_adt, args) = *self.old_yield_ty.kind() else { bug!() };
+ let ty::Adt(_option_adt, args) = *args.type_at(0).kind() else { bug!() };
+ let yield_ty = args.type_at(0);
+ Rvalue::Use(Operand::Constant(Box::new(ConstOperand {
+ span: source_info.span,
+ const_: Const::Unevaluated(
+ UnevaluatedConst::new(
+ self.tcx.require_lang_item(LangItem::AsyncGenFinished, None),
+ self.tcx.mk_args(&[yield_ty.into()]),
+ ),
+ self.old_yield_ty,
+ ),
+ user_ty: None,
+ })))
+ } else {
+ Rvalue::Use(val)
+ }
+ }
+ CoroutineKind::Coroutine => {
+ let coroutine_state_def_id =
+ self.tcx.require_lang_item(LangItem::CoroutineState, None);
+ let args = self.tcx.mk_args(&[self.old_yield_ty.into(), self.old_ret_ty.into()]);
+ if is_return {
+ // CoroutineState::Complete(val)
+ Rvalue::Aggregate(
+ Box::new(AggregateKind::Adt(
+ coroutine_state_def_id,
+ VariantIdx::from_usize(1),
+ args,
+ None,
+ None,
+ )),
+ IndexVec::from_raw(vec![val]),
+ )
+ } else {
+ // CoroutineState::Yielded(val)
+ Rvalue::Aggregate(
+ Box::new(AggregateKind::Adt(
+ coroutine_state_def_id,
+ VariantIdx::from_usize(0),
+ args,
+ None,
+ None,
+ )),
+ IndexVec::from_raw(vec![val]),
+ )
+ }
+ }
+ };
statements.push(Statement {
- kind: StatementKind::Assign(Box::new((
- Place::return_place(),
- Rvalue::Aggregate(Box::new(kind), [val].into()),
- ))),
+ kind: StatementKind::Assign(Box::new((Place::return_place(), rvalue))),
source_info,
});
}
@@ -420,7 +501,7 @@ impl<'tcx> MutVisitor<'tcx> for TransformVisitor<'tcx> {
let ret_val = match data.terminator().kind {
TerminatorKind::Return => {
- Some((true, None, Operand::Move(Place::from(self.new_ret_local)), None))
+ Some((true, None, Operand::Move(Place::from(self.old_ret_local)), None))
}
TerminatorKind::Yield { ref value, resume, resume_arg, drop } => {
Some((false, Some((resume, resume_arg)), value.clone(), drop))
@@ -446,12 +527,26 @@ impl<'tcx> MutVisitor<'tcx> for TransformVisitor<'tcx> {
resume_arg
};
+ let storage_liveness: GrowableBitSet<Local> =
+ self.storage_liveness[block].clone().unwrap().into();
+
+ for i in 0..self.always_live_locals.domain_size() {
+ let l = Local::new(i);
+ let needs_storage_dead = storage_liveness.contains(l)
+ && !self.remap.contains_key(&l)
+ && !self.always_live_locals.contains(l);
+ if needs_storage_dead {
+ data.statements
+ .push(Statement { source_info, kind: StatementKind::StorageDead(l) });
+ }
+ }
+
self.suspension_points.push(SuspensionPoint {
state,
resume,
resume_arg,
drop,
- storage_liveness: self.storage_liveness[block].clone().unwrap().into(),
+ storage_liveness,
});
VariantIdx::new(state)
@@ -617,6 +712,22 @@ fn replace_resume_ty_local<'tcx>(
}
}
+/// Transforms the `body` of the coroutine applying the following transform:
+///
+/// - Remove the `resume` argument.
+///
+/// Ideally the async lowering would not add the `resume` argument.
+///
+/// The async lowering step and the type / lifetime inference / checking are
+/// still using the `resume` argument for the time being. After this transform,
+/// the coroutine body doesn't have the `resume` argument.
+fn transform_gen_context<'tcx>(_tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
+ // This leaves the local representing the `resume` argument in place,
+ // but turns it into a regular local variable. This is cheaper than
+ // adjusting all local references in the body after removing it.
+ body.arg_count = 1;
+}
+
struct LivenessInfo {
/// Which locals are live across any suspension point.
saved_locals: CoroutineSavedLocals,
@@ -651,36 +762,34 @@ fn locals_live_across_suspend_points<'tcx>(
always_live_locals: &BitSet<Local>,
movable: bool,
) -> LivenessInfo {
- let body_ref: &Body<'_> = &body;
-
// Calculate when MIR locals have live storage. This gives us an upper bound of their
// lifetimes.
let mut storage_live = MaybeStorageLive::new(std::borrow::Cow::Borrowed(always_live_locals))
- .into_engine(tcx, body_ref)
+ .into_engine(tcx, body)
.iterate_to_fixpoint()
- .into_results_cursor(body_ref);
+ .into_results_cursor(body);
// Calculate the MIR locals which have been previously
// borrowed (even if they are still active).
let borrowed_locals_results =
- MaybeBorrowedLocals.into_engine(tcx, body_ref).pass_name("coroutine").iterate_to_fixpoint();
+ MaybeBorrowedLocals.into_engine(tcx, body).pass_name("coroutine").iterate_to_fixpoint();
- let mut borrowed_locals_cursor = borrowed_locals_results.cloned_results_cursor(body_ref);
+ let mut borrowed_locals_cursor = borrowed_locals_results.clone().into_results_cursor(body);
// Calculate the MIR locals that we actually need to keep storage around
// for.
- let mut requires_storage_results =
- MaybeRequiresStorage::new(borrowed_locals_results.cloned_results_cursor(body))
- .into_engine(tcx, body_ref)
- .iterate_to_fixpoint();
- let mut requires_storage_cursor = requires_storage_results.as_results_cursor(body_ref);
+ let mut requires_storage_cursor =
+ MaybeRequiresStorage::new(borrowed_locals_results.into_results_cursor(body))
+ .into_engine(tcx, body)
+ .iterate_to_fixpoint()
+ .into_results_cursor(body);
// Calculate the liveness of MIR locals ignoring borrows.
let mut liveness = MaybeLiveLocals
- .into_engine(tcx, body_ref)
+ .into_engine(tcx, body)
.pass_name("coroutine")
.iterate_to_fixpoint()
- .into_results_cursor(body_ref);
+ .into_results_cursor(body);
let mut storage_liveness_map = IndexVec::from_elem(None, &body.basic_blocks);
let mut live_locals_at_suspension_points = Vec::new();
@@ -742,14 +851,14 @@ fn locals_live_across_suspend_points<'tcx>(
// saving.
let live_locals_at_suspension_points = live_locals_at_suspension_points
.iter()
- .map(|live_here| saved_locals.renumber_bitset(&live_here))
+ .map(|live_here| saved_locals.renumber_bitset(live_here))
.collect();
let storage_conflicts = compute_storage_conflicts(
- body_ref,
+ body,
&saved_locals,
always_live_locals.clone(),
- requires_storage_results,
+ requires_storage_cursor.into_results(),
);
LivenessInfo {
@@ -778,7 +887,7 @@ impl CoroutineSavedLocals {
/// Transforms a `BitSet<Local>` that contains only locals saved across yield points to the
/// equivalent `BitSet<CoroutineSavedLocal>`.
fn renumber_bitset(&self, input: &BitSet<Local>) -> BitSet<CoroutineSavedLocal> {
- assert!(self.superset(&input), "{:?} not a superset of {:?}", self.0, input);
+ assert!(self.superset(input), "{:?} not a superset of {:?}", self.0, input);
let mut out = BitSet::new_empty(self.count());
for (saved_local, local) in self.iter_enumerated() {
if input.contains(local) {
@@ -814,7 +923,7 @@ fn compute_storage_conflicts<'mir, 'tcx>(
body: &'mir Body<'tcx>,
saved_locals: &CoroutineSavedLocals,
always_live_locals: BitSet<Local>,
- mut requires_storage: rustc_mir_dataflow::Results<'tcx, MaybeRequiresStorage<'_, 'mir, 'tcx>>,
+ mut requires_storage: rustc_mir_dataflow::Results<'tcx, MaybeRequiresStorage<'mir, 'tcx>>,
) -> BitMatrix<CoroutineSavedLocal, CoroutineSavedLocal> {
assert_eq!(body.local_decls.len(), saved_locals.domain_size());
@@ -829,7 +938,7 @@ fn compute_storage_conflicts<'mir, 'tcx>(
// Compute the storage conflicts for all eligible locals.
let mut visitor = StorageConflictVisitor {
body,
- saved_locals: &saved_locals,
+        saved_locals,
local_conflicts: BitMatrix::from_row_n(&ineligible_locals, body.local_decls.len()),
};
@@ -1128,7 +1237,7 @@ fn create_coroutine_drop_shim<'tcx>(
// The returned state and the poisoned state fall through to the default
// case which is just to return
- insert_switch(&mut body, cases, &transform, TerminatorKind::Return);
+ insert_switch(&mut body, cases, transform, TerminatorKind::Return);
for block in body.basic_blocks_mut() {
let kind = &mut block.terminator_mut().kind;
@@ -1320,7 +1429,9 @@ fn create_coroutine_resume_function<'tcx>(
CoroutineKind::Async(_) | CoroutineKind::Coroutine => {
insert_panic_block(tcx, body, ResumedAfterReturn(coroutine_kind))
}
- CoroutineKind::Gen(_) => transform.insert_none_ret_block(body),
+ CoroutineKind::AsyncGen(_) | CoroutineKind::Gen(_) => {
+ transform.insert_none_ret_block(body)
+ }
};
cases.insert(1, (RETURNED, block));
}
@@ -1328,7 +1439,15 @@ fn create_coroutine_resume_function<'tcx>(
insert_switch(body, cases, &transform, TerminatorKind::Unreachable);
make_coroutine_state_argument_indirect(tcx, body);
- make_coroutine_state_argument_pinned(tcx, body);
+
+ match coroutine_kind {
+ // Iterator::next doesn't accept a pinned argument,
+ // unlike for all other coroutine kinds.
+ CoroutineKind::Gen(_) => {}
+ _ => {
+ make_coroutine_state_argument_pinned(tcx, body);
+ }
+ }
// Make sure we remove dead blocks to remove
// unrelated code from the drop part of the function
@@ -1391,13 +1510,6 @@ fn create_cases<'tcx>(
// Create StorageLive instructions for locals with live storage
for i in 0..(body.local_decls.len()) {
- if i == 2 {
- // The resume argument is live on function entry. Don't insert a
- // `StorageLive`, or the following `Assign` will read from uninitialized
- // memory.
- continue;
- }
-
let l = Local::new(i);
let needs_storage_live = point.storage_liveness.contains(l)
&& !transform.remap.contains_key(&l)
@@ -1456,7 +1568,7 @@ pub(crate) fn mir_coroutine_witnesses<'tcx>(
// The witness simply contains all locals live across suspend points.
- let always_live_locals = always_storage_live_locals(&body);
+ let always_live_locals = always_storage_live_locals(body);
let liveness_info = locals_live_across_suspend_points(tcx, body, &always_live_locals, movable);
// Extract locals which are live across suspension point into `layout`
@@ -1464,17 +1576,18 @@ pub(crate) fn mir_coroutine_witnesses<'tcx>(
// `storage_liveness` tells us which locals have live storage at suspension points
let (_, coroutine_layout, _) = compute_layout(liveness_info, body);
- check_suspend_tys(tcx, &coroutine_layout, &body);
+ check_suspend_tys(tcx, &coroutine_layout, body);
Some(coroutine_layout)
}
impl<'tcx> MirPass<'tcx> for StateTransform {
fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
- let Some(yield_ty) = body.yield_ty() else {
+ let Some(old_yield_ty) = body.yield_ty() else {
// This only applies to coroutines
return;
};
+ let old_ret_ty = body.return_ty();
assert!(body.coroutine_drop().is_none());
@@ -1488,44 +1601,51 @@ impl<'tcx> MirPass<'tcx> for StateTransform {
(args.discr_ty(tcx), movability == hir::Movability::Movable)
}
_ => {
- tcx.sess
- .delay_span_bug(body.span, format!("unexpected coroutine type {coroutine_ty}"));
+ tcx.sess.span_delayed_bug(
+ body.span,
+ format!("unexpected coroutine type {coroutine_ty}"),
+ );
return;
}
};
let is_async_kind = matches!(body.coroutine_kind(), Some(CoroutineKind::Async(_)));
- let (state_adt_ref, state_args) = match body.coroutine_kind().unwrap() {
+ let is_async_gen_kind = matches!(body.coroutine_kind(), Some(CoroutineKind::AsyncGen(_)));
+ let is_gen_kind = matches!(body.coroutine_kind(), Some(CoroutineKind::Gen(_)));
+ let new_ret_ty = match body.coroutine_kind().unwrap() {
CoroutineKind::Async(_) => {
// Compute Poll<return_ty>
let poll_did = tcx.require_lang_item(LangItem::Poll, None);
let poll_adt_ref = tcx.adt_def(poll_did);
- let poll_args = tcx.mk_args(&[body.return_ty().into()]);
- (poll_adt_ref, poll_args)
+ let poll_args = tcx.mk_args(&[old_ret_ty.into()]);
+ Ty::new_adt(tcx, poll_adt_ref, poll_args)
}
CoroutineKind::Gen(_) => {
// Compute Option<yield_ty>
let option_did = tcx.require_lang_item(LangItem::Option, None);
let option_adt_ref = tcx.adt_def(option_did);
- let option_args = tcx.mk_args(&[body.yield_ty().unwrap().into()]);
- (option_adt_ref, option_args)
+ let option_args = tcx.mk_args(&[old_yield_ty.into()]);
+ Ty::new_adt(tcx, option_adt_ref, option_args)
+ }
+ CoroutineKind::AsyncGen(_) => {
+ // The yield ty is already `Poll<Option<yield_ty>>`
+ old_yield_ty
}
CoroutineKind::Coroutine => {
// Compute CoroutineState<yield_ty, return_ty>
let state_did = tcx.require_lang_item(LangItem::CoroutineState, None);
let state_adt_ref = tcx.adt_def(state_did);
- let state_args = tcx.mk_args(&[yield_ty.into(), body.return_ty().into()]);
- (state_adt_ref, state_args)
+ let state_args = tcx.mk_args(&[old_yield_ty.into(), old_ret_ty.into()]);
+ Ty::new_adt(tcx, state_adt_ref, state_args)
}
};
- let ret_ty = Ty::new_adt(tcx, state_adt_ref, state_args);
- // We rename RETURN_PLACE which has type mir.return_ty to new_ret_local
+ // We rename RETURN_PLACE which has type mir.return_ty to old_ret_local
// RETURN_PLACE then is a fresh unused local with type ret_ty.
- let new_ret_local = replace_local(RETURN_PLACE, ret_ty, body, tcx);
+ let old_ret_local = replace_local(RETURN_PLACE, new_ret_ty, body, tcx);
// Replace all occurrences of `ResumeTy` with `&mut Context<'_>` within async bodies.
- if is_async_kind {
+ if is_async_kind || is_async_gen_kind {
transform_async_context(tcx, body);
}
@@ -1539,9 +1659,10 @@ impl<'tcx> MirPass<'tcx> for StateTransform {
} else {
body.local_decls[resume_local].ty
};
- let new_resume_local = replace_local(resume_local, resume_ty, body, tcx);
+ let old_resume_local = replace_local(resume_local, resume_ty, body, tcx);
- // When first entering the coroutine, move the resume argument into its new local.
+ // When first entering the coroutine, move the resume argument into its old local
+    // (which is now a coroutine interior).
let source_info = SourceInfo::outermost(body.span);
let stmts = &mut body.basic_blocks_mut()[START_BLOCK].statements;
stmts.insert(
@@ -1549,13 +1670,13 @@ impl<'tcx> MirPass<'tcx> for StateTransform {
Statement {
source_info,
kind: StatementKind::Assign(Box::new((
- new_resume_local.into(),
+ old_resume_local.into(),
Rvalue::Use(Operand::Move(resume_local.into())),
))),
},
);
- let always_live_locals = always_storage_live_locals(&body);
+ let always_live_locals = always_storage_live_locals(body);
let liveness_info =
locals_live_across_suspend_points(tcx, body, &always_live_locals, movable);
@@ -1585,14 +1706,14 @@ impl<'tcx> MirPass<'tcx> for StateTransform {
let mut transform = TransformVisitor {
tcx,
coroutine_kind: body.coroutine_kind().unwrap(),
- state_adt_ref,
- state_args,
remap,
storage_liveness,
always_live_locals,
suspension_points: Vec::new(),
- new_ret_local,
+ old_ret_local,
discr_ty,
+ old_ret_ty,
+ old_yield_ty,
};
transform.visit_body(body);
@@ -1600,6 +1721,11 @@ impl<'tcx> MirPass<'tcx> for StateTransform {
body.arg_count = 2; // self, resume arg
body.spread_arg = None;
+ // Remove the context argument within generator bodies.
+ if is_gen_kind {
+ transform_gen_context(tcx, body);
+ }
+
// The original arguments to the function are no longer arguments, mark them as such.
// Otherwise they'll conflict with our new arguments, which although they don't have
// argument_index set, will get emitted as unnamed arguments.
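
With `state_adt_ref`/`state_args` gone, `make_state` and `insert_none_ret_block` now spell out the wrapper value per coroutine kind, including the new `gen` and `async gen` kinds; for `async gen`, the yield type is already `Poll<Option<_>>`, so yields pass `val` through unchanged and returns use the `AsyncGenFinished` constant. In ordinary Rust terms, the values built by those MIR aggregates are as follows (a surface-level sketch, not the MIR itself):

    use std::task::Poll;

    // Variant indices match the MIR aggregates above: Yielded = 0, Complete = 1.
    enum CoroutineState<Y, R> {
        Yielded(Y),
        Complete(R),
    }

    // async: `Poll::Ready(val)` on return (variant 0), `Poll::Pending` on yield (variant 1).
    fn async_value<R>(is_return: bool, val: R) -> Poll<R> {
        if is_return { Poll::Ready(val) } else { Poll::Pending }
    }

    // gen: `None` on return (variant 0), `Some(val)` on yield (variant 1).
    fn gen_value<Y>(is_return: bool, val: Y) -> Option<Y> {
        if is_return { None } else { Some(val) }
    }

    // plain coroutine: `Complete(val)` on return, `Yielded(val)` on yield.
    fn coroutine_value<T>(is_return: bool, val: T) -> CoroutineState<T, T> {
        if is_return { CoroutineState::Complete(val) } else { CoroutineState::Yielded(val) }
    }

    fn main() {
        assert_eq!(gen_value(true, 0), None);
        assert!(async_value(false, 0).is_pending());
        assert!(matches!(coroutine_value(false, 7), CoroutineState::Yielded(7)));
    }
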
diff --git a/compiler/rustc_mir_transform/src/cost_checker.rs b/compiler/rustc_mir_transform/src/cost_checker.rs
index 9bb26693c..79bed960b 100644
--- a/compiler/rustc_mir_transform/src/cost_checker.rs
+++ b/compiler/rustc_mir_transform/src/cost_checker.rs
@@ -69,7 +69,9 @@ impl<'tcx> Visitor<'tcx> for CostChecker<'_, 'tcx> {
}
TerminatorKind::Call { func: Operand::Constant(ref f), unwind, .. } => {
let fn_ty = self.instantiate_ty(f.const_.ty());
- self.cost += if let ty::FnDef(def_id, _) = *fn_ty.kind() && tcx.is_intrinsic(def_id) {
+ self.cost += if let ty::FnDef(def_id, _) = *fn_ty.kind()
+ && tcx.is_intrinsic(def_id)
+ {
// Don't give intrinsics the extra penalty for calls
INSTR_COST
} else {
diff --git a/compiler/rustc_mir_transform/src/coverage/counters.rs b/compiler/rustc_mir_transform/src/coverage/counters.rs
index b34ec95b4..604589e5b 100644
--- a/compiler/rustc_mir_transform/src/coverage/counters.rs
+++ b/compiler/rustc_mir_transform/src/coverage/counters.rs
@@ -1,18 +1,16 @@
-use super::graph;
-
-use graph::{BasicCoverageBlock, BcbBranch, CoverageGraph, TraverseCoverageGraphWithLoops};
-
use rustc_data_structures::fx::FxHashMap;
use rustc_data_structures::graph::WithNumNodes;
use rustc_index::bit_set::BitSet;
use rustc_index::IndexVec;
use rustc_middle::mir::coverage::*;
+use super::graph::{BasicCoverageBlock, CoverageGraph, TraverseCoverageGraphWithLoops};
+
use std::fmt::{self, Debug};
/// The coverage counter or counter expression associated with a particular
/// BCB node or BCB edge.
-#[derive(Clone)]
+#[derive(Clone, Copy)]
pub(super) enum BcbCounter {
Counter { id: CounterId },
Expression { id: ExpressionId },
@@ -88,11 +86,20 @@ impl CoverageCounters {
BcbCounter::Counter { id }
}
- fn make_expression(&mut self, lhs: CovTerm, op: Op, rhs: CovTerm) -> BcbCounter {
- let id = self.expressions.push(Expression { lhs, op, rhs });
+ fn make_expression(&mut self, lhs: BcbCounter, op: Op, rhs: BcbCounter) -> BcbCounter {
+ let expression = Expression { lhs: lhs.as_term(), op, rhs: rhs.as_term() };
+ let id = self.expressions.push(expression);
BcbCounter::Expression { id }
}
+ /// Variant of `make_expression` that makes `lhs` optional and assumes [`Op::Add`].
+ ///
+ /// This is useful when using [`Iterator::fold`] to build an arbitrary-length sum.
+ fn make_sum_expression(&mut self, lhs: Option<BcbCounter>, rhs: BcbCounter) -> BcbCounter {
+ let Some(lhs) = lhs else { return rhs };
+ self.make_expression(lhs, Op::Add, rhs)
+ }
+
/// Counter IDs start from one and go up.
fn next_counter(&mut self) -> CounterId {
let next = self.next_counter_id;
@@ -109,7 +116,7 @@ impl CoverageCounters {
self.expressions.len()
}
- fn set_bcb_counter(&mut self, bcb: BasicCoverageBlock, counter_kind: BcbCounter) -> CovTerm {
+ fn set_bcb_counter(&mut self, bcb: BasicCoverageBlock, counter_kind: BcbCounter) -> BcbCounter {
assert!(
// If the BCB has an edge counter (to be injected into a new `BasicBlock`), it can also
// have an expression (to be injected into an existing `BasicBlock` represented by this
@@ -118,14 +125,13 @@ impl CoverageCounters {
"attempt to add a `Counter` to a BCB target with existing incoming edge counters"
);
- let term = counter_kind.as_term();
if let Some(replaced) = self.bcb_counters[bcb].replace(counter_kind) {
bug!(
"attempt to set a BasicCoverageBlock coverage counter more than once; \
{bcb:?} already had counter {replaced:?}",
);
} else {
- term
+ counter_kind
}
}
@@ -134,11 +140,13 @@ impl CoverageCounters {
from_bcb: BasicCoverageBlock,
to_bcb: BasicCoverageBlock,
counter_kind: BcbCounter,
- ) -> CovTerm {
+ ) -> BcbCounter {
// If the BCB has an edge counter (to be injected into a new `BasicBlock`), it can also
// have an expression (to be injected into an existing `BasicBlock` represented by this
// `BasicCoverageBlock`).
- if let Some(node_counter) = self.bcb_counter(to_bcb) && !node_counter.is_expression() {
+ if let Some(node_counter) = self.bcb_counter(to_bcb)
+ && !node_counter.is_expression()
+ {
bug!(
"attempt to add an incoming edge counter from {from_bcb:?} \
when the target BCB already has {node_counter:?}"
@@ -146,19 +154,18 @@ impl CoverageCounters {
}
self.bcb_has_incoming_edge_counters.insert(to_bcb);
- let term = counter_kind.as_term();
if let Some(replaced) = self.bcb_edge_counters.insert((from_bcb, to_bcb), counter_kind) {
bug!(
"attempt to set an edge counter more than once; from_bcb: \
{from_bcb:?} already had counter {replaced:?}",
);
} else {
- term
+ counter_kind
}
}
- pub(super) fn bcb_counter(&self, bcb: BasicCoverageBlock) -> Option<&BcbCounter> {
- self.bcb_counters[bcb].as_ref()
+ pub(super) fn bcb_counter(&self, bcb: BasicCoverageBlock) -> Option<BcbCounter> {
+ self.bcb_counters[bcb]
}
pub(super) fn bcb_node_counters(
@@ -220,15 +227,11 @@ impl<'a> MakeBcbCounters<'a> {
// all `BasicCoverageBlock` nodes in the loop are visited before visiting any node outside
// the loop. The `traversal` state includes a `context_stack`, providing a way to know if
// the current BCB is in one or more nested loops or not.
- let mut traversal = TraverseCoverageGraphWithLoops::new(&self.basic_coverage_blocks);
+ let mut traversal = TraverseCoverageGraphWithLoops::new(self.basic_coverage_blocks);
while let Some(bcb) = traversal.next() {
if bcb_has_coverage_spans(bcb) {
debug!("{:?} has at least one coverage span. Get or make its counter", bcb);
- let branching_counter_operand = self.get_or_make_counter_operand(bcb);
-
- if self.bcb_needs_branch_counters(bcb) {
- self.make_branch_counters(&traversal, bcb, branching_counter_operand);
- }
+ self.make_node_and_branch_counters(&traversal, bcb);
} else {
debug!(
"{:?} does not have any coverage spans. A counter will only be added if \
@@ -245,100 +248,93 @@ impl<'a> MakeBcbCounters<'a> {
);
}
- fn make_branch_counters(
+ fn make_node_and_branch_counters(
&mut self,
traversal: &TraverseCoverageGraphWithLoops<'_>,
- branching_bcb: BasicCoverageBlock,
- branching_counter_operand: CovTerm,
+ from_bcb: BasicCoverageBlock,
) {
- let branches = self.bcb_branches(branching_bcb);
+ // First, ensure that this node has a counter of some kind.
+ // We might also use its term later to compute one of the branch counters.
+ let from_bcb_operand = self.get_or_make_counter_operand(from_bcb);
+
+ let branch_target_bcbs = self.basic_coverage_blocks.successors[from_bcb].as_slice();
+
+ // If this node doesn't have multiple out-edges, or all of its out-edges
+ // already have counters, then we don't need to create edge counters.
+ let needs_branch_counters = branch_target_bcbs.len() > 1
+ && branch_target_bcbs
+ .iter()
+ .any(|&to_bcb| self.branch_has_no_counter(from_bcb, to_bcb));
+ if !needs_branch_counters {
+ return;
+ }
+
debug!(
- "{:?} has some branch(es) without counters:\n {}",
- branching_bcb,
- branches
+ "{from_bcb:?} has some branch(es) without counters:\n {}",
+ branch_target_bcbs
.iter()
- .map(|branch| { format!("{:?}: {:?}", branch, self.branch_counter(branch)) })
+ .map(|&to_bcb| {
+ format!("{from_bcb:?}->{to_bcb:?}: {:?}", self.branch_counter(from_bcb, to_bcb))
+ })
.collect::<Vec<_>>()
.join("\n "),
);
- // Use the `traversal` state to decide if a subset of the branches exit a loop, making it
- // likely that branch is executed less than branches that do not exit the same loop. In this
- // case, any branch that does not exit the loop (and has not already been assigned a
- // counter) should be counted by expression, if possible. (If a preferred expression branch
- // is not selected based on the loop context, select any branch without an existing
- // counter.)
- let expression_branch = self.choose_preferred_expression_branch(traversal, &branches);
-
- // Assign a Counter or Expression to each branch, plus additional `Expression`s, as needed,
- // to sum up intermediate results.
- let mut some_sumup_counter_operand = None;
- for branch in branches {
- // Skip the selected `expression_branch`, if any. It's expression will be assigned after
- // all others.
- if branch != expression_branch {
- let branch_counter_operand = if branch.is_only_path_to_target() {
- debug!(
- " {:?} has only one incoming edge (from {:?}), so adding a \
- counter",
- branch, branching_bcb
- );
- self.get_or_make_counter_operand(branch.target_bcb)
- } else {
- debug!(" {:?} has multiple incoming edges, so adding an edge counter", branch);
- self.get_or_make_edge_counter_operand(branching_bcb, branch.target_bcb)
- };
- if let Some(sumup_counter_operand) =
- some_sumup_counter_operand.replace(branch_counter_operand)
- {
- let intermediate_expression = self.coverage_counters.make_expression(
- branch_counter_operand,
- Op::Add,
- sumup_counter_operand,
- );
- debug!(" [new intermediate expression: {:?}]", intermediate_expression);
- let intermediate_expression_operand = intermediate_expression.as_term();
- some_sumup_counter_operand.replace(intermediate_expression_operand);
- }
- }
- }
+ // Of the branch edges that don't have counters yet, one can be given an expression
+ // (computed from the other edges) instead of a dedicated counter.
+ let expression_to_bcb = self.choose_preferred_expression_branch(traversal, from_bcb);
- // Assign the final expression to the `expression_branch` by subtracting the total of all
- // other branches from the counter of the branching BCB.
- let sumup_counter_operand =
- some_sumup_counter_operand.expect("sumup_counter_operand should have a value");
+ // For each branch arm other than the one that was chosen to get an expression,
+ // ensure that it has a counter (existing counter/expression or a new counter),
+ // and accumulate the corresponding terms into a single sum term.
+ let sum_of_all_other_branches: BcbCounter = {
+ let _span = debug_span!("sum_of_all_other_branches", ?expression_to_bcb).entered();
+ branch_target_bcbs
+ .iter()
+ .copied()
+ // Skip the chosen branch, since we'll calculate it from the other branches.
+ .filter(|&to_bcb| to_bcb != expression_to_bcb)
+ .fold(None, |accum, to_bcb| {
+ let _span = debug_span!("to_bcb", ?accum, ?to_bcb).entered();
+ let branch_counter = self.get_or_make_edge_counter_operand(from_bcb, to_bcb);
+ Some(self.coverage_counters.make_sum_expression(accum, branch_counter))
+ })
+ .expect("there must be at least one other branch")
+ };
+
+ // For the branch that was chosen to get an expression, create that expression
+ // by taking the count of the node we're branching from, and subtracting the
+ // sum of all the other branches.
debug!(
- "Making an expression for the selected expression_branch: {:?} \
- (expression_branch predecessors: {:?})",
- expression_branch,
- self.bcb_predecessors(expression_branch.target_bcb),
+ "Making an expression for the selected expression_branch: \
+ {expression_to_bcb:?} (expression_branch predecessors: {:?})",
+ self.bcb_predecessors(expression_to_bcb),
);
let expression = self.coverage_counters.make_expression(
- branching_counter_operand,
+ from_bcb_operand,
Op::Subtract,
- sumup_counter_operand,
+ sum_of_all_other_branches,
);
- debug!("{:?} gets an expression: {:?}", expression_branch, expression);
- let bcb = expression_branch.target_bcb;
- if expression_branch.is_only_path_to_target() {
- self.coverage_counters.set_bcb_counter(bcb, expression);
+ debug!("{expression_to_bcb:?} gets an expression: {expression:?}");
+ if self.basic_coverage_blocks.bcb_has_multiple_in_edges(expression_to_bcb) {
+ self.coverage_counters.set_bcb_edge_counter(from_bcb, expression_to_bcb, expression);
} else {
- self.coverage_counters.set_bcb_edge_counter(branching_bcb, bcb, expression);
+ self.coverage_counters.set_bcb_counter(expression_to_bcb, expression);
}
}
#[instrument(level = "debug", skip(self))]
- fn get_or_make_counter_operand(&mut self, bcb: BasicCoverageBlock) -> CovTerm {
+ fn get_or_make_counter_operand(&mut self, bcb: BasicCoverageBlock) -> BcbCounter {
// If the BCB already has a counter, return it.
- if let Some(counter_kind) = &self.coverage_counters.bcb_counters[bcb] {
+ if let Some(counter_kind) = self.coverage_counters.bcb_counters[bcb] {
debug!("{bcb:?} already has a counter: {counter_kind:?}");
- return counter_kind.as_term();
+ return counter_kind;
}
// A BCB with only one incoming edge gets a simple `Counter` (via `make_counter()`).
// Also, a BCB that loops back to itself gets a simple `Counter`. This may indicate the
// program results in a tight infinite loop, but it should still compile.
- let one_path_to_target = self.bcb_has_one_path_to_target(bcb);
+ let one_path_to_target = !self.basic_coverage_blocks.bcb_has_multiple_in_edges(bcb);
if one_path_to_target || self.bcb_predecessors(bcb).contains(&bcb) {
let counter_kind = self.coverage_counters.make_counter();
if one_path_to_target {
@@ -353,40 +349,25 @@ impl<'a> MakeBcbCounters<'a> {
return self.coverage_counters.set_bcb_counter(bcb, counter_kind);
}
- // A BCB with multiple incoming edges can compute its count by `Expression`, summing up the
- // counters and/or expressions of its incoming edges. This will recursively get or create
- // counters for those incoming edges first, then call `make_expression()` to sum them up,
- // with additional intermediate expressions as needed.
- let _sumup_debug_span = debug_span!("(preparing sum-up expression)").entered();
-
- let mut predecessors = self.bcb_predecessors(bcb).to_owned().into_iter();
- let first_edge_counter_operand =
- self.get_or_make_edge_counter_operand(predecessors.next().unwrap(), bcb);
- let mut some_sumup_edge_counter_operand = None;
- for predecessor in predecessors {
- let edge_counter_operand = self.get_or_make_edge_counter_operand(predecessor, bcb);
- if let Some(sumup_edge_counter_operand) =
- some_sumup_edge_counter_operand.replace(edge_counter_operand)
- {
- let intermediate_expression = self.coverage_counters.make_expression(
- sumup_edge_counter_operand,
- Op::Add,
- edge_counter_operand,
- );
- debug!("new intermediate expression: {intermediate_expression:?}");
- let intermediate_expression_operand = intermediate_expression.as_term();
- some_sumup_edge_counter_operand.replace(intermediate_expression_operand);
- }
- }
- let counter_kind = self.coverage_counters.make_expression(
- first_edge_counter_operand,
- Op::Add,
- some_sumup_edge_counter_operand.unwrap(),
- );
- drop(_sumup_debug_span);
-
- debug!("{bcb:?} gets a new counter (sum of predecessor counters): {counter_kind:?}");
- self.coverage_counters.set_bcb_counter(bcb, counter_kind)
+ // A BCB with multiple incoming edges can compute its count by ensuring that counters
+ // exist for each of those edges, and then adding them up to get a total count.
+ let sum_of_in_edges: BcbCounter = {
+ let _span = debug_span!("sum_of_in_edges", ?bcb).entered();
+ // We avoid calling `self.bcb_predecessors` here so that we can
+ // call methods on `&mut self` inside the fold.
+ self.basic_coverage_blocks.predecessors[bcb]
+ .iter()
+ .copied()
+ .fold(None, |accum, from_bcb| {
+ let _span = debug_span!("from_bcb", ?accum, ?from_bcb).entered();
+ let edge_counter = self.get_or_make_edge_counter_operand(from_bcb, bcb);
+ Some(self.coverage_counters.make_sum_expression(accum, edge_counter))
+ })
+ .expect("there must be at least one in-edge")
+ };
+
+ debug!("{bcb:?} gets a new counter (sum of predecessor counters): {sum_of_in_edges:?}");
+ self.coverage_counters.set_bcb_counter(bcb, sum_of_in_edges)
}
#[instrument(level = "debug", skip(self))]
@@ -394,20 +375,26 @@ impl<'a> MakeBcbCounters<'a> {
&mut self,
from_bcb: BasicCoverageBlock,
to_bcb: BasicCoverageBlock,
- ) -> CovTerm {
+ ) -> BcbCounter {
+ // If the target BCB has only one in-edge (i.e. this one), then create
+ // a node counter instead, since it will have the same value.
+ if !self.basic_coverage_blocks.bcb_has_multiple_in_edges(to_bcb) {
+ assert_eq!([from_bcb].as_slice(), self.basic_coverage_blocks.predecessors[to_bcb]);
+ return self.get_or_make_counter_operand(to_bcb);
+ }
+
// If the source BCB has only one successor (assumed to be the given target), an edge
// counter is unnecessary. Just get or make a counter for the source BCB.
- let successors = self.bcb_successors(from_bcb).iter();
- if successors.len() == 1 {
+ if self.bcb_successors(from_bcb).len() == 1 {
return self.get_or_make_counter_operand(from_bcb);
}
// If the edge already has a counter, return it.
- if let Some(counter_kind) =
+ if let Some(&counter_kind) =
self.coverage_counters.bcb_edge_counters.get(&(from_bcb, to_bcb))
{
debug!("Edge {from_bcb:?}->{to_bcb:?} already has a counter: {counter_kind:?}");
- return counter_kind.as_term();
+ return counter_kind;
}
// Make a new counter to count this edge.
@@ -421,16 +408,19 @@ impl<'a> MakeBcbCounters<'a> {
fn choose_preferred_expression_branch(
&self,
traversal: &TraverseCoverageGraphWithLoops<'_>,
- branches: &[BcbBranch],
- ) -> BcbBranch {
- let good_reloop_branch = self.find_good_reloop_branch(traversal, &branches);
- if let Some(reloop_branch) = good_reloop_branch {
- assert!(self.branch_has_no_counter(&reloop_branch));
- debug!("Selecting reloop branch {reloop_branch:?} to get an expression");
- reloop_branch
+ from_bcb: BasicCoverageBlock,
+ ) -> BasicCoverageBlock {
+ let good_reloop_branch = self.find_good_reloop_branch(traversal, from_bcb);
+ if let Some(reloop_target) = good_reloop_branch {
+ assert!(self.branch_has_no_counter(from_bcb, reloop_target));
+ debug!("Selecting reloop target {reloop_target:?} to get an expression");
+ reloop_target
} else {
- let &branch_without_counter =
- branches.iter().find(|&branch| self.branch_has_no_counter(branch)).expect(
+ let &branch_without_counter = self
+ .bcb_successors(from_bcb)
+ .iter()
+ .find(|&&to_bcb| self.branch_has_no_counter(from_bcb, to_bcb))
+ .expect(
"needs_branch_counters was `true` so there should be at least one \
branch",
);
@@ -451,26 +441,28 @@ impl<'a> MakeBcbCounters<'a> {
fn find_good_reloop_branch(
&self,
traversal: &TraverseCoverageGraphWithLoops<'_>,
- branches: &[BcbBranch],
- ) -> Option<BcbBranch> {
+ from_bcb: BasicCoverageBlock,
+ ) -> Option<BasicCoverageBlock> {
+ let branch_target_bcbs = self.bcb_successors(from_bcb);
+
// Consider each loop on the current traversal context stack, top-down.
for reloop_bcbs in traversal.reloop_bcbs_per_loop() {
let mut all_branches_exit_this_loop = true;
// Try to find a branch that doesn't exit this loop and doesn't
// already have a counter.
- for &branch in branches {
+ for &branch_target_bcb in branch_target_bcbs {
// A branch is a reloop branch if it dominates any BCB that has
// an edge back to the loop header. (Other branches are exits.)
let is_reloop_branch = reloop_bcbs.iter().any(|&reloop_bcb| {
- self.basic_coverage_blocks.dominates(branch.target_bcb, reloop_bcb)
+ self.basic_coverage_blocks.dominates(branch_target_bcb, reloop_bcb)
});
if is_reloop_branch {
all_branches_exit_this_loop = false;
- if self.branch_has_no_counter(&branch) {
+ if self.branch_has_no_counter(from_bcb, branch_target_bcb) {
// We found a good branch to be given an expression.
- return Some(branch);
+ return Some(branch_target_bcb);
}
// Keep looking for another reloop branch without a counter.
} else {
@@ -503,36 +495,23 @@ impl<'a> MakeBcbCounters<'a> {
}
#[inline]
- fn bcb_branches(&self, from_bcb: BasicCoverageBlock) -> Vec<BcbBranch> {
- self.bcb_successors(from_bcb)
- .iter()
- .map(|&to_bcb| BcbBranch::from_to(from_bcb, to_bcb, &self.basic_coverage_blocks))
- .collect::<Vec<_>>()
- }
-
- fn bcb_needs_branch_counters(&self, bcb: BasicCoverageBlock) -> bool {
- let branch_needs_a_counter = |branch: &BcbBranch| self.branch_has_no_counter(branch);
- let branches = self.bcb_branches(bcb);
- branches.len() > 1 && branches.iter().any(branch_needs_a_counter)
- }
-
- fn branch_has_no_counter(&self, branch: &BcbBranch) -> bool {
- self.branch_counter(branch).is_none()
+ fn branch_has_no_counter(
+ &self,
+ from_bcb: BasicCoverageBlock,
+ to_bcb: BasicCoverageBlock,
+ ) -> bool {
+ self.branch_counter(from_bcb, to_bcb).is_none()
}
- fn branch_counter(&self, branch: &BcbBranch) -> Option<&BcbCounter> {
- let to_bcb = branch.target_bcb;
- if let Some(from_bcb) = branch.edge_from_bcb {
+ fn branch_counter(
+ &self,
+ from_bcb: BasicCoverageBlock,
+ to_bcb: BasicCoverageBlock,
+ ) -> Option<&BcbCounter> {
+ if self.basic_coverage_blocks.bcb_has_multiple_in_edges(to_bcb) {
self.coverage_counters.bcb_edge_counters.get(&(from_bcb, to_bcb))
} else {
self.coverage_counters.bcb_counters[to_bcb].as_ref()
}
}
-
- /// Returns true if the BasicCoverageBlock has zero or one incoming edge. (If zero, it should be
- /// the entry point for the function.)
- #[inline]
- fn bcb_has_one_path_to_target(&self, bcb: BasicCoverageBlock) -> bool {
- self.bcb_predecessors(bcb).len() <= 1
- }
}
diff --git a/compiler/rustc_mir_transform/src/coverage/graph.rs b/compiler/rustc_mir_transform/src/coverage/graph.rs
index 6bab62aa8..263bfdaaa 100644
--- a/compiler/rustc_mir_transform/src/coverage/graph.rs
+++ b/compiler/rustc_mir_transform/src/coverage/graph.rs
@@ -38,7 +38,7 @@ impl CoverageGraph {
}
let bcb_data = &bcbs[bcb];
let mut bcb_successors = Vec::new();
- for successor in bcb_filtered_successors(&mir_body, bcb_data.last_bb())
+ for successor in bcb_filtered_successors(mir_body, bcb_data.last_bb())
.filter_map(|successor_bb| bb_to_bcb[successor_bb])
{
if !seen[successor] {
@@ -62,6 +62,14 @@ impl CoverageGraph {
Self { bcbs, bb_to_bcb, successors, predecessors, dominators: None };
let dominators = dominators::dominators(&basic_coverage_blocks);
basic_coverage_blocks.dominators = Some(dominators);
+
+ // The coverage graph's entry-point node (bcb0) always starts with bb0,
+ // which never has predecessors. Any other blocks merged into bcb0 can't
+ // have multiple (coverage-relevant) predecessors, so bcb0 always has
+ // zero in-edges.
+ assert!(basic_coverage_blocks[START_BCB].leader_bb() == mir::START_BLOCK);
+ assert!(basic_coverage_blocks.predecessors[START_BCB].is_empty());
+
basic_coverage_blocks
}
@@ -199,6 +207,25 @@ impl CoverageGraph {
pub fn cmp_in_dominator_order(&self, a: BasicCoverageBlock, b: BasicCoverageBlock) -> Ordering {
self.dominators.as_ref().unwrap().cmp_in_dominator_order(a, b)
}
+
+ /// Returns true if the given node has 2 or more in-edges, i.e. 2 or more
+ /// predecessors.
+ ///
+ /// This property is interesting to code that assigns counters to nodes and
+ /// edges, because if a node _doesn't_ have multiple in-edges, then there's
+ /// no benefit in having a separate counter for its in-edge, because it
+ /// would have the same value as the node's own counter.
+ ///
+ /// FIXME: That assumption might not be true for [`TerminatorKind::Yield`]?
+ #[inline(always)]
+ pub(super) fn bcb_has_multiple_in_edges(&self, bcb: BasicCoverageBlock) -> bool {
+ // Even though bcb0 conceptually has an extra virtual in-edge due to
+ // being the entry point, we've already asserted that it has no _other_
+ // in-edges, so there's no possibility of it having _multiple_ in-edges.
+ // (And since its virtual in-edge doesn't exist in the graph, that edge
+ // can't have a separate counter anyway.)
+ self.predecessors[bcb].len() > 1
+ }
}
impl Index<BasicCoverageBlock> for CoverageGraph {
@@ -264,6 +291,7 @@ impl graph::WithPredecessors for CoverageGraph {
rustc_index::newtype_index! {
/// A node in the control-flow graph of CoverageGraph.
+ #[orderable]
#[debug_format = "bcb{}"]
pub(super) struct BasicCoverageBlock {
const START_BCB = 0;
@@ -318,45 +346,6 @@ impl BasicCoverageBlockData {
}
}
-/// Represents a successor from a branching BasicCoverageBlock (such as the arms of a `SwitchInt`)
-/// as either the successor BCB itself, if it has only one incoming edge, or the successor _plus_
-/// the specific branching BCB, representing the edge between the two. The latter case
-/// distinguishes this incoming edge from other incoming edges to the same `target_bcb`.
-#[derive(Clone, Copy, PartialEq, Eq)]
-pub(super) struct BcbBranch {
- pub edge_from_bcb: Option<BasicCoverageBlock>,
- pub target_bcb: BasicCoverageBlock,
-}
-
-impl BcbBranch {
- pub fn from_to(
- from_bcb: BasicCoverageBlock,
- to_bcb: BasicCoverageBlock,
- basic_coverage_blocks: &CoverageGraph,
- ) -> Self {
- let edge_from_bcb = if basic_coverage_blocks.predecessors[to_bcb].len() > 1 {
- Some(from_bcb)
- } else {
- None
- };
- Self { edge_from_bcb, target_bcb: to_bcb }
- }
-
- pub fn is_only_path_to_target(&self) -> bool {
- self.edge_from_bcb.is_none()
- }
-}
-
-impl std::fmt::Debug for BcbBranch {
- fn fmt(&self, fmt: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
- if let Some(from_bcb) = self.edge_from_bcb {
- write!(fmt, "{:?}->{:?}", from_bcb, self.target_bcb)
- } else {
- write!(fmt, "{:?}", self.target_bcb)
- }
- }
-}
-
// Returns the subset of a block's successors that are relevant to the coverage
// graph, i.e. those that do not represent unwinds or unreachable branches.
// FIXME(#78544): MIR InstrumentCoverage: Improve coverage of `#[should_panic]` tests and
diff --git a/compiler/rustc_mir_transform/src/coverage/mod.rs b/compiler/rustc_mir_transform/src/coverage/mod.rs
index 97e4468a0..c5a339128 100644
--- a/compiler/rustc_mir_transform/src/coverage/mod.rs
+++ b/compiler/rustc_mir_transform/src/coverage/mod.rs
@@ -13,7 +13,6 @@ use self::spans::CoverageSpans;
use crate::MirPass;
-use rustc_data_structures::sync::Lrc;
use rustc_middle::hir;
use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrFlags;
use rustc_middle::mir::coverage::*;
@@ -22,9 +21,9 @@ use rustc_middle::mir::{
TerminatorKind,
};
use rustc_middle::ty::TyCtxt;
-use rustc_span::def_id::DefId;
+use rustc_span::def_id::LocalDefId;
use rustc_span::source_map::SourceMap;
-use rustc_span::{ExpnKind, SourceFile, Span, Symbol};
+use rustc_span::{ExpnKind, Span, Symbol};
/// Inserts `StatementKind::Coverage` statements that instrument the binary with injected
/// counters (via the `llvm.instrprof.increment` intrinsic) and/or inject metadata used during codegen
@@ -39,31 +38,19 @@ impl<'tcx> MirPass<'tcx> for InstrumentCoverage {
fn run_pass(&self, tcx: TyCtxt<'tcx>, mir_body: &mut mir::Body<'tcx>) {
let mir_source = mir_body.source;
- // If the InstrumentCoverage pass is called on promoted MIRs, skip them.
- // See: https://github.com/rust-lang/rust/pull/73011#discussion_r438317601
- if mir_source.promoted.is_some() {
- trace!(
- "InstrumentCoverage skipped for {:?} (already promoted for Miri evaluation)",
- mir_source.def_id()
- );
- return;
- }
+ // This pass runs after MIR promotion, but before promoted MIR starts to
+ // be transformed, so it should never see promoted MIR.
+ assert!(mir_source.promoted.is_none());
+
+ let def_id = mir_source.def_id().expect_local();
- let is_fn_like =
- tcx.hir().get_by_def_id(mir_source.def_id().expect_local()).fn_kind().is_some();
-
- // Only instrument functions, methods, and closures (not constants since they are evaluated
- // at compile time by Miri).
- // FIXME(#73156): Handle source code coverage in const eval, but note, if and when const
- // expressions get coverage spans, we will probably have to "carve out" space for const
- // expressions from coverage spans in enclosing MIR's, like we do for closures. (That might
- // be tricky if const expressions have no corresponding statements in the enclosing MIR.
- // Closures are carved out by their initial `Assign` statement.)
- if !is_fn_like {
- trace!("InstrumentCoverage skipped for {:?} (not an fn-like)", mir_source.def_id());
+ if !is_eligible_for_coverage(tcx, def_id) {
+ trace!("InstrumentCoverage skipped for {def_id:?} (not eligible)");
return;
}
+ // An otherwise-eligible function is still skipped if its start block
+ // is known to be unreachable.
match mir_body.basic_blocks[mir::START_BLOCK].terminator().kind {
TerminatorKind::Unreachable => {
trace!("InstrumentCoverage skipped for unreachable `START_BLOCK`");
@@ -72,81 +59,43 @@ impl<'tcx> MirPass<'tcx> for InstrumentCoverage {
_ => {}
}
- let codegen_fn_attrs = tcx.codegen_fn_attrs(mir_source.def_id());
- if codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::NO_COVERAGE) {
- return;
- }
-
- trace!("InstrumentCoverage starting for {:?}", mir_source.def_id());
+ trace!("InstrumentCoverage starting for {def_id:?}");
Instrumentor::new(tcx, mir_body).inject_counters();
- trace!("InstrumentCoverage done for {:?}", mir_source.def_id());
+ trace!("InstrumentCoverage done for {def_id:?}");
}
}
struct Instrumentor<'a, 'tcx> {
tcx: TyCtxt<'tcx>,
mir_body: &'a mut mir::Body<'tcx>,
- source_file: Lrc<SourceFile>,
- fn_sig_span: Span,
- body_span: Span,
- function_source_hash: u64,
+ hir_info: ExtractedHirInfo,
basic_coverage_blocks: CoverageGraph,
coverage_counters: CoverageCounters,
}
impl<'a, 'tcx> Instrumentor<'a, 'tcx> {
fn new(tcx: TyCtxt<'tcx>, mir_body: &'a mut mir::Body<'tcx>) -> Self {
- let source_map = tcx.sess.source_map();
- let def_id = mir_body.source.def_id();
- let (some_fn_sig, hir_body) = fn_sig_and_body(tcx, def_id);
-
- let body_span = get_body_span(tcx, hir_body, mir_body);
-
- let source_file = source_map.lookup_source_file(body_span.lo());
- let fn_sig_span = match some_fn_sig.filter(|fn_sig| {
- fn_sig.span.eq_ctxt(body_span)
- && Lrc::ptr_eq(&source_file, &source_map.lookup_source_file(fn_sig.span.lo()))
- }) {
- Some(fn_sig) => fn_sig.span.with_hi(body_span.lo()),
- None => body_span.shrink_to_lo(),
- };
+ let hir_info = extract_hir_info(tcx, mir_body.source.def_id().expect_local());
- debug!(
- "instrumenting {}: {:?}, fn sig span: {:?}, body span: {:?}",
- if tcx.is_closure(def_id) { "closure" } else { "function" },
- def_id,
- fn_sig_span,
- body_span
- );
+ debug!(?hir_info, "instrumenting {:?}", mir_body.source.def_id());
- let function_source_hash = hash_mir_source(tcx, hir_body);
let basic_coverage_blocks = CoverageGraph::from_mir(mir_body);
let coverage_counters = CoverageCounters::new(&basic_coverage_blocks);
- Self {
- tcx,
- mir_body,
- source_file,
- fn_sig_span,
- body_span,
- function_source_hash,
- basic_coverage_blocks,
- coverage_counters,
- }
+ Self { tcx, mir_body, hir_info, basic_coverage_blocks, coverage_counters }
}
fn inject_counters(&'a mut self) {
- let fn_sig_span = self.fn_sig_span;
- let body_span = self.body_span;
-
////////////////////////////////////////////////////
// Compute coverage spans from the `CoverageGraph`.
- let coverage_spans = CoverageSpans::generate_coverage_spans(
- &self.mir_body,
- fn_sig_span,
- body_span,
+ let Some(coverage_spans) = CoverageSpans::generate_coverage_spans(
+ self.mir_body,
+ &self.hir_info,
&self.basic_coverage_blocks,
- );
+ ) else {
+ // No relevant spans were found in MIR, so skip instrumenting this function.
+ return;
+ };
////////////////////////////////////////////////////
// Create an optimized mix of `Counter`s and `Expression`s for the `CoverageGraph`. Ensure
@@ -160,7 +109,7 @@ impl<'a, 'tcx> Instrumentor<'a, 'tcx> {
let mappings = self.create_mappings_and_inject_coverage_statements(&coverage_spans);
self.mir_body.function_coverage_info = Some(Box::new(FunctionCoverageInfo {
- function_source_hash: self.function_source_hash,
+ function_source_hash: self.hir_info.function_source_hash,
num_counters: self.coverage_counters.num_counters(),
expressions: self.coverage_counters.take_expressions(),
mappings,
@@ -175,11 +124,12 @@ impl<'a, 'tcx> Instrumentor<'a, 'tcx> {
coverage_spans: &CoverageSpans,
) -> Vec<Mapping> {
let source_map = self.tcx.sess.source_map();
- let body_span = self.body_span;
+ let body_span = self.hir_info.body_span;
+ let source_file = source_map.lookup_source_file(body_span.lo());
use rustc_session::RemapFileNameExt;
let file_name =
- Symbol::intern(&self.source_file.name.for_codegen(self.tcx.sess).to_string_lossy());
+ Symbol::intern(&source_file.name.for_codegen(self.tcx.sess).to_string_lossy());
let mut mappings = Vec::new();
@@ -240,7 +190,7 @@ impl<'a, 'tcx> Instrumentor<'a, 'tcx> {
);
// Inject a counter into the newly-created BB.
- inject_statement(self.mir_body, self.make_mir_coverage_kind(&counter_kind), new_bb);
+ inject_statement(self.mir_body, self.make_mir_coverage_kind(counter_kind), new_bb);
}
mappings
@@ -325,27 +275,77 @@ fn make_code_region(
}
}
-fn fn_sig_and_body(
- tcx: TyCtxt<'_>,
- def_id: DefId,
-) -> (Option<&rustc_hir::FnSig<'_>>, &rustc_hir::Body<'_>) {
+fn is_eligible_for_coverage(tcx: TyCtxt<'_>, def_id: LocalDefId) -> bool {
+ // Only instrument functions, methods, and closures (not constants since they are evaluated
+ // at compile time by Miri).
+ // FIXME(#73156): Handle source code coverage in const eval, but note, if and when const
+ // expressions get coverage spans, we will probably have to "carve out" space for const
+ // expressions from coverage spans in enclosing MIR's, like we do for closures. (That might
+ // be tricky if const expressions have no corresponding statements in the enclosing MIR.
+ // Closures are carved out by their initial `Assign` statement.)
+ if !tcx.def_kind(def_id).is_fn_like() {
+ trace!("InstrumentCoverage skipped for {def_id:?} (not an fn-like)");
+ return false;
+ }
+
+ if tcx.codegen_fn_attrs(def_id).flags.contains(CodegenFnAttrFlags::NO_COVERAGE) {
+ return false;
+ }
+
+ true
+}
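
The `NO_COVERAGE` flag checked here is set by the per-function attribute for opting out of instrumentation (spelled `#[no_coverage]` at the time, in the process of being renamed to `#[coverage(off)]`). Unlike the fn-like check, this rejection emits no trace message.
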
+
+/// Function information extracted from HIR by the coverage instrumentor.
+#[derive(Debug)]
+struct ExtractedHirInfo {
+ function_source_hash: u64,
+ is_async_fn: bool,
+ fn_sig_span: Span,
+ body_span: Span,
+}
+
+fn extract_hir_info<'tcx>(tcx: TyCtxt<'tcx>, def_id: LocalDefId) -> ExtractedHirInfo {
// FIXME(#79625): Consider improving MIR to provide the information needed, to avoid going back
// to HIR for it.
- let hir_node = tcx.hir().get_if_local(def_id).expect("expected DefId is local");
+
+ let hir_node = tcx.hir_node_by_def_id(def_id);
let (_, fn_body_id) =
hir::map::associated_body(hir_node).expect("HIR node is a function with body");
- (hir_node.fn_sig(), tcx.hir().body(fn_body_id))
+ let hir_body = tcx.hir().body(fn_body_id);
+
+ let is_async_fn = hir_node.fn_sig().is_some_and(|fn_sig| fn_sig.header.is_async());
+ let body_span = get_body_span(tcx, hir_body, def_id);
+
+ // The actual signature span is only used if it has the same context and
+ // filename as the body, and precedes the body.
+ let maybe_fn_sig_span = hir_node.fn_sig().map(|fn_sig| fn_sig.span);
+ let fn_sig_span = maybe_fn_sig_span
+ .filter(|&fn_sig_span| {
+ let source_map = tcx.sess.source_map();
+ let file_idx = |span: Span| source_map.lookup_source_file_idx(span.lo());
+
+ fn_sig_span.eq_ctxt(body_span)
+ && fn_sig_span.hi() <= body_span.lo()
+ && file_idx(fn_sig_span) == file_idx(body_span)
+ })
+ // If so, extend it to the start of the body span.
+ .map(|fn_sig_span| fn_sig_span.with_hi(body_span.lo()))
+ // Otherwise, create a dummy signature span at the start of the body.
+ .unwrap_or_else(|| body_span.shrink_to_lo());
+
+ let function_source_hash = hash_mir_source(tcx, hir_body);
+
+ ExtractedHirInfo { function_source_hash, is_async_fn, fn_sig_span, body_span }
}
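
As a concrete illustration of the signature-span adjustment (hypothetical source, not taken from this diff):

    fn digits(x: u32) -> usize {
        x.to_string().len()
    }

Here `body_span` covers `{ x.to_string().len() }` and the raw signature span covers `fn digits(x: u32) -> usize`; after `.with_hi(body_span.lo())`, the stored `fn_sig_span` ends exactly where the body begins, so the two spans tile the item with no gap or overlap.
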
fn get_body_span<'tcx>(
tcx: TyCtxt<'tcx>,
hir_body: &rustc_hir::Body<'tcx>,
- mir_body: &mut mir::Body<'tcx>,
+ def_id: LocalDefId,
) -> Span {
let mut body_span = hir_body.value.span;
- let def_id = mir_body.source.def_id();
- if tcx.is_closure(def_id) {
+ if tcx.is_closure(def_id.to_def_id()) {
// If the MIR function is a closure, and the closure body span
// starts inside a macro but its content is not in that macro, try
// to find a non-macro callsite and instrument the spans there
diff --git a/compiler/rustc_mir_transform/src/coverage/query.rs b/compiler/rustc_mir_transform/src/coverage/query.rs
index 809407f89..dfc7c3a71 100644
--- a/compiler/rustc_mir_transform/src/coverage/query.rs
+++ b/compiler/rustc_mir_transform/src/coverage/query.rs
@@ -2,9 +2,9 @@ use super::*;
use rustc_data_structures::captures::Captures;
use rustc_middle::mir::coverage::*;
-use rustc_middle::mir::{Body, Coverage, CoverageIdsInfo};
+use rustc_middle::mir::{Body, CoverageIdsInfo};
use rustc_middle::query::Providers;
-use rustc_middle::ty::{self, TyCtxt};
+use rustc_middle::ty::{self};
/// A `query` provider for retrieving coverage information injected into MIR.
pub(crate) fn provide(providers: &mut Providers) {
diff --git a/compiler/rustc_mir_transform/src/coverage/spans.rs b/compiler/rustc_mir_transform/src/coverage/spans.rs
index b318134ae..ae43a18ad 100644
--- a/compiler/rustc_mir_transform/src/coverage/spans.rs
+++ b/compiler/rustc_mir_transform/src/coverage/spans.rs
@@ -6,6 +6,7 @@ use rustc_middle::mir;
use rustc_span::{BytePos, ExpnKind, MacroKind, Span, Symbol, DUMMY_SP};
use super::graph::{BasicCoverageBlock, CoverageGraph, START_BCB};
+use crate::coverage::ExtractedHirInfo;
mod from_mir;
@@ -15,26 +16,32 @@ pub(super) struct CoverageSpans {
}
impl CoverageSpans {
+ /// Extracts coverage-relevant spans from MIR, and associates them with
+ /// their corresponding BCBs.
+ ///
+ /// Returns `None` if no coverage-relevant spans could be extracted.
pub(super) fn generate_coverage_spans(
mir_body: &mir::Body<'_>,
- fn_sig_span: Span,
- body_span: Span,
+ hir_info: &ExtractedHirInfo,
basic_coverage_blocks: &CoverageGraph,
- ) -> Self {
+ ) -> Option<Self> {
let coverage_spans = CoverageSpansGenerator::generate_coverage_spans(
mir_body,
- fn_sig_span,
- body_span,
+ hir_info,
basic_coverage_blocks,
);
+ if coverage_spans.is_empty() {
+ return None;
+ }
+
// Group the coverage spans by BCB, with the BCBs in sorted order.
let mut bcb_to_spans = IndexVec::from_elem_n(Vec::new(), basic_coverage_blocks.num_nodes());
for CoverageSpan { bcb, span, .. } in coverage_spans {
bcb_to_spans[bcb].push(span);
}
- Self { bcb_to_spans }
+ Some(Self { bcb_to_spans })
}
pub(super) fn bcb_has_coverage_spans(&self, bcb: BasicCoverageBlock) -> bool {
@@ -89,10 +96,10 @@ impl CoverageSpan {
}
}
- pub fn merge_from(&mut self, mut other: CoverageSpan) {
- debug_assert!(self.is_mergeable(&other));
+ pub fn merge_from(&mut self, other: &Self) {
+ debug_assert!(self.is_mergeable(other));
self.span = self.span.to(other.span);
- self.merged_spans.append(&mut other.merged_spans);
+ self.merged_spans.extend_from_slice(&other.merged_spans);
}
pub fn cutoff_statements_at(&mut self, cutoff_pos: BytePos) {
@@ -129,16 +136,14 @@ impl CoverageSpan {
/// If the span is part of a macro, and the macro is visible (expands directly to the given
/// body_span), returns the macro name symbol.
pub fn visible_macro(&self, body_span: Span) -> Option<Symbol> {
- if let Some(current_macro) = self.current_macro()
- && self
- .expn_span
- .parent_callsite()
- .unwrap_or_else(|| bug!("macro must have a parent"))
- .eq_ctxt(body_span)
- {
- return Some(current_macro);
- }
- None
+ let current_macro = self.current_macro()?;
+ let parent_callsite = self.expn_span.parent_callsite()?;
+
+ // In addition to matching the context of the body span, the parent callsite
+ // must also be the source callsite, i.e. the parent must have no parent.
+ let is_visible_macro =
+ parent_callsite.parent_callsite().is_none() && parent_callsite.eq_ctxt(body_span);
+ is_visible_macro.then_some(current_macro)
}
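
The rewritten `visible_macro` replaces the old `bug!` on a missing parent callsite with a `?` early return, and its extra `parent_callsite.parent_callsite().is_none()` condition narrows "visible" to macros invoked directly from the function body, excluding spans that arose inside another expansion.
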
pub fn is_macro_expansion(&self) -> bool {
@@ -224,19 +229,17 @@ impl<'a> CoverageSpansGenerator<'a> {
/// to be).
pub(super) fn generate_coverage_spans(
mir_body: &mir::Body<'_>,
- fn_sig_span: Span, // Ensured to be same SourceFile and SyntaxContext as `body_span`
- body_span: Span,
+ hir_info: &ExtractedHirInfo,
basic_coverage_blocks: &'a CoverageGraph,
) -> Vec<CoverageSpan> {
let sorted_spans = from_mir::mir_to_initial_sorted_coverage_spans(
mir_body,
- fn_sig_span,
- body_span,
+ hir_info,
basic_coverage_blocks,
);
let coverage_spans = Self {
- body_span,
+ body_span: hir_info.body_span,
basic_coverage_blocks,
sorted_spans_iter: sorted_spans.into_iter(),
some_curr: None,
@@ -269,7 +272,7 @@ impl<'a> CoverageSpansGenerator<'a> {
if curr.is_mergeable(prev) {
debug!(" same bcb (and neither is a closure), merge with prev={prev:?}");
let prev = self.take_prev();
- self.curr_mut().merge_from(prev);
+ self.curr_mut().merge_from(&prev);
self.maybe_push_macro_name_span();
// Note that curr.span may now differ from curr_original_span
} else if prev.span.hi() <= curr.span.lo() {
@@ -277,7 +280,7 @@ impl<'a> CoverageSpansGenerator<'a> {
" different bcbs and disjoint spans, so keep curr for next iter, and add prev={prev:?}",
);
let prev = self.take_prev();
- self.push_refined_span(prev);
+ self.refined_spans.push(prev);
self.maybe_push_macro_name_span();
} else if prev.is_closure {
// drop any equal or overlapping span (`curr`) and keep `prev` to test again in the
@@ -321,33 +324,30 @@ impl<'a> CoverageSpansGenerator<'a> {
}
}
- let prev = self.take_prev();
- debug!(" AT END, adding last prev={prev:?}");
-
- // Take `pending_dups` so that we can drain it while calling self methods.
- // It is never used as a field after this point.
- for dup in std::mem::take(&mut self.pending_dups) {
+ // Drain any remaining dups into the output.
+ for dup in self.pending_dups.drain(..) {
debug!(" ...adding at least one pending dup={:?}", dup);
- self.push_refined_span(dup);
+ self.refined_spans.push(dup);
}
- // Async functions wrap a closure that implements the body to be executed. The enclosing
- // function is called and returns an `impl Future` without initially executing any of the
- // body. To avoid showing the return from the enclosing function as a "covered" return from
- // the closure, the enclosing function's `TerminatorKind::Return`s `CoverageSpan` is
- // excluded. The closure's `Return` is the only one that will be counted. This provides
- // adequate coverage, and more intuitive counts. (Avoids double-counting the closing brace
- // of the function body.)
- let body_ends_with_closure = if let Some(last_covspan) = self.refined_spans.last() {
- last_covspan.is_closure && last_covspan.span.hi() == self.body_span.hi()
- } else {
- false
- };
-
- if !body_ends_with_closure {
- self.push_refined_span(prev);
+ // There is usually a final span remaining in `prev` after the loop ends,
+ // so add it to the output as well.
+ if let Some(prev) = self.some_prev.take() {
+ debug!(" AT END, adding last prev={prev:?}");
+ self.refined_spans.push(prev);
}
+ // Do one last merge pass, to simplify the output.
+ self.refined_spans.dedup_by(|b, a| {
+ if a.is_mergeable(b) {
+ debug!(?a, ?b, "merging list-adjacent refined spans");
+ a.merge_from(b);
+ true
+ } else {
+ false
+ }
+ });
+
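
`Vec::dedup_by` passes each adjacent pair in reverse slice order (later element first), which is why the closure binds `|b, a|`: `a` is the earlier span, `b` the later one, and returning `true` after `a.merge_from(b)` drops `b`. A self-contained sketch of the same pattern on plain `(lo, hi)` ranges:

    fn main() {
        let mut spans = vec![(0u32, 2u32), (2, 5), (7, 9)];
        spans.dedup_by(|b, a| {
            // `a` precedes `b` in the vector; `dedup_by` visits pairs in reverse order.
            if a.1 == b.0 {
                a.1 = b.1; // merge `b` into `a`...
                true // ...and returning true removes `b`
            } else {
                false
            }
        });
        assert_eq!(spans, vec![(0, 5), (7, 9)]);
    }
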
// Remove `CoverageSpan`s derived from closures, originally added to ensure the coverage
// regions for the current function leave room for the closure's own coverage regions
// (injected separately, from the closure's own MIR).
@@ -355,18 +355,6 @@ impl<'a> CoverageSpansGenerator<'a> {
self.refined_spans
}
- fn push_refined_span(&mut self, covspan: CoverageSpan) {
- if let Some(last) = self.refined_spans.last_mut()
- && last.is_mergeable(&covspan)
- {
- // Instead of pushing the new span, merge it with the last refined span.
- debug!(?last, ?covspan, "merging new refined span with last refined span");
- last.merge_from(covspan);
- } else {
- self.refined_spans.push(covspan);
- }
- }
-
/// If `curr` is part of a new macro expansion, carve out and push a separate
/// span that ends just after the macro name and its subsequent `!`.
fn maybe_push_macro_name_span(&mut self) {
@@ -379,57 +367,59 @@ impl<'a> CoverageSpansGenerator<'a> {
return;
}
- let merged_prefix_len = self.curr_original_span.lo() - curr.span.lo();
- let after_macro_bang = merged_prefix_len + BytePos(visible_macro.as_str().len() as u32 + 1);
- if self.curr().span.lo() + after_macro_bang > self.curr().span.hi() {
+ // The split point is relative to `curr_original_span`,
+ // because `curr.span` may have been merged with preceding spans.
+ let split_point_after_macro_bang = self.curr_original_span.lo()
+ + BytePos(visible_macro.as_str().len() as u32)
+ + BytePos(1); // add 1 for the `!`
+ debug_assert!(split_point_after_macro_bang <= curr.span.hi());
+ if split_point_after_macro_bang > curr.span.hi() {
// Something is wrong with the macro name span;
- // return now to avoid emitting malformed mappings.
- // FIXME(#117788): Track down why this happens.
+ // return now to avoid emitting malformed mappings (e.g. #117788).
return;
}
+
let mut macro_name_cov = curr.clone();
- self.curr_mut().span = curr.span.with_lo(curr.span.lo() + after_macro_bang);
- macro_name_cov.span =
- macro_name_cov.span.with_hi(macro_name_cov.span.lo() + after_macro_bang);
+ macro_name_cov.span = macro_name_cov.span.with_hi(split_point_after_macro_bang);
+ self.curr_mut().span = curr.span.with_lo(split_point_after_macro_bang);
+
debug!(
" and curr starts a new macro expansion, so add a new span just for \
the macro `{visible_macro}!`, new span={macro_name_cov:?}",
);
- self.push_refined_span(macro_name_cov);
+ self.refined_spans.push(macro_name_cov);
}
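
Worked example of the split-point arithmetic: if `curr_original_span` for an `assert!(…)` call starts at byte 100, the split point is 100 + 6 (the bytes of `assert`) + 1 (the `!`) = 107, so `macro_name_cov` covers exactly `assert!` and the remaining `curr` span begins at the argument list.
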
+ #[track_caller]
fn curr(&self) -> &CoverageSpan {
- self.some_curr
- .as_ref()
- .unwrap_or_else(|| bug!("invalid attempt to unwrap a None some_curr"))
+ self.some_curr.as_ref().unwrap_or_else(|| bug!("some_curr is None (curr)"))
}
+ #[track_caller]
fn curr_mut(&mut self) -> &mut CoverageSpan {
- self.some_curr
- .as_mut()
- .unwrap_or_else(|| bug!("invalid attempt to unwrap a None some_curr"))
+ self.some_curr.as_mut().unwrap_or_else(|| bug!("some_curr is None (curr_mut)"))
}
/// If called, then the next call to `next_coverage_span()` will *not* update `prev` with the
/// `curr` coverage span.
+ #[track_caller]
fn take_curr(&mut self) -> CoverageSpan {
- self.some_curr.take().unwrap_or_else(|| bug!("invalid attempt to unwrap a None some_curr"))
+ self.some_curr.take().unwrap_or_else(|| bug!("some_curr is None (take_curr)"))
}
+ #[track_caller]
fn prev(&self) -> &CoverageSpan {
- self.some_prev
- .as_ref()
- .unwrap_or_else(|| bug!("invalid attempt to unwrap a None some_prev"))
+ self.some_prev.as_ref().unwrap_or_else(|| bug!("some_prev is None (prev)"))
}
+ #[track_caller]
fn prev_mut(&mut self) -> &mut CoverageSpan {
- self.some_prev
- .as_mut()
- .unwrap_or_else(|| bug!("invalid attempt to unwrap a None some_prev"))
+ self.some_prev.as_mut().unwrap_or_else(|| bug!("some_prev is None (prev_mut)"))
}
+ #[track_caller]
fn take_prev(&mut self) -> CoverageSpan {
- self.some_prev.take().unwrap_or_else(|| bug!("invalid attempt to unwrap a None some_prev"))
+ self.some_prev.take().unwrap_or_else(|| bug!("some_prev is None (take_prev)"))
}
/// If there are `pending_dups` but `prev` is not a matching dup (`prev.span` doesn't match the
@@ -452,19 +442,14 @@ impl<'a> CoverageSpansGenerator<'a> {
previous iteration, or prev started a new disjoint span"
);
if last_dup.span.hi() <= self.curr().span.lo() {
- // Temporarily steal `pending_dups` into a local, so that we can
- // drain it while calling other self methods.
- let mut pending_dups = std::mem::take(&mut self.pending_dups);
- for dup in pending_dups.drain(..) {
+ for dup in self.pending_dups.drain(..) {
debug!(" ...adding at least one pending={:?}", dup);
- self.push_refined_span(dup);
+ self.refined_spans.push(dup);
}
- // The list of dups is now empty, but we can recycle its capacity.
- assert!(pending_dups.is_empty() && self.pending_dups.is_empty());
- self.pending_dups = pending_dups;
} else {
self.pending_dups.clear();
}
+ assert!(self.pending_dups.is_empty());
}
/// Advance `prev` to `curr` (if any), and `curr` to the next `CoverageSpan` in sorted order.
@@ -475,7 +460,9 @@ impl<'a> CoverageSpansGenerator<'a> {
}
while let Some(curr) = self.sorted_spans_iter.next() {
debug!("FOR curr={:?}", curr);
- if let Some(prev) = &self.some_prev && prev.span.lo() > curr.span.lo() {
+ if let Some(prev) = &self.some_prev
+ && prev.span.lo() > curr.span.lo()
+ {
// Skip curr because prev has already advanced beyond the end of curr.
// This can only happen if a prior iteration updated `prev` to skip past
// a region of code, such as skipping past a closure.
@@ -509,22 +496,18 @@ impl<'a> CoverageSpansGenerator<'a> {
let has_pre_closure_span = prev.span.lo() < right_cutoff;
let has_post_closure_span = prev.span.hi() > right_cutoff;
- // Temporarily steal `pending_dups` into a local, so that we can
- // mutate and/or drain it while calling other self methods.
- let mut pending_dups = std::mem::take(&mut self.pending_dups);
-
if has_pre_closure_span {
let mut pre_closure = self.prev().clone();
pre_closure.span = pre_closure.span.with_hi(left_cutoff);
debug!(" prev overlaps a closure. Adding span for pre_closure={:?}", pre_closure);
- if !pending_dups.is_empty() {
- for mut dup in pending_dups.iter().cloned() {
- dup.span = dup.span.with_hi(left_cutoff);
- debug!(" ...and at least one pre_closure dup={:?}", dup);
- self.push_refined_span(dup);
- }
+
+ for mut dup in self.pending_dups.iter().cloned() {
+ dup.span = dup.span.with_hi(left_cutoff);
+ debug!(" ...and at least one pre_closure dup={:?}", dup);
+ self.refined_spans.push(dup);
}
- self.push_refined_span(pre_closure);
+
+ self.refined_spans.push(pre_closure);
}
if has_post_closure_span {
@@ -533,19 +516,17 @@ impl<'a> CoverageSpansGenerator<'a> {
// about how the `CoverageSpan`s are ordered.)
self.prev_mut().span = self.prev().span.with_lo(right_cutoff);
debug!(" Mutated prev.span to start after the closure. prev={:?}", self.prev());
- for dup in pending_dups.iter_mut() {
+
+ for dup in &mut self.pending_dups {
debug!(" ...and at least one overlapping dup={:?}", dup);
dup.span = dup.span.with_lo(right_cutoff);
}
+
let closure_covspan = self.take_curr(); // Prevent this curr from becoming prev.
- self.push_refined_span(closure_covspan); // since self.prev() was already updated
+ self.refined_spans.push(closure_covspan); // since self.prev() was already updated
} else {
- pending_dups.clear();
+ self.pending_dups.clear();
}
-
- // Restore the modified post-closure spans, or the empty vector's capacity.
- assert!(self.pending_dups.is_empty());
- self.pending_dups = pending_dups;
}
/// Called if `curr.span` equals `prev_original_span` (and potentially equal to all
@@ -641,7 +622,7 @@ impl<'a> CoverageSpansGenerator<'a> {
} else {
debug!(" ... adding modified prev={:?}", self.prev());
let prev = self.take_prev();
- self.push_refined_span(prev);
+ self.refined_spans.push(prev);
}
} else {
// with `pending_dups`, `prev` cannot have any statements that don't overlap
diff --git a/compiler/rustc_mir_transform/src/coverage/spans/from_mir.rs b/compiler/rustc_mir_transform/src/coverage/spans/from_mir.rs
index 6189e5379..a9c4ea33d 100644
--- a/compiler/rustc_mir_transform/src/coverage/spans/from_mir.rs
+++ b/compiler/rustc_mir_transform/src/coverage/spans/from_mir.rs
@@ -7,13 +7,22 @@ use rustc_span::Span;
use crate::coverage::graph::{BasicCoverageBlock, BasicCoverageBlockData, CoverageGraph};
use crate::coverage::spans::CoverageSpan;
+use crate::coverage::ExtractedHirInfo;
pub(super) fn mir_to_initial_sorted_coverage_spans(
mir_body: &mir::Body<'_>,
- fn_sig_span: Span,
- body_span: Span,
+ hir_info: &ExtractedHirInfo,
basic_coverage_blocks: &CoverageGraph,
) -> Vec<CoverageSpan> {
+ let &ExtractedHirInfo { is_async_fn, fn_sig_span, body_span, .. } = hir_info;
+ if is_async_fn {
+ // An async function desugars into a function that returns a future,
+ // with the user code wrapped in a closure. Any spans in the desugared
+ // outer function will be unhelpful, so just produce a single span
+ // associating the function signature with its entry BCB.
+ return vec![CoverageSpan::for_fn_sig(fn_sig_span)];
+ }
+
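
For instance, `async fn ping() {}` lowers to an outer `fn ping() -> impl Future<Output = ()>` whose MIR merely constructs and returns the future; the user-written statements live in an inner body that is instrumented separately, so only the outer signature span is mapped here.
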
let mut initial_spans = Vec::with_capacity(mir_body.basic_blocks.len() * 2);
for (bcb, bcb_data) in basic_coverage_blocks.iter_enumerated() {
initial_spans.extend(bcb_to_initial_coverage_spans(mir_body, body_span, bcb, bcb_data));
@@ -63,14 +72,14 @@ fn bcb_to_initial_coverage_spans<'a, 'tcx>(
let statement_spans = data.statements.iter().filter_map(move |statement| {
let expn_span = filtered_statement_span(statement)?;
- let span = function_source_span(expn_span, body_span);
+ let span = unexpand_into_body_span(expn_span, body_span)?;
Some(CoverageSpan::new(span, expn_span, bcb, is_closure(statement)))
});
let terminator_span = Some(data.terminator()).into_iter().filter_map(move |terminator| {
let expn_span = filtered_terminator_span(terminator)?;
- let span = function_source_span(expn_span, body_span);
+ let span = unexpand_into_body_span(expn_span, body_span)?;
Some(CoverageSpan::new(span, expn_span, bcb, false))
});
@@ -92,13 +101,13 @@ fn is_closure(statement: &Statement<'_>) -> bool {
/// If the MIR `Statement` has a span that contributes to computing coverage spans,
/// return it; otherwise return `None`.
fn filtered_statement_span(statement: &Statement<'_>) -> Option<Span> {
+ use mir::coverage::CoverageKind;
+
match statement.kind {
// These statements have spans that are often outside the scope of the executed source code
// for their parent `BasicBlock`.
StatementKind::StorageLive(_)
| StatementKind::StorageDead(_)
- // Coverage statements should not be encountered here, but if they are, don't inject coverage for them
- | StatementKind::Coverage(_)
// Ignore `ConstEvalCounter`s
| StatementKind::ConstEvalCounter
// Ignore `Nop`s
@@ -122,9 +131,13 @@ fn filtered_statement_span(statement: &Statement<'_>) -> Option<Span> {
// If and when the Issue is resolved, remove this special case match pattern:
StatementKind::FakeRead(box (FakeReadCause::ForGuardBinding, _)) => None,
- // Retain spans from all other statements
+ // Retain spans from most other statements.
StatementKind::FakeRead(box (_, _)) // Not including `ForGuardBinding`
| StatementKind::Intrinsic(..)
+ | StatementKind::Coverage(box mir::Coverage {
+ // The purpose of `SpanMarker` is to be matched and accepted here.
+ kind: CoverageKind::SpanMarker
+ })
| StatementKind::Assign(_)
| StatementKind::SetDiscriminant { .. }
| StatementKind::Deinit(..)
@@ -133,6 +146,11 @@ fn filtered_statement_span(statement: &Statement<'_>) -> Option<Span> {
| StatementKind::AscribeUserType(_, _) => {
Some(statement.source_info.span)
}
+
+ StatementKind::Coverage(box mir::Coverage {
+ // These coverage statements should not exist prior to coverage instrumentation.
+ kind: CoverageKind::CounterIncrement { .. } | CoverageKind::ExpressionUsed { .. }
+ }) => bug!("Unexpected coverage statement found during coverage instrumentation: {statement:?}"),
}
}
@@ -180,14 +198,16 @@ fn filtered_terminator_span(terminator: &Terminator<'_>) -> Option<Span> {
/// Returns an extrapolated span (pre-expansion[^1]) corresponding to a range
/// within the function's body source. This span is guaranteed to be contained
/// within, or equal to, the `body_span`. If the extrapolated span is not
-/// contained within the `body_span`, the `body_span` is returned.
+/// contained within the `body_span`, `None` is returned.
///
/// [^1]: Expansions result from Rust syntax (including macros, syntactic sugar,
/// etc.).
#[inline]
-fn function_source_span(span: Span, body_span: Span) -> Span {
+fn unexpand_into_body_span(span: Span, body_span: Span) -> Option<Span> {
use rustc_span::source_map::original_sp;
+ // FIXME(#118525): Consider switching from `original_sp` to `Span::find_ancestor_inside`,
+ // which is similar but gives slightly different results in some edge cases.
let original_span = original_sp(span, body_span).with_ctxt(body_span.ctxt());
- if body_span.contains(original_span) { original_span } else { body_span }
+ body_span.contains(original_span).then_some(original_span)
}
diff --git a/compiler/rustc_mir_transform/src/coverage/test_macros/Cargo.toml b/compiler/rustc_mir_transform/src/coverage/test_macros/Cargo.toml
deleted file mode 100644
index f753caa91..000000000
--- a/compiler/rustc_mir_transform/src/coverage/test_macros/Cargo.toml
+++ /dev/null
@@ -1,7 +0,0 @@
-[package]
-name = "coverage_test_macros"
-version = "0.0.0"
-edition = "2021"
-
-[lib]
-proc-macro = true
diff --git a/compiler/rustc_mir_transform/src/coverage/test_macros/src/lib.rs b/compiler/rustc_mir_transform/src/coverage/test_macros/src/lib.rs
deleted file mode 100644
index f41adf667..000000000
--- a/compiler/rustc_mir_transform/src/coverage/test_macros/src/lib.rs
+++ /dev/null
@@ -1,6 +0,0 @@
-use proc_macro::TokenStream;
-
-#[proc_macro]
-pub fn let_bcb(item: TokenStream) -> TokenStream {
- format!("let bcb{item} = graph::BasicCoverageBlock::from_usize({item});").parse().unwrap()
-}
diff --git a/compiler/rustc_mir_transform/src/coverage/tests.rs b/compiler/rustc_mir_transform/src/coverage/tests.rs
index 702fe5f56..931bc8e58 100644
--- a/compiler/rustc_mir_transform/src/coverage/tests.rs
+++ b/compiler/rustc_mir_transform/src/coverage/tests.rs
@@ -27,15 +27,17 @@
use super::counters;
use super::graph::{self, BasicCoverageBlock};
-use coverage_test_macros::let_bcb;
-
use itertools::Itertools;
use rustc_data_structures::graph::WithNumNodes;
use rustc_data_structures::graph::WithSuccessors;
use rustc_index::{Idx, IndexVec};
use rustc_middle::mir::*;
use rustc_middle::ty;
-use rustc_span::{self, BytePos, Pos, Span, DUMMY_SP};
+use rustc_span::{BytePos, Pos, Span, DUMMY_SP};
+
+fn bcb(index: u32) -> BasicCoverageBlock {
+ BasicCoverageBlock::from_u32(index)
+}
// All `TEMP_BLOCK` targets should be replaced before calling `to_body() -> mir::Body`.
const TEMP_BLOCK: BasicBlock = BasicBlock::MAX;
@@ -300,12 +302,15 @@ fn goto_switchint<'a>() -> Body<'a> {
mir_body
}
-macro_rules! assert_successors {
- ($basic_coverage_blocks:ident, $i:ident, [$($successor:ident),*]) => {
- let mut successors = $basic_coverage_blocks.successors[$i].clone();
- successors.sort_unstable();
- assert_eq!(successors, vec![$($successor),*]);
- }
+#[track_caller]
+fn assert_successors(
+ basic_coverage_blocks: &graph::CoverageGraph,
+ bcb: BasicCoverageBlock,
+ expected_successors: &[BasicCoverageBlock],
+) {
+ let mut successors = basic_coverage_blocks.successors[bcb].clone();
+ successors.sort_unstable();
+ assert_eq!(successors, expected_successors);
}
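
Because the replacement function is annotated with `#[track_caller]`, a failing `assert_eq!` is still reported at the test's call site, preserving the diagnostics that previously came for free from macro expansion.
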
#[test]
@@ -334,13 +339,9 @@ fn test_covgraph_goto_switchint() {
basic_coverage_blocks.iter_enumerated().collect::<Vec<_>>()
);
- let_bcb!(0);
- let_bcb!(1);
- let_bcb!(2);
-
- assert_successors!(basic_coverage_blocks, bcb0, [bcb1, bcb2]);
- assert_successors!(basic_coverage_blocks, bcb1, []);
- assert_successors!(basic_coverage_blocks, bcb2, []);
+ assert_successors(&basic_coverage_blocks, bcb(0), &[bcb(1), bcb(2)]);
+ assert_successors(&basic_coverage_blocks, bcb(1), &[]);
+ assert_successors(&basic_coverage_blocks, bcb(2), &[]);
}
/// Create a mock `Body` with a loop.
@@ -418,15 +419,10 @@ fn test_covgraph_switchint_then_loop_else_return() {
basic_coverage_blocks.iter_enumerated().collect::<Vec<_>>()
);
- let_bcb!(0);
- let_bcb!(1);
- let_bcb!(2);
- let_bcb!(3);
-
- assert_successors!(basic_coverage_blocks, bcb0, [bcb1]);
- assert_successors!(basic_coverage_blocks, bcb1, [bcb2, bcb3]);
- assert_successors!(basic_coverage_blocks, bcb2, []);
- assert_successors!(basic_coverage_blocks, bcb3, [bcb1]);
+ assert_successors(&basic_coverage_blocks, bcb(0), &[bcb(1)]);
+ assert_successors(&basic_coverage_blocks, bcb(1), &[bcb(2), bcb(3)]);
+ assert_successors(&basic_coverage_blocks, bcb(2), &[]);
+ assert_successors(&basic_coverage_blocks, bcb(3), &[bcb(1)]);
}
/// Create a mock `Body` with nested loops.
@@ -546,21 +542,13 @@ fn test_covgraph_switchint_loop_then_inner_loop_else_break() {
basic_coverage_blocks.iter_enumerated().collect::<Vec<_>>()
);
- let_bcb!(0);
- let_bcb!(1);
- let_bcb!(2);
- let_bcb!(3);
- let_bcb!(4);
- let_bcb!(5);
- let_bcb!(6);
-
- assert_successors!(basic_coverage_blocks, bcb0, [bcb1]);
- assert_successors!(basic_coverage_blocks, bcb1, [bcb2, bcb3]);
- assert_successors!(basic_coverage_blocks, bcb2, []);
- assert_successors!(basic_coverage_blocks, bcb3, [bcb4]);
- assert_successors!(basic_coverage_blocks, bcb4, [bcb5, bcb6]);
- assert_successors!(basic_coverage_blocks, bcb5, [bcb1]);
- assert_successors!(basic_coverage_blocks, bcb6, [bcb4]);
+ assert_successors(&basic_coverage_blocks, bcb(0), &[bcb(1)]);
+ assert_successors(&basic_coverage_blocks, bcb(1), &[bcb(2), bcb(3)]);
+ assert_successors(&basic_coverage_blocks, bcb(2), &[]);
+ assert_successors(&basic_coverage_blocks, bcb(3), &[bcb(4)]);
+ assert_successors(&basic_coverage_blocks, bcb(4), &[bcb(5), bcb(6)]);
+ assert_successors(&basic_coverage_blocks, bcb(5), &[bcb(1)]);
+ assert_successors(&basic_coverage_blocks, bcb(6), &[bcb(4)]);
}
#[test]
@@ -595,10 +583,7 @@ fn test_find_loop_backedges_one() {
backedges
);
- let_bcb!(1);
- let_bcb!(3);
-
- assert_eq!(backedges[bcb1], vec![bcb3]);
+ assert_eq!(backedges[bcb(1)], &[bcb(3)]);
}
#[test]
@@ -613,13 +598,8 @@ fn test_find_loop_backedges_two() {
backedges
);
- let_bcb!(1);
- let_bcb!(4);
- let_bcb!(5);
- let_bcb!(6);
-
- assert_eq!(backedges[bcb1], vec![bcb5]);
- assert_eq!(backedges[bcb4], vec![bcb6]);
+ assert_eq!(backedges[bcb(1)], &[bcb(5)]);
+ assert_eq!(backedges[bcb(4)], &[bcb(6)]);
}
#[test]
@@ -632,13 +612,11 @@ fn test_traverse_coverage_with_loops() {
traversed_in_order.push(bcb);
}
- let_bcb!(6);
-
// bcb0 is visited first. Then bcb1 starts the first loop, and all remaining nodes *except*
// bcb6 are inside the first loop.
assert_eq!(
*traversed_in_order.last().expect("should have elements"),
- bcb6,
+ bcb(6),
"bcb6 should not be visited until all nodes inside the first loop have been visited"
);
}
@@ -656,20 +634,18 @@ fn test_make_bcb_counters() {
coverage_counters.make_bcb_counters(&basic_coverage_blocks, bcb_has_coverage_spans);
assert_eq!(coverage_counters.num_expressions(), 0);
- let_bcb!(1);
assert_eq!(
0, // bcb1 has a `Counter` with id = 0
- match coverage_counters.bcb_counter(bcb1).expect("should have a counter") {
+ match coverage_counters.bcb_counter(bcb(1)).expect("should have a counter") {
counters::BcbCounter::Counter { id, .. } => id,
_ => panic!("expected a Counter"),
}
.as_u32()
);
- let_bcb!(2);
assert_eq!(
1, // bcb2 has a `Counter` with id = 1
- match coverage_counters.bcb_counter(bcb2).expect("should have a counter") {
+ match coverage_counters.bcb_counter(bcb(2)).expect("should have a counter") {
counters::BcbCounter::Counter { id, .. } => id,
_ => panic!("expected a Counter"),
}
diff --git a/compiler/rustc_mir_transform/src/cross_crate_inline.rs b/compiler/rustc_mir_transform/src/cross_crate_inline.rs
index 261d9dd44..5f01b8418 100644
--- a/compiler/rustc_mir_transform/src/cross_crate_inline.rs
+++ b/compiler/rustc_mir_transform/src/cross_crate_inline.rs
@@ -22,6 +22,18 @@ fn cross_crate_inlinable(tcx: TyCtxt<'_>, def_id: LocalDefId) -> bool {
return false;
}
+ // This just reproduces the logic from Instance::requires_inline.
+ match tcx.def_kind(def_id) {
+ DefKind::Ctor(..) | DefKind::Closure => return true,
+ DefKind::Fn | DefKind::AssocFn => {}
+ _ => return false,
+ }
+
+ // From this point on, it is valid to return true or false.
+ if tcx.sess.opts.unstable_opts.cross_crate_inline_threshold == InliningThreshold::Always {
+ return true;
+ }
+
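
Note the ordering: the `InliningThreshold::Always` early return now comes before the `#[inline]`/`#[inline(never)]` handling kept below, so setting the unstable `cross_crate_inline_threshold` option to `always` marks every remaining fn-like item as cross-crate inlinable regardless of inline attributes.
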
// Obey source annotations first; this is important because it means we can use
// #[inline(never)] to force code generation.
match codegen_fn_attrs.inline {
@@ -30,13 +42,6 @@ fn cross_crate_inlinable(tcx: TyCtxt<'_>, def_id: LocalDefId) -> bool {
_ => {}
}
- // This just reproduces the logic from Instance::requires_inline.
- match tcx.def_kind(def_id) {
- DefKind::Ctor(..) | DefKind::Closure => return true,
- DefKind::Fn | DefKind::AssocFn => {}
- _ => return false,
- }
-
// Don't do any inference when incremental compilation is enabled; the additional inlining that
// inference permits also creates more work for small edits.
if tcx.sess.opts.incremental.is_some() {
diff --git a/compiler/rustc_mir_transform/src/ctfe_limit.rs b/compiler/rustc_mir_transform/src/ctfe_limit.rs
index bf5722b3d..dcc960e1e 100644
--- a/compiler/rustc_mir_transform/src/ctfe_limit.rs
+++ b/compiler/rustc_mir_transform/src/ctfe_limit.rs
@@ -20,7 +20,7 @@ impl<'tcx> MirPass<'tcx> for CtfeLimit {
.filter_map(|(node, node_data)| {
if matches!(node_data.terminator().kind, TerminatorKind::Call { .. })
// Back edges in a CFG indicate loops
- || has_back_edge(&doms, node, &node_data)
+ || has_back_edge(doms, node, node_data)
{
Some(node)
} else {
diff --git a/compiler/rustc_mir_transform/src/dataflow_const_prop.rs b/compiler/rustc_mir_transform/src/dataflow_const_prop.rs
index 81d2bba98..ad12bce9b 100644
--- a/compiler/rustc_mir_transform/src/dataflow_const_prop.rs
+++ b/compiler/rustc_mir_transform/src/dataflow_const_prop.rs
@@ -8,6 +8,7 @@ use rustc_hir::def::DefKind;
use rustc_middle::mir::interpret::{AllocId, ConstAllocation, InterpResult, Scalar};
use rustc_middle::mir::visit::{MutVisitor, PlaceContext, Visitor};
use rustc_middle::mir::*;
+use rustc_middle::query::TyCtxtAt;
use rustc_middle::ty::layout::{LayoutOf, TyAndLayout};
use rustc_middle::ty::{self, Ty, TyCtxt};
use rustc_mir_dataflow::value_analysis::{
@@ -19,7 +20,6 @@ use rustc_span::DUMMY_SP;
use rustc_target::abi::{Abi, FieldIdx, Size, VariantIdx, FIRST_VARIANT};
use crate::const_prop::throw_machine_stop_str;
-use crate::MirPass;
// These constants are somewhat random guesses and have not been optimized.
// If `tcx.sess.mir_opt_level() >= 4`, we ignore the limits (this can become very expensive).
@@ -362,7 +362,7 @@ impl<'a, 'tcx> ConstAnalysis<'a, 'tcx> {
&& let Ok(rhs_layout) = self.tcx.layout_of(self.param_env.and(rhs_ty))
{
let op = ImmTy::from_scalar(pointer, rhs_layout).into();
- self.assign_constant(state, place, op, &rhs.projection);
+ self.assign_constant(state, place, op, rhs.projection);
}
}
Operand::Constant(box constant) => {
@@ -496,7 +496,7 @@ impl<'a, 'tcx> ConstAnalysis<'a, 'tcx> {
FlatSet::Elem(scalar) => {
let ty = op.ty(self.local_decls, self.tcx);
self.tcx.layout_of(self.param_env.and(ty)).map_or(FlatSet::Top, |layout| {
- FlatSet::Elem(ImmTy::from_scalar(scalar.into(), layout))
+ FlatSet::Elem(ImmTy::from_scalar(scalar, layout))
})
}
FlatSet::Bottom => FlatSet::Bottom,
@@ -597,7 +597,9 @@ fn propagatable_scalar(
state: &State<FlatSet<Scalar>>,
map: &Map,
) -> Option<Scalar> {
- if let FlatSet::Elem(value) = state.get_idx(place, map) && value.try_to_int().is_ok() {
+ if let FlatSet::Elem(value) = state.get_idx(place, map)
+ && value.try_to_int().is_ok()
+ {
// Do not attempt to propagate pointers, as we may fail to preserve their identity.
Some(value)
} else {
@@ -836,7 +838,8 @@ impl<'tcx> Visitor<'tcx> for OperandCollector<'tcx, '_, '_, '_> {
location: Location,
) {
if let PlaceElem::Index(local) = elem
- && let Some(value) = self.visitor.try_make_constant(self.ecx, local.into(), self.state, self.map)
+ && let Some(value) =
+ self.visitor.try_make_constant(self.ecx, local.into(), self.state, self.map)
{
self.visitor.patch.before_effect.insert((location, local.into()), value);
}
@@ -873,7 +876,7 @@ impl<'mir, 'tcx: 'mir> rustc_const_eval::interpret::Machine<'mir, 'tcx> for Dumm
}
fn before_access_global(
- _tcx: TyCtxt<'tcx>,
+ _tcx: TyCtxtAt<'tcx>,
_machine: &Self,
_alloc_id: AllocId,
alloc: ConstAllocation<'tcx>,
diff --git a/compiler/rustc_mir_transform/src/deduplicate_blocks.rs b/compiler/rustc_mir_transform/src/deduplicate_blocks.rs
index 666293cbc..b40b2ec8b 100644
--- a/compiler/rustc_mir_transform/src/deduplicate_blocks.rs
+++ b/compiler/rustc_mir_transform/src/deduplicate_blocks.rs
@@ -3,8 +3,6 @@
use std::{collections::hash_map::Entry, hash::Hash, hash::Hasher, iter};
-use crate::MirPass;
-
use rustc_data_structures::fx::FxHashMap;
use rustc_middle::mir::visit::MutVisitor;
use rustc_middle::mir::*;
diff --git a/compiler/rustc_mir_transform/src/deref_separator.rs b/compiler/rustc_mir_transform/src/deref_separator.rs
index 42be74570..0e2fccc85 100644
--- a/compiler/rustc_mir_transform/src/deref_separator.rs
+++ b/compiler/rustc_mir_transform/src/deref_separator.rs
@@ -1,4 +1,3 @@
-use crate::MirPass;
use rustc_index::IndexVec;
use rustc_middle::mir::patch::MirPatch;
use rustc_middle::mir::visit::NonUseContext::VarDebugInfo;
diff --git a/compiler/rustc_mir_transform/src/elaborate_box_derefs.rs b/compiler/rustc_mir_transform/src/elaborate_box_derefs.rs
index 1c917a85c..96943435b 100644
--- a/compiler/rustc_mir_transform/src/elaborate_box_derefs.rs
+++ b/compiler/rustc_mir_transform/src/elaborate_box_derefs.rs
@@ -2,7 +2,6 @@
//!
//! Box is not actually a pointer so it is incorrect to dereference it directly.
-use crate::MirPass;
use rustc_hir::def_id::DefId;
use rustc_index::Idx;
use rustc_middle::mir::patch::MirPatch;
diff --git a/compiler/rustc_mir_transform/src/elaborate_drops.rs b/compiler/rustc_mir_transform/src/elaborate_drops.rs
index 59156b242..c45badbc5 100644
--- a/compiler/rustc_mir_transform/src/elaborate_drops.rs
+++ b/compiler/rustc_mir_transform/src/elaborate_drops.rs
@@ -1,5 +1,4 @@
use crate::deref_separator::deref_finder;
-use crate::MirPass;
use rustc_index::bit_set::BitSet;
use rustc_index::IndexVec;
use rustc_middle::mir::patch::MirPatch;
@@ -57,7 +56,7 @@ impl<'tcx> MirPass<'tcx> for ElaborateDrops {
// For types that do not need dropping, the behaviour is trivial. So we only need to track
// init/uninit for types that do need dropping.
let move_data =
- MoveData::gather_moves(&body, tcx, param_env, |ty| ty.needs_drop(tcx, param_env));
+ MoveData::gather_moves(body, tcx, param_env, |ty| ty.needs_drop(tcx, param_env));
let elaborate_patch = {
let env = MoveDataParamEnv { move_data, param_env };
@@ -67,7 +66,7 @@ impl<'tcx> MirPass<'tcx> for ElaborateDrops {
.pass_name("elaborate_drops")
.iterate_to_fixpoint()
.into_results_cursor(body);
- let dead_unwinds = compute_dead_unwinds(&body, &mut inits);
+ let dead_unwinds = compute_dead_unwinds(body, &mut inits);
let uninits = MaybeUninitializedPlaces::new(tcx, body, &env)
.mark_inactive_variants_as_uninit()
@@ -172,19 +171,13 @@ impl<'a, 'tcx> DropElaborator<'a, 'tcx> for Elaborator<'a, '_, 'tcx> {
let mut some_live = false;
let mut some_dead = false;
let mut children_count = 0;
- on_all_children_bits(
- self.tcx(),
- self.body(),
- self.ctxt.move_data(),
- path,
- |child| {
- let (live, dead) = self.ctxt.init_data.maybe_live_dead(child);
- debug!("elaborate_drop: state({:?}) = {:?}", child, (live, dead));
- some_live |= live;
- some_dead |= dead;
- children_count += 1;
- },
- );
+ on_all_children_bits(self.ctxt.move_data(), path, |child| {
+ let (live, dead) = self.ctxt.init_data.maybe_live_dead(child);
+ debug!("elaborate_drop: state({:?}) = {:?}", child, (live, dead));
+ some_live |= live;
+ some_dead |= dead;
+ children_count += 1;
+ });
((some_live, some_dead), children_count != 1)
}
};
@@ -202,13 +195,9 @@ impl<'a, 'tcx> DropElaborator<'a, 'tcx> for Elaborator<'a, '_, 'tcx> {
self.ctxt.set_drop_flag(loc, path, DropFlagState::Absent);
}
DropFlagMode::Deep => {
- on_all_children_bits(
- self.tcx(),
- self.body(),
- self.ctxt.move_data(),
- path,
- |child| self.ctxt.set_drop_flag(loc, child, DropFlagState::Absent),
- );
+ on_all_children_bits(self.ctxt.move_data(), path, |child| {
+ self.ctxt.set_drop_flag(loc, child, DropFlagState::Absent)
+ });
}
}
}
@@ -268,10 +257,9 @@ impl<'b, 'tcx> ElaborateDropsCtxt<'b, 'tcx> {
}
fn create_drop_flag(&mut self, index: MovePathIndex, span: Span) {
- let tcx = self.tcx;
let patch = &mut self.patch;
debug!("create_drop_flag({:?})", self.body.span);
- self.drop_flags[index].get_or_insert_with(|| patch.new_temp(tcx.types.bool, span));
+ self.drop_flags[index].get_or_insert_with(|| patch.new_temp(self.tcx.types.bool, span));
}
fn drop_flag(&mut self, index: MovePathIndex) -> Option<Place<'tcx>> {
@@ -304,7 +292,7 @@ impl<'b, 'tcx> ElaborateDropsCtxt<'b, 'tcx> {
match path {
LookupResult::Exact(path) => {
self.init_data.seek_before(self.body.terminator_loc(bb));
- on_all_children_bits(self.tcx, self.body, self.move_data(), path, |child| {
+ on_all_children_bits(self.move_data(), path, |child| {
let (maybe_live, maybe_dead) = self.init_data.maybe_live_dead(child);
debug!(
"collect_drop_flags: collecting {:?} from {:?}@{:?} - {:?}",
@@ -327,7 +315,7 @@ impl<'b, 'tcx> ElaborateDropsCtxt<'b, 'tcx> {
self.init_data.seek_before(self.body.terminator_loc(bb));
let (_maybe_live, maybe_dead) = self.init_data.maybe_live_dead(parent);
if maybe_dead {
- self.tcx.sess.delay_span_bug(
+ self.tcx.sess.span_delayed_bug(
terminator.source_info.span,
format!(
"drop of untracked, uninitialized value {bb:?}, place {place:?} ({path:?})"
@@ -392,7 +380,7 @@ impl<'b, 'tcx> ElaborateDropsCtxt<'b, 'tcx> {
LookupResult::Parent(None) => {}
LookupResult::Parent(Some(_)) => {
if !replace {
- self.tcx.sess.delay_span_bug(
+ self.tcx.sess.span_delayed_bug(
terminator.source_info.span,
format!("drop of untracked value {bb:?}"),
);
@@ -444,7 +432,7 @@ impl<'b, 'tcx> ElaborateDropsCtxt<'b, 'tcx> {
let loc = Location { block: tgt, statement_index: 0 };
let path = self.move_data().rev_lookup.find(destination.as_ref());
- on_lookup_result_bits(self.tcx, self.body, self.move_data(), path, |child| {
+ on_lookup_result_bits(self.move_data(), path, |child| {
self.set_drop_flag(loc, child, DropFlagState::Present)
});
}
@@ -453,14 +441,9 @@ impl<'b, 'tcx> ElaborateDropsCtxt<'b, 'tcx> {
fn drop_flags_for_args(&mut self) {
let loc = Location::START;
- rustc_mir_dataflow::drop_flag_effects_for_function_entry(
- self.tcx,
- self.body,
- self.env,
- |path, ds| {
- self.set_drop_flag(loc, path, ds);
- },
- )
+ rustc_mir_dataflow::drop_flag_effects_for_function_entry(self.body, self.env, |path, ds| {
+ self.set_drop_flag(loc, path, ds);
+ })
}
fn drop_flags_for_locs(&mut self) {
@@ -492,7 +475,6 @@ impl<'b, 'tcx> ElaborateDropsCtxt<'b, 'tcx> {
}
let loc = Location { block: bb, statement_index: i };
rustc_mir_dataflow::drop_flag_effects_for_location(
- self.tcx,
self.body,
self.env,
loc,
@@ -515,7 +497,7 @@ impl<'b, 'tcx> ElaborateDropsCtxt<'b, 'tcx> {
let loc = Location { block: bb, statement_index: data.statements.len() };
let path = self.move_data().rev_lookup.find(destination.as_ref());
- on_lookup_result_bits(self.tcx, self.body, self.move_data(), path, |child| {
+ on_lookup_result_bits(self.move_data(), path, |child| {
self.set_drop_flag(loc, child, DropFlagState::Present)
});
}
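
The pattern running through the elaborate_drops hunks above: `on_all_children_bits`, `on_lookup_result_bits`, and the `drop_flag_effects_*` helpers drop their `tcx`/`body` parameters, since walking the move-path tree needs only the `MoveData` itself. A minimal standalone sketch of that shape, with toy types in place of rustc's:

    #[derive(Clone, Copy)]
    struct MovePathIndex(usize);

    // Toy left-child/right-sibling encoding of the move-path tree; the real
    // MoveData in rustc_mir_dataflow stores equivalent per-path links.
    struct MoveData {
        first_child: Vec<Option<MovePathIndex>>,
        next_sibling: Vec<Option<MovePathIndex>>,
    }

    // Note the signature: no `tcx`, no `body` -- the traversal is purely a
    // walk over `move_data`.
    fn on_all_children_bits(
        move_data: &MoveData,
        path: MovePathIndex,
        each_child: &mut dyn FnMut(MovePathIndex),
    ) {
        each_child(path);
        let mut next = move_data.first_child[path.0];
        while let Some(child) = next {
            on_all_children_bits(move_data, child, each_child);
            next = move_data.next_sibling[child.0];
        }
    }

    fn main() {
        // Path 0 is the root with children 1 and 2; path 2 has child 3.
        let md = MoveData {
            first_child: vec![Some(MovePathIndex(1)), None, Some(MovePathIndex(3)), None],
            next_sibling: vec![None, Some(MovePathIndex(2)), None, None],
        };
        let mut seen = Vec::new();
        on_all_children_bits(&md, MovePathIndex(0), &mut |p| seen.push(p.0));
        assert_eq!(seen, [0, 1, 2, 3]);
    }
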
diff --git a/compiler/rustc_mir_transform/src/errors.rs b/compiler/rustc_mir_transform/src/errors.rs
index 5879a8039..fd4af3150 100644
--- a/compiler/rustc_mir_transform/src/errors.rs
+++ b/compiler/rustc_mir_transform/src/errors.rs
@@ -1,6 +1,8 @@
+use std::borrow::Cow;
+
use rustc_errors::{
- Applicability, DecorateLint, DiagnosticBuilder, DiagnosticMessage, EmissionGuarantee, Handler,
- IntoDiagnostic,
+ Applicability, DecorateLint, DiagCtxt, DiagnosticArgValue, DiagnosticBuilder,
+ DiagnosticMessage, EmissionGuarantee, ErrorGuaranteed, IntoDiagnostic,
};
use rustc_macros::{Diagnostic, LintDiagnostic, Subdiagnostic};
use rustc_middle::mir::{AssertKind, UnsafetyViolationDetails};
@@ -9,6 +11,8 @@ use rustc_session::lint::{self, Lint};
use rustc_span::def_id::DefId;
use rustc_span::Span;
+use crate::fluent_generated as fluent;
+
#[derive(LintDiagnostic)]
pub(crate) enum ConstMutate {
#[diag(mir_transform_const_modify)]
@@ -58,75 +62,108 @@ pub(crate) struct RequiresUnsafe {
// so we need to eagerly translate the label here, which isn't supported by the derive API
// We could also exhaustively list out the primary messages for all unsafe violations,
// but this would result in a lot of duplication.
-impl<'sess, G: EmissionGuarantee> IntoDiagnostic<'sess, G> for RequiresUnsafe {
+impl<'sess> IntoDiagnostic<'sess> for RequiresUnsafe {
#[track_caller]
- fn into_diagnostic(self, handler: &'sess Handler) -> DiagnosticBuilder<'sess, G> {
- let mut diag =
- handler.struct_diagnostic(crate::fluent_generated::mir_transform_requires_unsafe);
+ fn into_diagnostic(self, dcx: &'sess DiagCtxt) -> DiagnosticBuilder<'sess, ErrorGuaranteed> {
+ let mut diag = dcx.struct_err(fluent::mir_transform_requires_unsafe);
diag.code(rustc_errors::DiagnosticId::Error("E0133".to_string()));
diag.set_span(self.span);
diag.span_label(self.span, self.details.label());
- diag.note(self.details.note());
- let desc = handler.eagerly_translate_to_string(self.details.label(), [].into_iter());
+ let desc = dcx.eagerly_translate_to_string(self.details.label(), [].into_iter());
diag.set_arg("details", desc);
diag.set_arg("op_in_unsafe_fn_allowed", self.op_in_unsafe_fn_allowed);
+ self.details.add_subdiagnostics(&mut diag);
if let Some(sp) = self.enclosing {
- diag.span_label(sp, crate::fluent_generated::mir_transform_not_inherited);
+ diag.span_label(sp, fluent::mir_transform_not_inherited);
}
diag
}
}
-#[derive(Copy, Clone)]
+#[derive(Clone)]
pub(crate) struct RequiresUnsafeDetail {
pub span: Span,
pub violation: UnsafetyViolationDetails,
}
impl RequiresUnsafeDetail {
- fn note(self) -> DiagnosticMessage {
+ fn add_subdiagnostics<G: EmissionGuarantee>(&self, diag: &mut DiagnosticBuilder<'_, G>) {
use UnsafetyViolationDetails::*;
match self.violation {
- CallToUnsafeFunction => crate::fluent_generated::mir_transform_call_to_unsafe_note,
- UseOfInlineAssembly => crate::fluent_generated::mir_transform_use_of_asm_note,
+ CallToUnsafeFunction => {
+ diag.note(fluent::mir_transform_call_to_unsafe_note);
+ }
+ UseOfInlineAssembly => {
+ diag.note(fluent::mir_transform_use_of_asm_note);
+ }
InitializingTypeWith => {
- crate::fluent_generated::mir_transform_initializing_valid_range_note
+ diag.note(fluent::mir_transform_initializing_valid_range_note);
+ }
+ CastOfPointerToInt => {
+ diag.note(fluent::mir_transform_const_ptr2int_note);
+ }
+ UseOfMutableStatic => {
+ diag.note(fluent::mir_transform_use_of_static_mut_note);
+ }
+ UseOfExternStatic => {
+ diag.note(fluent::mir_transform_use_of_extern_static_note);
+ }
+ DerefOfRawPointer => {
+ diag.note(fluent::mir_transform_deref_ptr_note);
+ }
+ AccessToUnionField => {
+ diag.note(fluent::mir_transform_union_access_note);
}
- CastOfPointerToInt => crate::fluent_generated::mir_transform_const_ptr2int_note,
- UseOfMutableStatic => crate::fluent_generated::mir_transform_use_of_static_mut_note,
- UseOfExternStatic => crate::fluent_generated::mir_transform_use_of_extern_static_note,
- DerefOfRawPointer => crate::fluent_generated::mir_transform_deref_ptr_note,
- AccessToUnionField => crate::fluent_generated::mir_transform_union_access_note,
MutationOfLayoutConstrainedField => {
- crate::fluent_generated::mir_transform_mutation_layout_constrained_note
+ diag.note(fluent::mir_transform_mutation_layout_constrained_note);
}
BorrowOfLayoutConstrainedField => {
- crate::fluent_generated::mir_transform_mutation_layout_constrained_borrow_note
+ diag.note(fluent::mir_transform_mutation_layout_constrained_borrow_note);
+ }
+ CallToFunctionWith { ref missing, ref build_enabled } => {
+ diag.help(fluent::mir_transform_target_feature_call_help);
+ diag.set_arg(
+ "missing_target_features",
+ DiagnosticArgValue::StrListSepByAnd(
+ missing.iter().map(|feature| Cow::from(feature.as_str())).collect(),
+ ),
+ );
+ diag.set_arg("missing_target_features_count", missing.len());
+ if !build_enabled.is_empty() {
+ diag.note(fluent::mir_transform_target_feature_call_note);
+ diag.set_arg(
+ "build_target_features",
+ DiagnosticArgValue::StrListSepByAnd(
+ build_enabled
+ .iter()
+ .map(|feature| Cow::from(feature.as_str()))
+ .collect(),
+ ),
+ );
+ diag.set_arg("build_target_features_count", build_enabled.len());
+ }
}
- CallToFunctionWith => crate::fluent_generated::mir_transform_target_feature_call_note,
}
}
- fn label(self) -> DiagnosticMessage {
+ fn label(&self) -> DiagnosticMessage {
use UnsafetyViolationDetails::*;
match self.violation {
- CallToUnsafeFunction => crate::fluent_generated::mir_transform_call_to_unsafe_label,
- UseOfInlineAssembly => crate::fluent_generated::mir_transform_use_of_asm_label,
- InitializingTypeWith => {
- crate::fluent_generated::mir_transform_initializing_valid_range_label
- }
- CastOfPointerToInt => crate::fluent_generated::mir_transform_const_ptr2int_label,
- UseOfMutableStatic => crate::fluent_generated::mir_transform_use_of_static_mut_label,
- UseOfExternStatic => crate::fluent_generated::mir_transform_use_of_extern_static_label,
- DerefOfRawPointer => crate::fluent_generated::mir_transform_deref_ptr_label,
- AccessToUnionField => crate::fluent_generated::mir_transform_union_access_label,
+ CallToUnsafeFunction => fluent::mir_transform_call_to_unsafe_label,
+ UseOfInlineAssembly => fluent::mir_transform_use_of_asm_label,
+ InitializingTypeWith => fluent::mir_transform_initializing_valid_range_label,
+ CastOfPointerToInt => fluent::mir_transform_const_ptr2int_label,
+ UseOfMutableStatic => fluent::mir_transform_use_of_static_mut_label,
+ UseOfExternStatic => fluent::mir_transform_use_of_extern_static_label,
+ DerefOfRawPointer => fluent::mir_transform_deref_ptr_label,
+ AccessToUnionField => fluent::mir_transform_union_access_label,
MutationOfLayoutConstrainedField => {
- crate::fluent_generated::mir_transform_mutation_layout_constrained_label
+ fluent::mir_transform_mutation_layout_constrained_label
}
BorrowOfLayoutConstrainedField => {
- crate::fluent_generated::mir_transform_mutation_layout_constrained_borrow_label
+ fluent::mir_transform_mutation_layout_constrained_borrow_label
}
- CallToFunctionWith => crate::fluent_generated::mir_transform_target_feature_call_label,
+ CallToFunctionWith { .. } => fluent::mir_transform_target_feature_call_label,
}
}
}
@@ -143,30 +180,25 @@ pub(crate) struct UnsafeOpInUnsafeFn {
impl<'a> DecorateLint<'a, ()> for UnsafeOpInUnsafeFn {
#[track_caller]
- fn decorate_lint<'b>(
- self,
- diag: &'b mut DiagnosticBuilder<'a, ()>,
- ) -> &'b mut DiagnosticBuilder<'a, ()> {
- let handler = diag.handler().expect("lint should not yet be emitted");
- let desc = handler.eagerly_translate_to_string(self.details.label(), [].into_iter());
+ fn decorate_lint<'b>(self, diag: &'b mut DiagnosticBuilder<'a, ()>) {
+ let dcx = diag.dcx().expect("lint should not yet be emitted");
+ let desc = dcx.eagerly_translate_to_string(self.details.label(), [].into_iter());
diag.set_arg("details", desc);
diag.span_label(self.details.span, self.details.label());
- diag.note(self.details.note());
+ self.details.add_subdiagnostics(diag);
if let Some((start, end, fn_sig)) = self.suggest_unsafe_block {
- diag.span_note(fn_sig, crate::fluent_generated::mir_transform_note);
+ diag.span_note(fn_sig, fluent::mir_transform_note);
diag.tool_only_multipart_suggestion(
- crate::fluent_generated::mir_transform_suggestion,
+ fluent::mir_transform_suggestion,
vec![(start, " unsafe {".into()), (end, "}".into())],
Applicability::MaybeIncorrect,
);
}
-
- diag
}
fn msg(&self) -> DiagnosticMessage {
- crate::fluent_generated::mir_transform_unsafe_op_in_unsafe_fn
+ fluent::mir_transform_unsafe_op_in_unsafe_fn
}
}
@@ -176,10 +208,7 @@ pub(crate) enum AssertLint<P> {
}
impl<'a, P: std::fmt::Debug> DecorateLint<'a, ()> for AssertLint<P> {
- fn decorate_lint<'b>(
- self,
- diag: &'b mut DiagnosticBuilder<'a, ()>,
- ) -> &'b mut DiagnosticBuilder<'a, ()> {
+ fn decorate_lint<'b>(self, diag: &'b mut DiagnosticBuilder<'a, ()>) {
let span = self.span();
let assert_kind = self.panic();
let message = assert_kind.diagnostic_message();
@@ -187,18 +216,12 @@ impl<'a, P: std::fmt::Debug> DecorateLint<'a, ()> for AssertLint<P> {
diag.set_arg(name, value);
});
diag.span_label(span, message);
-
- diag
}
fn msg(&self) -> DiagnosticMessage {
match self {
- AssertLint::ArithmeticOverflow(..) => {
- crate::fluent_generated::mir_transform_arithmetic_overflow
- }
- AssertLint::UnconditionalPanic(..) => {
- crate::fluent_generated::mir_transform_operation_will_panic
- }
+ AssertLint::ArithmeticOverflow(..) => fluent::mir_transform_arithmetic_overflow,
+ AssertLint::UnconditionalPanic(..) => fluent::mir_transform_operation_will_panic,
}
}
}
@@ -251,23 +274,19 @@ pub(crate) struct MustNotSupend<'tcx, 'a> {
// Needed for def_path_str
impl<'a> DecorateLint<'a, ()> for MustNotSupend<'_, '_> {
- fn decorate_lint<'b>(
- self,
- diag: &'b mut rustc_errors::DiagnosticBuilder<'a, ()>,
- ) -> &'b mut rustc_errors::DiagnosticBuilder<'a, ()> {
- diag.span_label(self.yield_sp, crate::fluent_generated::_subdiag::label);
+ fn decorate_lint<'b>(self, diag: &'b mut rustc_errors::DiagnosticBuilder<'a, ()>) {
+ diag.span_label(self.yield_sp, fluent::_subdiag::label);
if let Some(reason) = self.reason {
diag.subdiagnostic(reason);
}
- diag.span_help(self.src_sp, crate::fluent_generated::_subdiag::help);
+ diag.span_help(self.src_sp, fluent::_subdiag::help);
diag.set_arg("pre", self.pre);
diag.set_arg("def_path", self.tcx.def_path_str(self.def_id));
diag.set_arg("post", self.post);
- diag
}
fn msg(&self) -> rustc_errors::DiagnosticMessage {
- crate::fluent_generated::mir_transform_must_not_suspend
+ fluent::mir_transform_must_not_suspend
}
}
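
The errors.rs hunks replace the single-message `note()` with `add_subdiagnostics` because the new `CallToFunctionWith { missing, build_enabled }` variant must attach a help line, an optional note, and list-valued arguments. A simplified mirror of that shape (hypothetical builder, not the rustc_errors API):

    // Just enough diagnostic surface to show why one returned message no
    // longer fits.
    #[derive(Default)]
    struct DiagBuilder {
        notes: Vec<String>,
        helps: Vec<String>,
        args: Vec<(&'static str, String)>,
    }

    impl DiagBuilder {
        fn note(&mut self, msg: &str) {
            self.notes.push(msg.to_string());
        }
        fn help(&mut self, msg: &str) {
            self.helps.push(msg.to_string());
        }
        fn set_arg(&mut self, name: &'static str, value: impl ToString) {
            self.args.push((name, value.to_string()));
        }
    }

    enum Violation {
        DerefOfRawPointer,
        CallToFunctionWith { missing: Vec<String>, build_enabled: Vec<String> },
    }

    impl Violation {
        // One variant now attaches a help line plus list-valued arguments,
        // and optionally a note -- more than `fn note(self) -> Message`
        // could express.
        fn add_subdiagnostics(&self, diag: &mut DiagBuilder) {
            match self {
                Violation::DerefOfRawPointer => {
                    diag.note("raw pointers may be null or dangling");
                }
                Violation::CallToFunctionWith { missing, build_enabled } => {
                    diag.help("the context requires these additional target features");
                    diag.set_arg("missing_target_features", missing.join(" and "));
                    diag.set_arg("missing_target_features_count", missing.len());
                    if !build_enabled.is_empty() {
                        diag.note("some features are enabled for the whole build");
                        diag.set_arg("build_target_features", build_enabled.join(" and "));
                    }
                }
            }
        }
    }

    fn main() {
        let mut diag = DiagBuilder::default();
        let violation = Violation::CallToFunctionWith {
            missing: vec!["avx".to_string(), "avx2".to_string()],
            build_enabled: vec![],
        };
        violation.add_subdiagnostics(&mut diag);
        assert_eq!(diag.helps.len(), 1);
        assert_eq!(diag.args[0].1, "avx and avx2");
        Violation::DerefOfRawPointer.add_subdiagnostics(&mut diag);
        assert_eq!(diag.notes.len(), 1);
    }
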
diff --git a/compiler/rustc_mir_transform/src/function_item_references.rs b/compiler/rustc_mir_transform/src/function_item_references.rs
index a42eacbf2..340bb1948 100644
--- a/compiler/rustc_mir_transform/src/function_item_references.rs
+++ b/compiler/rustc_mir_transform/src/function_item_references.rs
@@ -14,7 +14,7 @@ pub struct FunctionItemReferences;
impl<'tcx> MirLint<'tcx> for FunctionItemReferences {
fn run_lint(&self, tcx: TyCtxt<'tcx>, body: &Body<'tcx>) {
let mut checker = FunctionItemRefChecker { tcx, body };
- checker.visit_body(&body);
+ checker.visit_body(body);
}
}
@@ -47,12 +47,12 @@ impl<'tcx> Visitor<'tcx> for FunctionItemRefChecker<'_, 'tcx> {
for inner_ty in arg_ty.walk().filter_map(|arg| arg.as_type()) {
if let Some((fn_id, fn_args)) = FunctionItemRefChecker::is_fn_ref(inner_ty)
{
- let span = self.nth_arg_span(&args, 0);
+ let span = self.nth_arg_span(args, 0);
self.emit_lint(fn_id, fn_args, source_info, span);
}
}
} else {
- self.check_bound_args(def_id, args_ref, &args, source_info);
+ self.check_bound_args(def_id, args_ref, args, source_info);
}
}
}
diff --git a/compiler/rustc_mir_transform/src/gvn.rs b/compiler/rustc_mir_transform/src/gvn.rs
index dce298e92..3b8adf7e8 100644
--- a/compiler/rustc_mir_transform/src/gvn.rs
+++ b/compiler/rustc_mir_transform/src/gvn.rs
@@ -88,8 +88,8 @@ use rustc_data_structures::fx::{FxHashMap, FxIndexSet};
use rustc_data_structures::graph::dominators::Dominators;
use rustc_hir::def::DefKind;
use rustc_index::bit_set::BitSet;
+use rustc_index::newtype_index;
use rustc_index::IndexVec;
-use rustc_macros::newtype_index;
use rustc_middle::mir::interpret::GlobalAlloc;
use rustc_middle::mir::visit::*;
use rustc_middle::mir::*;
@@ -103,7 +103,6 @@ use std::borrow::Cow;
use crate::dataflow_const_prop::DummyMachine;
use crate::ssa::{AssignedValue, SsaLocals};
-use crate::MirPass;
use either::Either;
pub struct GVN;
@@ -388,7 +387,9 @@ impl<'body, 'tcx> VnState<'body, 'tcx> {
self.ecx.copy_op(op, &field_dest, /*allow_transmute*/ false).ok()?;
}
self.ecx.write_discriminant(variant.unwrap_or(FIRST_VARIANT), &dest).ok()?;
- self.ecx.alloc_mark_immutable(dest.ptr().provenance.unwrap()).ok()?;
+ self.ecx
+ .alloc_mark_immutable(dest.ptr().provenance.unwrap().alloc_id())
+ .ok()?;
dest.into()
} else {
return None;
@@ -461,7 +462,9 @@ impl<'body, 'tcx> VnState<'body, 'tcx> {
}
NullaryOp(null_op, ty) => {
let layout = self.ecx.layout_of(ty).ok()?;
- if let NullOp::SizeOf | NullOp::AlignOf = null_op && layout.is_unsized() {
+ if let NullOp::SizeOf | NullOp::AlignOf = null_op
+ && layout.is_unsized()
+ {
return None;
}
let val = match null_op {
@@ -641,12 +644,10 @@ impl<'body, 'tcx> VnState<'body, 'tcx> {
{
if let Some(offset) = self.evaluated[idx].as_ref()
&& let Ok(offset) = self.ecx.read_target_usize(offset)
+ && let Some(min_length) = offset.checked_add(1)
{
- projection.to_mut()[i] = ProjectionElem::ConstantIndex {
- offset,
- min_length: offset + 1,
- from_end: false,
- };
+ projection.to_mut()[i] =
+ ProjectionElem::ConstantIndex { offset, min_length, from_end: false };
} else if let Some(new_idx) = self.try_as_local(idx, location) {
projection.to_mut()[i] = ProjectionElem::Index(new_idx);
self.reused_locals.insert(new_idx);
@@ -865,7 +866,9 @@ impl<'body, 'tcx> VnState<'body, 'tcx> {
.collect();
let fields = fields?;
- if let AggregateTy::Array = ty && fields.len() > 4 {
+ if let AggregateTy::Array = ty
+ && fields.len() > 4
+ {
let first = fields[0];
if fields.iter().all(|&v| v == first) {
let len = ty::Const::from_target_usize(self.tcx, fields.len().try_into().unwrap());
@@ -924,7 +927,8 @@ fn op_to_prop_const<'tcx>(
}
let pointer = mplace.ptr().into_pointer_or_addr().ok()?;
- let (alloc_id, offset) = pointer.into_parts();
+ let (prov, offset) = pointer.into_parts();
+ let alloc_id = prov.alloc_id();
intern_const_alloc_for_constprop(ecx, alloc_id).ok()?;
if matches!(ecx.tcx.global_alloc(alloc_id), GlobalAlloc::Memory(_)) {
// `alloc_id` may point to a static. Codegen will choke on an `Indirect` with anything
@@ -1008,8 +1012,7 @@ impl<'tcx> MutVisitor<'tcx> for VnState<'_, 'tcx> {
// Do not try to simplify a constant, it's already in canonical shape.
&& !matches!(rvalue, Rvalue::Use(Operand::Constant(_)))
{
- if let Some(value) = self.simplify_rvalue(rvalue, location)
- {
+ if let Some(value) = self.simplify_rvalue(rvalue, location) {
if let Some(const_) = self.try_as_constant(value) {
*rvalue = Rvalue::Use(Operand::Constant(Box::new(const_)));
} else if let Some(local) = self.try_as_local(value, location)
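
Most gvn.rs hunks are let-chain reformatting, but the `ConstantIndex` one is a real fix: `min_length` is `offset + 1`, which overflows when the constant index reads back as `u64::MAX`. A self-contained sketch of the `checked_add` guard:

    #[derive(Debug, PartialEq)]
    struct ConstantIndex {
        offset: u64,
        min_length: u64,
        from_end: bool,
    }

    // `checked_add` replaces the old `offset + 1`, which for
    // `offset == u64::MAX` would panic in debug builds and wrap to a wrong
    // `min_length` in release builds.
    fn fold_index(offset: u64) -> Option<ConstantIndex> {
        let min_length = offset.checked_add(1)?;
        Some(ConstantIndex { offset, min_length, from_end: false })
    }

    fn main() {
        assert_eq!(
            fold_index(3),
            Some(ConstantIndex { offset: 3, min_length: 4, from_end: false })
        );
        // The GVN code keeps the `ProjectionElem::Index` fallback here.
        assert_eq!(fold_index(u64::MAX), None);
    }
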
diff --git a/compiler/rustc_mir_transform/src/inline.rs b/compiler/rustc_mir_transform/src/inline.rs
index 793dcf0d9..8ad804bf3 100644
--- a/compiler/rustc_mir_transform/src/inline.rs
+++ b/compiler/rustc_mir_transform/src/inline.rs
@@ -17,7 +17,6 @@ use rustc_target::spec::abi::Abi;
use crate::cost_checker::CostChecker;
use crate::simplify::{remove_dead_blocks, CfgSimplifier};
use crate::util;
-use crate::MirPass;
use std::iter;
use std::ops::{Range, RangeFrom};
@@ -32,7 +31,6 @@ struct CallSite<'tcx> {
callee: Instance<'tcx>,
fn_sig: ty::PolyFnSig<'tcx>,
block: BasicBlock,
- target: Option<BasicBlock>,
source_info: SourceInfo,
}
@@ -281,7 +279,7 @@ impl<'tcx> Inliner<'tcx> {
}
let old_blocks = caller_body.basic_blocks.next_index();
- self.inline_call(caller_body, &callsite, callee_body);
+ self.inline_call(caller_body, callsite, callee_body);
let new_blocks = old_blocks..caller_body.basic_blocks.next_index();
Ok(new_blocks)
@@ -367,7 +365,7 @@ impl<'tcx> Inliner<'tcx> {
) -> Option<CallSite<'tcx>> {
// Only consider direct calls to functions
let terminator = bb_data.terminator();
- if let TerminatorKind::Call { ref func, target, fn_span, .. } = terminator.kind {
+ if let TerminatorKind::Call { ref func, fn_span, .. } = terminator.kind {
let func_ty = func.ty(caller_body, self.tcx);
if let ty::FnDef(def_id, args) = *func_ty.kind() {
// To resolve an instance its args have to be fully normalized.
@@ -386,7 +384,7 @@ impl<'tcx> Inliner<'tcx> {
let fn_sig = self.tcx.fn_sig(def_id).instantiate(self.tcx, args);
let source_info = SourceInfo { span: fn_span, ..terminator.source_info };
- return Some(CallSite { callee, fn_sig, block: bb, target, source_info });
+ return Some(CallSite { callee, fn_sig, block: bb, source_info });
}
}
@@ -541,142 +539,158 @@ impl<'tcx> Inliner<'tcx> {
mut callee_body: Body<'tcx>,
) {
let terminator = caller_body[callsite.block].terminator.take().unwrap();
- match terminator.kind {
- TerminatorKind::Call { args, destination, unwind, .. } => {
- // If the call is something like `a[*i] = f(i)`, where
- // `i : &mut usize`, then just duplicating the `a[*i]`
- // Place could result in two different locations if `f`
- // writes to `i`. To prevent this we need to create a temporary
- // borrow of the place and pass the destination as `*temp` instead.
- fn dest_needs_borrow(place: Place<'_>) -> bool {
- for elem in place.projection.iter() {
- match elem {
- ProjectionElem::Deref | ProjectionElem::Index(_) => return true,
- _ => {}
- }
- }
+ let TerminatorKind::Call { args, destination, unwind, target, .. } = terminator.kind else {
+ bug!("unexpected terminator kind {:?}", terminator.kind);
+ };
+
+ let return_block = if let Some(block) = target {
+ // Prepare a new block for code that should execute when the call returns. We don't
+ // use the target block directly, since it might have other predecessors.
+ let mut data = BasicBlockData::new(Some(Terminator {
+ source_info: terminator.source_info,
+ kind: TerminatorKind::Goto { target: block },
+ }));
+ data.is_cleanup = caller_body[block].is_cleanup;
+ Some(caller_body.basic_blocks_mut().push(data))
+ } else {
+ None
+ };
- false
+ // If the call is something like `a[*i] = f(i)`, where
+ // `i : &mut usize`, then just duplicating the `a[*i]`
+ // Place could result in two different locations if `f`
+ // writes to `i`. To prevent this we need to create a temporary
+ // borrow of the place and pass the destination as `*temp` instead.
+ fn dest_needs_borrow(place: Place<'_>) -> bool {
+ for elem in place.projection.iter() {
+ match elem {
+ ProjectionElem::Deref | ProjectionElem::Index(_) => return true,
+ _ => {}
}
+ }
- let dest = if dest_needs_borrow(destination) {
- trace!("creating temp for return destination");
- let dest = Rvalue::Ref(
- self.tcx.lifetimes.re_erased,
- BorrowKind::Mut { kind: MutBorrowKind::Default },
- destination,
- );
- let dest_ty = dest.ty(caller_body, self.tcx);
- let temp = Place::from(self.new_call_temp(caller_body, &callsite, dest_ty));
- caller_body[callsite.block].statements.push(Statement {
- source_info: callsite.source_info,
- kind: StatementKind::Assign(Box::new((temp, dest))),
- });
- self.tcx.mk_place_deref(temp)
- } else {
- destination
- };
+ false
+ }
- // Always create a local to hold the destination, as `RETURN_PLACE` may appear
- // where a full `Place` is not allowed.
- let (remap_destination, destination_local) = if let Some(d) = dest.as_local() {
- (false, d)
- } else {
- (
- true,
- self.new_call_temp(
- caller_body,
- &callsite,
- destination.ty(caller_body, self.tcx).ty,
- ),
- )
- };
+ let dest = if dest_needs_borrow(destination) {
+ trace!("creating temp for return destination");
+ let dest = Rvalue::Ref(
+ self.tcx.lifetimes.re_erased,
+ BorrowKind::Mut { kind: MutBorrowKind::Default },
+ destination,
+ );
+ let dest_ty = dest.ty(caller_body, self.tcx);
+ let temp =
+ Place::from(self.new_call_temp(caller_body, &callsite, dest_ty, return_block));
+ caller_body[callsite.block].statements.push(Statement {
+ source_info: callsite.source_info,
+ kind: StatementKind::Assign(Box::new((temp, dest))),
+ });
+ self.tcx.mk_place_deref(temp)
+ } else {
+ destination
+ };
- // Copy the arguments if needed.
- let args: Vec<_> = self.make_call_args(args, &callsite, caller_body, &callee_body);
-
- let mut integrator = Integrator {
- args: &args,
- new_locals: Local::new(caller_body.local_decls.len())..,
- new_scopes: SourceScope::new(caller_body.source_scopes.len())..,
- new_blocks: BasicBlock::new(caller_body.basic_blocks.len())..,
- destination: destination_local,
- callsite_scope: caller_body.source_scopes[callsite.source_info.scope].clone(),
- callsite,
- cleanup_block: unwind,
- in_cleanup_block: false,
- tcx: self.tcx,
- always_live_locals: BitSet::new_filled(callee_body.local_decls.len()),
- };
+ // Always create a local to hold the destination, as `RETURN_PLACE` may appear
+ // where a full `Place` is not allowed.
+ let (remap_destination, destination_local) = if let Some(d) = dest.as_local() {
+ (false, d)
+ } else {
+ (
+ true,
+ self.new_call_temp(
+ caller_body,
+ &callsite,
+ destination.ty(caller_body, self.tcx).ty,
+ return_block,
+ ),
+ )
+ };
- // Map all `Local`s, `SourceScope`s and `BasicBlock`s to new ones
- // (or existing ones, in a few special cases) in the caller.
- integrator.visit_body(&mut callee_body);
-
- // If there are any locals without storage markers, give them storage only for the
- // duration of the call.
- for local in callee_body.vars_and_temps_iter() {
- if integrator.always_live_locals.contains(local) {
- let new_local = integrator.map_local(local);
- caller_body[callsite.block].statements.push(Statement {
- source_info: callsite.source_info,
- kind: StatementKind::StorageLive(new_local),
- });
- }
- }
- if let Some(block) = callsite.target {
- // To avoid repeated O(n) insert, push any new statements to the end and rotate
- // the slice once.
- let mut n = 0;
- if remap_destination {
- caller_body[block].statements.push(Statement {
- source_info: callsite.source_info,
- kind: StatementKind::Assign(Box::new((
- dest,
- Rvalue::Use(Operand::Move(destination_local.into())),
- ))),
- });
- n += 1;
- }
- for local in callee_body.vars_and_temps_iter().rev() {
- if integrator.always_live_locals.contains(local) {
- let new_local = integrator.map_local(local);
- caller_body[block].statements.push(Statement {
- source_info: callsite.source_info,
- kind: StatementKind::StorageDead(new_local),
- });
- n += 1;
- }
- }
- caller_body[block].statements.rotate_right(n);
- }
+ // Copy the arguments if needed.
+ let args: Vec<_> =
+ self.make_call_args(args, &callsite, caller_body, &callee_body, return_block);
+
+ let mut integrator = Integrator {
+ args: &args,
+ new_locals: Local::new(caller_body.local_decls.len())..,
+ new_scopes: SourceScope::new(caller_body.source_scopes.len())..,
+ new_blocks: BasicBlock::new(caller_body.basic_blocks.len())..,
+ destination: destination_local,
+ callsite_scope: caller_body.source_scopes[callsite.source_info.scope].clone(),
+ callsite,
+ cleanup_block: unwind,
+ in_cleanup_block: false,
+ return_block,
+ tcx: self.tcx,
+ always_live_locals: BitSet::new_filled(callee_body.local_decls.len()),
+ };
- // Insert all of the (mapped) parts of the callee body into the caller.
- caller_body.local_decls.extend(callee_body.drain_vars_and_temps());
- caller_body.source_scopes.extend(&mut callee_body.source_scopes.drain(..));
- caller_body.var_debug_info.append(&mut callee_body.var_debug_info);
- caller_body.basic_blocks_mut().extend(callee_body.basic_blocks_mut().drain(..));
+ // Map all `Local`s, `SourceScope`s and `BasicBlock`s to new ones
+ // (or existing ones, in a few special cases) in the caller.
+ integrator.visit_body(&mut callee_body);
- caller_body[callsite.block].terminator = Some(Terminator {
+ // If there are any locals without storage markers, give them storage only for the
+ // duration of the call.
+ for local in callee_body.vars_and_temps_iter() {
+ if integrator.always_live_locals.contains(local) {
+ let new_local = integrator.map_local(local);
+ caller_body[callsite.block].statements.push(Statement {
source_info: callsite.source_info,
- kind: TerminatorKind::Goto { target: integrator.map_block(START_BLOCK) },
+ kind: StatementKind::StorageLive(new_local),
});
-
- // Copy only unevaluated constants from the callee_body into the caller_body.
- // Although we are only pushing `ConstKind::Unevaluated` consts to
- // `required_consts`, here we may not only have `ConstKind::Unevaluated`
- // because we are calling `instantiate_and_normalize_erasing_regions`.
- caller_body.required_consts.extend(
- callee_body.required_consts.iter().copied().filter(|&ct| match ct.const_ {
- Const::Ty(_) => {
- bug!("should never encounter ty::UnevaluatedConst in `required_consts`")
- }
- Const::Val(..) | Const::Unevaluated(..) => true,
- }),
- );
}
- kind => bug!("unexpected terminator kind {:?}", kind),
}
+ if let Some(block) = return_block {
+ // To avoid repeated O(n) insert, push any new statements to the end and rotate
+ // the slice once.
+ let mut n = 0;
+ if remap_destination {
+ caller_body[block].statements.push(Statement {
+ source_info: callsite.source_info,
+ kind: StatementKind::Assign(Box::new((
+ dest,
+ Rvalue::Use(Operand::Move(destination_local.into())),
+ ))),
+ });
+ n += 1;
+ }
+ for local in callee_body.vars_and_temps_iter().rev() {
+ if integrator.always_live_locals.contains(local) {
+ let new_local = integrator.map_local(local);
+ caller_body[block].statements.push(Statement {
+ source_info: callsite.source_info,
+ kind: StatementKind::StorageDead(new_local),
+ });
+ n += 1;
+ }
+ }
+ caller_body[block].statements.rotate_right(n);
+ }
+
+ // Insert all of the (mapped) parts of the callee body into the caller.
+ caller_body.local_decls.extend(callee_body.drain_vars_and_temps());
+ caller_body.source_scopes.extend(&mut callee_body.source_scopes.drain(..));
+ caller_body.var_debug_info.append(&mut callee_body.var_debug_info);
+ caller_body.basic_blocks_mut().extend(callee_body.basic_blocks_mut().drain(..));
+
+ caller_body[callsite.block].terminator = Some(Terminator {
+ source_info: callsite.source_info,
+ kind: TerminatorKind::Goto { target: integrator.map_block(START_BLOCK) },
+ });
+
+ // Copy only unevaluated constants from the callee_body into the caller_body.
+ // Although we are only pushing `ConstKind::Unevaluated` consts to
+ // `required_consts`, here we may not only have `ConstKind::Unevaluated`
+ // because we are calling `instantiate_and_normalize_erasing_regions`.
+ caller_body.required_consts.extend(callee_body.required_consts.iter().copied().filter(
+ |&ct| match ct.const_ {
+ Const::Ty(_) => {
+ bug!("should never encounter ty::UnevaluatedConst in `required_consts`")
+ }
+ Const::Val(..) | Const::Unevaluated(..) => true,
+ },
+ ));
}
fn make_call_args(
@@ -685,6 +699,7 @@ impl<'tcx> Inliner<'tcx> {
callsite: &CallSite<'tcx>,
caller_body: &mut Body<'tcx>,
callee_body: &Body<'tcx>,
+ return_block: Option<BasicBlock>,
) -> Vec<Local> {
let tcx = self.tcx;
@@ -713,8 +728,18 @@ impl<'tcx> Inliner<'tcx> {
// and the vector is `[closure_ref, tmp0, tmp1, tmp2]`.
if callsite.fn_sig.abi() == Abi::RustCall && callee_body.spread_arg.is_none() {
let mut args = args.into_iter();
- let self_ = self.create_temp_if_necessary(args.next().unwrap(), callsite, caller_body);
- let tuple = self.create_temp_if_necessary(args.next().unwrap(), callsite, caller_body);
+ let self_ = self.create_temp_if_necessary(
+ args.next().unwrap(),
+ callsite,
+ caller_body,
+ return_block,
+ );
+ let tuple = self.create_temp_if_necessary(
+ args.next().unwrap(),
+ callsite,
+ caller_body,
+ return_block,
+ );
assert!(args.next().is_none());
let tuple = Place::from(tuple);
@@ -731,13 +756,13 @@ impl<'tcx> Inliner<'tcx> {
let tuple_field = Operand::Move(tcx.mk_place_field(tuple, FieldIdx::new(i), ty));
// Spill to a local to make e.g., `tmp0`.
- self.create_temp_if_necessary(tuple_field, callsite, caller_body)
+ self.create_temp_if_necessary(tuple_field, callsite, caller_body, return_block)
});
closure_ref_arg.chain(tuple_tmp_args).collect()
} else {
args.into_iter()
- .map(|a| self.create_temp_if_necessary(a, callsite, caller_body))
+ .map(|a| self.create_temp_if_necessary(a, callsite, caller_body, return_block))
.collect()
}
}
@@ -749,6 +774,7 @@ impl<'tcx> Inliner<'tcx> {
arg: Operand<'tcx>,
callsite: &CallSite<'tcx>,
caller_body: &mut Body<'tcx>,
+ return_block: Option<BasicBlock>,
) -> Local {
// Reuse the operand if it is a moved temporary.
if let Operand::Move(place) = &arg
@@ -761,7 +787,7 @@ impl<'tcx> Inliner<'tcx> {
// Otherwise, create a temporary for the argument.
trace!("creating temp for argument {:?}", arg);
let arg_ty = arg.ty(caller_body, self.tcx);
- let local = self.new_call_temp(caller_body, callsite, arg_ty);
+ let local = self.new_call_temp(caller_body, callsite, arg_ty, return_block);
caller_body[callsite.block].statements.push(Statement {
source_info: callsite.source_info,
kind: StatementKind::Assign(Box::new((Place::from(local), Rvalue::Use(arg)))),
@@ -775,6 +801,7 @@ impl<'tcx> Inliner<'tcx> {
caller_body: &mut Body<'tcx>,
callsite: &CallSite<'tcx>,
ty: Ty<'tcx>,
+ return_block: Option<BasicBlock>,
) -> Local {
let local = caller_body.local_decls.push(LocalDecl::new(ty, callsite.source_info.span));
@@ -783,7 +810,7 @@ impl<'tcx> Inliner<'tcx> {
kind: StatementKind::StorageLive(local),
});
- if let Some(block) = callsite.target {
+ if let Some(block) = return_block {
caller_body[block].statements.insert(
0,
Statement {
@@ -814,6 +841,7 @@ struct Integrator<'a, 'tcx> {
callsite: &'a CallSite<'tcx>,
cleanup_block: UnwindAction,
in_cleanup_block: bool,
+ return_block: Option<BasicBlock>,
tcx: TyCtxt<'tcx>,
always_live_locals: BitSet<Local>,
}
@@ -957,7 +985,7 @@ impl<'tcx> MutVisitor<'tcx> for Integrator<'_, 'tcx> {
*unwind = self.map_unwind(*unwind);
}
TerminatorKind::Return => {
- terminator.kind = if let Some(tgt) = self.callsite.target {
+ terminator.kind = if let Some(tgt) = self.return_block {
TerminatorKind::Goto { target: tgt }
} else {
TerminatorKind::Unreachable
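
The inline.rs restructuring replaces `callsite.target` with a dedicated `return_block`: a fresh block whose only terminator is a `Goto` to the original target, so the storage markers and destination remap inserted after the call cannot leak into the target's other predecessors. A toy CFG sketch of the trampoline, with made-up block and terminator types:

    enum Terminator {
        Goto { target: usize },
        Return,
    }

    struct Block {
        statements: Vec<String>,
        terminator: Terminator,
    }

    // Mirrors the diff's `return_block` construction: a fresh block whose
    // only job is to jump to `target`.
    fn new_return_block(blocks: &mut Vec<Block>, target: usize) -> usize {
        blocks.push(Block {
            statements: Vec::new(),
            terminator: Terminator::Goto { target },
        });
        blocks.len() - 1
    }

    fn main() {
        // Block 0 is the call's original `target`; block 1 is another
        // predecessor that also jumps to it and must not observe the
        // inliner's cleanup statements.
        let mut blocks = vec![
            Block { statements: vec![], terminator: Terminator::Return },
            Block { statements: vec![], terminator: Terminator::Goto { target: 0 } },
        ];
        let ret = new_return_block(&mut blocks, 0);
        // StorageDead markers and the destination remap go here, not into
        // block 0; `rotate_right` in the real code keeps them up front.
        blocks[ret].statements.push("StorageDead(_tmp)".to_string());
        assert!(blocks[0].statements.is_empty());
        assert_eq!(blocks[ret].statements, ["StorageDead(_tmp)"]);
        assert!(matches!(blocks[ret].terminator, Terminator::Goto { target: 0 }));
    }
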
diff --git a/compiler/rustc_mir_transform/src/instsimplify.rs b/compiler/rustc_mir_transform/src/instsimplify.rs
index fbcd6e75a..4f0f63d22 100644
--- a/compiler/rustc_mir_transform/src/instsimplify.rs
+++ b/compiler/rustc_mir_transform/src/instsimplify.rs
@@ -1,8 +1,6 @@
//! Performs various peephole optimizations.
use crate::simplify::simplify_duplicate_switch_targets;
-use crate::MirPass;
-use rustc_hir::Mutability;
use rustc_middle::mir::*;
use rustc_middle::ty::layout::ValidityRequirement;
use rustc_middle::ty::{self, GenericArgsRef, ParamEnv, Ty, TyCtxt};
@@ -35,12 +33,9 @@ impl<'tcx> MirPass<'tcx> for InstSimplify {
}
}
- ctx.simplify_primitive_clone(
- &mut block.terminator.as_mut().unwrap(),
- &mut block.statements,
- );
+ ctx.simplify_primitive_clone(block.terminator.as_mut().unwrap(), &mut block.statements);
ctx.simplify_intrinsic_assert(
- &mut block.terminator.as_mut().unwrap(),
+ block.terminator.as_mut().unwrap(),
&mut block.statements,
);
simplify_duplicate_switch_targets(block.terminator.as_mut().unwrap());
diff --git a/compiler/rustc_mir_transform/src/jump_threading.rs b/compiler/rustc_mir_transform/src/jump_threading.rs
index 7b918be44..a41d8e212 100644
--- a/compiler/rustc_mir_transform/src/jump_threading.rs
+++ b/compiler/rustc_mir_transform/src/jump_threading.rs
@@ -45,7 +45,6 @@ use rustc_middle::ty::{self, ScalarInt, Ty, TyCtxt};
use rustc_mir_dataflow::value_analysis::{Map, PlaceIndex, State, TrackElem};
use crate::cost_checker::CostChecker;
-use crate::MirPass;
pub struct JumpThreading;
@@ -95,7 +94,7 @@ impl<'tcx> MirPass<'tcx> for JumpThreading {
let cost = CostChecker::new(tcx, param_env, None, body);
- let mut state = State::new(ConditionSet::default(), &finder.map);
+ let mut state = State::new(ConditionSet::default(), finder.map);
let conds = if let Some((value, then, else_)) = targets.as_static_if() {
let Some(value) = ScalarInt::try_from_uint(value, discr_layout.size) else {
@@ -112,7 +111,7 @@ impl<'tcx> MirPass<'tcx> for JumpThreading {
}))
};
let conds = ConditionSet(conds);
- state.insert_value_idx(discr, conds, &finder.map);
+ state.insert_value_idx(discr, conds, finder.map);
finder.find_opportunity(bb, state, cost, 0);
}
@@ -247,7 +246,9 @@ impl<'tcx, 'a> TOFinder<'tcx, 'a> {
let last_non_rec = self.opportunities.len();
let predecessors = &self.body.basic_blocks.predecessors()[bb];
- if let &[pred] = &predecessors[..] && bb != START_BLOCK {
+ if let &[pred] = &predecessors[..]
+ && bb != START_BLOCK
+ {
let term = self.body.basic_blocks[pred].terminator();
match term.kind {
TerminatorKind::SwitchInt { ref discr, ref targets } => {
@@ -419,8 +420,10 @@ impl<'tcx, 'a> TOFinder<'tcx, 'a> {
// Do not support unions.
AggregateKind::Adt(.., Some(_)) => return None,
AggregateKind::Adt(_, variant_index, ..) if agg_ty.is_enum() => {
- if let Some(discr_target) = self.map.apply(lhs, TrackElem::Discriminant)
- && let Some(discr_value) = discriminant_for_variant(agg_ty, *variant_index)
+ if let Some(discr_target) =
+ self.map.apply(lhs, TrackElem::Discriminant)
+ && let Some(discr_value) =
+ discriminant_for_variant(agg_ty, *variant_index)
{
self.process_operand(bb, discr_target, &discr_value, state);
}
@@ -646,7 +649,7 @@ impl OpportunitySet {
// `succ` must be a successor of `current`. If it is not, this means this TO is not
// satisfiable and a previous TO erased this edge, so we bail out.
- if basic_blocks[current].terminator().successors().find(|s| *s == succ).is_none() {
+ if !basic_blocks[current].terminator().successors().any(|s| s == succ) {
debug!("impossible");
return;
}
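
Besides the let-chain reformatting and the `finder.map` borrow cleanups, the last jump_threading hunk swaps `find(..).is_none()` for the clearer `!any(..)`; the two spellings are equivalent:

    fn main() {
        let successors = [2usize, 5, 7];
        let succ = 5;
        // Old spelling in `OpportunitySet`:
        let missing_old = successors.iter().find(|s| **s == succ).is_none();
        // New spelling -- same result, reads as the predicate it is:
        let missing_new = !successors.iter().any(|s| *s == succ);
        assert_eq!(missing_old, missing_new);
        assert!(!missing_new); // 5 is a successor, so we don't bail out
    }
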
diff --git a/compiler/rustc_mir_transform/src/large_enums.rs b/compiler/rustc_mir_transform/src/large_enums.rs
index 0a8b13d66..1d788a55f 100644
--- a/compiler/rustc_mir_transform/src/large_enums.rs
+++ b/compiler/rustc_mir_transform/src/large_enums.rs
@@ -1,5 +1,4 @@
use crate::rustc_middle::ty::util::IntTypeExt;
-use crate::MirPass;
use rustc_data_structures::fx::FxHashMap;
use rustc_middle::mir::interpret::AllocId;
use rustc_middle::mir::*;
diff --git a/compiler/rustc_mir_transform/src/lib.rs b/compiler/rustc_mir_transform/src/lib.rs
index bf5f0ca7c..89e897191 100644
--- a/compiler/rustc_mir_transform/src/lib.rs
+++ b/compiler/rustc_mir_transform/src/lib.rs
@@ -118,10 +118,7 @@ use rustc_const_eval::transform::promote_consts;
use rustc_const_eval::transform::validate;
use rustc_mir_dataflow::rustc_peek;
-use rustc_errors::{DiagnosticMessage, SubdiagnosticMessage};
-use rustc_fluent_macro::fluent_messages;
-
-fluent_messages! { "../messages.ftl" }
+rustc_fluent_macro::fluent_messages! { "../messages.ftl" }
pub fn provide(providers: &mut Providers) {
check_unsafety::provide(providers);
@@ -269,7 +266,7 @@ fn mir_const_qualif(tcx: TyCtxt<'_>, def: LocalDefId) -> ConstQualifs {
let body = &tcx.mir_const(def).borrow();
if body.return_ty().references_error() {
- tcx.sess.delay_span_bug(body.span, "mir_const_qualif: MIR had errors");
+ tcx.sess.span_delayed_bug(body.span, "mir_const_qualif: MIR had errors");
return Default::default();
}
@@ -398,7 +395,7 @@ fn inner_mir_for_ctfe(tcx: TyCtxt<'_>, def: LocalDefId) -> Body<'_> {
/// mir borrowck *before* doing so in order to ensure that borrowck can be run and doesn't
/// end up missing the source MIR due to stealing happening.
fn mir_drops_elaborated_and_const_checked(tcx: TyCtxt<'_>, def: LocalDefId) -> &Steal<Body<'_>> {
- if let DefKind::Coroutine = tcx.def_kind(def) {
+ if tcx.is_coroutine(def.to_def_id()) {
tcx.ensure_with_value().mir_coroutine_witnesses(def);
}
let mir_borrowck = tcx.mir_borrowck(def);
@@ -481,14 +478,14 @@ pub fn run_analysis_to_runtime_passes<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'
assert!(body.phase == MirPhase::Analysis(AnalysisPhase::PostCleanup));
// Do a little drop elaboration before const-checking if `const_precise_live_drops` is enabled.
- if check_consts::post_drop_elaboration::checking_enabled(&ConstCx::new(tcx, &body)) {
+ if check_consts::post_drop_elaboration::checking_enabled(&ConstCx::new(tcx, body)) {
pm::run_passes(
tcx,
body,
&[&remove_uninit_drops::RemoveUninitDrops, &simplify::SimplifyCfg::RemoveFalseEdges],
None,
);
- check_consts::post_drop_elaboration::check_live_drops(tcx, &body); // FIXME: make this a MIR lint
+ check_consts::post_drop_elaboration::check_live_drops(tcx, body); // FIXME: make this a MIR lint
}
debug!("runtime_mir_lowering({:?})", did);
diff --git a/compiler/rustc_mir_transform/src/lower_intrinsics.rs b/compiler/rustc_mir_transform/src/lower_intrinsics.rs
index 5f3d8dfc6..18f588dcc 100644
--- a/compiler/rustc_mir_transform/src/lower_intrinsics.rs
+++ b/compiler/rustc_mir_transform/src/lower_intrinsics.rs
@@ -1,10 +1,8 @@
//! Lowers intrinsic calls
-use crate::MirPass;
use rustc_middle::mir::*;
use rustc_middle::ty::{self, TyCtxt};
use rustc_span::symbol::sym;
-use rustc_target::abi::{FieldIdx, VariantIdx};
pub struct LowerIntrinsics;
@@ -251,37 +249,6 @@ impl<'tcx> MirPass<'tcx> for LowerIntrinsics {
});
terminator.kind = TerminatorKind::Goto { target };
}
- sym::option_payload_ptr => {
- if let (Some(target), Some(arg)) = (*target, args[0].place()) {
- let ty::RawPtr(ty::TypeAndMut { ty: dest_ty, .. }) =
- destination.ty(local_decls, tcx).ty.kind()
- else {
- bug!();
- };
-
- block.statements.push(Statement {
- source_info: terminator.source_info,
- kind: StatementKind::Assign(Box::new((
- *destination,
- Rvalue::AddressOf(
- Mutability::Not,
- arg.project_deeper(
- &[
- PlaceElem::Deref,
- PlaceElem::Downcast(
- Some(sym::Some),
- VariantIdx::from_u32(1),
- ),
- PlaceElem::Field(FieldIdx::from_u32(0), *dest_ty),
- ],
- tcx,
- ),
- ),
- ))),
- });
- terminator.kind = TerminatorKind::Goto { target };
- }
- }
sym::transmute | sym::transmute_unchecked => {
let dst_ty = destination.ty(local_decls, tcx).ty;
let Ok([arg]) = <[_; 1]>::try_from(std::mem::take(args)) else {
diff --git a/compiler/rustc_mir_transform/src/lower_slice_len.rs b/compiler/rustc_mir_transform/src/lower_slice_len.rs
index ae4878411..daeb56666 100644
--- a/compiler/rustc_mir_transform/src/lower_slice_len.rs
+++ b/compiler/rustc_mir_transform/src/lower_slice_len.rs
@@ -1,7 +1,6 @@
//! This pass lowers calls to core::slice::len to just the Len op.
//! It should run before inlining!
-use crate::MirPass;
use rustc_hir::def_id::DefId;
use rustc_index::IndexSlice;
use rustc_middle::mir::*;
diff --git a/compiler/rustc_mir_transform/src/match_branches.rs b/compiler/rustc_mir_transform/src/match_branches.rs
index 3dc627b61..1c4aa37d5 100644
--- a/compiler/rustc_mir_transform/src/match_branches.rs
+++ b/compiler/rustc_mir_transform/src/match_branches.rs
@@ -1,4 +1,3 @@
-use crate::MirPass;
use rustc_middle::mir::*;
use rustc_middle::ty::TyCtxt;
use std::iter;
diff --git a/compiler/rustc_mir_transform/src/multiple_return_terminators.rs b/compiler/rustc_mir_transform/src/multiple_return_terminators.rs
index c9b42e75c..64749a4b5 100644
--- a/compiler/rustc_mir_transform/src/multiple_return_terminators.rs
+++ b/compiler/rustc_mir_transform/src/multiple_return_terminators.rs
@@ -1,7 +1,7 @@
//! This pass removes jumps to basic blocks containing only a return, and replaces them with a
//! return instead.
-use crate::{simplify, MirPass};
+use crate::simplify;
use rustc_index::bit_set::BitSet;
use rustc_middle::mir::*;
use rustc_middle::ty::TyCtxt;
diff --git a/compiler/rustc_mir_transform/src/normalize_array_len.rs b/compiler/rustc_mir_transform/src/normalize_array_len.rs
index 206cdf9fe..128634bd7 100644
--- a/compiler/rustc_mir_transform/src/normalize_array_len.rs
+++ b/compiler/rustc_mir_transform/src/normalize_array_len.rs
@@ -2,7 +2,6 @@
//! is taken using the `.len()` method. Handy for preserving information in MIR for const prop
use crate::ssa::SsaLocals;
-use crate::MirPass;
use rustc_index::IndexVec;
use rustc_middle::mir::visit::*;
use rustc_middle::mir::*;
diff --git a/compiler/rustc_mir_transform/src/pass_manager.rs b/compiler/rustc_mir_transform/src/pass_manager.rs
index a8aba29ad..c4eca18ff 100644
--- a/compiler/rustc_mir_transform/src/pass_manager.rs
+++ b/compiler/rustc_mir_transform/src/pass_manager.rs
@@ -99,7 +99,7 @@ where
);
*polarity
});
- overridden.unwrap_or_else(|| pass.is_enabled(&tcx.sess))
+ overridden.unwrap_or_else(|| pass.is_enabled(tcx.sess))
}
fn run_passes_inner<'tcx>(
@@ -126,7 +126,7 @@ fn run_passes_inner<'tcx>(
let dump_enabled = pass.is_mir_dump_enabled();
if dump_enabled {
- dump_mir_for_pass(tcx, body, &name, false);
+ dump_mir_for_pass(tcx, body, name, false);
}
if validate {
validate_body(tcx, body, format!("before pass {name}"));
@@ -142,7 +142,7 @@ fn run_passes_inner<'tcx>(
}
if dump_enabled {
- dump_mir_for_pass(tcx, body, &name, true);
+ dump_mir_for_pass(tcx, body, name, true);
}
if validate {
validate_body(tcx, body, format!("after pass {name}"));
diff --git a/compiler/rustc_mir_transform/src/prettify.rs b/compiler/rustc_mir_transform/src/prettify.rs
index 745fa3084..7b77d0323 100644
--- a/compiler/rustc_mir_transform/src/prettify.rs
+++ b/compiler/rustc_mir_transform/src/prettify.rs
@@ -4,7 +4,6 @@
//! (`-Zmir-enable-passes=+ReorderBasicBlocks,+ReorderLocals`)
//! to make the MIR easier to read for humans.
-use crate::MirPass;
use rustc_index::{bit_set::BitSet, IndexSlice, IndexVec};
use rustc_middle::mir::visit::{MutVisitor, PlaceContext, Visitor};
use rustc_middle::mir::*;
diff --git a/compiler/rustc_mir_transform/src/ref_prop.rs b/compiler/rustc_mir_transform/src/ref_prop.rs
index df39c819b..f13ab5b0f 100644
--- a/compiler/rustc_mir_transform/src/ref_prop.rs
+++ b/compiler/rustc_mir_transform/src/ref_prop.rs
@@ -9,7 +9,6 @@ use rustc_mir_dataflow::storage::always_storage_live_locals;
use rustc_mir_dataflow::Analysis;
use crate::ssa::{SsaLocals, StorageLiveLocals};
-use crate::MirPass;
/// Propagate references using SSA analysis.
///
diff --git a/compiler/rustc_mir_transform/src/remove_noop_landing_pads.rs b/compiler/rustc_mir_transform/src/remove_noop_landing_pads.rs
index 54892442c..095119e2e 100644
--- a/compiler/rustc_mir_transform/src/remove_noop_landing_pads.rs
+++ b/compiler/rustc_mir_transform/src/remove_noop_landing_pads.rs
@@ -1,4 +1,3 @@
-use crate::MirPass;
use rustc_index::bit_set::BitSet;
use rustc_middle::mir::patch::MirPatch;
use rustc_middle::mir::*;
diff --git a/compiler/rustc_mir_transform/src/remove_place_mention.rs b/compiler/rustc_mir_transform/src/remove_place_mention.rs
index 8be1c3757..78335b3b5 100644
--- a/compiler/rustc_mir_transform/src/remove_place_mention.rs
+++ b/compiler/rustc_mir_transform/src/remove_place_mention.rs
@@ -1,6 +1,5 @@
//! This pass removes `PlaceMention` statements, which have no effect at codegen.
-use crate::MirPass;
use rustc_middle::mir::*;
use rustc_middle::ty::TyCtxt;
diff --git a/compiler/rustc_mir_transform/src/remove_storage_markers.rs b/compiler/rustc_mir_transform/src/remove_storage_markers.rs
index dbe082e90..795f5232e 100644
--- a/compiler/rustc_mir_transform/src/remove_storage_markers.rs
+++ b/compiler/rustc_mir_transform/src/remove_storage_markers.rs
@@ -1,6 +1,5 @@
//! This pass removes storage markers if they won't be emitted during codegen.
-use crate::MirPass;
use rustc_middle::mir::*;
use rustc_middle::ty::TyCtxt;
diff --git a/compiler/rustc_mir_transform/src/remove_uninit_drops.rs b/compiler/rustc_mir_transform/src/remove_uninit_drops.rs
index 87fee2410..7d12bcf2f 100644
--- a/compiler/rustc_mir_transform/src/remove_uninit_drops.rs
+++ b/compiler/rustc_mir_transform/src/remove_uninit_drops.rs
@@ -4,9 +4,7 @@ use rustc_middle::ty::GenericArgsRef;
use rustc_middle::ty::{self, ParamEnv, Ty, TyCtxt, VariantDef};
use rustc_mir_dataflow::impls::MaybeInitializedPlaces;
use rustc_mir_dataflow::move_paths::{LookupResult, MoveData, MovePathIndex};
-use rustc_mir_dataflow::{
- self, move_path_children_matching, Analysis, MaybeReachable, MoveDataParamEnv,
-};
+use rustc_mir_dataflow::{move_path_children_matching, Analysis, MaybeReachable, MoveDataParamEnv};
use rustc_target::abi::FieldIdx;
use crate::MirPass;
@@ -25,7 +23,7 @@ impl<'tcx> MirPass<'tcx> for RemoveUninitDrops {
fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
let param_env = tcx.param_env(body.source.def_id());
let move_data =
- MoveData::gather_moves(&body, tcx, param_env, |ty| ty.needs_drop(tcx, param_env));
+ MoveData::gather_moves(body, tcx, param_env, |ty| ty.needs_drop(tcx, param_env));
let mdpe = MoveDataParamEnv { move_data, param_env };
let mut maybe_inits = MaybeInitializedPlaces::new(tcx, body, &mdpe)
diff --git a/compiler/rustc_mir_transform/src/remove_unneeded_drops.rs b/compiler/rustc_mir_transform/src/remove_unneeded_drops.rs
index 08b2a6537..5d528bed3 100644
--- a/compiler/rustc_mir_transform/src/remove_unneeded_drops.rs
+++ b/compiler/rustc_mir_transform/src/remove_unneeded_drops.rs
@@ -4,7 +4,6 @@
//! useful because (unlike MIR building) it runs after type checking, so it can make use of
//! `Reveal::All` to provide more precise type information.
-use crate::MirPass;
use rustc_middle::mir::*;
use rustc_middle::ty::TyCtxt;
diff --git a/compiler/rustc_mir_transform/src/remove_zsts.rs b/compiler/rustc_mir_transform/src/remove_zsts.rs
index 5aa3c3cfe..34d57a453 100644
--- a/compiler/rustc_mir_transform/src/remove_zsts.rs
+++ b/compiler/rustc_mir_transform/src/remove_zsts.rs
@@ -1,6 +1,5 @@
//! Removes operations on ZST places, and converts ZST operands to constants.
-use crate::MirPass;
use rustc_middle::mir::visit::*;
use rustc_middle::mir::*;
use rustc_middle::ty::{self, Ty, TyCtxt};
@@ -17,6 +16,11 @@ impl<'tcx> MirPass<'tcx> for RemoveZsts {
if tcx.type_of(body.source.def_id()).instantiate_identity().is_coroutine() {
return;
}
+
+ if !tcx.consider_optimizing(|| format!("RemoveZsts - {:?}", body.source.def_id())) {
+ return;
+ }
+
let param_env = tcx.param_env_reveal_all_normalized(body.source.def_id());
let local_decls = &body.local_decls;
let mut replacer = Replacer { tcx, param_env, local_decls };
@@ -125,12 +129,6 @@ impl<'tcx> MutVisitor<'tcx> for Replacer<'_, 'tcx> {
if let Some(place_for_ty) = place_for_ty
&& let ty = place_for_ty.ty(self.local_decls, self.tcx).ty
&& self.known_to_be_zst(ty)
- && self.tcx.consider_optimizing(|| {
- format!(
- "RemoveZsts - Place: {:?} SourceInfo: {:?}",
- place_for_ty, statement.source_info
- )
- })
{
statement.make_nop();
} else {
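
The remove_zsts.rs change hoists `consider_optimizing` from a per-statement check to a single per-body check, so one unit of `-Zfuel`-style optimization fuel now gates the whole pass run. A simplified fuel model (hypothetical; the real counter lives behind `TyCtxt::consider_optimizing`):

    struct Fuel {
        remaining: u64,
    }

    impl Fuel {
        fn consider_optimizing(&mut self, describe: impl Fn() -> String) -> bool {
            if self.remaining == 0 {
                println!("optimization fuel exhausted, skipping: {}", describe());
                return false;
            }
            self.remaining -= 1;
            true
        }
    }

    fn remove_zsts(fuel: &mut Fuel, body: &str, statements: &mut Vec<&str>) {
        // One coarse-grained check up front, as in the new code ...
        if !fuel.consider_optimizing(|| format!("RemoveZsts - {body:?}")) {
            return;
        }
        // ... instead of a fine-grained check before each statement edit.
        statements.retain(|s| !s.contains("zst"));
    }

    fn main() {
        let mut fuel = Fuel { remaining: 1 };
        let mut stmts = vec!["_1 = zst", "_2 = 5"];
        remove_zsts(&mut fuel, "example", &mut stmts);
        assert_eq!(stmts, ["_2 = 5"]);
        // A second body gets no fuel and is left untouched.
        let mut stmts2 = vec!["_1 = zst"];
        remove_zsts(&mut fuel, "other", &mut stmts2);
        assert_eq!(stmts2, ["_1 = zst"]);
    }
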
diff --git a/compiler/rustc_mir_transform/src/reveal_all.rs b/compiler/rustc_mir_transform/src/reveal_all.rs
index 1626cf3c0..4d2eca578 100644
--- a/compiler/rustc_mir_transform/src/reveal_all.rs
+++ b/compiler/rustc_mir_transform/src/reveal_all.rs
@@ -1,6 +1,5 @@
//! Normalizes MIR in RevealAll mode.
-use crate::MirPass;
use rustc_middle::mir::visit::*;
use rustc_middle::mir::*;
use rustc_middle::ty::{self, Ty, TyCtxt};
diff --git a/compiler/rustc_mir_transform/src/separate_const_switch.rs b/compiler/rustc_mir_transform/src/separate_const_switch.rs
index 907cfe758..6e22690d8 100644
--- a/compiler/rustc_mir_transform/src/separate_const_switch.rs
+++ b/compiler/rustc_mir_transform/src/separate_const_switch.rs
@@ -37,7 +37,6 @@
//! simplicity rather than completeness (it notably
//! sometimes duplicates abusively).
-use crate::MirPass;
use rustc_middle::mir::*;
use rustc_middle::ty::TyCtxt;
use smallvec::SmallVec;
diff --git a/compiler/rustc_mir_transform/src/shim.rs b/compiler/rustc_mir_transform/src/shim.rs
index ab7961321..fba73d519 100644
--- a/compiler/rustc_mir_transform/src/shim.rs
+++ b/compiler/rustc_mir_transform/src/shim.rs
@@ -113,8 +113,8 @@ fn make_shim<'tcx>(tcx: TyCtxt<'tcx>, instance: ty::InstanceDef<'tcx>) -> Body<'
&deref_separator::Derefer,
&remove_noop_landing_pads::RemoveNoopLandingPads,
&simplify::SimplifyCfg::MakeShim,
- &add_call_guards::CriticalCallEdges,
&abort_unwinding_calls::AbortUnwindingCalls,
+ &add_call_guards::CriticalCallEdges,
],
Some(MirPhase::Runtime(RuntimePhase::Optimized)),
);
@@ -181,7 +181,7 @@ fn build_drop_shim<'tcx>(tcx: TyCtxt<'tcx>, def_id: DefId, ty: Option<Ty<'tcx>>)
GenericArgs::identity_for_item(tcx, def_id)
};
let sig = tcx.fn_sig(def_id).instantiate(tcx, args);
- let sig = tcx.erase_late_bound_regions(sig);
+ let sig = tcx.instantiate_bound_regions_with_erased(sig);
let span = tcx.def_span(def_id);
let source_info = SourceInfo::outermost(span);
@@ -418,7 +418,7 @@ impl<'tcx> CloneShimBuilder<'tcx> {
// otherwise going to be TySelf and we can't index
// or access fields of a Place of type TySelf.
let sig = tcx.fn_sig(def_id).instantiate(tcx, &[self_ty.into()]);
- let sig = tcx.erase_late_bound_regions(sig);
+ let sig = tcx.instantiate_bound_regions_with_erased(sig);
let span = tcx.def_span(def_id);
CloneShimBuilder {
@@ -656,7 +656,7 @@ fn build_call_shim<'tcx>(
// to substitute into the signature of the shim. It is not necessary for users of this
// MIR body to perform further substitutions (see `InstanceDef::has_polymorphic_mir_body`).
let (sig_args, untuple_args) = if let ty::InstanceDef::FnPtrShim(_, ty) = instance {
- let sig = tcx.erase_late_bound_regions(ty.fn_sig(tcx));
+ let sig = tcx.instantiate_bound_regions_with_erased(ty.fn_sig(tcx));
let untuple_args = sig.inputs();
@@ -670,7 +670,7 @@ fn build_call_shim<'tcx>(
let def_id = instance.def_id();
let sig = tcx.fn_sig(def_id);
- let sig = sig.map_bound(|sig| tcx.erase_late_bound_regions(sig));
+ let sig = sig.map_bound(|sig| tcx.instantiate_bound_regions_with_erased(sig));
assert_eq!(sig_args.is_some(), !instance.has_polymorphic_mir_body());
let mut sig = if let Some(sig_args) = sig_args {
diff --git a/compiler/rustc_mir_transform/src/simplify.rs b/compiler/rustc_mir_transform/src/simplify.rs
index 0a1c01114..856a0f227 100644
--- a/compiler/rustc_mir_transform/src/simplify.rs
+++ b/compiler/rustc_mir_transform/src/simplify.rs
@@ -27,7 +27,6 @@
//! naively generate still contains the `_a = ()` write in the unreachable block "after" the
//! return.
-use crate::MirPass;
use rustc_data_structures::fx::FxIndexSet;
use rustc_index::{Idx, IndexSlice, IndexVec};
use rustc_middle::mir::visit::{MutVisitor, MutatingUseContext, PlaceContext, Visitor};
@@ -74,7 +73,7 @@ pub fn simplify_cfg<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
impl<'tcx> MirPass<'tcx> for SimplifyCfg {
fn name(&self) -> &'static str {
- &self.name()
+ self.name()
}
fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
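
The one-line SimplifyCfg fix above removes a needless borrow, and the result only looks recursive: inherent methods win over trait methods during resolution. A standalone mirror (simplified trait, not rustc's `MirPass`):

    trait MirPass {
        fn name(&self) -> &'static str;
    }

    enum SimplifyCfg {
        Initial,
        Final,
    }

    impl SimplifyCfg {
        // Inherent method: this is what `self.name()` resolves to below.
        fn name(&self) -> &'static str {
            match self {
                SimplifyCfg::Initial => "SimplifyCfg-initial",
                SimplifyCfg::Final => "SimplifyCfg-final",
            }
        }
    }

    impl MirPass for SimplifyCfg {
        fn name(&self) -> &'static str {
            // Formerly `&self.name()`: the borrow was needless, since the
            // inherent method already returns `&'static str`.
            self.name()
        }
    }

    fn main() {
        let pass: &dyn MirPass = &SimplifyCfg::Initial;
        assert_eq!(pass.name(), "SimplifyCfg-initial");
        assert_eq!(SimplifyCfg::Final.name(), "SimplifyCfg-final");
    }
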
diff --git a/compiler/rustc_mir_transform/src/simplify_branches.rs b/compiler/rustc_mir_transform/src/simplify_branches.rs
index 1f0e605c3..35a052166 100644
--- a/compiler/rustc_mir_transform/src/simplify_branches.rs
+++ b/compiler/rustc_mir_transform/src/simplify_branches.rs
@@ -1,4 +1,3 @@
-use crate::MirPass;
use rustc_middle::mir::*;
use rustc_middle::ty::TyCtxt;
diff --git a/compiler/rustc_mir_transform/src/sroa.rs b/compiler/rustc_mir_transform/src/sroa.rs
index 7de4ca667..06d5e17fd 100644
--- a/compiler/rustc_mir_transform/src/sroa.rs
+++ b/compiler/rustc_mir_transform/src/sroa.rs
@@ -1,4 +1,3 @@
-use crate::MirPass;
use rustc_data_structures::flat_map_in_place::FlatMapInPlace;
use rustc_index::bit_set::{BitSet, GrowableBitSet};
use rustc_index::IndexVec;
@@ -167,7 +166,7 @@ impl<'tcx> ReplacementMap<'tcx> {
};
let fields = self.fragments[place.local].as_ref()?;
let (_, new_local) = fields[f]?;
- Some(Place { local: new_local, projection: tcx.mk_place_elems(&rest) })
+ Some(Place { local: new_local, projection: tcx.mk_place_elems(rest) })
}
fn place_fragments(
diff --git a/compiler/rustc_mir_transform/src/ssa.rs b/compiler/rustc_mir_transform/src/ssa.rs
index 1f59c790b..3a6e1ef34 100644
--- a/compiler/rustc_mir_transform/src/ssa.rs
+++ b/compiler/rustc_mir_transform/src/ssa.rs
@@ -40,7 +40,8 @@ impl SsaLocals {
let dominators = body.basic_blocks.dominators();
let direct_uses = IndexVec::from_elem(0, &body.local_decls);
- let mut visitor = SsaVisitor { assignments, assignment_order, dominators, direct_uses };
+ let mut visitor =
+ SsaVisitor { body, assignments, assignment_order, dominators, direct_uses };
for local in body.args_iter() {
visitor.assignments[local] = Set1::One(DefLocation::Argument);
@@ -110,7 +111,7 @@ impl SsaLocals {
body: &'a Body<'tcx>,
) -> impl Iterator<Item = (Local, &'a Rvalue<'tcx>, Location)> + 'a {
self.assignment_order.iter().filter_map(|&local| {
- if let Set1::One(DefLocation::Body(loc)) = self.assignments[local] {
+ if let Set1::One(DefLocation::Assignment(loc)) = self.assignments[local] {
let stmt = body.stmt_at(loc).left()?;
// `loc` must point to a direct assignment to `local`.
let Some((target, rvalue)) = stmt.kind.as_assign() else { bug!() };
@@ -134,21 +135,21 @@ impl SsaLocals {
AssignedValue::Arg,
Location { block: START_BLOCK, statement_index: 0 },
),
- Set1::One(DefLocation::Body(loc)) => {
+ Set1::One(DefLocation::Assignment(loc)) => {
let bb = &mut basic_blocks[loc.block];
- let value = if loc.statement_index < bb.statements.len() {
- // `loc` must point to a direct assignment to `local`.
- let stmt = &mut bb.statements[loc.statement_index];
- let StatementKind::Assign(box (target, ref mut rvalue)) = stmt.kind else {
- bug!()
- };
- assert_eq!(target.as_local(), Some(local));
- AssignedValue::Rvalue(rvalue)
- } else {
- let term = bb.terminator_mut();
- AssignedValue::Terminator(&mut term.kind)
+ // `loc` must point to a direct assignment to `local`.
+ let stmt = &mut bb.statements[loc.statement_index];
+ let StatementKind::Assign(box (target, ref mut rvalue)) = stmt.kind else {
+ bug!()
};
- f(local, value, loc)
+ assert_eq!(target.as_local(), Some(local));
+ f(local, AssignedValue::Rvalue(rvalue), loc)
+ }
+ Set1::One(DefLocation::CallReturn { call, .. }) => {
+ let bb = &mut basic_blocks[call];
+ let loc = Location { block: call, statement_index: bb.statements.len() };
+ let term = bb.terminator_mut();
+ f(local, AssignedValue::Terminator(&mut term.kind), loc)
}
_ => {}
}
@@ -201,14 +202,15 @@ impl SsaLocals {
}
}
-struct SsaVisitor<'a> {
+struct SsaVisitor<'tcx, 'a> {
+ body: &'a Body<'tcx>,
dominators: &'a Dominators<BasicBlock>,
assignments: IndexVec<Local, Set1<DefLocation>>,
assignment_order: Vec<Local>,
direct_uses: IndexVec<Local, u32>,
}
-impl SsaVisitor<'_> {
+impl SsaVisitor<'_, '_> {
fn check_dominates(&mut self, local: Local, loc: Location) {
let set = &mut self.assignments[local];
let assign_dominates = match *set {
@@ -224,7 +226,7 @@ impl SsaVisitor<'_> {
}
}
-impl<'tcx> Visitor<'tcx> for SsaVisitor<'_> {
+impl<'tcx> Visitor<'tcx> for SsaVisitor<'tcx, '_> {
fn visit_local(&mut self, local: Local, ctxt: PlaceContext, loc: Location) {
match ctxt {
PlaceContext::MutatingUse(MutatingUseContext::Projection)
@@ -250,9 +252,18 @@ impl<'tcx> Visitor<'tcx> for SsaVisitor<'_> {
fn visit_place(&mut self, place: &Place<'tcx>, ctxt: PlaceContext, loc: Location) {
let location = match ctxt {
- PlaceContext::MutatingUse(
- MutatingUseContext::Store | MutatingUseContext::Call | MutatingUseContext::Yield,
- ) => Some(DefLocation::Body(loc)),
+ PlaceContext::MutatingUse(MutatingUseContext::Store) => {
+ Some(DefLocation::Assignment(loc))
+ }
+ PlaceContext::MutatingUse(MutatingUseContext::Call) => {
+ let call = loc.block;
+ let TerminatorKind::Call { target, .. } =
+ self.body.basic_blocks[call].terminator().kind
+ else {
+ bug!()
+ };
+ Some(DefLocation::CallReturn { call, target })
+ }
_ => None,
};
if let Some(location) = location
@@ -359,7 +370,7 @@ impl StorageLiveLocals {
for (statement_index, statement) in bbdata.statements.iter().enumerate() {
if let StatementKind::StorageLive(local) = statement.kind {
storage_live[local]
- .insert(DefLocation::Body(Location { block, statement_index }));
+ .insert(DefLocation::Assignment(Location { block, statement_index }));
}
}
}
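
The ssa.rs hunks migrate from the catch-all `DefLocation::Body` to `DefLocation::Assignment` versus `DefLocation::CallReturn { call, target }`: a call's return value is defined on the edge into `target`, not at a statement index in the calling block. A toy sketch of why that distinction matters for a dominance check (names follow the diff; the real enum lives in rustc_middle and the real check consults the dominator tree):

    #[derive(Clone, Copy)]
    struct Location {
        block: usize,
        statement_index: usize,
    }

    #[derive(Clone, Copy)]
    enum DefLocation {
        Argument,
        Assignment(Location),
        // The value is defined on the edge from the call block to `target`,
        // which the old catch-all `DefLocation::Body(loc)` could not express.
        CallReturn { call: usize, target: Option<usize> },
    }

    fn dominates(def: DefLocation, use_loc: Location) -> bool {
        match def {
            DefLocation::Argument => true,
            // Toy same-block model: defined strictly before the use.
            DefLocation::Assignment(loc) => {
                loc.block == use_loc.block && loc.statement_index < use_loc.statement_index
            }
            // Toy single-edge model: a call return dominates only uses in
            // its target block.
            DefLocation::CallReturn { call: _, target } => Some(use_loc.block) == target,
        }
    }

    fn main() {
        let def = DefLocation::CallReturn { call: 1, target: Some(2) };
        assert!(dominates(def, Location { block: 2, statement_index: 0 }));
        assert!(!dominates(def, Location { block: 1, statement_index: 5 }));
        if let DefLocation::CallReturn { call, .. } = def {
            assert_eq!(call, 1);
        }
        let assign = DefLocation::Assignment(Location { block: 0, statement_index: 0 });
        assert!(dominates(assign, Location { block: 0, statement_index: 1 }));
        assert!(dominates(DefLocation::Argument, Location { block: 0, statement_index: 0 }));
    }
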
diff --git a/compiler/rustc_mir_transform/src/uninhabited_enum_branching.rs b/compiler/rustc_mir_transform/src/uninhabited_enum_branching.rs
index 98f67e18a..e68d37f4c 100644
--- a/compiler/rustc_mir_transform/src/uninhabited_enum_branching.rs
+++ b/compiler/rustc_mir_transform/src/uninhabited_enum_branching.rs
@@ -86,7 +86,7 @@ impl<'tcx> MirPass<'tcx> for UninhabitedEnumBranching {
continue;
}
- let Some(discriminant_ty) = get_switched_on_type(&bb_data, tcx, body) else { continue };
+ let Some(discriminant_ty) = get_switched_on_type(bb_data, tcx, body) else { continue };
let layout = tcx.layout_of(
tcx.param_env_reveal_all_normalized(body.source.def_id()).and(discriminant_ty),
diff --git a/compiler/rustc_mir_transform/src/unreachable_prop.rs b/compiler/rustc_mir_transform/src/unreachable_prop.rs
index 919e8d6a2..f12a6aa24 100644
--- a/compiler/rustc_mir_transform/src/unreachable_prop.rs
+++ b/compiler/rustc_mir_transform/src/unreachable_prop.rs
@@ -2,7 +2,6 @@
//! when all of their successors are unreachable. This is achieved through a
//! post-order traversal of the blocks.
-use crate::MirPass;
use rustc_data_structures::fx::FxHashSet;
use rustc_middle::mir::interpret::Scalar;
use rustc_middle::mir::patch::MirPatch;