author    | Daniel Baumann <daniel.baumann@progress-linux.org> | 2024-05-30 03:59:35 +0000
committer | Daniel Baumann <daniel.baumann@progress-linux.org> | 2024-05-30 03:59:35 +0000
commit    | d1b2d29528b7794b41e66fc2136e395a02f8529b (patch)
tree      | a4a17504b260206dec3cf55b2dca82929a348ac2 /compiler/rustc_mir_transform/src
parent    | Releasing progress-linux version 1.72.1+dfsg1-1~progress7.99u1. (diff)
download  | rustc-d1b2d29528b7794b41e66fc2136e395a02f8529b.tar.xz
          | rustc-d1b2d29528b7794b41e66fc2136e395a02f8529b.zip
Merging upstream version 1.73.0+dfsg1.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'compiler/rustc_mir_transform/src')
43 files changed, 887 insertions, 1114 deletions
diff --git a/compiler/rustc_mir_transform/src/add_retag.rs b/compiler/rustc_mir_transform/src/add_retag.rs
index d9e7339f1..75473ca53 100644
--- a/compiler/rustc_mir_transform/src/add_retag.rs
+++ b/compiler/rustc_mir_transform/src/add_retag.rs
@@ -60,7 +60,7 @@ impl<'tcx> MirPass<'tcx> for AddRetag {
         let basic_blocks = body.basic_blocks.as_mut();
         let local_decls = &body.local_decls;
         let needs_retag = |place: &Place<'tcx>| {
-            !place.has_deref() // we're not really interested in stores to "outside" locations, they are hard to keep track of anyway
+            !place.is_indirect_first_projection() // we're not really interested in stores to "outside" locations, they are hard to keep track of anyway
                 && may_contain_reference(place.ty(&*local_decls, tcx).ty, /*depth*/ 3, tcx)
                 && !local_decls[place.local].is_deref_temp()
         };
diff --git a/compiler/rustc_mir_transform/src/check_unsafety.rs b/compiler/rustc_mir_transform/src/check_unsafety.rs
index 70812761e..58e9786ec 100644
--- a/compiler/rustc_mir_transform/src/check_unsafety.rs
+++ b/compiler/rustc_mir_transform/src/check_unsafety.rs
@@ -1,4 +1,4 @@
-use rustc_data_structures::unord::{UnordItems, UnordSet};
+use rustc_data_structures::unord::{ExtendUnord, UnordItems, UnordSet};
 use rustc_hir as hir;
 use rustc_hir::def::DefKind;
 use rustc_hir::def_id::{DefId, LocalDefId};
diff --git a/compiler/rustc_mir_transform/src/const_prop.rs b/compiler/rustc_mir_transform/src/const_prop.rs
index 2f2c7357b..7529ed818 100644
--- a/compiler/rustc_mir_transform/src/const_prop.rs
+++ b/compiler/rustc_mir_transform/src/const_prop.rs
@@ -14,16 +14,15 @@ use rustc_middle::mir::visit::{
 };
 use rustc_middle::mir::*;
 use rustc_middle::ty::layout::{LayoutError, LayoutOf, LayoutOfHelpers, TyAndLayout};
-use rustc_middle::ty::InternalSubsts;
-use rustc_middle::ty::{self, ConstKind, Instance, ParamEnv, Ty, TyCtxt, TypeVisitableExt};
+use rustc_middle::ty::{self, GenericArgs, Instance, ParamEnv, Ty, TyCtxt, TypeVisitableExt};
 use rustc_span::{def_id::DefId, Span, DUMMY_SP};
 use rustc_target::abi::{self, Align, HasDataLayout, Size, TargetDataLayout};
 use rustc_target::spec::abi::Abi as CallAbi;

 use crate::MirPass;
 use rustc_const_eval::interpret::{
-    self, compile_time_machine, AllocId, ConstAllocation, ConstValue, Frame, ImmTy, Immediate,
-    InterpCx, InterpResult, LocalValue, MemoryKind, OpTy, PlaceTy, Pointer, Scalar,
+    self, compile_time_machine, AllocId, ConstAllocation, ConstValue, FnArg, Frame, ImmTy,
+    Immediate, InterpCx, InterpResult, LocalValue, MemoryKind, OpTy, PlaceTy, Pointer, Scalar,
     StackPopCleanup,
 };

@@ -87,7 +86,7 @@ impl<'tcx> MirPass<'tcx> for ConstProp {
             return;
         }

-        let is_generator = tcx.type_of(def_id.to_def_id()).subst_identity().is_generator();
+        let is_generator = tcx.type_of(def_id.to_def_id()).instantiate_identity().is_generator();
         // FIXME(welseywiser) const prop doesn't work on generators because of query cycles
         // computing their layout.
         if is_generator {
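The first hunk swaps `Place::has_deref` for `Place::is_indirect_first_projection`: instead of asking whether a deref appears anywhere in the projection list, the retag check now asks whether the place is a store through a pointer, i.e. whether its *first* projection is a deref. A compilable toy sketch of the distinction — the `Proj`/`Place` types below are illustrative stand-ins, not rustc's real types:

```rust
// Simplified model of a MIR place: a local plus a list of projections.
#[derive(Debug, PartialEq)]
enum Proj {
    Deref,
    Field(usize),
}

struct Place {
    projection: Vec<Proj>,
}

impl Place {
    // Old check: any deref anywhere in the projection list.
    fn has_deref(&self) -> bool {
        self.projection.contains(&Proj::Deref)
    }

    // New check: deref only as the *first* projection. In the MIR these
    // passes run on, derefs are expected to occur only in first position
    // (later derefs go through deref temps), so this is the more precise
    // test for "a store through a pointer".
    fn is_indirect_first_projection(&self) -> bool {
        self.projection.first() == Some(&Proj::Deref)
    }
}

fn main() {
    // (*_1).0 — an indirect store: both checks agree.
    let indirect = Place { projection: vec![Proj::Deref, Proj::Field(0)] };
    assert!(indirect.has_deref() && indirect.is_indirect_first_projection());

    // _1.0 — a direct store: both checks agree.
    let direct = Place { projection: vec![Proj::Field(0)] };
    assert!(!direct.has_deref() && !direct.is_indirect_first_projection());

    // A non-canonical place with a later deref is where the two differ.
    let odd = Place { projection: vec![Proj::Field(0), Proj::Deref] };
    assert!(odd.has_deref() && !odd.is_indirect_first_projection());
}
```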
@@ -185,7 +184,7 @@ impl<'mir, 'tcx> interpret::Machine<'mir, 'tcx> for ConstPropMachine<'mir, 'tcx>
         _ecx: &mut InterpCx<'mir, 'tcx, Self>,
         _instance: ty::Instance<'tcx>,
         _abi: CallAbi,
-        _args: &[OpTy<'tcx>],
+        _args: &[FnArg<'tcx>],
         _destination: &PlaceTy<'tcx>,
         _target: Option<BasicBlock>,
         _unwind: UnwindAction,
@@ -338,7 +337,7 @@ impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> {
         tcx: TyCtxt<'tcx>,
     ) -> ConstPropagator<'mir, 'tcx> {
         let def_id = body.source.def_id();
-        let substs = &InternalSubsts::identity_for_item(tcx, def_id);
+        let args = &GenericArgs::identity_for_item(tcx, def_id);
         let param_env = tcx.param_env_reveal_all_normalized(def_id);

         let can_const_prop = CanConstProp::check(tcx, param_env, body);
@@ -350,7 +349,7 @@ impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> {
         );

         let ret_layout = ecx
-            .layout_of(body.bound_return_ty().subst(tcx, substs))
+            .layout_of(body.bound_return_ty().instantiate(tcx, args))
             .ok()
             // Don't bother allocating memory for large values.
             // I don't know how return types can seem to be unsized but this happens in the
@@ -366,7 +365,7 @@ impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> {
             .into();

         ecx.push_stack_frame(
-            Instance::new(def_id, substs),
+            Instance::new(def_id, args),
             dummy_body,
             &ret,
             StackPopCleanup::Root { cleanup: false },
@@ -407,51 +406,9 @@ impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> {
         ecx.machine.written_only_inside_own_block_locals.remove(&local);
     }

-    /// Returns the value, if any, of evaluating `c`.
-    fn eval_constant(&mut self, c: &Constant<'tcx>) -> Option<OpTy<'tcx>> {
-        // FIXME we need to revisit this for #67176
-        if c.has_param() {
-            return None;
-        }
-
-        // No span, we don't want errors to be shown.
-        self.ecx.eval_mir_constant(&c.literal, None, None).ok()
-    }
-
-    /// Returns the value, if any, of evaluating `place`.
-    fn eval_place(&mut self, place: Place<'tcx>) -> Option<OpTy<'tcx>> {
-        trace!("eval_place(place={:?})", place);
-        self.ecx.eval_place_to_op(place, None).ok()
-    }
-
-    /// Returns the value, if any, of evaluating `op`. Calls upon `eval_constant`
-    /// or `eval_place`, depending on the variant of `Operand` used.
-    fn eval_operand(&mut self, op: &Operand<'tcx>) -> Option<OpTy<'tcx>> {
-        match *op {
-            Operand::Constant(ref c) => self.eval_constant(c),
-            Operand::Move(place) | Operand::Copy(place) => self.eval_place(place),
-        }
-    }
-
     fn propagate_operand(&mut self, operand: &mut Operand<'tcx>) {
-        match *operand {
-            Operand::Copy(l) | Operand::Move(l) => {
-                if let Some(value) = self.get_const(l) && self.should_const_prop(&value) {
-                    // FIXME(felix91gr): this code only handles `Scalar` cases.
-                    // For now, we're not handling `ScalarPair` cases because
-                    // doing so here would require a lot of code duplication.
-                    // We should hopefully generalize `Operand` handling into a fn,
-                    // and use it to do const-prop here and everywhere else
-                    // where it makes sense.
-                    if let interpret::Operand::Immediate(interpret::Immediate::Scalar(
-                        scalar,
-                    )) = *value
-                    {
-                        *operand = self.operand_from_scalar(scalar, value.layout.ty);
-                    }
-                }
-            }
-            Operand::Constant(_) => (),
+        if let Some(place) = operand.place() && let Some(op) = self.replace_with_const(place) {
+            *operand = op;
         }
     }

@@ -579,93 +536,45 @@ impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> {
         }))
     }

-    fn replace_with_const(&mut self, place: Place<'tcx>, rval: &mut Rvalue<'tcx>) {
+    fn replace_with_const(&mut self, place: Place<'tcx>) -> Option<Operand<'tcx>> {
         // This will return None if the above `const_prop` invocation only "wrote" a
         // type whose creation requires no write. E.g. a generator whose initial state
         // consists solely of uninitialized memory (so it doesn't capture any locals).
-        let Some(ref value) = self.get_const(place) else { return };
-        if !self.should_const_prop(value) {
-            return;
-        }
-        trace!("replacing {:?}={:?} with {:?}", place, rval, value);
-
-        if let Rvalue::Use(Operand::Constant(c)) = rval {
-            match c.literal {
-                ConstantKind::Ty(c) if matches!(c.kind(), ConstKind::Unevaluated(..)) => {}
-                _ => {
-                    trace!("skipping replace of Rvalue::Use({:?} because it is already a const", c);
-                    return;
-                }
-            }
+        let value = self.get_const(place)?;
+        if !self.tcx.consider_optimizing(|| format!("ConstantPropagation - {value:?}")) {
+            return None;
         }
+        trace!("replacing {:?} with {:?}", place, value);

-        trace!("attempting to replace {:?} with {:?}", rval, value);
         // FIXME> figure out what to do when read_immediate_raw fails
-        let imm = self.ecx.read_immediate_raw(value).ok();
+        let imm = self.ecx.read_immediate_raw(&value).ok()?;

-        if let Some(Right(imm)) = imm {
-            match *imm {
-                interpret::Immediate::Scalar(scalar) => {
-                    *rval = Rvalue::Use(self.operand_from_scalar(scalar, value.layout.ty));
-                }
-                Immediate::ScalarPair(..) => {
-                    // Found a value represented as a pair. For now only do const-prop if the type
-                    // of `rvalue` is also a tuple with two scalars.
-                    // FIXME: enable the general case stated above ^.
-                    let ty = value.layout.ty;
-                    // Only do it for tuples
-                    if let ty::Tuple(types) = ty.kind() {
-                        // Only do it if tuple is also a pair with two scalars
-                        if let [ty1, ty2] = types[..] {
-                            let ty_is_scalar = |ty| {
-                                self.ecx.layout_of(ty).ok().map(|layout| layout.abi.is_scalar())
-                                    == Some(true)
-                            };
-                            let alloc = if ty_is_scalar(ty1) && ty_is_scalar(ty2) {
-                                let alloc = self
-                                    .ecx
-                                    .intern_with_temp_alloc(value.layout, |ecx, dest| {
-                                        ecx.write_immediate(*imm, dest)
-                                    })
-                                    .unwrap();
-                                Some(alloc)
-                            } else {
-                                None
-                            };
-
-                            if let Some(alloc) = alloc {
-                                // Assign entire constant in a single statement.
-                                // We can't use aggregates, as we run after the aggregate-lowering `MirPhase`.
-                                let const_val = ConstValue::ByRef { alloc, offset: Size::ZERO };
-                                let literal = ConstantKind::Val(const_val, ty);
-                                *rval = Rvalue::Use(Operand::Constant(Box::new(Constant {
-                                    span: DUMMY_SP,
-                                    user_ty: None,
-                                    literal,
-                                })));
-                            }
-                        }
-                    }
-                }
-                // Scalars or scalar pairs that contain undef values are assumed to not have
-                // successfully evaluated and are thus not propagated.
-                _ => {}
+        let Right(imm) = imm else { return None };
+        match *imm {
+            Immediate::Scalar(scalar) if scalar.try_to_int().is_ok() => {
+                Some(self.operand_from_scalar(scalar, value.layout.ty))
             }
-        }
-    }
-
-    /// Returns `true` if and only if this `op` should be const-propagated into.
-    fn should_const_prop(&mut self, op: &OpTy<'tcx>) -> bool {
-        if !self.tcx.consider_optimizing(|| format!("ConstantPropagation - OpTy: {:?}", op)) {
-            return false;
-        }
-
-        match **op {
-            interpret::Operand::Immediate(Immediate::Scalar(s)) => s.try_to_int().is_ok(),
-            interpret::Operand::Immediate(Immediate::ScalarPair(l, r)) => {
-                l.try_to_int().is_ok() && r.try_to_int().is_ok()
+            Immediate::ScalarPair(l, r) if l.try_to_int().is_ok() && r.try_to_int().is_ok() => {
+                let alloc = self
+                    .ecx
+                    .intern_with_temp_alloc(value.layout, |ecx, dest| {
+                        ecx.write_immediate(*imm, dest)
+                    })
+                    .ok()?;
+
+                let literal = ConstantKind::Val(
+                    ConstValue::ByRef { alloc, offset: Size::ZERO },
+                    value.layout.ty,
+                );
+                Some(Operand::Constant(Box::new(Constant {
+                    span: DUMMY_SP,
+                    user_ty: None,
+                    literal,
+                })))
             }
-            _ => false,
+            // Scalars or scalar pairs that contain undef values are assumed to not have
+            // successfully evaluated and are thus not propagated.
+            _ => None,
         }
     }

@@ -810,12 +719,7 @@ impl<'tcx> MutVisitor<'tcx> for ConstPropagator<'_, 'tcx> {

     fn visit_operand(&mut self, operand: &mut Operand<'tcx>, location: Location) {
         self.super_operand(operand, location);
-
-        // Only const prop copies and moves on `mir_opt_level=3` as doing so
-        // currently slightly increases compile time in some cases.
-        if self.tcx.sess.mir_opt_level() >= 3 {
-            self.propagate_operand(operand)
-        }
+        self.propagate_operand(operand)
     }

     fn process_projection_elem(
@@ -825,8 +729,7 @@ impl<'tcx> MutVisitor<'tcx> for ConstPropagator<'_, 'tcx> {
     ) -> Option<PlaceElem<'tcx>> {
         if let PlaceElem::Index(local) = elem
             && let Some(value) = self.get_const(local.into())
-            && self.should_const_prop(&value)
-            && let interpret::Operand::Immediate(interpret::Immediate::Scalar(scalar)) = *value
+            && let interpret::Operand::Immediate(Immediate::Scalar(scalar)) = *value
             && let Ok(offset) = scalar.to_target_usize(&self.tcx)
             && let Some(min_length) = offset.checked_add(1)
         {
@@ -852,7 +755,14 @@ impl<'tcx> MutVisitor<'tcx> for ConstPropagator<'_, 'tcx> {
             ConstPropMode::NoPropagation => self.ensure_not_propagated(place.local),
             ConstPropMode::OnlyInsideOwnBlock | ConstPropMode::FullConstProp => {
                 if let Some(()) = self.eval_rvalue_with_identities(rvalue, *place) {
-                    self.replace_with_const(*place, rvalue);
+                    // If this was already an evaluated constant, keep it.
+                    if let Rvalue::Use(Operand::Constant(c)) = rvalue
+                        && let ConstantKind::Val(..) = c.literal
+                    {
+                        trace!("skipping replace of Rvalue::Use({:?} because it is already a const", c);
+                    } else if let Some(operand) = self.replace_with_const(*place) {
+                        *rvalue = Rvalue::Use(operand);
+                    }
                 } else {
                     // Const prop failed, so erase the destination, ensuring that whatever happens
                     // from here on, does not know about the previous value.
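The net effect of the hunks above: `eval_constant`/`eval_place`/`eval_operand` and the separate `should_const_prop` predicate are gone, and `replace_with_const` now returns `Option<Operand>` with the "only propagate fully-known integers" rule folded into match guards. A minimal sketch of that guard-based filtering, with toy `Scalar`/`Imm` stand-ins for the interpreter types:

```rust
#[derive(Clone, Copy)]
enum Scalar {
    Int(i64),
    Undef, // uninitialized; must never be propagated
}

impl Scalar {
    fn try_to_int(self) -> Result<i64, ()> {
        match self {
            Scalar::Int(v) => Ok(v),
            Scalar::Undef => Err(()),
        }
    }
}

enum Imm {
    Scalar(Scalar),
    ScalarPair(Scalar, Scalar),
}

// Mirrors the new control flow: the guards do the filtering, and anything
// containing undef falls through to `None` instead of being propagated.
fn propagatable(imm: &Imm) -> Option<String> {
    match *imm {
        Imm::Scalar(s) if s.try_to_int().is_ok() => Some(format!("{}", s.try_to_int().unwrap())),
        Imm::ScalarPair(l, r) if l.try_to_int().is_ok() && r.try_to_int().is_ok() => {
            Some(format!("({}, {})", l.try_to_int().unwrap(), r.try_to_int().unwrap()))
        }
        _ => None,
    }
}

fn main() {
    assert_eq!(propagatable(&Imm::Scalar(Scalar::Int(7))).as_deref(), Some("7"));
    assert!(propagatable(&Imm::ScalarPair(Scalar::Int(1), Scalar::Undef)).is_none());
}
```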
@@ -919,45 +829,6 @@ impl<'tcx> MutVisitor<'tcx> for ConstPropagator<'_, 'tcx> {
         }
     }

-    fn visit_terminator(&mut self, terminator: &mut Terminator<'tcx>, location: Location) {
-        self.super_terminator(terminator, location);
-
-        match &mut terminator.kind {
-            TerminatorKind::Assert { expected, ref mut cond, .. } => {
-                if let Some(ref value) = self.eval_operand(&cond)
-                    && let Ok(value_const) = self.ecx.read_scalar(&value)
-                    && self.should_const_prop(value)
-                {
-                    trace!("assertion on {:?} should be {:?}", value, expected);
-                    *cond = self.operand_from_scalar(value_const, self.tcx.types.bool);
-                }
-            }
-            TerminatorKind::SwitchInt { ref mut discr, .. } => {
-                // FIXME: This is currently redundant with `visit_operand`, but sadly
-                // always visiting operands currently causes a perf regression in LLVM codegen, so
-                // `visit_operand` currently only runs for propagates places for `mir_opt_level=4`.
-                self.propagate_operand(discr)
-            }
-            // None of these have Operands to const-propagate.
-            TerminatorKind::Goto { .. }
-            | TerminatorKind::Resume
-            | TerminatorKind::Terminate
-            | TerminatorKind::Return
-            | TerminatorKind::Unreachable
-            | TerminatorKind::Drop { .. }
-            | TerminatorKind::Yield { .. }
-            | TerminatorKind::GeneratorDrop
-            | TerminatorKind::FalseEdge { .. }
-            | TerminatorKind::FalseUnwind { .. }
-            | TerminatorKind::InlineAsm { .. } => {}
-            // Every argument in our function calls have already been propagated in `visit_operand`.
-            //
-            // NOTE: because LLVM codegen gives slight performance regressions with it, so this is
-            // gated on `mir_opt_level=3`.
-            TerminatorKind::Call { .. } => {}
-        }
-    }
-
     fn visit_basic_block_data(&mut self, block: BasicBlock, data: &mut BasicBlockData<'tcx>) {
         self.super_basic_block_data(block, data);
diff --git a/compiler/rustc_mir_transform/src/const_prop_lint.rs b/compiler/rustc_mir_transform/src/const_prop_lint.rs
index 759650fe4..ac07c2576 100644
--- a/compiler/rustc_mir_transform/src/const_prop_lint.rs
+++ b/compiler/rustc_mir_transform/src/const_prop_lint.rs
@@ -16,7 +16,7 @@ use rustc_index::bit_set::BitSet;
 use rustc_middle::mir::visit::Visitor;
 use rustc_middle::mir::*;
 use rustc_middle::ty::layout::{LayoutError, LayoutOf, LayoutOfHelpers, TyAndLayout};
-use rustc_middle::ty::InternalSubsts;
+use rustc_middle::ty::GenericArgs;
 use rustc_middle::ty::{
     self, ConstInt, Instance, ParamEnv, ScalarInt, Ty, TyCtxt, TypeVisitableExt,
 };
@@ -55,7 +55,7 @@ impl<'tcx> MirLint<'tcx> for ConstProp {
             return;
         }

-        let is_generator = tcx.type_of(def_id.to_def_id()).subst_identity().is_generator();
+        let is_generator = tcx.type_of(def_id.to_def_id()).instantiate_identity().is_generator();
         // FIXME(welseywiser) const prop doesn't work on generators because of query cycles
         // computing their layout.
         if is_generator {
@@ -171,7 +171,7 @@ impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> {
         tcx: TyCtxt<'tcx>,
     ) -> ConstPropagator<'mir, 'tcx> {
         let def_id = body.source.def_id();
-        let substs = &InternalSubsts::identity_for_item(tcx, def_id);
+        let args = &GenericArgs::identity_for_item(tcx, def_id);
         let param_env = tcx.param_env_reveal_all_normalized(def_id);

         let can_const_prop = CanConstProp::check(tcx, param_env, body);
@@ -183,7 +183,7 @@ impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> {
         );

         let ret_layout = ecx
-            .layout_of(body.bound_return_ty().subst(tcx, substs))
+            .layout_of(body.bound_return_ty().instantiate(tcx, args))
             .ok()
             // Don't bother allocating memory for large values.
             // I don't know how return types can seem to be unsized but this happens in the
@@ -199,7 +199,7 @@ impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> {
             .into();

         ecx.push_stack_frame(
-            Instance::new(def_id, substs),
+            Instance::new(def_id, args),
             dummy_body,
             &ret,
             StackPopCleanup::Root { cleanup: false },
@@ -494,7 +494,7 @@ impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> {
                 trace!("assertion on {:?} should be {:?}", value, expected);

                 let expected = Scalar::from_bool(expected);
-                let value_const = self.use_ecx(location, |this| this.ecx.read_scalar(&value))?;
+                let value_const = self.use_ecx(location, |this| this.ecx.read_scalar(value))?;

                 if expected != value_const {
                     // Poison all places this operand references so that further code
@@ -664,7 +664,7 @@ impl<'tcx> Visitor<'tcx> for ConstPropagator<'_, 'tcx> {
             }
             TerminatorKind::SwitchInt { ref discr, ref targets } => {
                 if let Some(ref value) = self.eval_operand(&discr, location)
-                    && let Some(value_const) = self.use_ecx(location, |this| this.ecx.read_scalar(&value))
+                    && let Some(value_const) = self.use_ecx(location, |this| this.ecx.read_scalar(value))
                     && let Ok(constant) = value_const.try_to_int()
                     && let Ok(constant) = constant.to_bits(constant.size())
                 {
diff --git a/compiler/rustc_mir_transform/src/copy_prop.rs b/compiler/rustc_mir_transform/src/copy_prop.rs
index 3df459dfa..9a3798eea 100644
--- a/compiler/rustc_mir_transform/src/copy_prop.rs
+++ b/compiler/rustc_mir_transform/src/copy_prop.rs
@@ -76,9 +76,11 @@ fn fully_moved_locals(ssa: &SsaLocals, body: &Body<'_>) -> BitSet<Local> {
     let mut fully_moved = BitSet::new_filled(body.local_decls.len());

     for (_, rvalue, _) in ssa.assignments(body) {
-        let (Rvalue::Use(Operand::Copy(place) | Operand::Move(place)) | Rvalue::CopyForDeref(place))
-            = rvalue
-        else { continue };
+        let (Rvalue::Use(Operand::Copy(place) | Operand::Move(place))
+        | Rvalue::CopyForDeref(place)) = rvalue
+        else {
+            continue;
+        };

         let Some(rhs) = place.as_local() else { continue };
         if !ssa.is_ssa(rhs) {
@@ -152,7 +154,7 @@ impl<'tcx> MutVisitor<'tcx> for Replacer<'_, 'tcx> {
     fn visit_operand(&mut self, operand: &mut Operand<'tcx>, loc: Location) {
         if let Operand::Move(place) = *operand
             // A move out of a projection of a copy is equivalent to a copy of the original projection.
-            && !place.has_deref()
+            && !place.is_indirect_first_projection()
             && !self.fully_moved.contains(place.local)
         {
             *operand = Operand::Copy(place);
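The reformatted binding in `fully_moved_locals` is an or-pattern inside `let ... else`: it extracts `place` from either rvalue shape and skips the assignment otherwise. A self-contained illustration of the idiom (toy `Operand`/`Rvalue` enums, not rustc's):

```rust
#[derive(Debug)]
enum Operand {
    Copy(u32),
    Move(u32),
    Constant(i64),
}

#[derive(Debug)]
enum Rvalue {
    Use(Operand),
    CopyForDeref(u32),
    Aggregate(Vec<Operand>),
}

fn main() {
    let assignments = vec![
        Rvalue::Use(Operand::Move(1)),
        Rvalue::Aggregate(vec![Operand::Constant(0)]),
        Rvalue::CopyForDeref(2),
    ];

    for rvalue in &assignments {
        // Bind `place` from either form; skip anything else, exactly like
        // the `else { continue }` in `fully_moved_locals`.
        let (Rvalue::Use(Operand::Copy(place) | Operand::Move(place))
        | Rvalue::CopyForDeref(place)) = rvalue
        else {
            continue;
        };
        println!("assignment reads local _{place}");
    }
}
```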
diff --git a/compiler/rustc_mir_transform/src/coverage/counters.rs b/compiler/rustc_mir_transform/src/coverage/counters.rs
index 658e01d93..3d442e5dc 100644
--- a/compiler/rustc_mir_transform/src/coverage/counters.rs
+++ b/compiler/rustc_mir_transform/src/coverage/counters.rs
@@ -8,55 +8,116 @@ use debug::{DebugCounters, NESTED_INDENT};
 use graph::{BasicCoverageBlock, BcbBranch, CoverageGraph, TraverseCoverageGraphWithLoops};
 use spans::CoverageSpan;

+use rustc_data_structures::fx::FxHashMap;
 use rustc_data_structures::graph::WithNumNodes;
 use rustc_index::bit_set::BitSet;
+use rustc_index::IndexVec;
 use rustc_middle::mir::coverage::*;

-/// Manages the counter and expression indexes/IDs to generate `CoverageKind` components for MIR
-/// `Coverage` statements.
+use std::fmt::{self, Debug};
+
+/// The coverage counter or counter expression associated with a particular
+/// BCB node or BCB edge.
+#[derive(Clone)]
+pub(super) enum BcbCounter {
+    Counter { id: CounterId },
+    Expression { id: ExpressionId, lhs: Operand, op: Op, rhs: Operand },
+}
+
+impl BcbCounter {
+    fn is_expression(&self) -> bool {
+        matches!(self, Self::Expression { .. })
+    }
+
+    pub(super) fn as_operand(&self) -> Operand {
+        match *self {
+            BcbCounter::Counter { id, .. } => Operand::Counter(id),
+            BcbCounter::Expression { id, .. } => Operand::Expression(id),
+        }
+    }
+}
+
+impl Debug for BcbCounter {
+    fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+        match self {
+            Self::Counter { id, .. } => write!(fmt, "Counter({:?})", id.index()),
+            Self::Expression { id, lhs, op, rhs } => write!(
+                fmt,
+                "Expression({:?}) = {:?} {} {:?}",
+                id.index(),
+                lhs,
+                match op {
+                    Op::Add => "+",
+                    Op::Subtract => "-",
+                },
+                rhs,
+            ),
+        }
+    }
+}
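For orientation, here is a compilable toy model of the new `BcbCounter`/operand split: counters and expressions live in separate ID spaces, and an `Operand`-style enum records which kind an operand refers to. Names and the `op` representation are simplified stand-ins for the real `rustc_middle::mir::coverage` types:

```rust
use std::fmt;

// Toy stand-ins for the coverage IDs.
#[derive(Clone, Copy, Debug)]
struct CounterId(u32);
#[derive(Clone, Copy, Debug)]
struct ExpressionId(u32);

#[allow(dead_code)]
#[derive(Clone, Copy, Debug)]
enum Operand {
    Zero,
    Counter(CounterId),
    Expression(ExpressionId),
}

#[derive(Clone)]
enum BcbCounter {
    Counter { id: CounterId },
    Expression { id: ExpressionId, lhs: Operand, op: char, rhs: Operand },
}

impl BcbCounter {
    fn as_operand(&self) -> Operand {
        match *self {
            BcbCounter::Counter { id } => Operand::Counter(id),
            BcbCounter::Expression { id, .. } => Operand::Expression(id),
        }
    }
}

impl fmt::Debug for BcbCounter {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            Self::Counter { id } => write!(f, "Counter({})", id.0),
            Self::Expression { id, lhs, op, rhs } => {
                write!(f, "Expression({}) = {:?} {} {:?}", id.0, lhs, op, rhs)
            }
        }
    }
}

fn main() {
    // counter0 + counter1, expressed as an expression node:
    let c0 = BcbCounter::Counter { id: CounterId(0) };
    let c1 = BcbCounter::Counter { id: CounterId(1) };
    let sum = BcbCounter::Expression {
        id: ExpressionId(0),
        lhs: c0.as_operand(),
        op: '+',
        rhs: c1.as_operand(),
    };
    println!("{sum:?}");
}
```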
+
+/// Generates and stores coverage counter and coverage expression information
+/// associated with nodes/edges in the BCB graph.
 pub(super) struct CoverageCounters {
-    function_source_hash: u64,
-    next_counter_id: u32,
-    num_expressions: u32,
+    next_counter_id: CounterId,
+    next_expression_id: ExpressionId,
+
+    /// Coverage counters/expressions that are associated with individual BCBs.
+    bcb_counters: IndexVec<BasicCoverageBlock, Option<BcbCounter>>,
+    /// Coverage counters/expressions that are associated with the control-flow
+    /// edge between two BCBs.
+    bcb_edge_counters: FxHashMap<(BasicCoverageBlock, BasicCoverageBlock), BcbCounter>,
+    /// Tracks which BCBs have a counter associated with some incoming edge.
+    /// Only used by debug assertions, to verify that BCBs with incoming edge
+    /// counters do not have their own physical counters (expressions are allowed).
+    bcb_has_incoming_edge_counters: BitSet<BasicCoverageBlock>,
+    /// Expression nodes that are not directly associated with any particular
+    /// BCB/edge, but are needed as operands to more complex expressions.
+    /// These are always [`BcbCounter::Expression`].
+    pub(super) intermediate_expressions: Vec<BcbCounter>,
+
     pub debug_counters: DebugCounters,
 }

 impl CoverageCounters {
-    pub fn new(function_source_hash: u64) -> Self {
+    pub(super) fn new(basic_coverage_blocks: &CoverageGraph) -> Self {
+        let num_bcbs = basic_coverage_blocks.num_nodes();
+
         Self {
-            function_source_hash,
-            next_counter_id: CounterValueReference::START.as_u32(),
-            num_expressions: 0,
+            next_counter_id: CounterId::START,
+            next_expression_id: ExpressionId::START,
+
+            bcb_counters: IndexVec::from_elem_n(None, num_bcbs),
+            bcb_edge_counters: FxHashMap::default(),
+            bcb_has_incoming_edge_counters: BitSet::new_empty(num_bcbs),
+            intermediate_expressions: Vec::new(),
+
             debug_counters: DebugCounters::new(),
         }
     }

     /// Activate the `DebugCounters` data structures, to provide additional debug formatting
-    /// features when formatting `CoverageKind` (counter) values.
+    /// features when formatting [`BcbCounter`] (counter) values.
     pub fn enable_debug(&mut self) {
         self.debug_counters.enable();
     }

-    /// Makes `CoverageKind` `Counter`s and `Expressions` for the `BasicCoverageBlock`s directly or
-    /// indirectly associated with `CoverageSpans`, and returns additional `Expression`s
+    /// Makes [`BcbCounter`] `Counter`s and `Expressions` for the `BasicCoverageBlock`s directly or
+    /// indirectly associated with `CoverageSpans`, and accumulates additional `Expression`s
     /// representing intermediate values.
     pub fn make_bcb_counters(
         &mut self,
-        basic_coverage_blocks: &mut CoverageGraph,
+        basic_coverage_blocks: &CoverageGraph,
         coverage_spans: &[CoverageSpan],
-    ) -> Result<Vec<CoverageKind>, Error> {
-        let mut bcb_counters = BcbCounters::new(self, basic_coverage_blocks);
-        bcb_counters.make_bcb_counters(coverage_spans)
+    ) -> Result<(), Error> {
+        MakeBcbCounters::new(self, basic_coverage_blocks).make_bcb_counters(coverage_spans)
     }

-    fn make_counter<F>(&mut self, debug_block_label_fn: F) -> CoverageKind
+    fn make_counter<F>(&mut self, debug_block_label_fn: F) -> BcbCounter
     where
         F: Fn() -> Option<String>,
     {
-        let counter = CoverageKind::Counter {
-            function_source_hash: self.function_source_hash,
-            id: self.next_counter(),
-        };
+        let counter = BcbCounter::Counter { id: self.next_counter() };
         if self.debug_counters.is_enabled() {
             self.debug_counters.add_counter(&counter, (debug_block_label_fn)());
         }
@@ -65,49 +126,120 @@ impl CoverageCounters {

     fn make_expression<F>(
         &mut self,
-        lhs: ExpressionOperandId,
+        lhs: Operand,
         op: Op,
-        rhs: ExpressionOperandId,
+        rhs: Operand,
         debug_block_label_fn: F,
-    ) -> CoverageKind
+    ) -> BcbCounter
     where
         F: Fn() -> Option<String>,
     {
         let id = self.next_expression();
-        let expression = CoverageKind::Expression { id, lhs, op, rhs };
+        let expression = BcbCounter::Expression { id, lhs, op, rhs };
         if self.debug_counters.is_enabled() {
             self.debug_counters.add_counter(&expression, (debug_block_label_fn)());
         }
         expression
     }

-    pub fn make_identity_counter(&mut self, counter_operand: ExpressionOperandId) -> CoverageKind {
+    pub fn make_identity_counter(&mut self, counter_operand: Operand) -> BcbCounter {
         let some_debug_block_label = if self.debug_counters.is_enabled() {
             self.debug_counters.some_block_label(counter_operand).cloned()
         } else {
             None
         };
-        self.make_expression(counter_operand, Op::Add, ExpressionOperandId::ZERO, || {
+        self.make_expression(counter_operand, Op::Add, Operand::Zero, || {
             some_debug_block_label.clone()
         })
     }

     /// Counter IDs start from one and go up.
-    fn next_counter(&mut self) -> CounterValueReference {
-        assert!(self.next_counter_id < u32::MAX - self.num_expressions);
+    fn next_counter(&mut self) -> CounterId {
         let next = self.next_counter_id;
-        self.next_counter_id += 1;
-        CounterValueReference::from(next)
+        self.next_counter_id = next.next_id();
+        next
+    }
+
+    /// Expression IDs start from 0 and go up.
+    /// (Counter IDs and Expression IDs are distinguished by the `Operand` enum.)
+    fn next_expression(&mut self) -> ExpressionId {
+        let next = self.next_expression_id;
+        self.next_expression_id = next.next_id();
+        next
+    }
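The allocation-scheme change is worth spelling out: previously counter IDs counted up while expression IDs counted down from `u32::MAX`, so the two kinds could share one numeric operand namespace; now both streams count up independently because operands carry their kind in an enum. A toy side-by-side (simplified `u32` IDs rather than the real newtypes; the real counter IDs start from one, zero-based here for brevity):

```rust
// Old scheme: counters count up, expressions count down from u32::MAX,
// sharing one numeric namespace; the assert guards the ranges from colliding.
struct OldIds {
    next_counter_id: u32,
    num_expressions: u32,
}

impl OldIds {
    fn next_counter(&mut self) -> u32 {
        assert!(self.next_counter_id < u32::MAX - self.num_expressions);
        let next = self.next_counter_id;
        self.next_counter_id += 1;
        next
    }

    fn next_expression(&mut self) -> u32 {
        assert!(self.next_counter_id < u32::MAX - self.num_expressions);
        let next = u32::MAX - self.num_expressions;
        self.num_expressions += 1;
        next
    }
}

// New scheme: two independent ID spaces, both counting up; no collision is
// possible because each operand records whether it is a counter or expression.
#[derive(Default)]
struct NewIds {
    next_counter_id: u32,
    next_expression_id: u32,
}

impl NewIds {
    fn next_counter(&mut self) -> u32 {
        let next = self.next_counter_id;
        self.next_counter_id += 1;
        next
    }

    fn next_expression(&mut self) -> u32 {
        let next = self.next_expression_id;
        self.next_expression_id += 1;
        next
    }
}

fn main() {
    let mut old = OldIds { next_counter_id: 0, num_expressions: 0 };
    assert_eq!(old.next_expression(), u32::MAX); // IDs descend from the top

    let mut new = NewIds::default();
    assert_eq!((new.next_counter(), new.next_counter()), (0, 1));
    assert_eq!(new.next_expression(), 0); // same number, different kind — no clash
}
```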
+
+    fn set_bcb_counter(
+        &mut self,
+        bcb: BasicCoverageBlock,
+        counter_kind: BcbCounter,
+    ) -> Result<Operand, Error> {
+        debug_assert!(
+            // If the BCB has an edge counter (to be injected into a new `BasicBlock`), it can also
+            // have an expression (to be injected into an existing `BasicBlock` represented by this
+            // `BasicCoverageBlock`).
+            counter_kind.is_expression() || !self.bcb_has_incoming_edge_counters.contains(bcb),
+            "attempt to add a `Counter` to a BCB target with existing incoming edge counters"
+        );
+        let operand = counter_kind.as_operand();
+        if let Some(replaced) = self.bcb_counters[bcb].replace(counter_kind) {
+            Error::from_string(format!(
+                "attempt to set a BasicCoverageBlock coverage counter more than once; \
+                {bcb:?} already had counter {replaced:?}",
+            ))
+        } else {
+            Ok(operand)
+        }
+    }
+
+    fn set_bcb_edge_counter(
+        &mut self,
+        from_bcb: BasicCoverageBlock,
+        to_bcb: BasicCoverageBlock,
+        counter_kind: BcbCounter,
+    ) -> Result<Operand, Error> {
+        if level_enabled!(tracing::Level::DEBUG) {
+            // If the BCB has an edge counter (to be injected into a new `BasicBlock`), it can also
+            // have an expression (to be injected into an existing `BasicBlock` represented by this
+            // `BasicCoverageBlock`).
+            if self.bcb_counter(to_bcb).is_some_and(|c| !c.is_expression()) {
+                return Error::from_string(format!(
+                    "attempt to add an incoming edge counter from {from_bcb:?} when the target BCB already \
+                    has a `Counter`"
+                ));
+            }
+        }
+        self.bcb_has_incoming_edge_counters.insert(to_bcb);
+        let operand = counter_kind.as_operand();
+        if let Some(replaced) = self.bcb_edge_counters.insert((from_bcb, to_bcb), counter_kind) {
+            Error::from_string(format!(
+                "attempt to set an edge counter more than once; from_bcb: \
+                {from_bcb:?} already had counter {replaced:?}",
+            ))
+        } else {
+            Ok(operand)
+        }
     }

-    /// Expression IDs start from u32::MAX and go down because an Expression can reference
-    /// (add or subtract counts) of both Counter regions and Expression regions. The counter
-    /// expression operand IDs must be unique across both types.
-    fn next_expression(&mut self) -> InjectedExpressionId {
-        assert!(self.next_counter_id < u32::MAX - self.num_expressions);
-        let next = u32::MAX - self.num_expressions;
-        self.num_expressions += 1;
-        InjectedExpressionId::from(next)
+    pub(super) fn bcb_counter(&self, bcb: BasicCoverageBlock) -> Option<&BcbCounter> {
+        self.bcb_counters[bcb].as_ref()
+    }
+
+    pub(super) fn take_bcb_counter(&mut self, bcb: BasicCoverageBlock) -> Option<BcbCounter> {
+        self.bcb_counters[bcb].take()
+    }
+
+    pub(super) fn drain_bcb_counters(
+        &mut self,
+    ) -> impl Iterator<Item = (BasicCoverageBlock, BcbCounter)> + '_ {
+        self.bcb_counters
+            .iter_enumerated_mut()
+            .filter_map(|(bcb, counter)| Some((bcb, counter.take()?)))
+    }
+
+    pub(super) fn drain_bcb_edge_counters(
+        &mut self,
+    ) -> impl Iterator<Item = ((BasicCoverageBlock, BasicCoverageBlock), BcbCounter)> + '_ {
+        self.bcb_edge_counters.drain()
     }
 }
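`set_bcb_counter` and `set_bcb_edge_counter` centralize what previously lived on each graph node: per-node slots plus an edge map, with double assignment reported rather than silently overwritten. A reduced sketch of that storage layout, using plain `Vec`/`HashMap` in place of `IndexVec`/`FxHashMap`:

```rust
use std::collections::HashMap;

type Bcb = usize;

// Toy version of the centralized counter storage. The real code also keeps
// a BitSet recording which nodes have incoming edge counters, for a
// debug-only invariant check.
struct Counters {
    node: Vec<Option<String>>,
    edge: HashMap<(Bcb, Bcb), String>,
}

impl Counters {
    fn new(num_nodes: usize) -> Self {
        Self { node: vec![None; num_nodes], edge: HashMap::new() }
    }

    fn set_node(&mut self, bcb: Bcb, counter: String) -> Result<(), String> {
        // `Option::replace` returns the previous value, exposing double-sets.
        match self.node[bcb].replace(counter) {
            Some(replaced) => Err(format!("bcb{bcb} already had counter {replaced}")),
            None => Ok(()),
        }
    }

    fn set_edge(&mut self, from: Bcb, to: Bcb, counter: String) -> Result<(), String> {
        // Likewise, `HashMap::insert` returns the displaced value if any.
        match self.edge.insert((from, to), counter) {
            Some(replaced) => Err(format!("edge {from}->{to} already had counter {replaced}")),
            None => Ok(()),
        }
    }
}

fn main() {
    let mut counters = Counters::new(3);
    counters.set_node(0, "Counter(0)".into()).unwrap();
    counters.set_edge(0, 1, "Counter(1)".into()).unwrap();
    // Setting the same node twice is reported instead of silently overwriting:
    assert!(counters.set_node(0, "Counter(2)".into()).is_err());
}
```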
@@ -115,15 +247,15 @@ impl CoverageCounters {
 /// injected with `CoverageSpan`s. `Expressions` have no runtime overhead, so if a viable expression
 /// (adding or subtracting two other counters or expressions) can compute the same result as an
 /// embedded counter, an `Expression` should be used.
-struct BcbCounters<'a> {
+struct MakeBcbCounters<'a> {
     coverage_counters: &'a mut CoverageCounters,
-    basic_coverage_blocks: &'a mut CoverageGraph,
+    basic_coverage_blocks: &'a CoverageGraph,
 }

-impl<'a> BcbCounters<'a> {
+impl<'a> MakeBcbCounters<'a> {
     fn new(
         coverage_counters: &'a mut CoverageCounters,
-        basic_coverage_blocks: &'a mut CoverageGraph,
+        basic_coverage_blocks: &'a CoverageGraph,
     ) -> Self {
         Self { coverage_counters, basic_coverage_blocks }
     }
@@ -138,13 +270,9 @@ impl<'a> BcbCounters<'a> {
     /// Returns any non-code-span expressions created to represent intermediate values (such as to
     /// add two counters so the result can be subtracted from another counter), or an Error with
     /// message for subsequent debugging.
-    fn make_bcb_counters(
-        &mut self,
-        coverage_spans: &[CoverageSpan],
-    ) -> Result<Vec<CoverageKind>, Error> {
+    fn make_bcb_counters(&mut self, coverage_spans: &[CoverageSpan]) -> Result<(), Error> {
         debug!("make_bcb_counters(): adding a counter or expression to each BasicCoverageBlock");
         let num_bcbs = self.basic_coverage_blocks.num_nodes();
-        let mut collect_intermediate_expressions = Vec::with_capacity(num_bcbs);

         let mut bcbs_with_coverage = BitSet::new_empty(num_bcbs);
         for covspan in coverage_spans {
@@ -165,16 +293,10 @@ impl<'a> BcbCounters<'a> {
         while let Some(bcb) = traversal.next(self.basic_coverage_blocks) {
             if bcbs_with_coverage.contains(bcb) {
                 debug!("{:?} has at least one `CoverageSpan`. Get or make its counter", bcb);
-                let branching_counter_operand =
-                    self.get_or_make_counter_operand(bcb, &mut collect_intermediate_expressions)?;
+                let branching_counter_operand = self.get_or_make_counter_operand(bcb)?;

                 if self.bcb_needs_branch_counters(bcb) {
-                    self.make_branch_counters(
-                        &mut traversal,
-                        bcb,
-                        branching_counter_operand,
-                        &mut collect_intermediate_expressions,
-                    )?;
+                    self.make_branch_counters(&mut traversal, bcb, branching_counter_operand)?;
                 }
             } else {
                 debug!(
@@ -186,7 +308,7 @@ impl<'a> BcbCounters<'a> {
         }

         if traversal.is_complete() {
-            Ok(collect_intermediate_expressions)
+            Ok(())
         } else {
             Error::from_string(format!(
                 "`TraverseCoverageGraphWithLoops` missed some `BasicCoverageBlock`s: {:?}",
@@ -199,8 +321,7 @@ impl<'a> BcbCounters<'a> {
         &mut self,
         traversal: &mut TraverseCoverageGraphWithLoops,
         branching_bcb: BasicCoverageBlock,
-        branching_counter_operand: ExpressionOperandId,
-        collect_intermediate_expressions: &mut Vec<CoverageKind>,
+        branching_counter_operand: Operand,
     ) -> Result<(), Error> {
         let branches = self.bcb_branches(branching_bcb);
         debug!(
@@ -208,9 +329,7 @@ impl<'a> BcbCounters<'a> {
             branching_bcb,
             branches
                 .iter()
-                .map(|branch| {
-                    format!("{:?}: {:?}", branch, branch.counter(&self.basic_coverage_blocks))
-                })
+                .map(|branch| { format!("{:?}: {:?}", branch, self.branch_counter(branch)) })
                 .collect::<Vec<_>>()
                 .join("\n  "),
         );
@@ -236,17 +355,10 @@ impl<'a> BcbCounters<'a> {
                     counter",
                     branch, branching_bcb
                 );
-                self.get_or_make_counter_operand(
-                    branch.target_bcb,
-                    collect_intermediate_expressions,
-                )?
+                self.get_or_make_counter_operand(branch.target_bcb)?
             } else {
                 debug!("  {:?} has multiple incoming edges, so adding an edge counter", branch);
-                self.get_or_make_edge_counter_operand(
-                    branching_bcb,
-                    branch.target_bcb,
-                    collect_intermediate_expressions,
-                )?
+                self.get_or_make_edge_counter_operand(branching_bcb, branch.target_bcb)?
             };
             if let Some(sumup_counter_operand) =
                 some_sumup_counter_operand.replace(branch_counter_operand)
@@ -261,8 +373,8 @@ impl<'a> BcbCounters<'a> {
                     "  [new intermediate expression: {}]",
                     self.format_counter(&intermediate_expression)
                 );
-                let intermediate_expression_operand = intermediate_expression.as_operand_id();
-                collect_intermediate_expressions.push(intermediate_expression);
+                let intermediate_expression_operand = intermediate_expression.as_operand();
+                self.coverage_counters.intermediate_expressions.push(intermediate_expression);
                 some_sumup_counter_operand.replace(intermediate_expression_operand);
             }
         }
@@ -282,41 +394,36 @@ impl<'a> BcbCounters<'a> {
             branching_counter_operand,
             Op::Subtract,
             sumup_counter_operand,
-            || Some(format!("{:?}", expression_branch)),
+            || Some(format!("{expression_branch:?}")),
         );
         debug!("{:?} gets an expression: {}", expression_branch, self.format_counter(&expression));
         let bcb = expression_branch.target_bcb;
         if expression_branch.is_only_path_to_target() {
-            self.basic_coverage_blocks[bcb].set_counter(expression)?;
+            self.coverage_counters.set_bcb_counter(bcb, expression)?;
         } else {
-            self.basic_coverage_blocks[bcb].set_edge_counter_from(branching_bcb, expression)?;
+            self.coverage_counters.set_bcb_edge_counter(branching_bcb, bcb, expression)?;
         }
         Ok(())
     }

-    fn get_or_make_counter_operand(
-        &mut self,
-        bcb: BasicCoverageBlock,
-        collect_intermediate_expressions: &mut Vec<CoverageKind>,
-    ) -> Result<ExpressionOperandId, Error> {
-        self.recursive_get_or_make_counter_operand(bcb, collect_intermediate_expressions, 1)
+    fn get_or_make_counter_operand(&mut self, bcb: BasicCoverageBlock) -> Result<Operand, Error> {
+        self.recursive_get_or_make_counter_operand(bcb, 1)
     }

     fn recursive_get_or_make_counter_operand(
         &mut self,
         bcb: BasicCoverageBlock,
-        collect_intermediate_expressions: &mut Vec<CoverageKind>,
         debug_indent_level: usize,
-    ) -> Result<ExpressionOperandId, Error> {
+    ) -> Result<Operand, Error> {
         // If the BCB already has a counter, return it.
-        if let Some(counter_kind) = self.basic_coverage_blocks[bcb].counter() {
+        if let Some(counter_kind) = &self.coverage_counters.bcb_counters[bcb] {
             debug!(
                 "{}{:?} already has a counter: {}",
                 NESTED_INDENT.repeat(debug_indent_level),
                 bcb,
                 self.format_counter(counter_kind),
             );
-            return Ok(counter_kind.as_operand_id());
+            return Ok(counter_kind.as_operand());
         }

         // A BCB with only one incoming edge gets a simple `Counter` (via `make_counter()`).
@@ -324,7 +431,7 @@ impl<'a> BcbCounters<'a> {
         // program results in a tight infinite loop, but it should still compile.
         let one_path_to_target = self.bcb_has_one_path_to_target(bcb);
         if one_path_to_target || self.bcb_predecessors(bcb).contains(&bcb) {
-            let counter_kind = self.coverage_counters.make_counter(|| Some(format!("{:?}", bcb)));
+            let counter_kind = self.coverage_counters.make_counter(|| Some(format!("{bcb:?}")));
             if one_path_to_target {
                 debug!(
                     "{}{:?} gets a new counter: {}",
@@ -342,7 +449,7 @@ impl<'a> BcbCounters<'a> {
                     self.format_counter(&counter_kind),
                 );
             }
-            return self.basic_coverage_blocks[bcb].set_counter(counter_kind);
+            return self.coverage_counters.set_bcb_counter(bcb, counter_kind);
         }

         // A BCB with multiple incoming edges can compute its count by `Expression`, summing up the
@@ -358,7 +465,6 @@ impl<'a> BcbCounters<'a> {
         let first_edge_counter_operand = self.recursive_get_or_make_edge_counter_operand(
             predecessors.next().unwrap(),
             bcb,
-            collect_intermediate_expressions,
             debug_indent_level + 1,
         )?;
         let mut some_sumup_edge_counter_operand = None;
@@ -366,7 +472,6 @@ impl<'a> BcbCounters<'a> {
             let edge_counter_operand = self.recursive_get_or_make_edge_counter_operand(
                 predecessor,
                 bcb,
-                collect_intermediate_expressions,
                 debug_indent_level + 1,
             )?;
             if let Some(sumup_edge_counter_operand) =
@@ -383,8 +488,8 @@ impl<'a> BcbCounters<'a> {
                     NESTED_INDENT.repeat(debug_indent_level),
                     self.format_counter(&intermediate_expression)
                 );
-                let intermediate_expression_operand = intermediate_expression.as_operand_id();
-                collect_intermediate_expressions.push(intermediate_expression);
+                let intermediate_expression_operand = intermediate_expression.as_operand();
+                self.coverage_counters.intermediate_expressions.push(intermediate_expression);
                 some_sumup_edge_counter_operand.replace(intermediate_expression_operand);
             }
         }
@@ -392,7 +497,7 @@ impl<'a> BcbCounters<'a> {
             first_edge_counter_operand,
             Op::Add,
             some_sumup_edge_counter_operand.unwrap(),
-            || Some(format!("{:?}", bcb)),
+            || Some(format!("{bcb:?}")),
         );
         debug!(
             "{}{:?} gets a new counter (sum of predecessor counters): {}",
@@ -400,43 +505,34 @@ impl<'a> BcbCounters<'a> {
             bcb,
             self.format_counter(&counter_kind)
         );
-        self.basic_coverage_blocks[bcb].set_counter(counter_kind)
+        self.coverage_counters.set_bcb_counter(bcb, counter_kind)
     }

     fn get_or_make_edge_counter_operand(
         &mut self,
         from_bcb: BasicCoverageBlock,
         to_bcb: BasicCoverageBlock,
-        collect_intermediate_expressions: &mut Vec<CoverageKind>,
-    ) -> Result<ExpressionOperandId, Error> {
-        self.recursive_get_or_make_edge_counter_operand(
-            from_bcb,
-            to_bcb,
-            collect_intermediate_expressions,
-            1,
-        )
+    ) -> Result<Operand, Error> {
+        self.recursive_get_or_make_edge_counter_operand(from_bcb, to_bcb, 1)
     }

     fn recursive_get_or_make_edge_counter_operand(
         &mut self,
         from_bcb: BasicCoverageBlock,
         to_bcb: BasicCoverageBlock,
-        collect_intermediate_expressions: &mut Vec<CoverageKind>,
         debug_indent_level: usize,
-    ) -> Result<ExpressionOperandId, Error> {
+    ) -> Result<Operand, Error> {
         // If the source BCB has only one successor (assumed to be the given target), an edge
         // counter is unnecessary. Just get or make a counter for the source BCB.
         let successors = self.bcb_successors(from_bcb).iter();
         if successors.len() == 1 {
-            return self.recursive_get_or_make_counter_operand(
-                from_bcb,
-                collect_intermediate_expressions,
-                debug_indent_level + 1,
-            );
+            return self.recursive_get_or_make_counter_operand(from_bcb, debug_indent_level + 1);
         }
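`recursive_get_or_make_edge_counter_operand` follows a get-or-make discipline: reuse an existing counter when one is already recorded, otherwise mint one, store it, and return it. Stripped of the `Result` plumbing, recursion, and debug indentation, the shape is roughly:

```rust
use std::collections::HashMap;

// Get-or-make, reduced to a toy: return the existing entry if present,
// otherwise create a fresh counter ID, record it, and return it.
fn get_or_make(
    cache: &mut HashMap<(u32, u32), u32>,
    next_id: &mut u32,
    edge: (u32, u32),
) -> u32 {
    if let Some(&existing) = cache.get(&edge) {
        return existing; // the edge already has a counter
    }
    let id = *next_id;
    *next_id += 1;
    cache.insert(edge, id);
    id
}

fn main() {
    let mut cache = HashMap::new();
    let mut next_id = 0;
    let a = get_or_make(&mut cache, &mut next_id, (0, 1));
    let b = get_or_make(&mut cache, &mut next_id, (0, 1));
    assert_eq!(a, b); // the second lookup reuses the first counter
}
```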
         // If the edge already has a counter, return it.
-        if let Some(counter_kind) = self.basic_coverage_blocks[to_bcb].edge_counter_from(from_bcb) {
+        if let Some(counter_kind) =
+            self.coverage_counters.bcb_edge_counters.get(&(from_bcb, to_bcb))
+        {
             debug!(
                 "{}Edge {:?}->{:?} already has a counter: {}",
                 NESTED_INDENT.repeat(debug_indent_level),
@@ -444,12 +540,12 @@ impl<'a> BcbCounters<'a> {
                 to_bcb,
                 self.format_counter(counter_kind)
             );
-            return Ok(counter_kind.as_operand_id());
+            return Ok(counter_kind.as_operand());
         }

         // Make a new counter to count this edge.
         let counter_kind =
-            self.coverage_counters.make_counter(|| Some(format!("{:?}->{:?}", from_bcb, to_bcb)));
+            self.coverage_counters.make_counter(|| Some(format!("{from_bcb:?}->{to_bcb:?}")));
         debug!(
             "{}Edge {:?}->{:?} gets a new counter: {}",
             NESTED_INDENT.repeat(debug_indent_level),
@@ -457,7 +553,7 @@ impl<'a> BcbCounters<'a> {
             to_bcb,
             self.format_counter(&counter_kind)
         );
-        self.basic_coverage_blocks[to_bcb].set_edge_counter_from(from_bcb, counter_kind)
+        self.coverage_counters.set_bcb_edge_counter(from_bcb, to_bcb, counter_kind)
     }

     /// Select a branch for the expression, either the recommended `reloop_branch`, or if none was
@@ -467,8 +563,7 @@ impl<'a> BcbCounters<'a> {
         traversal: &TraverseCoverageGraphWithLoops,
         branches: &[BcbBranch],
     ) -> BcbBranch {
-        let branch_needs_a_counter =
-            |branch: &BcbBranch| branch.counter(&self.basic_coverage_blocks).is_none();
+        let branch_needs_a_counter = |branch: &BcbBranch| self.branch_has_no_counter(branch);

         let some_reloop_branch = self.find_some_reloop_branch(traversal, &branches);
         if let Some(reloop_branch_without_counter) =
@@ -481,10 +576,8 @@ impl<'a> BcbCounters<'a> {
             );
             reloop_branch_without_counter
         } else {
-            let &branch_without_counter = branches
-                .iter()
-                .find(|&&branch| branch.counter(&self.basic_coverage_blocks).is_none())
-                .expect(
+            let &branch_without_counter =
+                branches.iter().find(|&branch| self.branch_has_no_counter(branch)).expect(
                     "needs_branch_counters was `true` so there should be at least one \
                     branch",
                 );
@@ -511,8 +604,7 @@ impl<'a> BcbCounters<'a> {
         traversal: &TraverseCoverageGraphWithLoops,
         branches: &[BcbBranch],
     ) -> Option<BcbBranch> {
-        let branch_needs_a_counter =
-            |branch: &BcbBranch| branch.counter(&self.basic_coverage_blocks).is_none();
+        let branch_needs_a_counter = |branch: &BcbBranch| self.branch_has_no_counter(branch);

         let mut some_reloop_branch: Option<BcbBranch> = None;
         for context in traversal.context_stack.iter().rev() {
@@ -523,7 +615,7 @@ impl<'a> BcbCounters<'a> {
                     self.bcb_dominates(branch.target_bcb, backedge_from_bcb)
                 }) {
                     if let Some(reloop_branch) = some_reloop_branch {
-                        if reloop_branch.counter(&self.basic_coverage_blocks).is_none() {
+                        if self.branch_has_no_counter(&reloop_branch) {
                             // we already found a candidate reloop_branch that still
                             // needs a counter
                             continue;
@@ -589,12 +681,24 @@ impl<'a> BcbCounters<'a> {
     }

     fn bcb_needs_branch_counters(&self, bcb: BasicCoverageBlock) -> bool {
-        let branch_needs_a_counter =
-            |branch: &BcbBranch| branch.counter(&self.basic_coverage_blocks).is_none();
+        let branch_needs_a_counter = |branch: &BcbBranch| self.branch_has_no_counter(branch);
         let branches = self.bcb_branches(bcb);
         branches.len() > 1 && branches.iter().any(branch_needs_a_counter)
     }

+    fn branch_has_no_counter(&self, branch: &BcbBranch) -> bool {
+        self.branch_counter(branch).is_none()
+    }
+
+    fn branch_counter(&self, branch: &BcbBranch) -> Option<&BcbCounter> {
+        let to_bcb = branch.target_bcb;
+        if let Some(from_bcb) = branch.edge_from_bcb {
+            self.coverage_counters.bcb_edge_counters.get(&(from_bcb, to_bcb))
+        } else {
+            self.coverage_counters.bcb_counters[to_bcb].as_ref()
+        }
+    }
+
     /// Returns true if the BasicCoverageBlock has zero or one incoming edge. (If zero, it should be
     /// the entry point for the function.)
     #[inline]
@@ -608,7 +712,7 @@ impl<'a> BcbCounters<'a> {
     }

     #[inline]
-    fn format_counter(&self, counter_kind: &CoverageKind) -> String {
+    fn format_counter(&self, counter_kind: &BcbCounter) -> String {
         self.coverage_counters.debug_counters.format_counter(counter_kind)
     }
 }
diff --git a/compiler/rustc_mir_transform/src/coverage/debug.rs b/compiler/rustc_mir_transform/src/coverage/debug.rs
index 7ad981441..af616c498 100644
--- a/compiler/rustc_mir_transform/src/coverage/debug.rs
+++ b/compiler/rustc_mir_transform/src/coverage/debug.rs
@@ -108,6 +108,7 @@
 //! recursively, generating labels with nested operations, enclosed in parentheses
 //! (for example: `bcb2 + (bcb0 - bcb1)`).

+use super::counters::{BcbCounter, CoverageCounters};
 use super::graph::{BasicCoverageBlock, BasicCoverageBlockData, CoverageGraph};
 use super::spans::CoverageSpan;

@@ -198,9 +199,9 @@ impl DebugOptions {

 fn bool_option_val(option: &str, some_strval: Option<&str>) -> bool {
     if let Some(val) = some_strval {
-        if vec!["yes", "y", "on", "true"].contains(&val) {
+        if ["yes", "y", "on", "true"].contains(&val) {
             true
-        } else if vec!["no", "n", "off", "false"].contains(&val) {
+        } else if ["no", "n", "off", "false"].contains(&val) {
             false
         } else {
             bug!(
@@ -246,11 +247,11 @@ impl Default for ExpressionFormat {
     }
 }

-/// If enabled, this struct maintains a map from `CoverageKind` IDs (as `ExpressionOperandId`) to
-/// the `CoverageKind` data and optional label (normally, the counter's associated
+/// If enabled, this struct maintains a map from `BcbCounter` IDs (as `Operand`) to
+/// the `BcbCounter` data and optional label (normally, the counter's associated
 /// `BasicCoverageBlock` format string, if any).
 ///
-/// Use `format_counter` to convert one of these `CoverageKind` counters to a debug output string,
+/// Use `format_counter` to convert one of these `BcbCounter` counters to a debug output string,
 /// as directed by the `DebugOptions`. This allows the format of counter labels in logs and dump
 /// files (including the `CoverageGraph` graphviz file) to be changed at runtime, via environment
 /// variable.
@@ -258,7 +259,7 @@ impl Default for ExpressionFormat {
 /// `DebugCounters` supports a recursive rendering of `Expression` counters, so they can be
 /// presented as nested expressions such as `(bcb3 - (bcb0 + bcb1))`.
 pub(super) struct DebugCounters {
-    some_counters: Option<FxHashMap<ExpressionOperandId, DebugCounter>>,
+    some_counters: Option<FxHashMap<Operand, DebugCounter>>,
 }

 impl DebugCounters {
@@ -275,36 +276,35 @@ impl DebugCounters {
         self.some_counters.is_some()
     }

-    pub fn add_counter(&mut self, counter_kind: &CoverageKind, some_block_label: Option<String>) {
+    pub fn add_counter(&mut self, counter_kind: &BcbCounter, some_block_label: Option<String>) {
         if let Some(counters) = &mut self.some_counters {
-            let id = counter_kind.as_operand_id();
+            let id = counter_kind.as_operand();
             counters
                 .try_insert(id, DebugCounter::new(counter_kind.clone(), some_block_label))
                 .expect("attempt to add the same counter_kind to DebugCounters more than once");
         }
     }

-    pub fn some_block_label(&self, operand: ExpressionOperandId) -> Option<&String> {
+    pub fn some_block_label(&self, operand: Operand) -> Option<&String> {
         self.some_counters.as_ref().and_then(|counters| {
             counters.get(&operand).and_then(|debug_counter| debug_counter.some_block_label.as_ref())
         })
     }

-    pub fn format_counter(&self, counter_kind: &CoverageKind) -> String {
+    pub fn format_counter(&self, counter_kind: &BcbCounter) -> String {
         match *counter_kind {
-            CoverageKind::Counter { .. } => {
+            BcbCounter::Counter { .. } => {
                 format!("Counter({})", self.format_counter_kind(counter_kind))
             }
-            CoverageKind::Expression { .. } => {
+            BcbCounter::Expression { .. } => {
                 format!("Expression({})", self.format_counter_kind(counter_kind))
             }
-            CoverageKind::Unreachable { .. } => "Unreachable".to_owned(),
         }
     }

-    fn format_counter_kind(&self, counter_kind: &CoverageKind) -> String {
+    fn format_counter_kind(&self, counter_kind: &BcbCounter) -> String {
         let counter_format = &debug_options().counter_format;
-        if let CoverageKind::Expression { id, lhs, op, rhs } = *counter_kind {
+        if let BcbCounter::Expression { id, lhs, op, rhs } = *counter_kind {
             if counter_format.operation {
                 return format!(
                     "{}{} {} {}",
@@ -323,29 +323,29 @@ impl DebugCounters {
             }
         }

-        let id = counter_kind.as_operand_id();
+        let id = counter_kind.as_operand();
         if self.some_counters.is_some() && (counter_format.block || !counter_format.id) {
             let counters = self.some_counters.as_ref().unwrap();
             if let Some(DebugCounter { some_block_label: Some(block_label), .. }) =
                 counters.get(&id)
             {
                 return if counter_format.id {
-                    format!("{}#{}", block_label, id.index())
+                    format!("{}#{:?}", block_label, id)
                 } else {
                     block_label.to_string()
                 };
             }
         }
-        format!("#{}", id.index())
+        format!("#{:?}", id)
     }

-    fn format_operand(&self, operand: ExpressionOperandId) -> String {
-        if operand.index() == 0 {
+    fn format_operand(&self, operand: Operand) -> String {
+        if matches!(operand, Operand::Zero) {
             return String::from("0");
         }
         if let Some(counters) = &self.some_counters {
             if let Some(DebugCounter { counter_kind, some_block_label }) = counters.get(&operand) {
-                if let CoverageKind::Expression { .. } = counter_kind {
+                if let BcbCounter::Expression { .. } = counter_kind {
                     if let Some(label) = some_block_label && debug_options().counter_format.block {
                         return format!(
                             "{}:({})",
@@ -358,19 +358,19 @@ impl DebugCounters {
                 return self.format_counter_kind(counter_kind);
             }
         }
-        format!("#{}", operand.index())
+        format!("#{:?}", operand)
     }
 }

 /// A non-public support class to `DebugCounters`.
 #[derive(Debug)]
 struct DebugCounter {
-    counter_kind: CoverageKind,
+    counter_kind: BcbCounter,
     some_block_label: Option<String>,
 }

 impl DebugCounter {
-    fn new(counter_kind: CoverageKind, some_block_label: Option<String>) -> Self {
+    fn new(counter_kind: BcbCounter, some_block_label: Option<String>) -> Self {
         Self { counter_kind, some_block_label }
     }
 }

@@ -379,9 +379,9 @@ impl DebugCounter {
 /// a Graphviz (.dot file) representation of the `CoverageGraph`, for debugging purposes.
 pub(super) struct GraphvizData {
     some_bcb_to_coverage_spans_with_counters:
-        Option<FxHashMap<BasicCoverageBlock, Vec<(CoverageSpan, CoverageKind)>>>,
-    some_bcb_to_dependency_counters: Option<FxHashMap<BasicCoverageBlock, Vec<CoverageKind>>>,
-    some_edge_to_counter: Option<FxHashMap<(BasicCoverageBlock, BasicBlock), CoverageKind>>,
+        Option<FxHashMap<BasicCoverageBlock, Vec<(CoverageSpan, BcbCounter)>>>,
+    some_bcb_to_dependency_counters: Option<FxHashMap<BasicCoverageBlock, Vec<BcbCounter>>>,
+    some_edge_to_counter: Option<FxHashMap<(BasicCoverageBlock, BasicBlock), BcbCounter>>,
 }

 impl GraphvizData {
@@ -408,7 +408,7 @@ impl GraphvizData {
         &mut self,
         bcb: BasicCoverageBlock,
         coverage_span: &CoverageSpan,
-        counter_kind: &CoverageKind,
+        counter_kind: &BcbCounter,
     ) {
         if let Some(bcb_to_coverage_spans_with_counters) =
             self.some_bcb_to_coverage_spans_with_counters.as_mut()
@@ -423,7 +423,7 @@ impl GraphvizData {
     pub fn get_bcb_coverage_spans_with_counters(
         &self,
         bcb: BasicCoverageBlock,
-    ) -> Option<&[(CoverageSpan, CoverageKind)]> {
+    ) -> Option<&[(CoverageSpan, BcbCounter)]> {
         if let Some(bcb_to_coverage_spans_with_counters) =
             self.some_bcb_to_coverage_spans_with_counters.as_ref()
         {
@@ -436,7 +436,7 @@ impl GraphvizData {
     pub fn add_bcb_dependency_counter(
         &mut self,
         bcb: BasicCoverageBlock,
-        counter_kind: &CoverageKind,
+        counter_kind: &BcbCounter,
     ) {
         if let Some(bcb_to_dependency_counters) = self.some_bcb_to_dependency_counters.as_mut() {
             bcb_to_dependency_counters
@@ -446,7 +446,7 @@ impl GraphvizData {
         }
     }

-    pub fn get_bcb_dependency_counters(&self, bcb: BasicCoverageBlock) -> Option<&[CoverageKind]> {
+    pub fn get_bcb_dependency_counters(&self, bcb: BasicCoverageBlock) -> Option<&[BcbCounter]> {
         if let Some(bcb_to_dependency_counters) = self.some_bcb_to_dependency_counters.as_ref() {
             bcb_to_dependency_counters.get(&bcb).map(Deref::deref)
         } else {
@@ -458,7 +458,7 @@ impl GraphvizData {
         &mut self,
         from_bcb: BasicCoverageBlock,
         to_bb: BasicBlock,
-        counter_kind: &CoverageKind,
+        counter_kind: &BcbCounter,
     ) {
         if let Some(edge_to_counter) = self.some_edge_to_counter.as_mut() {
             edge_to_counter
@@ -471,7 +471,7 @@ impl GraphvizData {
         &self,
         from_bcb: BasicCoverageBlock,
         to_bb: BasicBlock,
-    ) -> Option<&CoverageKind> {
+    ) -> Option<&BcbCounter> {
         if let Some(edge_to_counter) = self.some_edge_to_counter.as_ref() {
             edge_to_counter.get(&(from_bcb, to_bb))
         } else {
@@ -485,10 +485,9 @@ impl GraphvizData {
 /// _not_ used are retained in the `unused_expressions` Vec, to be included in debug output (logs
 /// and/or a `CoverageGraph` graphviz output).
 pub(super) struct UsedExpressions {
-    some_used_expression_operands:
-        Option<FxHashMap<ExpressionOperandId, Vec<InjectedExpressionId>>>,
+    some_used_expression_operands: Option<FxHashMap<Operand, Vec<ExpressionId>>>,
     some_unused_expressions:
-        Option<Vec<(CoverageKind, Option<BasicCoverageBlock>, BasicCoverageBlock)>>,
+        Option<Vec<(BcbCounter, Option<BasicCoverageBlock>, BasicCoverageBlock)>>,
 }

 impl UsedExpressions {
@@ -506,18 +505,18 @@ impl UsedExpressions {
         self.some_used_expression_operands.is_some()
     }

-    pub fn add_expression_operands(&mut self, expression: &CoverageKind) {
+    pub fn add_expression_operands(&mut self, expression: &BcbCounter) {
         if let Some(used_expression_operands) = self.some_used_expression_operands.as_mut() {
-            if let CoverageKind::Expression { id, lhs, rhs, .. } = *expression {
+            if let BcbCounter::Expression { id, lhs, rhs, .. } = *expression {
                 used_expression_operands.entry(lhs).or_insert_with(Vec::new).push(id);
                 used_expression_operands.entry(rhs).or_insert_with(Vec::new).push(id);
             }
         }
     }

-    pub fn expression_is_used(&self, expression: &CoverageKind) -> bool {
+    pub fn expression_is_used(&self, expression: &BcbCounter) -> bool {
         if let Some(used_expression_operands) = self.some_used_expression_operands.as_ref() {
-            used_expression_operands.contains_key(&expression.as_operand_id())
+            used_expression_operands.contains_key(&expression.as_operand())
         } else {
             false
         }
@@ -525,12 +524,12 @@ impl UsedExpressions {

     pub fn add_unused_expression_if_not_found(
         &mut self,
-        expression: &CoverageKind,
+        expression: &BcbCounter,
         edge_from_bcb: Option<BasicCoverageBlock>,
         target_bcb: BasicCoverageBlock,
     ) {
         if let Some(used_expression_operands) = self.some_used_expression_operands.as_ref() {
-            if !used_expression_operands.contains_key(&expression.as_operand_id()) {
+            if !used_expression_operands.contains_key(&expression.as_operand()) {
                 self.some_unused_expressions.as_mut().unwrap().push((
                     expression.clone(),
                     edge_from_bcb,
@@ -540,11 +539,11 @@ impl UsedExpressions {
         }
     }

-    /// Return the list of unused counters (if any) as a tuple with the counter (`CoverageKind`),
+    /// Return the list of unused counters (if any) as a tuple with the counter (`BcbCounter`),
     /// optional `from_bcb` (if it was an edge counter), and `target_bcb`.
     pub fn get_unused_expressions(
         &self,
-    ) -> Vec<(CoverageKind, Option<BasicCoverageBlock>, BasicCoverageBlock)> {
+    ) -> Vec<(BcbCounter, Option<BasicCoverageBlock>, BasicCoverageBlock)> {
         if let Some(unused_expressions) = self.some_unused_expressions.as_ref() {
             unused_expressions.clone()
         } else {
@@ -560,7 +559,7 @@ impl UsedExpressions {
         bcb_counters_without_direct_coverage_spans: &[(
             Option<BasicCoverageBlock>,
             BasicCoverageBlock,
-            CoverageKind,
+            BcbCounter,
         )],
     ) {
         if self.is_enabled() {
@@ -630,7 +629,7 @@ pub(super) fn dump_coverage_spanview<'tcx>(
         .expect("Unexpected error creating MIR spanview HTML file");
     let crate_name = tcx.crate_name(def_id.krate);
     let item_name = tcx.def_path(def_id).to_filename_friendly_no_crate();
-    let title = format!("{}.{} - Coverage Spans", crate_name, item_name);
+    let title = format!("{crate_name}.{item_name} - Coverage Spans");
     spanview::write_document(tcx, body_span, span_viewables, &title, &mut file)
         .expect("Unexpected IO error dumping coverage spans as HTML");
 }
@@ -660,18 +659,21 @@ pub(super) fn dump_coverage_graphviz<'tcx>(
     mir_body: &mir::Body<'tcx>,
     pass_name: &str,
     basic_coverage_blocks: &CoverageGraph,
-    debug_counters: &DebugCounters,
+    coverage_counters: &CoverageCounters,
     graphviz_data: &GraphvizData,
-    intermediate_expressions: &[CoverageKind],
+    intermediate_expressions: &[BcbCounter],
     debug_used_expressions: &UsedExpressions,
 ) {
+    let debug_counters = &coverage_counters.debug_counters;
+
     let mir_source = mir_body.source;
     let def_id = mir_source.def_id();
     let node_content = |bcb| {
         bcb_to_string_sections(
             tcx,
             mir_body,
-            debug_counters,
+            coverage_counters,
+            bcb,
             &basic_coverage_blocks[bcb],
             graphviz_data.get_bcb_coverage_spans_with_counters(bcb),
             graphviz_data.get_bcb_dependency_counters(bcb),
@@ -737,12 +739,15 @@ pub(super) fn dump_coverage_graphviz<'tcx>(
 fn bcb_to_string_sections<'tcx>(
     tcx: TyCtxt<'tcx>,
     mir_body: &mir::Body<'tcx>,
-    debug_counters: &DebugCounters,
+    coverage_counters: &CoverageCounters,
+    bcb: BasicCoverageBlock,
     bcb_data: &BasicCoverageBlockData,
-    some_coverage_spans_with_counters: Option<&[(CoverageSpan, CoverageKind)]>,
-    some_dependency_counters: Option<&[CoverageKind]>,
-    some_intermediate_expressions: Option<&[CoverageKind]>,
+    some_coverage_spans_with_counters: Option<&[(CoverageSpan, BcbCounter)]>,
+    some_dependency_counters: Option<&[BcbCounter]>,
+    some_intermediate_expressions: Option<&[BcbCounter]>,
 ) -> Vec<String> {
+    let debug_counters = &coverage_counters.debug_counters;
+
     let len = bcb_data.basic_blocks.len();
     let mut sections = Vec::new();
     if let Some(collect_intermediate_expressions) = some_intermediate_expressions {
@@ -778,8 +783,8 @@ fn bcb_to_string_sections<'tcx>(
                 .join("  \n"),
         ));
     }
-    if let Some(counter_kind) = &bcb_data.counter_kind {
-        sections.push(format!("{:?}", counter_kind));
+    if let Some(counter_kind) = coverage_counters.bcb_counter(bcb) {
+        sections.push(format!("{counter_kind:?}"));
     }
     let non_term_blocks = bcb_data.basic_blocks[0..len - 1]
         .iter()
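One small cleanup in `debug.rs` above is independent of the type renames: `bool_option_val` now probes a stack array instead of building a `Vec` on every call. Behaviorally identical:

```rust
fn is_truthy_old(val: &str) -> bool {
    vec!["yes", "y", "on", "true"].contains(&val) // allocates a Vec each call
}

fn is_truthy_new(val: &str) -> bool {
    ["yes", "y", "on", "true"].contains(&val) // no allocation; slice::contains
}

fn main() {
    assert!(is_truthy_old("yes") && is_truthy_new("yes"));
    assert!(!is_truthy_old("no") && !is_truthy_new("no"));
}
```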
diff --git a/compiler/rustc_mir_transform/src/coverage/graph.rs b/compiler/rustc_mir_transform/src/coverage/graph.rs
index d2a854b26..59b01ffec 100644
--- a/compiler/rustc_mir_transform/src/coverage/graph.rs
+++ b/compiler/rustc_mir_transform/src/coverage/graph.rs
@@ -1,12 +1,8 @@
-use super::Error;
-
 use itertools::Itertools;
-use rustc_data_structures::fx::FxHashMap;
 use rustc_data_structures::graph::dominators::{self, Dominators};
 use rustc_data_structures::graph::{self, GraphSuccessors, WithNumNodes, WithStartNode};
 use rustc_index::bit_set::BitSet;
 use rustc_index::{IndexSlice, IndexVec};
-use rustc_middle::mir::coverage::*;
 use rustc_middle::mir::{self, BasicBlock, BasicBlockData, Terminator, TerminatorKind};

 use std::cmp::Ordering;
@@ -15,10 +11,7 @@ use std::ops::{Index, IndexMut};
 const ID_SEPARATOR: &str = ",";

 /// A coverage-specific simplification of the MIR control flow graph (CFG). The `CoverageGraph`s
-/// nodes are `BasicCoverageBlock`s, which encompass one or more MIR `BasicBlock`s, plus a
-/// `CoverageKind` counter (to be added by `CoverageCounters::make_bcb_counters`), and an optional
-/// set of additional counters--if needed--to count incoming edges, if there are more than one.
-/// (These "edge counters" are eventually converted into new MIR `BasicBlock`s.)
+/// nodes are `BasicCoverageBlock`s, which encompass one or more MIR `BasicBlock`s.
 #[derive(Debug)]
 pub(super) struct CoverageGraph {
     bcbs: IndexVec<BasicCoverageBlock, BasicCoverageBlockData>,
@@ -196,13 +189,6 @@ impl CoverageGraph {
     }

     #[inline(always)]
-    pub fn iter_enumerated_mut(
-        &mut self,
-    ) -> impl Iterator<Item = (BasicCoverageBlock, &mut BasicCoverageBlockData)> {
-        self.bcbs.iter_enumerated_mut()
-    }
-
-    #[inline(always)]
     pub fn bcb_from_bb(&self, bb: BasicBlock) -> Option<BasicCoverageBlock> {
         if bb.index() < self.bb_to_bcb.len() { self.bb_to_bcb[bb] } else { None }
     }
@@ -320,14 +306,12 @@ rustc_index::newtype_index! {
 #[derive(Debug, Clone)]
 pub(super) struct BasicCoverageBlockData {
     pub basic_blocks: Vec<BasicBlock>,
-    pub counter_kind: Option<CoverageKind>,
-    edge_from_bcbs: Option<FxHashMap<BasicCoverageBlock, CoverageKind>>,
 }

 impl BasicCoverageBlockData {
     pub fn from(basic_blocks: Vec<BasicBlock>) -> Self {
         assert!(basic_blocks.len() > 0);
-        Self { basic_blocks, counter_kind: None, edge_from_bcbs: None }
+        Self { basic_blocks }
     }

     #[inline(always)]
@@ -345,86 +329,6 @@ impl BasicCoverageBlockData {
         &mir_body[self.last_bb()].terminator()
     }

-    pub fn set_counter(
-        &mut self,
-        counter_kind: CoverageKind,
-    ) -> Result<ExpressionOperandId, Error> {
-        debug_assert!(
-            // If the BCB has an edge counter (to be injected into a new `BasicBlock`), it can also
-            // have an expression (to be injected into an existing `BasicBlock` represented by this
-            // `BasicCoverageBlock`).
-            self.edge_from_bcbs.is_none() || counter_kind.is_expression(),
-            "attempt to add a `Counter` to a BCB target with existing incoming edge counters"
-        );
-        let operand = counter_kind.as_operand_id();
-        if let Some(replaced) = self.counter_kind.replace(counter_kind) {
-            Error::from_string(format!(
-                "attempt to set a BasicCoverageBlock coverage counter more than once; \
-                {:?} already had counter {:?}",
-                self, replaced,
-            ))
-        } else {
-            Ok(operand)
-        }
-    }
-
-    #[inline(always)]
-    pub fn counter(&self) -> Option<&CoverageKind> {
-        self.counter_kind.as_ref()
-    }
-
-    #[inline(always)]
-    pub fn take_counter(&mut self) -> Option<CoverageKind> {
-        self.counter_kind.take()
-    }
-
-    pub fn set_edge_counter_from(
-        &mut self,
-        from_bcb: BasicCoverageBlock,
-        counter_kind: CoverageKind,
-    ) -> Result<ExpressionOperandId, Error> {
-        if level_enabled!(tracing::Level::DEBUG) {
-            // If the BCB has an edge counter (to be injected into a new `BasicBlock`), it can also
-            // have an expression (to be injected into an existing `BasicBlock` represented by this
-            // `BasicCoverageBlock`).
- if self.counter_kind.as_ref().is_some_and(|c| !c.is_expression()) { - return Error::from_string(format!( - "attempt to add an incoming edge counter from {:?} when the target BCB already \ - has a `Counter`", - from_bcb - )); - } - } - let operand = counter_kind.as_operand_id(); - if let Some(replaced) = - self.edge_from_bcbs.get_or_insert_default().insert(from_bcb, counter_kind) - { - Error::from_string(format!( - "attempt to set an edge counter more than once; from_bcb: \ - {:?} already had counter {:?}", - from_bcb, replaced, - )) - } else { - Ok(operand) - } - } - - #[inline] - pub fn edge_counter_from(&self, from_bcb: BasicCoverageBlock) -> Option<&CoverageKind> { - if let Some(edge_from_bcbs) = &self.edge_from_bcbs { - edge_from_bcbs.get(&from_bcb) - } else { - None - } - } - - #[inline] - pub fn take_edge_counters( - &mut self, - ) -> Option<impl Iterator<Item = (BasicCoverageBlock, CoverageKind)>> { - self.edge_from_bcbs.take().map(|m| m.into_iter()) - } - pub fn id(&self) -> String { format!("@{}", self.basic_blocks.iter().map(|bb| bb.index().to_string()).join(ID_SEPARATOR)) } @@ -454,17 +358,6 @@ impl BcbBranch { Self { edge_from_bcb, target_bcb: to_bcb } } - pub fn counter<'a>( - &self, - basic_coverage_blocks: &'a CoverageGraph, - ) -> Option<&'a CoverageKind> { - if let Some(from_bcb) = self.edge_from_bcb { - basic_coverage_blocks[self.target_bcb].edge_counter_from(from_bcb) - } else { - basic_coverage_blocks[self.target_bcb].counter() - } - } - pub fn is_only_path_to_target(&self) -> bool { self.edge_from_bcb.is_none() } @@ -612,7 +505,7 @@ impl TraverseCoverageGraphWithLoops { the {}", successor_to_add, if let Some(loop_header) = some_loop_header { - format!("worklist for the loop headed by {:?}", loop_header) + format!("worklist for the loop headed by {loop_header:?}") } else { String::from("non-loop worklist") }, @@ -623,7 +516,7 @@ impl TraverseCoverageGraphWithLoops { "{:?} successor is non-branching. 
Defer it to the end of the {}", successor_to_add, if let Some(loop_header) = some_loop_header { - format!("worklist for the loop headed by {:?}", loop_header) + format!("worklist for the loop headed by {loop_header:?}") } else { String::from("non-loop worklist") }, diff --git a/compiler/rustc_mir_transform/src/coverage/mod.rs b/compiler/rustc_mir_transform/src/coverage/mod.rs index 076e714d7..8c9eae508 100644 --- a/compiler/rustc_mir_transform/src/coverage/mod.rs +++ b/compiler/rustc_mir_transform/src/coverage/mod.rs @@ -8,9 +8,9 @@ mod spans; #[cfg(test)] mod tests; -use counters::CoverageCounters; -use graph::{BasicCoverageBlock, BasicCoverageBlockData, CoverageGraph}; -use spans::{CoverageSpan, CoverageSpans}; +use self::counters::{BcbCounter, CoverageCounters}; +use self::graph::{BasicCoverageBlock, BasicCoverageBlockData, CoverageGraph}; +use self::spans::{CoverageSpan, CoverageSpans}; use crate::MirPass; @@ -106,6 +106,7 @@ struct Instrumentor<'a, 'tcx> { source_file: Lrc<SourceFile>, fn_sig_span: Span, body_span: Span, + function_source_hash: u64, basic_coverage_blocks: CoverageGraph, coverage_counters: CoverageCounters, } @@ -137,6 +138,8 @@ impl<'a, 'tcx> Instrumentor<'a, 'tcx> { let function_source_hash = hash_mir_source(tcx, hir_body); let basic_coverage_blocks = CoverageGraph::from_mir(mir_body); + let coverage_counters = CoverageCounters::new(&basic_coverage_blocks); + Self { pass_name, tcx, @@ -144,8 +147,9 @@ impl<'a, 'tcx> Instrumentor<'a, 'tcx> { source_file, fn_sig_span, body_span, + function_source_hash, basic_coverage_blocks, - coverage_counters: CoverageCounters::new(function_source_hash), + coverage_counters, } } @@ -199,52 +203,47 @@ impl<'a, 'tcx> Instrumentor<'a, 'tcx> { // `BasicCoverageBlock`s not already associated with a `CoverageSpan`. // // Intermediate expressions (used to compute other `Expression` values), which have no - // direct associate to any `BasicCoverageBlock`, are returned in the method `Result`. - let intermediate_expressions_or_error = self + // direct association with any `BasicCoverageBlock`, are accumulated inside `coverage_counters`. + let result = self .coverage_counters .make_bcb_counters(&mut self.basic_coverage_blocks, &coverage_spans); - let (result, intermediate_expressions) = match intermediate_expressions_or_error { - Ok(intermediate_expressions) => { - // If debugging, add any intermediate expressions (which are not associated with any - // BCB) to the `debug_used_expressions` map. - if debug_used_expressions.is_enabled() { - for intermediate_expression in &intermediate_expressions { - debug_used_expressions.add_expression_operands(intermediate_expression); - } + if let Ok(()) = result { + // If debugging, add any intermediate expressions (which are not associated with any + // BCB) to the `debug_used_expressions` map. + if debug_used_expressions.is_enabled() { + for intermediate_expression in &self.coverage_counters.intermediate_expressions { + debug_used_expressions.add_expression_operands(intermediate_expression); } - - //////////////////////////////////////////////////// - // Remove the counter or edge counter from of each `CoverageSpan`s associated - // `BasicCoverageBlock`, and inject a `Coverage` statement into the MIR. - // - // `Coverage` statements injected from `CoverageSpan`s will include the code regions - // (source code start and end positions) to be counted by the associated counter. 
- // - // These `CoverageSpan`-associated counters are removed from their associated - // `BasicCoverageBlock`s so that the only remaining counters in the `CoverageGraph` - // are indirect counters (to be injected next, without associated code regions). - self.inject_coverage_span_counters( - coverage_spans, - &mut graphviz_data, - &mut debug_used_expressions, - ); - - //////////////////////////////////////////////////// - // For any remaining `BasicCoverageBlock` counters (that were not associated with - // any `CoverageSpan`), inject `Coverage` statements (_without_ code region `Span`s) - // to ensure `BasicCoverageBlock` counters that other `Expression`s may depend on - // are in fact counted, even though they don't directly contribute to counting - // their own independent code region's coverage. - self.inject_indirect_counters(&mut graphviz_data, &mut debug_used_expressions); - - // Intermediate expressions will be injected as the final step, after generating - // debug output, if any. - //////////////////////////////////////////////////// - - (Ok(()), intermediate_expressions) } - Err(e) => (Err(e), Vec::new()), + + //////////////////////////////////////////////////// + // Remove the counter or edge counter from of each `CoverageSpan`s associated + // `BasicCoverageBlock`, and inject a `Coverage` statement into the MIR. + // + // `Coverage` statements injected from `CoverageSpan`s will include the code regions + // (source code start and end positions) to be counted by the associated counter. + // + // These `CoverageSpan`-associated counters are removed from their associated + // `BasicCoverageBlock`s so that the only remaining counters in the `CoverageGraph` + // are indirect counters (to be injected next, without associated code regions). + self.inject_coverage_span_counters( + coverage_spans, + &mut graphviz_data, + &mut debug_used_expressions, + ); + + //////////////////////////////////////////////////// + // For any remaining `BasicCoverageBlock` counters (that were not associated with + // any `CoverageSpan`), inject `Coverage` statements (_without_ code region `Span`s) + // to ensure `BasicCoverageBlock` counters that other `Expression`s may depend on + // are in fact counted, even though they don't directly contribute to counting + // their own independent code region's coverage. + self.inject_indirect_counters(&mut graphviz_data, &mut debug_used_expressions); + + // Intermediate expressions will be injected as the final step, after generating + // debug output, if any. + //////////////////////////////////////////////////// }; if graphviz_data.is_enabled() { @@ -255,9 +254,9 @@ impl<'a, 'tcx> Instrumentor<'a, 'tcx> { self.mir_body, self.pass_name, &self.basic_coverage_blocks, - &self.coverage_counters.debug_counters, + &self.coverage_counters, &graphviz_data, - &intermediate_expressions, + &self.coverage_counters.intermediate_expressions, &debug_used_expressions, ); } @@ -273,8 +272,11 @@ impl<'a, 'tcx> Instrumentor<'a, 'tcx> { //////////////////////////////////////////////////// // Finally, inject the intermediate expressions collected along the way. 
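With counters now being pass-local `BcbCounter`s, conversion to the MIR-level `CoverageKind` happens only at injection time, when the instrumentor attaches the `function_source_hash` it now stores (the `make_mir_coverage_kind` helper used in the hunks below). A rough sketch of that translation, with hypothetical simplified ID and operand types in place of the compiler's `CounterId`/`Operand` machinery:

```rust
#![allow(dead_code)]

#[derive(Debug, Clone, Copy)]
enum Op {
    Add,
    Subtract,
}

/// Pass-local counter: knows nothing about the enclosing function.
#[derive(Debug)]
enum BcbCounter {
    Counter { id: u32 },
    Expression { id: u32, lhs: u32, op: Op, rhs: u32 },
}

/// MIR-level kind: physical counters carry the hash tying them to this
/// function's source text; derived expressions do not need it.
#[derive(Debug)]
enum CoverageKind {
    Counter { function_source_hash: u64, id: u32 },
    Expression { id: u32, lhs: u32, op: Op, rhs: u32 },
}

fn make_mir_coverage_kind(function_source_hash: u64, counter: &BcbCounter) -> CoverageKind {
    match *counter {
        BcbCounter::Counter { id } => CoverageKind::Counter { function_source_hash, id },
        BcbCounter::Expression { id, lhs, op, rhs } => {
            CoverageKind::Expression { id, lhs, op, rhs }
        }
    }
}

fn main() {
    let counter = BcbCounter::Counter { id: 0 };
    println!("{:?}", make_mir_coverage_kind(0xABCD_1234, &counter));
}
```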
- for intermediate_expression in intermediate_expressions { - inject_intermediate_expression(self.mir_body, intermediate_expression); + for intermediate_expression in &self.coverage_counters.intermediate_expressions { + inject_intermediate_expression( + self.mir_body, + self.make_mir_coverage_kind(intermediate_expression), + ); } } @@ -303,8 +305,8 @@ impl<'a, 'tcx> Instrumentor<'a, 'tcx> { let span = covspan.span; let counter_kind = if let Some(&counter_operand) = bcb_counters[bcb].as_ref() { self.coverage_counters.make_identity_counter(counter_operand) - } else if let Some(counter_kind) = self.bcb_data_mut(bcb).take_counter() { - bcb_counters[bcb] = Some(counter_kind.as_operand_id()); + } else if let Some(counter_kind) = self.coverage_counters.take_bcb_counter(bcb) { + bcb_counters[bcb] = Some(counter_kind.as_operand()); debug_used_expressions.add_expression_operands(&counter_kind); counter_kind } else { @@ -312,19 +314,14 @@ impl<'a, 'tcx> Instrumentor<'a, 'tcx> { }; graphviz_data.add_bcb_coverage_span_with_counter(bcb, &covspan, &counter_kind); - debug!( - "Calling make_code_region(file_name={}, source_file={:?}, span={}, body_span={})", - file_name, - self.source_file, - source_map.span_to_diagnostic_string(span), - source_map.span_to_diagnostic_string(body_span) - ); + let code_region = + make_code_region(source_map, file_name, &self.source_file, span, body_span); inject_statement( self.mir_body, - counter_kind, + self.make_mir_coverage_kind(&counter_kind), self.bcb_leader_bb(bcb), - Some(make_code_region(source_map, file_name, &self.source_file, span, body_span)), + Some(code_region), ); } } @@ -343,19 +340,17 @@ impl<'a, 'tcx> Instrumentor<'a, 'tcx> { debug_used_expressions: &mut debug::UsedExpressions, ) { let mut bcb_counters_without_direct_coverage_spans = Vec::new(); - for (target_bcb, target_bcb_data) in self.basic_coverage_blocks.iter_enumerated_mut() { - if let Some(counter_kind) = target_bcb_data.take_counter() { - bcb_counters_without_direct_coverage_spans.push((None, target_bcb, counter_kind)); - } - if let Some(edge_counters) = target_bcb_data.take_edge_counters() { - for (from_bcb, counter_kind) in edge_counters { - bcb_counters_without_direct_coverage_spans.push(( - Some(from_bcb), - target_bcb, - counter_kind, - )); - } - } + for (target_bcb, counter_kind) in self.coverage_counters.drain_bcb_counters() { + bcb_counters_without_direct_coverage_spans.push((None, target_bcb, counter_kind)); + } + for ((from_bcb, target_bcb), counter_kind) in + self.coverage_counters.drain_bcb_edge_counters() + { + bcb_counters_without_direct_coverage_spans.push(( + Some(from_bcb), + target_bcb, + counter_kind, + )); } // If debug is enabled, validate that every BCB or edge counter not directly associated @@ -372,7 +367,7 @@ impl<'a, 'tcx> Instrumentor<'a, 'tcx> { ); match counter_kind { - CoverageKind::Counter { .. } => { + BcbCounter::Counter { .. } => { let inject_to_bb = if let Some(from_bcb) = edge_from_bcb { // The MIR edge starts `from_bb` (the outgoing / last BasicBlock in // `from_bcb`) and ends at `to_bb` (the incoming / first BasicBlock in the @@ -405,12 +400,17 @@ impl<'a, 'tcx> Instrumentor<'a, 'tcx> { target_bb }; - inject_statement(self.mir_body, counter_kind, inject_to_bb, None); + inject_statement( + self.mir_body, + self.make_mir_coverage_kind(&counter_kind), + inject_to_bb, + None, + ); } - CoverageKind::Expression { .. } => { - inject_intermediate_expression(self.mir_body, counter_kind) - } - _ => bug!("CoverageKind should be a counter"), + BcbCounter::Expression { .. 
} => inject_intermediate_expression( + self.mir_body, + self.make_mir_coverage_kind(&counter_kind), + ), } } } @@ -431,13 +431,19 @@ impl<'a, 'tcx> Instrumentor<'a, 'tcx> { } #[inline] - fn bcb_data_mut(&mut self, bcb: BasicCoverageBlock) -> &mut BasicCoverageBlockData { - &mut self.basic_coverage_blocks[bcb] + fn format_counter(&self, counter_kind: &BcbCounter) -> String { + self.coverage_counters.debug_counters.format_counter(counter_kind) } - #[inline] - fn format_counter(&self, counter_kind: &CoverageKind) -> String { - self.coverage_counters.debug_counters.format_counter(counter_kind) + fn make_mir_coverage_kind(&self, counter_kind: &BcbCounter) -> CoverageKind { + match *counter_kind { + BcbCounter::Counter { id } => { + CoverageKind::Counter { function_source_hash: self.function_source_hash, id } + } + BcbCounter::Expression { id, lhs, op, rhs } => { + CoverageKind::Expression { id, lhs, op, rhs } + } + } } } @@ -508,6 +514,14 @@ fn make_code_region( span: Span, body_span: Span, ) -> CodeRegion { + debug!( + "Called make_code_region(file_name={}, source_file={:?}, span={}, body_span={})", + file_name, + source_file, + source_map.span_to_diagnostic_string(span), + source_map.span_to_diagnostic_string(body_span) + ); + let (start_line, mut start_col) = source_file.lookup_file_pos(span.lo()); let (end_line, end_col) = if span.hi() == span.lo() { let (end_line, mut end_col) = (start_line, start_col); diff --git a/compiler/rustc_mir_transform/src/coverage/query.rs b/compiler/rustc_mir_transform/src/coverage/query.rs index 74b4b4a07..aa205655f 100644 --- a/compiler/rustc_mir_transform/src/coverage/query.rs +++ b/compiler/rustc_mir_transform/src/coverage/query.rs @@ -43,43 +43,25 @@ struct CoverageVisitor { } impl CoverageVisitor { - /// Updates `num_counters` to the maximum encountered zero-based counter_id plus 1. Note the - /// final computed number of counters should be the number of all `CoverageKind::Counter` - /// statements in the MIR *plus one* for the implicit `ZERO` counter. + /// Updates `num_counters` to the maximum encountered counter ID plus 1. #[inline(always)] - fn update_num_counters(&mut self, counter_id: u32) { + fn update_num_counters(&mut self, counter_id: CounterId) { + let counter_id = counter_id.as_u32(); self.info.num_counters = std::cmp::max(self.info.num_counters, counter_id + 1); } - /// Computes an expression index for each expression ID, and updates `num_expressions` to the - /// maximum encountered index plus 1. + /// Updates `num_expressions` to the maximum encountered expression ID plus 1. #[inline(always)] - fn update_num_expressions(&mut self, expression_id: u32) { - let expression_index = u32::MAX - expression_id; - self.info.num_expressions = std::cmp::max(self.info.num_expressions, expression_index + 1); + fn update_num_expressions(&mut self, expression_id: ExpressionId) { + let expression_id = expression_id.as_u32(); + self.info.num_expressions = std::cmp::max(self.info.num_expressions, expression_id + 1); } - fn update_from_expression_operand(&mut self, operand_id: u32) { - if operand_id >= self.info.num_counters { - let operand_as_expression_index = u32::MAX - operand_id; - if operand_as_expression_index >= self.info.num_expressions { - // The operand ID is outside the known range of counter IDs and also outside the - // known range of expression IDs. 
In either case, the result of a missing operand - // (if and when used in an expression) will be zero, so from a computation - // perspective, it doesn't matter whether it is interpreted as a counter or an - // expression. - // - // However, the `num_counters` and `num_expressions` query results are used to - // allocate arrays when generating the coverage map (during codegen), so choose - // the type that grows either `num_counters` or `num_expressions` the least. - if operand_id - self.info.num_counters - < operand_as_expression_index - self.info.num_expressions - { - self.update_num_counters(operand_id) - } else { - self.update_num_expressions(operand_id) - } - } + fn update_from_expression_operand(&mut self, operand: Operand) { + match operand { + Operand::Counter(id) => self.update_num_counters(id), + Operand::Expression(id) => self.update_num_expressions(id), + Operand::Zero => {} } } @@ -100,19 +82,15 @@ impl CoverageVisitor { if self.add_missing_operands { match coverage.kind { CoverageKind::Expression { lhs, rhs, .. } => { - self.update_from_expression_operand(u32::from(lhs)); - self.update_from_expression_operand(u32::from(rhs)); + self.update_from_expression_operand(lhs); + self.update_from_expression_operand(rhs); } _ => {} } } else { match coverage.kind { - CoverageKind::Counter { id, .. } => { - self.update_num_counters(u32::from(id)); - } - CoverageKind::Expression { id, .. } => { - self.update_num_expressions(u32::from(id)); - } + CoverageKind::Counter { id, .. } => self.update_num_counters(id), + CoverageKind::Expression { id, .. } => self.update_num_expressions(id), _ => {} } } @@ -123,8 +101,7 @@ fn coverageinfo<'tcx>(tcx: TyCtxt<'tcx>, instance_def: ty::InstanceDef<'tcx>) -> let mir_body = tcx.instance_mir(instance_def); let mut coverage_visitor = CoverageVisitor { - // num_counters always has at least the `ZERO` counter. - info: CoverageInfo { num_counters: 1, num_expressions: 0 }, + info: CoverageInfo { num_counters: 0, num_expressions: 0 }, add_missing_operands: false, }; diff --git a/compiler/rustc_mir_transform/src/coverage/spans.rs b/compiler/rustc_mir_transform/src/coverage/spans.rs index 35cf9ea5f..deebf5345 100644 --- a/compiler/rustc_mir_transform/src/coverage/spans.rs +++ b/compiler/rustc_mir_transform/src/coverage/spans.rs @@ -11,7 +11,7 @@ use rustc_middle::ty::TyCtxt; use rustc_span::source_map::original_sp; use rustc_span::{BytePos, ExpnKind, MacroKind, Span, Symbol}; -use std::cell::RefCell; +use std::cell::OnceCell; use std::cmp::Ordering; #[derive(Debug, Copy, Clone)] @@ -67,7 +67,7 @@ impl CoverageStatement { pub(super) struct CoverageSpan { pub span: Span, pub expn_span: Span, - pub current_macro_or_none: RefCell<Option<Option<Symbol>>>, + pub current_macro_or_none: OnceCell<Option<Symbol>>, pub bcb: BasicCoverageBlock, pub coverage_statements: Vec<CoverageStatement>, pub is_closure: bool, @@ -175,8 +175,7 @@ impl CoverageSpan { /// If the span is part of a macro, returns the macro name symbol. 
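The spans.rs hunk starting here replaces `RefCell<Option<Option<Symbol>>>` with `OnceCell<Option<Symbol>>` (the `get_or_init` body follows just below). `OnceCell::get_or_init`, stable since Rust 1.70, expresses "compute once, then cache" directly: no runtime borrow flags, and no doubled `Option`, because a cached `None` is itself a valid answer. The same pattern in isolation, with a toy `Span` type and `String` standing in for `Symbol`:

```rust
use std::cell::OnceCell;

struct Span {
    text: &'static str,
    // Caches the result of an expensive lookup; `None` is a cacheable answer,
    // so no nested `Option<Option<_>>` is needed.
    current_macro_or_none: OnceCell<Option<String>>,
}

impl Span {
    fn current_macro(&self) -> Option<String> {
        self.current_macro_or_none
            .get_or_init(|| {
                println!("computing (runs only once)");
                self.text.strip_prefix("macro:").map(str::to_owned)
            })
            .clone()
    }
}

fn main() {
    let span = Span { text: "macro:vec", current_macro_or_none: OnceCell::new() };
    assert_eq!(span.current_macro(), Some("vec".to_owned())); // prints once
    assert_eq!(span.current_macro(), Some("vec".to_owned())); // cached, no print
}
```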
 pub fn current_macro(&self) -> Option<Symbol> {
     self.current_macro_or_none
-        .borrow_mut()
-        .get_or_insert_with(|| {
+        .get_or_init(|| {
             if let ExpnKind::Macro(MacroKind::Bang, current_macro) =
                 self.expn_span.ctxt().outer_expn_data().kind
             {
diff --git a/compiler/rustc_mir_transform/src/coverage/test_macros/src/lib.rs b/compiler/rustc_mir_transform/src/coverage/test_macros/src/lib.rs
index 3d6095d27..f41adf667 100644
--- a/compiler/rustc_mir_transform/src/coverage/test_macros/src/lib.rs
+++ b/compiler/rustc_mir_transform/src/coverage/test_macros/src/lib.rs
@@ -2,5 +2,5 @@ use proc_macro::TokenStream;
 
 #[proc_macro]
 pub fn let_bcb(item: TokenStream) -> TokenStream {
-    format!("let bcb{} = graph::BasicCoverageBlock::from_usize({});", item, item).parse().unwrap()
+    format!("let bcb{item} = graph::BasicCoverageBlock::from_usize({item});").parse().unwrap()
 }
diff --git a/compiler/rustc_mir_transform/src/coverage/tests.rs b/compiler/rustc_mir_transform/src/coverage/tests.rs
index 25891d3ca..4a066ed3a 100644
--- a/compiler/rustc_mir_transform/src/coverage/tests.rs
+++ b/compiler/rustc_mir_transform/src/coverage/tests.rs
@@ -34,7 +34,6 @@ use itertools::Itertools;
 use rustc_data_structures::graph::WithNumNodes;
 use rustc_data_structures::graph::WithSuccessors;
 use rustc_index::{Idx, IndexVec};
-use rustc_middle::mir::coverage::CoverageKind;
 use rustc_middle::mir::*;
 use rustc_middle::ty;
 use rustc_span::{self, BytePos, Pos, Span, DUMMY_SP};
@@ -675,17 +674,17 @@ fn test_make_bcb_counters() {
             ));
         }
     }
-    let mut coverage_counters = counters::CoverageCounters::new(0);
-    let intermediate_expressions = coverage_counters
+    let mut coverage_counters = counters::CoverageCounters::new(&basic_coverage_blocks);
+    coverage_counters
         .make_bcb_counters(&mut basic_coverage_blocks, &coverage_spans)
         .expect("should be Ok");
-    assert_eq!(intermediate_expressions.len(), 0);
+    assert_eq!(coverage_counters.intermediate_expressions.len(), 0);
 
     let_bcb!(1);
     assert_eq!(
-        1, // coincidentally, bcb1 has a `Counter` with id = 1
-        match basic_coverage_blocks[bcb1].counter().expect("should have a counter") {
-            CoverageKind::Counter { id, .. } => id,
+        0, // bcb1 has a `Counter` with id = 0
+        match coverage_counters.bcb_counter(bcb1).expect("should have a counter") {
+            counters::BcbCounter::Counter { id, .. } => id,
             _ => panic!("expected a Counter"),
         }
         .as_u32()
@@ -693,9 +692,9 @@
 
     let_bcb!(2);
     assert_eq!(
-        2, // coincidentally, bcb2 has a `Counter` with id = 2
-        match basic_coverage_blocks[bcb2].counter().expect("should have a counter") {
-            CoverageKind::Counter { id, .. } => id,
+        1, // bcb2 has a `Counter` with id = 1
+        match coverage_counters.bcb_counter(bcb2).expect("should have a counter") {
+            counters::BcbCounter::Counter { id, ..
} => id, _ => panic!("expected a Counter"), } .as_u32() diff --git a/compiler/rustc_mir_transform/src/dataflow_const_prop.rs b/compiler/rustc_mir_transform/src/dataflow_const_prop.rs index 78fb19635..8f4dc9f69 100644 --- a/compiler/rustc_mir_transform/src/dataflow_const_prop.rs +++ b/compiler/rustc_mir_transform/src/dataflow_const_prop.rs @@ -13,9 +13,7 @@ use rustc_middle::ty::{self, Ty, TyCtxt}; use rustc_mir_dataflow::value_analysis::{ Map, State, TrackElem, ValueAnalysis, ValueAnalysisWrapper, ValueOrPlace, }; -use rustc_mir_dataflow::{ - lattice::FlatSet, Analysis, Results, ResultsVisitor, SwitchIntEdgeEffects, -}; +use rustc_mir_dataflow::{lattice::FlatSet, Analysis, Results, ResultsVisitor}; use rustc_span::DUMMY_SP; use rustc_target::abi::{Align, FieldIdx, VariantIdx}; @@ -249,49 +247,27 @@ impl<'tcx> ValueAnalysis<'tcx> for ConstAnalysis<'_, 'tcx> { .unwrap_or(FlatSet::Top) } - fn handle_switch_int( + fn handle_switch_int<'mir>( &self, - discr: &Operand<'tcx>, - apply_edge_effects: &mut impl SwitchIntEdgeEffects<State<Self::Value>>, - ) { - // FIXME: The dataflow framework only provides the state if we call `apply()`, which makes - // this more inefficient than it has to be. - let mut discr_value = None; - let mut handled = false; - apply_edge_effects.apply(|state, target| { - let discr_value = match discr_value { - Some(value) => value, - None => { - let value = match self.handle_operand(discr, state) { - ValueOrPlace::Value(value) => value, - ValueOrPlace::Place(place) => state.get_idx(place, self.map()), - }; - let result = match value { - FlatSet::Top => FlatSet::Top, - FlatSet::Elem(ScalarTy(scalar, _)) => { - let int = scalar.assert_int(); - FlatSet::Elem(int.assert_bits(int.size())) - } - FlatSet::Bottom => FlatSet::Bottom, - }; - discr_value = Some(result); - result - } - }; - - let FlatSet::Elem(choice) = discr_value else { - // Do nothing if we don't know which branch will be taken. - return - }; - - if target.value.map(|n| n == choice).unwrap_or(!handled) { - // Branch is taken. Has no effect on state. - handled = true; - } else { - // Branch is not taken. - state.mark_unreachable(); + discr: &'mir Operand<'tcx>, + targets: &'mir SwitchTargets, + state: &mut State<Self::Value>, + ) -> TerminatorEdges<'mir, 'tcx> { + let value = match self.handle_operand(discr, state) { + ValueOrPlace::Value(value) => value, + ValueOrPlace::Place(place) => state.get_idx(place, self.map()), + }; + match value { + // We are branching on uninitialized data, this is UB, treat it as unreachable. + // This allows the set of visited edges to grow monotonically with the lattice. 
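The new `handle_switch_int` above returns the reachable edge set directly instead of mutating per-target state through a callback; its match arms continue in the lines just below. A self-contained sketch of the same dispatch, with simplified stand-in types for the flat lattice and the edge set:

```rust
/// Simplified stand-ins for the dataflow lattice and the terminator edge set.
#[derive(Clone, Copy, Debug, PartialEq)]
enum FlatSet {
    Bottom,     // no value reaches here (e.g. branching on uninitialized data)
    Elem(u128), // exactly one known discriminant value
    Top,        // unknown: any value is possible
}

#[derive(Debug, PartialEq)]
enum TerminatorEdges {
    None,                 // unreachable: visit no outgoing edge
    Single(&'static str), // only one switch target can be taken
    SwitchInt,            // all switch targets stay reachable
}

/// Map the abstract discriminant to the set of switch edges to visit.
fn handle_switch_int(
    discr: FlatSet,
    target_for_value: impl Fn(u128) -> &'static str,
) -> TerminatorEdges {
    match discr {
        FlatSet::Bottom => TerminatorEdges::None,
        FlatSet::Elem(value) => TerminatorEdges::Single(target_for_value(value)),
        FlatSet::Top => TerminatorEdges::SwitchInt,
    }
}

fn main() {
    let targets = |v: u128| if v == 0 { "bb1" } else { "bb2" };
    assert_eq!(handle_switch_int(FlatSet::Elem(0), targets), TerminatorEdges::Single("bb1"));
    assert_eq!(handle_switch_int(FlatSet::Top, targets), TerminatorEdges::SwitchInt);
    assert_eq!(handle_switch_int(FlatSet::Bottom, targets), TerminatorEdges::None);
}
```

Returning `None` for `Bottom` is what the comment above means by monotonic growth: an unreachable branch contributes no edges now, and can only gain edges as the lattice value rises toward `Top`.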
+ FlatSet::Bottom => TerminatorEdges::None, + FlatSet::Elem(ScalarTy(scalar, _)) => { + let int = scalar.assert_int(); + let choice = int.assert_bits(int.size()); + TerminatorEdges::Single(targets.target_for_value(choice)) } - }) + FlatSet::Top => TerminatorEdges::SwitchInt { discr, targets }, + } } } @@ -532,7 +508,7 @@ impl<'tcx, 'map, 'a> Visitor<'tcx> for OperandCollector<'tcx, 'map, 'a> { struct DummyMachine; -impl<'mir, 'tcx> rustc_const_eval::interpret::Machine<'mir, 'tcx> for DummyMachine { +impl<'mir, 'tcx: 'mir> rustc_const_eval::interpret::Machine<'mir, 'tcx> for DummyMachine { rustc_const_eval::interpret::compile_time_machine!(<'mir, 'tcx>); type MemoryKind = !; const PANIC_ON_ALLOC_FAIL: bool = true; @@ -557,7 +533,7 @@ impl<'mir, 'tcx> rustc_const_eval::interpret::Machine<'mir, 'tcx> for DummyMachi _ecx: &mut InterpCx<'mir, 'tcx, Self>, _instance: ty::Instance<'tcx>, _abi: rustc_target::spec::abi::Abi, - _args: &[rustc_const_eval::interpret::OpTy<'tcx, Self::Provenance>], + _args: &[rustc_const_eval::interpret::FnArg<'tcx, Self::Provenance>], _destination: &rustc_const_eval::interpret::PlaceTy<'tcx, Self::Provenance>, _target: Option<BasicBlock>, _unwind: UnwindAction, diff --git a/compiler/rustc_mir_transform/src/dead_store_elimination.rs b/compiler/rustc_mir_transform/src/dead_store_elimination.rs index 7bc5183a0..3f988930b 100644 --- a/compiler/rustc_mir_transform/src/dead_store_elimination.rs +++ b/compiler/rustc_mir_transform/src/dead_store_elimination.rs @@ -13,9 +13,12 @@ //! use rustc_index::bit_set::BitSet; +use rustc_middle::mir::visit::Visitor; use rustc_middle::mir::*; use rustc_middle::ty::TyCtxt; -use rustc_mir_dataflow::impls::{borrowed_locals, MaybeTransitiveLiveLocals}; +use rustc_mir_dataflow::impls::{ + borrowed_locals, LivenessTransferFunction, MaybeTransitiveLiveLocals, +}; use rustc_mir_dataflow::Analysis; /// Performs the optimization on the body @@ -28,8 +31,33 @@ pub fn eliminate<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>, borrowed: &BitS .iterate_to_fixpoint() .into_results_cursor(body); + // For blocks with a call terminator, if an argument copy can be turned into a move, + // record it as (block, argument index). + let mut call_operands_to_move = Vec::new(); let mut patch = Vec::new(); + for (bb, bb_data) in traversal::preorder(body) { + if let TerminatorKind::Call { ref args, .. } = bb_data.terminator().kind { + let loc = Location { block: bb, statement_index: bb_data.statements.len() }; + + // Position ourselves between the evaluation of `args` and the write to `destination`. + live.seek_to_block_end(bb); + let mut state = live.get().clone(); + + for (index, arg) in args.iter().enumerate().rev() { + if let Operand::Copy(place) = *arg + && !place.is_indirect() + && !borrowed.contains(place.local) + && !state.contains(place.local) + { + call_operands_to_move.push((bb, index)); + } + + // Account that `arg` is read from, so we don't promote another argument to a move. 
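The dead-store-elimination hunk above (continuing below) walks a call's arguments in reverse with the block-exit liveness state: a `Copy` of a local that is not borrowed and not live after the call can be demoted to a `Move`, and each argument is then marked live so an earlier use of the same local is not also promoted. The same reverse-scan logic on a toy representation (hypothetical names, locals only, with a plain `HashSet` as the liveness state; the real pass also rules out indirect places):

```rust
use std::collections::HashSet;

type Local = u32;

#[derive(Debug, PartialEq)]
enum Operand {
    Copy(Local),
    Move(Local),
}

/// Promote `Copy` args to `Move` when the local is dead after the call and not
/// borrowed. `live` starts as the liveness state at the end of the block.
fn promote_copies_to_moves(args: &mut [Operand], borrowed: &HashSet<Local>, mut live: HashSet<Local>) {
    for arg in args.iter_mut().rev() {
        let local = match *arg {
            Operand::Copy(l) | Operand::Move(l) => l,
        };
        if let Operand::Copy(l) = *arg {
            if !borrowed.contains(&l) && !live.contains(&l) {
                *arg = Operand::Move(l);
            }
        }
        // The argument is read here, so earlier (leftward) uses must stay copies.
        live.insert(local);
    }
}

fn main() {
    // `f(copy a, copy a)`: only the *last* use of `a` may become a move.
    let mut args = vec![Operand::Copy(0), Operand::Copy(0)];
    promote_copies_to_moves(&mut args, &HashSet::new(), HashSet::new());
    assert_eq!(args, vec![Operand::Copy(0), Operand::Move(0)]);
}
```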
+ LivenessTransferFunction(&mut state).visit_operand(arg, loc); + } + } + for (statement_index, statement) in bb_data.statements.iter().enumerate().rev() { let loc = Location { block: bb, statement_index }; if let StatementKind::Assign(assign) = &statement.kind { @@ -64,7 +92,7 @@ pub fn eliminate<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>, borrowed: &BitS } } - if patch.is_empty() { + if patch.is_empty() && call_operands_to_move.is_empty() { return; } @@ -72,6 +100,14 @@ pub fn eliminate<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>, borrowed: &BitS for Location { block, statement_index } in patch { bbs[block].statements[statement_index].make_nop(); } + for (block, argument_index) in call_operands_to_move { + let TerminatorKind::Call { ref mut args, .. } = bbs[block].terminator_mut().kind else { + bug!() + }; + let arg = &mut args[argument_index]; + let Operand::Copy(place) = *arg else { bug!() }; + *arg = Operand::Move(place); + } crate::simplify::simplify_locals(body, tcx) } diff --git a/compiler/rustc_mir_transform/src/deduce_param_attrs.rs b/compiler/rustc_mir_transform/src/deduce_param_attrs.rs index e782c0373..79645310a 100644 --- a/compiler/rustc_mir_transform/src/deduce_param_attrs.rs +++ b/compiler/rustc_mir_transform/src/deduce_param_attrs.rs @@ -166,7 +166,7 @@ pub fn deduced_param_attrs<'tcx>( // Codegen won't use this information for anything if all the function parameters are passed // directly. Detect that and bail, for compilation speed. - let fn_ty = tcx.type_of(def_id).subst_identity(); + let fn_ty = tcx.type_of(def_id).instantiate_identity(); if matches!(fn_ty.kind(), ty::FnDef(..)) { if fn_ty .fn_sig(tcx) diff --git a/compiler/rustc_mir_transform/src/dest_prop.rs b/compiler/rustc_mir_transform/src/dest_prop.rs index a31551cf6..b73b72c31 100644 --- a/compiler/rustc_mir_transform/src/dest_prop.rs +++ b/compiler/rustc_mir_transform/src/dest_prop.rs @@ -218,9 +218,9 @@ impl<'tcx> MirPass<'tcx> for DestinationPropagation { if merged_locals.contains(*src) { continue; } - let Some(dest) = - candidates.iter().find(|dest| !merged_locals.contains(**dest)) else { - continue; + let Some(dest) = candidates.iter().find(|dest| !merged_locals.contains(**dest)) + else { + continue; }; if !tcx.consider_optimizing(|| { format!("{} round {}", tcx.def_path_str(def_id), round_count) @@ -601,9 +601,7 @@ impl WriteInfo { rhs: &Operand<'tcx>, body: &Body<'tcx>, ) { - let Some(rhs) = rhs.place() else { - return - }; + let Some(rhs) = rhs.place() else { return }; if let Some(pair) = places_to_candidate_pair(lhs, rhs, body) { self.skip_pair = Some(pair); } diff --git a/compiler/rustc_mir_transform/src/early_otherwise_branch.rs b/compiler/rustc_mir_transform/src/early_otherwise_branch.rs index 8a7b027dd..319fb4eaf 100644 --- a/compiler/rustc_mir_transform/src/early_otherwise_branch.rs +++ b/compiler/rustc_mir_transform/src/early_otherwise_branch.rs @@ -107,9 +107,7 @@ impl<'tcx> MirPass<'tcx> for EarlyOtherwiseBranch { for i in 0..body.basic_blocks.len() { let bbs = &*body.basic_blocks; let parent = BasicBlock::from_usize(i); - let Some(opt_data) = evaluate_candidate(tcx, body, parent) else { - continue - }; + let Some(opt_data) = evaluate_candidate(tcx, body, parent) else { continue }; if !tcx.consider_optimizing(|| format!("EarlyOtherwiseBranch {:?}", &opt_data)) { break; @@ -119,10 +117,9 @@ impl<'tcx> MirPass<'tcx> for EarlyOtherwiseBranch { should_cleanup = true; - let TerminatorKind::SwitchInt { - discr: parent_op, - targets: parent_targets - } = &bbs[parent].terminator().kind else { + 
let TerminatorKind::SwitchInt { discr: parent_op, targets: parent_targets } = + &bbs[parent].terminator().kind + else { unreachable!() }; // Always correct since we can only switch on `Copy` types @@ -168,7 +165,8 @@ impl<'tcx> MirPass<'tcx> for EarlyOtherwiseBranch { ); let eq_new_targets = parent_targets.iter().map(|(value, child)| { - let TerminatorKind::SwitchInt{ targets, .. } = &bbs[child].terminator().kind else { + let TerminatorKind::SwitchInt { targets, .. } = &bbs[child].terminator().kind + else { unreachable!() }; (value, targets.target_for_value(value)) @@ -311,11 +309,9 @@ fn evaluate_candidate<'tcx>( parent: BasicBlock, ) -> Option<OptimizationData<'tcx>> { let bbs = &body.basic_blocks; - let TerminatorKind::SwitchInt { - targets, - discr: parent_discr, - } = &bbs[parent].terminator().kind else { - return None + let TerminatorKind::SwitchInt { targets, discr: parent_discr } = &bbs[parent].terminator().kind + else { + return None; }; let parent_ty = parent_discr.ty(body.local_decls(), tcx); let parent_dest = { @@ -332,18 +328,16 @@ fn evaluate_candidate<'tcx>( }; let (_, child) = targets.iter().next()?; let child_terminator = &bbs[child].terminator(); - let TerminatorKind::SwitchInt { - targets: child_targets, - discr: child_discr, - } = &child_terminator.kind else { - return None + let TerminatorKind::SwitchInt { targets: child_targets, discr: child_discr } = + &child_terminator.kind + else { + return None; }; let child_ty = child_discr.ty(body.local_decls(), tcx); if child_ty != parent_ty { return None; } - let Some(StatementKind::Assign(boxed)) - = &bbs[child].statements.first().map(|x| &x.kind) else { + let Some(StatementKind::Assign(boxed)) = &bbs[child].statements.first().map(|x| &x.kind) else { return None; }; let (_, Rvalue::Discriminant(child_place)) = &**boxed else { @@ -383,12 +377,8 @@ fn verify_candidate_branch<'tcx>( return false; } // ...assign the discriminant of `place` in that statement - let StatementKind::Assign(boxed) = &branch.statements[0].kind else { - return false - }; - let (discr_place, Rvalue::Discriminant(from_place)) = &**boxed else { - return false - }; + let StatementKind::Assign(boxed) = &branch.statements[0].kind else { return false }; + let (discr_place, Rvalue::Discriminant(from_place)) = &**boxed else { return false }; if *from_place != place { return false; } @@ -397,8 +387,9 @@ fn verify_candidate_branch<'tcx>( return false; } // ...terminate on a `SwitchInt` that invalidates that local - let TerminatorKind::SwitchInt{ discr: switch_op, targets, .. } = &branch.terminator().kind else { - return false + let TerminatorKind::SwitchInt { discr: switch_op, targets, .. 
} = &branch.terminator().kind + else { + return false; }; if *switch_op != Operand::Move(*discr_place) { return false; diff --git a/compiler/rustc_mir_transform/src/elaborate_box_derefs.rs b/compiler/rustc_mir_transform/src/elaborate_box_derefs.rs index cc0d7d51b..e51f771e0 100644 --- a/compiler/rustc_mir_transform/src/elaborate_box_derefs.rs +++ b/compiler/rustc_mir_transform/src/elaborate_box_derefs.rs @@ -18,9 +18,9 @@ pub fn build_ptr_tys<'tcx>( unique_did: DefId, nonnull_did: DefId, ) -> (Ty<'tcx>, Ty<'tcx>, Ty<'tcx>) { - let substs = tcx.mk_substs(&[pointee.into()]); - let unique_ty = tcx.type_of(unique_did).subst(tcx, substs); - let nonnull_ty = tcx.type_of(nonnull_did).subst(tcx, substs); + let args = tcx.mk_args(&[pointee.into()]); + let unique_ty = tcx.type_of(unique_did).instantiate(tcx, args); + let nonnull_ty = tcx.type_of(nonnull_did).instantiate(tcx, args); let ptr_ty = Ty::new_imm_ptr(tcx, pointee); (unique_ty, nonnull_ty, ptr_ty) @@ -95,7 +95,8 @@ impl<'tcx> MirPass<'tcx> for ElaborateBoxDerefs { let unique_did = tcx.adt_def(def_id).non_enum_variant().fields[FieldIdx::from_u32(0)].did; - let Some(nonnull_def) = tcx.type_of(unique_did).subst_identity().ty_adt_def() else { + let Some(nonnull_def) = tcx.type_of(unique_did).instantiate_identity().ty_adt_def() + else { span_bug!(tcx.def_span(unique_did), "expected Box to contain Unique") }; diff --git a/compiler/rustc_mir_transform/src/elaborate_drops.rs b/compiler/rustc_mir_transform/src/elaborate_drops.rs index d5664e2b4..b6b1ae6d3 100644 --- a/compiler/rustc_mir_transform/src/elaborate_drops.rs +++ b/compiler/rustc_mir_transform/src/elaborate_drops.rs @@ -48,6 +48,7 @@ use std::fmt; pub struct ElaborateDrops; impl<'tcx> MirPass<'tcx> for ElaborateDrops { + #[instrument(level = "trace", skip(self, tcx, body))] fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) { debug!("elaborate_drops({:?} @ {:?})", body.source, body.span); @@ -65,23 +66,23 @@ impl<'tcx> MirPass<'tcx> for ElaborateDrops { }; let elaborate_patch = { let env = MoveDataParamEnv { move_data, param_env }; - remove_dead_unwinds(tcx, body, &env); - let inits = MaybeInitializedPlaces::new(tcx, body, &env) + let mut inits = MaybeInitializedPlaces::new(tcx, body, &env) + .skipping_unreachable_unwind() .into_engine(tcx, body) .pass_name("elaborate_drops") .iterate_to_fixpoint() .into_results_cursor(body); + let dead_unwinds = compute_dead_unwinds(&body, &mut inits); let uninits = MaybeUninitializedPlaces::new(tcx, body, &env) .mark_inactive_variants_as_uninit() + .skipping_unreachable_unwind(dead_unwinds) .into_engine(tcx, body) .pass_name("elaborate_drops") .iterate_to_fixpoint() .into_results_cursor(body); - let reachable = traversal::reachable_as_bitset(body); - let drop_flags = IndexVec::from_elem(None, &env.move_data.move_paths); ElaborateDropsCtxt { tcx, @@ -90,7 +91,6 @@ impl<'tcx> MirPass<'tcx> for ElaborateDrops { init_data: InitializationData { inits, uninits }, drop_flags, patch: MirPatch::new(body), - reachable, } .elaborate() }; @@ -99,65 +99,30 @@ impl<'tcx> MirPass<'tcx> for ElaborateDrops { } } -/// Removes unwind edges which are known to be unreachable, because they are in `drop` terminators +/// Records unwind edges which are known to be unreachable, because they are in `drop` terminators /// that can't drop anything. 
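Instead of rewriting unwind edges in place, the pass now just records which `Drop` terminators cannot actually run drop glue (their place is definitely uninitialized at that point) and hands that set to the uninitializedness analysis. A toy version of the collection step defined just below, assuming a precomputed maybe-initialized set per block and string place names:

```rust
use std::collections::{HashMap, HashSet};

type BasicBlock = usize;
type Place = &'static str;

enum Terminator {
    DropWithCleanup(Place), // `Drop { place, unwind: Cleanup(_) }`
    Other,
}

/// Blocks whose drop can't run glue (place definitely uninitialized),
/// so their unwind edge is dead.
fn compute_dead_unwinds(
    blocks: &[(BasicBlock, Terminator)],
    maybe_init_at_drop: &HashMap<BasicBlock, HashSet<Place>>,
) -> HashSet<BasicBlock> {
    let mut dead_unwinds = HashSet::new();
    for (bb, terminator) in blocks {
        let Terminator::DropWithCleanup(place) = terminator else { continue };
        if !maybe_init_at_drop[bb].contains(place) {
            dead_unwinds.insert(*bb);
        }
    }
    dead_unwinds
}

fn main() {
    let blocks = vec![
        (0, Terminator::DropWithCleanup("x")), // `x` maybe-init: unwind stays
        (1, Terminator::DropWithCleanup("y")), // `y` definitely uninit: unwind is dead
        (2, Terminator::Other),
    ];
    let mut maybe_init = HashMap::new();
    maybe_init.insert(0, HashSet::from(["x"]));
    maybe_init.insert(1, HashSet::new());
    maybe_init.insert(2, HashSet::new());
    assert_eq!(compute_dead_unwinds(&blocks, &maybe_init), HashSet::from([1]));
}
```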
-fn remove_dead_unwinds<'tcx>( - tcx: TyCtxt<'tcx>, - body: &mut Body<'tcx>, - env: &MoveDataParamEnv<'tcx>, -) { - debug!("remove_dead_unwinds({:?})", body.span); +#[instrument(level = "trace", skip(body, flow_inits), ret)] +fn compute_dead_unwinds<'mir, 'tcx>( + body: &'mir Body<'tcx>, + flow_inits: &mut ResultsCursor<'mir, 'tcx, MaybeInitializedPlaces<'mir, 'tcx>>, +) -> BitSet<BasicBlock> { // We only need to do this pass once, because unwind edges can only // reach cleanup blocks, which can't have unwind edges themselves. - let mut dead_unwinds = Vec::new(); - let mut flow_inits = MaybeInitializedPlaces::new(tcx, body, &env) - .into_engine(tcx, body) - .pass_name("remove_dead_unwinds") - .iterate_to_fixpoint() - .into_results_cursor(body); + let mut dead_unwinds = BitSet::new_empty(body.basic_blocks.len()); for (bb, bb_data) in body.basic_blocks.iter_enumerated() { - let place = match bb_data.terminator().kind { - TerminatorKind::Drop { place, unwind: UnwindAction::Cleanup(_), .. } => place, - _ => continue, - }; - - debug!("remove_dead_unwinds @ {:?}: {:?}", bb, bb_data); - - let LookupResult::Exact(path) = env.move_data.rev_lookup.find(place.as_ref()) else { - debug!("remove_dead_unwinds: has parent; skipping"); + let TerminatorKind::Drop { place, unwind: UnwindAction::Cleanup(_), .. } = + bb_data.terminator().kind + else { continue; }; flow_inits.seek_before_primary_effect(body.terminator_loc(bb)); - debug!( - "remove_dead_unwinds @ {:?}: path({:?})={:?}; init_data={:?}", - bb, - place, - path, - flow_inits.get() - ); - - let mut maybe_live = false; - on_all_drop_children_bits(tcx, body, &env, path, |child| { - maybe_live |= flow_inits.contains(child); - }); - - debug!("remove_dead_unwinds @ {:?}: maybe_live={}", bb, maybe_live); - if !maybe_live { - dead_unwinds.push(bb); + if flow_inits.analysis().is_unwind_dead(place, flow_inits.get()) { + dead_unwinds.insert(bb); } } - if dead_unwinds.is_empty() { - return; - } - - let basic_blocks = body.basic_blocks.as_mut(); - for &bb in dead_unwinds.iter() { - if let Some(unwind) = basic_blocks[bb].terminator_mut().unwind_mut() { - *unwind = UnwindAction::Unreachable; - } - } + dead_unwinds } struct InitializationData<'mir, 'tcx> { @@ -290,7 +255,6 @@ struct ElaborateDropsCtxt<'a, 'tcx> { init_data: InitializationData<'a, 'tcx>, drop_flags: IndexVec<MovePathIndex, Option<Local>>, patch: MirPatch<'tcx>, - reachable: BitSet<BasicBlock>, } impl<'b, 'tcx> ElaborateDropsCtxt<'b, 'tcx> { @@ -330,9 +294,6 @@ impl<'b, 'tcx> ElaborateDropsCtxt<'b, 'tcx> { fn collect_drop_flags(&mut self) { for (bb, data) in self.body.basic_blocks.iter_enumerated() { - if !self.reachable.contains(bb) { - continue; - } let terminator = data.terminator(); let place = match terminator.kind { TerminatorKind::Drop { ref place, .. 
} => place, @@ -358,8 +319,7 @@ impl<'b, 'tcx> ElaborateDropsCtxt<'b, 'tcx> { self.tcx.sess.delay_span_bug( terminator.source_info.span, format!( - "drop of untracked, uninitialized value {:?}, place {:?} ({:?})", - bb, place, path + "drop of untracked, uninitialized value {bb:?}, place {place:?} ({path:?})" ), ); } @@ -385,9 +345,6 @@ impl<'b, 'tcx> ElaborateDropsCtxt<'b, 'tcx> { fn elaborate_drops(&mut self) { for (bb, data) in self.body.basic_blocks.iter_enumerated() { - if !self.reachable.contains(bb) { - continue; - } let loc = Location { block: bb, statement_index: data.statements.len() }; let terminator = data.terminator(); @@ -424,7 +381,7 @@ impl<'b, 'tcx> ElaborateDropsCtxt<'b, 'tcx> { if !replace { self.tcx.sess.delay_span_bug( terminator.source_info.span, - format!("drop of untracked value {:?}", bb), + format!("drop of untracked value {bb:?}"), ); } // A drop and replace behind a pointer/array/whatever. @@ -466,9 +423,6 @@ impl<'b, 'tcx> ElaborateDropsCtxt<'b, 'tcx> { fn drop_flags_for_fn_rets(&mut self) { for (bb, data) in self.body.basic_blocks.iter_enumerated() { - if !self.reachable.contains(bb) { - continue; - } if let TerminatorKind::Call { destination, target: Some(tgt), @@ -507,9 +461,6 @@ impl<'b, 'tcx> ElaborateDropsCtxt<'b, 'tcx> { // clobbered before they are read. for (bb, data) in self.body.basic_blocks.iter_enumerated() { - if !self.reachable.contains(bb) { - continue; - } debug!("drop_flags_for_locs({:?})", data); for i in 0..(data.statements.len() + 1) { debug!("drop_flag_for_locs: stmt {}", i); diff --git a/compiler/rustc_mir_transform/src/ffi_unwind_calls.rs b/compiler/rustc_mir_transform/src/ffi_unwind_calls.rs index 58cc161dd..d20286084 100644 --- a/compiler/rustc_mir_transform/src/ffi_unwind_calls.rs +++ b/compiler/rustc_mir_transform/src/ffi_unwind_calls.rs @@ -30,6 +30,8 @@ fn abi_can_unwind(abi: Abi) -> bool { | EfiApi | AvrInterrupt | AvrNonBlockingInterrupt + | RiscvInterruptM + | RiscvInterruptS | CCmseNonSecureCall | Wasm | RustIntrinsic diff --git a/compiler/rustc_mir_transform/src/function_item_references.rs b/compiler/rustc_mir_transform/src/function_item_references.rs index 0b41e57be..a42eacbf2 100644 --- a/compiler/rustc_mir_transform/src/function_item_references.rs +++ b/compiler/rustc_mir_transform/src/function_item_references.rs @@ -2,7 +2,7 @@ use itertools::Itertools; use rustc_hir::def_id::DefId; use rustc_middle::mir::visit::Visitor; use rustc_middle::mir::*; -use rustc_middle::ty::{self, EarlyBinder, SubstsRef, Ty, TyCtxt}; +use rustc_middle::ty::{self, EarlyBinder, GenericArgsRef, Ty, TyCtxt}; use rustc_session::lint::builtin::FUNCTION_ITEM_REFERENCES; use rustc_span::{symbol::sym, Span}; use rustc_target::spec::abi::Abi; @@ -40,20 +40,19 @@ impl<'tcx> Visitor<'tcx> for FunctionItemRefChecker<'_, 'tcx> { { let source_info = *self.body.source_info(location); let func_ty = func.ty(self.body, self.tcx); - if let ty::FnDef(def_id, substs_ref) = *func_ty.kind() { + if let ty::FnDef(def_id, args_ref) = *func_ty.kind() { // Handle calls to `transmute` if self.tcx.is_diagnostic_item(sym::transmute, def_id) { let arg_ty = args[0].ty(self.body, self.tcx); for inner_ty in arg_ty.walk().filter_map(|arg| arg.as_type()) { - if let Some((fn_id, fn_substs)) = - FunctionItemRefChecker::is_fn_ref(inner_ty) + if let Some((fn_id, fn_args)) = FunctionItemRefChecker::is_fn_ref(inner_ty) { let span = self.nth_arg_span(&args, 0); - self.emit_lint(fn_id, fn_substs, source_info, span); + self.emit_lint(fn_id, fn_args, source_info, span); } } } else { - 
self.check_bound_args(def_id, substs_ref, &args, source_info); + self.check_bound_args(def_id, args_ref, &args, source_info); } } } @@ -63,11 +62,11 @@ impl<'tcx> Visitor<'tcx> for FunctionItemRefChecker<'_, 'tcx> { impl<'tcx> FunctionItemRefChecker<'_, 'tcx> { /// Emits a lint for function reference arguments bound by `fmt::Pointer` in calls to the - /// function defined by `def_id` with the substitutions `substs_ref`. + /// function defined by `def_id` with the substitutions `args_ref`. fn check_bound_args( &self, def_id: DefId, - substs_ref: SubstsRef<'tcx>, + args_ref: GenericArgsRef<'tcx>, args: &[Operand<'tcx>], source_info: SourceInfo, ) { @@ -76,15 +75,17 @@ impl<'tcx> FunctionItemRefChecker<'_, 'tcx> { for bound in bounds { if let Some(bound_ty) = self.is_pointer_trait(bound) { // Get the argument types as they appear in the function signature. - let arg_defs = self.tcx.fn_sig(def_id).subst_identity().skip_binder().inputs(); + let arg_defs = + self.tcx.fn_sig(def_id).instantiate_identity().skip_binder().inputs(); for (arg_num, arg_def) in arg_defs.iter().enumerate() { // For all types reachable from the argument type in the fn sig for inner_ty in arg_def.walk().filter_map(|arg| arg.as_type()) { // If the inner type matches the type bound by `Pointer` if inner_ty == bound_ty { // Do a substitution using the parameters from the callsite - let subst_ty = EarlyBinder::bind(inner_ty).subst(self.tcx, substs_ref); - if let Some((fn_id, fn_substs)) = + let subst_ty = + EarlyBinder::bind(inner_ty).instantiate(self.tcx, args_ref); + if let Some((fn_id, fn_args)) = FunctionItemRefChecker::is_fn_ref(subst_ty) { let mut span = self.nth_arg_span(args, arg_num); @@ -94,7 +95,7 @@ impl<'tcx> FunctionItemRefChecker<'_, 'tcx> { let callsite_ctxt = span.source_callsite().ctxt(); span = span.with_ctxt(callsite_ctxt); } - self.emit_lint(fn_id, fn_substs, source_info, span); + self.emit_lint(fn_id, fn_args, source_info, span); } } } @@ -115,8 +116,8 @@ impl<'tcx> FunctionItemRefChecker<'_, 'tcx> { } /// If a type is a reference or raw pointer to the anonymous type of a function definition, - /// returns that function's `DefId` and `SubstsRef`. - fn is_fn_ref(ty: Ty<'tcx>) -> Option<(DefId, SubstsRef<'tcx>)> { + /// returns that function's `DefId` and `GenericArgsRef`. 
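Beyond the mechanical `Substs*` to `GenericArgs*` renames in this file, the lint logic is unchanged: `FUNCTION_ITEM_REFERENCES` fires when a reference to a function *item* (a zero-sized value whose address is not the function's code address) flows into an `fmt::Pointer` bound or a `transmute`. A small program that should trigger it:

```rust
fn foo() {}

fn main() {
    // Should warn (the `function_item_references` lint): `&foo` borrows the
    // zero-sized function item, so `{:p}` prints a meaningless address.
    println!("{:p}", &foo);

    // The lint's suggested fix is to cast to a real function pointer first.
    println!("{:p}", foo as fn());
}
```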
+ fn is_fn_ref(ty: Ty<'tcx>) -> Option<(DefId, GenericArgsRef<'tcx>)> { let referent_ty = match ty.kind() { ty::Ref(_, referent_ty, _) => Some(referent_ty), ty::RawPtr(ty_and_mut) => Some(&ty_and_mut.ty), @@ -124,8 +125,8 @@ impl<'tcx> FunctionItemRefChecker<'_, 'tcx> { }; referent_ty .map(|ref_ty| { - if let ty::FnDef(def_id, substs_ref) = *ref_ty.kind() { - Some((def_id, substs_ref)) + if let ty::FnDef(def_id, args_ref) = *ref_ty.kind() { + Some((def_id, args_ref)) } else { None } @@ -145,7 +146,7 @@ impl<'tcx> FunctionItemRefChecker<'_, 'tcx> { fn emit_lint( &self, fn_id: DefId, - fn_substs: SubstsRef<'tcx>, + fn_args: GenericArgsRef<'tcx>, source_info: SourceInfo, span: Span, ) { @@ -155,7 +156,7 @@ impl<'tcx> FunctionItemRefChecker<'_, 'tcx> { .assert_crate_local() .lint_root; // FIXME: use existing printing routines to print the function signature - let fn_sig = self.tcx.fn_sig(fn_id).subst(self.tcx, fn_substs); + let fn_sig = self.tcx.fn_sig(fn_id).instantiate(self.tcx, fn_args); let unsafety = fn_sig.unsafety().prefix_str(); let abi = match fn_sig.abi() { Abi::Rust => String::from(""), @@ -167,15 +168,15 @@ impl<'tcx> FunctionItemRefChecker<'_, 'tcx> { } }; let ident = self.tcx.item_name(fn_id).to_ident_string(); - let ty_params = fn_substs.types().map(|ty| format!("{}", ty)); - let const_params = fn_substs.consts().map(|c| format!("{}", c)); + let ty_params = fn_args.types().map(|ty| format!("{ty}")); + let const_params = fn_args.consts().map(|c| format!("{c}")); let params = ty_params.chain(const_params).join(", "); let num_args = fn_sig.inputs().map_bound(|inputs| inputs.len()).skip_binder(); let variadic = if fn_sig.c_variadic() { ", ..." } else { "" }; let ret = if fn_sig.output().skip_binder().is_unit() { "" } else { " -> _" }; let sugg = format!( "{} as {}{}fn({}{}){}", - if params.is_empty() { ident.clone() } else { format!("{}::<{}>", ident, params) }, + if params.is_empty() { ident.clone() } else { format!("{ident}::<{params}>") }, unsafety, abi, vec!["_"; num_args].join(", "), diff --git a/compiler/rustc_mir_transform/src/generator.rs b/compiler/rustc_mir_transform/src/generator.rs index 264bc61f1..ff4822f33 100644 --- a/compiler/rustc_mir_transform/src/generator.rs +++ b/compiler/rustc_mir_transform/src/generator.rs @@ -50,8 +50,10 @@ //! For generators with state 1 (returned) and state 2 (poisoned) it does nothing. //! Otherwise it drops all the values in scope at the last suspension point. +use crate::abort_unwinding_calls; use crate::deref_separator::deref_finder; use crate::errors; +use crate::pass_manager as pm; use crate::simplify; use crate::MirPass; use rustc_data_structures::fx::{FxHashMap, FxHashSet}; @@ -64,8 +66,9 @@ use rustc_index::{Idx, IndexVec}; use rustc_middle::mir::dump_mir; use rustc_middle::mir::visit::{MutVisitor, PlaceContext, Visitor}; use rustc_middle::mir::*; +use rustc_middle::ty::InstanceDef; use rustc_middle::ty::{self, AdtDef, Ty, TyCtxt}; -use rustc_middle::ty::{GeneratorSubsts, SubstsRef}; +use rustc_middle::ty::{GeneratorArgs, GenericArgsRef}; use rustc_mir_dataflow::impls::{ MaybeBorrowedLocals, MaybeLiveLocals, MaybeRequiresStorage, MaybeStorageLive, }; @@ -194,11 +197,11 @@ fn replace_base<'tcx>(place: &mut Place<'tcx>, new_base: Place<'tcx>, tcx: TyCtx const SELF_ARG: Local = Local::from_u32(1); /// Generator has not been resumed yet. -const UNRESUMED: usize = GeneratorSubsts::UNRESUMED; +const UNRESUMED: usize = GeneratorArgs::UNRESUMED; /// Generator has returned / is completed. 
-const RETURNED: usize = GeneratorSubsts::RETURNED; +const RETURNED: usize = GeneratorArgs::RETURNED; /// Generator has panicked and is poisoned. -const POISONED: usize = GeneratorSubsts::POISONED; +const POISONED: usize = GeneratorArgs::POISONED; /// Number of variants to reserve in generator state. Corresponds to /// `UNRESUMED` (beginning of a generator) and `RETURNED`/`POISONED` @@ -223,7 +226,7 @@ struct TransformVisitor<'tcx> { tcx: TyCtxt<'tcx>, is_async_kind: bool, state_adt_ref: AdtDef<'tcx>, - state_substs: SubstsRef<'tcx>, + state_args: GenericArgsRef<'tcx>, // The type of the discriminant in the generator struct discr_ty: Ty<'tcx>, @@ -265,7 +268,7 @@ impl<'tcx> TransformVisitor<'tcx> { (false, true) => 1, // Poll::Pending }); - let kind = AggregateKind::Adt(self.state_adt_ref.did(), idx, self.state_substs, None, None); + let kind = AggregateKind::Adt(self.state_adt_ref.did(), idx, self.state_args, None, None); // `Poll::Pending` if self.is_async_kind && idx == VariantIdx::new(1) { @@ -431,8 +434,8 @@ fn make_generator_state_argument_pinned<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body let pin_did = tcx.require_lang_item(LangItem::Pin, Some(body.span)); let pin_adt_ref = tcx.adt_def(pin_did); - let substs = tcx.mk_substs(&[ref_gen_ty.into()]); - let pin_ref_gen_ty = Ty::new_adt(tcx, pin_adt_ref, substs); + let args = tcx.mk_args(&[ref_gen_ty.into()]); + let pin_ref_gen_ty = Ty::new_adt(tcx, pin_adt_ref, args); // Replace the by ref generator argument body.local_decls.raw[1].ty = pin_ref_gen_ty; @@ -856,7 +859,7 @@ fn sanitize_witness<'tcx>( tcx: TyCtxt<'tcx>, body: &Body<'tcx>, witness: Ty<'tcx>, - upvars: Vec<Ty<'tcx>>, + upvars: &'tcx ty::List<Ty<'tcx>>, layout: &GeneratorLayout<'tcx>, ) { let did = body.source.def_id(); @@ -1147,7 +1150,25 @@ fn create_generator_drop_shim<'tcx>( // unrelated code from the resume part of the function simplify::remove_dead_blocks(tcx, &mut body); + // Update the body's def to become the drop glue. + // This needs to be updated before the AbortUnwindingCalls pass. + let gen_instance = body.source.instance; + let drop_in_place = tcx.require_lang_item(LangItem::DropInPlace, None); + let drop_instance = InstanceDef::DropGlue(drop_in_place, Some(gen_ty)); + body.source.instance = drop_instance; + + pm::run_passes_no_validate( + tcx, + &mut body, + &[&abort_unwinding_calls::AbortUnwindingCalls], + None, + ); + + // Temporary change MirSource to generator's instance so that dump_mir produces more sensible + // filename. 
+ body.source.instance = gen_instance; dump_mir(tcx, false, "generator_drop", &0, &body, |_, _| Ok(())); + body.source.instance = drop_instance; body } @@ -1317,6 +1338,8 @@ fn create_generator_resume_function<'tcx>( // unrelated code from the drop part of the function simplify::remove_dead_blocks(tcx, body); + pm::run_passes_no_validate(tcx, body, &[&abort_unwinding_calls::AbortUnwindingCalls], None); + dump_mir(tcx, false, "generator_resume", &0, body, |_, _| Ok(())); } @@ -1431,7 +1454,7 @@ pub(crate) fn mir_generator_witnesses<'tcx>( // The first argument is the generator type passed by value let gen_ty = body.local_decls[ty::CAPTURE_STRUCT_LOCAL].ty; - // Get the interior types and substs which typeck computed + // Get the interior types and args which typeck computed let movable = match *gen_ty.kind() { ty::Generator(_, _, movability) => movability == hir::Movability::Movable, ty::Error(_) => return None, @@ -1465,38 +1488,38 @@ impl<'tcx> MirPass<'tcx> for StateTransform { // The first argument is the generator type passed by value let gen_ty = body.local_decls.raw[1].ty; - // Get the discriminant type and substs which typeck computed + // Get the discriminant type and args which typeck computed let (discr_ty, upvars, interior, movable) = match *gen_ty.kind() { - ty::Generator(_, substs, movability) => { - let substs = substs.as_generator(); + ty::Generator(_, args, movability) => { + let args = args.as_generator(); ( - substs.discr_ty(tcx), - substs.upvar_tys().collect::<Vec<_>>(), - substs.witness(), + args.discr_ty(tcx), + args.upvar_tys(), + args.witness(), movability == hir::Movability::Movable, ) } _ => { - tcx.sess.delay_span_bug(body.span, format!("unexpected generator type {}", gen_ty)); + tcx.sess.delay_span_bug(body.span, format!("unexpected generator type {gen_ty}")); return; } }; let is_async_kind = matches!(body.generator_kind(), Some(GeneratorKind::Async(_))); - let (state_adt_ref, state_substs) = if is_async_kind { + let (state_adt_ref, state_args) = if is_async_kind { // Compute Poll<return_ty> let poll_did = tcx.require_lang_item(LangItem::Poll, None); let poll_adt_ref = tcx.adt_def(poll_did); - let poll_substs = tcx.mk_substs(&[body.return_ty().into()]); - (poll_adt_ref, poll_substs) + let poll_args = tcx.mk_args(&[body.return_ty().into()]); + (poll_adt_ref, poll_args) } else { // Compute GeneratorState<yield_ty, return_ty> let state_did = tcx.require_lang_item(LangItem::GeneratorState, None); let state_adt_ref = tcx.adt_def(state_did); - let state_substs = tcx.mk_substs(&[yield_ty.into(), body.return_ty().into()]); - (state_adt_ref, state_substs) + let state_args = tcx.mk_args(&[yield_ty.into(), body.return_ty().into()]); + (state_adt_ref, state_args) }; - let ret_ty = Ty::new_adt(tcx, state_adt_ref, state_substs); + let ret_ty = Ty::new_adt(tcx, state_adt_ref, state_args); // We rename RETURN_PLACE which has type mir.return_ty to new_ret_local // RETURN_PLACE then is a fresh unused local with type ret_ty. 
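The hunk above picks the transformed function's return type: `Poll<R>` for async bodies, `GeneratorState<Y, R>` for plain generators, with `make_state` (earlier in this file's diff) mapping variant 0 to `Ready`/`Yielded` and variant 1 to `Pending`. The value-level correspondence for the async case, sketched with a hand-rolled `GeneratorState` since the real trait is unstable:

```rust
use std::task::Poll;

/// Hand-rolled copy of the unstable `std::ops::GeneratorState`.
enum GeneratorState<Y, R> {
    Yielded(Y),
    Complete(R),
}

/// For `async` bodies the yield type is `()`, and the two variants line up
/// with `Poll`: yields become `Pending`, completion becomes `Ready`.
fn to_poll<R>(state: GeneratorState<(), R>) -> Poll<R> {
    match state {
        GeneratorState::Yielded(()) => Poll::Pending,
        GeneratorState::Complete(value) => Poll::Ready(value),
    }
}

fn main() {
    assert_eq!(to_poll(GeneratorState::Complete(42)), Poll::Ready(42));
    assert!(to_poll::<i32>(GeneratorState::Yielded(())).is_pending());
}
```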
@@ -1570,7 +1593,7 @@ impl<'tcx> MirPass<'tcx> for StateTransform {
             tcx,
             is_async_kind,
             state_adt_ref,
-            state_substs,
+            state_args,
             remap,
             storage_liveness,
             always_live_locals,
@@ -1763,7 +1786,9 @@ fn check_suspend_tys<'tcx>(tcx: TyCtxt<'tcx>, layout: &GeneratorLayout<'tcx>, bo
         debug!(?decl);
         if !decl.ignore_for_traits && linted_tys.insert(decl.ty) {
-            let Some(hir_id) = decl.source_info.scope.lint_root(&body.source_scopes) else { continue };
+            let Some(hir_id) = decl.source_info.scope.lint_root(&body.source_scopes) else {
+                continue;
+            };
 
             check_must_not_suspend_ty(
                 tcx,
diff --git a/compiler/rustc_mir_transform/src/inline.rs b/compiler/rustc_mir_transform/src/inline.rs
index b6578cb25..fc9e18378 100644
--- a/compiler/rustc_mir_transform/src/inline.rs
+++ b/compiler/rustc_mir_transform/src/inline.rs
@@ -1,6 +1,7 @@
 //! Inlining pass for MIR functions
 use crate::deref_separator::deref_finder;
 use rustc_attr::InlineAttr;
+use rustc_const_eval::transform::validate::validate_types;
 use rustc_hir::def_id::DefId;
 use rustc_index::bit_set::BitSet;
 use rustc_index::Idx;
@@ -10,7 +11,7 @@ use rustc_middle::mir::*;
 use rustc_middle::ty::TypeVisitableExt;
 use rustc_middle::ty::{self, Instance, InstanceDef, ParamEnv, Ty, TyCtxt};
 use rustc_session::config::OptLevel;
-use rustc_target::abi::{FieldIdx, FIRST_VARIANT};
+use rustc_target::abi::FieldIdx;
 use rustc_target::spec::abi::Abi;
 
 use crate::simplify::{remove_dead_blocks, CfgSimplifier};
@@ -105,7 +106,7 @@ struct Inliner<'tcx> {
     /// Caller codegen attributes.
     codegen_fn_attrs: &'tcx CodegenFnAttrs,
     /// Stack of inlined instances.
-    /// We only check the `DefId` and not the substs because we want to
+    /// We only check the `DefId` and not the args because we want to
     /// avoid inlining cases of polymorphic recursion.
     /// The number of `DefId`s is finite, so checking history is enough
     /// to ensure that we do not loop endlessly while inlining.
@@ -200,6 +201,19 @@ impl<'tcx> Inliner<'tcx> {
             return Err("failed to normalize callee body");
         };
 
+        // Normally, this shouldn't be required, but trait normalization failure can create a
+        // validation ICE.
+        if !validate_types(
+            self.tcx,
+            MirPhase::Runtime(RuntimePhase::Optimized),
+            self.param_env,
+            &callee_body,
+        )
+        .is_empty()
+        {
+            return Err("failed to validate callee body");
+        }
+
         // Check call signature compatibility.
         // Normally, this shouldn't be required, but trait normalization failure can create a
         // validation ICE.
@@ -209,19 +223,29 @@ impl<'tcx> Inliner<'tcx> {
             return Err("failed to normalize return type");
         }
         if callsite.fn_sig.abi() == Abi::RustCall {
-            let (arg_tuple, skipped_args) = match &args[..] {
-                [arg_tuple] => (arg_tuple, 0),
-                [_, arg_tuple] => (arg_tuple, 1),
+            // FIXME: Don't inline user-written `extern "rust-call"` functions,
+            // since this is generally perf-negative on rustc, and we hope that
+            // LLVM will inline these functions instead.
+            if callee_body.spread_arg.is_some() {
+                return Err("do not inline user-written rust-call functions");
+            }
+
+            let (self_arg, arg_tuple) = match &args[..] {
+                [arg_tuple] => (None, arg_tuple),
+                [self_arg, arg_tuple] => (Some(self_arg), arg_tuple),
                 _ => bug!("Expected `rust-call` to have 1 or 2 args"),
             };
 
+            let self_arg_ty =
+                self_arg.map(|self_arg| self_arg.ty(&caller_body.local_decls, self.tcx));
+
             let arg_tuple_ty = arg_tuple.ty(&caller_body.local_decls, self.tcx);
-            let ty::Tuple(arg_tuple_tys) = arg_tuple_ty.kind() else {
+            let ty::Tuple(arg_tuple_tys) = *arg_tuple_ty.kind() else {
                 bug!("Closure arguments are not passed as a tuple");
             };
 
             for (arg_ty, input) in
-                arg_tuple_tys.iter().zip(callee_body.args_iter().skip(skipped_args))
+                self_arg_ty.into_iter().chain(arg_tuple_tys).zip(callee_body.args_iter())
             {
                 let input_type = callee_body.local_decls[input].ty;
                 if !util::is_subtype(self.tcx, self.param_env, input_type, arg_ty) {
@@ -329,11 +353,11 @@ impl<'tcx> Inliner<'tcx> {
             let terminator = bb_data.terminator();
             if let TerminatorKind::Call { ref func, target, fn_span, .. } = terminator.kind {
                 let func_ty = func.ty(caller_body, self.tcx);
-                if let ty::FnDef(def_id, substs) = *func_ty.kind() {
-                    // To resolve an instance its substs have to be fully normalized.
-                    let substs = self.tcx.try_normalize_erasing_regions(self.param_env, substs).ok()?;
+                if let ty::FnDef(def_id, args) = *func_ty.kind() {
+                    // To resolve an instance its args have to be fully normalized.
+                    let args = self.tcx.try_normalize_erasing_regions(self.param_env, args).ok()?;
                     let callee =
-                        Instance::resolve(self.tcx, self.param_env, def_id, substs).ok().flatten()?;
+                        Instance::resolve(self.tcx, self.param_env, def_id, args).ok().flatten()?;
 
                     if let InstanceDef::Virtual(..) | InstanceDef::Intrinsic(_) = callee.def {
                         return None;
@@ -343,7 +367,7 @@ impl<'tcx> Inliner<'tcx> {
                         return None;
                     }
 
-                    let fn_sig = self.tcx.fn_sig(def_id).subst(self.tcx, substs);
+                    let fn_sig = self.tcx.fn_sig(def_id).instantiate(self.tcx, args);
                     let source_info = SourceInfo { span: fn_span, ..terminator.source_info };
 
                     return Some(CallSite { callee, fn_sig, block: bb, target, source_info });
@@ -368,7 +392,7 @@ impl<'tcx> Inliner<'tcx> {
         // inlining. This is to ensure that the final crate doesn't have MIR that
         // reference unexported symbols
         if callsite.callee.def_id().is_local() {
-            let is_generic = callsite.callee.substs.non_erasable_generics().next().is_some();
+            let is_generic = callsite.callee.args.non_erasable_generics().next().is_some();
             if !is_generic && !callee_attrs.requests_inline() {
                 return Err("not exported");
             }
@@ -437,13 +461,8 @@ impl<'tcx> Inliner<'tcx> {
             instance: callsite.callee,
             callee_body,
             cost: 0,
-            validation: Ok(()),
         };
 
-        for var_debug_info in callee_body.var_debug_info.iter() {
-            checker.visit_var_debug_info(var_debug_info);
-        }
-
         // Traverse the MIR manually so we can account for the effects of inlining on the CFG.
         let mut work_list = vec![START_BLOCK];
         let mut visited = BitSet::new_empty(callee_body.basic_blocks.len());
@@ -480,9 +499,6 @@ impl<'tcx> Inliner<'tcx> {
             }
         }
 
-        // Abort if type validation found anything fishy.
-        checker.validation?;
-
         // N.B. We still apply our cost threshold to #[inline(always)] functions.
         // That attribute is often applied to very large functions that exceed LLVM's (very
         // generous) inlining threshold. Such functions are very poor MIR inlining candidates.
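Editorial aside: the `rust-call` hunk above now pairs an optional `self` argument with the flattened tuple element types before comparing against the callee's locals. A miniature sketch of that check, where `Ty` is a hypothetical string stand-in for rustc's interned types and plain equality stands in for the real `util::is_subtype` test:

type Ty = &'static str;

fn rust_call_args_match(
    self_arg_ty: Option<Ty>,
    arg_tuple_tys: &[Ty],
    callee_arg_tys: &[Ty],
) -> bool {
    // Optional `self` first, then the tuple elements, mirroring
    // `self_arg_ty.into_iter().chain(arg_tuple_tys)` in the hunk above.
    let caller_side: Vec<Ty> =
        self_arg_ty.into_iter().chain(arg_tuple_tys.iter().copied()).collect();
    caller_side.len() == callee_arg_tys.len()
        && caller_side.iter().zip(callee_arg_tys).all(|(a, b)| a == b)
}

fn main() {
    // Calling a closure as `FnMut(u32, bool)`: the call site passes
    // `[self, (u32, bool)]`, the callee body declares `self, u32, bool`.
    assert!(rust_call_args_match(Some("&mut F"), &["u32", "bool"], &["&mut F", "u32", "bool"]));
    assert!(!rust_call_args_match(None, &["u32"], &["u32", "bool"]));
}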
@@ -774,11 +790,10 @@ struct CostChecker<'b, 'tcx> {
     cost: usize,
     callee_body: &'b Body<'tcx>,
     instance: ty::Instance<'tcx>,
-    validation: Result<(), &'static str>,
 }
 
 impl<'tcx> Visitor<'tcx> for CostChecker<'_, 'tcx> {
-    fn visit_statement(&mut self, statement: &Statement<'tcx>, location: Location) {
+    fn visit_statement(&mut self, statement: &Statement<'tcx>, _: Location) {
         // Don't count StorageLive/StorageDead in the inlining cost.
         match statement.kind {
             StatementKind::StorageLive(_)
@@ -787,11 +802,9 @@ impl<'tcx> Visitor<'tcx> for CostChecker<'_, 'tcx> {
             | StatementKind::Nop => {}
             _ => self.cost += INSTR_COST,
         }
-
-        self.super_statement(statement, location);
     }
 
-    fn visit_terminator(&mut self, terminator: &Terminator<'tcx>, location: Location) {
+    fn visit_terminator(&mut self, terminator: &Terminator<'tcx>, _: Location) {
         let tcx = self.tcx;
         match terminator.kind {
             TerminatorKind::Drop { ref place, unwind, .. } => {
@@ -835,109 +848,6 @@ impl<'tcx> Visitor<'tcx> for CostChecker<'_, 'tcx> {
             }
             _ => self.cost += INSTR_COST,
         }
-
-        self.super_terminator(terminator, location);
-    }
-
-    /// This method duplicates code from MIR validation in an attempt to detect type mismatches due
-    /// to normalization failure.
-    fn visit_projection_elem(
-        &mut self,
-        place_ref: PlaceRef<'tcx>,
-        elem: PlaceElem<'tcx>,
-        context: PlaceContext,
-        location: Location,
-    ) {
-        if let ProjectionElem::Field(f, ty) = elem {
-            let parent_ty = place_ref.ty(&self.callee_body.local_decls, self.tcx);
-            let check_equal = |this: &mut Self, f_ty| {
-                // Fast path if there is nothing to substitute.
-                if ty == f_ty {
-                    return;
-                }
-                let ty = this.instance.subst_mir(this.tcx, ty::EarlyBinder::bind(&ty));
-                let f_ty = this.instance.subst_mir(this.tcx, ty::EarlyBinder::bind(&f_ty));
-                if ty == f_ty {
-                    return;
-                }
-                if !util::is_subtype(this.tcx, this.param_env, ty, f_ty) {
-                    trace!(?ty, ?f_ty);
-                    this.validation = Err("failed to normalize projection type");
-                    return;
-                }
-            };
-
-            let kind = match parent_ty.ty.kind() {
-                &ty::Alias(ty::Opaque, ty::AliasTy { def_id, substs, .. }) => {
-                    self.tcx.type_of(def_id).subst(self.tcx, substs).kind()
-                }
-                kind => kind,
-            };
-
-            match kind {
-                ty::Tuple(fields) => {
-                    let Some(f_ty) = fields.get(f.as_usize()) else {
-                        self.validation = Err("malformed MIR");
-                        return;
-                    };
-                    check_equal(self, *f_ty);
-                }
-                ty::Adt(adt_def, substs) => {
-                    let var = parent_ty.variant_index.unwrap_or(FIRST_VARIANT);
-                    let Some(field) = adt_def.variant(var).fields.get(f) else {
-                        self.validation = Err("malformed MIR");
-                        return;
-                    };
-                    check_equal(self, field.ty(self.tcx, substs));
-                }
-                ty::Closure(_, substs) => {
-                    let substs = substs.as_closure();
-                    let Some(f_ty) = substs.upvar_tys().nth(f.as_usize()) else {
-                        self.validation = Err("malformed MIR");
-                        return;
-                    };
-                    check_equal(self, f_ty);
-                }
-                &ty::Generator(def_id, substs, _) => {
-                    let f_ty = if let Some(var) = parent_ty.variant_index {
-                        let gen_body = if def_id == self.callee_body.source.def_id() {
-                            self.callee_body
-                        } else {
-                            self.tcx.optimized_mir(def_id)
-                        };
-
-                        let Some(layout) = gen_body.generator_layout() else {
-                            self.validation = Err("malformed MIR");
-                            return;
-                        };
-
-                        let Some(&local) = layout.variant_fields[var].get(f) else {
-                            self.validation = Err("malformed MIR");
-                            return;
-                        };
-
-                        let Some(f_ty) = layout.field_tys.get(local) else {
-                            self.validation = Err("malformed MIR");
-                            return;
-                        };
-
-                        f_ty.ty
-                    } else {
-                        let Some(f_ty) = substs.as_generator().prefix_tys().nth(f.index()) else {
-                            self.validation = Err("malformed MIR");
-                            return;
-                        };
-
-                        f_ty
-                    };
-
-                    check_equal(self, f_ty);
-                }
-                _ => self.validation = Err("malformed MIR"),
-            }
-        }
-
-        self.super_projection_elem(place_ref, elem, context, location);
     }
 }
 
@@ -1143,10 +1053,10 @@ fn try_instance_mir<'tcx>(
 ) -> Result<&'tcx Body<'tcx>, &'static str> {
     match instance {
         ty::InstanceDef::DropGlue(_, Some(ty)) => match ty.kind() {
-            ty::Adt(def, substs) => {
+            ty::Adt(def, args) => {
                 let fields = def.all_fields();
                 for field in fields {
-                    let field_ty = field.ty(tcx, substs);
+                    let field_ty = field.ty(tcx, args);
                     if field_ty.has_param() && field_ty.has_projections() {
                         return Err("cannot build drop shim for polymorphic type");
                     }
diff --git a/compiler/rustc_mir_transform/src/inline/cycle.rs b/compiler/rustc_mir_transform/src/inline/cycle.rs
index 8a10445f8..822634129 100644
--- a/compiler/rustc_mir_transform/src/inline/cycle.rs
+++ b/compiler/rustc_mir_transform/src/inline/cycle.rs
@@ -3,7 +3,7 @@ use rustc_data_structures::stack::ensure_sufficient_stack;
 use rustc_hir::def_id::{DefId, LocalDefId};
 use rustc_middle::mir::TerminatorKind;
 use rustc_middle::ty::TypeVisitableExt;
-use rustc_middle::ty::{self, subst::SubstsRef, InstanceDef, TyCtxt};
+use rustc_middle::ty::{self, GenericArgsRef, InstanceDef, TyCtxt};
 use rustc_session::Limit;
 
 // FIXME: check whether it is cheaper to precompute the entire call graph instead of invoking
@@ -43,16 +43,16 @@ pub(crate) fn mir_callgraph_reachable<'tcx>(
     recursion_limit: Limit,
 ) -> bool {
     trace!(%caller);
-    for &(callee, substs) in tcx.mir_inliner_callees(caller.def) {
-        let Ok(substs) = caller.try_subst_mir_and_normalize_erasing_regions(
+    for &(callee, args) in tcx.mir_inliner_callees(caller.def) {
+        let Ok(args) = caller.try_subst_mir_and_normalize_erasing_regions(
             tcx,
             param_env,
-            ty::EarlyBinder::bind(substs),
+            ty::EarlyBinder::bind(args),
         ) else {
-            trace!(?caller, ?param_env, ?substs, "cannot normalize, skipping");
+            trace!(?caller, ?param_env, ?args, "cannot normalize, skipping");
             continue;
         };
-        let Ok(Some(callee)) = ty::Instance::resolve(tcx, param_env, callee, substs) else {
+        let Ok(Some(callee)) = ty::Instance::resolve(tcx, param_env, callee, args) else {
             trace!(?callee, "cannot resolve, skipping");
             continue;
         };
@@ -147,7 +147,7 @@ pub(crate) fn mir_callgraph_reachable<'tcx>(
 pub(crate) fn mir_inliner_callees<'tcx>(
     tcx: TyCtxt<'tcx>,
     instance: ty::InstanceDef<'tcx>,
-) -> &'tcx [(DefId, SubstsRef<'tcx>)] {
+) -> &'tcx [(DefId, GenericArgsRef<'tcx>)] {
     let steal;
     let guard;
     let body = match (instance, instance.def_id().as_local()) {
@@ -165,7 +165,7 @@ pub(crate) fn mir_inliner_callees<'tcx>(
         if let TerminatorKind::Call { func, .. } = &terminator.kind {
             let ty = func.ty(&body.local_decls, tcx);
             let call = match ty.kind() {
-                ty::FnDef(def_id, substs) => (*def_id, *substs),
+                ty::FnDef(def_id, args) => (*def_id, *args),
                 _ => continue,
             };
             calls.insert(call);
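Editorial aside: `mir_callgraph_reachable` above is a bounded search over the inliner's call graph. A minimal sketch of its overall shape, assuming string function names instead of resolved `Instance`s and a plain depth counter instead of rustc's `Limit`:

use std::collections::{HashMap, HashSet};

// Depth-first search with a visited set and a depth bound. Returns whether
// `target` is reachable from `from` in the callee graph; the inliner uses
// this to refuse inlining that would close a cycle.
fn callgraph_reachable(
    callees: &HashMap<&str, Vec<&'static str>>,
    from: &str,
    target: &str,
    depth: usize,
    seen: &mut HashSet<String>,
) -> bool {
    if from == target {
        return true;
    }
    // The depth limit stands in for the recursion limit in the real query.
    if depth == 0 || !seen.insert(from.to_string()) {
        return false;
    }
    callees
        .get(from)
        .into_iter()
        .flatten()
        .any(|&c| callgraph_reachable(callees, c, target, depth - 1, seen))
}

fn main() {
    let mut graph = HashMap::new();
    graph.insert("a", vec!["b"]);
    graph.insert("b", vec!["c"]);
    graph.insert("c", vec!["a"]);
    // Inlining `b`'s body into `c` would close a cycle iff `c` reaches `b`.
    let mut seen = HashSet::new();
    assert!(callgraph_reachable(&graph, "c", "b", 16, &mut seen));
}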
diff --git a/compiler/rustc_mir_transform/src/instsimplify.rs b/compiler/rustc_mir_transform/src/instsimplify.rs
index e4dc61762..8b0a0903d 100644
--- a/compiler/rustc_mir_transform/src/instsimplify.rs
+++ b/compiler/rustc_mir_transform/src/instsimplify.rs
@@ -5,7 +5,7 @@ use crate::MirPass;
 use rustc_hir::Mutability;
 use rustc_middle::mir::*;
 use rustc_middle::ty::layout::ValidityRequirement;
-use rustc_middle::ty::{self, ParamEnv, SubstsRef, Ty, TyCtxt};
+use rustc_middle::ty::{self, GenericArgsRef, ParamEnv, Ty, TyCtxt};
 use rustc_span::symbol::Symbol;
 use rustc_target::abi::FieldIdx;
 
@@ -57,7 +57,7 @@ struct InstSimplifyContext<'tcx, 'a> {
 impl<'tcx> InstSimplifyContext<'tcx, '_> {
     fn should_simplify(&self, source_info: &SourceInfo, rvalue: &Rvalue<'tcx>) -> bool {
         self.tcx.consider_optimizing(|| {
-            format!("InstSimplify - Rvalue: {:?} SourceInfo: {:?}", rvalue, source_info)
+            format!("InstSimplify - Rvalue: {rvalue:?} SourceInfo: {source_info:?}")
         })
     }
 
@@ -163,14 +163,14 @@ impl<'tcx> InstSimplifyContext<'tcx, '_> {
         }
 
         // Transmuting a transparent struct/union to a field's type is a projection
-        if let ty::Adt(adt_def, substs) = operand_ty.kind()
+        if let ty::Adt(adt_def, args) = operand_ty.kind()
             && adt_def.repr().transparent()
             && (adt_def.is_struct() || adt_def.is_union())
             && let Some(place) = operand.place()
         {
             let variant = adt_def.non_enum_variant();
             for (i, field) in variant.fields.iter().enumerate() {
-                let field_ty = field.ty(self.tcx, substs);
+                let field_ty = field.ty(self.tcx, args);
                 if field_ty == *cast_ty {
                     let place = place.project_deeper(&[ProjectionElem::Field(FieldIdx::from_usize(i), *cast_ty)], self.tcx);
                     let operand = if operand.is_move() { Operand::Move(place) } else { Operand::Copy(place) };
@@ -189,22 +189,22 @@ impl<'tcx> InstSimplifyContext<'tcx, '_> {
         statements: &mut Vec<Statement<'tcx>>,
     ) {
         let TerminatorKind::Call { func, args, destination, target, .. } = &mut terminator.kind
-        else { return };
+        else {
+            return;
+        };
 
         // It's definitely not a clone if there are multiple arguments
         if args.len() != 1 {
             return;
         }
 
-        let Some(destination_block) = *target
-        else { return };
+        let Some(destination_block) = *target else { return };
 
         // Only bother looking more if it's easy to know what we're calling
-        let Some((fn_def_id, fn_substs)) = func.const_fn_def()
-        else { return };
+        let Some((fn_def_id, fn_args)) = func.const_fn_def() else { return };
 
         // Clone needs one subst, so we can cheaply rule out other stuff
-        if fn_substs.len() != 1 {
+        if fn_args.len() != 1 {
            return;
        }
@@ -212,8 +212,7 @@ impl<'tcx> InstSimplifyContext<'tcx, '_> {
         // doing DefId lookups to figure out what we're actually calling.
         let arg_ty = args[0].ty(self.local_decls, self.tcx);
-        let ty::Ref(_region, inner_ty, Mutability::Not) = *arg_ty.kind()
-        else { return };
+        let ty::Ref(_region, inner_ty, Mutability::Not) = *arg_ty.kind() else { return };
 
         if !inner_ty.is_trivially_pure_clone_copy() {
             return;
@@ -227,15 +226,14 @@ impl<'tcx> InstSimplifyContext<'tcx, '_> {
         if !self.tcx.consider_optimizing(|| {
             format!(
                 "InstSimplify - Call: {:?} SourceInfo: {:?}",
-                (fn_def_id, fn_substs),
+                (fn_def_id, fn_args),
                 terminator.source_info
             )
         }) {
             return;
         }
 
-        let Some(arg_place) = args.pop().unwrap().place()
-        else { return };
+        let Some(arg_place) = args.pop().unwrap().place() else { return };
 
         statements.push(Statement {
             source_info: terminator.source_info,
@@ -254,17 +252,21 @@ impl<'tcx> InstSimplifyContext<'tcx, '_> {
         terminator: &mut Terminator<'tcx>,
         _statements: &mut Vec<Statement<'tcx>>,
     ) {
-        let TerminatorKind::Call { func, target, .. } = &mut terminator.kind else { return; };
-        let Some(target_block) = target else { return; };
+        let TerminatorKind::Call { func, target, .. } = &mut terminator.kind else {
+            return;
+        };
+        let Some(target_block) = target else {
+            return;
+        };
 
         let func_ty = func.ty(self.local_decls, self.tcx);
-        let Some((intrinsic_name, substs)) = resolve_rust_intrinsic(self.tcx, func_ty) else {
+        let Some((intrinsic_name, args)) = resolve_rust_intrinsic(self.tcx, func_ty) else {
             return;
         };
 
         // The intrinsics we are interested in have one generic parameter
-        if substs.is_empty() {
+        if args.is_empty() {
             return;
         }
-        let ty = substs.type_at(0);
+        let ty = args.type_at(0);
 
         let known_is_valid = intrinsic_assert_panics(self.tcx, self.param_env, ty, intrinsic_name);
         match known_is_valid {
@@ -295,10 +297,10 @@ fn intrinsic_assert_panics<'tcx>(
 fn resolve_rust_intrinsic<'tcx>(
     tcx: TyCtxt<'tcx>,
     func_ty: Ty<'tcx>,
-) -> Option<(Symbol, SubstsRef<'tcx>)> {
-    if let ty::FnDef(def_id, substs) = *func_ty.kind() {
+) -> Option<(Symbol, GenericArgsRef<'tcx>)> {
+    if let ty::FnDef(def_id, args) = *func_ty.kind() {
         if tcx.is_intrinsic(def_id) {
-            return Some((tcx.item_name(def_id), substs));
+            return Some((tcx.item_name(def_id), args));
         }
     }
     None
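Editorial aside: the clone-terminator simplification above rests on a plain language fact: for `Copy` types, `Clone::clone` is semantically just a copy, so the call can be replaced with a direct assignment. A small runnable illustration:

// For a trivially pure-clone-copy type, the optimized MIR replaces the
// `Clone::clone` call with a copy of the place.
#[derive(Clone, Copy, PartialEq, Debug)]
struct Point {
    x: i32,
    y: i32,
}

fn main() {
    let p = Point { x: 1, y: 2 };
    let via_clone = p.clone(); // candidate call: inner type is `Copy`
    let via_copy = p;          // what the simplified MIR effectively does
    assert_eq!(via_clone, via_copy);
}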
diff --git a/compiler/rustc_mir_transform/src/large_enums.rs b/compiler/rustc_mir_transform/src/large_enums.rs
index 8ed4706e1..19108dabd 100644
--- a/compiler/rustc_mir_transform/src/large_enums.rs
+++ b/compiler/rustc_mir_transform/src/large_enums.rs
@@ -48,7 +48,7 @@ impl EnumSizeOpt {
         alloc_cache: &mut FxHashMap<Ty<'tcx>, AllocId>,
     ) -> Option<(AdtDef<'tcx>, usize, AllocId)> {
         let adt_def = match ty.kind() {
-            ty::Adt(adt_def, _substs) if adt_def.is_enum() => adt_def,
+            ty::Adt(adt_def, _args) if adt_def.is_enum() => adt_def,
             _ => return None,
         };
         let layout = tcx.layout_of(param_env.and(ty)).ok()?;
diff --git a/compiler/rustc_mir_transform/src/lib.rs b/compiler/rustc_mir_transform/src/lib.rs
index fa8257cf9..bf798adee 100644
--- a/compiler/rustc_mir_transform/src/lib.rs
+++ b/compiler/rustc_mir_transform/src/lib.rs
@@ -75,7 +75,7 @@ mod errors;
 mod ffi_unwind_calls;
 mod function_item_references;
 mod generator;
-mod inline;
+pub mod inline;
 mod instsimplify;
 mod large_enums;
 mod lower_intrinsics;
@@ -338,38 +338,16 @@ fn inner_mir_for_ctfe(tcx: TyCtxt<'_>, def: LocalDefId) -> Body<'_> {
         return shim::build_adt_ctor(tcx, def.to_def_id());
     }
 
-    let context = tcx
-        .hir()
-        .body_const_context(def)
-        .expect("mir_for_ctfe should not be used for runtime functions");
-
-    let body = tcx.mir_drops_elaborated_and_const_checked(def).borrow().clone();
+    let body = tcx.mir_drops_elaborated_and_const_checked(def);
+    let body = match tcx.hir().body_const_context(def) {
+        // consts and statics do not have `optimized_mir`, so we can steal the body instead of
+        // cloning it.
+        Some(hir::ConstContext::Const | hir::ConstContext::Static(_)) => body.steal(),
+        Some(hir::ConstContext::ConstFn) => body.borrow().clone(),
+        None => bug!("`mir_for_ctfe` called on non-const {def:?}"),
+    };
 
     let mut body = remap_mir_for_const_eval_select(tcx, body, hir::Constness::Const);
-
-    match context {
-        // Do not const prop functions, either they get executed at runtime or exported to metadata,
-        // so we run const prop on them, or they don't, in which case we const evaluate some control
-        // flow paths of the function and any errors in those paths will get emitted as const eval
-        // errors.
-        hir::ConstContext::ConstFn => {}
-        // Static items always get evaluated, so we can just let const eval see if any erroneous
-        // control flow paths get executed.
-        hir::ConstContext::Static(_) => {}
-        // Associated constants get const prop run so we detect common failure situations in the
-        // crate that defined the constant.
-        // Technically we want to not run on regular const items, but oli-obk doesn't know how to
-        // conveniently detect that at this point without looking at the HIR.
-        hir::ConstContext::Const => {
-            pm::run_passes(
-                tcx,
-                &mut body,
-                &[&const_prop::ConstProp],
-                Some(MirPhase::Runtime(RuntimePhase::Optimized)),
-            );
-        }
-    }
-
     pm::run_passes(tcx, &mut body, &[&ctfe_limit::CtfeLimit], None);
 
     body
@@ -446,10 +424,16 @@ fn mir_drops_elaborated_and_const_checked(tcx: TyCtxt<'_>, def: LocalDefId) -> &
     run_analysis_to_runtime_passes(tcx, &mut body);
 
+    // Now that drop elaboration has been performed, we can check for
+    // unconditional drop recursion.
+    rustc_mir_build::lints::check_drop_recursion(tcx, &body);
+
     tcx.alloc_steal_mir(body)
 }
 
-fn run_analysis_to_runtime_passes<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
+// Made public such that `mir_drops_elaborated_and_const_checked` can be overridden
+// by custom rustc drivers, running all the steps by themselves.
+pub fn run_analysis_to_runtime_passes<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
     assert!(body.phase == MirPhase::Analysis(AnalysisPhase::Initial));
     let did = body.source.def_id();
 
@@ -553,6 +537,7 @@ fn run_optimization_passes<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
             &normalize_array_len::NormalizeArrayLen, // has to run after `slice::len` lowering
             &const_goto::ConstGoto,
             &remove_unneeded_drops::RemoveUnneededDrops,
+            &ref_prop::ReferencePropagation,
             &sroa::ScalarReplacementOfAggregates,
             &match_branches::MatchBranchSimplification,
             // inst combine is after MatchBranchSimplification to clean up Ne(_1, false)
@@ -560,7 +545,6 @@ fn run_optimization_passes<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
             &instsimplify::InstSimplify,
             &simplify::SimplifyLocals::BeforeConstProp,
             &copy_prop::CopyProp,
-            &ref_prop::ReferencePropagation,
             // Perform `SeparateConstSwitch` after SSA-based analyses, as cloning blocks may
             // destroy the SSA property. It should still happen before const-propagation, so the
             // latter pass will leverage the created opportunities.
@@ -615,7 +599,7 @@ fn inner_optimized_mir(tcx: TyCtxt<'_>, did: LocalDefId) -> Body<'_> {
         // computes and caches its result.
         Some(hir::ConstContext::ConstFn) => tcx.ensure_with_value().mir_for_ctfe(did),
         None => {}
-        Some(other) => panic!("do not use `optimized_mir` for constants: {:?}", other),
+        Some(other) => panic!("do not use `optimized_mir` for constants: {other:?}"),
     }
     debug!("about to call mir_drops_elaborated...");
     let body = tcx.mir_drops_elaborated_and_const_checked(did).steal();
diff --git a/compiler/rustc_mir_transform/src/lower_intrinsics.rs b/compiler/rustc_mir_transform/src/lower_intrinsics.rs
index ce98e9b0c..fc36c6e41 100644
--- a/compiler/rustc_mir_transform/src/lower_intrinsics.rs
+++ b/compiler/rustc_mir_transform/src/lower_intrinsics.rs
@@ -2,7 +2,7 @@ use crate::{errors, MirPass};
 use rustc_middle::mir::*;
-use rustc_middle::ty::subst::SubstsRef;
+use rustc_middle::ty::GenericArgsRef;
 use rustc_middle::ty::{self, Ty, TyCtxt};
 use rustc_span::symbol::{sym, Symbol};
 use rustc_span::Span;
@@ -19,7 +19,8 @@ impl<'tcx> MirPass<'tcx> for LowerIntrinsics {
                 &mut terminator.kind
             {
                 let func_ty = func.ty(local_decls, tcx);
-                let Some((intrinsic_name, substs)) = resolve_rust_intrinsic(tcx, func_ty) else {
+                let Some((intrinsic_name, generic_args)) = resolve_rust_intrinsic(tcx, func_ty)
+                else {
                     continue;
                 };
                 match intrinsic_name {
@@ -149,7 +150,7 @@ impl<'tcx> MirPass<'tcx> for LowerIntrinsics {
                     }
                     sym::size_of | sym::min_align_of => {
                         if let Some(target) = *target {
-                            let tp_ty = substs.type_at(0);
+                            let tp_ty = generic_args.type_at(0);
                             let null_op = match intrinsic_name {
                                 sym::size_of => NullOp::SizeOf,
                                 sym::min_align_of => NullOp::AlignOf,
@@ -251,7 +252,9 @@ impl<'tcx> MirPass<'tcx> for LowerIntrinsics {
                         if let (Some(target), Some(arg)) = (*target, args[0].place()) {
                             let ty::RawPtr(ty::TypeAndMut { ty: dest_ty, .. }) =
                                 destination.ty(local_decls, tcx).ty.kind()
-                            else { bug!(); };
+                            else {
+                                bug!();
+                            };
 
                             block.statements.push(Statement {
                                 source_info: terminator.source_info,
@@ -302,7 +305,7 @@ impl<'tcx> MirPass<'tcx> for LowerIntrinsics {
                             terminator.kind = TerminatorKind::Unreachable;
                         }
                     }
-                    _ if intrinsic_name.as_str().starts_with("simd_shuffle") => {
+                    sym::simd_shuffle => {
                         validate_simd_shuffle(tcx, args, terminator.source_info.span);
                     }
                     _ => {}
@@ -315,10 +318,10 @@ impl<'tcx> MirPass<'tcx> for LowerIntrinsics {
 fn resolve_rust_intrinsic<'tcx>(
     tcx: TyCtxt<'tcx>,
     func_ty: Ty<'tcx>,
-) -> Option<(Symbol, SubstsRef<'tcx>)> {
-    if let ty::FnDef(def_id, substs) = *func_ty.kind() {
+) -> Option<(Symbol, GenericArgsRef<'tcx>)> {
+    if let ty::FnDef(def_id, args) = *func_ty.kind() {
         if tcx.is_intrinsic(def_id) {
-            return Some((tcx.item_name(def_id), substs));
+            return Some((tcx.item_name(def_id), args));
         }
     }
     None
diff --git a/compiler/rustc_mir_transform/src/match_branches.rs b/compiler/rustc_mir_transform/src/match_branches.rs
index 6eb484982..bc29fb8de 100644
--- a/compiler/rustc_mir_transform/src/match_branches.rs
+++ b/compiler/rustc_mir_transform/src/match_branches.rs
@@ -51,7 +51,7 @@ impl<'tcx> MirPass<'tcx> for MatchBranchSimplification {
         let bbs = body.basic_blocks.as_mut();
         let mut should_cleanup = false;
         'outer: for bb_idx in bbs.indices() {
-            if !tcx.consider_optimizing(|| format!("MatchBranchSimplification {:?} ", def_id)) {
+            if !tcx.consider_optimizing(|| format!("MatchBranchSimplification {def_id:?} ")) {
                 continue;
             }
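Editorial aside: the `inner_mir_for_ctfe` hunk above trades a clone for a move when no later query will read the body again. A toy model of that steal-versus-clone decision, using `RefCell` as a stand-in for rustc's `Steal` container and a simplified `ConstContext`:

use std::cell::RefCell;

// Simplified stand-in for `rustc_hir::ConstContext`.
enum ConstContext {
    Const,
    Static,
    ConstFn,
}

fn take_body(cell: RefCell<String>, ctxt: ConstContext) -> String {
    match ctxt {
        // Consts and statics have no `optimized_mir`: move the body out.
        ConstContext::Const | ConstContext::Static => cell.into_inner(),
        // `optimized_mir` still needs the original for const fns: clone it.
        ConstContext::ConstFn => cell.borrow().clone(),
    }
}

fn main() {
    assert_eq!(take_body(RefCell::new("mir".into()), ConstContext::Const), "mir");
    assert_eq!(take_body(RefCell::new("mir".into()), ConstContext::Static), "mir");
    assert_eq!(take_body(RefCell::new("mir".into()), ConstContext::ConstFn), "mir");
}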
diff --git a/compiler/rustc_mir_transform/src/multiple_return_terminators.rs b/compiler/rustc_mir_transform/src/multiple_return_terminators.rs
index 3957cd92c..c97d03454 100644
--- a/compiler/rustc_mir_transform/src/multiple_return_terminators.rs
+++ b/compiler/rustc_mir_transform/src/multiple_return_terminators.rs
@@ -27,7 +27,7 @@ impl<'tcx> MirPass<'tcx> for MultipleReturnTerminators {
         }
 
         for bb in bbs {
-            if !tcx.consider_optimizing(|| format!("MultipleReturnTerminators {:?} ", def_id)) {
+            if !tcx.consider_optimizing(|| format!("MultipleReturnTerminators {def_id:?} ")) {
                 break;
             }
diff --git a/compiler/rustc_mir_transform/src/nrvo.rs b/compiler/rustc_mir_transform/src/nrvo.rs
index 5ce96012b..e1298b065 100644
--- a/compiler/rustc_mir_transform/src/nrvo.rs
+++ b/compiler/rustc_mir_transform/src/nrvo.rs
@@ -45,7 +45,7 @@ impl<'tcx> MirPass<'tcx> for RenameReturnPlace {
             return;
         };
 
-        if !tcx.consider_optimizing(|| format!("RenameReturnPlace {:?}", def_id)) {
+        if !tcx.consider_optimizing(|| format!("RenameReturnPlace {def_id:?}")) {
             return;
         }
diff --git a/compiler/rustc_mir_transform/src/pass_manager.rs b/compiler/rustc_mir_transform/src/pass_manager.rs
index 710eed3ed..057f5fe82 100644
--- a/compiler/rustc_mir_transform/src/pass_manager.rs
+++ b/compiler/rustc_mir_transform/src/pass_manager.rs
@@ -118,7 +118,7 @@ fn run_passes_inner<'tcx>(
             dump_mir_for_pass(tcx, body, &name, false);
         }
         if validate {
-            validate_body(tcx, body, format!("before pass {}", name));
+            validate_body(tcx, body, format!("before pass {name}"));
         }
 
         tcx.sess.time(name, || pass.run_pass(tcx, body));
@@ -127,7 +127,7 @@ fn run_passes_inner<'tcx>(
             dump_mir_for_pass(tcx, body, &name, true);
         }
         if validate {
-            validate_body(tcx, body, format!("after pass {}", name));
+            validate_body(tcx, body, format!("after pass {name}"));
         }
 
         body.pass_count += 1;
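Editorial aside: the `pass_manager.rs` hunk above touches the loop that runs each pass with optional validation before and after. A minimal sketch of that loop's shape, with `Body`, `MirPass`, and the validation all simplified stand-ins rather than rustc's real types (the real loop also times passes and dumps MIR):

struct Body {
    pass_count: usize,
    code: String,
}

trait MirPass {
    fn name(&self) -> &'static str;
    fn run_pass(&self, body: &mut Body);
}

// Stand-in validator: the real `validate_body` checks MIR invariants.
fn validate_body(body: &Body, when: String) {
    assert!(!body.code.is_empty(), "empty body {when}");
}

fn run_passes(body: &mut Body, passes: &[&dyn MirPass], validate: bool) {
    for pass in passes {
        let name = pass.name();
        if validate {
            validate_body(body, format!("before pass {name}"));
        }
        pass.run_pass(body);
        if validate {
            validate_body(body, format!("after pass {name}"));
        }
        body.pass_count += 1;
    }
}

struct Trim;
impl MirPass for Trim {
    fn name(&self) -> &'static str {
        "Trim"
    }
    fn run_pass(&self, body: &mut Body) {
        body.code = body.code.trim().to_string();
    }
}

fn main() {
    let mut body = Body { pass_count: 0, code: "  goto -> bb1;  ".into() };
    run_passes(&mut body, &[&Trim], true);
    assert_eq!(body.pass_count, 1);
    assert_eq!(body.code, "goto -> bb1;");
}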
diff --git a/compiler/rustc_mir_transform/src/ref_prop.rs b/compiler/rustc_mir_transform/src/ref_prop.rs
index bbd9f76ba..49a940b57 100644
--- a/compiler/rustc_mir_transform/src/ref_prop.rs
+++ b/compiler/rustc_mir_transform/src/ref_prop.rs
@@ -71,7 +71,7 @@ pub struct ReferencePropagation;
 
 impl<'tcx> MirPass<'tcx> for ReferencePropagation {
     fn is_enabled(&self, sess: &rustc_session::Session) -> bool {
-        sess.mir_opt_level() >= 4
+        sess.mir_opt_level() >= 2
     }
 
     #[instrument(level = "trace", skip(self, tcx, body))]
@@ -265,7 +265,6 @@ fn compute_replacement<'tcx>(
         targets,
         storage_to_remove,
         allowed_replacements,
-        fully_replacable_locals,
         any_replacement: false,
     };
 
@@ -346,7 +345,6 @@ struct Replacer<'tcx> {
     storage_to_remove: BitSet<Local>,
     allowed_replacements: FxHashSet<(Local, Location)>,
     any_replacement: bool,
-    fully_replacable_locals: BitSet<Local>,
 }
 
 impl<'tcx> MutVisitor<'tcx> for Replacer<'tcx> {
@@ -355,7 +353,10 @@ impl<'tcx> MutVisitor<'tcx> for Replacer<'tcx> {
     }
 
     fn visit_var_debug_info(&mut self, debuginfo: &mut VarDebugInfo<'tcx>) {
-        if let VarDebugInfoContents::Place(ref mut place) = debuginfo.value
+        // If the debuginfo is a pointer to another place:
+        // - if it's a reborrow, see through it;
+        // - if it's a direct borrow, increase `debuginfo.references`.
+        while let VarDebugInfoContents::Place(ref mut place) = debuginfo.value
             && place.projection.is_empty()
             && let Value::Pointer(target, _) = self.targets[place.local]
             && target.projection.iter().all(|p| p.can_use_in_debuginfo())
         {
             if let Some((&PlaceElem::Deref, rest)) = target.projection.split_last() {
                 *place = Place::from(target.local).project_deeper(rest, self.tcx);
                 self.any_replacement = true;
-            } else if self.fully_replacable_locals.contains(place.local)
-                && let Some(references) = debuginfo.references.checked_add(1)
-            {
-                debuginfo.references = references;
-                *place = target;
-                self.any_replacement = true;
+            } else {
+                break
             }
         }
+
+        // Simplify eventual projections left inside `debuginfo`.
+        self.super_var_debug_info(debuginfo);
     }
 
     fn visit_place(&mut self, place: &mut Place<'tcx>, ctxt: PlaceContext, loc: Location) {
-        if place.projection.first() != Some(&PlaceElem::Deref) {
-            return;
-        }
-
-        loop {
-            if let Value::Pointer(target, _) = self.targets[place.local] {
-                let perform_opt = matches!(ctxt, PlaceContext::NonUse(_))
-                    || self.allowed_replacements.contains(&(target.local, loc));
-
-                if perform_opt {
-                    *place = target.project_deeper(&place.projection[1..], self.tcx);
-                    self.any_replacement = true;
-                    continue;
+        if place.projection.first() != Some(&PlaceElem::Deref) {
+            return;
+        }
+
+        let Value::Pointer(target, _) = self.targets[place.local] else { return };
+
+        let perform_opt = match ctxt {
+            PlaceContext::NonUse(NonUseContext::VarDebugInfo) => {
+                target.projection.iter().all(|p| p.can_use_in_debuginfo())
             }
+            PlaceContext::NonUse(_) => true,
+            _ => self.allowed_replacements.contains(&(target.local, loc)),
+        };
+
+        if !perform_opt {
+            return;
         }
-            break;
+
+        *place = target.project_deeper(&place.projection[1..], self.tcx);
+        self.any_replacement = true;
     }
 }
diff --git a/compiler/rustc_mir_transform/src/remove_noop_landing_pads.rs b/compiler/rustc_mir_transform/src/remove_noop_landing_pads.rs
index 4941c9edc..4e85c76fb 100644
--- a/compiler/rustc_mir_transform/src/remove_noop_landing_pads.rs
+++ b/compiler/rustc_mir_transform/src/remove_noop_landing_pads.rs
@@ -6,8 +6,8 @@ use rustc_middle::ty::TyCtxt;
 use rustc_target::spec::PanicStrategy;
 
 /// A pass that removes noop landing pads and replaces jumps to them with
-/// `None`. This is important because otherwise LLVM generates terrible
-/// code for these.
+/// `UnwindAction::Continue`. This is important because otherwise LLVM generates
+/// terrible code for these.
 pub struct RemoveNoopLandingPads;
 
 impl<'tcx> MirPass<'tcx> for RemoveNoopLandingPads {
@@ -84,7 +84,17 @@ impl RemoveNoopLandingPads {
     fn remove_nop_landing_pads(&self, body: &mut Body<'_>) {
         debug!("body: {:#?}", body);
 
-        // make sure there's a resume block
+        // Skip the pass if there are no blocks with a resume terminator.
+        let has_resume = body
+            .basic_blocks
+            .iter_enumerated()
+            .any(|(_bb, block)| matches!(block.terminator().kind, TerminatorKind::Resume));
+        if !has_resume {
+            debug!("remove_noop_landing_pads: no resume block in MIR");
+            return;
+        }
+
+        // make sure there's a resume block without any statements
        let resume_block = {
            let mut patch = MirPatch::new(body);
            let resume_block = patch.resume_block();
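Editorial aside: the `ReferencePropagation` hunks above promote the pass to `mir_opt_level >= 2`. A source-level intuition for what it does, as a runnable sketch rather than actual MIR:

// A borrow that is only created and then dereferenced can be bypassed:
// uses of `*r` become direct uses of `x`, and `r` can be removed.
fn with_borrow() -> u32 {
    let x = 5u32;
    let r = &x; // `_r = &_x`
    *r + 1      // the pass may rewrite `(*_r)` to `_x` and drop `r`
}

fn without_borrow() -> u32 {
    let x = 5u32;
    x + 1 // what the optimized MIR effectively computes
}

fn main() {
    assert_eq!(with_borrow(), without_borrow());
}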
diff --git a/compiler/rustc_mir_transform/src/remove_uninit_drops.rs b/compiler/rustc_mir_transform/src/remove_uninit_drops.rs
index 283931de0..263849747 100644
--- a/compiler/rustc_mir_transform/src/remove_uninit_drops.rs
+++ b/compiler/rustc_mir_transform/src/remove_uninit_drops.rs
@@ -1,10 +1,12 @@
 use rustc_index::bit_set::ChunkedBitSet;
 use rustc_middle::mir::{Body, TerminatorKind};
-use rustc_middle::ty::subst::SubstsRef;
+use rustc_middle::ty::GenericArgsRef;
 use rustc_middle::ty::{self, ParamEnv, Ty, TyCtxt, VariantDef};
 use rustc_mir_dataflow::impls::MaybeInitializedPlaces;
 use rustc_mir_dataflow::move_paths::{LookupResult, MoveData, MovePathIndex};
-use rustc_mir_dataflow::{self, move_path_children_matching, Analysis, MoveDataParamEnv};
+use rustc_mir_dataflow::{
+    self, move_path_children_matching, Analysis, MaybeReachable, MoveDataParamEnv,
+};
 use rustc_target::abi::FieldIdx;
 
 use crate::MirPass;
@@ -38,10 +40,10 @@ impl<'tcx> MirPass<'tcx> for RemoveUninitDrops {
         let mut to_remove = vec![];
         for (bb, block) in body.basic_blocks.iter_enumerated() {
             let terminator = block.terminator();
-            let TerminatorKind::Drop { place, .. } = &terminator.kind
-            else { continue };
+            let TerminatorKind::Drop { place, .. } = &terminator.kind else { continue };
 
             maybe_inits.seek_before_primary_effect(body.terminator_loc(bb));
+            let MaybeReachable::Reachable(maybe_inits) = maybe_inits.get() else { continue };
 
             // If there's no move path for the dropped place, it's probably a `Deref`. Let it alone.
             let LookupResult::Exact(mpi) = mdpe.move_data.rev_lookup.find(place.as_ref()) else {
@@ -51,7 +53,7 @@ impl<'tcx> MirPass<'tcx> for RemoveUninitDrops {
             let should_keep = is_needs_drop_and_init(
                 tcx,
                 param_env,
-                maybe_inits.get(),
+                maybe_inits,
                 &mdpe.move_data,
                 place.ty(body, tcx).ty,
                 mpi,
@@ -64,9 +66,9 @@ impl<'tcx> MirPass<'tcx> for RemoveUninitDrops {
         for bb in to_remove {
             let block = &mut body.basic_blocks_mut()[bb];
 
-            let TerminatorKind::Drop { target, .. }
-                = &block.terminator().kind
-            else { unreachable!() };
+            let TerminatorKind::Drop { target, .. } = &block.terminator().kind else {
+                unreachable!()
+            };
 
             // Replace block terminator with `Goto`.
             block.terminator_mut().kind = TerminatorKind::Goto { target: *target };
@@ -99,7 +101,7 @@ fn is_needs_drop_and_init<'tcx>(
     // This pass is only needed for const-checking, so it doesn't handle as many cases as
     // `DropCtxt::open_drop`, since they aren't relevant in a const-context.
     match ty.kind() {
-        ty::Adt(adt, substs) => {
+        ty::Adt(adt, args) => {
             let dont_elaborate = adt.is_union() || adt.is_manually_drop() || adt.has_dtor(tcx);
             if dont_elaborate {
                 return true;
             }
@@ -119,7 +121,7 @@ fn is_needs_drop_and_init<'tcx>(
                 let downcast = move_path_children_matching(move_data, mpi, |x| x.is_downcast_to(vid));
                 let Some(dc_mpi) = downcast else {
-                    return variant_needs_drop(tcx, param_env, substs, variant);
+                    return variant_needs_drop(tcx, param_env, args, variant);
                 };
 
                 dc_mpi
@@ -131,7 +133,7 @@ fn is_needs_drop_and_init<'tcx>(
                     .fields
                     .iter()
                     .enumerate()
-                    .map(|(f, field)| (FieldIdx::from_usize(f), field.ty(tcx, substs), mpi))
+                    .map(|(f, field)| (FieldIdx::from_usize(f), field.ty(tcx, args), mpi))
                     .any(field_needs_drop_and_init)
             })
         }
@@ -149,11 +151,11 @@ fn is_needs_drop_and_init<'tcx>(
 fn variant_needs_drop<'tcx>(
     tcx: TyCtxt<'tcx>,
     param_env: ParamEnv<'tcx>,
-    substs: SubstsRef<'tcx>,
+    args: GenericArgsRef<'tcx>,
     variant: &VariantDef,
 ) -> bool {
     variant.fields.iter().any(|field| {
-        let f_ty = field.ty(tcx, substs);
+        let f_ty = field.ty(tcx, args);
         f_ty.needs_drop(tcx, param_env)
     })
 }
diff --git a/compiler/rustc_mir_transform/src/remove_unneeded_drops.rs b/compiler/rustc_mir_transform/src/remove_unneeded_drops.rs
index 84ccf6e1f..08b2a6537 100644
--- a/compiler/rustc_mir_transform/src/remove_unneeded_drops.rs
+++ b/compiler/rustc_mir_transform/src/remove_unneeded_drops.rs
@@ -27,7 +27,7 @@ impl<'tcx> MirPass<'tcx> for RemoveUnneededDrops {
             if ty.ty.needs_drop(tcx, param_env) {
                 continue;
             }
-            if !tcx.consider_optimizing(|| format!("RemoveUnneededDrops {:?} ", did)) {
+            if !tcx.consider_optimizing(|| format!("RemoveUnneededDrops {did:?} ")) {
                 continue;
            }
            debug!("SUCCESS: replacing `drop` with goto({:?})", target);
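Editorial aside: both drop-removal passes above hinge on whether a type can need dropping at all. The type-level half of that test is observable from stable Rust via `std::mem::needs_drop`:

use std::mem::needs_drop;

// A `Drop` terminator is worth keeping only if the place may be initialized
// *and* its type can actually run drop glue.
fn main() {
    assert!(needs_drop::<String>());        // owns a heap buffer
    assert!(!needs_drop::<(u32, bool)>());  // trivially destructible
    assert!(!needs_drop::<&String>());      // references never run Drop
}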
diff --git a/compiler/rustc_mir_transform/src/remove_zsts.rs b/compiler/rustc_mir_transform/src/remove_zsts.rs
index 1f37f03cf..9c6c55b08 100644
--- a/compiler/rustc_mir_transform/src/remove_zsts.rs
+++ b/compiler/rustc_mir_transform/src/remove_zsts.rs
@@ -15,7 +15,7 @@ impl<'tcx> MirPass<'tcx> for RemoveZsts {
     fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
         // Avoid query cycles (generators require optimized MIR for layout).
-        if tcx.type_of(body.source.def_id()).subst_identity().is_generator() {
+        if tcx.type_of(body.source.def_id()).instantiate_identity().is_generator() {
             return;
         }
         let param_env = tcx.param_env_reveal_all_normalized(body.source.def_id());
@@ -102,7 +102,7 @@ impl<'tcx> MutVisitor<'tcx> for Replacer<'_, 'tcx> {
             let op_ty = operand.ty(self.local_decls, self.tcx);
             if self.known_to_be_zst(op_ty)
                 && self.tcx.consider_optimizing(|| {
-                    format!("RemoveZsts - Operand: {:?} Location: {:?}", operand, loc)
+                    format!("RemoveZsts - Operand: {operand:?} Location: {loc:?}")
                 })
             {
                 *operand = Operand::Constant(Box::new(self.make_zst(op_ty)))
diff --git a/compiler/rustc_mir_transform/src/shim.rs b/compiler/rustc_mir_transform/src/shim.rs
index b176db3c9..223dc59c6 100644
--- a/compiler/rustc_mir_transform/src/shim.rs
+++ b/compiler/rustc_mir_transform/src/shim.rs
@@ -3,8 +3,8 @@ use rustc_hir::def_id::DefId;
 use rustc_hir::lang_items::LangItem;
 use rustc_middle::mir::*;
 use rustc_middle::query::Providers;
-use rustc_middle::ty::InternalSubsts;
-use rustc_middle::ty::{self, EarlyBinder, GeneratorSubsts, Ty, TyCtxt};
+use rustc_middle::ty::GenericArgs;
+use rustc_middle::ty::{self, EarlyBinder, GeneratorArgs, Ty, TyCtxt};
 use rustc_target::abi::{FieldIdx, VariantIdx, FIRST_VARIANT};
 
 use rustc_index::{Idx, IndexVec};
@@ -69,10 +69,19 @@ fn make_shim<'tcx>(tcx: TyCtxt<'tcx>, instance: ty::InstanceDef<'tcx>) -> Body<'
         ty::InstanceDef::DropGlue(def_id, ty) => {
             // FIXME(#91576): Drop shims for generators aren't subject to the MIR passes at the end
             // of this function. Is this intentional?
-            if let Some(ty::Generator(gen_def_id, substs, _)) = ty.map(Ty::kind) {
+            if let Some(ty::Generator(gen_def_id, args, _)) = ty.map(Ty::kind) {
                 let body = tcx.optimized_mir(*gen_def_id).generator_drop().unwrap();
-                let body = EarlyBinder::bind(body.clone()).subst(tcx, substs);
+                let mut body = EarlyBinder::bind(body.clone()).instantiate(tcx, args);
                 debug!("make_shim({:?}) = {:?}", instance, body);
+
+                // Run empty passes to mark phase change and perform validation.
+                pm::run_passes(
+                    tcx,
+                    &mut body,
+                    &[],
+                    Some(MirPhase::Runtime(RuntimePhase::Optimized)),
+                );
+
                 return body;
             }
 
@@ -160,12 +169,12 @@ fn build_drop_shim<'tcx>(tcx: TyCtxt<'tcx>, def_id: DefId, ty: Option<Ty<'tcx>>)
     assert!(!matches!(ty, Some(ty) if ty.is_generator()));
 
-    let substs = if let Some(ty) = ty {
-        tcx.mk_substs(&[ty.into()])
+    let args = if let Some(ty) = ty {
+        tcx.mk_args(&[ty.into()])
     } else {
-        InternalSubsts::identity_for_item(tcx, def_id)
+        GenericArgs::identity_for_item(tcx, def_id)
     };
-    let sig = tcx.fn_sig(def_id).subst(tcx, substs);
+    let sig = tcx.fn_sig(def_id).instantiate(tcx, args);
     let sig = tcx.erase_late_bound_regions(sig);
     let span = tcx.def_span(def_id);
 
@@ -377,12 +386,10 @@ fn build_clone_shim<'tcx>(tcx: TyCtxt<'tcx>, def_id: DefId, self_ty: Ty<'tcx>) -
     match self_ty.kind() {
         _ if is_copy => builder.copy_shim(),
-        ty::Closure(_, substs) => {
-            builder.tuple_like_shim(dest, src, substs.as_closure().upvar_tys())
-        }
+        ty::Closure(_, args) => builder.tuple_like_shim(dest, src, args.as_closure().upvar_tys()),
         ty::Tuple(..) => builder.tuple_like_shim(dest, src, self_ty.tuple_fields()),
-        ty::Generator(gen_def_id, substs, hir::Movability::Movable) => {
-            builder.generator_shim(dest, src, *gen_def_id, substs.as_generator())
+        ty::Generator(gen_def_id, args, hir::Movability::Movable) => {
+            builder.generator_shim(dest, src, *gen_def_id, args.as_generator())
         }
         _ => bug!("clone shim for `{:?}` which is not `Copy` and is not an aggregate", self_ty),
     };
@@ -404,7 +411,7 @@ impl<'tcx> CloneShimBuilder<'tcx> {
         // we must subst the self_ty because it's
         // otherwise going to be TySelf and we can't index
         // or access fields of a Place of type TySelf.
-        let sig = tcx.fn_sig(def_id).subst(tcx, &[self_ty.into()]);
+        let sig = tcx.fn_sig(def_id).instantiate(tcx, &[self_ty.into()]);
         let sig = tcx.erase_late_bound_regions(sig);
         let span = tcx.def_span(def_id);
 
@@ -587,17 +594,17 @@ impl<'tcx> CloneShimBuilder<'tcx> {
         dest: Place<'tcx>,
         src: Place<'tcx>,
         gen_def_id: DefId,
-        substs: GeneratorSubsts<'tcx>,
+        args: GeneratorArgs<'tcx>,
     ) {
         self.block(vec![], TerminatorKind::Goto { target: self.block_index_offset(3) }, false);
         let unwind = self.block(vec![], TerminatorKind::Resume, true);
         // This will get overwritten with a switch once we know the target blocks
         let switch = self.block(vec![], TerminatorKind::Unreachable, false);
-        let unwind = self.clone_fields(dest, src, switch, unwind, substs.upvar_tys());
+        let unwind = self.clone_fields(dest, src, switch, unwind, args.upvar_tys());
         let target = self.block(vec![], TerminatorKind::Return, false);
         let unreachable = self.block(vec![], TerminatorKind::Unreachable, false);
-        let mut cases = Vec::with_capacity(substs.state_tys(gen_def_id, self.tcx).count());
-        for (index, state_tys) in substs.state_tys(gen_def_id, self.tcx).enumerate() {
+        let mut cases = Vec::with_capacity(args.state_tys(gen_def_id, self.tcx).count());
+        for (index, state_tys) in args.state_tys(gen_def_id, self.tcx).enumerate() {
             let variant_index = VariantIdx::new(index);
             let dest = self.tcx.mk_place_downcast_unnamed(dest, variant_index);
             let src = self.tcx.mk_place_downcast_unnamed(src, variant_index);
@@ -613,7 +620,7 @@ impl<'tcx> CloneShimBuilder<'tcx> {
             cases.push((index as u128, start_block));
             let _final_cleanup_block = self.clone_fields(dest, src, target, unwind, state_tys);
         }
-        let discr_ty = substs.discr_ty(self.tcx);
+        let discr_ty = args.discr_ty(self.tcx);
         let temp = self.make_place(Mutability::Mut, discr_ty);
         let rvalue = Rvalue::Discriminant(src);
         let statement = self.make_statement(StatementKind::Assign(Box::new((temp, rvalue))));
@@ -642,7 +649,7 @@ fn build_call_shim<'tcx>(
     // `FnPtrShim` contains the fn pointer type that a call shim is being built for - this is used
     // to substitute into the signature of the shim. It is not necessary for users of this
     // MIR body to perform further substitutions (see `InstanceDef::has_polymorphic_mir_body`).
-    let (sig_substs, untuple_args) = if let ty::InstanceDef::FnPtrShim(_, ty) = instance {
+    let (sig_args, untuple_args) = if let ty::InstanceDef::FnPtrShim(_, ty) = instance {
         let sig = tcx.erase_late_bound_regions(ty.fn_sig(tcx));
 
         let untuple_args = sig.inputs();
@@ -659,11 +666,11 @@ fn build_call_shim<'tcx>(
     let sig = tcx.fn_sig(def_id);
     let sig = sig.map_bound(|sig| tcx.erase_late_bound_regions(sig));
 
-    assert_eq!(sig_substs.is_some(), !instance.has_polymorphic_mir_body());
-    let mut sig = if let Some(sig_substs) = sig_substs {
-        sig.subst(tcx, &sig_substs)
+    assert_eq!(sig_args.is_some(), !instance.has_polymorphic_mir_body());
+    let mut sig = if let Some(sig_args) = sig_args {
+        sig.instantiate(tcx, &sig_args)
     } else {
-        sig.subst_identity()
+        sig.instantiate_identity()
     };
 
     if let CallKind::Indirect(fnty) = call_kind {
@@ -751,7 +758,7 @@ fn build_call_shim<'tcx>(
 
         // `FnDef` call with optional receiver.
         CallKind::Direct(def_id) => {
-            let ty = tcx.type_of(def_id).subst_identity();
+            let ty = tcx.type_of(def_id).instantiate_identity();
             (
                 Operand::Constant(Box::new(Constant {
                     span,
@@ -868,12 +875,12 @@ pub fn build_adt_ctor(tcx: TyCtxt<'_>, ctor_id: DefId) -> Body<'_> {
     // Normalize the sig.
     let sig = tcx
         .fn_sig(ctor_id)
-        .subst_identity()
+        .instantiate_identity()
         .no_bound_vars()
         .expect("LBR in ADT constructor signature");
     let sig = tcx.normalize_erasing_regions(param_env, sig);
 
-    let ty::Adt(adt_def, substs) = sig.output().kind() else {
+    let ty::Adt(adt_def, args) = sig.output().kind() else {
         bug!("unexpected type for ADT ctor {:?}", sig.output());
     };
 
@@ -896,7 +903,7 @@ pub fn build_adt_ctor(tcx: TyCtxt<'_>, ctor_id: DefId) -> Body<'_> {
     //     return;
 
     debug!("build_ctor: variant_index={:?}", variant_index);
 
-    let kind = AggregateKind::Adt(adt_def.did(), variant_index, substs, None, None);
+    let kind = AggregateKind::Adt(adt_def.did(), variant_index, args, None, None);
     let variant = adt_def.variant(variant_index);
     let statement = Statement {
         kind: StatementKind::Assign(Box::new((
@@ -941,7 +948,7 @@ pub fn build_adt_ctor(tcx: TyCtxt<'_>, ctor_id: DefId) -> Body<'_> {
 fn build_fn_ptr_addr_shim<'tcx>(tcx: TyCtxt<'tcx>, def_id: DefId, self_ty: Ty<'tcx>) -> Body<'tcx> {
     assert!(matches!(self_ty.kind(), ty::FnPtr(..)), "expected fn ptr, found {self_ty}");
     let span = tcx.def_span(def_id);
-    let Some(sig) = tcx.fn_sig(def_id).subst(tcx, &[self_ty.into()]).no_bound_vars() else {
+    let Some(sig) = tcx.fn_sig(def_id).instantiate(tcx, &[self_ty.into()]).no_bound_vars() else {
         span_bug!(span, "FnPtr::addr with bound vars for `{self_ty}`");
     };
     let locals = local_decls_for_sig(&sig, span);
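Editorial aside: `tuple_like_shim` above synthesizes `Clone` for closures and tuples by cloning each field in order. Written by hand, the generated shim amounts to an impl like this:

struct Pair<A, B>(A, B);

// Field-by-field clone, exactly what a derived impl or the shim produces.
impl<A: Clone, B: Clone> Clone for Pair<A, B> {
    fn clone(&self) -> Self {
        Pair(self.0.clone(), self.1.clone())
    }
}

fn main() {
    let p = Pair(String::from("upvar"), 7u8);
    let q = p.clone();
    assert_eq!(q.0, "upvar");
    assert_eq!(q.1, 7);
}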
diff --git a/compiler/rustc_mir_transform/src/simplify.rs b/compiler/rustc_mir_transform/src/simplify.rs
index e59219321..b7a51cfd6 100644
--- a/compiler/rustc_mir_transform/src/simplify.rs
+++ b/compiler/rustc_mir_transform/src/simplify.rs
@@ -199,7 +199,8 @@ impl<'a, 'tcx> CfgSimplifier<'a, 'tcx> {
         let last = current;
         *start = last;
         while let Some((current, mut terminator)) = terminators.pop() {
-            let Terminator { kind: TerminatorKind::Goto { ref mut target }, .. } = terminator else {
+            let Terminator { kind: TerminatorKind::Goto { ref mut target }, .. } = terminator
+            else {
                 unreachable!();
             };
             *changed |= *target != last;
diff --git a/compiler/rustc_mir_transform/src/sroa.rs b/compiler/rustc_mir_transform/src/sroa.rs
index 94e1da8e1..e66ae8ff8 100644
--- a/compiler/rustc_mir_transform/src/sroa.rs
+++ b/compiler/rustc_mir_transform/src/sroa.rs
@@ -12,7 +12,7 @@ pub struct ScalarReplacementOfAggregates;
 
 impl<'tcx> MirPass<'tcx> for ScalarReplacementOfAggregates {
     fn is_enabled(&self, sess: &rustc_session::Session) -> bool {
-        sess.mir_opt_level() >= 3
+        sess.mir_opt_level() >= 2
     }
 
     #[instrument(level = "debug", skip(self, tcx, body))]
@@ -20,7 +20,7 @@ impl<'tcx> MirPass<'tcx> for ScalarReplacementOfAggregates {
         debug!(def_id = ?body.source.def_id());
         // Avoid query cycles (generators require optimized MIR for layout).
-        if tcx.type_of(body.source.def_id()).subst_identity().is_generator() {
+        if tcx.type_of(body.source.def_id()).instantiate_identity().is_generator() {
             return;
         }
 
@@ -64,7 +64,7 @@ fn escaping_locals<'tcx>(
         if ty.is_union() || ty.is_enum() {
             return true;
         }
-        if let ty::Adt(def, _substs) = ty.kind() {
+        if let ty::Adt(def, _args) = ty.kind() {
             if def.repr().flags.contains(ReprFlags::IS_SIMD) {
                 // Exclude #[repr(simd)] types so that they are not de-optimized into an array
                 return true;
             }
@@ -161,7 +161,9 @@ struct ReplacementMap<'tcx> {
 
 impl<'tcx> ReplacementMap<'tcx> {
     fn replace_place(&self, tcx: TyCtxt<'tcx>, place: PlaceRef<'tcx>) -> Option<Place<'tcx>> {
-        let &[PlaceElem::Field(f, _), ref rest @ ..] = place.projection else { return None; };
+        let &[PlaceElem::Field(f, _), ref rest @ ..] = place.projection else {
+            return None;
+        };
         let fields = self.fragments[place.local].as_ref()?;
         let (_, new_local) = fields[f]?;
         Some(Place { local: new_local, projection: tcx.mk_place_elems(&rest) })
diff --git a/compiler/rustc_mir_transform/src/ssa.rs b/compiler/rustc_mir_transform/src/ssa.rs
index 8dc2dfe13..04bc461c8 100644
--- a/compiler/rustc_mir_transform/src/ssa.rs
+++ b/compiler/rustc_mir_transform/src/ssa.rs
@@ -266,9 +266,11 @@ fn compute_copy_classes(ssa: &mut SsaLocals, body: &Body<'_>) {
     let mut copies = IndexVec::from_fn_n(|l| l, body.local_decls.len());
 
     for (local, rvalue, _) in ssa.assignments(body) {
-        let (Rvalue::Use(Operand::Copy(place) | Operand::Move(place)) | Rvalue::CopyForDeref(place))
-            = rvalue
-        else { continue };
+        let (Rvalue::Use(Operand::Copy(place) | Operand::Move(place))
+        | Rvalue::CopyForDeref(place)) = rvalue
+        else {
+            continue;
+        };
 
         let Some(rhs) = place.as_local() else { continue };
         let local_ty = body.local_decls()[local].ty;
diff --git a/compiler/rustc_mir_transform/src/uninhabited_enum_branching.rs b/compiler/rustc_mir_transform/src/uninhabited_enum_branching.rs
index 5389b9f52..092bcb5c9 100644
--- a/compiler/rustc_mir_transform/src/uninhabited_enum_branching.rs
+++ b/compiler/rustc_mir_transform/src/uninhabited_enum_branching.rs
@@ -105,7 +105,8 @@ impl<'tcx> MirPass<'tcx> for UninhabitedEnumBranching {
         for bb in body.basic_blocks.indices() {
             trace!("processing block {:?}", bb);
 
-            let Some(discriminant_ty) = get_switched_on_type(&body.basic_blocks[bb], tcx, body) else {
+            let Some(discriminant_ty) = get_switched_on_type(&body.basic_blocks[bb], tcx, body)
+            else {
                 continue;
            };
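Editorial aside: `UninhabitedEnumBranching` above rewrites switches over enum discriminants whose variants cannot exist. A runnable illustration of the underlying language fact, using `Infallible` as the uninhabited payload:

use std::convert::Infallible;

#[allow(dead_code)] // `Never` cannot be constructed; that is the point.
enum Event {
    Ping(u32),
    Never(Infallible),
}

// A MIR switch over the discriminant only ever needs the `Ping` target;
// the pass marks the `Never` edge unreachable. Safe code can already
// express that statically with an empty match:
fn handle(e: Event) -> u32 {
    match e {
        Event::Ping(n) => n,
        Event::Never(x) => match x {}, // uninhabited, statically unreachable
    }
}

fn main() {
    assert_eq!(handle(Event::Ping(7)), 7);
}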