author     Daniel Baumann <daniel.baumann@progress-linux.org>   2024-04-17 12:02:58 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>   2024-04-17 12:02:58 +0000
commit     698f8c2f01ea549d77d7dc3338a12e04c11057b9 (patch)
tree       173a775858bd501c378080a10dca74132f05bc50 /compiler/rustc_mir_build/src/build
parent     Initial commit. (diff)
Adding upstream version 1.64.0+dfsg1. (upstream/1.64.0+dfsg1)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'compiler/rustc_mir_build/src/build')
-rw-r--r--  compiler/rustc_mir_build/src/build/block.rs | 240
-rw-r--r--  compiler/rustc_mir_build/src/build/cfg.rs | 113
-rw-r--r--  compiler/rustc_mir_build/src/build/expr/as_constant.rs | 152
-rw-r--r--  compiler/rustc_mir_build/src/build/expr/as_operand.rs | 184
-rw-r--r--  compiler/rustc_mir_build/src/build/expr/as_place.rs | 820
-rw-r--r--  compiler/rustc_mir_build/src/build/expr/as_rvalue.rs | 694
-rw-r--r--  compiler/rustc_mir_build/src/build/expr/as_temp.rs | 119
-rw-r--r--  compiler/rustc_mir_build/src/build/expr/category.rs | 92
-rw-r--r--  compiler/rustc_mir_build/src/build/expr/into.rs | 599
-rw-r--r--  compiler/rustc_mir_build/src/build/expr/mod.rs | 70
-rw-r--r--  compiler/rustc_mir_build/src/build/expr/stmt.rs | 149
-rw-r--r--  compiler/rustc_mir_build/src/build/matches/mod.rs | 2354
-rw-r--r--  compiler/rustc_mir_build/src/build/matches/simplify.rs | 318
-rw-r--r--  compiler/rustc_mir_build/src/build/matches/test.rs | 837
-rw-r--r--  compiler/rustc_mir_build/src/build/matches/util.rs | 109
-rw-r--r--  compiler/rustc_mir_build/src/build/misc.rs | 75
-rw-r--r--  compiler/rustc_mir_build/src/build/mod.rs | 1171
-rw-r--r--  compiler/rustc_mir_build/src/build/scope.rs | 1395
18 files changed, 9491 insertions, 0 deletions
diff --git a/compiler/rustc_mir_build/src/build/block.rs b/compiler/rustc_mir_build/src/build/block.rs
new file mode 100644
index 000000000..687560012
--- /dev/null
+++ b/compiler/rustc_mir_build/src/build/block.rs
@@ -0,0 +1,240 @@
+use crate::build::matches::ArmHasGuard;
+use crate::build::ForGuard::OutsideGuard;
+use crate::build::{BlockAnd, BlockAndExtension, BlockFrame, Builder};
+use rustc_middle::thir::*;
+use rustc_middle::{mir::*, ty};
+use rustc_span::Span;
+
+impl<'a, 'tcx> Builder<'a, 'tcx> {
+ pub(crate) fn ast_block(
+ &mut self,
+ destination: Place<'tcx>,
+ block: BasicBlock,
+ ast_block: &Block,
+ source_info: SourceInfo,
+ ) -> BlockAnd<()> {
+ let Block {
+ region_scope,
+ opt_destruction_scope,
+ span,
+ ref stmts,
+ expr,
+ targeted_by_break,
+ safety_mode,
+ } = *ast_block;
+ let expr = expr.map(|expr| &self.thir[expr]);
+ self.in_opt_scope(opt_destruction_scope.map(|de| (de, source_info)), move |this| {
+ this.in_scope((region_scope, source_info), LintLevel::Inherited, move |this| {
+ if targeted_by_break {
+ this.in_breakable_scope(None, destination, span, |this| {
+ Some(this.ast_block_stmts(
+ destination,
+ block,
+ span,
+ &stmts,
+ expr,
+ safety_mode,
+ ))
+ })
+ } else {
+ this.ast_block_stmts(destination, block, span, &stmts, expr, safety_mode)
+ }
+ })
+ })
+ }
+
+ fn ast_block_stmts(
+ &mut self,
+ destination: Place<'tcx>,
+ mut block: BasicBlock,
+ span: Span,
+ stmts: &[StmtId],
+ expr: Option<&Expr<'tcx>>,
+ safety_mode: BlockSafety,
+ ) -> BlockAnd<()> {
+ let this = self;
+
+ // This convoluted structure is to avoid using recursion as we walk down a list
+ // of statements. Basically, the structure we get back is something like:
+ //
+ // let x = <init> in {
+ // expr1;
+ // let y = <init> in {
+ // expr2;
+ // expr3;
+ // ...
+ // }
+ // }
+ //
+        // The let bindings are valid till the end of the block, so all we have to do is pop all
+ // the let-scopes at the end.
+ //
+ // First we build all the statements in the block.
+ let mut let_scope_stack = Vec::with_capacity(8);
+ let outer_source_scope = this.source_scope;
+ let outer_in_scope_unsafe = this.in_scope_unsafe;
+ this.update_source_scope_for_safety_mode(span, safety_mode);
+
+ let source_info = this.source_info(span);
+ for stmt in stmts {
+ let Stmt { ref kind, opt_destruction_scope } = this.thir[*stmt];
+ match kind {
+ StmtKind::Expr { scope, expr } => {
+ this.block_context.push(BlockFrame::Statement { ignores_expr_result: true });
+ unpack!(
+ block = this.in_opt_scope(
+ opt_destruction_scope.map(|de| (de, source_info)),
+ |this| {
+ let si = (*scope, source_info);
+ this.in_scope(si, LintLevel::Inherited, |this| {
+ this.stmt_expr(block, &this.thir[*expr], Some(*scope))
+ })
+ }
+ )
+ );
+ }
+ StmtKind::Let {
+ remainder_scope,
+ init_scope,
+ ref pattern,
+ initializer,
+ lint_level,
+ else_block,
+ } => {
+ let ignores_expr_result = matches!(*pattern.kind, PatKind::Wild);
+ this.block_context.push(BlockFrame::Statement { ignores_expr_result });
+
+ // Enter the remainder scope, i.e., the bindings' destruction scope.
+ this.push_scope((*remainder_scope, source_info));
+ let_scope_stack.push(remainder_scope);
+
+ // Declare the bindings, which may create a source scope.
+ let remainder_span = remainder_scope.span(this.tcx, this.region_scope_tree);
+
+ let visibility_scope =
+ Some(this.new_source_scope(remainder_span, LintLevel::Inherited, None));
+
+ // Evaluate the initializer, if present.
+ if let Some(init) = initializer {
+ let init = &this.thir[*init];
+ let initializer_span = init.span;
+
+ unpack!(
+ block = this.in_opt_scope(
+ opt_destruction_scope.map(|de| (de, source_info)),
+ |this| {
+ let scope = (*init_scope, source_info);
+ this.in_scope(scope, *lint_level, |this| {
+ if let Some(else_block) = else_block {
+ this.ast_let_else(
+ block,
+ init,
+ initializer_span,
+ else_block,
+ visibility_scope,
+ *remainder_scope,
+ remainder_span,
+ pattern,
+ )
+ } else {
+ this.declare_bindings(
+ visibility_scope,
+ remainder_span,
+ pattern,
+ ArmHasGuard(false),
+ Some((None, initializer_span)),
+ );
+ this.expr_into_pattern(block, pattern.clone(), init) // irrefutable pattern
+ }
+ })
+ },
+ )
+ )
+ } else {
+ let scope = (*init_scope, source_info);
+ unpack!(this.in_scope(scope, *lint_level, |this| {
+ this.declare_bindings(
+ visibility_scope,
+ remainder_span,
+ pattern,
+ ArmHasGuard(false),
+ None,
+ );
+ block.unit()
+ }));
+
+ debug!("ast_block_stmts: pattern={:?}", pattern);
+ this.visit_primary_bindings(
+ pattern,
+ UserTypeProjections::none(),
+ &mut |this, _, _, _, node, span, _, _| {
+ this.storage_live_binding(block, node, span, OutsideGuard, true);
+ this.schedule_drop_for_binding(node, span, OutsideGuard);
+ },
+ )
+ }
+
+ // Enter the visibility scope, after evaluating the initializer.
+ if let Some(source_scope) = visibility_scope {
+ this.source_scope = source_scope;
+ }
+ }
+ }
+
+ let popped = this.block_context.pop();
+ assert!(popped.map_or(false, |bf| bf.is_statement()));
+ }
+
+        // Then, the block may have an optional trailing expression, the “return” value
+ // of the block, which is stored into `destination`.
+ let tcx = this.tcx;
+ let destination_ty = destination.ty(&this.local_decls, tcx).ty;
+ if let Some(expr) = expr {
+ let tail_result_is_ignored =
+ destination_ty.is_unit() || this.block_context.currently_ignores_tail_results();
+ this.block_context
+ .push(BlockFrame::TailExpr { tail_result_is_ignored, span: expr.span });
+
+ unpack!(block = this.expr_into_dest(destination, block, expr));
+ let popped = this.block_context.pop();
+
+ assert!(popped.map_or(false, |bf| bf.is_tail_expr()));
+ } else {
+ // If a block has no trailing expression, then it is given an implicit return type.
+ // This return type is usually `()`, unless the block is diverging, in which case the
+ // return type is `!`. For the unit type, we need to actually return the unit, but in
+ // the case of `!`, no return value is required, as the block will never return.
+ // Opaque types of empty bodies also need this unit assignment, in order to infer that their
+ // type is actually unit. Otherwise there will be no defining use found in the MIR.
+ if destination_ty.is_unit() || matches!(destination_ty.kind(), ty::Opaque(..)) {
+ // We only want to assign an implicit `()` as the return value of the block if the
+ // block does not diverge. (Otherwise, we may try to assign a unit to a `!`-type.)
+ this.cfg.push_assign_unit(block, source_info, destination, this.tcx);
+ }
+ }
+ // Finally, we pop all the let scopes before exiting out from the scope of block
+ // itself.
+ for scope in let_scope_stack.into_iter().rev() {
+ unpack!(block = this.pop_scope((*scope, source_info), block));
+ }
+ // Restore the original source scope.
+ this.source_scope = outer_source_scope;
+ this.in_scope_unsafe = outer_in_scope_unsafe;
+ block.unit()
+ }
+
+ /// If we are entering an unsafe block, create a new source scope
+ fn update_source_scope_for_safety_mode(&mut self, span: Span, safety_mode: BlockSafety) {
+        debug!("update_source_scope_for_safety_mode({:?}, {:?})", span, safety_mode);
+ let new_unsafety = match safety_mode {
+ BlockSafety::Safe => return,
+ BlockSafety::BuiltinUnsafe => Safety::BuiltinUnsafe,
+ BlockSafety::ExplicitUnsafe(hir_id) => {
+ self.in_scope_unsafe = Safety::ExplicitUnsafe(hir_id);
+ Safety::ExplicitUnsafe(hir_id)
+ }
+ };
+
+ self.source_scope = self.new_source_scope(span, LintLevel::Inherited, Some(new_unsafety));
+ }
+}
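
For orientation, here is a minimal source-level sketch of the block shapes the lowering above handles: an expression statement whose result is ignored, a `let` with an initializer, a `let ... else` whose else-block must diverge, and a trailing expression written into the block's destination (blocks without one get the implicit `()` assignment described above). This is only an illustration of the surface syntax, not compiler code.

fn lowered_shapes(opt: Option<i32>) -> i32 {
    // Expression statement: lowered as a BlockFrame::Statement with the result ignored.
    println!("side effect");
    // `let` with an initializer: bindings are declared, then the initializer is
    // written into the (irrefutable) pattern.
    let doubled = 2 * 3;
    // `let ... else`: the else-block must diverge; handled by `ast_let_else` above.
    let Some(value) = opt else { return -1 };
    // Trailing expression: becomes the block's value, written into `destination`.
    doubled + value
}

fn main() {
    assert_eq!(lowered_shapes(Some(4)), 10);
    assert_eq!(lowered_shapes(None), -1);
}
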
diff --git a/compiler/rustc_mir_build/src/build/cfg.rs b/compiler/rustc_mir_build/src/build/cfg.rs
new file mode 100644
index 000000000..d7b4b1f73
--- /dev/null
+++ b/compiler/rustc_mir_build/src/build/cfg.rs
@@ -0,0 +1,113 @@
+//! Routines for manipulating the control-flow graph.
+
+use crate::build::CFG;
+use rustc_middle::mir::*;
+use rustc_middle::ty::TyCtxt;
+
+impl<'tcx> CFG<'tcx> {
+ pub(crate) fn block_data(&self, blk: BasicBlock) -> &BasicBlockData<'tcx> {
+ &self.basic_blocks[blk]
+ }
+
+ pub(crate) fn block_data_mut(&mut self, blk: BasicBlock) -> &mut BasicBlockData<'tcx> {
+ &mut self.basic_blocks[blk]
+ }
+
+ // llvm.org/PR32488 makes this function use an excess of stack space. Mark
+ // it as #[inline(never)] to keep rustc's stack use in check.
+ #[inline(never)]
+ pub(crate) fn start_new_block(&mut self) -> BasicBlock {
+ self.basic_blocks.push(BasicBlockData::new(None))
+ }
+
+ pub(crate) fn start_new_cleanup_block(&mut self) -> BasicBlock {
+ let bb = self.start_new_block();
+ self.block_data_mut(bb).is_cleanup = true;
+ bb
+ }
+
+ pub(crate) fn push(&mut self, block: BasicBlock, statement: Statement<'tcx>) {
+ debug!("push({:?}, {:?})", block, statement);
+ self.block_data_mut(block).statements.push(statement);
+ }
+
+ pub(crate) fn push_assign(
+ &mut self,
+ block: BasicBlock,
+ source_info: SourceInfo,
+ place: Place<'tcx>,
+ rvalue: Rvalue<'tcx>,
+ ) {
+ self.push(
+ block,
+ Statement { source_info, kind: StatementKind::Assign(Box::new((place, rvalue))) },
+ );
+ }
+
+ pub(crate) fn push_assign_constant(
+ &mut self,
+ block: BasicBlock,
+ source_info: SourceInfo,
+ temp: Place<'tcx>,
+ constant: Constant<'tcx>,
+ ) {
+ self.push_assign(
+ block,
+ source_info,
+ temp,
+ Rvalue::Use(Operand::Constant(Box::new(constant))),
+ );
+ }
+
+ pub(crate) fn push_assign_unit(
+ &mut self,
+ block: BasicBlock,
+ source_info: SourceInfo,
+ place: Place<'tcx>,
+ tcx: TyCtxt<'tcx>,
+ ) {
+ self.push_assign(
+ block,
+ source_info,
+ place,
+ Rvalue::Use(Operand::Constant(Box::new(Constant {
+ span: source_info.span,
+ user_ty: None,
+ literal: ConstantKind::zero_sized(tcx.types.unit),
+ }))),
+ );
+ }
+
+ pub(crate) fn push_fake_read(
+ &mut self,
+ block: BasicBlock,
+ source_info: SourceInfo,
+ cause: FakeReadCause,
+ place: Place<'tcx>,
+ ) {
+ let kind = StatementKind::FakeRead(Box::new((cause, place)));
+ let stmt = Statement { source_info, kind };
+ self.push(block, stmt);
+ }
+
+ pub(crate) fn terminate(
+ &mut self,
+ block: BasicBlock,
+ source_info: SourceInfo,
+ kind: TerminatorKind<'tcx>,
+ ) {
+ debug!("terminating block {:?} <- {:?}", block, kind);
+ debug_assert!(
+ self.block_data(block).terminator.is_none(),
+ "terminate: block {:?}={:?} already has a terminator set",
+ block,
+ self.block_data(block)
+ );
+ self.block_data_mut(block).terminator = Some(Terminator { source_info, kind });
+ }
+
+ /// In the `origin` block, push a `goto -> target` terminator.
+ pub(crate) fn goto(&mut self, origin: BasicBlock, source_info: SourceInfo, target: BasicBlock) {
+ self.terminate(origin, source_info, TerminatorKind::Goto { target })
+ }
+}
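
As a standalone illustration of the builder pattern above (deliberately simplified types, not the rustc-internal `CFG`/`BasicBlockData`), the sketch below shows the same invariants: statements are appended to a block, and each block receives exactly one terminator, with `goto` emitting a `Goto { target }` terminator.

#[derive(Debug)]
enum Terminator {
    Goto { target: usize },
    Return,
}

#[derive(Debug, Default)]
struct BlockData {
    statements: Vec<String>,
    terminator: Option<Terminator>,
}

#[derive(Debug, Default)]
struct Cfg {
    basic_blocks: Vec<BlockData>,
}

impl Cfg {
    fn start_new_block(&mut self) -> usize {
        self.basic_blocks.push(BlockData::default());
        self.basic_blocks.len() - 1
    }
    fn push(&mut self, block: usize, statement: &str) {
        self.basic_blocks[block].statements.push(statement.to_string());
    }
    fn terminate(&mut self, block: usize, kind: Terminator) {
        // Mirrors the debug_assert in CFG::terminate: a block is terminated exactly once.
        assert!(self.basic_blocks[block].terminator.is_none(), "block {} already terminated", block);
        self.basic_blocks[block].terminator = Some(kind);
    }
    fn goto(&mut self, origin: usize, target: usize) {
        self.terminate(origin, Terminator::Goto { target });
    }
}

fn main() {
    let mut cfg = Cfg::default();
    let bb0 = cfg.start_new_block();
    let bb1 = cfg.start_new_block();
    cfg.push(bb0, "_1 = const ()");
    cfg.goto(bb0, bb1); // bb0: goto -> bb1
    cfg.terminate(bb1, Terminator::Return);
    println!("{:#?}", cfg);
}
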
diff --git a/compiler/rustc_mir_build/src/build/expr/as_constant.rs b/compiler/rustc_mir_build/src/build/expr/as_constant.rs
new file mode 100644
index 000000000..648d10b9e
--- /dev/null
+++ b/compiler/rustc_mir_build/src/build/expr/as_constant.rs
@@ -0,0 +1,152 @@
+//! See docs in build/expr/mod.rs
+
+use crate::build::{parse_float_into_constval, Builder};
+use rustc_ast as ast;
+use rustc_hir::def_id::DefId;
+use rustc_middle::mir::interpret::{
+ Allocation, ConstValue, LitToConstError, LitToConstInput, Scalar,
+};
+use rustc_middle::mir::*;
+use rustc_middle::thir::*;
+use rustc_middle::ty::subst::SubstsRef;
+use rustc_middle::ty::{self, CanonicalUserTypeAnnotation, Ty, TyCtxt};
+use rustc_target::abi::Size;
+
+impl<'a, 'tcx> Builder<'a, 'tcx> {
+ /// Compile `expr`, yielding a compile-time constant. Assumes that
+ /// `expr` is a valid compile-time constant!
+ pub(crate) fn as_constant(&mut self, expr: &Expr<'tcx>) -> Constant<'tcx> {
+ let create_uneval_from_def_id =
+ |tcx: TyCtxt<'tcx>, def_id: DefId, ty: Ty<'tcx>, substs: SubstsRef<'tcx>| {
+ let uneval = ty::Unevaluated::new(ty::WithOptConstParam::unknown(def_id), substs);
+ tcx.mk_const(ty::ConstS { kind: ty::ConstKind::Unevaluated(uneval), ty })
+ };
+
+ let this = self;
+ let tcx = this.tcx;
+ let Expr { ty, temp_lifetime: _, span, ref kind } = *expr;
+ match *kind {
+ ExprKind::Scope { region_scope: _, lint_level: _, value } => {
+ this.as_constant(&this.thir[value])
+ }
+ ExprKind::Literal { lit, neg } => {
+ let literal =
+ match lit_to_mir_constant(tcx, LitToConstInput { lit: &lit.node, ty, neg }) {
+ Ok(c) => c,
+ Err(LitToConstError::Reported) => ConstantKind::Ty(tcx.const_error(ty)),
+ Err(LitToConstError::TypeError) => {
+                        bug!("encountered type error in `lit_to_mir_constant`")
+ }
+ };
+
+ Constant { span, user_ty: None, literal }
+ }
+ ExprKind::NonHirLiteral { lit, user_ty } => {
+ let user_ty = user_ty.map(|user_ty| {
+ this.canonical_user_type_annotations.push(CanonicalUserTypeAnnotation {
+ span,
+ user_ty,
+ inferred_ty: ty,
+ })
+ });
+ let literal = ConstantKind::Val(ConstValue::Scalar(Scalar::Int(lit)), ty);
+
+                Constant { span, user_ty, literal }
+ }
+ ExprKind::ZstLiteral { user_ty } => {
+ let user_ty = user_ty.map(|user_ty| {
+ this.canonical_user_type_annotations.push(CanonicalUserTypeAnnotation {
+ span,
+ user_ty,
+ inferred_ty: ty,
+ })
+ });
+ let literal = ConstantKind::Val(ConstValue::ZeroSized, ty);
+
+                Constant { span, user_ty, literal }
+ }
+ ExprKind::NamedConst { def_id, substs, user_ty } => {
+ let user_ty = user_ty.map(|user_ty| {
+ this.canonical_user_type_annotations.push(CanonicalUserTypeAnnotation {
+ span,
+ user_ty,
+ inferred_ty: ty,
+ })
+ });
+ let literal = ConstantKind::Ty(create_uneval_from_def_id(tcx, def_id, ty, substs));
+
+ Constant { user_ty, span, literal }
+ }
+ ExprKind::ConstParam { param, def_id: _ } => {
+ let const_param =
+ tcx.mk_const(ty::ConstS { kind: ty::ConstKind::Param(param), ty: expr.ty });
+ let literal = ConstantKind::Ty(const_param);
+
+ Constant { user_ty: None, span, literal }
+ }
+ ExprKind::ConstBlock { did: def_id, substs } => {
+ let literal = ConstantKind::Ty(create_uneval_from_def_id(tcx, def_id, ty, substs));
+
+ Constant { user_ty: None, span, literal }
+ }
+ ExprKind::StaticRef { alloc_id, ty, .. } => {
+ let const_val = ConstValue::Scalar(Scalar::from_pointer(alloc_id.into(), &tcx));
+ let literal = ConstantKind::Val(const_val, ty);
+
+ Constant { span, user_ty: None, literal }
+ }
+ _ => span_bug!(span, "expression is not a valid constant {:?}", kind),
+ }
+ }
+}
+
+#[instrument(skip(tcx, lit_input))]
+pub(crate) fn lit_to_mir_constant<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ lit_input: LitToConstInput<'tcx>,
+) -> Result<ConstantKind<'tcx>, LitToConstError> {
+ let LitToConstInput { lit, ty, neg } = lit_input;
+ let trunc = |n| {
+ let param_ty = ty::ParamEnv::reveal_all().and(ty);
+ let width = tcx.layout_of(param_ty).map_err(|_| LitToConstError::Reported)?.size;
+ trace!("trunc {} with size {} and shift {}", n, width.bits(), 128 - width.bits());
+ let result = width.truncate(n);
+ trace!("trunc result: {}", result);
+ Ok(ConstValue::Scalar(Scalar::from_uint(result, width)))
+ };
+
+ let value = match (lit, &ty.kind()) {
+ (ast::LitKind::Str(s, _), ty::Ref(_, inner_ty, _)) if inner_ty.is_str() => {
+ let s = s.as_str();
+ let allocation = Allocation::from_bytes_byte_aligned_immutable(s.as_bytes());
+ let allocation = tcx.intern_const_alloc(allocation);
+ ConstValue::Slice { data: allocation, start: 0, end: s.len() }
+ }
+ (ast::LitKind::ByteStr(data), ty::Ref(_, inner_ty, _))
+ if matches!(inner_ty.kind(), ty::Slice(_)) =>
+ {
+ let allocation = Allocation::from_bytes_byte_aligned_immutable(data as &[u8]);
+ let allocation = tcx.intern_const_alloc(allocation);
+ ConstValue::Slice { data: allocation, start: 0, end: data.len() }
+ }
+ (ast::LitKind::ByteStr(data), ty::Ref(_, inner_ty, _)) if inner_ty.is_array() => {
+ let id = tcx.allocate_bytes(data);
+ ConstValue::Scalar(Scalar::from_pointer(id.into(), &tcx))
+ }
+ (ast::LitKind::Byte(n), ty::Uint(ty::UintTy::U8)) => {
+ ConstValue::Scalar(Scalar::from_uint(*n, Size::from_bytes(1)))
+ }
+ (ast::LitKind::Int(n, _), ty::Uint(_)) | (ast::LitKind::Int(n, _), ty::Int(_)) => {
+ trunc(if neg { (*n as i128).overflowing_neg().0 as u128 } else { *n })?
+ }
+ (ast::LitKind::Float(n, _), ty::Float(fty)) => {
+ parse_float_into_constval(*n, *fty, neg).ok_or(LitToConstError::Reported)?
+ }
+ (ast::LitKind::Bool(b), ty::Bool) => ConstValue::Scalar(Scalar::from_bool(*b)),
+ (ast::LitKind::Char(c), ty::Char) => ConstValue::Scalar(Scalar::from_char(*c)),
+ (ast::LitKind::Err(_), _) => return Err(LitToConstError::Reported),
+ _ => return Err(LitToConstError::TypeError),
+ };
+
+ Ok(ConstantKind::Val(value, ty))
+}
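
The `trunc` closure above first negates the literal in two's complement when `neg` is set and then masks the value down to the bit width of the target type. A standalone sketch of that arithmetic (the `truncate_to_width` helper is made up for illustration and is not the rustc `Size::truncate` API):

// Keep only the low `bits` bits of a 128-bit value, as `Size::truncate` does
// for the layout width computed above.
fn truncate_to_width(n: u128, bits: u32) -> u128 {
    if bits >= 128 { n } else { n & ((1u128 << bits) - 1) }
}

fn main() {
    // `-1` as an `i8` literal: neg = true, n = 1, so negate as i128 and truncate to 8 bits.
    let negated = (1i128).overflowing_neg().0 as u128;
    assert_eq!(truncate_to_width(negated, 8), 0xff);
    // Truncating 300 to 8 bits wraps it to 44 (300 mod 256).
    assert_eq!(truncate_to_width(300, 8), 44);
    println!("ok");
}
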
diff --git a/compiler/rustc_mir_build/src/build/expr/as_operand.rs b/compiler/rustc_mir_build/src/build/expr/as_operand.rs
new file mode 100644
index 000000000..e707c373f
--- /dev/null
+++ b/compiler/rustc_mir_build/src/build/expr/as_operand.rs
@@ -0,0 +1,184 @@
+//! See docs in build/expr/mod.rs
+
+use crate::build::expr::category::Category;
+use crate::build::{BlockAnd, BlockAndExtension, Builder, NeedsTemporary};
+use rustc_middle::middle::region;
+use rustc_middle::mir::*;
+use rustc_middle::thir::*;
+
+impl<'a, 'tcx> Builder<'a, 'tcx> {
+ /// Returns an operand suitable for use until the end of the current
+ /// scope expression.
+ ///
+ /// The operand returned from this function will *not be valid*
+ /// after the current enclosing `ExprKind::Scope` has ended, so
+ /// please do *not* return it from functions to avoid bad
+ /// miscompiles.
+ pub(crate) fn as_local_operand(
+ &mut self,
+ block: BasicBlock,
+ expr: &Expr<'tcx>,
+ ) -> BlockAnd<Operand<'tcx>> {
+ let local_scope = self.local_scope();
+ self.as_operand(block, Some(local_scope), expr, None, NeedsTemporary::Maybe)
+ }
+
+ /// Returns an operand suitable for use until the end of the current scope expression and
+ /// suitable also to be passed as function arguments.
+ ///
+    /// The operand returned from this function will *not be valid* after the current enclosing
+    /// `ExprKind::Scope` has ended, so please do *not* return it from functions to avoid bad
+    /// miscompiles. This is almost always equivalent to `as_operand`, except for the particular
+    /// case of passing values of (potentially) unsized types "by value" (see details below).
+ ///
+ /// # Parameters of unsized types
+ ///
+ /// We tweak the handling of parameters of unsized type slightly to avoid the need to create a
+ /// local variable of unsized type. For example, consider this program:
+ ///
+ /// ```
+ /// #![feature(unsized_locals, unsized_fn_params)]
+ /// # use core::fmt::Debug;
+ /// fn foo(p: dyn Debug) { dbg!(p); }
+ ///
+ /// fn bar(box_p: Box<dyn Debug>) { foo(*box_p); }
+ /// ```
+ ///
+ /// Ordinarily, for sized types, we would compile the call `foo(*p)` like so:
+ ///
+ /// ```ignore (illustrative)
+ /// let tmp0 = *box_p; // tmp0 would be the operand returned by this function call
+ /// foo(tmp0)
+ /// ```
+ ///
+ /// But because the parameter to `foo` is of the unsized type `dyn Debug`, and because it is
+    /// being moved out of the deref of a box, we compile it slightly differently. The temporary `tmp0`
+ /// that we create *stores the entire box*, and the parameter to the call itself will be
+ /// `*tmp0`:
+ ///
+ /// ```ignore (illustrative)
+ /// let tmp0 = box_p; call foo(*tmp0)
+ /// ```
+ ///
+ /// This way, the temporary `tmp0` that we create has type `Box<dyn Debug>`, which is sized.
+ /// The value passed to the call (`*tmp0`) still has the `dyn Debug` type -- but the way that
+ /// calls are compiled means that this parameter will be passed "by reference", meaning that we
+ /// will actually provide a pointer to the interior of the box, and not move the `dyn Debug`
+ /// value to the stack.
+ ///
+ /// See #68034 for more details.
+ pub(crate) fn as_local_call_operand(
+ &mut self,
+ block: BasicBlock,
+ expr: &Expr<'tcx>,
+ ) -> BlockAnd<Operand<'tcx>> {
+ let local_scope = self.local_scope();
+ self.as_call_operand(block, Some(local_scope), expr)
+ }
+
+ /// Compile `expr` into a value that can be used as an operand.
+ /// If `expr` is a place like `x`, this will introduce a
+ /// temporary `tmp = x`, so that we capture the value of `x` at
+ /// this time.
+ ///
+ /// If we end up needing to create a temporary, then we will use
+ /// `local_info` as its `LocalInfo`, unless `as_temporary`
+ /// has already assigned it a non-`None` `LocalInfo`.
+ /// Normally, you should use `None` for `local_info`
+ ///
+ /// The operand is known to be live until the end of `scope`.
+ ///
+ /// Like `as_local_call_operand`, except that the argument will
+ /// not be valid once `scope` ends.
+ #[instrument(level = "debug", skip(self, scope))]
+ pub(crate) fn as_operand(
+ &mut self,
+ mut block: BasicBlock,
+ scope: Option<region::Scope>,
+ expr: &Expr<'tcx>,
+ local_info: Option<Box<LocalInfo<'tcx>>>,
+ needs_temporary: NeedsTemporary,
+ ) -> BlockAnd<Operand<'tcx>> {
+ let this = self;
+
+ if let ExprKind::Scope { region_scope, lint_level, value } = expr.kind {
+ let source_info = this.source_info(expr.span);
+ let region_scope = (region_scope, source_info);
+ return this.in_scope(region_scope, lint_level, |this| {
+ this.as_operand(block, scope, &this.thir[value], local_info, needs_temporary)
+ });
+ }
+
+ let category = Category::of(&expr.kind).unwrap();
+ debug!(?category, ?expr.kind);
+ match category {
+ Category::Constant if let NeedsTemporary::No = needs_temporary || !expr.ty.needs_drop(this.tcx, this.param_env) => {
+ let constant = this.as_constant(expr);
+ block.and(Operand::Constant(Box::new(constant)))
+ }
+ Category::Constant | Category::Place | Category::Rvalue(..) => {
+ let operand = unpack!(block = this.as_temp(block, scope, expr, Mutability::Mut));
+ if this.local_decls[operand].local_info.is_none() {
+ this.local_decls[operand].local_info = local_info;
+ }
+ block.and(Operand::Move(Place::from(operand)))
+ }
+ }
+ }
+
+ pub(crate) fn as_call_operand(
+ &mut self,
+ mut block: BasicBlock,
+ scope: Option<region::Scope>,
+ expr: &Expr<'tcx>,
+ ) -> BlockAnd<Operand<'tcx>> {
+ debug!("as_call_operand(block={:?}, expr={:?})", block, expr);
+ let this = self;
+
+ if let ExprKind::Scope { region_scope, lint_level, value } = expr.kind {
+ let source_info = this.source_info(expr.span);
+ let region_scope = (region_scope, source_info);
+ return this.in_scope(region_scope, lint_level, |this| {
+ this.as_call_operand(block, scope, &this.thir[value])
+ });
+ }
+
+ let tcx = this.tcx;
+
+ if tcx.features().unsized_fn_params {
+ let ty = expr.ty;
+ let span = expr.span;
+ let param_env = this.param_env;
+
+ if !ty.is_sized(tcx.at(span), param_env) {
+ // !sized means !copy, so this is an unsized move
+ assert!(!ty.is_copy_modulo_regions(tcx.at(span), param_env));
+
+ // As described above, detect the case where we are passing a value of unsized
+ // type, and that value is coming from the deref of a box.
+ if let ExprKind::Deref { arg } = expr.kind {
+ // Generate let tmp0 = arg0
+ let operand = unpack!(
+ block = this.as_temp(block, scope, &this.thir[arg], Mutability::Mut)
+ );
+
+ // Return the operand *tmp0 to be used as the call argument
+ let place = Place {
+ local: operand,
+ projection: tcx.intern_place_elems(&[PlaceElem::Deref]),
+ };
+
+ return block.and(Operand::Move(place));
+ }
+ }
+ }
+
+ this.as_operand(block, scope, expr, None, NeedsTemporary::Maybe)
+ }
+}
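
The `as_operand` dispatch above uses a constant directly only when the guard on the `Category::Constant` arm allows it (roughly: no temporary was requested, or the type has no drop glue); everything else is spilled into a fresh temporary and then moved. A simplified standalone sketch of that decision, with made-up stand-in types rather than the rustc enums:

#[derive(Debug)]
enum Category {
    Constant,
    Place,
    Rvalue,
}

#[derive(Debug, PartialEq)]
enum LoweredOperand {
    Constant,
    MoveFromTemp,
}

// Roughly mirrors the match in `as_operand`: a constant can be used in place
// unless a temporary was requested for a type that needs dropping.
fn lower_operand(category: Category, needs_temporary: bool, needs_drop: bool) -> LoweredOperand {
    match category {
        Category::Constant if !needs_temporary || !needs_drop => LoweredOperand::Constant,
        // Constants that fall through, places, and rvalues all go through a temp.
        _ => LoweredOperand::MoveFromTemp,
    }
}

fn main() {
    assert_eq!(lower_operand(Category::Constant, false, true), LoweredOperand::Constant);
    assert_eq!(lower_operand(Category::Constant, true, true), LoweredOperand::MoveFromTemp);
    assert_eq!(lower_operand(Category::Place, false, false), LoweredOperand::MoveFromTemp);
    assert_eq!(lower_operand(Category::Rvalue, false, false), LoweredOperand::MoveFromTemp);
    println!("ok");
}
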
diff --git a/compiler/rustc_mir_build/src/build/expr/as_place.rs b/compiler/rustc_mir_build/src/build/expr/as_place.rs
new file mode 100644
index 000000000..0c06aad4e
--- /dev/null
+++ b/compiler/rustc_mir_build/src/build/expr/as_place.rs
@@ -0,0 +1,820 @@
+//! See docs in build/expr/mod.rs
+
+use crate::build::expr::category::Category;
+use crate::build::ForGuard::{OutsideGuard, RefWithinGuard};
+use crate::build::{BlockAnd, BlockAndExtension, Builder};
+use rustc_hir::def_id::LocalDefId;
+use rustc_middle::hir::place::Projection as HirProjection;
+use rustc_middle::hir::place::ProjectionKind as HirProjectionKind;
+use rustc_middle::middle::region;
+use rustc_middle::mir::AssertKind::BoundsCheck;
+use rustc_middle::mir::*;
+use rustc_middle::thir::*;
+use rustc_middle::ty::AdtDef;
+use rustc_middle::ty::{self, CanonicalUserTypeAnnotation, Ty, TyCtxt, Variance};
+use rustc_span::Span;
+use rustc_target::abi::VariantIdx;
+
+use rustc_index::vec::Idx;
+
+use std::iter;
+
+/// The "outermost" place that holds this value.
+#[derive(Copy, Clone, Debug, PartialEq)]
+pub(crate) enum PlaceBase {
+ /// Denotes the start of a `Place`.
+ Local(Local),
+
+    /// When building a place for an expression within a closure, the place might start off as a
+ /// captured path. When `capture_disjoint_fields` is enabled, we might not know the capture
+ /// index (within the desugared closure) of the captured path until most of the projections
+ /// are applied. We use `PlaceBase::Upvar` to keep track of the root variable off of which the
+ /// captured path starts, the closure the capture belongs to and the trait the closure
+ /// implements.
+ ///
+ /// Once we have figured out the capture index, we can convert the place builder to start from
+ /// `PlaceBase::Local`.
+ ///
+ /// Consider the following example
+ /// ```rust
+ /// let t = (((10, 10), 10), 10);
+ ///
+ /// let c = || {
+ /// println!("{}", t.0.0.0);
+ /// };
+ /// ```
+ /// Here the THIR expression for `t.0.0.0` will be something like
+ ///
+ /// ```ignore (illustrative)
+ /// * Field(0)
+ /// * Field(0)
+ /// * Field(0)
+ /// * UpvarRef(t)
+ /// ```
+ ///
+ /// When `capture_disjoint_fields` is enabled, `t.0.0.0` is captured and we won't be able to
+ /// figure out that it is captured until all the `Field` projections are applied.
+ Upvar {
+ /// HirId of the upvar
+ var_hir_id: LocalVarId,
+ /// DefId of the closure
+ closure_def_id: LocalDefId,
+ /// The trait closure implements, `Fn`, `FnMut`, `FnOnce`
+ closure_kind: ty::ClosureKind,
+ },
+}
+
+/// `PlaceBuilder` is used to create places during MIR construction. It allows you to "build up" a
+/// place by pushing more and more projections onto the end, and then convert the final set into a
+/// place using the `into_place` method.
+///
+/// This is used internally when building a place for an expression like `a.b.c`. The fields `b`
+/// and `c` can be progressively pushed onto the place builder that is created when converting `a`.
+#[derive(Clone, Debug, PartialEq)]
+pub(crate) struct PlaceBuilder<'tcx> {
+ base: PlaceBase,
+ projection: Vec<PlaceElem<'tcx>>,
+}
+
+/// Given a list of MIR projections, convert them to a list of HIR `ProjectionKind`s.
+/// The projections are truncated to represent a path that might be captured by a
+/// closure/generator. This implies the vector returned from this function doesn't contain
+/// ProjectionElems `Downcast`, `ConstantIndex`, `Index`, or `Subslice` because those will never be
+/// part of a path that is captured by a closure. We stop applying projections once we see the first
+/// projection that isn't captured by a closure.
+fn convert_to_hir_projections_and_truncate_for_capture<'tcx>(
+ mir_projections: &[PlaceElem<'tcx>],
+) -> Vec<HirProjectionKind> {
+ let mut hir_projections = Vec::new();
+ let mut variant = None;
+
+ for mir_projection in mir_projections {
+ let hir_projection = match mir_projection {
+ ProjectionElem::Deref => HirProjectionKind::Deref,
+ ProjectionElem::Field(field, _) => {
+ let variant = variant.unwrap_or(VariantIdx::new(0));
+ HirProjectionKind::Field(field.index() as u32, variant)
+ }
+ ProjectionElem::Downcast(.., idx) => {
+ // We don't expect to see multi-variant enums here, as earlier
+ // phases will have truncated them already. However, there can
+ // still be downcasts, thanks to single-variant enums.
+ // We keep track of VariantIdx so we can use this information
+ // if the next ProjectionElem is a Field.
+ variant = Some(*idx);
+ continue;
+ }
+ ProjectionElem::Index(..)
+ | ProjectionElem::ConstantIndex { .. }
+ | ProjectionElem::Subslice { .. } => {
+ // We don't capture array-access projections.
+ // We can stop here as arrays are captured completely.
+ break;
+ }
+ };
+ variant = None;
+ hir_projections.push(hir_projection);
+ }
+
+ hir_projections
+}
+
+/// Return true if the `proj_possible_ancestor` represents an ancestor path
+/// to `proj_capture`, or `proj_possible_ancestor` is the same as `proj_capture`,
+/// assuming they both start off of the same root variable.
+///
+/// **Note:** It's the caller's responsibility to ensure that both lists of projections
+/// start off of the same root variable.
+///
+/// Eg: 1. `foo.x` which is represented using `projections=[Field(x)]` is an ancestor of
+/// `foo.x.y` which is represented using `projections=[Field(x), Field(y)]`.
+/// Note both `foo.x` and `foo.x.y` start off of the same root variable `foo`.
+/// 2. Since we only look at the projections, this function will return `bar.x` as a valid
+/// ancestor of `foo.x.y`. It's the caller's responsibility to ensure that both projection
+/// lists are applied to the same root variable.
+fn is_ancestor_or_same_capture(
+ proj_possible_ancestor: &[HirProjectionKind],
+ proj_capture: &[HirProjectionKind],
+) -> bool {
+    // We want `is_ancestor_or_same_capture("x.0.0", "x.0")` to return false.
+    // Therefore we can't just check if all projections are the same in the zipped iterator below.
+ if proj_possible_ancestor.len() > proj_capture.len() {
+ return false;
+ }
+
+ iter::zip(proj_possible_ancestor, proj_capture).all(|(a, b)| a == b)
+}
+
+/// Computes the index of a capture within the desugared closure, given the closure's
+/// `closure_min_captures` and the capture's index within the `ty::MinCaptureList`
+/// of its root variable `var_hir_id`.
+fn compute_capture_idx<'tcx>(
+ closure_min_captures: &ty::RootVariableMinCaptureList<'tcx>,
+ var_hir_id: LocalVarId,
+ root_var_idx: usize,
+) -> usize {
+ let mut res = 0;
+ for (var_id, capture_list) in closure_min_captures {
+ if *var_id == var_hir_id.0 {
+ res += root_var_idx;
+ break;
+ } else {
+ res += capture_list.len();
+ }
+ }
+
+ res
+}
+
+/// Given a closure, returns the index of a capture within the desugared closure struct and the
+/// `ty::CapturedPlace` which is the ancestor of the Place represented using the `var_hir_id`
+/// and `projection`.
+///
+/// Note there will be at most one ancestor for any given Place.
+///
+/// Returns `None` when the ancestor is not found.
+fn find_capture_matching_projections<'a, 'tcx>(
+ typeck_results: &'a ty::TypeckResults<'tcx>,
+ var_hir_id: LocalVarId,
+ closure_def_id: LocalDefId,
+ projections: &[PlaceElem<'tcx>],
+) -> Option<(usize, &'a ty::CapturedPlace<'tcx>)> {
+ let closure_min_captures = typeck_results.closure_min_captures.get(&closure_def_id)?;
+ let root_variable_min_captures = closure_min_captures.get(&var_hir_id.0)?;
+
+ let hir_projections = convert_to_hir_projections_and_truncate_for_capture(projections);
+
+ // If an ancestor is found, `idx` is the index within the list of captured places
+ // for root variable `var_hir_id` and `capture` is the `ty::CapturedPlace` itself.
+ let (idx, capture) = root_variable_min_captures.iter().enumerate().find(|(_, capture)| {
+ let possible_ancestor_proj_kinds: Vec<_> =
+ capture.place.projections.iter().map(|proj| proj.kind).collect();
+ is_ancestor_or_same_capture(&possible_ancestor_proj_kinds, &hir_projections)
+ })?;
+
+ // Convert index to be from the perspective of the entire closure_min_captures map
+ // instead of just the root variable capture list
+ Some((compute_capture_idx(closure_min_captures, var_hir_id, idx), capture))
+}
+
+/// Takes a PlaceBuilder and resolves the upvar (if any) within it, so that the
+/// `PlaceBuilder` now starts from `PlaceBase::Local`.
+///
+/// Returns an `Err` carrying the original `PlaceBuilder` (`from_builder`) when the
+/// capture cannot be resolved.
+fn to_upvars_resolved_place_builder<'a, 'tcx>(
+ from_builder: PlaceBuilder<'tcx>,
+ tcx: TyCtxt<'tcx>,
+ typeck_results: &'a ty::TypeckResults<'tcx>,
+) -> Result<PlaceBuilder<'tcx>, PlaceBuilder<'tcx>> {
+ match from_builder.base {
+ PlaceBase::Local(_) => Ok(from_builder),
+ PlaceBase::Upvar { var_hir_id, closure_def_id, closure_kind } => {
+ let mut upvar_resolved_place_builder = PlaceBuilder::from(ty::CAPTURE_STRUCT_LOCAL);
+ match closure_kind {
+ ty::ClosureKind::Fn | ty::ClosureKind::FnMut => {
+ upvar_resolved_place_builder = upvar_resolved_place_builder.deref();
+ }
+ ty::ClosureKind::FnOnce => {}
+ }
+
+ let Some((capture_index, capture)) =
+ find_capture_matching_projections(
+ typeck_results,
+ var_hir_id,
+ closure_def_id,
+ &from_builder.projection,
+ ) else {
+ let closure_span = tcx.def_span(closure_def_id);
+ if !enable_precise_capture(tcx, closure_span) {
+ bug!(
+ "No associated capture found for {:?}[{:#?}] even though \
+ capture_disjoint_fields isn't enabled",
+ var_hir_id,
+ from_builder.projection
+ )
+ } else {
+ debug!(
+ "No associated capture found for {:?}[{:#?}]",
+ var_hir_id, from_builder.projection,
+ );
+ }
+ return Err(from_builder);
+ };
+
+ // We won't be building MIR if the closure wasn't local
+ let closure_hir_id = tcx.hir().local_def_id_to_hir_id(closure_def_id);
+ let closure_ty = typeck_results.node_type(closure_hir_id);
+
+ let substs = match closure_ty.kind() {
+ ty::Closure(_, substs) => ty::UpvarSubsts::Closure(substs),
+ ty::Generator(_, substs, _) => ty::UpvarSubsts::Generator(substs),
+ _ => bug!("Lowering capture for non-closure type {:?}", closure_ty),
+ };
+
+ // Access the capture by accessing the field within the Closure struct.
+ //
+ // We must have inferred the capture types since we are building MIR, therefore
+ // it's safe to call `tuple_element_ty` and we can unwrap here because
+ // we know that the capture exists and is the `capture_index`-th capture.
+ let var_ty = substs.tupled_upvars_ty().tuple_fields()[capture_index];
+
+ upvar_resolved_place_builder =
+ upvar_resolved_place_builder.field(Field::new(capture_index), var_ty);
+
+ // If the variable is captured via ByRef(Immutable/Mutable) Borrow,
+ // we need to deref it
+ upvar_resolved_place_builder = match capture.info.capture_kind {
+ ty::UpvarCapture::ByRef(_) => upvar_resolved_place_builder.deref(),
+ ty::UpvarCapture::ByValue => upvar_resolved_place_builder,
+ };
+
+ // We used some of the projections to build the capture itself,
+ // now we apply the remaining to the upvar resolved place.
+ let remaining_projections = strip_prefix(
+ capture.place.base_ty,
+ from_builder.projection,
+ &capture.place.projections,
+ );
+ upvar_resolved_place_builder.projection.extend(remaining_projections);
+
+ Ok(upvar_resolved_place_builder)
+ }
+ }
+}
+
+/// Returns projections remaining after stripping an initial prefix of HIR
+/// projections.
+///
+/// Supports only HIR projection kinds that represent a path that might be
+/// captured by a closure or a generator, i.e., the `Index` and `Subslice`
+/// projection kinds are unsupported.
+fn strip_prefix<'tcx>(
+ mut base_ty: Ty<'tcx>,
+ projections: Vec<PlaceElem<'tcx>>,
+ prefix_projections: &[HirProjection<'tcx>],
+) -> impl Iterator<Item = PlaceElem<'tcx>> {
+ let mut iter = projections.into_iter();
+ for projection in prefix_projections {
+ match projection.kind {
+ HirProjectionKind::Deref => {
+ assert!(matches!(iter.next(), Some(ProjectionElem::Deref)));
+ }
+ HirProjectionKind::Field(..) => {
+ if base_ty.is_enum() {
+ assert!(matches!(iter.next(), Some(ProjectionElem::Downcast(..))));
+ }
+ assert!(matches!(iter.next(), Some(ProjectionElem::Field(..))));
+ }
+ HirProjectionKind::Index | HirProjectionKind::Subslice => {
+ bug!("unexpected projection kind: {:?}", projection);
+ }
+ }
+ base_ty = projection.ty;
+ }
+ iter
+}
+
+impl<'tcx> PlaceBuilder<'tcx> {
+ pub(crate) fn into_place<'a>(
+ self,
+ tcx: TyCtxt<'tcx>,
+ typeck_results: &'a ty::TypeckResults<'tcx>,
+ ) -> Place<'tcx> {
+ if let PlaceBase::Local(local) = self.base {
+ Place { local, projection: tcx.intern_place_elems(&self.projection) }
+ } else {
+ self.expect_upvars_resolved(tcx, typeck_results).into_place(tcx, typeck_results)
+ }
+ }
+
+ fn expect_upvars_resolved<'a>(
+ self,
+ tcx: TyCtxt<'tcx>,
+ typeck_results: &'a ty::TypeckResults<'tcx>,
+ ) -> PlaceBuilder<'tcx> {
+ to_upvars_resolved_place_builder(self, tcx, typeck_results).unwrap()
+ }
+
+ /// Attempts to resolve the `PlaceBuilder`.
+ /// On success, it will return the resolved `PlaceBuilder`.
+ /// On failure, it will return itself.
+ ///
+    /// Upvar resolution may fail for a `PlaceBuilder` when attempting to
+ /// resolve a disjoint field whose root variable is not captured
+ /// (destructured assignments) or when attempting to resolve a root
+ /// variable (discriminant matching with only wildcard arm) that is
+ /// not captured. This can happen because the final mir that will be
+ /// generated doesn't require a read for this place. Failures will only
+ /// happen inside closures.
+ pub(crate) fn try_upvars_resolved<'a>(
+ self,
+ tcx: TyCtxt<'tcx>,
+ typeck_results: &'a ty::TypeckResults<'tcx>,
+ ) -> Result<PlaceBuilder<'tcx>, PlaceBuilder<'tcx>> {
+ to_upvars_resolved_place_builder(self, tcx, typeck_results)
+ }
+
+ pub(crate) fn base(&self) -> PlaceBase {
+ self.base
+ }
+
+ pub(crate) fn field(self, f: Field, ty: Ty<'tcx>) -> Self {
+ self.project(PlaceElem::Field(f, ty))
+ }
+
+ pub(crate) fn deref(self) -> Self {
+ self.project(PlaceElem::Deref)
+ }
+
+ pub(crate) fn downcast(self, adt_def: AdtDef<'tcx>, variant_index: VariantIdx) -> Self {
+ self.project(PlaceElem::Downcast(Some(adt_def.variant(variant_index).name), variant_index))
+ }
+
+ fn index(self, index: Local) -> Self {
+ self.project(PlaceElem::Index(index))
+ }
+
+ pub(crate) fn project(mut self, elem: PlaceElem<'tcx>) -> Self {
+ self.projection.push(elem);
+ self
+ }
+}
+
+impl<'tcx> From<Local> for PlaceBuilder<'tcx> {
+ fn from(local: Local) -> Self {
+ Self { base: PlaceBase::Local(local), projection: Vec::new() }
+ }
+}
+
+impl<'tcx> From<PlaceBase> for PlaceBuilder<'tcx> {
+ fn from(base: PlaceBase) -> Self {
+ Self { base, projection: Vec::new() }
+ }
+}
+
+impl<'a, 'tcx> Builder<'a, 'tcx> {
+ /// Compile `expr`, yielding a place that we can move from etc.
+ ///
+ /// WARNING: Any user code might:
+ /// * Invalidate any slice bounds checks performed.
+ /// * Change the address that this `Place` refers to.
+ /// * Modify the memory that this place refers to.
+    /// * Invalidate the memory that this place refers to; this will be caught
+ /// by borrow checking.
+ ///
+ /// Extra care is needed if any user code is allowed to run between calling
+ /// this method and using it, as is the case for `match` and index
+ /// expressions.
+ pub(crate) fn as_place(
+ &mut self,
+ mut block: BasicBlock,
+ expr: &Expr<'tcx>,
+ ) -> BlockAnd<Place<'tcx>> {
+ let place_builder = unpack!(block = self.as_place_builder(block, expr));
+ block.and(place_builder.into_place(self.tcx, self.typeck_results))
+ }
+
+ /// This is used when constructing a compound `Place`, so that we can avoid creating
+ /// intermediate `Place` values until we know the full set of projections.
+ pub(crate) fn as_place_builder(
+ &mut self,
+ block: BasicBlock,
+ expr: &Expr<'tcx>,
+ ) -> BlockAnd<PlaceBuilder<'tcx>> {
+ self.expr_as_place(block, expr, Mutability::Mut, None)
+ }
+
+ /// Compile `expr`, yielding a place that we can move from etc.
+ /// Mutability note: The caller of this method promises only to read from the resulting
+ /// place. The place itself may or may not be mutable:
+ /// * If this expr is a place expr like a.b, then we will return that place.
+ /// * Otherwise, a temporary is created: in that event, it will be an immutable temporary.
+ pub(crate) fn as_read_only_place(
+ &mut self,
+ mut block: BasicBlock,
+ expr: &Expr<'tcx>,
+ ) -> BlockAnd<Place<'tcx>> {
+ let place_builder = unpack!(block = self.as_read_only_place_builder(block, expr));
+ block.and(place_builder.into_place(self.tcx, self.typeck_results))
+ }
+
+ /// This is used when constructing a compound `Place`, so that we can avoid creating
+ /// intermediate `Place` values until we know the full set of projections.
+ /// Mutability note: The caller of this method promises only to read from the resulting
+ /// place. The place itself may or may not be mutable:
+ /// * If this expr is a place expr like a.b, then we will return that place.
+ /// * Otherwise, a temporary is created: in that event, it will be an immutable temporary.
+ fn as_read_only_place_builder(
+ &mut self,
+ block: BasicBlock,
+ expr: &Expr<'tcx>,
+ ) -> BlockAnd<PlaceBuilder<'tcx>> {
+ self.expr_as_place(block, expr, Mutability::Not, None)
+ }
+
+ fn expr_as_place(
+ &mut self,
+ mut block: BasicBlock,
+ expr: &Expr<'tcx>,
+ mutability: Mutability,
+ fake_borrow_temps: Option<&mut Vec<Local>>,
+ ) -> BlockAnd<PlaceBuilder<'tcx>> {
+ debug!("expr_as_place(block={:?}, expr={:?}, mutability={:?})", block, expr, mutability);
+
+ let this = self;
+ let expr_span = expr.span;
+ let source_info = this.source_info(expr_span);
+ match expr.kind {
+ ExprKind::Scope { region_scope, lint_level, value } => {
+ this.in_scope((region_scope, source_info), lint_level, |this| {
+ this.expr_as_place(block, &this.thir[value], mutability, fake_borrow_temps)
+ })
+ }
+ ExprKind::Field { lhs, variant_index, name } => {
+ let lhs = &this.thir[lhs];
+ let mut place_builder =
+ unpack!(block = this.expr_as_place(block, lhs, mutability, fake_borrow_temps,));
+ if let ty::Adt(adt_def, _) = lhs.ty.kind() {
+ if adt_def.is_enum() {
+ place_builder = place_builder.downcast(*adt_def, variant_index);
+ }
+ }
+ block.and(place_builder.field(name, expr.ty))
+ }
+ ExprKind::Deref { arg } => {
+ let place_builder = unpack!(
+ block =
+ this.expr_as_place(block, &this.thir[arg], mutability, fake_borrow_temps,)
+ );
+ block.and(place_builder.deref())
+ }
+ ExprKind::Index { lhs, index } => this.lower_index_expression(
+ block,
+ &this.thir[lhs],
+ &this.thir[index],
+ mutability,
+ fake_borrow_temps,
+ expr.temp_lifetime,
+ expr_span,
+ source_info,
+ ),
+ ExprKind::UpvarRef { closure_def_id, var_hir_id } => {
+ this.lower_captured_upvar(block, closure_def_id.expect_local(), var_hir_id)
+ }
+
+ ExprKind::VarRef { id } => {
+ let place_builder = if this.is_bound_var_in_guard(id) {
+ let index = this.var_local_id(id, RefWithinGuard);
+ PlaceBuilder::from(index).deref()
+ } else {
+ let index = this.var_local_id(id, OutsideGuard);
+ PlaceBuilder::from(index)
+ };
+ block.and(place_builder)
+ }
+
+ ExprKind::PlaceTypeAscription { source, user_ty } => {
+ let place_builder = unpack!(
+ block = this.expr_as_place(
+ block,
+ &this.thir[source],
+ mutability,
+ fake_borrow_temps,
+ )
+ );
+ if let Some(user_ty) = user_ty {
+ let annotation_index =
+ this.canonical_user_type_annotations.push(CanonicalUserTypeAnnotation {
+ span: source_info.span,
+ user_ty,
+ inferred_ty: expr.ty,
+ });
+
+ let place = place_builder.clone().into_place(this.tcx, this.typeck_results);
+ this.cfg.push(
+ block,
+ Statement {
+ source_info,
+ kind: StatementKind::AscribeUserType(
+ Box::new((
+ place,
+ UserTypeProjection { base: annotation_index, projs: vec![] },
+ )),
+ Variance::Invariant,
+ ),
+ },
+ );
+ }
+ block.and(place_builder)
+ }
+ ExprKind::ValueTypeAscription { source, user_ty } => {
+ let source = &this.thir[source];
+ let temp =
+ unpack!(block = this.as_temp(block, source.temp_lifetime, source, mutability));
+ if let Some(user_ty) = user_ty {
+ let annotation_index =
+ this.canonical_user_type_annotations.push(CanonicalUserTypeAnnotation {
+ span: source_info.span,
+ user_ty,
+ inferred_ty: expr.ty,
+ });
+ this.cfg.push(
+ block,
+ Statement {
+ source_info,
+ kind: StatementKind::AscribeUserType(
+ Box::new((
+ Place::from(temp),
+ UserTypeProjection { base: annotation_index, projs: vec![] },
+ )),
+ Variance::Invariant,
+ ),
+ },
+ );
+ }
+ block.and(PlaceBuilder::from(temp))
+ }
+
+ ExprKind::Array { .. }
+ | ExprKind::Tuple { .. }
+ | ExprKind::Adt { .. }
+ | ExprKind::Closure { .. }
+ | ExprKind::Unary { .. }
+ | ExprKind::Binary { .. }
+ | ExprKind::LogicalOp { .. }
+ | ExprKind::Box { .. }
+ | ExprKind::Cast { .. }
+ | ExprKind::Use { .. }
+ | ExprKind::NeverToAny { .. }
+ | ExprKind::Pointer { .. }
+ | ExprKind::Repeat { .. }
+ | ExprKind::Borrow { .. }
+ | ExprKind::AddressOf { .. }
+ | ExprKind::Match { .. }
+ | ExprKind::If { .. }
+ | ExprKind::Loop { .. }
+ | ExprKind::Block { .. }
+ | ExprKind::Let { .. }
+ | ExprKind::Assign { .. }
+ | ExprKind::AssignOp { .. }
+ | ExprKind::Break { .. }
+ | ExprKind::Continue { .. }
+ | ExprKind::Return { .. }
+ | ExprKind::Literal { .. }
+ | ExprKind::NamedConst { .. }
+ | ExprKind::NonHirLiteral { .. }
+ | ExprKind::ZstLiteral { .. }
+ | ExprKind::ConstParam { .. }
+ | ExprKind::ConstBlock { .. }
+ | ExprKind::StaticRef { .. }
+ | ExprKind::InlineAsm { .. }
+ | ExprKind::Yield { .. }
+ | ExprKind::ThreadLocalRef(_)
+ | ExprKind::Call { .. } => {
+ // these are not places, so we need to make a temporary.
+ debug_assert!(!matches!(Category::of(&expr.kind), Some(Category::Place)));
+ let temp =
+ unpack!(block = this.as_temp(block, expr.temp_lifetime, expr, mutability));
+ block.and(PlaceBuilder::from(temp))
+ }
+ }
+ }
+
+ /// Lower a captured upvar. Note we might not know the actual capture index,
+ /// so we create a place starting from `PlaceBase::Upvar`, which will be resolved
+ /// once all projections that allow us to identify a capture have been applied.
+ fn lower_captured_upvar(
+ &mut self,
+ block: BasicBlock,
+ closure_def_id: LocalDefId,
+ var_hir_id: LocalVarId,
+ ) -> BlockAnd<PlaceBuilder<'tcx>> {
+ let closure_ty =
+ self.typeck_results.node_type(self.tcx.hir().local_def_id_to_hir_id(closure_def_id));
+
+ let closure_kind = if let ty::Closure(_, closure_substs) = closure_ty.kind() {
+ self.infcx.closure_kind(closure_substs).unwrap()
+ } else {
+ // Generators are considered FnOnce.
+ ty::ClosureKind::FnOnce
+ };
+
+ block.and(PlaceBuilder::from(PlaceBase::Upvar { var_hir_id, closure_def_id, closure_kind }))
+ }
+
+ /// Lower an index expression
+ ///
+    /// This has two complications:
+ ///
+ /// * We need to do a bounds check.
+ /// * We need to ensure that the bounds check can't be invalidated using an
+ /// expression like `x[1][{x = y; 2}]`. We use fake borrows here to ensure
+ /// that this is the case.
+ fn lower_index_expression(
+ &mut self,
+ mut block: BasicBlock,
+ base: &Expr<'tcx>,
+ index: &Expr<'tcx>,
+ mutability: Mutability,
+ fake_borrow_temps: Option<&mut Vec<Local>>,
+ temp_lifetime: Option<region::Scope>,
+ expr_span: Span,
+ source_info: SourceInfo,
+ ) -> BlockAnd<PlaceBuilder<'tcx>> {
+ let base_fake_borrow_temps = &mut Vec::new();
+ let is_outermost_index = fake_borrow_temps.is_none();
+ let fake_borrow_temps = fake_borrow_temps.unwrap_or(base_fake_borrow_temps);
+
+ let mut base_place =
+ unpack!(block = self.expr_as_place(block, base, mutability, Some(fake_borrow_temps),));
+
+ // Making this a *fresh* temporary means we do not have to worry about
+ // the index changing later: Nothing will ever change this temporary.
+ // The "retagging" transformation (for Stacked Borrows) relies on this.
+ let idx = unpack!(block = self.as_temp(block, temp_lifetime, index, Mutability::Not,));
+
+ block = self.bounds_check(block, base_place.clone(), idx, expr_span, source_info);
+
+ if is_outermost_index {
+ self.read_fake_borrows(block, fake_borrow_temps, source_info)
+ } else {
+ base_place = base_place.expect_upvars_resolved(self.tcx, self.typeck_results);
+ self.add_fake_borrows_of_base(
+ &base_place,
+ block,
+ fake_borrow_temps,
+ expr_span,
+ source_info,
+ );
+ }
+
+ block.and(base_place.index(idx))
+ }
+
+ fn bounds_check(
+ &mut self,
+ block: BasicBlock,
+ slice: PlaceBuilder<'tcx>,
+ index: Local,
+ expr_span: Span,
+ source_info: SourceInfo,
+ ) -> BasicBlock {
+ let usize_ty = self.tcx.types.usize;
+ let bool_ty = self.tcx.types.bool;
+ // bounds check:
+ let len = self.temp(usize_ty, expr_span);
+ let lt = self.temp(bool_ty, expr_span);
+
+ // len = len(slice)
+ self.cfg.push_assign(
+ block,
+ source_info,
+ len,
+ Rvalue::Len(slice.into_place(self.tcx, self.typeck_results)),
+ );
+ // lt = idx < len
+ self.cfg.push_assign(
+ block,
+ source_info,
+ lt,
+ Rvalue::BinaryOp(
+ BinOp::Lt,
+ Box::new((Operand::Copy(Place::from(index)), Operand::Copy(len))),
+ ),
+ );
+ let msg = BoundsCheck { len: Operand::Move(len), index: Operand::Copy(Place::from(index)) };
+ // assert!(lt, "...")
+ self.assert(block, Operand::Move(lt), true, msg, expr_span)
+ }
+
+ fn add_fake_borrows_of_base(
+ &mut self,
+ base_place: &PlaceBuilder<'tcx>,
+ block: BasicBlock,
+ fake_borrow_temps: &mut Vec<Local>,
+ expr_span: Span,
+ source_info: SourceInfo,
+ ) {
+ let tcx = self.tcx;
+ let local = match base_place.base {
+ PlaceBase::Local(local) => local,
+            PlaceBase::Upvar { .. } => bug!("Expected PlaceBase::Local, found Upvar"),
+ };
+
+ let place_ty = Place::ty_from(local, &base_place.projection, &self.local_decls, tcx);
+ if let ty::Slice(_) = place_ty.ty.kind() {
+ // We need to create fake borrows to ensure that the bounds
+ // check that we just did stays valid. Since we can't assign to
+ // unsized values, we only need to ensure that none of the
+ // pointers in the base place are modified.
+ for (idx, elem) in base_place.projection.iter().enumerate().rev() {
+ match elem {
+ ProjectionElem::Deref => {
+ let fake_borrow_deref_ty = Place::ty_from(
+ local,
+ &base_place.projection[..idx],
+ &self.local_decls,
+ tcx,
+ )
+ .ty;
+ let fake_borrow_ty =
+ tcx.mk_imm_ref(tcx.lifetimes.re_erased, fake_borrow_deref_ty);
+ let fake_borrow_temp =
+ self.local_decls.push(LocalDecl::new(fake_borrow_ty, expr_span));
+ let projection = tcx.intern_place_elems(&base_place.projection[..idx]);
+ self.cfg.push_assign(
+ block,
+ source_info,
+ fake_borrow_temp.into(),
+ Rvalue::Ref(
+ tcx.lifetimes.re_erased,
+ BorrowKind::Shallow,
+ Place { local, projection },
+ ),
+ );
+ fake_borrow_temps.push(fake_borrow_temp);
+ }
+ ProjectionElem::Index(_) => {
+ let index_ty = Place::ty_from(
+ local,
+ &base_place.projection[..idx],
+ &self.local_decls,
+ tcx,
+ );
+ match index_ty.ty.kind() {
+ // The previous index expression has already
+ // done any index expressions needed here.
+ ty::Slice(_) => break,
+ ty::Array(..) => (),
+ _ => bug!("unexpected index base"),
+ }
+ }
+ ProjectionElem::Field(..)
+ | ProjectionElem::Downcast(..)
+ | ProjectionElem::ConstantIndex { .. }
+ | ProjectionElem::Subslice { .. } => (),
+ }
+ }
+ }
+ }
+
+ fn read_fake_borrows(
+ &mut self,
+ bb: BasicBlock,
+ fake_borrow_temps: &mut Vec<Local>,
+ source_info: SourceInfo,
+ ) {
+ // All indexes have been evaluated now, read all of the
+ // fake borrows so that they are live across those index
+ // expressions.
+ for temp in fake_borrow_temps {
+ self.cfg.push_fake_read(bb, source_info, FakeReadCause::ForIndex, Place::from(*temp));
+ }
+ }
+}
+
+/// Precise capture is enabled if the feature gate `capture_disjoint_fields` is enabled or if
+/// the user is using Rust edition 2021 or later.
+fn enable_precise_capture(tcx: TyCtxt<'_>, closure_span: Span) -> bool {
+ tcx.features().capture_disjoint_fields || closure_span.rust_2021()
+}
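
The capture lookup above compares lists of HIR projection kinds: a captured place matches the path being built when its projections are a prefix of (or equal to) that path, so `foo.x` is an ancestor of `foo.x.y`, while `x.0.0` is not an ancestor of `x.0`. A standalone sketch of that prefix check over a made-up projection type (not the rustc `HirProjectionKind`):

#[derive(Clone, Copy, Debug, PartialEq)]
enum Proj {
    Deref,
    Field(u32),
}

// Mirrors `is_ancestor_or_same_capture`: the candidate ancestor must be no
// longer than the capture path and must match it element-for-element.
fn is_ancestor_or_same_capture(possible_ancestor: &[Proj], capture: &[Proj]) -> bool {
    possible_ancestor.len() <= capture.len()
        && possible_ancestor.iter().zip(capture).all(|(a, b)| a == b)
}

fn main() {
    let foo_x = [Proj::Field(0)];                   // foo.x
    let foo_x_y = [Proj::Field(0), Proj::Field(1)]; // foo.x.y
    assert!(is_ancestor_or_same_capture(&foo_x, &foo_x_y));

    // "x.0.0" is longer than "x.0", so it cannot be an ancestor of it.
    let x_0 = [Proj::Field(0)];
    let x_0_0 = [Proj::Field(0), Proj::Field(0)];
    assert!(!is_ancestor_or_same_capture(&x_0_0, &x_0));

    // Deref projections participate in the prefix check just like fields.
    let deref_then_field = [Proj::Deref, Proj::Field(0)];
    assert!(is_ancestor_or_same_capture(&[Proj::Deref], &deref_then_field));
    println!("ok");
}
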
diff --git a/compiler/rustc_mir_build/src/build/expr/as_rvalue.rs b/compiler/rustc_mir_build/src/build/expr/as_rvalue.rs
new file mode 100644
index 000000000..15f2d17c4
--- /dev/null
+++ b/compiler/rustc_mir_build/src/build/expr/as_rvalue.rs
@@ -0,0 +1,694 @@
+//! See docs in `build/expr/mod.rs`.
+
+use rustc_index::vec::Idx;
+use rustc_middle::ty::util::IntTypeExt;
+
+use crate::build::expr::as_place::PlaceBase;
+use crate::build::expr::category::{Category, RvalueFunc};
+use crate::build::{BlockAnd, BlockAndExtension, Builder, NeedsTemporary};
+use rustc_hir::lang_items::LangItem;
+use rustc_middle::middle::region;
+use rustc_middle::mir::AssertKind;
+use rustc_middle::mir::Place;
+use rustc_middle::mir::*;
+use rustc_middle::thir::*;
+use rustc_middle::ty::cast::CastTy;
+use rustc_middle::ty::{self, Ty, UpvarSubsts};
+use rustc_span::Span;
+
+impl<'a, 'tcx> Builder<'a, 'tcx> {
+ /// Returns an rvalue suitable for use until the end of the current
+ /// scope expression.
+ ///
+ /// The operand returned from this function will *not be valid* after
+ /// an ExprKind::Scope is passed, so please do *not* return it from
+ /// functions to avoid bad miscompiles.
+ pub(crate) fn as_local_rvalue(
+ &mut self,
+ block: BasicBlock,
+ expr: &Expr<'tcx>,
+ ) -> BlockAnd<Rvalue<'tcx>> {
+ let local_scope = self.local_scope();
+ self.as_rvalue(block, Some(local_scope), expr)
+ }
+
+ /// Compile `expr`, yielding an rvalue.
+ pub(crate) fn as_rvalue(
+ &mut self,
+ mut block: BasicBlock,
+ scope: Option<region::Scope>,
+ expr: &Expr<'tcx>,
+ ) -> BlockAnd<Rvalue<'tcx>> {
+ debug!("expr_as_rvalue(block={:?}, scope={:?}, expr={:?})", block, scope, expr);
+
+ let this = self;
+ let expr_span = expr.span;
+ let source_info = this.source_info(expr_span);
+
+ match expr.kind {
+ ExprKind::ThreadLocalRef(did) => block.and(Rvalue::ThreadLocalRef(did)),
+ ExprKind::Scope { region_scope, lint_level, value } => {
+ let region_scope = (region_scope, source_info);
+ this.in_scope(region_scope, lint_level, |this| {
+ this.as_rvalue(block, scope, &this.thir[value])
+ })
+ }
+ ExprKind::Repeat { value, count } => {
+ if Some(0) == count.try_eval_usize(this.tcx, this.param_env) {
+ this.build_zero_repeat(block, value, scope, source_info)
+ } else {
+ let value_operand = unpack!(
+ block = this.as_operand(
+ block,
+ scope,
+ &this.thir[value],
+ None,
+ NeedsTemporary::No
+ )
+ );
+ block.and(Rvalue::Repeat(value_operand, count))
+ }
+ }
+ ExprKind::Binary { op, lhs, rhs } => {
+ let lhs = unpack!(
+ block =
+ this.as_operand(block, scope, &this.thir[lhs], None, NeedsTemporary::Maybe)
+ );
+ let rhs = unpack!(
+ block =
+ this.as_operand(block, scope, &this.thir[rhs], None, NeedsTemporary::No)
+ );
+ this.build_binary_op(block, op, expr_span, expr.ty, lhs, rhs)
+ }
+ ExprKind::Unary { op, arg } => {
+ let arg = unpack!(
+ block =
+ this.as_operand(block, scope, &this.thir[arg], None, NeedsTemporary::No)
+ );
+ // Check for -MIN on signed integers
+ if this.check_overflow && op == UnOp::Neg && expr.ty.is_signed() {
+ let bool_ty = this.tcx.types.bool;
+
+ let minval = this.minval_literal(expr_span, expr.ty);
+ let is_min = this.temp(bool_ty, expr_span);
+
+ this.cfg.push_assign(
+ block,
+ source_info,
+ is_min,
+ Rvalue::BinaryOp(BinOp::Eq, Box::new((arg.to_copy(), minval))),
+ );
+
+ block = this.assert(
+ block,
+ Operand::Move(is_min),
+ false,
+ AssertKind::OverflowNeg(arg.to_copy()),
+ expr_span,
+ );
+ }
+ block.and(Rvalue::UnaryOp(op, arg))
+ }
+ ExprKind::Box { value } => {
+ let value = &this.thir[value];
+ let tcx = this.tcx;
+
+                // `exchange_malloc` is unsafe but box is safe, so we need a new scope.
+ let synth_scope = this.new_source_scope(
+ expr_span,
+ LintLevel::Inherited,
+ Some(Safety::BuiltinUnsafe),
+ );
+ let synth_info = SourceInfo { span: expr_span, scope: synth_scope };
+
+ let size = this.temp(tcx.types.usize, expr_span);
+ this.cfg.push_assign(
+ block,
+ synth_info,
+ size,
+ Rvalue::NullaryOp(NullOp::SizeOf, value.ty),
+ );
+
+ let align = this.temp(tcx.types.usize, expr_span);
+ this.cfg.push_assign(
+ block,
+ synth_info,
+ align,
+ Rvalue::NullaryOp(NullOp::AlignOf, value.ty),
+ );
+
+ // malloc some memory of suitable size and align:
+ let exchange_malloc = Operand::function_handle(
+ tcx,
+ tcx.require_lang_item(LangItem::ExchangeMalloc, Some(expr_span)),
+ ty::List::empty(),
+ expr_span,
+ );
+ let storage = this.temp(tcx.mk_mut_ptr(tcx.types.u8), expr_span);
+ let success = this.cfg.start_new_block();
+ this.cfg.terminate(
+ block,
+ synth_info,
+ TerminatorKind::Call {
+ func: exchange_malloc,
+ args: vec![Operand::Move(size), Operand::Move(align)],
+ destination: storage,
+ target: Some(success),
+ cleanup: None,
+ from_hir_call: false,
+ fn_span: expr_span,
+ },
+ );
+ this.diverge_from(block);
+ block = success;
+
+ // The `Box<T>` temporary created here is not a part of the HIR,
+ // and therefore is not considered during generator auto-trait
+ // determination. See the comment about `box` at `yield_in_scope`.
+ let result = this.local_decls.push(LocalDecl::new(expr.ty, expr_span).internal());
+ this.cfg.push(
+ block,
+ Statement { source_info, kind: StatementKind::StorageLive(result) },
+ );
+ if let Some(scope) = scope {
+ // schedule a shallow free of that memory, lest we unwind:
+ this.schedule_drop_storage_and_value(expr_span, scope, result);
+ }
+
+ // Transmute `*mut u8` to the box (thus far, uninitialized):
+ let box_ = Rvalue::ShallowInitBox(Operand::Move(storage), value.ty);
+ this.cfg.push_assign(block, source_info, Place::from(result), box_);
+
+ // initialize the box contents:
+ unpack!(
+ block = this.expr_into_dest(
+ this.tcx.mk_place_deref(Place::from(result)),
+ block,
+ value
+ )
+ );
+ block.and(Rvalue::Use(Operand::Move(Place::from(result))))
+ }
+ ExprKind::Cast { source } => {
+ let source = &this.thir[source];
+
+ // Casting an enum to an integer is equivalent to computing the discriminant and casting the
+ // discriminant. Previously every backend had to repeat the logic for this operation. Now we
+ // create all the steps directly in MIR with operations all backends need to support anyway.
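+            // For example (illustrative), with
+            //
+            //     enum E { A = 10, B = 20 }
+            //
+            // the cast `e as i32` is lowered to roughly
+            // `d = Discriminant(e); d as i32`, instead of asking the backend to
+            // lower the enum-to-integer cast itself.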
+ let (source, ty) = if let ty::Adt(adt_def, ..) = source.ty.kind() && adt_def.is_enum() {
+ let discr_ty = adt_def.repr().discr_type().to_ty(this.tcx);
+ let place = unpack!(block = this.as_place(block, source));
+ let discr = this.temp(discr_ty, source.span);
+ this.cfg.push_assign(
+ block,
+ source_info,
+ discr,
+ Rvalue::Discriminant(place),
+ );
+
+ (Operand::Move(discr), discr_ty)
+ } else {
+ let ty = source.ty;
+ let source = unpack!(
+ block = this.as_operand(block, scope, source, None, NeedsTemporary::No)
+ );
+ (source, ty)
+ };
+ let from_ty = CastTy::from_ty(ty);
+ let cast_ty = CastTy::from_ty(expr.ty);
+ let cast_kind = match (from_ty, cast_ty) {
+ (Some(CastTy::Ptr(_) | CastTy::FnPtr), Some(CastTy::Int(_))) => {
+ CastKind::PointerExposeAddress
+ }
+ (Some(CastTy::Int(_)), Some(CastTy::Ptr(_))) => {
+ CastKind::PointerFromExposedAddress
+ }
+ (_, _) => CastKind::Misc,
+ };
+ block.and(Rvalue::Cast(cast_kind, source, expr.ty))
+ }
+ ExprKind::Pointer { cast, source } => {
+ let source = unpack!(
+ block =
+ this.as_operand(block, scope, &this.thir[source], None, NeedsTemporary::No)
+ );
+ block.and(Rvalue::Cast(CastKind::Pointer(cast), source, expr.ty))
+ }
+ ExprKind::Array { ref fields } => {
+ // (*) We would (maybe) be closer to codegen if we
+ // handled this and other aggregate cases via
+ // `into()`, not `as_rvalue` -- in that case, instead
+ // of generating
+ //
+ // let tmp1 = ...1;
+ // let tmp2 = ...2;
+ // dest = Rvalue::Aggregate(Foo, [tmp1, tmp2])
+ //
+ // we could just generate
+ //
+ // dest.f = ...1;
+ // dest.g = ...2;
+ //
+ // The problem is that then we would need to:
+ //
+ // (a) have a more complex mechanism for handling
+ // partial cleanup;
+ // (b) distinguish the case where the type `Foo` has a
+ // destructor, in which case creating an instance
+ // as a whole "arms" the destructor, and you can't
+ // write individual fields; and,
+ // (c) handle the case where the type Foo has no
+ // fields. We don't want `let x: ();` to compile
+ // to the same MIR as `let x = ();`.
+
+ // first process the set of fields
+ let el_ty = expr.ty.sequence_element_type(this.tcx);
+ let fields: Vec<_> = fields
+ .into_iter()
+ .copied()
+ .map(|f| {
+ unpack!(
+ block = this.as_operand(
+ block,
+ scope,
+ &this.thir[f],
+ None,
+ NeedsTemporary::Maybe
+ )
+ )
+ })
+ .collect();
+
+ block.and(Rvalue::Aggregate(Box::new(AggregateKind::Array(el_ty)), fields))
+ }
+ ExprKind::Tuple { ref fields } => {
+ // see (*) above
+ // first process the set of fields
+ let fields: Vec<_> = fields
+ .into_iter()
+ .copied()
+ .map(|f| {
+ unpack!(
+ block = this.as_operand(
+ block,
+ scope,
+ &this.thir[f],
+ None,
+ NeedsTemporary::Maybe
+ )
+ )
+ })
+ .collect();
+
+ block.and(Rvalue::Aggregate(Box::new(AggregateKind::Tuple), fields))
+ }
+ ExprKind::Closure { closure_id, substs, ref upvars, movability, ref fake_reads } => {
+ // Convert the closure fake reads, if any, from `ExprRef` to mir `Place`
+ // and push the fake reads.
+ // This must come before creating the operands. This is required in case
+ // there is a fake read and a borrow of the same path, since otherwise the
+ // fake read might interfere with the borrow. Consider an example like this
+ // one:
+ // ```
+ // let mut x = 0;
+ // let c = || {
+ // &mut x; // mutable borrow of `x`
+ // match x { _ => () } // fake read of `x`
+ // };
+ // ```
+ //
+ for (thir_place, cause, hir_id) in fake_reads.into_iter() {
+ let place_builder =
+ unpack!(block = this.as_place_builder(block, &this.thir[*thir_place]));
+
+ if let Ok(place_builder_resolved) =
+ place_builder.try_upvars_resolved(this.tcx, this.typeck_results)
+ {
+ let mir_place =
+ place_builder_resolved.into_place(this.tcx, this.typeck_results);
+ this.cfg.push_fake_read(
+ block,
+ this.source_info(this.tcx.hir().span(*hir_id)),
+ *cause,
+ mir_place,
+ );
+ }
+ }
+
+ // see (*) above
+ let operands: Vec<_> = upvars
+ .into_iter()
+ .copied()
+ .map(|upvar| {
+ let upvar = &this.thir[upvar];
+ match Category::of(&upvar.kind) {
+ // Use as_place to avoid creating a temporary when
+ // moving a variable into a closure, so that
+ // borrowck knows which variables to mark as being
+ // used as mut. This is OK here because the upvar
+ // expressions have no side effects and act on
+ // disjoint places.
+                            // This occurs when capturing by copy/move; by-reference
+                            // captures use `as_operand` instead.
+ Some(Category::Place) => {
+ let place = unpack!(block = this.as_place(block, upvar));
+ this.consume_by_copy_or_move(place)
+ }
+ _ => {
+ // Turn mutable borrow captures into unique
+ // borrow captures when capturing an immutable
+ // variable. This is sound because the mutation
+ // that caused the capture will cause an error.
+ match upvar.kind {
+ ExprKind::Borrow {
+ borrow_kind:
+ BorrowKind::Mut { allow_two_phase_borrow: false },
+ arg,
+ } => unpack!(
+ block = this.limit_capture_mutability(
+ upvar.span,
+ upvar.ty,
+ scope,
+ block,
+ &this.thir[arg],
+ )
+ ),
+ _ => {
+ unpack!(
+ block = this.as_operand(
+ block,
+ scope,
+ upvar,
+ None,
+ NeedsTemporary::Maybe
+ )
+ )
+ }
+ }
+ }
+ }
+ })
+ .collect();
+
+ let result = match substs {
+ UpvarSubsts::Generator(substs) => {
+ // We implicitly set the discriminant to 0. See
+ // librustc_mir/transform/deaggregator.rs for details.
+ let movability = movability.unwrap();
+ Box::new(AggregateKind::Generator(closure_id, substs, movability))
+ }
+ UpvarSubsts::Closure(substs) => {
+ Box::new(AggregateKind::Closure(closure_id, substs))
+ }
+ };
+ block.and(Rvalue::Aggregate(result, operands))
+ }
+ ExprKind::Assign { .. } | ExprKind::AssignOp { .. } => {
+ block = unpack!(this.stmt_expr(block, expr, None));
+ block.and(Rvalue::Use(Operand::Constant(Box::new(Constant {
+ span: expr_span,
+ user_ty: None,
+ literal: ConstantKind::zero_sized(this.tcx.types.unit),
+ }))))
+ }
+
+ ExprKind::Literal { .. }
+ | ExprKind::NamedConst { .. }
+ | ExprKind::NonHirLiteral { .. }
+ | ExprKind::ZstLiteral { .. }
+ | ExprKind::ConstParam { .. }
+ | ExprKind::ConstBlock { .. }
+ | ExprKind::StaticRef { .. } => {
+ let constant = this.as_constant(expr);
+ block.and(Rvalue::Use(Operand::Constant(Box::new(constant))))
+ }
+
+ ExprKind::Yield { .. }
+ | ExprKind::Block { .. }
+ | ExprKind::Match { .. }
+ | ExprKind::If { .. }
+ | ExprKind::NeverToAny { .. }
+ | ExprKind::Use { .. }
+ | ExprKind::Borrow { .. }
+ | ExprKind::AddressOf { .. }
+ | ExprKind::Adt { .. }
+ | ExprKind::Loop { .. }
+ | ExprKind::LogicalOp { .. }
+ | ExprKind::Call { .. }
+ | ExprKind::Field { .. }
+ | ExprKind::Let { .. }
+ | ExprKind::Deref { .. }
+ | ExprKind::Index { .. }
+ | ExprKind::VarRef { .. }
+ | ExprKind::UpvarRef { .. }
+ | ExprKind::Break { .. }
+ | ExprKind::Continue { .. }
+ | ExprKind::Return { .. }
+ | ExprKind::InlineAsm { .. }
+ | ExprKind::PlaceTypeAscription { .. }
+ | ExprKind::ValueTypeAscription { .. } => {
+ // these do not have corresponding `Rvalue` variants,
+ // so make an operand and then return that
+ debug_assert!(!matches!(
+ Category::of(&expr.kind),
+ Some(Category::Rvalue(RvalueFunc::AsRvalue) | Category::Constant)
+ ));
+ let operand =
+ unpack!(block = this.as_operand(block, scope, expr, None, NeedsTemporary::No));
+ block.and(Rvalue::Use(operand))
+ }
+ }
+ }
+
+ pub(crate) fn build_binary_op(
+ &mut self,
+ mut block: BasicBlock,
+ op: BinOp,
+ span: Span,
+ ty: Ty<'tcx>,
+ lhs: Operand<'tcx>,
+ rhs: Operand<'tcx>,
+ ) -> BlockAnd<Rvalue<'tcx>> {
+ let source_info = self.source_info(span);
+ let bool_ty = self.tcx.types.bool;
+ if self.check_overflow && op.is_checkable() && ty.is_integral() {
+ let result_tup = self.tcx.intern_tup(&[ty, bool_ty]);
+ let result_value = self.temp(result_tup, span);
+
+ self.cfg.push_assign(
+ block,
+ source_info,
+ result_value,
+ Rvalue::CheckedBinaryOp(op, Box::new((lhs.to_copy(), rhs.to_copy()))),
+ );
+ let val_fld = Field::new(0);
+ let of_fld = Field::new(1);
+
+ let tcx = self.tcx;
+ let val = tcx.mk_place_field(result_value, val_fld, ty);
+ let of = tcx.mk_place_field(result_value, of_fld, bool_ty);
+
+ let err = AssertKind::Overflow(op, lhs, rhs);
+
+ block = self.assert(block, Operand::Move(of), false, err, span);
+
+ block.and(Rvalue::Use(Operand::Move(val)))
+ } else {
+ if ty.is_integral() && (op == BinOp::Div || op == BinOp::Rem) {
+                // Checking division and remainder is more complex, since 1. we always
+                // check, regardless of `check_overflow`, and 2. there are two possible
+                // failure cases: divide-by-zero and overflow.
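+                //
+                // Roughly (a sketch of the emitted checks, not literal MIR), for `lhs / rhs`:
+                //
+                //     assert(rhs != 0, "attempt to divide by zero");
+                //     // and, for signed integer types only:
+                //     assert(!(rhs == -1 && lhs == MIN), "attempt to divide with overflow");
+                //     result = lhs / rhs;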
+
+ let zero_err = if op == BinOp::Div {
+ AssertKind::DivisionByZero(lhs.to_copy())
+ } else {
+ AssertKind::RemainderByZero(lhs.to_copy())
+ };
+ let overflow_err = AssertKind::Overflow(op, lhs.to_copy(), rhs.to_copy());
+
+ // Check for / 0
+ let is_zero = self.temp(bool_ty, span);
+ let zero = self.zero_literal(span, ty);
+ self.cfg.push_assign(
+ block,
+ source_info,
+ is_zero,
+ Rvalue::BinaryOp(BinOp::Eq, Box::new((rhs.to_copy(), zero))),
+ );
+
+ block = self.assert(block, Operand::Move(is_zero), false, zero_err, span);
+
+                // We only need to check for overflow in one case:
+                // MIN / -1, and only for signed values.
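+                // (e.g., `i32::MIN / -1` would be 2_147_483_648, which does not fit
+                // in `i32`, whose maximum is 2_147_483_647.)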
+ if ty.is_signed() {
+ let neg_1 = self.neg_1_literal(span, ty);
+ let min = self.minval_literal(span, ty);
+
+ let is_neg_1 = self.temp(bool_ty, span);
+ let is_min = self.temp(bool_ty, span);
+ let of = self.temp(bool_ty, span);
+
+ // this does (rhs == -1) & (lhs == MIN). It could short-circuit instead
+
+ self.cfg.push_assign(
+ block,
+ source_info,
+ is_neg_1,
+ Rvalue::BinaryOp(BinOp::Eq, Box::new((rhs.to_copy(), neg_1))),
+ );
+ self.cfg.push_assign(
+ block,
+ source_info,
+ is_min,
+ Rvalue::BinaryOp(BinOp::Eq, Box::new((lhs.to_copy(), min))),
+ );
+
+ let is_neg_1 = Operand::Move(is_neg_1);
+ let is_min = Operand::Move(is_min);
+ self.cfg.push_assign(
+ block,
+ source_info,
+ of,
+ Rvalue::BinaryOp(BinOp::BitAnd, Box::new((is_neg_1, is_min))),
+ );
+
+ block = self.assert(block, Operand::Move(of), false, overflow_err, span);
+ }
+ }
+
+ block.and(Rvalue::BinaryOp(op, Box::new((lhs, rhs))))
+ }
+ }
+
+ fn build_zero_repeat(
+ &mut self,
+ mut block: BasicBlock,
+ value: ExprId,
+ scope: Option<region::Scope>,
+ outer_source_info: SourceInfo,
+ ) -> BlockAnd<Rvalue<'tcx>> {
+ let this = self;
+ let value = &this.thir[value];
+ let elem_ty = value.ty;
+ if let Some(Category::Constant) = Category::of(&value.kind) {
+ // Repeating a const does nothing
+ } else {
+ // For a non-const, we may need to generate an appropriate `Drop`
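+            // (For example, in the illustrative snippet
+            //
+            //     let _: [String; 0] = [make_string(); 0];
+            //
+            // with `make_string` a placeholder returning `String`, the call is still
+            // evaluated once and its result must be dropped, even though the final
+            // array is empty.)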
+ let value_operand =
+ unpack!(block = this.as_operand(block, scope, value, None, NeedsTemporary::No));
+ if let Operand::Move(to_drop) = value_operand {
+ let success = this.cfg.start_new_block();
+ this.cfg.terminate(
+ block,
+ outer_source_info,
+ TerminatorKind::Drop { place: to_drop, target: success, unwind: None },
+ );
+ this.diverge_from(block);
+ block = success;
+ }
+ this.record_operands_moved(&[value_operand]);
+ }
+ block.and(Rvalue::Aggregate(Box::new(AggregateKind::Array(elem_ty)), Vec::new()))
+ }
+
+ fn limit_capture_mutability(
+ &mut self,
+ upvar_span: Span,
+ upvar_ty: Ty<'tcx>,
+ temp_lifetime: Option<region::Scope>,
+ mut block: BasicBlock,
+ arg: &Expr<'tcx>,
+ ) -> BlockAnd<Operand<'tcx>> {
+ let this = self;
+
+ let source_info = this.source_info(upvar_span);
+ let temp = this.local_decls.push(LocalDecl::new(upvar_ty, upvar_span));
+
+ this.cfg.push(block, Statement { source_info, kind: StatementKind::StorageLive(temp) });
+
+ let arg_place_builder = unpack!(block = this.as_place_builder(block, arg));
+
+ let mutability = match arg_place_builder.base() {
+ // We are capturing a path that starts off a local variable in the parent.
+ // The mutability of the current capture is same as the mutability
+ // of the local declaration in the parent.
+ PlaceBase::Local(local) => this.local_decls[local].mutability,
+ // Parent is a closure and we are capturing a path that is captured
+ // by the parent itself. The mutability of the current capture
+ // is same as that of the capture in the parent closure.
+ PlaceBase::Upvar { .. } => {
+ let enclosing_upvars_resolved =
+ arg_place_builder.clone().into_place(this.tcx, this.typeck_results);
+
+ match enclosing_upvars_resolved.as_ref() {
+ PlaceRef {
+ local,
+ projection: &[ProjectionElem::Field(upvar_index, _), ..],
+ }
+ | PlaceRef {
+ local,
+ projection:
+ &[ProjectionElem::Deref, ProjectionElem::Field(upvar_index, _), ..],
+ } => {
+ // Not in a closure
+ debug_assert!(
+ local == ty::CAPTURE_STRUCT_LOCAL,
+ "Expected local to be Local(1), found {:?}",
+ local
+ );
+                        // The capture index must be in bounds for `upvar_mutbls`
+ debug_assert!(
+ this.upvar_mutbls.len() > upvar_index.index(),
+ "Unexpected capture place, upvar_mutbls={:#?}, upvar_index={:?}",
+ this.upvar_mutbls,
+ upvar_index
+ );
+ this.upvar_mutbls[upvar_index.index()]
+ }
+ _ => bug!("Unexpected capture place"),
+ }
+ }
+ };
+
+ let borrow_kind = match mutability {
+ Mutability::Not => BorrowKind::Unique,
+ Mutability::Mut => BorrowKind::Mut { allow_two_phase_borrow: false },
+ };
+
+ let arg_place = arg_place_builder.into_place(this.tcx, this.typeck_results);
+
+ this.cfg.push_assign(
+ block,
+ source_info,
+ Place::from(temp),
+ Rvalue::Ref(this.tcx.lifetimes.re_erased, borrow_kind, arg_place),
+ );
+
+ // See the comment in `expr_as_temp` and on the `rvalue_scopes` field for why
+ // this can be `None`.
+ if let Some(temp_lifetime) = temp_lifetime {
+ this.schedule_drop_storage_and_value(upvar_span, temp_lifetime, temp);
+ }
+
+ block.and(Operand::Move(Place::from(temp)))
+ }
+
+ // Helper to get a `-1` value of the appropriate type
+ fn neg_1_literal(&mut self, span: Span, ty: Ty<'tcx>) -> Operand<'tcx> {
+ let param_ty = ty::ParamEnv::empty().and(ty);
+ let size = self.tcx.layout_of(param_ty).unwrap().size;
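+        // In two's complement, `-1` is the all-ones bit pattern, which is the
+        // unsigned maximum for this size (e.g. 0xFF for an 8-bit type).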
+ let literal = ConstantKind::from_bits(self.tcx, size.unsigned_int_max(), param_ty);
+
+ self.literal_operand(span, literal)
+ }
+
+ // Helper to get the minimum value of the appropriate type
+ fn minval_literal(&mut self, span: Span, ty: Ty<'tcx>) -> Operand<'tcx> {
+ assert!(ty.is_signed());
+ let param_ty = ty::ParamEnv::empty().and(ty);
+ let bits = self.tcx.layout_of(param_ty).unwrap().size.bits();
+ let n = 1 << (bits - 1);
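+        // The minimum of a signed type has only the sign bit set,
+        // e.g. 0x80 (-128) for `i8`.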
+ let literal = ConstantKind::from_bits(self.tcx, n, param_ty);
+
+ self.literal_operand(span, literal)
+ }
+}
diff --git a/compiler/rustc_mir_build/src/build/expr/as_temp.rs b/compiler/rustc_mir_build/src/build/expr/as_temp.rs
new file mode 100644
index 000000000..724b72f87
--- /dev/null
+++ b/compiler/rustc_mir_build/src/build/expr/as_temp.rs
@@ -0,0 +1,119 @@
+//! See docs in build/expr/mod.rs
+
+use crate::build::scope::DropKind;
+use crate::build::{BlockAnd, BlockAndExtension, Builder};
+use rustc_data_structures::stack::ensure_sufficient_stack;
+use rustc_middle::middle::region;
+use rustc_middle::mir::*;
+use rustc_middle::thir::*;
+
+impl<'a, 'tcx> Builder<'a, 'tcx> {
+ /// Compile `expr` into a fresh temporary. This is used when building
+ /// up rvalues so as to freeze the value that will be consumed.
+ pub(crate) fn as_temp(
+ &mut self,
+ block: BasicBlock,
+ temp_lifetime: Option<region::Scope>,
+ expr: &Expr<'tcx>,
+ mutability: Mutability,
+ ) -> BlockAnd<Local> {
+        // This is the only place in MIR building where we truly need to worry about
+        // infinite recursion. Everything else does recurse, too, but it always gets broken
+        // up at some point by inserting an intermediate temporary.
+ ensure_sufficient_stack(|| self.as_temp_inner(block, temp_lifetime, expr, mutability))
+ }
+
+ fn as_temp_inner(
+ &mut self,
+ mut block: BasicBlock,
+ temp_lifetime: Option<region::Scope>,
+ expr: &Expr<'tcx>,
+ mutability: Mutability,
+ ) -> BlockAnd<Local> {
+ debug!(
+ "as_temp(block={:?}, temp_lifetime={:?}, expr={:?}, mutability={:?})",
+ block, temp_lifetime, expr, mutability
+ );
+ let this = self;
+
+ let expr_span = expr.span;
+ let source_info = this.source_info(expr_span);
+ if let ExprKind::Scope { region_scope, lint_level, value } = expr.kind {
+ return this.in_scope((region_scope, source_info), lint_level, |this| {
+ this.as_temp(block, temp_lifetime, &this.thir[value], mutability)
+ });
+ }
+
+ let expr_ty = expr.ty;
+ let temp = {
+ let mut local_decl = LocalDecl::new(expr_ty, expr_span);
+ if mutability == Mutability::Not {
+ local_decl = local_decl.immutable();
+ }
+
+ debug!("creating temp {:?} with block_context: {:?}", local_decl, this.block_context);
+ // Find out whether this temp is being created within the
+ // tail expression of a block whose result is ignored.
+ if let Some(tail_info) = this.block_context.currently_in_block_tail() {
+ local_decl = local_decl.block_tail(tail_info);
+ }
+ match expr.kind {
+ ExprKind::StaticRef { def_id, .. } => {
+ assert!(!this.tcx.is_thread_local_static(def_id));
+ local_decl.internal = true;
+ local_decl.local_info =
+ Some(Box::new(LocalInfo::StaticRef { def_id, is_thread_local: false }));
+ }
+ ExprKind::ThreadLocalRef(def_id) => {
+ assert!(this.tcx.is_thread_local_static(def_id));
+ local_decl.internal = true;
+ local_decl.local_info =
+ Some(Box::new(LocalInfo::StaticRef { def_id, is_thread_local: true }));
+ }
+ ExprKind::NamedConst { def_id, .. } | ExprKind::ConstParam { def_id, .. } => {
+ local_decl.local_info = Some(Box::new(LocalInfo::ConstRef { def_id }));
+ }
+ _ => {}
+ }
+ this.local_decls.push(local_decl)
+ };
+ let temp_place = Place::from(temp);
+
+ match expr.kind {
+ // Don't bother with StorageLive and Dead for these temporaries,
+ // they are never assigned.
+ ExprKind::Break { .. } | ExprKind::Continue { .. } | ExprKind::Return { .. } => (),
+ ExprKind::Block { body: Block { expr: None, targeted_by_break: false, .. } }
+ if expr_ty.is_never() => {}
+ _ => {
+ this.cfg
+ .push(block, Statement { source_info, kind: StatementKind::StorageLive(temp) });
+
+ // In constants, `temp_lifetime` is `None` for temporaries that
+ // live for the `'static` lifetime. Thus we do not drop these
+ // temporaries and simply leak them.
+ // This is equivalent to what `let x = &foo();` does in
+                // functions. The temporary is lifted to its surrounding
+                // scope. In a function that means the temporary lives until
+ // just before the function returns. In constants that means it
+ // outlives the constant's initialization value computation.
+ // Anything outliving a constant must have the `'static`
+ // lifetime and live forever.
+                // Anything with a shorter lifetime (e.g., the `&foo()` in
+                // `bar(&foo())`, or anything within a block) will keep the
+                // regular drops, just like runtime code.
+ if let Some(temp_lifetime) = temp_lifetime {
+ this.schedule_drop(expr_span, temp_lifetime, temp, DropKind::Storage);
+ }
+ }
+ }
+
+ unpack!(block = this.expr_into_dest(temp_place, block, expr));
+
+ if let Some(temp_lifetime) = temp_lifetime {
+ this.schedule_drop(expr_span, temp_lifetime, temp, DropKind::Value);
+ }
+
+ block.and(temp)
+ }
+}
diff --git a/compiler/rustc_mir_build/src/build/expr/category.rs b/compiler/rustc_mir_build/src/build/expr/category.rs
new file mode 100644
index 000000000..a4386319d
--- /dev/null
+++ b/compiler/rustc_mir_build/src/build/expr/category.rs
@@ -0,0 +1,92 @@
+use rustc_middle::thir::*;
+
+#[derive(Debug, PartialEq)]
+pub(crate) enum Category {
+ // An assignable memory location like `x`, `x.f`, `foo()[3]`, that
+ // sort of thing. Something that could appear on the LHS of an `=`
+ // sign.
+ Place,
+
+ // A literal like `23` or `"foo"`. Does not include constant
+ // expressions like `3 + 5`.
+ Constant,
+
+ // Something that generates a new value at runtime, like `x + y`
+ // or `foo()`.
+ Rvalue(RvalueFunc),
+}
+
+// Rvalues fall into different "styles" that will determine which fn
+// is best suited to generate them.
+#[derive(Debug, PartialEq)]
+pub(crate) enum RvalueFunc {
+ // Best generated by `into`. This is generally exprs that
+ // cause branching, like `match`, but also includes calls.
+ Into,
+
+ // Best generated by `as_rvalue`. This is usually the case.
+ AsRvalue,
+}
+
+/// Determines the category for a given expression. Note that scope
+/// and paren expressions have no category.
+impl Category {
+ pub(crate) fn of(ek: &ExprKind<'_>) -> Option<Category> {
+ match *ek {
+ ExprKind::Scope { .. } => None,
+
+ ExprKind::Field { .. }
+ | ExprKind::Deref { .. }
+ | ExprKind::Index { .. }
+ | ExprKind::UpvarRef { .. }
+ | ExprKind::VarRef { .. }
+ | ExprKind::PlaceTypeAscription { .. }
+ | ExprKind::ValueTypeAscription { .. } => Some(Category::Place),
+
+ ExprKind::LogicalOp { .. }
+ | ExprKind::Match { .. }
+ | ExprKind::If { .. }
+ | ExprKind::Let { .. }
+ | ExprKind::NeverToAny { .. }
+ | ExprKind::Use { .. }
+ | ExprKind::Adt { .. }
+ | ExprKind::Borrow { .. }
+ | ExprKind::AddressOf { .. }
+ | ExprKind::Yield { .. }
+ | ExprKind::Call { .. }
+ | ExprKind::InlineAsm { .. } => Some(Category::Rvalue(RvalueFunc::Into)),
+
+ ExprKind::Array { .. }
+ | ExprKind::Tuple { .. }
+ | ExprKind::Closure { .. }
+ | ExprKind::Unary { .. }
+ | ExprKind::Binary { .. }
+ | ExprKind::Box { .. }
+ | ExprKind::Cast { .. }
+ | ExprKind::Pointer { .. }
+ | ExprKind::Repeat { .. }
+ | ExprKind::Assign { .. }
+ | ExprKind::AssignOp { .. }
+ | ExprKind::ThreadLocalRef(_) => Some(Category::Rvalue(RvalueFunc::AsRvalue)),
+
+ ExprKind::ConstBlock { .. }
+ | ExprKind::Literal { .. }
+ | ExprKind::NonHirLiteral { .. }
+ | ExprKind::ZstLiteral { .. }
+ | ExprKind::ConstParam { .. }
+ | ExprKind::StaticRef { .. }
+ | ExprKind::NamedConst { .. } => Some(Category::Constant),
+
+ ExprKind::Loop { .. }
+ | ExprKind::Block { .. }
+ | ExprKind::Break { .. }
+ | ExprKind::Continue { .. }
+ | ExprKind::Return { .. } =>
+ // FIXME(#27840) these probably want their own
+ // category, like "nonterminating"
+ {
+ Some(Category::Rvalue(RvalueFunc::Into))
+ }
+ }
+ }
+}
diff --git a/compiler/rustc_mir_build/src/build/expr/into.rs b/compiler/rustc_mir_build/src/build/expr/into.rs
new file mode 100644
index 000000000..017d43d10
--- /dev/null
+++ b/compiler/rustc_mir_build/src/build/expr/into.rs
@@ -0,0 +1,599 @@
+//! See docs in build/expr/mod.rs
+
+use crate::build::expr::category::{Category, RvalueFunc};
+use crate::build::{BlockAnd, BlockAndExtension, BlockFrame, Builder, NeedsTemporary};
+use rustc_ast::InlineAsmOptions;
+use rustc_data_structures::fx::FxHashMap;
+use rustc_data_structures::stack::ensure_sufficient_stack;
+use rustc_hir as hir;
+use rustc_index::vec::Idx;
+use rustc_middle::mir::*;
+use rustc_middle::thir::*;
+use rustc_middle::ty::CanonicalUserTypeAnnotation;
+use std::iter;
+
+impl<'a, 'tcx> Builder<'a, 'tcx> {
+ /// Compile `expr`, storing the result into `destination`, which
+ /// is assumed to be uninitialized.
+ pub(crate) fn expr_into_dest(
+ &mut self,
+ destination: Place<'tcx>,
+ mut block: BasicBlock,
+ expr: &Expr<'tcx>,
+ ) -> BlockAnd<()> {
+ debug!("expr_into_dest(destination={:?}, block={:?}, expr={:?})", destination, block, expr);
+
+ // since we frequently have to reference `self` from within a
+ // closure, where `self` would be shadowed, it's easier to
+ // just use the name `this` uniformly
+ let this = self;
+ let expr_span = expr.span;
+ let source_info = this.source_info(expr_span);
+
+ let expr_is_block_or_scope =
+ matches!(expr.kind, ExprKind::Block { .. } | ExprKind::Scope { .. });
+
+ if !expr_is_block_or_scope {
+ this.block_context.push(BlockFrame::SubExpr);
+ }
+
+ let block_and = match expr.kind {
+ ExprKind::Scope { region_scope, lint_level, value } => {
+ let region_scope = (region_scope, source_info);
+ ensure_sufficient_stack(|| {
+ this.in_scope(region_scope, lint_level, |this| {
+ this.expr_into_dest(destination, block, &this.thir[value])
+ })
+ })
+ }
+ ExprKind::Block { body: ref ast_block } => {
+ this.ast_block(destination, block, ast_block, source_info)
+ }
+ ExprKind::Match { scrutinee, ref arms } => {
+ this.match_expr(destination, expr_span, block, &this.thir[scrutinee], arms)
+ }
+ ExprKind::If { cond, then, else_opt, if_then_scope } => {
+ let then_blk;
+ let then_expr = &this.thir[then];
+ let then_source_info = this.source_info(then_expr.span);
+ let condition_scope = this.local_scope();
+
+ let mut else_blk = unpack!(
+ then_blk = this.in_scope(
+ (if_then_scope, then_source_info),
+ LintLevel::Inherited,
+ |this| {
+ let source_info = if this.is_let(cond) {
+ let variable_scope = this.new_source_scope(
+ then_expr.span,
+ LintLevel::Inherited,
+ None,
+ );
+ this.source_scope = variable_scope;
+ SourceInfo { span: then_expr.span, scope: variable_scope }
+ } else {
+ this.source_info(then_expr.span)
+ };
+ let (then_block, else_block) =
+ this.in_if_then_scope(condition_scope, |this| {
+ let then_blk = unpack!(this.then_else_break(
+ block,
+ &this.thir[cond],
+ Some(condition_scope),
+ condition_scope,
+ source_info
+ ));
+
+ this.expr_into_dest(destination, then_blk, then_expr)
+ });
+ then_block.and(else_block)
+ },
+ )
+ );
+
+ else_blk = if let Some(else_opt) = else_opt {
+ unpack!(this.expr_into_dest(destination, else_blk, &this.thir[else_opt]))
+ } else {
+                    // The body of an `if` expression without an `else` clause must return `()`,
+                    // so we implicitly generate an `else {}` if it is not specified.
+ let correct_si = this.source_info(expr_span.shrink_to_hi());
+ this.cfg.push_assign_unit(else_blk, correct_si, destination, this.tcx);
+ else_blk
+ };
+
+ let join_block = this.cfg.start_new_block();
+ this.cfg.goto(then_blk, source_info, join_block);
+ this.cfg.goto(else_blk, source_info, join_block);
+ join_block.unit()
+ }
+ ExprKind::Let { expr, ref pat } => {
+ let scope = this.local_scope();
+ let (true_block, false_block) = this.in_if_then_scope(scope, |this| {
+ this.lower_let_expr(block, &this.thir[expr], pat, scope, None, expr_span)
+ });
+
+ this.cfg.push_assign_constant(
+ true_block,
+ source_info,
+ destination,
+ Constant {
+ span: expr_span,
+ user_ty: None,
+ literal: ConstantKind::from_bool(this.tcx, true),
+ },
+ );
+
+ this.cfg.push_assign_constant(
+ false_block,
+ source_info,
+ destination,
+ Constant {
+ span: expr_span,
+ user_ty: None,
+ literal: ConstantKind::from_bool(this.tcx, false),
+ },
+ );
+
+ let join_block = this.cfg.start_new_block();
+ this.cfg.goto(true_block, source_info, join_block);
+ this.cfg.goto(false_block, source_info, join_block);
+ join_block.unit()
+ }
+ ExprKind::NeverToAny { source } => {
+ let source = &this.thir[source];
+ let is_call =
+ matches!(source.kind, ExprKind::Call { .. } | ExprKind::InlineAsm { .. });
+
+ // (#66975) Source could be a const of type `!`, so has to
+ // exist in the generated MIR.
+ unpack!(
+ block = this.as_temp(block, Some(this.local_scope()), source, Mutability::Mut,)
+ );
+
+ // This is an optimization. If the expression was a call then we already have an
+ // unreachable block. Don't bother to terminate it and create a new one.
+ if is_call {
+ block.unit()
+ } else {
+ this.cfg.terminate(block, source_info, TerminatorKind::Unreachable);
+ let end_block = this.cfg.start_new_block();
+ end_block.unit()
+ }
+ }
+ ExprKind::LogicalOp { op, lhs, rhs } => {
+ // And:
+ //
+ // [block: If(lhs)] -true-> [else_block: dest = (rhs)]
+ // | (false)
+                //  [shortcircuit_block: dest = false]
+ //
+ // Or:
+ //
+ // [block: If(lhs)] -false-> [else_block: dest = (rhs)]
+ // | (true)
+                //  [shortcircuit_block: dest = true]
+
+ let (shortcircuit_block, mut else_block, join_block) = (
+ this.cfg.start_new_block(),
+ this.cfg.start_new_block(),
+ this.cfg.start_new_block(),
+ );
+
+ let lhs = unpack!(block = this.as_local_operand(block, &this.thir[lhs]));
+ let blocks = match op {
+ LogicalOp::And => (else_block, shortcircuit_block),
+ LogicalOp::Or => (shortcircuit_block, else_block),
+ };
+ let term = TerminatorKind::if_(this.tcx, lhs, blocks.0, blocks.1);
+ this.cfg.terminate(block, source_info, term);
+
+ this.cfg.push_assign_constant(
+ shortcircuit_block,
+ source_info,
+ destination,
+ Constant {
+ span: expr_span,
+ user_ty: None,
+ literal: match op {
+ LogicalOp::And => ConstantKind::from_bool(this.tcx, false),
+ LogicalOp::Or => ConstantKind::from_bool(this.tcx, true),
+ },
+ },
+ );
+ this.cfg.goto(shortcircuit_block, source_info, join_block);
+
+ let rhs = unpack!(else_block = this.as_local_operand(else_block, &this.thir[rhs]));
+ this.cfg.push_assign(else_block, source_info, destination, Rvalue::Use(rhs));
+ this.cfg.goto(else_block, source_info, join_block);
+
+ join_block.unit()
+ }
+ ExprKind::Loop { body } => {
+ // [block]
+ // |
+ // [loop_block] -> [body_block] -/eval. body/-> [body_block_end]
+ // | ^ |
+ // false link | |
+ // | +-----------------------------------------+
+ // +-> [diverge_cleanup]
+ // The false link is required to make sure borrowck considers unwinds through the
+ // body, even when the exact code in the body cannot unwind
+
+ let loop_block = this.cfg.start_new_block();
+
+ // Start the loop.
+ this.cfg.goto(block, source_info, loop_block);
+
+ this.in_breakable_scope(Some(loop_block), destination, expr_span, move |this| {
+ // conduct the test, if necessary
+ let body_block = this.cfg.start_new_block();
+ this.cfg.terminate(
+ loop_block,
+ source_info,
+ TerminatorKind::FalseUnwind { real_target: body_block, unwind: None },
+ );
+ this.diverge_from(loop_block);
+
+ // The “return” value of the loop body must always be a unit. We therefore
+ // introduce a unit temporary as the destination for the loop body.
+ let tmp = this.get_unit_temp();
+ // Execute the body, branching back to the test.
+ let body_block_end =
+ unpack!(this.expr_into_dest(tmp, body_block, &this.thir[body]));
+ this.cfg.goto(body_block_end, source_info, loop_block);
+
+ // Loops are only exited by `break` expressions.
+ None
+ })
+ }
+ ExprKind::Call { ty: _, fun, ref args, from_hir_call, fn_span } => {
+ let fun = unpack!(block = this.as_local_operand(block, &this.thir[fun]));
+ let args: Vec<_> = args
+ .into_iter()
+ .copied()
+ .map(|arg| unpack!(block = this.as_local_call_operand(block, &this.thir[arg])))
+ .collect();
+
+ let success = this.cfg.start_new_block();
+
+ this.record_operands_moved(&args);
+
+ debug!("expr_into_dest: fn_span={:?}", fn_span);
+
+ this.cfg.terminate(
+ block,
+ source_info,
+ TerminatorKind::Call {
+ func: fun,
+ args,
+ cleanup: None,
+ destination,
+ // The presence or absence of a return edge affects control-flow sensitive
+ // MIR checks and ultimately whether code is accepted or not. We can only
+ // omit the return edge if a return type is visibly uninhabited to a module
+ // that makes the call.
+ target: if this.tcx.is_ty_uninhabited_from(
+ this.parent_module,
+ expr.ty,
+ this.param_env,
+ ) {
+ None
+ } else {
+ Some(success)
+ },
+ from_hir_call,
+ fn_span,
+ },
+ );
+ this.diverge_from(block);
+ success.unit()
+ }
+ ExprKind::Use { source } => this.expr_into_dest(destination, block, &this.thir[source]),
+ ExprKind::Borrow { arg, borrow_kind } => {
+ let arg = &this.thir[arg];
+ // We don't do this in `as_rvalue` because we use `as_place`
+                // for borrow expressions, so we cannot create an `Rvalue` that
+ // remains valid across user code. `as_rvalue` is usually called
+ // by this method anyway, so this shouldn't cause too many
+ // unnecessary temporaries.
+ let arg_place = match borrow_kind {
+ BorrowKind::Shared => unpack!(block = this.as_read_only_place(block, arg)),
+ _ => unpack!(block = this.as_place(block, arg)),
+ };
+ let borrow = Rvalue::Ref(this.tcx.lifetimes.re_erased, borrow_kind, arg_place);
+ this.cfg.push_assign(block, source_info, destination, borrow);
+ block.unit()
+ }
+ ExprKind::AddressOf { mutability, arg } => {
+ let arg = &this.thir[arg];
+ let place = match mutability {
+ hir::Mutability::Not => this.as_read_only_place(block, arg),
+ hir::Mutability::Mut => this.as_place(block, arg),
+ };
+ let address_of = Rvalue::AddressOf(mutability, unpack!(block = place));
+ this.cfg.push_assign(block, source_info, destination, address_of);
+ block.unit()
+ }
+ ExprKind::Adt(box Adt {
+ adt_def,
+ variant_index,
+ substs,
+ user_ty,
+ ref fields,
+ ref base,
+ }) => {
+ // See the notes for `ExprKind::Array` in `as_rvalue` and for
+ // `ExprKind::Borrow` above.
+ let is_union = adt_def.is_union();
+ let active_field_index = if is_union { Some(fields[0].name.index()) } else { None };
+
+ let scope = this.local_scope();
+
+ // first process the set of fields that were provided
+ // (evaluating them in order given by user)
+ let fields_map: FxHashMap<_, _> = fields
+ .into_iter()
+ .map(|f| {
+ let local_info = Box::new(LocalInfo::AggregateTemp);
+ (
+ f.name,
+ unpack!(
+ block = this.as_operand(
+ block,
+ Some(scope),
+ &this.thir[f.expr],
+ Some(local_info),
+ NeedsTemporary::Maybe,
+ )
+ ),
+ )
+ })
+ .collect();
+
+ let field_names: Vec<_> =
+ (0..adt_def.variant(variant_index).fields.len()).map(Field::new).collect();
+
+ let fields: Vec<_> = if let Some(FruInfo { base, field_types }) = base {
+ let place_builder =
+ unpack!(block = this.as_place_builder(block, &this.thir[*base]));
+
+                    // MIR does not natively support FRU (functional record update), so for each
+ // base-supplied field, generate an operand that
+ // reads it from the base.
+ iter::zip(field_names, &**field_types)
+ .map(|(n, ty)| match fields_map.get(&n) {
+ Some(v) => v.clone(),
+ None => {
+ let place_builder = place_builder.clone();
+ this.consume_by_copy_or_move(
+ place_builder
+ .field(n, *ty)
+ .into_place(this.tcx, this.typeck_results),
+ )
+ }
+ })
+ .collect()
+ } else {
+ field_names.iter().filter_map(|n| fields_map.get(n).cloned()).collect()
+ };
+
+ let inferred_ty = expr.ty;
+ let user_ty = user_ty.map(|ty| {
+ this.canonical_user_type_annotations.push(CanonicalUserTypeAnnotation {
+ span: source_info.span,
+ user_ty: ty,
+ inferred_ty,
+ })
+ });
+ let adt = Box::new(AggregateKind::Adt(
+ adt_def.did(),
+ variant_index,
+ substs,
+ user_ty,
+ active_field_index,
+ ));
+ this.cfg.push_assign(
+ block,
+ source_info,
+ destination,
+ Rvalue::Aggregate(adt, fields),
+ );
+ block.unit()
+ }
+ ExprKind::InlineAsm { template, ref operands, options, line_spans } => {
+ use rustc_middle::{mir, thir};
+ let operands = operands
+ .into_iter()
+ .map(|op| match *op {
+ thir::InlineAsmOperand::In { reg, expr } => mir::InlineAsmOperand::In {
+ reg,
+ value: unpack!(block = this.as_local_operand(block, &this.thir[expr])),
+ },
+ thir::InlineAsmOperand::Out { reg, late, expr } => {
+ mir::InlineAsmOperand::Out {
+ reg,
+ late,
+ place: expr.map(|expr| {
+ unpack!(block = this.as_place(block, &this.thir[expr]))
+ }),
+ }
+ }
+ thir::InlineAsmOperand::InOut { reg, late, expr } => {
+ let place = unpack!(block = this.as_place(block, &this.thir[expr]));
+ mir::InlineAsmOperand::InOut {
+ reg,
+ late,
+ // This works because asm operands must be Copy
+ in_value: Operand::Copy(place),
+ out_place: Some(place),
+ }
+ }
+ thir::InlineAsmOperand::SplitInOut { reg, late, in_expr, out_expr } => {
+ mir::InlineAsmOperand::InOut {
+ reg,
+ late,
+ in_value: unpack!(
+ block = this.as_local_operand(block, &this.thir[in_expr])
+ ),
+ out_place: out_expr.map(|out_expr| {
+ unpack!(block = this.as_place(block, &this.thir[out_expr]))
+ }),
+ }
+ }
+ thir::InlineAsmOperand::Const { value, span } => {
+ mir::InlineAsmOperand::Const {
+ value: Box::new(Constant { span, user_ty: None, literal: value }),
+ }
+ }
+ thir::InlineAsmOperand::SymFn { value, span } => {
+ mir::InlineAsmOperand::SymFn {
+ value: Box::new(Constant { span, user_ty: None, literal: value }),
+ }
+ }
+ thir::InlineAsmOperand::SymStatic { def_id } => {
+ mir::InlineAsmOperand::SymStatic { def_id }
+ }
+ })
+ .collect();
+
+ if !options.contains(InlineAsmOptions::NORETURN) {
+ this.cfg.push_assign_unit(block, source_info, destination, this.tcx);
+ }
+
+ let destination_block = this.cfg.start_new_block();
+ this.cfg.terminate(
+ block,
+ source_info,
+ TerminatorKind::InlineAsm {
+ template,
+ operands,
+ options,
+ line_spans,
+ destination: if options.contains(InlineAsmOptions::NORETURN) {
+ None
+ } else {
+ Some(destination_block)
+ },
+ cleanup: None,
+ },
+ );
+ if options.contains(InlineAsmOptions::MAY_UNWIND) {
+ this.diverge_from(block);
+ }
+ destination_block.unit()
+ }
+
+ // These cases don't actually need a destination
+ ExprKind::Assign { .. } | ExprKind::AssignOp { .. } => {
+ unpack!(block = this.stmt_expr(block, expr, None));
+ this.cfg.push_assign_unit(block, source_info, destination, this.tcx);
+ block.unit()
+ }
+
+ ExprKind::Continue { .. } | ExprKind::Break { .. } | ExprKind::Return { .. } => {
+ unpack!(block = this.stmt_expr(block, expr, None));
+ // No assign, as these have type `!`.
+ block.unit()
+ }
+
+ // Avoid creating a temporary
+ ExprKind::VarRef { .. }
+ | ExprKind::UpvarRef { .. }
+ | ExprKind::PlaceTypeAscription { .. }
+ | ExprKind::ValueTypeAscription { .. } => {
+ debug_assert!(Category::of(&expr.kind) == Some(Category::Place));
+
+ let place = unpack!(block = this.as_place(block, expr));
+ let rvalue = Rvalue::Use(this.consume_by_copy_or_move(place));
+ this.cfg.push_assign(block, source_info, destination, rvalue);
+ block.unit()
+ }
+ ExprKind::Index { .. } | ExprKind::Deref { .. } | ExprKind::Field { .. } => {
+ debug_assert_eq!(Category::of(&expr.kind), Some(Category::Place));
+
+ // Create a "fake" temporary variable so that we check that the
+ // value is Sized. Usually, this is caught in type checking, but
+ // in the case of box expr there is no such check.
+ if !destination.projection.is_empty() {
+ this.local_decls.push(LocalDecl::new(expr.ty, expr.span));
+ }
+
+ let place = unpack!(block = this.as_place(block, expr));
+ let rvalue = Rvalue::Use(this.consume_by_copy_or_move(place));
+ this.cfg.push_assign(block, source_info, destination, rvalue);
+ block.unit()
+ }
+
+ ExprKind::Yield { value } => {
+ let scope = this.local_scope();
+ let value = unpack!(
+ block = this.as_operand(
+ block,
+ Some(scope),
+ &this.thir[value],
+ None,
+ NeedsTemporary::No
+ )
+ );
+ let resume = this.cfg.start_new_block();
+ this.cfg.terminate(
+ block,
+ source_info,
+ TerminatorKind::Yield { value, resume, resume_arg: destination, drop: None },
+ );
+ this.generator_drop_cleanup(block);
+ resume.unit()
+ }
+
+ // these are the cases that are more naturally handled by some other mode
+ ExprKind::Unary { .. }
+ | ExprKind::Binary { .. }
+ | ExprKind::Box { .. }
+ | ExprKind::Cast { .. }
+ | ExprKind::Pointer { .. }
+ | ExprKind::Repeat { .. }
+ | ExprKind::Array { .. }
+ | ExprKind::Tuple { .. }
+ | ExprKind::Closure { .. }
+ | ExprKind::ConstBlock { .. }
+ | ExprKind::Literal { .. }
+ | ExprKind::NamedConst { .. }
+ | ExprKind::NonHirLiteral { .. }
+ | ExprKind::ZstLiteral { .. }
+ | ExprKind::ConstParam { .. }
+ | ExprKind::ThreadLocalRef(_)
+ | ExprKind::StaticRef { .. } => {
+ debug_assert!(match Category::of(&expr.kind).unwrap() {
+ // should be handled above
+ Category::Rvalue(RvalueFunc::Into) => false,
+
+ // must be handled above or else we get an
+ // infinite loop in the builder; see
+ // e.g., `ExprKind::VarRef` above
+ Category::Place => false,
+
+ _ => true,
+ });
+
+ let rvalue = unpack!(block = this.as_local_rvalue(block, expr));
+ this.cfg.push_assign(block, source_info, destination, rvalue);
+ block.unit()
+ }
+ };
+
+ if !expr_is_block_or_scope {
+ let popped = this.block_context.pop();
+ assert!(popped.is_some());
+ }
+
+ block_and
+ }
+
+ fn is_let(&self, expr: ExprId) -> bool {
+ match self.thir[expr].kind {
+ ExprKind::Let { .. } => true,
+ ExprKind::Scope { value, .. } => self.is_let(value),
+ _ => false,
+ }
+ }
+}
diff --git a/compiler/rustc_mir_build/src/build/expr/mod.rs b/compiler/rustc_mir_build/src/build/expr/mod.rs
new file mode 100644
index 000000000..f5ae060d6
--- /dev/null
+++ b/compiler/rustc_mir_build/src/build/expr/mod.rs
@@ -0,0 +1,70 @@
+//! Builds MIR from expressions. As a caller into this module, you
+//! have many options, but the first thing you have to decide is
+//! whether you are evaluating this expression for its *value*, its
+//! *location*, or as a *constant*.
+//!
+//! Typically, you want the value: e.g., if you are doing `expr_a +
+//! expr_b`, you want the values of those expressions. In that case,
+//! you want one of the following functions. Note that if the expr has
+//! a type that is not `Copy`, then using any of these functions will
+//! "move" the value out of its current home (if any).
+//!
+//! - `expr_into_dest` -- writes the value into a specific location, which
+//! should be uninitialized
+//! - `as_operand` -- evaluates the value and yields an `Operand`,
+//! suitable for use as an argument to an `Rvalue`
+//! - `as_temp` -- evaluates into a temporary; this is similar to `as_operand`
+//! except it always returns a fresh place, even for constants
+//! - `as_rvalue` -- yields an `Rvalue`, suitable for use in an assignment;
+//! as of this writing, never needed outside of the `expr` module itself
+//!
+//! Sometimes, though, you want the expression's *location*. An example
+//! would be during a match statement, or the operand of the `&`
+//! operator. In that case, you want `as_place`. This will create a
+//! temporary if necessary.
+//!
+//! Finally, if it's a constant you seek, then call
+//! `as_constant`. This creates a `Constant`, but naturally it can
+//! only be used on constant expressions and hence is needed only in
+//! very limited contexts.
+//!
+//! ### Implementation notes
+//!
+//! For any given kind of expression, there is generally one way that
+//! can be lowered most naturally. This is specified by the
+//! `Category::of` function in the `category` module. For example, a
+//! struct expression (or other expression that creates a new value)
+//! is typically easiest to write in terms of `as_rvalue` or `into`,
+//! whereas a reference to a field is easiest to write in terms of
+//! `as_place`. (The exception to this is scope and paren
+//! expressions, which have no category.)
+//!
+//! Therefore, the various functions above make use of one another in
+//! a descending fashion. For any given expression, you should pick
+//! the most suitable spot to implement it, and then just let the
+//! other fns cycle around. The handoff works like this:
+//!
+//! - `into(place)` -> fallback is to create an rvalue with `as_rvalue` and assign it to `place`
+//! - `as_rvalue` -> fallback is to create an `Operand` with `as_operand` and use `Rvalue::Use`
+//! - `as_operand` -> either invokes `as_constant` or `as_temp`
+//! - `as_constant` -> (no fallback)
+//! - `as_temp` -> creates a temporary and either calls `as_place` or `into`
+//! - `as_place` -> for rvalues, falls back to `as_temp` and returns that
+//!
+//! As you can see, there is a cycle where `into` can (in theory) fall back to `as_temp`,
+//! which can fall back to `into`. So if one of the `ExprKind` variants is not, in fact,
+//! implemented in the category where it is supposed to be, there will be a problem.
+//!
+//! Of those fallbacks, the most interesting one is `into`, because
+//! it discriminates based on the category of the expression. This is
+//! basically the point where the "by value" operations are bridged
+//! over to the "by reference" mode (`as_place`).
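+//!
+//! As a concrete (sketched) example, lowering `a + b` into some destination
+//! roughly follows this path:
+//!
+//! - `expr_into_dest` sees a binary expression, whose category is
+//!   `Rvalue(AsRvalue)`, and falls back to `as_local_rvalue`/`as_rvalue`;
+//! - `as_rvalue` calls `as_operand` on `a` and on `b`;
+//! - `as_operand` sees that each operand is a place expression rather than a
+//!   constant, so it spills it into a fresh temporary via `as_temp`;
+//! - `as_temp` in turn fills that temporary using `expr_into_dest`, closing
+//!   the cycle described above.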
+
+pub(crate) mod as_constant;
+mod as_operand;
+pub mod as_place;
+mod as_rvalue;
+mod as_temp;
+pub mod category;
+mod into;
+mod stmt;
diff --git a/compiler/rustc_mir_build/src/build/expr/stmt.rs b/compiler/rustc_mir_build/src/build/expr/stmt.rs
new file mode 100644
index 000000000..a7e1331aa
--- /dev/null
+++ b/compiler/rustc_mir_build/src/build/expr/stmt.rs
@@ -0,0 +1,149 @@
+use crate::build::scope::BreakableTarget;
+use crate::build::{BlockAnd, BlockAndExtension, BlockFrame, Builder};
+use rustc_middle::middle::region;
+use rustc_middle::mir::*;
+use rustc_middle::thir::*;
+
+impl<'a, 'tcx> Builder<'a, 'tcx> {
+ /// Builds a block of MIR statements to evaluate the THIR `expr`.
+    /// If the original expression was an AST statement
+    /// (e.g., `some().code(&here());`), then `opt_stmt_span` is the
+    /// span of that statement (including its semicolon, if any).
+ /// The scope is used if a statement temporary must be dropped.
+ pub(crate) fn stmt_expr(
+ &mut self,
+ mut block: BasicBlock,
+ expr: &Expr<'tcx>,
+ statement_scope: Option<region::Scope>,
+ ) -> BlockAnd<()> {
+ let this = self;
+ let expr_span = expr.span;
+ let source_info = this.source_info(expr.span);
+ // Handle a number of expressions that don't need a destination at all. This
+ // avoids needing a mountain of temporary `()` variables.
+ match expr.kind {
+ ExprKind::Scope { region_scope, lint_level, value } => {
+ this.in_scope((region_scope, source_info), lint_level, |this| {
+ this.stmt_expr(block, &this.thir[value], statement_scope)
+ })
+ }
+ ExprKind::Assign { lhs, rhs } => {
+ let lhs = &this.thir[lhs];
+ let rhs = &this.thir[rhs];
+ let lhs_span = lhs.span;
+
+ // Note: we evaluate assignments right-to-left. This
+ // is better for borrowck interaction with overloaded
+ // operators like x[j] = x[i].
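+                // For `x[j] = x[i]` this means the value of `x[i]` is read into a
+                // temporary before the destination place `x[j]` is evaluated, so
+                // the two uses of `x` do not overlap.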
+
+ debug!("stmt_expr Assign block_context.push(SubExpr) : {:?}", expr);
+ this.block_context.push(BlockFrame::SubExpr);
+
+ // Generate better code for things that don't need to be
+ // dropped.
+ if lhs.ty.needs_drop(this.tcx, this.param_env) {
+ let rhs = unpack!(block = this.as_local_operand(block, rhs));
+ let lhs = unpack!(block = this.as_place(block, lhs));
+ unpack!(block = this.build_drop_and_replace(block, lhs_span, lhs, rhs));
+ } else {
+ let rhs = unpack!(block = this.as_local_rvalue(block, rhs));
+ let lhs = unpack!(block = this.as_place(block, lhs));
+ this.cfg.push_assign(block, source_info, lhs, rhs);
+ }
+
+ this.block_context.pop();
+ block.unit()
+ }
+ ExprKind::AssignOp { op, lhs, rhs } => {
+ // FIXME(#28160) there is an interesting semantics
+ // question raised here -- should we "freeze" the
+ // value of the lhs here? I'm inclined to think not,
+ // since it seems closer to the semantics of the
+ // overloaded version, which takes `&mut self`. This
+ // only affects weird things like `x += {x += 1; x}`
+ // -- is that equal to `x + (x + 1)` or `2*(x+1)`?
+
+ let lhs = &this.thir[lhs];
+ let rhs = &this.thir[rhs];
+ let lhs_ty = lhs.ty;
+
+ debug!("stmt_expr AssignOp block_context.push(SubExpr) : {:?}", expr);
+ this.block_context.push(BlockFrame::SubExpr);
+
+ // As above, RTL.
+ let rhs = unpack!(block = this.as_local_operand(block, rhs));
+ let lhs = unpack!(block = this.as_place(block, lhs));
+
+ // we don't have to drop prior contents or anything
+ // because AssignOp is only legal for Copy types
+ // (overloaded ops should be desugared into a call).
+ let result = unpack!(
+ block =
+ this.build_binary_op(block, op, expr_span, lhs_ty, Operand::Copy(lhs), rhs)
+ );
+ this.cfg.push_assign(block, source_info, lhs, result);
+
+ this.block_context.pop();
+ block.unit()
+ }
+ ExprKind::Continue { label } => {
+ this.break_scope(block, None, BreakableTarget::Continue(label), source_info)
+ }
+ ExprKind::Break { label, value } => this.break_scope(
+ block,
+ value.map(|value| &this.thir[value]),
+ BreakableTarget::Break(label),
+ source_info,
+ ),
+ ExprKind::Return { value } => this.break_scope(
+ block,
+ value.map(|value| &this.thir[value]),
+ BreakableTarget::Return,
+ source_info,
+ ),
+ _ => {
+ assert!(
+ statement_scope.is_some(),
+ "Should not be calling `stmt_expr` on a general expression \
+ without a statement scope",
+ );
+
+                // Issue #54382: When creating a temp for the value of
+                // an expression like:
+ //
+ // `{ side_effects(); { let l = stuff(); the_value } }`
+ //
+ // it is usually better to focus on `the_value` rather
+ // than the entirety of block(s) surrounding it.
+ let adjusted_span = (|| {
+ if let ExprKind::Block { body } = &expr.kind && let Some(tail_ex) = body.expr {
+ let mut expr = &this.thir[tail_ex];
+ while let ExprKind::Block {
+ body: Block { expr: Some(nested_expr), .. },
+ }
+ | ExprKind::Scope { value: nested_expr, .. } = expr.kind
+ {
+ expr = &this.thir[nested_expr];
+ }
+ this.block_context.push(BlockFrame::TailExpr {
+ tail_result_is_ignored: true,
+ span: expr.span,
+ });
+ return Some(expr.span);
+ }
+ None
+ })();
+
+ let temp =
+ unpack!(block = this.as_temp(block, statement_scope, expr, Mutability::Not));
+
+ if let Some(span) = adjusted_span {
+ this.local_decls[temp].source_info.span = span;
+ this.block_context.pop();
+ }
+
+ block.unit()
+ }
+ }
+ }
+}
diff --git a/compiler/rustc_mir_build/src/build/matches/mod.rs b/compiler/rustc_mir_build/src/build/matches/mod.rs
new file mode 100644
index 000000000..58b1564cc
--- /dev/null
+++ b/compiler/rustc_mir_build/src/build/matches/mod.rs
@@ -0,0 +1,2354 @@
+//! Code related to match expressions. These are sufficiently complex to
+//! warrant their own module and submodules. :) This main module includes the
+//! high-level algorithm, the submodules contain the details.
+//!
+//! This also includes code for pattern bindings in `let` statements and
+//! function parameters.
+
+use crate::build::expr::as_place::PlaceBuilder;
+use crate::build::scope::DropKind;
+use crate::build::ForGuard::{self, OutsideGuard, RefWithinGuard};
+use crate::build::{BlockAnd, BlockAndExtension, Builder};
+use crate::build::{GuardFrame, GuardFrameLocal, LocalsForNode};
+use rustc_data_structures::{
+ fx::{FxHashSet, FxIndexMap, FxIndexSet},
+ stack::ensure_sufficient_stack,
+};
+use rustc_index::bit_set::BitSet;
+use rustc_middle::middle::region;
+use rustc_middle::mir::*;
+use rustc_middle::thir::{self, *};
+use rustc_middle::ty::{self, CanonicalUserTypeAnnotation, Ty};
+use rustc_span::symbol::Symbol;
+use rustc_span::{BytePos, Pos, Span};
+use rustc_target::abi::VariantIdx;
+use smallvec::{smallvec, SmallVec};
+
+// helper functions, broken out by category:
+mod simplify;
+mod test;
+mod util;
+
+use std::borrow::Borrow;
+use std::convert::TryFrom;
+use std::mem;
+
+impl<'a, 'tcx> Builder<'a, 'tcx> {
+ pub(crate) fn then_else_break(
+ &mut self,
+ mut block: BasicBlock,
+ expr: &Expr<'tcx>,
+ temp_scope_override: Option<region::Scope>,
+ break_scope: region::Scope,
+ variable_source_info: SourceInfo,
+ ) -> BlockAnd<()> {
+ let this = self;
+ let expr_span = expr.span;
+
+ match expr.kind {
+ ExprKind::LogicalOp { op: LogicalOp::And, lhs, rhs } => {
+ let lhs_then_block = unpack!(this.then_else_break(
+ block,
+ &this.thir[lhs],
+ temp_scope_override,
+ break_scope,
+ variable_source_info,
+ ));
+
+ let rhs_then_block = unpack!(this.then_else_break(
+ lhs_then_block,
+ &this.thir[rhs],
+ temp_scope_override,
+ break_scope,
+ variable_source_info,
+ ));
+
+ rhs_then_block.unit()
+ }
+ ExprKind::Scope { region_scope, lint_level, value } => {
+ let region_scope = (region_scope, this.source_info(expr_span));
+ this.in_scope(region_scope, lint_level, |this| {
+ this.then_else_break(
+ block,
+ &this.thir[value],
+ temp_scope_override,
+ break_scope,
+ variable_source_info,
+ )
+ })
+ }
+ ExprKind::Let { expr, ref pat } => this.lower_let_expr(
+ block,
+ &this.thir[expr],
+ pat,
+ break_scope,
+ Some(variable_source_info.scope),
+ variable_source_info.span,
+ ),
+ _ => {
+ let temp_scope = temp_scope_override.unwrap_or_else(|| this.local_scope());
+ let mutability = Mutability::Mut;
+ let place =
+ unpack!(block = this.as_temp(block, Some(temp_scope), expr, mutability));
+ let operand = Operand::Move(Place::from(place));
+
+ let then_block = this.cfg.start_new_block();
+ let else_block = this.cfg.start_new_block();
+ let term = TerminatorKind::if_(this.tcx, operand, then_block, else_block);
+
+ let source_info = this.source_info(expr_span);
+ this.cfg.terminate(block, source_info, term);
+ this.break_for_else(else_block, break_scope, source_info);
+
+ then_block.unit()
+ }
+ }
+ }
+
+ /// Generates MIR for a `match` expression.
+ ///
+ /// The MIR that we generate for a match looks like this.
+ ///
+ /// ```text
+ /// [ 0. Pre-match ]
+ /// |
+ /// [ 1. Evaluate Scrutinee (expression being matched on) ]
+ /// [ (fake read of scrutinee) ]
+ /// |
+ /// [ 2. Decision tree -- check discriminants ] <--------+
+ /// | |
+ /// | (once a specific arm is chosen) |
+ /// | |
+ /// [pre_binding_block] [otherwise_block]
+ /// | |
+ /// [ 3. Create "guard bindings" for arm ] |
+ /// [ (create fake borrows) ] |
+ /// | |
+ /// [ 4. Execute guard code ] |
+ /// [ (read fake borrows) ] --(guard is false)-----------+
+ /// |
+ /// | (guard results in true)
+ /// |
+ /// [ 5. Create real bindings and execute arm ]
+ /// |
+ /// [ Exit match ]
+ /// ```
+ ///
+ /// All of the different arms have been stacked on top of each other to
+ /// simplify the diagram. For an arm with no guard the blocks marked 3 and
+ /// 4 and the fake borrows are omitted.
+ ///
+ /// We generate MIR in the following steps:
+ ///
+ /// 1. Evaluate the scrutinee and add the fake read of it ([Builder::lower_scrutinee]).
+ /// 2. Create the decision tree ([Builder::lower_match_tree]).
+ /// 3. Determine the fake borrows that are needed from the places that were
+ /// matched against and create the required temporaries for them
+ /// ([Builder::calculate_fake_borrows]).
+ /// 4. Create everything else: the guards and the arms ([Builder::lower_match_arms]).
+ ///
+ /// ## False edges
+ ///
+ /// We don't want to have the exact structure of the decision tree be
+ /// visible through borrow checking. False edges ensure that the CFG as
+ /// seen by borrow checking doesn't encode this. False edges are added:
+ ///
+ /// * From each pre-binding block to the next pre-binding block.
+ /// * From each otherwise block to the next pre-binding block.
+ #[tracing::instrument(level = "debug", skip(self, arms))]
+ pub(crate) fn match_expr(
+ &mut self,
+ destination: Place<'tcx>,
+ span: Span,
+ mut block: BasicBlock,
+ scrutinee: &Expr<'tcx>,
+ arms: &[ArmId],
+ ) -> BlockAnd<()> {
+ let scrutinee_span = scrutinee.span;
+ let scrutinee_place =
+ unpack!(block = self.lower_scrutinee(block, scrutinee, scrutinee_span,));
+
+ let mut arm_candidates = self.create_match_candidates(scrutinee_place.clone(), &arms);
+
+ let match_has_guard = arms.iter().copied().any(|arm| self.thir[arm].guard.is_some());
+ let mut candidates =
+ arm_candidates.iter_mut().map(|(_, candidate)| candidate).collect::<Vec<_>>();
+
+ let match_start_span = span.shrink_to_lo().to(scrutinee.span);
+
+ let fake_borrow_temps = self.lower_match_tree(
+ block,
+ scrutinee_span,
+ match_start_span,
+ match_has_guard,
+ &mut candidates,
+ );
+
+ self.lower_match_arms(
+ destination,
+ scrutinee_place,
+ scrutinee_span,
+ arm_candidates,
+ self.source_info(span),
+ fake_borrow_temps,
+ )
+ }
+
+ /// Evaluate the scrutinee and add the fake read of it.
+ fn lower_scrutinee(
+ &mut self,
+ mut block: BasicBlock,
+ scrutinee: &Expr<'tcx>,
+ scrutinee_span: Span,
+ ) -> BlockAnd<PlaceBuilder<'tcx>> {
+ let scrutinee_place_builder = unpack!(block = self.as_place_builder(block, scrutinee));
+ // Matching on a `scrutinee_place` with an uninhabited type doesn't
+ // generate any memory reads by itself, and so if the place "expression"
+ // contains unsafe operations like raw pointer dereferences or union
+ // field projections, we wouldn't know to require an `unsafe` block
+ // around a `match` equivalent to `std::intrinsics::unreachable()`.
+ // See issue #47412 for this hole being discovered in the wild.
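+        // For example (illustrative): in `match *raw_ptr {}` on a raw pointer to
+        // an uninhabited type, the deref is the only unsafe operation, and without
+        // a read of the place the unsafety check would never see it.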
+ //
+ // HACK(eddyb) Work around the above issue by adding a dummy inspection
+ // of `scrutinee_place`, specifically by applying `ReadForMatch`.
+ //
+ // NOTE: ReadForMatch also checks that the scrutinee is initialized.
+ // This is currently needed to not allow matching on an uninitialized,
+ // uninhabited value. If we get never patterns, those will check that
+ // the place is initialized, and so this read would only be used to
+ // check safety.
+ let cause_matched_place = FakeReadCause::ForMatchedPlace(None);
+ let source_info = self.source_info(scrutinee_span);
+
+ if let Ok(scrutinee_builder) =
+ scrutinee_place_builder.clone().try_upvars_resolved(self.tcx, self.typeck_results)
+ {
+ let scrutinee_place = scrutinee_builder.into_place(self.tcx, self.typeck_results);
+ self.cfg.push_fake_read(block, source_info, cause_matched_place, scrutinee_place);
+ }
+
+ block.and(scrutinee_place_builder)
+ }
+
+ /// Create the initial `Candidate`s for a `match` expression.
+ fn create_match_candidates<'pat>(
+ &mut self,
+ scrutinee: PlaceBuilder<'tcx>,
+ arms: &'pat [ArmId],
+ ) -> Vec<(&'pat Arm<'tcx>, Candidate<'pat, 'tcx>)>
+ where
+ 'a: 'pat,
+ {
+ // Assemble a list of candidates: there is one candidate per pattern,
+ // which means there may be more than one candidate *per arm*.
+ arms.iter()
+ .copied()
+ .map(|arm| {
+ let arm = &self.thir[arm];
+ let arm_has_guard = arm.guard.is_some();
+ let arm_candidate = Candidate::new(scrutinee.clone(), &arm.pattern, arm_has_guard);
+ (arm, arm_candidate)
+ })
+ .collect()
+ }
+
+ /// Create the decision tree for the match expression, starting from `block`.
+ ///
+ /// Modifies `candidates` to store the bindings and type ascriptions for
+ /// that candidate.
+ ///
+ /// Returns the places that need fake borrows because we bind or test them.
+ fn lower_match_tree<'pat>(
+ &mut self,
+ block: BasicBlock,
+ scrutinee_span: Span,
+ match_start_span: Span,
+ match_has_guard: bool,
+ candidates: &mut [&mut Candidate<'pat, 'tcx>],
+ ) -> Vec<(Place<'tcx>, Local)> {
+ // The set of places that we are creating fake borrows of. If there are
+ // no match guards then we don't need any fake borrows, so don't track
+ // them.
+ let mut fake_borrows = match_has_guard.then(FxIndexSet::default);
+
+ let mut otherwise = None;
+
+ // This will generate code to test scrutinee_place and
+ // branch to the appropriate arm block
+ self.match_candidates(
+ match_start_span,
+ scrutinee_span,
+ block,
+ &mut otherwise,
+ candidates,
+ &mut fake_borrows,
+ );
+
+ if let Some(otherwise_block) = otherwise {
+ // See the doc comment on `match_candidates` for why we may have an
+ // otherwise block. Match checking will ensure this is actually
+ // unreachable.
+ let source_info = self.source_info(scrutinee_span);
+ self.cfg.terminate(otherwise_block, source_info, TerminatorKind::Unreachable);
+ }
+
+ // Link each leaf candidate to the `pre_binding_block` of the next one.
+ let mut previous_candidate: Option<&mut Candidate<'_, '_>> = None;
+
+ for candidate in candidates {
+ candidate.visit_leaves(|leaf_candidate| {
+ if let Some(ref mut prev) = previous_candidate {
+ prev.next_candidate_pre_binding_block = leaf_candidate.pre_binding_block;
+ }
+ previous_candidate = Some(leaf_candidate);
+ });
+ }
+
+ if let Some(ref borrows) = fake_borrows {
+ self.calculate_fake_borrows(borrows, scrutinee_span)
+ } else {
+ Vec::new()
+ }
+ }
+
+ /// Lower the bindings, guards and arm bodies of a `match` expression.
+ ///
+ /// The decision tree should have already been created
+ /// (by [Builder::lower_match_tree]).
+ ///
+ /// `outer_source_info` is the SourceInfo for the whole match.
+ fn lower_match_arms(
+ &mut self,
+ destination: Place<'tcx>,
+ scrutinee_place_builder: PlaceBuilder<'tcx>,
+ scrutinee_span: Span,
+ arm_candidates: Vec<(&'_ Arm<'tcx>, Candidate<'_, 'tcx>)>,
+ outer_source_info: SourceInfo,
+ fake_borrow_temps: Vec<(Place<'tcx>, Local)>,
+ ) -> BlockAnd<()> {
+ let arm_end_blocks: Vec<_> = arm_candidates
+ .into_iter()
+ .map(|(arm, candidate)| {
+ debug!("lowering arm {:?}\ncandidate = {:?}", arm, candidate);
+
+ let arm_source_info = self.source_info(arm.span);
+ let arm_scope = (arm.scope, arm_source_info);
+ let match_scope = self.local_scope();
+ self.in_scope(arm_scope, arm.lint_level, |this| {
+ // `try_upvars_resolved` may fail if it is unable to resolve the given
+ // `PlaceBuilder` inside a closure. In this case, we don't want to include
+ // a scrutinee place. `scrutinee_place_builder` will fail to be resolved
+ // if the only match arm is a wildcard (`_`).
+ // Example:
+ // ```
+ // let foo = (0, 1);
+ // let c = || {
+ // match foo { _ => () };
+ // };
+ // ```
+ let mut opt_scrutinee_place: Option<(Option<&Place<'tcx>>, Span)> = None;
+ let scrutinee_place: Place<'tcx>;
+ if let Ok(scrutinee_builder) = scrutinee_place_builder
+ .clone()
+ .try_upvars_resolved(this.tcx, this.typeck_results)
+ {
+ scrutinee_place =
+ scrutinee_builder.into_place(this.tcx, this.typeck_results);
+ opt_scrutinee_place = Some((Some(&scrutinee_place), scrutinee_span));
+ }
+ let scope = this.declare_bindings(
+ None,
+ arm.span,
+ &arm.pattern,
+ ArmHasGuard(arm.guard.is_some()),
+ opt_scrutinee_place,
+ );
+
+ let arm_block = this.bind_pattern(
+ outer_source_info,
+ candidate,
+ arm.guard.as_ref(),
+ &fake_borrow_temps,
+ scrutinee_span,
+ Some(arm.span),
+ Some(arm.scope),
+ Some(match_scope),
+ );
+
+ if let Some(source_scope) = scope {
+ this.source_scope = source_scope;
+ }
+
+ this.expr_into_dest(destination, arm_block, &&this.thir[arm.body])
+ })
+ })
+ .collect();
+
+ // all the arm blocks will rejoin here
+ let end_block = self.cfg.start_new_block();
+
+ let end_brace = self.source_info(
+ outer_source_info.span.with_lo(outer_source_info.span.hi() - BytePos::from_usize(1)),
+ );
+ for arm_block in arm_end_blocks {
+ let block = &self.cfg.basic_blocks[arm_block.0];
+ let last_location = block.statements.last().map(|s| s.source_info);
+
+ self.cfg.goto(unpack!(arm_block), last_location.unwrap_or(end_brace), end_block);
+ }
+
+ self.source_scope = outer_source_info.scope;
+
+ end_block.unit()
+ }
+
+ /// Binds the variables and ascribes types for a given `match` arm or
+ /// `let` binding.
+ ///
+ /// Also check if the guard matches, if it's provided.
+ /// `arm_scope` should be `Some` if and only if this is called for a
+ /// `match` arm.
+ fn bind_pattern(
+ &mut self,
+ outer_source_info: SourceInfo,
+ candidate: Candidate<'_, 'tcx>,
+ guard: Option<&Guard<'tcx>>,
+ fake_borrow_temps: &[(Place<'tcx>, Local)],
+ scrutinee_span: Span,
+ arm_span: Option<Span>,
+ arm_scope: Option<region::Scope>,
+ match_scope: Option<region::Scope>,
+ ) -> BasicBlock {
+ if candidate.subcandidates.is_empty() {
+ // Avoid generating another `BasicBlock` when we only have one
+ // candidate.
+ self.bind_and_guard_matched_candidate(
+ candidate,
+ &[],
+ guard,
+ fake_borrow_temps,
+ scrutinee_span,
+ arm_span,
+ match_scope,
+ true,
+ )
+ } else {
+ // It's helpful to avoid scheduling drops multiple times to save
+ // drop elaboration from having to clean up the extra drops.
+ //
+ // If we are in a `let` then we only schedule drops for the first
+ // candidate.
+ //
+ // If we're in a `match` arm then we could have a case like so:
+ //
+ // Ok(x) | Err(x) if return => { /* ... */ }
+ //
+ // In this case we don't want a drop of `x` scheduled when we
+            // return: it isn't bound by move until right before we enter the arm.
+            // To handle this we instead unschedule its drop after each time
+ // we lower the guard.
+ let target_block = self.cfg.start_new_block();
+ let mut schedule_drops = true;
+ // We keep a stack of all of the bindings and type ascriptions
+ // from the parent candidates that we visit, that also need to
+ // be bound for each candidate.
+ traverse_candidate(
+ candidate,
+ &mut Vec::new(),
+ &mut |leaf_candidate, parent_bindings| {
+ if let Some(arm_scope) = arm_scope {
+ self.clear_top_scope(arm_scope);
+ }
+ let binding_end = self.bind_and_guard_matched_candidate(
+ leaf_candidate,
+ parent_bindings,
+ guard,
+ &fake_borrow_temps,
+ scrutinee_span,
+ arm_span,
+ match_scope,
+ schedule_drops,
+ );
+ if arm_scope.is_none() {
+ schedule_drops = false;
+ }
+ self.cfg.goto(binding_end, outer_source_info, target_block);
+ },
+ |inner_candidate, parent_bindings| {
+ parent_bindings.push((inner_candidate.bindings, inner_candidate.ascriptions));
+ inner_candidate.subcandidates.into_iter()
+ },
+ |parent_bindings| {
+ parent_bindings.pop();
+ },
+ );
+
+ target_block
+ }
+ }
+
+ pub(super) fn expr_into_pattern(
+ &mut self,
+ mut block: BasicBlock,
+ irrefutable_pat: Pat<'tcx>,
+ initializer: &Expr<'tcx>,
+ ) -> BlockAnd<()> {
+ match *irrefutable_pat.kind {
+ // Optimize the case of `let x = ...` to write directly into `x`
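+            // (Illustrative: for `let x = compute();` this writes the result of
+            // `compute()` straight into `x`'s local and then emits a
+            // `FakeRead(ForLet, x)`, instead of going through a temporary.)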
+ PatKind::Binding { mode: BindingMode::ByValue, var, subpattern: None, .. } => {
+ let place =
+ self.storage_live_binding(block, var, irrefutable_pat.span, OutsideGuard, true);
+ unpack!(block = self.expr_into_dest(place, block, initializer));
+
+ // Inject a fake read, see comments on `FakeReadCause::ForLet`.
+ let source_info = self.source_info(irrefutable_pat.span);
+ self.cfg.push_fake_read(block, source_info, FakeReadCause::ForLet(None), place);
+
+ self.schedule_drop_for_binding(var, irrefutable_pat.span, OutsideGuard);
+ block.unit()
+ }
+
+ // Optimize the case of `let x: T = ...` to write directly
+ // into `x` and then require that `T == typeof(x)`.
+ //
+ // Weirdly, this is needed to prevent the
+ // `intrinsic-move-val.rs` test case from crashing. That
+ // test works with uninitialized values in a rather
+ // dubious way, so it may be that the test is kind of
+ // broken.
+ PatKind::AscribeUserType {
+ subpattern:
+ Pat {
+ kind:
+ box PatKind::Binding {
+ mode: BindingMode::ByValue,
+ var,
+ subpattern: None,
+ ..
+ },
+ ..
+ },
+ ascription: thir::Ascription { annotation, variance: _ },
+ } => {
+ let place =
+ self.storage_live_binding(block, var, irrefutable_pat.span, OutsideGuard, true);
+ unpack!(block = self.expr_into_dest(place, block, initializer));
+
+ // Inject a fake read, see comments on `FakeReadCause::ForLet`.
+ let pattern_source_info = self.source_info(irrefutable_pat.span);
+ let cause_let = FakeReadCause::ForLet(None);
+ self.cfg.push_fake_read(block, pattern_source_info, cause_let, place);
+
+ let ty_source_info = self.source_info(annotation.span);
+
+ let base = self.canonical_user_type_annotations.push(annotation);
+ self.cfg.push(
+ block,
+ Statement {
+ source_info: ty_source_info,
+ kind: StatementKind::AscribeUserType(
+ Box::new((place, UserTypeProjection { base, projs: Vec::new() })),
+ // We always use invariant as the variance here. This is because the
+ // variance field from the ascription refers to the variance to use
+ // when applying the type to the value being matched, but this
+ // ascription applies rather to the type of the binding. e.g., in this
+ // example:
+ //
+ // ```
+ // let x: T = <expr>
+ // ```
+ //
+ // We are creating an ascription that defines the type of `x` to be
+ // exactly `T` (i.e., with invariance). The variance field, in
+ // contrast, is intended to be used to relate `T` to the type of
+ // `<expr>`.
+ ty::Variance::Invariant,
+ ),
+ },
+ );
+
+ self.schedule_drop_for_binding(var, irrefutable_pat.span, OutsideGuard);
+ block.unit()
+ }
+
+ _ => {
+ let place_builder = unpack!(block = self.as_place_builder(block, initializer));
+ self.place_into_pattern(block, irrefutable_pat, place_builder, true)
+ }
+ }
+ }
+
+ pub(crate) fn place_into_pattern(
+ &mut self,
+ block: BasicBlock,
+ irrefutable_pat: Pat<'tcx>,
+ initializer: PlaceBuilder<'tcx>,
+ set_match_place: bool,
+ ) -> BlockAnd<()> {
+ let mut candidate = Candidate::new(initializer.clone(), &irrefutable_pat, false);
+ let fake_borrow_temps = self.lower_match_tree(
+ block,
+ irrefutable_pat.span,
+ irrefutable_pat.span,
+ false,
+ &mut [&mut candidate],
+ );
+ // For matches and function arguments, the place that is being matched
+ // can be set when creating the variables. But the place for
+        // let PATTERN = ... might not even exist until we do the assignment,
+        // so we set it here instead.
+ if set_match_place {
+ let mut candidate_ref = &candidate;
+ while let Some(next) = {
+ for binding in &candidate_ref.bindings {
+ let local = self.var_local_id(binding.var_id, OutsideGuard);
+
+ let Some(box LocalInfo::User(ClearCrossCrate::Set(BindingForm::Var(
+ VarBindingForm { opt_match_place: Some((ref mut match_place, _)), .. },
+ )))) = self.local_decls[local].local_info else {
+ bug!("Let binding to non-user variable.")
+ };
+ // `try_upvars_resolved` may fail if it is unable to resolve the given
+ // `PlaceBuilder` inside a closure. In this case, we don't want to include
+ // a scrutinee place. `scrutinee_place_builder` will fail for destructured
+ // assignments. This is because a closure only captures the precise places
+ // that it will read and as a result a closure may not capture the entire
+ // tuple/struct and rather have individual places that will be read in the
+ // final MIR.
+ // Example:
+ // ```
+ // let foo = (0, 1);
+ // let c = || {
+ // let (v1, v2) = foo;
+ // };
+ // ```
+ if let Ok(match_pair_resolved) =
+ initializer.clone().try_upvars_resolved(self.tcx, self.typeck_results)
+ {
+ let place = match_pair_resolved.into_place(self.tcx, self.typeck_results);
+ *match_place = Some(place);
+ }
+ }
+ // All of the subcandidates should bind the same locals, so we
+ // only visit the first one.
+ candidate_ref.subcandidates.get(0)
+ } {
+ candidate_ref = next;
+ }
+ }
+
+ self.bind_pattern(
+ self.source_info(irrefutable_pat.span),
+ candidate,
+ None,
+ &fake_borrow_temps,
+ irrefutable_pat.span,
+ None,
+ None,
+ None,
+ )
+ .unit()
+ }
+
+ /// Declares the bindings of the given patterns and returns the visibility
+ /// scope for the bindings in these patterns, if such a scope had to be
+ /// created. NOTE: Declaring the bindings should always be done in their
+ /// drop scope.
+ pub(crate) fn declare_bindings(
+ &mut self,
+ mut visibility_scope: Option<SourceScope>,
+ scope_span: Span,
+ pattern: &Pat<'tcx>,
+ has_guard: ArmHasGuard,
+ opt_match_place: Option<(Option<&Place<'tcx>>, Span)>,
+ ) -> Option<SourceScope> {
+ debug!("declare_bindings: pattern={:?}", pattern);
+ self.visit_primary_bindings(
+ &pattern,
+ UserTypeProjections::none(),
+ &mut |this, mutability, name, mode, var, span, ty, user_ty| {
+ if visibility_scope.is_none() {
+ visibility_scope =
+ Some(this.new_source_scope(scope_span, LintLevel::Inherited, None));
+ }
+ let source_info = SourceInfo { span, scope: this.source_scope };
+ let visibility_scope = visibility_scope.unwrap();
+ this.declare_binding(
+ source_info,
+ visibility_scope,
+ mutability,
+ name,
+ mode,
+ var,
+ ty,
+ user_ty,
+ has_guard,
+ opt_match_place.map(|(x, y)| (x.cloned(), y)),
+ pattern.span,
+ );
+ },
+ );
+ visibility_scope
+ }
+
+ pub(crate) fn storage_live_binding(
+ &mut self,
+ block: BasicBlock,
+ var: LocalVarId,
+ span: Span,
+ for_guard: ForGuard,
+ schedule_drop: bool,
+ ) -> Place<'tcx> {
+ let local_id = self.var_local_id(var, for_guard);
+ let source_info = self.source_info(span);
+ self.cfg.push(block, Statement { source_info, kind: StatementKind::StorageLive(local_id) });
+        // Although there is almost always a scope for a given variable, in corner
+        // cases like #92893 we might get a variable with no scope.
+        if let Some(region_scope) = self.region_scope_tree.var_scope(var.0.local_id) && schedule_drop {
+ self.schedule_drop(span, region_scope, local_id, DropKind::Storage);
+ }
+ Place::from(local_id)
+ }
+
+ pub(crate) fn schedule_drop_for_binding(
+ &mut self,
+ var: LocalVarId,
+ span: Span,
+ for_guard: ForGuard,
+ ) {
+ let local_id = self.var_local_id(var, for_guard);
+ if let Some(region_scope) = self.region_scope_tree.var_scope(var.0.local_id) {
+ self.schedule_drop(span, region_scope, local_id, DropKind::Value);
+ }
+ }
+
+    /// Visit all of the primary bindings in a pattern, that is, visit the
+ /// leftmost occurrence of each variable bound in a pattern. A variable
+ /// will occur more than once in an or-pattern.
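+    /// For example (illustrative), in `Ok(x) | Err(x)` only the `x` in `Ok(x)` is
+    /// the primary binding and is the one visited here.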
+ pub(super) fn visit_primary_bindings(
+ &mut self,
+ pattern: &Pat<'tcx>,
+ pattern_user_ty: UserTypeProjections,
+ f: &mut impl FnMut(
+ &mut Self,
+ Mutability,
+ Symbol,
+ BindingMode,
+ LocalVarId,
+ Span,
+ Ty<'tcx>,
+ UserTypeProjections,
+ ),
+ ) {
+ debug!(
+ "visit_primary_bindings: pattern={:?} pattern_user_ty={:?}",
+ pattern, pattern_user_ty
+ );
+ match *pattern.kind {
+ PatKind::Binding {
+ mutability,
+ name,
+ mode,
+ var,
+ ty,
+ ref subpattern,
+ is_primary,
+ ..
+ } => {
+ if is_primary {
+ f(self, mutability, name, mode, var, pattern.span, ty, pattern_user_ty.clone());
+ }
+ if let Some(subpattern) = subpattern.as_ref() {
+ self.visit_primary_bindings(subpattern, pattern_user_ty, f);
+ }
+ }
+
+ PatKind::Array { ref prefix, ref slice, ref suffix }
+ | PatKind::Slice { ref prefix, ref slice, ref suffix } => {
+ let from = u64::try_from(prefix.len()).unwrap();
+ let to = u64::try_from(suffix.len()).unwrap();
+ for subpattern in prefix {
+ self.visit_primary_bindings(subpattern, pattern_user_ty.clone().index(), f);
+ }
+ for subpattern in slice {
+ self.visit_primary_bindings(
+ subpattern,
+ pattern_user_ty.clone().subslice(from, to),
+ f,
+ );
+ }
+ for subpattern in suffix {
+ self.visit_primary_bindings(subpattern, pattern_user_ty.clone().index(), f);
+ }
+ }
+
+ PatKind::Constant { .. } | PatKind::Range { .. } | PatKind::Wild => {}
+
+ PatKind::Deref { ref subpattern } => {
+ self.visit_primary_bindings(subpattern, pattern_user_ty.deref(), f);
+ }
+
+ PatKind::AscribeUserType {
+ ref subpattern,
+ ascription: thir::Ascription { ref annotation, variance: _ },
+ } => {
+ // This corresponds to something like
+ //
+ // ```
+ // let A::<'a>(_): A<'static> = ...;
+ // ```
+ //
+ // Note that the variance doesn't apply here, as we are tracking the effect
+                // of `user_ty` on any bindings contained within the subpattern.
+
+ let projection = UserTypeProjection {
+ base: self.canonical_user_type_annotations.push(annotation.clone()),
+ projs: Vec::new(),
+ };
+ let subpattern_user_ty =
+ pattern_user_ty.push_projection(&projection, annotation.span);
+ self.visit_primary_bindings(subpattern, subpattern_user_ty, f)
+ }
+
+ PatKind::Leaf { ref subpatterns } => {
+ for subpattern in subpatterns {
+ let subpattern_user_ty = pattern_user_ty.clone().leaf(subpattern.field);
+ debug!("visit_primary_bindings: subpattern_user_ty={:?}", subpattern_user_ty);
+ self.visit_primary_bindings(&subpattern.pattern, subpattern_user_ty, f);
+ }
+ }
+
+ PatKind::Variant { adt_def, substs: _, variant_index, ref subpatterns } => {
+ for subpattern in subpatterns {
+ let subpattern_user_ty =
+ pattern_user_ty.clone().variant(adt_def, variant_index, subpattern.field);
+ self.visit_primary_bindings(&subpattern.pattern, subpattern_user_ty, f);
+ }
+ }
+ PatKind::Or { ref pats } => {
+ // In cases where we recover from errors the primary bindings
+ // may not all be in the leftmost subpattern. For example in
+ // `let (x | y) = ...`, the primary binding of `y` occurs in
+ // the right subpattern
+ for subpattern in pats {
+ self.visit_primary_bindings(subpattern, pattern_user_ty.clone(), f);
+ }
+ }
+ }
+ }
+}
+
+#[derive(Debug)]
+struct Candidate<'pat, 'tcx> {
+ /// [`Span`] of the original pattern that gave rise to this candidate.
+ span: Span,
+
+ /// Whether this `Candidate` has a guard.
+ has_guard: bool,
+
+ /// All of these must be satisfied...
+ match_pairs: SmallVec<[MatchPair<'pat, 'tcx>; 1]>,
+
+ /// ...these bindings established...
+ bindings: Vec<Binding<'tcx>>,
+
+ /// ...and these types asserted...
+ ascriptions: Vec<Ascription<'tcx>>,
+
+ /// ...and if this is non-empty, one of these subcandidates also has to match...
+ subcandidates: Vec<Candidate<'pat, 'tcx>>,
+
+ /// ...and the guard must be evaluated; if it's `false` then branch to `otherwise_block`.
+ otherwise_block: Option<BasicBlock>,
+
+ /// The block before the `bindings` have been established.
+ pre_binding_block: Option<BasicBlock>,
+ /// The pre-binding block of the next candidate.
+ next_candidate_pre_binding_block: Option<BasicBlock>,
+}
+
+impl<'tcx, 'pat> Candidate<'pat, 'tcx> {
+ fn new(place: PlaceBuilder<'tcx>, pattern: &'pat Pat<'tcx>, has_guard: bool) -> Self {
+ Candidate {
+ span: pattern.span,
+ has_guard,
+ match_pairs: smallvec![MatchPair { place, pattern }],
+ bindings: Vec::new(),
+ ascriptions: Vec::new(),
+ subcandidates: Vec::new(),
+ otherwise_block: None,
+ pre_binding_block: None,
+ next_candidate_pre_binding_block: None,
+ }
+ }
+
+ /// Visit the leaf candidates (those with no subcandidates) contained in
+ /// this candidate.
+ fn visit_leaves<'a>(&'a mut self, mut visit_leaf: impl FnMut(&'a mut Self)) {
+ traverse_candidate(
+ self,
+ &mut (),
+ &mut move |c, _| visit_leaf(c),
+ move |c, _| c.subcandidates.iter_mut(),
+ |_| {},
+ );
+ }
+}
+
+/// A depth-first traversal of the `Candidate` and all of its recursive
+/// subcandidates.
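+///
+/// `visit_leaf` is called once for each candidate that has no subcandidates;
+/// `get_children` descends into a candidate's subcandidates, and
+/// `complete_children` runs after all of a candidate's children have been
+/// traversed. (See [`Candidate::visit_leaves`] and [`Builder::bind_pattern`]
+/// for how the `context` parameter is used by the two callers.)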
+fn traverse_candidate<'pat, 'tcx: 'pat, C, T, I>(
+ candidate: C,
+ context: &mut T,
+ visit_leaf: &mut impl FnMut(C, &mut T),
+ get_children: impl Copy + Fn(C, &mut T) -> I,
+ complete_children: impl Copy + Fn(&mut T),
+) where
+ C: Borrow<Candidate<'pat, 'tcx>>,
+ I: Iterator<Item = C>,
+{
+ if candidate.borrow().subcandidates.is_empty() {
+ visit_leaf(candidate, context)
+ } else {
+ for child in get_children(candidate, context) {
+ traverse_candidate(child, context, visit_leaf, get_children, complete_children);
+ }
+ complete_children(context)
+ }
+}
+
+#[derive(Clone, Debug)]
+struct Binding<'tcx> {
+ span: Span,
+ source: Place<'tcx>,
+ var_id: LocalVarId,
+ binding_mode: BindingMode,
+}
+
+/// Indicates that the type of `source` must be a subtype of the
+/// user-given type `user_ty`; this is basically a no-op but can
+/// influence region inference.
+#[derive(Clone, Debug)]
+struct Ascription<'tcx> {
+ source: Place<'tcx>,
+ annotation: CanonicalUserTypeAnnotation<'tcx>,
+ variance: ty::Variance,
+}
+
+#[derive(Clone, Debug)]
+pub(crate) struct MatchPair<'pat, 'tcx> {
+ // this place...
+ place: PlaceBuilder<'tcx>,
+
+ // ... must match this pattern.
+ pattern: &'pat Pat<'tcx>,
+}
+
+/// See [`Test`] for more.
+#[derive(Clone, Debug, PartialEq)]
+enum TestKind<'tcx> {
+ /// Test what enum variant a value is.
+ Switch {
+ /// The enum type being tested.
+ adt_def: ty::AdtDef<'tcx>,
+ /// The set of variants that we should create a branch for. We also
+ /// create an additional "otherwise" case.
+ variants: BitSet<VariantIdx>,
+ },
+
+ /// Test what value an integer, `bool`, or `char` has.
+ SwitchInt {
+ /// The type of the value that we're testing.
+ switch_ty: Ty<'tcx>,
+ /// The (ordered) set of values that we test for.
+ ///
+ /// For integers and `char`s we create a branch to each of the values in
+ /// `options`, as well as an "otherwise" branch for all other values, even
+ /// in the (rare) case that `options` is exhaustive.
+ ///
+ /// For `bool` we always generate two edges, one for `true` and one for
+ /// `false`.
+ options: FxIndexMap<ConstantKind<'tcx>, u128>,
+ },
+
+ /// Test for equality with value, possibly after an unsizing coercion to
+ /// `ty`,
+ Eq {
+ value: ConstantKind<'tcx>,
+ // Integer types are handled by `SwitchInt`, and constants with ADT
+ // types are converted back into patterns, so this can only be `&str`,
+ // `&[T]`, `f32` or `f64`.
+ ty: Ty<'tcx>,
+ },
+
+ /// Test whether the value falls within an inclusive or exclusive range
+ Range(PatRange<'tcx>),
+
+ /// Test that the length of the slice is equal to `len`.
+ Len { len: u64, op: BinOp },
+}
+
+/// A test to perform to determine which [`Candidate`] matches a value.
+///
+/// [`Test`] is just the test to perform; it does not include the value
+/// to be tested.
+#[derive(Debug)]
+pub(crate) struct Test<'tcx> {
+ span: Span,
+ kind: TestKind<'tcx>,
+}
+
+/// `ArmHasGuard` is a wrapper around a boolean flag. It indicates whether
+/// a match arm has a guard expression attached to it.
+#[derive(Copy, Clone, Debug)]
+pub(crate) struct ArmHasGuard(pub(crate) bool);
+
+///////////////////////////////////////////////////////////////////////////
+// Main matching algorithm
+
+impl<'a, 'tcx> Builder<'a, 'tcx> {
+ /// The main match algorithm. It begins with a set of candidates
+ /// `candidates` and has the job of generating code to determine
+ /// which of these candidates, if any, is the correct one. The
+ /// candidates are sorted such that the first item in the list
+ /// has the highest priority. When a candidate is found to match
+ /// the value, we will set and generate a branch to the appropriate
+ /// pre-binding block.
+ ///
+ /// If we find that *NONE* of the candidates apply, we branch to the
+ /// `otherwise_block`, setting it to `Some` if required. In principle, this
+ /// means that the input list was not exhaustive, though at present we
+ /// sometimes are not smart enough to recognize all exhaustive inputs.
+ ///
+ /// It might be surprising that the input can be non-exhaustive.
+ /// Indeed, initially, it is not, because all matches are
+ /// exhaustive in Rust. But during processing we sometimes divide
+ /// up the list of candidates and recurse with a non-exhaustive
+ /// list. This is important to keep the size of the generated code
+ /// under control. See [`Builder::test_candidates`] for more details.
+ ///
+ /// If `fake_borrows` is `Some`, then places which need fake borrows
+ /// will be added to it.
+ ///
+ /// For an example of a case where we set `otherwise_block`, even for an
+ /// exhaustive match, consider:
+ ///
+ /// ```
+ /// # fn foo(x: (bool, bool)) {
+ /// match x {
+ /// (true, true) => (),
+ /// (_, false) => (),
+ /// (false, true) => (),
+ /// }
+ /// # }
+ /// ```
+ ///
+ /// For this match, we check if `x.0` matches `true` (for the first
+ /// arm). If it doesn't match, we check `x.1`. If `x.1` is `true` we check
+ /// if `x.0` matches `false` (for the third arm). In the (impossible at
+ /// runtime) case when `x.0` is now `true`, we branch to
+ /// `otherwise_block`.
+ fn match_candidates<'pat>(
+ &mut self,
+ span: Span,
+ scrutinee_span: Span,
+ start_block: BasicBlock,
+ otherwise_block: &mut Option<BasicBlock>,
+ candidates: &mut [&mut Candidate<'pat, 'tcx>],
+ fake_borrows: &mut Option<FxIndexSet<Place<'tcx>>>,
+ ) {
+ debug!(
+ "matched_candidate(span={:?}, candidates={:?}, start_block={:?}, otherwise_block={:?})",
+ span, candidates, start_block, otherwise_block,
+ );
+
+ // Start by simplifying candidates. Once this process is complete, all
+ // the match pairs which remain require some form of test, whether it
+ // be a switch or pattern comparison.
+ let mut split_or_candidate = false;
+ for candidate in &mut *candidates {
+ split_or_candidate |= self.simplify_candidate(candidate);
+ }
+
+ ensure_sufficient_stack(|| {
+ if split_or_candidate {
+ // At least one of the candidates has been split into subcandidates.
+ // We need to change the candidate list to include those.
+ let mut new_candidates = Vec::new();
+
+ for candidate in candidates {
+ candidate.visit_leaves(|leaf_candidate| new_candidates.push(leaf_candidate));
+ }
+ self.match_simplified_candidates(
+ span,
+ scrutinee_span,
+ start_block,
+ otherwise_block,
+ &mut *new_candidates,
+ fake_borrows,
+ );
+ } else {
+ self.match_simplified_candidates(
+ span,
+ scrutinee_span,
+ start_block,
+ otherwise_block,
+ candidates,
+ fake_borrows,
+ );
+ }
+ });
+ }
+
+ fn match_simplified_candidates(
+ &mut self,
+ span: Span,
+ scrutinee_span: Span,
+ start_block: BasicBlock,
+ otherwise_block: &mut Option<BasicBlock>,
+ candidates: &mut [&mut Candidate<'_, 'tcx>],
+ fake_borrows: &mut Option<FxIndexSet<Place<'tcx>>>,
+ ) {
+ // The candidates are sorted by priority. Check to see whether the
+ // higher priority candidates (and hence at the front of the slice)
+ // have satisfied all their match pairs.
+ let fully_matched = candidates.iter().take_while(|c| c.match_pairs.is_empty()).count();
+ debug!("match_candidates: {:?} candidates fully matched", fully_matched);
+ let (matched_candidates, unmatched_candidates) = candidates.split_at_mut(fully_matched);
+
+ let block = if !matched_candidates.is_empty() {
+ let otherwise_block =
+ self.select_matched_candidates(matched_candidates, start_block, fake_borrows);
+
+ if let Some(last_otherwise_block) = otherwise_block {
+ last_otherwise_block
+ } else {
+ // Any remaining candidates are unreachable.
+ if unmatched_candidates.is_empty() {
+ return;
+ }
+ self.cfg.start_new_block()
+ }
+ } else {
+ start_block
+ };
+
+ // If there are no candidates that still need testing, we're
+ // done. Since all matches are exhaustive, execution should
+ // never reach this point.
+ if unmatched_candidates.is_empty() {
+ let source_info = self.source_info(span);
+ if let Some(otherwise) = *otherwise_block {
+ self.cfg.goto(block, source_info, otherwise);
+ } else {
+ *otherwise_block = Some(block);
+ }
+ return;
+ }
+
+ // Test for the remaining candidates.
+ self.test_candidates_with_or(
+ span,
+ scrutinee_span,
+ unmatched_candidates,
+ block,
+ otherwise_block,
+ fake_borrows,
+ );
+ }
+
+ /// Link up matched candidates.
+ ///
+ /// For example, if we have something like this:
+ ///
+ /// ```ignore (illustrative)
+ /// ...
+ /// Some(x) if cond1 => ...
+ /// Some(x) => ...
+ /// Some(x) if cond2 => ...
+ /// ...
+ /// ```
+ ///
+ /// We generate real edges from:
+ ///
+ /// * `start_block` to the [pre-binding block] of the first pattern,
+ /// * the [otherwise block] of the first pattern to the second pattern,
+ /// * the [otherwise block] of the third pattern to a block with an
+ /// [`Unreachable` terminator](TerminatorKind::Unreachable).
+ ///
+ /// In addition, we add fake edges from the otherwise blocks to the
+ /// pre-binding block of the next candidate in the original set of
+ /// candidates.
+ ///
+ /// [pre-binding block]: Candidate::pre_binding_block
+ /// [otherwise block]: Candidate::otherwise_block
+ fn select_matched_candidates(
+ &mut self,
+ matched_candidates: &mut [&mut Candidate<'_, 'tcx>],
+ start_block: BasicBlock,
+ fake_borrows: &mut Option<FxIndexSet<Place<'tcx>>>,
+ ) -> Option<BasicBlock> {
+ debug_assert!(
+ !matched_candidates.is_empty(),
+ "select_matched_candidates called with no candidates",
+ );
+ debug_assert!(
+ matched_candidates.iter().all(|c| c.subcandidates.is_empty()),
+ "subcandidates should be empty in select_matched_candidates",
+ );
+
+        // Insert borrows of prefixes of places that are bound and are
+ // behind a dereference projection.
+ //
+ // These borrows are taken to avoid situations like the following:
+ //
+ // match x[10] {
+ // _ if { x = &[0]; false } => (),
+ // y => (), // Out of bounds array access!
+ // }
+ //
+ // match *x {
+ // // y is bound by reference in the guard and then by copy in the
+ // // arm, so y is 2 in the arm!
+ // y if { y == 1 && (x = &2) == () } => y,
+ // _ => 3,
+ // }
+ if let Some(fake_borrows) = fake_borrows {
+ for Binding { source, .. } in
+ matched_candidates.iter().flat_map(|candidate| &candidate.bindings)
+ {
+ if let Some(i) =
+ source.projection.iter().rposition(|elem| elem == ProjectionElem::Deref)
+ {
+ let proj_base = &source.projection[..i];
+
+ fake_borrows.insert(Place {
+ local: source.local,
+ projection: self.tcx.intern_place_elems(proj_base),
+ });
+ }
+ }
+ }
+
+ let fully_matched_with_guard = matched_candidates
+ .iter()
+ .position(|c| !c.has_guard)
+ .unwrap_or(matched_candidates.len() - 1);
+
+ let (reachable_candidates, unreachable_candidates) =
+ matched_candidates.split_at_mut(fully_matched_with_guard + 1);
+
+ let mut next_prebinding = start_block;
+
+ for candidate in reachable_candidates.iter_mut() {
+ assert!(candidate.otherwise_block.is_none());
+ assert!(candidate.pre_binding_block.is_none());
+ candidate.pre_binding_block = Some(next_prebinding);
+ if candidate.has_guard {
+ // Create the otherwise block for this candidate, which is the
+ // pre-binding block for the next candidate.
+ next_prebinding = self.cfg.start_new_block();
+ candidate.otherwise_block = Some(next_prebinding);
+ }
+ }
+
+ debug!(
+ "match_candidates: add pre_binding_blocks for unreachable {:?}",
+ unreachable_candidates,
+ );
+ for candidate in unreachable_candidates {
+ assert!(candidate.pre_binding_block.is_none());
+ candidate.pre_binding_block = Some(self.cfg.start_new_block());
+ }
+
+ reachable_candidates.last_mut().unwrap().otherwise_block
+ }
+
+ /// Tests a candidate where there are only or-patterns left to test, or
+ /// forwards to [Builder::test_candidates].
+ ///
+ /// Given a pattern `(P | Q, R | S)` we (in principle) generate a CFG like
+ /// so:
+ ///
+ /// ```text
+ /// [ start ]
+ /// |
+ /// [ match P, Q ]
+ /// |
+ /// +----------------------------------------+------------------------------------+
+ /// | | |
+ /// V V V
+ /// [ P matches ] [ Q matches ] [ otherwise ]
+ /// | | |
+ /// V V |
+ /// [ match R, S ] [ match R, S ] |
+ /// | | |
+ /// +--------------+------------+ +--------------+------------+ |
+ /// | | | | | | |
+ /// V V V V V V |
+ /// [ R matches ] [ S matches ] [otherwise ] [ R matches ] [ S matches ] [otherwise ] |
+ /// | | | | | | |
+ /// +--------------+------------|------------+--------------+ | |
+ /// | | | |
+ /// | +----------------------------------------+--------+
+ /// | |
+ /// V V
+ /// [ Success ] [ Failure ]
+ /// ```
+ ///
+ /// In practice there are some complications:
+ ///
+ /// * If there's a guard, then the otherwise branch of the first match on
+ /// `R | S` goes to a test for whether `Q` matches, and the control flow
+ /// doesn't merge into a single success block until after the guard is
+ /// tested.
+    /// * If neither `P` nor `Q` has any bindings or type ascriptions and there
+ /// isn't a match guard, then we create a smaller CFG like:
+ ///
+ /// ```text
+ /// ...
+ /// +---------------+------------+
+ /// | | |
+ /// [ P matches ] [ Q matches ] [ otherwise ]
+ /// | | |
+ /// +---------------+ |
+ /// | ...
+ /// [ match R, S ]
+ /// |
+ /// ...
+ /// ```
+ fn test_candidates_with_or(
+ &mut self,
+ span: Span,
+ scrutinee_span: Span,
+ candidates: &mut [&mut Candidate<'_, 'tcx>],
+ block: BasicBlock,
+ otherwise_block: &mut Option<BasicBlock>,
+ fake_borrows: &mut Option<FxIndexSet<Place<'tcx>>>,
+ ) {
+ let (first_candidate, remaining_candidates) = candidates.split_first_mut().unwrap();
+
+ // All of the or-patterns have been sorted to the end, so if the first
+ // pattern is an or-pattern we only have or-patterns.
+ match *first_candidate.match_pairs[0].pattern.kind {
+ PatKind::Or { .. } => (),
+ _ => {
+ self.test_candidates(
+ span,
+ scrutinee_span,
+ candidates,
+ block,
+ otherwise_block,
+ fake_borrows,
+ );
+ return;
+ }
+ }
+
+ let match_pairs = mem::take(&mut first_candidate.match_pairs);
+ first_candidate.pre_binding_block = Some(block);
+
+ let mut otherwise = None;
+ for match_pair in match_pairs {
+ let PatKind::Or { ref pats } = &*match_pair.pattern.kind else {
+ bug!("Or-patterns should have been sorted to the end");
+ };
+ let or_span = match_pair.pattern.span;
+ let place = match_pair.place;
+
+ first_candidate.visit_leaves(|leaf_candidate| {
+ self.test_or_pattern(
+ leaf_candidate,
+ &mut otherwise,
+ pats,
+ or_span,
+ place.clone(),
+ fake_borrows,
+ );
+ });
+ }
+
+ let remainder_start = otherwise.unwrap_or_else(|| self.cfg.start_new_block());
+
+ self.match_candidates(
+ span,
+ scrutinee_span,
+ remainder_start,
+ otherwise_block,
+ remaining_candidates,
+ fake_borrows,
+ )
+ }
+
+ fn test_or_pattern<'pat>(
+ &mut self,
+ candidate: &mut Candidate<'pat, 'tcx>,
+ otherwise: &mut Option<BasicBlock>,
+ pats: &'pat [Pat<'tcx>],
+ or_span: Span,
+ place: PlaceBuilder<'tcx>,
+ fake_borrows: &mut Option<FxIndexSet<Place<'tcx>>>,
+ ) {
+ debug!("test_or_pattern:\ncandidate={:#?}\npats={:#?}", candidate, pats);
+ let mut or_candidates: Vec<_> = pats
+ .iter()
+ .map(|pat| Candidate::new(place.clone(), pat, candidate.has_guard))
+ .collect();
+ let mut or_candidate_refs: Vec<_> = or_candidates.iter_mut().collect();
+ let otherwise = if candidate.otherwise_block.is_some() {
+ &mut candidate.otherwise_block
+ } else {
+ otherwise
+ };
+ self.match_candidates(
+ or_span,
+ or_span,
+ candidate.pre_binding_block.unwrap(),
+ otherwise,
+ &mut or_candidate_refs,
+ fake_borrows,
+ );
+ candidate.subcandidates = or_candidates;
+ self.merge_trivial_subcandidates(candidate, self.source_info(or_span));
+ }
+
+ /// Try to merge all of the subcandidates of the given candidate into one.
+ /// This avoids exponentially large CFGs in cases like `(1 | 2, 3 | 4, ...)`.
+ fn merge_trivial_subcandidates(
+ &mut self,
+ candidate: &mut Candidate<'_, 'tcx>,
+ source_info: SourceInfo,
+ ) {
+ if candidate.subcandidates.is_empty() || candidate.has_guard {
+ // FIXME(or_patterns; matthewjasper) Don't give up if we have a guard.
+ return;
+ }
+
+ let mut can_merge = true;
+
+ // Not `Iterator::all` because we don't want to short-circuit.
+ for subcandidate in &mut candidate.subcandidates {
+ self.merge_trivial_subcandidates(subcandidate, source_info);
+
+ // FIXME(or_patterns; matthewjasper) Try to be more aggressive here.
+ can_merge &= subcandidate.subcandidates.is_empty()
+ && subcandidate.bindings.is_empty()
+ && subcandidate.ascriptions.is_empty();
+ }
+
+ if can_merge {
+ let any_matches = self.cfg.start_new_block();
+ for subcandidate in mem::take(&mut candidate.subcandidates) {
+ let or_block = subcandidate.pre_binding_block.unwrap();
+ self.cfg.goto(or_block, source_info, any_matches);
+ }
+ candidate.pre_binding_block = Some(any_matches);
+ }
+ }
+
+ /// This is the most subtle part of the matching algorithm. At
+ /// this point, the input candidates have been fully simplified,
+ /// and so we know that all remaining match-pairs require some
+ /// sort of test. To decide what test to perform, we take the highest
+ /// priority candidate (the first one in the list, as of January 2021)
+ /// and extract the first match-pair from the list. From this we decide
+ /// what kind of test is needed using [`Builder::test`], defined in the
+ /// [`test` module](mod@test).
+ ///
+ /// *Note:* taking the first match pair is somewhat arbitrary, and
+ /// we might do better here by choosing more carefully what to
+ /// test.
+ ///
+ /// For example, consider the following possible match-pairs:
+ ///
+ /// 1. `x @ Some(P)` -- we will do a [`Switch`] to decide what variant `x` has
+ /// 2. `x @ 22` -- we will do a [`SwitchInt`] to decide what value `x` has
+ /// 3. `x @ 3..5` -- we will do a [`Range`] test to decide what range `x` falls in
+ /// 4. etc.
+ ///
+ /// [`Switch`]: TestKind::Switch
+ /// [`SwitchInt`]: TestKind::SwitchInt
+ /// [`Range`]: TestKind::Range
+ ///
+ /// Once we know what sort of test we are going to perform, this
+ /// test may also help us winnow down our candidates. So we walk over
+ /// the candidates (from high to low priority) and check. This
+ /// gives us, for each outcome of the test, a transformed list of
+ /// candidates. For example, if we are testing `x.0`'s variant,
+ /// and we have a candidate `(x.0 @ Some(v), x.1 @ 22)`,
+ /// then we would have a resulting candidate of `((x.0 as Some).0 @ v, x.1 @ 22)`.
+ /// Note that the first match-pair is now simpler (and, in fact, irrefutable).
+ ///
+ /// But there may also be candidates that the test just doesn't
+ /// apply to. The classical example involves wildcards:
+ ///
+ /// ```
+ /// # let (x, y, z) = (true, true, true);
+ /// match (x, y, z) {
+ /// (true , _ , true ) => true, // (0)
+ /// (_ , true , _ ) => true, // (1)
+ /// (false, false, _ ) => false, // (2)
+ /// (true , _ , false) => false, // (3)
+ /// }
+ /// # ;
+ /// ```
+ ///
+ /// In that case, after we test on `x`, there are 2 overlapping candidate
+ /// sets:
+ ///
+ /// - If the outcome is that `x` is true, candidates 0, 1, and 3
+ /// - If the outcome is that `x` is false, candidates 1 and 2
+ ///
+ /// Here, the traditional "decision tree" method would generate 2
+ /// separate code-paths for the 2 separate cases.
+ ///
+ /// In some cases, this duplication can create an exponential amount of
+ /// code. This is most easily seen by noticing that this method terminates
+ /// with precisely the reachable arms being reachable - but that problem
+ /// is trivially NP-complete:
+ ///
+ /// ```ignore (illustrative)
+ /// match (var0, var1, var2, var3, ...) {
+ /// (true , _ , _ , false, true, ...) => false,
+ /// (_ , true, true , false, _ , ...) => false,
+ /// (false, _ , false, false, _ , ...) => false,
+ /// ...
+ /// _ => true
+ /// }
+ /// ```
+ ///
+ /// Here the last arm is reachable only if there is an assignment to
+ /// the variables that does not match any of the literals. Therefore,
+ /// compilation would take an exponential amount of time in some cases.
+ ///
+ /// That kind of exponential worst-case might not occur in practice, but
+ /// our simplistic treatment of constants and guards would make it occur
+ /// in very common situations - for example [#29740]:
+ ///
+ /// ```ignore (illustrative)
+ /// match x {
+ /// "foo" if foo_guard => ...,
+ /// "bar" if bar_guard => ...,
+ /// "baz" if baz_guard => ...,
+ /// ...
+ /// }
+ /// ```
+ ///
+ /// [#29740]: https://github.com/rust-lang/rust/issues/29740
+ ///
+ /// Here we first test the match-pair `x @ "foo"`, which is an [`Eq` test].
+ ///
+ /// [`Eq` test]: TestKind::Eq
+ ///
+ /// It might seem that we would end up with 2 disjoint candidate
+ /// sets, consisting of the first candidate or the other two, but our
+ /// algorithm doesn't reason about `"foo"` being distinct from the other
+ /// constants; it considers the latter arms to potentially match after
+ /// both outcomes, which obviously leads to an exponential number
+ /// of tests.
+ ///
+ /// To avoid these kinds of problems, our algorithm tries to ensure
+ /// the amount of generated tests is linear. When we do a k-way test,
+ /// we return an additional "unmatched" set alongside the obvious `k`
+ /// sets. When we encounter a candidate that would be present in more
+ /// than one of the sets, we put it and all candidates below it into the
+ /// "unmatched" set. This ensures these `k+1` sets are disjoint.
+ ///
+ /// After we perform our test, we branch into the appropriate candidate
+ /// set and recurse with `match_candidates`. These sub-matches are
+ /// obviously non-exhaustive - as we discarded our otherwise set - so
+ /// we set their continuation to do `match_candidates` on the
+ /// "unmatched" set (which is again non-exhaustive).
+ ///
+ /// If you apply this to the above test, you basically wind up
+ /// with an if-else-if chain, testing each candidate in turn,
+ /// which is precisely what we want.
+ ///
+ /// In addition to avoiding exponential-time blowups, this algorithm
+ /// also has the nice property that each guard and arm is only generated
+ /// once.
+ fn test_candidates<'pat, 'b, 'c>(
+ &mut self,
+ span: Span,
+ scrutinee_span: Span,
+ mut candidates: &'b mut [&'c mut Candidate<'pat, 'tcx>],
+ block: BasicBlock,
+ otherwise_block: &mut Option<BasicBlock>,
+ fake_borrows: &mut Option<FxIndexSet<Place<'tcx>>>,
+ ) {
+ // extract the match-pair from the highest priority candidate
+ let match_pair = &candidates.first().unwrap().match_pairs[0];
+ let mut test = self.test(match_pair);
+ let match_place = match_pair.place.clone();
+
+ // most of the time, the test to perform is simply a function
+ // of the main candidate; but for a test like SwitchInt, we
+ // may want to add cases based on the candidates that are
+ // available
+ match test.kind {
+ TestKind::SwitchInt { switch_ty, ref mut options } => {
+ for candidate in candidates.iter() {
+ if !self.add_cases_to_switch(&match_place, candidate, switch_ty, options) {
+ break;
+ }
+ }
+ }
+ TestKind::Switch { adt_def: _, ref mut variants } => {
+ for candidate in candidates.iter() {
+ if !self.add_variants_to_switch(&match_place, candidate, variants) {
+ break;
+ }
+ }
+ }
+ _ => {}
+ }
+
+        // Insert a Shallow borrow of any place that is switched on.
+ if let Some(fb) = fake_borrows && let Ok(match_place_resolved) =
+ match_place.clone().try_upvars_resolved(self.tcx, self.typeck_results)
+ {
+ let resolved_place = match_place_resolved.into_place(self.tcx, self.typeck_results);
+ fb.insert(resolved_place);
+ }
+
+ // perform the test, branching to one of N blocks. For each of
+ // those N possible outcomes, create a (initially empty)
+ // vector of candidates. Those are the candidates that still
+ // apply if the test has that particular outcome.
+ debug!("test_candidates: test={:?} match_pair={:?}", test, match_pair);
+ let mut target_candidates: Vec<Vec<&mut Candidate<'pat, 'tcx>>> = vec![];
+ target_candidates.resize_with(test.targets(), Default::default);
+
+ let total_candidate_count = candidates.len();
+
+ // Sort the candidates into the appropriate vector in
+ // `target_candidates`. Note that at some point we may
+ // encounter a candidate where the test is not relevant; at
+ // that point, we stop sorting.
+ while let Some(candidate) = candidates.first_mut() {
+ let Some(idx) = self.sort_candidate(&match_place.clone(), &test, candidate) else {
+ break;
+ };
+ let (candidate, rest) = candidates.split_first_mut().unwrap();
+ target_candidates[idx].push(candidate);
+ candidates = rest;
+ }
+ // at least the first candidate ought to be tested
+ assert!(total_candidate_count > candidates.len());
+ debug!("test_candidates: tested_candidates: {}", total_candidate_count - candidates.len());
+ debug!("test_candidates: untested_candidates: {}", candidates.len());
+
+ // HACK(matthewjasper) This is a closure so that we can let the test
+ // create its blocks before the rest of the match. This currently
+ // improves the speed of llvm when optimizing long string literal
+ // matches
+ let make_target_blocks = move |this: &mut Self| -> Vec<BasicBlock> {
+ // The block that we should branch to if none of the
+ // `target_candidates` match. This is either the block where we
+ // start matching the untested candidates if there are any,
+ // otherwise it's the `otherwise_block`.
+ let remainder_start = &mut None;
+ let remainder_start =
+ if candidates.is_empty() { &mut *otherwise_block } else { remainder_start };
+
+ // For each outcome of test, process the candidates that still
+ // apply. Collect a list of blocks where control flow will
+ // branch if one of the `target_candidate` sets is not
+ // exhaustive.
+ let target_blocks: Vec<_> = target_candidates
+ .into_iter()
+ .map(|mut candidates| {
+ if !candidates.is_empty() {
+ let candidate_start = this.cfg.start_new_block();
+ this.match_candidates(
+ span,
+ scrutinee_span,
+ candidate_start,
+ remainder_start,
+ &mut *candidates,
+ fake_borrows,
+ );
+ candidate_start
+ } else {
+ *remainder_start.get_or_insert_with(|| this.cfg.start_new_block())
+ }
+ })
+ .collect();
+
+ if !candidates.is_empty() {
+ let remainder_start = remainder_start.unwrap_or_else(|| this.cfg.start_new_block());
+ this.match_candidates(
+ span,
+ scrutinee_span,
+ remainder_start,
+ otherwise_block,
+ candidates,
+ fake_borrows,
+ );
+ };
+
+ target_blocks
+ };
+
+ self.perform_test(span, scrutinee_span, block, match_place, &test, make_target_blocks);
+ }
+
+ /// Determine the fake borrows that are needed from a set of places that
+ /// have to be stable across match guards.
+ ///
+ /// Returns a list of places that need a fake borrow and the temporary
+ /// that's used to store the fake borrow.
+ ///
+ /// Match exhaustiveness checking is not able to handle the case where the
+ /// place being matched on is mutated in the guards. We add "fake borrows"
+ /// to the guards that prevent any mutation of the place being matched.
+    /// There are some subtleties:
+ ///
+ /// 1. Borrowing `*x` doesn't prevent assigning to `x`. If `x` is a shared
+ /// reference, the borrow isn't even tracked. As such we have to add fake
+ /// borrows of any prefixes of a place
+ /// 2. We don't want `match x { _ => (), }` to conflict with mutable
+ /// borrows of `x`, so we only add fake borrows for places which are
+ /// bound or tested by the match.
+ /// 3. We don't want the fake borrows to conflict with `ref mut` bindings,
+ /// so we use a special BorrowKind for them.
+ /// 4. The fake borrows may be of places in inactive variants, so it would
+ /// be UB to generate code for them. They therefore have to be removed
+ /// by a MIR pass run after borrow checking.
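+    ///
+    /// As an illustration of point 1: when we match on `*x` and there is a guard,
+    /// we record fake borrows of both `*x` and its prefix `x`, so that the guard
+    /// cannot reassign `x` and change which place is being matched.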
+ fn calculate_fake_borrows<'b>(
+ &mut self,
+ fake_borrows: &'b FxIndexSet<Place<'tcx>>,
+ temp_span: Span,
+ ) -> Vec<(Place<'tcx>, Local)> {
+ let tcx = self.tcx;
+
+ debug!("add_fake_borrows fake_borrows = {:?}", fake_borrows);
+
+ let mut all_fake_borrows = Vec::with_capacity(fake_borrows.len());
+
+ // Insert a Shallow borrow of the prefixes of any fake borrows.
+ for place in fake_borrows {
+ let mut cursor = place.projection.as_ref();
+ while let [proj_base @ .., elem] = cursor {
+ cursor = proj_base;
+
+ if let ProjectionElem::Deref = elem {
+ // Insert a shallow borrow after a deref. For other
+ // projections the borrow of prefix_cursor will
+ // conflict with any mutation of base.
+ all_fake_borrows.push(PlaceRef { local: place.local, projection: proj_base });
+ }
+ }
+
+ all_fake_borrows.push(place.as_ref());
+ }
+
+ // Deduplicate
+ let mut dedup = FxHashSet::default();
+ all_fake_borrows.retain(|b| dedup.insert(*b));
+
+ debug!("add_fake_borrows all_fake_borrows = {:?}", all_fake_borrows);
+
+ all_fake_borrows
+ .into_iter()
+ .map(|matched_place_ref| {
+ let matched_place = Place {
+ local: matched_place_ref.local,
+ projection: tcx.intern_place_elems(matched_place_ref.projection),
+ };
+ let fake_borrow_deref_ty = matched_place.ty(&self.local_decls, tcx).ty;
+ let fake_borrow_ty = tcx.mk_imm_ref(tcx.lifetimes.re_erased, fake_borrow_deref_ty);
+ let fake_borrow_temp =
+ self.local_decls.push(LocalDecl::new(fake_borrow_ty, temp_span));
+
+ (matched_place, fake_borrow_temp)
+ })
+ .collect()
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////
+// Pat binding - used for `let` and function parameters as well.
+
+impl<'a, 'tcx> Builder<'a, 'tcx> {
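+    /// Lowers a `let <pat> = <expr>` expression that is used as a condition,
+    /// for example in an `if let` match guard: if the pattern does not match,
+    /// control breaks out to `else_target`; otherwise the pattern's bindings
+    /// are declared and the block reached on a successful match is returned.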
+ pub(crate) fn lower_let_expr(
+ &mut self,
+ mut block: BasicBlock,
+ expr: &Expr<'tcx>,
+ pat: &Pat<'tcx>,
+ else_target: region::Scope,
+ source_scope: Option<SourceScope>,
+ span: Span,
+ ) -> BlockAnd<()> {
+ let expr_span = expr.span;
+ let expr_place_builder = unpack!(block = self.lower_scrutinee(block, expr, expr_span));
+ let wildcard = Pat::wildcard_from_ty(pat.ty);
+ let mut guard_candidate = Candidate::new(expr_place_builder.clone(), &pat, false);
+ let mut otherwise_candidate = Candidate::new(expr_place_builder.clone(), &wildcard, false);
+ let fake_borrow_temps = self.lower_match_tree(
+ block,
+ pat.span,
+ pat.span,
+ false,
+ &mut [&mut guard_candidate, &mut otherwise_candidate],
+ );
+ let mut opt_expr_place: Option<(Option<&Place<'tcx>>, Span)> = None;
+ let expr_place: Place<'tcx>;
+ if let Ok(expr_builder) =
+ expr_place_builder.try_upvars_resolved(self.tcx, self.typeck_results)
+ {
+ expr_place = expr_builder.into_place(self.tcx, self.typeck_results);
+ opt_expr_place = Some((Some(&expr_place), expr_span));
+ }
+ let otherwise_post_guard_block = otherwise_candidate.pre_binding_block.unwrap();
+ self.break_for_else(otherwise_post_guard_block, else_target, self.source_info(expr_span));
+
+ self.declare_bindings(
+ source_scope,
+ pat.span.to(span),
+ pat,
+ ArmHasGuard(false),
+ opt_expr_place,
+ );
+
+ let post_guard_block = self.bind_pattern(
+ self.source_info(pat.span),
+ guard_candidate,
+ None,
+ &fake_borrow_temps,
+ expr.span,
+ None,
+ None,
+ None,
+ );
+
+ post_guard_block.unit()
+ }
+
+ /// Initializes each of the bindings from the candidate by
+ /// moving/copying/ref'ing the source as appropriate. Tests the guard, if
+ /// any, and then branches to the arm. Returns the block for the case where
+ /// the guard succeeds.
+ ///
+ /// Note: we do not check earlier that if there is a guard,
+ /// there cannot be move bindings. We avoid a use-after-move by only
+ /// moving the binding once the guard has evaluated to true (see below).
+ fn bind_and_guard_matched_candidate<'pat>(
+ &mut self,
+ candidate: Candidate<'pat, 'tcx>,
+ parent_bindings: &[(Vec<Binding<'tcx>>, Vec<Ascription<'tcx>>)],
+ guard: Option<&Guard<'tcx>>,
+ fake_borrows: &[(Place<'tcx>, Local)],
+ scrutinee_span: Span,
+ arm_span: Option<Span>,
+ match_scope: Option<region::Scope>,
+ schedule_drops: bool,
+ ) -> BasicBlock {
+ debug!("bind_and_guard_matched_candidate(candidate={:?})", candidate);
+
+ debug_assert!(candidate.match_pairs.is_empty());
+
+ let candidate_source_info = self.source_info(candidate.span);
+
+ let mut block = candidate.pre_binding_block.unwrap();
+
+ if candidate.next_candidate_pre_binding_block.is_some() {
+ let fresh_block = self.cfg.start_new_block();
+ self.false_edges(
+ block,
+ fresh_block,
+ candidate.next_candidate_pre_binding_block,
+ candidate_source_info,
+ );
+ block = fresh_block;
+ }
+
+ self.ascribe_types(
+ block,
+ parent_bindings
+ .iter()
+ .flat_map(|(_, ascriptions)| ascriptions)
+ .cloned()
+ .chain(candidate.ascriptions),
+ );
+
+ // rust-lang/rust#27282: The `autoref` business deserves some
+ // explanation here.
+ //
+ // The intent of the `autoref` flag is that when it is true,
+ // then any pattern bindings of type T will map to a `&T`
+ // within the context of the guard expression, but will
+ // continue to map to a `T` in the context of the arm body. To
+ // avoid surfacing this distinction in the user source code
+ // (which would be a severe change to the language and require
+ // far more revision to the compiler), when `autoref` is true,
+ // then any occurrence of the identifier in the guard
+ // expression will automatically get a deref op applied to it.
+ //
+ // So an input like:
+ //
+ // ```
+ // let place = Foo::new();
+ // match place { foo if inspect(foo)
+ // => feed(foo), ... }
+ // ```
+ //
+ // will be treated as if it were really something like:
+ //
+ // ```
+ // let place = Foo::new();
+ // match place { Foo { .. } if { let tmp1 = &place; inspect(*tmp1) }
+ // => { let tmp2 = place; feed(tmp2) }, ... }
+        // ```
+        //
+ // And an input like:
+ //
+ // ```
+ // let place = Foo::new();
+ // match place { ref mut foo if inspect(foo)
+ // => feed(foo), ... }
+ // ```
+ //
+ // will be treated as if it were really something like:
+ //
+ // ```
+ // let place = Foo::new();
+ // match place { Foo { .. } if { let tmp1 = & &mut place; inspect(*tmp1) }
+ // => { let tmp2 = &mut place; feed(tmp2) }, ... }
+ // ```
+ //
+ // In short, any pattern binding will always look like *some*
+ // kind of `&T` within the guard at least in terms of how the
+ // MIR-borrowck views it, and this will ensure that guard
+        // expressions cannot mutate the match inputs via such
+ // bindings. (It also ensures that guard expressions can at
+ // most *copy* values from such bindings; non-Copy things
+ // cannot be moved via pattern bindings in guard expressions.)
+ //
+ // ----
+ //
+ // Implementation notes (under assumption `autoref` is true).
+ //
+ // To encode the distinction above, we must inject the
+ // temporaries `tmp1` and `tmp2`.
+ //
+ // There are two cases of interest: binding by-value, and binding by-ref.
+ //
+ // 1. Binding by-value: Things are simple.
+ //
+ // * Establishing `tmp1` creates a reference into the
+ // matched place. This code is emitted by
+ // bind_matched_candidate_for_guard.
+ //
+ // * `tmp2` is only initialized "lazily", after we have
+ // checked the guard. Thus, the code that can trigger
+ // moves out of the candidate can only fire after the
+ // guard evaluated to true. This initialization code is
+ // emitted by bind_matched_candidate_for_arm.
+ //
+ // 2. Binding by-reference: Things are tricky.
+ //
+ // * Here, the guard expression wants a `&&` or `&&mut`
+ // into the original input. This means we need to borrow
+ // the reference that we create for the arm.
+ // * So we eagerly create the reference for the arm and then take a
+ // reference to that.
+ if let Some(guard) = guard {
+ let tcx = self.tcx;
+ let bindings = parent_bindings
+ .iter()
+ .flat_map(|(bindings, _)| bindings)
+ .chain(&candidate.bindings);
+
+ self.bind_matched_candidate_for_guard(block, schedule_drops, bindings.clone());
+ let guard_frame = GuardFrame {
+ locals: bindings.map(|b| GuardFrameLocal::new(b.var_id, b.binding_mode)).collect(),
+ };
+ debug!("entering guard building context: {:?}", guard_frame);
+ self.guard_context.push(guard_frame);
+
+ let re_erased = tcx.lifetimes.re_erased;
+ let scrutinee_source_info = self.source_info(scrutinee_span);
+ for &(place, temp) in fake_borrows {
+ let borrow = Rvalue::Ref(re_erased, BorrowKind::Shallow, place);
+ self.cfg.push_assign(block, scrutinee_source_info, Place::from(temp), borrow);
+ }
+
+ let arm_span = arm_span.unwrap();
+ let match_scope = match_scope.unwrap();
+ let mut guard_span = rustc_span::DUMMY_SP;
+
+ let (post_guard_block, otherwise_post_guard_block) =
+ self.in_if_then_scope(match_scope, |this| match *guard {
+ Guard::If(e) => {
+ let e = &this.thir[e];
+ guard_span = e.span;
+ this.then_else_break(
+ block,
+ e,
+ None,
+ match_scope,
+ this.source_info(arm_span),
+ )
+ }
+ Guard::IfLet(ref pat, scrutinee) => {
+ let s = &this.thir[scrutinee];
+ guard_span = s.span;
+ this.lower_let_expr(block, s, pat, match_scope, None, arm_span)
+ }
+ });
+
+ let source_info = self.source_info(guard_span);
+ let guard_end = self.source_info(tcx.sess.source_map().end_point(guard_span));
+ let guard_frame = self.guard_context.pop().unwrap();
+ debug!("Exiting guard building context with locals: {:?}", guard_frame);
+
+ for &(_, temp) in fake_borrows {
+ let cause = FakeReadCause::ForMatchGuard;
+ self.cfg.push_fake_read(post_guard_block, guard_end, cause, Place::from(temp));
+ }
+
+ let otherwise_block = candidate.otherwise_block.unwrap_or_else(|| {
+ let unreachable = self.cfg.start_new_block();
+ self.cfg.terminate(unreachable, source_info, TerminatorKind::Unreachable);
+ unreachable
+ });
+ self.false_edges(
+ otherwise_post_guard_block,
+ otherwise_block,
+ candidate.next_candidate_pre_binding_block,
+ source_info,
+ );
+
+ // We want to ensure that the matched candidates are bound
+ // after we have confirmed this candidate *and* any
+ // associated guard; binding them on `block` is too soon,
+ // because that would be before we've checked the result
+ // from the guard.
+ //
+ // But binding them on the arm is *too late*, because
+ // then all of the candidates for a single arm would be
+ // bound in the same place; that would cause a case like:
+ //
+ // ```rust
+ // match (30, 2) {
+ // (mut x, 1) | (2, mut x) if { true } => { ... }
+ // ... // ^^^^^^^ (this is `arm_block`)
+ // }
+ // ```
+ //
+ // would yield an `arm_block` something like:
+ //
+ // ```
+ // StorageLive(_4); // _4 is `x`
+ // _4 = &mut (_1.0: i32); // this is handling `(mut x, 1)` case
+ // _4 = &mut (_1.1: i32); // this is handling `(2, mut x)` case
+ // ```
+ //
+ // and that is clearly not correct.
+ let by_value_bindings = parent_bindings
+ .iter()
+ .flat_map(|(bindings, _)| bindings)
+ .chain(&candidate.bindings)
+ .filter(|binding| matches!(binding.binding_mode, BindingMode::ByValue));
+ // Read all of the by reference bindings to ensure that the
+ // place they refer to can't be modified by the guard.
+ for binding in by_value_bindings.clone() {
+ let local_id = self.var_local_id(binding.var_id, RefWithinGuard);
+ let cause = FakeReadCause::ForGuardBinding;
+ self.cfg.push_fake_read(post_guard_block, guard_end, cause, Place::from(local_id));
+ }
+ assert!(schedule_drops, "patterns with guards must schedule drops");
+ self.bind_matched_candidate_for_arm_body(post_guard_block, true, by_value_bindings);
+
+ post_guard_block
+ } else {
+ // (Here, it is not too early to bind the matched
+ // candidate on `block`, because there is no guard result
+ // that we have to inspect before we bind them.)
+ self.bind_matched_candidate_for_arm_body(
+ block,
+ schedule_drops,
+ parent_bindings
+ .iter()
+ .flat_map(|(bindings, _)| bindings)
+ .chain(&candidate.bindings),
+ );
+ block
+ }
+ }
+
+ /// Append `AscribeUserType` statements onto the end of `block`
+ /// for each ascription
+ fn ascribe_types(
+ &mut self,
+ block: BasicBlock,
+ ascriptions: impl IntoIterator<Item = Ascription<'tcx>>,
+ ) {
+ for ascription in ascriptions {
+ let source_info = self.source_info(ascription.annotation.span);
+
+ let base = self.canonical_user_type_annotations.push(ascription.annotation);
+ self.cfg.push(
+ block,
+ Statement {
+ source_info,
+ kind: StatementKind::AscribeUserType(
+ Box::new((
+ ascription.source,
+ UserTypeProjection { base, projs: Vec::new() },
+ )),
+ ascription.variance,
+ ),
+ },
+ );
+ }
+ }
+
+ fn bind_matched_candidate_for_guard<'b>(
+ &mut self,
+ block: BasicBlock,
+ schedule_drops: bool,
+ bindings: impl IntoIterator<Item = &'b Binding<'tcx>>,
+ ) where
+ 'tcx: 'b,
+ {
+ debug!("bind_matched_candidate_for_guard(block={:?})", block);
+
+ // Assign each of the bindings. Since we are binding for a
+ // guard expression, this will never trigger moves out of the
+ // candidate.
+ let re_erased = self.tcx.lifetimes.re_erased;
+ for binding in bindings {
+ debug!("bind_matched_candidate_for_guard(binding={:?})", binding);
+ let source_info = self.source_info(binding.span);
+
+ // For each pattern ident P of type T, `ref_for_guard` is
+ // a reference R: &T pointing to the location matched by
+ // the pattern, and every occurrence of P within a guard
+ // denotes *R.
+ let ref_for_guard = self.storage_live_binding(
+ block,
+ binding.var_id,
+ binding.span,
+ RefWithinGuard,
+ schedule_drops,
+ );
+ match binding.binding_mode {
+ BindingMode::ByValue => {
+ let rvalue = Rvalue::Ref(re_erased, BorrowKind::Shared, binding.source);
+ self.cfg.push_assign(block, source_info, ref_for_guard, rvalue);
+ }
+ BindingMode::ByRef(borrow_kind) => {
+ let value_for_arm = self.storage_live_binding(
+ block,
+ binding.var_id,
+ binding.span,
+ OutsideGuard,
+ schedule_drops,
+ );
+
+ let rvalue = Rvalue::Ref(re_erased, borrow_kind, binding.source);
+ self.cfg.push_assign(block, source_info, value_for_arm, rvalue);
+ let rvalue = Rvalue::Ref(re_erased, BorrowKind::Shared, value_for_arm);
+ self.cfg.push_assign(block, source_info, ref_for_guard, rvalue);
+ }
+ }
+ }
+ }
+
+ fn bind_matched_candidate_for_arm_body<'b>(
+ &mut self,
+ block: BasicBlock,
+ schedule_drops: bool,
+ bindings: impl IntoIterator<Item = &'b Binding<'tcx>>,
+ ) where
+ 'tcx: 'b,
+ {
+ debug!("bind_matched_candidate_for_arm_body(block={:?})", block);
+
+ let re_erased = self.tcx.lifetimes.re_erased;
+ // Assign each of the bindings. This may trigger moves out of the candidate.
+ for binding in bindings {
+ let source_info = self.source_info(binding.span);
+ let local = self.storage_live_binding(
+ block,
+ binding.var_id,
+ binding.span,
+ OutsideGuard,
+ schedule_drops,
+ );
+ if schedule_drops {
+ self.schedule_drop_for_binding(binding.var_id, binding.span, OutsideGuard);
+ }
+ let rvalue = match binding.binding_mode {
+ BindingMode::ByValue => Rvalue::Use(self.consume_by_copy_or_move(binding.source)),
+ BindingMode::ByRef(borrow_kind) => {
+ Rvalue::Ref(re_erased, borrow_kind, binding.source)
+ }
+ };
+ self.cfg.push_assign(block, source_info, local, rvalue);
+ }
+ }
+
+ /// Each binding (`ref mut var`/`ref var`/`mut var`/`var`, where the bound
+ /// `var` has type `T` in the arm body) in a pattern maps to up to 2 locals.
+ /// When the arm has a guard, the first local is a binding for occurrences of
+ /// `var` in the guard, which will have type `&T`. The second local is a
+ /// binding for occurrences of `var` in the arm body, which will have type `T`.
+ fn declare_binding(
+ &mut self,
+ source_info: SourceInfo,
+ visibility_scope: SourceScope,
+ mutability: Mutability,
+ name: Symbol,
+ mode: BindingMode,
+ var_id: LocalVarId,
+ var_ty: Ty<'tcx>,
+ user_ty: UserTypeProjections,
+ has_guard: ArmHasGuard,
+ opt_match_place: Option<(Option<Place<'tcx>>, Span)>,
+ pat_span: Span,
+ ) {
+ debug!(
+ "declare_binding(var_id={:?}, name={:?}, mode={:?}, var_ty={:?}, \
+ visibility_scope={:?}, source_info={:?})",
+ var_id, name, mode, var_ty, visibility_scope, source_info
+ );
+
+ let tcx = self.tcx;
+ let debug_source_info = SourceInfo { span: source_info.span, scope: visibility_scope };
+ let binding_mode = match mode {
+ BindingMode::ByValue => ty::BindingMode::BindByValue(mutability),
+ BindingMode::ByRef(_) => ty::BindingMode::BindByReference(mutability),
+ };
+ debug!("declare_binding: user_ty={:?}", user_ty);
+ let local = LocalDecl::<'tcx> {
+ mutability,
+ ty: var_ty,
+ user_ty: if user_ty.is_empty() { None } else { Some(Box::new(user_ty)) },
+ source_info,
+ internal: false,
+ is_block_tail: None,
+ local_info: Some(Box::new(LocalInfo::User(ClearCrossCrate::Set(BindingForm::Var(
+ VarBindingForm {
+ binding_mode,
+ // hypothetically, `visit_primary_bindings` could try to unzip
+ // an outermost hir::Ty as we descend, matching up
+ // idents in pat; but complex w/ unclear UI payoff.
+ // Instead, just abandon providing diagnostic info.
+ opt_ty_info: None,
+ opt_match_place,
+ pat_span,
+ },
+ ))))),
+ };
+ let for_arm_body = self.local_decls.push(local);
+ self.var_debug_info.push(VarDebugInfo {
+ name,
+ source_info: debug_source_info,
+ value: VarDebugInfoContents::Place(for_arm_body.into()),
+ });
+ let locals = if has_guard.0 {
+ let ref_for_guard = self.local_decls.push(LocalDecl::<'tcx> {
+ // This variable isn't mutated but has a name, so has to be
+ // immutable to avoid the unused mut lint.
+ mutability: Mutability::Not,
+ ty: tcx.mk_imm_ref(tcx.lifetimes.re_erased, var_ty),
+ user_ty: None,
+ source_info,
+ internal: false,
+ is_block_tail: None,
+ local_info: Some(Box::new(LocalInfo::User(ClearCrossCrate::Set(
+ BindingForm::RefForGuard,
+ )))),
+ });
+ self.var_debug_info.push(VarDebugInfo {
+ name,
+ source_info: debug_source_info,
+ value: VarDebugInfoContents::Place(ref_for_guard.into()),
+ });
+ LocalsForNode::ForGuard { ref_for_guard, for_arm_body }
+ } else {
+ LocalsForNode::One(for_arm_body)
+ };
+ debug!("declare_binding: vars={:?}", locals);
+ self.var_indices.insert(var_id, locals);
+ }
+
+ pub(crate) fn ast_let_else(
+ &mut self,
+ mut block: BasicBlock,
+ init: &Expr<'tcx>,
+ initializer_span: Span,
+ else_block: &Block,
+ visibility_scope: Option<SourceScope>,
+ remainder_scope: region::Scope,
+ remainder_span: Span,
+ pattern: &Pat<'tcx>,
+ ) -> BlockAnd<()> {
+ let (matching, failure) = self.in_if_then_scope(remainder_scope, |this| {
+ let scrutinee = unpack!(block = this.lower_scrutinee(block, init, initializer_span));
+ let pat = Pat { ty: init.ty, span: else_block.span, kind: Box::new(PatKind::Wild) };
+ let mut wildcard = Candidate::new(scrutinee.clone(), &pat, false);
+ this.declare_bindings(
+ visibility_scope,
+ remainder_span,
+ pattern,
+ ArmHasGuard(false),
+ Some((None, initializer_span)),
+ );
+ let mut candidate = Candidate::new(scrutinee.clone(), pattern, false);
+ let fake_borrow_temps = this.lower_match_tree(
+ block,
+ initializer_span,
+ pattern.span,
+ false,
+ &mut [&mut candidate, &mut wildcard],
+ );
+ // This block is for the matching case
+ let matching = this.bind_pattern(
+ this.source_info(pattern.span),
+ candidate,
+ None,
+ &fake_borrow_temps,
+ initializer_span,
+ None,
+ None,
+ None,
+ );
+ // This block is for the failure case
+ let failure = this.bind_pattern(
+ this.source_info(else_block.span),
+ wildcard,
+ None,
+ &fake_borrow_temps,
+ initializer_span,
+ None,
+ None,
+ None,
+ );
+ this.break_for_else(failure, remainder_scope, this.source_info(initializer_span));
+ matching.unit()
+ });
+
+ // This place is not really used because this destination place
+ // should never be used to take values at the end of the failure
+ // block.
+ let dummy_place = Place { local: RETURN_PLACE, projection: ty::List::empty() };
+ let failure_block;
+ unpack!(
+ failure_block = self.ast_block(
+ dummy_place,
+ failure,
+ else_block,
+ self.source_info(else_block.span),
+ )
+ );
+ self.cfg.terminate(
+ failure_block,
+ self.source_info(else_block.span),
+ TerminatorKind::Unreachable,
+ );
+ matching.unit()
+ }
+}
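To make the guard lowering above concrete from the surface-language side: because guard bindings are auto-referenced, a guard can read a by-value binding but cannot move out of it or mutate it; only the arm body gets the binding at its declared type. A minimal sketch in plain user code (names are illustrative, not taken from this diff):

```rust
fn main() {
    let place = Some(String::from("hello"));
    match place {
        // Inside the guard, `s` is effectively accessed through a shared
        // reference, so it can be read but not moved out of or mutated.
        Some(s) if s.len() > 3 => println!("long: {s}"),
        // In the arm body, `s` is bound by value, so moving it is fine.
        Some(s) => drop(s),
        None => {}
    }
}
```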
diff --git a/compiler/rustc_mir_build/src/build/matches/simplify.rs b/compiler/rustc_mir_build/src/build/matches/simplify.rs
new file mode 100644
index 000000000..c62989041
--- /dev/null
+++ b/compiler/rustc_mir_build/src/build/matches/simplify.rs
@@ -0,0 +1,318 @@
+//! Simplifying Candidates
+//!
+//! *Simplifying* a match pair `place @ pattern` means breaking it down
+//! into bindings or other, simpler match pairs. For example:
+//!
+//! - `place @ (P1, P2)` can be simplified to `[place.0 @ P1, place.1 @ P2]`
+//! - `place @ x` can be simplified to `[]` by binding `x` to `place`
+//!
+//! The `simplify_candidate` routine just repeatedly applies these
+//! sorts of simplifications until there is nothing left to
+//! simplify. Match pairs cannot be simplified if they require some
+//! sort of test: for example, testing which variant an enum is, or
+//! testing a value against a constant.
+
+use crate::build::expr::as_place::PlaceBuilder;
+use crate::build::matches::{Ascription, Binding, Candidate, MatchPair};
+use crate::build::Builder;
+use rustc_hir::RangeEnd;
+use rustc_middle::thir::{self, *};
+use rustc_middle::ty;
+use rustc_middle::ty::layout::IntegerExt;
+use rustc_target::abi::{Integer, Size};
+
+use std::mem;
+
+impl<'a, 'tcx> Builder<'a, 'tcx> {
+ /// Simplify a candidate so that all match pairs require a test.
+ ///
+ /// This method will also split a candidate whose only
+ /// match-pair is an or-pattern into multiple candidates.
+ /// This is so that
+ ///
+ /// match x {
+ /// 0 | 1 => { ... },
+ /// 2 | 3 => { ... },
+ /// }
+ ///
+ /// only generates a single switch. If this happens, this method returns
+ /// `true`.
+ pub(super) fn simplify_candidate<'pat>(
+ &mut self,
+ candidate: &mut Candidate<'pat, 'tcx>,
+ ) -> bool {
+ // repeatedly simplify match pairs until fixed point is reached
+ debug!(?candidate, "simplify_candidate");
+
+ // existing_bindings and new_bindings exist to keep the semantics in order.
+ // Reversing the binding order for bindings after `@` changes the binding order in places
+ // where it shouldn't be changed, for example `let (Some(a), Some(b)) = (x, y)`
+ //
+ // To avoid this, the binding occurs in the following manner:
+ // * the bindings for one iteration of the following loop occur in order (i.e. left to
+ // right)
+ // * the bindings from the previous iteration of the loop are prepended to the bindings from
+ // the current iteration (in the implementation this is done by mem::swap and extend)
+ // * after all iterations, these new bindings are then appended to the bindings that were
+ // preexisting (i.e. `candidate.bindings` when the function was called).
+ //
+ // example:
+ // candidate.bindings = [1, 2, 3]
+ // binding in iter 1: [4, 5]
+ // binding in iter 2: [6, 7]
+ //
+ // final binding: [1, 2, 3, 6, 7, 4, 5]
+ let mut existing_bindings = mem::take(&mut candidate.bindings);
+ let mut new_bindings = Vec::new();
+ loop {
+ let match_pairs = mem::take(&mut candidate.match_pairs);
+
+ if let [MatchPair { pattern: Pat { kind: box PatKind::Or { pats }, .. }, place }] =
+ &*match_pairs
+ {
+ existing_bindings.extend_from_slice(&new_bindings);
+ mem::swap(&mut candidate.bindings, &mut existing_bindings);
+ candidate.subcandidates =
+ self.create_or_subcandidates(candidate, place.clone(), pats);
+ return true;
+ }
+
+ let mut changed = false;
+ for match_pair in match_pairs {
+ match self.simplify_match_pair(match_pair, candidate) {
+ Ok(()) => {
+ changed = true;
+ }
+ Err(match_pair) => {
+ candidate.match_pairs.push(match_pair);
+ }
+ }
+ }
+ // Avoid issue #69971: the binding order should be right to left if there are more
+ // bindings after `@` to please the borrow checker
+ // Ex
+ // struct NonCopyStruct {
+ // copy_field: u32,
+ // }
+ //
+ // fn foo1(x: NonCopyStruct) {
+ // let y @ NonCopyStruct { copy_field: z } = x;
+ // // the above should turn into
+ // let z = x.copy_field;
+ // let y = x;
+ // }
+ candidate.bindings.extend_from_slice(&new_bindings);
+ mem::swap(&mut candidate.bindings, &mut new_bindings);
+ candidate.bindings.clear();
+
+ if !changed {
+ existing_bindings.extend_from_slice(&new_bindings);
+ mem::swap(&mut candidate.bindings, &mut existing_bindings);
+ // Move or-patterns to the end, because they can result in us
+ // creating additional candidates, so we want to test them as
+ // late as possible.
+ candidate
+ .match_pairs
+ .sort_by_key(|pair| matches!(*pair.pattern.kind, PatKind::Or { .. }));
+ debug!(simplified = ?candidate, "simplify_candidate");
+ return false; // if we were not able to simplify any, done.
+ }
+ }
+ }
+
+ /// Given `candidate` that has a single or-pattern for its match-pairs,
+ /// creates a fresh candidate for each of its input subpatterns passed via
+ /// `pats`.
+ fn create_or_subcandidates<'pat>(
+ &mut self,
+ candidate: &Candidate<'pat, 'tcx>,
+ place: PlaceBuilder<'tcx>,
+ pats: &'pat [Pat<'tcx>],
+ ) -> Vec<Candidate<'pat, 'tcx>> {
+ pats.iter()
+ .map(|pat| {
+ let mut candidate = Candidate::new(place.clone(), pat, candidate.has_guard);
+ self.simplify_candidate(&mut candidate);
+ candidate
+ })
+ .collect()
+ }
+
+ /// Tries to simplify `match_pair`, returning `Ok(())` if
+ /// successful. If successful, new match pairs and bindings will
+ /// have been pushed into the candidate. If no simplification is
+ /// possible, `Err` is returned and no changes are made to
+ /// candidate.
+ fn simplify_match_pair<'pat>(
+ &mut self,
+ match_pair: MatchPair<'pat, 'tcx>,
+ candidate: &mut Candidate<'pat, 'tcx>,
+ ) -> Result<(), MatchPair<'pat, 'tcx>> {
+ let tcx = self.tcx;
+ match *match_pair.pattern.kind {
+ PatKind::AscribeUserType {
+ ref subpattern,
+ ascription: thir::Ascription { ref annotation, variance },
+ } => {
+ // Apply the type ascription to the value at `match_pair.place`, which is the value being matched.
+ if let Ok(place_resolved) =
+ match_pair.place.clone().try_upvars_resolved(self.tcx, self.typeck_results)
+ {
+ candidate.ascriptions.push(Ascription {
+ annotation: annotation.clone(),
+ source: place_resolved.into_place(self.tcx, self.typeck_results),
+ variance,
+ });
+ }
+
+ candidate.match_pairs.push(MatchPair::new(match_pair.place, subpattern));
+
+ Ok(())
+ }
+
+ PatKind::Wild => {
+ // nothing left to do
+ Ok(())
+ }
+
+ PatKind::Binding {
+ name: _,
+ mutability: _,
+ mode,
+ var,
+ ty: _,
+ ref subpattern,
+ is_primary: _,
+ } => {
+ if let Ok(place_resolved) =
+ match_pair.place.clone().try_upvars_resolved(self.tcx, self.typeck_results)
+ {
+ candidate.bindings.push(Binding {
+ span: match_pair.pattern.span,
+ source: place_resolved.into_place(self.tcx, self.typeck_results),
+ var_id: var,
+ binding_mode: mode,
+ });
+ }
+
+ if let Some(subpattern) = subpattern.as_ref() {
+ // this is the `x @ P` case; have to keep matching against `P` now
+ candidate.match_pairs.push(MatchPair::new(match_pair.place, subpattern));
+ }
+
+ Ok(())
+ }
+
+ PatKind::Constant { .. } => {
+ // FIXME normalize patterns when possible
+ Err(match_pair)
+ }
+
+ PatKind::Range(PatRange { lo, hi, end }) => {
+ let (range, bias) = match *lo.ty().kind() {
+ ty::Char => {
+ (Some(('\u{0000}' as u128, '\u{10FFFF}' as u128, Size::from_bits(32))), 0)
+ }
+ ty::Int(ity) => {
+ let size = Integer::from_int_ty(&tcx, ity).size();
+ let max = size.truncate(u128::MAX);
+ let bias = 1u128 << (size.bits() - 1);
+ (Some((0, max, size)), bias)
+ }
+ ty::Uint(uty) => {
+ let size = Integer::from_uint_ty(&tcx, uty).size();
+ let max = size.truncate(u128::MAX);
+ (Some((0, max, size)), 0)
+ }
+ _ => (None, 0),
+ };
+ if let Some((min, max, sz)) = range {
+ // We want to compare ranges numerically, but the order of the bitwise
+ // representation of signed integers does not match their numeric order. Thus,
+ // to correct the ordering, we need to shift the range of signed integers to
+ // correct the comparison. This is achieved by XORing with a bias (see
+ // pattern/_match.rs for another pertinent example of this pattern).
+ //
+ // Also, for performance, it's important to only do the second `try_to_bits` if
+ // necessary.
+ let lo = lo.try_to_bits(sz).unwrap() ^ bias;
+ if lo <= min {
+ let hi = hi.try_to_bits(sz).unwrap() ^ bias;
+ if hi > max || hi == max && end == RangeEnd::Included {
+ // Irrefutable pattern match.
+ return Ok(());
+ }
+ }
+ }
+ Err(match_pair)
+ }
+
+ PatKind::Slice { ref prefix, ref slice, ref suffix } => {
+ if prefix.is_empty() && slice.is_some() && suffix.is_empty() {
+ // irrefutable
+ self.prefix_slice_suffix(
+ &mut candidate.match_pairs,
+ &match_pair.place,
+ prefix,
+ slice.as_ref(),
+ suffix,
+ );
+ Ok(())
+ } else {
+ Err(match_pair)
+ }
+ }
+
+ PatKind::Variant { adt_def, substs, variant_index, ref subpatterns } => {
+ let irrefutable = adt_def.variants().iter_enumerated().all(|(i, v)| {
+ i == variant_index || {
+ self.tcx.features().exhaustive_patterns
+ && !v
+ .uninhabited_from(
+ self.tcx,
+ substs,
+ adt_def.adt_kind(),
+ self.param_env,
+ )
+ .is_empty()
+ }
+ }) && (adt_def.did().is_local()
+ || !adt_def.is_variant_list_non_exhaustive());
+ if irrefutable {
+ let place_builder = match_pair.place.downcast(adt_def, variant_index);
+ candidate
+ .match_pairs
+ .extend(self.field_match_pairs(place_builder, subpatterns));
+ Ok(())
+ } else {
+ Err(match_pair)
+ }
+ }
+
+ PatKind::Array { ref prefix, ref slice, ref suffix } => {
+ self.prefix_slice_suffix(
+ &mut candidate.match_pairs,
+ &match_pair.place,
+ prefix,
+ slice.as_ref(),
+ suffix,
+ );
+ Ok(())
+ }
+
+ PatKind::Leaf { ref subpatterns } => {
+ // tuple struct, match subpats (if any)
+ candidate.match_pairs.extend(self.field_match_pairs(match_pair.place, subpatterns));
+ Ok(())
+ }
+
+ PatKind::Deref { ref subpattern } => {
+ let place_builder = match_pair.place.deref();
+ candidate.match_pairs.push(MatchPair::new(place_builder, subpattern));
+ Ok(())
+ }
+
+ PatKind::Or { .. } => Err(match_pair),
+ }
+ }
+}
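For orientation, the or-pattern splitting described in `simplify_candidate` is what lets a match like the following compile down to a single integer switch rather than a chain of comparisons; this is ordinary user code, shown only to illustrate the input shape:

```rust
fn bucket(x: u32) -> &'static str {
    match x {
        // Both or-patterns end up as subcandidates of one switch on `x`.
        0 | 1 => "tiny",
        2 | 3 => "small",
        _ => "other",
    }
}

fn main() {
    assert_eq!(bucket(2), "small");
}
```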
diff --git a/compiler/rustc_mir_build/src/build/matches/test.rs b/compiler/rustc_mir_build/src/build/matches/test.rs
new file mode 100644
index 000000000..598da80c5
--- /dev/null
+++ b/compiler/rustc_mir_build/src/build/matches/test.rs
@@ -0,0 +1,837 @@
+// Testing candidates
+//
+// After candidates have been simplified, the only match pairs that
+// remain are those that require some sort of test. The functions here
+// identify what tests are needed, perform the tests, and then filter
+// the candidates based on the result.
+
+use crate::build::expr::as_place::PlaceBuilder;
+use crate::build::matches::{Candidate, MatchPair, Test, TestKind};
+use crate::build::Builder;
+use crate::thir::pattern::compare_const_vals;
+use rustc_data_structures::fx::FxIndexMap;
+use rustc_hir::{LangItem, RangeEnd};
+use rustc_index::bit_set::BitSet;
+use rustc_middle::mir::*;
+use rustc_middle::thir::*;
+use rustc_middle::ty::subst::{GenericArg, Subst};
+use rustc_middle::ty::util::IntTypeExt;
+use rustc_middle::ty::{self, adjustment::PointerCast, Ty, TyCtxt};
+use rustc_span::def_id::DefId;
+use rustc_span::symbol::{sym, Symbol};
+use rustc_span::Span;
+use rustc_target::abi::VariantIdx;
+
+use std::cmp::Ordering;
+
+impl<'a, 'tcx> Builder<'a, 'tcx> {
+ /// Identifies what test is needed to decide if `match_pair` is applicable.
+ ///
+ /// It is a bug to call this with a not-fully-simplified pattern.
+ pub(super) fn test<'pat>(&mut self, match_pair: &MatchPair<'pat, 'tcx>) -> Test<'tcx> {
+ match *match_pair.pattern.kind {
+ PatKind::Variant { adt_def, substs: _, variant_index: _, subpatterns: _ } => Test {
+ span: match_pair.pattern.span,
+ kind: TestKind::Switch {
+ adt_def,
+ variants: BitSet::new_empty(adt_def.variants().len()),
+ },
+ },
+
+ PatKind::Constant { .. } if is_switch_ty(match_pair.pattern.ty) => {
+ // For integers, we use a `SwitchInt` match, which allows
+ // us to handle more cases.
+ Test {
+ span: match_pair.pattern.span,
+ kind: TestKind::SwitchInt {
+ switch_ty: match_pair.pattern.ty,
+
+ // these maps are empty to start; cases are
+ // added below in add_cases_to_switch
+ options: Default::default(),
+ },
+ }
+ }
+
+ PatKind::Constant { value } => Test {
+ span: match_pair.pattern.span,
+ kind: TestKind::Eq { value, ty: match_pair.pattern.ty },
+ },
+
+ PatKind::Range(range) => {
+ assert_eq!(range.lo.ty(), match_pair.pattern.ty);
+ assert_eq!(range.hi.ty(), match_pair.pattern.ty);
+ Test { span: match_pair.pattern.span, kind: TestKind::Range(range) }
+ }
+
+ PatKind::Slice { ref prefix, ref slice, ref suffix } => {
+ let len = prefix.len() + suffix.len();
+ let op = if slice.is_some() { BinOp::Ge } else { BinOp::Eq };
+ Test { span: match_pair.pattern.span, kind: TestKind::Len { len: len as u64, op } }
+ }
+
+ PatKind::Or { .. } => bug!("or-patterns should have already been handled"),
+
+ PatKind::AscribeUserType { .. }
+ | PatKind::Array { .. }
+ | PatKind::Wild
+ | PatKind::Binding { .. }
+ | PatKind::Leaf { .. }
+ | PatKind::Deref { .. } => self.error_simplifyable(match_pair),
+ }
+ }
+
+ pub(super) fn add_cases_to_switch<'pat>(
+ &mut self,
+ test_place: &PlaceBuilder<'tcx>,
+ candidate: &Candidate<'pat, 'tcx>,
+ switch_ty: Ty<'tcx>,
+ options: &mut FxIndexMap<ConstantKind<'tcx>, u128>,
+ ) -> bool {
+ let Some(match_pair) = candidate.match_pairs.iter().find(|mp| mp.place == *test_place) else {
+ return false;
+ };
+
+ match *match_pair.pattern.kind {
+ PatKind::Constant { value } => {
+ options
+ .entry(value)
+ .or_insert_with(|| value.eval_bits(self.tcx, self.param_env, switch_ty));
+ true
+ }
+ PatKind::Variant { .. } => {
+ panic!("you should have called add_variants_to_switch instead!");
+ }
+ PatKind::Range(range) => {
+ // Check that none of the switch values are in the range.
+ self.values_not_contained_in_range(range, options).unwrap_or(false)
+ }
+ PatKind::Slice { .. }
+ | PatKind::Array { .. }
+ | PatKind::Wild
+ | PatKind::Or { .. }
+ | PatKind::Binding { .. }
+ | PatKind::AscribeUserType { .. }
+ | PatKind::Leaf { .. }
+ | PatKind::Deref { .. } => {
+ // don't know how to add these patterns to a switch
+ false
+ }
+ }
+ }
+
+ pub(super) fn add_variants_to_switch<'pat>(
+ &mut self,
+ test_place: &PlaceBuilder<'tcx>,
+ candidate: &Candidate<'pat, 'tcx>,
+ variants: &mut BitSet<VariantIdx>,
+ ) -> bool {
+ let Some(match_pair) = candidate.match_pairs.iter().find(|mp| mp.place == *test_place) else {
+ return false;
+ };
+
+ match *match_pair.pattern.kind {
+ PatKind::Variant { adt_def: _, variant_index, .. } => {
+ // We have a pattern testing for variant `variant_index`
+ // set the corresponding index to true
+ variants.insert(variant_index);
+ true
+ }
+ _ => {
+ // don't know how to add these patterns to a switch
+ false
+ }
+ }
+ }
+
+ pub(super) fn perform_test(
+ &mut self,
+ match_start_span: Span,
+ scrutinee_span: Span,
+ block: BasicBlock,
+ place_builder: PlaceBuilder<'tcx>,
+ test: &Test<'tcx>,
+ make_target_blocks: impl FnOnce(&mut Self) -> Vec<BasicBlock>,
+ ) {
+ let place: Place<'tcx>;
+ if let Ok(test_place_builder) =
+ place_builder.try_upvars_resolved(self.tcx, self.typeck_results)
+ {
+ place = test_place_builder.into_place(self.tcx, self.typeck_results);
+ } else {
+ return;
+ }
+ debug!(
+ "perform_test({:?}, {:?}: {:?}, {:?})",
+ block,
+ place,
+ place.ty(&self.local_decls, self.tcx),
+ test
+ );
+
+ let source_info = self.source_info(test.span);
+ match test.kind {
+ TestKind::Switch { adt_def, ref variants } => {
+ let target_blocks = make_target_blocks(self);
+ // `variants` is a `BitSet` of indexes into `adt_def.variants`.
+ let num_enum_variants = adt_def.variants().len();
+ debug_assert_eq!(target_blocks.len(), num_enum_variants + 1);
+ let otherwise_block = *target_blocks.last().unwrap();
+ let tcx = self.tcx;
+ let switch_targets = SwitchTargets::new(
+ adt_def.discriminants(tcx).filter_map(|(idx, discr)| {
+ if variants.contains(idx) {
+ debug_assert_ne!(
+ target_blocks[idx.index()],
+ otherwise_block,
+ "no canididates for tested discriminant: {:?}",
+ discr,
+ );
+ Some((discr.val, target_blocks[idx.index()]))
+ } else {
+ debug_assert_eq!(
+ target_blocks[idx.index()],
+ otherwise_block,
+ "found canididates for untested discriminant: {:?}",
+ discr,
+ );
+ None
+ }
+ }),
+ otherwise_block,
+ );
+ debug!("num_enum_variants: {}, variants: {:?}", num_enum_variants, variants);
+ let discr_ty = adt_def.repr().discr_type().to_ty(tcx);
+ let discr = self.temp(discr_ty, test.span);
+ self.cfg.push_assign(
+ block,
+ self.source_info(scrutinee_span),
+ discr,
+ Rvalue::Discriminant(place),
+ );
+ self.cfg.terminate(
+ block,
+ self.source_info(match_start_span),
+ TerminatorKind::SwitchInt {
+ discr: Operand::Move(discr),
+ switch_ty: discr_ty,
+ targets: switch_targets,
+ },
+ );
+ }
+
+ TestKind::SwitchInt { switch_ty, ref options } => {
+ let target_blocks = make_target_blocks(self);
+ let terminator = if *switch_ty.kind() == ty::Bool {
+ assert!(!options.is_empty() && options.len() <= 2);
+ let [first_bb, second_bb] = *target_blocks else {
+ bug!("`TestKind::SwitchInt` on `bool` should have two targets")
+ };
+ let (true_bb, false_bb) = match options[0] {
+ 1 => (first_bb, second_bb),
+ 0 => (second_bb, first_bb),
+ v => span_bug!(test.span, "expected boolean value but got {:?}", v),
+ };
+ TerminatorKind::if_(self.tcx, Operand::Copy(place), true_bb, false_bb)
+ } else {
+ // The switch may be inexhaustive so we have a catch all block
+ debug_assert_eq!(options.len() + 1, target_blocks.len());
+ let otherwise_block = *target_blocks.last().unwrap();
+ let switch_targets = SwitchTargets::new(
+ options.values().copied().zip(target_blocks),
+ otherwise_block,
+ );
+ TerminatorKind::SwitchInt {
+ discr: Operand::Copy(place),
+ switch_ty,
+ targets: switch_targets,
+ }
+ };
+ self.cfg.terminate(block, self.source_info(match_start_span), terminator);
+ }
+
+ TestKind::Eq { value, ty } => {
+ if !ty.is_scalar() {
+ // Use `PartialEq::eq` instead of `BinOp::Eq`
+ // (the binop can only handle primitives)
+ self.non_scalar_compare(
+ block,
+ make_target_blocks,
+ source_info,
+ value,
+ place,
+ ty,
+ );
+ } else if let [success, fail] = *make_target_blocks(self) {
+ assert_eq!(value.ty(), ty);
+ let expect = self.literal_operand(test.span, value);
+ let val = Operand::Copy(place);
+ self.compare(block, success, fail, source_info, BinOp::Eq, expect, val);
+ } else {
+ bug!("`TestKind::Eq` should have two target blocks");
+ }
+ }
+
+ TestKind::Range(PatRange { lo, hi, ref end }) => {
+ let lower_bound_success = self.cfg.start_new_block();
+ let target_blocks = make_target_blocks(self);
+
+ // Test `val` by computing `lo <= val && val <= hi`, using primitive comparisons.
+ let lo = self.literal_operand(test.span, lo);
+ let hi = self.literal_operand(test.span, hi);
+ let val = Operand::Copy(place);
+
+ let [success, fail] = *target_blocks else {
+ bug!("`TestKind::Range` should have two target blocks");
+ };
+ self.compare(
+ block,
+ lower_bound_success,
+ fail,
+ source_info,
+ BinOp::Le,
+ lo,
+ val.clone(),
+ );
+ let op = match *end {
+ RangeEnd::Included => BinOp::Le,
+ RangeEnd::Excluded => BinOp::Lt,
+ };
+ self.compare(lower_bound_success, success, fail, source_info, op, val, hi);
+ }
+
+ TestKind::Len { len, op } => {
+ let target_blocks = make_target_blocks(self);
+
+ let usize_ty = self.tcx.types.usize;
+ let actual = self.temp(usize_ty, test.span);
+
+ // actual = len(place)
+ self.cfg.push_assign(block, source_info, actual, Rvalue::Len(place));
+
+ // expected = <N>
+ let expected = self.push_usize(block, source_info, len);
+
+ let [true_bb, false_bb] = *target_blocks else {
+ bug!("`TestKind::Len` should have two target blocks");
+ };
+ // result = actual == expected OR result = actual < expected
+ // branch based on result
+ self.compare(
+ block,
+ true_bb,
+ false_bb,
+ source_info,
+ op,
+ Operand::Move(actual),
+ Operand::Move(expected),
+ );
+ }
+ }
+ }
+
+ /// Compare using the provided built-in comparison operator
+ fn compare(
+ &mut self,
+ block: BasicBlock,
+ success_block: BasicBlock,
+ fail_block: BasicBlock,
+ source_info: SourceInfo,
+ op: BinOp,
+ left: Operand<'tcx>,
+ right: Operand<'tcx>,
+ ) {
+ let bool_ty = self.tcx.types.bool;
+ let result = self.temp(bool_ty, source_info.span);
+
+ // result = op(left, right)
+ self.cfg.push_assign(
+ block,
+ source_info,
+ result,
+ Rvalue::BinaryOp(op, Box::new((left, right))),
+ );
+
+ // branch based on result
+ self.cfg.terminate(
+ block,
+ source_info,
+ TerminatorKind::if_(self.tcx, Operand::Move(result), success_block, fail_block),
+ );
+ }
+
+ /// Compare two `&T` values using `<T as std::cmp::PartialEq>::eq`
+ fn non_scalar_compare(
+ &mut self,
+ block: BasicBlock,
+ make_target_blocks: impl FnOnce(&mut Self) -> Vec<BasicBlock>,
+ source_info: SourceInfo,
+ value: ConstantKind<'tcx>,
+ place: Place<'tcx>,
+ mut ty: Ty<'tcx>,
+ ) {
+ let mut expect = self.literal_operand(source_info.span, value);
+ let mut val = Operand::Copy(place);
+
+ // If we're using `b"..."` as a pattern, we need to insert an
+ // unsizing coercion, as the byte string has the type `&[u8; N]`.
+ //
+ // We want to do this even when the scrutinee is a reference to an
+ // array, so we can call `<[u8]>::eq` rather than having to find an
+ // `<[u8; N]>::eq`.
+ let unsize = |ty: Ty<'tcx>| match ty.kind() {
+ ty::Ref(region, rty, _) => match rty.kind() {
+ ty::Array(inner_ty, n) => Some((region, inner_ty, n)),
+ _ => None,
+ },
+ _ => None,
+ };
+ let opt_ref_ty = unsize(ty);
+ let opt_ref_test_ty = unsize(value.ty());
+ match (opt_ref_ty, opt_ref_test_ty) {
+ // nothing to do, neither is an array
+ (None, None) => {}
+ (Some((region, elem_ty, _)), _) | (None, Some((region, elem_ty, _))) => {
+ let tcx = self.tcx;
+ // make both a slice
+ ty = tcx.mk_imm_ref(*region, tcx.mk_slice(*elem_ty));
+ if opt_ref_ty.is_some() {
+ let temp = self.temp(ty, source_info.span);
+ self.cfg.push_assign(
+ block,
+ source_info,
+ temp,
+ Rvalue::Cast(CastKind::Pointer(PointerCast::Unsize), val, ty),
+ );
+ val = Operand::Move(temp);
+ }
+ if opt_ref_test_ty.is_some() {
+ let slice = self.temp(ty, source_info.span);
+ self.cfg.push_assign(
+ block,
+ source_info,
+ slice,
+ Rvalue::Cast(CastKind::Pointer(PointerCast::Unsize), expect, ty),
+ );
+ expect = Operand::Move(slice);
+ }
+ }
+ }
+
+ let ty::Ref(_, deref_ty, _) = *ty.kind() else {
+ bug!("non_scalar_compare called on non-reference type: {}", ty);
+ };
+
+ let eq_def_id = self.tcx.require_lang_item(LangItem::PartialEq, None);
+ let method = trait_method(self.tcx, eq_def_id, sym::eq, deref_ty, &[deref_ty.into()]);
+
+ let bool_ty = self.tcx.types.bool;
+ let eq_result = self.temp(bool_ty, source_info.span);
+ let eq_block = self.cfg.start_new_block();
+ self.cfg.terminate(
+ block,
+ source_info,
+ TerminatorKind::Call {
+ func: Operand::Constant(Box::new(Constant {
+ span: source_info.span,
+
+ // FIXME(#54571): This constant comes from user input (a
+ // constant in a pattern). Are there forms where users can add
+ // type annotations here? For example, an associated constant?
+ // Need to experiment.
+ user_ty: None,
+
+ literal: method,
+ })),
+ args: vec![val, expect],
+ destination: eq_result,
+ target: Some(eq_block),
+ cleanup: None,
+ from_hir_call: false,
+ fn_span: source_info.span,
+ },
+ );
+ self.diverge_from(block);
+
+ let [success_block, fail_block] = *make_target_blocks(self) else {
+ bug!("`TestKind::Eq` should have two target blocks")
+ };
+ // check the result
+ self.cfg.terminate(
+ eq_block,
+ source_info,
+ TerminatorKind::if_(self.tcx, Operand::Move(eq_result), success_block, fail_block),
+ );
+ }
+
+ /// Given that we are performing `test` against `test_place`, this job
+ /// sorts out what the status of `candidate` will be after the test. See
+ /// `test_candidates` for the usage of this function. The returned index is
+ /// the index that this candidate should be placed in the
+ /// `target_candidates` vec. The candidate may be modified to update its
+ /// `match_pairs`.
+ ///
+ /// So, for example, if this candidate is `x @ Some(P0)` and the `Test` is
+ /// a variant test, then we would modify the candidate to be `(x as
+ /// Option).0 @ P0` and return the index corresponding to the variant
+ /// `Some`.
+ ///
+ /// However, in some cases, the test may just not be relevant to this candidate.
+ /// For example, suppose we are testing whether `foo.x == 22`, but in one
+ /// match arm we have `Foo { x: _, ... }`... in that case, the test for
+ /// what value `x` has has no particular relevance to this candidate. In
+ /// such cases, this function just returns None without doing anything.
+ /// This is used by the overall `match_candidates` algorithm to structure
+ /// the match as a whole. See `match_candidates` for more details.
+ ///
+ /// FIXME(#29623). In some cases, we have some tricky choices to make. For
+ /// example, if we are testing that `x == 22`, but the candidate is `x @
+ /// 13..55`, what should we do? In the event that the test is true, we know
+ /// that the candidate applies, but in the event of false, we don't know
+ /// that it *doesn't* apply. For now, we return `None`, indicating that the
+ /// test does not apply to this candidate, but it might be that we could get
+ /// tighter match code if we did something a bit different.
+ pub(super) fn sort_candidate<'pat>(
+ &mut self,
+ test_place: &PlaceBuilder<'tcx>,
+ test: &Test<'tcx>,
+ candidate: &mut Candidate<'pat, 'tcx>,
+ ) -> Option<usize> {
+ // Find the match_pair for this place (if any). At present,
+ // afaik, there can be at most one. (In the future, if we
+ // adopted a more general `@` operator, there might be more
+ // than one, but it'd be very unusual to have two sides that
+ // both require tests; you'd expect one side to be simplified
+ // away.)
+ let (match_pair_index, match_pair) =
+ candidate.match_pairs.iter().enumerate().find(|&(_, mp)| mp.place == *test_place)?;
+
+ match (&test.kind, &*match_pair.pattern.kind) {
+ // If we are performing a variant switch, then this
+ // informs variant patterns, but nothing else.
+ (
+ &TestKind::Switch { adt_def: tested_adt_def, .. },
+ &PatKind::Variant { adt_def, variant_index, ref subpatterns, .. },
+ ) => {
+ assert_eq!(adt_def, tested_adt_def);
+ self.candidate_after_variant_switch(
+ match_pair_index,
+ adt_def,
+ variant_index,
+ subpatterns,
+ candidate,
+ );
+ Some(variant_index.as_usize())
+ }
+
+ (&TestKind::Switch { .. }, _) => None,
+
+ // If we are performing a switch over integers, then this informs integer
+ // equality, but nothing else.
+ //
+ // FIXME(#29623) we could use PatKind::Range to rule
+ // things out here, in some cases.
+ (
+ &TestKind::SwitchInt { switch_ty: _, ref options },
+ &PatKind::Constant { ref value },
+ ) if is_switch_ty(match_pair.pattern.ty) => {
+ let index = options.get_index_of(value).unwrap();
+ self.candidate_without_match_pair(match_pair_index, candidate);
+ Some(index)
+ }
+
+ (&TestKind::SwitchInt { switch_ty: _, ref options }, &PatKind::Range(range)) => {
+ let not_contained =
+ self.values_not_contained_in_range(range, options).unwrap_or(false);
+
+ if not_contained {
+ // No switch values are contained in the pattern range,
+ // so the pattern can be matched only if this test fails.
+ let otherwise = options.len();
+ Some(otherwise)
+ } else {
+ None
+ }
+ }
+
+ (&TestKind::SwitchInt { .. }, _) => None,
+
+ (
+ &TestKind::Len { len: test_len, op: BinOp::Eq },
+ &PatKind::Slice { ref prefix, ref slice, ref suffix },
+ ) => {
+ let pat_len = (prefix.len() + suffix.len()) as u64;
+ match (test_len.cmp(&pat_len), slice) {
+ (Ordering::Equal, &None) => {
+ // on true, min_len = len = $actual_length,
+ // on false, len != $actual_length
+ self.candidate_after_slice_test(
+ match_pair_index,
+ candidate,
+ prefix,
+ slice.as_ref(),
+ suffix,
+ );
+ Some(0)
+ }
+ (Ordering::Less, _) => {
+ // test_len < pat_len. If $actual_len = test_len,
+ // then $actual_len < pat_len and we don't have
+ // enough elements.
+ Some(1)
+ }
+ (Ordering::Equal | Ordering::Greater, &Some(_)) => {
+ // This can match both if $actual_len = test_len >= pat_len,
+ // and if $actual_len > test_len. We can't advance.
+ None
+ }
+ (Ordering::Greater, &None) => {
+ // test_len != pat_len, so if $actual_len = test_len, then
+ // $actual_len != pat_len.
+ Some(1)
+ }
+ }
+ }
+
+ (
+ &TestKind::Len { len: test_len, op: BinOp::Ge },
+ &PatKind::Slice { ref prefix, ref slice, ref suffix },
+ ) => {
+ // the test is `$actual_len >= test_len`
+ let pat_len = (prefix.len() + suffix.len()) as u64;
+ match (test_len.cmp(&pat_len), slice) {
+ (Ordering::Equal, &Some(_)) => {
+ // $actual_len >= test_len = pat_len,
+ // so we can match.
+ self.candidate_after_slice_test(
+ match_pair_index,
+ candidate,
+ prefix,
+ slice.as_ref(),
+ suffix,
+ );
+ Some(0)
+ }
+ (Ordering::Less, _) | (Ordering::Equal, &None) => {
+ // test_len <= pat_len. If $actual_len < test_len,
+ // then it is also < pat_len, so the test passing is
+ // necessary (but insufficient).
+ Some(0)
+ }
+ (Ordering::Greater, &None) => {
+ // test_len > pat_len. If $actual_len >= test_len > pat_len,
+ // then we know we won't have a match.
+ Some(1)
+ }
+ (Ordering::Greater, &Some(_)) => {
+ // test_len < pat_len, and is therefore less
+ // strict. This can still go both ways.
+ None
+ }
+ }
+ }
+
+ (&TestKind::Range(test), &PatKind::Range(pat)) => {
+ use std::cmp::Ordering::*;
+
+ if test == pat {
+ self.candidate_without_match_pair(match_pair_index, candidate);
+ return Some(0);
+ }
+
+ // For performance, it's important to only do the second
+ // `compare_const_vals` if necessary.
+ let no_overlap = if matches!(
+ (compare_const_vals(self.tcx, test.hi, pat.lo, self.param_env)?, test.end),
+ (Less, _) | (Equal, RangeEnd::Excluded) // test < pat
+ ) || matches!(
+ (compare_const_vals(self.tcx, test.lo, pat.hi, self.param_env)?, pat.end),
+ (Greater, _) | (Equal, RangeEnd::Excluded) // test > pat
+ ) {
+ Some(1)
+ } else {
+ None
+ };
+
+ // If the testing range does not overlap with pattern range,
+ // the pattern can be matched only if this test fails.
+ no_overlap
+ }
+
+ (&TestKind::Range(range), &PatKind::Constant { value }) => {
+ if let Some(false) = self.const_range_contains(range, value) {
+ // `value` is not contained in the testing range,
+ // so `value` can be matched only if this test fails.
+ Some(1)
+ } else {
+ None
+ }
+ }
+
+ (&TestKind::Range { .. }, _) => None,
+
+ (&TestKind::Eq { .. } | &TestKind::Len { .. }, _) => {
+ // The call to `self.test(&match_pair)` below is not actually used to generate any
+ // MIR. Instead, we just want to compare with `test` (the parameter of the method)
+ // to see if it is the same.
+ //
+ // However, at this point we can still encounter or-patterns that were extracted
+ // from previous calls to `sort_candidate`, so we need to manually address that
+ // case to avoid panicking in `self.test()`.
+ if let PatKind::Or { .. } = &*match_pair.pattern.kind {
+ return None;
+ }
+
+ // These are all binary tests.
+ //
+ // FIXME(#29623) we can be more clever here
+ let pattern_test = self.test(&match_pair);
+ if pattern_test.kind == test.kind {
+ self.candidate_without_match_pair(match_pair_index, candidate);
+ Some(0)
+ } else {
+ None
+ }
+ }
+ }
+ }
+
+ fn candidate_without_match_pair(
+ &mut self,
+ match_pair_index: usize,
+ candidate: &mut Candidate<'_, 'tcx>,
+ ) {
+ candidate.match_pairs.remove(match_pair_index);
+ }
+
+ fn candidate_after_slice_test<'pat>(
+ &mut self,
+ match_pair_index: usize,
+ candidate: &mut Candidate<'pat, 'tcx>,
+ prefix: &'pat [Pat<'tcx>],
+ opt_slice: Option<&'pat Pat<'tcx>>,
+ suffix: &'pat [Pat<'tcx>],
+ ) {
+ let removed_place = candidate.match_pairs.remove(match_pair_index).place;
+ self.prefix_slice_suffix(
+ &mut candidate.match_pairs,
+ &removed_place,
+ prefix,
+ opt_slice,
+ suffix,
+ );
+ }
+
+ fn candidate_after_variant_switch<'pat>(
+ &mut self,
+ match_pair_index: usize,
+ adt_def: ty::AdtDef<'tcx>,
+ variant_index: VariantIdx,
+ subpatterns: &'pat [FieldPat<'tcx>],
+ candidate: &mut Candidate<'pat, 'tcx>,
+ ) {
+ let match_pair = candidate.match_pairs.remove(match_pair_index);
+
+ // So, if we have a match-pattern like `x @ Enum::Variant(P1, P2)`,
+ // we want to create a set of derived match-patterns like
+ // `(x as Variant).0 @ P1` and `(x as Variant).1 @ P2`.
+ let elem =
+ ProjectionElem::Downcast(Some(adt_def.variant(variant_index).name), variant_index);
+ let downcast_place = match_pair.place.project(elem); // `(x as Variant)`
+ let consequent_match_pairs = subpatterns.iter().map(|subpattern| {
+ // e.g., `(x as Variant).0`
+ let place = downcast_place.clone().field(subpattern.field, subpattern.pattern.ty);
+ // e.g., `(x as Variant).0 @ P1`
+ MatchPair::new(place, &subpattern.pattern)
+ });
+
+ candidate.match_pairs.extend(consequent_match_pairs);
+ }
+
+ fn error_simplifyable<'pat>(&mut self, match_pair: &MatchPair<'pat, 'tcx>) -> ! {
+ span_bug!(match_pair.pattern.span, "simplifyable pattern found: {:?}", match_pair.pattern)
+ }
+
+ fn const_range_contains(
+ &self,
+ range: PatRange<'tcx>,
+ value: ConstantKind<'tcx>,
+ ) -> Option<bool> {
+ use std::cmp::Ordering::*;
+
+ // For performance, it's important to only do the second
+ // `compare_const_vals` if necessary.
+ Some(
+ matches!(compare_const_vals(self.tcx, range.lo, value, self.param_env)?, Less | Equal)
+ && matches!(
+ (compare_const_vals(self.tcx, value, range.hi, self.param_env)?, range.end),
+ (Less, _) | (Equal, RangeEnd::Included)
+ ),
+ )
+ }
+
+ fn values_not_contained_in_range(
+ &self,
+ range: PatRange<'tcx>,
+ options: &FxIndexMap<ConstantKind<'tcx>, u128>,
+ ) -> Option<bool> {
+ for &val in options.keys() {
+ if self.const_range_contains(range, val)? {
+ return Some(false);
+ }
+ }
+
+ Some(true)
+ }
+}
+
+impl Test<'_> {
+ pub(super) fn targets(&self) -> usize {
+ match self.kind {
+ TestKind::Eq { .. } | TestKind::Range(_) | TestKind::Len { .. } => 2,
+ TestKind::Switch { adt_def, .. } => {
+ // While the switch that we generate doesn't test for all
+ // variants, we have a target for each variant and the
+ // otherwise case, and we make sure that all of the cases not
+ // specified have the same block.
+ adt_def.variants().len() + 1
+ }
+ TestKind::SwitchInt { switch_ty, ref options, .. } => {
+ if switch_ty.is_bool() {
+ // `bool` is special cased in `perform_test` to always
+ // branch to two blocks.
+ 2
+ } else {
+ options.len() + 1
+ }
+ }
+ }
+ }
+}
+
+fn is_switch_ty(ty: Ty<'_>) -> bool {
+ ty.is_integral() || ty.is_char() || ty.is_bool()
+}
+
+fn trait_method<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ trait_def_id: DefId,
+ method_name: Symbol,
+ self_ty: Ty<'tcx>,
+ params: &[GenericArg<'tcx>],
+) -> ConstantKind<'tcx> {
+ let substs = tcx.mk_substs_trait(self_ty, params);
+
+ // The unhygienic comparison here is acceptable because this is only
+ // used on known traits.
+ let item = tcx
+ .associated_items(trait_def_id)
+ .filter_by_name_unhygienic(method_name)
+ .find(|item| item.kind == ty::AssocKind::Fn)
+ .expect("trait method not found");
+
+ let method_ty = tcx.bound_type_of(item.def_id);
+ let method_ty = method_ty.subst(tcx, substs);
+
+ ConstantKind::zero_sized(method_ty)
+}
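A rough user-level sketch of patterns that exercise the test kinds implemented above (enum-variant `Switch`, `Range`, slice `Len`, and non-scalar `Eq` lowered to `PartialEq::eq`); the function and names here are illustrative only, not taken from the compiler sources:

```rust
enum Shape {
    Circle(u32),
    Square(u32),
}

fn describe(shape: &Shape, name: &str, bytes: &[u8]) -> &'static str {
    match (shape, name, bytes) {
        // Variant test on the enum discriminant.
        (Shape::Circle(0), _, _) => "point",
        // Range test on the radius.
        (Shape::Circle(1..=9), _, _) => "small circle",
        // Non-scalar equality: the `&str` comparison lowers to `PartialEq::eq`.
        (Shape::Square(_), "unit", _) => "named unit square",
        // Length test from the slice pattern.
        (_, _, [_, _, _]) => "three bytes",
        _ => "something else",
    }
}

fn main() {
    assert_eq!(describe(&Shape::Circle(0), "x", b"abc"), "point");
    assert_eq!(describe(&Shape::Square(2), "unit", b""), "named unit square");
}
```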
diff --git a/compiler/rustc_mir_build/src/build/matches/util.rs b/compiler/rustc_mir_build/src/build/matches/util.rs
new file mode 100644
index 000000000..9a1e98d3b
--- /dev/null
+++ b/compiler/rustc_mir_build/src/build/matches/util.rs
@@ -0,0 +1,109 @@
+use crate::build::expr::as_place::PlaceBuilder;
+use crate::build::matches::MatchPair;
+use crate::build::Builder;
+use rustc_middle::mir::*;
+use rustc_middle::thir::*;
+use rustc_middle::ty;
+use smallvec::SmallVec;
+use std::convert::TryInto;
+
+impl<'a, 'tcx> Builder<'a, 'tcx> {
+ pub(crate) fn field_match_pairs<'pat>(
+ &mut self,
+ place: PlaceBuilder<'tcx>,
+ subpatterns: &'pat [FieldPat<'tcx>],
+ ) -> Vec<MatchPair<'pat, 'tcx>> {
+ subpatterns
+ .iter()
+ .map(|fieldpat| {
+ let place = place.clone().field(fieldpat.field, fieldpat.pattern.ty);
+ MatchPair::new(place, &fieldpat.pattern)
+ })
+ .collect()
+ }
+
+ pub(crate) fn prefix_slice_suffix<'pat>(
+ &mut self,
+ match_pairs: &mut SmallVec<[MatchPair<'pat, 'tcx>; 1]>,
+ place: &PlaceBuilder<'tcx>,
+ prefix: &'pat [Pat<'tcx>],
+ opt_slice: Option<&'pat Pat<'tcx>>,
+ suffix: &'pat [Pat<'tcx>],
+ ) {
+ let tcx = self.tcx;
+ let (min_length, exact_size) = if let Ok(place_resolved) =
+ place.clone().try_upvars_resolved(tcx, self.typeck_results)
+ {
+ match place_resolved
+ .into_place(tcx, self.typeck_results)
+ .ty(&self.local_decls, tcx)
+ .ty
+ .kind()
+ {
+ ty::Array(_, length) => (length.eval_usize(tcx, self.param_env), true),
+ _ => ((prefix.len() + suffix.len()).try_into().unwrap(), false),
+ }
+ } else {
+ ((prefix.len() + suffix.len()).try_into().unwrap(), false)
+ };
+
+ match_pairs.extend(prefix.iter().enumerate().map(|(idx, subpattern)| {
+ let elem =
+ ProjectionElem::ConstantIndex { offset: idx as u64, min_length, from_end: false };
+ let place = place.clone().project(elem);
+ MatchPair::new(place, subpattern)
+ }));
+
+ if let Some(subslice_pat) = opt_slice {
+ let suffix_len = suffix.len() as u64;
+ let subslice = place.clone().project(ProjectionElem::Subslice {
+ from: prefix.len() as u64,
+ to: if exact_size { min_length - suffix_len } else { suffix_len },
+ from_end: !exact_size,
+ });
+ match_pairs.push(MatchPair::new(subslice, subslice_pat));
+ }
+
+ match_pairs.extend(suffix.iter().rev().enumerate().map(|(idx, subpattern)| {
+ let end_offset = (idx + 1) as u64;
+ let elem = ProjectionElem::ConstantIndex {
+ offset: if exact_size { min_length - end_offset } else { end_offset },
+ min_length,
+ from_end: !exact_size,
+ };
+ let place = place.clone().project(elem);
+ MatchPair::new(place, subpattern)
+ }));
+ }
+
+ /// Creates a false edge to `imaginary_target` and a real edge to
+ /// `real_target`. If `imaginary_target` is `None`, or is the same as the real
+ /// target, a Goto is generated instead to simplify the generated MIR.
+ pub(crate) fn false_edges(
+ &mut self,
+ from_block: BasicBlock,
+ real_target: BasicBlock,
+ imaginary_target: Option<BasicBlock>,
+ source_info: SourceInfo,
+ ) {
+ match imaginary_target {
+ Some(target) if target != real_target => {
+ self.cfg.terminate(
+ from_block,
+ source_info,
+ TerminatorKind::FalseEdge { real_target, imaginary_target: target },
+ );
+ }
+ _ => self.cfg.goto(from_block, source_info, real_target),
+ }
+ }
+}
+
+impl<'pat, 'tcx> MatchPair<'pat, 'tcx> {
+ pub(crate) fn new(
+ place: PlaceBuilder<'tcx>,
+ pattern: &'pat Pat<'tcx>,
+ ) -> MatchPair<'pat, 'tcx> {
+ MatchPair { place, pattern }
+ }
+}
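The prefix/subslice/suffix decomposition performed by `prefix_slice_suffix` corresponds to ordinary slice patterns like the one below; this is an illustrative user-code sketch, not part of the builder itself:

```rust
fn split_ends(v: &[i32]) -> Option<(i32, &[i32], i32)> {
    match v {
        // prefix: `first`, subslice: `middle @ ..`, suffix: `last`
        [first, middle @ .., last] => Some((*first, middle, *last)),
        // Slices with fewer than two elements don't have a distinct
        // first and last element, so they fall through here.
        _ => None,
    }
}

fn main() {
    assert_eq!(split_ends(&[1, 2, 3, 4]), Some((1, &[2, 3][..], 4)));
    assert_eq!(split_ends(&[5]), None);
}
```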
diff --git a/compiler/rustc_mir_build/src/build/misc.rs b/compiler/rustc_mir_build/src/build/misc.rs
new file mode 100644
index 000000000..86f466ff7
--- /dev/null
+++ b/compiler/rustc_mir_build/src/build/misc.rs
@@ -0,0 +1,75 @@
+//! Miscellaneous builder routines that are not specific to building any particular
+//! kind of thing.
+
+use crate::build::Builder;
+
+use rustc_middle::mir::*;
+use rustc_middle::ty::{self, Ty};
+use rustc_span::{Span, DUMMY_SP};
+use rustc_trait_selection::infer::InferCtxtExt;
+
+impl<'a, 'tcx> Builder<'a, 'tcx> {
+ /// Adds a new temporary value of type `ty` storing the result of
+ /// evaluating `expr`.
+ ///
+ /// N.B., **No cleanup is scheduled for this temporary.** You should
+ /// call `schedule_drop` once the temporary is initialized.
+ pub(crate) fn temp(&mut self, ty: Ty<'tcx>, span: Span) -> Place<'tcx> {
+ // Mark this local as internal to avoid temporaries with types not present in the
+ // user's code resulting in ICEs from the generator transform.
+ let temp = self.local_decls.push(LocalDecl::new(ty, span).internal());
+ let place = Place::from(temp);
+ debug!("temp: created temp {:?} with type {:?}", place, self.local_decls[temp].ty);
+ place
+ }
+
+ /// Convenience function for creating a literal operand, one
+ /// without any user type annotation.
+ pub(crate) fn literal_operand(
+ &mut self,
+ span: Span,
+ literal: ConstantKind<'tcx>,
+ ) -> Operand<'tcx> {
+ let constant = Box::new(Constant { span, user_ty: None, literal });
+ Operand::Constant(constant)
+ }
+
+ // Returns a zero literal operand for the appropriate type; works for
+ // bool, char and integers.
+ pub(crate) fn zero_literal(&mut self, span: Span, ty: Ty<'tcx>) -> Operand<'tcx> {
+ let literal = ConstantKind::from_bits(self.tcx, 0, ty::ParamEnv::empty().and(ty));
+
+ self.literal_operand(span, literal)
+ }
+
+ pub(crate) fn push_usize(
+ &mut self,
+ block: BasicBlock,
+ source_info: SourceInfo,
+ value: u64,
+ ) -> Place<'tcx> {
+ let usize_ty = self.tcx.types.usize;
+ let temp = self.temp(usize_ty, source_info.span);
+ self.cfg.push_assign_constant(
+ block,
+ source_info,
+ temp,
+ Constant {
+ span: source_info.span,
+ user_ty: None,
+ literal: ConstantKind::from_usize(self.tcx, value),
+ },
+ );
+ temp
+ }
+
+ pub(crate) fn consume_by_copy_or_move(&self, place: Place<'tcx>) -> Operand<'tcx> {
+ let tcx = self.tcx;
+ let ty = place.ty(&self.local_decls, tcx).ty;
+ if !self.infcx.type_is_copy_modulo_regions(self.param_env, ty, DUMMY_SP) {
+ Operand::Move(place)
+ } else {
+ Operand::Copy(place)
+ }
+ }
+}
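`consume_by_copy_or_move` chooses `Operand::Copy` for `Copy` types and `Operand::Move` otherwise, which mirrors the familiar surface-level distinction sketched here (illustrative user code only):

```rust
#[derive(Clone, Copy)]
struct Point {
    x: i32,
    y: i32,
}

fn main() {
    let p = Point { x: 1, y: 2 };
    let q = p; // `Point: Copy`, so this use of `p` is a copy ...
    println!("{} {}", p.x, q.y); // ... and `p` remains usable afterwards.

    let s = String::from("hi");
    let t = s; // `String` is not `Copy`, so this is a move out of `s`.
    println!("{t}"); // Using `s` here instead would be a compile error.
}
```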
diff --git a/compiler/rustc_mir_build/src/build/mod.rs b/compiler/rustc_mir_build/src/build/mod.rs
new file mode 100644
index 000000000..12b8ceede
--- /dev/null
+++ b/compiler/rustc_mir_build/src/build/mod.rs
@@ -0,0 +1,1171 @@
+use crate::build;
+pub(crate) use crate::build::expr::as_constant::lit_to_mir_constant;
+use crate::build::expr::as_place::PlaceBuilder;
+use crate::build::scope::DropKind;
+use crate::thir::pattern::pat_from_hir;
+use rustc_apfloat::ieee::{Double, Single};
+use rustc_apfloat::Float;
+use rustc_data_structures::fx::FxHashMap;
+use rustc_errors::ErrorGuaranteed;
+use rustc_hir as hir;
+use rustc_hir::def_id::{DefId, LocalDefId};
+use rustc_hir::lang_items::LangItem;
+use rustc_hir::{GeneratorKind, Node};
+use rustc_index::vec::{Idx, IndexVec};
+use rustc_infer::infer::{InferCtxt, TyCtxtInferExt};
+use rustc_middle::hir::place::PlaceBase as HirPlaceBase;
+use rustc_middle::middle::region;
+use rustc_middle::mir::interpret::ConstValue;
+use rustc_middle::mir::interpret::Scalar;
+use rustc_middle::mir::*;
+use rustc_middle::thir::{BindingMode, Expr, ExprId, LintLevel, LocalVarId, PatKind, Thir};
+use rustc_middle::ty::subst::Subst;
+use rustc_middle::ty::{self, Ty, TyCtxt, TypeVisitable, TypeckResults};
+use rustc_span::symbol::sym;
+use rustc_span::Span;
+use rustc_span::Symbol;
+use rustc_target::spec::abi::Abi;
+
+use super::lints;
+
+pub(crate) fn mir_built<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ def: ty::WithOptConstParam<LocalDefId>,
+) -> &'tcx rustc_data_structures::steal::Steal<Body<'tcx>> {
+ if let Some(def) = def.try_upgrade(tcx) {
+ return tcx.mir_built(def);
+ }
+
+ let mut body = mir_build(tcx, def);
+ if def.const_param_did.is_some() {
+ assert!(matches!(body.source.instance, ty::InstanceDef::Item(_)));
+ body.source = MirSource::from_instance(ty::InstanceDef::Item(def.to_global()));
+ }
+
+ tcx.alloc_steal_mir(body)
+}
+
+/// Construct the MIR for a given `DefId`.
+fn mir_build(tcx: TyCtxt<'_>, def: ty::WithOptConstParam<LocalDefId>) -> Body<'_> {
+ let id = tcx.hir().local_def_id_to_hir_id(def.did);
+ let body_owner_kind = tcx.hir().body_owner_kind(def.did);
+ let typeck_results = tcx.typeck_opt_const_arg(def);
+
+ // Ensure unsafeck and abstract const building are run before we steal the THIR.
+ // We can't use `ensure()` for `thir_abstract_const` as it doesn't compute the query
+ // if inputs are green. This can cause ICEs when calling `thir_abstract_const` after
+ // THIR has been stolen if we haven't computed this query yet.
+ match def {
+ ty::WithOptConstParam { did, const_param_did: Some(const_param_did) } => {
+ tcx.ensure().thir_check_unsafety_for_const_arg((did, const_param_did));
+ drop(tcx.thir_abstract_const_of_const_arg((did, const_param_did)));
+ }
+ ty::WithOptConstParam { did, const_param_did: None } => {
+ tcx.ensure().thir_check_unsafety(did);
+ drop(tcx.thir_abstract_const(did));
+ }
+ }
+
+ // Figure out what primary body this item has.
+ let (body_id, return_ty_span, span_with_body) = match tcx.hir().get(id) {
+ Node::Expr(hir::Expr {
+ kind: hir::ExprKind::Closure(hir::Closure { fn_decl, body, .. }),
+ ..
+ }) => (*body, fn_decl.output.span(), None),
+ Node::Item(hir::Item {
+ kind: hir::ItemKind::Fn(hir::FnSig { decl, .. }, _, body_id),
+ span,
+ ..
+ })
+ | Node::ImplItem(hir::ImplItem {
+ kind: hir::ImplItemKind::Fn(hir::FnSig { decl, .. }, body_id),
+ span,
+ ..
+ })
+ | Node::TraitItem(hir::TraitItem {
+ kind: hir::TraitItemKind::Fn(hir::FnSig { decl, .. }, hir::TraitFn::Provided(body_id)),
+ span,
+ ..
+ }) => {
+ // Use the `Span` of the `Item/ImplItem/TraitItem` as the body span,
+ // since the def span of a function does not include the body
+ (*body_id, decl.output.span(), Some(*span))
+ }
+ Node::Item(hir::Item {
+ kind: hir::ItemKind::Static(ty, _, body_id) | hir::ItemKind::Const(ty, body_id),
+ ..
+ })
+ | Node::ImplItem(hir::ImplItem { kind: hir::ImplItemKind::Const(ty, body_id), .. })
+ | Node::TraitItem(hir::TraitItem {
+ kind: hir::TraitItemKind::Const(ty, Some(body_id)),
+ ..
+ }) => (*body_id, ty.span, None),
+ Node::AnonConst(hir::AnonConst { body, hir_id, .. }) => {
+ (*body, tcx.hir().span(*hir_id), None)
+ }
+
+ _ => span_bug!(tcx.hir().span(id), "can't build MIR for {:?}", def.did),
+ };
+
+ // If we don't have a specialized span for the body, just use the
+ // normal def span.
+ let span_with_body = span_with_body.unwrap_or_else(|| tcx.hir().span(id));
+
+ tcx.infer_ctxt().enter(|infcx| {
+ let body = if let Some(error_reported) = typeck_results.tainted_by_errors {
+ build::construct_error(&infcx, def, id, body_id, body_owner_kind, error_reported)
+ } else if body_owner_kind.is_fn_or_closure() {
+ // fetch the fully liberated fn signature (that is, all bound
+ // types/lifetimes replaced)
+ let fn_sig = typeck_results.liberated_fn_sigs()[id];
+ let fn_def_id = tcx.hir().local_def_id(id);
+
+ let safety = match fn_sig.unsafety {
+ hir::Unsafety::Normal => Safety::Safe,
+ hir::Unsafety::Unsafe => Safety::FnUnsafe,
+ };
+
+ let body = tcx.hir().body(body_id);
+ let (thir, expr) = tcx
+ .thir_body(def)
+ .unwrap_or_else(|_| (tcx.alloc_steal_thir(Thir::new()), ExprId::from_u32(0)));
+ // We ran all queries that depended on THIR at the beginning
+ // of `mir_build`, so now we can steal it
+ let thir = thir.steal();
+ let ty = tcx.type_of(fn_def_id);
+ let mut abi = fn_sig.abi;
+ let implicit_argument = match ty.kind() {
+ ty::Closure(..) => {
+ // HACK(eddyb) Avoid having RustCall on closures,
+ // as it adds unnecessary (and wrong) auto-tupling.
+ abi = Abi::Rust;
+ vec![ArgInfo(liberated_closure_env_ty(tcx, id, body_id), None, None, None)]
+ }
+ ty::Generator(..) => {
+ let gen_ty = tcx.typeck_body(body_id).node_type(id);
+
+ // The resume argument may be missing; in that case we need to provide it here.
+ // It will always be `()` in this case.
+ if body.params.is_empty() {
+ vec![
+ ArgInfo(gen_ty, None, None, None),
+ ArgInfo(tcx.mk_unit(), None, None, None),
+ ]
+ } else {
+ vec![ArgInfo(gen_ty, None, None, None)]
+ }
+ }
+ _ => vec![],
+ };
+
+ let explicit_arguments = body.params.iter().enumerate().map(|(index, arg)| {
+ let owner_id = tcx.hir().body_owner(body_id);
+ let opt_ty_info;
+ let self_arg;
+ if let Some(ref fn_decl) = tcx.hir().fn_decl_by_hir_id(owner_id) {
+ opt_ty_info = fn_decl
+ .inputs
+ .get(index)
+ // Make sure that inferred closure args have no type span
+ .and_then(|ty| if arg.pat.span != ty.span { Some(ty.span) } else { None });
+ self_arg = if index == 0 && fn_decl.implicit_self.has_implicit_self() {
+ match fn_decl.implicit_self {
+ hir::ImplicitSelfKind::Imm => Some(ImplicitSelfKind::Imm),
+ hir::ImplicitSelfKind::Mut => Some(ImplicitSelfKind::Mut),
+ hir::ImplicitSelfKind::ImmRef => Some(ImplicitSelfKind::ImmRef),
+ hir::ImplicitSelfKind::MutRef => Some(ImplicitSelfKind::MutRef),
+ _ => None,
+ }
+ } else {
+ None
+ };
+ } else {
+ opt_ty_info = None;
+ self_arg = None;
+ }
+
+ // C-variadic fns also have a `VaList` input that's not listed in `fn_sig`
+ // (as it's created inside the body itself, not passed in from outside).
+ let ty = if fn_sig.c_variadic && index == fn_sig.inputs().len() {
+ let va_list_did = tcx.require_lang_item(LangItem::VaList, Some(arg.span));
+
+ tcx.bound_type_of(va_list_did).subst(tcx, &[tcx.lifetimes.re_erased.into()])
+ } else {
+ fn_sig.inputs()[index]
+ };
+
+ ArgInfo(ty, opt_ty_info, Some(&arg), self_arg)
+ });
+
+ let arguments = implicit_argument.into_iter().chain(explicit_arguments);
+
+ let (yield_ty, return_ty) = if body.generator_kind.is_some() {
+ let gen_ty = tcx.typeck_body(body_id).node_type(id);
+ let gen_sig = match gen_ty.kind() {
+ ty::Generator(_, gen_substs, ..) => gen_substs.as_generator().sig(),
+ _ => span_bug!(tcx.hir().span(id), "generator w/o generator type: {:?}", ty),
+ };
+ (Some(gen_sig.yield_ty), gen_sig.return_ty)
+ } else {
+ (None, fn_sig.output())
+ };
+
+ let mut mir = build::construct_fn(
+ &thir,
+ &infcx,
+ def,
+ id,
+ arguments,
+ safety,
+ abi,
+ return_ty,
+ return_ty_span,
+ body,
+ expr,
+ span_with_body,
+ );
+ if yield_ty.is_some() {
+ mir.generator.as_mut().unwrap().yield_ty = yield_ty;
+ }
+ mir
+ } else {
+ // Get the revealed type of this const. This is *not* the adjusted
+ // type of its body, which may be a subtype of this type. For
+ // example:
+ //
+ // fn foo(_: &()) {}
+ // static X: fn(&'static ()) = foo;
+ //
+ // The adjusted type of the body of X is `for<'a> fn(&'a ())` which
+ // is not the same as the type of X. We need the type of the return
+ // place to be the type of the constant because NLL typeck will
+ // equate them.
+
+ let return_ty = typeck_results.node_type(id);
+
+ let (thir, expr) = tcx
+ .thir_body(def)
+ .unwrap_or_else(|_| (tcx.alloc_steal_thir(Thir::new()), ExprId::from_u32(0)));
+ // We ran all queries that depended on THIR at the beginning
+ // of `mir_build`, so now we can steal it
+ let thir = thir.steal();
+
+ build::construct_const(&thir, &infcx, expr, def, id, return_ty, return_ty_span)
+ };
+
+ lints::check(tcx, &body);
+
+ // The borrow checker will replace all the regions here with its own
+ // inference variables. There's no point having non-erased regions here.
+ // The exception is `body.user_type_annotations`, which is used unmodified
+ // by borrow checking.
+ debug_assert!(
+ !(body.local_decls.has_free_regions()
+ || body.basic_blocks().has_free_regions()
+ || body.var_debug_info.has_free_regions()
+ || body.yield_ty().has_free_regions()),
+ "Unexpected free regions in MIR: {:?}",
+ body,
+ );
+
+ body
+ })
+}
+
+///////////////////////////////////////////////////////////////////////////
+// BuildMir -- walks a crate, looking for fn items and methods to build MIR from
+
+fn liberated_closure_env_ty(
+ tcx: TyCtxt<'_>,
+ closure_expr_id: hir::HirId,
+ body_id: hir::BodyId,
+) -> Ty<'_> {
+ let closure_ty = tcx.typeck_body(body_id).node_type(closure_expr_id);
+
+ let ty::Closure(closure_def_id, closure_substs) = *closure_ty.kind() else {
+ bug!("closure expr does not have closure type: {:?}", closure_ty);
+ };
+
+ let bound_vars =
+ tcx.mk_bound_variable_kinds(std::iter::once(ty::BoundVariableKind::Region(ty::BrEnv)));
+ let br =
+ ty::BoundRegion { var: ty::BoundVar::from_usize(bound_vars.len() - 1), kind: ty::BrEnv };
+ let env_region = ty::ReLateBound(ty::INNERMOST, br);
+ let closure_env_ty = tcx.closure_env_ty(closure_def_id, closure_substs, env_region).unwrap();
+ tcx.erase_late_bound_regions(ty::Binder::bind_with_vars(closure_env_ty, bound_vars))
+}
+
+#[derive(Debug, PartialEq, Eq)]
+enum BlockFrame {
+ /// Evaluation is currently within a statement.
+ ///
+ /// Examples include:
+ /// 1. `EXPR;`
+ /// 2. `let _ = EXPR;`
+ /// 3. `let x = EXPR;`
+ Statement {
+ /// If true, then the statement discards the result of evaluating
+ /// the expression (as in examples 1 and 2 above).
+ ignores_expr_result: bool,
+ },
+
+ /// Evaluation is currently within the tail expression of a block.
+ ///
+ /// Example: `{ STMT_1; STMT_2; EXPR }`
+ TailExpr {
+ /// If true, then the surrounding context of the block ignores
+ /// the result of evaluating the block's tail expression.
+ ///
+ /// Example: `let _ = { STMT_1; EXPR };`
+ tail_result_is_ignored: bool,
+
+ /// `Span` of the tail expression.
+ span: Span,
+ },
+
+ /// Generic mark meaning that the block occurred as a subexpression
+ /// where the result might be used.
+ ///
+ /// Examples: `foo(EXPR)`, `match EXPR { ... }`
+ SubExpr,
+}
+
+impl BlockFrame {
+ fn is_tail_expr(&self) -> bool {
+ match *self {
+ BlockFrame::TailExpr { .. } => true,
+
+ BlockFrame::Statement { .. } | BlockFrame::SubExpr => false,
+ }
+ }
+ fn is_statement(&self) -> bool {
+ match *self {
+ BlockFrame::Statement { .. } => true,
+
+ BlockFrame::TailExpr { .. } | BlockFrame::SubExpr => false,
+ }
+ }
+}
+
+#[derive(Debug)]
+struct BlockContext(Vec<BlockFrame>);
+
+struct Builder<'a, 'tcx> {
+ tcx: TyCtxt<'tcx>,
+ infcx: &'a InferCtxt<'a, 'tcx>,
+ typeck_results: &'tcx TypeckResults<'tcx>,
+ region_scope_tree: &'tcx region::ScopeTree,
+ param_env: ty::ParamEnv<'tcx>,
+
+ thir: &'a Thir<'tcx>,
+ cfg: CFG<'tcx>,
+
+ def_id: DefId,
+ hir_id: hir::HirId,
+ parent_module: DefId,
+ check_overflow: bool,
+ fn_span: Span,
+ arg_count: usize,
+ generator_kind: Option<GeneratorKind>,
+
+ /// The current set of scopes, updated as we traverse;
+ /// see the `scope` module for more details.
+ scopes: scope::Scopes<'tcx>,
+
+ /// The block-context: each time we build the code within a thir::Block,
+ /// we push a frame here tracking whether we are building a statement or
+ /// if we are pushing the tail expression of the block. This is used to
+ /// embed information in generated temps about whether they were created
+ /// for a block tail expression or not.
+ ///
+ /// It would be great if we could fold this into `self.scopes`
+ /// somehow, but right now I think that is very tightly tied to
+ /// the code generation in ways that we cannot (or should not)
+ /// start just throwing new entries onto that vector in order to
+ /// distinguish the context of EXPR1 from the context of EXPR2 in
+ /// `{ STMTS; EXPR1 } + EXPR2`.
+ block_context: BlockContext,
+
+ /// The current unsafe block in scope
+ in_scope_unsafe: Safety,
+
+ /// The vector of all scopes that we have created thus far;
+ /// we track this for debuginfo later.
+ source_scopes: IndexVec<SourceScope, SourceScopeData<'tcx>>,
+ source_scope: SourceScope,
+
+ /// The guard-context: each time we build the guard expression for
+ /// a match arm, we push onto this stack, and then pop when we
+ /// finish building it.
+ guard_context: Vec<GuardFrame>,
+
+ /// Maps `HirId`s of variable bindings to the `Local`s created for them.
+ /// (A match binding can have two locals; the 2nd is for the arm's guard.)
+ var_indices: FxHashMap<LocalVarId, LocalsForNode>,
+ local_decls: IndexVec<Local, LocalDecl<'tcx>>,
+ canonical_user_type_annotations: ty::CanonicalUserTypeAnnotations<'tcx>,
+ upvar_mutbls: Vec<Mutability>,
+ unit_temp: Option<Place<'tcx>>,
+
+ var_debug_info: Vec<VarDebugInfo<'tcx>>,
+}
+
+impl<'a, 'tcx> Builder<'a, 'tcx> {
+ fn is_bound_var_in_guard(&self, id: LocalVarId) -> bool {
+ self.guard_context.iter().any(|frame| frame.locals.iter().any(|local| local.id == id))
+ }
+
+ fn var_local_id(&self, id: LocalVarId, for_guard: ForGuard) -> Local {
+ self.var_indices[&id].local_id(for_guard)
+ }
+}
+
+impl BlockContext {
+ fn new() -> Self {
+ BlockContext(vec![])
+ }
+ fn push(&mut self, bf: BlockFrame) {
+ self.0.push(bf);
+ }
+ fn pop(&mut self) -> Option<BlockFrame> {
+ self.0.pop()
+ }
+
+ /// Traverses the frames on the `BlockContext`, searching for
+ /// the first block-tail expression frame with no intervening
+ /// statement frame.
+ ///
+ /// Notably, this skips over `SubExpr` frames; this method is
+ /// meant to be used in the context of understanding the
+ /// relationship of a temp (created within some complicated
+ /// expression) with its containing expression, and whether the
+ /// value of that *containing expression* (not the temp!) is
+ /// ignored.
+ fn currently_in_block_tail(&self) -> Option<BlockTailInfo> {
+ for bf in self.0.iter().rev() {
+ match bf {
+ BlockFrame::SubExpr => continue,
+ BlockFrame::Statement { .. } => break,
+ &BlockFrame::TailExpr { tail_result_is_ignored, span } => {
+ return Some(BlockTailInfo { tail_result_is_ignored, span });
+ }
+ }
+ }
+
+ None
+ }
+
+ /// Looks at the topmost frame on the BlockContext and reports
+ /// whether it is one that would discard a block tail result.
+ ///
+ /// Unlike `currently_within_ignored_tail_expression`, this does
+ /// *not* skip over `SubExpr` frames: here, we want to know
+ /// whether the block result itself is discarded.
+ fn currently_ignores_tail_results(&self) -> bool {
+ match self.0.last() {
+ // no context: conservatively assume result is read
+ None => false,
+
+ // sub-expression: block result feeds into some computation
+ Some(BlockFrame::SubExpr) => false,
+
+ // otherwise: use accumulated is_ignored state.
+ Some(
+ BlockFrame::TailExpr { tail_result_is_ignored: ignored, .. }
+ | BlockFrame::Statement { ignores_expr_result: ignored },
+ ) => *ignored,
+ }
+ }
+}
+
+#[derive(Debug)]
+enum LocalsForNode {
+ /// In the usual case, a `HirId` for an identifier maps to at most
+ /// one `Local` declaration.
+ One(Local),
+
+ /// The exceptional case is identifiers in a match arm's pattern
+ /// that are referenced in a guard of that match arm. For these,
+ /// we have `2` Locals.
+ ///
+ /// * `for_arm_body` is the Local used in the arm body (which is
+ /// just like the `One` case above),
+ ///
+ /// * `ref_for_guard` is the Local used in the arm's guard (which
+ /// is a reference to a temp that is an alias of
+ /// `for_arm_body`).
+ ForGuard { ref_for_guard: Local, for_arm_body: Local },
+}
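+
+// Illustrative example (hypothetical code, not taken from the surrounding
+// source): for a guarded arm such as `match opt { Some(x) if x > 0 => x, _ => 0 }`,
+// the binding `x` gets a `ForGuard` entry: the guard is compiled against
+// `ref_for_guard` (a reference aliasing `for_arm_body`), while the arm body
+// itself uses `for_arm_body`.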
+
+#[derive(Debug)]
+struct GuardFrameLocal {
+ id: LocalVarId,
+}
+
+impl GuardFrameLocal {
+ fn new(id: LocalVarId, _binding_mode: BindingMode) -> Self {
+ GuardFrameLocal { id }
+ }
+}
+
+#[derive(Debug)]
+struct GuardFrame {
+ /// These are the id's of names that are bound by patterns of the
+ /// arm of *this* guard.
+ ///
+ /// (Frames higher up the stack will have the id's bound in arms
+ /// further out, such as in a case like:
+ ///
+ /// match E1 {
+ /// P1(id1) if (... (match E2 { P2(id2) if ... => B2 })) => B1,
+ /// }
+ ///
+ /// here, when building for FIXME.
+ locals: Vec<GuardFrameLocal>,
+}
+
+/// `ForGuard` indicates whether we are talking about:
+/// 1. The variable for use outside of guard expressions, or
+/// 2. The temp that holds a reference to (1.), which is actually what the
+/// guard expressions see.
+#[derive(Copy, Clone, Debug, PartialEq, Eq)]
+enum ForGuard {
+ RefWithinGuard,
+ OutsideGuard,
+}
+
+impl LocalsForNode {
+ fn local_id(&self, for_guard: ForGuard) -> Local {
+ match (self, for_guard) {
+ (&LocalsForNode::One(local_id), ForGuard::OutsideGuard)
+ | (
+ &LocalsForNode::ForGuard { ref_for_guard: local_id, .. },
+ ForGuard::RefWithinGuard,
+ )
+ | (&LocalsForNode::ForGuard { for_arm_body: local_id, .. }, ForGuard::OutsideGuard) => {
+ local_id
+ }
+
+ (&LocalsForNode::One(_), ForGuard::RefWithinGuard) => {
+ bug!("anything with one local should never be within a guard.")
+ }
+ }
+ }
+}
+
+struct CFG<'tcx> {
+ basic_blocks: IndexVec<BasicBlock, BasicBlockData<'tcx>>,
+}
+
+rustc_index::newtype_index! {
+ struct ScopeId { .. }
+}
+
+#[derive(Debug)]
+enum NeedsTemporary {
+ /// Use this variant when whatever you are converting with `as_operand`
+ /// is the last thing you are converting. This means that if we introduced
+ /// an intermediate temporary, we'd only read it immediately afterwards, so we
+ /// can avoid creating it in the first place.
+ No,
+ /// For all cases where you aren't sure or that are too expensive to compute
+ /// for now. It is always safe to fall back to this.
+ Maybe,
+}
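+
+// Rough illustration: when lowering a binary expression such as `lhs + rhs`,
+// the operand converted last can use `NeedsTemporary::No` (nothing lowered
+// afterwards can invalidate it), while the earlier operand uses
+// `NeedsTemporary::Maybe`, since it is not obvious there that a temporary can
+// be skipped.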
+
+///////////////////////////////////////////////////////////////////////////
+/// The `BlockAnd` "monad" packages up the new basic block along with a
+/// produced value (sometimes just unit, of course). The `unpack!`
+/// macro (and methods below) makes working with `BlockAnd` much more
+/// convenient.
+
+#[must_use = "if you don't use one of these results, you're leaving a dangling edge"]
+struct BlockAnd<T>(BasicBlock, T);
+
+trait BlockAndExtension {
+ fn and<T>(self, v: T) -> BlockAnd<T>;
+ fn unit(self) -> BlockAnd<()>;
+}
+
+impl BlockAndExtension for BasicBlock {
+ fn and<T>(self, v: T) -> BlockAnd<T> {
+ BlockAnd(self, v)
+ }
+
+ fn unit(self) -> BlockAnd<()> {
+ BlockAnd(self, ())
+ }
+}
+
+/// Update a block pointer and return the value.
+/// Use it like `let x = unpack!(block = self.foo(block, foo))`.
+macro_rules! unpack {
+ ($x:ident = $c:expr) => {{
+ let BlockAnd(b, v) = $c;
+ $x = b;
+ v
+ }};
+
+ ($c:expr) => {{
+ let BlockAnd(b, ()) = $c;
+ b
+ }};
+}
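+
+// The second, single-expression form is for calls that return `BlockAnd<()>`,
+// where only the resulting block is of interest, e.g. (illustrative)
+// `let return_block = unpack!(builder.in_breakable_scope(..));`.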
+
+///////////////////////////////////////////////////////////////////////////
+/// The main entry point for building MIR for a function.
+
+struct ArgInfo<'tcx>(
+ Ty<'tcx>,
+ Option<Span>,
+ Option<&'tcx hir::Param<'tcx>>,
+ Option<ImplicitSelfKind>,
+);
+
+fn construct_fn<'tcx, A>(
+ thir: &Thir<'tcx>,
+ infcx: &InferCtxt<'_, 'tcx>,
+ fn_def: ty::WithOptConstParam<LocalDefId>,
+ fn_id: hir::HirId,
+ arguments: A,
+ safety: Safety,
+ abi: Abi,
+ return_ty: Ty<'tcx>,
+ return_ty_span: Span,
+ body: &'tcx hir::Body<'tcx>,
+ expr: ExprId,
+ span_with_body: Span,
+) -> Body<'tcx>
+where
+ A: Iterator<Item = ArgInfo<'tcx>>,
+{
+ let arguments: Vec<_> = arguments.collect();
+
+ let tcx = infcx.tcx;
+ let span = tcx.hir().span(fn_id);
+
+ let mut builder = Builder::new(
+ thir,
+ infcx,
+ fn_def,
+ fn_id,
+ span_with_body,
+ arguments.len(),
+ safety,
+ return_ty,
+ return_ty_span,
+ body.generator_kind,
+ );
+
+ let call_site_scope =
+ region::Scope { id: body.value.hir_id.local_id, data: region::ScopeData::CallSite };
+ let arg_scope =
+ region::Scope { id: body.value.hir_id.local_id, data: region::ScopeData::Arguments };
+ let source_info = builder.source_info(span);
+ let call_site_s = (call_site_scope, source_info);
+ unpack!(builder.in_scope(call_site_s, LintLevel::Inherited, |builder| {
+ let arg_scope_s = (arg_scope, source_info);
+ // Attribute epilogue to function's closing brace
+ let fn_end = span_with_body.shrink_to_hi();
+ let return_block =
+ unpack!(builder.in_breakable_scope(None, Place::return_place(), fn_end, |builder| {
+ Some(builder.in_scope(arg_scope_s, LintLevel::Inherited, |builder| {
+ builder.args_and_body(
+ START_BLOCK,
+ fn_def.did,
+ &arguments,
+ arg_scope,
+ &thir[expr],
+ )
+ }))
+ }));
+ let source_info = builder.source_info(fn_end);
+ builder.cfg.terminate(return_block, source_info, TerminatorKind::Return);
+ builder.build_drop_trees();
+ return_block.unit()
+ }));
+
+ let spread_arg = if abi == Abi::RustCall {
+ // RustCall pseudo-ABI untuples the last argument.
+ Some(Local::new(arguments.len()))
+ } else {
+ None
+ };
+
+ let mut body = builder.finish();
+ body.spread_arg = spread_arg;
+ body
+}
+
+fn construct_const<'a, 'tcx>(
+ thir: &'a Thir<'tcx>,
+ infcx: &'a InferCtxt<'a, 'tcx>,
+ expr: ExprId,
+ def: ty::WithOptConstParam<LocalDefId>,
+ hir_id: hir::HirId,
+ const_ty: Ty<'tcx>,
+ const_ty_span: Span,
+) -> Body<'tcx> {
+ let tcx = infcx.tcx;
+ let span = tcx.hir().span(hir_id);
+ let mut builder = Builder::new(
+ thir,
+ infcx,
+ def,
+ hir_id,
+ span,
+ 0,
+ Safety::Safe,
+ const_ty,
+ const_ty_span,
+ None,
+ );
+
+ let mut block = START_BLOCK;
+ unpack!(block = builder.expr_into_dest(Place::return_place(), block, &thir[expr]));
+
+ let source_info = builder.source_info(span);
+ builder.cfg.terminate(block, source_info, TerminatorKind::Return);
+
+ builder.build_drop_trees();
+
+ builder.finish()
+}
+
+/// Construct MIR for an item that has had errors in type checking.
+///
+/// This is required because we may still want to run MIR passes on an item
+/// with type errors, but normal MIR construction can't handle that in general.
+fn construct_error<'a, 'tcx>(
+ infcx: &'a InferCtxt<'a, 'tcx>,
+ def: ty::WithOptConstParam<LocalDefId>,
+ hir_id: hir::HirId,
+ body_id: hir::BodyId,
+ body_owner_kind: hir::BodyOwnerKind,
+ err: ErrorGuaranteed,
+) -> Body<'tcx> {
+ let tcx = infcx.tcx;
+ let span = tcx.hir().span(hir_id);
+ let ty = tcx.ty_error();
+ let generator_kind = tcx.hir().body(body_id).generator_kind;
+ let num_params = match body_owner_kind {
+ hir::BodyOwnerKind::Fn => tcx.hir().fn_decl_by_hir_id(hir_id).unwrap().inputs.len(),
+ hir::BodyOwnerKind::Closure => {
+ if generator_kind.is_some() {
+ // Generators have an implicit `self` parameter *and* a possibly
+ // implicit resume parameter.
+ 2
+ } else {
+ // The implicit self parameter adds another local in MIR.
+ 1 + tcx.hir().fn_decl_by_hir_id(hir_id).unwrap().inputs.len()
+ }
+ }
+ hir::BodyOwnerKind::Const => 0,
+ hir::BodyOwnerKind::Static(_) => 0,
+ };
+ let mut cfg = CFG { basic_blocks: IndexVec::new() };
+ let mut source_scopes = IndexVec::new();
+ let mut local_decls = IndexVec::from_elem_n(LocalDecl::new(ty, span), 1);
+
+ cfg.start_new_block();
+ source_scopes.push(SourceScopeData {
+ span,
+ parent_scope: None,
+ inlined: None,
+ inlined_parent_scope: None,
+ local_data: ClearCrossCrate::Set(SourceScopeLocalData {
+ lint_root: hir_id,
+ safety: Safety::Safe,
+ }),
+ });
+ let source_info = SourceInfo { span, scope: OUTERMOST_SOURCE_SCOPE };
+
+ // Some MIR passes will expect the number of parameters to match the
+ // function declaration.
+ for _ in 0..num_params {
+ local_decls.push(LocalDecl::with_source_info(ty, source_info));
+ }
+ cfg.terminate(START_BLOCK, source_info, TerminatorKind::Unreachable);
+
+ let mut body = Body::new(
+ MirSource::item(def.did.to_def_id()),
+ cfg.basic_blocks,
+ source_scopes,
+ local_decls,
+ IndexVec::new(),
+ num_params,
+ vec![],
+ span,
+ generator_kind,
+ Some(err),
+ );
+ body.generator.as_mut().map(|gen| gen.yield_ty = Some(ty));
+ body
+}
+
+impl<'a, 'tcx> Builder<'a, 'tcx> {
+ fn new(
+ thir: &'a Thir<'tcx>,
+ infcx: &'a InferCtxt<'a, 'tcx>,
+ def: ty::WithOptConstParam<LocalDefId>,
+ hir_id: hir::HirId,
+ span: Span,
+ arg_count: usize,
+ safety: Safety,
+ return_ty: Ty<'tcx>,
+ return_span: Span,
+ generator_kind: Option<GeneratorKind>,
+ ) -> Builder<'a, 'tcx> {
+ let tcx = infcx.tcx;
+ let attrs = tcx.hir().attrs(hir_id);
+ // Some functions always have overflow checks enabled;
+ // however, they may not get codegen'd, depending on
+ // the settings for the crate they are codegened in.
+ let mut check_overflow = tcx.sess.contains_name(attrs, sym::rustc_inherit_overflow_checks);
+ // Respect -C overflow-checks.
+ check_overflow |= tcx.sess.overflow_checks();
+ // Constants always need overflow checks.
+ check_overflow |= matches!(
+ tcx.hir().body_owner_kind(def.did),
+ hir::BodyOwnerKind::Const | hir::BodyOwnerKind::Static(_)
+ );
+
+ let lint_level = LintLevel::Explicit(hir_id);
+ let param_env = tcx.param_env(def.did);
+ let mut builder = Builder {
+ thir,
+ tcx,
+ infcx,
+ typeck_results: tcx.typeck_opt_const_arg(def),
+ region_scope_tree: tcx.region_scope_tree(def.did),
+ param_env,
+ def_id: def.did.to_def_id(),
+ hir_id,
+ parent_module: tcx.parent_module(hir_id).to_def_id(),
+ check_overflow,
+ cfg: CFG { basic_blocks: IndexVec::new() },
+ fn_span: span,
+ arg_count,
+ generator_kind,
+ scopes: scope::Scopes::new(),
+ block_context: BlockContext::new(),
+ source_scopes: IndexVec::new(),
+ source_scope: OUTERMOST_SOURCE_SCOPE,
+ guard_context: vec![],
+ in_scope_unsafe: safety,
+ local_decls: IndexVec::from_elem_n(LocalDecl::new(return_ty, return_span), 1),
+ canonical_user_type_annotations: IndexVec::new(),
+ upvar_mutbls: vec![],
+ var_indices: Default::default(),
+ unit_temp: None,
+ var_debug_info: vec![],
+ };
+
+ assert_eq!(builder.cfg.start_new_block(), START_BLOCK);
+ assert_eq!(
+ builder.new_source_scope(span, lint_level, Some(safety)),
+ OUTERMOST_SOURCE_SCOPE
+ );
+ builder.source_scopes[OUTERMOST_SOURCE_SCOPE].parent_scope = None;
+
+ builder
+ }
+
+ fn finish(self) -> Body<'tcx> {
+ for (index, block) in self.cfg.basic_blocks.iter().enumerate() {
+ if block.terminator.is_none() {
+ span_bug!(self.fn_span, "no terminator on block {:?}", index);
+ }
+ }
+
+ Body::new(
+ MirSource::item(self.def_id),
+ self.cfg.basic_blocks,
+ self.source_scopes,
+ self.local_decls,
+ self.canonical_user_type_annotations,
+ self.arg_count,
+ self.var_debug_info,
+ self.fn_span,
+ self.generator_kind,
+ self.typeck_results.tainted_by_errors,
+ )
+ }
+
+ fn args_and_body(
+ &mut self,
+ mut block: BasicBlock,
+ fn_def_id: LocalDefId,
+ arguments: &[ArgInfo<'tcx>],
+ argument_scope: region::Scope,
+ expr: &Expr<'tcx>,
+ ) -> BlockAnd<()> {
+ // Allocate locals for the function arguments
+ for &ArgInfo(ty, _, arg_opt, _) in arguments.iter() {
+ let source_info =
+ SourceInfo::outermost(arg_opt.map_or(self.fn_span, |arg| arg.pat.span));
+ let arg_local = self.local_decls.push(LocalDecl::with_source_info(ty, source_info));
+
+ // If this is a simple binding pattern, give debuginfo a nice name.
+ if let Some(arg) = arg_opt && let Some(ident) = arg.pat.simple_ident() {
+ self.var_debug_info.push(VarDebugInfo {
+ name: ident.name,
+ source_info,
+ value: VarDebugInfoContents::Place(arg_local.into()),
+ });
+ }
+ }
+
+ let tcx = self.tcx;
+ let tcx_hir = tcx.hir();
+ let hir_typeck_results = self.typeck_results;
+
+ // In analyze_closure() in upvar.rs we gathered a list of upvars used by an
+ // indexed closure and stored it in a map called closure_min_captures in
+ // TypeckResults, keyed by the closure's DefId. Here, we run through that
+ // list of captures for the given closure and use the necessary information
+ // to create upvar debuginfo and to fill `self.upvar_mutbls`.
+ if hir_typeck_results.closure_min_captures.get(&fn_def_id).is_some() {
+ let mut closure_env_projs = vec![];
+ let mut closure_ty = self.local_decls[ty::CAPTURE_STRUCT_LOCAL].ty;
+ if let ty::Ref(_, ty, _) = closure_ty.kind() {
+ closure_env_projs.push(ProjectionElem::Deref);
+ closure_ty = *ty;
+ }
+ let upvar_substs = match closure_ty.kind() {
+ ty::Closure(_, substs) => ty::UpvarSubsts::Closure(substs),
+ ty::Generator(_, substs, _) => ty::UpvarSubsts::Generator(substs),
+ _ => span_bug!(self.fn_span, "upvars with non-closure env ty {:?}", closure_ty),
+ };
+ let def_id = self.def_id.as_local().unwrap();
+ let capture_syms = tcx.symbols_for_closure_captures((def_id, fn_def_id));
+ let capture_tys = upvar_substs.upvar_tys();
+ let captures_with_tys = hir_typeck_results
+ .closure_min_captures_flattened(fn_def_id)
+ .zip(capture_tys.zip(capture_syms));
+
+ self.upvar_mutbls = captures_with_tys
+ .enumerate()
+ .map(|(i, (captured_place, (ty, sym)))| {
+ let capture = captured_place.info.capture_kind;
+ let var_id = match captured_place.place.base {
+ HirPlaceBase::Upvar(upvar_id) => upvar_id.var_path.hir_id,
+ _ => bug!("Expected an upvar"),
+ };
+
+ let mutability = captured_place.mutability;
+
+ let mut projs = closure_env_projs.clone();
+ projs.push(ProjectionElem::Field(Field::new(i), ty));
+ match capture {
+ ty::UpvarCapture::ByValue => {}
+ ty::UpvarCapture::ByRef(..) => {
+ projs.push(ProjectionElem::Deref);
+ }
+ };
+
+ self.var_debug_info.push(VarDebugInfo {
+ name: *sym,
+ source_info: SourceInfo::outermost(tcx_hir.span(var_id)),
+ value: VarDebugInfoContents::Place(Place {
+ local: ty::CAPTURE_STRUCT_LOCAL,
+ projection: tcx.intern_place_elems(&projs),
+ }),
+ });
+
+ mutability
+ })
+ .collect();
+ }
+
+ let mut scope = None;
+ // Bind the argument patterns
+ for (index, arg_info) in arguments.iter().enumerate() {
+ // Function arguments always get the first Local indices after the return place
+ let local = Local::new(index + 1);
+ let place = Place::from(local);
+ let &ArgInfo(_, opt_ty_info, arg_opt, ref self_binding) = arg_info;
+
+ // Make sure we drop (parts of) the argument even when not matched on.
+ self.schedule_drop(
+ arg_opt.as_ref().map_or(expr.span, |arg| arg.pat.span),
+ argument_scope,
+ local,
+ DropKind::Value,
+ );
+
+ let Some(arg) = arg_opt else {
+ continue;
+ };
+ let pat = match tcx.hir().get(arg.pat.hir_id) {
+ Node::Pat(pat) => pat,
+ node => bug!("pattern became {:?}", node),
+ };
+ let pattern = pat_from_hir(tcx, self.param_env, self.typeck_results, pat);
+ let original_source_scope = self.source_scope;
+ let span = pattern.span;
+ self.set_correct_source_scope_for_arg(arg.hir_id, original_source_scope, span);
+ match *pattern.kind {
+ // Don't introduce extra copies for simple bindings
+ PatKind::Binding {
+ mutability,
+ var,
+ mode: BindingMode::ByValue,
+ subpattern: None,
+ ..
+ } => {
+ self.local_decls[local].mutability = mutability;
+ self.local_decls[local].source_info.scope = self.source_scope;
+ self.local_decls[local].local_info = if let Some(kind) = self_binding {
+ Some(Box::new(LocalInfo::User(ClearCrossCrate::Set(
+ BindingForm::ImplicitSelf(*kind),
+ ))))
+ } else {
+ let binding_mode = ty::BindingMode::BindByValue(mutability);
+ Some(Box::new(LocalInfo::User(ClearCrossCrate::Set(BindingForm::Var(
+ VarBindingForm {
+ binding_mode,
+ opt_ty_info,
+ opt_match_place: Some((Some(place), span)),
+ pat_span: span,
+ },
+ )))))
+ };
+ self.var_indices.insert(var, LocalsForNode::One(local));
+ }
+ _ => {
+ scope = self.declare_bindings(
+ scope,
+ expr.span,
+ &pattern,
+ matches::ArmHasGuard(false),
+ Some((Some(&place), span)),
+ );
+ let place_builder = PlaceBuilder::from(local);
+ unpack!(block = self.place_into_pattern(block, pattern, place_builder, false));
+ }
+ }
+ self.source_scope = original_source_scope;
+ }
+
+ // Enter the argument pattern bindings source scope, if it exists.
+ if let Some(source_scope) = scope {
+ self.source_scope = source_scope;
+ }
+
+ self.expr_into_dest(Place::return_place(), block, &expr)
+ }
+
+ fn set_correct_source_scope_for_arg(
+ &mut self,
+ arg_hir_id: hir::HirId,
+ original_source_scope: SourceScope,
+ pattern_span: Span,
+ ) {
+ let tcx = self.tcx;
+ let current_root = tcx.maybe_lint_level_root_bounded(arg_hir_id, self.hir_id);
+ let parent_root = tcx.maybe_lint_level_root_bounded(
+ self.source_scopes[original_source_scope]
+ .local_data
+ .as_ref()
+ .assert_crate_local()
+ .lint_root,
+ self.hir_id,
+ );
+ if current_root != parent_root {
+ self.source_scope =
+ self.new_source_scope(pattern_span, LintLevel::Explicit(current_root), None);
+ }
+ }
+
+ fn get_unit_temp(&mut self) -> Place<'tcx> {
+ match self.unit_temp {
+ Some(tmp) => tmp,
+ None => {
+ let ty = self.tcx.mk_unit();
+ let fn_span = self.fn_span;
+ let tmp = self.temp(ty, fn_span);
+ self.unit_temp = Some(tmp);
+ tmp
+ }
+ }
+ }
+}
+
+fn parse_float_into_constval<'tcx>(
+ num: Symbol,
+ float_ty: ty::FloatTy,
+ neg: bool,
+) -> Option<ConstValue<'tcx>> {
+ parse_float_into_scalar(num, float_ty, neg).map(ConstValue::Scalar)
+}
+
+pub(crate) fn parse_float_into_scalar(
+ num: Symbol,
+ float_ty: ty::FloatTy,
+ neg: bool,
+) -> Option<Scalar> {
+ let num = num.as_str();
+ match float_ty {
+ ty::FloatTy::F32 => {
+ let Ok(rust_f) = num.parse::<f32>() else { return None };
+ let mut f = num.parse::<Single>().unwrap_or_else(|e| {
+ panic!("apfloat::ieee::Single failed to parse `{}`: {:?}", num, e)
+ });
+
+ assert!(
+ u128::from(rust_f.to_bits()) == f.to_bits(),
+ "apfloat::ieee::Single gave different result for `{}`: \
+ {}({:#x}) vs Rust's {}({:#x})",
+ rust_f,
+ f,
+ f.to_bits(),
+ Single::from_bits(rust_f.to_bits().into()),
+ rust_f.to_bits()
+ );
+
+ if neg {
+ f = -f;
+ }
+
+ Some(Scalar::from_f32(f))
+ }
+ ty::FloatTy::F64 => {
+ let Ok(rust_f) = num.parse::<f64>() else { return None };
+ let mut f = num.parse::<Double>().unwrap_or_else(|e| {
+ panic!("apfloat::ieee::Double failed to parse `{}`: {:?}", num, e)
+ });
+
+ assert!(
+ u128::from(rust_f.to_bits()) == f.to_bits(),
+ "apfloat::ieee::Double gave different result for `{}`: \
+ {}({:#x}) vs Rust's {}({:#x})",
+ rust_f,
+ f,
+ f.to_bits(),
+ Double::from_bits(rust_f.to_bits().into()),
+ rust_f.to_bits()
+ );
+
+ if neg {
+ f = -f;
+ }
+
+ Some(Scalar::from_f64(f))
+ }
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////
+// Builder methods are broken up into modules, depending on what kind
+// of thing is being lowered. Note that they use the `unpack` macro
+// above extensively.
+
+mod block;
+mod cfg;
+mod expr;
+mod matches;
+mod misc;
+mod scope;
+
+pub(crate) use expr::category::Category as ExprCategory;
diff --git a/compiler/rustc_mir_build/src/build/scope.rs b/compiler/rustc_mir_build/src/build/scope.rs
new file mode 100644
index 000000000..b2fd9f25b
--- /dev/null
+++ b/compiler/rustc_mir_build/src/build/scope.rs
@@ -0,0 +1,1395 @@
+/*!
+Managing the scope stack. The scopes are tied to lexical scopes, so as
+we descend the THIR, we push a scope on the stack, build its
+contents, and then pop it off. Every scope is named by a
+`region::Scope`.
+
+### SEME Regions
+
+When pushing a new [Scope], we record the current point in the graph (a
+basic block); this marks the entry to the scope. We then generate more
+stuff in the control-flow graph. Whenever the scope is exited, either
+via a `break` or `return` or just by fallthrough, that marks an exit
+from the scope. Each lexical scope thus corresponds to a single-entry,
+multiple-exit (SEME) region in the control-flow graph.
+
+For now, we record the `region::Scope` of each SEME region for later reference
+(see caveat in next paragraph). This is because destruction scopes are tied to
+them. This may change in the future so that MIR lowering determines its own
+destruction scopes.
+
+### Not so SEME Regions
+
+In the course of building matches, it sometimes happens that certain code
+(namely guards) gets executed multiple times. This means that the lexical scope
+may in fact correspond to multiple, disjoint SEME regions. So in fact our
+mapping is from one scope to a vector of SEME regions. Since the SEME regions
+are disjoint, the mapping is still one-to-one for the set of SEME regions that
+we're currently in.
+
+Also in matches, the scopes assigned to arms are not always even SEME regions!
+Each arm has a single region with one entry for each pattern. We manually
+manipulate the scheduled drops in this scope to avoid dropping things multiple
+times.
+
+### Drops
+
+The primary purpose for scopes is to insert drops: while building
+the contents, we also accumulate places that need to be dropped upon
+exit from each scope. This is done by calling `schedule_drop`. Once a
+drop is scheduled, whenever we branch out we will insert drops of all
+those places onto the outgoing edge. Note that we don't know the full
+set of scheduled drops up front, and so whenever we exit from the
+scope we only drop the values scheduled thus far. For example, consider
+the scope S corresponding to this loop:
+
+```
+# let cond = true;
+loop {
+ let x = ..;
+ if cond { break; }
+ let y = ..;
+}
+```
+
+When processing the `let x`, we will add one drop to the scope for
+`x`. The break will then insert a drop for `x`. When we process `let
+y`, we will add another drop (in fact, to a subscope, but let's ignore
+that for now); any later drops would also drop `y`.
+
+### Early exit
+
+There are numerous "normal" ways to exit a scope early: `break`,
+`continue`, `return` (panics are handled separately). Whenever an
+early exit occurs, the method `break_scope` is called. It is given the
+current point in execution where the early exit occurs, as well as the
+scope you want to branch to (note that all early exits go to some
+other enclosing scope). `break_scope` will record the set of drops currently
+scheduled in a [DropTree]. Later, before `in_breakable_scope` exits, the drops
+will be added to the CFG.
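+
+For example, lowering a `break`-with-value conceptually ends up calling
+something like this (an illustrative sketch; `scope`, `value` and `source_info`
+stand in for the values taken from the THIR expression):
+
+```ignore (illustrative)
+unpack!(block = this.break_scope(block, Some(value), BreakableTarget::Break(scope), source_info));
+```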
+
+Panics are handled in a similar fashion, except that the drops are added to the
+MIR once the rest of the function has finished being lowered. If a terminator
+can panic, call `diverge_from(block)`, where `block` is the block containing
+the terminator.
+
+### Breakable scopes
+
+In addition to the normal scope stack, we track a loop scope stack
+that contains only loops and breakable blocks. It tracks where a `break`,
+`continue` or `return` should go to.
+
+*/
+
+use std::mem;
+
+use crate::build::{BlockAnd, BlockAndExtension, BlockFrame, Builder, CFG};
+use rustc_data_structures::fx::FxHashMap;
+use rustc_index::vec::IndexVec;
+use rustc_middle::middle::region;
+use rustc_middle::mir::*;
+use rustc_middle::thir::{Expr, LintLevel};
+
+use rustc_span::{Span, DUMMY_SP};
+
+#[derive(Debug)]
+pub struct Scopes<'tcx> {
+ scopes: Vec<Scope>,
+
+ /// The current set of breakable scopes. See module comment for more details.
+ breakable_scopes: Vec<BreakableScope<'tcx>>,
+
+ /// The scope of the innermost if-then currently being lowered.
+ if_then_scope: Option<IfThenScope>,
+
+ /// Drops that need to be done on unwind paths. See the comment on
+ /// [DropTree] for more details.
+ unwind_drops: DropTree,
+
+ /// Drops that need to be done on paths to the `GeneratorDrop` terminator.
+ generator_drops: DropTree,
+}
+
+#[derive(Debug)]
+struct Scope {
+ /// The source scope this scope was created in.
+ source_scope: SourceScope,
+
+ /// the region span of this scope within source code.
+ region_scope: region::Scope,
+
+ /// set of places to drop when exiting this scope. This starts
+ /// out empty but grows as variables are declared during the
+ /// building process. This is a stack, so we always drop from the
+ /// end of the vector (top of the stack) first.
+ drops: Vec<DropData>,
+
+ moved_locals: Vec<Local>,
+
+ /// The drop index that will drop everything in and below this scope on an
+ /// unwind path.
+ cached_unwind_block: Option<DropIdx>,
+
+ /// The drop index that will drop everything in and below this scope on a
+ /// generator drop path.
+ cached_generator_drop_block: Option<DropIdx>,
+}
+
+#[derive(Clone, Copy, Debug)]
+struct DropData {
+ /// The `Span` where drop obligation was incurred (typically where place was
+ /// declared)
+ source_info: SourceInfo,
+
+ /// local to drop
+ local: Local,
+
+ /// Whether this is a value Drop or a StorageDead.
+ kind: DropKind,
+}
+
+#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
+pub(crate) enum DropKind {
+ Value,
+ Storage,
+}
+
+#[derive(Debug)]
+struct BreakableScope<'tcx> {
+ /// Region scope of the loop
+ region_scope: region::Scope,
+ /// The destination of the loop/block expression itself (i.e., where to put
+ /// the result of a `break` or `return` expression)
+ break_destination: Place<'tcx>,
+ /// Drops that happen on the `break`/`return` path.
+ break_drops: DropTree,
+ /// Drops that happen on the `continue` path.
+ continue_drops: Option<DropTree>,
+}
+
+#[derive(Debug)]
+struct IfThenScope {
+ /// The if-then scope or arm scope
+ region_scope: region::Scope,
+ /// Drops that happen on the `else` path.
+ else_drops: DropTree,
+}
+
+/// The target of an expression that breaks out of a scope
+#[derive(Clone, Copy, Debug)]
+pub(crate) enum BreakableTarget {
+ Continue(region::Scope),
+ Break(region::Scope),
+ Return,
+}
+
+rustc_index::newtype_index! {
+ struct DropIdx { .. }
+}
+
+const ROOT_NODE: DropIdx = DropIdx::from_u32(0);
+
+/// A tree of drops that we have deferred lowering. It's used for:
+///
+/// * Drops on unwind paths
+/// * Drops on generator drop paths (when a suspended generator is dropped)
+/// * Drops on return and loop exit paths
+/// * Drops on the else path in an `if let` chain
+///
+/// Once no more nodes can be added to the tree, we lower it to MIR in one go
+/// in `build_mir`.
+#[derive(Debug)]
+struct DropTree {
+ /// Drops in the tree.
+ drops: IndexVec<DropIdx, (DropData, DropIdx)>,
+ /// Map for finding the inverse of the `next_drop` relation:
+ ///
+ /// `previous_drops[(drops[i].1, drops[i].0.local, drops[i].0.kind)] == i`
+ previous_drops: FxHashMap<(DropIdx, Local, DropKind), DropIdx>,
+ /// Edges into the `DropTree` that need to be added once it's lowered.
+ entry_points: Vec<(DropIdx, BasicBlock)>,
+}
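+
+// Illustrative sketch of the tree shape: if values `x` and then `y` are
+// scheduled on the same exit path, the chain is root <- x <- y, i.e.
+// `drops[1] == (drop(x), ROOT_NODE)` and `drops[2] == (drop(y), DropIdx::from_u32(1))`;
+// an entry point attached to `y`'s node drops `y`, then `x`, then reaches the
+// root block (reverse declaration order).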
+
+impl Scope {
+ /// Whether there's anything to do for the cleanup path, that is,
+ /// when unwinding through this scope. This includes destructors,
+ /// but not StorageDead statements, which don't get emitted at all
+ /// for unwinding, for several reasons:
+ /// * clang doesn't emit llvm.lifetime.end for C++ unwinding
+ /// * LLVM's memory dependency analysis can't handle it atm
+ /// * polluting the cleanup MIR with StorageDead creates
+ /// landing pads even though there are no actual destructors
+ /// * freeing up stack space has no effect during unwinding
+ /// Note that for generators we do emit StorageDeads, for use by
+ /// optimizations in the MIR generator transform.
+ fn needs_cleanup(&self) -> bool {
+ self.drops.iter().any(|drop| match drop.kind {
+ DropKind::Value => true,
+ DropKind::Storage => false,
+ })
+ }
+
+ fn invalidate_cache(&mut self) {
+ self.cached_unwind_block = None;
+ self.cached_generator_drop_block = None;
+ }
+}
+
+/// A trait that determines how [DropTree] creates its blocks and
+/// links to any entry nodes.
+trait DropTreeBuilder<'tcx> {
+ /// Create a new block for the tree. This should call either
+ /// `cfg.start_new_block()` or `cfg.start_new_cleanup_block()`.
+ fn make_block(cfg: &mut CFG<'tcx>) -> BasicBlock;
+
+ /// Links a block outside the drop tree, `from`, to the block `to` inside
+ /// the drop tree.
+ fn add_entry(cfg: &mut CFG<'tcx>, from: BasicBlock, to: BasicBlock);
+}
+
+impl DropTree {
+ fn new() -> Self {
+ // The root node of the tree doesn't represent a drop, but instead
+ // represents the block in the tree that should be jumped to once all
+ // of the required drops have been performed.
+ let fake_source_info = SourceInfo::outermost(DUMMY_SP);
+ let fake_data =
+ DropData { source_info: fake_source_info, local: Local::MAX, kind: DropKind::Storage };
+ let drop_idx = DropIdx::MAX;
+ let drops = IndexVec::from_elem_n((fake_data, drop_idx), 1);
+ Self { drops, entry_points: Vec::new(), previous_drops: FxHashMap::default() }
+ }
+
+ fn add_drop(&mut self, drop: DropData, next: DropIdx) -> DropIdx {
+ let drops = &mut self.drops;
+ *self
+ .previous_drops
+ .entry((next, drop.local, drop.kind))
+ .or_insert_with(|| drops.push((drop, next)))
+ }
+
+ fn add_entry(&mut self, from: BasicBlock, to: DropIdx) {
+ debug_assert!(to < self.drops.next_index());
+ self.entry_points.push((to, from));
+ }
+
+ /// Builds the MIR for a given drop tree.
+ ///
+ /// `blocks` should have the same length as `self.drops`, and may have its
+ /// first value set to some already existing block.
+ fn build_mir<'tcx, T: DropTreeBuilder<'tcx>>(
+ &mut self,
+ cfg: &mut CFG<'tcx>,
+ blocks: &mut IndexVec<DropIdx, Option<BasicBlock>>,
+ ) {
+ debug!("DropTree::build_mir(drops = {:#?})", self);
+ assert_eq!(blocks.len(), self.drops.len());
+
+ self.assign_blocks::<T>(cfg, blocks);
+ self.link_blocks(cfg, blocks)
+ }
+
+ /// Assign blocks for all of the drops in the drop tree that need them.
+ fn assign_blocks<'tcx, T: DropTreeBuilder<'tcx>>(
+ &mut self,
+ cfg: &mut CFG<'tcx>,
+ blocks: &mut IndexVec<DropIdx, Option<BasicBlock>>,
+ ) {
+ // StorageDead statements can share blocks with each other and also with
+ // a Drop terminator. We iterate through the drops to find which drops
+ // need their own block.
+ #[derive(Clone, Copy)]
+ enum Block {
+ // This drop is unreachable
+ None,
+ // This drop is only reachable through the `StorageDead` with the
+ // specified index.
+ Shares(DropIdx),
+ // This drop has more than one way of being reached, or it is
+ // branched to from outside the tree, or its predecessor is a
+ // `Value` drop.
+ Own,
+ }
+
+ let mut needs_block = IndexVec::from_elem(Block::None, &self.drops);
+ if blocks[ROOT_NODE].is_some() {
+ // In some cases (such as drops for `continue`) the root node
+ // already has a block. In this case, make sure that we don't
+ // override it.
+ needs_block[ROOT_NODE] = Block::Own;
+ }
+
+ // Sort so that we only need to check the last value.
+ let entry_points = &mut self.entry_points;
+ entry_points.sort();
+
+ for (drop_idx, drop_data) in self.drops.iter_enumerated().rev() {
+ if entry_points.last().map_or(false, |entry_point| entry_point.0 == drop_idx) {
+ let block = *blocks[drop_idx].get_or_insert_with(|| T::make_block(cfg));
+ needs_block[drop_idx] = Block::Own;
+ while entry_points.last().map_or(false, |entry_point| entry_point.0 == drop_idx) {
+ let entry_block = entry_points.pop().unwrap().1;
+ T::add_entry(cfg, entry_block, block);
+ }
+ }
+ match needs_block[drop_idx] {
+ Block::None => continue,
+ Block::Own => {
+ blocks[drop_idx].get_or_insert_with(|| T::make_block(cfg));
+ }
+ Block::Shares(pred) => {
+ blocks[drop_idx] = blocks[pred];
+ }
+ }
+ if let DropKind::Value = drop_data.0.kind {
+ needs_block[drop_data.1] = Block::Own;
+ } else if drop_idx != ROOT_NODE {
+ match &mut needs_block[drop_data.1] {
+ pred @ Block::None => *pred = Block::Shares(drop_idx),
+ pred @ Block::Shares(_) => *pred = Block::Own,
+ Block::Own => (),
+ }
+ }
+ }
+
+ debug!("assign_blocks: blocks = {:#?}", blocks);
+ assert!(entry_points.is_empty());
+ }
+
+ fn link_blocks<'tcx>(
+ &self,
+ cfg: &mut CFG<'tcx>,
+ blocks: &IndexVec<DropIdx, Option<BasicBlock>>,
+ ) {
+ for (drop_idx, drop_data) in self.drops.iter_enumerated().rev() {
+ let Some(block) = blocks[drop_idx] else { continue };
+ match drop_data.0.kind {
+ DropKind::Value => {
+ let terminator = TerminatorKind::Drop {
+ target: blocks[drop_data.1].unwrap(),
+ // The caller will handle this if needed.
+ unwind: None,
+ place: drop_data.0.local.into(),
+ };
+ cfg.terminate(block, drop_data.0.source_info, terminator);
+ }
+ // Root nodes don't correspond to a drop.
+ DropKind::Storage if drop_idx == ROOT_NODE => {}
+ DropKind::Storage => {
+ let stmt = Statement {
+ source_info: drop_data.0.source_info,
+ kind: StatementKind::StorageDead(drop_data.0.local),
+ };
+ cfg.push(block, stmt);
+ let target = blocks[drop_data.1].unwrap();
+ if target != block {
+ // Diagnostics don't use this `Span` but debuginfo
+ // might. Since we don't want breakpoints to be placed
+ // here, especially when this is on an unwind path, we
+ // use `DUMMY_SP`.
+ let source_info = SourceInfo { span: DUMMY_SP, ..drop_data.0.source_info };
+ let terminator = TerminatorKind::Goto { target };
+ cfg.terminate(block, source_info, terminator);
+ }
+ }
+ }
+ }
+ }
+}
+
+impl<'tcx> Scopes<'tcx> {
+ pub(crate) fn new() -> Self {
+ Self {
+ scopes: Vec::new(),
+ breakable_scopes: Vec::new(),
+ if_then_scope: None,
+ unwind_drops: DropTree::new(),
+ generator_drops: DropTree::new(),
+ }
+ }
+
+ fn push_scope(&mut self, region_scope: (region::Scope, SourceInfo), vis_scope: SourceScope) {
+ debug!("push_scope({:?})", region_scope);
+ self.scopes.push(Scope {
+ source_scope: vis_scope,
+ region_scope: region_scope.0,
+ drops: vec![],
+ moved_locals: vec![],
+ cached_unwind_block: None,
+ cached_generator_drop_block: None,
+ });
+ }
+
+ fn pop_scope(&mut self, region_scope: (region::Scope, SourceInfo)) -> Scope {
+ let scope = self.scopes.pop().unwrap();
+ assert_eq!(scope.region_scope, region_scope.0);
+ scope
+ }
+
+ fn scope_index(&self, region_scope: region::Scope, span: Span) -> usize {
+ self.scopes
+ .iter()
+ .rposition(|scope| scope.region_scope == region_scope)
+ .unwrap_or_else(|| span_bug!(span, "region_scope {:?} does not enclose", region_scope))
+ }
+
+ /// Returns the topmost active scope, which is known to be alive until
+ /// the next scope expression.
+ fn topmost(&self) -> region::Scope {
+ self.scopes.last().expect("topmost_scope: no scopes present").region_scope
+ }
+}
+
+impl<'a, 'tcx> Builder<'a, 'tcx> {
+ // Adding and removing scopes
+ // ==========================
+ // Start a breakable scope, which tracks where `continue`, `break` and
+ // `return` should branch to.
+ pub(crate) fn in_breakable_scope<F>(
+ &mut self,
+ loop_block: Option<BasicBlock>,
+ break_destination: Place<'tcx>,
+ span: Span,
+ f: F,
+ ) -> BlockAnd<()>
+ where
+ F: FnOnce(&mut Builder<'a, 'tcx>) -> Option<BlockAnd<()>>,
+ {
+ let region_scope = self.scopes.topmost();
+ let scope = BreakableScope {
+ region_scope,
+ break_destination,
+ break_drops: DropTree::new(),
+ continue_drops: loop_block.map(|_| DropTree::new()),
+ };
+ self.scopes.breakable_scopes.push(scope);
+ let normal_exit_block = f(self);
+ let breakable_scope = self.scopes.breakable_scopes.pop().unwrap();
+ assert!(breakable_scope.region_scope == region_scope);
+ let break_block = self.build_exit_tree(breakable_scope.break_drops, None);
+ if let Some(drops) = breakable_scope.continue_drops {
+ self.build_exit_tree(drops, loop_block);
+ }
+ match (normal_exit_block, break_block) {
+ (Some(block), None) | (None, Some(block)) => block,
+ (None, None) => self.cfg.start_new_block().unit(),
+ (Some(normal_block), Some(exit_block)) => {
+ let target = self.cfg.start_new_block();
+ let source_info = self.source_info(span);
+ self.cfg.terminate(
+ unpack!(normal_block),
+ source_info,
+ TerminatorKind::Goto { target },
+ );
+ self.cfg.terminate(
+ unpack!(exit_block),
+ source_info,
+ TerminatorKind::Goto { target },
+ );
+ target.unit()
+ }
+ }
+ }
+
+ /// Start an if-then scope which tracks drops for `if` expressions and `if`
+ /// guards.
+ ///
+ /// For an if-let chain:
+ ///
+ /// if let Some(x) = a && let Some(y) = b && let Some(z) = c { ... }
+ ///
+ /// There are three possible ways the condition can be false, and we may have
+ /// to drop `x`, both `x` and `y`, or neither, depending on which binding fails.
+ /// To handle this correctly we use a `DropTree` in a similar way to a
+ /// `loop` expression and 'break' out on all of the 'else' paths.
+ ///
+ /// Notes:
+ /// - We don't need to keep a stack of scopes in the `Builder` because the
+ /// 'else' paths will only leave the innermost scope.
+ /// - This is also used for match guards.
+ pub(crate) fn in_if_then_scope<F>(
+ &mut self,
+ region_scope: region::Scope,
+ f: F,
+ ) -> (BasicBlock, BasicBlock)
+ where
+ F: FnOnce(&mut Builder<'a, 'tcx>) -> BlockAnd<()>,
+ {
+ let scope = IfThenScope { region_scope, else_drops: DropTree::new() };
+ let previous_scope = mem::replace(&mut self.scopes.if_then_scope, Some(scope));
+
+ let then_block = unpack!(f(self));
+
+ let if_then_scope = mem::replace(&mut self.scopes.if_then_scope, previous_scope).unwrap();
+ assert!(if_then_scope.region_scope == region_scope);
+
+ let else_block = self
+ .build_exit_tree(if_then_scope.else_drops, None)
+ .map_or_else(|| self.cfg.start_new_block(), |else_block_and| unpack!(else_block_and));
+
+ (then_block, else_block)
+ }
+
+ pub(crate) fn in_opt_scope<F, R>(
+ &mut self,
+ opt_scope: Option<(region::Scope, SourceInfo)>,
+ f: F,
+ ) -> BlockAnd<R>
+ where
+ F: FnOnce(&mut Builder<'a, 'tcx>) -> BlockAnd<R>,
+ {
+ debug!("in_opt_scope(opt_scope={:?})", opt_scope);
+ if let Some(region_scope) = opt_scope {
+ self.push_scope(region_scope);
+ }
+ let mut block;
+ let rv = unpack!(block = f(self));
+ if let Some(region_scope) = opt_scope {
+ unpack!(block = self.pop_scope(region_scope, block));
+ }
+ debug!("in_scope: exiting opt_scope={:?} block={:?}", opt_scope, block);
+ block.and(rv)
+ }
+
+ /// Convenience wrapper that pushes a scope and then executes `f`
+ /// to build its contents, popping the scope afterwards.
+ pub(crate) fn in_scope<F, R>(
+ &mut self,
+ region_scope: (region::Scope, SourceInfo),
+ lint_level: LintLevel,
+ f: F,
+ ) -> BlockAnd<R>
+ where
+ F: FnOnce(&mut Builder<'a, 'tcx>) -> BlockAnd<R>,
+ {
+ debug!("in_scope(region_scope={:?})", region_scope);
+ let source_scope = self.source_scope;
+ let tcx = self.tcx;
+ if let LintLevel::Explicit(current_hir_id) = lint_level {
+ // Use `maybe_lint_level_root_bounded` with `root_lint_level` as a bound
+ // to avoid adding Hir dependencies on our parents.
+ // We estimate the true lint roots here to avoid creating a lot of source scopes.
+
+ let parent_root = tcx.maybe_lint_level_root_bounded(
+ self.source_scopes[source_scope].local_data.as_ref().assert_crate_local().lint_root,
+ self.hir_id,
+ );
+ let current_root = tcx.maybe_lint_level_root_bounded(current_hir_id, self.hir_id);
+
+ if parent_root != current_root {
+ self.source_scope = self.new_source_scope(
+ region_scope.1.span,
+ LintLevel::Explicit(current_root),
+ None,
+ );
+ }
+ }
+ self.push_scope(region_scope);
+ let mut block;
+ let rv = unpack!(block = f(self));
+ unpack!(block = self.pop_scope(region_scope, block));
+ self.source_scope = source_scope;
+ debug!("in_scope: exiting region_scope={:?} block={:?}", region_scope, block);
+ block.and(rv)
+ }
+
+ /// Push a scope onto the stack. You can then build code in this
+ /// scope and call `pop_scope` afterwards. Note that these two
+ /// calls must be paired; using `in_scope` as a convenience
+ /// wrapper may be preferable.
+ pub(crate) fn push_scope(&mut self, region_scope: (region::Scope, SourceInfo)) {
+ self.scopes.push_scope(region_scope, self.source_scope);
+ }
+
+ /// Pops a scope, which should have region scope `region_scope`,
+ /// adding any drops onto the end of `block` that are needed.
+ /// This must match 1-to-1 with `push_scope`.
+ pub(crate) fn pop_scope(
+ &mut self,
+ region_scope: (region::Scope, SourceInfo),
+ mut block: BasicBlock,
+ ) -> BlockAnd<()> {
+ debug!("pop_scope({:?}, {:?})", region_scope, block);
+
+ block = self.leave_top_scope(block);
+
+ self.scopes.pop_scope(region_scope);
+
+ block.unit()
+ }
+
+ /// Sets up the drops for breaking from `block` to `target`.
+ pub(crate) fn break_scope(
+ &mut self,
+ mut block: BasicBlock,
+ value: Option<&Expr<'tcx>>,
+ target: BreakableTarget,
+ source_info: SourceInfo,
+ ) -> BlockAnd<()> {
+ let span = source_info.span;
+
+ let get_scope_index = |scope: region::Scope| {
+ // find the loop-scope by its `region::Scope`.
+ self.scopes
+ .breakable_scopes
+ .iter()
+ .rposition(|breakable_scope| breakable_scope.region_scope == scope)
+ .unwrap_or_else(|| span_bug!(span, "no enclosing breakable scope found"))
+ };
+ let (break_index, destination) = match target {
+ BreakableTarget::Return => {
+ let scope = &self.scopes.breakable_scopes[0];
+ if scope.break_destination != Place::return_place() {
+ span_bug!(span, "`return` in item with no return scope");
+ }
+ (0, Some(scope.break_destination))
+ }
+ BreakableTarget::Break(scope) => {
+ let break_index = get_scope_index(scope);
+ let scope = &self.scopes.breakable_scopes[break_index];
+ (break_index, Some(scope.break_destination))
+ }
+ BreakableTarget::Continue(scope) => {
+ let break_index = get_scope_index(scope);
+ (break_index, None)
+ }
+ };
+
+ if let Some(destination) = destination {
+ if let Some(value) = value {
+ debug!("stmt_expr Break val block_context.push(SubExpr)");
+ self.block_context.push(BlockFrame::SubExpr);
+ unpack!(block = self.expr_into_dest(destination, block, value));
+ self.block_context.pop();
+ } else {
+ self.cfg.push_assign_unit(block, source_info, destination, self.tcx)
+ }
+ } else {
+ assert!(value.is_none(), "`return` and `break` should have a destination");
+ if self.tcx.sess.instrument_coverage() {
+ // Unlike `break` and `return`, which push an `Assign` statement to MIR, from which
+ // a Coverage code region can be generated, `continue` needs no `Assign`; but
+ // without one, the `InstrumentCoverage` MIR pass cannot generate a code region for
+ // `continue`. Coverage will be missing unless we add a dummy `Assign` to MIR.
+ self.add_dummy_assignment(span, block, source_info);
+ }
+ }
+
+ let region_scope = self.scopes.breakable_scopes[break_index].region_scope;
+ let scope_index = self.scopes.scope_index(region_scope, span);
+ let drops = if destination.is_some() {
+ &mut self.scopes.breakable_scopes[break_index].break_drops
+ } else {
+ self.scopes.breakable_scopes[break_index].continue_drops.as_mut().unwrap()
+ };
+ let mut drop_idx = ROOT_NODE;
+ for scope in &self.scopes.scopes[scope_index + 1..] {
+ for drop in &scope.drops {
+ drop_idx = drops.add_drop(*drop, drop_idx);
+ }
+ }
+ drops.add_entry(block, drop_idx);
+
+ // `build_drop_trees` doesn't have access to our source_info, so we
+ // create a dummy terminator now. `TerminatorKind::Resume` is used
+ // because MIR type checking will panic if it hasn't been overwritten.
+ self.cfg.terminate(block, source_info, TerminatorKind::Resume);
+
+ self.cfg.start_new_block().unit()
+ }
+
+ pub(crate) fn break_for_else(
+ &mut self,
+ block: BasicBlock,
+ target: region::Scope,
+ source_info: SourceInfo,
+ ) {
+ let scope_index = self.scopes.scope_index(target, source_info.span);
+ let if_then_scope = self
+ .scopes
+ .if_then_scope
+ .as_mut()
+ .unwrap_or_else(|| span_bug!(source_info.span, "no if-then scope found"));
+
+ assert_eq!(if_then_scope.region_scope, target, "breaking to incorrect scope");
+
+ let mut drop_idx = ROOT_NODE;
+ let drops = &mut if_then_scope.else_drops;
+ for scope in &self.scopes.scopes[scope_index + 1..] {
+ for drop in &scope.drops {
+ drop_idx = drops.add_drop(*drop, drop_idx);
+ }
+ }
+ drops.add_entry(block, drop_idx);
+
+ // `build_drop_trees` doesn't have access to our source_info, so we
+ // create a dummy terminator now. `TerminatorKind::Resume` is used
+ // because MIR type checking will panic if it hasn't been overwritten.
+ self.cfg.terminate(block, source_info, TerminatorKind::Resume);
+ }
+
+ // Add a dummy `Assign` statement to the CFG, with the span for the source code's `continue`
+ // statement.
+ fn add_dummy_assignment(&mut self, span: Span, block: BasicBlock, source_info: SourceInfo) {
+ let local_decl = LocalDecl::new(self.tcx.mk_unit(), span).internal();
+ let temp_place = Place::from(self.local_decls.push(local_decl));
+ self.cfg.push_assign_unit(block, source_info, temp_place, self.tcx);
+ }
+
+ fn leave_top_scope(&mut self, block: BasicBlock) -> BasicBlock {
+ // If we are emitting a `drop` statement, we need to have the cached
+ // diverge cleanup pads ready in case that drop panics.
+ let needs_cleanup = self.scopes.scopes.last().map_or(false, |scope| scope.needs_cleanup());
+ let is_generator = self.generator_kind.is_some();
+ let unwind_to = if needs_cleanup { self.diverge_cleanup() } else { DropIdx::MAX };
+
+ let scope = self.scopes.scopes.last().expect("leave_top_scope called with no scopes");
+ unpack!(build_scope_drops(
+ &mut self.cfg,
+ &mut self.scopes.unwind_drops,
+ scope,
+ block,
+ unwind_to,
+ is_generator && needs_cleanup,
+ self.arg_count,
+ ))
+ }
+
+ /// Creates a new source scope, nested in the current one.
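+ ///
+ /// A hedged, illustrative example of source that is assumed to introduce a
+ /// nested source scope (the safety level changes inside the `unsafe` block):
+ /// ```ignore (illustrative)
+ /// fn caller(ptr: *const u8) -> u8 {
+ ///     unsafe { *ptr } // the unsafe block gets its own source scope
+ /// }
+ /// ```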
+ pub(crate) fn new_source_scope(
+ &mut self,
+ span: Span,
+ lint_level: LintLevel,
+ safety: Option<Safety>,
+ ) -> SourceScope {
+ let parent = self.source_scope;
+ debug!(
+ "new_source_scope({:?}, {:?}, {:?}) - parent({:?})={:?}",
+ span,
+ lint_level,
+ safety,
+ parent,
+ self.source_scopes.get(parent)
+ );
+ let scope_local_data = SourceScopeLocalData {
+ lint_root: if let LintLevel::Explicit(lint_root) = lint_level {
+ lint_root
+ } else {
+ self.source_scopes[parent].local_data.as_ref().assert_crate_local().lint_root
+ },
+ safety: safety.unwrap_or_else(|| {
+ self.source_scopes[parent].local_data.as_ref().assert_crate_local().safety
+ }),
+ };
+ self.source_scopes.push(SourceScopeData {
+ span,
+ parent_scope: Some(parent),
+ inlined: None,
+ inlined_parent_scope: None,
+ local_data: ClearCrossCrate::Set(scope_local_data),
+ })
+ }
+
+ /// Given a span and the current source scope, make a SourceInfo.
+ pub(crate) fn source_info(&self, span: Span) -> SourceInfo {
+ SourceInfo { span, scope: self.source_scope }
+ }
+
+ // Finding scopes
+ // ==============
+ /// Returns the scope that we should use as the lifetime of an
+ /// operand. Basically, an operand must live until it is consumed.
+ /// This is similar to, but not quite the same as, the temporary
+ /// scope (which can be larger or smaller).
+ ///
+ /// Consider:
+ /// ```ignore (illustrative)
+ /// let x = foo(bar(X, Y));
+ /// ```
+ /// We wish to pop the storage for X and Y after `bar()` is
+ /// called, not after the whole `let` is completed.
+ ///
+ /// As another example, if the second argument diverges:
+ /// ```ignore (illustrative)
+ /// foo(Box::new(2), panic!())
+ /// ```
+ /// We would allocate the box but then free it on the unwinding
+ /// path; we would also emit a free on the 'success' path from
+ /// `panic!`, but that will turn out to be removed as dead code.
+ pub(crate) fn local_scope(&self) -> region::Scope {
+ self.scopes.topmost()
+ }
+
+ // Scheduling drops
+ // ================
+ pub(crate) fn schedule_drop_storage_and_value(
+ &mut self,
+ span: Span,
+ region_scope: region::Scope,
+ local: Local,
+ ) {
+ self.schedule_drop(span, region_scope, local, DropKind::Storage);
+ self.schedule_drop(span, region_scope, local, DropKind::Value);
+ }
+
+ /// Indicates that `local` should be dropped on exit from `region_scope`.
+ ///
+ /// When called with `DropKind::Storage`, `local` shouldn't be the return
+ /// place, or a function parameter.
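+ ///
+ /// A hedged, illustrative sketch of how a `let` binding is assumed to use
+ /// this (the names are made up; see `schedule_drop_storage_and_value` above):
+ /// ```ignore (illustrative)
+ /// {
+ ///     let s = String::new();
+ ///     // schedule_drop(span, block_scope, s_local, DropKind::Storage)
+ ///     // schedule_drop(span, block_scope, s_local, DropKind::Value)
+ /// } // both drops are emitted when the block's region scope is exited
+ /// ```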
+ pub(crate) fn schedule_drop(
+ &mut self,
+ span: Span,
+ region_scope: region::Scope,
+ local: Local,
+ drop_kind: DropKind,
+ ) {
+ let needs_drop = match drop_kind {
+ DropKind::Value => {
+ if !self.local_decls[local].ty.needs_drop(self.tcx, self.param_env) {
+ return;
+ }
+ true
+ }
+ DropKind::Storage => {
+ if local.index() <= self.arg_count {
+ span_bug!(
+ span,
+ "`schedule_drop` called with local {:?} and arg_count {}",
+ local,
+ self.arg_count,
+ )
+ }
+ false
+ }
+ };
+
+ // When building drops, we try to cache chains of drops to reduce the
+ // number of `DropTree::add_drop` calls. This, however, means that
+ // whenever we add a drop into a scope which already had some entries
+ // in the drop tree built (and thus, cached) for it, we must invalidate
+ // all caches which might branch into the scope which had a drop just
+ // added to it. This is necessary, because otherwise some other code
+ // might use the cache to branch into an already-built chain of drops,
+ // essentially ignoring the newly added drop.
+ //
+ // For example, consider two scopes with a drop in each. These
+ // are built and thus the caches are filled:
+ //
+ // +--------------------------------------------------------+
+ // | +---------------------------------+ |
+ // | | +--------+ +-------------+ | +---------------+ |
+ // | | | return | <-+ | drop(outer) | <-+ | drop(middle) | |
+ // | | +--------+ +-------------+ | +---------------+ |
+ // | +------------|outer_scope cache|--+ |
+ // +------------------------------|middle_scope cache|------+
+ //
+ // Now, a new, inner-most scope is added along with a new drop into
+ // both inner-most and outer-most scopes:
+ //
+ // +------------------------------------------------------------+
+ // | +----------------------------------+ |
+ // | | +--------+ +-------------+ | +---------------+ | +-------------+
+ // | | | return | <+ | drop(new) | <-+ | drop(middle) | <--+| drop(inner) |
+ // | | +--------+ | | drop(outer) | | +---------------+ | +-------------+
+ // | | +-+ +-------------+ | |
+ // | +---|invalid outer_scope cache|----+ |
+ // +---------------------|invalid middle_scope cache|-----------+
+ //
+ // If, when adding `drop(new)`, we do not invalidate the cached blocks for both
+ // outer_scope and middle_scope, then, when building drops for the inner (right-most)
+ // scope, the old, cached blocks, without `drop(new)` will get used, producing the
+ // wrong results.
+ //
+ // Note that this code iterates scopes from the inner-most to the outer-most,
+ // invalidating the cache of each scope visited. This way only the bare minimum of
+ // caches gets invalidated; i.e., if a new drop is added into the middle scope, the
+ // cache of the outer scope stays intact.
+ //
+ // Since we only cache drops for the unwind path and the generator drop
+ // path, we only need to invalidate the cache for drops that happen on
+ // the unwind or generator drop paths. This means that for
+ // non-generators we don't need to invalidate caches for `DropKind::Storage`.
+ let invalidate_caches = needs_drop || self.generator_kind.is_some();
+ for scope in self.scopes.scopes.iter_mut().rev() {
+ if invalidate_caches {
+ scope.invalidate_cache();
+ }
+
+ if scope.region_scope == region_scope {
+ let region_scope_span = region_scope.span(self.tcx, &self.region_scope_tree);
+ // Attribute scope exit drops to scope's closing brace.
+ let scope_end = self.tcx.sess.source_map().end_point(region_scope_span);
+
+ scope.drops.push(DropData {
+ source_info: SourceInfo { span: scope_end, scope: scope.source_scope },
+ local,
+ kind: drop_kind,
+ });
+
+ return;
+ }
+ }
+
+ span_bug!(span, "region scope {:?} not in scope to drop {:?}", region_scope, local);
+ }
+
+ /// Indicates that the "local operand" stored in `local` is
+ /// *moved* at some point during execution (see `local_scope` for
+ /// more information about what a "local operand" is -- in short,
+ /// it's an intermediate operand created as part of preparing some
+ /// MIR instruction). We use this information to suppress
+ /// redundant drops on the non-unwind paths. This results in less
+ /// MIR, but also avoids spurious borrow check errors
+ /// (c.f. #64391).
+ ///
+ /// Example: when compiling the call to `foo` here:
+ ///
+ /// ```ignore (illustrative)
+ /// foo(bar(), ...)
+ /// ```
+ ///
+ /// we would evaluate `bar()` to an operand `_X`. We would also
+ /// schedule `_X` to be dropped when the expression scope for
+ /// `foo(bar())` is exited. This is relevant, for example, if one of the
+ /// later arguments were to unwind (it ensures that `_X` still gets
+ /// dropped). However, if no unwind occurs, then `_X` will be
+ /// unconditionally consumed by the `call`:
+ ///
+ /// ```ignore (illustrative)
+ /// bb {
+ /// ...
+ /// _R = CALL(foo, _X, ...)
+ /// }
+ /// ```
+ ///
+ /// However, `_X` is still registered to be dropped, and so if we
+ /// do nothing else, we would generate a `DROP(_X)` that occurs
+ /// after the call. This will later be optimized out by the
+ /// drop-elaboration code, but in the meantime it can lead to
+ /// spurious borrow-check errors -- the problem, ironically, is
+ /// not the `DROP(_X)` itself, but the (spurious) unwind pathways
+ /// that it creates. See #64391 for an example.
+ pub(crate) fn record_operands_moved(&mut self, operands: &[Operand<'tcx>]) {
+ let local_scope = self.local_scope();
+ let scope = self.scopes.scopes.last_mut().unwrap();
+
+ assert_eq!(scope.region_scope, local_scope, "local scope is not the topmost scope!");
+
+ // look for moves of a local variable, like `MOVE(_X)`
+ let locals_moved = operands.iter().flat_map(|operand| match operand {
+ Operand::Copy(_) | Operand::Constant(_) => None,
+ Operand::Move(place) => place.as_local(),
+ });
+
+ for local in locals_moved {
+ // check if we have a Drop for this operand and -- if so
+ // -- add it to the list of moved operands. Note that this
+ // local might not have been an operand created for this
+ // call; it could come from other places too.
+ if scope.drops.iter().any(|drop| drop.local == local && drop.kind == DropKind::Value) {
+ scope.moved_locals.push(local);
+ }
+ }
+ }
+
+ // Other
+ // =====
+ /// Returns the [DropIdx] for the innermost drop if the function unwound at
+ /// this point. The `DropIdx` will be created if it doesn't already exist.
+ fn diverge_cleanup(&mut self) -> DropIdx {
+ let is_generator = self.generator_kind.is_some();
+ let (uncached_scope, mut cached_drop) = self
+ .scopes
+ .scopes
+ .iter()
+ .enumerate()
+ .rev()
+ .find_map(|(scope_idx, scope)| {
+ scope.cached_unwind_block.map(|cached_block| (scope_idx + 1, cached_block))
+ })
+ .unwrap_or((0, ROOT_NODE));
+
+ for scope in &mut self.scopes.scopes[uncached_scope..] {
+ for drop in &scope.drops {
+ if is_generator || drop.kind == DropKind::Value {
+ cached_drop = self.scopes.unwind_drops.add_drop(*drop, cached_drop);
+ }
+ }
+ scope.cached_unwind_block = Some(cached_drop);
+ }
+
+ cached_drop
+ }
+
+ /// Prepares to create a path that performs all required cleanup for a
+ /// terminator that can unwind at the given basic block.
+ ///
+ /// This path terminates in Resume. The path isn't created until after all
+ /// of the non-unwind paths in this item have been lowered.
+ pub(crate) fn diverge_from(&mut self, start: BasicBlock) {
+ debug_assert!(
+ matches!(
+ self.cfg.block_data(start).terminator().kind,
+ TerminatorKind::Assert { .. }
+ | TerminatorKind::Call { .. }
+ | TerminatorKind::Drop { .. }
+ | TerminatorKind::DropAndReplace { .. }
+ | TerminatorKind::FalseUnwind { .. }
+ | TerminatorKind::InlineAsm { .. }
+ ),
+ "diverge_from called on block with terminator that cannot unwind."
+ );
+
+ let next_drop = self.diverge_cleanup();
+ self.scopes.unwind_drops.add_entry(start, next_drop);
+ }
+
+ /// Sets up a path that performs all required cleanup for dropping a
+ /// generator, starting from the given block that ends in
+ /// [TerminatorKind::Yield].
+ ///
+ /// This path terminates in GeneratorDrop.
+ pub(crate) fn generator_drop_cleanup(&mut self, yield_block: BasicBlock) {
+ debug_assert!(
+ matches!(
+ self.cfg.block_data(yield_block).terminator().kind,
+ TerminatorKind::Yield { .. }
+ ),
+ "generator_drop_cleanup called on block with non-yield terminator."
+ );
+ let (uncached_scope, mut cached_drop) = self
+ .scopes
+ .scopes
+ .iter()
+ .enumerate()
+ .rev()
+ .find_map(|(scope_idx, scope)| {
+ scope.cached_generator_drop_block.map(|cached_block| (scope_idx + 1, cached_block))
+ })
+ .unwrap_or((0, ROOT_NODE));
+
+ for scope in &mut self.scopes.scopes[uncached_scope..] {
+ for drop in &scope.drops {
+ cached_drop = self.scopes.generator_drops.add_drop(*drop, cached_drop);
+ }
+ scope.cached_generator_drop_block = Some(cached_drop);
+ }
+
+ self.scopes.generator_drops.add_entry(yield_block, cached_drop);
+ }
+
+ /// Utility function for *non*-scope code to build its own drops.
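+ ///
+ /// An assumed, illustrative source form needing this: the old value of the
+ /// place is dropped and the new one written by a single `DropAndReplace`.
+ /// ```ignore (illustrative)
+ /// s = String::from("new"); // `s` already holds a value that must be dropped
+ /// ```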
+ pub(crate) fn build_drop_and_replace(
+ &mut self,
+ block: BasicBlock,
+ span: Span,
+ place: Place<'tcx>,
+ value: Operand<'tcx>,
+ ) -> BlockAnd<()> {
+ let source_info = self.source_info(span);
+ let next_target = self.cfg.start_new_block();
+
+ self.cfg.terminate(
+ block,
+ source_info,
+ TerminatorKind::DropAndReplace { place, value, target: next_target, unwind: None },
+ );
+ self.diverge_from(block);
+
+ next_target.unit()
+ }
+
+ /// Creates an `Assert` terminator and returns the success block.
+ /// If the boolean condition operand is not the expected value,
+ /// a runtime panic will be caused with the given message.
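+ ///
+ /// For orientation, an illustrative (assumed) source form that lowers
+ /// through such an `Assert` is an index bounds check:
+ /// ```ignore (illustrative)
+ /// let x = a[i]; // asserts `i < a.len()` before performing the access
+ /// ```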
+ pub(crate) fn assert(
+ &mut self,
+ block: BasicBlock,
+ cond: Operand<'tcx>,
+ expected: bool,
+ msg: AssertMessage<'tcx>,
+ span: Span,
+ ) -> BasicBlock {
+ let source_info = self.source_info(span);
+ let success_block = self.cfg.start_new_block();
+
+ self.cfg.terminate(
+ block,
+ source_info,
+ TerminatorKind::Assert { cond, expected, msg, target: success_block, cleanup: None },
+ );
+ self.diverge_from(block);
+
+ success_block
+ }
+
+ /// Unschedules any drops in the top scope.
+ ///
+ /// This is only needed for `match` arm scopes, because they have one
+ /// entrance per pattern, but only one exit.
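+ ///
+ /// A hedged, illustrative example of one arm scope with several entrances
+ /// (identifiers invented for illustration):
+ /// ```ignore (illustrative)
+ /// match v {
+ ///     Some(0) | Some(1) => f(), // two candidate entrances, one arm scope
+ ///     _ => g(),
+ /// }
+ /// ```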
+ pub(crate) fn clear_top_scope(&mut self, region_scope: region::Scope) {
+ let top_scope = self.scopes.scopes.last_mut().unwrap();
+
+ assert_eq!(top_scope.region_scope, region_scope);
+
+ top_scope.drops.clear();
+ top_scope.invalidate_cache();
+ }
+}
+
+/// Builds drops for `pop_scope` and `leave_top_scope`.
+fn build_scope_drops<'tcx>(
+ cfg: &mut CFG<'tcx>,
+ unwind_drops: &mut DropTree,
+ scope: &Scope,
+ mut block: BasicBlock,
+ mut unwind_to: DropIdx,
+ storage_dead_on_unwind: bool,
+ arg_count: usize,
+) -> BlockAnd<()> {
+ debug!("build_scope_drops({:?} -> {:?})", block, scope);
+
+ // Build up the drops in evaluation order. The end result will
+ // look like:
+ //
+ // [SDs, drops[n]] --..> [SDs, drop[1]] -> [SDs, drop[0]] -> [[SDs]]
+ // | | |
+ // : | |
+ // V V
+ // [drop[n]] -...-> [drop[1]] ------> [drop[0]] ------> [last_unwind_to]
+ //
+ // The horizontal arrows represent the execution path when the drops return
+ // successfully. The downwards arrows represent the execution path when the
+ // drops panic (panicking while unwinding will abort, so there's no need for
+ // another set of arrows).
+ //
+ // For generators, we unwind from a drop on a local to its StorageDead
+ // statement. For other functions we don't worry about StorageDead. The
+ // drops for the unwind path should have already been generated by
+ // `diverge_cleanup_gen`.
+
+ for drop_data in scope.drops.iter().rev() {
+ let source_info = drop_data.source_info;
+ let local = drop_data.local;
+
+ match drop_data.kind {
+ DropKind::Value => {
+ // `unwind_to` should drop the value that we're about to
+ // schedule. If dropping this value panics, then we continue
+ // with the *next* value on the unwind path.
+ debug_assert_eq!(unwind_drops.drops[unwind_to].0.local, drop_data.local);
+ debug_assert_eq!(unwind_drops.drops[unwind_to].0.kind, drop_data.kind);
+ unwind_to = unwind_drops.drops[unwind_to].1;
+
+ // If the operand has been moved, and we are not on an unwind
+ // path, then don't generate the drop. (We only take this into
+ // account for non-unwind paths so as not to disturb the
+ // caching mechanism.)
+ if scope.moved_locals.iter().any(|&o| o == local) {
+ continue;
+ }
+
+ unwind_drops.add_entry(block, unwind_to);
+
+ let next = cfg.start_new_block();
+ cfg.terminate(
+ block,
+ source_info,
+ TerminatorKind::Drop { place: local.into(), target: next, unwind: None },
+ );
+ block = next;
+ }
+ DropKind::Storage => {
+ if storage_dead_on_unwind {
+ debug_assert_eq!(unwind_drops.drops[unwind_to].0.local, drop_data.local);
+ debug_assert_eq!(unwind_drops.drops[unwind_to].0.kind, drop_data.kind);
+ unwind_to = unwind_drops.drops[unwind_to].1;
+ }
+ // Only temps and vars need their storage dead.
+ assert!(local.index() > arg_count);
+ cfg.push(block, Statement { source_info, kind: StatementKind::StorageDead(local) });
+ }
+ }
+ }
+ block.unit()
+}
+
+impl<'a, 'tcx: 'a> Builder<'a, 'tcx> {
+ /// Build a drop tree for a breakable scope.
+ ///
+ /// If `continue_block` is `Some`, then the tree is for `continue` inside a
+ /// loop. Otherwise this is for `break` or `return`.
+ fn build_exit_tree(
+ &mut self,
+ mut drops: DropTree,
+ continue_block: Option<BasicBlock>,
+ ) -> Option<BlockAnd<()>> {
+ let mut blocks = IndexVec::from_elem(None, &drops.drops);
+ blocks[ROOT_NODE] = continue_block;
+
+ drops.build_mir::<ExitScopes>(&mut self.cfg, &mut blocks);
+
+ // Link the exit drop tree to unwind drop tree.
+ if drops.drops.iter().any(|(drop, _)| drop.kind == DropKind::Value) {
+ let unwind_target = self.diverge_cleanup();
+ let mut unwind_indices = IndexVec::from_elem_n(unwind_target, 1);
+ for (drop_idx, drop_data) in drops.drops.iter_enumerated().skip(1) {
+ match drop_data.0.kind {
+ DropKind::Storage => {
+ if self.generator_kind.is_some() {
+ let unwind_drop = self
+ .scopes
+ .unwind_drops
+ .add_drop(drop_data.0, unwind_indices[drop_data.1]);
+ unwind_indices.push(unwind_drop);
+ } else {
+ unwind_indices.push(unwind_indices[drop_data.1]);
+ }
+ }
+ DropKind::Value => {
+ let unwind_drop = self
+ .scopes
+ .unwind_drops
+ .add_drop(drop_data.0, unwind_indices[drop_data.1]);
+ self.scopes
+ .unwind_drops
+ .add_entry(blocks[drop_idx].unwrap(), unwind_indices[drop_data.1]);
+ unwind_indices.push(unwind_drop);
+ }
+ }
+ }
+ }
+ blocks[ROOT_NODE].map(BasicBlock::unit)
+ }
+
+ /// Build the unwind and generator drop trees.
+ pub(crate) fn build_drop_trees(&mut self) {
+ if self.generator_kind.is_some() {
+ self.build_generator_drop_trees();
+ } else {
+ Self::build_unwind_tree(
+ &mut self.cfg,
+ &mut self.scopes.unwind_drops,
+ self.fn_span,
+ &mut None,
+ );
+ }
+ }
+
+ fn build_generator_drop_trees(&mut self) {
+ // Build the drop tree for dropping the generator while it's suspended.
+ let drops = &mut self.scopes.generator_drops;
+ let cfg = &mut self.cfg;
+ let fn_span = self.fn_span;
+ let mut blocks = IndexVec::from_elem(None, &drops.drops);
+ drops.build_mir::<GeneratorDrop>(cfg, &mut blocks);
+ if let Some(root_block) = blocks[ROOT_NODE] {
+ cfg.terminate(
+ root_block,
+ SourceInfo::outermost(fn_span),
+ TerminatorKind::GeneratorDrop,
+ );
+ }
+
+ // Build the drop tree for unwinding in the normal control flow paths.
+ let resume_block = &mut None;
+ let unwind_drops = &mut self.scopes.unwind_drops;
+ Self::build_unwind_tree(cfg, unwind_drops, fn_span, resume_block);
+
+ // Build the drop tree for unwinding when dropping a suspended
+ // generator.
+ //
+ // This is a separate tree from the standard unwind paths here, to
+ // prevent drop elaboration from creating drop flags that would have
+ // to be captured by the generator. I'm not sure how important this
+ // optimization is, but it is here.
+ for (drop_idx, drop_data) in drops.drops.iter_enumerated() {
+ if let DropKind::Value = drop_data.0.kind {
+ debug_assert!(drop_data.1 < drops.drops.next_index());
+ drops.entry_points.push((drop_data.1, blocks[drop_idx].unwrap()));
+ }
+ }
+ Self::build_unwind_tree(cfg, drops, fn_span, resume_block);
+ }
+
+ fn build_unwind_tree(
+ cfg: &mut CFG<'tcx>,
+ drops: &mut DropTree,
+ fn_span: Span,
+ resume_block: &mut Option<BasicBlock>,
+ ) {
+ let mut blocks = IndexVec::from_elem(None, &drops.drops);
+ blocks[ROOT_NODE] = *resume_block;
+ drops.build_mir::<Unwind>(cfg, &mut blocks);
+ if let (None, Some(resume)) = (*resume_block, blocks[ROOT_NODE]) {
+ cfg.terminate(resume, SourceInfo::outermost(fn_span), TerminatorKind::Resume);
+
+ *resume_block = blocks[ROOT_NODE];
+ }
+ }
+}
+
+// DropTreeBuilder implementations.
+
+struct ExitScopes;
+
+impl<'tcx> DropTreeBuilder<'tcx> for ExitScopes {
+ fn make_block(cfg: &mut CFG<'tcx>) -> BasicBlock {
+ cfg.start_new_block()
+ }
+ fn add_entry(cfg: &mut CFG<'tcx>, from: BasicBlock, to: BasicBlock) {
+ cfg.block_data_mut(from).terminator_mut().kind = TerminatorKind::Goto { target: to };
+ }
+}
+
+struct GeneratorDrop;
+
+impl<'tcx> DropTreeBuilder<'tcx> for GeneratorDrop {
+ fn make_block(cfg: &mut CFG<'tcx>) -> BasicBlock {
+ cfg.start_new_block()
+ }
+ fn add_entry(cfg: &mut CFG<'tcx>, from: BasicBlock, to: BasicBlock) {
+ let term = cfg.block_data_mut(from).terminator_mut();
+ if let TerminatorKind::Yield { ref mut drop, .. } = term.kind {
+ *drop = Some(to);
+ } else {
+ span_bug!(
+ term.source_info.span,
+ "cannot enter generator drop tree from {:?}",
+ term.kind
+ )
+ }
+ }
+}
+
+struct Unwind;
+
+impl<'tcx> DropTreeBuilder<'tcx> for Unwind {
+ fn make_block(cfg: &mut CFG<'tcx>) -> BasicBlock {
+ cfg.start_new_cleanup_block()
+ }
+ fn add_entry(cfg: &mut CFG<'tcx>, from: BasicBlock, to: BasicBlock) {
+ let term = cfg.block_data_mut(from).terminator_mut();
+ match &mut term.kind {
+ TerminatorKind::Drop { unwind, .. }
+ | TerminatorKind::DropAndReplace { unwind, .. }
+ | TerminatorKind::FalseUnwind { unwind, .. }
+ | TerminatorKind::Call { cleanup: unwind, .. }
+ | TerminatorKind::Assert { cleanup: unwind, .. }
+ | TerminatorKind::InlineAsm { cleanup: unwind, .. } => {
+ *unwind = Some(to);
+ }
+ TerminatorKind::Goto { .. }
+ | TerminatorKind::SwitchInt { .. }
+ | TerminatorKind::Resume
+ | TerminatorKind::Abort
+ | TerminatorKind::Return
+ | TerminatorKind::Unreachable
+ | TerminatorKind::Yield { .. }
+ | TerminatorKind::GeneratorDrop
+ | TerminatorKind::FalseEdge { .. } => {
+ span_bug!(term.source_info.span, "cannot unwind from {:?}", term.kind)
+ }
+ }
+ }
+}