author     Daniel Baumann <daniel.baumann@progress-linux.org>    2024-06-07 05:48:48 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>    2024-06-07 05:48:48 +0000
commit     ef24de24a82fe681581cc130f342363c47c0969a (patch)
tree       0d494f7e1a38b95c92426f58fe6eaa877303a86c /compiler/rustc_mir_build/src/build
parent     Releasing progress-linux version 1.74.1+dfsg1-1~progress7.99u1. (diff)
Merging upstream version 1.75.0+dfsg1.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'compiler/rustc_mir_build/src/build')
-rw-r--r--  compiler/rustc_mir_build/src/build/custom/mod.rs          3
-rw-r--r--  compiler/rustc_mir_build/src/build/expr/as_place.rs       4
-rw-r--r--  compiler/rustc_mir_build/src/build/expr/as_rvalue.rs     60
-rw-r--r--  compiler/rustc_mir_build/src/build/expr/as_temp.rs        2
-rw-r--r--  compiler/rustc_mir_build/src/build/expr/into.rs           2
-rw-r--r--  compiler/rustc_mir_build/src/build/expr/mod.rs            2
-rw-r--r--  compiler/rustc_mir_build/src/build/expr/stmt.rs          47
-rw-r--r--  compiler/rustc_mir_build/src/build/matches/mod.rs        86
-rw-r--r--  compiler/rustc_mir_build/src/build/matches/simplify.rs   52
-rw-r--r--  compiler/rustc_mir_build/src/build/matches/test.rs       87
-rw-r--r--  compiler/rustc_mir_build/src/build/misc.rs                4
-rw-r--r--  compiler/rustc_mir_build/src/build/mod.rs               140
-rw-r--r--  compiler/rustc_mir_build/src/build/scope.rs              88
13 files changed, 296 insertions, 281 deletions
diff --git a/compiler/rustc_mir_build/src/build/custom/mod.rs b/compiler/rustc_mir_build/src/build/custom/mod.rs
index e5c2cc6c7..3de2f45ad 100644
--- a/compiler/rustc_mir_build/src/build/custom/mod.rs
+++ b/compiler/rustc_mir_build/src/build/custom/mod.rs
@@ -48,7 +48,7 @@ pub(super) fn build_custom_mir<'tcx>(
         source: MirSource::item(did),
         phase: MirPhase::Built,
         source_scopes: IndexVec::new(),
-        generator: None,
+        coroutine: None,
         local_decls: IndexVec::new(),
         user_type_annotations: IndexVec::new(),
         arg_count: params.len(),
@@ -60,6 +60,7 @@ pub(super) fn build_custom_mir<'tcx>(
         tainted_by_errors: None,
         injection_phase: None,
         pass_count: 0,
+        function_coverage_info: None,
     };
 
     body.local_decls.push(LocalDecl::new(return_ty, return_ty_span));
diff --git a/compiler/rustc_mir_build/src/build/expr/as_place.rs b/compiler/rustc_mir_build/src/build/expr/as_place.rs
index 5bccba4fd..7e9191a37 100644
--- a/compiler/rustc_mir_build/src/build/expr/as_place.rs
+++ b/compiler/rustc_mir_build/src/build/expr/as_place.rs
@@ -75,7 +75,7 @@ pub(in crate::build) struct PlaceBuilder<'tcx> {
 
 /// Given a list of MIR projections, convert them to list of HIR ProjectionKind.
 /// The projections are truncated to represent a path that might be captured by a
-/// closure/generator. This implies the vector returned from this function doesn't contain
+/// closure/coroutine. This implies the vector returned from this function doesn't contain
 /// ProjectionElems `Downcast`, `ConstantIndex`, `Index`, or `Subslice` because those will never be
 /// part of a path that is captured by a closure. We stop applying projections once we see the first
 /// projection that isn't captured by a closure.
@@ -213,7 +213,7 @@ fn to_upvars_resolved_place_builder<'tcx>(
 /// projections.
 ///
 /// Supports only HIR projection kinds that represent a path that might be
-/// captured by a closure or a generator, i.e., an `Index` or a `Subslice`
+/// captured by a closure or a coroutine, i.e., an `Index` or a `Subslice`
 /// projection kinds are unsupported.
 fn strip_prefix<'a, 'tcx>(
     mut base_ty: Ty<'tcx>,
diff --git a/compiler/rustc_mir_build/src/build/expr/as_rvalue.rs b/compiler/rustc_mir_build/src/build/expr/as_rvalue.rs
index d4089eef4..eece8684e 100644
--- a/compiler/rustc_mir_build/src/build/expr/as_rvalue.rs
+++ b/compiler/rustc_mir_build/src/build/expr/as_rvalue.rs
@@ -181,9 +181,9 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
                 block = success;
 
                 // The `Box<T>` temporary created here is not a part of the HIR,
-                // and therefore is not considered during generator auto-trait
+                // and therefore is not considered during coroutine auto-trait
                 // determination. See the comment about `box` at `yield_in_scope`.
-                let result = this.local_decls.push(LocalDecl::new(expr.ty, expr_span).internal());
+                let result = this.local_decls.push(LocalDecl::new(expr.ty, expr_span));
                 this.cfg.push(
                     block,
                     Statement { source_info, kind: StatementKind::StorageLive(result) },
@@ -213,7 +213,9 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
        // Casting an enum to an integer is equivalent to computing the discriminant and casting the
        // discriminant. Previously every backend had to repeat the logic for this operation. Now we
        // create all the steps directly in MIR with operations all backends need to support anyway.
-        let (source, ty) = if let ty::Adt(adt_def, ..) = source.ty.kind() && adt_def.is_enum() {
+        let (source, ty) = if let ty::Adt(adt_def, ..) = source.ty.kind()
+            && adt_def.is_enum()
+        {
             let discr_ty = adt_def.repr().discr_type().to_ty(this.tcx);
             let temp = unpack!(block = this.as_temp(block, scope, source, Mutability::Not));
             let layout = this.tcx.layout_of(this.param_env.and(source.ty));
@@ -224,7 +226,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
                 discr,
                 Rvalue::Discriminant(temp.into()),
             );
-            let (op,ty) = (Operand::Move(discr), discr_ty);
+            let (op, ty) = (Operand::Move(discr), discr_ty);
 
             if let Abi::Scalar(scalar) = layout.unwrap().abi
                 && !scalar.is_always_valid(&this.tcx)
@@ -236,27 +238,30 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
                     block,
                     source_info,
                     unsigned_place,
-                    Rvalue::Cast(CastKind::IntToInt, Operand::Copy(discr), unsigned_ty));
+                    Rvalue::Cast(CastKind::IntToInt, Operand::Copy(discr), unsigned_ty),
+                );
 
                 let bool_ty = this.tcx.types.bool;
                 let range = scalar.valid_range(&this.tcx);
                 let merge_op =
-                    if range.start <= range.end {
-                        BinOp::BitAnd
-                    } else {
-                        BinOp::BitOr
-                    };
+                    if range.start <= range.end { BinOp::BitAnd } else { BinOp::BitOr };
 
                 let mut comparer = |range: u128, bin_op: BinOp| -> Place<'tcx> {
-                    let range_val =
-                        Const::from_bits(this.tcx, range, ty::ParamEnv::empty().and(unsigned_ty));
+                    let range_val = Const::from_bits(
+                        this.tcx,
+                        range,
+                        ty::ParamEnv::empty().and(unsigned_ty),
+                    );
                     let lit_op = this.literal_operand(expr.span, range_val);
                     let is_bin_op = this.temp(bool_ty, expr_span);
                     this.cfg.push_assign(
                         block,
                         source_info,
                         is_bin_op,
-                        Rvalue::BinaryOp(bin_op, Box::new((Operand::Copy(unsigned_place), lit_op))),
+                        Rvalue::BinaryOp(
+                            bin_op,
+                            Box::new((Operand::Copy(unsigned_place), lit_op)),
+                        ),
                     );
                     is_bin_op
                 };
@@ -270,7 +275,13 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
                         block,
                         source_info,
                         merge_place,
-                        Rvalue::BinaryOp(merge_op, Box::new((Operand::Move(start_place), Operand::Move(end_place)))),
+                        Rvalue::BinaryOp(
+                            merge_op,
+                            Box::new((
+                                Operand::Move(start_place),
+                                Operand::Move(end_place),
+                            )),
+                        ),
                     );
                     merge_place
                 };
@@ -278,19 +289,24 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
                     block,
                     Statement {
                         source_info,
-                        kind: StatementKind::Intrinsic(Box::new(NonDivergingIntrinsic::Assume(
-                            Operand::Move(assert_place),
-                        ))),
+                        kind: StatementKind::Intrinsic(Box::new(
+                            NonDivergingIntrinsic::Assume(Operand::Move(assert_place)),
+                        )),
                     },
                 );
             }
 
-            (op,ty)
-
+            (op, ty)
         } else {
             let ty = source.ty;
             let source = unpack!(
-                block = this.as_operand(block, scope, source, LocalInfo::Boring, NeedsTemporary::No)
+                block = this.as_operand(
+                    block,
+                    scope,
+                    source,
+                    LocalInfo::Boring,
+                    NeedsTemporary::No
+                )
             );
             (source, ty)
         };
@@ -471,11 +487,11 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
             .collect();
 
         let result = match args {
-            UpvarArgs::Generator(args) => {
+            UpvarArgs::Coroutine(args) => {
                 // We implicitly set the discriminant to 0. See
                 // librustc_mir/transform/deaggregator.rs for details.
                 let movability = movability.unwrap();
-                Box::new(AggregateKind::Generator(closure_id.to_def_id(), args, movability))
+                Box::new(AggregateKind::Coroutine(closure_id.to_def_id(), args, movability))
             }
             UpvarArgs::Closure(args) => {
                 Box::new(AggregateKind::Closure(closure_id.to_def_id(), args))
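The assume built above reduces to a small predicate over the discriminant's bit pattern: when the scalar's valid range does not wrap around the type's maximum, both bound checks must hold (hence BinOp::BitAnd); when it wraps, either suffices (hence BinOp::BitOr). A standalone sketch in plain Rust (illustrative function, not compiler code):

    // Does `x` lie in the scalar valid range [start, end], where the range
    // may wrap around the maximum value of the type (a niche range)?
    fn discr_in_valid_range(x: u128, start: u128, end: u128) -> bool {
        if start <= end {
            start <= x && x <= end // ordinary range: both bounds, BitAnd
        } else {
            start <= x || x <= end // wrapping range: either bound, BitOr
        }
    }

    fn main() {
        assert!(discr_in_valid_range(3, 0, 5));       // plain range
        assert!(discr_in_valid_range(250, 240, 10));  // wrapping range
        assert!(!discr_in_valid_range(100, 240, 10)); // outside the wrap
    }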
diff --git a/compiler/rustc_mir_build/src/build/expr/as_temp.rs b/compiler/rustc_mir_build/src/build/expr/as_temp.rs
index c8910c272..a4ab365fa 100644
--- a/compiler/rustc_mir_build/src/build/expr/as_temp.rs
+++ b/compiler/rustc_mir_build/src/build/expr/as_temp.rs
@@ -52,12 +52,10 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
             let local_info = match expr.kind {
                 ExprKind::StaticRef { def_id, .. } => {
                     assert!(!this.tcx.is_thread_local_static(def_id));
-                    local_decl.internal = true;
                     LocalInfo::StaticRef { def_id, is_thread_local: false }
                 }
                 ExprKind::ThreadLocalRef(def_id) => {
                     assert!(this.tcx.is_thread_local_static(def_id));
-                    local_decl.internal = true;
                     LocalInfo::StaticRef { def_id, is_thread_local: true }
                 }
                 ExprKind::NamedConst { def_id, .. } | ExprKind::ConstParam { def_id, .. } => {
diff --git a/compiler/rustc_mir_build/src/build/expr/into.rs b/compiler/rustc_mir_build/src/build/expr/into.rs
index a4de42d45..054661cf2 100644
--- a/compiler/rustc_mir_build/src/build/expr/into.rs
+++ b/compiler/rustc_mir_build/src/build/expr/into.rs
@@ -547,7 +547,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
                     source_info,
                     TerminatorKind::Yield { value, resume, resume_arg: destination, drop: None },
                 );
-                this.generator_drop_cleanup(block);
+                this.coroutine_drop_cleanup(block);
                 resume.unit()
             }
diff --git a/compiler/rustc_mir_build/src/build/expr/mod.rs b/compiler/rustc_mir_build/src/build/expr/mod.rs
index f5ae060d6..dfe85b858 100644
--- a/compiler/rustc_mir_build/src/build/expr/mod.rs
+++ b/compiler/rustc_mir_build/src/build/expr/mod.rs
@@ -44,7 +44,7 @@
 //! the most suitable spot to implement it, and then just let the
 //! other fns cycle around. The handoff works like this:
 //!
-//! - `into(place)` -> fallback is to create a rvalue with `as_rvalue` and assign it to `place`
+//! - `into(place)` -> fallback is to create an rvalue with `as_rvalue` and assign it to `place`
 //! - `as_rvalue` -> fallback is to create an Operand with `as_operand` and use `Rvalue::use`
 //! - `as_operand` -> either invokes `as_constant` or `as_temp`
 //! - `as_constant` -> (no fallback)
diff --git a/compiler/rustc_mir_build/src/build/expr/stmt.rs b/compiler/rustc_mir_build/src/build/expr/stmt.rs
index 396f82c27..7beaef602 100644
--- a/compiler/rustc_mir_build/src/build/expr/stmt.rs
+++ b/compiler/rustc_mir_build/src/build/expr/stmt.rs
@@ -120,32 +120,31 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
                 //
                 // it is usually better to focus on `the_value` rather
                 // than the entirety of block(s) surrounding it.
-                let adjusted_span =
-                    if let ExprKind::Block { block } = expr.kind
-                        && let Some(tail_ex) = this.thir[block].expr
-                    {
-                        let mut expr = &this.thir[tail_ex];
-                        loop {
-                            match expr.kind {
-                                ExprKind::Block { block }
-                                    if let Some(nested_expr) = this.thir[block].expr =>
-                                {
-                                    expr = &this.thir[nested_expr];
-                                }
-                                ExprKind::Scope { value: nested_expr, .. } => {
-                                    expr = &this.thir[nested_expr];
-                                }
-                                _ => break,
-                            }
-                        }
-                        this.block_context.push(BlockFrame::TailExpr {
-                            tail_result_is_ignored: true,
-                            span: expr.span,
-                        });
-                        Some(expr.span)
-                    } else {
-                        None
-                    };
+                let adjusted_span = if let ExprKind::Block { block } = expr.kind
+                    && let Some(tail_ex) = this.thir[block].expr
+                {
+                    let mut expr = &this.thir[tail_ex];
+                    loop {
+                        match expr.kind {
+                            ExprKind::Block { block }
+                                if let Some(nested_expr) = this.thir[block].expr =>
+                            {
+                                expr = &this.thir[nested_expr];
+                            }
+                            ExprKind::Scope { value: nested_expr, .. } => {
+                                expr = &this.thir[nested_expr];
+                            }
+                            _ => break,
+                        }
+                    }
+                    this.block_context.push(BlockFrame::TailExpr {
+                        tail_result_is_ignored: true,
+                        span: expr.span,
+                    });
+                    Some(expr.span)
+                } else {
+                    None
+                };
 
                 let temp = unpack!(block = this.as_temp(block, statement_scope, expr, Mutability::Not));
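The reindented logic above peels nested blocks and scopes until it reaches the innermost tail expression, so diagnostics point at `the_value` itself rather than the blocks around it. A toy model of that walk in plain Rust (illustrative types, not THIR):

    enum Expr {
        Block { tail: Option<Box<Expr>> },
        Scope { value: Box<Expr> },
        Value(i32),
    }

    // Follow block tails and scopes down to the innermost value, if any.
    fn innermost_tail(mut expr: &Expr) -> Option<i32> {
        loop {
            match expr {
                Expr::Block { tail: Some(inner) } => expr = inner,
                Expr::Scope { value } => expr = value,
                Expr::Value(v) => return Some(*v),
                Expr::Block { tail: None } => return None,
            }
        }
    }

    fn main() {
        // { scope { 42 } }
        let e = Expr::Block {
            tail: Some(Box::new(Expr::Scope { value: Box::new(Expr::Value(42)) })),
        };
        assert_eq!(innermost_tail(&e), Some(42));
    }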
diff --git a/compiler/rustc_mir_build/src/build/matches/mod.rs b/compiler/rustc_mir_build/src/build/matches/mod.rs
index 6baf8c7d7..304870274 100644
--- a/compiler/rustc_mir_build/src/build/matches/mod.rs
+++ b/compiler/rustc_mir_build/src/build/matches/mod.rs
@@ -157,7 +157,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
     /// [ 0. Pre-match ]
     ///        |
     /// [ 1. Evaluate Scrutinee (expression being matched on) ]
-    /// [ (fake read of scrutinee) ]
+    /// [ (PlaceMention of scrutinee) ]
     ///        |
     /// [ 2. Decision tree -- check discriminants ] <--------+
     ///        |                                             |
@@ -184,7 +184,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
     ///
     /// We generate MIR in the following steps:
     ///
-    /// 1. Evaluate the scrutinee and add the fake read of it ([Builder::lower_scrutinee]).
+    /// 1. Evaluate the scrutinee and add the PlaceMention of it ([Builder::lower_scrutinee]).
     /// 2. Create the decision tree ([Builder::lower_match_tree]).
     /// 3. Determine the fake borrows that are needed from the places that were
     ///    matched against and create the required temporaries for them
@@ -223,6 +223,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
         let fake_borrow_temps = self.lower_match_tree(
             block,
             scrutinee_span,
+            &scrutinee_place,
             match_start_span,
             match_has_guard,
             &mut candidates,
@@ -238,7 +239,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
         )
     }
 
-    /// Evaluate the scrutinee and add the fake read of it.
+    /// Evaluate the scrutinee and add the PlaceMention for it.
     fn lower_scrutinee(
         &mut self,
         mut block: BasicBlock,
@@ -246,26 +247,9 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
         scrutinee_span: Span,
     ) -> BlockAnd<PlaceBuilder<'tcx>> {
         let scrutinee_place_builder = unpack!(block = self.as_place_builder(block, scrutinee));
-        // Matching on a `scrutinee_place` with an uninhabited type doesn't
-        // generate any memory reads by itself, and so if the place "expression"
-        // contains unsafe operations like raw pointer dereferences or union
-        // field projections, we wouldn't know to require an `unsafe` block
-        // around a `match` equivalent to `std::intrinsics::unreachable()`.
-        // See issue #47412 for this hole being discovered in the wild.
-        //
-        // HACK(eddyb) Work around the above issue by adding a dummy inspection
-        // of `scrutinee_place`, specifically by applying `ReadForMatch`.
-        //
-        // NOTE: ReadForMatch also checks that the scrutinee is initialized.
-        // This is currently needed to not allow matching on an uninitialized,
-        // uninhabited value. If we get never patterns, those will check that
-        // the place is initialized, and so this read would only be used to
-        // check safety.
-        let cause_matched_place = FakeReadCause::ForMatchedPlace(None);
-        let source_info = self.source_info(scrutinee_span);
-
+        let source_info = self.source_info(scrutinee_span);
         if let Some(scrutinee_place) = scrutinee_place_builder.try_to_place(self) {
-            self.cfg.push_fake_read(block, source_info, cause_matched_place, scrutinee_place);
+            self.cfg.push_place_mention(block, source_info, scrutinee_place);
         }
 
         block.and(scrutinee_place_builder)
@@ -304,6 +288,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
         &mut self,
         block: BasicBlock,
         scrutinee_span: Span,
+        scrutinee_place_builder: &PlaceBuilder<'tcx>,
         match_start_span: Span,
         match_has_guard: bool,
         candidates: &mut [&mut Candidate<'pat, 'tcx>],
@@ -331,6 +316,33 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
             // otherwise block. Match checking will ensure this is actually
             // unreachable.
             let source_info = self.source_info(scrutinee_span);
+
+            // Matching on a `scrutinee_place` with an uninhabited type doesn't
+            // generate any memory reads by itself, and so if the place "expression"
+            // contains unsafe operations like raw pointer dereferences or union
+            // field projections, we wouldn't know to require an `unsafe` block
+            // around a `match` equivalent to `std::intrinsics::unreachable()`.
+            // See issue #47412 for this hole being discovered in the wild.
+            //
+            // HACK(eddyb) Work around the above issue by adding a dummy inspection
+            // of `scrutinee_place`, specifically by applying `ReadForMatch`.
+            //
+            // NOTE: ReadForMatch also checks that the scrutinee is initialized.
+            // This is currently needed to not allow matching on an uninitialized,
+            // uninhabited value. If we get never patterns, those will check that
+            // the place is initialized, and so this read would only be used to
+            // check safety.
+            let cause_matched_place = FakeReadCause::ForMatchedPlace(None);
+
+            if let Some(scrutinee_place) = scrutinee_place_builder.try_to_place(self) {
+                self.cfg.push_fake_read(
+                    otherwise_block,
+                    source_info,
+                    cause_matched_place,
+                    scrutinee_place,
+                );
+            }
+
             self.cfg.terminate(otherwise_block, source_info, TerminatorKind::Unreachable);
         }
@@ -599,13 +611,8 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
             }
 
             _ => {
-                let place_builder = unpack!(block = self.as_place_builder(block, initializer));
-
-                if let Some(place) = place_builder.try_to_place(self) {
-                    let source_info = self.source_info(initializer.span);
-                    self.cfg.push_place_mention(block, source_info, place);
-                }
-
+                let place_builder =
+                    unpack!(block = self.lower_scrutinee(block, initializer, initializer.span));
                 self.place_into_pattern(block, &irrefutable_pat, place_builder, true)
             }
         }
@@ -622,6 +629,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
         let fake_borrow_temps = self.lower_match_tree(
             block,
             irrefutable_pat.span,
+            &initializer,
             irrefutable_pat.span,
             false,
             &mut [&mut candidate],
@@ -736,7 +744,9 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
         self.cfg.push(block, Statement { source_info, kind: StatementKind::StorageLive(local_id) });
         // Although there is almost always scope for given variable in corner cases
         // like #92893 we might get variable with no scope.
-        if let Some(region_scope) = self.region_scope_tree.var_scope(var.0.local_id) && schedule_drop {
+        if let Some(region_scope) = self.region_scope_tree.var_scope(var.0.local_id)
+            && schedule_drop
+        {
             self.schedule_drop(span, region_scope, local_id, DropKind::Storage);
         }
         Place::from(local_id)
@@ -814,7 +824,10 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
                 }
             }
 
-            PatKind::Constant { .. } | PatKind::Range { .. } | PatKind::Wild => {}
+            PatKind::Constant { .. }
+            | PatKind::Range { .. }
+            | PatKind::Wild
+            | PatKind::Error(_) => {}
 
             PatKind::Deref { ref subpattern } => {
                 self.visit_primary_bindings(subpattern, pattern_user_ty.deref(), f);
@@ -842,6 +855,10 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
                 self.visit_primary_bindings(subpattern, subpattern_user_ty, f)
             }
 
+            PatKind::InlineConstant { ref subpattern, .. } => {
+                self.visit_primary_bindings(subpattern, pattern_user_ty, f)
+            }
+
             PatKind::Leaf { ref subpatterns } => {
                 for subpattern in subpatterns {
                     let subpattern_user_ty = pattern_user_ty.clone().leaf(subpattern.field);
@@ -1018,7 +1035,7 @@ enum TestKind<'tcx> {
         ty: Ty<'tcx>,
     },
 
-    /// Test whether the value falls within an inclusive or exclusive range
+    /// Test whether the value falls within an inclusive or exclusive range.
     Range(Box<PatRange<'tcx>>),
 
     /// Test that the length of the slice is equal to `len`.
@@ -1798,7 +1815,6 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
         let fake_borrow_ty = Ty::new_imm_ref(tcx, tcx.lifetimes.re_erased, fake_borrow_deref_ty);
         let mut fake_borrow_temp = LocalDecl::new(fake_borrow_ty, temp_span);
-        fake_borrow_temp.internal = self.local_decls[matched_place.local].internal;
         fake_borrow_temp.local_info = ClearCrossCrate::Set(Box::new(LocalInfo::FakeBorrow));
         let fake_borrow_temp = self.local_decls.push(fake_borrow_temp);
@@ -1833,6 +1849,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
         let fake_borrow_temps = self.lower_match_tree(
             block,
             pat.span,
+            &expr_place_builder,
             pat.span,
             false,
             &mut [&mut guard_candidate, &mut otherwise_candidate],
@@ -2268,7 +2285,6 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
             ty: var_ty,
             user_ty: if user_ty.is_empty() { None } else { Some(Box::new(user_ty)) },
             source_info,
-            internal: false,
             local_info: ClearCrossCrate::Set(Box::new(LocalInfo::User(BindingForm::Var(
                 VarBindingForm {
                     binding_mode,
@@ -2298,7 +2314,6 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
                 ty: Ty::new_imm_ref(tcx, tcx.lifetimes.re_erased, var_ty),
                 user_ty: None,
                 source_info,
-                internal: false,
                 local_info: ClearCrossCrate::Set(Box::new(LocalInfo::User(
                     BindingForm::RefForGuard,
                 ))),
@@ -2336,6 +2351,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
             let fake_borrow_temps = this.lower_match_tree(
                 block,
                 initializer_span,
+                &scrutinee,
                 pattern.span,
                 false,
                 &mut [&mut candidate, &mut wildcard],
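The comment moved into `lower_match_tree` above concerns issue #47412: a match on a place of uninhabited type performs no memory read, so without a synthetic read the compiler would not know the place expression itself needs `unsafe`. A minimal illustration of the kind of program involved (plain Rust, only std):

    // Matching on a place of uninhabited type reads no memory; the raw
    // pointer dereference is what must be flagged as needing `unsafe`.
    enum Never {}

    fn diverge(ptr: *const Never) -> ! {
        unsafe {
            match *ptr {} // equivalent to std::intrinsics::unreachable()
        }
    }

    fn main() {
        // Calling `diverge` would be UB; we only show that the shape compiles.
        let _f: fn(*const Never) -> ! = diverge;
    }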
diff --git a/compiler/rustc_mir_build/src/build/matches/simplify.rs b/compiler/rustc_mir_build/src/build/matches/simplify.rs
index 17ac1f4e0..6a40c8d84 100644
--- a/compiler/rustc_mir_build/src/build/matches/simplify.rs
+++ b/compiler/rustc_mir_build/src/build/matches/simplify.rs
@@ -15,11 +15,7 @@
 use crate::build::expr::as_place::PlaceBuilder;
 use crate::build::matches::{Ascription, Binding, Candidate, MatchPair};
 use crate::build::Builder;
-use rustc_hir::RangeEnd;
 use rustc_middle::thir::{self, *};
-use rustc_middle::ty;
-use rustc_middle::ty::layout::IntegerExt;
-use rustc_target::abi::{Integer, Size};
 
 use std::mem;
 
@@ -148,7 +144,6 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
         match_pair: MatchPair<'pat, 'tcx>,
         candidate: &mut Candidate<'pat, 'tcx>,
     ) -> Result<(), MatchPair<'pat, 'tcx>> {
-        let tcx = self.tcx;
         match match_pair.pattern.kind {
             PatKind::AscribeUserType {
                 ref subpattern,
@@ -168,7 +163,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
                 Ok(())
             }
 
-            PatKind::Wild => {
+            PatKind::Wild | PatKind::Error(_) => {
                 // nothing left to do
                 Ok(())
             }
@@ -204,41 +199,16 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
                 Err(match_pair)
             }
 
-            PatKind::Range(box PatRange { lo, hi, end }) => {
-                let (range, bias) = match *lo.ty().kind() {
-                    ty::Char => {
-                        (Some(('\u{0000}' as u128, '\u{10FFFF}' as u128, Size::from_bits(32))), 0)
-                    }
-                    ty::Int(ity) => {
-                        let size = Integer::from_int_ty(&tcx, ity).size();
-                        let max = size.truncate(u128::MAX);
-                        let bias = 1u128 << (size.bits() - 1);
-                        (Some((0, max, size)), bias)
-                    }
-                    ty::Uint(uty) => {
-                        let size = Integer::from_uint_ty(&tcx, uty).size();
-                        let max = size.truncate(u128::MAX);
-                        (Some((0, max, size)), 0)
-                    }
-                    _ => (None, 0),
-                };
-                if let Some((min, max, sz)) = range {
-                    // We want to compare ranges numerically, but the order of the bitwise
-                    // representation of signed integers does not match their numeric order. Thus,
-                    // to correct the ordering, we need to shift the range of signed integers to
-                    // correct the comparison. This is achieved by XORing with a bias (see
-                    // pattern/_match.rs for another pertinent example of this pattern).
-                    //
-                    // Also, for performance, it's important to only do the second `try_to_bits` if
-                    // necessary.
-                    let lo = lo.try_to_bits(sz).unwrap() ^ bias;
-                    if lo <= min {
-                        let hi = hi.try_to_bits(sz).unwrap() ^ bias;
-                        if hi > max || hi == max && end == RangeEnd::Included {
-                            // Irrefutable pattern match.
-                            return Ok(());
-                        }
-                    }
+            PatKind::InlineConstant { subpattern: ref pattern, def: _ } => {
+                candidate.match_pairs.push(MatchPair::new(match_pair.place, pattern, self));
+
+                Ok(())
+            }
+
+            PatKind::Range(ref range) => {
+                if let Some(true) = range.is_full_range(self.tcx) {
+                    // Irrefutable pattern match.
+                    return Ok(());
                 }
                 Err(match_pair)
             }
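The deleted irrefutability check compared range endpoints on their bit representation, using the XOR-with-bias trick its comment describes: flipping the sign bit maps the signed order onto the unsigned order. A standalone demonstration for `i8` (illustrative, not compiler code):

    // bias = 1 << (bits - 1); XORing the bit pattern with the bias makes
    // unsigned comparisons agree with the signed numeric order.
    fn to_biased(x: i8) -> u8 {
        (x as u8) ^ 0x80
    }

    fn main() {
        assert!(to_biased(-128) < to_biased(-1));
        assert!(to_biased(-1) < to_biased(0));
        assert!(to_biased(0) < to_biased(127));
        // So `lo <= x <= hi` over i8 can be decided with unsigned comparisons
        // on biased bits, which is what `lo.try_to_bits(sz).unwrap() ^ bias` did.
    }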
diff --git a/compiler/rustc_mir_build/src/build/matches/test.rs b/compiler/rustc_mir_build/src/build/matches/test.rs
index 795d1db8e..bdd4f2011 100644
--- a/compiler/rustc_mir_build/src/build/matches/test.rs
+++ b/compiler/rustc_mir_build/src/build/matches/test.rs
@@ -8,7 +8,6 @@
 use crate::build::expr::as_place::PlaceBuilder;
 use crate::build::matches::{Candidate, MatchPair, Test, TestKind};
 use crate::build::Builder;
-use crate::thir::pattern::compare_const_vals;
 use rustc_data_structures::fx::FxIndexMap;
 use rustc_hir::{LangItem, RangeEnd};
 use rustc_index::bit_set::BitSet;
@@ -59,8 +58,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
             },
 
             PatKind::Range(ref range) => {
-                assert_eq!(range.lo.ty(), match_pair.pattern.ty);
-                assert_eq!(range.hi.ty(), match_pair.pattern.ty);
+                assert_eq!(range.ty, match_pair.pattern.ty);
                 Test { span: match_pair.pattern.span, kind: TestKind::Range(range.clone()) }
             }
 
@@ -73,11 +71,13 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
             PatKind::Or { .. } => bug!("or-patterns should have already been handled"),
 
             PatKind::AscribeUserType { .. }
+            | PatKind::InlineConstant { .. }
             | PatKind::Array { .. }
             | PatKind::Wild
             | PatKind::Binding { .. }
             | PatKind::Leaf { .. }
-            | PatKind::Deref { .. } => self.error_simplifiable(match_pair),
+            | PatKind::Deref { .. }
+            | PatKind::Error(_) => self.error_simplifiable(match_pair),
         }
     }
 
@@ -110,8 +110,10 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
             | PatKind::Or { .. }
             | PatKind::Binding { .. }
             | PatKind::AscribeUserType { .. }
+            | PatKind::InlineConstant { .. }
             | PatKind::Leaf { .. }
-            | PatKind::Deref { .. } => {
+            | PatKind::Deref { .. }
+            | PatKind::Error(_) => {
                 // don't know how to add these patterns to a switch
                 false
             }
@@ -236,18 +238,27 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
             TestKind::Eq { value, ty } => {
                 let tcx = self.tcx;
-                if let ty::Adt(def, _) = ty.kind() && Some(def.did()) == tcx.lang_items().string() {
+                if let ty::Adt(def, _) = ty.kind()
+                    && Some(def.did()) == tcx.lang_items().string()
+                {
                     if !tcx.features().string_deref_patterns {
-                        bug!("matching on `String` went through without enabling string_deref_patterns");
+                        bug!(
+                            "matching on `String` went through without enabling string_deref_patterns"
+                        );
                     }
                     let re_erased = tcx.lifetimes.re_erased;
-                    let ref_string = self.temp(Ty::new_imm_ref(tcx,re_erased, ty), test.span);
-                    let ref_str_ty = Ty::new_imm_ref(tcx,re_erased, tcx.types.str_);
+                    let ref_string = self.temp(Ty::new_imm_ref(tcx, re_erased, ty), test.span);
+                    let ref_str_ty = Ty::new_imm_ref(tcx, re_erased, tcx.types.str_);
                     let ref_str = self.temp(ref_str_ty, test.span);
                     let deref = tcx.require_lang_item(LangItem::Deref, None);
                     let method = trait_method(tcx, deref, sym::deref, [ty]);
                     let eq_block = self.cfg.start_new_block();
-                    self.cfg.push_assign(block, source_info, ref_string, Rvalue::Ref(re_erased, BorrowKind::Shared, place));
+                    self.cfg.push_assign(
+                        block,
+                        source_info,
+                        ref_string,
+                        Rvalue::Ref(re_erased, BorrowKind::Shared, place),
+                    );
                     self.cfg.terminate(
                         block,
                         source_info,
@@ -262,10 +273,17 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
                             target: Some(eq_block),
                             unwind: UnwindAction::Continue,
                             call_source: CallSource::Misc,
-                            fn_span: source_info.span
-                        }
+                            fn_span: source_info.span,
+                        },
+                    );
+                    self.non_scalar_compare(
+                        eq_block,
+                        make_target_blocks,
+                        source_info,
+                        value,
+                        ref_str,
+                        ref_str_ty,
                     );
-                    self.non_scalar_compare(eq_block, make_target_blocks, source_info, value, ref_str, ref_str_ty);
                     return;
                 }
                 if !ty.is_scalar() {
@@ -289,11 +307,14 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
                 }
             }
 
-            TestKind::Range(box PatRange { lo, hi, ref end }) => {
+            TestKind::Range(ref range) => {
                 let lower_bound_success = self.cfg.start_new_block();
                 let target_blocks = make_target_blocks(self);
 
                 // Test `val` by computing `lo <= val && val <= hi`, using primitive comparisons.
+                // FIXME: skip useless comparison when the range is half-open.
+                let lo = range.lo.to_const(range.ty, self.tcx);
+                let hi = range.hi.to_const(range.ty, self.tcx);
                 let lo = self.literal_operand(test.span, lo);
                 let hi = self.literal_operand(test.span, hi);
                 let val = Operand::Copy(place);
@@ -310,7 +331,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
                     lo,
                     val.clone(),
                 );
-                let op = match *end {
+                let op = match range.end {
                     RangeEnd::Included => BinOp::Le,
                     RangeEnd::Excluded => BinOp::Lt,
                 };
@@ -678,34 +699,18 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
             }
 
             (TestKind::Range(test), PatKind::Range(pat)) => {
-                use std::cmp::Ordering::*;
-
                 if test == pat {
                     self.candidate_without_match_pair(match_pair_index, candidate);
                     return Some(0);
                 }
 
-                // For performance, it's important to only do the second
-                // `compare_const_vals` if necessary.
-                let no_overlap = if matches!(
-                    (compare_const_vals(self.tcx, test.hi, pat.lo, self.param_env)?, test.end),
-                    (Less, _) | (Equal, RangeEnd::Excluded) // test < pat
-                ) || matches!(
-                    (compare_const_vals(self.tcx, test.lo, pat.hi, self.param_env)?, pat.end),
-                    (Greater, _) | (Equal, RangeEnd::Excluded) // test > pat
-                ) {
-                    Some(1)
-                } else {
-                    None
-                };
-
                 // If the testing range does not overlap with pattern range,
                 // the pattern can be matched only if this test fails.
-                no_overlap
+                if !test.overlaps(pat, self.tcx, self.param_env)? { Some(1) } else { None }
             }
 
             (TestKind::Range(range), &PatKind::Constant { value }) => {
-                if let Some(false) = self.const_range_contains(&*range, value) {
+                if !range.contains(value, self.tcx, self.param_env)? {
                     // `value` is not contained in the testing range,
                     // so `value` can be matched only if this test fails.
                     Some(1)
@@ -797,27 +802,13 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
         span_bug!(match_pair.pattern.span, "simplifiable pattern found: {:?}", match_pair.pattern)
     }
 
-    fn const_range_contains(&self, range: &PatRange<'tcx>, value: Const<'tcx>) -> Option<bool> {
-        use std::cmp::Ordering::*;
-
-        // For performance, it's important to only do the second
-        // `compare_const_vals` if necessary.
-        Some(
-            matches!(compare_const_vals(self.tcx, range.lo, value, self.param_env)?, Less | Equal)
-                && matches!(
-                    (compare_const_vals(self.tcx, value, range.hi, self.param_env)?, range.end),
-                    (Less, _) | (Equal, RangeEnd::Included)
-                ),
-        )
-    }
-
     fn values_not_contained_in_range(
         &self,
         range: &PatRange<'tcx>,
         options: &FxIndexMap<Const<'tcx>, u128>,
     ) -> Option<bool> {
         for &val in options.keys() {
-            if self.const_range_contains(range, val)? {
+            if range.contains(val, self.tcx, self.param_env)? {
                 return Some(false);
             }
         }
diff --git a/compiler/rustc_mir_build/src/build/misc.rs b/compiler/rustc_mir_build/src/build/misc.rs
index c96e99ef0..c263de79c 100644
--- a/compiler/rustc_mir_build/src/build/misc.rs
+++ b/compiler/rustc_mir_build/src/build/misc.rs
@@ -15,9 +15,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
     /// N.B., **No cleanup is scheduled for this temporary.** You should
     /// call `schedule_drop` once the temporary is initialized.
     pub(crate) fn temp(&mut self, ty: Ty<'tcx>, span: Span) -> Place<'tcx> {
-        // Mark this local as internal to avoid temporaries with types not present in the
-        // user's code resulting in ICEs from the generator transform.
-        let temp = self.local_decls.push(LocalDecl::new(ty, span).internal());
+        let temp = self.local_decls.push(LocalDecl::new(ty, span));
         let place = Place::from(temp);
         debug!("temp: created temp {:?} with type {:?}", place, self.local_decls[temp].ty);
         place
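The open-coded `compare_const_vals` logic removed above is replaced by `PatRange::contains` and `PatRange::overlaps`. A standalone model of the two queries over unsigned values with an inclusive or exclusive end (not the rustc API; names mirror the diff):

    #[derive(PartialEq)]
    enum RangeEnd { Included, Excluded }

    struct PatRange { lo: u128, hi: u128, end: RangeEnd }

    impl PatRange {
        fn contains(&self, value: u128) -> bool {
            self.lo <= value
                && match self.end {
                    RangeEnd::Included => value <= self.hi,
                    RangeEnd::Excluded => value < self.hi,
                }
        }

        // Two ranges overlap unless one ends strictly before the other starts.
        fn overlaps(&self, other: &PatRange) -> bool {
            !(self.ends_before(other) || other.ends_before(self))
        }

        fn ends_before(&self, other: &PatRange) -> bool {
            self.hi < other.lo || (self.hi == other.lo && self.end == RangeEnd::Excluded)
        }
    }

    fn main() {
        let a = PatRange { lo: 0, hi: 5, end: RangeEnd::Excluded }; // 0..5
        let b = PatRange { lo: 5, hi: 9, end: RangeEnd::Included }; // 5..=9
        assert!(a.contains(4) && !a.contains(5));
        assert!(!a.overlaps(&b)); // 0..5 and 5..=9 share no value
    }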
diff --git a/compiler/rustc_mir_build/src/build/mod.rs b/compiler/rustc_mir_build/src/build/mod.rs
index bba470564..886d80545 100644
--- a/compiler/rustc_mir_build/src/build/mod.rs
+++ b/compiler/rustc_mir_build/src/build/mod.rs
@@ -9,7 +9,7 @@ use rustc_errors::ErrorGuaranteed;
 use rustc_hir as hir;
 use rustc_hir::def::DefKind;
 use rustc_hir::def_id::{DefId, LocalDefId};
-use rustc_hir::{GeneratorKind, Node};
+use rustc_hir::{CoroutineKind, Node};
 use rustc_index::bit_set::GrowableBitSet;
 use rustc_index::{Idx, IndexSlice, IndexVec};
 use rustc_infer::infer::{InferCtxt, TyCtxtInferExt};
@@ -53,10 +53,7 @@ pub(crate) fn closure_saved_names_of_captured_variables<'tcx>(
 }
 
 /// Construct the MIR for a given `DefId`.
-fn mir_build(tcx: TyCtxt<'_>, def: LocalDefId) -> Body<'_> {
-    // Ensure unsafeck and abstract const building is ran before we steal the THIR.
-    tcx.ensure_with_value()
-        .thir_check_unsafety(tcx.typeck_root_def_id(def.to_def_id()).expect_local());
+fn mir_build<'tcx>(tcx: TyCtxt<'tcx>, def: LocalDefId) -> Body<'tcx> {
     tcx.ensure_with_value().thir_abstract_const(def);
     if let Err(e) = tcx.check_match(def) {
         return construct_error(tcx, def, e);
@@ -65,20 +62,27 @@ fn mir_build(tcx: TyCtxt<'_>, def: LocalDefId) -> Body<'_> {
     let body = match tcx.thir_body(def) {
         Err(error_reported) => construct_error(tcx, def, error_reported),
         Ok((thir, expr)) => {
-            // We ran all queries that depended on THIR at the beginning
-            // of `mir_build`, so now we can steal it
-            let thir = thir.steal();
+            let build_mir = |thir: &Thir<'tcx>| match thir.body_type {
+                thir::BodyTy::Fn(fn_sig) => construct_fn(tcx, def, thir, expr, fn_sig),
+                thir::BodyTy::Const(ty) => construct_const(tcx, def, thir, expr, ty),
+            };
 
-            tcx.ensure().check_match(def);
             // this must run before MIR dump, because
             // "not all control paths return a value" is reported here.
             //
             // maybe move the check to a MIR pass?
             tcx.ensure().check_liveness(def);
 
-            match thir.body_type {
-                thir::BodyTy::Fn(fn_sig) => construct_fn(tcx, def, &thir, expr, fn_sig),
-                thir::BodyTy::Const(ty) => construct_const(tcx, def, &thir, expr, ty),
+            if tcx.sess.opts.unstable_opts.thir_unsafeck {
+                // Don't steal here if THIR unsafeck is being used. Instead
+                // steal in unsafeck. This is so that pattern inline constants
+                // can be evaluated as part of building the THIR of the parent
+                // function without a cycle.
+                build_mir(&thir.borrow())
+            } else {
+                // We ran all queries that depended on THIR at the beginning
+                // of `mir_build`, so now we can steal it
+                build_mir(&thir.steal())
             }
         }
     };
@@ -173,7 +177,7 @@ struct Builder<'a, 'tcx> {
     check_overflow: bool,
     fn_span: Span,
     arg_count: usize,
-    generator_kind: Option<GeneratorKind>,
+    coroutine_kind: Option<CoroutineKind>,
 
     /// The current set of scopes, updated as we traverse;
     /// see the `scope` module for more details.
@@ -448,7 +452,7 @@ fn construct_fn<'tcx>(
 ) -> Body<'tcx> {
     let span = tcx.def_span(fn_def);
     let fn_id = tcx.hir().local_def_id_to_hir_id(fn_def);
-    let generator_kind = tcx.generator_kind(fn_def);
+    let coroutine_kind = tcx.coroutine_kind(fn_def);
 
     // The representation of thir for `-Zunpretty=thir-tree` relies on
     // the entry expression being the last element of `thir.exprs`.
@@ -478,15 +482,15 @@ fn construct_fn<'tcx>(
 
     let arguments = &thir.params;
 
-    let (yield_ty, return_ty) = if generator_kind.is_some() {
-        let gen_ty = arguments[thir::UPVAR_ENV_PARAM].ty;
-        let gen_sig = match gen_ty.kind() {
-            ty::Generator(_, gen_args, ..) => gen_args.as_generator().sig(),
+    let (yield_ty, return_ty) = if coroutine_kind.is_some() {
+        let coroutine_ty = arguments[thir::UPVAR_ENV_PARAM].ty;
+        let coroutine_sig = match coroutine_ty.kind() {
+            ty::Coroutine(_, gen_args, ..) => gen_args.as_coroutine().sig(),
             _ => {
-                span_bug!(span, "generator w/o generator type: {:?}", gen_ty)
+                span_bug!(span, "coroutine w/o coroutine type: {:?}", coroutine_ty)
             }
         };
-        (Some(gen_sig.yield_ty), gen_sig.return_ty)
+        (Some(coroutine_sig.yield_ty), coroutine_sig.return_ty)
     } else {
         (None, fn_sig.output())
     };
@@ -519,7 +523,7 @@ fn construct_fn<'tcx>(
         safety,
         return_ty,
         return_ty_span,
-        generator_kind,
+        coroutine_kind,
     );
 
     let call_site_scope =
@@ -553,7 +557,7 @@ fn construct_fn<'tcx>(
         None
     };
     if yield_ty.is_some() {
-        body.generator.as_mut().unwrap().yield_ty = yield_ty;
+        body.coroutine.as_mut().unwrap().yield_ty = yield_ty;
     }
     body
 }
@@ -616,29 +620,53 @@ fn construct_const<'a, 'tcx>(
 ///
 /// This is required because we may still want to run MIR passes on an item
 /// with type errors, but normal MIR construction can't handle that in general.
-fn construct_error(tcx: TyCtxt<'_>, def: LocalDefId, err: ErrorGuaranteed) -> Body<'_> {
-    let span = tcx.def_span(def);
-    let hir_id = tcx.hir().local_def_id_to_hir_id(def);
-    let generator_kind = tcx.generator_kind(def);
-    let body_owner_kind = tcx.hir().body_owner_kind(def);
-
-    let ty = Ty::new_error(tcx, err);
-    let num_params = match body_owner_kind {
-        hir::BodyOwnerKind::Fn => tcx.fn_sig(def).skip_binder().inputs().skip_binder().len(),
-        hir::BodyOwnerKind::Closure => {
-            let ty = tcx.type_of(def).instantiate_identity();
-            match ty.kind() {
-                ty::Closure(_, args) => 1 + args.as_closure().sig().inputs().skip_binder().len(),
-                ty::Generator(..) => 2,
-                _ => bug!("expected closure or generator, found {ty:?}"),
-            }
+fn construct_error(tcx: TyCtxt<'_>, def_id: LocalDefId, guar: ErrorGuaranteed) -> Body<'_> {
+    let span = tcx.def_span(def_id);
+    let hir_id = tcx.hir().local_def_id_to_hir_id(def_id);
+    let coroutine_kind = tcx.coroutine_kind(def_id);
+
+    let (inputs, output, yield_ty) = match tcx.def_kind(def_id) {
+        DefKind::Const
+        | DefKind::AssocConst
+        | DefKind::AnonConst
+        | DefKind::InlineConst
+        | DefKind::Static(_) => (vec![], tcx.type_of(def_id).instantiate_identity(), None),
+        DefKind::Ctor(..) | DefKind::Fn | DefKind::AssocFn => {
+            let sig = tcx.liberate_late_bound_regions(
+                def_id.to_def_id(),
+                tcx.fn_sig(def_id).instantiate_identity(),
+            );
+            (sig.inputs().to_vec(), sig.output(), None)
+        }
+        DefKind::Closure => {
+            let closure_ty = tcx.type_of(def_id).instantiate_identity();
+            let ty::Closure(_, args) = closure_ty.kind() else { bug!() };
+            let args = args.as_closure();
+            let sig = tcx.liberate_late_bound_regions(def_id.to_def_id(), args.sig());
+            let self_ty = match args.kind() {
+                ty::ClosureKind::Fn => Ty::new_imm_ref(tcx, tcx.lifetimes.re_erased, closure_ty),
+                ty::ClosureKind::FnMut => Ty::new_mut_ref(tcx, tcx.lifetimes.re_erased, closure_ty),
+                ty::ClosureKind::FnOnce => closure_ty,
+            };
+            ([self_ty].into_iter().chain(sig.inputs().to_vec()).collect(), sig.output(), None)
+        }
+        DefKind::Coroutine => {
+            let coroutine_ty = tcx.type_of(def_id).instantiate_identity();
+            let ty::Coroutine(_, args, _) = coroutine_ty.kind() else { bug!() };
+            let args = args.as_coroutine();
+            let yield_ty = args.yield_ty();
+            let return_ty = args.return_ty();
+            (vec![coroutine_ty, args.resume_ty()], return_ty, Some(yield_ty))
         }
-        hir::BodyOwnerKind::Const { .. } => 0,
-        hir::BodyOwnerKind::Static(_) => 0,
+        dk => bug!("{:?} is not a body: {:?}", def_id, dk),
     };
+
+    let source_info = SourceInfo { span, scope: OUTERMOST_SOURCE_SCOPE };
+    let local_decls = IndexVec::from_iter(
+        [output].iter().chain(&inputs).map(|ty| LocalDecl::with_source_info(*ty, source_info)),
+    );
     let mut cfg = CFG { basic_blocks: IndexVec::new() };
     let mut source_scopes = IndexVec::new();
-    let mut local_decls = IndexVec::from_elem_n(LocalDecl::new(ty, span), 1);
 
     cfg.start_new_block();
     source_scopes.push(SourceScopeData {
@@ -651,28 +679,24 @@ fn construct_error(tcx: TyCtxt<'_>, def: LocalDefId, err: ErrorGuaranteed) -> Bo
             safety: Safety::Safe,
         }),
     });
-    let source_info = SourceInfo { span, scope: OUTERMOST_SOURCE_SCOPE };
 
-    // Some MIR passes will expect the number of parameters to match the
-    // function declaration.
-    for _ in 0..num_params {
-        local_decls.push(LocalDecl::with_source_info(ty, source_info));
-    }
     cfg.terminate(START_BLOCK, source_info, TerminatorKind::Unreachable);
 
     let mut body = Body::new(
-        MirSource::item(def.to_def_id()),
+        MirSource::item(def_id.to_def_id()),
         cfg.basic_blocks,
         source_scopes,
         local_decls,
         IndexVec::new(),
-        num_params,
+        inputs.len(),
         vec![],
         span,
-        generator_kind,
-        Some(err),
+        coroutine_kind,
+        Some(guar),
     );
-    body.generator.as_mut().map(|gen| gen.yield_ty = Some(ty));
+
+    body.coroutine.as_mut().map(|gen| gen.yield_ty = yield_ty);
+
     body
 }
 
@@ -687,7 +711,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
         safety: Safety,
         return_ty: Ty<'tcx>,
         return_span: Span,
-        generator_kind: Option<GeneratorKind>,
+        coroutine_kind: Option<CoroutineKind>,
     ) -> Builder<'a, 'tcx> {
         let tcx = infcx.tcx;
         let attrs = tcx.hir().attrs(hir_id);
@@ -718,7 +742,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
             cfg: CFG { basic_blocks: IndexVec::new() },
             fn_span: span,
             arg_count,
-            generator_kind,
+            coroutine_kind,
             scopes: scope::Scopes::new(),
             block_context: BlockContext::new(),
             source_scopes: IndexVec::new(),
@@ -760,7 +784,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
             self.arg_count,
             self.var_debug_info,
             self.fn_span,
-            self.generator_kind,
+            self.coroutine_kind,
             None,
         )
     }
@@ -777,7 +801,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
 
         let upvar_args = match closure_ty.kind() {
             ty::Closure(_, args) => ty::UpvarArgs::Closure(args),
-            ty::Generator(_, args, _) => ty::UpvarArgs::Generator(args),
+            ty::Coroutine(_, args, _) => ty::UpvarArgs::Coroutine(args),
             _ => return,
         };
 
@@ -847,7 +871,9 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
             self.local_decls.push(LocalDecl::with_source_info(param.ty, source_info));
 
         // If this is a simple binding pattern, give debuginfo a nice name.
-        if let Some(ref pat) = param.pat && let Some(name) = pat.simple_ident() {
+        if let Some(ref pat) = param.pat
+            && let Some(name) = pat.simple_ident()
+        {
             self.var_debug_info.push(VarDebugInfo {
                 name,
                 source_info,
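`construct_error` now synthesizes one local per declared input, including a closure's implicit first argument, whose type follows the closure kind: `&C` for `Fn`, `&mut C` for `FnMut`, and `C` by value for `FnOnce`. The same correspondence is visible in ordinary Rust (illustrative helpers, not compiler internals):

    fn call_fn<C: Fn()>(c: &C) { c() }            // Fn: shared borrow of the closure
    fn call_fn_mut<C: FnMut()>(c: &mut C) { c() } // FnMut: unique borrow
    fn call_fn_once<C: FnOnce()>(c: C) { c() }    // FnOnce: by value

    fn main() {
        let name = String::from("mir");
        let mut hits = 0;

        call_fn(&|| println!("read-only capture of {name}"));
        call_fn_mut(&mut || hits += 1);   // mutates state, needs &mut C
        call_fn_once(move || drop(name)); // consumes its capture, needs C
        assert_eq!(hits, 1);
    }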
diff --git a/compiler/rustc_mir_build/src/build/scope.rs b/compiler/rustc_mir_build/src/build/scope.rs
index 4cf6a349a..b3d3863b5 100644
--- a/compiler/rustc_mir_build/src/build/scope.rs
+++ b/compiler/rustc_mir_build/src/build/scope.rs
@@ -108,8 +108,8 @@ pub struct Scopes<'tcx> {
     /// [DropTree] for more details.
     unwind_drops: DropTree,
 
-    /// Drops that need to be done on paths to the `GeneratorDrop` terminator.
-    generator_drops: DropTree,
+    /// Drops that need to be done on paths to the `CoroutineDrop` terminator.
+    coroutine_drops: DropTree,
 }
 
 #[derive(Debug)]
@@ -133,8 +133,8 @@ struct Scope {
     cached_unwind_block: Option<DropIdx>,
 
     /// The drop index that will drop everything in and below this scope on a
-    /// generator drop path.
-    cached_generator_drop_block: Option<DropIdx>,
+    /// coroutine drop path.
+    cached_coroutine_drop_block: Option<DropIdx>,
 }
 
 #[derive(Clone, Copy, Debug)]
@@ -194,7 +194,7 @@ const ROOT_NODE: DropIdx = DropIdx::from_u32(0);
 /// A tree of drops that we have deferred lowering. It's used for:
 ///
 /// * Drops on unwind paths
-/// * Drops on generator drop paths (when a suspended generator is dropped)
+/// * Drops on coroutine drop paths (when a suspended coroutine is dropped)
 /// * Drops on return and loop exit paths
 /// * Drops on the else path in an `if let` chain
 ///
@@ -222,8 +222,8 @@ impl Scope {
     ///  * polluting the cleanup MIR with StorageDead creates
     ///    landing pads even though there's no actual destructors
     ///  * freeing up stack space has no effect during unwinding
-    /// Note that for generators we do emit StorageDeads, for the
-    /// use of optimizations in the MIR generator transform.
+    /// Note that for coroutines we do emit StorageDeads, for the
+    /// use of optimizations in the MIR coroutine transform.
     fn needs_cleanup(&self) -> bool {
         self.drops.iter().any(|drop| match drop.kind {
             DropKind::Value => true,
@@ -233,7 +233,7 @@ impl Scope {
 
     fn invalidate_cache(&mut self) {
         self.cached_unwind_block = None;
-        self.cached_generator_drop_block = None;
+        self.cached_coroutine_drop_block = None;
     }
 }
 
@@ -407,7 +407,7 @@ impl<'tcx> Scopes<'tcx> {
             breakable_scopes: Vec::new(),
             if_then_scope: None,
             unwind_drops: DropTree::new(),
-            generator_drops: DropTree::new(),
+            coroutine_drops: DropTree::new(),
         }
     }
 
@@ -419,7 +419,7 @@ impl<'tcx> Scopes<'tcx> {
             drops: vec![],
             moved_locals: vec![],
             cached_unwind_block: None,
-            cached_generator_drop_block: None,
+            cached_coroutine_drop_block: None,
         });
     }
 
@@ -725,7 +725,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
     // Add a dummy `Assign` statement to the CFG, with the span for the source code's `continue`
     // statement.
     fn add_dummy_assignment(&mut self, span: Span, block: BasicBlock, source_info: SourceInfo) {
-        let local_decl = LocalDecl::new(Ty::new_unit(self.tcx), span).internal();
+        let local_decl = LocalDecl::new(Ty::new_unit(self.tcx), span);
         let temp_place = Place::from(self.local_decls.push(local_decl));
         self.cfg.push_assign_unit(block, source_info, temp_place, self.tcx);
     }
@@ -734,7 +734,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
         // If we are emitting a `drop` statement, we need to have the cached
         // diverge cleanup pads ready in case that drop panics.
         let needs_cleanup = self.scopes.scopes.last().is_some_and(|scope| scope.needs_cleanup());
-        let is_generator = self.generator_kind.is_some();
+        let is_coroutine = self.coroutine_kind.is_some();
         let unwind_to = if needs_cleanup { self.diverge_cleanup() } else { DropIdx::MAX };
 
         let scope = self.scopes.scopes.last().expect("leave_top_scope called with no scopes");
@@ -744,7 +744,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
             scope,
             block,
             unwind_to,
-            is_generator && needs_cleanup,
+            is_coroutine && needs_cleanup,
             self.arg_count,
         ))
     }
@@ -984,11 +984,11 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
        // caches gets invalidated. i.e., if a new drop is added into the middle scope, the
        // cache of outer scope stays intact.
        //
-        // Since we only cache drops for the unwind path and the generator drop
+        // Since we only cache drops for the unwind path and the coroutine drop
        // path, we only need to invalidate the cache for drops that happen on
-        // the unwind or generator drop paths. This means that for
-        // non-generators we don't need to invalidate caches for `DropKind::Storage`.
-        let invalidate_caches = needs_drop || self.generator_kind.is_some();
+        // the unwind or coroutine drop paths. This means that for
+        // non-coroutines we don't need to invalidate caches for `DropKind::Storage`.
+        let invalidate_caches = needs_drop || self.coroutine_kind.is_some();
         for scope in self.scopes.scopes.iter_mut().rev() {
             if invalidate_caches {
                 scope.invalidate_cache();
@@ -1101,10 +1101,10 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
             return cached_drop;
         }
 
-        let is_generator = self.generator_kind.is_some();
+        let is_coroutine = self.coroutine_kind.is_some();
         for scope in &mut self.scopes.scopes[uncached_scope..=target] {
             for drop in &scope.drops {
-                if is_generator || drop.kind == DropKind::Value {
+                if is_coroutine || drop.kind == DropKind::Value {
                     cached_drop = self.scopes.unwind_drops.add_drop(*drop, cached_drop);
                 }
             }
@@ -1137,17 +1137,17 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
     }
 
     /// Sets up a path that performs all required cleanup for dropping a
-    /// generator, starting from the given block that ends in
+    /// coroutine, starting from the given block that ends in
     /// [TerminatorKind::Yield].
     ///
-    /// This path terminates in GeneratorDrop.
-    pub(crate) fn generator_drop_cleanup(&mut self, yield_block: BasicBlock) {
+    /// This path terminates in CoroutineDrop.
+    pub(crate) fn coroutine_drop_cleanup(&mut self, yield_block: BasicBlock) {
         debug_assert!(
             matches!(
                 self.cfg.block_data(yield_block).terminator().kind,
                 TerminatorKind::Yield { .. }
             ),
-            "generator_drop_cleanup called on block with non-yield terminator."
+            "coroutine_drop_cleanup called on block with non-yield terminator."
         );
         let (uncached_scope, mut cached_drop) = self
             .scopes
@@ -1156,18 +1156,18 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
             .enumerate()
             .rev()
             .find_map(|(scope_idx, scope)| {
-                scope.cached_generator_drop_block.map(|cached_block| (scope_idx + 1, cached_block))
+                scope.cached_coroutine_drop_block.map(|cached_block| (scope_idx + 1, cached_block))
             })
             .unwrap_or((0, ROOT_NODE));
 
         for scope in &mut self.scopes.scopes[uncached_scope..] {
             for drop in &scope.drops {
-                cached_drop = self.scopes.generator_drops.add_drop(*drop, cached_drop);
+                cached_drop = self.scopes.coroutine_drops.add_drop(*drop, cached_drop);
             }
-            scope.cached_generator_drop_block = Some(cached_drop);
+            scope.cached_coroutine_drop_block = Some(cached_drop);
         }
 
-        self.scopes.generator_drops.add_entry(yield_block, cached_drop);
+        self.scopes.coroutine_drops.add_entry(yield_block, cached_drop);
     }
 
     /// Utility function for *non*-scope code to build their own drops
@@ -1274,7 +1274,7 @@ fn build_scope_drops<'tcx>(
     // drops panic (panicking while unwinding will abort, so there's no need for
     // another set of arrows).
     //
-    // For generators, we unwind from a drop on a local to its StorageDead
+    // For coroutines, we unwind from a drop on a local to its StorageDead
     // statement. For other functions we don't worry about StorageDead. The
     // drops for the unwind path should have already been generated by
     // `diverge_cleanup_gen`.
@@ -1346,7 +1346,7 @@ impl<'a, 'tcx: 'a> Builder<'a, 'tcx> {
         blocks[ROOT_NODE] = continue_block;
 
         drops.build_mir::<ExitScopes>(&mut self.cfg, &mut blocks);
-        let is_generator = self.generator_kind.is_some();
+        let is_coroutine = self.coroutine_kind.is_some();
 
         // Link the exit drop tree to unwind drop tree.
         if drops.drops.iter().any(|(drop, _)| drop.kind == DropKind::Value) {
@@ -1355,7 +1355,7 @@ impl<'a, 'tcx: 'a> Builder<'a, 'tcx> {
             for (drop_idx, drop_data) in drops.drops.iter_enumerated().skip(1) {
                 match drop_data.0.kind {
                     DropKind::Storage => {
-                        if is_generator {
+                        if is_coroutine {
                             let unwind_drop = self
                                 .scopes
                                 .unwind_drops
@@ -1381,10 +1381,10 @@ impl<'a, 'tcx: 'a> Builder<'a, 'tcx> {
         blocks[ROOT_NODE].map(BasicBlock::unit)
     }
 
-    /// Build the unwind and generator drop trees.
+    /// Build the unwind and coroutine drop trees.
     pub(crate) fn build_drop_trees(&mut self) {
-        if self.generator_kind.is_some() {
-            self.build_generator_drop_trees();
+        if self.coroutine_kind.is_some() {
+            self.build_coroutine_drop_trees();
         } else {
             Self::build_unwind_tree(
                 &mut self.cfg,
@@ -1395,18 +1395,18 @@ impl<'a, 'tcx: 'a> Builder<'a, 'tcx> {
         }
     }
 
-    fn build_generator_drop_trees(&mut self) {
-        // Build the drop tree for dropping the generator while it's suspended.
-        let drops = &mut self.scopes.generator_drops;
+    fn build_coroutine_drop_trees(&mut self) {
+        // Build the drop tree for dropping the coroutine while it's suspended.
+        let drops = &mut self.scopes.coroutine_drops;
         let cfg = &mut self.cfg;
         let fn_span = self.fn_span;
         let mut blocks = IndexVec::from_elem(None, &drops.drops);
-        drops.build_mir::<GeneratorDrop>(cfg, &mut blocks);
+        drops.build_mir::<CoroutineDrop>(cfg, &mut blocks);
         if let Some(root_block) = blocks[ROOT_NODE] {
             cfg.terminate(
                 root_block,
                 SourceInfo::outermost(fn_span),
-                TerminatorKind::GeneratorDrop,
+                TerminatorKind::CoroutineDrop,
             );
         }
 
@@ -1416,11 +1416,11 @@ impl<'a, 'tcx: 'a> Builder<'a, 'tcx> {
         Self::build_unwind_tree(cfg, unwind_drops, fn_span, resume_block);
 
         // Build the drop tree for unwinding when dropping a suspended
-        // generator.
+        // coroutine.
        //
        // This is a different tree to the standard unwind paths here to
        // prevent drop elaboration from creating drop flags that would have
-        // to be captured by the generator. I'm not sure how important this
+        // to be captured by the coroutine. I'm not sure how important this
        // optimization is, but it is here.
        for (drop_idx, drop_data) in drops.drops.iter_enumerated() {
            if let DropKind::Value = drop_data.0.kind {
@@ -1461,9 +1461,9 @@ impl<'tcx> DropTreeBuilder<'tcx> for ExitScopes {
     }
 }
 
-struct GeneratorDrop;
+struct CoroutineDrop;
 
-impl<'tcx> DropTreeBuilder<'tcx> for GeneratorDrop {
+impl<'tcx> DropTreeBuilder<'tcx> for CoroutineDrop {
     fn make_block(cfg: &mut CFG<'tcx>) -> BasicBlock {
         cfg.start_new_block()
     }
@@ -1474,7 +1474,7 @@ impl<'tcx> DropTreeBuilder<'tcx> for GeneratorDrop {
         } else {
             span_bug!(
                 term.source_info.span,
-                "cannot enter generator drop tree from {:?}",
+                "cannot enter coroutine drop tree from {:?}",
                 term.kind
             )
         }
@@ -1511,7 +1511,7 @@ impl<'tcx> DropTreeBuilder<'tcx> for Unwind {
             | TerminatorKind::Return
             | TerminatorKind::Unreachable
             | TerminatorKind::Yield { .. }
-            | TerminatorKind::GeneratorDrop
+            | TerminatorKind::CoroutineDrop
             | TerminatorKind::FalseEdge { .. } => {
                 span_bug!(term.source_info.span, "cannot unwind from {:?}", term.kind)
             }
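The renamed `coroutine_drops` tree, like `unwind_drops`, records each deferred drop together with a parent index, so scopes that share outer drops share nodes; the `add_drop(*drop, cached_drop)` calls above extend such a chain. A toy parent-pointer model (not rustc's `DropTree`):

    #[derive(Clone, Copy)]
    struct DropIdx(usize);

    struct DropTree {
        // (local to drop, parent node); node 0 is the synthetic root.
        drops: Vec<(&'static str, DropIdx)>,
    }

    impl DropTree {
        fn new() -> Self {
            DropTree { drops: vec![("<root>", DropIdx(0))] }
        }

        fn add_drop(&mut self, local: &'static str, parent: DropIdx) -> DropIdx {
            self.drops.push((local, parent));
            DropIdx(self.drops.len() - 1)
        }

        // Walk from a leaf back to the root: the order drops run on that path.
        fn path(&self, mut node: DropIdx) -> Vec<&'static str> {
            let mut out = Vec::new();
            while node.0 != 0 {
                out.push(self.drops[node.0].0);
                node = self.drops[node.0].1;
            }
            out
        }
    }

    fn main() {
        let mut tree = DropTree::new();
        let a = tree.add_drop("a", DropIdx(0)); // outer scope
        let b = tree.add_drop("b", a);          // inner scope, below `a`
        let c = tree.add_drop("c", a);          // sibling scope, shares `a`
        assert_eq!(tree.path(b), vec!["b", "a"]);
        assert_eq!(tree.path(c), vec!["c", "a"]);
    }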