From 20431706a863f92cb37dc512fef6e48d192aaf2c Mon Sep 17 00:00:00 2001
From: Daniel Baumann
Date: Wed, 17 Apr 2024 14:11:38 +0200
Subject: Merging upstream version 1.66.0+dfsg1.

Signed-off-by: Daniel Baumann
---
 compiler/rustc_codegen_ssa/src/mir/block.rs   | 104 ++++++++++++++------------
 compiler/rustc_codegen_ssa/src/mir/mod.rs     |  20 ++---
 compiler/rustc_codegen_ssa/src/mir/operand.rs |   2 +-
 compiler/rustc_codegen_ssa/src/mir/place.rs   |  16 ++++
 compiler/rustc_codegen_ssa/src/mir/rvalue.rs  |  28 +++----
 5 files changed, 99 insertions(+), 71 deletions(-)

diff --git a/compiler/rustc_codegen_ssa/src/mir/block.rs b/compiler/rustc_codegen_ssa/src/mir/block.rs
index a6b226ef7..29b7c9b0a 100644
--- a/compiler/rustc_codegen_ssa/src/mir/block.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/block.rs
@@ -63,7 +63,9 @@ impl<'a, 'tcx> TerminatorCodegenHelper<'tcx> {
         }
     }
 
-    fn lltarget<Bx: BuilderMethods<'a, 'tcx>>(
+    /// Get a basic block (creating it if necessary), possibly with a landing
+    /// pad next to it.
+    fn llbb_with_landing_pad<Bx: BuilderMethods<'a, 'tcx>>(
         &self,
         fx: &mut FunctionCx<'a, 'tcx, Bx>,
         target: mir::BasicBlock,
@@ -73,32 +75,36 @@ impl<'a, 'tcx> TerminatorCodegenHelper<'tcx> {
         let target_funclet = fx.cleanup_kinds[target].funclet_bb(target);
         match (self.funclet_bb, target_funclet) {
             (None, None) => (lltarget, false),
-            (Some(f), Some(t_f)) if f == t_f || !base::wants_msvc_seh(fx.cx.tcx().sess) => {
-                (lltarget, false)
-            }
             // jump *into* cleanup - need a landing pad if GNU, cleanup pad if MSVC
             (None, Some(_)) => (fx.landing_pad_for(target), false),
             (Some(_), None) => span_bug!(span, "{:?} - jump out of cleanup?", self.terminator),
-            (Some(_), Some(_)) => (fx.landing_pad_for(target), true),
+            (Some(f), Some(t_f)) => {
+                if f == t_f || !base::wants_msvc_seh(fx.cx.tcx().sess) {
+                    (lltarget, false)
+                } else {
+                    (fx.landing_pad_for(target), true)
+                }
+            }
         }
     }
 
-    /// Create a basic block.
-    fn llblock<Bx: BuilderMethods<'a, 'tcx>>(
+    /// Get a basic block (creating it if necessary), possibly with cleanup
+    /// stuff in it or next to it.
+    fn llbb_with_cleanup<Bx: BuilderMethods<'a, 'tcx>>(
         &self,
         fx: &mut FunctionCx<'a, 'tcx, Bx>,
         target: mir::BasicBlock,
     ) -> Bx::BasicBlock {
-        let (lltarget, is_cleanupret) = self.lltarget(fx, target);
+        let (lltarget, is_cleanupret) = self.llbb_with_landing_pad(fx, target);
         if is_cleanupret {
             // MSVC cross-funclet jump - need a trampoline
-
-            debug!("llblock: creating cleanup trampoline for {:?}", target);
+            debug_assert!(base::wants_msvc_seh(fx.cx.tcx().sess));
+            debug!("llbb_with_cleanup: creating cleanup trampoline for {:?}", target);
             let name = &format!("{:?}_cleanup_trampoline_{:?}", self.bb, target);
-            let trampoline = Bx::append_block(fx.cx, fx.llfn, name);
-            let mut trampoline_bx = Bx::build(fx.cx, trampoline);
+            let trampoline_llbb = Bx::append_block(fx.cx, fx.llfn, name);
+            let mut trampoline_bx = Bx::build(fx.cx, trampoline_llbb);
             trampoline_bx.cleanup_ret(self.funclet(fx).unwrap(), Some(lltarget));
-            trampoline
+            trampoline_llbb
         } else {
             lltarget
         }
     }
@@ -110,10 +116,11 @@ impl<'a, 'tcx> TerminatorCodegenHelper<'tcx> {
         bx: &mut Bx,
         target: mir::BasicBlock,
     ) {
-        let (lltarget, is_cleanupret) = self.lltarget(fx, target);
+        let (lltarget, is_cleanupret) = self.llbb_with_landing_pad(fx, target);
         if is_cleanupret {
-            // micro-optimization: generate a `ret` rather than a jump
+            // MSVC micro-optimization: generate a `ret` rather than a jump
             // to a trampoline.
+            debug_assert!(base::wants_msvc_seh(fx.cx.tcx().sess));
             bx.cleanup_ret(self.funclet(fx).unwrap(), Some(lltarget));
         } else {
             bx.br(lltarget);
         }
@@ -138,7 +145,7 @@ impl<'a, 'tcx> TerminatorCodegenHelper<'tcx> {
         let fn_ty = bx.fn_decl_backend_type(&fn_abi);
 
         let unwind_block = if let Some(cleanup) = cleanup.filter(|_| fn_abi.can_unwind) {
-            Some(self.llblock(fx, cleanup))
+            Some(self.llbb_with_cleanup(fx, cleanup))
         } else if fx.mir[self.bb].is_cleanup
             && fn_abi.can_unwind
             && !base::wants_msvc_seh(fx.cx.tcx().sess)
@@ -162,9 +169,15 @@ impl<'a, 'tcx> TerminatorCodegenHelper<'tcx> {
             } else {
                 fx.unreachable_block()
             };
-            let invokeret =
-                bx.invoke(fn_ty, fn_ptr, &llargs, ret_llbb, unwind_block, self.funclet(fx));
-            bx.apply_attrs_callsite(&fn_abi, invokeret);
+            let invokeret = bx.invoke(
+                fn_ty,
+                Some(&fn_abi),
+                fn_ptr,
+                &llargs,
+                ret_llbb,
+                unwind_block,
+                self.funclet(fx),
+            );
             if fx.mir[self.bb].is_cleanup {
                 bx.do_not_inline(invokeret);
             }
@@ -178,8 +191,7 @@ impl<'a, 'tcx> TerminatorCodegenHelper<'tcx> {
                 fx.store_return(bx, ret_dest, &fn_abi.ret, invokeret);
             }
         } else {
-            let llret = bx.call(fn_ty, fn_ptr, &llargs, self.funclet(fx));
-            bx.apply_attrs_callsite(&fn_abi, llret);
+            let llret = bx.call(fn_ty, Some(&fn_abi), fn_ptr, &llargs, self.funclet(fx));
             if fx.mir[self.bb].is_cleanup {
                 // Cleanup is always the cold path. Don't inline
                 // drop glue. Also, when there is a deeply-nested
@@ -226,7 +238,7 @@ impl<'a, 'tcx> TerminatorCodegenHelper<'tcx> {
                 options,
                 line_spans,
                 instance,
-                Some((ret_llbb, self.llblock(fx, cleanup), self.funclet(fx))),
+                Some((ret_llbb, self.llbb_with_cleanup(fx, cleanup), self.funclet(fx))),
             );
         } else {
             bx.codegen_inline_asm(template, &operands, options, line_spans, instance, None);
         }
     }
@@ -276,8 +288,8 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
         if target_iter.len() == 1 {
             // If there are two targets (one conditional, one fallback), emit br instead of switch
             let (test_value, target) = target_iter.next().unwrap();
-            let lltrue = helper.llblock(self, target);
-            let llfalse = helper.llblock(self, targets.otherwise());
+            let lltrue = helper.llbb_with_cleanup(self, target);
+            let llfalse = helper.llbb_with_cleanup(self, targets.otherwise());
             if switch_ty == bx.tcx().types.bool {
                 // Don't generate trivial icmps when switching on bool
                 match test_value {
@@ -294,8 +306,8 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
         } else {
             bx.switch(
                 discr.immediate(),
-                helper.llblock(self, targets.otherwise()),
-                target_iter.map(|(value, target)| (value, helper.llblock(self, target))),
+                helper.llbb_with_cleanup(self, targets.otherwise()),
+                target_iter.map(|(value, target)| (value, helper.llbb_with_cleanup(self, target))),
             );
         }
     }
@@ -525,7 +537,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
         let cond = bx.expect(cond, expected);
 
         // Create the failure block and the conditional branch to it.
-        let lltarget = helper.llblock(self, target);
+        let lltarget = helper.llbb_with_cleanup(self, target);
         let panic_block = bx.append_sibling_block("panic");
         if expected {
             bx.cond_br(cond, lltarget, panic_block);
@@ -1454,20 +1466,20 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
             //      bar();
             //    }
             Some(&mir::TerminatorKind::Abort) => {
-                let cs_bb =
+                let cs_llbb =
                     Bx::append_block(self.cx, self.llfn, &format!("cs_funclet{:?}", bb));
-                let cp_bb =
+                let cp_llbb =
                     Bx::append_block(self.cx, self.llfn, &format!("cp_funclet{:?}", bb));
-                ret_llbb = cs_bb;
+                ret_llbb = cs_llbb;
 
-                let mut cs_bx = Bx::build(self.cx, cs_bb);
-                let cs = cs_bx.catch_switch(None, None, &[cp_bb]);
+                let mut cs_bx = Bx::build(self.cx, cs_llbb);
+                let cs = cs_bx.catch_switch(None, None, &[cp_llbb]);
 
                 // The "null" here is actually a RTTI type descriptor for the
                 // C++ personality function, but `catch (...)` has no type so
                 // it's null. The 64 here is actually a bitfield which
                 // represents that this is a catch-all block.
-                let mut cp_bx = Bx::build(self.cx, cp_bb);
+                let mut cp_bx = Bx::build(self.cx, cp_llbb);
                 let null = cp_bx.const_null(
                     cp_bx.type_i8p_ext(cp_bx.cx().data_layout().instruction_address_space),
                 );
@@ -1476,10 +1488,10 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                 cp_bx.br(llbb);
             }
             _ => {
-                let cleanup_bb =
+                let cleanup_llbb =
                     Bx::append_block(self.cx, self.llfn, &format!("funclet_{:?}", bb));
-                ret_llbb = cleanup_bb;
-                let mut cleanup_bx = Bx::build(self.cx, cleanup_bb);
+                ret_llbb = cleanup_llbb;
+                let mut cleanup_bx = Bx::build(self.cx, cleanup_llbb);
                 funclet = cleanup_bx.cleanup_pad(None, &[]);
                 cleanup_bx.br(llbb);
             }
@@ -1487,19 +1499,20 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
             self.funclets[bb] = Some(funclet);
             ret_llbb
         } else {
-            let bb = Bx::append_block(self.cx, self.llfn, "cleanup");
-            let mut bx = Bx::build(self.cx, bb);
+            let cleanup_llbb = Bx::append_block(self.cx, self.llfn, "cleanup");
+            let mut cleanup_bx = Bx::build(self.cx, cleanup_llbb);
 
             let llpersonality = self.cx.eh_personality();
             let llretty = self.landing_pad_type();
-            let lp = bx.cleanup_landing_pad(llretty, llpersonality);
+            let lp = cleanup_bx.cleanup_landing_pad(llretty, llpersonality);
 
-            let slot = self.get_personality_slot(&mut bx);
-            slot.storage_live(&mut bx);
-            Pair(bx.extract_value(lp, 0), bx.extract_value(lp, 1)).store(&mut bx, slot);
+            let slot = self.get_personality_slot(&mut cleanup_bx);
+            slot.storage_live(&mut cleanup_bx);
+            Pair(cleanup_bx.extract_value(lp, 0), cleanup_bx.extract_value(lp, 1))
+                .store(&mut cleanup_bx, slot);
 
-            bx.br(llbb);
-            bx.llbb()
+            cleanup_bx.br(llbb);
+            cleanup_llbb
         }
     }
@@ -1533,8 +1546,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
         let (fn_abi, fn_ptr) = common::build_langcall(&bx, None, LangItem::PanicNoUnwind);
         let fn_ty = bx.fn_decl_backend_type(&fn_abi);
 
-        let llret = bx.call(fn_ty, fn_ptr, &[], None);
-        bx.apply_attrs_callsite(&fn_abi, llret);
+        let llret = bx.call(fn_ty, Some(&fn_abi), fn_ptr, &[], None);
         bx.do_not_inline(llret);
 
         bx.unreachable();

diff --git a/compiler/rustc_codegen_ssa/src/mir/mod.rs b/compiler/rustc_codegen_ssa/src/mir/mod.rs
index 2b931bfc9..da9aaf00e 100644
--- a/compiler/rustc_codegen_ssa/src/mir/mod.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/mod.rs
@@ -148,10 +148,10 @@ pub fn codegen_mir<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
     let debug_context = cx.create_function_debug_context(instance, &fn_abi, llfn, &mir);
 
     let start_llbb = Bx::append_block(cx, llfn, "start");
llfn, "start"); - let mut bx = Bx::build(cx, start_llbb); + let mut start_bx = Bx::build(cx, start_llbb); if mir.basic_blocks.iter().any(|bb| bb.is_cleanup) { - bx.set_personality_fn(cx.eh_personality()); + start_bx.set_personality_fn(cx.eh_personality()); } let cleanup_kinds = analyze::cleanup_kinds(&mir); @@ -180,7 +180,7 @@ pub fn codegen_mir<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>( caller_location: None, }; - fx.per_local_var_debug_info = fx.compute_per_local_var_debug_info(&mut bx); + fx.per_local_var_debug_info = fx.compute_per_local_var_debug_info(&mut start_bx); // Evaluate all required consts; codegen later assumes that CTFE will never fail. let mut all_consts_ok = true; @@ -206,29 +206,29 @@ pub fn codegen_mir<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>( // Allocate variable and temp allocas fx.locals = { - let args = arg_local_refs(&mut bx, &mut fx, &memory_locals); + let args = arg_local_refs(&mut start_bx, &mut fx, &memory_locals); let mut allocate_local = |local| { let decl = &mir.local_decls[local]; - let layout = bx.layout_of(fx.monomorphize(decl.ty)); + let layout = start_bx.layout_of(fx.monomorphize(decl.ty)); assert!(!layout.ty.has_erasable_regions()); if local == mir::RETURN_PLACE && fx.fn_abi.ret.is_indirect() { debug!("alloc: {:?} (return place) -> place", local); - let llretptr = bx.get_param(0); + let llretptr = start_bx.get_param(0); return LocalRef::Place(PlaceRef::new_sized(llretptr, layout)); } if memory_locals.contains(local) { debug!("alloc: {:?} -> place", local); if layout.is_unsized() { - LocalRef::UnsizedPlace(PlaceRef::alloca_unsized_indirect(&mut bx, layout)) + LocalRef::UnsizedPlace(PlaceRef::alloca_unsized_indirect(&mut start_bx, layout)) } else { - LocalRef::Place(PlaceRef::alloca(&mut bx, layout)) + LocalRef::Place(PlaceRef::alloca(&mut start_bx, layout)) } } else { debug!("alloc: {:?} -> operand", local); - LocalRef::new_operand(&mut bx, layout) + LocalRef::new_operand(&mut start_bx, layout) } }; @@ -240,7 +240,7 @@ pub fn codegen_mir<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>( }; // Apply debuginfo to the newly allocated locals. - fx.debug_introduce_locals(&mut bx); + fx.debug_introduce_locals(&mut start_bx); // Codegen the body of each block using reverse postorder for (bb, _) in traversal::reverse_postorder(&mir) { diff --git a/compiler/rustc_codegen_ssa/src/mir/operand.rs b/compiler/rustc_codegen_ssa/src/mir/operand.rs index 37b1e0362..e6ba642a7 100644 --- a/compiler/rustc_codegen_ssa/src/mir/operand.rs +++ b/compiler/rustc_codegen_ssa/src/mir/operand.rs @@ -352,7 +352,7 @@ impl<'a, 'tcx, V: CodegenObject> OperandValue { // Allocate an appropriate region on the stack, and copy the value into it let (llsize, _) = glue::size_and_align_of_dst(bx, unsized_ty, Some(llextra)); - let lldst = bx.array_alloca(bx.cx().type_i8(), llsize, max_align); + let lldst = bx.byte_array_alloca(llsize, max_align); bx.memcpy(lldst, max_align, llptr, min_align, llsize, flags); // Store the allocated region and the extra to the indirect place. diff --git a/compiler/rustc_codegen_ssa/src/mir/place.rs b/compiler/rustc_codegen_ssa/src/mir/place.rs index 13d8f6edd..9c18df564 100644 --- a/compiler/rustc_codegen_ssa/src/mir/place.rs +++ b/compiler/rustc_codegen_ssa/src/mir/place.rs @@ -400,6 +400,21 @@ impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> { downcast } + pub fn project_type>( + &self, + bx: &mut Bx, + ty: Ty<'tcx>, + ) -> Self { + let mut downcast = *self; + downcast.layout = bx.cx().layout_of(ty); + + // Cast to the appropriate type. 
+        let variant_ty = bx.cx().backend_type(downcast.layout);
+        downcast.llval = bx.pointercast(downcast.llval, bx.cx().type_ptr_to(variant_ty));
+
+        downcast
+    }
+
     pub fn storage_live<Bx: BuilderMethods<'a, 'tcx, Value = V>>(&self, bx: &mut Bx) {
         bx.lifetime_start(self.llval, self.layout.size);
     }
@@ -442,6 +457,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
             mir::ProjectionElem::Field(ref field, _) => {
                 cg_base.project_field(bx, field.index())
             }
+            mir::ProjectionElem::OpaqueCast(ty) => cg_base.project_type(bx, ty),
             mir::ProjectionElem::Index(index) => {
                 let index = &mir::Operand::Copy(mir::Place::from(index));
                 let index = self.codegen_operand(bx, index);

diff --git a/compiler/rustc_codegen_ssa/src/mir/rvalue.rs b/compiler/rustc_codegen_ssa/src/mir/rvalue.rs
index 56852b0fc..4aab31fbf 100644
--- a/compiler/rustc_codegen_ssa/src/mir/rvalue.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/rvalue.rs
@@ -4,7 +4,6 @@ use super::{FunctionCx, LocalRef};
 
 use crate::base;
 use crate::common::{self, IntPredicate};
-use crate::meth::get_vtable;
 use crate::traits::*;
 use crate::MemFlags;
 
@@ -250,7 +249,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                         OperandValue::Pair(lldata, llextra)
                     }
                     mir::CastKind::Pointer(PointerCast::MutToConstPointer)
-                    | mir::CastKind::Misc
+                    | mir::CastKind::PtrToPtr
                         if bx.cx().is_backend_scalar_pair(operand.layout) =>
                     {
                         if let OperandValue::Pair(data_ptr, meta) = operand.val {
@@ -273,24 +272,25 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                         }
                     }
                     mir::CastKind::DynStar => {
-                        let data = match operand.val {
+                        let (lldata, llextra) = match operand.val {
                             OperandValue::Ref(_, _, _) => todo!(),
-                            OperandValue::Immediate(v) => v,
-                            OperandValue::Pair(_, _) => todo!(),
+                            OperandValue::Immediate(v) => (v, None),
+                            OperandValue::Pair(v, l) => (v, Some(l)),
                         };
-                        let trait_ref =
-                            if let ty::Dynamic(data, _, ty::DynStar) = cast.ty.kind() {
-                                data.principal()
-                            } else {
-                                bug!("Only valid to do a DynStar cast into a DynStar type")
-                            };
-                        let vtable = get_vtable(bx.cx(), source.ty(self.mir, bx.tcx()), trait_ref);
-                        OperandValue::Pair(data, vtable)
+                        let (lldata, llextra) =
+                            base::cast_to_dyn_star(&mut bx, lldata, operand.layout, cast.ty, llextra);
+                        OperandValue::Pair(lldata, llextra)
                     }
                     mir::CastKind::Pointer(
                         PointerCast::MutToConstPointer | PointerCast::ArrayToPointer,
                     )
-                    | mir::CastKind::Misc
+                    | mir::CastKind::IntToInt
+                    | mir::CastKind::FloatToInt
+                    | mir::CastKind::FloatToFloat
+                    | mir::CastKind::IntToFloat
+                    | mir::CastKind::PtrToPtr
+                    | mir::CastKind::FnPtrToPtr
                     // Since int2ptr can have arbitrary integer types as input (so we have to do
                     // sign extension and all that), it is currently best handled in the same code
                     // path as the other integer-to-X casts.
-- 
cgit v1.2.3
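
The mechanical theme of the block.rs hunks is an ABI-threading change: `call` and
`invoke` now take an `Option<&FnAbi>` and apply call-site attributes themselves,
which is why every paired `bx.apply_attrs_callsite(...)` line disappears. Below is
a minimal, compilable sketch of that shape; `Builder`, `FnAbi`, `Value`, `Type`,
and `DemoBuilder` are simplified stand-ins for illustration, not rustc's real
`BuilderMethods` machinery.

struct FnAbi;      // stand-in for rustc's `FnAbi<'tcx, Ty<'tcx>>`
#[derive(Clone, Copy)]
struct Value(u32); // stand-in for a backend value handle
struct Type;       // stand-in for a backend type handle

trait Builder {
    // Old shape (pre-patch), two steps that were easy to leave unpaired:
    //     let ret = bx.call(fn_ty, fn_ptr, args, funclet);
    //     bx.apply_attrs_callsite(&fn_abi, ret);
    // New shape: the ABI travels with the call itself.
    fn call(&mut self, fn_ty: Type, fn_abi: Option<&FnAbi>, fn_ptr: Value, args: &[Value]) -> Value;
}

struct DemoBuilder;

impl Builder for DemoBuilder {
    fn call(&mut self, _fn_ty: Type, fn_abi: Option<&FnAbi>, fn_ptr: Value, _args: &[Value]) -> Value {
        if fn_abi.is_some() {
            // A real backend would attach the ABI-implied call-site
            // attributes (sign/zero extension, by-ref passing, ...) here.
        }
        fn_ptr // placeholder "call result"
    }
}

fn main() {
    let mut bx = DemoBuilder;
    let fn_abi = FnAbi;
    // One call now does both jobs; no separate `apply_attrs_callsite`.
    let _ret = bx.call(Type, Some(&fn_abi), Value(0), &[]);
}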
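The new `project_type` in place.rs (used by the added `ProjectionElem::OpaqueCast`
arm) re-types a place without moving it: the `llval` pointer is kept, the layout
is swapped, and a `pointercast` fixes up the backend pointer type. A plain-Rust
analog of that "same storage, new view" move follows, assuming layout-compatible
types via `repr(transparent)`; it is illustrative, not rustc code.

#[repr(transparent)]
struct Opaque(u64); // hypothetical stand-in for a place typed at an opaque alias

fn main() {
    let concrete: u64 = 42;
    // Same address, new type. `repr(transparent)` guarantees the layouts
    // match, which is what keeps the pointer cast sound -- the moral
    // equivalent of swapping `layout` and `pointercast`ing `llval` above.
    let as_opaque: &Opaque = unsafe { &*(&concrete as *const u64 as *const Opaque) };
    assert_eq!(as_opaque.0, 42);
}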
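The rvalue.rs `DynStar` hunk stops open-coding the vtable lookup (`get_vtable`)
and funnels the cast through `base::cast_to_dyn_star`, which yields the data and
vtable words that `OperandValue::Pair(lldata, llextra)` carries. A plain-Rust
analog of that two-word representation follows; `Vtable`, `DynStar`, and
`cast_to_dyn_star_analog` are illustrative names, not rustc APIs.

struct Vtable {
    print: fn(usize),
}

fn print_value(data: usize) {
    println!("value = {data}");
}

static USIZE_VTABLE: Vtable = Vtable { print: print_value };

struct DynStar {
    data: usize,             // pointer-sized payload (the `lldata` word)
    vtable: &'static Vtable, // the `llextra` word
}

fn cast_to_dyn_star_analog(value: usize) -> DynStar {
    // Pack the payload with the vtable for its (single) trait impl,
    // mirroring the Pair the real cast produces for the backend.
    DynStar { data: value, vtable: &USIZE_VTABLE }
}

fn main() {
    let d = cast_to_dyn_star_analog(7);
    (d.vtable.print)(d.data); // dynamic dispatch through the pair
}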