author    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-30 03:57:31 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-30 03:57:31 +0000
commit    dc0db358abe19481e475e10c32149b53370f1a1c (patch)
tree      ab8ce99c4b255ce46f99ef402c27916055b899ee /compiler/rustc_codegen_ssa/src/mir
parent    Releasing progress-linux version 1.71.1+dfsg1-2~progress7.99u1. (diff)
Merging upstream version 1.72.1+dfsg1.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'compiler/rustc_codegen_ssa/src/mir')
 compiler/rustc_codegen_ssa/src/mir/block.rs        |  62
 compiler/rustc_codegen_ssa/src/mir/constant.rs     |  64
 compiler/rustc_codegen_ssa/src/mir/coverageinfo.rs |  41
 compiler/rustc_codegen_ssa/src/mir/debuginfo.rs    |  23
 compiler/rustc_codegen_ssa/src/mir/intrinsic.rs    |  65
 compiler/rustc_codegen_ssa/src/mir/locals.rs       |  75
 compiler/rustc_codegen_ssa/src/mir/mod.rs          |  61
 compiler/rustc_codegen_ssa/src/mir/operand.rs      | 122
 compiler/rustc_codegen_ssa/src/mir/place.rs        |   2
 compiler/rustc_codegen_ssa/src/mir/rvalue.rs       | 143
 compiler/rustc_codegen_ssa/src/mir/statement.rs    |   4
 11 files changed, 408 insertions(+), 254 deletions(-)
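Note: the unifying change across the files below is the new `OperandValue::ZeroSized` variant — zero-sized operands previously carried a poison `Immediate` and were special-cased with scattered `layout.is_zst()` checks. A minimal sketch of the idea, using simplified stand-in types rather than rustc's actual definitions:

    // Sketch only: simplified stand-ins for rustc's OperandValue, to show why a
    // dedicated zero-sized variant beats a poison immediate. `V` plays the role
    // of a backend value (e.g. an LLVM value handle).
    #[derive(Debug)]
    enum OperandValue<V> {
        Immediate(V), // a single backend immediate
        Pair(V, V),   // two immediates, e.g. a fat pointer
        Ref(V),       // a pointer to the value in memory
        ZeroSized,    // no bytes, hence no backend value needed at all
    }

    impl<V: std::fmt::Debug> OperandValue<V> {
        // Storing a ZST is a no-op; every other variant lowers to a real store.
        fn store(&self, dest: &str) {
            match self {
                OperandValue::ZeroSized => {} // nothing to write
                other => println!("store {other:?} into {dest}"),
            }
        }
    }

    fn main() {
        OperandValue::<i32>::ZeroSized.store("%ret"); // prints nothing
        OperandValue::Immediate(42).store("%ret");
    }

With a dedicated variant, every consumer is forced by the match to decide what a ZST means for it (usually: do nothing), instead of relying on ad-hoc `is_zst` checks.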
diff --git a/compiler/rustc_codegen_ssa/src/mir/block.rs b/compiler/rustc_codegen_ssa/src/mir/block.rs
index 3f0b64b11..9d1b3ce82 100644
--- a/compiler/rustc_codegen_ssa/src/mir/block.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/block.rs
@@ -1,5 +1,5 @@
 use super::operand::OperandRef;
-use super::operand::OperandValue::{Immediate, Pair, Ref};
+use super::operand::OperandValue::{Immediate, Pair, Ref, ZeroSized};
 use super::place::PlaceRef;
 use super::{CachedLlbb, FunctionCx, LocalRef};
@@ -79,8 +79,8 @@ impl<'a, 'tcx> TerminatorCodegenHelper<'tcx> {
             lltarget = fx.landing_pad_for(target);
         }
         if is_cleanupret {
-            // MSVC cross-funclet jump - need a trampoline
-            debug_assert!(base::wants_msvc_seh(fx.cx.tcx().sess));
+            // Cross-funclet jump - need a trampoline
+            debug_assert!(base::wants_new_eh_instructions(fx.cx.tcx().sess));
             debug!("llbb_with_cleanup: creating cleanup trampoline for {:?}", target);
             let name = &format!("{:?}_cleanup_trampoline_{:?}", self.bb, target);
             let trampoline_llbb = Bx::append_block(fx.cx, fx.llfn, name);
@@ -177,9 +177,16 @@ impl<'a, 'tcx> TerminatorCodegenHelper<'tcx> {
             mir::UnwindAction::Continue => None,
             mir::UnwindAction::Unreachable => None,
             mir::UnwindAction::Terminate => {
-                if fx.mir[self.bb].is_cleanup && base::wants_msvc_seh(fx.cx.tcx().sess) {
-                    // SEH will abort automatically if an exception tries to
+                if fx.mir[self.bb].is_cleanup && base::wants_new_eh_instructions(fx.cx.tcx().sess) {
+                    // MSVC SEH will abort automatically if an exception tries to
                     // propagate out from cleanup.
+
+                    // FIXME(@mirkootter): For wasm, we currently do not support terminate during
+                    // cleanup, because this requires a few more changes: The current code
+                    // caches the `terminate_block` for each function; funclet based code - however -
+                    // requires a different terminate_block for each funclet
+                    // Until this is implemented, we just do not unwind inside cleanup blocks
+
                     None
                 } else {
                     Some(fx.terminate_block())
@@ -427,6 +434,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                         assert_eq!(align, op.layout.align.abi, "return place is unaligned!");
                         llval
                     }
+                    ZeroSized => bug!("ZST return value shouldn't be in PassMode::Cast"),
                 };
                 let ty = bx.cast_backend_type(cast_ty);
                 let addr = bx.pointercast(llslot, bx.type_ptr_to(ty));
@@ -615,7 +623,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
             AssertKind::MisalignedPointerDereference { ref required, ref found } => {
                 let required = self.codegen_operand(bx, required).immediate();
                 let found = self.codegen_operand(bx, found).immediate();
-                // It's `fn panic_bounds_check(index: usize, len: usize)`,
+                // It's `fn panic_misaligned_pointer_dereference(required: usize, found: usize)`,
                 // and `#[track_caller]` adds an implicit third argument.
                 (LangItem::PanicMisalignedPointerDereference, vec![required, found, location])
             }
@@ -862,13 +870,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                 // promotes any complex rvalues to constants.
                 if i == 2 && intrinsic.as_str().starts_with("simd_shuffle") {
                     if let mir::Operand::Constant(constant) = arg {
-                        let c = self.eval_mir_constant(constant);
-                        let (llval, ty) = self.simd_shuffle_indices(
-                            &bx,
-                            constant.span,
-                            self.monomorphize(constant.ty()),
-                            c,
-                        );
+                        let (llval, ty) = self.simd_shuffle_indices(&bx, constant);
                         return OperandRef {
                             val: Immediate(llval),
                             layout: bx.layout_of(ty),
@@ -1279,7 +1281,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                 destination,
                 target,
                 unwind,
-                from_hir_call: _,
+                call_source: _,
                 fn_span,
             } => self.codegen_call_terminator(
                 helper,
@@ -1386,6 +1388,16 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                         (llval, align, true)
                     }
                 }
+                ZeroSized => match arg.mode {
+                    PassMode::Indirect { .. } => {
+                        // Though `extern "Rust"` doesn't pass ZSTs, some ABIs pass
+                        // a pointer for `repr(C)` structs even when empty, so get
+                        // one from an `alloca` (which can be left uninitialized).
+                        let scratch = PlaceRef::alloca(bx, arg.layout);
+                        (scratch.llval, scratch.align, true)
+                    }
+                    _ => bug!("ZST {op:?} wasn't ignored, but was passed with abi {arg:?}"),
+                },
             };
 
             if by_ref && !arg.is_indirect() {
@@ -1493,9 +1505,10 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
         if let Some(slot) = self.personality_slot {
             slot
         } else {
-            let layout = cx.layout_of(
-                cx.tcx().mk_tup(&[cx.tcx().mk_mut_ptr(cx.tcx().types.u8), cx.tcx().types.i32]),
-            );
+            let layout = cx.layout_of(Ty::new_tup(
+                cx.tcx(),
+                &[Ty::new_mut_ptr(cx.tcx(), cx.tcx().types.u8), cx.tcx().types.i32],
+            ));
             let slot = PlaceRef::alloca(bx, layout);
             self.personality_slot = Some(slot);
             slot
@@ -1517,7 +1530,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
     // FIXME(eddyb) rename this to `eh_pad_for_uncached`.
     fn landing_pad_for_uncached(&mut self, bb: mir::BasicBlock) -> Bx::BasicBlock {
         let llbb = self.llbb(bb);
-        if base::wants_msvc_seh(self.cx.sess()) {
+        if base::wants_new_eh_instructions(self.cx.sess()) {
             let cleanup_bb = Bx::append_block(self.cx, self.llfn, &format!("funclet_{:?}", bb));
             let mut cleanup_bx = Bx::build(self.cx, cleanup_bb);
             let funclet = cleanup_bx.cleanup_pad(None, &[]);
@@ -1576,6 +1589,15 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
             //      } catch (...) {
             //          bar();
             //      }
+            //
+            // which creates an IR snippet like
+            //
+            //      cs_terminate:
+            //         %cs = catchswitch within none [%cp_terminate] unwind to caller
+            //      cp_terminate:
+            //         %cp = catchpad within %cs [null, i32 64, null]
+            //         ...
+            llbb = Bx::append_block(self.cx, self.llfn, "cs_terminate");
             let cp_llbb = Bx::append_block(self.cx, self.llfn, "cp_terminate");
@@ -1718,7 +1740,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
             IndirectOperand(tmp, index) => {
                 let op = bx.load_operand(tmp);
                 tmp.storage_dead(bx);
-                self.locals[index] = LocalRef::Operand(op);
+                self.overwrite_local(index, LocalRef::Operand(op));
                 self.debug_introduce_local(bx, index);
             }
             DirectOperand(index) => {
@@ -1733,7 +1755,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                 } else {
                     OperandRef::from_immediate_or_packed_pair(bx, llval, ret_abi.layout)
                 };
-                self.locals[index] = LocalRef::Operand(op);
+                self.overwrite_local(index, LocalRef::Operand(op));
                 self.debug_introduce_local(bx, index);
             }
         }
diff --git a/compiler/rustc_codegen_ssa/src/mir/constant.rs b/compiler/rustc_codegen_ssa/src/mir/constant.rs
index 14fe84a14..babcf9bee 100644
--- a/compiler/rustc_codegen_ssa/src/mir/constant.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/constant.rs
@@ -5,7 +5,6 @@ use rustc_middle::mir;
 use rustc_middle::mir::interpret::{ConstValue, ErrorHandled};
 use rustc_middle::ty::layout::HasTyCtxt;
 use rustc_middle::ty::{self, Ty};
-use rustc_span::source_map::Span;
 use rustc_target::abi::Abi;
 
 use super::FunctionCx;
@@ -59,22 +58,54 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
         })
     }
 
+    /// This is a convenience helper for `simd_shuffle_indices`. It has the precondition
+    /// that the given `constant` is an `ConstantKind::Unevaluated` and must be convertible to
+    /// a `ValTree`. If you want a more general version of this, talk to `wg-const-eval` on zulip.
+    pub fn eval_unevaluated_mir_constant_to_valtree(
+        &self,
+        constant: &mir::Constant<'tcx>,
+    ) -> Result<Option<ty::ValTree<'tcx>>, ErrorHandled> {
+        let uv = match self.monomorphize(constant.literal) {
+            mir::ConstantKind::Unevaluated(uv, _) => uv.shrink(),
+            mir::ConstantKind::Ty(c) => match c.kind() {
+                // A constant that came from a const generic but was then used as an argument to old-style
+                // simd_shuffle (passing as argument instead of as a generic param).
+                rustc_type_ir::ConstKind::Value(valtree) => return Ok(Some(valtree)),
+                other => span_bug!(constant.span, "{other:#?}"),
+            },
+            // We should never encounter `ConstantKind::Val` unless MIR opts (like const prop) evaluate
+            // a constant and write that value back into `Operand`s. This could happen, but is unlikely.
+            // Also: all users of `simd_shuffle` are on unstable and already need to take a lot of care
+            // around intrinsics. For an issue to happen here, it would require a macro expanding to a
+            // `simd_shuffle` call without wrapping the constant argument in a `const {}` block, but
+            // the user pass through arbitrary expressions.
+            // FIXME(oli-obk): replace the magic const generic argument of `simd_shuffle` with a real
+            // const generic.
+            other => span_bug!(constant.span, "{other:#?}"),
+        };
+        let uv = self.monomorphize(uv);
+        self.cx.tcx().const_eval_resolve_for_typeck(
+            ty::ParamEnv::reveal_all(),
+            uv,
+            Some(constant.span),
+        )
+    }
+
     /// process constant containing SIMD shuffle indices
     pub fn simd_shuffle_indices(
         &mut self,
         bx: &Bx,
-        span: Span,
-        ty: Ty<'tcx>,
-        constant: Result<ConstValue<'tcx>, ErrorHandled>,
+        constant: &mir::Constant<'tcx>,
     ) -> (Bx::Value, Ty<'tcx>) {
-        constant
+        let ty = self.monomorphize(constant.ty());
+        let val = self
+            .eval_unevaluated_mir_constant_to_valtree(constant)
+            .ok()
+            .flatten()
             .map(|val| {
                 let field_ty = ty.builtin_index().unwrap();
-                let c = mir::ConstantKind::from_value(val, ty);
-                let values: Vec<_> = bx
-                    .tcx()
-                    .destructure_mir_constant(ty::ParamEnv::reveal_all(), c)
-                    .fields
+                let values: Vec<_> = val
+                    .unwrap_branch()
                     .iter()
                     .map(|field| {
                         if let Some(prim) = field.try_to_scalar() {
@@ -88,15 +119,14 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                         }
                     })
                     .collect();
-                let llval = bx.const_struct(&values, false);
-                (llval, c.ty())
+                bx.const_struct(&values, false)
             })
-            .unwrap_or_else(|_| {
-                bx.tcx().sess.emit_err(errors::ShuffleIndicesEvaluation { span });
+            .unwrap_or_else(|| {
+                bx.tcx().sess.emit_err(errors::ShuffleIndicesEvaluation { span: constant.span });
                 // We've errored, so we don't have to produce working code.
-                let ty = self.monomorphize(ty);
                 let llty = bx.backend_type(bx.layout_of(ty));
-                (bx.const_undef(llty), ty)
-            })
+                bx.const_undef(llty)
+            });
+        (val, ty)
     }
 }
diff --git a/compiler/rustc_codegen_ssa/src/mir/coverageinfo.rs b/compiler/rustc_codegen_ssa/src/mir/coverageinfo.rs
index f1fe49528..ee7046596 100644
--- a/compiler/rustc_codegen_ssa/src/mir/coverageinfo.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/coverageinfo.rs
@@ -1,13 +1,12 @@
 use crate::traits::*;
 
-use rustc_middle::mir::coverage::*;
 use rustc_middle::mir::Coverage;
 use rustc_middle::mir::SourceScope;
 
 use super::FunctionCx;
 
 impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
-    pub fn codegen_coverage(&self, bx: &mut Bx, coverage: Coverage, scope: SourceScope) {
+    pub fn codegen_coverage(&self, bx: &mut Bx, coverage: &Coverage, scope: SourceScope) {
         // Determine the instance that coverage data was originally generated for.
         let instance = if let Some(inlined) = scope.inlined_instance(&self.mir.source_scopes) {
             self.monomorphize(inlined)
@@ -15,41 +14,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
             self.instance
         };
 
-        let Coverage { kind, code_region } = coverage;
-        match kind {
-            CoverageKind::Counter { function_source_hash, id } => {
-                if bx.set_function_source_hash(instance, function_source_hash) {
-                    // If `set_function_source_hash()` returned true, the coverage map is enabled,
-                    // so continue adding the counter.
-                    if let Some(code_region) = code_region {
-                        // Note: Some counters do not have code regions, but may still be referenced
-                        // from expressions. In that case, don't add the counter to the coverage map,
-                        // but do inject the counter intrinsic.
-                        bx.add_coverage_counter(instance, id, code_region);
-                    }
-
-                    let coverageinfo = bx.tcx().coverageinfo(instance.def);
-
-                    let fn_name = bx.get_pgo_func_name_var(instance);
-                    let hash = bx.const_u64(function_source_hash);
-                    let num_counters = bx.const_u32(coverageinfo.num_counters);
-                    let index = bx.const_u32(id.zero_based_index());
-                    debug!(
-                        "codegen intrinsic instrprof.increment(fn_name={:?}, hash={:?}, num_counters={:?}, index={:?})",
-                        fn_name, hash, num_counters, index,
-                    );
-                    bx.instrprof_increment(fn_name, hash, num_counters, index);
-                }
-            }
-            CoverageKind::Expression { id, lhs, op, rhs } => {
-                bx.add_coverage_counter_expression(instance, id, lhs, op, rhs, code_region);
-            }
-            CoverageKind::Unreachable => {
-                bx.add_coverage_unreachable(
-                    instance,
-                    code_region.expect("unreachable regions always have code regions"),
-                );
-            }
-        }
+        // Handle the coverage info in a backend-specific way.
+        bx.add_coverage(instance, coverage);
     }
 }
diff --git a/compiler/rustc_codegen_ssa/src/mir/debuginfo.rs b/compiler/rustc_codegen_ssa/src/mir/debuginfo.rs
index bba2800fb..1ee89b3d5 100644
--- a/compiler/rustc_codegen_ssa/src/mir/debuginfo.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/debuginfo.rs
@@ -5,6 +5,7 @@ use rustc_middle::mir;
 use rustc_middle::ty;
 use rustc_middle::ty::layout::TyAndLayout;
 use rustc_middle::ty::layout::{HasTyCtxt, LayoutOf};
+use rustc_middle::ty::Ty;
 use rustc_session::config::DebugInfo;
 use rustc_span::symbol::{kw, Symbol};
 use rustc_span::{BytePos, Span};
@@ -248,7 +249,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
     }
 
     fn spill_operand_to_stack(
-        operand: &OperandRef<'tcx, Bx::Value>,
+        operand: OperandRef<'tcx, Bx::Value>,
         name: Option<String>,
         bx: &mut Bx,
     ) -> PlaceRef<'tcx, Bx::Value> {
@@ -352,6 +353,9 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                     bx.set_var_name(a, &(name.clone() + ".0"));
                     bx.set_var_name(b, &(name.clone() + ".1"));
                 }
+                OperandValue::ZeroSized => {
+                    // These never have a value to talk about
+                }
             },
             LocalRef::PendingOperand => {}
         }
@@ -372,7 +376,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                     return;
                 }
 
-                Self::spill_operand_to_stack(operand, name, bx)
+                Self::spill_operand_to_stack(*operand, name, bx)
             }
 
             LocalRef::Place(place) => *place,
@@ -418,9 +422,10 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
         let create_alloca = |bx: &mut Bx, place: PlaceRef<'tcx, Bx::Value>, refcount| {
             // Create a variable which will be a pointer to the actual value
-            let ptr_ty = bx
-                .tcx()
-                .mk_ptr(ty::TypeAndMut { mutbl: mir::Mutability::Mut, ty: place.layout.ty });
+            let ptr_ty = Ty::new_ptr(
+                bx.tcx(),
+                ty::TypeAndMut { mutbl: mir::Mutability::Mut, ty: place.layout.ty },
+            );
             let ptr_layout = bx.layout_of(ptr_ty);
             let alloca = PlaceRef::alloca(bx, ptr_layout);
             bx.set_var_name(alloca.llval, &format!("{}.ref{}.dbg.spill", var.name, refcount));
@@ -522,8 +527,10 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
         };
 
         for _ in 0..var.references {
-            var_ty =
-                bx.tcx().mk_ptr(ty::TypeAndMut { mutbl: mir::Mutability::Mut, ty: var_ty });
+            var_ty = Ty::new_ptr(
+                bx.tcx(),
+                ty::TypeAndMut { mutbl: mir::Mutability::Mut, ty: var_ty },
+            );
         }
 
         self.cx.create_dbg_var(var.name, var_ty, dbg_scope, var_kind, span)
@@ -547,7 +554,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                 if let Ok(operand) = self.eval_mir_constant_to_operand(bx, &c) {
                     self.set_debug_loc(bx, var.source_info);
                     let base = Self::spill_operand_to_stack(
-                        &operand,
+                        operand,
                         Some(var.name.to_string()),
                         bx,
                     );
diff --git a/compiler/rustc_codegen_ssa/src/mir/intrinsic.rs b/compiler/rustc_codegen_ssa/src/mir/intrinsic.rs
index 1479242f2..8a65dd593 100644
--- a/compiler/rustc_codegen_ssa/src/mir/intrinsic.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/intrinsic.rs
@@ -211,68 +211,15 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                 args[1].val.unaligned_volatile_store(bx, dst);
                 return;
             }
-            | sym::unchecked_div
-            | sym::unchecked_rem
-            | sym::unchecked_shl
-            | sym::unchecked_shr
-            | sym::unchecked_add
-            | sym::unchecked_sub
-            | sym::unchecked_mul
-            | sym::exact_div => {
+            sym::exact_div => {
                 let ty = arg_tys[0];
                 match int_type_width_signed(ty, bx.tcx()) {
-                    Some((_width, signed)) => match name {
-                        sym::exact_div => {
-                            if signed {
-                                bx.exactsdiv(args[0].immediate(), args[1].immediate())
-                            } else {
-                                bx.exactudiv(args[0].immediate(), args[1].immediate())
-                            }
-                        }
-                        sym::unchecked_div => {
-                            if signed {
-                                bx.sdiv(args[0].immediate(), args[1].immediate())
-                            } else {
-                                bx.udiv(args[0].immediate(), args[1].immediate())
-                            }
-                        }
-                        sym::unchecked_rem => {
-                            if signed {
-                                bx.srem(args[0].immediate(), args[1].immediate())
-                            } else {
-                                bx.urem(args[0].immediate(), args[1].immediate())
-                            }
-                        }
-                        sym::unchecked_shl => bx.shl(args[0].immediate(), args[1].immediate()),
-                        sym::unchecked_shr => {
-                            if signed {
-                                bx.ashr(args[0].immediate(), args[1].immediate())
-                            } else {
-                                bx.lshr(args[0].immediate(), args[1].immediate())
-                            }
-                        }
-                        sym::unchecked_add => {
-                            if signed {
-                                bx.unchecked_sadd(args[0].immediate(), args[1].immediate())
-                            } else {
-                                bx.unchecked_uadd(args[0].immediate(), args[1].immediate())
-                            }
-                        }
-                        sym::unchecked_sub => {
-                            if signed {
-                                bx.unchecked_ssub(args[0].immediate(), args[1].immediate())
-                            } else {
-                                bx.unchecked_usub(args[0].immediate(), args[1].immediate())
-                            }
-                        }
-                        sym::unchecked_mul => {
-                            if signed {
-                                bx.unchecked_smul(args[0].immediate(), args[1].immediate())
-                            } else {
-                                bx.unchecked_umul(args[0].immediate(), args[1].immediate())
+                    Some((_width, signed)) => {
+                        if signed {
+                            bx.exactsdiv(args[0].immediate(), args[1].immediate())
+                        } else {
+                            bx.exactudiv(args[0].immediate(), args[1].immediate())
                         }
-                        _ => bug!(),
                     },
                     None => {
                         bx.tcx().sess.emit_err(InvalidMonomorphization::BasicIntegerType { span, name, ty });
diff --git a/compiler/rustc_codegen_ssa/src/mir/locals.rs b/compiler/rustc_codegen_ssa/src/mir/locals.rs
new file mode 100644
index 000000000..378c54013
--- /dev/null
+++ b/compiler/rustc_codegen_ssa/src/mir/locals.rs
@@ -0,0 +1,75 @@
+//! Locals are in a private module as updating `LocalRef::Operand` has to
+//! be careful wrt to subtyping. To deal with this we only allow updates by using
+//! `FunctionCx::overwrite_local` which handles it automatically.
+use crate::mir::{FunctionCx, LocalRef};
+use crate::traits::BuilderMethods;
+use rustc_index::IndexVec;
+use rustc_middle::mir;
+use rustc_middle::ty::print::with_no_trimmed_paths;
+use std::ops::{Index, IndexMut};
+
+pub(super) struct Locals<'tcx, V> {
+    values: IndexVec<mir::Local, LocalRef<'tcx, V>>,
+}
+
+impl<'tcx, V> Index<mir::Local> for Locals<'tcx, V> {
+    type Output = LocalRef<'tcx, V>;
+    #[inline]
+    fn index(&self, index: mir::Local) -> &LocalRef<'tcx, V> {
+        &self.values[index]
+    }
+}
+
+/// To mutate locals, use `FunctionCx::overwrite_local` instead.
+impl<'tcx, V, Idx: ?Sized> !IndexMut<Idx> for Locals<'tcx, V> {}
+
+impl<'tcx, V> Locals<'tcx, V> {
+    pub(super) fn empty() -> Locals<'tcx, V> {
+        Locals { values: IndexVec::default() }
+    }
+
+    pub(super) fn indices(&self) -> impl DoubleEndedIterator<Item = mir::Local> + Clone + 'tcx {
+        self.values.indices()
+    }
+}
+
+impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
+    pub(super) fn initialize_locals(&mut self, values: Vec<LocalRef<'tcx, Bx::Value>>) {
+        assert!(self.locals.values.is_empty());
+        // FIXME(#115215): After #115025 get's merged this might not be necessary
+        for (local, value) in values.into_iter().enumerate() {
+            match value {
+                LocalRef::Place(_) | LocalRef::UnsizedPlace(_) | LocalRef::PendingOperand => (),
+                LocalRef::Operand(op) => {
+                    let local = mir::Local::from_usize(local);
+                    let expected_ty = self.monomorphize(self.mir.local_decls[local].ty);
+                    if expected_ty != op.layout.ty {
+                        warn!("Unexpected initial operand type. See the issues/114858");
+                    }
+                }
+            }
+            self.locals.values.push(value);
+        }
+    }
+
+    pub(super) fn overwrite_local(
+        &mut self,
+        local: mir::Local,
+        mut value: LocalRef<'tcx, Bx::Value>,
+    ) {
+        match value {
+            LocalRef::Place(_) | LocalRef::UnsizedPlace(_) | LocalRef::PendingOperand => (),
+            LocalRef::Operand(ref mut op) => {
+                let local_ty = self.monomorphize(self.mir.local_decls[local].ty);
+                if local_ty != op.layout.ty {
+                    // FIXME(#112651): This can be changed to an ICE afterwards.
+                    debug!("updating type of operand due to subtyping");
+                    with_no_trimmed_paths!(debug!(?op.layout.ty));
+                    with_no_trimmed_paths!(debug!(?local_ty));
+                    op.layout.ty = local_ty;
+                }
+            }
+        };
+
+        self.locals.values[local] = value;
+    }
+}
diff --git a/compiler/rustc_codegen_ssa/src/mir/mod.rs b/compiler/rustc_codegen_ssa/src/mir/mod.rs
index 1204c99e5..9ff6a2497 100644
--- a/compiler/rustc_codegen_ssa/src/mir/mod.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/mod.rs
@@ -1,21 +1,31 @@
 use crate::base;
 use crate::traits::*;
+use rustc_index::bit_set::BitSet;
+use rustc_index::IndexVec;
 use rustc_middle::mir;
 use rustc_middle::mir::interpret::ErrorHandled;
+use rustc_middle::mir::traversal;
 use rustc_middle::ty::layout::{FnAbiOf, HasTyCtxt, TyAndLayout};
 use rustc_middle::ty::{self, Instance, Ty, TyCtxt, TypeFoldable, TypeVisitableExt};
 use rustc_target::abi::call::{FnAbi, PassMode};
 use std::iter;
 
-use rustc_index::bit_set::BitSet;
-use rustc_index::IndexVec;
+mod analyze;
+mod block;
+pub mod constant;
+pub mod coverageinfo;
+pub mod debuginfo;
+mod intrinsic;
+mod locals;
+pub mod operand;
+pub mod place;
+mod rvalue;
+mod statement;
 
 use self::debuginfo::{FunctionDebugContext, PerLocalVarDebugInfo};
-use self::place::PlaceRef;
-use rustc_middle::mir::traversal;
-
 use self::operand::{OperandRef, OperandValue};
+use self::place::PlaceRef;
 
 // Used for tracking the state of generated basic blocks.
 enum CachedLlbb<T> {
@@ -91,7 +101,7 @@ pub struct FunctionCx<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> {
     ///
     /// Avoiding allocs can also be important for certain intrinsics,
     /// notably `expect`.
-    locals: IndexVec<mir::Local, LocalRef<'tcx, Bx::Value>>,
+    locals: locals::Locals<'tcx, Bx::Value>,
 
     /// All `VarDebugInfo` from the MIR body, partitioned by `Local`.
     /// This is `None` if no variable debuginfo/names are needed.
@@ -111,7 +121,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
         self.instance.subst_mir_and_normalize_erasing_regions(
             self.cx.tcx(),
             ty::ParamEnv::reveal_all(),
-            ty::EarlyBinder(value),
+            ty::EarlyBinder::bind(value),
         )
     }
 }
@@ -129,16 +139,13 @@ enum LocalRef<'tcx, V> {
     PendingOperand,
 }
 
-impl<'a, 'tcx, V: CodegenObject> LocalRef<'tcx, V> {
-    fn new_operand<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
-        bx: &mut Bx,
-        layout: TyAndLayout<'tcx>,
-    ) -> LocalRef<'tcx, V> {
+impl<'tcx, V: CodegenObject> LocalRef<'tcx, V> {
+    fn new_operand(layout: TyAndLayout<'tcx>) -> LocalRef<'tcx, V> {
         if layout.is_zst() {
             // Zero-size temporaries aren't always initialized, which
             // doesn't matter because they don't contain data, but
             // we need something in the operand.
-            LocalRef::Operand(OperandRef::new_zst(bx, layout))
+            LocalRef::Operand(OperandRef::zero_sized(layout))
         } else {
             LocalRef::PendingOperand
         }
@@ -172,7 +179,8 @@ pub fn codegen_mir<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
         start_bx.set_personality_fn(cx.eh_personality());
     }
 
-    let cleanup_kinds = base::wants_msvc_seh(cx.tcx().sess).then(|| analyze::cleanup_kinds(&mir));
+    let cleanup_kinds =
+        base::wants_new_eh_instructions(cx.tcx().sess).then(|| analyze::cleanup_kinds(&mir));
 
     let cached_llbbs: IndexVec<mir::BasicBlock, CachedLlbb<Bx::BasicBlock>> =
         mir.basic_blocks
@@ -195,7 +203,7 @@ pub fn codegen_mir<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
         cleanup_kinds,
         landing_pads: IndexVec::from_elem(None, &mir.basic_blocks),
         funclets: IndexVec::from_fn_n(|_| None, mir.basic_blocks.len()),
-        locals: IndexVec::new(),
+        locals: locals::Locals::empty(),
         debug_context,
         per_local_var_debug_info: None,
         caller_location: None,
@@ -226,7 +234,7 @@ pub fn codegen_mir<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
     let memory_locals = analyze::non_ssa_locals(&fx);
 
     // Allocate variable and temp allocas
-    fx.locals = {
+    let local_values = {
         let args = arg_local_refs(&mut start_bx, &mut fx, &memory_locals);
 
         let mut allocate_local = |local| {
@@ -249,7 +257,7 @@ pub fn codegen_mir<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
                 }
             } else {
                 debug!("alloc: {:?} -> operand", local);
-                LocalRef::new_operand(&mut start_bx, layout)
+                LocalRef::new_operand(layout)
             }
         };
 
@@ -259,6 +267,7 @@ pub fn codegen_mir<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
             .chain(mir.vars_and_temps_iter().map(allocate_local))
             .collect()
     };
+    fx.initialize_locals(local_values);
 
     // Apply debuginfo to the newly allocated locals.
     fx.debug_introduce_locals(&mut start_bx);
@@ -292,14 +301,13 @@ fn arg_local_refs<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
         .enumerate()
         .map(|(arg_index, local)| {
             let arg_decl = &mir.local_decls[local];
+            let arg_ty = fx.monomorphize(arg_decl.ty);
 
             if Some(local) == mir.spread_arg {
                 // This argument (e.g., the last argument in the "rust-call" ABI)
                 // is a tuple that was spread at the ABI level and now we have
                 // to reconstruct it into a tuple local variable, from multiple
                 // individual LLVM function arguments.
-
-                let arg_ty = fx.monomorphize(arg_decl.ty);
                 let ty::Tuple(tupled_arg_tys) = arg_ty.kind() else {
                     bug!("spread argument isn't a tuple?!");
                 };
@@ -334,8 +342,6 @@ fn arg_local_refs<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
             }
 
             if fx.fn_abi.c_variadic && arg_index == fx.fn_abi.args.len() {
-                let arg_ty = fx.monomorphize(arg_decl.ty);
-
                 let va_list = PlaceRef::alloca(bx, bx.layout_of(arg_ty));
                 bx.va_start(va_list.llval);
 
@@ -355,7 +361,7 @@ fn arg_local_refs<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
             let local = |op| LocalRef::Operand(op);
             match arg.mode {
                 PassMode::Ignore => {
-                    return local(OperandRef::new_zst(bx, arg.layout));
+                    return local(OperandRef::zero_sized(arg.layout));
                 }
                 PassMode::Direct(_) => {
                     let llarg = bx.get_param(llarg_idx);
@@ -432,14 +438,3 @@ fn arg_local_refs<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
 
     args
 }
-
-mod analyze;
-mod block;
-pub mod constant;
-pub mod coverageinfo;
-pub mod debuginfo;
-mod intrinsic;
-pub mod operand;
-pub mod place;
-mod rvalue;
-mod statement;
diff --git a/compiler/rustc_codegen_ssa/src/mir/operand.rs b/compiler/rustc_codegen_ssa/src/mir/operand.rs
index 2301c3ef1..31c293d7c 100644
--- a/compiler/rustc_codegen_ssa/src/mir/operand.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/operand.rs
@@ -8,10 +8,10 @@ use crate::traits::*;
 use crate::MemFlags;
 
 use rustc_middle::mir;
-use rustc_middle::mir::interpret::{ConstValue, Pointer, Scalar};
+use rustc_middle::mir::interpret::{alloc_range, ConstValue, Pointer, Scalar};
 use rustc_middle::ty::layout::{LayoutOf, TyAndLayout};
 use rustc_middle::ty::Ty;
-use rustc_target::abi::{Abi, Align, Size};
+use rustc_target::abi::{self, Abi, Align, Size};
 
 use std::fmt;
 
@@ -45,6 +45,14 @@ pub enum OperandValue<V> {
     /// as returned by [`LayoutTypeMethods::scalar_pair_element_backend_type`]
     /// with `immediate: true`.
     Pair(V, V),
+    /// A value taking no bytes, and which therefore needs no LLVM value at all.
+    ///
+    /// If you ever need a `V` to pass to something, get a fresh poison value
+    /// from [`ConstMethods::const_poison`].
+    ///
+    /// An `OperandValue` *must* be this variant for any type for which
+    /// `is_zst` on its `Layout` returns `true`.
+    ZeroSized,
 }
 
 /// An `OperandRef` is an "SSA" reference to a Rust value, along with
@@ -71,15 +79,9 @@ impl<V: CodegenObject> fmt::Debug for OperandRef<'_, V> {
 }
 
 impl<'a, 'tcx, V: CodegenObject> OperandRef<'tcx, V> {
-    pub fn new_zst<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
-        bx: &mut Bx,
-        layout: TyAndLayout<'tcx>,
-    ) -> OperandRef<'tcx, V> {
+    pub fn zero_sized(layout: TyAndLayout<'tcx>) -> OperandRef<'tcx, V> {
         assert!(layout.is_zst());
-        OperandRef {
-            val: OperandValue::Immediate(bx.const_poison(bx.immediate_backend_type(layout))),
-            layout,
-        }
+        OperandRef { val: OperandValue::ZeroSized, layout }
     }
 
     pub fn from_const<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
@@ -97,7 +99,7 @@ impl<'a, 'tcx, V: CodegenObject> OperandRef<'tcx, V> {
                 let llval = bx.scalar_to_backend(x, scalar, bx.immediate_backend_type(layout));
                 OperandValue::Immediate(llval)
             }
-            ConstValue::ZeroSized => return OperandRef::new_zst(bx, layout),
+            ConstValue::ZeroSized => return OperandRef::zero_sized(layout),
             ConstValue::Slice { data, start, end } => {
                 let Abi::ScalarPair(a_scalar, _) = layout.abi else {
                     bug!("from_const: invalid ScalarPair layout: {:#?}", layout);
@@ -115,13 +117,82 @@ impl<'a, 'tcx, V: CodegenObject> OperandRef<'tcx, V> {
                 OperandValue::Pair(a_llval, b_llval)
             }
             ConstValue::ByRef { alloc, offset } => {
-                return bx.load_operand(bx.from_const_alloc(layout, alloc, offset));
+                return Self::from_const_alloc(bx, layout, alloc, offset);
             }
         };
 
         OperandRef { val, layout }
     }
 
+    fn from_const_alloc<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
+        bx: &mut Bx,
+        layout: TyAndLayout<'tcx>,
+        alloc: rustc_middle::mir::interpret::ConstAllocation<'tcx>,
+        offset: Size,
+    ) -> Self {
+        let alloc_align = alloc.inner().align;
+        assert_eq!(alloc_align, layout.align.abi);
+        let ty = bx.type_ptr_to(bx.cx().backend_type(layout));
+
+        let read_scalar = |start, size, s: abi::Scalar, ty| {
+            let val = alloc
+                .0
+                .read_scalar(
+                    bx,
+                    alloc_range(start, size),
+                    /*read_provenance*/ matches!(s.primitive(), abi::Pointer(_)),
+                )
+                .unwrap();
+            bx.scalar_to_backend(val, s, ty)
+        };
+
+        // It may seem like all types with `Scalar` or `ScalarPair` ABI are fair game at this point.
+        // However, `MaybeUninit<u64>` is considered a `Scalar` as far as its layout is concerned --
+        // and yet cannot be represented by an interpreter `Scalar`, since we have to handle the
+        // case where some of the bytes are initialized and others are not. So, we need an extra
+        // check that walks over the type of `mplace` to make sure it is truly correct to treat this
+        // like a `Scalar` (or `ScalarPair`).
+        match layout.abi {
+            Abi::Scalar(s @ abi::Scalar::Initialized { .. }) => {
+                let size = s.size(bx);
+                assert_eq!(size, layout.size, "abi::Scalar size does not match layout size");
+                let val = read_scalar(Size::ZERO, size, s, ty);
+                OperandRef { val: OperandValue::Immediate(val), layout }
+            }
+            Abi::ScalarPair(
+                a @ abi::Scalar::Initialized { .. },
+                b @ abi::Scalar::Initialized { .. },
+            ) => {
+                let (a_size, b_size) = (a.size(bx), b.size(bx));
+                let b_offset = a_size.align_to(b.align(bx).abi);
+                assert!(b_offset.bytes() > 0);
+                let a_val = read_scalar(
+                    Size::ZERO,
+                    a_size,
+                    a,
+                    bx.scalar_pair_element_backend_type(layout, 0, true),
+                );
+                let b_val = read_scalar(
+                    b_offset,
+                    b_size,
+                    b,
+                    bx.scalar_pair_element_backend_type(layout, 1, true),
+                );
+                OperandRef { val: OperandValue::Pair(a_val, b_val), layout }
+            }
+            _ if layout.is_zst() => OperandRef::zero_sized(layout),
+            _ => {
+                // Neither a scalar nor scalar pair. Load from a place
+                let init = bx.const_data_from_alloc(alloc);
+                let base_addr = bx.static_addr_of(init, alloc_align, None);
+
+                let llval = bx.const_ptr_byte_offset(base_addr, offset);
+                let llval = bx.const_bitcast(llval, ty);
+                bx.load_operand(PlaceRef::new_sized(llval, layout))
+            }
+        }
+    }
+
     /// Asserts that this operand refers to a scalar and returns
     /// a reference to its value.
     pub fn immediate(self) -> V {
@@ -147,6 +218,7 @@ impl<'a, 'tcx, V: CodegenObject> OperandRef<'tcx, V> {
             OperandValue::Immediate(llptr) => (llptr, None),
             OperandValue::Pair(llptr, llextra) => (llptr, Some(llextra)),
             OperandValue::Ref(..) => bug!("Deref of by-Ref operand {:?}", self),
+            OperandValue::ZeroSized => bug!("Deref of ZST operand {:?}", self),
         };
         let layout = cx.layout_of(projected_ty);
         PlaceRef { llval: llptr, llextra, layout, align: layout.align.abi }
@@ -204,9 +276,7 @@ impl<'a, 'tcx, V: CodegenObject> OperandRef<'tcx, V> {
 
         let mut val = match (self.val, self.layout.abi) {
             // If the field is ZST, it has no data.
-            _ if field.is_zst() => {
-                return OperandRef::new_zst(bx, field);
-            }
+            _ if field.is_zst() => OperandValue::ZeroSized,
 
             // Newtype of a scalar, scalar pair or vector.
             (OperandValue::Immediate(_) | OperandValue::Pair(..), _)
@@ -237,6 +307,7 @@ impl<'a, 'tcx, V: CodegenObject> OperandRef<'tcx, V> {
         };
 
         match (&mut val, field.abi) {
+            (OperandValue::ZeroSized, _) => {}
             (
                 OperandValue::Immediate(llval),
                 Abi::Scalar(_) | Abi::ScalarPair(..) | Abi::Vector { .. },
@@ -290,8 +361,8 @@ impl<'a, 'tcx, V: CodegenObject> OperandRef<'tcx, V> {
 impl<'a, 'tcx, V: CodegenObject> OperandValue<V> {
     /// Returns an `OperandValue` that's generally UB to use in any way.
     ///
-    /// Depending on the `layout`, returns an `Immediate` or `Pair` containing
-    /// poison value(s), or a `Ref` containing a poison pointer.
+    /// Depending on the `layout`, returns `ZeroSized` for ZSTs, an `Immediate` or
+    /// `Pair` containing poison value(s), or a `Ref` containing a poison pointer.
     ///
     /// Supports sized types only.
     pub fn poison<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
@@ -299,7 +370,9 @@ impl<'a, 'tcx, V: CodegenObject> OperandValue<V> {
         layout: TyAndLayout<'tcx>,
     ) -> OperandValue<V> {
         assert!(layout.is_sized());
-        if bx.cx().is_backend_immediate(layout) {
+        if layout.is_zst() {
+            OperandValue::ZeroSized
+        } else if bx.cx().is_backend_immediate(layout) {
             let ibty = bx.cx().immediate_backend_type(layout);
             OperandValue::Immediate(bx.const_poison(ibty))
         } else if bx.cx().is_backend_scalar_pair(layout) {
@@ -352,12 +425,11 @@ impl<'a, 'tcx, V: CodegenObject> OperandValue<V> {
         flags: MemFlags,
     ) {
         debug!("OperandRef::store: operand={:?}, dest={:?}", self, dest);
-        // Avoid generating stores of zero-sized values, because the only way to have a zero-sized
-        // value is through `undef`, and store itself is useless.
-        if dest.layout.is_zst() {
-            return;
-        }
         match self {
+            OperandValue::ZeroSized => {
+                // Avoid generating stores of zero-sized values, because the only way to have a zero-sized
+                // value is through `undef`/`poison`, and the store itself is useless.
+            }
             OperandValue::Ref(r, None, source_align) => {
                 if flags.contains(MemFlags::NONTEMPORAL) {
                     // HACK(nox): This is inefficient but there is no nontemporal memcpy.
@@ -458,7 +530,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                     // checks in `codegen_consume` and `extract_field`.
                     let elem = o.layout.field(bx.cx(), 0);
                     if elem.is_zst() {
-                        o = OperandRef::new_zst(bx, elem);
+                        o = OperandRef::zero_sized(elem);
                     } else {
                         return None;
                     }
@@ -492,7 +564,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
 
         // ZSTs don't require any actual memory access.
         if layout.is_zst() {
-            return OperandRef::new_zst(bx, layout);
+            return OperandRef::zero_sized(layout);
         }
 
         if let Some(o) = self.maybe_codegen_consume_direct(bx, place_ref) {
diff --git a/compiler/rustc_codegen_ssa/src/mir/place.rs b/compiler/rustc_codegen_ssa/src/mir/place.rs
index a58a61cd5..ab493ae5c 100644
--- a/compiler/rustc_codegen_ssa/src/mir/place.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/place.rs
@@ -61,7 +61,7 @@ impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> {
         layout: TyAndLayout<'tcx>,
     ) -> Self {
         assert!(layout.is_unsized(), "tried to allocate indirect place for sized values");
-        let ptr_ty = bx.cx().tcx().mk_mut_ptr(layout.ty);
+        let ptr_ty = Ty::new_mut_ptr(bx.cx().tcx(), layout.ty);
         let ptr_layout = bx.cx().layout_of(ptr_ty);
         Self::alloca(bx, ptr_layout)
     }
diff --git a/compiler/rustc_codegen_ssa/src/mir/rvalue.rs b/compiler/rustc_codegen_ssa/src/mir/rvalue.rs
index 6e7065713..956f03d25 100644
--- a/compiler/rustc_codegen_ssa/src/mir/rvalue.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/rvalue.rs
@@ -11,7 +11,7 @@ use rustc_middle::mir;
 use rustc_middle::mir::Operand;
 use rustc_middle::ty::cast::{CastTy, IntTy};
 use rustc_middle::ty::layout::{HasTyCtxt, LayoutOf, TyAndLayout};
-use rustc_middle::ty::{self, adjustment::PointerCast, Instance, Ty, TyCtxt};
+use rustc_middle::ty::{self, adjustment::PointerCoercion, Instance, Ty, TyCtxt};
 use rustc_session::config::OptLevel;
 use rustc_span::source_map::{Span, DUMMY_SP};
 use rustc_target::abi::{self, FIRST_VARIANT};
@@ -32,7 +32,11 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                 cg_operand.val.store(bx, dest);
             }
 
-            mir::Rvalue::Cast(mir::CastKind::Pointer(PointerCast::Unsize), ref source, _) => {
+            mir::Rvalue::Cast(
+                mir::CastKind::PointerCoercion(PointerCoercion::Unsize),
+                ref source,
+                _,
+            ) => {
                 // The destination necessarily contains a fat pointer, so if
                 // it's a scalar pair, it's a fat pointer or newtype thereof.
                 if bx.cx().is_backend_scalar_pair(dest.layout) {
@@ -70,6 +74,9 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                     OperandValue::Ref(_, Some(_), _) => {
                         bug!("unsized coercion on an unsized rvalue");
                     }
+                    OperandValue::ZeroSized => {
+                        bug!("unsized coercion on a ZST rvalue");
+                    }
                 }
             }
 
@@ -165,11 +172,11 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                 }
 
                 match src.val {
-                    OperandValue::Ref(..) => {
+                    OperandValue::Ref(..) | OperandValue::ZeroSized => {
                         span_bug!(
                             self.mir.span,
                             "Operand path should have handled transmute \
-                              from `Ref` {src:?} to place {dst:?}"
+                              from {src:?} to place {dst:?}"
                         );
                     }
                     OperandValue::Immediate(..) | OperandValue::Pair(..) => {
@@ -220,17 +227,22 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                 let fake_place = PlaceRef::new_sized_aligned(cast_ptr, cast, align);
                 Some(bx.load_operand(fake_place).val)
             }
+            OperandValue::ZeroSized => {
+                let OperandValueKind::ZeroSized = operand_kind else {
+                    bug!("Found {operand_kind:?} for operand {operand:?}");
+                };
+                if let OperandValueKind::ZeroSized = cast_kind {
+                    Some(OperandValue::ZeroSized)
+                } else {
+                    None
+                }
+            }
             OperandValue::Immediate(imm) => {
                 let OperandValueKind::Immediate(in_scalar) = operand_kind else {
                     bug!("Found {operand_kind:?} for operand {operand:?}");
                 };
-                if let OperandValueKind::Immediate(out_scalar) = cast_kind {
-                    match (in_scalar, out_scalar) {
-                        (ScalarOrZst::Zst, ScalarOrZst::Zst) => {
-                            Some(OperandRef::new_zst(bx, cast).val)
-                        }
-                        (ScalarOrZst::Scalar(in_scalar), ScalarOrZst::Scalar(out_scalar))
-                            if in_scalar.size(self.cx) == out_scalar.size(self.cx) =>
+                if let OperandValueKind::Immediate(out_scalar) = cast_kind
+                    && in_scalar.size(self.cx) == out_scalar.size(self.cx)
                 {
                     let operand_bty = bx.backend_type(operand.layout);
                     let cast_bty = bx.backend_type(cast);
@@ -242,9 +254,6 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                         out_scalar,
                         cast_bty,
                     )))
-                        }
-                        _ => None,
-                    }
                 } else {
                     None
                 }
@@ -406,7 +415,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                 let lladdr = bx.ptrtoint(llptr, llcast_ty);
                 OperandValue::Immediate(lladdr)
             }
-            mir::CastKind::Pointer(PointerCast::ReifyFnPointer) => {
+            mir::CastKind::PointerCoercion(PointerCoercion::ReifyFnPointer) => {
                 match *operand.layout.ty.kind() {
                     ty::FnDef(def_id, substs) => {
                         let instance = ty::Instance::resolve_for_fn_ptr(
@@ -422,7 +431,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                     _ => bug!("{} cannot be reified to a fn ptr", operand.layout.ty),
                 }
             }
-            mir::CastKind::Pointer(PointerCast::ClosureFnPointer(_)) => {
+            mir::CastKind::PointerCoercion(PointerCoercion::ClosureFnPointer(_)) => {
                 match *operand.layout.ty.kind() {
                     ty::Closure(def_id, substs) => {
                         let instance = Instance::resolve_closure(
@@ -438,11 +447,11 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                     _ => bug!("{} cannot be cast to a fn ptr", operand.layout.ty),
                 }
             }
-            mir::CastKind::Pointer(PointerCast::UnsafeFnPointer) => {
+            mir::CastKind::PointerCoercion(PointerCoercion::UnsafeFnPointer) => {
                 // This is a no-op at the LLVM level.
                 operand.val
             }
-            mir::CastKind::Pointer(PointerCast::Unsize) => {
+            mir::CastKind::PointerCoercion(PointerCoercion::Unsize) => {
                 assert!(bx.cx().is_backend_scalar_pair(cast));
                 let (lldata, llextra) = match operand.val {
                     OperandValue::Pair(lldata, llextra) => {
@@ -457,12 +466,15 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                     OperandValue::Ref(..) => {
                         bug!("by-ref operand {:?} in `codegen_rvalue_operand`", operand);
                     }
+                    OperandValue::ZeroSized => {
+                        bug!("zero-sized operand {:?} in `codegen_rvalue_operand`", operand);
+                    }
                 };
                 let (lldata, llextra) =
                     base::unsize_ptr(bx, lldata, operand.layout.ty, cast.ty, llextra);
                 OperandValue::Pair(lldata, llextra)
             }
-            mir::CastKind::Pointer(PointerCast::MutToConstPointer)
+            mir::CastKind::PointerCoercion(PointerCoercion::MutToConstPointer)
             | mir::CastKind::PtrToPtr
                 if bx.cx().is_backend_scalar_pair(operand.layout) =>
             {
@@ -490,13 +502,14 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                     OperandValue::Ref(_, _, _) => todo!(),
                     OperandValue::Immediate(v) => (v, None),
                     OperandValue::Pair(v, l) => (v, Some(l)),
+                    OperandValue::ZeroSized => bug!("ZST -- which is not PointerLike -- in DynStar"),
                 };
                 let (lldata, llextra) =
                     base::cast_to_dyn_star(bx, lldata, operand.layout, cast.ty, llextra);
                 OperandValue::Pair(lldata, llextra)
             }
-            mir::CastKind::Pointer(
-                PointerCast::MutToConstPointer | PointerCast::ArrayToPointer,
+            mir::CastKind::PointerCoercion(
+                PointerCoercion::MutToConstPointer | PointerCoercion::ArrayToPointer,
             )
             | mir::CastKind::IntToInt
             | mir::CastKind::FloatToInt
@@ -572,7 +585,8 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
             mir::Rvalue::Ref(_, bk, place) => {
                 let mk_ref = move |tcx: TyCtxt<'tcx>, ty: Ty<'tcx>| {
-                    tcx.mk_ref(
+                    Ty::new_ref(
+                        tcx,
                         tcx.lifetimes.re_erased,
                         ty::TypeAndMut { ty, mutbl: bk.to_mutbl_lossy() },
                     )
@@ -583,7 +597,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
             mir::Rvalue::CopyForDeref(place) => self.codegen_operand(bx, &Operand::Copy(place)),
             mir::Rvalue::AddressOf(mutability, place) => {
                 let mk_ptr = move |tcx: TyCtxt<'tcx>, ty: Ty<'tcx>| {
-                    tcx.mk_ptr(ty::TypeAndMut { ty, mutbl: mutability })
+                    Ty::new_ptr(tcx, ty::TypeAndMut { ty, mutbl: mutability })
                 };
                 self.codegen_place_to_pointer(bx, place, mk_ptr)
             }
@@ -635,7 +649,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                     lhs.layout.ty,
                 );
                 let val_ty = op.ty(bx.tcx(), lhs.layout.ty, rhs.layout.ty);
-                let operand_ty = bx.tcx().mk_tup(&[val_ty, bx.tcx().types.bool]);
+                let operand_ty = Ty::new_tup(bx.tcx(), &[val_ty, bx.tcx().types.bool]);
                 OperandRef { val: result, layout: bx.cx().layout_of(operand_ty) }
             }
 
@@ -668,11 +682,16 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
             mir::Rvalue::NullaryOp(ref null_op, ty) => {
                 let ty = self.monomorphize(ty);
-                assert!(bx.cx().type_is_sized(ty));
                 let layout = bx.cx().layout_of(ty);
                 let val = match null_op {
-                    mir::NullOp::SizeOf => layout.size.bytes(),
-                    mir::NullOp::AlignOf => layout.align.abi.bytes(),
+                    mir::NullOp::SizeOf => {
+                        assert!(bx.cx().type_is_sized(ty));
+                        layout.size.bytes()
+                    }
+                    mir::NullOp::AlignOf => {
+                        assert!(bx.cx().type_is_sized(ty));
+                        layout.align.abi.bytes()
+                    }
                     mir::NullOp::OffsetOf(fields) => {
                         layout.offset_of_subfield(bx.cx(), fields.iter().map(|f| f.index())).bytes()
                     }
@@ -713,14 +732,14 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                 // According to `rvalue_creates_operand`, only ZST
                 // aggregate rvalues are allowed to be operands.
                 let ty = rvalue.ty(self.mir, self.cx.tcx());
-                OperandRef::new_zst(bx, self.cx.layout_of(self.monomorphize(ty)))
+                OperandRef::zero_sized(self.cx.layout_of(self.monomorphize(ty)))
             }
             mir::Rvalue::ShallowInitBox(ref operand, content_ty) => {
                 let operand = self.codegen_operand(bx, operand);
                 let lloperand = operand.immediate();
 
                 let content_ty = self.monomorphize(content_ty);
-                let box_layout = bx.cx().layout_of(bx.tcx().mk_box(content_ty));
+                let box_layout = bx.cx().layout_of(Ty::new_box(bx.tcx(), content_ty));
                 let llty_ptr = bx.cx().backend_type(box_layout);
                 let val = bx.pointercast(lloperand, llty_ptr);
 
@@ -784,6 +803,13 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                         bx.add(lhs, rhs)
                     }
                 }
+                mir::BinOp::AddUnchecked => {
+                    if is_signed {
+                        bx.unchecked_sadd(lhs, rhs)
+                    } else {
+                        bx.unchecked_uadd(lhs, rhs)
+                    }
+                }
                 mir::BinOp::Sub => {
                     if is_float {
                         bx.fsub(lhs, rhs)
@@ -791,6 +817,13 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                         bx.sub(lhs, rhs)
                     }
                 }
+                mir::BinOp::SubUnchecked => {
+                    if is_signed {
+                        bx.unchecked_ssub(lhs, rhs)
+                    } else {
+                        bx.unchecked_usub(lhs, rhs)
+                    }
+                }
                 mir::BinOp::Mul => {
                     if is_float {
                         bx.fmul(lhs, rhs)
@@ -798,6 +831,13 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                         bx.mul(lhs, rhs)
                    }
                 }
+                mir::BinOp::MulUnchecked => {
+                    if is_signed {
+                        bx.unchecked_smul(lhs, rhs)
+                    } else {
+                        bx.unchecked_umul(lhs, rhs)
+                    }
+                }
                 mir::BinOp::Div => {
                     if is_float {
                         bx.fdiv(lhs, rhs)
@@ -834,8 +874,16 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                         bx.inbounds_gep(llty, lhs, &[rhs])
                     }
                 }
-                mir::BinOp::Shl => common::build_unchecked_lshift(bx, lhs, rhs),
-                mir::BinOp::Shr => common::build_unchecked_rshift(bx, input_ty, lhs, rhs),
+                mir::BinOp::Shl => common::build_masked_lshift(bx, lhs, rhs),
+                mir::BinOp::ShlUnchecked => {
+                    let rhs = base::cast_shift_expr_rhs(bx, lhs, rhs);
+                    bx.shl(lhs, rhs)
+                }
+                mir::BinOp::Shr => common::build_masked_rshift(bx, input_ty, lhs, rhs),
+                mir::BinOp::ShrUnchecked => {
+                    let rhs = base::cast_shift_expr_rhs(bx, lhs, rhs);
+                    if is_signed { bx.ashr(lhs, rhs) } else { bx.lshr(lhs, rhs) }
+                }
                 mir::BinOp::Ne
                 | mir::BinOp::Lt
                 | mir::BinOp::Gt
@@ -931,6 +979,12 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
             // Can always load from a pointer as needed
             (OperandValueKind::Ref, _) => true,
 
+            // ZST-to-ZST is the easiest thing ever
+            (OperandValueKind::ZeroSized, OperandValueKind::ZeroSized) => true,
+
+            // But if only one of them is a ZST the sizes can't match
+            (OperandValueKind::ZeroSized, _) | (_, OperandValueKind::ZeroSized) => false,
+
             // Need to generate an `alloc` to get a pointer from an immediate
             (OperandValueKind::Immediate(..) | OperandValueKind::Pair(..), OperandValueKind::Ref) => false,
@@ -974,12 +1028,13 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
     /// Gets which variant of [`OperandValue`] is expected for a particular type.
     fn value_kind(&self, layout: TyAndLayout<'tcx>) -> OperandValueKind {
-        if self.cx.is_backend_immediate(layout) {
+        if layout.is_zst() {
+            OperandValueKind::ZeroSized
+        } else if self.cx.is_backend_immediate(layout) {
             debug_assert!(!self.cx.is_backend_scalar_pair(layout));
             OperandValueKind::Immediate(match layout.abi {
-                abi::Abi::Scalar(s) => ScalarOrZst::Scalar(s),
-                abi::Abi::Vector { element, .. } => ScalarOrZst::Scalar(element),
-                _ if layout.is_zst() => ScalarOrZst::Zst,
+                abi::Abi::Scalar(s) => s,
+                abi::Abi::Vector { element, .. } => element,
                 x => span_bug!(self.mir.span, "Couldn't translate {x:?} as backend immediate"),
             })
         } else if self.cx.is_backend_scalar_pair(layout) {
@@ -1002,21 +1057,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
 #[derive(Debug, Copy, Clone)]
 enum OperandValueKind {
     Ref,
-    Immediate(ScalarOrZst),
+    Immediate(abi::Scalar),
     Pair(abi::Scalar, abi::Scalar),
-}
-
-#[derive(Debug, Copy, Clone)]
-enum ScalarOrZst {
-    Zst,
-    Scalar(abi::Scalar),
-}
-
-impl ScalarOrZst {
-    pub fn size(self, cx: &impl abi::HasDataLayout) -> abi::Size {
-        match self {
-            ScalarOrZst::Zst => abi::Size::ZERO,
-            ScalarOrZst::Scalar(s) => s.size(cx),
-        }
-    }
+    ZeroSized,
 }
diff --git a/compiler/rustc_codegen_ssa/src/mir/statement.rs b/compiler/rustc_codegen_ssa/src/mir/statement.rs
index 3fd7397ad..899e41265 100644
--- a/compiler/rustc_codegen_ssa/src/mir/statement.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/statement.rs
@@ -20,7 +20,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
             }
             LocalRef::PendingOperand => {
                 let operand = self.codegen_rvalue_operand(bx, rvalue);
-                self.locals[index] = LocalRef::Operand(operand);
+                self.overwrite_local(index, LocalRef::Operand(operand));
                 self.debug_introduce_local(bx, index);
             }
             LocalRef::Operand(op) => {
@@ -65,7 +65,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                 }
             }
             mir::StatementKind::Coverage(box ref coverage) => {
-                self.codegen_coverage(bx, coverage.clone(), statement.source_info.scope);
+                self.codegen_coverage(bx, coverage, statement.source_info.scope);
             }
             mir::StatementKind::Intrinsic(box NonDivergingIntrinsic::Assume(ref op)) => {
                 let op_val = self.codegen_operand(bx, op);
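Note: the rvalue.rs hunks above also split shift lowering — `mir::BinOp::Shl`/`Shr` now go through masked-shift helpers (`build_masked_lshift`/`build_masked_rshift`), while the new `ShlUnchecked`/`ShrUnchecked` ops emit the bare shift instruction. A hedged sketch of the semantic difference (these free functions are illustrative, not rustc's):

    // Masked shift: the shift amount is masked to the type's bit-width, so an
    // over-wide amount wraps to a defined result instead of an out-of-range shift.
    fn masked_shl(lhs: u32, rhs: u32) -> u32 {
        lhs << (rhs & (u32::BITS - 1))
    }

    // Unchecked shift: stands in for emitting a bare `shl`; the caller must
    // guarantee `rhs < u32::BITS`, mirroring the `ShlUnchecked` contract.
    unsafe fn unchecked_shl(lhs: u32, rhs: u32) -> u32 {
        debug_assert!(rhs < u32::BITS);
        lhs << rhs
    }

    fn main() {
        assert_eq!(masked_shl(1, 33), 2); // 33 & 31 == 1
        assert_eq!(unsafe { unchecked_shl(1, 3) }, 8);
    }

Masking makes the backend-level result well defined for any shift amount; the unchecked form simply leaves the range obligation to the caller, which is what lets it lower to a single instruction.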