Diffstat (limited to 'compiler/rustc_codegen_ssa/src/mir/block.rs')
 compiler/rustc_codegen_ssa/src/mir/block.rs | 114 ++++++++++++----------
 1 file changed, 65 insertions(+), 49 deletions(-)
diff --git a/compiler/rustc_codegen_ssa/src/mir/block.rs b/compiler/rustc_codegen_ssa/src/mir/block.rs
index 9d1b3ce82..4f26383ed 100644
--- a/compiler/rustc_codegen_ssa/src/mir/block.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/block.rs
@@ -23,6 +23,8 @@ use rustc_target::abi::call::{ArgAbi, FnAbi, PassMode, Reg};
use rustc_target::abi::{self, HasDataLayout, WrappingRange};
use rustc_target::spec::abi::Abi;
+use std::cmp;
+
// Indicates if we are in the middle of merging a BB's successor into it. This
// can happen when BB jumps directly to its successor and the successor has no
// other predecessors.
@@ -437,8 +439,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
ZeroSized => bug!("ZST return value shouldn't be in PassMode::Cast"),
};
let ty = bx.cast_backend_type(cast_ty);
- let addr = bx.pointercast(llslot, bx.type_ptr_to(ty));
- bx.load(ty, addr, self.fn_abi.ret.layout.align.abi)
+ bx.load(ty, llslot, self.fn_abi.ret.layout.align.abi)
}
};
bx.ret(llval);
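
With LLVM's opaque pointers there is a single `ptr` type, so the bitcast that `pointercast` used to emit carries no information and the load can read straight from `llslot`. A minimal sketch of the identity this relies on, using a stand-in `OpaquePtr` type rather than rustc's backend values:

    #[derive(Clone, Copy, PartialEq, Debug)]
    struct OpaquePtr(usize); // stand-in for an LLVM `ptr` value, not rustc's type

    fn pointercast(p: OpaquePtr) -> OpaquePtr {
        p // identity: with one pointer type there is nothing to cast between
    }

    fn main() {
        let slot = OpaquePtr(0x1000);
        assert_eq!(pointercast(slot), slot);
    }
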
@@ -491,7 +492,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
//
let virtual_drop = Instance {
def: ty::InstanceDef::Virtual(drop_fn.def_id(), 0),
- substs: drop_fn.substs,
+ args: drop_fn.args,
};
debug!("ty = {:?}", ty);
debug!("drop_fn = {:?}", drop_fn);
@@ -531,7 +532,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
// SO THEN WE CAN USE THE ABOVE CODE.
let virtual_drop = Instance {
def: ty::InstanceDef::Virtual(drop_fn.def_id(), 0),
- substs: drop_fn.substs,
+ args: drop_fn.args,
};
debug!("ty = {:?}", ty);
debug!("drop_fn = {:?}", drop_fn);
@@ -687,7 +688,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
// which mentions the offending type, even from a const context.
let panic_intrinsic = intrinsic.and_then(|s| ValidityRequirement::from_intrinsic(s));
if let Some(requirement) = panic_intrinsic {
- let ty = instance.unwrap().substs.type_at(0);
+ let ty = instance.unwrap().args.type_at(0);
let do_panic = !bx
.tcx()
@@ -701,13 +702,12 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
with_no_trimmed_paths!({
if layout.abi.is_uninhabited() {
// Use this error even for the other intrinsics as it is more precise.
- format!("attempted to instantiate uninhabited type `{}`", ty)
+ format!("attempted to instantiate uninhabited type `{ty}`")
} else if requirement == ValidityRequirement::Zero {
- format!("attempted to zero-initialize type `{}`, which is invalid", ty)
+ format!("attempted to zero-initialize type `{ty}`, which is invalid")
} else {
format!(
- "attempted to leave type `{}` uninitialized, which is invalid",
- ty
+ "attempted to leave type `{ty}` uninitialized, which is invalid"
)
}
})
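
The message rewrites above are purely syntactic: inline format arguments (stable since Rust 1.58) capture `ty` from the enclosing scope, so the new strings are equivalent to the old positional form. A runnable check:

    fn main() {
        let ty = "Foo";
        assert_eq!(
            format!("attempted to instantiate uninhabited type `{ty}`"),
            format!("attempted to instantiate uninhabited type `{}`", ty),
        );
    }
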
@@ -760,13 +760,13 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
let callee = self.codegen_operand(bx, func);
let (instance, mut llfn) = match *callee.layout.ty.kind() {
- ty::FnDef(def_id, substs) => (
+ ty::FnDef(def_id, args) => (
Some(
ty::Instance::expect_resolve(
bx.tcx(),
ty::ParamEnv::reveal_all(),
def_id,
- substs,
+ args,
)
.polymorphize(bx.tcx()),
),
@@ -851,9 +851,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
Some(intrinsic) => {
let dest = match ret_dest {
_ if fn_abi.ret.is_indirect() => llargs[0],
- ReturnDest::Nothing => {
- bx.const_undef(bx.type_ptr_to(bx.arg_memory_ty(&fn_abi.ret)))
- }
+ ReturnDest::Nothing => bx.const_undef(bx.type_ptr()),
ReturnDest::IndirectOperand(dst, _) | ReturnDest::Store(dst) => dst.llval,
ReturnDest::DirectOperand(_) => {
bug!("Cannot use direct operand with an intrinsic call")
@@ -864,11 +862,11 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
.iter()
.enumerate()
.map(|(i, arg)| {
- // The indices passed to simd_shuffle* in the
+ // The indices passed to simd_shuffle in the
// third argument must be constant. This is
// checked by const-qualification, which also
// promotes any complex rvalues to constants.
- if i == 2 && intrinsic.as_str().starts_with("simd_shuffle") {
+ if i == 2 && intrinsic == sym::simd_shuffle {
if let mir::Operand::Constant(constant) = arg {
let (llval, ty) = self.simd_shuffle_indices(&bx, constant);
return OperandRef {
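
Matching `sym::simd_shuffle` exactly is both cheaper (a `Symbol` compare is an integer compare) and stricter than the old prefix test, which would also match any intrinsic name merely beginning with `simd_shuffle`, such as the historical fixed-width `simd_shuffleN` forms. A plain-string sketch of the behavioral difference (`old_check`/`new_check` are illustrative helpers, not rustc code):

    fn old_check(name: &str) -> bool {
        name.starts_with("simd_shuffle")
    }

    fn new_check(name: &str) -> bool {
        name == "simd_shuffle" // the interned-symbol compare behaves like this
    }

    fn main() {
        assert!(old_check("simd_shuffle") && new_check("simd_shuffle"));
        // the prefix test was broader:
        assert!(old_check("simd_shuffle16") && !new_check("simd_shuffle16"));
    }
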
@@ -1043,10 +1041,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
assert_eq!(
fn_abi.args.len(),
mir_args + 1,
- "#[track_caller] fn's must have 1 more argument in their ABI than in their MIR: {:?} {:?} {:?}",
- instance,
- fn_span,
- fn_abi,
+ "#[track_caller] fn's must have 1 more argument in their ABI than in their MIR: {instance:?} {fn_span:?} {fn_abi:?}",
);
let location =
self.get_caller_location(bx, mir::SourceInfo { span: fn_span, ..source_info });
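
The assertion encodes the `#[track_caller]` convention: the caller's `Location` travels as one extra, implicit ABI argument that never appears in the function's MIR signature. The same hidden argument is observable from the surface language; `whereami` below is a hypothetical example function:

    use std::panic::Location;

    #[track_caller]
    fn whereami() -> &'static Location<'static> {
        // The caller's file/line/column arrive via the hidden extra argument.
        Location::caller()
    }

    fn main() {
        let loc = whereami();
        println!("called from {}:{}", loc.file(), loc.line());
    }
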
@@ -1125,12 +1120,12 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
}
mir::InlineAsmOperand::SymFn { ref value } => {
let literal = self.monomorphize(value.literal);
- if let ty::FnDef(def_id, substs) = *literal.ty().kind() {
+ if let ty::FnDef(def_id, args) = *literal.ty().kind() {
let instance = ty::Instance::resolve_for_fn_ptr(
bx.tcx(),
ty::ParamEnv::reveal_all(),
def_id,
- substs,
+ args,
)
.unwrap();
InlineAsmOperandRef::SymFn { instance }
@@ -1360,36 +1355,58 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
// Force by-ref if we have to load through a cast pointer.
let (mut llval, align, by_ref) = match op.val {
Immediate(_) | Pair(..) => match arg.mode {
- PassMode::Indirect { .. } | PassMode::Cast(..) => {
+ PassMode::Indirect { attrs, .. } => {
+ // Indirect argument may have higher alignment requirements than the type's alignment.
+ // This can happen, e.g. when passing types with <4 byte alignment on the stack on x86.
+ let required_align = match attrs.pointee_align {
+ Some(pointee_align) => cmp::max(pointee_align, arg.layout.align.abi),
+ None => arg.layout.align.abi,
+ };
+ let scratch = PlaceRef::alloca_aligned(bx, arg.layout, required_align);
+ op.val.store(bx, scratch);
+ (scratch.llval, scratch.align, true)
+ }
+ PassMode::Cast(..) => {
let scratch = PlaceRef::alloca(bx, arg.layout);
op.val.store(bx, scratch);
(scratch.llval, scratch.align, true)
}
_ => (op.immediate_or_packed_pair(bx), arg.layout.align.abi, false),
},
- Ref(llval, _, align) => {
- if arg.is_indirect() && align < arg.layout.align.abi {
- // `foo(packed.large_field)`. We can't pass the (unaligned) field directly. I
- // think that ATM (Rust 1.16) we only pass temporaries, but we shouldn't
- // have scary latent bugs around.
-
- let scratch = PlaceRef::alloca(bx, arg.layout);
- base::memcpy_ty(
- bx,
- scratch.llval,
- scratch.align,
- llval,
- align,
- op.layout,
- MemFlags::empty(),
- );
- (scratch.llval, scratch.align, true)
- } else {
- (llval, align, true)
+ Ref(llval, _, align) => match arg.mode {
+ PassMode::Indirect { attrs, .. } => {
+ let required_align = match attrs.pointee_align {
+ Some(pointee_align) => cmp::max(pointee_align, arg.layout.align.abi),
+ None => arg.layout.align.abi,
+ };
+ if align < required_align {
+ // For `foo(packed.large_field)`, and types with <4 byte alignment on x86,
+ // alignment requirements may be higher than the type's alignment, so copy
+ // to a higher-aligned alloca.
+ let scratch = PlaceRef::alloca_aligned(bx, arg.layout, required_align);
+ base::memcpy_ty(
+ bx,
+ scratch.llval,
+ scratch.align,
+ llval,
+ align,
+ op.layout,
+ MemFlags::empty(),
+ );
+ (scratch.llval, scratch.align, true)
+ } else {
+ (llval, align, true)
+ }
}
- }
+ _ => (llval, align, true),
+ },
ZeroSized => match arg.mode {
- PassMode::Indirect { .. } => {
+ PassMode::Indirect { on_stack, .. } => {
+ if on_stack {
+ // It doesn't seem like any target can have `byval` ZSTs, so this assert
+ // is here to replace a would-be untested codepath.
+ bug!("ZST {op:?} passed on stack with abi {arg:?}");
+ }
// Though `extern "Rust"` doesn't pass ZSTs, some ABIs pass
// a pointer for `repr(C)` structs even when empty, so get
// one from an `alloca` (which can be left uninitialized).
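
Both new `PassMode::Indirect` arms compute the required alignment the same way: the maximum of the ABI's `pointee_align` attribute (when present) and the type's own ABI alignment, so that, say, a 2-byte-aligned type passed on the x86 stack still gets a sufficiently aligned slot. The rule in isolation, as a runnable sketch with plain integers standing in for rustc's `Align`:

    use std::cmp;

    fn required_align(pointee_align: Option<u64>, type_align: u64) -> u64 {
        match pointee_align {
            Some(pointee_align) => cmp::max(pointee_align, type_align),
            None => type_align,
        }
    }

    fn main() {
        assert_eq!(required_align(Some(4), 2), 4); // ABI demands more than the type
        assert_eq!(required_align(None, 2), 2);    // no attribute: type alignment wins
    }
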
@@ -1404,8 +1421,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
// Have to load the argument, maybe while casting it.
if let PassMode::Cast(ty, _) = &arg.mode {
let llty = bx.cast_backend_type(ty);
- let addr = bx.pointercast(llval, bx.type_ptr_to(llty));
- llval = bx.load(llty, addr, align.min(arg.layout.align.abi));
+ llval = bx.load(llty, llval, align.min(arg.layout.align.abi));
} else {
// We can't use `PlaceRef::load` here because the argument
// may have a type we don't treat as immediate, but the ABI
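
The `align.min(arg.layout.align.abi)` clamp matters for under-aligned sources such as the `foo(packed.large_field)` case mentioned earlier: the emitted load must not claim more alignment than the source place really has. The same hazard is visible at the surface level; a runnable sketch:

    #[repr(C, packed)]
    struct Packed {
        tag: u8,
        field: u32, // only 1-byte aligned inside `Packed`
    }

    fn main() {
        let p = Packed { tag: 1, field: 42 };
        // Reading the under-aligned field through a raw pointer must use
        // read_unaligned; an aligned load here would be undefined behavior.
        let v = unsafe { std::ptr::addr_of!(p.field).read_unaligned() };
        assert_eq!(v, 42);
    }
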
@@ -1531,7 +1547,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
fn landing_pad_for_uncached(&mut self, bb: mir::BasicBlock) -> Bx::BasicBlock {
let llbb = self.llbb(bb);
if base::wants_new_eh_instructions(self.cx.sess()) {
- let cleanup_bb = Bx::append_block(self.cx, self.llfn, &format!("funclet_{:?}", bb));
+ let cleanup_bb = Bx::append_block(self.cx, self.llfn, &format!("funclet_{bb:?}"));
let mut cleanup_bx = Bx::build(self.cx, cleanup_bb);
let funclet = cleanup_bx.cleanup_pad(None, &[]);
cleanup_bx.br(llbb);
@@ -1610,7 +1626,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
// represents that this is a catch-all block.
bx = Bx::build(self.cx, cp_llbb);
let null =
- bx.const_null(bx.type_i8p_ext(bx.cx().data_layout().instruction_address_space));
+ bx.const_null(bx.type_ptr_ext(bx.cx().data_layout().instruction_address_space));
let sixty_four = bx.const_i32(64);
funclet = Some(bx.catch_pad(cs, &[null, sixty_four, null]));
} else {
@@ -1651,7 +1667,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
match self.cached_llbbs[bb] {
CachedLlbb::None => {
// FIXME(eddyb) only name the block if `fewer_names` is `false`.
- let llbb = Bx::append_block(self.cx, self.llfn, &format!("{:?}", bb));
+ let llbb = Bx::append_block(self.cx, self.llfn, &format!("{bb:?}"));
self.cached_llbbs[bb] = CachedLlbb::Some(llbb);
Some(llbb)
}