 compiler/rustc_codegen_ssa/src/mir/block.rs | 325 ++++++++++++-------
 1 file changed, 207 insertions(+), 118 deletions(-)
diff --git a/compiler/rustc_codegen_ssa/src/mir/block.rs b/compiler/rustc_codegen_ssa/src/mir/block.rs
index 3eee58d9d..29b7c9b0a 100644
--- a/compiler/rustc_codegen_ssa/src/mir/block.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/block.rs
@@ -13,15 +13,14 @@ use rustc_ast as ast;
use rustc_ast::{InlineAsmOptions, InlineAsmTemplatePiece};
use rustc_hir::lang_items::LangItem;
use rustc_index::vec::Idx;
-use rustc_middle::mir::AssertKind;
-use rustc_middle::mir::{self, SwitchTargets};
+use rustc_middle::mir::{self, AssertKind, SwitchTargets};
use rustc_middle::ty::layout::{HasTyCtxt, LayoutOf};
use rustc_middle::ty::print::{with_no_trimmed_paths, with_no_visible_paths};
use rustc_middle::ty::{self, Instance, Ty, TypeVisitable};
use rustc_span::source_map::Span;
use rustc_span::{sym, Symbol};
use rustc_symbol_mangling::typeid::typeid_for_fnabi;
-use rustc_target::abi::call::{ArgAbi, FnAbi, PassMode};
+use rustc_target::abi::call::{ArgAbi, FnAbi, PassMode, Reg};
use rustc_target::abi::{self, HasDataLayout, WrappingRange};
use rustc_target::spec::abi::Abi;
@@ -64,7 +63,9 @@ impl<'a, 'tcx> TerminatorCodegenHelper<'tcx> {
}
}
- fn lltarget<Bx: BuilderMethods<'a, 'tcx>>(
+ /// Get a basic block (creating it if necessary), possibly with a landing
+ /// pad next to it.
+ fn llbb_with_landing_pad<Bx: BuilderMethods<'a, 'tcx>>(
&self,
fx: &mut FunctionCx<'a, 'tcx, Bx>,
target: mir::BasicBlock,
@@ -74,32 +75,36 @@ impl<'a, 'tcx> TerminatorCodegenHelper<'tcx> {
let target_funclet = fx.cleanup_kinds[target].funclet_bb(target);
match (self.funclet_bb, target_funclet) {
(None, None) => (lltarget, false),
- (Some(f), Some(t_f)) if f == t_f || !base::wants_msvc_seh(fx.cx.tcx().sess) => {
- (lltarget, false)
- }
// jump *into* cleanup - need a landing pad if GNU, cleanup pad if MSVC
(None, Some(_)) => (fx.landing_pad_for(target), false),
(Some(_), None) => span_bug!(span, "{:?} - jump out of cleanup?", self.terminator),
- (Some(_), Some(_)) => (fx.landing_pad_for(target), true),
+ (Some(f), Some(t_f)) => {
+ if f == t_f || !base::wants_msvc_seh(fx.cx.tcx().sess) {
+ (lltarget, false)
+ } else {
+ (fx.landing_pad_for(target), true)
+ }
+ }
}
}
- /// Create a basic block.
- fn llblock<Bx: BuilderMethods<'a, 'tcx>>(
+ /// Get a basic block (creating it if necessary), possibly with cleanup
+ /// stuff in it or next to it.
+ fn llbb_with_cleanup<Bx: BuilderMethods<'a, 'tcx>>(
&self,
fx: &mut FunctionCx<'a, 'tcx, Bx>,
target: mir::BasicBlock,
) -> Bx::BasicBlock {
- let (lltarget, is_cleanupret) = self.lltarget(fx, target);
+ let (lltarget, is_cleanupret) = self.llbb_with_landing_pad(fx, target);
if is_cleanupret {
// MSVC cross-funclet jump - need a trampoline
-
- debug!("llblock: creating cleanup trampoline for {:?}", target);
+ debug_assert!(base::wants_msvc_seh(fx.cx.tcx().sess));
+ debug!("llbb_with_cleanup: creating cleanup trampoline for {:?}", target);
let name = &format!("{:?}_cleanup_trampoline_{:?}", self.bb, target);
- let trampoline = Bx::append_block(fx.cx, fx.llfn, name);
- let mut trampoline_bx = Bx::build(fx.cx, trampoline);
+ let trampoline_llbb = Bx::append_block(fx.cx, fx.llfn, name);
+ let mut trampoline_bx = Bx::build(fx.cx, trampoline_llbb);
trampoline_bx.cleanup_ret(self.funclet(fx).unwrap(), Some(lltarget));
- trampoline
+ trampoline_llbb
} else {
lltarget
}
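
The four cases above decide both whether the jump needs a landing pad and whether it must go through a `cleanupret` trampoline. A minimal standalone sketch of that decision (hypothetical helper, not compiler code):

    /// Sketch of the funclet-transition table encoded by `llbb_with_landing_pad`.
    /// `source`/`target` are the funclets of the current and destination blocks.
    fn needs_pad_and_trampoline(
        source: Option<u32>,
        target: Option<u32>,
        msvc_seh: bool,
    ) -> (bool /* landing pad */, bool /* cleanupret trampoline */) {
        match (source, target) {
            (None, None) => (false, false),                  // ordinary jump
            (None, Some(_)) => (true, false),                // jump *into* cleanup
            (Some(_), None) => panic!("jump out of cleanup?"),
            (Some(f), Some(t)) if f == t || !msvc_seh => (false, false),
            (Some(_), Some(_)) => (true, true),              // MSVC cross-funclet jump
        }
    }
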
@@ -111,10 +116,11 @@ impl<'a, 'tcx> TerminatorCodegenHelper<'tcx> {
bx: &mut Bx,
target: mir::BasicBlock,
) {
- let (lltarget, is_cleanupret) = self.lltarget(fx, target);
+ let (lltarget, is_cleanupret) = self.llbb_with_landing_pad(fx, target);
if is_cleanupret {
- // micro-optimization: generate a `ret` rather than a jump
+ // MSVC micro-optimization: generate a `ret` rather than a jump
// to a trampoline.
+ debug_assert!(base::wants_msvc_seh(fx.cx.tcx().sess));
bx.cleanup_ret(self.funclet(fx).unwrap(), Some(lltarget));
} else {
bx.br(lltarget);
@@ -139,7 +145,7 @@ impl<'a, 'tcx> TerminatorCodegenHelper<'tcx> {
let fn_ty = bx.fn_decl_backend_type(&fn_abi);
let unwind_block = if let Some(cleanup) = cleanup.filter(|_| fn_abi.can_unwind) {
- Some(self.llblock(fx, cleanup))
+ Some(self.llbb_with_cleanup(fx, cleanup))
} else if fx.mir[self.bb].is_cleanup
&& fn_abi.can_unwind
&& !base::wants_msvc_seh(fx.cx.tcx().sess)
@@ -163,9 +169,15 @@ impl<'a, 'tcx> TerminatorCodegenHelper<'tcx> {
} else {
fx.unreachable_block()
};
- let invokeret =
- bx.invoke(fn_ty, fn_ptr, &llargs, ret_llbb, unwind_block, self.funclet(fx));
- bx.apply_attrs_callsite(&fn_abi, invokeret);
+ let invokeret = bx.invoke(
+ fn_ty,
+ Some(&fn_abi),
+ fn_ptr,
+ &llargs,
+ ret_llbb,
+ unwind_block,
+ self.funclet(fx),
+ );
if fx.mir[self.bb].is_cleanup {
bx.do_not_inline(invokeret);
}
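
Note that `invoke` (and `call` further down) now takes the `FnAbi` as an argument, so call-site attributes are applied inside the builder rather than via a separate `apply_attrs_callsite` step. A rough sketch of the assumed updated trait method, with placeholder associated types (the real definition lives in the backend traits and may differ):

    // Assumed shape only; names and exact generics are illustrative.
    trait BuilderSketch {
        type Value;
        type Type;
        type BasicBlock;
        type Funclet;
        type FnAbi; // stands in for rustc's FnAbi<'tcx, Ty<'tcx>>

        fn invoke(
            &mut self,
            fn_ty: Self::Type,
            fn_abi: Option<&Self::FnAbi>, // new: lets the builder apply callsite attrs
            fn_ptr: Self::Value,
            args: &[Self::Value],
            then: Self::BasicBlock,
            catch: Self::BasicBlock,
            funclet: Option<&Self::Funclet>,
        ) -> Self::Value;
    }
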
@@ -179,8 +191,7 @@ impl<'a, 'tcx> TerminatorCodegenHelper<'tcx> {
fx.store_return(bx, ret_dest, &fn_abi.ret, invokeret);
}
} else {
- let llret = bx.call(fn_ty, fn_ptr, &llargs, self.funclet(fx));
- bx.apply_attrs_callsite(&fn_abi, llret);
+ let llret = bx.call(fn_ty, Some(&fn_abi), fn_ptr, &llargs, self.funclet(fx));
if fx.mir[self.bb].is_cleanup {
// Cleanup is always the cold path. Don't inline
// drop glue. Also, when there is a deeply-nested
@@ -227,7 +238,7 @@ impl<'a, 'tcx> TerminatorCodegenHelper<'tcx> {
options,
line_spans,
instance,
- Some((ret_llbb, self.llblock(fx, cleanup), self.funclet(fx))),
+ Some((ret_llbb, self.llbb_with_cleanup(fx, cleanup), self.funclet(fx))),
);
} else {
bx.codegen_inline_asm(template, &operands, options, line_spans, instance, None);
@@ -277,8 +288,8 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
if target_iter.len() == 1 {
// If there are two targets (one conditional, one fallback), emit br instead of switch
let (test_value, target) = target_iter.next().unwrap();
- let lltrue = helper.llblock(self, target);
- let llfalse = helper.llblock(self, targets.otherwise());
+ let lltrue = helper.llbb_with_cleanup(self, target);
+ let llfalse = helper.llbb_with_cleanup(self, targets.otherwise());
if switch_ty == bx.tcx().types.bool {
// Don't generate trivial icmps when switching on bool
match test_value {
@@ -295,8 +306,8 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
} else {
bx.switch(
discr.immediate(),
- helper.llblock(self, targets.otherwise()),
- target_iter.map(|(value, target)| (value, helper.llblock(self, target))),
+ helper.llbb_with_cleanup(self, targets.otherwise()),
+ target_iter.map(|(value, target)| (value, helper.llbb_with_cleanup(self, target))),
);
}
}
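
For reference, a `SwitchInt` with a single tested value plus a fallback takes the `br` path above, while more arms go through `bx.switch`; roughly:

    // Two targets: lowered as an icmp plus a conditional br rather than a switch.
    pub fn two_way(x: u32) -> u32 {
        match x {
            7 => 1,
            _ => 0,
        }
    }

    // Three or more targets: lowered via `switch`, with the wildcard arm as `otherwise`.
    pub fn many_way(x: u32) -> u32 {
        match x {
            0 => 10,
            1 => 20,
            2 => 30,
            _ => 0,
        }
    }
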
@@ -324,7 +335,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
bx.unreachable();
return;
}
- let llval = match self.fn_abi.ret.mode {
+ let llval = match &self.fn_abi.ret.mode {
PassMode::Ignore | PassMode::Indirect { .. } => {
bx.ret_void();
return;
@@ -339,7 +350,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
}
}
- PassMode::Cast(cast_ty) => {
+ PassMode::Cast(cast_ty, _) => {
let op = match self.locals[mir::RETURN_PLACE] {
LocalRef::Operand(Some(op)) => op,
LocalRef::Operand(None) => bug!("use of return before def"),
@@ -360,7 +371,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
llval
}
};
- let ty = bx.cast_backend_type(&cast_ty);
+ let ty = bx.cast_backend_type(cast_ty);
let addr = bx.pointercast(llslot, bx.type_ptr_to(ty));
bx.load(ty, addr, self.fn_abi.ret.layout.align.abi)
}
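
A concrete case that exercises this cast-return path (the exact cast target depends on the target ABI, so treat it as an assumption): a small `#[repr(C)]` aggregate returned by value is commonly given `PassMode::Cast`, which is why the value is spilled, the slot pointer is cast, and the cast type is loaded back for the `ret`.

    // On x86_64 SysV this is typically returned through a register-sized cast type,
    // i.e. the PassMode::Cast arm above rather than a direct or indirect return.
    #[repr(C)]
    pub struct Small {
        a: i32,
        b: i32,
    }

    pub extern "C" fn make_small() -> Small {
        Small { a: 1, b: 2 }
    }
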
@@ -368,6 +379,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
bx.ret(llval);
}
+ #[tracing::instrument(level = "trace", skip(self, helper, bx))]
fn codegen_drop_terminator(
&mut self,
helper: TerminatorCodegenHelper<'tcx>,
@@ -398,14 +410,75 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
let (drop_fn, fn_abi) = match ty.kind() {
// FIXME(eddyb) perhaps move some of this logic into
// `Instance::resolve_drop_in_place`?
- ty::Dynamic(..) => {
+ ty::Dynamic(_, _, ty::Dyn) => {
+ // IN THIS ARM, WE HAVE:
+ // ty = *mut (dyn Trait)
+ // which is: exists<T> ( *mut T, Vtable<T: Trait> )
+ // args[0] args[1]
+ //
+ // args = ( Data, Vtable )
+ // |
+ // v
+ // /-------\
+ // | ... |
+ // \-------/
+ //
let virtual_drop = Instance {
def: ty::InstanceDef::Virtual(drop_fn.def_id(), 0),
substs: drop_fn.substs,
};
+ debug!("ty = {:?}", ty);
+ debug!("drop_fn = {:?}", drop_fn);
+ debug!("args = {:?}", args);
let fn_abi = bx.fn_abi_of_instance(virtual_drop, ty::List::empty());
let vtable = args[1];
+ // Truncate vtable off of args list
+ args = &args[..1];
+ (
+ meth::VirtualIndex::from_index(ty::COMMON_VTABLE_ENTRIES_DROPINPLACE)
+ .get_fn(&mut bx, vtable, ty, &fn_abi),
+ fn_abi,
+ )
+ }
+ ty::Dynamic(_, _, ty::DynStar) => {
+ // IN THIS ARM, WE HAVE:
+ // ty = *mut (dyn* Trait)
+ // which is: *mut exists<T: sizeof(T) == sizeof(usize)> (T, Vtable<T: Trait>)
+ //
+ // args = [ * ]
+ // |
+ // v
+ // ( Data, Vtable )
+ // |
+ // v
+ // /-------\
+ // | ... |
+ // \-------/
+ //
+ //
+ // WE CAN CONVERT THIS INTO THE ABOVE LOGIC BY DOING
+ //
+ // data = &(*args[0]).0 // gives a pointer to Data above (really the same pointer)
+ // vtable = (*args[0]).1 // loads the vtable out
+ // (data, vtable) // an equivalent Rust `*mut dyn Trait`
+ //
+ // SO THEN WE CAN USE THE ABOVE CODE.
+ let virtual_drop = Instance {
+ def: ty::InstanceDef::Virtual(drop_fn.def_id(), 0),
+ substs: drop_fn.substs,
+ };
+ debug!("ty = {:?}", ty);
+ debug!("drop_fn = {:?}", drop_fn);
+ debug!("args = {:?}", args);
+ let fn_abi = bx.fn_abi_of_instance(virtual_drop, ty::List::empty());
+ let data = args[0];
+ let data_ty = bx.cx().backend_type(place.layout);
+ let vtable_ptr =
+ bx.gep(data_ty, data, &[bx.cx().const_i32(0), bx.cx().const_i32(1)]);
+ let vtable = bx.load(bx.type_i8p(), vtable_ptr, abi::Align::ONE);
+ // Truncate vtable off of args list
args = &args[..1];
+ debug!("args' = {:?}", args);
(
meth::VirtualIndex::from_index(ty::COMMON_VTABLE_ENTRIES_DROPINPLACE)
.get_fn(&mut bx, vtable, ty, &fn_abi),
@@ -464,7 +537,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
let cond = bx.expect(cond, expected);
// Create the failure block and the conditional branch to it.
- let lltarget = helper.llblock(self, target);
+ let lltarget = helper.llbb_with_cleanup(self, target);
let panic_block = bx.append_sibling_block("panic");
if expected {
bx.cond_br(cond, lltarget, panic_block);
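
These `Assert` terminators come from checked operations in ordinary code; for example, a bounds-checked index yields a condition that is expected to be true, an `expect` hint, a conditional branch, and a cold panic block:

    // `xs[i]` produces an Assert terminator: cond = `i < xs.len()`, expected = true.
    // Codegen emits the expect intrinsic, branches to the success block on the likely
    // edge, and places the panic call in the separate "panic" block created above.
    pub fn get(xs: &[u8], i: usize) -> u8 {
        xs[i]
    }
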
@@ -798,58 +871,78 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
let mut op = self.codegen_operand(&mut bx, arg);
if let (0, Some(ty::InstanceDef::Virtual(_, idx))) = (i, def) {
- if let Pair(..) = op.val {
- // In the case of Rc<Self>, we need to explicitly pass a
- // *mut RcBox<Self> with a Scalar (not ScalarPair) ABI. This is a hack
- // that is understood elsewhere in the compiler as a method on
- // `dyn Trait`.
- // To get a `*mut RcBox<Self>`, we just keep unwrapping newtypes until
- // we get a value of a built-in pointer type
- 'descend_newtypes: while !op.layout.ty.is_unsafe_ptr()
- && !op.layout.ty.is_region_ptr()
- {
- for i in 0..op.layout.fields.count() {
- let field = op.extract_field(&mut bx, i);
- if !field.layout.is_zst() {
- // we found the one non-zero-sized field that is allowed
- // now find *its* non-zero-sized field, or stop if it's a
- // pointer
- op = field;
- continue 'descend_newtypes;
+ match op.val {
+ Pair(data_ptr, meta) => {
+ // In the case of Rc<Self>, we need to explicitly pass a
+ // *mut RcBox<Self> with a Scalar (not ScalarPair) ABI. This is a hack
+ // that is understood elsewhere in the compiler as a method on
+ // `dyn Trait`.
+ // To get a `*mut RcBox<Self>`, we just keep unwrapping newtypes until
+ // we get a value of a built-in pointer type
+ 'descend_newtypes: while !op.layout.ty.is_unsafe_ptr()
+ && !op.layout.ty.is_region_ptr()
+ {
+ for i in 0..op.layout.fields.count() {
+ let field = op.extract_field(&mut bx, i);
+ if !field.layout.is_zst() {
+ // we found the one non-zero-sized field that is allowed
+ // now find *its* non-zero-sized field, or stop if it's a
+ // pointer
+ op = field;
+ continue 'descend_newtypes;
+ }
}
+
+ span_bug!(span, "receiver has no non-zero-sized fields {:?}", op);
}
- span_bug!(span, "receiver has no non-zero-sized fields {:?}", op);
+ // now that we have `*dyn Trait` or `&dyn Trait`, split it up into its
+ // data pointer and vtable. Look up the method in the vtable, and pass
+ // the data pointer as the first argument
+ llfn = Some(meth::VirtualIndex::from_index(idx).get_fn(
+ &mut bx,
+ meta,
+ op.layout.ty,
+ &fn_abi,
+ ));
+ llargs.push(data_ptr);
+ continue 'make_args;
}
-
- // now that we have `*dyn Trait` or `&dyn Trait`, split it up into its
- // data pointer and vtable. Look up the method in the vtable, and pass
- // the data pointer as the first argument
- match op.val {
- Pair(data_ptr, meta) => {
- llfn = Some(meth::VirtualIndex::from_index(idx).get_fn(
- &mut bx,
- meta,
- op.layout.ty,
- &fn_abi,
- ));
- llargs.push(data_ptr);
- continue 'make_args;
+ Ref(data_ptr, Some(meta), _) => {
+ // by-value dynamic dispatch
+ llfn = Some(meth::VirtualIndex::from_index(idx).get_fn(
+ &mut bx,
+ meta,
+ op.layout.ty,
+ &fn_abi,
+ ));
+ llargs.push(data_ptr);
+ continue;
+ }
+ Immediate(_) => {
+ let ty::Ref(_, ty, _) = op.layout.ty.kind() else {
+ span_bug!(span, "can't codegen a virtual call on {:#?}", op);
+ };
+ if !ty.is_dyn_star() {
+ span_bug!(span, "can't codegen a virtual call on {:#?}", op);
}
- other => bug!("expected a Pair, got {:?}", other),
+ // FIXME(dyn-star): Make sure this is done on a &dyn* receiver
+ let place = op.deref(bx.cx());
+ let data_ptr = place.project_field(&mut bx, 0);
+ let meta_ptr = place.project_field(&mut bx, 1);
+ let meta = bx.load_operand(meta_ptr);
+ llfn = Some(meth::VirtualIndex::from_index(idx).get_fn(
+ &mut bx,
+ meta.immediate(),
+ op.layout.ty,
+ &fn_abi,
+ ));
+ llargs.push(data_ptr.llval);
+ continue;
+ }
+ _ => {
+ span_bug!(span, "can't codegen a virtual call on {:#?}", op);
}
- } else if let Ref(data_ptr, Some(meta), _) = op.val {
- // by-value dynamic dispatch
- llfn = Some(meth::VirtualIndex::from_index(idx).get_fn(
- &mut bx,
- meta,
- op.layout.ty,
- &fn_abi,
- ));
- llargs.push(data_ptr);
- continue;
- } else {
- span_bug!(span, "can't codegen a virtual call on {:?}", op);
}
}
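
For context, a small example (not compiler code) of the receiver shapes handled above: a `&self` or `Rc<Self>` receiver on a trait object arrives as a (data, vtable) `Pair`, and the `'descend_newtypes` loop peels wrapper layers until a built-in pointer remains; the new `Immediate` arm covers pointer-sized `dyn*` receivers.

    use std::rc::Rc;

    trait Greet {
        fn greet(self: Rc<Self>) -> String;
    }

    struct Person;

    impl Greet for Person {
        fn greet(self: Rc<Self>) -> String {
            "hello".to_string()
        }
    }

    fn main() {
        // `obj` is a (data ptr, vtable) pair. The virtual call peels Rc's newtype
        // layers down to a thin pointer, looks `greet` up in the vtable, and passes
        // the data pointer as the first argument.
        let obj: Rc<dyn Greet> = Rc::new(Person);
        println!("{}", obj.greet());
    }
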
@@ -1161,39 +1254,35 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
llargs: &mut Vec<Bx::Value>,
arg: &ArgAbi<'tcx, Ty<'tcx>>,
) {
- // Fill padding with undef value, where applicable.
- if let Some(ty) = arg.pad {
- llargs.push(bx.const_undef(bx.reg_backend_type(&ty)))
- }
-
- if arg.is_ignore() {
- return;
- }
-
- if let PassMode::Pair(..) = arg.mode {
- match op.val {
+ match arg.mode {
+ PassMode::Ignore => return,
+ PassMode::Cast(_, true) => {
+ // Fill padding with undef value, where applicable.
+ llargs.push(bx.const_undef(bx.reg_backend_type(&Reg::i32())));
+ }
+ PassMode::Pair(..) => match op.val {
Pair(a, b) => {
llargs.push(a);
llargs.push(b);
return;
}
_ => bug!("codegen_argument: {:?} invalid for pair argument", op),
- }
- } else if arg.is_unsized_indirect() {
- match op.val {
+ },
+ PassMode::Indirect { attrs: _, extra_attrs: Some(_), on_stack: _ } => match op.val {
Ref(a, Some(b), _) => {
llargs.push(a);
llargs.push(b);
return;
}
_ => bug!("codegen_argument: {:?} invalid for unsized indirect argument", op),
- }
+ },
+ _ => {}
}
// Force by-ref if we have to load through a cast pointer.
let (mut llval, align, by_ref) = match op.val {
Immediate(_) | Pair(..) => match arg.mode {
- PassMode::Indirect { .. } | PassMode::Cast(_) => {
+ PassMode::Indirect { .. } | PassMode::Cast(..) => {
let scratch = PlaceRef::alloca(bx, arg.layout);
op.val.store(bx, scratch);
(scratch.llval, scratch.align, true)
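
The rewritten match keys off the updated `PassMode`: `Cast` now carries a flag that requests an undef `i32` padding argument, and unsized indirect arguments (those with extra attributes) are passed as a (pointer, metadata) pair. A simplified, assumed sketch of the variant shapes this relies on (the real definition lives in `rustc_target::abi::call`):

    // Assumed, simplified shape; only the pieces the match above depends on are shown.
    enum PassModeSketch {
        Ignore,
        Direct,
        Pair,
        // bool = "prepend an undef i32 padding argument", matched as `Cast(_, true)` above.
        Cast { pad_i32: bool },
        // `has_extra_attrs` marks an unsized indirect argument passed as (ptr, metadata).
        Indirect { on_stack: bool, has_extra_attrs: bool },
    }
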
@@ -1225,8 +1314,8 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
if by_ref && !arg.is_indirect() {
// Have to load the argument, maybe while casting it.
- if let PassMode::Cast(ty) = arg.mode {
- let llty = bx.cast_backend_type(&ty);
+ if let PassMode::Cast(ty, _) = &arg.mode {
+ let llty = bx.cast_backend_type(ty);
let addr = bx.pointercast(llval, bx.type_ptr_to(llty));
llval = bx.load(llty, addr, align.min(arg.layout.align.abi));
} else {
@@ -1377,20 +1466,20 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
// bar();
// }
Some(&mir::TerminatorKind::Abort) => {
- let cs_bb =
+ let cs_llbb =
Bx::append_block(self.cx, self.llfn, &format!("cs_funclet{:?}", bb));
- let cp_bb =
+ let cp_llbb =
Bx::append_block(self.cx, self.llfn, &format!("cp_funclet{:?}", bb));
- ret_llbb = cs_bb;
+ ret_llbb = cs_llbb;
- let mut cs_bx = Bx::build(self.cx, cs_bb);
- let cs = cs_bx.catch_switch(None, None, &[cp_bb]);
+ let mut cs_bx = Bx::build(self.cx, cs_llbb);
+ let cs = cs_bx.catch_switch(None, None, &[cp_llbb]);
// The "null" here is actually a RTTI type descriptor for the
// C++ personality function, but `catch (...)` has no type so
// it's null. The 64 here is actually a bitfield which
// represents that this is a catch-all block.
- let mut cp_bx = Bx::build(self.cx, cp_bb);
+ let mut cp_bx = Bx::build(self.cx, cp_llbb);
let null = cp_bx.const_null(
cp_bx.type_i8p_ext(cp_bx.cx().data_layout().instruction_address_space),
);
@@ -1399,10 +1488,10 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
cp_bx.br(llbb);
}
_ => {
- let cleanup_bb =
+ let cleanup_llbb =
Bx::append_block(self.cx, self.llfn, &format!("funclet_{:?}", bb));
- ret_llbb = cleanup_bb;
- let mut cleanup_bx = Bx::build(self.cx, cleanup_bb);
+ ret_llbb = cleanup_llbb;
+ let mut cleanup_bx = Bx::build(self.cx, cleanup_llbb);
funclet = cleanup_bx.cleanup_pad(None, &[]);
cleanup_bx.br(llbb);
}
@@ -1410,19 +1499,20 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
self.funclets[bb] = Some(funclet);
ret_llbb
} else {
- let bb = Bx::append_block(self.cx, self.llfn, "cleanup");
- let mut bx = Bx::build(self.cx, bb);
+ let cleanup_llbb = Bx::append_block(self.cx, self.llfn, "cleanup");
+ let mut cleanup_bx = Bx::build(self.cx, cleanup_llbb);
let llpersonality = self.cx.eh_personality();
let llretty = self.landing_pad_type();
- let lp = bx.cleanup_landing_pad(llretty, llpersonality);
+ let lp = cleanup_bx.cleanup_landing_pad(llretty, llpersonality);
- let slot = self.get_personality_slot(&mut bx);
- slot.storage_live(&mut bx);
- Pair(bx.extract_value(lp, 0), bx.extract_value(lp, 1)).store(&mut bx, slot);
+ let slot = self.get_personality_slot(&mut cleanup_bx);
+ slot.storage_live(&mut cleanup_bx);
+ Pair(cleanup_bx.extract_value(lp, 0), cleanup_bx.extract_value(lp, 1))
+ .store(&mut cleanup_bx, slot);
- bx.br(llbb);
- bx.llbb()
+ cleanup_bx.br(llbb);
+ cleanup_llbb
}
}
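
On non-MSVC unwinding targets this is the cleanup landing pad: the (exception pointer, selector) pair produced by the landing pad is stashed in the personality slot before branching to the cleanup code. A small program that reaches this path during unwinding:

    // Unwinding out of `run` must drop `_guard`, so the function gets a cleanup
    // landing pad: save the landingpad pair, run the drop glue, then resume.
    struct Noisy;

    impl Drop for Noisy {
        fn drop(&mut self) {
            println!("cleaning up");
        }
    }

    pub fn run() {
        let _guard = Noisy;
        panic!("unwind through the cleanup block");
    }
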
@@ -1456,8 +1546,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
let (fn_abi, fn_ptr) = common::build_langcall(&bx, None, LangItem::PanicNoUnwind);
let fn_ty = bx.fn_decl_backend_type(&fn_abi);
- let llret = bx.call(fn_ty, fn_ptr, &[], None);
- bx.apply_attrs_callsite(&fn_abi, llret);
+ let llret = bx.call(fn_ty, Some(&fn_abi), fn_ptr, &[], None);
bx.do_not_inline(llret);
bx.unreachable();
@@ -1625,7 +1714,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
}
DirectOperand(index) => {
// If there is a cast, we have to store and reload.
- let op = if let PassMode::Cast(_) = ret_abi.mode {
+ let op = if let PassMode::Cast(..) = ret_abi.mode {
let tmp = PlaceRef::alloca(bx, ret_abi.layout);
tmp.storage_live(bx);
bx.store_arg(&ret_abi, llval, tmp);