path: root/compiler/rustc_codegen_ssa/src/mir
Diffstat (limited to '')
-rw-r--r--  compiler/rustc_codegen_ssa/src/mir/analyze.rs      6
-rw-r--r--  compiler/rustc_codegen_ssa/src/mir/block.rs      325
-rw-r--r--  compiler/rustc_codegen_ssa/src/mir/constant.rs    36
-rw-r--r--  compiler/rustc_codegen_ssa/src/mir/debuginfo.rs    8
-rw-r--r--  compiler/rustc_codegen_ssa/src/mir/intrinsic.rs   16
-rw-r--r--  compiler/rustc_codegen_ssa/src/mir/mod.rs         34
-rw-r--r--  compiler/rustc_codegen_ssa/src/mir/operand.rs     11
-rw-r--r--  compiler/rustc_codegen_ssa/src/mir/place.rs       35
-rw-r--r--  compiler/rustc_codegen_ssa/src/mir/rvalue.rs      22
-rw-r--r--  compiler/rustc_codegen_ssa/src/mir/statement.rs   14
10 files changed, 303 insertions, 204 deletions
diff --git a/compiler/rustc_codegen_ssa/src/mir/analyze.rs b/compiler/rustc_codegen_ssa/src/mir/analyze.rs
index 24da48ead..c7617d2e4 100644
--- a/compiler/rustc_codegen_ssa/src/mir/analyze.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/analyze.rs
@@ -266,7 +266,7 @@ pub fn cleanup_kinds(mir: &mir::Body<'_>) -> IndexVec<mir::BasicBlock, CleanupKi
result: &mut IndexVec<mir::BasicBlock, CleanupKind>,
mir: &mir::Body<'tcx>,
) {
- for (bb, data) in mir.basic_blocks().iter_enumerated() {
+ for (bb, data) in mir.basic_blocks.iter_enumerated() {
match data.terminator().kind {
TerminatorKind::Goto { .. }
| TerminatorKind::Resume
@@ -296,7 +296,7 @@ pub fn cleanup_kinds(mir: &mir::Body<'_>) -> IndexVec<mir::BasicBlock, CleanupKi
}
fn propagate<'tcx>(result: &mut IndexVec<mir::BasicBlock, CleanupKind>, mir: &mir::Body<'tcx>) {
- let mut funclet_succs = IndexVec::from_elem(None, mir.basic_blocks());
+ let mut funclet_succs = IndexVec::from_elem(None, &mir.basic_blocks);
let mut set_successor = |funclet: mir::BasicBlock, succ| match funclet_succs[funclet] {
ref mut s @ None => {
@@ -359,7 +359,7 @@ pub fn cleanup_kinds(mir: &mir::Body<'_>) -> IndexVec<mir::BasicBlock, CleanupKi
}
}
- let mut result = IndexVec::from_elem(CleanupKind::NotCleanup, mir.basic_blocks());
+ let mut result = IndexVec::from_elem(CleanupKind::NotCleanup, &mir.basic_blocks);
discover_masters(&mut result, mir);
propagate(&mut result, mir);
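
The analyze.rs hunks above only swap `mir.basic_blocks()` method calls for direct `mir.basic_blocks` field access; the surrounding logic (one `IndexVec` entry per basic block, initialized to `CleanupKind::NotCleanup`) is unchanged. A hedged, self-contained sketch of that initialization pattern, with a plain `Vec` standing in for rustc's `IndexVec` (all names below are illustrative, not rustc's):

// Illustrative example, not part of the diff: one table entry per basic
// block, all starting as NotCleanup, mirroring
// `IndexVec::from_elem(CleanupKind::NotCleanup, &mir.basic_blocks)`.
#[derive(Clone, Copy, Debug, PartialEq)]
enum CleanupKind {
    NotCleanup,
}

struct Body {
    // The diff turns `mir.basic_blocks()` (a method call) into
    // `mir.basic_blocks` (field access); this plain field plays that role here.
    basic_blocks: Vec<&'static str>,
}

fn cleanup_kinds(body: &Body) -> Vec<CleanupKind> {
    vec![CleanupKind::NotCleanup; body.basic_blocks.len()]
}

fn main() {
    let body = Body { basic_blocks: vec!["bb0", "bb1", "bb2"] };
    let kinds = cleanup_kinds(&body);
    assert_eq!(kinds, vec![CleanupKind::NotCleanup; 3]);
}
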
diff --git a/compiler/rustc_codegen_ssa/src/mir/block.rs b/compiler/rustc_codegen_ssa/src/mir/block.rs
index 3eee58d9d..29b7c9b0a 100644
--- a/compiler/rustc_codegen_ssa/src/mir/block.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/block.rs
@@ -13,15 +13,14 @@ use rustc_ast as ast;
use rustc_ast::{InlineAsmOptions, InlineAsmTemplatePiece};
use rustc_hir::lang_items::LangItem;
use rustc_index::vec::Idx;
-use rustc_middle::mir::AssertKind;
-use rustc_middle::mir::{self, SwitchTargets};
+use rustc_middle::mir::{self, AssertKind, SwitchTargets};
use rustc_middle::ty::layout::{HasTyCtxt, LayoutOf};
use rustc_middle::ty::print::{with_no_trimmed_paths, with_no_visible_paths};
use rustc_middle::ty::{self, Instance, Ty, TypeVisitable};
use rustc_span::source_map::Span;
use rustc_span::{sym, Symbol};
use rustc_symbol_mangling::typeid::typeid_for_fnabi;
-use rustc_target::abi::call::{ArgAbi, FnAbi, PassMode};
+use rustc_target::abi::call::{ArgAbi, FnAbi, PassMode, Reg};
use rustc_target::abi::{self, HasDataLayout, WrappingRange};
use rustc_target::spec::abi::Abi;
@@ -64,7 +63,9 @@ impl<'a, 'tcx> TerminatorCodegenHelper<'tcx> {
}
}
- fn lltarget<Bx: BuilderMethods<'a, 'tcx>>(
+ /// Get a basic block (creating it if necessary), possibly with a landing
+ /// pad next to it.
+ fn llbb_with_landing_pad<Bx: BuilderMethods<'a, 'tcx>>(
&self,
fx: &mut FunctionCx<'a, 'tcx, Bx>,
target: mir::BasicBlock,
@@ -74,32 +75,36 @@ impl<'a, 'tcx> TerminatorCodegenHelper<'tcx> {
let target_funclet = fx.cleanup_kinds[target].funclet_bb(target);
match (self.funclet_bb, target_funclet) {
(None, None) => (lltarget, false),
- (Some(f), Some(t_f)) if f == t_f || !base::wants_msvc_seh(fx.cx.tcx().sess) => {
- (lltarget, false)
- }
// jump *into* cleanup - need a landing pad if GNU, cleanup pad if MSVC
(None, Some(_)) => (fx.landing_pad_for(target), false),
(Some(_), None) => span_bug!(span, "{:?} - jump out of cleanup?", self.terminator),
- (Some(_), Some(_)) => (fx.landing_pad_for(target), true),
+ (Some(f), Some(t_f)) => {
+ if f == t_f || !base::wants_msvc_seh(fx.cx.tcx().sess) {
+ (lltarget, false)
+ } else {
+ (fx.landing_pad_for(target), true)
+ }
+ }
}
}
- /// Create a basic block.
- fn llblock<Bx: BuilderMethods<'a, 'tcx>>(
+ /// Get a basic block (creating it if necessary), possibly with cleanup
+ /// stuff in it or next to it.
+ fn llbb_with_cleanup<Bx: BuilderMethods<'a, 'tcx>>(
&self,
fx: &mut FunctionCx<'a, 'tcx, Bx>,
target: mir::BasicBlock,
) -> Bx::BasicBlock {
- let (lltarget, is_cleanupret) = self.lltarget(fx, target);
+ let (lltarget, is_cleanupret) = self.llbb_with_landing_pad(fx, target);
if is_cleanupret {
// MSVC cross-funclet jump - need a trampoline
-
- debug!("llblock: creating cleanup trampoline for {:?}", target);
+ debug_assert!(base::wants_msvc_seh(fx.cx.tcx().sess));
+ debug!("llbb_with_cleanup: creating cleanup trampoline for {:?}", target);
let name = &format!("{:?}_cleanup_trampoline_{:?}", self.bb, target);
- let trampoline = Bx::append_block(fx.cx, fx.llfn, name);
- let mut trampoline_bx = Bx::build(fx.cx, trampoline);
+ let trampoline_llbb = Bx::append_block(fx.cx, fx.llfn, name);
+ let mut trampoline_bx = Bx::build(fx.cx, trampoline_llbb);
trampoline_bx.cleanup_ret(self.funclet(fx).unwrap(), Some(lltarget));
- trampoline
+ trampoline_llbb
} else {
lltarget
}
@@ -111,10 +116,11 @@ impl<'a, 'tcx> TerminatorCodegenHelper<'tcx> {
bx: &mut Bx,
target: mir::BasicBlock,
) {
- let (lltarget, is_cleanupret) = self.lltarget(fx, target);
+ let (lltarget, is_cleanupret) = self.llbb_with_landing_pad(fx, target);
if is_cleanupret {
- // micro-optimization: generate a `ret` rather than a jump
+ // MSVC micro-optimization: generate a `ret` rather than a jump
// to a trampoline.
+ debug_assert!(base::wants_msvc_seh(fx.cx.tcx().sess));
bx.cleanup_ret(self.funclet(fx).unwrap(), Some(lltarget));
} else {
bx.br(lltarget);
@@ -139,7 +145,7 @@ impl<'a, 'tcx> TerminatorCodegenHelper<'tcx> {
let fn_ty = bx.fn_decl_backend_type(&fn_abi);
let unwind_block = if let Some(cleanup) = cleanup.filter(|_| fn_abi.can_unwind) {
- Some(self.llblock(fx, cleanup))
+ Some(self.llbb_with_cleanup(fx, cleanup))
} else if fx.mir[self.bb].is_cleanup
&& fn_abi.can_unwind
&& !base::wants_msvc_seh(fx.cx.tcx().sess)
@@ -163,9 +169,15 @@ impl<'a, 'tcx> TerminatorCodegenHelper<'tcx> {
} else {
fx.unreachable_block()
};
- let invokeret =
- bx.invoke(fn_ty, fn_ptr, &llargs, ret_llbb, unwind_block, self.funclet(fx));
- bx.apply_attrs_callsite(&fn_abi, invokeret);
+ let invokeret = bx.invoke(
+ fn_ty,
+ Some(&fn_abi),
+ fn_ptr,
+ &llargs,
+ ret_llbb,
+ unwind_block,
+ self.funclet(fx),
+ );
if fx.mir[self.bb].is_cleanup {
bx.do_not_inline(invokeret);
}
@@ -179,8 +191,7 @@ impl<'a, 'tcx> TerminatorCodegenHelper<'tcx> {
fx.store_return(bx, ret_dest, &fn_abi.ret, invokeret);
}
} else {
- let llret = bx.call(fn_ty, fn_ptr, &llargs, self.funclet(fx));
- bx.apply_attrs_callsite(&fn_abi, llret);
+ let llret = bx.call(fn_ty, Some(&fn_abi), fn_ptr, &llargs, self.funclet(fx));
if fx.mir[self.bb].is_cleanup {
// Cleanup is always the cold path. Don't inline
// drop glue. Also, when there is a deeply-nested
@@ -227,7 +238,7 @@ impl<'a, 'tcx> TerminatorCodegenHelper<'tcx> {
options,
line_spans,
instance,
- Some((ret_llbb, self.llblock(fx, cleanup), self.funclet(fx))),
+ Some((ret_llbb, self.llbb_with_cleanup(fx, cleanup), self.funclet(fx))),
);
} else {
bx.codegen_inline_asm(template, &operands, options, line_spans, instance, None);
@@ -277,8 +288,8 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
if target_iter.len() == 1 {
// If there are two targets (one conditional, one fallback), emit br instead of switch
let (test_value, target) = target_iter.next().unwrap();
- let lltrue = helper.llblock(self, target);
- let llfalse = helper.llblock(self, targets.otherwise());
+ let lltrue = helper.llbb_with_cleanup(self, target);
+ let llfalse = helper.llbb_with_cleanup(self, targets.otherwise());
if switch_ty == bx.tcx().types.bool {
// Don't generate trivial icmps when switching on bool
match test_value {
@@ -295,8 +306,8 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
} else {
bx.switch(
discr.immediate(),
- helper.llblock(self, targets.otherwise()),
- target_iter.map(|(value, target)| (value, helper.llblock(self, target))),
+ helper.llbb_with_cleanup(self, targets.otherwise()),
+ target_iter.map(|(value, target)| (value, helper.llbb_with_cleanup(self, target))),
);
}
}
@@ -324,7 +335,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
bx.unreachable();
return;
}
- let llval = match self.fn_abi.ret.mode {
+ let llval = match &self.fn_abi.ret.mode {
PassMode::Ignore | PassMode::Indirect { .. } => {
bx.ret_void();
return;
@@ -339,7 +350,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
}
}
- PassMode::Cast(cast_ty) => {
+ PassMode::Cast(cast_ty, _) => {
let op = match self.locals[mir::RETURN_PLACE] {
LocalRef::Operand(Some(op)) => op,
LocalRef::Operand(None) => bug!("use of return before def"),
@@ -360,7 +371,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
llval
}
};
- let ty = bx.cast_backend_type(&cast_ty);
+ let ty = bx.cast_backend_type(cast_ty);
let addr = bx.pointercast(llslot, bx.type_ptr_to(ty));
bx.load(ty, addr, self.fn_abi.ret.layout.align.abi)
}
@@ -368,6 +379,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
bx.ret(llval);
}
+ #[tracing::instrument(level = "trace", skip(self, helper, bx))]
fn codegen_drop_terminator(
&mut self,
helper: TerminatorCodegenHelper<'tcx>,
@@ -398,14 +410,75 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
let (drop_fn, fn_abi) = match ty.kind() {
// FIXME(eddyb) perhaps move some of this logic into
// `Instance::resolve_drop_in_place`?
- ty::Dynamic(..) => {
+ ty::Dynamic(_, _, ty::Dyn) => {
+ // IN THIS ARM, WE HAVE:
+ // ty = *mut (dyn Trait)
+ // which is: exists<T> ( *mut T, Vtable<T: Trait> )
+ // args[0] args[1]
+ //
+ // args = ( Data, Vtable )
+ // |
+ // v
+ // /-------\
+ // | ... |
+ // \-------/
+ //
let virtual_drop = Instance {
def: ty::InstanceDef::Virtual(drop_fn.def_id(), 0),
substs: drop_fn.substs,
};
+ debug!("ty = {:?}", ty);
+ debug!("drop_fn = {:?}", drop_fn);
+ debug!("args = {:?}", args);
let fn_abi = bx.fn_abi_of_instance(virtual_drop, ty::List::empty());
let vtable = args[1];
+ // Truncate vtable off of args list
+ args = &args[..1];
+ (
+ meth::VirtualIndex::from_index(ty::COMMON_VTABLE_ENTRIES_DROPINPLACE)
+ .get_fn(&mut bx, vtable, ty, &fn_abi),
+ fn_abi,
+ )
+ }
+ ty::Dynamic(_, _, ty::DynStar) => {
+ // IN THIS ARM, WE HAVE:
+ // ty = *mut (dyn* Trait)
+ // which is: *mut exists<T: sizeof(T) == sizeof(usize)> (T, Vtable<T: Trait>)
+ //
+ // args = [ * ]
+ // |
+ // v
+ // ( Data, Vtable )
+ // |
+ // v
+ // /-------\
+ // | ... |
+ // \-------/
+ //
+ //
+ // WE CAN CONVERT THIS INTO THE ABOVE LOGIC BY DOING
+ //
+ // data = &(*args[0]).0 // gives a pointer to Data above (really the same pointer)
+ // vtable = (*args[0]).1 // loads the vtable out
+ // (data, vtable) // an equivalent Rust `*mut dyn Trait`
+ //
+ // SO THEN WE CAN USE THE ABOVE CODE.
+ let virtual_drop = Instance {
+ def: ty::InstanceDef::Virtual(drop_fn.def_id(), 0),
+ substs: drop_fn.substs,
+ };
+ debug!("ty = {:?}", ty);
+ debug!("drop_fn = {:?}", drop_fn);
+ debug!("args = {:?}", args);
+ let fn_abi = bx.fn_abi_of_instance(virtual_drop, ty::List::empty());
+ let data = args[0];
+ let data_ty = bx.cx().backend_type(place.layout);
+ let vtable_ptr =
+ bx.gep(data_ty, data, &[bx.cx().const_i32(0), bx.cx().const_i32(1)]);
+ let vtable = bx.load(bx.type_i8p(), vtable_ptr, abi::Align::ONE);
+ // Truncate vtable off of args list
args = &args[..1];
+ debug!("args' = {:?}", args);
(
meth::VirtualIndex::from_index(ty::COMMON_VTABLE_ENTRIES_DROPINPLACE)
.get_fn(&mut bx, vtable, ty, &fn_abi),
@@ -464,7 +537,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
let cond = bx.expect(cond, expected);
// Create the failure block and the conditional branch to it.
- let lltarget = helper.llblock(self, target);
+ let lltarget = helper.llbb_with_cleanup(self, target);
let panic_block = bx.append_sibling_block("panic");
if expected {
bx.cond_br(cond, lltarget, panic_block);
@@ -798,58 +871,78 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
let mut op = self.codegen_operand(&mut bx, arg);
if let (0, Some(ty::InstanceDef::Virtual(_, idx))) = (i, def) {
- if let Pair(..) = op.val {
- // In the case of Rc<Self>, we need to explicitly pass a
- // *mut RcBox<Self> with a Scalar (not ScalarPair) ABI. This is a hack
- // that is understood elsewhere in the compiler as a method on
- // `dyn Trait`.
- // To get a `*mut RcBox<Self>`, we just keep unwrapping newtypes until
- // we get a value of a built-in pointer type
- 'descend_newtypes: while !op.layout.ty.is_unsafe_ptr()
- && !op.layout.ty.is_region_ptr()
- {
- for i in 0..op.layout.fields.count() {
- let field = op.extract_field(&mut bx, i);
- if !field.layout.is_zst() {
- // we found the one non-zero-sized field that is allowed
- // now find *its* non-zero-sized field, or stop if it's a
- // pointer
- op = field;
- continue 'descend_newtypes;
+ match op.val {
+ Pair(data_ptr, meta) => {
+ // In the case of Rc<Self>, we need to explicitly pass a
+ // *mut RcBox<Self> with a Scalar (not ScalarPair) ABI. This is a hack
+ // that is understood elsewhere in the compiler as a method on
+ // `dyn Trait`.
+ // To get a `*mut RcBox<Self>`, we just keep unwrapping newtypes until
+ // we get a value of a built-in pointer type
+ 'descend_newtypes: while !op.layout.ty.is_unsafe_ptr()
+ && !op.layout.ty.is_region_ptr()
+ {
+ for i in 0..op.layout.fields.count() {
+ let field = op.extract_field(&mut bx, i);
+ if !field.layout.is_zst() {
+ // we found the one non-zero-sized field that is allowed
+ // now find *its* non-zero-sized field, or stop if it's a
+ // pointer
+ op = field;
+ continue 'descend_newtypes;
+ }
}
+
+ span_bug!(span, "receiver has no non-zero-sized fields {:?}", op);
}
- span_bug!(span, "receiver has no non-zero-sized fields {:?}", op);
+ // now that we have `*dyn Trait` or `&dyn Trait`, split it up into its
+ // data pointer and vtable. Look up the method in the vtable, and pass
+ // the data pointer as the first argument
+ llfn = Some(meth::VirtualIndex::from_index(idx).get_fn(
+ &mut bx,
+ meta,
+ op.layout.ty,
+ &fn_abi,
+ ));
+ llargs.push(data_ptr);
+ continue 'make_args;
}
-
- // now that we have `*dyn Trait` or `&dyn Trait`, split it up into its
- // data pointer and vtable. Look up the method in the vtable, and pass
- // the data pointer as the first argument
- match op.val {
- Pair(data_ptr, meta) => {
- llfn = Some(meth::VirtualIndex::from_index(idx).get_fn(
- &mut bx,
- meta,
- op.layout.ty,
- &fn_abi,
- ));
- llargs.push(data_ptr);
- continue 'make_args;
+ Ref(data_ptr, Some(meta), _) => {
+ // by-value dynamic dispatch
+ llfn = Some(meth::VirtualIndex::from_index(idx).get_fn(
+ &mut bx,
+ meta,
+ op.layout.ty,
+ &fn_abi,
+ ));
+ llargs.push(data_ptr);
+ continue;
+ }
+ Immediate(_) => {
+ let ty::Ref(_, ty, _) = op.layout.ty.kind() else {
+ span_bug!(span, "can't codegen a virtual call on {:#?}", op);
+ };
+ if !ty.is_dyn_star() {
+ span_bug!(span, "can't codegen a virtual call on {:#?}", op);
}
- other => bug!("expected a Pair, got {:?}", other),
+ // FIXME(dyn-star): Make sure this is done on a &dyn* receiver
+ let place = op.deref(bx.cx());
+ let data_ptr = place.project_field(&mut bx, 0);
+ let meta_ptr = place.project_field(&mut bx, 1);
+ let meta = bx.load_operand(meta_ptr);
+ llfn = Some(meth::VirtualIndex::from_index(idx).get_fn(
+ &mut bx,
+ meta.immediate(),
+ op.layout.ty,
+ &fn_abi,
+ ));
+ llargs.push(data_ptr.llval);
+ continue;
+ }
+ _ => {
+ span_bug!(span, "can't codegen a virtual call on {:#?}", op);
}
- } else if let Ref(data_ptr, Some(meta), _) = op.val {
- // by-value dynamic dispatch
- llfn = Some(meth::VirtualIndex::from_index(idx).get_fn(
- &mut bx,
- meta,
- op.layout.ty,
- &fn_abi,
- ));
- llargs.push(data_ptr);
- continue;
- } else {
- span_bug!(span, "can't codegen a virtual call on {:?}", op);
}
}
@@ -1161,39 +1254,35 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
llargs: &mut Vec<Bx::Value>,
arg: &ArgAbi<'tcx, Ty<'tcx>>,
) {
- // Fill padding with undef value, where applicable.
- if let Some(ty) = arg.pad {
- llargs.push(bx.const_undef(bx.reg_backend_type(&ty)))
- }
-
- if arg.is_ignore() {
- return;
- }
-
- if let PassMode::Pair(..) = arg.mode {
- match op.val {
+ match arg.mode {
+ PassMode::Ignore => return,
+ PassMode::Cast(_, true) => {
+ // Fill padding with undef value, where applicable.
+ llargs.push(bx.const_undef(bx.reg_backend_type(&Reg::i32())));
+ }
+ PassMode::Pair(..) => match op.val {
Pair(a, b) => {
llargs.push(a);
llargs.push(b);
return;
}
_ => bug!("codegen_argument: {:?} invalid for pair argument", op),
- }
- } else if arg.is_unsized_indirect() {
- match op.val {
+ },
+ PassMode::Indirect { attrs: _, extra_attrs: Some(_), on_stack: _ } => match op.val {
Ref(a, Some(b), _) => {
llargs.push(a);
llargs.push(b);
return;
}
_ => bug!("codegen_argument: {:?} invalid for unsized indirect argument", op),
- }
+ },
+ _ => {}
}
// Force by-ref if we have to load through a cast pointer.
let (mut llval, align, by_ref) = match op.val {
Immediate(_) | Pair(..) => match arg.mode {
- PassMode::Indirect { .. } | PassMode::Cast(_) => {
+ PassMode::Indirect { .. } | PassMode::Cast(..) => {
let scratch = PlaceRef::alloca(bx, arg.layout);
op.val.store(bx, scratch);
(scratch.llval, scratch.align, true)
@@ -1225,8 +1314,8 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
if by_ref && !arg.is_indirect() {
// Have to load the argument, maybe while casting it.
- if let PassMode::Cast(ty) = arg.mode {
- let llty = bx.cast_backend_type(&ty);
+ if let PassMode::Cast(ty, _) = &arg.mode {
+ let llty = bx.cast_backend_type(ty);
let addr = bx.pointercast(llval, bx.type_ptr_to(llty));
llval = bx.load(llty, addr, align.min(arg.layout.align.abi));
} else {
@@ -1377,20 +1466,20 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
// bar();
// }
Some(&mir::TerminatorKind::Abort) => {
- let cs_bb =
+ let cs_llbb =
Bx::append_block(self.cx, self.llfn, &format!("cs_funclet{:?}", bb));
- let cp_bb =
+ let cp_llbb =
Bx::append_block(self.cx, self.llfn, &format!("cp_funclet{:?}", bb));
- ret_llbb = cs_bb;
+ ret_llbb = cs_llbb;
- let mut cs_bx = Bx::build(self.cx, cs_bb);
- let cs = cs_bx.catch_switch(None, None, &[cp_bb]);
+ let mut cs_bx = Bx::build(self.cx, cs_llbb);
+ let cs = cs_bx.catch_switch(None, None, &[cp_llbb]);
// The "null" here is actually a RTTI type descriptor for the
// C++ personality function, but `catch (...)` has no type so
// it's null. The 64 here is actually a bitfield which
// represents that this is a catch-all block.
- let mut cp_bx = Bx::build(self.cx, cp_bb);
+ let mut cp_bx = Bx::build(self.cx, cp_llbb);
let null = cp_bx.const_null(
cp_bx.type_i8p_ext(cp_bx.cx().data_layout().instruction_address_space),
);
@@ -1399,10 +1488,10 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
cp_bx.br(llbb);
}
_ => {
- let cleanup_bb =
+ let cleanup_llbb =
Bx::append_block(self.cx, self.llfn, &format!("funclet_{:?}", bb));
- ret_llbb = cleanup_bb;
- let mut cleanup_bx = Bx::build(self.cx, cleanup_bb);
+ ret_llbb = cleanup_llbb;
+ let mut cleanup_bx = Bx::build(self.cx, cleanup_llbb);
funclet = cleanup_bx.cleanup_pad(None, &[]);
cleanup_bx.br(llbb);
}
@@ -1410,19 +1499,20 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
self.funclets[bb] = Some(funclet);
ret_llbb
} else {
- let bb = Bx::append_block(self.cx, self.llfn, "cleanup");
- let mut bx = Bx::build(self.cx, bb);
+ let cleanup_llbb = Bx::append_block(self.cx, self.llfn, "cleanup");
+ let mut cleanup_bx = Bx::build(self.cx, cleanup_llbb);
let llpersonality = self.cx.eh_personality();
let llretty = self.landing_pad_type();
- let lp = bx.cleanup_landing_pad(llretty, llpersonality);
+ let lp = cleanup_bx.cleanup_landing_pad(llretty, llpersonality);
- let slot = self.get_personality_slot(&mut bx);
- slot.storage_live(&mut bx);
- Pair(bx.extract_value(lp, 0), bx.extract_value(lp, 1)).store(&mut bx, slot);
+ let slot = self.get_personality_slot(&mut cleanup_bx);
+ slot.storage_live(&mut cleanup_bx);
+ Pair(cleanup_bx.extract_value(lp, 0), cleanup_bx.extract_value(lp, 1))
+ .store(&mut cleanup_bx, slot);
- bx.br(llbb);
- bx.llbb()
+ cleanup_bx.br(llbb);
+ cleanup_llbb
}
}
@@ -1456,8 +1546,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
let (fn_abi, fn_ptr) = common::build_langcall(&bx, None, LangItem::PanicNoUnwind);
let fn_ty = bx.fn_decl_backend_type(&fn_abi);
- let llret = bx.call(fn_ty, fn_ptr, &[], None);
- bx.apply_attrs_callsite(&fn_abi, llret);
+ let llret = bx.call(fn_ty, Some(&fn_abi), fn_ptr, &[], None);
bx.do_not_inline(llret);
bx.unreachable();
@@ -1625,7 +1714,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
}
DirectOperand(index) => {
// If there is a cast, we have to store and reload.
- let op = if let PassMode::Cast(_) = ret_abi.mode {
+ let op = if let PassMode::Cast(..) = ret_abi.mode {
let tmp = PlaceRef::alloca(bx, ret_abi.layout);
tmp.storage_live(bx);
bx.store_arg(&ret_abi, llval, tmp);
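
The block.rs changes above (the `ty::Dyn` vs `ty::DynStar` drop arms and the new virtual-call receiver match) revolve around where the vtable pointer lives. A hedged, plain-Rust sketch of the two shapes, with made-up names and no rustc internals: for `*mut dyn Trait` the vtable rides next to the data pointer in a wide pointer, while for `dyn* Trait` the data word and the vtable pointer sit together in a two-word object, so the vtable must first be loaded out of that object, which is exactly the extra `gep` plus `load` the DynStar arm performs.

// Illustrative example, not part of the diff. `ManualVtable` and `Erased`
// are made-up stand-ins for the layouts described in the comments above.
struct ManualVtable {
    drop_in_place: fn(*mut ()),
}

// The in-memory picture of a `dyn* Trait` value: one data word plus one
// vtable pointer, two words total.
struct Erased {
    data: *mut (),
    vtable: &'static ManualVtable,
}

fn drop_nothing(_p: *mut ()) {
    // nothing to drop for `()`
}

static UNIT_VTABLE: ManualVtable = ManualVtable { drop_in_place: drop_nothing };

// Corresponds to the DynStar arm: read the vtable *out of* the object,
// index the drop entry, then call it on the data word.
fn drop_erased(e: &Erased) {
    (e.vtable.drop_in_place)(e.data);
}

fn main() {
    let mut unit = ();
    let e = Erased { data: &mut unit as *mut (), vtable: &UNIT_VTABLE };
    drop_erased(&e);
    assert_eq!(std::mem::size_of::<Erased>(), 2 * std::mem::size_of::<usize>());
}
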
diff --git a/compiler/rustc_codegen_ssa/src/mir/constant.rs b/compiler/rustc_codegen_ssa/src/mir/constant.rs
index 9a995fbf6..4c6ab457c 100644
--- a/compiler/rustc_codegen_ssa/src/mir/constant.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/constant.rs
@@ -25,26 +25,26 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
constant: &mir::Constant<'tcx>,
) -> Result<ConstValue<'tcx>, ErrorHandled> {
let ct = self.monomorphize(constant.literal);
- let ct = match ct {
- mir::ConstantKind::Ty(ct) => ct,
+ let uv = match ct {
+ mir::ConstantKind::Ty(ct) => match ct.kind() {
+ ty::ConstKind::Unevaluated(uv) => uv.expand(),
+ ty::ConstKind::Value(val) => {
+ return Ok(self.cx.tcx().valtree_to_const_val((ct.ty(), val)));
+ }
+ err => span_bug!(
+ constant.span,
+ "encountered bad ConstKind after monomorphizing: {:?}",
+ err
+ ),
+ },
+ mir::ConstantKind::Unevaluated(uv, _) => uv,
mir::ConstantKind::Val(val, _) => return Ok(val),
};
- match ct.kind() {
- ty::ConstKind::Unevaluated(ct) => self
- .cx
- .tcx()
- .const_eval_resolve(ty::ParamEnv::reveal_all(), ct, None)
- .map_err(|err| {
- self.cx.tcx().sess.span_err(constant.span, "erroneous constant encountered");
- err
- }),
- ty::ConstKind::Value(val) => Ok(self.cx.tcx().valtree_to_const_val((ct.ty(), val))),
- err => span_bug!(
- constant.span,
- "encountered bad ConstKind after monomorphizing: {:?}",
- err
- ),
- }
+
+ self.cx.tcx().const_eval_resolve(ty::ParamEnv::reveal_all(), uv, None).map_err(|err| {
+ self.cx.tcx().sess.span_err(constant.span, "erroneous constant encountered");
+ err
+ })
}
/// process constant containing SIMD shuffle indices
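
The constant.rs rewrite above changes control flow rather than behavior: already-evaluated constants return early, and both shapes of unevaluated constants (`ConstantKind::Ty(Unevaluated)` and `ConstantKind::Unevaluated`) now funnel into a single `const_eval_resolve` call instead of duplicating the error handling. A hedged sketch of that shape with illustrative stand-in types (nothing below is a rustc API):

// Illustrative example, not part of the diff.
enum Constant {
    TyValue(i64),                // like ConstantKind::Ty(ConstKind::Value(_))
    TyUnevaluated(&'static str), // like ConstantKind::Ty(ConstKind::Unevaluated(_))
    Unevaluated(&'static str),   // like ConstantKind::Unevaluated(_, _)
    Val(i64),                    // like ConstantKind::Val(_, _)
}

// Stand-in for tcx.const_eval_resolve.
fn evaluate(expr: &str) -> Result<i64, String> {
    expr.parse::<i64>().map_err(|e| e.to_string())
}

fn eval_mir_constant(c: Constant) -> Result<i64, String> {
    // Early returns for values that are already evaluated...
    let unevaluated = match c {
        Constant::TyValue(v) | Constant::Val(v) => return Ok(v),
        Constant::TyUnevaluated(uv) | Constant::Unevaluated(uv) => uv,
    };
    // ...and one shared evaluation path instead of two copies of it.
    evaluate(unevaluated)
}

fn main() {
    assert_eq!(eval_mir_constant(Constant::TyValue(1)), Ok(1));
    assert_eq!(eval_mir_constant(Constant::Val(7)), Ok(7));
    assert_eq!(eval_mir_constant(Constant::Unevaluated("40")), Ok(40));
    assert!(eval_mir_constant(Constant::TyUnevaluated("oops")).is_err());
}
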
diff --git a/compiler/rustc_codegen_ssa/src/mir/debuginfo.rs b/compiler/rustc_codegen_ssa/src/mir/debuginfo.rs
index 8c3186efc..157c1c823 100644
--- a/compiler/rustc_codegen_ssa/src/mir/debuginfo.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/debuginfo.rs
@@ -3,7 +3,7 @@ use rustc_index::vec::IndexVec;
use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrFlags;
use rustc_middle::mir;
use rustc_middle::ty;
-use rustc_middle::ty::layout::LayoutOf;
+use rustc_middle::ty::layout::{HasTyCtxt, LayoutOf};
use rustc_session::config::DebugInfo;
use rustc_span::symbol::{kw, Symbol};
use rustc_span::{BytePos, Span};
@@ -93,15 +93,15 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
}
/// In order to have a good line stepping behavior in debugger, we overwrite debug
- /// locations of macro expansions with that of the outermost expansion site
- /// (unless the crate is being compiled with `-Z debug-macros`).
+ /// locations of macro expansions with that of the outermost expansion site (when the macro is
+ /// annotated with `#[collapse_debuginfo]` or when `-Zdebug-macros` is provided).
fn adjust_span_for_debugging(&self, mut span: Span) -> Span {
// Bail out if debug info emission is not enabled.
if self.debug_context.is_none() {
return span;
}
- if span.from_expansion() && !self.cx.sess().opts.unstable_opts.debug_macros {
+ if self.cx.tcx().should_collapse_debuginfo(span) {
// Walk up the macro expansion chain until we reach a non-expanded span.
// We also stop at the function body level because no line stepping can occur
// at the level above that.
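
The debuginfo.rs change above swaps the blanket `-Zdebug-macros` test for a per-macro query, `should_collapse_debuginfo(span)`. As a hedged illustration of what opts a macro into that behavior, here is the nightly-only attribute as it existed at the time of this change (the attribute later gained an explicit argument form, so treat this as a period sketch rather than current syntax):

// Illustrative, nightly-only example from the era of this change.
#![feature(collapse_debuginfo)]

#[collapse_debuginfo]
macro_rules! checked_add {
    ($a:expr, $b:expr) => {{
        // With the attribute, debug locations inside this expansion are
        // collapsed to the macro's call site, which is the case
        // `should_collapse_debuginfo(span)` reports in the hunk above.
        $a + $b
    }};
}

fn main() {
    assert_eq!(checked_add!(2, 3), 5);
}
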
diff --git a/compiler/rustc_codegen_ssa/src/mir/intrinsic.rs b/compiler/rustc_codegen_ssa/src/mir/intrinsic.rs
index 94ac71a4d..215edbe02 100644
--- a/compiler/rustc_codegen_ssa/src/mir/intrinsic.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/intrinsic.rs
@@ -77,10 +77,6 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
let result = PlaceRef::new_sized(llresult, fn_abi.ret.layout);
let llval = match name {
- sym::assume => {
- bx.assume(args[0].immediate());
- return;
- }
sym::abort => {
bx.abort();
return;
@@ -555,14 +551,10 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
return;
}
- sym::ptr_guaranteed_eq | sym::ptr_guaranteed_ne => {
+ sym::ptr_guaranteed_cmp => {
let a = args[0].immediate();
let b = args[1].immediate();
- if name == sym::ptr_guaranteed_eq {
- bx.icmp(IntPredicate::IntEQ, a, b)
- } else {
- bx.icmp(IntPredicate::IntNE, a, b)
- }
+ bx.icmp(IntPredicate::IntEQ, a, b)
}
sym::ptr_offset_from | sym::ptr_offset_from_unsigned => {
@@ -597,8 +589,8 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
};
if !fn_abi.ret.is_ignore() {
- if let PassMode::Cast(ty) = fn_abi.ret.mode {
- let ptr_llty = bx.type_ptr_to(bx.cast_backend_type(&ty));
+ if let PassMode::Cast(ty, _) = &fn_abi.ret.mode {
+ let ptr_llty = bx.type_ptr_to(bx.cast_backend_type(ty));
let ptr = bx.pointercast(result.llval, ptr_llty);
bx.store(llval, ptr, result.align);
} else {
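
In intrinsic.rs, the separate `ptr_guaranteed_eq`/`ptr_guaranteed_ne` lowerings are replaced by a single `ptr_guaranteed_cmp` arm that emits one equality compare. The rationale, roughly, is that the "cannot be determined" answer matters only during const evaluation; at runtime every pointer comparison has a definite result, so codegen can use a plain compare. A stable-Rust sketch of that runtime fact (ordinary pointer comparison, no unstable intrinsics):

// Illustrative example, not part of the diff.
fn main() {
    let a = 1u32;
    let b = 1u32;
    let p: *const u32 = &a;
    let q: *const u32 = &b;
    // At runtime the answers are definite: a pointer equals itself, and
    // two distinct live locals have distinct addresses.
    assert!(p == p);
    assert!(p != q);
}
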
diff --git a/compiler/rustc_codegen_ssa/src/mir/mod.rs b/compiler/rustc_codegen_ssa/src/mir/mod.rs
index 8ee375fa9..da9aaf00e 100644
--- a/compiler/rustc_codegen_ssa/src/mir/mod.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/mod.rs
@@ -148,15 +148,15 @@ pub fn codegen_mir<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
let debug_context = cx.create_function_debug_context(instance, &fn_abi, llfn, &mir);
let start_llbb = Bx::append_block(cx, llfn, "start");
- let mut bx = Bx::build(cx, start_llbb);
+ let mut start_bx = Bx::build(cx, start_llbb);
- if mir.basic_blocks().iter().any(|bb| bb.is_cleanup) {
- bx.set_personality_fn(cx.eh_personality());
+ if mir.basic_blocks.iter().any(|bb| bb.is_cleanup) {
+ start_bx.set_personality_fn(cx.eh_personality());
}
let cleanup_kinds = analyze::cleanup_kinds(&mir);
let cached_llbbs: IndexVec<mir::BasicBlock, Option<Bx::BasicBlock>> = mir
- .basic_blocks()
+ .basic_blocks
.indices()
.map(|bb| if bb == mir::START_BLOCK { Some(start_llbb) } else { None })
.collect();
@@ -172,15 +172,15 @@ pub fn codegen_mir<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
unreachable_block: None,
double_unwind_guard: None,
cleanup_kinds,
- landing_pads: IndexVec::from_elem(None, mir.basic_blocks()),
- funclets: IndexVec::from_fn_n(|_| None, mir.basic_blocks().len()),
+ landing_pads: IndexVec::from_elem(None, &mir.basic_blocks),
+ funclets: IndexVec::from_fn_n(|_| None, mir.basic_blocks.len()),
locals: IndexVec::new(),
debug_context,
per_local_var_debug_info: None,
caller_location: None,
};
- fx.per_local_var_debug_info = fx.compute_per_local_var_debug_info(&mut bx);
+ fx.per_local_var_debug_info = fx.compute_per_local_var_debug_info(&mut start_bx);
// Evaluate all required consts; codegen later assumes that CTFE will never fail.
let mut all_consts_ok = true;
@@ -191,7 +191,7 @@ pub fn codegen_mir<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
// errored or at least linted
ErrorHandled::Reported(_) | ErrorHandled::Linted => {}
ErrorHandled::TooGeneric => {
- span_bug!(const_.span, "codgen encountered polymorphic constant: {:?}", err)
+ span_bug!(const_.span, "codegen encountered polymorphic constant: {:?}", err)
}
}
}
@@ -206,29 +206,29 @@ pub fn codegen_mir<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
// Allocate variable and temp allocas
fx.locals = {
- let args = arg_local_refs(&mut bx, &mut fx, &memory_locals);
+ let args = arg_local_refs(&mut start_bx, &mut fx, &memory_locals);
let mut allocate_local = |local| {
let decl = &mir.local_decls[local];
- let layout = bx.layout_of(fx.monomorphize(decl.ty));
+ let layout = start_bx.layout_of(fx.monomorphize(decl.ty));
assert!(!layout.ty.has_erasable_regions());
if local == mir::RETURN_PLACE && fx.fn_abi.ret.is_indirect() {
debug!("alloc: {:?} (return place) -> place", local);
- let llretptr = bx.get_param(0);
+ let llretptr = start_bx.get_param(0);
return LocalRef::Place(PlaceRef::new_sized(llretptr, layout));
}
if memory_locals.contains(local) {
debug!("alloc: {:?} -> place", local);
if layout.is_unsized() {
- LocalRef::UnsizedPlace(PlaceRef::alloca_unsized_indirect(&mut bx, layout))
+ LocalRef::UnsizedPlace(PlaceRef::alloca_unsized_indirect(&mut start_bx, layout))
} else {
- LocalRef::Place(PlaceRef::alloca(&mut bx, layout))
+ LocalRef::Place(PlaceRef::alloca(&mut start_bx, layout))
}
} else {
debug!("alloc: {:?} -> operand", local);
- LocalRef::new_operand(&mut bx, layout)
+ LocalRef::new_operand(&mut start_bx, layout)
}
};
@@ -240,7 +240,7 @@ pub fn codegen_mir<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
};
// Apply debuginfo to the newly allocated locals.
- fx.debug_introduce_locals(&mut bx);
+ fx.debug_introduce_locals(&mut start_bx);
// Codegen the body of each block using reverse postorder
for (bb, _) in traversal::reverse_postorder(&mir) {
@@ -283,7 +283,7 @@ fn arg_local_refs<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
for i in 0..tupled_arg_tys.len() {
let arg = &fx.fn_abi.args[idx];
idx += 1;
- if arg.pad.is_some() {
+ if let PassMode::Cast(_, true) = arg.mode {
llarg_idx += 1;
}
let pr_field = place.project_field(bx, i);
@@ -309,7 +309,7 @@ fn arg_local_refs<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
let arg = &fx.fn_abi.args[idx];
idx += 1;
- if arg.pad.is_some() {
+ if let PassMode::Cast(_, true) = arg.mode {
llarg_idx += 1;
}
diff --git a/compiler/rustc_codegen_ssa/src/mir/operand.rs b/compiler/rustc_codegen_ssa/src/mir/operand.rs
index c612634fc..e6ba642a7 100644
--- a/compiler/rustc_codegen_ssa/src/mir/operand.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/operand.rs
@@ -72,10 +72,6 @@ impl<'a, 'tcx, V: CodegenObject> OperandRef<'tcx, V> {
) -> Self {
let layout = bx.layout_of(ty);
- if layout.is_zst() {
- return OperandRef::new_zst(bx, layout);
- }
-
let val = match val {
ConstValue::Scalar(x) => {
let Abi::Scalar(scalar) = layout.abi else {
@@ -84,10 +80,7 @@ impl<'a, 'tcx, V: CodegenObject> OperandRef<'tcx, V> {
let llval = bx.scalar_to_backend(x, scalar, bx.immediate_backend_type(layout));
OperandValue::Immediate(llval)
}
- ConstValue::ZeroSized => {
- let llval = bx.zst_to_backend(bx.immediate_backend_type(layout));
- OperandValue::Immediate(llval)
- }
+ ConstValue::ZeroSized => return OperandRef::new_zst(bx, layout),
ConstValue::Slice { data, start, end } => {
let Abi::ScalarPair(a_scalar, _) = layout.abi else {
bug!("from_const: invalid ScalarPair layout: {:#?}", layout);
@@ -359,7 +352,7 @@ impl<'a, 'tcx, V: CodegenObject> OperandValue<V> {
// Allocate an appropriate region on the stack, and copy the value into it
let (llsize, _) = glue::size_and_align_of_dst(bx, unsized_ty, Some(llextra));
- let lldst = bx.array_alloca(bx.cx().type_i8(), llsize, max_align);
+ let lldst = bx.byte_array_alloca(llsize, max_align);
bx.memcpy(lldst, max_align, llptr, min_align, llsize, flags);
// Store the allocated region and the extra to the indirect place.
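
The operand.rs change folds `ConstValue::ZeroSized` into the early `OperandRef::new_zst` return: a zero-sized constant has no bytes, so there is no backend value to materialize, only a layout. A small stable-Rust reminder of what "zero-sized" means here (illustrative only):

// Illustrative example, not part of the diff.
use std::mem::size_of;

#[derive(Clone, Copy, PartialEq, Debug)]
struct Marker; // a unit struct is a zero-sized type (ZST)

fn main() {
    assert_eq!(size_of::<Marker>(), 0);
    assert_eq!(size_of::<()>(), 0);
    // Even an array of ZSTs stores nothing.
    assert_eq!(size_of::<[Marker; 1024]>(), 0);
    let m = Marker;
    let copy = m; // "copying" a ZST moves no bytes
    assert_eq!(m, copy);
}
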
diff --git a/compiler/rustc_codegen_ssa/src/mir/place.rs b/compiler/rustc_codegen_ssa/src/mir/place.rs
index 268c4d765..9c18df564 100644
--- a/compiler/rustc_codegen_ssa/src/mir/place.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/place.rs
@@ -4,7 +4,6 @@ use super::{FunctionCx, LocalRef};
use crate::common::IntPredicate;
use crate::glue;
use crate::traits::*;
-use crate::MemFlags;
use rustc_middle::mir;
use rustc_middle::mir::tcx::PlaceTy;
@@ -245,7 +244,7 @@ impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> {
};
bx.intcast(tag.immediate(), cast_to, signed)
}
- TagEncoding::Niche { dataful_variant, ref niche_variants, niche_start } => {
+ TagEncoding::Niche { untagged_variant, ref niche_variants, niche_start } => {
// Rebase from niche values to discriminants, and check
// whether the result is in range for the niche variants.
let niche_llty = bx.cx().immediate_backend_type(tag.layout);
@@ -303,7 +302,7 @@ impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> {
bx.select(
is_niche,
niche_discr,
- bx.cx().const_uint(cast_to, dataful_variant.as_u32() as u64),
+ bx.cx().const_uint(cast_to, untagged_variant.as_u32() as u64),
)
}
}
@@ -338,21 +337,11 @@ impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> {
}
Variants::Multiple {
tag_encoding:
- TagEncoding::Niche { dataful_variant, ref niche_variants, niche_start },
+ TagEncoding::Niche { untagged_variant, ref niche_variants, niche_start },
tag_field,
..
} => {
- if variant_index != dataful_variant {
- if bx.cx().sess().target.arch == "arm"
- || bx.cx().sess().target.arch == "aarch64"
- {
- // FIXME(#34427): as workaround for LLVM bug on ARM,
- // use memset of 0 before assigning niche value.
- let fill_byte = bx.cx().const_u8(0);
- let size = bx.cx().const_usize(self.layout.size.bytes());
- bx.memset(self.llval, fill_byte, size, self.align, MemFlags::empty());
- }
-
+ if variant_index != untagged_variant {
let niche = self.project_field(bx, tag_field);
let niche_llty = bx.cx().immediate_backend_type(niche.layout);
let niche_value = variant_index.as_u32() - niche_variants.start().as_u32();
@@ -411,6 +400,21 @@ impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> {
downcast
}
+ pub fn project_type<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
+ &self,
+ bx: &mut Bx,
+ ty: Ty<'tcx>,
+ ) -> Self {
+ let mut downcast = *self;
+ downcast.layout = bx.cx().layout_of(ty);
+
+ // Cast to the appropriate type.
+ let variant_ty = bx.cx().backend_type(downcast.layout);
+ downcast.llval = bx.pointercast(downcast.llval, bx.cx().type_ptr_to(variant_ty));
+
+ downcast
+ }
+
pub fn storage_live<Bx: BuilderMethods<'a, 'tcx, Value = V>>(&self, bx: &mut Bx) {
bx.lifetime_start(self.llval, self.layout.size);
}
@@ -453,6 +457,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
mir::ProjectionElem::Field(ref field, _) => {
cg_base.project_field(bx, field.index())
}
+ mir::ProjectionElem::OpaqueCast(ty) => cg_base.project_type(bx, ty),
mir::ProjectionElem::Index(index) => {
let index = &mir::Operand::Copy(mir::Place::from(index));
let index = self.codegen_operand(bx, index);
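
The place.rs hunks rename `dataful_variant` to `untagged_variant` in the niche encoding (and drop the old ARM memset workaround). For readers less familiar with niche encoding, a worked stable-Rust example of the layout trick being named: `Option<&u8>` needs no separate tag because `None` is encoded in the pointer's otherwise-invalid value 0 (its niche), while `Some`, the untagged variant, stores the reference directly.

// Illustrative example, not part of the diff.
use std::mem::size_of;

fn main() {
    // The discriminant lives in the niche, not next to the data.
    assert_eq!(size_of::<Option<&u8>>(), size_of::<&u8>());
    // With no niche available (every u64 bit pattern is valid), a
    // separate tag is added and the enum grows.
    assert!(size_of::<Option<u64>>() > size_of::<u64>());
}
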
diff --git a/compiler/rustc_codegen_ssa/src/mir/rvalue.rs b/compiler/rustc_codegen_ssa/src/mir/rvalue.rs
index 26b9fbf44..4aab31fbf 100644
--- a/compiler/rustc_codegen_ssa/src/mir/rvalue.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/rvalue.rs
@@ -87,7 +87,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
let size = bx.const_usize(dest.layout.size.bytes());
// Use llvm.memset.p0i8.* to initialize all zero arrays
- if bx.cx().const_to_opt_uint(v) == Some(0) {
+ if bx.cx().const_to_opt_u128(v, false) == Some(0) {
let fill = bx.cx().const_u8(0);
bx.memset(start, fill, size, dest.align, MemFlags::empty());
return bx;
@@ -249,7 +249,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
OperandValue::Pair(lldata, llextra)
}
mir::CastKind::Pointer(PointerCast::MutToConstPointer)
- | mir::CastKind::Misc
+ | mir::CastKind::PtrToPtr
if bx.cx().is_backend_scalar_pair(operand.layout) =>
{
if let OperandValue::Pair(data_ptr, meta) = operand.val {
@@ -271,10 +271,26 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
bug!("unexpected non-pair operand");
}
}
+ mir::CastKind::DynStar => {
+ let (lldata, llextra) = match operand.val {
+ OperandValue::Ref(_, _, _) => todo!(),
+ OperandValue::Immediate(v) => (v, None),
+ OperandValue::Pair(v, l) => (v, Some(l)),
+ };
+ let (lldata, llextra) =
+ base::cast_to_dyn_star(&mut bx, lldata, operand.layout, cast.ty, llextra);
+ OperandValue::Pair(lldata, llextra)
+ }
mir::CastKind::Pointer(
PointerCast::MutToConstPointer | PointerCast::ArrayToPointer,
)
- | mir::CastKind::Misc
+ | mir::CastKind::IntToInt
+ | mir::CastKind::FloatToInt
+ | mir::CastKind::FloatToFloat
+ | mir::CastKind::IntToFloat
+ | mir::CastKind::PtrToPtr
+ | mir::CastKind::FnPtrToPtr
+
// Since int2ptr can have arbitrary integer types as input (so we have to do
// sign extension and all that), it is currently best handled in the same code
// path as the other integer-to-X casts.
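
The rvalue.rs hunk replaces the old catch-all `mir::CastKind::Misc` with the specific kinds it covered, plus the new `DynStar` cast. As a rough, hedged correspondence, each surface-level `as` cast below maps onto one of the newly named kinds:

// Illustrative example, not part of the diff; the comments name the MIR
// cast kind each `as` expression roughly corresponds to.
fn main() {
    let a: u8 = 300u32 as u8;                              // IntToInt (truncates to 44)
    let b: i32 = 3.9_f64 as i32;                           // FloatToInt
    let c: f32 = 1.5_f64 as f32;                           // FloatToFloat
    let d: f64 = 7_i32 as f64;                             // IntToFloat
    let p: *const u8 = &42u32 as *const u32 as *const u8;  // PtrToPtr
    let f: fn() = main;
    let q: *const () = f as *const ();                     // FnPtrToPtr
    assert_eq!((a, b, c, d), (44, 3, 1.5, 7.0));
    let _ = (p, q);
}
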
diff --git a/compiler/rustc_codegen_ssa/src/mir/statement.rs b/compiler/rustc_codegen_ssa/src/mir/statement.rs
index f452f2988..1db0fb3a6 100644
--- a/compiler/rustc_codegen_ssa/src/mir/statement.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/statement.rs
@@ -1,4 +1,5 @@
use rustc_middle::mir;
+use rustc_middle::mir::NonDivergingIntrinsic;
use super::FunctionCx;
use super::LocalRef;
@@ -73,11 +74,14 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
self.codegen_coverage(&mut bx, coverage.clone(), statement.source_info.scope);
bx
}
- mir::StatementKind::CopyNonOverlapping(box mir::CopyNonOverlapping {
- ref src,
- ref dst,
- ref count,
- }) => {
+ mir::StatementKind::Intrinsic(box NonDivergingIntrinsic::Assume(ref op)) => {
+ let op_val = self.codegen_operand(&mut bx, op);
+ bx.assume(op_val.immediate());
+ bx
+ }
+ mir::StatementKind::Intrinsic(box NonDivergingIntrinsic::CopyNonOverlapping(
+ mir::CopyNonOverlapping { ref count, ref src, ref dst },
+ )) => {
let dst_val = self.codegen_operand(&mut bx, dst);
let src_val = self.codegen_operand(&mut bx, src);
let count = self.codegen_operand(&mut bx, count).immediate();
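
Finally, statement.rs now dispatches on `StatementKind::Intrinsic` with two non-diverging intrinsics: `Assume`, which codegens to `bx.assume(..)` (previously handled as a call, see the removal in intrinsic.rs above), and `CopyNonOverlapping`. The latter has a direct stable library counterpart; a small sketch of the (src, dst, count) operands it takes, with count measured in elements:

// Illustrative example, not part of the diff.
fn main() {
    let src = [1u8, 2, 3, 4];
    let mut dst = [0u8; 4];
    // Safety: the two buffers do not overlap and both are valid for 4 elements.
    unsafe {
        std::ptr::copy_nonoverlapping(src.as_ptr(), dst.as_mut_ptr(), 4);
    }
    assert_eq!(dst, [1, 2, 3, 4]);
}
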