Diffstat (limited to 'compiler/rustc_codegen_ssa/src/mir')
-rw-r--r--  compiler/rustc_codegen_ssa/src/mir/analyze.rs    |  33
-rw-r--r--  compiler/rustc_codegen_ssa/src/mir/block.rs      | 289
-rw-r--r--  compiler/rustc_codegen_ssa/src/mir/debuginfo.rs  |   7
-rw-r--r--  compiler/rustc_codegen_ssa/src/mir/intrinsic.rs  |  25
-rw-r--r--  compiler/rustc_codegen_ssa/src/mir/mod.rs        |  10
-rw-r--r--  compiler/rustc_codegen_ssa/src/mir/place.rs      |  17
-rw-r--r--  compiler/rustc_codegen_ssa/src/mir/rvalue.rs     |  41
-rw-r--r--  compiler/rustc_codegen_ssa/src/mir/statement.rs  |   1
8 files changed, 195 insertions(+), 228 deletions(-)
diff --git a/compiler/rustc_codegen_ssa/src/mir/analyze.rs b/compiler/rustc_codegen_ssa/src/mir/analyze.rs
index dd1ac2c74..95aad10fd 100644
--- a/compiler/rustc_codegen_ssa/src/mir/analyze.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/analyze.rs
@@ -36,7 +36,7 @@ pub fn non_ssa_locals<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
// Arguments get assigned to by means of the function being called
for arg in mir.args_iter() {
- analyzer.assign(arg, mir::START_BLOCK.start_location());
+ analyzer.assign(arg, DefLocation::Argument);
}
// If there exists a local definition that dominates all uses of that local,
@@ -64,7 +64,22 @@ enum LocalKind {
/// A scalar or a scalar pair local that is neither defined nor used.
Unused,
/// A scalar or a scalar pair local with a single definition that dominates all uses.
- SSA(mir::Location),
+ SSA(DefLocation),
+}
+
+#[derive(Copy, Clone, PartialEq, Eq)]
+enum DefLocation {
+ Argument,
+ Body(Location),
+}
+
+impl DefLocation {
+ fn dominates(self, location: Location, dominators: &Dominators<mir::BasicBlock>) -> bool {
+ match self {
+ DefLocation::Argument => true,
+ DefLocation::Body(def) => def.successor_within_block().dominates(location, dominators),
+ }
+ }
+}
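
For intuition: an argument is assigned exactly once, on function entry, so its definition dominates every use without consulting the dominator tree; only body definitions need the real query. A minimal standalone sketch of that rule, using toy `Location` and `Dominators` types rather than the rustc ones:

    #[derive(Copy, Clone)]
    struct Location { block: usize, statement_index: usize }

    // Toy dominator oracle: in a straight-line CFG, earlier blocks dominate later ones.
    struct Dominators;
    impl Dominators {
        fn dominates(&self, a: usize, b: usize) -> bool { a <= b }
    }

    #[derive(Copy, Clone)]
    enum DefLocation {
        Argument,
        Body(Location),
    }

    impl DefLocation {
        fn dominates(self, loc: Location, doms: &Dominators) -> bool {
            match self {
                // Assigned before any body statement runs, so always dominating.
                DefLocation::Argument => true,
                // A body definition dominates uses strictly after it.
                DefLocation::Body(def) if def.block == loc.block => {
                    def.statement_index < loc.statement_index
                }
                DefLocation::Body(def) => doms.dominates(def.block, loc.block),
            }
        }
    }

    fn main() {
        let use_site = Location { block: 3, statement_index: 0 };
        assert!(DefLocation::Argument.dominates(use_site, &Dominators));
        assert!(DefLocation::Body(Location { block: 1, statement_index: 5 })
            .dominates(use_site, &Dominators));
    }
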
struct LocalAnalyzer<'mir, 'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> {
@@ -74,17 +89,13 @@ struct LocalAnalyzer<'mir, 'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> {
}
impl<'mir, 'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> LocalAnalyzer<'mir, 'a, 'tcx, Bx> {
- fn assign(&mut self, local: mir::Local, location: Location) {
+ fn assign(&mut self, local: mir::Local, location: DefLocation) {
let kind = &mut self.locals[local];
match *kind {
LocalKind::ZST => {}
LocalKind::Memory => {}
- LocalKind::Unused => {
- *kind = LocalKind::SSA(location);
- }
- LocalKind::SSA(_) => {
- *kind = LocalKind::Memory;
- }
+ LocalKind::Unused => *kind = LocalKind::SSA(location),
+ LocalKind::SSA(_) => *kind = LocalKind::Memory,
}
}
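
The compacted `assign` is a two-strike state machine: the first definition makes a local an SSA candidate, a second definition demotes it to a stack slot. Roughly, with toy types:

    #[derive(Copy, Clone, PartialEq, Debug)]
    enum LocalKind { Zst, Memory, Unused, Ssa(u32) }

    fn assign(kind: &mut LocalKind, def: u32) {
        *kind = match *kind {
            // ZSTs and already-demoted locals never change.
            LocalKind::Zst | LocalKind::Memory => *kind,
            // First definition seen: SSA candidate.
            LocalKind::Unused => LocalKind::Ssa(def),
            // Second definition: multiple defs force a memory slot.
            LocalKind::Ssa(_) => LocalKind::Memory,
        };
    }

    fn main() {
        let mut k = LocalKind::Unused;
        assign(&mut k, 0);
        assert_eq!(k, LocalKind::Ssa(0));
        assign(&mut k, 1);
        assert_eq!(k, LocalKind::Memory);
    }
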
@@ -166,7 +177,7 @@ impl<'mir, 'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> Visitor<'tcx>
debug!("visit_assign(place={:?}, rvalue={:?})", place, rvalue);
if let Some(local) = place.as_local() {
- self.assign(local, location);
+ self.assign(local, DefLocation::Body(location));
if self.locals[local] != LocalKind::Memory {
let decl_span = self.fx.mir.local_decls[local].source_info.span;
if !self.fx.rvalue_creates_operand(rvalue, decl_span) {
@@ -189,7 +200,7 @@ impl<'mir, 'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> Visitor<'tcx>
match context {
PlaceContext::MutatingUse(MutatingUseContext::Call)
| PlaceContext::MutatingUse(MutatingUseContext::Yield) => {
- self.assign(local, location);
+ self.assign(local, DefLocation::Body(location));
}
PlaceContext::NonUse(_) | PlaceContext::MutatingUse(MutatingUseContext::Retag) => {}
diff --git a/compiler/rustc_codegen_ssa/src/mir/block.rs b/compiler/rustc_codegen_ssa/src/mir/block.rs
index 978aff511..57a19a4ab 100644
--- a/compiler/rustc_codegen_ssa/src/mir/block.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/block.rs
@@ -14,9 +14,9 @@ use rustc_ast::{InlineAsmOptions, InlineAsmTemplatePiece};
use rustc_hir::lang_items::LangItem;
use rustc_index::vec::Idx;
use rustc_middle::mir::{self, AssertKind, SwitchTargets};
-use rustc_middle::ty::layout::{HasTyCtxt, LayoutOf};
+use rustc_middle::ty::layout::{HasTyCtxt, LayoutOf, ValidityRequirement};
use rustc_middle::ty::print::{with_no_trimmed_paths, with_no_visible_paths};
-use rustc_middle::ty::{self, Instance, Ty, TypeVisitable};
+use rustc_middle::ty::{self, Instance, Ty, TypeVisitableExt};
use rustc_session::config::OptLevel;
use rustc_span::source_map::Span;
use rustc_span::{sym, Symbol};
@@ -39,7 +39,6 @@ enum MergingSucc {
struct TerminatorCodegenHelper<'tcx> {
bb: mir::BasicBlock,
terminator: &'tcx mir::Terminator<'tcx>,
- funclet_bb: Option<mir::BasicBlock>,
}
impl<'a, 'tcx> TerminatorCodegenHelper<'tcx> {
@@ -49,28 +48,24 @@ impl<'a, 'tcx> TerminatorCodegenHelper<'tcx> {
&self,
fx: &'b mut FunctionCx<'a, 'tcx, Bx>,
) -> Option<&'b Bx::Funclet> {
- let funclet_bb = self.funclet_bb?;
- if base::wants_msvc_seh(fx.cx.tcx().sess) {
- // If `landing_pad_for` hasn't been called yet to create the `Funclet`,
- // it has to be now. This may not seem necessary, as RPO should lead
- // to all the unwind edges being visited (and so to `landing_pad_for`
- // getting called for them), before building any of the blocks inside
- // the funclet itself - however, if MIR contains edges that end up not
- // being needed in the LLVM IR after monomorphization, the funclet may
- be unreachable, and we don't yet have a way to skip building it in
- // such an eventuality (which may be a better solution than this).
- if fx.funclets[funclet_bb].is_none() {
- fx.landing_pad_for(funclet_bb);
- }
-
- Some(
- fx.funclets[funclet_bb]
- .as_ref()
- .expect("landing_pad_for didn't also create funclets entry"),
- )
- } else {
- None
+ let cleanup_kinds = (&fx.cleanup_kinds).as_ref()?;
+ let funclet_bb = cleanup_kinds[self.bb].funclet_bb(self.bb)?;
+ // If `landing_pad_for` hasn't been called yet to create the `Funclet`,
+ // it has to be now. This may not seem necessary, as RPO should lead
+ // to all the unwind edges being visited (and so to `landing_pad_for`
+ // getting called for them), before building any of the blocks inside
+ // the funclet itself - however, if MIR contains edges that end up not
+ // being needed in the LLVM IR after monomorphization, the funclet may
+ be unreachable, and we don't yet have a way to skip building it in
+ // such an eventuality (which may be a better solution than this).
+ if fx.funclets[funclet_bb].is_none() {
+ fx.landing_pad_for(funclet_bb);
+ }
+ Some(
+ fx.funclets[funclet_bb]
+ .as_ref()
+ .expect("landing_pad_for didn't also create funclets entry"),
+ )
}
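
With `cleanup_kinds` now an `Option` (populated only for MSVC-SEH targets; see the mod.rs hunk below), the funclet lookup reduces to two early returns via `?`. The shape of the control flow, with toy types standing in for the rustc ones:

    // Per-block funclet parents, present only when the target uses funclets.
    fn funclet_for(cleanup_kinds: Option<&Vec<Option<usize>>>, bb: usize) -> Option<usize> {
        let kinds = cleanup_kinds?;          // non-MSVC: no funclets at all
        kinds.get(bb).copied().flatten()     // a block may have no funclet parent
    }

    fn main() {
        assert_eq!(funclet_for(None, 0), None);
        let kinds = vec![Some(3), None];
        assert_eq!(funclet_for(Some(&kinds), 0), Some(3));
        assert_eq!(funclet_for(Some(&kinds), 1), None);
    }
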
/// Get a basic block (creating it if necessary), possibly with cleanup
@@ -104,23 +99,24 @@ impl<'a, 'tcx> TerminatorCodegenHelper<'tcx> {
fx: &mut FunctionCx<'a, 'tcx, Bx>,
target: mir::BasicBlock,
) -> (bool, bool) {
- let target_funclet = fx.cleanup_kinds[target].funclet_bb(target);
- let (needs_landing_pad, is_cleanupret) = match (self.funclet_bb, target_funclet) {
- (None, None) => (false, false),
- (None, Some(_)) => (true, false),
- (Some(_), None) => {
- let span = self.terminator.source_info.span;
- span_bug!(span, "{:?} - jump out of cleanup?", self.terminator);
- }
- (Some(f), Some(t_f)) => {
- if f == t_f || !base::wants_msvc_seh(fx.cx.tcx().sess) {
- (false, false)
- } else {
- (true, true)
+ if let Some(ref cleanup_kinds) = fx.cleanup_kinds {
+ let funclet_bb = cleanup_kinds[self.bb].funclet_bb(self.bb);
+ let target_funclet = cleanup_kinds[target].funclet_bb(target);
+ let (needs_landing_pad, is_cleanupret) = match (funclet_bb, target_funclet) {
+ (None, None) => (false, false),
+ (None, Some(_)) => (true, false),
+ (Some(f), Some(t_f)) => (f != t_f, f != t_f),
+ (Some(_), None) => {
+ let span = self.terminator.source_info.span;
+ span_bug!(span, "{:?} - jump out of cleanup?", self.terminator);
}
- }
- };
- (needs_landing_pad, is_cleanupret)
+ };
+ (needs_landing_pad, is_cleanupret)
+ } else {
+ let needs_landing_pad = !fx.mir[self.bb].is_cleanup && fx.mir[target].is_cleanup;
+ let is_cleanupret = false;
+ (needs_landing_pad, is_cleanupret)
+ }
}
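
On targets without funclets the two flags collapse to one rule: a landing pad is needed exactly when control jumps from a normal block into a cleanup block, and `cleanupret` never applies. As a plain truth table:

    fn llbb_characteristics(bb_is_cleanup: bool, target_is_cleanup: bool) -> (bool, bool) {
        let needs_landing_pad = !bb_is_cleanup && target_is_cleanup;
        let is_cleanupret = false; // cleanuprets exist only with MSVC-style funclets
        (needs_landing_pad, is_cleanupret)
    }

    fn main() {
        assert_eq!(llbb_characteristics(false, true), (true, false));   // entering cleanup
        assert_eq!(llbb_characteristics(true, true), (false, false));   // already in cleanup
        assert_eq!(llbb_characteristics(false, false), (false, false)); // normal edge
    }
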
fn funclet_br<Bx: BuilderMethods<'a, 'tcx>>(
@@ -456,86 +452,84 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
args1 = [place.llval];
&args1[..]
};
- let (drop_fn, fn_abi) = match ty.kind() {
- // FIXME(eddyb) perhaps move some of this logic into
- // `Instance::resolve_drop_in_place`?
- ty::Dynamic(_, _, ty::Dyn) => {
- // IN THIS ARM, WE HAVE:
- // ty = *mut (dyn Trait)
- // which is: exists<T> ( *mut T, Vtable<T: Trait> )
- // args[0] args[1]
- //
- // args = ( Data, Vtable )
- // |
- // v
- // /-------\
- // | ... |
- // \-------/
- //
- let virtual_drop = Instance {
- def: ty::InstanceDef::Virtual(drop_fn.def_id(), 0),
- substs: drop_fn.substs,
- };
- debug!("ty = {:?}", ty);
- debug!("drop_fn = {:?}", drop_fn);
- debug!("args = {:?}", args);
- let fn_abi = bx.fn_abi_of_instance(virtual_drop, ty::List::empty());
- let vtable = args[1];
- // Truncate vtable off of args list
- args = &args[..1];
- (
- meth::VirtualIndex::from_index(ty::COMMON_VTABLE_ENTRIES_DROPINPLACE)
- .get_fn(bx, vtable, ty, &fn_abi),
- fn_abi,
- )
- }
- ty::Dynamic(_, _, ty::DynStar) => {
- // IN THIS ARM, WE HAVE:
- // ty = *mut (dyn* Trait)
- // which is: *mut exists<T: sizeof(T) == sizeof(usize)> (T, Vtable<T: Trait>)
- //
- // args = [ * ]
- // |
- // v
- // ( Data, Vtable )
- // |
- // v
- // /-------\
- // | ... |
- // \-------/
- //
- //
- // WE CAN CONVERT THIS INTO THE ABOVE LOGIC BY DOING
- //
- // data = &(*args[0]).0 // gives a pointer to Data above (really the same pointer)
- // vtable = (*args[0]).1 // loads the vtable out
- // (data, vtable) // an equivalent Rust `*mut dyn Trait`
- //
- // SO THEN WE CAN USE THE ABOVE CODE.
- let virtual_drop = Instance {
- def: ty::InstanceDef::Virtual(drop_fn.def_id(), 0),
- substs: drop_fn.substs,
- };
- debug!("ty = {:?}", ty);
- debug!("drop_fn = {:?}", drop_fn);
- debug!("args = {:?}", args);
- let fn_abi = bx.fn_abi_of_instance(virtual_drop, ty::List::empty());
- let data = args[0];
- let data_ty = bx.cx().backend_type(place.layout);
- let vtable_ptr =
- bx.gep(data_ty, data, &[bx.cx().const_i32(0), bx.cx().const_i32(1)]);
- let vtable = bx.load(bx.type_i8p(), vtable_ptr, abi::Align::ONE);
- // Truncate vtable off of args list
- args = &args[..1];
- debug!("args' = {:?}", args);
- (
- meth::VirtualIndex::from_index(ty::COMMON_VTABLE_ENTRIES_DROPINPLACE)
- .get_fn(bx, vtable, ty, &fn_abi),
- fn_abi,
- )
- }
- _ => (bx.get_fn_addr(drop_fn), bx.fn_abi_of_instance(drop_fn, ty::List::empty())),
- };
+ let (drop_fn, fn_abi) =
+ match ty.kind() {
+ // FIXME(eddyb) perhaps move some of this logic into
+ // `Instance::resolve_drop_in_place`?
+ ty::Dynamic(_, _, ty::Dyn) => {
+ // IN THIS ARM, WE HAVE:
+ // ty = *mut (dyn Trait)
+ // which is: exists<T> ( *mut T, Vtable<T: Trait> )
+ // args[0] args[1]
+ //
+ // args = ( Data, Vtable )
+ // |
+ // v
+ // /-------\
+ // | ... |
+ // \-------/
+ //
+ let virtual_drop = Instance {
+ def: ty::InstanceDef::Virtual(drop_fn.def_id(), 0),
+ substs: drop_fn.substs,
+ };
+ debug!("ty = {:?}", ty);
+ debug!("drop_fn = {:?}", drop_fn);
+ debug!("args = {:?}", args);
+ let fn_abi = bx.fn_abi_of_instance(virtual_drop, ty::List::empty());
+ let vtable = args[1];
+ // Truncate vtable off of args list
+ args = &args[..1];
+ (
+ meth::VirtualIndex::from_index(ty::COMMON_VTABLE_ENTRIES_DROPINPLACE)
+ .get_fn(bx, vtable, ty, &fn_abi),
+ fn_abi,
+ )
+ }
+ ty::Dynamic(_, _, ty::DynStar) => {
+ // IN THIS ARM, WE HAVE:
+ // ty = *mut (dyn* Trait)
+ // which is: *mut exists<T: sizeof(T) == sizeof(usize)> (T, Vtable<T: Trait>)
+ //
+ // args = [ * ]
+ // |
+ // v
+ // ( Data, Vtable )
+ // |
+ // v
+ // /-------\
+ // | ... |
+ // \-------/
+ //
+ //
+ // WE CAN CONVERT THIS INTO THE ABOVE LOGIC BY DOING
+ //
+ // data = &(*args[0]).0 // gives a pointer to Data above (really the same pointer)
+ // vtable = (*args[0]).1 // loads the vtable out
+ // (data, vtable) // an equivalent Rust `*mut dyn Trait`
+ //
+ // SO THEN WE CAN USE THE ABOVE CODE.
+ let virtual_drop = Instance {
+ def: ty::InstanceDef::Virtual(drop_fn.def_id(), 0),
+ substs: drop_fn.substs,
+ };
+ debug!("ty = {:?}", ty);
+ debug!("drop_fn = {:?}", drop_fn);
+ debug!("args = {:?}", args);
+ let fn_abi = bx.fn_abi_of_instance(virtual_drop, ty::List::empty());
+ let meta_ptr = place.project_field(bx, 1);
+ let meta = bx.load_operand(meta_ptr);
+ // Truncate vtable off of args list
+ args = &args[..1];
+ debug!("args' = {:?}", args);
+ (
+ meth::VirtualIndex::from_index(ty::COMMON_VTABLE_ENTRIES_DROPINPLACE)
+ .get_fn(bx, meta.immediate(), ty, &fn_abi),
+ fn_abi,
+ )
+ }
+ _ => (bx.get_fn_addr(drop_fn), bx.fn_abi_of_instance(drop_fn, ty::List::empty())),
+ };
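
The substantive change in the `dyn*` arm: the vtable is now read by projecting field 1 of the (data, vtable) pair through the normal layout machinery (`project_field` plus `load_operand`) instead of a hand-rolled GEP and `i8*` load. A toy model of the layout this relies on, with drop-in-place at vtable slot 0 as in rustc's `COMMON_VTABLE_ENTRIES`:

    struct Vtable { drop_in_place: fn(*mut ()) }
    struct DynStar { data: *mut (), vtable: &'static Vtable }

    fn drop_dyn_star(v: &DynStar) {
        // "project field 1, load the vtable, call through it"
        (v.vtable.drop_in_place)(v.data);
    }

    fn noop_drop(_: *mut ()) {}
    static VTABLE: Vtable = Vtable { drop_in_place: noop_drop };

    fn main() {
        let v = DynStar { data: std::ptr::null_mut(), vtable: &VTABLE };
        drop_dyn_star(&v);
    }
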
helper.do_call(
self,
bx,
@@ -569,11 +563,13 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
// with #[rustc_inherit_overflow_checks] and inlined from
// another crate (mostly core::num generic/#[inline] fns),
// while the current crate doesn't use overflow checks.
- // NOTE: Unlike binops, negation doesn't have its own
- // checked operation, just a comparison with the minimum
- // value, so we have to check for the assert message.
- if !bx.check_overflow() {
- if let AssertKind::OverflowNeg(_) = *msg {
+ if !bx.cx().check_overflow() {
+ let overflow_not_to_check = match msg {
+ AssertKind::OverflowNeg(..) => true,
+ AssertKind::Overflow(op, ..) => op.is_checkable(),
+ _ => false,
+ };
+ if overflow_not_to_check {
const_cond = Some(expected);
}
}
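
The widened skip condition covers every assert that exists only because of `-C overflow-checks`: negation overflow, plus binop overflow for ops with a checked form (`mir::BinOp::is_checkable`). A sketch of the predicate with toy `AssertKind`/`BinOp` types:

    enum BinOp { Add, Sub, Mul, Shl, Shr, Div }
    impl BinOp {
        // Mirrors mir::BinOp::is_checkable: ops with a (value, overflowed) form.
        fn is_checkable(&self) -> bool {
            matches!(self, BinOp::Add | BinOp::Sub | BinOp::Mul | BinOp::Shl | BinOp::Shr)
        }
    }

    enum AssertKind { OverflowNeg, Overflow(BinOp), BoundsCheck }

    fn skip_assert(overflow_checks: bool, msg: &AssertKind) -> bool {
        !overflow_checks
            && match msg {
                AssertKind::OverflowNeg => true,
                AssertKind::Overflow(op) => op.is_checkable(),
                _ => false, // bounds checks and the like always stay
            }
    }

    fn main() {
        assert!(skip_assert(false, &AssertKind::Overflow(BinOp::Add)));
        // division overflow (MIN / -1) is not gated on -C overflow-checks
        assert!(!skip_assert(false, &AssertKind::Overflow(BinOp::Div)));
        assert!(!skip_assert(false, &AssertKind::BoundsCheck));
        assert!(!skip_assert(true, &AssertKind::OverflowNeg));
    }
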
@@ -659,35 +655,24 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
// Emit a panic or a no-op for `assert_*` intrinsics.
// These are intrinsics that compile to panics so that we can get a message
// which mentions the offending type, even from a const context.
- #[derive(Debug, PartialEq)]
- enum AssertIntrinsic {
- Inhabited,
- ZeroValid,
- MemUninitializedValid,
- }
- let panic_intrinsic = intrinsic.and_then(|i| match i {
- sym::assert_inhabited => Some(AssertIntrinsic::Inhabited),
- sym::assert_zero_valid => Some(AssertIntrinsic::ZeroValid),
- sym::assert_mem_uninitialized_valid => Some(AssertIntrinsic::MemUninitializedValid),
- _ => None,
- });
- if let Some(intrinsic) = panic_intrinsic {
- use AssertIntrinsic::*;
-
+ let panic_intrinsic = intrinsic.and_then(|s| ValidityRequirement::from_intrinsic(s));
+ if let Some(requirement) = panic_intrinsic {
let ty = instance.unwrap().substs.type_at(0);
+
+ let do_panic = !bx
+ .tcx()
+ .check_validity_requirement((requirement, bx.param_env().and(ty)))
+ .expect("expect to have layout during codegen");
+
let layout = bx.layout_of(ty);
- let do_panic = match intrinsic {
- Inhabited => layout.abi.is_uninhabited(),
- ZeroValid => !bx.tcx().permits_zero_init(layout),
- MemUninitializedValid => !bx.tcx().permits_uninit_init(layout),
- };
+
Some(if do_panic {
let msg_str = with_no_visible_paths!({
with_no_trimmed_paths!({
if layout.abi.is_uninhabited() {
// Use this error even for the other intrinsics as it is more precise.
format!("attempted to instantiate uninhabited type `{}`", ty)
- } else if intrinsic == ZeroValid {
+ } else if requirement == ValidityRequirement::Zero {
format!("attempted to zero-initialize type `{}`, which is invalid", ty)
} else {
format!(
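
The one-off `AssertIntrinsic` enum is gone: `ValidityRequirement::from_intrinsic` maps the three `assert_*` intrinsics onto a shared enum, and the `check_validity_requirement` query answers the "would this be UB" question in one place instead of ad-hoc layout checks. A toy of the mapping (variant names abridged, not the exact rustc ones):

    #[derive(PartialEq, Debug)]
    enum ValidityRequirement { Inhabited, Zero, UninitMitigated }

    fn from_intrinsic(name: &str) -> Option<ValidityRequirement> {
        match name {
            "assert_inhabited" => Some(ValidityRequirement::Inhabited),
            "assert_zero_valid" => Some(ValidityRequirement::Zero),
            "assert_mem_uninitialized_valid" => Some(ValidityRequirement::UninitMitigated),
            _ => None,
        }
    }

    fn main() {
        assert_eq!(from_intrinsic("assert_zero_valid"), Some(ValidityRequirement::Zero));
        assert_eq!(from_intrinsic("transmute"), None);
    }
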
@@ -781,7 +766,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
};
let extra_args = &args[sig.inputs().skip_binder().len()..];
- let extra_args = bx.tcx().mk_type_list(extra_args.iter().map(|op_arg| {
+ let extra_args = bx.tcx().mk_type_list_from_iter(extra_args.iter().map(|op_arg| {
let op_ty = op_arg.ty(self.mir, bx.tcx());
self.monomorphize(op_ty)
}));
@@ -1253,9 +1238,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
) -> MergingSucc {
debug!("codegen_terminator: {:?}", terminator);
- // Create the cleanup bundle, if needed.
- let funclet_bb = self.cleanup_kinds[bb].funclet_bb(bb);
- let helper = TerminatorCodegenHelper { bb, terminator, funclet_bb };
+ let helper = TerminatorCodegenHelper { bb, terminator };
let mergeable_succ = || {
// Note: any call to `switch_to_block` will invalidate a `true` value
@@ -1547,7 +1530,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
slot
} else {
let layout = cx.layout_of(
- cx.tcx().intern_tup(&[cx.tcx().mk_mut_ptr(cx.tcx().types.u8), cx.tcx().types.i32]),
+ cx.tcx().mk_tup(&[cx.tcx().mk_mut_ptr(cx.tcx().types.u8), cx.tcx().types.i32]),
);
let slot = PlaceRef::alloca(bx, layout);
self.personality_slot = Some(slot);
@@ -1801,8 +1784,8 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
match (src.layout.abi, dst.layout.abi) {
(abi::Abi::Scalar(src_scalar), abi::Abi::Scalar(dst_scalar)) => {
// HACK(eddyb) LLVM doesn't like `bitcast`s between pointers and non-pointers.
- let src_is_ptr = src_scalar.primitive() == abi::Pointer;
- let dst_is_ptr = dst_scalar.primitive() == abi::Pointer;
+ let src_is_ptr = matches!(src_scalar.primitive(), abi::Pointer(_));
+ let dst_is_ptr = matches!(dst_scalar.primitive(), abi::Pointer(_));
if src_is_ptr == dst_is_ptr {
assert_eq!(src.layout.size, dst.layout.size);
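
Background for the last hunk: `abi::Primitive::Pointer` gained an `AddressSpace` payload in this cycle, so pointer checks become pattern matches rather than equality tests. In miniature:

    struct AddressSpace(u32);

    enum Primitive { Int, Pointer(AddressSpace) }

    fn is_ptr(p: &Primitive) -> bool {
        // `p == Pointer` no longer typechecks; match on the shape instead.
        matches!(p, Primitive::Pointer(_))
    }

    fn main() {
        assert!(is_ptr(&Primitive::Pointer(AddressSpace(0))));
        assert!(!is_ptr(&Primitive::Int));
    }
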
diff --git a/compiler/rustc_codegen_ssa/src/mir/debuginfo.rs b/compiler/rustc_codegen_ssa/src/mir/debuginfo.rs
index e9bc40c33..708f3bc0c 100644
--- a/compiler/rustc_codegen_ssa/src/mir/debuginfo.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/debuginfo.rs
@@ -385,10 +385,9 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
calculate_debuginfo_offset(bx, local, &var, base);
// Create a variable which will be a pointer to the actual value
- let ptr_ty = bx.tcx().mk_ty(ty::RawPtr(ty::TypeAndMut {
- mutbl: mir::Mutability::Mut,
- ty: place.layout.ty,
- }));
+ let ptr_ty = bx
+ .tcx()
+ .mk_ptr(ty::TypeAndMut { mutbl: mir::Mutability::Mut, ty: place.layout.ty });
let ptr_layout = bx.layout_of(ptr_ty);
let alloca = PlaceRef::alloca(bx, ptr_layout);
bx.set_var_name(alloca.llval, &(var.name.to_string() + ".dbg.spill"));
diff --git a/compiler/rustc_codegen_ssa/src/mir/intrinsic.rs b/compiler/rustc_codegen_ssa/src/mir/intrinsic.rs
index 766dc74cb..7af7fc92d 100644
--- a/compiler/rustc_codegen_ssa/src/mir/intrinsic.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/intrinsic.rs
@@ -218,9 +218,6 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
args[1].val.unaligned_volatile_store(bx, dst);
return;
}
- sym::add_with_overflow
- | sym::sub_with_overflow
- | sym::mul_with_overflow
| sym::unchecked_div
| sym::unchecked_rem
| sym::unchecked_shl
@@ -232,28 +229,6 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
let ty = arg_tys[0];
match int_type_width_signed(ty, bx.tcx()) {
Some((_width, signed)) => match name {
- sym::add_with_overflow
- | sym::sub_with_overflow
- | sym::mul_with_overflow => {
- let op = match name {
- sym::add_with_overflow => OverflowOp::Add,
- sym::sub_with_overflow => OverflowOp::Sub,
- sym::mul_with_overflow => OverflowOp::Mul,
- _ => bug!(),
- };
- let (val, overflow) =
- bx.checked_binop(op, ty, args[0].immediate(), args[1].immediate());
- // Convert `i1` to a `bool`, and write it to the out parameter
- let val = bx.from_immediate(val);
- let overflow = bx.from_immediate(overflow);
-
- let dest = result.project_field(bx, 0);
- bx.store(val, dest.llval, dest.align);
- let dest = result.project_field(bx, 1);
- bx.store(overflow, dest.llval, dest.align);
-
- return;
- }
sym::exact_div => {
if signed {
bx.exactsdiv(args[0].immediate(), args[1].immediate())
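
The deleted arms are dead code because `{add,sub,mul}_with_overflow` are lowered to MIR checked binops before codegen (the `LowerIntrinsics` pass), so the backend no longer sees them as intrinsic calls. What those arms computed, in plain Rust:

    // Each *_with_overflow intrinsic yields the wrapped result plus an
    // overflow flag, the same contract as std's overflowing_* methods.
    fn add_with_overflow(a: i32, b: i32) -> (i32, bool) {
        a.overflowing_add(b)
    }

    fn main() {
        assert_eq!(add_with_overflow(i32::MAX, 1), (i32::MIN, true));
        assert_eq!(add_with_overflow(1, 2), (3, false));
    }
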
diff --git a/compiler/rustc_codegen_ssa/src/mir/mod.rs b/compiler/rustc_codegen_ssa/src/mir/mod.rs
index 79c66a955..2ec9fdbf4 100644
--- a/compiler/rustc_codegen_ssa/src/mir/mod.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/mod.rs
@@ -1,8 +1,9 @@
+use crate::base;
use crate::traits::*;
use rustc_middle::mir;
use rustc_middle::mir::interpret::ErrorHandled;
use rustc_middle::ty::layout::{FnAbiOf, HasTyCtxt, TyAndLayout};
-use rustc_middle::ty::{self, Instance, Ty, TypeFoldable, TypeVisitable};
+use rustc_middle::ty::{self, Instance, Ty, TyCtxt, TypeFoldable, TypeVisitableExt};
use rustc_target::abi::call::{FnAbi, PassMode};
use std::iter;
@@ -58,7 +59,7 @@ pub struct FunctionCx<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> {
cached_llbbs: IndexVec<mir::BasicBlock, CachedLlbb<Bx::BasicBlock>>,
/// The funclet status of each basic block
- cleanup_kinds: IndexVec<mir::BasicBlock, analyze::CleanupKind>,
+ cleanup_kinds: Option<IndexVec<mir::BasicBlock, analyze::CleanupKind>>,
/// When targeting MSVC, this stores the cleanup info for each funclet BB.
/// This is initialized at the same time as the `landing_pads` entry for the
@@ -104,7 +105,7 @@ pub struct FunctionCx<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> {
impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
pub fn monomorphize<T>(&self, value: T) -> T
where
- T: Copy + TypeFoldable<'tcx>,
+ T: Copy + TypeFoldable<TyCtxt<'tcx>>,
{
debug!("monomorphize: self.instance={:?}", self.instance);
self.instance.subst_mir_and_normalize_erasing_regions(
@@ -166,7 +167,8 @@ pub fn codegen_mir<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
start_bx.set_personality_fn(cx.eh_personality());
}
- let cleanup_kinds = analyze::cleanup_kinds(&mir);
+ let cleanup_kinds = base::wants_msvc_seh(cx.tcx().sess).then(|| analyze::cleanup_kinds(&mir));
+
let cached_llbbs: IndexVec<mir::BasicBlock, CachedLlbb<Bx::BasicBlock>> =
mir.basic_blocks
.indices()
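
`cleanup_kinds` is now computed only when the target actually uses MSVC-style SEH; `bool::then` keeps the analysis lazy and encodes its absence in the type rather than in an empty table. The idiom, standalone:

    fn cleanup_kinds_analysis() -> Vec<u32> {
        vec![] // stand-in for analyze::cleanup_kinds(&mir)
    }

    fn main() {
        let wants_msvc_seh = false; // e.g. any non-MSVC target
        // The closure runs only when the bool is true; otherwise None is stored.
        let cleanup_kinds: Option<Vec<u32>> = wants_msvc_seh.then(cleanup_kinds_analysis);
        assert!(cleanup_kinds.is_none());
    }
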
diff --git a/compiler/rustc_codegen_ssa/src/mir/place.rs b/compiler/rustc_codegen_ssa/src/mir/place.rs
index fbe30154a..cf02f59f6 100644
--- a/compiler/rustc_codegen_ssa/src/mir/place.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/place.rs
@@ -9,7 +9,7 @@ use rustc_middle::mir;
use rustc_middle::mir::tcx::PlaceTy;
use rustc_middle::ty::layout::{HasTyCtxt, LayoutOf, TyAndLayout};
use rustc_middle::ty::{self, Ty};
-use rustc_target::abi::{Abi, Align, FieldsShape, Int, TagEncoding};
+use rustc_target::abi::{Abi, Align, FieldsShape, Int, Pointer, TagEncoding};
use rustc_target::abi::{VariantIdx, Variants};
#[derive(Copy, Clone, Debug)]
@@ -209,6 +209,7 @@ impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> {
bx: &mut Bx,
cast_to: Ty<'tcx>,
) -> V {
+ let dl = &bx.tcx().data_layout;
let cast_to_layout = bx.cx().layout_of(cast_to);
let cast_to_size = cast_to_layout.layout.size();
let cast_to = bx.cx().immediate_backend_type(cast_to_layout);
@@ -250,12 +251,14 @@ impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> {
TagEncoding::Niche { untagged_variant, ref niche_variants, niche_start } => {
// Cast to an integer so we don't have to treat a pointer as a
// special case.
- let (tag, tag_llty) = if tag_scalar.primitive().is_ptr() {
- let t = bx.type_isize();
- let tag = bx.ptrtoint(tag_imm, t);
- (tag, t)
- } else {
- (tag_imm, bx.cx().immediate_backend_type(tag_op.layout))
+ let (tag, tag_llty) = match tag_scalar.primitive() {
+ // FIXME(erikdesjardins): handle non-default addrspace ptr sizes
+ Pointer(_) => {
+ let t = bx.type_from_integer(dl.ptr_sized_integer());
+ let tag = bx.ptrtoint(tag_imm, t);
+ (tag, t)
+ }
+ _ => (tag_imm, bx.cx().immediate_backend_type(tag_op.layout)),
};
let tag_size = tag_scalar.size(bx.cx());
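
Same `Pointer(_)` story on the discriminant path: a pointer-typed tag is first converted to the data layout's pointer-sized integer (rather than a hard-coded `isize`), and the niche arithmetic then runs on plain integers. A toy of the decode that follows, assuming a contiguous niche range:

    // Tags in [niche_start, niche_start + n_variants) select a variant;
    // anything else means the untagged variant.
    fn decode_niche(tag: u64, niche_start: u64, n_variants: u64, first: u32, untagged: u32) -> u32 {
        let relative = tag.wrapping_sub(niche_start);
        if relative < n_variants { first + relative as u32 } else { untagged }
    }

    fn main() {
        // e.g. Option<&T>: the null pointer (tag 0) encodes None
        assert_eq!(decode_niche(0, 0, 1, 0, 1), 0);      // None
        assert_eq!(decode_niche(0x1000, 0, 1, 0, 1), 1); // Some(&T)
    }
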
diff --git a/compiler/rustc_codegen_ssa/src/mir/rvalue.rs b/compiler/rustc_codegen_ssa/src/mir/rvalue.rs
index 23196c8cb..3d856986f 100644
--- a/compiler/rustc_codegen_ssa/src/mir/rvalue.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/rvalue.rs
@@ -13,6 +13,7 @@ use rustc_middle::ty::cast::{CastTy, IntTy};
use rustc_middle::ty::layout::{HasTyCtxt, LayoutOf};
use rustc_middle::ty::{self, adjustment::PointerCast, Instance, Ty, TyCtxt};
use rustc_span::source_map::{Span, DUMMY_SP};
+use rustc_target::abi::VariantIdx;
impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
#[instrument(level = "trace", skip(self, bx))]
@@ -99,24 +100,24 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
}
}
- let count =
- self.monomorphize(count).eval_usize(bx.cx().tcx(), ty::ParamEnv::reveal_all());
+ let count = self
+ .monomorphize(count)
+ .eval_target_usize(bx.cx().tcx(), ty::ParamEnv::reveal_all());
bx.write_operand_repeatedly(cg_elem, count, dest);
}
mir::Rvalue::Aggregate(ref kind, ref operands) => {
- let (dest, active_field_index) = match **kind {
- mir::AggregateKind::Adt(adt_did, variant_index, _, _, active_field_index) => {
- dest.codegen_set_discr(bx, variant_index);
- if bx.tcx().adt_def(adt_did).is_enum() {
- (dest.project_downcast(bx, variant_index), active_field_index)
- } else {
- (dest, active_field_index)
- }
+ let (variant_index, variant_dest, active_field_index) = match **kind {
+ mir::AggregateKind::Adt(_, variant_index, _, _, active_field_index) => {
+ let variant_dest = dest.project_downcast(bx, variant_index);
+ (variant_index, variant_dest, active_field_index)
}
- _ => (dest, None),
+ _ => (VariantIdx::from_u32(0), dest, None),
};
+ if active_field_index.is_some() {
+ assert_eq!(operands.len(), 1);
+ }
for (i, operand) in operands.iter().enumerate() {
let op = self.codegen_operand(bx, operand);
// Do not generate stores and GEPs for zero-sized fields.
@@ -124,13 +125,14 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
let field_index = active_field_index.unwrap_or(i);
let field = if let mir::AggregateKind::Array(_) = **kind {
let llindex = bx.cx().const_usize(field_index as u64);
- dest.project_index(bx, llindex)
+ variant_dest.project_index(bx, llindex)
} else {
- dest.project_field(bx, field_index)
+ variant_dest.project_field(bx, field_index)
};
op.val.store(bx, field);
}
}
+ dest.codegen_set_discr(bx, variant_index);
}
_ => {
@@ -411,7 +413,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
lhs.layout.ty,
);
let val_ty = op.ty(bx.tcx(), lhs.layout.ty, rhs.layout.ty);
- let operand_ty = bx.tcx().intern_tup(&[val_ty, bx.tcx().types.bool]);
+ let operand_ty = bx.tcx().mk_tup(&[val_ty, bx.tcx().types.bool]);
OperandRef { val: result, layout: bx.cx().layout_of(operand_ty) }
}
@@ -491,7 +493,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
if let Some(index) = place.as_local() {
if let LocalRef::Operand(Some(op)) = self.locals[index] {
if let ty::Array(_, n) = op.layout.ty.kind() {
- let n = n.eval_usize(bx.cx().tcx(), ty::ParamEnv::reveal_all());
+ let n = n.eval_target_usize(bx.cx().tcx(), ty::ParamEnv::reveal_all());
return bx.cx().const_usize(n);
}
}
@@ -650,15 +652,6 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
rhs: Bx::Value,
input_ty: Ty<'tcx>,
) -> OperandValue<Bx::Value> {
- // This case can currently arise only from functions marked
- // with #[rustc_inherit_overflow_checks] and inlined from
- // another crate (mostly core::num generic/#[inline] fns),
- // while the current crate doesn't use overflow checks.
- if !bx.cx().check_overflow() {
- let val = self.codegen_scalar_binop(bx, op, lhs, rhs, input_ty);
- return OperandValue::Pair(val, bx.cx().const_bool(false));
- }
-
let (val, of) = match op {
// These are checked using intrinsics
mir::BinOp::Add | mir::BinOp::Sub | mir::BinOp::Mul => {
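
In the `Rvalue::Aggregate` hunk above, the discriminant write moves from before the field stores to after them, and non-ADT aggregates uniformly use variant 0. Storing fields first matters where the tag overlaps the payload (e.g. niche encodings), so a later field store cannot clobber an already-written tag. The new order, schematically over raw bytes:

    fn build_variant(dest: &mut [u8; 8], fields: &[u8], set_discr: impl Fn(&mut [u8; 8])) {
        // 1. store every field into the variant's projection
        dest[..fields.len()].copy_from_slice(fields);
        // 2. only then write the discriminant, which may overlap payload bytes
        set_discr(dest);
    }

    fn main() {
        let mut dest = [0u8; 8];
        build_variant(&mut dest, &[1, 2, 3, 4], |d| d[7] = 1);
        assert_eq!(dest, [1, 2, 3, 4, 0, 0, 0, 1]);
    }
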
diff --git a/compiler/rustc_codegen_ssa/src/mir/statement.rs b/compiler/rustc_codegen_ssa/src/mir/statement.rs
index 19452c8cd..60fbceb34 100644
--- a/compiler/rustc_codegen_ssa/src/mir/statement.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/statement.rs
@@ -91,6 +91,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
mir::StatementKind::FakeRead(..)
| mir::StatementKind::Retag { .. }
| mir::StatementKind::AscribeUserType(..)
+ | mir::StatementKind::ConstEvalCounter
| mir::StatementKind::Nop => {}
}
}