author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-17 12:18:25 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-17 12:18:25 +0000
commit     5363f350887b1e5b5dd21a86f88c8af9d7fea6da (patch)
tree       35ca005eb6e0e9a1ba3bb5dbc033209ad445dc17 /compiler/rustc_codegen_ssa/src/mir
parent     Adding debian version 1.66.0+dfsg1-1. (diff)
Merging upstream version 1.67.1+dfsg1.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'compiler/rustc_codegen_ssa/src/mir')
-rw-r--r--  compiler/rustc_codegen_ssa/src/mir/block.rs       551
-rw-r--r--  compiler/rustc_codegen_ssa/src/mir/constant.rs      9
-rw-r--r--  compiler/rustc_codegen_ssa/src/mir/debuginfo.rs     71
-rw-r--r--  compiler/rustc_codegen_ssa/src/mir/mod.rs           28
-rw-r--r--  compiler/rustc_codegen_ssa/src/mir/operand.rs        4
-rw-r--r--  compiler/rustc_codegen_ssa/src/mir/place.rs        211
-rw-r--r--  compiler/rustc_codegen_ssa/src/mir/rvalue.rs       173
-rw-r--r--  compiler/rustc_codegen_ssa/src/mir/statement.rs     47
8 files changed, 691 insertions, 403 deletions
diff --git a/compiler/rustc_codegen_ssa/src/mir/block.rs b/compiler/rustc_codegen_ssa/src/mir/block.rs
index 29b7c9b0a..03d833fbb 100644
--- a/compiler/rustc_codegen_ssa/src/mir/block.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/block.rs
@@ -1,7 +1,7 @@
use super::operand::OperandRef;
use super::operand::OperandValue::{Immediate, Pair, Ref};
use super::place::PlaceRef;
-use super::{FunctionCx, LocalRef};
+use super::{CachedLlbb, FunctionCx, LocalRef};
use crate::base;
use crate::common::{self, IntPredicate};
@@ -17,6 +17,7 @@ use rustc_middle::mir::{self, AssertKind, SwitchTargets};
use rustc_middle::ty::layout::{HasTyCtxt, LayoutOf};
use rustc_middle::ty::print::{with_no_trimmed_paths, with_no_visible_paths};
use rustc_middle::ty::{self, Instance, Ty, TypeVisitable};
+use rustc_session::config::OptLevel;
use rustc_span::source_map::Span;
use rustc_span::{sym, Symbol};
use rustc_symbol_mangling::typeid::typeid_for_fnabi;
@@ -24,6 +25,15 @@ use rustc_target::abi::call::{ArgAbi, FnAbi, PassMode, Reg};
use rustc_target::abi::{self, HasDataLayout, WrappingRange};
use rustc_target::spec::abi::Abi;
+// Indicates if we are in the middle of merging a BB's successor into it. This
+// can happen when BB jumps directly to its successor and the successor has no
+// other predecessors.
+#[derive(Debug, PartialEq)]
+enum MergingSucc {
+ False,
+ True,
+}
+
/// Used by `FunctionCx::codegen_terminator` for emitting common patterns
/// e.g., creating a basic block, calling a function, etc.
struct TerminatorCodegenHelper<'tcx> {
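For illustration only (not part of the patch): a minimal standalone sketch of the mergeability condition that `MergingSucc` reports on, as checked by the `mergeable_succ` closure later in this diff — a block's successor can be merged into it only when it is the terminator's single successor and the current block is that successor's only predecessor. `BasicBlock` and the predecessor table below are simplified stand-ins, not the compiler's own types.

// Simplified stand-in for mir::BasicBlock and the MIR predecessor cache.
type BasicBlock = usize;

/// Returns true when `bb` has exactly one successor and `bb` is that
/// successor's only predecessor -- the condition under which the patch
/// merges the successor into the current backend block.
fn mergeable_succ(
    bb: BasicBlock,
    successors: &[BasicBlock],
    predecessors: &[Vec<BasicBlock>],
) -> bool {
    match successors {
        &[succ] => matches!(predecessors[succ].as_slice(), &[pred] if pred == bb),
        _ => false,
    }
}

fn main() {
    // bb0 -> bb1, and bb1 has no other predecessors: mergeable.
    let preds: Vec<Vec<BasicBlock>> = vec![vec![], vec![0], vec![0]];
    assert!(mergeable_succ(0, &[1], &preds));
    // Two successors: not mergeable.
    assert!(!mergeable_succ(0, &[1, 2], &preds));
}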
@@ -63,31 +73,6 @@ impl<'a, 'tcx> TerminatorCodegenHelper<'tcx> {
}
}
- /// Get a basic block (creating it if necessary), possibly with a landing
- /// pad next to it.
- fn llbb_with_landing_pad<Bx: BuilderMethods<'a, 'tcx>>(
- &self,
- fx: &mut FunctionCx<'a, 'tcx, Bx>,
- target: mir::BasicBlock,
- ) -> (Bx::BasicBlock, bool) {
- let span = self.terminator.source_info.span;
- let lltarget = fx.llbb(target);
- let target_funclet = fx.cleanup_kinds[target].funclet_bb(target);
- match (self.funclet_bb, target_funclet) {
- (None, None) => (lltarget, false),
- // jump *into* cleanup - need a landing pad if GNU, cleanup pad if MSVC
- (None, Some(_)) => (fx.landing_pad_for(target), false),
- (Some(_), None) => span_bug!(span, "{:?} - jump out of cleanup?", self.terminator),
- (Some(f), Some(t_f)) => {
- if f == t_f || !base::wants_msvc_seh(fx.cx.tcx().sess) {
- (lltarget, false)
- } else {
- (fx.landing_pad_for(target), true)
- }
- }
- }
- }
-
/// Get a basic block (creating it if necessary), possibly with cleanup
/// stuff in it or next to it.
fn llbb_with_cleanup<Bx: BuilderMethods<'a, 'tcx>>(
@@ -95,7 +80,11 @@ impl<'a, 'tcx> TerminatorCodegenHelper<'tcx> {
fx: &mut FunctionCx<'a, 'tcx, Bx>,
target: mir::BasicBlock,
) -> Bx::BasicBlock {
- let (lltarget, is_cleanupret) = self.llbb_with_landing_pad(fx, target);
+ let (needs_landing_pad, is_cleanupret) = self.llbb_characteristics(fx, target);
+ let mut lltarget = fx.llbb(target);
+ if needs_landing_pad {
+ lltarget = fx.landing_pad_for(target);
+ }
if is_cleanupret {
// MSVC cross-funclet jump - need a trampoline
debug_assert!(base::wants_msvc_seh(fx.cx.tcx().sess));
@@ -110,20 +99,54 @@ impl<'a, 'tcx> TerminatorCodegenHelper<'tcx> {
}
}
+ fn llbb_characteristics<Bx: BuilderMethods<'a, 'tcx>>(
+ &self,
+ fx: &mut FunctionCx<'a, 'tcx, Bx>,
+ target: mir::BasicBlock,
+ ) -> (bool, bool) {
+ let target_funclet = fx.cleanup_kinds[target].funclet_bb(target);
+ let (needs_landing_pad, is_cleanupret) = match (self.funclet_bb, target_funclet) {
+ (None, None) => (false, false),
+ (None, Some(_)) => (true, false),
+ (Some(_), None) => {
+ let span = self.terminator.source_info.span;
+ span_bug!(span, "{:?} - jump out of cleanup?", self.terminator);
+ }
+ (Some(f), Some(t_f)) => {
+ if f == t_f || !base::wants_msvc_seh(fx.cx.tcx().sess) {
+ (false, false)
+ } else {
+ (true, true)
+ }
+ }
+ };
+ (needs_landing_pad, is_cleanupret)
+ }
+
fn funclet_br<Bx: BuilderMethods<'a, 'tcx>>(
&self,
fx: &mut FunctionCx<'a, 'tcx, Bx>,
bx: &mut Bx,
target: mir::BasicBlock,
- ) {
- let (lltarget, is_cleanupret) = self.llbb_with_landing_pad(fx, target);
- if is_cleanupret {
- // MSVC micro-optimization: generate a `ret` rather than a jump
- // to a trampoline.
- debug_assert!(base::wants_msvc_seh(fx.cx.tcx().sess));
- bx.cleanup_ret(self.funclet(fx).unwrap(), Some(lltarget));
+ mergeable_succ: bool,
+ ) -> MergingSucc {
+ let (needs_landing_pad, is_cleanupret) = self.llbb_characteristics(fx, target);
+ if mergeable_succ && !needs_landing_pad && !is_cleanupret {
+ // We can merge the successor into this bb, so no need for a `br`.
+ MergingSucc::True
} else {
- bx.br(lltarget);
+ let mut lltarget = fx.llbb(target);
+ if needs_landing_pad {
+ lltarget = fx.landing_pad_for(target);
+ }
+ if is_cleanupret {
+ // micro-optimization: generate a `ret` rather than a jump
+ // to a trampoline.
+ bx.cleanup_ret(self.funclet(fx).unwrap(), Some(lltarget));
+ } else {
+ bx.br(lltarget);
+ }
+ MergingSucc::False
}
}
@@ -139,7 +162,8 @@ impl<'a, 'tcx> TerminatorCodegenHelper<'tcx> {
destination: Option<(ReturnDest<'tcx, Bx::Value>, mir::BasicBlock)>,
cleanup: Option<mir::BasicBlock>,
copied_constant_arguments: &[PlaceRef<'tcx, <Bx as BackendTypes>::Value>],
- ) {
+ mergeable_succ: bool,
+ ) -> MergingSucc {
// If there is a cleanup block and the function we're calling can unwind, then
// do an invoke, otherwise do a call.
let fn_ty = bx.fn_decl_backend_type(&fn_abi);
@@ -190,6 +214,7 @@ impl<'a, 'tcx> TerminatorCodegenHelper<'tcx> {
}
fx.store_return(bx, ret_dest, &fn_abi.ret, invokeret);
}
+ MergingSucc::False
} else {
let llret = bx.call(fn_ty, Some(&fn_abi), fn_ptr, &llargs, self.funclet(fx));
if fx.mir[self.bb].is_cleanup {
@@ -205,9 +230,10 @@ impl<'a, 'tcx> TerminatorCodegenHelper<'tcx> {
bx.lifetime_end(tmp.llval, tmp.layout.size);
}
fx.store_return(bx, ret_dest, &fn_abi.ret, llret);
- self.funclet_br(fx, bx, target);
+ self.funclet_br(fx, bx, target, mergeable_succ)
} else {
bx.unreachable();
+ MergingSucc::False
}
}
}
@@ -224,7 +250,8 @@ impl<'a, 'tcx> TerminatorCodegenHelper<'tcx> {
destination: Option<mir::BasicBlock>,
cleanup: Option<mir::BasicBlock>,
instance: Instance<'_>,
- ) {
+ mergeable_succ: bool,
+ ) -> MergingSucc {
if let Some(cleanup) = cleanup {
let ret_llbb = if let Some(target) = destination {
fx.llbb(target)
@@ -240,13 +267,15 @@ impl<'a, 'tcx> TerminatorCodegenHelper<'tcx> {
instance,
Some((ret_llbb, self.llbb_with_cleanup(fx, cleanup), self.funclet(fx))),
);
+ MergingSucc::False
} else {
bx.codegen_inline_asm(template, &operands, options, line_spans, instance, None);
if let Some(target) = destination {
- self.funclet_br(fx, bx, target);
+ self.funclet_br(fx, bx, target, mergeable_succ)
} else {
bx.unreachable();
+ MergingSucc::False
}
}
}
@@ -255,16 +284,16 @@ impl<'a, 'tcx> TerminatorCodegenHelper<'tcx> {
/// Codegen implementations for some terminator variants.
impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
/// Generates code for a `Resume` terminator.
- fn codegen_resume_terminator(&mut self, helper: TerminatorCodegenHelper<'tcx>, mut bx: Bx) {
+ fn codegen_resume_terminator(&mut self, helper: TerminatorCodegenHelper<'tcx>, bx: &mut Bx) {
if let Some(funclet) = helper.funclet(self) {
bx.cleanup_ret(funclet, None);
} else {
- let slot = self.get_personality_slot(&mut bx);
- let lp0 = slot.project_field(&mut bx, 0);
+ let slot = self.get_personality_slot(bx);
+ let lp0 = slot.project_field(bx, 0);
let lp0 = bx.load_operand(lp0).immediate();
- let lp1 = slot.project_field(&mut bx, 1);
+ let lp1 = slot.project_field(bx, 1);
let lp1 = bx.load_operand(lp1).immediate();
- slot.storage_dead(&mut bx);
+ slot.storage_dead(bx);
let mut lp = bx.const_undef(self.landing_pad_type());
lp = bx.insert_value(lp, lp0, 0);
@@ -276,22 +305,23 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
fn codegen_switchint_terminator(
&mut self,
helper: TerminatorCodegenHelper<'tcx>,
- mut bx: Bx,
+ bx: &mut Bx,
discr: &mir::Operand<'tcx>,
switch_ty: Ty<'tcx>,
targets: &SwitchTargets,
) {
- let discr = self.codegen_operand(&mut bx, &discr);
+ let discr = self.codegen_operand(bx, &discr);
// `switch_ty` is redundant, sanity-check that.
assert_eq!(discr.layout.ty, switch_ty);
let mut target_iter = targets.iter();
if target_iter.len() == 1 {
- // If there are two targets (one conditional, one fallback), emit br instead of switch
+ // If there are two targets (one conditional, one fallback), emit `br` instead of
+ // `switch`.
let (test_value, target) = target_iter.next().unwrap();
let lltrue = helper.llbb_with_cleanup(self, target);
let llfalse = helper.llbb_with_cleanup(self, targets.otherwise());
if switch_ty == bx.tcx().types.bool {
- // Don't generate trivial icmps when switching on bool
+ // Don't generate trivial icmps when switching on bool.
match test_value {
0 => bx.cond_br(discr.immediate(), llfalse, lltrue),
1 => bx.cond_br(discr.immediate(), lltrue, llfalse),
@@ -303,6 +333,30 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
let cmp = bx.icmp(IntPredicate::IntEQ, discr.immediate(), llval);
bx.cond_br(cmp, lltrue, llfalse);
}
+ } else if self.cx.sess().opts.optimize == OptLevel::No
+ && target_iter.len() == 2
+ && self.mir[targets.otherwise()].is_empty_unreachable()
+ {
+ // In unoptimized builds, if there are two normal targets and the `otherwise` target is
+ // an unreachable BB, emit `br` instead of `switch`. This leaves behind the unreachable
+ // BB, which will usually (but not always) be dead code.
+ //
+ // Why only in unoptimized builds?
+ // - In unoptimized builds LLVM uses FastISel which does not support switches, so it
+ // must fall back to the slower SelectionDAG isel. Therefore, using `br` gives
+ // significant compile time speedups for unoptimized builds.
+ // - In optimized builds the above doesn't hold, and using `br` sometimes results in
+ // worse generated code because LLVM can no longer tell that the value being switched
+ // on can only have two values, e.g. 0 and 1.
+ //
+ let (test_value1, target1) = target_iter.next().unwrap();
+ let (_test_value2, target2) = target_iter.next().unwrap();
+ let ll1 = helper.llbb_with_cleanup(self, target1);
+ let ll2 = helper.llbb_with_cleanup(self, target2);
+ let switch_llty = bx.immediate_backend_type(bx.layout_of(switch_ty));
+ let llval = bx.const_uint_big(switch_llty, test_value1);
+ let cmp = bx.icmp(IntPredicate::IntEQ, discr.immediate(), llval);
+ bx.cond_br(cmp, ll1, ll2);
} else {
bx.switch(
discr.immediate(),
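For illustration only (not part of the patch): the new opt-level-0 special case above typically fires for an exhaustive match on a small fieldless enum, where MIR builds a `SwitchInt` with one target per variant plus an `otherwise` arm that is an empty unreachable block. The LLVM shown in the comments below is indicative of the intended lowering, not verbatim compiler output.

// A two-variant match like this usually produces a MIR SwitchInt with two
// normal targets and an unreachable `otherwise` block. With this change,
// at opt-level=0 the backend emits roughly
//     %cmp = icmp eq <tag>, 0
//     br i1 %cmp, label %off_bb, label %on_bb
// rather than an LLVM `switch` instruction.
enum Flag {
    Off,
    On,
}

fn describe(f: Flag) -> &'static str {
    match f {
        Flag::Off => "off",
        Flag::On => "on",
    }
}

fn main() {
    println!("{}", describe(Flag::On));
}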
@@ -312,7 +366,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
}
}
- fn codegen_return_terminator(&mut self, mut bx: Bx) {
+ fn codegen_return_terminator(&mut self, bx: &mut Bx) {
// Call `va_end` if this is the definition of a C-variadic function.
if self.fn_abi.c_variadic {
// The `VaList` "spoofed" argument is just after all the real arguments.
@@ -342,11 +396,11 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
}
PassMode::Direct(_) | PassMode::Pair(..) => {
- let op = self.codegen_consume(&mut bx, mir::Place::return_place().as_ref());
+ let op = self.codegen_consume(bx, mir::Place::return_place().as_ref());
if let Ref(llval, _, align) = op.val {
bx.load(bx.backend_type(op.layout), llval, align)
} else {
- op.immediate_or_packed_pair(&mut bx)
+ op.immediate_or_packed_pair(bx)
}
}
@@ -362,8 +416,8 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
};
let llslot = match op.val {
Immediate(_) | Pair(..) => {
- let scratch = PlaceRef::alloca(&mut bx, self.fn_abi.ret.layout);
- op.val.store(&mut bx, scratch);
+ let scratch = PlaceRef::alloca(bx, self.fn_abi.ret.layout);
+ op.val.store(bx, scratch);
scratch.llval
}
Ref(llval, _, align) => {
@@ -383,22 +437,22 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
fn codegen_drop_terminator(
&mut self,
helper: TerminatorCodegenHelper<'tcx>,
- mut bx: Bx,
+ bx: &mut Bx,
location: mir::Place<'tcx>,
target: mir::BasicBlock,
unwind: Option<mir::BasicBlock>,
- ) {
+ mergeable_succ: bool,
+ ) -> MergingSucc {
let ty = location.ty(self.mir, bx.tcx()).ty;
let ty = self.monomorphize(ty);
let drop_fn = Instance::resolve_drop_in_place(bx.tcx(), ty);
if let ty::InstanceDef::DropGlue(_, None) = drop_fn.def {
// we don't actually need to drop anything.
- helper.funclet_br(self, &mut bx, target);
- return;
+ return helper.funclet_br(self, bx, target, mergeable_succ);
}
- let place = self.codegen_place(&mut bx, location.as_ref());
+ let place = self.codegen_place(bx, location.as_ref());
let (args1, args2);
let mut args = if let Some(llextra) = place.llextra {
args2 = [place.llval, llextra];
@@ -436,7 +490,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
args = &args[..1];
(
meth::VirtualIndex::from_index(ty::COMMON_VTABLE_ENTRIES_DROPINPLACE)
- .get_fn(&mut bx, vtable, ty, &fn_abi),
+ .get_fn(bx, vtable, ty, &fn_abi),
fn_abi,
)
}
@@ -481,7 +535,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
debug!("args' = {:?}", args);
(
meth::VirtualIndex::from_index(ty::COMMON_VTABLE_ENTRIES_DROPINPLACE)
- .get_fn(&mut bx, vtable, ty, &fn_abi),
+ .get_fn(bx, vtable, ty, &fn_abi),
fn_abi,
)
}
@@ -489,29 +543,31 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
};
helper.do_call(
self,
- &mut bx,
+ bx,
fn_abi,
drop_fn,
args,
Some((ReturnDest::Nothing, target)),
unwind,
&[],
- );
+ mergeable_succ,
+ )
}
fn codegen_assert_terminator(
&mut self,
helper: TerminatorCodegenHelper<'tcx>,
- mut bx: Bx,
+ bx: &mut Bx,
terminator: &mir::Terminator<'tcx>,
cond: &mir::Operand<'tcx>,
expected: bool,
msg: &mir::AssertMessage<'tcx>,
target: mir::BasicBlock,
cleanup: Option<mir::BasicBlock>,
- ) {
+ mergeable_succ: bool,
+ ) -> MergingSucc {
let span = terminator.source_info.span;
- let cond = self.codegen_operand(&mut bx, cond).immediate();
+ let cond = self.codegen_operand(bx, cond).immediate();
let mut const_cond = bx.const_to_opt_u128(cond, false).map(|c| c == 1);
// This case can currently arise only from functions marked
@@ -529,8 +585,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
// Don't codegen the panic block if success is known.
if const_cond == Some(expected) {
- helper.funclet_br(self, &mut bx, target);
- return;
+ return helper.funclet_br(self, bx, target, mergeable_succ);
}
// Pass the condition through llvm.expect for branch hinting.
@@ -547,16 +602,16 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
// After this point, bx is the block for the call to panic.
bx.switch_to_block(panic_block);
- self.set_debug_loc(&mut bx, terminator.source_info);
+ self.set_debug_loc(bx, terminator.source_info);
// Get the location information.
- let location = self.get_caller_location(&mut bx, terminator.source_info).immediate();
+ let location = self.get_caller_location(bx, terminator.source_info).immediate();
// Put together the arguments to the panic entry point.
let (lang_item, args) = match msg {
AssertKind::BoundsCheck { ref len, ref index } => {
- let len = self.codegen_operand(&mut bx, len).immediate();
- let index = self.codegen_operand(&mut bx, index).immediate();
+ let len = self.codegen_operand(bx, len).immediate();
+ let index = self.codegen_operand(bx, index).immediate();
// It's `fn panic_bounds_check(index: usize, len: usize)`,
// and `#[track_caller]` adds an implicit third argument.
(LangItem::PanicBoundsCheck, vec![index, len, location])
@@ -569,29 +624,32 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
}
};
- let (fn_abi, llfn) = common::build_langcall(&bx, Some(span), lang_item);
+ let (fn_abi, llfn) = common::build_langcall(bx, Some(span), lang_item);
// Codegen the actual panic invoke/call.
- helper.do_call(self, &mut bx, fn_abi, llfn, &args, None, cleanup, &[]);
+ let merging_succ = helper.do_call(self, bx, fn_abi, llfn, &args, None, cleanup, &[], false);
+ assert_eq!(merging_succ, MergingSucc::False);
+ MergingSucc::False
}
fn codegen_abort_terminator(
&mut self,
helper: TerminatorCodegenHelper<'tcx>,
- mut bx: Bx,
+ bx: &mut Bx,
terminator: &mir::Terminator<'tcx>,
) {
let span = terminator.source_info.span;
- self.set_debug_loc(&mut bx, terminator.source_info);
+ self.set_debug_loc(bx, terminator.source_info);
// Obtain the panic entry point.
- let (fn_abi, llfn) = common::build_langcall(&bx, Some(span), LangItem::PanicNoUnwind);
+ let (fn_abi, llfn) = common::build_langcall(bx, Some(span), LangItem::PanicNoUnwind);
// Codegen the actual panic invoke/call.
- helper.do_call(self, &mut bx, fn_abi, llfn, &[], None, None, &[]);
+ let merging_succ = helper.do_call(self, bx, fn_abi, llfn, &[], None, None, &[], false);
+ assert_eq!(merging_succ, MergingSucc::False);
}
- /// Returns `true` if this is indeed a panic intrinsic and codegen is done.
+ /// Returns `Some` if this is indeed a panic intrinsic and codegen is done.
fn codegen_panic_intrinsic(
&mut self,
helper: &TerminatorCodegenHelper<'tcx>,
@@ -601,7 +659,8 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
source_info: mir::SourceInfo,
target: Option<mir::BasicBlock>,
cleanup: Option<mir::BasicBlock>,
- ) -> bool {
+ mergeable_succ: bool,
+ ) -> Option<MergingSucc> {
// Emit a panic or a no-op for `assert_*` intrinsics.
// These are intrinsics that compile to panics so that we can get a message
// which mentions the offending type, even from a const context.
@@ -627,7 +686,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
ZeroValid => !bx.tcx().permits_zero_init(layout),
UninitValid => !bx.tcx().permits_uninit_init(layout),
};
- if do_panic {
+ Some(if do_panic {
let msg_str = with_no_visible_paths!({
with_no_trimmed_paths!({
if layout.abi.is_uninhabited() {
@@ -660,22 +719,22 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
target.as_ref().map(|bb| (ReturnDest::Nothing, *bb)),
cleanup,
&[],
- );
+ mergeable_succ,
+ )
} else {
// a NOP
let target = target.unwrap();
- helper.funclet_br(self, bx, target)
- }
- true
+ helper.funclet_br(self, bx, target, mergeable_succ)
+ })
} else {
- false
+ None
}
}
fn codegen_call_terminator(
&mut self,
helper: TerminatorCodegenHelper<'tcx>,
- mut bx: Bx,
+ bx: &mut Bx,
terminator: &mir::Terminator<'tcx>,
func: &mir::Operand<'tcx>,
args: &[mir::Operand<'tcx>],
@@ -683,12 +742,13 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
target: Option<mir::BasicBlock>,
cleanup: Option<mir::BasicBlock>,
fn_span: Span,
- ) {
+ mergeable_succ: bool,
+ ) -> MergingSucc {
let source_info = terminator.source_info;
let span = source_info.span;
// Create the callee. This is a fn ptr or zero-sized and hence a kind of scalar.
- let callee = self.codegen_operand(&mut bx, func);
+ let callee = self.codegen_operand(bx, func);
let (instance, mut llfn) = match *callee.layout.ty.kind() {
ty::FnDef(def_id, substs) => (
@@ -708,8 +768,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
if let Some(ty::InstanceDef::DropGlue(_, None)) = def {
// Empty drop glue; a no-op.
let target = target.unwrap();
- helper.funclet_br(self, &mut bx, target);
- return;
+ return helper.funclet_br(self, bx, target, mergeable_succ);
}
// FIXME(eddyb) avoid computing this if possible, when `instance` is
@@ -736,9 +795,9 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
};
if intrinsic == Some(sym::transmute) {
- if let Some(target) = target {
- self.codegen_transmute(&mut bx, &args[0], destination);
- helper.funclet_br(self, &mut bx, target);
+ return if let Some(target) = target {
+ self.codegen_transmute(bx, &args[0], destination);
+ helper.funclet_br(self, bx, target, mergeable_succ)
} else {
// If we are trying to transmute to an uninhabited type,
// it is likely there is no allotted destination. In fact,
@@ -748,20 +807,21 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
// it must be unreachable.
assert_eq!(fn_abi.ret.layout.abi, abi::Abi::Uninhabited);
bx.unreachable();
- }
- return;
+ MergingSucc::False
+ };
}
- if self.codegen_panic_intrinsic(
+ if let Some(merging_succ) = self.codegen_panic_intrinsic(
&helper,
- &mut bx,
+ bx,
intrinsic,
instance,
source_info,
target,
cleanup,
+ mergeable_succ,
) {
- return;
+ return merging_succ;
}
// The arguments we'll be passing. Plus one to account for outptr, if used.
@@ -771,23 +831,24 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
// Prepare the return value destination
let ret_dest = if target.is_some() {
let is_intrinsic = intrinsic.is_some();
- self.make_return_dest(&mut bx, destination, &fn_abi.ret, &mut llargs, is_intrinsic)
+ self.make_return_dest(bx, destination, &fn_abi.ret, &mut llargs, is_intrinsic)
} else {
ReturnDest::Nothing
};
if intrinsic == Some(sym::caller_location) {
- if let Some(target) = target {
- let location = self
- .get_caller_location(&mut bx, mir::SourceInfo { span: fn_span, ..source_info });
+ return if let Some(target) = target {
+ let location =
+ self.get_caller_location(bx, mir::SourceInfo { span: fn_span, ..source_info });
if let ReturnDest::IndirectOperand(tmp, _) = ret_dest {
- location.val.store(&mut bx, tmp);
+ location.val.store(bx, tmp);
}
- self.store_return(&mut bx, ret_dest, &fn_abi.ret, location.immediate());
- helper.funclet_br(self, &mut bx, target);
- }
- return;
+ self.store_return(bx, ret_dest, &fn_abi.ret, location.immediate());
+ helper.funclet_br(self, bx, target, mergeable_succ)
+ } else {
+ MergingSucc::False
+ };
}
match intrinsic {
@@ -831,12 +892,12 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
}
}
- self.codegen_operand(&mut bx, arg)
+ self.codegen_operand(bx, arg)
})
.collect();
Self::codegen_intrinsic_call(
- &mut bx,
+ bx,
*instance.as_ref().unwrap(),
&fn_abi,
&args,
@@ -845,16 +906,15 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
);
if let ReturnDest::IndirectOperand(dst, _) = ret_dest {
- self.store_return(&mut bx, ret_dest, &fn_abi.ret, dst.llval);
+ self.store_return(bx, ret_dest, &fn_abi.ret, dst.llval);
}
- if let Some(target) = target {
- helper.funclet_br(self, &mut bx, target);
+ return if let Some(target) = target {
+ helper.funclet_br(self, bx, target, mergeable_succ)
} else {
bx.unreachable();
- }
-
- return;
+ MergingSucc::False
+ };
}
}
@@ -868,7 +928,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
let mut copied_constant_arguments = vec![];
'make_args: for (i, arg) in first_args.iter().enumerate() {
- let mut op = self.codegen_operand(&mut bx, arg);
+ let mut op = self.codegen_operand(bx, arg);
if let (0, Some(ty::InstanceDef::Virtual(_, idx))) = (i, def) {
match op.val {
@@ -878,12 +938,14 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
// that is understood elsewhere in the compiler as a method on
// `dyn Trait`.
// To get a `*mut RcBox<Self>`, we just keep unwrapping newtypes until
- // we get a value of a built-in pointer type
+ // we get a value of a built-in pointer type.
+ //
+ // This is also relevant for `Pin<&mut Self>`, where we need to peel the `Pin`.
'descend_newtypes: while !op.layout.ty.is_unsafe_ptr()
&& !op.layout.ty.is_region_ptr()
{
for i in 0..op.layout.fields.count() {
- let field = op.extract_field(&mut bx, i);
+ let field = op.extract_field(bx, i);
if !field.layout.is_zst() {
// we found the one non-zero-sized field that is allowed
// now find *its* non-zero-sized field, or stop if it's a
@@ -900,7 +962,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
// data pointer and vtable. Look up the method in the vtable, and pass
// the data pointer as the first argument
llfn = Some(meth::VirtualIndex::from_index(idx).get_fn(
- &mut bx,
+ bx,
meta,
op.layout.ty,
&fn_abi,
@@ -911,7 +973,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
Ref(data_ptr, Some(meta), _) => {
// by-value dynamic dispatch
llfn = Some(meth::VirtualIndex::from_index(idx).get_fn(
- &mut bx,
+ bx,
meta,
op.layout.ty,
&fn_abi,
@@ -920,19 +982,35 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
continue;
}
Immediate(_) => {
- let ty::Ref(_, ty, _) = op.layout.ty.kind() else {
- span_bug!(span, "can't codegen a virtual call on {:#?}", op);
- };
- if !ty.is_dyn_star() {
+ // See comment above explaining why we peel these newtypes
+ 'descend_newtypes: while !op.layout.ty.is_unsafe_ptr()
+ && !op.layout.ty.is_region_ptr()
+ {
+ for i in 0..op.layout.fields.count() {
+ let field = op.extract_field(bx, i);
+ if !field.layout.is_zst() {
+ // we found the one non-zero-sized field that is allowed
+ // now find *its* non-zero-sized field, or stop if it's a
+ // pointer
+ op = field;
+ continue 'descend_newtypes;
+ }
+ }
+
+ span_bug!(span, "receiver has no non-zero-sized fields {:?}", op);
+ }
+
+ // Make sure that we've actually unwrapped the rcvr down
+ // to a pointer or ref to `dyn* Trait`.
+ if !op.layout.ty.builtin_deref(true).unwrap().ty.is_dyn_star() {
span_bug!(span, "can't codegen a virtual call on {:#?}", op);
}
- // FIXME(dyn-star): Make sure this is done on a &dyn* receiver
let place = op.deref(bx.cx());
- let data_ptr = place.project_field(&mut bx, 0);
- let meta_ptr = place.project_field(&mut bx, 1);
+ let data_ptr = place.project_field(bx, 0);
+ let meta_ptr = place.project_field(bx, 1);
let meta = bx.load_operand(meta_ptr);
llfn = Some(meth::VirtualIndex::from_index(idx).get_fn(
- &mut bx,
+ bx,
meta.immediate(),
op.layout.ty,
&fn_abi,
@@ -951,24 +1029,19 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
match (arg, op.val) {
(&mir::Operand::Copy(_), Ref(_, None, _))
| (&mir::Operand::Constant(_), Ref(_, None, _)) => {
- let tmp = PlaceRef::alloca(&mut bx, op.layout);
+ let tmp = PlaceRef::alloca(bx, op.layout);
bx.lifetime_start(tmp.llval, tmp.layout.size);
- op.val.store(&mut bx, tmp);
+ op.val.store(bx, tmp);
op.val = Ref(tmp.llval, None, tmp.align);
copied_constant_arguments.push(tmp);
}
_ => {}
}
- self.codegen_argument(&mut bx, op, &mut llargs, &fn_abi.args[i]);
+ self.codegen_argument(bx, op, &mut llargs, &fn_abi.args[i]);
}
let num_untupled = untuple.map(|tup| {
- self.codegen_arguments_untupled(
- &mut bx,
- tup,
- &mut llargs,
- &fn_abi.args[first_args.len()..],
- )
+ self.codegen_arguments_untupled(bx, tup, &mut llargs, &fn_abi.args[first_args.len()..])
});
let needs_location =
@@ -988,14 +1061,14 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
fn_abi,
);
let location =
- self.get_caller_location(&mut bx, mir::SourceInfo { span: fn_span, ..source_info });
+ self.get_caller_location(bx, mir::SourceInfo { span: fn_span, ..source_info });
debug!(
"codegen_call_terminator({:?}): location={:?} (fn_span {:?})",
terminator, location, fn_span
);
let last_arg = fn_abi.args.last().unwrap();
- self.codegen_argument(&mut bx, location, &mut llargs, last_arg);
+ self.codegen_argument(bx, location, &mut llargs, last_arg);
}
let (is_indirect_call, fn_ptr) = match (llfn, instance) {
@@ -1020,40 +1093,43 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
bx.cond_br(cond, bb_pass, bb_fail);
bx.switch_to_block(bb_pass);
- helper.do_call(
+ let merging_succ = helper.do_call(
self,
- &mut bx,
+ bx,
fn_abi,
fn_ptr,
&llargs,
target.as_ref().map(|&target| (ret_dest, target)),
cleanup,
&copied_constant_arguments,
+ false,
);
+ assert_eq!(merging_succ, MergingSucc::False);
bx.switch_to_block(bb_fail);
bx.abort();
bx.unreachable();
- return;
+ return MergingSucc::False;
}
helper.do_call(
self,
- &mut bx,
+ bx,
fn_abi,
fn_ptr,
&llargs,
target.as_ref().map(|&target| (ret_dest, target)),
cleanup,
&copied_constant_arguments,
- );
+ mergeable_succ,
+ )
}
fn codegen_asm_terminator(
&mut self,
helper: TerminatorCodegenHelper<'tcx>,
- mut bx: Bx,
+ bx: &mut Bx,
terminator: &mir::Terminator<'tcx>,
template: &[ast::InlineAsmTemplatePiece],
operands: &[mir::InlineAsmOperand<'tcx>],
@@ -1062,24 +1138,25 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
destination: Option<mir::BasicBlock>,
cleanup: Option<mir::BasicBlock>,
instance: Instance<'_>,
- ) {
+ mergeable_succ: bool,
+ ) -> MergingSucc {
let span = terminator.source_info.span;
let operands: Vec<_> = operands
.iter()
.map(|op| match *op {
mir::InlineAsmOperand::In { reg, ref value } => {
- let value = self.codegen_operand(&mut bx, value);
+ let value = self.codegen_operand(bx, value);
InlineAsmOperandRef::In { reg, value }
}
mir::InlineAsmOperand::Out { reg, late, ref place } => {
- let place = place.map(|place| self.codegen_place(&mut bx, place.as_ref()));
+ let place = place.map(|place| self.codegen_place(bx, place.as_ref()));
InlineAsmOperandRef::Out { reg, late, place }
}
mir::InlineAsmOperand::InOut { reg, late, ref in_value, ref out_place } => {
- let in_value = self.codegen_operand(&mut bx, in_value);
+ let in_value = self.codegen_operand(bx, in_value);
let out_place =
- out_place.map(|out_place| self.codegen_place(&mut bx, out_place.as_ref()));
+ out_place.map(|out_place| self.codegen_place(bx, out_place.as_ref()));
InlineAsmOperandRef::InOut { reg, late, in_value, out_place }
}
mir::InlineAsmOperand::Const { ref value } => {
@@ -1117,7 +1194,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
helper.do_inlineasm(
self,
- &mut bx,
+ bx,
template,
&operands,
options,
@@ -1125,71 +1202,128 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
destination,
cleanup,
instance,
- );
+ mergeable_succ,
+ )
}
}
impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
- pub fn codegen_block(&mut self, bb: mir::BasicBlock) {
- let llbb = self.llbb(bb);
- let mut bx = Bx::build(self.cx, llbb);
+ pub fn codegen_block(&mut self, mut bb: mir::BasicBlock) {
+ let llbb = match self.try_llbb(bb) {
+ Some(llbb) => llbb,
+ None => return,
+ };
+ let bx = &mut Bx::build(self.cx, llbb);
let mir = self.mir;
- let data = &mir[bb];
- debug!("codegen_block({:?}={:?})", bb, data);
+ // MIR basic blocks stop at any function call. This may not be the case
+ // for the backend's basic blocks, in which case we might be able to
+ // combine multiple MIR basic blocks into a single backend basic block.
+ loop {
+ let data = &mir[bb];
- for statement in &data.statements {
- bx = self.codegen_statement(bx, statement);
- }
+ debug!("codegen_block({:?}={:?})", bb, data);
+
+ for statement in &data.statements {
+ self.codegen_statement(bx, statement);
+ }
+
+ let merging_succ = self.codegen_terminator(bx, bb, data.terminator());
+ if let MergingSucc::False = merging_succ {
+ break;
+ }
- self.codegen_terminator(bx, bb, data.terminator());
+ // We are merging the successor into the produced backend basic
+ // block. Record that the successor should be skipped when it is
+ // reached.
+ //
+ // Note: we must not have already generated code for the successor.
+ // This is implicitly ensured by the reverse postorder traversal,
+ // and the assertion explicitly guarantees that.
+ let mut successors = data.terminator().successors();
+ let succ = successors.next().unwrap();
+ assert!(matches!(self.cached_llbbs[succ], CachedLlbb::None));
+ self.cached_llbbs[succ] = CachedLlbb::Skip;
+ bb = succ;
+ }
}
fn codegen_terminator(
&mut self,
- mut bx: Bx,
+ bx: &mut Bx,
bb: mir::BasicBlock,
terminator: &'tcx mir::Terminator<'tcx>,
- ) {
+ ) -> MergingSucc {
debug!("codegen_terminator: {:?}", terminator);
// Create the cleanup bundle, if needed.
let funclet_bb = self.cleanup_kinds[bb].funclet_bb(bb);
let helper = TerminatorCodegenHelper { bb, terminator, funclet_bb };
- self.set_debug_loc(&mut bx, terminator.source_info);
+ let mergeable_succ = || {
+ // Note: any call to `switch_to_block` will invalidate a `true` value
+ // of `mergeable_succ`.
+ let mut successors = terminator.successors();
+ if let Some(succ) = successors.next()
+ && successors.next().is_none()
+ && let &[succ_pred] = self.mir.basic_blocks.predecessors()[succ].as_slice()
+ {
+ // bb has a single successor, and bb is its only predecessor. This
+ // makes it a candidate for merging.
+ assert_eq!(succ_pred, bb);
+ true
+ } else {
+ false
+ }
+ };
+
+ self.set_debug_loc(bx, terminator.source_info);
match terminator.kind {
- mir::TerminatorKind::Resume => self.codegen_resume_terminator(helper, bx),
+ mir::TerminatorKind::Resume => {
+ self.codegen_resume_terminator(helper, bx);
+ MergingSucc::False
+ }
mir::TerminatorKind::Abort => {
self.codegen_abort_terminator(helper, bx, terminator);
+ MergingSucc::False
}
mir::TerminatorKind::Goto { target } => {
- helper.funclet_br(self, &mut bx, target);
+ helper.funclet_br(self, bx, target, mergeable_succ())
}
mir::TerminatorKind::SwitchInt { ref discr, switch_ty, ref targets } => {
self.codegen_switchint_terminator(helper, bx, discr, switch_ty, targets);
+ MergingSucc::False
}
mir::TerminatorKind::Return => {
self.codegen_return_terminator(bx);
+ MergingSucc::False
}
mir::TerminatorKind::Unreachable => {
bx.unreachable();
+ MergingSucc::False
}
mir::TerminatorKind::Drop { place, target, unwind } => {
- self.codegen_drop_terminator(helper, bx, place, target, unwind);
+ self.codegen_drop_terminator(helper, bx, place, target, unwind, mergeable_succ())
}
- mir::TerminatorKind::Assert { ref cond, expected, ref msg, target, cleanup } => {
- self.codegen_assert_terminator(
- helper, bx, terminator, cond, expected, msg, target, cleanup,
- );
- }
+ mir::TerminatorKind::Assert { ref cond, expected, ref msg, target, cleanup } => self
+ .codegen_assert_terminator(
+ helper,
+ bx,
+ terminator,
+ cond,
+ expected,
+ msg,
+ target,
+ cleanup,
+ mergeable_succ(),
+ ),
mir::TerminatorKind::DropAndReplace { .. } => {
bug!("undesugared DropAndReplace in codegen: {:?}", terminator);
@@ -1203,19 +1337,18 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
cleanup,
from_hir_call: _,
fn_span,
- } => {
- self.codegen_call_terminator(
- helper,
- bx,
- terminator,
- func,
- args,
- destination,
- target,
- cleanup,
- fn_span,
- );
- }
+ } => self.codegen_call_terminator(
+ helper,
+ bx,
+ terminator,
+ func,
+ args,
+ destination,
+ target,
+ cleanup,
+ fn_span,
+ mergeable_succ(),
+ ),
mir::TerminatorKind::GeneratorDrop | mir::TerminatorKind::Yield { .. } => {
bug!("generator ops in codegen")
}
@@ -1230,20 +1363,19 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
line_spans,
destination,
cleanup,
- } => {
- self.codegen_asm_terminator(
- helper,
- bx,
- terminator,
- template,
- operands,
- options,
- line_spans,
- destination,
- cleanup,
- self.instance,
- );
- }
+ } => self.codegen_asm_terminator(
+ helper,
+ bx,
+ terminator,
+ template,
+ operands,
+ options,
+ line_spans,
+ destination,
+ cleanup,
+ self.instance,
+ mergeable_succ(),
+ ),
}
}
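For illustration only (not part of the patch): a compressed, self-contained model of the merging loop that `codegen_block` gains above — keep emitting MIR blocks into the same backend block until a terminator refuses to merge, and mark each merged block so the outer per-block loop skips it. All types here are illustrative stand-ins, not rustc's.

#[derive(PartialEq)]
enum MergingSucc {
    False,
    True,
}

struct MirBlock {
    statements: usize,
    // `Some(succ)` models a Goto-style terminator whose successor may be merged.
    goto_succ: Option<usize>,
}

fn codegen_block(mut bb: usize, blocks: &[MirBlock], skip: &mut [bool]) {
    loop {
        let data = &blocks[bb];
        println!("emit {} statements of bb{} into the current backend block", data.statements, bb);

        // Model of `codegen_terminator` reporting whether its single
        // successor was merged into the current backend block.
        let merging_succ = match data.goto_succ {
            Some(_) => MergingSucc::True,
            None => MergingSucc::False,
        };
        if merging_succ == MergingSucc::False {
            break;
        }

        // Record that the successor must be skipped when the outer
        // per-block loop reaches it (CachedLlbb::Skip in the patch).
        let succ = data.goto_succ.unwrap();
        skip[succ] = true;
        bb = succ;
    }
}

fn main() {
    let blocks = [
        MirBlock { statements: 2, goto_succ: Some(1) },
        MirBlock { statements: 1, goto_succ: None },
    ];
    let mut skip = vec![false; blocks.len()];
    codegen_block(0, &blocks, &mut skip);
    assert!(skip[1]); // bb1 was folded into bb0's backend block.
}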
@@ -1561,12 +1693,21 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
// FIXME(eddyb) rename `llbb` and other `ll`-prefixed things to use a
// more backend-agnostic prefix such as `cg` (i.e. this would be `cgbb`).
pub fn llbb(&mut self, bb: mir::BasicBlock) -> Bx::BasicBlock {
- self.cached_llbbs[bb].unwrap_or_else(|| {
- // FIXME(eddyb) only name the block if `fewer_names` is `false`.
- let llbb = Bx::append_block(self.cx, self.llfn, &format!("{:?}", bb));
- self.cached_llbbs[bb] = Some(llbb);
- llbb
- })
+ self.try_llbb(bb).unwrap()
+ }
+
+ /// Like `llbb`, but may fail if the basic block should be skipped.
+ pub fn try_llbb(&mut self, bb: mir::BasicBlock) -> Option<Bx::BasicBlock> {
+ match self.cached_llbbs[bb] {
+ CachedLlbb::None => {
+ // FIXME(eddyb) only name the block if `fewer_names` is `false`.
+ let llbb = Bx::append_block(self.cx, self.llfn, &format!("{:?}", bb));
+ self.cached_llbbs[bb] = CachedLlbb::Some(llbb);
+ Some(llbb)
+ }
+ CachedLlbb::Some(llbb) => Some(llbb),
+ CachedLlbb::Skip => None,
+ }
}
fn make_return_dest(
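For illustration only (not part of the patch): the `llbb`/`try_llbb` split above is driven by the three-state `CachedLlbb` cache defined in `mod.rs` later in this diff. A minimal model of that state machine, with a plain `usize` standing in for the backend's basic-block handle:

// Stand-in for Bx::BasicBlock; the real type is the backend's block handle.
type BackendBlock = usize;

enum CachedLlbb {
    /// Nothing created yet.
    None,
    /// Has been created.
    Some(BackendBlock),
    /// Nothing created yet, and nothing should be: the MIR block was
    /// merged into its predecessor's backend block.
    Skip,
}

/// Mirrors `FunctionCx::try_llbb`: create the block on first use, reuse it
/// afterwards, and refuse to create one for merged (skipped) blocks.
fn try_llbb(cache: &mut CachedLlbb, mut create: impl FnMut() -> BackendBlock) -> Option<BackendBlock> {
    match *cache {
        CachedLlbb::None => {
            let llbb = create();
            *cache = CachedLlbb::Some(llbb);
            Some(llbb)
        }
        CachedLlbb::Some(llbb) => Some(llbb),
        CachedLlbb::Skip => None,
    }
}

fn main() {
    let mut next = 0;
    let mut slot = CachedLlbb::None;
    assert_eq!(try_llbb(&mut slot, || { next += 1; next }), Some(1));
    assert_eq!(try_llbb(&mut slot, || { next += 1; next }), Some(1)); // cached
    let mut skipped = CachedLlbb::Skip;
    assert_eq!(try_llbb(&mut skipped, || { next += 1; next }), None);
}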
diff --git a/compiler/rustc_codegen_ssa/src/mir/constant.rs b/compiler/rustc_codegen_ssa/src/mir/constant.rs
index 4c6ab457c..53ff3c240 100644
--- a/compiler/rustc_codegen_ssa/src/mir/constant.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/constant.rs
@@ -42,7 +42,14 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
};
self.cx.tcx().const_eval_resolve(ty::ParamEnv::reveal_all(), uv, None).map_err(|err| {
- self.cx.tcx().sess.span_err(constant.span, "erroneous constant encountered");
+ match err {
+ ErrorHandled::Reported(_) => {
+ self.cx.tcx().sess.span_err(constant.span, "erroneous constant encountered");
+ }
+ ErrorHandled::TooGeneric => {
+ span_bug!(constant.span, "codegen encountered polymorphic constant: {:?}", err);
+ }
+ }
err
})
}
diff --git a/compiler/rustc_codegen_ssa/src/mir/debuginfo.rs b/compiler/rustc_codegen_ssa/src/mir/debuginfo.rs
index 157c1c823..99283d3bb 100644
--- a/compiler/rustc_codegen_ssa/src/mir/debuginfo.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/debuginfo.rs
@@ -14,6 +14,8 @@ use super::operand::{OperandRef, OperandValue};
use super::place::PlaceRef;
use super::{FunctionCx, LocalRef};
+use std::ops::Range;
+
pub struct FunctionDebugContext<S, L> {
pub scopes: IndexVec<mir::SourceScope, DebugScope<S, L>>,
}
@@ -25,7 +27,7 @@ pub enum VariableKind {
}
/// Like `mir::VarDebugInfo`, but within a `mir::Local`.
-#[derive(Copy, Clone)]
+#[derive(Clone)]
pub struct PerLocalVarDebugInfo<'tcx, D> {
pub name: Symbol,
pub source_info: mir::SourceInfo,
@@ -33,6 +35,10 @@ pub struct PerLocalVarDebugInfo<'tcx, D> {
/// `DIVariable` returned by `create_dbg_var`.
pub dbg_var: Option<D>,
+ /// Byte range in the `dbg_var` covered by this fragment,
+ /// if this is a fragment of a composite `VarDebugInfo`.
+ pub fragment: Option<Range<Size>>,
+
/// `.place.projection` from `mir::VarDebugInfo`.
pub projection: &'tcx ty::List<mir::PlaceElem<'tcx>>,
}
@@ -145,7 +151,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
Some(per_local) => &per_local[local],
None => return,
};
- let whole_local_var = vars.iter().find(|var| var.projection.is_empty()).copied();
+ let whole_local_var = vars.iter().find(|var| var.projection.is_empty()).cloned();
let has_proj = || vars.iter().any(|var| !var.projection.is_empty());
let fallback_var = if self.mir.local_kind(local) == mir::LocalKind::Arg {
@@ -187,6 +193,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
name,
source_info: decl.source_info,
dbg_var,
+ fragment: None,
projection: ty::List::empty(),
})
}
@@ -199,7 +206,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
let name = if bx.sess().fewer_names() {
None
} else {
- Some(match whole_local_var.or(fallback_var) {
+ Some(match whole_local_var.or(fallback_var.clone()) {
Some(var) if var.name != kw::Empty => var.name.to_string(),
_ => format!("{:?}", local),
})
@@ -249,7 +256,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
LocalRef::UnsizedPlace(_) => return,
};
- let vars = vars.iter().copied().chain(fallback_var);
+ let vars = vars.iter().cloned().chain(fallback_var);
for var in vars {
let Some(dbg_var) = var.dbg_var else { continue };
@@ -312,9 +319,16 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
bx.store(place.llval, alloca.llval, alloca.align);
// Point the debug info to `*alloca` for the current variable
- bx.dbg_var_addr(dbg_var, dbg_loc, alloca.llval, Size::ZERO, &[Size::ZERO]);
+ bx.dbg_var_addr(dbg_var, dbg_loc, alloca.llval, Size::ZERO, &[Size::ZERO], None);
} else {
- bx.dbg_var_addr(dbg_var, dbg_loc, base.llval, direct_offset, &indirect_offsets);
+ bx.dbg_var_addr(
+ dbg_var,
+ dbg_loc,
+ base.llval,
+ direct_offset,
+ &indirect_offsets,
+ None,
+ );
}
}
}
@@ -382,6 +396,10 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
let ty = self.monomorphize(c.ty());
(ty, VariableKind::LocalVariable)
}
+ mir::VarDebugInfoContents::Composite { ty, fragments: _ } => {
+ let ty = self.monomorphize(ty);
+ (ty, VariableKind::LocalVariable)
+ }
};
self.cx.create_dbg_var(var.name, var_ty, dbg_scope, var_kind, span)
@@ -393,6 +411,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
name: var.name,
source_info: var.source_info,
dbg_var,
+ fragment: None,
projection: place.projection,
});
}
@@ -407,10 +426,48 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
bx,
);
- bx.dbg_var_addr(dbg_var, dbg_loc, base.llval, Size::ZERO, &[]);
+ bx.dbg_var_addr(dbg_var, dbg_loc, base.llval, Size::ZERO, &[], None);
}
}
}
+ mir::VarDebugInfoContents::Composite { ty, ref fragments } => {
+ let var_ty = self.monomorphize(ty);
+ let var_layout = self.cx.layout_of(var_ty);
+ for fragment in fragments {
+ let mut fragment_start = Size::ZERO;
+ let mut fragment_layout = var_layout;
+
+ for elem in &fragment.projection {
+ match *elem {
+ mir::ProjectionElem::Field(field, _) => {
+ let i = field.index();
+ fragment_start += fragment_layout.fields.offset(i);
+ fragment_layout = fragment_layout.field(self.cx, i);
+ }
+ _ => span_bug!(
+ var.source_info.span,
+ "unsupported fragment projection `{:?}`",
+ elem,
+ ),
+ }
+ }
+
+ let place = fragment.contents;
+ per_local[place.local].push(PerLocalVarDebugInfo {
+ name: var.name,
+ source_info: var.source_info,
+ dbg_var,
+ fragment: if fragment_layout.size == var_layout.size {
+ // Fragment covers entire variable, so as far as
+ // DWARF is concerned, it's not really a fragment.
+ None
+ } else {
+ Some(fragment_start..fragment_start + fragment_layout.size)
+ },
+ projection: place.projection,
+ });
+ }
+ }
}
}
Some(per_local)
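For illustration only (not part of the patch): a small standalone model of the fragment computation above — projecting into a field accumulates its byte offset and narrows the size, and a fragment that ends up covering the whole variable is reported as `None` (DWARF-wise it is not a fragment). The layout numbers in `main` are made up; rustc gets them from the real `Layout`.

use std::ops::Range;

/// One projection step: narrow from the enclosing layout to field `i`.
/// `fields` gives (byte offset, size) per field of the enclosing layout.
fn project_field(start: u64, fields: &[(u64, u64)], i: usize) -> (u64, u64) {
    let (off, size) = fields[i];
    (start + off, size)
}

/// Mirrors the decision in the patch: a fragment covering the entire
/// variable is not really a fragment.
fn fragment_range(var_size: u64, start: u64, size: u64) -> Option<Range<u64>> {
    if size == var_size { None } else { Some(start..start + size) }
}

fn main() {
    // Assumed layout of the composite variable: field 0 at offset 0 (8 bytes),
    // field 1 at offset 8 (4 bytes), total size 16 bytes.
    let fields = [(0, 8), (8, 4)];
    let (start, size) = project_field(0, &fields, 1);
    assert_eq!(fragment_range(16, start, size), Some(8..12));
    // A "fragment" that is the whole variable.
    assert_eq!(fragment_range(16, 0, 16), None);
}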
diff --git a/compiler/rustc_codegen_ssa/src/mir/mod.rs b/compiler/rustc_codegen_ssa/src/mir/mod.rs
index da9aaf00e..79c66a955 100644
--- a/compiler/rustc_codegen_ssa/src/mir/mod.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/mod.rs
@@ -16,6 +16,18 @@ use rustc_middle::mir::traversal;
use self::operand::{OperandRef, OperandValue};
+// Used for tracking the state of generated basic blocks.
+enum CachedLlbb<T> {
+ /// Nothing created yet.
+ None,
+
+ /// Has been created.
+ Some(T),
+
+ /// Nothing created yet, and nothing should be.
+ Skip,
+}
+
/// Master context for codegenning from MIR.
pub struct FunctionCx<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> {
instance: Instance<'tcx>,
@@ -43,7 +55,7 @@ pub struct FunctionCx<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> {
/// as-needed (e.g. RPO reaching it or another block branching to it).
// FIXME(eddyb) rename `llbbs` and other `ll`-prefixed things to use a
// more backend-agnostic prefix such as `cg` (i.e. this would be `cgbbs`).
- cached_llbbs: IndexVec<mir::BasicBlock, Option<Bx::BasicBlock>>,
+ cached_llbbs: IndexVec<mir::BasicBlock, CachedLlbb<Bx::BasicBlock>>,
/// The funclet status of each basic block
cleanup_kinds: IndexVec<mir::BasicBlock, analyze::CleanupKind>,
@@ -155,11 +167,13 @@ pub fn codegen_mir<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
}
let cleanup_kinds = analyze::cleanup_kinds(&mir);
- let cached_llbbs: IndexVec<mir::BasicBlock, Option<Bx::BasicBlock>> = mir
- .basic_blocks
- .indices()
- .map(|bb| if bb == mir::START_BLOCK { Some(start_llbb) } else { None })
- .collect();
+ let cached_llbbs: IndexVec<mir::BasicBlock, CachedLlbb<Bx::BasicBlock>> =
+ mir.basic_blocks
+ .indices()
+ .map(|bb| {
+ if bb == mir::START_BLOCK { CachedLlbb::Some(start_llbb) } else { CachedLlbb::None }
+ })
+ .collect();
let mut fx = FunctionCx {
instance,
@@ -189,7 +203,7 @@ pub fn codegen_mir<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
all_consts_ok = false;
match err {
// errored or at least linted
- ErrorHandled::Reported(_) | ErrorHandled::Linted => {}
+ ErrorHandled::Reported(_) => {}
ErrorHandled::TooGeneric => {
span_bug!(const_.span, "codegen encountered polymorphic constant: {:?}", err)
}
diff --git a/compiler/rustc_codegen_ssa/src/mir/operand.rs b/compiler/rustc_codegen_ssa/src/mir/operand.rs
index e6ba642a7..34a5b638d 100644
--- a/compiler/rustc_codegen_ssa/src/mir/operand.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/operand.rs
@@ -40,10 +40,10 @@ pub enum OperandValue<V> {
/// instead.
#[derive(Copy, Clone)]
pub struct OperandRef<'tcx, V> {
- // The value.
+ /// The value.
pub val: OperandValue<V>,
- // The layout of value, based on its Rust type.
+ /// The layout of value, based on its Rust type.
pub layout: TyAndLayout<'tcx>,
}
diff --git a/compiler/rustc_codegen_ssa/src/mir/place.rs b/compiler/rustc_codegen_ssa/src/mir/place.rs
index 9c18df564..fbe30154a 100644
--- a/compiler/rustc_codegen_ssa/src/mir/place.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/place.rs
@@ -29,7 +29,7 @@ pub struct PlaceRef<'tcx, V> {
impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> {
pub fn new_sized(llval: V, layout: TyAndLayout<'tcx>) -> PlaceRef<'tcx, V> {
- assert!(!layout.is_unsized());
+ assert!(layout.is_sized());
PlaceRef { llval, llextra: None, layout, align: layout.align.abi }
}
@@ -38,7 +38,7 @@ impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> {
layout: TyAndLayout<'tcx>,
align: Align,
) -> PlaceRef<'tcx, V> {
- assert!(!layout.is_unsized());
+ assert!(layout.is_sized());
PlaceRef { llval, llextra: None, layout, align }
}
@@ -48,7 +48,7 @@ impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> {
bx: &mut Bx,
layout: TyAndLayout<'tcx>,
) -> Self {
- assert!(!layout.is_unsized(), "tried to statically allocate unsized place");
+ assert!(layout.is_sized(), "tried to statically allocate unsized place");
let tmp = bx.alloca(bx.cx().backend_type(layout), layout.align.abi);
Self::new_sized(tmp, layout)
}
@@ -145,7 +145,7 @@ impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> {
);
return simple();
}
- _ if !field.is_unsized() => return simple(),
+ _ if field.is_sized() => return simple(),
ty::Slice(..) | ty::Str | ty::Foreign(..) => return simple(),
ty::Adt(def, _) => {
if def.repr().packed() {
@@ -209,7 +209,9 @@ impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> {
bx: &mut Bx,
cast_to: Ty<'tcx>,
) -> V {
- let cast_to = bx.cx().immediate_backend_type(bx.cx().layout_of(cast_to));
+ let cast_to_layout = bx.cx().layout_of(cast_to);
+ let cast_to_size = cast_to_layout.layout.size();
+ let cast_to = bx.cx().immediate_backend_type(cast_to_layout);
if self.layout.abi.is_uninhabited() {
return bx.cx().const_undef(cast_to);
}
@@ -229,7 +231,8 @@ impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> {
// Read the tag/niche-encoded discriminant from memory.
let tag = self.project_field(bx, tag_field);
- let tag = bx.load_operand(tag);
+ let tag_op = bx.load_operand(tag);
+ let tag_imm = tag_op.immediate();
// Decode the discriminant (specifically if it's niche-encoded).
match *tag_encoding {
@@ -242,68 +245,170 @@ impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> {
Int(_, signed) => !tag_scalar.is_bool() && signed,
_ => false,
};
- bx.intcast(tag.immediate(), cast_to, signed)
+ bx.intcast(tag_imm, cast_to, signed)
}
TagEncoding::Niche { untagged_variant, ref niche_variants, niche_start } => {
- // Rebase from niche values to discriminants, and check
- // whether the result is in range for the niche variants.
- let niche_llty = bx.cx().immediate_backend_type(tag.layout);
- let tag = tag.immediate();
-
- // We first compute the "relative discriminant" (wrt `niche_variants`),
- // that is, if `n = niche_variants.end() - niche_variants.start()`,
- // we remap `niche_start..=niche_start + n` (which may wrap around)
- // to (non-wrap-around) `0..=n`, to be able to check whether the
- // discriminant corresponds to a niche variant with one comparison.
- // We also can't go directly to the (variant index) discriminant
- // and check that it is in the range `niche_variants`, because
- // that might not fit in the same type, on top of needing an extra
- // comparison (see also the comment on `let niche_discr`).
- let relative_discr = if niche_start == 0 {
- // Avoid subtracting `0`, which wouldn't work for pointers.
- // FIXME(eddyb) check the actual primitive type here.
- tag
+ // Cast to an integer so we don't have to treat a pointer as a
+ // special case.
+ let (tag, tag_llty) = if tag_scalar.primitive().is_ptr() {
+ let t = bx.type_isize();
+ let tag = bx.ptrtoint(tag_imm, t);
+ (tag, t)
} else {
- bx.sub(tag, bx.cx().const_uint_big(niche_llty, niche_start))
+ (tag_imm, bx.cx().immediate_backend_type(tag_op.layout))
};
+
+ let tag_size = tag_scalar.size(bx.cx());
+ let max_unsigned = tag_size.unsigned_int_max();
+ let max_signed = tag_size.signed_int_max() as u128;
+ let min_signed = max_signed + 1;
let relative_max = niche_variants.end().as_u32() - niche_variants.start().as_u32();
- let is_niche = if relative_max == 0 {
- // Avoid calling `const_uint`, which wouldn't work for pointers.
- // Also use canonical == 0 instead of non-canonical u<= 0.
- // FIXME(eddyb) check the actual primitive type here.
- bx.icmp(IntPredicate::IntEQ, relative_discr, bx.cx().const_null(niche_llty))
+ let niche_end = niche_start.wrapping_add(relative_max as u128) & max_unsigned;
+ let range = tag_scalar.valid_range(bx.cx());
+
+ let sle = |lhs: u128, rhs: u128| -> bool {
+ // Signed and unsigned comparisons give the same results,
+ // except that in signed comparisons an integer with the
+ // sign bit set is less than one with the sign bit clear.
+ // Toggle the sign bit to do a signed comparison.
+ (lhs ^ min_signed) <= (rhs ^ min_signed)
+ };
+
+ // We have a subrange `niche_start..=niche_end` inside `range`.
+ // If the value of the tag is inside this subrange, it's a
+ // "niche value", an increment of the discriminant. Otherwise it
+ // indicates the untagged variant.
+ // A general algorithm to extract the discriminant from the tag
+ // is:
+ // relative_tag = tag - niche_start
+ // is_niche = relative_tag <= (ule) relative_max
+ // discr = if is_niche {
+ // cast(relative_tag) + niche_variants.start()
+ // } else {
+ // untagged_variant
+ // }
+ // However, we will likely be able to emit simpler code.
+
+ // Find the least and greatest values in `range`, considered
+ // both as signed and unsigned.
+ let (low_unsigned, high_unsigned) = if range.start <= range.end {
+ (range.start, range.end)
} else {
- let relative_max = bx.cx().const_uint(niche_llty, relative_max as u64);
- bx.icmp(IntPredicate::IntULE, relative_discr, relative_max)
+ (0, max_unsigned)
+ };
+ let (low_signed, high_signed) = if sle(range.start, range.end) {
+ (range.start, range.end)
+ } else {
+ (min_signed, max_signed)
};
- // NOTE(eddyb) this addition needs to be performed on the final
- // type, in case the niche itself can't represent all variant
- // indices (e.g. `u8` niche with more than `256` variants,
- // but enough uninhabited variants so that the remaining variants
- // fit in the niche).
- // In other words, `niche_variants.end - niche_variants.start`
- // is representable in the niche, but `niche_variants.end`
- // might not be, in extreme cases.
- let niche_discr = {
- let relative_discr = if relative_max == 0 {
- // HACK(eddyb) since we have only one niche, we know which
- // one it is, and we can avoid having a dynamic value here.
- bx.cx().const_uint(cast_to, 0)
+ let niches_ule = niche_start <= niche_end;
+ let niches_sle = sle(niche_start, niche_end);
+ let cast_smaller = cast_to_size <= tag_size;
+
+ // In the algorithm above, we can change
+ // cast(relative_tag) + niche_variants.start()
+ // into
+ // cast(tag + (niche_variants.start() - niche_start))
+ // if either the casted type is no larger than the original
+ // type, or if the niche values are contiguous (in either the
+ // signed or unsigned sense).
+ let can_incr = cast_smaller || niches_ule || niches_sle;
+
+ let data_for_boundary_niche = || -> Option<(IntPredicate, u128)> {
+ if !can_incr {
+ None
+ } else if niche_start == low_unsigned {
+ Some((IntPredicate::IntULE, niche_end))
+ } else if niche_end == high_unsigned {
+ Some((IntPredicate::IntUGE, niche_start))
+ } else if niche_start == low_signed {
+ Some((IntPredicate::IntSLE, niche_end))
+ } else if niche_end == high_signed {
+ Some((IntPredicate::IntSGE, niche_start))
} else {
- bx.intcast(relative_discr, cast_to, false)
+ None
+ }
+ };
+
+ let (is_niche, tagged_discr, delta) = if relative_max == 0 {
+ // Best case scenario: only one tagged variant. This will
+ // likely become just a comparison and a jump.
+ // The algorithm is:
+ // is_niche = tag == niche_start
+ // discr = if is_niche {
+ // niche_start
+ // } else {
+ // untagged_variant
+ // }
+ let niche_start = bx.cx().const_uint_big(tag_llty, niche_start);
+ let is_niche = bx.icmp(IntPredicate::IntEQ, tag, niche_start);
+ let tagged_discr =
+ bx.cx().const_uint(cast_to, niche_variants.start().as_u32() as u64);
+ (is_niche, tagged_discr, 0)
+ } else if let Some((predicate, constant)) = data_for_boundary_niche() {
+ // The niche values are either the lowest or the highest in
+ // `range`. We can avoid the first subtraction in the
+ // algorithm.
+ // The algorithm is now this:
+ // is_niche = tag <= niche_end
+ // discr = if is_niche {
+ // cast(tag + (niche_variants.start() - niche_start))
+ // } else {
+ // untagged_variant
+ // }
+ // (the first line may instead be tag >= niche_start,
+ // and may be a signed or unsigned comparison)
+ // The arithmetic must be done before the cast, so we can
+ // have the correct wrapping behavior. See issue #104519 for
+ // the consequences of getting this wrong.
+ let is_niche =
+ bx.icmp(predicate, tag, bx.cx().const_uint_big(tag_llty, constant));
+ let delta = (niche_variants.start().as_u32() as u128).wrapping_sub(niche_start);
+ let incr_tag = if delta == 0 {
+ tag
+ } else {
+ bx.add(tag, bx.cx().const_uint_big(tag_llty, delta))
};
- bx.add(
+
+ let cast_tag = if cast_smaller {
+ bx.intcast(incr_tag, cast_to, false)
+ } else if niches_ule {
+ bx.zext(incr_tag, cast_to)
+ } else {
+ bx.sext(incr_tag, cast_to)
+ };
+
+ (is_niche, cast_tag, 0)
+ } else {
+ // The special cases don't apply, so we'll have to go with
+ // the general algorithm.
+ let relative_discr = bx.sub(tag, bx.cx().const_uint_big(tag_llty, niche_start));
+ let cast_tag = bx.intcast(relative_discr, cast_to, false);
+ let is_niche = bx.icmp(
+ IntPredicate::IntULE,
relative_discr,
- bx.cx().const_uint(cast_to, niche_variants.start().as_u32() as u64),
- )
+ bx.cx().const_uint(tag_llty, relative_max as u64),
+ );
+ (is_niche, cast_tag, niche_variants.start().as_u32() as u128)
+ };
+
+ let tagged_discr = if delta == 0 {
+ tagged_discr
+ } else {
+ bx.add(tagged_discr, bx.cx().const_uint_big(cast_to, delta))
};
- bx.select(
+ let discr = bx.select(
is_niche,
- niche_discr,
+ tagged_discr,
bx.cx().const_uint(cast_to, untagged_variant.as_u32() as u64),
- )
+ );
+
+ // In principle we could insert assumes on the possible range of `discr`, but
+ // currently in LLVM this seems to be a pessimization.
+
+ discr
}
}
}
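For illustration only (not part of the patch): the comments in the niche-decoding rewrite above describe a general algorithm (`relative_tag = tag - niche_start; is_niche = relative_tag <= (unsigned) relative_max; ...`) plus a sign-bit-toggle trick for doing signed comparisons on values held in `u128`. The sketch below works both out as plain integer arithmetic with made-up tag and niche values; the real code of course emits backend instructions rather than computing values directly.

/// Signed `<=` on values stored in u128, as in the patch's `sle` closure:
/// toggling the sign bit makes an unsigned comparison order the values as
/// if they were signed.
fn sle(lhs: u128, rhs: u128, min_signed: u128) -> bool {
    (lhs ^ min_signed) <= (rhs ^ min_signed)
}

/// The general niche-decoding algorithm from the comment in the patch:
/// map `niche_start..=niche_start + relative_max` (possibly wrapping) to a
/// discriminant, and everything else to the untagged variant.
fn decode_discr(
    tag: u128,
    niche_start: u128,
    relative_max: u128,
    niche_variants_start: u128,
    untagged_variant: u128,
) -> u128 {
    let relative_tag = tag.wrapping_sub(niche_start);
    let is_niche = relative_tag <= relative_max;
    if is_niche { relative_tag + niche_variants_start } else { untagged_variant }
}

fn main() {
    // Made-up encoding: niche tag values 3..=5 map to variants 1..=3,
    // anything else is the untagged variant 0.
    assert_eq!(decode_discr(4, 3, 2, 1, 0), 2); // tag 4 -> variant 2
    assert_eq!(decode_discr(7, 3, 2, 1, 0), 0); // not a niche value

    // 8-bit example of the sign-bit trick: as signed bytes, -1 <= 1.
    let min_signed = 0x80; // sign bit position for an 8-bit tag
    assert!(sle(0xFF, 0x01, min_signed));
    assert!(!sle(0x01, 0xFF, min_signed));
}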
diff --git a/compiler/rustc_codegen_ssa/src/mir/rvalue.rs b/compiler/rustc_codegen_ssa/src/mir/rvalue.rs
index 4aab31fbf..9ad96f7a4 100644
--- a/compiler/rustc_codegen_ssa/src/mir/rvalue.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/rvalue.rs
@@ -18,17 +18,16 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
#[instrument(level = "trace", skip(self, bx))]
pub fn codegen_rvalue(
&mut self,
- mut bx: Bx,
+ bx: &mut Bx,
dest: PlaceRef<'tcx, Bx::Value>,
rvalue: &mir::Rvalue<'tcx>,
- ) -> Bx {
+ ) {
match *rvalue {
mir::Rvalue::Use(ref operand) => {
- let cg_operand = self.codegen_operand(&mut bx, operand);
+ let cg_operand = self.codegen_operand(bx, operand);
// FIXME: consider not copying constants through stack. (Fixable by codegen'ing
// constants into `OperandValue::Ref`; why don’t we do that yet if we don’t?)
- cg_operand.val.store(&mut bx, dest);
- bx
+ cg_operand.val.store(bx, dest);
}
mir::Rvalue::Cast(mir::CastKind::Pointer(PointerCast::Unsize), ref source, _) => {
@@ -37,16 +36,16 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
if bx.cx().is_backend_scalar_pair(dest.layout) {
// Into-coerce of a thin pointer to a fat pointer -- just
// use the operand path.
- let (mut bx, temp) = self.codegen_rvalue_operand(bx, rvalue);
- temp.val.store(&mut bx, dest);
- return bx;
+ let temp = self.codegen_rvalue_operand(bx, rvalue);
+ temp.val.store(bx, dest);
+ return;
}
// Unsize of a nontrivial struct. I would prefer for
// this to be eliminated by MIR building, but
// `CoerceUnsized` can be passed by a where-clause,
// so the (generic) MIR may not be able to expand it.
- let operand = self.codegen_operand(&mut bx, source);
+ let operand = self.codegen_operand(bx, source);
match operand.val {
OperandValue::Pair(..) | OperandValue::Immediate(_) => {
// Unsize from an immediate structure. We don't
@@ -56,63 +55,62 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
// index into the struct, and this case isn't
// important enough for it.
debug!("codegen_rvalue: creating ugly alloca");
- let scratch = PlaceRef::alloca(&mut bx, operand.layout);
- scratch.storage_live(&mut bx);
- operand.val.store(&mut bx, scratch);
- base::coerce_unsized_into(&mut bx, scratch, dest);
- scratch.storage_dead(&mut bx);
+ let scratch = PlaceRef::alloca(bx, operand.layout);
+ scratch.storage_live(bx);
+ operand.val.store(bx, scratch);
+ base::coerce_unsized_into(bx, scratch, dest);
+ scratch.storage_dead(bx);
}
OperandValue::Ref(llref, None, align) => {
let source = PlaceRef::new_sized_aligned(llref, operand.layout, align);
- base::coerce_unsized_into(&mut bx, source, dest);
+ base::coerce_unsized_into(bx, source, dest);
}
OperandValue::Ref(_, Some(_), _) => {
bug!("unsized coercion on an unsized rvalue");
}
}
- bx
}
mir::Rvalue::Repeat(ref elem, count) => {
- let cg_elem = self.codegen_operand(&mut bx, elem);
+ let cg_elem = self.codegen_operand(bx, elem);
// Do not generate the loop for zero-sized elements or empty arrays.
if dest.layout.is_zst() {
- return bx;
+ return;
}
if let OperandValue::Immediate(v) = cg_elem.val {
let zero = bx.const_usize(0);
- let start = dest.project_index(&mut bx, zero).llval;
+ let start = dest.project_index(bx, zero).llval;
let size = bx.const_usize(dest.layout.size.bytes());
// Use llvm.memset.p0i8.* to initialize all zero arrays
if bx.cx().const_to_opt_u128(v, false) == Some(0) {
let fill = bx.cx().const_u8(0);
bx.memset(start, fill, size, dest.align, MemFlags::empty());
- return bx;
+ return;
}
// Use llvm.memset.p0i8.* to initialize byte arrays
let v = bx.from_immediate(v);
if bx.cx().val_ty(v) == bx.cx().type_i8() {
bx.memset(start, v, size, dest.align, MemFlags::empty());
- return bx;
+ return;
}
}
let count =
self.monomorphize(count).eval_usize(bx.cx().tcx(), ty::ParamEnv::reveal_all());
- bx.write_operand_repeatedly(cg_elem, count, dest)
+ bx.write_operand_repeatedly(cg_elem, count, dest);
}
mir::Rvalue::Aggregate(ref kind, ref operands) => {
let (dest, active_field_index) = match **kind {
mir::AggregateKind::Adt(adt_did, variant_index, _, _, active_field_index) => {
- dest.codegen_set_discr(&mut bx, variant_index);
+ dest.codegen_set_discr(bx, variant_index);
if bx.tcx().adt_def(adt_did).is_enum() {
- (dest.project_downcast(&mut bx, variant_index), active_field_index)
+ (dest.project_downcast(bx, variant_index), active_field_index)
} else {
(dest, active_field_index)
}
@@ -120,37 +118,35 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
_ => (dest, None),
};
for (i, operand) in operands.iter().enumerate() {
- let op = self.codegen_operand(&mut bx, operand);
+ let op = self.codegen_operand(bx, operand);
                        // Do not generate stores and GEPs for zero-sized fields.
if !op.layout.is_zst() {
let field_index = active_field_index.unwrap_or(i);
let field = if let mir::AggregateKind::Array(_) = **kind {
let llindex = bx.cx().const_usize(field_index as u64);
- dest.project_index(&mut bx, llindex)
+ dest.project_index(bx, llindex)
} else {
- dest.project_field(&mut bx, field_index)
+ dest.project_field(bx, field_index)
};
- op.val.store(&mut bx, field);
+ op.val.store(bx, field);
}
}
- bx
}
_ => {
assert!(self.rvalue_creates_operand(rvalue, DUMMY_SP));
- let (mut bx, temp) = self.codegen_rvalue_operand(bx, rvalue);
- temp.val.store(&mut bx, dest);
- bx
+ let temp = self.codegen_rvalue_operand(bx, rvalue);
+ temp.val.store(bx, dest);
}
}
}
pub fn codegen_rvalue_unsized(
&mut self,
- mut bx: Bx,
+ bx: &mut Bx,
indirect_dest: PlaceRef<'tcx, Bx::Value>,
rvalue: &mir::Rvalue<'tcx>,
- ) -> Bx {
+ ) {
debug!(
"codegen_rvalue_unsized(indirect_dest.llval={:?}, rvalue={:?})",
indirect_dest.llval, rvalue
@@ -158,9 +154,8 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
match *rvalue {
mir::Rvalue::Use(ref operand) => {
- let cg_operand = self.codegen_operand(&mut bx, operand);
- cg_operand.val.store_unsized(&mut bx, indirect_dest);
- bx
+ let cg_operand = self.codegen_operand(bx, operand);
+ cg_operand.val.store_unsized(bx, indirect_dest);
}
_ => bug!("unsized assignment other than `Rvalue::Use`"),
@@ -169,9 +164,9 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
pub fn codegen_rvalue_operand(
&mut self,
- mut bx: Bx,
+ bx: &mut Bx,
rvalue: &mir::Rvalue<'tcx>,
- ) -> (Bx, OperandRef<'tcx, Bx::Value>) {
+ ) -> OperandRef<'tcx, Bx::Value> {
assert!(
self.rvalue_creates_operand(rvalue, DUMMY_SP),
"cannot codegen {:?} to operand",
@@ -180,7 +175,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
match *rvalue {
mir::Rvalue::Cast(ref kind, ref source, mir_cast_ty) => {
- let operand = self.codegen_operand(&mut bx, source);
+ let operand = self.codegen_operand(bx, source);
debug!("cast operand is {:?}", operand);
let cast = bx.cx().layout_of(self.monomorphize(mir_cast_ty));
@@ -245,7 +240,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
}
};
let (lldata, llextra) =
- base::unsize_ptr(&mut bx, lldata, operand.layout.ty, cast.ty, llextra);
+ base::unsize_ptr(bx, lldata, operand.layout.ty, cast.ty, llextra);
OperandValue::Pair(lldata, llextra)
}
mir::CastKind::Pointer(PointerCast::MutToConstPointer)
@@ -278,7 +273,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
OperandValue::Pair(v, l) => (v, Some(l)),
};
let (lldata, llextra) =
- base::cast_to_dyn_star(&mut bx, lldata, operand.layout, cast.ty, llextra);
+ base::cast_to_dyn_star(bx, lldata, operand.layout, cast.ty, llextra);
OperandValue::Pair(lldata, llextra)
}
mir::CastKind::Pointer(
@@ -299,7 +294,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
let ll_t_out = bx.cx().immediate_backend_type(cast);
if operand.layout.abi.is_uninhabited() {
let val = OperandValue::Immediate(bx.cx().const_undef(ll_t_out));
- return (bx, OperandRef { val, layout: cast });
+ return OperandRef { val, layout: cast };
}
let r_t_in =
CastTy::from_ty(operand.layout.ty).expect("bad input type for cast");
@@ -348,7 +343,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
OperandValue::Immediate(newval)
}
};
- (bx, OperandRef { val, layout: cast })
+ OperandRef { val, layout: cast }
}
mir::Rvalue::Ref(_, bk, place) => {
@@ -361,10 +356,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
self.codegen_place_to_pointer(bx, place, mk_ref)
}
- mir::Rvalue::CopyForDeref(place) => {
- let operand = self.codegen_operand(&mut bx, &Operand::Copy(place));
- (bx, operand)
- }
+ mir::Rvalue::CopyForDeref(place) => self.codegen_operand(bx, &Operand::Copy(place)),
mir::Rvalue::AddressOf(mutability, place) => {
let mk_ptr = move |tcx: TyCtxt<'tcx>, ty: Ty<'tcx>| {
tcx.mk_ptr(ty::TypeAndMut { ty, mutbl: mutability })
@@ -373,23 +365,22 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
}
mir::Rvalue::Len(place) => {
- let size = self.evaluate_array_len(&mut bx, place);
- let operand = OperandRef {
+ let size = self.evaluate_array_len(bx, place);
+ OperandRef {
val: OperandValue::Immediate(size),
layout: bx.cx().layout_of(bx.tcx().types.usize),
- };
- (bx, operand)
+ }
}
mir::Rvalue::BinaryOp(op, box (ref lhs, ref rhs)) => {
- let lhs = self.codegen_operand(&mut bx, lhs);
- let rhs = self.codegen_operand(&mut bx, rhs);
+ let lhs = self.codegen_operand(bx, lhs);
+ let rhs = self.codegen_operand(bx, rhs);
let llresult = match (lhs.val, rhs.val) {
(
OperandValue::Pair(lhs_addr, lhs_extra),
OperandValue::Pair(rhs_addr, rhs_extra),
) => self.codegen_fat_ptr_binop(
- &mut bx,
+ bx,
op,
lhs_addr,
lhs_extra,
@@ -399,22 +390,21 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
),
(OperandValue::Immediate(lhs_val), OperandValue::Immediate(rhs_val)) => {
- self.codegen_scalar_binop(&mut bx, op, lhs_val, rhs_val, lhs.layout.ty)
+ self.codegen_scalar_binop(bx, op, lhs_val, rhs_val, lhs.layout.ty)
}
_ => bug!(),
};
- let operand = OperandRef {
+ OperandRef {
val: OperandValue::Immediate(llresult),
layout: bx.cx().layout_of(op.ty(bx.tcx(), lhs.layout.ty, rhs.layout.ty)),
- };
- (bx, operand)
+ }
}
mir::Rvalue::CheckedBinaryOp(op, box (ref lhs, ref rhs)) => {
- let lhs = self.codegen_operand(&mut bx, lhs);
- let rhs = self.codegen_operand(&mut bx, rhs);
+ let lhs = self.codegen_operand(bx, lhs);
+ let rhs = self.codegen_operand(bx, rhs);
let result = self.codegen_scalar_checked_binop(
- &mut bx,
+ bx,
op,
lhs.immediate(),
rhs.immediate(),
@@ -422,13 +412,11 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
);
let val_ty = op.ty(bx.tcx(), lhs.layout.ty, rhs.layout.ty);
let operand_ty = bx.tcx().intern_tup(&[val_ty, bx.tcx().types.bool]);
- let operand = OperandRef { val: result, layout: bx.cx().layout_of(operand_ty) };
-
- (bx, operand)
+ OperandRef { val: result, layout: bx.cx().layout_of(operand_ty) }
}
mir::Rvalue::UnaryOp(op, ref operand) => {
- let operand = self.codegen_operand(&mut bx, operand);
+ let operand = self.codegen_operand(bx, operand);
let lloperand = operand.immediate();
let is_float = operand.layout.ty.is_floating_point();
let llval = match op {
@@ -441,22 +429,17 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
}
}
};
- (bx, OperandRef { val: OperandValue::Immediate(llval), layout: operand.layout })
+ OperandRef { val: OperandValue::Immediate(llval), layout: operand.layout }
}
mir::Rvalue::Discriminant(ref place) => {
let discr_ty = rvalue.ty(self.mir, bx.tcx());
let discr_ty = self.monomorphize(discr_ty);
- let discr = self
- .codegen_place(&mut bx, place.as_ref())
- .codegen_get_discr(&mut bx, discr_ty);
- (
- bx,
- OperandRef {
- val: OperandValue::Immediate(discr),
- layout: self.cx.layout_of(discr_ty),
- },
- )
+ let discr = self.codegen_place(bx, place.as_ref()).codegen_get_discr(bx, discr_ty);
+ OperandRef {
+ val: OperandValue::Immediate(discr),
+ layout: self.cx.layout_of(discr_ty),
+ }
}
mir::Rvalue::NullaryOp(null_op, ty) => {
@@ -469,36 +452,27 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
};
let val = bx.cx().const_usize(val);
let tcx = self.cx.tcx();
- (
- bx,
- OperandRef {
- val: OperandValue::Immediate(val),
- layout: self.cx.layout_of(tcx.types.usize),
- },
- )
+ OperandRef {
+ val: OperandValue::Immediate(val),
+ layout: self.cx.layout_of(tcx.types.usize),
+ }
}
mir::Rvalue::ThreadLocalRef(def_id) => {
assert!(bx.cx().tcx().is_static(def_id));
let static_ = bx.get_static(def_id);
let layout = bx.layout_of(bx.cx().tcx().static_ptr_ty(def_id));
- let operand = OperandRef::from_immediate_or_packed_pair(&mut bx, static_, layout);
- (bx, operand)
- }
- mir::Rvalue::Use(ref operand) => {
- let operand = self.codegen_operand(&mut bx, operand);
- (bx, operand)
+ OperandRef::from_immediate_or_packed_pair(bx, static_, layout)
}
+ mir::Rvalue::Use(ref operand) => self.codegen_operand(bx, operand),
mir::Rvalue::Repeat(..) | mir::Rvalue::Aggregate(..) => {
// According to `rvalue_creates_operand`, only ZST
// aggregate rvalues are allowed to be operands.
let ty = rvalue.ty(self.mir, self.cx.tcx());
- let operand =
- OperandRef::new_zst(&mut bx, self.cx.layout_of(self.monomorphize(ty)));
- (bx, operand)
+ OperandRef::new_zst(bx, self.cx.layout_of(self.monomorphize(ty)))
}
mir::Rvalue::ShallowInitBox(ref operand, content_ty) => {
- let operand = self.codegen_operand(&mut bx, operand);
+ let operand = self.codegen_operand(bx, operand);
let lloperand = operand.immediate();
let content_ty = self.monomorphize(content_ty);
@@ -506,8 +480,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
let llty_ptr = bx.cx().backend_type(box_layout);
let val = bx.pointercast(lloperand, llty_ptr);
- let operand = OperandRef { val: OperandValue::Immediate(val), layout: box_layout };
- (bx, operand)
+ OperandRef { val: OperandValue::Immediate(val), layout: box_layout }
}
}
}
@@ -531,11 +504,11 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
/// Codegen an `Rvalue::AddressOf` or `Rvalue::Ref`
fn codegen_place_to_pointer(
&mut self,
- mut bx: Bx,
+ bx: &mut Bx,
place: mir::Place<'tcx>,
mk_ptr_ty: impl FnOnce(TyCtxt<'tcx>, Ty<'tcx>) -> Ty<'tcx>,
- ) -> (Bx, OperandRef<'tcx, Bx::Value>) {
- let cg_place = self.codegen_place(&mut bx, place.as_ref());
+ ) -> OperandRef<'tcx, Bx::Value> {
+ let cg_place = self.codegen_place(bx, place.as_ref());
let ty = cg_place.layout.ty;
@@ -546,7 +519,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
} else {
OperandValue::Pair(cg_place.llval, cg_place.llextra.unwrap())
};
- (bx, OperandRef { val, layout: self.cx.layout_of(mk_ptr_ty(self.cx.tcx(), ty)) })
+ OperandRef { val, layout: self.cx.layout_of(mk_ptr_ty(self.cx.tcx(), ty)) }
}
pub fn codegen_scalar_binop(
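
Every signature change in this file follows the same refactor: the codegen helpers now borrow the builder as `&mut Bx` instead of consuming it and returning it, so call sites no longer have to rebind `bx` after each call. A self-contained sketch of the old and new conventions, using stand-in types (`Builder`, `Rvalue` and `Operand` are illustrative, not the real `BuilderMethods` machinery):

struct Builder { insts: Vec<String> }
struct Rvalue(&'static str);
struct Operand;

// Old convention: take the builder by value and hand it back with the result.
fn lower_one_old(mut bx: Builder, rv: &Rvalue) -> (Builder, Operand) {
    bx.insts.push(format!("lower {}", rv.0));
    (bx, Operand)
}

// New convention: borrow the builder mutably; nothing needs to be returned.
fn lower_one_new(bx: &mut Builder, rv: &Rvalue) -> Operand {
    bx.insts.push(format!("lower {}", rv.0));
    Operand
}

fn main() {
    let mut bx = Builder { insts: Vec::new() };
    // Before the patch, every call site threaded the builder back by hand:
    let (bx2, _op) = lower_one_old(bx, &Rvalue("use"));
    bx = bx2;
    // After the patch, a call site simply passes `&mut bx` (or forwards an
    // existing `&mut Bx`):
    let _op = lower_one_new(&mut bx, &Rvalue("repeat"));
    assert_eq!(bx.insts.len(), 2);
}
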
diff --git a/compiler/rustc_codegen_ssa/src/mir/statement.rs b/compiler/rustc_codegen_ssa/src/mir/statement.rs
index 1db0fb3a6..19452c8cd 100644
--- a/compiler/rustc_codegen_ssa/src/mir/statement.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/statement.rs
@@ -8,8 +8,8 @@ use crate::traits::*;
impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
#[instrument(level = "debug", skip(self, bx))]
- pub fn codegen_statement(&mut self, mut bx: Bx, statement: &mir::Statement<'tcx>) -> Bx {
- self.set_debug_loc(&mut bx, statement.source_info);
+ pub fn codegen_statement(&mut self, bx: &mut Bx, statement: &mir::Statement<'tcx>) {
+ self.set_debug_loc(bx, statement.source_info);
match statement.kind {
mir::StatementKind::Assign(box (ref place, ref rvalue)) => {
if let Some(index) = place.as_local() {
@@ -19,10 +19,9 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
self.codegen_rvalue_unsized(bx, cg_indirect_dest, rvalue)
}
LocalRef::Operand(None) => {
- let (mut bx, operand) = self.codegen_rvalue_operand(bx, rvalue);
+ let operand = self.codegen_rvalue_operand(bx, rvalue);
self.locals[index] = LocalRef::Operand(Some(operand));
- self.debug_introduce_local(&mut bx, index);
- bx
+ self.debug_introduce_local(bx, index);
}
LocalRef::Operand(Some(op)) => {
if !op.layout.is_zst() {
@@ -35,59 +34,52 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
// If the type is zero-sized, it's already been set here,
// but we still need to make sure we codegen the operand
- self.codegen_rvalue_operand(bx, rvalue).0
+ self.codegen_rvalue_operand(bx, rvalue);
}
}
} else {
- let cg_dest = self.codegen_place(&mut bx, place.as_ref());
- self.codegen_rvalue(bx, cg_dest, rvalue)
+ let cg_dest = self.codegen_place(bx, place.as_ref());
+ self.codegen_rvalue(bx, cg_dest, rvalue);
}
}
mir::StatementKind::SetDiscriminant { box ref place, variant_index } => {
- self.codegen_place(&mut bx, place.as_ref())
- .codegen_set_discr(&mut bx, variant_index);
- bx
+ self.codegen_place(bx, place.as_ref()).codegen_set_discr(bx, variant_index);
}
mir::StatementKind::Deinit(..) => {
// For now, don't codegen this to anything. In the future it may be worth
// experimenting with what kind of information we can emit to LLVM without hurting
// perf here
- bx
}
mir::StatementKind::StorageLive(local) => {
if let LocalRef::Place(cg_place) = self.locals[local] {
- cg_place.storage_live(&mut bx);
+ cg_place.storage_live(bx);
} else if let LocalRef::UnsizedPlace(cg_indirect_place) = self.locals[local] {
- cg_indirect_place.storage_live(&mut bx);
+ cg_indirect_place.storage_live(bx);
}
- bx
}
mir::StatementKind::StorageDead(local) => {
if let LocalRef::Place(cg_place) = self.locals[local] {
- cg_place.storage_dead(&mut bx);
+ cg_place.storage_dead(bx);
} else if let LocalRef::UnsizedPlace(cg_indirect_place) = self.locals[local] {
- cg_indirect_place.storage_dead(&mut bx);
+ cg_indirect_place.storage_dead(bx);
}
- bx
}
mir::StatementKind::Coverage(box ref coverage) => {
- self.codegen_coverage(&mut bx, coverage.clone(), statement.source_info.scope);
- bx
+ self.codegen_coverage(bx, coverage.clone(), statement.source_info.scope);
}
mir::StatementKind::Intrinsic(box NonDivergingIntrinsic::Assume(ref op)) => {
- let op_val = self.codegen_operand(&mut bx, op);
+ let op_val = self.codegen_operand(bx, op);
bx.assume(op_val.immediate());
- bx
}
mir::StatementKind::Intrinsic(box NonDivergingIntrinsic::CopyNonOverlapping(
mir::CopyNonOverlapping { ref count, ref src, ref dst },
)) => {
- let dst_val = self.codegen_operand(&mut bx, dst);
- let src_val = self.codegen_operand(&mut bx, src);
- let count = self.codegen_operand(&mut bx, count).immediate();
+ let dst_val = self.codegen_operand(bx, dst);
+ let src_val = self.codegen_operand(bx, src);
+ let count = self.codegen_operand(bx, count).immediate();
let pointee_layout = dst_val
.layout
- .pointee_info_at(&bx, rustc_target::abi::Size::ZERO)
+ .pointee_info_at(bx, rustc_target::abi::Size::ZERO)
.expect("Expected pointer");
let bytes = bx.mul(count, bx.const_usize(pointee_layout.size.bytes()));
@@ -95,12 +87,11 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
let dst = dst_val.immediate();
let src = src_val.immediate();
bx.memcpy(dst, align, src, align, bytes, crate::MemFlags::empty());
- bx
}
mir::StatementKind::FakeRead(..)
| mir::StatementKind::Retag { .. }
| mir::StatementKind::AscribeUserType(..)
- | mir::StatementKind::Nop => bx,
+ | mir::StatementKind::Nop => {}
}
}
}
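
One detail of the `CopyNonOverlapping` lowering above that is easy to miss: the intrinsic's `count` is an element count, and the statement codegen scales it by the pointee size before handing a byte length to `memcpy`. A minimal sketch of that scaling in plain Rust (the generic function and slice parameters are illustrative; the real code works on backend values):

use std::mem::size_of;

fn copy_nonoverlapping_sketch<T: Copy>(src: &[T], dst: &mut [T], count: usize) {
    assert!(count <= src.len() && count <= dst.len());
    // Mirrors `bx.mul(count, bx.const_usize(pointee_layout.size.bytes()))`:
    // the element count becomes a byte length.
    let bytes = count * size_of::<T>();
    // The backend then emits a single memcpy of `bytes` bytes.
    unsafe {
        std::ptr::copy_nonoverlapping(
            src.as_ptr() as *const u8,
            dst.as_mut_ptr() as *mut u8,
            bytes,
        );
    }
}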