Diffstat (limited to 'compiler/rustc_codegen_ssa/src/mir')
-rw-r--r--  compiler/rustc_codegen_ssa/src/mir/analyze.rs        368
-rw-r--r--  compiler/rustc_codegen_ssa/src/mir/block.rs         1654
-rw-r--r--  compiler/rustc_codegen_ssa/src/mir/constant.rs        90
-rw-r--r--  compiler/rustc_codegen_ssa/src/mir/coverageinfo.rs    55
-rw-r--r--  compiler/rustc_codegen_ssa/src/mir/debuginfo.rs      418
-rw-r--r--  compiler/rustc_codegen_ssa/src/mir/intrinsic.rs      636
-rw-r--r--  compiler/rustc_codegen_ssa/src/mir/mod.rs            410
-rw-r--r--  compiler/rustc_codegen_ssa/src/mir/operand.rs        461
-rw-r--r--  compiler/rustc_codegen_ssa/src/mir/place.rs          549
-rw-r--r--  compiler/rustc_codegen_ssa/src/mir/rvalue.rs         729
-rw-r--r--  compiler/rustc_codegen_ssa/src/mir/statement.rs      102
11 files changed, 5472 insertions(+), 0 deletions(-)
diff --git a/compiler/rustc_codegen_ssa/src/mir/analyze.rs b/compiler/rustc_codegen_ssa/src/mir/analyze.rs
new file mode 100644
index 000000000..24da48ead
--- /dev/null
+++ b/compiler/rustc_codegen_ssa/src/mir/analyze.rs
@@ -0,0 +1,368 @@
+//! An analysis to determine which locals require allocas and
+//! which do not.
+
+use super::FunctionCx;
+use crate::traits::*;
+use rustc_data_structures::graph::dominators::Dominators;
+use rustc_index::bit_set::BitSet;
+use rustc_index::vec::IndexVec;
+use rustc_middle::mir::traversal;
+use rustc_middle::mir::visit::{MutatingUseContext, NonMutatingUseContext, PlaceContext, Visitor};
+use rustc_middle::mir::{self, Location, TerminatorKind};
+use rustc_middle::ty::layout::{HasTyCtxt, LayoutOf};
+
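+/// Returns the set of locals that cannot be kept as SSA values by the backend
+/// and therefore need to be spilled to memory (given an `alloca`).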
+pub fn non_ssa_locals<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
+ fx: &FunctionCx<'a, 'tcx, Bx>,
+) -> BitSet<mir::Local> {
+ let mir = fx.mir;
+ let dominators = mir.basic_blocks.dominators();
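+ // Pre-classify each local from its monomorphized layout alone: ZSTs need no
+ // storage, immediate and scalar-pair layouts are SSA candidates (`Unused`
+ // until a definition is seen), and everything else must live in memory.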
+ let locals = mir
+ .local_decls
+ .iter()
+ .map(|decl| {
+ let ty = fx.monomorphize(decl.ty);
+ let layout = fx.cx.spanned_layout_of(ty, decl.source_info.span);
+ if layout.is_zst() {
+ LocalKind::ZST
+ } else if fx.cx.is_backend_immediate(layout) || fx.cx.is_backend_scalar_pair(layout) {
+ LocalKind::Unused
+ } else {
+ LocalKind::Memory
+ }
+ })
+ .collect();
+
+ let mut analyzer = LocalAnalyzer { fx, dominators, locals };
+
+ // Arguments get assigned to by means of the function being called
+ for arg in mir.args_iter() {
+ analyzer.assign(arg, mir::START_BLOCK.start_location());
+ }
+
+ // If there exists a local definition that dominates all uses of that local,
+ // the definition should be visited first. Traverse blocks in an order that
+ // is a topological sort of the dominance partial order.
+ for (bb, data) in traversal::reverse_postorder(&mir) {
+ analyzer.visit_basic_block_data(bb, data);
+ }
+
+ let mut non_ssa_locals = BitSet::new_empty(analyzer.locals.len());
+ for (local, kind) in analyzer.locals.iter_enumerated() {
+ if matches!(kind, LocalKind::Memory) {
+ non_ssa_locals.insert(local);
+ }
+ }
+
+ non_ssa_locals
+}
+
+#[derive(Copy, Clone, PartialEq, Eq)]
+enum LocalKind {
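+ /// A zero-sized local that needs no storage at all.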
+ ZST,
+ /// A local that requires an alloca.
+ Memory,
+ /// A scalar or a scalar pair local that is neither defined nor used.
+ Unused,
+ /// A scalar or a scalar pair local with a single definition that dominates all uses.
+ SSA(mir::Location),
+}
+
+struct LocalAnalyzer<'mir, 'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> {
+ fx: &'mir FunctionCx<'a, 'tcx, Bx>,
+ dominators: Dominators<mir::BasicBlock>,
+ locals: IndexVec<mir::Local, LocalKind>,
+}
+
+impl<'mir, 'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> LocalAnalyzer<'mir, 'a, 'tcx, Bx> {
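+ /// Records a definition of `local` at `location`: the first assignment turns an
+ /// `Unused` candidate into `SSA`, a second assignment demotes it to `Memory`.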
+ fn assign(&mut self, local: mir::Local, location: Location) {
+ let kind = &mut self.locals[local];
+ match *kind {
+ LocalKind::ZST => {}
+ LocalKind::Memory => {}
+ LocalKind::Unused => {
+ *kind = LocalKind::SSA(location);
+ }
+ LocalKind::SSA(_) => {
+ *kind = LocalKind::Memory;
+ }
+ }
+ }
+
+ fn process_place(
+ &mut self,
+ place_ref: &mir::PlaceRef<'tcx>,
+ context: PlaceContext,
+ location: Location,
+ ) {
+ let cx = self.fx.cx;
+
+ if let Some((place_base, elem)) = place_ref.last_projection() {
+ let mut base_context = if context.is_mutating_use() {
+ PlaceContext::MutatingUse(MutatingUseContext::Projection)
+ } else {
+ PlaceContext::NonMutatingUse(NonMutatingUseContext::Projection)
+ };
+
+ // Allow uses of projections that are ZSTs or from scalar fields.
+ let is_consume = matches!(
+ context,
+ PlaceContext::NonMutatingUse(
+ NonMutatingUseContext::Copy | NonMutatingUseContext::Move,
+ )
+ );
+ if is_consume {
+ let base_ty = place_base.ty(self.fx.mir, cx.tcx());
+ let base_ty = self.fx.monomorphize(base_ty);
+
+ // ZSTs don't require any actual memory access.
+ let elem_ty = base_ty.projection_ty(cx.tcx(), self.fx.monomorphize(elem)).ty;
+ let span = self.fx.mir.local_decls[place_ref.local].source_info.span;
+ if cx.spanned_layout_of(elem_ty, span).is_zst() {
+ return;
+ }
+
+ if let mir::ProjectionElem::Field(..) = elem {
+ let layout = cx.spanned_layout_of(base_ty.ty, span);
+ if cx.is_backend_immediate(layout) || cx.is_backend_scalar_pair(layout) {
+ // Recurse with the same context, instead of `Projection`,
+ // potentially stopping at non-operand projections,
+ // which would otherwise force the base local into `LocalKind::Memory`.
+ base_context = context;
+ }
+ }
+ }
+
+ if let mir::ProjectionElem::Deref = elem {
+ // Deref projections typically only read the pointer.
+ base_context = PlaceContext::NonMutatingUse(NonMutatingUseContext::Copy);
+ }
+
+ self.process_place(&place_base, base_context, location);
+ // HACK(eddyb) this emulates the old `visit_projection_elem`; this
+ // entire `visit_place`-like `process_place` method should be rewritten,
+ // now that we have moved to the "slice of projections" representation.
+ if let mir::ProjectionElem::Index(local) = elem {
+ self.visit_local(
+ local,
+ PlaceContext::NonMutatingUse(NonMutatingUseContext::Copy),
+ location,
+ );
+ }
+ } else {
+ self.visit_local(place_ref.local, context, location);
+ }
+ }
+}
+
+impl<'mir, 'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> Visitor<'tcx>
+ for LocalAnalyzer<'mir, 'a, 'tcx, Bx>
+{
+ fn visit_assign(
+ &mut self,
+ place: &mir::Place<'tcx>,
+ rvalue: &mir::Rvalue<'tcx>,
+ location: Location,
+ ) {
+ debug!("visit_assign(place={:?}, rvalue={:?})", place, rvalue);
+
+ if let Some(local) = place.as_local() {
+ self.assign(local, location);
+ if self.locals[local] != LocalKind::Memory {
+ let decl_span = self.fx.mir.local_decls[local].source_info.span;
+ if !self.fx.rvalue_creates_operand(rvalue, decl_span) {
+ self.locals[local] = LocalKind::Memory;
+ }
+ }
+ } else {
+ self.visit_place(place, PlaceContext::MutatingUse(MutatingUseContext::Store), location);
+ }
+
+ self.visit_rvalue(rvalue, location);
+ }
+
+ fn visit_place(&mut self, place: &mir::Place<'tcx>, context: PlaceContext, location: Location) {
+ debug!("visit_place(place={:?}, context={:?})", place, context);
+ self.process_place(&place.as_ref(), context, location);
+ }
+
+ fn visit_local(&mut self, local: mir::Local, context: PlaceContext, location: Location) {
+ match context {
+ PlaceContext::MutatingUse(MutatingUseContext::Call)
+ | PlaceContext::MutatingUse(MutatingUseContext::Yield) => {
+ self.assign(local, location);
+ }
+
+ PlaceContext::NonUse(_) | PlaceContext::MutatingUse(MutatingUseContext::Retag) => {}
+
+ PlaceContext::NonMutatingUse(
+ NonMutatingUseContext::Copy | NonMutatingUseContext::Move,
+ ) => match &mut self.locals[local] {
+ LocalKind::ZST => {}
+ LocalKind::Memory => {}
+ LocalKind::SSA(def) if def.dominates(location, &self.dominators) => {}
+ // Reads from uninitialized variables (e.g., in dead code, after
+ // optimizations) require locals to be in (uninitialized) memory.
+ // N.B., there can be uninitialized reads of a local visited after
+ // an assignment to that local, if they happen on disjoint paths.
+ kind @ (LocalKind::Unused | LocalKind::SSA(_)) => {
+ *kind = LocalKind::Memory;
+ }
+ },
+
+ PlaceContext::MutatingUse(
+ MutatingUseContext::Store
+ | MutatingUseContext::Deinit
+ | MutatingUseContext::SetDiscriminant
+ | MutatingUseContext::AsmOutput
+ | MutatingUseContext::Borrow
+ | MutatingUseContext::AddressOf
+ | MutatingUseContext::Projection,
+ )
+ | PlaceContext::NonMutatingUse(
+ NonMutatingUseContext::Inspect
+ | NonMutatingUseContext::SharedBorrow
+ | NonMutatingUseContext::UniqueBorrow
+ | NonMutatingUseContext::ShallowBorrow
+ | NonMutatingUseContext::AddressOf
+ | NonMutatingUseContext::Projection,
+ ) => {
+ self.locals[local] = LocalKind::Memory;
+ }
+
+ PlaceContext::MutatingUse(MutatingUseContext::Drop) => {
+ let kind = &mut self.locals[local];
+ if *kind != LocalKind::Memory {
+ let ty = self.fx.mir.local_decls[local].ty;
+ let ty = self.fx.monomorphize(ty);
+ if self.fx.cx.type_needs_drop(ty) {
+ // Only need the place if we're actually dropping it.
+ *kind = LocalKind::Memory;
+ }
+ }
+ }
+ }
+ }
+}
+
+#[derive(Copy, Clone, Debug, PartialEq, Eq)]
+pub enum CleanupKind {
+ NotCleanup,
+ Funclet,
+ Internal { funclet: mir::BasicBlock },
+}
+
+impl CleanupKind {
+ pub fn funclet_bb(self, for_bb: mir::BasicBlock) -> Option<mir::BasicBlock> {
+ match self {
+ CleanupKind::NotCleanup => None,
+ CleanupKind::Funclet => Some(for_bb),
+ CleanupKind::Internal { funclet } => Some(funclet),
+ }
+ }
+}
+
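+/// Classifies every basic block's role in unwinding: blocks that are the direct
+/// target of an unwind edge become `Funclet`, blocks reachable from exactly one
+/// funclet become `Internal { funclet }` (and are promoted to `Funclet` if they
+/// are reached from more than one), and all remaining blocks are `NotCleanup`.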
+pub fn cleanup_kinds(mir: &mir::Body<'_>) -> IndexVec<mir::BasicBlock, CleanupKind> {
+ fn discover_masters<'tcx>(
+ result: &mut IndexVec<mir::BasicBlock, CleanupKind>,
+ mir: &mir::Body<'tcx>,
+ ) {
+ for (bb, data) in mir.basic_blocks().iter_enumerated() {
+ match data.terminator().kind {
+ TerminatorKind::Goto { .. }
+ | TerminatorKind::Resume
+ | TerminatorKind::Abort
+ | TerminatorKind::Return
+ | TerminatorKind::GeneratorDrop
+ | TerminatorKind::Unreachable
+ | TerminatorKind::SwitchInt { .. }
+ | TerminatorKind::Yield { .. }
+ | TerminatorKind::FalseEdge { .. }
+ | TerminatorKind::FalseUnwind { .. } => { /* nothing to do */ }
+ TerminatorKind::Call { cleanup: unwind, .. }
+ | TerminatorKind::InlineAsm { cleanup: unwind, .. }
+ | TerminatorKind::Assert { cleanup: unwind, .. }
+ | TerminatorKind::DropAndReplace { unwind, .. }
+ | TerminatorKind::Drop { unwind, .. } => {
+ if let Some(unwind) = unwind {
+ debug!(
+ "cleanup_kinds: {:?}/{:?} registering {:?} as funclet",
+ bb, data, unwind
+ );
+ result[unwind] = CleanupKind::Funclet;
+ }
+ }
+ }
+ }
+ }
+
+ fn propagate<'tcx>(result: &mut IndexVec<mir::BasicBlock, CleanupKind>, mir: &mir::Body<'tcx>) {
+ let mut funclet_succs = IndexVec::from_elem(None, mir.basic_blocks());
+
+ let mut set_successor = |funclet: mir::BasicBlock, succ| match funclet_succs[funclet] {
+ ref mut s @ None => {
+ debug!("set_successor: updating successor of {:?} to {:?}", funclet, succ);
+ *s = Some(succ);
+ }
+ Some(s) => {
+ if s != succ {
+ span_bug!(
+ mir.span,
+ "funclet {:?} has 2 parents - {:?} and {:?}",
+ funclet,
+ s,
+ succ
+ );
+ }
+ }
+ };
+
+ for (bb, data) in traversal::reverse_postorder(mir) {
+ let funclet = match result[bb] {
+ CleanupKind::NotCleanup => continue,
+ CleanupKind::Funclet => bb,
+ CleanupKind::Internal { funclet } => funclet,
+ };
+
+ debug!(
+ "cleanup_kinds: {:?}/{:?}/{:?} propagating funclet {:?}",
+ bb, data, result[bb], funclet
+ );
+
+ for succ in data.terminator().successors() {
+ let kind = result[succ];
+ debug!("cleanup_kinds: propagating {:?} to {:?}/{:?}", funclet, succ, kind);
+ match kind {
+ CleanupKind::NotCleanup => {
+ result[succ] = CleanupKind::Internal { funclet };
+ }
+ CleanupKind::Funclet => {
+ if funclet != succ {
+ set_successor(funclet, succ);
+ }
+ }
+ CleanupKind::Internal { funclet: succ_funclet } => {
+ if funclet != succ_funclet {
+ // `succ` has 2 different funclets going into it, so it must
+ // be a funclet by itself.
+
+ debug!(
+ "promoting {:?} to a funclet and updating {:?}",
+ succ, succ_funclet
+ );
+ result[succ] = CleanupKind::Funclet;
+ set_successor(succ_funclet, succ);
+ set_successor(funclet, succ);
+ }
+ }
+ }
+ }
+ }
+ }
+
+ let mut result = IndexVec::from_elem(CleanupKind::NotCleanup, mir.basic_blocks());
+
+ discover_masters(&mut result, mir);
+ propagate(&mut result, mir);
+ debug!("cleanup_kinds: result={:?}", result);
+ result
+}
diff --git a/compiler/rustc_codegen_ssa/src/mir/block.rs b/compiler/rustc_codegen_ssa/src/mir/block.rs
new file mode 100644
index 000000000..3eee58d9d
--- /dev/null
+++ b/compiler/rustc_codegen_ssa/src/mir/block.rs
@@ -0,0 +1,1654 @@
+use super::operand::OperandRef;
+use super::operand::OperandValue::{Immediate, Pair, Ref};
+use super::place::PlaceRef;
+use super::{FunctionCx, LocalRef};
+
+use crate::base;
+use crate::common::{self, IntPredicate};
+use crate::meth;
+use crate::traits::*;
+use crate::MemFlags;
+
+use rustc_ast as ast;
+use rustc_ast::{InlineAsmOptions, InlineAsmTemplatePiece};
+use rustc_hir::lang_items::LangItem;
+use rustc_index::vec::Idx;
+use rustc_middle::mir::AssertKind;
+use rustc_middle::mir::{self, SwitchTargets};
+use rustc_middle::ty::layout::{HasTyCtxt, LayoutOf};
+use rustc_middle::ty::print::{with_no_trimmed_paths, with_no_visible_paths};
+use rustc_middle::ty::{self, Instance, Ty, TypeVisitable};
+use rustc_span::source_map::Span;
+use rustc_span::{sym, Symbol};
+use rustc_symbol_mangling::typeid::typeid_for_fnabi;
+use rustc_target::abi::call::{ArgAbi, FnAbi, PassMode};
+use rustc_target::abi::{self, HasDataLayout, WrappingRange};
+use rustc_target::spec::abi::Abi;
+
+/// Used by `FunctionCx::codegen_terminator` for emitting common patterns
+/// e.g., creating a basic block, calling a function, etc.
+struct TerminatorCodegenHelper<'tcx> {
+ bb: mir::BasicBlock,
+ terminator: &'tcx mir::Terminator<'tcx>,
+ funclet_bb: Option<mir::BasicBlock>,
+}
+
+impl<'a, 'tcx> TerminatorCodegenHelper<'tcx> {
+ /// Returns the appropriate `Funclet` for the current funclet, if on MSVC,
+ /// either previously cached or newly created by `landing_pad_for`.
+ fn funclet<'b, Bx: BuilderMethods<'a, 'tcx>>(
+ &self,
+ fx: &'b mut FunctionCx<'a, 'tcx, Bx>,
+ ) -> Option<&'b Bx::Funclet> {
+ let funclet_bb = self.funclet_bb?;
+ if base::wants_msvc_seh(fx.cx.tcx().sess) {
+ // If `landing_pad_for` hasn't been called yet to create the `Funclet`,
+ // it has to be now. This may not seem necessary, as RPO should lead
+ // to all the unwind edges being visited (and so to `landing_pad_for`
+ // getting called for them), before building any of the blocks inside
+ // the funclet itself - however, if MIR contains edges that end up not
+ // being needed in the LLVM IR after monomorphization, the funclet may
+ // be unreachable, and we don't yet have a way to skip building it in
+ // such an eventuality (which may be a better solution than this).
+ if fx.funclets[funclet_bb].is_none() {
+ fx.landing_pad_for(funclet_bb);
+ }
+
+ Some(
+ fx.funclets[funclet_bb]
+ .as_ref()
+ .expect("landing_pad_for didn't also create funclets entry"),
+ )
+ } else {
+ None
+ }
+ }
+
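+ /// Returns the backend block corresponding to `target`, together with a flag
+ /// that is `true` when the branch leaves the current MSVC funclet for a
+ /// different one and therefore has to go through a `cleanupret`.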
+ fn lltarget<Bx: BuilderMethods<'a, 'tcx>>(
+ &self,
+ fx: &mut FunctionCx<'a, 'tcx, Bx>,
+ target: mir::BasicBlock,
+ ) -> (Bx::BasicBlock, bool) {
+ let span = self.terminator.source_info.span;
+ let lltarget = fx.llbb(target);
+ let target_funclet = fx.cleanup_kinds[target].funclet_bb(target);
+ match (self.funclet_bb, target_funclet) {
+ (None, None) => (lltarget, false),
+ (Some(f), Some(t_f)) if f == t_f || !base::wants_msvc_seh(fx.cx.tcx().sess) => {
+ (lltarget, false)
+ }
+ // jump *into* cleanup - need a landing pad if GNU, cleanup pad if MSVC
+ (None, Some(_)) => (fx.landing_pad_for(target), false),
+ (Some(_), None) => span_bug!(span, "{:?} - jump out of cleanup?", self.terminator),
+ (Some(_), Some(_)) => (fx.landing_pad_for(target), true),
+ }
+ }
+
+ /// Create a basic block.
+ fn llblock<Bx: BuilderMethods<'a, 'tcx>>(
+ &self,
+ fx: &mut FunctionCx<'a, 'tcx, Bx>,
+ target: mir::BasicBlock,
+ ) -> Bx::BasicBlock {
+ let (lltarget, is_cleanupret) = self.lltarget(fx, target);
+ if is_cleanupret {
+ // MSVC cross-funclet jump - need a trampoline
+
+ debug!("llblock: creating cleanup trampoline for {:?}", target);
+ let name = &format!("{:?}_cleanup_trampoline_{:?}", self.bb, target);
+ let trampoline = Bx::append_block(fx.cx, fx.llfn, name);
+ let mut trampoline_bx = Bx::build(fx.cx, trampoline);
+ trampoline_bx.cleanup_ret(self.funclet(fx).unwrap(), Some(lltarget));
+ trampoline
+ } else {
+ lltarget
+ }
+ }
+
+ fn funclet_br<Bx: BuilderMethods<'a, 'tcx>>(
+ &self,
+ fx: &mut FunctionCx<'a, 'tcx, Bx>,
+ bx: &mut Bx,
+ target: mir::BasicBlock,
+ ) {
+ let (lltarget, is_cleanupret) = self.lltarget(fx, target);
+ if is_cleanupret {
+ // micro-optimization: generate a `ret` rather than a jump
+ // to a trampoline.
+ bx.cleanup_ret(self.funclet(fx).unwrap(), Some(lltarget));
+ } else {
+ bx.br(lltarget);
+ }
+ }
+
+ /// Call `fn_ptr` of `fn_abi` with the arguments `llargs`, the optional
+ /// return destination `destination` and the unwind cleanup block `cleanup`.
+ fn do_call<Bx: BuilderMethods<'a, 'tcx>>(
+ &self,
+ fx: &mut FunctionCx<'a, 'tcx, Bx>,
+ bx: &mut Bx,
+ fn_abi: &'tcx FnAbi<'tcx, Ty<'tcx>>,
+ fn_ptr: Bx::Value,
+ llargs: &[Bx::Value],
+ destination: Option<(ReturnDest<'tcx, Bx::Value>, mir::BasicBlock)>,
+ cleanup: Option<mir::BasicBlock>,
+ copied_constant_arguments: &[PlaceRef<'tcx, <Bx as BackendTypes>::Value>],
+ ) {
+ // If there is a cleanup block and the function we're calling can unwind, then
+ // do an invoke, otherwise do a call.
+ let fn_ty = bx.fn_decl_backend_type(&fn_abi);
+
+ let unwind_block = if let Some(cleanup) = cleanup.filter(|_| fn_abi.can_unwind) {
+ Some(self.llblock(fx, cleanup))
+ } else if fx.mir[self.bb].is_cleanup
+ && fn_abi.can_unwind
+ && !base::wants_msvc_seh(fx.cx.tcx().sess)
+ {
+ // Exceptions must not propagate out of the execution of a cleanup (doing so
+ // can cause undefined behaviour). We insert a double unwind guard for
+ // functions that can potentially unwind to protect against this.
+ //
+ // This is not necessary for SEH which does not use successive unwinding
+ // like Itanium EH. EH frames in SEH are different from normal function
+ // frames and SEH will abort automatically if an exception tries to
+ // propagate out from cleanup.
+ Some(fx.double_unwind_guard())
+ } else {
+ None
+ };
+
+ if let Some(unwind_block) = unwind_block {
+ let ret_llbb = if let Some((_, target)) = destination {
+ fx.llbb(target)
+ } else {
+ fx.unreachable_block()
+ };
+ let invokeret =
+ bx.invoke(fn_ty, fn_ptr, &llargs, ret_llbb, unwind_block, self.funclet(fx));
+ bx.apply_attrs_callsite(&fn_abi, invokeret);
+ if fx.mir[self.bb].is_cleanup {
+ bx.do_not_inline(invokeret);
+ }
+
+ if let Some((ret_dest, target)) = destination {
+ bx.switch_to_block(fx.llbb(target));
+ fx.set_debug_loc(bx, self.terminator.source_info);
+ for tmp in copied_constant_arguments {
+ bx.lifetime_end(tmp.llval, tmp.layout.size);
+ }
+ fx.store_return(bx, ret_dest, &fn_abi.ret, invokeret);
+ }
+ } else {
+ let llret = bx.call(fn_ty, fn_ptr, &llargs, self.funclet(fx));
+ bx.apply_attrs_callsite(&fn_abi, llret);
+ if fx.mir[self.bb].is_cleanup {
+ // Cleanup is always the cold path. Don't inline
+ // drop glue. Also, when there is a deeply-nested
+ // struct, there are "symmetry" issues that cause
+ // exponential inlining - see issue #41696.
+ bx.do_not_inline(llret);
+ }
+
+ if let Some((ret_dest, target)) = destination {
+ for tmp in copied_constant_arguments {
+ bx.lifetime_end(tmp.llval, tmp.layout.size);
+ }
+ fx.store_return(bx, ret_dest, &fn_abi.ret, llret);
+ self.funclet_br(fx, bx, target);
+ } else {
+ bx.unreachable();
+ }
+ }
+ }
+
+ /// Generates inline assembly with optional `destination` and `cleanup`.
+ fn do_inlineasm<Bx: BuilderMethods<'a, 'tcx>>(
+ &self,
+ fx: &mut FunctionCx<'a, 'tcx, Bx>,
+ bx: &mut Bx,
+ template: &[InlineAsmTemplatePiece],
+ operands: &[InlineAsmOperandRef<'tcx, Bx>],
+ options: InlineAsmOptions,
+ line_spans: &[Span],
+ destination: Option<mir::BasicBlock>,
+ cleanup: Option<mir::BasicBlock>,
+ instance: Instance<'_>,
+ ) {
+ if let Some(cleanup) = cleanup {
+ let ret_llbb = if let Some(target) = destination {
+ fx.llbb(target)
+ } else {
+ fx.unreachable_block()
+ };
+
+ bx.codegen_inline_asm(
+ template,
+ &operands,
+ options,
+ line_spans,
+ instance,
+ Some((ret_llbb, self.llblock(fx, cleanup), self.funclet(fx))),
+ );
+ } else {
+ bx.codegen_inline_asm(template, &operands, options, line_spans, instance, None);
+
+ if let Some(target) = destination {
+ self.funclet_br(fx, bx, target);
+ } else {
+ bx.unreachable();
+ }
+ }
+ }
+}
+
+/// Codegen implementations for some terminator variants.
+impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
+ /// Generates code for a `Resume` terminator.
+ fn codegen_resume_terminator(&mut self, helper: TerminatorCodegenHelper<'tcx>, mut bx: Bx) {
+ if let Some(funclet) = helper.funclet(self) {
+ bx.cleanup_ret(funclet, None);
+ } else {
+ let slot = self.get_personality_slot(&mut bx);
+ let lp0 = slot.project_field(&mut bx, 0);
+ let lp0 = bx.load_operand(lp0).immediate();
+ let lp1 = slot.project_field(&mut bx, 1);
+ let lp1 = bx.load_operand(lp1).immediate();
+ slot.storage_dead(&mut bx);
+
+ let mut lp = bx.const_undef(self.landing_pad_type());
+ lp = bx.insert_value(lp, lp0, 0);
+ lp = bx.insert_value(lp, lp1, 1);
+ bx.resume(lp);
+ }
+ }
+
+ fn codegen_switchint_terminator(
+ &mut self,
+ helper: TerminatorCodegenHelper<'tcx>,
+ mut bx: Bx,
+ discr: &mir::Operand<'tcx>,
+ switch_ty: Ty<'tcx>,
+ targets: &SwitchTargets,
+ ) {
+ let discr = self.codegen_operand(&mut bx, &discr);
+ // `switch_ty` is redundant, sanity-check that.
+ assert_eq!(discr.layout.ty, switch_ty);
+ let mut target_iter = targets.iter();
+ if target_iter.len() == 1 {
+ // If there are two targets (one conditional, one fallback), emit br instead of switch
+ let (test_value, target) = target_iter.next().unwrap();
+ let lltrue = helper.llblock(self, target);
+ let llfalse = helper.llblock(self, targets.otherwise());
+ if switch_ty == bx.tcx().types.bool {
+ // Don't generate trivial icmps when switching on bool
+ match test_value {
+ 0 => bx.cond_br(discr.immediate(), llfalse, lltrue),
+ 1 => bx.cond_br(discr.immediate(), lltrue, llfalse),
+ _ => bug!(),
+ }
+ } else {
+ let switch_llty = bx.immediate_backend_type(bx.layout_of(switch_ty));
+ let llval = bx.const_uint_big(switch_llty, test_value);
+ let cmp = bx.icmp(IntPredicate::IntEQ, discr.immediate(), llval);
+ bx.cond_br(cmp, lltrue, llfalse);
+ }
+ } else {
+ bx.switch(
+ discr.immediate(),
+ helper.llblock(self, targets.otherwise()),
+ target_iter.map(|(value, target)| (value, helper.llblock(self, target))),
+ );
+ }
+ }
+
+ fn codegen_return_terminator(&mut self, mut bx: Bx) {
+ // Call `va_end` if this is the definition of a C-variadic function.
+ if self.fn_abi.c_variadic {
+ // The `VaList` "spoofed" argument is just after all the real arguments.
+ let va_list_arg_idx = self.fn_abi.args.len();
+ match self.locals[mir::Local::new(1 + va_list_arg_idx)] {
+ LocalRef::Place(va_list) => {
+ bx.va_end(va_list.llval);
+ }
+ _ => bug!("C-variadic function must have a `VaList` place"),
+ }
+ }
+ if self.fn_abi.ret.layout.abi.is_uninhabited() {
+ // Functions with uninhabited return values are marked `noreturn`,
+ // so we should make sure that we never actually return.
+ // We play it safe by using a well-defined `abort`, but we could go for immediate UB
+ // if that turns out to be helpful.
+ bx.abort();
+ // `abort` does not terminate the block, so we still need to generate
+ // an `unreachable` terminator after it.
+ bx.unreachable();
+ return;
+ }
+ let llval = match self.fn_abi.ret.mode {
+ PassMode::Ignore | PassMode::Indirect { .. } => {
+ bx.ret_void();
+ return;
+ }
+
+ PassMode::Direct(_) | PassMode::Pair(..) => {
+ let op = self.codegen_consume(&mut bx, mir::Place::return_place().as_ref());
+ if let Ref(llval, _, align) = op.val {
+ bx.load(bx.backend_type(op.layout), llval, align)
+ } else {
+ op.immediate_or_packed_pair(&mut bx)
+ }
+ }
+
+ PassMode::Cast(cast_ty) => {
+ let op = match self.locals[mir::RETURN_PLACE] {
+ LocalRef::Operand(Some(op)) => op,
+ LocalRef::Operand(None) => bug!("use of return before def"),
+ LocalRef::Place(cg_place) => OperandRef {
+ val: Ref(cg_place.llval, None, cg_place.align),
+ layout: cg_place.layout,
+ },
+ LocalRef::UnsizedPlace(_) => bug!("return type must be sized"),
+ };
+ let llslot = match op.val {
+ Immediate(_) | Pair(..) => {
+ let scratch = PlaceRef::alloca(&mut bx, self.fn_abi.ret.layout);
+ op.val.store(&mut bx, scratch);
+ scratch.llval
+ }
+ Ref(llval, _, align) => {
+ assert_eq!(align, op.layout.align.abi, "return place is unaligned!");
+ llval
+ }
+ };
+ let ty = bx.cast_backend_type(&cast_ty);
+ let addr = bx.pointercast(llslot, bx.type_ptr_to(ty));
+ bx.load(ty, addr, self.fn_abi.ret.layout.align.abi)
+ }
+ };
+ bx.ret(llval);
+ }
+
+ fn codegen_drop_terminator(
+ &mut self,
+ helper: TerminatorCodegenHelper<'tcx>,
+ mut bx: Bx,
+ location: mir::Place<'tcx>,
+ target: mir::BasicBlock,
+ unwind: Option<mir::BasicBlock>,
+ ) {
+ let ty = location.ty(self.mir, bx.tcx()).ty;
+ let ty = self.monomorphize(ty);
+ let drop_fn = Instance::resolve_drop_in_place(bx.tcx(), ty);
+
+ if let ty::InstanceDef::DropGlue(_, None) = drop_fn.def {
+ // we don't actually need to drop anything.
+ helper.funclet_br(self, &mut bx, target);
+ return;
+ }
+
+ let place = self.codegen_place(&mut bx, location.as_ref());
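+ // A thin pointer is passed as a single argument; a wide pointer (e.g. for a
+ // `dyn Trait` object or a slice) also passes its metadata (vtable or length).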
+ let (args1, args2);
+ let mut args = if let Some(llextra) = place.llextra {
+ args2 = [place.llval, llextra];
+ &args2[..]
+ } else {
+ args1 = [place.llval];
+ &args1[..]
+ };
+ let (drop_fn, fn_abi) = match ty.kind() {
+ // FIXME(eddyb) perhaps move some of this logic into
+ // `Instance::resolve_drop_in_place`?
+ ty::Dynamic(..) => {
+ let virtual_drop = Instance {
+ def: ty::InstanceDef::Virtual(drop_fn.def_id(), 0),
+ substs: drop_fn.substs,
+ };
+ let fn_abi = bx.fn_abi_of_instance(virtual_drop, ty::List::empty());
+ let vtable = args[1];
+ args = &args[..1];
+ (
+ meth::VirtualIndex::from_index(ty::COMMON_VTABLE_ENTRIES_DROPINPLACE)
+ .get_fn(&mut bx, vtable, ty, &fn_abi),
+ fn_abi,
+ )
+ }
+ _ => (bx.get_fn_addr(drop_fn), bx.fn_abi_of_instance(drop_fn, ty::List::empty())),
+ };
+ helper.do_call(
+ self,
+ &mut bx,
+ fn_abi,
+ drop_fn,
+ args,
+ Some((ReturnDest::Nothing, target)),
+ unwind,
+ &[],
+ );
+ }
+
+ fn codegen_assert_terminator(
+ &mut self,
+ helper: TerminatorCodegenHelper<'tcx>,
+ mut bx: Bx,
+ terminator: &mir::Terminator<'tcx>,
+ cond: &mir::Operand<'tcx>,
+ expected: bool,
+ msg: &mir::AssertMessage<'tcx>,
+ target: mir::BasicBlock,
+ cleanup: Option<mir::BasicBlock>,
+ ) {
+ let span = terminator.source_info.span;
+ let cond = self.codegen_operand(&mut bx, cond).immediate();
+ let mut const_cond = bx.const_to_opt_u128(cond, false).map(|c| c == 1);
+
+ // This case can currently arise only from functions marked
+ // with #[rustc_inherit_overflow_checks] and inlined from
+ // another crate (mostly core::num generic/#[inline] fns),
+ // while the current crate doesn't use overflow checks.
+ // NOTE: Unlike binops, negation doesn't have its own
+ // checked operation, just a comparison with the minimum
+ // value, so we have to check for the assert message.
+ if !bx.check_overflow() {
+ if let AssertKind::OverflowNeg(_) = *msg {
+ const_cond = Some(expected);
+ }
+ }
+
+ // Don't codegen the panic block if success is known.
+ if const_cond == Some(expected) {
+ helper.funclet_br(self, &mut bx, target);
+ return;
+ }
+
+ // Pass the condition through llvm.expect for branch hinting.
+ let cond = bx.expect(cond, expected);
+
+ // Create the failure block and the conditional branch to it.
+ let lltarget = helper.llblock(self, target);
+ let panic_block = bx.append_sibling_block("panic");
+ if expected {
+ bx.cond_br(cond, lltarget, panic_block);
+ } else {
+ bx.cond_br(cond, panic_block, lltarget);
+ }
+
+ // After this point, bx is the block for the call to panic.
+ bx.switch_to_block(panic_block);
+ self.set_debug_loc(&mut bx, terminator.source_info);
+
+ // Get the location information.
+ let location = self.get_caller_location(&mut bx, terminator.source_info).immediate();
+
+ // Put together the arguments to the panic entry point.
+ let (lang_item, args) = match msg {
+ AssertKind::BoundsCheck { ref len, ref index } => {
+ let len = self.codegen_operand(&mut bx, len).immediate();
+ let index = self.codegen_operand(&mut bx, index).immediate();
+ // It's `fn panic_bounds_check(index: usize, len: usize)`,
+ // and `#[track_caller]` adds an implicit third argument.
+ (LangItem::PanicBoundsCheck, vec![index, len, location])
+ }
+ _ => {
+ let msg = bx.const_str(msg.description());
+ // It's `pub fn panic(expr: &str)`, with the wide reference being passed
+ // as two arguments, and `#[track_caller]` adds an implicit third argument.
+ (LangItem::Panic, vec![msg.0, msg.1, location])
+ }
+ };
+
+ let (fn_abi, llfn) = common::build_langcall(&bx, Some(span), lang_item);
+
+ // Codegen the actual panic invoke/call.
+ helper.do_call(self, &mut bx, fn_abi, llfn, &args, None, cleanup, &[]);
+ }
+
+ fn codegen_abort_terminator(
+ &mut self,
+ helper: TerminatorCodegenHelper<'tcx>,
+ mut bx: Bx,
+ terminator: &mir::Terminator<'tcx>,
+ ) {
+ let span = terminator.source_info.span;
+ self.set_debug_loc(&mut bx, terminator.source_info);
+
+ // Obtain the panic entry point.
+ let (fn_abi, llfn) = common::build_langcall(&bx, Some(span), LangItem::PanicNoUnwind);
+
+ // Codegen the actual panic invoke/call.
+ helper.do_call(self, &mut bx, fn_abi, llfn, &[], None, None, &[]);
+ }
+
+ /// Returns `true` if this is indeed a panic intrinsic and codegen is done.
+ fn codegen_panic_intrinsic(
+ &mut self,
+ helper: &TerminatorCodegenHelper<'tcx>,
+ bx: &mut Bx,
+ intrinsic: Option<Symbol>,
+ instance: Option<Instance<'tcx>>,
+ source_info: mir::SourceInfo,
+ target: Option<mir::BasicBlock>,
+ cleanup: Option<mir::BasicBlock>,
+ ) -> bool {
+ // Emit a panic or a no-op for `assert_*` intrinsics.
+ // These are intrinsics that compile to panics so that we can get a message
+ // which mentions the offending type, even from a const context.
+ #[derive(Debug, PartialEq)]
+ enum AssertIntrinsic {
+ Inhabited,
+ ZeroValid,
+ UninitValid,
+ }
+ let panic_intrinsic = intrinsic.and_then(|i| match i {
+ sym::assert_inhabited => Some(AssertIntrinsic::Inhabited),
+ sym::assert_zero_valid => Some(AssertIntrinsic::ZeroValid),
+ sym::assert_uninit_valid => Some(AssertIntrinsic::UninitValid),
+ _ => None,
+ });
+ if let Some(intrinsic) = panic_intrinsic {
+ use AssertIntrinsic::*;
+
+ let ty = instance.unwrap().substs.type_at(0);
+ let layout = bx.layout_of(ty);
+ let do_panic = match intrinsic {
+ Inhabited => layout.abi.is_uninhabited(),
+ ZeroValid => !bx.tcx().permits_zero_init(layout),
+ UninitValid => !bx.tcx().permits_uninit_init(layout),
+ };
+ if do_panic {
+ let msg_str = with_no_visible_paths!({
+ with_no_trimmed_paths!({
+ if layout.abi.is_uninhabited() {
+ // Use this error even for the other intrinsics as it is more precise.
+ format!("attempted to instantiate uninhabited type `{}`", ty)
+ } else if intrinsic == ZeroValid {
+ format!("attempted to zero-initialize type `{}`, which is invalid", ty)
+ } else {
+ format!(
+ "attempted to leave type `{}` uninitialized, which is invalid",
+ ty
+ )
+ }
+ })
+ });
+ let msg = bx.const_str(&msg_str);
+ let location = self.get_caller_location(bx, source_info).immediate();
+
+ // Obtain the panic entry point.
+ let (fn_abi, llfn) =
+ common::build_langcall(bx, Some(source_info.span), LangItem::Panic);
+
+ // Codegen the actual panic invoke/call.
+ helper.do_call(
+ self,
+ bx,
+ fn_abi,
+ llfn,
+ &[msg.0, msg.1, location],
+ target.as_ref().map(|bb| (ReturnDest::Nothing, *bb)),
+ cleanup,
+ &[],
+ );
+ } else {
+ // a NOP
+ let target = target.unwrap();
+ helper.funclet_br(self, bx, target)
+ }
+ true
+ } else {
+ false
+ }
+ }
+
+ fn codegen_call_terminator(
+ &mut self,
+ helper: TerminatorCodegenHelper<'tcx>,
+ mut bx: Bx,
+ terminator: &mir::Terminator<'tcx>,
+ func: &mir::Operand<'tcx>,
+ args: &[mir::Operand<'tcx>],
+ destination: mir::Place<'tcx>,
+ target: Option<mir::BasicBlock>,
+ cleanup: Option<mir::BasicBlock>,
+ fn_span: Span,
+ ) {
+ let source_info = terminator.source_info;
+ let span = source_info.span;
+
+ // Create the callee. This is a fn ptr or zero-sized and hence a kind of scalar.
+ let callee = self.codegen_operand(&mut bx, func);
+
+ let (instance, mut llfn) = match *callee.layout.ty.kind() {
+ ty::FnDef(def_id, substs) => (
+ Some(
+ ty::Instance::resolve(bx.tcx(), ty::ParamEnv::reveal_all(), def_id, substs)
+ .unwrap()
+ .unwrap()
+ .polymorphize(bx.tcx()),
+ ),
+ None,
+ ),
+ ty::FnPtr(_) => (None, Some(callee.immediate())),
+ _ => bug!("{} is not callable", callee.layout.ty),
+ };
+ let def = instance.map(|i| i.def);
+
+ if let Some(ty::InstanceDef::DropGlue(_, None)) = def {
+ // Empty drop glue; a no-op.
+ let target = target.unwrap();
+ helper.funclet_br(self, &mut bx, target);
+ return;
+ }
+
+ // FIXME(eddyb) avoid computing this if possible, when `instance` is
+ // available - right now `sig` is only needed for getting the `abi`
+ // and figuring out how many extra args were passed to a C-variadic `fn`.
+ let sig = callee.layout.ty.fn_sig(bx.tcx());
+ let abi = sig.abi();
+
+ // Handle, ourselves, the intrinsics that the old codegen wanted `Expr`s for.
+ let intrinsic = match def {
+ Some(ty::InstanceDef::Intrinsic(def_id)) => Some(bx.tcx().item_name(def_id)),
+ _ => None,
+ };
+
+ let extra_args = &args[sig.inputs().skip_binder().len()..];
+ let extra_args = bx.tcx().mk_type_list(extra_args.iter().map(|op_arg| {
+ let op_ty = op_arg.ty(self.mir, bx.tcx());
+ self.monomorphize(op_ty)
+ }));
+
+ let fn_abi = match instance {
+ Some(instance) => bx.fn_abi_of_instance(instance, extra_args),
+ None => bx.fn_abi_of_fn_ptr(sig, extra_args),
+ };
+
+ if intrinsic == Some(sym::transmute) {
+ if let Some(target) = target {
+ self.codegen_transmute(&mut bx, &args[0], destination);
+ helper.funclet_br(self, &mut bx, target);
+ } else {
+ // If we are trying to transmute to an uninhabited type,
+ // it is likely there is no allotted destination. In fact,
+ // transmuting to an uninhabited type is UB, which means
+ // we can do what we like. Here, we declare that transmuting
+ // into an uninhabited type is impossible, so anything following
+ // it must be unreachable.
+ assert_eq!(fn_abi.ret.layout.abi, abi::Abi::Uninhabited);
+ bx.unreachable();
+ }
+ return;
+ }
+
+ if self.codegen_panic_intrinsic(
+ &helper,
+ &mut bx,
+ intrinsic,
+ instance,
+ source_info,
+ target,
+ cleanup,
+ ) {
+ return;
+ }
+
+ // The arguments we'll be passing. Plus one to account for outptr, if used.
+ let arg_count = fn_abi.args.len() + fn_abi.ret.is_indirect() as usize;
+ let mut llargs = Vec::with_capacity(arg_count);
+
+ // Prepare the return value destination
+ let ret_dest = if target.is_some() {
+ let is_intrinsic = intrinsic.is_some();
+ self.make_return_dest(&mut bx, destination, &fn_abi.ret, &mut llargs, is_intrinsic)
+ } else {
+ ReturnDest::Nothing
+ };
+
+ if intrinsic == Some(sym::caller_location) {
+ if let Some(target) = target {
+ let location = self
+ .get_caller_location(&mut bx, mir::SourceInfo { span: fn_span, ..source_info });
+
+ if let ReturnDest::IndirectOperand(tmp, _) = ret_dest {
+ location.val.store(&mut bx, tmp);
+ }
+ self.store_return(&mut bx, ret_dest, &fn_abi.ret, location.immediate());
+ helper.funclet_br(self, &mut bx, target);
+ }
+ return;
+ }
+
+ match intrinsic {
+ None | Some(sym::drop_in_place) => {}
+ Some(sym::copy_nonoverlapping) => unreachable!(),
+ Some(intrinsic) => {
+ let dest = match ret_dest {
+ _ if fn_abi.ret.is_indirect() => llargs[0],
+ ReturnDest::Nothing => {
+ bx.const_undef(bx.type_ptr_to(bx.arg_memory_ty(&fn_abi.ret)))
+ }
+ ReturnDest::IndirectOperand(dst, _) | ReturnDest::Store(dst) => dst.llval,
+ ReturnDest::DirectOperand(_) => {
+ bug!("Cannot use direct operand with an intrinsic call")
+ }
+ };
+
+ let args: Vec<_> = args
+ .iter()
+ .enumerate()
+ .map(|(i, arg)| {
+ // The indices passed to simd_shuffle* in the
+ // third argument must be constant. This is
+ // checked by const-qualification, which also
+ // promotes any complex rvalues to constants.
+ if i == 2 && intrinsic.as_str().starts_with("simd_shuffle") {
+ if let mir::Operand::Constant(constant) = arg {
+ let c = self.eval_mir_constant(constant);
+ let (llval, ty) = self.simd_shuffle_indices(
+ &bx,
+ constant.span,
+ self.monomorphize(constant.ty()),
+ c,
+ );
+ return OperandRef {
+ val: Immediate(llval),
+ layout: bx.layout_of(ty),
+ };
+ } else {
+ span_bug!(span, "shuffle indices must be constant");
+ }
+ }
+
+ self.codegen_operand(&mut bx, arg)
+ })
+ .collect();
+
+ Self::codegen_intrinsic_call(
+ &mut bx,
+ *instance.as_ref().unwrap(),
+ &fn_abi,
+ &args,
+ dest,
+ span,
+ );
+
+ if let ReturnDest::IndirectOperand(dst, _) = ret_dest {
+ self.store_return(&mut bx, ret_dest, &fn_abi.ret, dst.llval);
+ }
+
+ if let Some(target) = target {
+ helper.funclet_br(self, &mut bx, target);
+ } else {
+ bx.unreachable();
+ }
+
+ return;
+ }
+ }
+
+ // Split the rust-call tupled arguments off.
+ let (first_args, untuple) = if abi == Abi::RustCall && !args.is_empty() {
+ let (tup, args) = args.split_last().unwrap();
+ (args, Some(tup))
+ } else {
+ (args, None)
+ };
+
+ let mut copied_constant_arguments = vec![];
+ 'make_args: for (i, arg) in first_args.iter().enumerate() {
+ let mut op = self.codegen_operand(&mut bx, arg);
+
+ if let (0, Some(ty::InstanceDef::Virtual(_, idx))) = (i, def) {
+ if let Pair(..) = op.val {
+ // In the case of Rc<Self>, we need to explicitly pass a
+ // *mut RcBox<Self> with a Scalar (not ScalarPair) ABI. This is a hack
+ // that is understood elsewhere in the compiler as a method on
+ // `dyn Trait`.
+ // To get a `*mut RcBox<Self>`, we just keep unwrapping newtypes until
+ // we get a value of a built-in pointer type
+ 'descend_newtypes: while !op.layout.ty.is_unsafe_ptr()
+ && !op.layout.ty.is_region_ptr()
+ {
+ for i in 0..op.layout.fields.count() {
+ let field = op.extract_field(&mut bx, i);
+ if !field.layout.is_zst() {
+ // we found the one non-zero-sized field that is allowed;
+ // now find *its* non-zero-sized field, or stop if it's a
+ // pointer
+ op = field;
+ continue 'descend_newtypes;
+ }
+ }
+
+ span_bug!(span, "receiver has no non-zero-sized fields {:?}", op);
+ }
+
+ // now that we have `*dyn Trait` or `&dyn Trait`, split it up into its
+ // data pointer and vtable. Look up the method in the vtable, and pass
+ // the data pointer as the first argument
+ match op.val {
+ Pair(data_ptr, meta) => {
+ llfn = Some(meth::VirtualIndex::from_index(idx).get_fn(
+ &mut bx,
+ meta,
+ op.layout.ty,
+ &fn_abi,
+ ));
+ llargs.push(data_ptr);
+ continue 'make_args;
+ }
+ other => bug!("expected a Pair, got {:?}", other),
+ }
+ } else if let Ref(data_ptr, Some(meta), _) = op.val {
+ // by-value dynamic dispatch
+ llfn = Some(meth::VirtualIndex::from_index(idx).get_fn(
+ &mut bx,
+ meta,
+ op.layout.ty,
+ &fn_abi,
+ ));
+ llargs.push(data_ptr);
+ continue;
+ } else {
+ span_bug!(span, "can't codegen a virtual call on {:?}", op);
+ }
+ }
+
+ // The callee needs to own the argument memory if we pass it
+ // by-ref, so make a local copy of non-immediate constants.
+ match (arg, op.val) {
+ (&mir::Operand::Copy(_), Ref(_, None, _))
+ | (&mir::Operand::Constant(_), Ref(_, None, _)) => {
+ let tmp = PlaceRef::alloca(&mut bx, op.layout);
+ bx.lifetime_start(tmp.llval, tmp.layout.size);
+ op.val.store(&mut bx, tmp);
+ op.val = Ref(tmp.llval, None, tmp.align);
+ copied_constant_arguments.push(tmp);
+ }
+ _ => {}
+ }
+
+ self.codegen_argument(&mut bx, op, &mut llargs, &fn_abi.args[i]);
+ }
+ let num_untupled = untuple.map(|tup| {
+ self.codegen_arguments_untupled(
+ &mut bx,
+ tup,
+ &mut llargs,
+ &fn_abi.args[first_args.len()..],
+ )
+ });
+
+ let needs_location =
+ instance.map_or(false, |i| i.def.requires_caller_location(self.cx.tcx()));
+ if needs_location {
+ let mir_args = if let Some(num_untupled) = num_untupled {
+ first_args.len() + num_untupled
+ } else {
+ args.len()
+ };
+ assert_eq!(
+ fn_abi.args.len(),
+ mir_args + 1,
+ "#[track_caller] fn's must have 1 more argument in their ABI than in their MIR: {:?} {:?} {:?}",
+ instance,
+ fn_span,
+ fn_abi,
+ );
+ let location =
+ self.get_caller_location(&mut bx, mir::SourceInfo { span: fn_span, ..source_info });
+ debug!(
+ "codegen_call_terminator({:?}): location={:?} (fn_span {:?})",
+ terminator, location, fn_span
+ );
+
+ let last_arg = fn_abi.args.last().unwrap();
+ self.codegen_argument(&mut bx, location, &mut llargs, last_arg);
+ }
+
+ let (is_indirect_call, fn_ptr) = match (llfn, instance) {
+ (Some(llfn), _) => (true, llfn),
+ (None, Some(instance)) => (false, bx.get_fn_addr(instance)),
+ _ => span_bug!(span, "no llfn for call"),
+ };
+
+ // For backends that support CFI using type membership (i.e., testing whether a given
+ // pointer is associated with a type identifier).
+ if bx.tcx().sess.is_sanitizer_cfi_enabled() && is_indirect_call {
+ // Emit type metadata and checks.
+ // FIXME(rcvalle): Add support for generalized identifiers.
+ // FIXME(rcvalle): Create distinct unnamed MDNodes for internal identifiers.
+ let typeid = typeid_for_fnabi(bx.tcx(), fn_abi);
+ let typeid_metadata = self.cx.typeid_metadata(typeid);
+
+ // Test whether the function pointer is associated with the type identifier.
+ let cond = bx.type_test(fn_ptr, typeid_metadata);
+ let bb_pass = bx.append_sibling_block("type_test.pass");
+ let bb_fail = bx.append_sibling_block("type_test.fail");
+ bx.cond_br(cond, bb_pass, bb_fail);
+
+ bx.switch_to_block(bb_pass);
+ helper.do_call(
+ self,
+ &mut bx,
+ fn_abi,
+ fn_ptr,
+ &llargs,
+ target.as_ref().map(|&target| (ret_dest, target)),
+ cleanup,
+ &copied_constant_arguments,
+ );
+
+ bx.switch_to_block(bb_fail);
+ bx.abort();
+ bx.unreachable();
+
+ return;
+ }
+
+ helper.do_call(
+ self,
+ &mut bx,
+ fn_abi,
+ fn_ptr,
+ &llargs,
+ target.as_ref().map(|&target| (ret_dest, target)),
+ cleanup,
+ &copied_constant_arguments,
+ );
+ }
+
+ fn codegen_asm_terminator(
+ &mut self,
+ helper: TerminatorCodegenHelper<'tcx>,
+ mut bx: Bx,
+ terminator: &mir::Terminator<'tcx>,
+ template: &[ast::InlineAsmTemplatePiece],
+ operands: &[mir::InlineAsmOperand<'tcx>],
+ options: ast::InlineAsmOptions,
+ line_spans: &[Span],
+ destination: Option<mir::BasicBlock>,
+ cleanup: Option<mir::BasicBlock>,
+ instance: Instance<'_>,
+ ) {
+ let span = terminator.source_info.span;
+
+ let operands: Vec<_> = operands
+ .iter()
+ .map(|op| match *op {
+ mir::InlineAsmOperand::In { reg, ref value } => {
+ let value = self.codegen_operand(&mut bx, value);
+ InlineAsmOperandRef::In { reg, value }
+ }
+ mir::InlineAsmOperand::Out { reg, late, ref place } => {
+ let place = place.map(|place| self.codegen_place(&mut bx, place.as_ref()));
+ InlineAsmOperandRef::Out { reg, late, place }
+ }
+ mir::InlineAsmOperand::InOut { reg, late, ref in_value, ref out_place } => {
+ let in_value = self.codegen_operand(&mut bx, in_value);
+ let out_place =
+ out_place.map(|out_place| self.codegen_place(&mut bx, out_place.as_ref()));
+ InlineAsmOperandRef::InOut { reg, late, in_value, out_place }
+ }
+ mir::InlineAsmOperand::Const { ref value } => {
+ let const_value = self
+ .eval_mir_constant(value)
+ .unwrap_or_else(|_| span_bug!(span, "asm const cannot be resolved"));
+ let string = common::asm_const_to_str(
+ bx.tcx(),
+ span,
+ const_value,
+ bx.layout_of(value.ty()),
+ );
+ InlineAsmOperandRef::Const { string }
+ }
+ mir::InlineAsmOperand::SymFn { ref value } => {
+ let literal = self.monomorphize(value.literal);
+ if let ty::FnDef(def_id, substs) = *literal.ty().kind() {
+ let instance = ty::Instance::resolve_for_fn_ptr(
+ bx.tcx(),
+ ty::ParamEnv::reveal_all(),
+ def_id,
+ substs,
+ )
+ .unwrap();
+ InlineAsmOperandRef::SymFn { instance }
+ } else {
+ span_bug!(span, "invalid type for asm sym (fn)");
+ }
+ }
+ mir::InlineAsmOperand::SymStatic { def_id } => {
+ InlineAsmOperandRef::SymStatic { def_id }
+ }
+ })
+ .collect();
+
+ helper.do_inlineasm(
+ self,
+ &mut bx,
+ template,
+ &operands,
+ options,
+ line_spans,
+ destination,
+ cleanup,
+ instance,
+ );
+ }
+}
+
+impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
+ pub fn codegen_block(&mut self, bb: mir::BasicBlock) {
+ let llbb = self.llbb(bb);
+ let mut bx = Bx::build(self.cx, llbb);
+ let mir = self.mir;
+ let data = &mir[bb];
+
+ debug!("codegen_block({:?}={:?})", bb, data);
+
+ for statement in &data.statements {
+ bx = self.codegen_statement(bx, statement);
+ }
+
+ self.codegen_terminator(bx, bb, data.terminator());
+ }
+
+ fn codegen_terminator(
+ &mut self,
+ mut bx: Bx,
+ bb: mir::BasicBlock,
+ terminator: &'tcx mir::Terminator<'tcx>,
+ ) {
+ debug!("codegen_terminator: {:?}", terminator);
+
+ // Create the cleanup bundle, if needed.
+ let funclet_bb = self.cleanup_kinds[bb].funclet_bb(bb);
+ let helper = TerminatorCodegenHelper { bb, terminator, funclet_bb };
+
+ self.set_debug_loc(&mut bx, terminator.source_info);
+ match terminator.kind {
+ mir::TerminatorKind::Resume => self.codegen_resume_terminator(helper, bx),
+
+ mir::TerminatorKind::Abort => {
+ self.codegen_abort_terminator(helper, bx, terminator);
+ }
+
+ mir::TerminatorKind::Goto { target } => {
+ helper.funclet_br(self, &mut bx, target);
+ }
+
+ mir::TerminatorKind::SwitchInt { ref discr, switch_ty, ref targets } => {
+ self.codegen_switchint_terminator(helper, bx, discr, switch_ty, targets);
+ }
+
+ mir::TerminatorKind::Return => {
+ self.codegen_return_terminator(bx);
+ }
+
+ mir::TerminatorKind::Unreachable => {
+ bx.unreachable();
+ }
+
+ mir::TerminatorKind::Drop { place, target, unwind } => {
+ self.codegen_drop_terminator(helper, bx, place, target, unwind);
+ }
+
+ mir::TerminatorKind::Assert { ref cond, expected, ref msg, target, cleanup } => {
+ self.codegen_assert_terminator(
+ helper, bx, terminator, cond, expected, msg, target, cleanup,
+ );
+ }
+
+ mir::TerminatorKind::DropAndReplace { .. } => {
+ bug!("undesugared DropAndReplace in codegen: {:?}", terminator);
+ }
+
+ mir::TerminatorKind::Call {
+ ref func,
+ ref args,
+ destination,
+ target,
+ cleanup,
+ from_hir_call: _,
+ fn_span,
+ } => {
+ self.codegen_call_terminator(
+ helper,
+ bx,
+ terminator,
+ func,
+ args,
+ destination,
+ target,
+ cleanup,
+ fn_span,
+ );
+ }
+ mir::TerminatorKind::GeneratorDrop | mir::TerminatorKind::Yield { .. } => {
+ bug!("generator ops in codegen")
+ }
+ mir::TerminatorKind::FalseEdge { .. } | mir::TerminatorKind::FalseUnwind { .. } => {
+ bug!("borrowck false edges in codegen")
+ }
+
+ mir::TerminatorKind::InlineAsm {
+ template,
+ ref operands,
+ options,
+ line_spans,
+ destination,
+ cleanup,
+ } => {
+ self.codegen_asm_terminator(
+ helper,
+ bx,
+ terminator,
+ template,
+ operands,
+ options,
+ line_spans,
+ destination,
+ cleanup,
+ self.instance,
+ );
+ }
+ }
+ }
+
+ fn codegen_argument(
+ &mut self,
+ bx: &mut Bx,
+ op: OperandRef<'tcx, Bx::Value>,
+ llargs: &mut Vec<Bx::Value>,
+ arg: &ArgAbi<'tcx, Ty<'tcx>>,
+ ) {
+ // Fill padding with undef value, where applicable.
+ if let Some(ty) = arg.pad {
+ llargs.push(bx.const_undef(bx.reg_backend_type(&ty)))
+ }
+
+ if arg.is_ignore() {
+ return;
+ }
+
+ if let PassMode::Pair(..) = arg.mode {
+ match op.val {
+ Pair(a, b) => {
+ llargs.push(a);
+ llargs.push(b);
+ return;
+ }
+ _ => bug!("codegen_argument: {:?} invalid for pair argument", op),
+ }
+ } else if arg.is_unsized_indirect() {
+ match op.val {
+ Ref(a, Some(b), _) => {
+ llargs.push(a);
+ llargs.push(b);
+ return;
+ }
+ _ => bug!("codegen_argument: {:?} invalid for unsized indirect argument", op),
+ }
+ }
+
+ // Force by-ref if we have to load through a cast pointer.
+ let (mut llval, align, by_ref) = match op.val {
+ Immediate(_) | Pair(..) => match arg.mode {
+ PassMode::Indirect { .. } | PassMode::Cast(_) => {
+ let scratch = PlaceRef::alloca(bx, arg.layout);
+ op.val.store(bx, scratch);
+ (scratch.llval, scratch.align, true)
+ }
+ _ => (op.immediate_or_packed_pair(bx), arg.layout.align.abi, false),
+ },
+ Ref(llval, _, align) => {
+ if arg.is_indirect() && align < arg.layout.align.abi {
+ // `foo(packed.large_field)`. We can't pass the (unaligned) field directly. I
+ // think that ATM (Rust 1.16) we only pass temporaries, but we shouldn't
+ // have scary latent bugs around.
+
+ let scratch = PlaceRef::alloca(bx, arg.layout);
+ base::memcpy_ty(
+ bx,
+ scratch.llval,
+ scratch.align,
+ llval,
+ align,
+ op.layout,
+ MemFlags::empty(),
+ );
+ (scratch.llval, scratch.align, true)
+ } else {
+ (llval, align, true)
+ }
+ }
+ };
+
+ if by_ref && !arg.is_indirect() {
+ // Have to load the argument, maybe while casting it.
+ if let PassMode::Cast(ty) = arg.mode {
+ let llty = bx.cast_backend_type(&ty);
+ let addr = bx.pointercast(llval, bx.type_ptr_to(llty));
+ llval = bx.load(llty, addr, align.min(arg.layout.align.abi));
+ } else {
+ // We can't use `PlaceRef::load` here because the argument
+ // may have a type we don't treat as immediate, but the ABI
+ // used for this call is passing it by-value. In that case,
+ // the load would just produce `OperandValue::Ref` instead
+ // of the `OperandValue::Immediate` we need for the call.
+ llval = bx.load(bx.backend_type(arg.layout), llval, align);
+ if let abi::Abi::Scalar(scalar) = arg.layout.abi {
+ if scalar.is_bool() {
+ bx.range_metadata(llval, WrappingRange { start: 0, end: 1 });
+ }
+ }
+ // We store bools as `i8` so we need to truncate to `i1`.
+ llval = bx.to_immediate(llval, arg.layout);
+ }
+ }
+
+ llargs.push(llval);
+ }
+
+ fn codegen_arguments_untupled(
+ &mut self,
+ bx: &mut Bx,
+ operand: &mir::Operand<'tcx>,
+ llargs: &mut Vec<Bx::Value>,
+ args: &[ArgAbi<'tcx, Ty<'tcx>>],
+ ) -> usize {
+ let tuple = self.codegen_operand(bx, operand);
+
+ // Handle both by-ref and immediate tuples.
+ if let Ref(llval, None, align) = tuple.val {
+ let tuple_ptr = PlaceRef::new_sized_aligned(llval, tuple.layout, align);
+ for i in 0..tuple.layout.fields.count() {
+ let field_ptr = tuple_ptr.project_field(bx, i);
+ let field = bx.load_operand(field_ptr);
+ self.codegen_argument(bx, field, llargs, &args[i]);
+ }
+ } else if let Ref(_, Some(_), _) = tuple.val {
+ bug!("closure arguments must be sized")
+ } else {
+ // If the tuple is immediate, the elements are as well.
+ for i in 0..tuple.layout.fields.count() {
+ let op = tuple.extract_field(bx, i);
+ self.codegen_argument(bx, op, llargs, &args[i]);
+ }
+ }
+ tuple.layout.fields.count()
+ }
+
+ fn get_caller_location(
+ &mut self,
+ bx: &mut Bx,
+ mut source_info: mir::SourceInfo,
+ ) -> OperandRef<'tcx, Bx::Value> {
+ let tcx = bx.tcx();
+
+ let mut span_to_caller_location = |span: Span| {
+ let topmost = span.ctxt().outer_expn().expansion_cause().unwrap_or(span);
+ let caller = tcx.sess.source_map().lookup_char_pos(topmost.lo());
+ let const_loc = tcx.const_caller_location((
+ Symbol::intern(&caller.file.name.prefer_remapped().to_string_lossy()),
+ caller.line as u32,
+ caller.col_display as u32 + 1,
+ ));
+ OperandRef::from_const(bx, const_loc, bx.tcx().caller_location_ty())
+ };
+
+ // Walk up the `SourceScope`s, in case some of them are from MIR inlining.
+ // If so, the starting `source_info.span` is in the innermost inlined
+ // function, and will be replaced with outer callsite spans as long
+ // as the inlined functions were `#[track_caller]`.
+ loop {
+ let scope_data = &self.mir.source_scopes[source_info.scope];
+
+ if let Some((callee, callsite_span)) = scope_data.inlined {
+ // Stop inside the most nested non-`#[track_caller]` function,
+ // before ever reaching its caller (which is irrelevant).
+ if !callee.def.requires_caller_location(tcx) {
+ return span_to_caller_location(source_info.span);
+ }
+ source_info.span = callsite_span;
+ }
+
+ // Skip past all of the parents with `inlined: None`.
+ match scope_data.inlined_parent_scope {
+ Some(parent) => source_info.scope = parent,
+ None => break,
+ }
+ }
+
+ // No inlined `SourceScope`s, or all of them were `#[track_caller]`.
+ self.caller_location.unwrap_or_else(|| span_to_caller_location(source_info.span))
+ }
+
+ fn get_personality_slot(&mut self, bx: &mut Bx) -> PlaceRef<'tcx, Bx::Value> {
+ let cx = bx.cx();
+ if let Some(slot) = self.personality_slot {
+ slot
+ } else {
+ let layout = cx.layout_of(
+ cx.tcx().intern_tup(&[cx.tcx().mk_mut_ptr(cx.tcx().types.u8), cx.tcx().types.i32]),
+ );
+ let slot = PlaceRef::alloca(bx, layout);
+ self.personality_slot = Some(slot);
+ slot
+ }
+ }
+
+ /// Returns the landing/cleanup pad wrapper around the given basic block.
+ // FIXME(eddyb) rename this to `eh_pad_for`.
+ fn landing_pad_for(&mut self, bb: mir::BasicBlock) -> Bx::BasicBlock {
+ if let Some(landing_pad) = self.landing_pads[bb] {
+ return landing_pad;
+ }
+
+ let landing_pad = self.landing_pad_for_uncached(bb);
+ self.landing_pads[bb] = Some(landing_pad);
+ landing_pad
+ }
+
+ // FIXME(eddyb) rename this to `eh_pad_for_uncached`.
+ fn landing_pad_for_uncached(&mut self, bb: mir::BasicBlock) -> Bx::BasicBlock {
+ let llbb = self.llbb(bb);
+ if base::wants_msvc_seh(self.cx.sess()) {
+ let funclet;
+ let ret_llbb;
+ match self.mir[bb].terminator.as_ref().map(|t| &t.kind) {
+ // This is a basic block that we're aborting the program for,
+ // notably in an `extern` function. These basic blocks are inserted
+ // so that we assert that `extern` functions do indeed not panic,
+ // and if they do we abort the process.
+ //
+ // On MSVC these are tricky though (where we're doing funclets). If
+ // we were to do a cleanuppad (like below) the normal functions like
+ // `longjmp` would trigger the abort logic, terminating the
+ // program. Instead we insert the equivalent of `catch(...)` for C++
+ // which magically doesn't trigger when `longjmp` flies over this
+ // frame.
+ //
+ // Lots more discussion can be found on #48251 but this codegen is
+ // modeled after clang's for:
+ //
+ // try {
+ // foo();
+ // } catch (...) {
+ // bar();
+ // }
+ Some(&mir::TerminatorKind::Abort) => {
+ let cs_bb =
+ Bx::append_block(self.cx, self.llfn, &format!("cs_funclet{:?}", bb));
+ let cp_bb =
+ Bx::append_block(self.cx, self.llfn, &format!("cp_funclet{:?}", bb));
+ ret_llbb = cs_bb;
+
+ let mut cs_bx = Bx::build(self.cx, cs_bb);
+ let cs = cs_bx.catch_switch(None, None, &[cp_bb]);
+
+ // The "null" here is actually an RTTI type descriptor for the
+ // C++ personality function, but `catch (...)` has no type so
+ // it's null. The 64 here is actually a bitfield which
+ // represents that this is a catch-all block.
+ let mut cp_bx = Bx::build(self.cx, cp_bb);
+ let null = cp_bx.const_null(
+ cp_bx.type_i8p_ext(cp_bx.cx().data_layout().instruction_address_space),
+ );
+ let sixty_four = cp_bx.const_i32(64);
+ funclet = cp_bx.catch_pad(cs, &[null, sixty_four, null]);
+ cp_bx.br(llbb);
+ }
+ _ => {
+ let cleanup_bb =
+ Bx::append_block(self.cx, self.llfn, &format!("funclet_{:?}", bb));
+ ret_llbb = cleanup_bb;
+ let mut cleanup_bx = Bx::build(self.cx, cleanup_bb);
+ funclet = cleanup_bx.cleanup_pad(None, &[]);
+ cleanup_bx.br(llbb);
+ }
+ }
+ self.funclets[bb] = Some(funclet);
+ ret_llbb
+ } else {
+ let bb = Bx::append_block(self.cx, self.llfn, "cleanup");
+ let mut bx = Bx::build(self.cx, bb);
+
+ let llpersonality = self.cx.eh_personality();
+ let llretty = self.landing_pad_type();
+ let lp = bx.cleanup_landing_pad(llretty, llpersonality);
+
+ let slot = self.get_personality_slot(&mut bx);
+ slot.storage_live(&mut bx);
+ Pair(bx.extract_value(lp, 0), bx.extract_value(lp, 1)).store(&mut bx, slot);
+
+ bx.br(llbb);
+ bx.llbb()
+ }
+ }
+
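+ /// The type of the value produced by a landing pad: a pair of the exception
+ /// pointer and the type-selector integer (`{ i8*, i32 }`).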
+ fn landing_pad_type(&self) -> Bx::Type {
+ let cx = self.cx;
+ cx.type_struct(&[cx.type_i8p(), cx.type_i32()], false)
+ }
+
+ fn unreachable_block(&mut self) -> Bx::BasicBlock {
+ self.unreachable_block.unwrap_or_else(|| {
+ let llbb = Bx::append_block(self.cx, self.llfn, "unreachable");
+ let mut bx = Bx::build(self.cx, llbb);
+ bx.unreachable();
+ self.unreachable_block = Some(llbb);
+ llbb
+ })
+ }
+
+ fn double_unwind_guard(&mut self) -> Bx::BasicBlock {
+ self.double_unwind_guard.unwrap_or_else(|| {
+ assert!(!base::wants_msvc_seh(self.cx.sess()));
+
+ let llbb = Bx::append_block(self.cx, self.llfn, "abort");
+ let mut bx = Bx::build(self.cx, llbb);
+ self.set_debug_loc(&mut bx, mir::SourceInfo::outermost(self.mir.span));
+
+ let llpersonality = self.cx.eh_personality();
+ let llretty = self.landing_pad_type();
+ bx.cleanup_landing_pad(llretty, llpersonality);
+
+ let (fn_abi, fn_ptr) = common::build_langcall(&bx, None, LangItem::PanicNoUnwind);
+ let fn_ty = bx.fn_decl_backend_type(&fn_abi);
+
+ let llret = bx.call(fn_ty, fn_ptr, &[], None);
+ bx.apply_attrs_callsite(&fn_abi, llret);
+ bx.do_not_inline(llret);
+
+ bx.unreachable();
+
+ self.double_unwind_guard = Some(llbb);
+ llbb
+ })
+ }
+
+ /// Get the backend `BasicBlock` for a MIR `BasicBlock`, either already
+ /// cached in `self.cached_llbbs`, or created on demand (and cached).
+ // FIXME(eddyb) rename `llbb` and other `ll`-prefixed things to use a
+ // more backend-agnostic prefix such as `cg` (i.e. this would be `cgbb`).
+ pub fn llbb(&mut self, bb: mir::BasicBlock) -> Bx::BasicBlock {
+ self.cached_llbbs[bb].unwrap_or_else(|| {
+ // FIXME(eddyb) only name the block if `fewer_names` is `false`.
+ let llbb = Bx::append_block(self.cx, self.llfn, &format!("{:?}", bb));
+ self.cached_llbbs[bb] = Some(llbb);
+ llbb
+ })
+ }
+
+ fn make_return_dest(
+ &mut self,
+ bx: &mut Bx,
+ dest: mir::Place<'tcx>,
+ fn_ret: &ArgAbi<'tcx, Ty<'tcx>>,
+ llargs: &mut Vec<Bx::Value>,
+ is_intrinsic: bool,
+ ) -> ReturnDest<'tcx, Bx::Value> {
+ // If the return is ignored, we can just return a do-nothing `ReturnDest`.
+ if fn_ret.is_ignore() {
+ return ReturnDest::Nothing;
+ }
+ let dest = if let Some(index) = dest.as_local() {
+ match self.locals[index] {
+ LocalRef::Place(dest) => dest,
+ LocalRef::UnsizedPlace(_) => bug!("return type must be sized"),
+ LocalRef::Operand(None) => {
+ // Handle temporary places, specifically `Operand` ones, as
+ // they don't have `alloca`s.
+ return if fn_ret.is_indirect() {
+ // An odd but possible case: we have an operand temporary,
+ // but the calling convention has an indirect return.
+ let tmp = PlaceRef::alloca(bx, fn_ret.layout);
+ tmp.storage_live(bx);
+ llargs.push(tmp.llval);
+ ReturnDest::IndirectOperand(tmp, index)
+ } else if is_intrinsic {
+ // Currently, intrinsics always need a location to store
+ // the result, so we create a temporary `alloca` for the
+ // result.
+ let tmp = PlaceRef::alloca(bx, fn_ret.layout);
+ tmp.storage_live(bx);
+ ReturnDest::IndirectOperand(tmp, index)
+ } else {
+ ReturnDest::DirectOperand(index)
+ };
+ }
+ LocalRef::Operand(Some(_)) => {
+ bug!("place local already assigned to");
+ }
+ }
+ } else {
+ self.codegen_place(
+ bx,
+ mir::PlaceRef { local: dest.local, projection: &dest.projection },
+ )
+ };
+ if fn_ret.is_indirect() {
+ if dest.align < dest.layout.align.abi {
+ // Currently, MIR code generation does not create calls
+ // that store directly to fields of packed structs (in
+ // fact, the calls it creates write only to temps).
+ //
+ // If someone changes that, please update this code path
+ // to create a temporary.
+ span_bug!(self.mir.span, "can't directly store to unaligned value");
+ }
+ llargs.push(dest.llval);
+ ReturnDest::Nothing
+ } else {
+ ReturnDest::Store(dest)
+ }
+ }
+
+ fn codegen_transmute(&mut self, bx: &mut Bx, src: &mir::Operand<'tcx>, dst: mir::Place<'tcx>) {
+ if let Some(index) = dst.as_local() {
+ match self.locals[index] {
+ LocalRef::Place(place) => self.codegen_transmute_into(bx, src, place),
+ LocalRef::UnsizedPlace(_) => bug!("transmute must not involve unsized locals"),
+ LocalRef::Operand(None) => {
+ let dst_layout = bx.layout_of(self.monomorphized_place_ty(dst.as_ref()));
+ assert!(!dst_layout.ty.has_erasable_regions());
+ let place = PlaceRef::alloca(bx, dst_layout);
+ place.storage_live(bx);
+ self.codegen_transmute_into(bx, src, place);
+ let op = bx.load_operand(place);
+ place.storage_dead(bx);
+ self.locals[index] = LocalRef::Operand(Some(op));
+ self.debug_introduce_local(bx, index);
+ }
+ LocalRef::Operand(Some(op)) => {
+ assert!(op.layout.is_zst(), "assigning to initialized SSA temp");
+ }
+ }
+ } else {
+ let dst = self.codegen_place(bx, dst.as_ref());
+ self.codegen_transmute_into(bx, src, dst);
+ }
+ }
+
+ fn codegen_transmute_into(
+ &mut self,
+ bx: &mut Bx,
+ src: &mir::Operand<'tcx>,
+ dst: PlaceRef<'tcx, Bx::Value>,
+ ) {
+ let src = self.codegen_operand(bx, src);
+
+ // Special-case transmutes between scalars as simple bitcasts.
+ match (src.layout.abi, dst.layout.abi) {
+ (abi::Abi::Scalar(src_scalar), abi::Abi::Scalar(dst_scalar)) => {
+ // HACK(eddyb) LLVM doesn't like `bitcast`s between pointers and non-pointers.
+ if (src_scalar.primitive() == abi::Pointer)
+ == (dst_scalar.primitive() == abi::Pointer)
+ {
+ assert_eq!(src.layout.size, dst.layout.size);
+
+ // NOTE(eddyb) the `from_immediate` and `to_immediate_scalar`
+ // conversions allow handling `bool`s the same as `u8`s.
+ let src = bx.from_immediate(src.immediate());
+ let src_as_dst = bx.bitcast(src, bx.backend_type(dst.layout));
+ Immediate(bx.to_immediate_scalar(src_as_dst, dst_scalar)).store(bx, dst);
+ return;
+ }
+ }
+ _ => {}
+ }
+
+ let llty = bx.backend_type(src.layout);
+ let cast_ptr = bx.pointercast(dst.llval, bx.type_ptr_to(llty));
+ let align = src.layout.align.abi.min(dst.align);
+ src.val.store(bx, PlaceRef::new_sized_aligned(cast_ptr, src.layout, align));
+ }
+
+ // Stores the return value of a function call into its final location.
+ fn store_return(
+ &mut self,
+ bx: &mut Bx,
+ dest: ReturnDest<'tcx, Bx::Value>,
+ ret_abi: &ArgAbi<'tcx, Ty<'tcx>>,
+ llval: Bx::Value,
+ ) {
+ use self::ReturnDest::*;
+
+ match dest {
+ Nothing => (),
+ Store(dst) => bx.store_arg(&ret_abi, llval, dst),
+ IndirectOperand(tmp, index) => {
+ let op = bx.load_operand(tmp);
+ tmp.storage_dead(bx);
+ self.locals[index] = LocalRef::Operand(Some(op));
+ self.debug_introduce_local(bx, index);
+ }
+ DirectOperand(index) => {
+ // If there is a cast, we have to store and reload.
+ let op = if let PassMode::Cast(_) = ret_abi.mode {
+ let tmp = PlaceRef::alloca(bx, ret_abi.layout);
+ tmp.storage_live(bx);
+ bx.store_arg(&ret_abi, llval, tmp);
+ let op = bx.load_operand(tmp);
+ tmp.storage_dead(bx);
+ op
+ } else {
+ OperandRef::from_immediate_or_packed_pair(bx, llval, ret_abi.layout)
+ };
+ self.locals[index] = LocalRef::Operand(Some(op));
+ self.debug_introduce_local(bx, index);
+ }
+ }
+ }
+}
+
+enum ReturnDest<'tcx, V> {
+ // Do nothing; the return value is indirect or ignored.
+ Nothing,
+ // Store the return value to the pointer.
+ Store(PlaceRef<'tcx, V>),
+ // Store an indirect return value to an operand local place.
+ IndirectOperand(PlaceRef<'tcx, V>, mir::Local),
+ // Store a direct return value to an operand local place.
+ DirectOperand(mir::Local),
+}
diff --git a/compiler/rustc_codegen_ssa/src/mir/constant.rs b/compiler/rustc_codegen_ssa/src/mir/constant.rs
new file mode 100644
index 000000000..9a995fbf6
--- /dev/null
+++ b/compiler/rustc_codegen_ssa/src/mir/constant.rs
@@ -0,0 +1,90 @@
+use crate::mir::operand::OperandRef;
+use crate::traits::*;
+use rustc_middle::mir;
+use rustc_middle::mir::interpret::{ConstValue, ErrorHandled};
+use rustc_middle::ty::layout::HasTyCtxt;
+use rustc_middle::ty::{self, Ty};
+use rustc_span::source_map::Span;
+use rustc_target::abi::Abi;
+
+use super::FunctionCx;
+
+impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
+ pub fn eval_mir_constant_to_operand(
+ &self,
+ bx: &mut Bx,
+ constant: &mir::Constant<'tcx>,
+ ) -> Result<OperandRef<'tcx, Bx::Value>, ErrorHandled> {
+ let val = self.eval_mir_constant(constant)?;
+ let ty = self.monomorphize(constant.ty());
+ Ok(OperandRef::from_const(bx, val, ty))
+ }
+
+ pub fn eval_mir_constant(
+ &self,
+ constant: &mir::Constant<'tcx>,
+ ) -> Result<ConstValue<'tcx>, ErrorHandled> {
+ let ct = self.monomorphize(constant.literal);
+ let ct = match ct {
+ mir::ConstantKind::Ty(ct) => ct,
+ mir::ConstantKind::Val(val, _) => return Ok(val),
+ };
+ match ct.kind() {
+ ty::ConstKind::Unevaluated(ct) => self
+ .cx
+ .tcx()
+ .const_eval_resolve(ty::ParamEnv::reveal_all(), ct, None)
+ .map_err(|err| {
+ self.cx.tcx().sess.span_err(constant.span, "erroneous constant encountered");
+ err
+ }),
+ ty::ConstKind::Value(val) => Ok(self.cx.tcx().valtree_to_const_val((ct.ty(), val))),
+ err => span_bug!(
+ constant.span,
+ "encountered bad ConstKind after monomorphizing: {:?}",
+ err
+ ),
+ }
+ }
+
+ /// Process a constant containing SIMD shuffle indices.
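+ /// (i.e. the constant array of lane indices passed to a `simd_shuffle` call,
+ /// which must be known at compile time).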
+ pub fn simd_shuffle_indices(
+ &mut self,
+ bx: &Bx,
+ span: Span,
+ ty: Ty<'tcx>,
+ constant: Result<ConstValue<'tcx>, ErrorHandled>,
+ ) -> (Bx::Value, Ty<'tcx>) {
+ constant
+ .map(|val| {
+ let field_ty = ty.builtin_index().unwrap();
+ let c = mir::ConstantKind::from_value(val, ty);
+ let values: Vec<_> = bx
+ .tcx()
+ .destructure_mir_constant(ty::ParamEnv::reveal_all(), c)
+ .fields
+ .iter()
+ .map(|field| {
+ if let Some(prim) = field.try_to_scalar() {
+ let layout = bx.layout_of(field_ty);
+ let Abi::Scalar(scalar) = layout.abi else {
+ bug!("from_const: invalid ByVal layout: {:#?}", layout);
+ };
+ bx.scalar_to_backend(prim, scalar, bx.immediate_backend_type(layout))
+ } else {
+ bug!("simd shuffle field {:?}", field)
+ }
+ })
+ .collect();
+ let llval = bx.const_struct(&values, false);
+ (llval, c.ty())
+ })
+ .unwrap_or_else(|_| {
+ bx.tcx().sess.span_err(span, "could not evaluate shuffle_indices at compile time");
+ // We've errored, so we don't have to produce working code.
+ let ty = self.monomorphize(ty);
+ let llty = bx.backend_type(bx.layout_of(ty));
+ (bx.const_undef(llty), ty)
+ })
+ }
+}
diff --git a/compiler/rustc_codegen_ssa/src/mir/coverageinfo.rs b/compiler/rustc_codegen_ssa/src/mir/coverageinfo.rs
new file mode 100644
index 000000000..f1fe49528
--- /dev/null
+++ b/compiler/rustc_codegen_ssa/src/mir/coverageinfo.rs
@@ -0,0 +1,55 @@
+use crate::traits::*;
+
+use rustc_middle::mir::coverage::*;
+use rustc_middle::mir::Coverage;
+use rustc_middle::mir::SourceScope;
+
+use super::FunctionCx;
+
+impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
+ pub fn codegen_coverage(&self, bx: &mut Bx, coverage: Coverage, scope: SourceScope) {
+ // Determine the instance that coverage data was originally generated for.
+ let instance = if let Some(inlined) = scope.inlined_instance(&self.mir.source_scopes) {
+ self.monomorphize(inlined)
+ } else {
+ self.instance
+ };
+
+ let Coverage { kind, code_region } = coverage;
+ match kind {
+ CoverageKind::Counter { function_source_hash, id } => {
+ if bx.set_function_source_hash(instance, function_source_hash) {
+ // If `set_function_source_hash()` returned true, the coverage map is enabled,
+ // so continue adding the counter.
+ if let Some(code_region) = code_region {
+ // Note: Some counters do not have code regions, but may still be referenced
+ // from expressions. In that case, don't add the counter to the coverage map,
+ // but do inject the counter intrinsic.
+ bx.add_coverage_counter(instance, id, code_region);
+ }
+
+ let coverageinfo = bx.tcx().coverageinfo(instance.def);
+
+ let fn_name = bx.get_pgo_func_name_var(instance);
+ let hash = bx.const_u64(function_source_hash);
+ let num_counters = bx.const_u32(coverageinfo.num_counters);
+ let index = bx.const_u32(id.zero_based_index());
+ debug!(
+ "codegen intrinsic instrprof.increment(fn_name={:?}, hash={:?}, num_counters={:?}, index={:?})",
+ fn_name, hash, num_counters, index,
+ );
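+ // In LLVM this typically lowers to the `llvm.instrprof.increment` intrinsic,
+ // whose operands correspond to the four values computed above: a pointer to
+ // the PGO function name, the 64-bit source hash, the total number of
+ // counters, and this counter's index.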
+ bx.instrprof_increment(fn_name, hash, num_counters, index);
+ }
+ }
+ CoverageKind::Expression { id, lhs, op, rhs } => {
+ bx.add_coverage_counter_expression(instance, id, lhs, op, rhs, code_region);
+ }
+ CoverageKind::Unreachable => {
+ bx.add_coverage_unreachable(
+ instance,
+ code_region.expect("unreachable regions always have code regions"),
+ );
+ }
+ }
+ }
+}
diff --git a/compiler/rustc_codegen_ssa/src/mir/debuginfo.rs b/compiler/rustc_codegen_ssa/src/mir/debuginfo.rs
new file mode 100644
index 000000000..8c3186efc
--- /dev/null
+++ b/compiler/rustc_codegen_ssa/src/mir/debuginfo.rs
@@ -0,0 +1,418 @@
+use crate::traits::*;
+use rustc_index::vec::IndexVec;
+use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrFlags;
+use rustc_middle::mir;
+use rustc_middle::ty;
+use rustc_middle::ty::layout::LayoutOf;
+use rustc_session::config::DebugInfo;
+use rustc_span::symbol::{kw, Symbol};
+use rustc_span::{BytePos, Span};
+use rustc_target::abi::Abi;
+use rustc_target::abi::Size;
+
+use super::operand::{OperandRef, OperandValue};
+use super::place::PlaceRef;
+use super::{FunctionCx, LocalRef};
+
+pub struct FunctionDebugContext<S, L> {
+ pub scopes: IndexVec<mir::SourceScope, DebugScope<S, L>>,
+}
+
+#[derive(Copy, Clone)]
+pub enum VariableKind {
+ ArgumentVariable(usize /*index*/),
+ LocalVariable,
+}
+
+/// Like `mir::VarDebugInfo`, but within a `mir::Local`.
+#[derive(Copy, Clone)]
+pub struct PerLocalVarDebugInfo<'tcx, D> {
+ pub name: Symbol,
+ pub source_info: mir::SourceInfo,
+
+ /// `DIVariable` returned by `create_dbg_var`.
+ pub dbg_var: Option<D>,
+
+ /// `.place.projection` from `mir::VarDebugInfo`.
+ pub projection: &'tcx ty::List<mir::PlaceElem<'tcx>>,
+}
+
+#[derive(Clone, Copy, Debug)]
+pub struct DebugScope<S, L> {
+ pub dbg_scope: S,
+
+ /// Call site location, if this scope was inlined from another function.
+ pub inlined_at: Option<L>,
+
+ // Start and end offsets of the file to which this DIScope belongs.
+ // These are used to quickly determine whether some span refers to the same file.
+ pub file_start_pos: BytePos,
+ pub file_end_pos: BytePos,
+}
+
+impl<'tcx, S: Copy, L: Copy> DebugScope<S, L> {
+ /// DILocations inherit their source file name from the parent DIScope. Due to macro expansions
+ /// it can happen that the current span belongs to a different file than the DIScope
+ /// corresponding to the span's containing source scope. If so, we need to create a DIScope
+ /// "extension" into that file.
+ pub fn adjust_dbg_scope_for_span<Cx: CodegenMethods<'tcx, DIScope = S, DILocation = L>>(
+ &self,
+ cx: &Cx,
+ span: Span,
+ ) -> S {
+ let pos = span.lo();
+ if pos < self.file_start_pos || pos >= self.file_end_pos {
+ let sm = cx.sess().source_map();
+ cx.extend_scope_to_file(self.dbg_scope, &sm.lookup_char_pos(pos).file)
+ } else {
+ self.dbg_scope
+ }
+ }
+}
+
+impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
+ pub fn set_debug_loc(&self, bx: &mut Bx, source_info: mir::SourceInfo) {
+ bx.set_span(source_info.span);
+ if let Some(dbg_loc) = self.dbg_loc(source_info) {
+ bx.set_dbg_loc(dbg_loc);
+ }
+ }
+
+ fn dbg_loc(&self, source_info: mir::SourceInfo) -> Option<Bx::DILocation> {
+ let (dbg_scope, inlined_at, span) = self.adjusted_span_and_dbg_scope(source_info)?;
+ Some(self.cx.dbg_loc(dbg_scope, inlined_at, span))
+ }
+
+ fn adjusted_span_and_dbg_scope(
+ &self,
+ source_info: mir::SourceInfo,
+ ) -> Option<(Bx::DIScope, Option<Bx::DILocation>, Span)> {
+ let span = self.adjust_span_for_debugging(source_info.span);
+ let scope = &self.debug_context.as_ref()?.scopes[source_info.scope];
+ Some((scope.adjust_dbg_scope_for_span(self.cx, span), scope.inlined_at, span))
+ }
+
+ /// In order to have good line-stepping behavior in the debugger, we overwrite the debug
+ /// locations of macro expansions with that of the outermost expansion site
+ /// (unless the crate is being compiled with `-Z debug-macros`).
+ fn adjust_span_for_debugging(&self, mut span: Span) -> Span {
+ // Bail out if debug info emission is not enabled.
+ if self.debug_context.is_none() {
+ return span;
+ }
+
+ if span.from_expansion() && !self.cx.sess().opts.unstable_opts.debug_macros {
+ // Walk up the macro expansion chain until we reach a non-expanded span.
+ // We also stop at the function body level because no line stepping can occur
+ // at the level above that.
+ // Use span of the outermost expansion site, while keeping the original lexical scope.
+ span = rustc_span::hygiene::walk_chain(span, self.mir.span.ctxt());
+ }
+
+ span
+ }
+
+ fn spill_operand_to_stack(
+ operand: &OperandRef<'tcx, Bx::Value>,
+ name: Option<String>,
+ bx: &mut Bx,
+ ) -> PlaceRef<'tcx, Bx::Value> {
+ // "Spill" the value onto the stack, for debuginfo,
+ // without forcing non-debuginfo uses of the local
+ // to also load from the stack every single time.
+ // FIXME(#68817) use `llvm.dbg.value` instead,
+ // at least for the cases which LLVM handles correctly.
+ let spill_slot = PlaceRef::alloca(bx, operand.layout);
+ if let Some(name) = name {
+ bx.set_var_name(spill_slot.llval, &(name + ".dbg.spill"));
+ }
+ operand.val.store(bx, spill_slot);
+ spill_slot
+ }
+
+ /// Apply debuginfo and/or name, after creating the `alloca` for a local,
+ /// or initializing the local with an operand (whichever applies).
+ pub fn debug_introduce_local(&self, bx: &mut Bx, local: mir::Local) {
+ let full_debug_info = bx.sess().opts.debuginfo == DebugInfo::Full;
+
+ // FIXME(eddyb) maybe name the return place as `_0` or `return`?
+ if local == mir::RETURN_PLACE && !self.mir.local_decls[mir::RETURN_PLACE].is_user_variable()
+ {
+ return;
+ }
+
+ let vars = match &self.per_local_var_debug_info {
+ Some(per_local) => &per_local[local],
+ None => return,
+ };
+ let whole_local_var = vars.iter().find(|var| var.projection.is_empty()).copied();
+ let has_proj = || vars.iter().any(|var| !var.projection.is_empty());
+
+ let fallback_var = if self.mir.local_kind(local) == mir::LocalKind::Arg {
+ let arg_index = local.index() - 1;
+
+ // Add debuginfo even to unnamed arguments.
+ // FIXME(eddyb) is this really needed?
+ if arg_index == 0 && has_proj() {
+ // Hide closure environments from debuginfo.
+ // FIXME(eddyb) shouldn't `ArgumentVariable` indices
+ // be offset to account for the hidden environment?
+ None
+ } else if whole_local_var.is_some() {
+ // No need to make up anything, there is a `mir::VarDebugInfo`
+ // covering the whole local.
+ // FIXME(eddyb) take `whole_local_var.source_info.scope` into
+ // account, just in case it doesn't use `ArgumentVariable`
+ // (after #67586 gets fixed).
+ None
+ } else {
+ let name = kw::Empty;
+ let decl = &self.mir.local_decls[local];
+ let dbg_var = if full_debug_info {
+ self.adjusted_span_and_dbg_scope(decl.source_info).map(
+ |(dbg_scope, _, span)| {
+ // FIXME(eddyb) is this `+ 1` needed at all?
+ let kind = VariableKind::ArgumentVariable(arg_index + 1);
+
+ let arg_ty = self.monomorphize(decl.ty);
+
+ self.cx.create_dbg_var(name, arg_ty, dbg_scope, kind, span)
+ },
+ )
+ } else {
+ None
+ };
+
+ Some(PerLocalVarDebugInfo {
+ name,
+ source_info: decl.source_info,
+ dbg_var,
+ projection: ty::List::empty(),
+ })
+ }
+ } else {
+ None
+ };
+
+ let local_ref = &self.locals[local];
+
+ let name = if bx.sess().fewer_names() {
+ None
+ } else {
+ Some(match whole_local_var.or(fallback_var) {
+ Some(var) if var.name != kw::Empty => var.name.to_string(),
+ _ => format!("{:?}", local),
+ })
+ };
+
+ if let Some(name) = &name {
+ match local_ref {
+ LocalRef::Place(place) | LocalRef::UnsizedPlace(place) => {
+ bx.set_var_name(place.llval, name);
+ }
+ LocalRef::Operand(Some(operand)) => match operand.val {
+ OperandValue::Ref(x, ..) | OperandValue::Immediate(x) => {
+ bx.set_var_name(x, name);
+ }
+ OperandValue::Pair(a, b) => {
+ // FIXME(eddyb) these are scalar components,
+ // maybe extract the high-level fields?
+ bx.set_var_name(a, &(name.clone() + ".0"));
+ bx.set_var_name(b, &(name.clone() + ".1"));
+ }
+ },
+ LocalRef::Operand(None) => {}
+ }
+ }
+
+ if !full_debug_info || vars.is_empty() && fallback_var.is_none() {
+ return;
+ }
+
+ let base = match local_ref {
+ LocalRef::Operand(None) => return,
+
+ LocalRef::Operand(Some(operand)) => {
+ // Don't spill operands onto the stack in naked functions.
+ // See: https://github.com/rust-lang/rust/issues/42779
+ let attrs = bx.tcx().codegen_fn_attrs(self.instance.def_id());
+ if attrs.flags.contains(CodegenFnAttrFlags::NAKED) {
+ return;
+ }
+
+ Self::spill_operand_to_stack(operand, name, bx)
+ }
+
+ LocalRef::Place(place) => *place,
+
+ // FIXME(eddyb) add debuginfo for unsized places too.
+ LocalRef::UnsizedPlace(_) => return,
+ };
+
+ let vars = vars.iter().copied().chain(fallback_var);
+
+ for var in vars {
+ let Some(dbg_var) = var.dbg_var else { continue };
+ let Some(dbg_loc) = self.dbg_loc(var.source_info) else { continue };
+
+ let mut direct_offset = Size::ZERO;
+ // FIXME(eddyb) use smallvec here.
+ let mut indirect_offsets = vec![];
+ let mut place = base;
+
+ for elem in &var.projection[..] {
+ match *elem {
+ mir::ProjectionElem::Deref => {
+ indirect_offsets.push(Size::ZERO);
+ place = bx.load_operand(place).deref(bx.cx());
+ }
+ mir::ProjectionElem::Field(field, _) => {
+ let i = field.index();
+ let offset = indirect_offsets.last_mut().unwrap_or(&mut direct_offset);
+ *offset += place.layout.fields.offset(i);
+ place = place.project_field(bx, i);
+ }
+ mir::ProjectionElem::Downcast(_, variant) => {
+ place = place.project_downcast(bx, variant);
+ }
+ _ => span_bug!(
+ var.source_info.span,
+ "unsupported var debuginfo place `{:?}`",
+ mir::Place { local, projection: var.projection },
+ ),
+ }
+ }
+
+ // When targeting MSVC, create extra allocas for arguments instead of pointing multiple
+ // dbg_var_addr() calls into the same alloca with offsets. MSVC uses CodeView records,
+ // not DWARF, and LLVM doesn't support translating the resulting
+ // [DW_OP_deref, DW_OP_plus_uconst, offset, DW_OP_deref] debug info to CodeView.
+ // Creating extra allocas on the stack makes the resulting debug info simple enough
+ // that LLVM can generate correct CodeView records and thus the values appear in the
+ // debugger. (#83709)
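+ // In other words, instead of describing the variable as `*(base + offset)`,
+ // we emit a dedicated alloca holding a pointer to it and describe the
+ // variable as `*alloca` (see the two branches below).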
+ let should_create_individual_allocas = bx.cx().sess().target.is_like_msvc
+ && self.mir.local_kind(local) == mir::LocalKind::Arg
+ // LLVM can handle simple things but anything more complex than just a direct
+ // offset or one indirect offset of 0 is too complex for it to generate CV records
+ // correctly.
+ && (direct_offset != Size::ZERO
+ || !matches!(&indirect_offsets[..], [Size::ZERO] | []));
+
+ if should_create_individual_allocas {
+ // Create a variable which will be a pointer to the actual value
+ let ptr_ty = bx.tcx().mk_ty(ty::RawPtr(ty::TypeAndMut {
+ mutbl: mir::Mutability::Mut,
+ ty: place.layout.ty,
+ }));
+ let ptr_layout = bx.layout_of(ptr_ty);
+ let alloca = PlaceRef::alloca(bx, ptr_layout);
+ bx.set_var_name(alloca.llval, &(var.name.to_string() + ".dbg.spill"));
+
+ // Write the pointer to the variable
+ bx.store(place.llval, alloca.llval, alloca.align);
+
+ // Point the debug info to `*alloca` for the current variable
+ bx.dbg_var_addr(dbg_var, dbg_loc, alloca.llval, Size::ZERO, &[Size::ZERO]);
+ } else {
+ bx.dbg_var_addr(dbg_var, dbg_loc, base.llval, direct_offset, &indirect_offsets);
+ }
+ }
+ }
+
+ pub fn debug_introduce_locals(&self, bx: &mut Bx) {
+ if bx.sess().opts.debuginfo == DebugInfo::Full || !bx.sess().fewer_names() {
+ for local in self.locals.indices() {
+ self.debug_introduce_local(bx, local);
+ }
+ }
+ }
+
+ /// Partition all `VarDebugInfo` in `self.mir`, by their base `Local`.
+ pub fn compute_per_local_var_debug_info(
+ &self,
+ bx: &mut Bx,
+ ) -> Option<IndexVec<mir::Local, Vec<PerLocalVarDebugInfo<'tcx, Bx::DIVariable>>>> {
+ let full_debug_info = self.cx.sess().opts.debuginfo == DebugInfo::Full;
+
+ let target_is_msvc = self.cx.sess().target.is_like_msvc;
+
+ if !full_debug_info && self.cx.sess().fewer_names() {
+ return None;
+ }
+
+ let mut per_local = IndexVec::from_elem(vec![], &self.mir.local_decls);
+ for var in &self.mir.var_debug_info {
+ let dbg_scope_and_span = if full_debug_info {
+ self.adjusted_span_and_dbg_scope(var.source_info)
+ } else {
+ None
+ };
+
+ let dbg_var = dbg_scope_and_span.map(|(dbg_scope, _, span)| {
+ let (var_ty, var_kind) = match var.value {
+ mir::VarDebugInfoContents::Place(place) => {
+ let var_ty = self.monomorphized_place_ty(place.as_ref());
+ let var_kind = if self.mir.local_kind(place.local) == mir::LocalKind::Arg
+ && place.projection.is_empty()
+ && var.source_info.scope == mir::OUTERMOST_SOURCE_SCOPE
+ {
+ let arg_index = place.local.index() - 1;
+ if target_is_msvc {
+ // ScalarPair parameters are spilled to the stack so they need to
+ // be marked as a `LocalVariable` for MSVC debuggers to visualize
+ // their data correctly. (See #81894 & #88625)
+ let var_ty_layout = self.cx.layout_of(var_ty);
+ if let Abi::ScalarPair(_, _) = var_ty_layout.abi {
+ VariableKind::LocalVariable
+ } else {
+ VariableKind::ArgumentVariable(arg_index + 1)
+ }
+ } else {
+ // FIXME(eddyb) shouldn't `ArgumentVariable` indices be
+ // offset in closures to account for the hidden environment?
+ // Also, is this `+ 1` needed at all?
+ VariableKind::ArgumentVariable(arg_index + 1)
+ }
+ } else {
+ VariableKind::LocalVariable
+ };
+ (var_ty, var_kind)
+ }
+ mir::VarDebugInfoContents::Const(c) => {
+ let ty = self.monomorphize(c.ty());
+ (ty, VariableKind::LocalVariable)
+ }
+ };
+
+ self.cx.create_dbg_var(var.name, var_ty, dbg_scope, var_kind, span)
+ });
+
+ match var.value {
+ mir::VarDebugInfoContents::Place(place) => {
+ per_local[place.local].push(PerLocalVarDebugInfo {
+ name: var.name,
+ source_info: var.source_info,
+ dbg_var,
+ projection: place.projection,
+ });
+ }
+ mir::VarDebugInfoContents::Const(c) => {
+ if let Some(dbg_var) = dbg_var {
+ let Some(dbg_loc) = self.dbg_loc(var.source_info) else { continue };
+
+ if let Ok(operand) = self.eval_mir_constant_to_operand(bx, &c) {
+ let base = Self::spill_operand_to_stack(
+ &operand,
+ Some(var.name.to_string()),
+ bx,
+ );
+
+ bx.dbg_var_addr(dbg_var, dbg_loc, base.llval, Size::ZERO, &[]);
+ }
+ }
+ }
+ }
+ }
+ Some(per_local)
+ }
+}
diff --git a/compiler/rustc_codegen_ssa/src/mir/intrinsic.rs b/compiler/rustc_codegen_ssa/src/mir/intrinsic.rs
new file mode 100644
index 000000000..94ac71a4d
--- /dev/null
+++ b/compiler/rustc_codegen_ssa/src/mir/intrinsic.rs
@@ -0,0 +1,636 @@
+use super::operand::{OperandRef, OperandValue};
+use super::place::PlaceRef;
+use super::FunctionCx;
+use crate::common::{span_invalid_monomorphization_error, IntPredicate};
+use crate::glue;
+use crate::meth;
+use crate::traits::*;
+use crate::MemFlags;
+
+use rustc_middle::ty::{self, Ty, TyCtxt};
+use rustc_span::{sym, Span};
+use rustc_target::abi::{
+ call::{FnAbi, PassMode},
+ WrappingRange,
+};
+
+fn copy_intrinsic<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
+ bx: &mut Bx,
+ allow_overlap: bool,
+ volatile: bool,
+ ty: Ty<'tcx>,
+ dst: Bx::Value,
+ src: Bx::Value,
+ count: Bx::Value,
+) {
+ let layout = bx.layout_of(ty);
+ let size = layout.size;
+ let align = layout.align.abi;
+ let size = bx.mul(bx.const_usize(size.bytes()), count);
+ let flags = if volatile { MemFlags::VOLATILE } else { MemFlags::empty() };
+ if allow_overlap {
+ bx.memmove(dst, align, src, align, size, flags);
+ } else {
+ bx.memcpy(dst, align, src, align, size, flags);
+ }
+}
+
+fn memset_intrinsic<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
+ bx: &mut Bx,
+ volatile: bool,
+ ty: Ty<'tcx>,
+ dst: Bx::Value,
+ val: Bx::Value,
+ count: Bx::Value,
+) {
+ let layout = bx.layout_of(ty);
+ let size = layout.size;
+ let align = layout.align.abi;
+ let size = bx.mul(bx.const_usize(size.bytes()), count);
+ let flags = if volatile { MemFlags::VOLATILE } else { MemFlags::empty() };
+ bx.memset(dst, val, size, align, flags);
+}
+
+impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
+ pub fn codegen_intrinsic_call(
+ bx: &mut Bx,
+ instance: ty::Instance<'tcx>,
+ fn_abi: &FnAbi<'tcx, Ty<'tcx>>,
+ args: &[OperandRef<'tcx, Bx::Value>],
+ llresult: Bx::Value,
+ span: Span,
+ ) {
+ let callee_ty = instance.ty(bx.tcx(), ty::ParamEnv::reveal_all());
+
+ let ty::FnDef(def_id, substs) = *callee_ty.kind() else {
+ bug!("expected fn item type, found {}", callee_ty);
+ };
+
+ let sig = callee_ty.fn_sig(bx.tcx());
+ let sig = bx.tcx().normalize_erasing_late_bound_regions(ty::ParamEnv::reveal_all(), sig);
+ let arg_tys = sig.inputs();
+ let ret_ty = sig.output();
+ let name = bx.tcx().item_name(def_id);
+ let name_str = name.as_str();
+
+ let llret_ty = bx.backend_type(bx.layout_of(ret_ty));
+ let result = PlaceRef::new_sized(llresult, fn_abi.ret.layout);
+
+ let llval = match name {
+ sym::assume => {
+ bx.assume(args[0].immediate());
+ return;
+ }
+ sym::abort => {
+ bx.abort();
+ return;
+ }
+
+ sym::va_start => bx.va_start(args[0].immediate()),
+ sym::va_end => bx.va_end(args[0].immediate()),
+ sym::size_of_val => {
+ let tp_ty = substs.type_at(0);
+ if let OperandValue::Pair(_, meta) = args[0].val {
+ let (llsize, _) = glue::size_and_align_of_dst(bx, tp_ty, Some(meta));
+ llsize
+ } else {
+ bx.const_usize(bx.layout_of(tp_ty).size.bytes())
+ }
+ }
+ sym::min_align_of_val => {
+ let tp_ty = substs.type_at(0);
+ if let OperandValue::Pair(_, meta) = args[0].val {
+ let (_, llalign) = glue::size_and_align_of_dst(bx, tp_ty, Some(meta));
+ llalign
+ } else {
+ bx.const_usize(bx.layout_of(tp_ty).align.abi.bytes())
+ }
+ }
+ sym::vtable_size | sym::vtable_align => {
+ let vtable = args[0].immediate();
+ let idx = match name {
+ sym::vtable_size => ty::COMMON_VTABLE_ENTRIES_SIZE,
+ sym::vtable_align => ty::COMMON_VTABLE_ENTRIES_ALIGN,
+ _ => bug!(),
+ };
+ let value = meth::VirtualIndex::from_index(idx).get_usize(bx, vtable);
+ if name == sym::vtable_align {
+ // Alignment is always nonzero.
+ bx.range_metadata(value, WrappingRange { start: 1, end: !0 });
+ };
+ value
+ }
+ sym::pref_align_of
+ | sym::needs_drop
+ | sym::type_id
+ | sym::type_name
+ | sym::variant_count => {
+ let value = bx
+ .tcx()
+ .const_eval_instance(ty::ParamEnv::reveal_all(), instance, None)
+ .unwrap();
+ OperandRef::from_const(bx, value, ret_ty).immediate_or_packed_pair(bx)
+ }
+ sym::offset => {
+ let ty = substs.type_at(0);
+ let layout = bx.layout_of(ty);
+ let ptr = args[0].immediate();
+ let offset = args[1].immediate();
+ bx.inbounds_gep(bx.backend_type(layout), ptr, &[offset])
+ }
+ sym::arith_offset => {
+ let ty = substs.type_at(0);
+ let layout = bx.layout_of(ty);
+ let ptr = args[0].immediate();
+ let offset = args[1].immediate();
+ bx.gep(bx.backend_type(layout), ptr, &[offset])
+ }
+ sym::copy => {
+ copy_intrinsic(
+ bx,
+ true,
+ false,
+ substs.type_at(0),
+ args[1].immediate(),
+ args[0].immediate(),
+ args[2].immediate(),
+ );
+ return;
+ }
+ sym::write_bytes => {
+ memset_intrinsic(
+ bx,
+ false,
+ substs.type_at(0),
+ args[0].immediate(),
+ args[1].immediate(),
+ args[2].immediate(),
+ );
+ return;
+ }
+
+ sym::volatile_copy_nonoverlapping_memory => {
+ copy_intrinsic(
+ bx,
+ false,
+ true,
+ substs.type_at(0),
+ args[0].immediate(),
+ args[1].immediate(),
+ args[2].immediate(),
+ );
+ return;
+ }
+ sym::volatile_copy_memory => {
+ copy_intrinsic(
+ bx,
+ true,
+ true,
+ substs.type_at(0),
+ args[0].immediate(),
+ args[1].immediate(),
+ args[2].immediate(),
+ );
+ return;
+ }
+ sym::volatile_set_memory => {
+ memset_intrinsic(
+ bx,
+ true,
+ substs.type_at(0),
+ args[0].immediate(),
+ args[1].immediate(),
+ args[2].immediate(),
+ );
+ return;
+ }
+ sym::volatile_store => {
+ let dst = args[0].deref(bx.cx());
+ args[1].val.volatile_store(bx, dst);
+ return;
+ }
+ sym::unaligned_volatile_store => {
+ let dst = args[0].deref(bx.cx());
+ args[1].val.unaligned_volatile_store(bx, dst);
+ return;
+ }
+ sym::add_with_overflow
+ | sym::sub_with_overflow
+ | sym::mul_with_overflow
+ | sym::unchecked_div
+ | sym::unchecked_rem
+ | sym::unchecked_shl
+ | sym::unchecked_shr
+ | sym::unchecked_add
+ | sym::unchecked_sub
+ | sym::unchecked_mul
+ | sym::exact_div => {
+ let ty = arg_tys[0];
+ match int_type_width_signed(ty, bx.tcx()) {
+ Some((_width, signed)) => match name {
+ sym::add_with_overflow
+ | sym::sub_with_overflow
+ | sym::mul_with_overflow => {
+ let op = match name {
+ sym::add_with_overflow => OverflowOp::Add,
+ sym::sub_with_overflow => OverflowOp::Sub,
+ sym::mul_with_overflow => OverflowOp::Mul,
+ _ => bug!(),
+ };
+ let (val, overflow) =
+ bx.checked_binop(op, ty, args[0].immediate(), args[1].immediate());
+ // Convert `i1` to a `bool`, and write it to the out parameter
+ let val = bx.from_immediate(val);
+ let overflow = bx.from_immediate(overflow);
+
+ let dest = result.project_field(bx, 0);
+ bx.store(val, dest.llval, dest.align);
+ let dest = result.project_field(bx, 1);
+ bx.store(overflow, dest.llval, dest.align);
+
+ return;
+ }
+ sym::exact_div => {
+ if signed {
+ bx.exactsdiv(args[0].immediate(), args[1].immediate())
+ } else {
+ bx.exactudiv(args[0].immediate(), args[1].immediate())
+ }
+ }
+ sym::unchecked_div => {
+ if signed {
+ bx.sdiv(args[0].immediate(), args[1].immediate())
+ } else {
+ bx.udiv(args[0].immediate(), args[1].immediate())
+ }
+ }
+ sym::unchecked_rem => {
+ if signed {
+ bx.srem(args[0].immediate(), args[1].immediate())
+ } else {
+ bx.urem(args[0].immediate(), args[1].immediate())
+ }
+ }
+ sym::unchecked_shl => bx.shl(args[0].immediate(), args[1].immediate()),
+ sym::unchecked_shr => {
+ if signed {
+ bx.ashr(args[0].immediate(), args[1].immediate())
+ } else {
+ bx.lshr(args[0].immediate(), args[1].immediate())
+ }
+ }
+ sym::unchecked_add => {
+ if signed {
+ bx.unchecked_sadd(args[0].immediate(), args[1].immediate())
+ } else {
+ bx.unchecked_uadd(args[0].immediate(), args[1].immediate())
+ }
+ }
+ sym::unchecked_sub => {
+ if signed {
+ bx.unchecked_ssub(args[0].immediate(), args[1].immediate())
+ } else {
+ bx.unchecked_usub(args[0].immediate(), args[1].immediate())
+ }
+ }
+ sym::unchecked_mul => {
+ if signed {
+ bx.unchecked_smul(args[0].immediate(), args[1].immediate())
+ } else {
+ bx.unchecked_umul(args[0].immediate(), args[1].immediate())
+ }
+ }
+ _ => bug!(),
+ },
+ None => {
+ span_invalid_monomorphization_error(
+ bx.tcx().sess,
+ span,
+ &format!(
+ "invalid monomorphization of `{}` intrinsic: \
+ expected basic integer type, found `{}`",
+ name, ty
+ ),
+ );
+ return;
+ }
+ }
+ }
+ sym::fadd_fast | sym::fsub_fast | sym::fmul_fast | sym::fdiv_fast | sym::frem_fast => {
+ match float_type_width(arg_tys[0]) {
+ Some(_width) => match name {
+ sym::fadd_fast => bx.fadd_fast(args[0].immediate(), args[1].immediate()),
+ sym::fsub_fast => bx.fsub_fast(args[0].immediate(), args[1].immediate()),
+ sym::fmul_fast => bx.fmul_fast(args[0].immediate(), args[1].immediate()),
+ sym::fdiv_fast => bx.fdiv_fast(args[0].immediate(), args[1].immediate()),
+ sym::frem_fast => bx.frem_fast(args[0].immediate(), args[1].immediate()),
+ _ => bug!(),
+ },
+ None => {
+ span_invalid_monomorphization_error(
+ bx.tcx().sess,
+ span,
+ &format!(
+ "invalid monomorphization of `{}` intrinsic: \
+ expected basic float type, found `{}`",
+ name, arg_tys[0]
+ ),
+ );
+ return;
+ }
+ }
+ }
+
+ sym::float_to_int_unchecked => {
+ if float_type_width(arg_tys[0]).is_none() {
+ span_invalid_monomorphization_error(
+ bx.tcx().sess,
+ span,
+ &format!(
+ "invalid monomorphization of `float_to_int_unchecked` \
+ intrinsic: expected basic float type, \
+ found `{}`",
+ arg_tys[0]
+ ),
+ );
+ return;
+ }
+ let Some((_width, signed)) = int_type_width_signed(ret_ty, bx.tcx()) else {
+ span_invalid_monomorphization_error(
+ bx.tcx().sess,
+ span,
+ &format!(
+ "invalid monomorphization of `float_to_int_unchecked` \
+ intrinsic: expected basic integer type, \
+ found `{}`",
+ ret_ty
+ ),
+ );
+ return;
+ };
+ if signed {
+ bx.fptosi(args[0].immediate(), llret_ty)
+ } else {
+ bx.fptoui(args[0].immediate(), llret_ty)
+ }
+ }
+
+ sym::discriminant_value => {
+ if ret_ty.is_integral() {
+ args[0].deref(bx.cx()).codegen_get_discr(bx, ret_ty)
+ } else {
+ span_bug!(span, "Invalid discriminant type for `{:?}`", arg_tys[0])
+ }
+ }
+
+ sym::const_allocate => {
+ // returns a null pointer at runtime.
+ bx.const_null(bx.type_i8p())
+ }
+
+ sym::const_deallocate => {
+ // nop at runtime.
+ return;
+ }
+
+ // This requires that atomic intrinsics follow a specific naming pattern:
+ // "atomic_<operation>[_<ordering>]"
+ name if let Some(atomic) = name_str.strip_prefix("atomic_") => {
+ use crate::common::AtomicOrdering::*;
+ use crate::common::{AtomicRmwBinOp, SynchronizationScope};
+
+ let Some((instruction, ordering)) = atomic.split_once('_') else {
+ bx.sess().fatal("Atomic intrinsic missing memory ordering");
+ };
+
+ let parse_ordering = |bx: &Bx, s| match s {
+ "unordered" => Unordered,
+ "relaxed" => Relaxed,
+ "acquire" => Acquire,
+ "release" => Release,
+ "acqrel" => AcquireRelease,
+ "seqcst" => SequentiallyConsistent,
+ _ => bx.sess().fatal("unknown ordering in atomic intrinsic"),
+ };
+
+ let invalid_monomorphization = |ty| {
+ span_invalid_monomorphization_error(
+ bx.tcx().sess,
+ span,
+ &format!(
+ "invalid monomorphization of `{}` intrinsic: \
+ expected basic integer type, found `{}`",
+ name, ty
+ ),
+ );
+ };
+
+ match instruction {
+ "cxchg" | "cxchgweak" => {
+ let Some((success, failure)) = ordering.split_once('_') else {
+ bx.sess().fatal("Atomic compare-exchange intrinsic missing failure memory ordering");
+ };
+ let ty = substs.type_at(0);
+ if int_type_width_signed(ty, bx.tcx()).is_some() || ty.is_unsafe_ptr() {
+ let weak = instruction == "cxchgweak";
+ let mut dst = args[0].immediate();
+ let mut cmp = args[1].immediate();
+ let mut src = args[2].immediate();
+ if ty.is_unsafe_ptr() {
+ // Some platforms do not support atomic operations on pointers,
+ // so we cast to integer first.
+ let ptr_llty = bx.type_ptr_to(bx.type_isize());
+ dst = bx.pointercast(dst, ptr_llty);
+ cmp = bx.ptrtoint(cmp, bx.type_isize());
+ src = bx.ptrtoint(src, bx.type_isize());
+ }
+ let pair = bx.atomic_cmpxchg(dst, cmp, src, parse_ordering(bx, success), parse_ordering(bx, failure), weak);
+ let val = bx.extract_value(pair, 0);
+ let success = bx.extract_value(pair, 1);
+ let val = bx.from_immediate(val);
+ let success = bx.from_immediate(success);
+
+ let dest = result.project_field(bx, 0);
+ bx.store(val, dest.llval, dest.align);
+ let dest = result.project_field(bx, 1);
+ bx.store(success, dest.llval, dest.align);
+ return;
+ } else {
+ return invalid_monomorphization(ty);
+ }
+ }
+
+ "load" => {
+ let ty = substs.type_at(0);
+ if int_type_width_signed(ty, bx.tcx()).is_some() || ty.is_unsafe_ptr() {
+ let layout = bx.layout_of(ty);
+ let size = layout.size;
+ let mut source = args[0].immediate();
+ if ty.is_unsafe_ptr() {
+ // Some platforms do not support atomic operations on pointers,
+ // so we cast to integer first...
+ let llty = bx.type_isize();
+ let ptr_llty = bx.type_ptr_to(llty);
+ source = bx.pointercast(source, ptr_llty);
+ let result = bx.atomic_load(llty, source, parse_ordering(bx, ordering), size);
+ // ... and then cast the result back to a pointer
+ bx.inttoptr(result, bx.backend_type(layout))
+ } else {
+ bx.atomic_load(bx.backend_type(layout), source, parse_ordering(bx, ordering), size)
+ }
+ } else {
+ return invalid_monomorphization(ty);
+ }
+ }
+
+ "store" => {
+ let ty = substs.type_at(0);
+ if int_type_width_signed(ty, bx.tcx()).is_some() || ty.is_unsafe_ptr() {
+ let size = bx.layout_of(ty).size;
+ let mut val = args[1].immediate();
+ let mut ptr = args[0].immediate();
+ if ty.is_unsafe_ptr() {
+ // Some platforms do not support atomic operations on pointers,
+ // so we cast to integer first.
+ let ptr_llty = bx.type_ptr_to(bx.type_isize());
+ ptr = bx.pointercast(ptr, ptr_llty);
+ val = bx.ptrtoint(val, bx.type_isize());
+ }
+ bx.atomic_store(val, ptr, parse_ordering(bx, ordering), size);
+ return;
+ } else {
+ return invalid_monomorphization(ty);
+ }
+ }
+
+ "fence" => {
+ bx.atomic_fence(parse_ordering(bx, ordering), SynchronizationScope::CrossThread);
+ return;
+ }
+
+ "singlethreadfence" => {
+ bx.atomic_fence(parse_ordering(bx, ordering), SynchronizationScope::SingleThread);
+ return;
+ }
+
+ // These are all AtomicRMW ops
+ op => {
+ let atom_op = match op {
+ "xchg" => AtomicRmwBinOp::AtomicXchg,
+ "xadd" => AtomicRmwBinOp::AtomicAdd,
+ "xsub" => AtomicRmwBinOp::AtomicSub,
+ "and" => AtomicRmwBinOp::AtomicAnd,
+ "nand" => AtomicRmwBinOp::AtomicNand,
+ "or" => AtomicRmwBinOp::AtomicOr,
+ "xor" => AtomicRmwBinOp::AtomicXor,
+ "max" => AtomicRmwBinOp::AtomicMax,
+ "min" => AtomicRmwBinOp::AtomicMin,
+ "umax" => AtomicRmwBinOp::AtomicUMax,
+ "umin" => AtomicRmwBinOp::AtomicUMin,
+ _ => bx.sess().fatal("unknown atomic operation"),
+ };
+
+ let ty = substs.type_at(0);
+ if int_type_width_signed(ty, bx.tcx()).is_some() || ty.is_unsafe_ptr() {
+ let mut ptr = args[0].immediate();
+ let mut val = args[1].immediate();
+ if ty.is_unsafe_ptr() {
+ // Some platforms do not support atomic operations on pointers,
+ // so we cast to integer first.
+ let ptr_llty = bx.type_ptr_to(bx.type_isize());
+ ptr = bx.pointercast(ptr, ptr_llty);
+ val = bx.ptrtoint(val, bx.type_isize());
+ }
+ bx.atomic_rmw(atom_op, ptr, val, parse_ordering(bx, ordering))
+ } else {
+ return invalid_monomorphization(ty);
+ }
+ }
+ }
+ }
+
+ sym::nontemporal_store => {
+ let dst = args[0].deref(bx.cx());
+ args[1].val.nontemporal_store(bx, dst);
+ return;
+ }
+
+ sym::ptr_guaranteed_eq | sym::ptr_guaranteed_ne => {
+ let a = args[0].immediate();
+ let b = args[1].immediate();
+ if name == sym::ptr_guaranteed_eq {
+ bx.icmp(IntPredicate::IntEQ, a, b)
+ } else {
+ bx.icmp(IntPredicate::IntNE, a, b)
+ }
+ }
+
+ sym::ptr_offset_from | sym::ptr_offset_from_unsigned => {
+ let ty = substs.type_at(0);
+ let pointee_size = bx.layout_of(ty).size;
+
+ let a = args[0].immediate();
+ let b = args[1].immediate();
+ let a = bx.ptrtoint(a, bx.type_isize());
+ let b = bx.ptrtoint(b, bx.type_isize());
+ let pointee_size = bx.const_usize(pointee_size.bytes());
+ if name == sym::ptr_offset_from {
+ // This is the same sequence that Clang emits for pointer subtraction.
+ // It can be neither `nsw` nor `nuw` because the input is treated as
+ // unsigned but then the output is treated as signed, so neither works.
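+ // That is, compute the byte difference `a - b` and divide it exactly by the
+ // pointee size, roughly `(a as isize - b as isize) / size_of::<T>() as isize`.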
+ let d = bx.sub(a, b);
+ // this is where the signed magic happens (notice the `s` in `exactsdiv`)
+ bx.exactsdiv(d, pointee_size)
+ } else {
+ // The `_unsigned` version knows the relative ordering of the pointers,
+ // so can use `sub nuw` and `udiv exact` instead of dealing in signed.
+ let d = bx.unchecked_usub(a, b);
+ bx.exactudiv(d, pointee_size)
+ }
+ }
+
+ _ => {
+ // Need to use backend-specific things in the implementation.
+ bx.codegen_intrinsic_call(instance, fn_abi, args, llresult, span);
+ return;
+ }
+ };
+
+ if !fn_abi.ret.is_ignore() {
+ if let PassMode::Cast(ty) = fn_abi.ret.mode {
+ let ptr_llty = bx.type_ptr_to(bx.cast_backend_type(&ty));
+ let ptr = bx.pointercast(result.llval, ptr_llty);
+ bx.store(llval, ptr, result.align);
+ } else {
+ OperandRef::from_immediate_or_packed_pair(bx, llval, result.layout)
+ .val
+ .store(bx, result);
+ }
+ }
+ }
+}
+
+// Returns the width of an int Ty, and whether it is signed.
+// Returns None if the type is not an integer.
+// FIXME: there are multiple copies of this function; investigate consolidating them with the
+// already existing helpers.
+fn int_type_width_signed(ty: Ty<'_>, tcx: TyCtxt<'_>) -> Option<(u64, bool)> {
+ match ty.kind() {
+ ty::Int(t) => {
+ Some((t.bit_width().unwrap_or(u64::from(tcx.sess.target.pointer_width)), true))
+ }
+ ty::Uint(t) => {
+ Some((t.bit_width().unwrap_or(u64::from(tcx.sess.target.pointer_width)), false))
+ }
+ _ => None,
+ }
+}
+
+// Returns the width of a float Ty
+// Returns None if the type is not a float
+fn float_type_width(ty: Ty<'_>) -> Option<u64> {
+ match ty.kind() {
+ ty::Float(t) => Some(t.bit_width()),
+ _ => None,
+ }
+}
diff --git a/compiler/rustc_codegen_ssa/src/mir/mod.rs b/compiler/rustc_codegen_ssa/src/mir/mod.rs
new file mode 100644
index 000000000..8ee375fa9
--- /dev/null
+++ b/compiler/rustc_codegen_ssa/src/mir/mod.rs
@@ -0,0 +1,410 @@
+use crate::traits::*;
+use rustc_middle::mir;
+use rustc_middle::mir::interpret::ErrorHandled;
+use rustc_middle::ty::layout::{FnAbiOf, HasTyCtxt, TyAndLayout};
+use rustc_middle::ty::{self, Instance, Ty, TypeFoldable, TypeVisitable};
+use rustc_target::abi::call::{FnAbi, PassMode};
+
+use std::iter;
+
+use rustc_index::bit_set::BitSet;
+use rustc_index::vec::IndexVec;
+
+use self::debuginfo::{FunctionDebugContext, PerLocalVarDebugInfo};
+use self::place::PlaceRef;
+use rustc_middle::mir::traversal;
+
+use self::operand::{OperandRef, OperandValue};
+
+/// Master context for codegenning from MIR.
+pub struct FunctionCx<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> {
+ instance: Instance<'tcx>,
+
+ mir: &'tcx mir::Body<'tcx>,
+
+ debug_context: Option<FunctionDebugContext<Bx::DIScope, Bx::DILocation>>,
+
+ llfn: Bx::Function,
+
+ cx: &'a Bx::CodegenCx,
+
+ fn_abi: &'tcx FnAbi<'tcx, Ty<'tcx>>,
+
+ /// When unwinding is initiated, we have to store this personality
+ /// value somewhere so that we can load it and re-use it in the
+ /// resume instruction. The personality is (afaik) some kind of
+ /// value used for C++ unwinding, which must filter by type: we
+ /// don't really care about it very much. Anyway, this value
+ /// contains an alloca into which the personality is stored and
+ /// then later loaded when generating the DIVERGE_BLOCK.
+ personality_slot: Option<PlaceRef<'tcx, Bx::Value>>,
+
+ /// A backend `BasicBlock` for each MIR `BasicBlock`, created lazily
+ /// as-needed (e.g. RPO reaching it or another block branching to it).
+ // FIXME(eddyb) rename `llbbs` and other `ll`-prefixed things to use a
+ // more backend-agnostic prefix such as `cg` (i.e. this would be `cgbbs`).
+ cached_llbbs: IndexVec<mir::BasicBlock, Option<Bx::BasicBlock>>,
+
+ /// The funclet status of each basic block
+ cleanup_kinds: IndexVec<mir::BasicBlock, analyze::CleanupKind>,
+
+ /// When targeting MSVC, this stores the cleanup info for each funclet BB.
+ /// This is initialized at the same time as the `landing_pads` entry for the
+ /// funclets' head block, i.e. when needed by an unwind / `cleanup_ret` edge.
+ funclets: IndexVec<mir::BasicBlock, Option<Bx::Funclet>>,
+
+ /// This stores the cached landing/cleanup pad block for a given BB.
+ // FIXME(eddyb) rename this to `eh_pads`.
+ landing_pads: IndexVec<mir::BasicBlock, Option<Bx::BasicBlock>>,
+
+ /// Cached unreachable block
+ unreachable_block: Option<Bx::BasicBlock>,
+
+ /// Cached double unwind guarding block
+ double_unwind_guard: Option<Bx::BasicBlock>,
+
+ /// The location where each MIR arg/var/tmp/ret is stored. This is
+ /// usually a `PlaceRef` representing an alloca, but not always:
+ /// sometimes we can skip the alloca and just store the value
+ /// directly using an `OperandRef`, which makes for tighter LLVM
+ /// IR. The conditions for using an `OperandRef` are as follows:
+ ///
+ /// - the type of the local must be judged "immediate" by `is_llvm_immediate`
+ /// - the operand must never be referenced indirectly
+ /// - we should not take its address using the `&` operator
+ /// - nor should it appear in a place path like `tmp.a`
+ /// - the operand must be defined by an rvalue that can generate immediate
+ /// values
+ ///
+ /// Avoiding allocs can also be important for certain intrinsics,
+ /// notably `expect`.
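+ ///
+ /// For example, a local like `let y = x + 1;` whose address is never taken can
+ /// stay an `OperandRef`, while writing `&y` anywhere forces it into an alloca.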
+ locals: IndexVec<mir::Local, LocalRef<'tcx, Bx::Value>>,
+
+ /// All `VarDebugInfo` from the MIR body, partitioned by `Local`.
+ /// This is `None` if no variable debuginfo/names are needed.
+ per_local_var_debug_info:
+ Option<IndexVec<mir::Local, Vec<PerLocalVarDebugInfo<'tcx, Bx::DIVariable>>>>,
+
+ /// Caller location propagated if this function has `#[track_caller]`.
+ caller_location: Option<OperandRef<'tcx, Bx::Value>>,
+}
+
+impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
+ pub fn monomorphize<T>(&self, value: T) -> T
+ where
+ T: Copy + TypeFoldable<'tcx>,
+ {
+ debug!("monomorphize: self.instance={:?}", self.instance);
+ self.instance.subst_mir_and_normalize_erasing_regions(
+ self.cx.tcx(),
+ ty::ParamEnv::reveal_all(),
+ value,
+ )
+ }
+}
+
+enum LocalRef<'tcx, V> {
+ Place(PlaceRef<'tcx, V>),
+ /// `UnsizedPlace(p)`: `p` itself is a thin pointer (indirect place).
+ /// `*p` is the fat pointer that references the actual unsized place.
+ /// Every time it is initialized, we have to reallocate the place
+ /// and update the fat pointer. That's the reason why it is indirect.
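+ /// (This typically only arises with the unstable `unsized_locals` /
+ /// `unsized_fn_params` features, e.g. a local of type `[u8]` or `dyn Trait`.)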
+ UnsizedPlace(PlaceRef<'tcx, V>),
+ Operand(Option<OperandRef<'tcx, V>>),
+}
+
+impl<'a, 'tcx, V: CodegenObject> LocalRef<'tcx, V> {
+ fn new_operand<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
+ bx: &mut Bx,
+ layout: TyAndLayout<'tcx>,
+ ) -> LocalRef<'tcx, V> {
+ if layout.is_zst() {
+ // Zero-size temporaries aren't always initialized, which
+ // doesn't matter because they don't contain data, but
+ // we need something in the operand.
+ LocalRef::Operand(Some(OperandRef::new_zst(bx, layout)))
+ } else {
+ LocalRef::Operand(None)
+ }
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////
+
+#[instrument(level = "debug", skip(cx))]
+pub fn codegen_mir<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
+ cx: &'a Bx::CodegenCx,
+ instance: Instance<'tcx>,
+) {
+ assert!(!instance.substs.needs_infer());
+
+ let llfn = cx.get_fn(instance);
+
+ let mir = cx.tcx().instance_mir(instance.def);
+
+ let fn_abi = cx.fn_abi_of_instance(instance, ty::List::empty());
+ debug!("fn_abi: {:?}", fn_abi);
+
+ let debug_context = cx.create_function_debug_context(instance, &fn_abi, llfn, &mir);
+
+ let start_llbb = Bx::append_block(cx, llfn, "start");
+ let mut bx = Bx::build(cx, start_llbb);
+
+ if mir.basic_blocks().iter().any(|bb| bb.is_cleanup) {
+ bx.set_personality_fn(cx.eh_personality());
+ }
+
+ let cleanup_kinds = analyze::cleanup_kinds(&mir);
+ let cached_llbbs: IndexVec<mir::BasicBlock, Option<Bx::BasicBlock>> = mir
+ .basic_blocks()
+ .indices()
+ .map(|bb| if bb == mir::START_BLOCK { Some(start_llbb) } else { None })
+ .collect();
+
+ let mut fx = FunctionCx {
+ instance,
+ mir,
+ llfn,
+ fn_abi,
+ cx,
+ personality_slot: None,
+ cached_llbbs,
+ unreachable_block: None,
+ double_unwind_guard: None,
+ cleanup_kinds,
+ landing_pads: IndexVec::from_elem(None, mir.basic_blocks()),
+ funclets: IndexVec::from_fn_n(|_| None, mir.basic_blocks().len()),
+ locals: IndexVec::new(),
+ debug_context,
+ per_local_var_debug_info: None,
+ caller_location: None,
+ };
+
+ fx.per_local_var_debug_info = fx.compute_per_local_var_debug_info(&mut bx);
+
+ // Evaluate all required consts; codegen later assumes that CTFE will never fail.
+ let mut all_consts_ok = true;
+ for const_ in &mir.required_consts {
+ if let Err(err) = fx.eval_mir_constant(const_) {
+ all_consts_ok = false;
+ match err {
+ // errored or at least linted
+ ErrorHandled::Reported(_) | ErrorHandled::Linted => {}
+ ErrorHandled::TooGeneric => {
+ span_bug!(const_.span, "codegen encountered polymorphic constant: {:?}", err)
+ }
+ }
+ }
+ }
+ if !all_consts_ok {
+ // We leave the IR in some half-built state here, and rely on this code not even being
+ // submitted to LLVM once an error was raised.
+ return;
+ }
+
+ let memory_locals = analyze::non_ssa_locals(&fx);
+
+ // Allocate variable and temp allocas
+ fx.locals = {
+ let args = arg_local_refs(&mut bx, &mut fx, &memory_locals);
+
+ let mut allocate_local = |local| {
+ let decl = &mir.local_decls[local];
+ let layout = bx.layout_of(fx.monomorphize(decl.ty));
+ assert!(!layout.ty.has_erasable_regions());
+
+ if local == mir::RETURN_PLACE && fx.fn_abi.ret.is_indirect() {
+ debug!("alloc: {:?} (return place) -> place", local);
+ let llretptr = bx.get_param(0);
+ return LocalRef::Place(PlaceRef::new_sized(llretptr, layout));
+ }
+
+ if memory_locals.contains(local) {
+ debug!("alloc: {:?} -> place", local);
+ if layout.is_unsized() {
+ LocalRef::UnsizedPlace(PlaceRef::alloca_unsized_indirect(&mut bx, layout))
+ } else {
+ LocalRef::Place(PlaceRef::alloca(&mut bx, layout))
+ }
+ } else {
+ debug!("alloc: {:?} -> operand", local);
+ LocalRef::new_operand(&mut bx, layout)
+ }
+ };
+
+ let retptr = allocate_local(mir::RETURN_PLACE);
+ iter::once(retptr)
+ .chain(args.into_iter())
+ .chain(mir.vars_and_temps_iter().map(allocate_local))
+ .collect()
+ };
+
+ // Apply debuginfo to the newly allocated locals.
+ fx.debug_introduce_locals(&mut bx);
+
+ // Codegen the body of each block using reverse postorder
+ for (bb, _) in traversal::reverse_postorder(&mir) {
+ fx.codegen_block(bb);
+ }
+}
+
+/// Produces, for each argument, a `LocalRef` holding either the
+/// argument's value directly (as an operand) or a place pointing
+/// at it.
+fn arg_local_refs<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
+ bx: &mut Bx,
+ fx: &mut FunctionCx<'a, 'tcx, Bx>,
+ memory_locals: &BitSet<mir::Local>,
+) -> Vec<LocalRef<'tcx, Bx::Value>> {
+ let mir = fx.mir;
+ let mut idx = 0;
+ let mut llarg_idx = fx.fn_abi.ret.is_indirect() as usize;
+
+ let mut num_untupled = None;
+
+ let args = mir
+ .args_iter()
+ .enumerate()
+ .map(|(arg_index, local)| {
+ let arg_decl = &mir.local_decls[local];
+
+ if Some(local) == mir.spread_arg {
+ // This argument (e.g., the last argument in the "rust-call" ABI)
+ // is a tuple that was spread at the ABI level, and now we have
+ // to reconstruct it into a tuple local variable from multiple
+ // individual LLVM function arguments.
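+ // For example, for a closure invoked through `Fn::call`, MIR sees a single
+ // tuple argument `(A, B)` while the ABI passes `A` and `B` separately, so
+ // each ABI argument is stored back into the corresponding tuple field.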
+
+ let arg_ty = fx.monomorphize(arg_decl.ty);
+ let ty::Tuple(tupled_arg_tys) = arg_ty.kind() else {
+ bug!("spread argument isn't a tuple?!");
+ };
+
+ let place = PlaceRef::alloca(bx, bx.layout_of(arg_ty));
+ for i in 0..tupled_arg_tys.len() {
+ let arg = &fx.fn_abi.args[idx];
+ idx += 1;
+ if arg.pad.is_some() {
+ llarg_idx += 1;
+ }
+ let pr_field = place.project_field(bx, i);
+ bx.store_fn_arg(arg, &mut llarg_idx, pr_field);
+ }
+ assert_eq!(
+ None,
+ num_untupled.replace(tupled_arg_tys.len()),
+ "Replaced existing num_tupled"
+ );
+
+ return LocalRef::Place(place);
+ }
+
+ if fx.fn_abi.c_variadic && arg_index == fx.fn_abi.args.len() {
+ let arg_ty = fx.monomorphize(arg_decl.ty);
+
+ let va_list = PlaceRef::alloca(bx, bx.layout_of(arg_ty));
+ bx.va_start(va_list.llval);
+
+ return LocalRef::Place(va_list);
+ }
+
+ let arg = &fx.fn_abi.args[idx];
+ idx += 1;
+ if arg.pad.is_some() {
+ llarg_idx += 1;
+ }
+
+ if !memory_locals.contains(local) {
+ // We don't have to cast or keep the argument in the alloca.
+ // FIXME(eddyb): We should figure out how to use llvm.dbg.value instead
+ // of putting everything in allocas just so we can use llvm.dbg.declare.
+ let local = |op| LocalRef::Operand(Some(op));
+ match arg.mode {
+ PassMode::Ignore => {
+ return local(OperandRef::new_zst(bx, arg.layout));
+ }
+ PassMode::Direct(_) => {
+ let llarg = bx.get_param(llarg_idx);
+ llarg_idx += 1;
+ return local(OperandRef::from_immediate_or_packed_pair(
+ bx, llarg, arg.layout,
+ ));
+ }
+ PassMode::Pair(..) => {
+ let (a, b) = (bx.get_param(llarg_idx), bx.get_param(llarg_idx + 1));
+ llarg_idx += 2;
+
+ return local(OperandRef {
+ val: OperandValue::Pair(a, b),
+ layout: arg.layout,
+ });
+ }
+ _ => {}
+ }
+ }
+
+ if arg.is_sized_indirect() {
+ // Don't copy an indirect argument to an alloca; the caller
+ // already put it in a temporary alloca and gave it up.
+ // FIXME: lifetimes
+ let llarg = bx.get_param(llarg_idx);
+ llarg_idx += 1;
+ LocalRef::Place(PlaceRef::new_sized(llarg, arg.layout))
+ } else if arg.is_unsized_indirect() {
+ // As the storage for the indirect argument lives during
+ // the whole function call, we just copy the fat pointer.
+ let llarg = bx.get_param(llarg_idx);
+ llarg_idx += 1;
+ let llextra = bx.get_param(llarg_idx);
+ llarg_idx += 1;
+ let indirect_operand = OperandValue::Pair(llarg, llextra);
+
+ let tmp = PlaceRef::alloca_unsized_indirect(bx, arg.layout);
+ indirect_operand.store(bx, tmp);
+ LocalRef::UnsizedPlace(tmp)
+ } else {
+ let tmp = PlaceRef::alloca(bx, arg.layout);
+ bx.store_fn_arg(arg, &mut llarg_idx, tmp);
+ LocalRef::Place(tmp)
+ }
+ })
+ .collect::<Vec<_>>();
+
+ if fx.instance.def.requires_caller_location(bx.tcx()) {
+ let mir_args = if let Some(num_untupled) = num_untupled {
+ // Subtract off the tupled argument that gets 'expanded'
+ args.len() - 1 + num_untupled
+ } else {
+ args.len()
+ };
+ assert_eq!(
+ fx.fn_abi.args.len(),
+ mir_args + 1,
+ "#[track_caller] instance {:?} must have 1 more argument in their ABI than in their MIR",
+ fx.instance
+ );
+
+ let arg = fx.fn_abi.args.last().unwrap();
+ match arg.mode {
+ PassMode::Direct(_) => (),
+ _ => bug!("caller location must be PassMode::Direct, found {:?}", arg.mode),
+ }
+
+ fx.caller_location = Some(OperandRef {
+ val: OperandValue::Immediate(bx.get_param(llarg_idx)),
+ layout: arg.layout,
+ });
+ }
+
+ args
+}
+
+mod analyze;
+mod block;
+pub mod constant;
+pub mod coverageinfo;
+pub mod debuginfo;
+mod intrinsic;
+pub mod operand;
+pub mod place;
+mod rvalue;
+mod statement;
diff --git a/compiler/rustc_codegen_ssa/src/mir/operand.rs b/compiler/rustc_codegen_ssa/src/mir/operand.rs
new file mode 100644
index 000000000..c612634fc
--- /dev/null
+++ b/compiler/rustc_codegen_ssa/src/mir/operand.rs
@@ -0,0 +1,461 @@
+use super::place::PlaceRef;
+use super::{FunctionCx, LocalRef};
+
+use crate::base;
+use crate::glue;
+use crate::traits::*;
+use crate::MemFlags;
+
+use rustc_middle::mir;
+use rustc_middle::mir::interpret::{ConstValue, Pointer, Scalar};
+use rustc_middle::ty::layout::{LayoutOf, TyAndLayout};
+use rustc_middle::ty::Ty;
+use rustc_target::abi::{Abi, Align, Size};
+
+use std::fmt;
+
+/// The representation of a Rust value. The enum variant is in fact
+/// uniquely determined by the value's type, but is kept as a
+/// safety check.
+#[derive(Copy, Clone, Debug)]
+pub enum OperandValue<V> {
+ /// A reference to the actual operand. The data is guaranteed
+ /// to be valid for the operand's lifetime.
+ /// The second value, if any, is the extra data (vtable or length)
+ /// which indicates that it refers to an unsized rvalue.
+ Ref(V, Option<V>, Align),
+ /// A single LLVM value.
+ Immediate(V),
+ /// A pair of immediate LLVM values. Used by fat pointers too.
+ Pair(V, V),
+}
+
+/// An `OperandRef` is an "SSA" reference to a Rust value, along with
+/// its type.
+///
+/// NOTE: unless you know a value's type exactly, you should not
+/// generate LLVM opcodes acting on it and instead act via methods,
+/// to avoid nasty edge cases. In particular, using `Builder::store`
+/// directly is sure to cause problems -- use `OperandRef::store`
+/// instead.
+#[derive(Copy, Clone)]
+pub struct OperandRef<'tcx, V> {
+ // The value.
+ pub val: OperandValue<V>,
+
+ // The layout of value, based on its Rust type.
+ pub layout: TyAndLayout<'tcx>,
+}
+
+impl<V: CodegenObject> fmt::Debug for OperandRef<'_, V> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(f, "OperandRef({:?} @ {:?})", self.val, self.layout)
+ }
+}
+
+impl<'a, 'tcx, V: CodegenObject> OperandRef<'tcx, V> {
+ pub fn new_zst<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
+ bx: &mut Bx,
+ layout: TyAndLayout<'tcx>,
+ ) -> OperandRef<'tcx, V> {
+ assert!(layout.is_zst());
+ OperandRef {
+ val: OperandValue::Immediate(bx.const_undef(bx.immediate_backend_type(layout))),
+ layout,
+ }
+ }
+
+ pub fn from_const<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
+ bx: &mut Bx,
+ val: ConstValue<'tcx>,
+ ty: Ty<'tcx>,
+ ) -> Self {
+ let layout = bx.layout_of(ty);
+
+ if layout.is_zst() {
+ return OperandRef::new_zst(bx, layout);
+ }
+
+ let val = match val {
+ ConstValue::Scalar(x) => {
+ let Abi::Scalar(scalar) = layout.abi else {
+ bug!("from_const: invalid ByVal layout: {:#?}", layout);
+ };
+ let llval = bx.scalar_to_backend(x, scalar, bx.immediate_backend_type(layout));
+ OperandValue::Immediate(llval)
+ }
+ ConstValue::ZeroSized => {
+ let llval = bx.zst_to_backend(bx.immediate_backend_type(layout));
+ OperandValue::Immediate(llval)
+ }
+ ConstValue::Slice { data, start, end } => {
+ let Abi::ScalarPair(a_scalar, _) = layout.abi else {
+ bug!("from_const: invalid ScalarPair layout: {:#?}", layout);
+ };
+ let a = Scalar::from_pointer(
+ Pointer::new(bx.tcx().create_memory_alloc(data), Size::from_bytes(start)),
+ &bx.tcx(),
+ );
+ let a_llval = bx.scalar_to_backend(
+ a,
+ a_scalar,
+ bx.scalar_pair_element_backend_type(layout, 0, true),
+ );
+ let b_llval = bx.const_usize((end - start) as u64);
+ OperandValue::Pair(a_llval, b_llval)
+ }
+ ConstValue::ByRef { alloc, offset } => {
+ return bx.load_operand(bx.from_const_alloc(layout, alloc, offset));
+ }
+ };
+
+ OperandRef { val, layout }
+ }
+
+ /// Asserts that this operand refers to a scalar and returns
+ /// a reference to its value.
+ pub fn immediate(self) -> V {
+ match self.val {
+ OperandValue::Immediate(s) => s,
+ _ => bug!("not immediate: {:?}", self),
+ }
+ }
+
+ pub fn deref<Cx: LayoutTypeMethods<'tcx>>(self, cx: &Cx) -> PlaceRef<'tcx, V> {
+ if self.layout.ty.is_box() {
+ bug!("dereferencing {:?} in codegen", self.layout.ty);
+ }
+
+ let projected_ty = self
+ .layout
+ .ty
+ .builtin_deref(true)
+ .unwrap_or_else(|| bug!("deref of non-pointer {:?}", self))
+ .ty;
+
+ let (llptr, llextra) = match self.val {
+ OperandValue::Immediate(llptr) => (llptr, None),
+ OperandValue::Pair(llptr, llextra) => (llptr, Some(llextra)),
+ OperandValue::Ref(..) => bug!("Deref of by-Ref operand {:?}", self),
+ };
+ let layout = cx.layout_of(projected_ty);
+ PlaceRef { llval: llptr, llextra, layout, align: layout.align.abi }
+ }
+
+ /// If this operand is a `Pair`, we return an aggregate with the two values.
+ /// For other cases, see `immediate`.
+ pub fn immediate_or_packed_pair<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
+ self,
+ bx: &mut Bx,
+ ) -> V {
+ if let OperandValue::Pair(a, b) = self.val {
+ let llty = bx.cx().backend_type(self.layout);
+ debug!("Operand::immediate_or_packed_pair: packing {:?} into {:?}", self, llty);
+ // Reconstruct the immediate aggregate.
+ let mut llpair = bx.cx().const_undef(llty);
+ let imm_a = bx.from_immediate(a);
+ let imm_b = bx.from_immediate(b);
+ llpair = bx.insert_value(llpair, imm_a, 0);
+ llpair = bx.insert_value(llpair, imm_b, 1);
+ llpair
+ } else {
+ self.immediate()
+ }
+ }
+
+ /// If the type is a pair, we return a `Pair`, otherwise, an `Immediate`.
+ pub fn from_immediate_or_packed_pair<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
+ bx: &mut Bx,
+ llval: V,
+ layout: TyAndLayout<'tcx>,
+ ) -> Self {
+ let val = if let Abi::ScalarPair(a, b) = layout.abi {
+ debug!("Operand::from_immediate_or_packed_pair: unpacking {:?} @ {:?}", llval, layout);
+
+ // Deconstruct the immediate aggregate.
+ let a_llval = bx.extract_value(llval, 0);
+ let a_llval = bx.to_immediate_scalar(a_llval, a);
+ let b_llval = bx.extract_value(llval, 1);
+ let b_llval = bx.to_immediate_scalar(b_llval, b);
+ OperandValue::Pair(a_llval, b_llval)
+ } else {
+ OperandValue::Immediate(llval)
+ };
+ OperandRef { val, layout }
+ }
+
+ pub fn extract_field<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
+ &self,
+ bx: &mut Bx,
+ i: usize,
+ ) -> Self {
+ let field = self.layout.field(bx.cx(), i);
+ let offset = self.layout.fields.offset(i);
+
+ let mut val = match (self.val, self.layout.abi) {
+ // If the field is ZST, it has no data.
+ _ if field.is_zst() => {
+ return OperandRef::new_zst(bx, field);
+ }
+
+ // Newtype of a scalar, scalar pair or vector.
+ (OperandValue::Immediate(_) | OperandValue::Pair(..), _)
+ if field.size == self.layout.size =>
+ {
+ assert_eq!(offset.bytes(), 0);
+ self.val
+ }
+
+ // Extract a scalar component from a pair.
+ (OperandValue::Pair(a_llval, b_llval), Abi::ScalarPair(a, b)) => {
+ if offset.bytes() == 0 {
+ assert_eq!(field.size, a.size(bx.cx()));
+ OperandValue::Immediate(a_llval)
+ } else {
+ assert_eq!(offset, a.size(bx.cx()).align_to(b.align(bx.cx()).abi));
+ assert_eq!(field.size, b.size(bx.cx()));
+ OperandValue::Immediate(b_llval)
+ }
+ }
+
+ // `#[repr(simd)]` types are also immediate.
+ (OperandValue::Immediate(llval), Abi::Vector { .. }) => {
+ OperandValue::Immediate(bx.extract_element(llval, bx.cx().const_usize(i as u64)))
+ }
+
+ _ => bug!("OperandRef::extract_field({:?}): not applicable", self),
+ };
+
+ match (&mut val, field.abi) {
+ (OperandValue::Immediate(llval), _) => {
+ // Bools in union fields need to be truncated.
+ *llval = bx.to_immediate(*llval, field);
+ // HACK(eddyb) have to bitcast pointers until LLVM removes pointee types.
+ *llval = bx.bitcast(*llval, bx.cx().immediate_backend_type(field));
+ }
+ (OperandValue::Pair(a, b), Abi::ScalarPair(a_abi, b_abi)) => {
+ // Bools in union fields need to be truncated.
+ *a = bx.to_immediate_scalar(*a, a_abi);
+ *b = bx.to_immediate_scalar(*b, b_abi);
+ // HACK(eddyb) have to bitcast pointers until LLVM removes pointee types.
+ *a = bx.bitcast(*a, bx.cx().scalar_pair_element_backend_type(field, 0, true));
+ *b = bx.bitcast(*b, bx.cx().scalar_pair_element_backend_type(field, 1, true));
+ }
+ (OperandValue::Pair(..), _) => bug!(),
+ (OperandValue::Ref(..), _) => bug!(),
+ }
+
+ OperandRef { val, layout: field }
+ }
+}
+
+impl<'a, 'tcx, V: CodegenObject> OperandValue<V> {
+ pub fn store<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
+ self,
+ bx: &mut Bx,
+ dest: PlaceRef<'tcx, V>,
+ ) {
+ self.store_with_flags(bx, dest, MemFlags::empty());
+ }
+
+ pub fn volatile_store<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
+ self,
+ bx: &mut Bx,
+ dest: PlaceRef<'tcx, V>,
+ ) {
+ self.store_with_flags(bx, dest, MemFlags::VOLATILE);
+ }
+
+ pub fn unaligned_volatile_store<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
+ self,
+ bx: &mut Bx,
+ dest: PlaceRef<'tcx, V>,
+ ) {
+ self.store_with_flags(bx, dest, MemFlags::VOLATILE | MemFlags::UNALIGNED);
+ }
+
+ pub fn nontemporal_store<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
+ self,
+ bx: &mut Bx,
+ dest: PlaceRef<'tcx, V>,
+ ) {
+ self.store_with_flags(bx, dest, MemFlags::NONTEMPORAL);
+ }
+
+ fn store_with_flags<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
+ self,
+ bx: &mut Bx,
+ dest: PlaceRef<'tcx, V>,
+ flags: MemFlags,
+ ) {
+ debug!("OperandRef::store: operand={:?}, dest={:?}", self, dest);
+ // Avoid generating stores of zero-sized values, because the only way to have a zero-sized
+ // value is through `undef`, and such a store would be useless anyway.
+ if dest.layout.is_zst() {
+ return;
+ }
+ match self {
+ OperandValue::Ref(r, None, source_align) => {
+ if flags.contains(MemFlags::NONTEMPORAL) {
+ // HACK(nox): This is inefficient but there is no nontemporal memcpy.
+ let ty = bx.backend_type(dest.layout);
+ let ptr = bx.pointercast(r, bx.type_ptr_to(ty));
+ let val = bx.load(ty, ptr, source_align);
+ bx.store_with_flags(val, dest.llval, dest.align, flags);
+ return;
+ }
+ base::memcpy_ty(bx, dest.llval, dest.align, r, source_align, dest.layout, flags)
+ }
+ OperandValue::Ref(_, Some(_), _) => {
+ bug!("cannot directly store unsized values");
+ }
+ OperandValue::Immediate(s) => {
+ let val = bx.from_immediate(s);
+ bx.store_with_flags(val, dest.llval, dest.align, flags);
+ }
+ OperandValue::Pair(a, b) => {
+ let Abi::ScalarPair(a_scalar, b_scalar) = dest.layout.abi else {
+ bug!("store_with_flags: invalid ScalarPair layout: {:#?}", dest.layout);
+ };
+ let ty = bx.backend_type(dest.layout);
+ let b_offset = a_scalar.size(bx).align_to(b_scalar.align(bx).abi);
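+ // Illustrative example: for a scalar pair of `i8` and `i64` on a target where
+ // `i64` is 8-byte aligned, `b_offset = align_to(1, 8) = 8`, and the second
+ // store below uses `dest.align` restricted for that offset.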
+
+ let llptr = bx.struct_gep(ty, dest.llval, 0);
+ let val = bx.from_immediate(a);
+ let align = dest.align;
+ bx.store_with_flags(val, llptr, align, flags);
+
+ let llptr = bx.struct_gep(ty, dest.llval, 1);
+ let val = bx.from_immediate(b);
+ let align = dest.align.restrict_for_offset(b_offset);
+ bx.store_with_flags(val, llptr, align, flags);
+ }
+ }
+ }
+
+ pub fn store_unsized<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
+ self,
+ bx: &mut Bx,
+ indirect_dest: PlaceRef<'tcx, V>,
+ ) {
+ debug!("OperandRef::store_unsized: operand={:?}, indirect_dest={:?}", self, indirect_dest);
+ let flags = MemFlags::empty();
+
+ // `indirect_dest` must have `*mut T` type. We extract `T` out of it.
+ let unsized_ty = indirect_dest
+ .layout
+ .ty
+ .builtin_deref(true)
+ .unwrap_or_else(|| bug!("indirect_dest has non-pointer type: {:?}", indirect_dest))
+ .ty;
+
+ let OperandValue::Ref(llptr, Some(llextra), _) = self else {
+ bug!("store_unsized called with a sized value")
+ };
+
+ // FIXME: choose an appropriate alignment, or use dynamic align somehow
+ let max_align = Align::from_bits(128).unwrap();
+ let min_align = Align::from_bits(8).unwrap();
+
+ // Allocate an appropriate region on the stack, and copy the value into it
+ let (llsize, _) = glue::size_and_align_of_dst(bx, unsized_ty, Some(llextra));
+ let lldst = bx.array_alloca(bx.cx().type_i8(), llsize, max_align);
+ bx.memcpy(lldst, max_align, llptr, min_align, llsize, flags);
+
+ // Store the allocated region and the extra to the indirect place.
+ let indirect_operand = OperandValue::Pair(lldst, llextra);
+ indirect_operand.store(bx, indirect_dest);
+ }
+}
+
+impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
+ fn maybe_codegen_consume_direct(
+ &mut self,
+ bx: &mut Bx,
+ place_ref: mir::PlaceRef<'tcx>,
+ ) -> Option<OperandRef<'tcx, Bx::Value>> {
+ debug!("maybe_codegen_consume_direct(place_ref={:?})", place_ref);
+
+ match self.locals[place_ref.local] {
+ LocalRef::Operand(Some(mut o)) => {
+ // Moves out of scalar and scalar pair fields are trivial.
+ for elem in place_ref.projection.iter() {
+ match elem {
+ mir::ProjectionElem::Field(ref f, _) => {
+ o = o.extract_field(bx, f.index());
+ }
+ mir::ProjectionElem::Index(_)
+ | mir::ProjectionElem::ConstantIndex { .. } => {
+ // ZSTs don't require any actual memory access.
+ // FIXME(eddyb) deduplicate this with the identical
+ // checks in `codegen_consume` and `extract_field`.
+ let elem = o.layout.field(bx.cx(), 0);
+ if elem.is_zst() {
+ o = OperandRef::new_zst(bx, elem);
+ } else {
+ return None;
+ }
+ }
+ _ => return None,
+ }
+ }
+
+ Some(o)
+ }
+ LocalRef::Operand(None) => {
+ bug!("use of {:?} before def", place_ref);
+ }
+ LocalRef::Place(..) | LocalRef::UnsizedPlace(..) => {
+ // This local is backed by a place (it has an alloca or is unsized),
+ // so it cannot be consumed directly; the caller loads it from memory.
+ None
+ }
+ }
+ }
+
+ pub fn codegen_consume(
+ &mut self,
+ bx: &mut Bx,
+ place_ref: mir::PlaceRef<'tcx>,
+ ) -> OperandRef<'tcx, Bx::Value> {
+ debug!("codegen_consume(place_ref={:?})", place_ref);
+
+ let ty = self.monomorphized_place_ty(place_ref);
+ let layout = bx.cx().layout_of(ty);
+
+ // ZSTs don't require any actual memory access.
+ if layout.is_zst() {
+ return OperandRef::new_zst(bx, layout);
+ }
+
+ if let Some(o) = self.maybe_codegen_consume_direct(bx, place_ref) {
+ return o;
+ }
+
+ // for most places, to consume them we just load them
+ // out from their home
+ let place = self.codegen_place(bx, place_ref);
+ bx.load_operand(place)
+ }
+
+ pub fn codegen_operand(
+ &mut self,
+ bx: &mut Bx,
+ operand: &mir::Operand<'tcx>,
+ ) -> OperandRef<'tcx, Bx::Value> {
+ debug!("codegen_operand(operand={:?})", operand);
+
+ match *operand {
+ mir::Operand::Copy(ref place) | mir::Operand::Move(ref place) => {
+ self.codegen_consume(bx, place.as_ref())
+ }
+
+ mir::Operand::Constant(ref constant) => {
+ // This cannot fail because we checked all required_consts in advance.
+ self.eval_mir_constant_to_operand(bx, constant).unwrap_or_else(|_err| {
+ span_bug!(constant.span, "erroneous constant not captured by required_consts")
+ })
+ }
+ }
+ }
+}
diff --git a/compiler/rustc_codegen_ssa/src/mir/place.rs b/compiler/rustc_codegen_ssa/src/mir/place.rs
new file mode 100644
index 000000000..268c4d765
--- /dev/null
+++ b/compiler/rustc_codegen_ssa/src/mir/place.rs
@@ -0,0 +1,549 @@
+use super::operand::OperandValue;
+use super::{FunctionCx, LocalRef};
+
+use crate::common::IntPredicate;
+use crate::glue;
+use crate::traits::*;
+use crate::MemFlags;
+
+use rustc_middle::mir;
+use rustc_middle::mir::tcx::PlaceTy;
+use rustc_middle::ty::layout::{HasTyCtxt, LayoutOf, TyAndLayout};
+use rustc_middle::ty::{self, Ty};
+use rustc_target::abi::{Abi, Align, FieldsShape, Int, TagEncoding};
+use rustc_target::abi::{VariantIdx, Variants};
+
+#[derive(Copy, Clone, Debug)]
+pub struct PlaceRef<'tcx, V> {
+ /// A pointer to the contents of the place.
+ pub llval: V,
+
+ /// This place's extra data (vtable or length) if it is unsized, or `None` if it needs no metadata.
+ pub llextra: Option<V>,
+
+ /// The monomorphized type of this place, including variant information.
+ pub layout: TyAndLayout<'tcx>,
+
+ /// The alignment we know for this place.
+ pub align: Align,
+}
+
+impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> {
+ pub fn new_sized(llval: V, layout: TyAndLayout<'tcx>) -> PlaceRef<'tcx, V> {
+ assert!(!layout.is_unsized());
+ PlaceRef { llval, llextra: None, layout, align: layout.align.abi }
+ }
+
+ pub fn new_sized_aligned(
+ llval: V,
+ layout: TyAndLayout<'tcx>,
+ align: Align,
+ ) -> PlaceRef<'tcx, V> {
+ assert!(!layout.is_unsized());
+ PlaceRef { llval, llextra: None, layout, align }
+ }
+
+ // FIXME(eddyb) pass something else for the name so no work is done
+ // unless LLVM IR names are turned on (e.g. for `--emit=llvm-ir`).
+ pub fn alloca<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
+ bx: &mut Bx,
+ layout: TyAndLayout<'tcx>,
+ ) -> Self {
+ assert!(!layout.is_unsized(), "tried to statically allocate unsized place");
+ let tmp = bx.alloca(bx.cx().backend_type(layout), layout.align.abi);
+ Self::new_sized(tmp, layout)
+ }
+
+ /// Returns a place for an indirect reference to an unsized place.
+ // FIXME(eddyb) pass something else for the name so no work is done
+ // unless LLVM IR names are turned on (e.g. for `--emit=llvm-ir`).
+ pub fn alloca_unsized_indirect<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
+ bx: &mut Bx,
+ layout: TyAndLayout<'tcx>,
+ ) -> Self {
+ assert!(layout.is_unsized(), "tried to allocate indirect place for sized values");
+ let ptr_ty = bx.cx().tcx().mk_mut_ptr(layout.ty);
+ let ptr_layout = bx.cx().layout_of(ptr_ty);
+ Self::alloca(bx, ptr_layout)
+ }
+
+ pub fn len<Cx: ConstMethods<'tcx, Value = V>>(&self, cx: &Cx) -> V {
+ if let FieldsShape::Array { count, .. } = self.layout.fields {
+ if self.layout.is_unsized() {
+ assert_eq!(count, 0);
+ self.llextra.unwrap()
+ } else {
+ cx.const_usize(count)
+ }
+ } else {
+ bug!("unexpected layout `{:#?}` in PlaceRef::len", self.layout)
+ }
+ }
+}
+
+impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> {
+ /// Access a field, at a point when the value's case is known.
+ pub fn project_field<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
+ self,
+ bx: &mut Bx,
+ ix: usize,
+ ) -> Self {
+ let field = self.layout.field(bx.cx(), ix);
+ let offset = self.layout.fields.offset(ix);
+ let effective_field_align = self.align.restrict_for_offset(offset);
+
+ let mut simple = || {
+ let llval = match self.layout.abi {
+ _ if offset.bytes() == 0 => {
+ // Unions and newtypes only use an offset of 0.
+ // Also handles the first field of Scalar, ScalarPair, and Vector layouts.
+ self.llval
+ }
+ Abi::ScalarPair(a, b)
+ if offset == a.size(bx.cx()).align_to(b.align(bx.cx()).abi) =>
+ {
+ // Offset matches second field.
+ let ty = bx.backend_type(self.layout);
+ bx.struct_gep(ty, self.llval, 1)
+ }
+ Abi::Scalar(_) | Abi::ScalarPair(..) | Abi::Vector { .. } if field.is_zst() => {
+ // ZST fields are not included in Scalar, ScalarPair, and Vector layouts, so manually offset the pointer.
+ let byte_ptr = bx.pointercast(self.llval, bx.cx().type_i8p());
+ bx.gep(bx.cx().type_i8(), byte_ptr, &[bx.const_usize(offset.bytes())])
+ }
+ Abi::Scalar(_) | Abi::ScalarPair(..) => {
+ // All fields of Scalar and ScalarPair layouts must have been handled by this point.
+ // Vector layouts have additional fields for each element of the vector, so don't panic in that case.
+ bug!(
+ "offset of non-ZST field `{:?}` does not match layout `{:#?}`",
+ field,
+ self.layout
+ );
+ }
+ _ => {
+ let ty = bx.backend_type(self.layout);
+ bx.struct_gep(ty, self.llval, bx.cx().backend_field_index(self.layout, ix))
+ }
+ };
+ PlaceRef {
+ // HACK(eddyb): have to bitcast pointers until LLVM removes pointee types.
+ llval: bx.pointercast(llval, bx.cx().type_ptr_to(bx.cx().backend_type(field))),
+ llextra: if bx.cx().type_has_metadata(field.ty) { self.llextra } else { None },
+ layout: field,
+ align: effective_field_align,
+ }
+ };
+
+ // Simple cases, which don't need DST adjustment:
+ // * no metadata available - just log the case
+ // * known alignment - sized types, `[T]`, `str` or a foreign type
+ // * packed struct - there is no alignment padding
+ match field.ty.kind() {
+ _ if self.llextra.is_none() => {
+ debug!(
+ "unsized field `{}`, of `{:?}` has no metadata for adjustment",
+ ix, self.llval
+ );
+ return simple();
+ }
+ _ if !field.is_unsized() => return simple(),
+ ty::Slice(..) | ty::Str | ty::Foreign(..) => return simple(),
+ ty::Adt(def, _) => {
+ if def.repr().packed() {
+ // FIXME(eddyb) generalize the adjustment when we
+ // start supporting packing to larger alignments.
+ assert_eq!(self.layout.align.abi.bytes(), 1);
+ return simple();
+ }
+ }
+ _ => {}
+ }
+
+ // We need to get the pointer manually now.
+ // We do this by casting to a `*i8`, then offsetting it by the appropriate amount.
+ // We do this instead of, say, simply adjusting the pointer from the result of a GEP
+ // because the field may have an arbitrary alignment in the LLVM representation
+ // anyway.
+ //
+ // To demonstrate:
+ //
+ // struct Foo<T: ?Sized> {
+ // x: u16,
+ // y: T
+ // }
+ //
+ // The type `Foo<Foo<Trait>>` is represented in LLVM as `{ u16, { u16, u8 }}`, meaning that
+ // the `y` field has 16-bit alignment.
+
+ let meta = self.llextra;
+
+ let unaligned_offset = bx.cx().const_usize(offset.bytes());
+
+ // Get the alignment of the field
+ let (_, unsized_align) = glue::size_and_align_of_dst(bx, field.ty, meta);
+
+ // Bump the unaligned offset up to the appropriate alignment
+ let offset = round_up_const_value_to_alignment(bx, unaligned_offset, unsized_align);
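+ // Continuing the `Foo<Foo<Trait>>` example above: the statically known offset of
+ // the outer `y` is 2 bytes, but if the erased tail's concrete type turns out to
+ // need 4-byte alignment at runtime, the rounded-up dynamic offset becomes 4.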
+
+ debug!("struct_field_ptr: DST field offset: {:?}", offset);
+
+ // Cast and adjust pointer.
+ let byte_ptr = bx.pointercast(self.llval, bx.cx().type_i8p());
+ let byte_ptr = bx.gep(bx.cx().type_i8(), byte_ptr, &[offset]);
+
+ // Finally, cast back to the type expected.
+ let ll_fty = bx.cx().backend_type(field);
+ debug!("struct_field_ptr: Field type is {:?}", ll_fty);
+
+ PlaceRef {
+ llval: bx.pointercast(byte_ptr, bx.cx().type_ptr_to(ll_fty)),
+ llextra: self.llextra,
+ layout: field,
+ align: effective_field_align,
+ }
+ }
+
+ /// Obtain the actual discriminant of a value.
+ #[instrument(level = "trace", skip(bx))]
+ pub fn codegen_get_discr<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
+ self,
+ bx: &mut Bx,
+ cast_to: Ty<'tcx>,
+ ) -> V {
+ let cast_to = bx.cx().immediate_backend_type(bx.cx().layout_of(cast_to));
+ if self.layout.abi.is_uninhabited() {
+ return bx.cx().const_undef(cast_to);
+ }
+ let (tag_scalar, tag_encoding, tag_field) = match self.layout.variants {
+ Variants::Single { index } => {
+ let discr_val = self
+ .layout
+ .ty
+ .discriminant_for_variant(bx.cx().tcx(), index)
+ .map_or(index.as_u32() as u128, |discr| discr.val);
+ return bx.cx().const_uint_big(cast_to, discr_val);
+ }
+ Variants::Multiple { tag, ref tag_encoding, tag_field, .. } => {
+ (tag, tag_encoding, tag_field)
+ }
+ };
+
+ // Read the tag/niche-encoded discriminant from memory.
+ let tag = self.project_field(bx, tag_field);
+ let tag = bx.load_operand(tag);
+
+ // Decode the discriminant (specifically if it's niche-encoded).
+ match *tag_encoding {
+ TagEncoding::Direct => {
+ let signed = match tag_scalar.primitive() {
+ // We use `i1` for bytes that are always `0` or `1`,
+ // e.g., `#[repr(i8)] enum E { A, B }`, but we can't
+ // let LLVM interpret the `i1` as signed, because
+ // then `i1 1` (i.e., `E::B`) is effectively `i8 -1`.
+ Int(_, signed) => !tag_scalar.is_bool() && signed,
+ _ => false,
+ };
+ bx.intcast(tag.immediate(), cast_to, signed)
+ }
+ TagEncoding::Niche { dataful_variant, ref niche_variants, niche_start } => {
+ // Rebase from niche values to discriminants, and check
+ // whether the result is in range for the niche variants.
+ let niche_llty = bx.cx().immediate_backend_type(tag.layout);
+ let tag = tag.immediate();
+
+ // We first compute the "relative discriminant" (wrt `niche_variants`),
+ // that is, if `n = niche_variants.end() - niche_variants.start()`,
+ // we remap `niche_start..=niche_start + n` (which may wrap around)
+ // to (non-wrap-around) `0..=n`, to be able to check whether the
+ // discriminant corresponds to a niche variant with one comparison.
+ // We also can't go directly to the (variant index) discriminant
+ // and check that it is in the range `niche_variants`, because
+ // that might not fit in the same type, on top of needing an extra
+ // comparison (see also the comment on `let niche_discr`).
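+ // Worked example: with `niche_variants = 3..=5` and `niche_start = 251` in a
+ // `u8` tag, tags 251/252/253 give relative discriminants 0/1/2 and decode to
+ // variants 3/4/5; any other tag value selects the dataful variant.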
+ let relative_discr = if niche_start == 0 {
+ // Avoid subtracting `0`, which wouldn't work for pointers.
+ // FIXME(eddyb) check the actual primitive type here.
+ tag
+ } else {
+ bx.sub(tag, bx.cx().const_uint_big(niche_llty, niche_start))
+ };
+ let relative_max = niche_variants.end().as_u32() - niche_variants.start().as_u32();
+ let is_niche = if relative_max == 0 {
+ // Avoid calling `const_uint`, which wouldn't work for pointers.
+ // Also use canonical == 0 instead of non-canonical u<= 0.
+ // FIXME(eddyb) check the actual primitive type here.
+ bx.icmp(IntPredicate::IntEQ, relative_discr, bx.cx().const_null(niche_llty))
+ } else {
+ let relative_max = bx.cx().const_uint(niche_llty, relative_max as u64);
+ bx.icmp(IntPredicate::IntULE, relative_discr, relative_max)
+ };
+
+ // NOTE(eddyb) this addition needs to be performed on the final
+ // type, in case the niche itself can't represent all variant
+ // indices (e.g. `u8` niche with more than `256` variants,
+ // but enough uninhabited variants so that the remaining variants
+ // fit in the niche).
+ // In other words, `niche_variants.end - niche_variants.start`
+ // is representable in the niche, but `niche_variants.end`
+ // might not be, in extreme cases.
+ let niche_discr = {
+ let relative_discr = if relative_max == 0 {
+ // HACK(eddyb) since we have only one niche, we know which
+ // one it is, and we can avoid having a dynamic value here.
+ bx.cx().const_uint(cast_to, 0)
+ } else {
+ bx.intcast(relative_discr, cast_to, false)
+ };
+ bx.add(
+ relative_discr,
+ bx.cx().const_uint(cast_to, niche_variants.start().as_u32() as u64),
+ )
+ };
+
+ bx.select(
+ is_niche,
+ niche_discr,
+ bx.cx().const_uint(cast_to, dataful_variant.as_u32() as u64),
+ )
+ }
+ }
+ }
+
+ /// Sets the discriminant for a new value of the given case of the given
+ /// representation.
+ pub fn codegen_set_discr<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
+ &self,
+ bx: &mut Bx,
+ variant_index: VariantIdx,
+ ) {
+ if self.layout.for_variant(bx.cx(), variant_index).abi.is_uninhabited() {
+ // We play it safe by using a well-defined `abort`, but we could go for immediate UB
+ // if that turns out to be helpful.
+ bx.abort();
+ return;
+ }
+ match self.layout.variants {
+ Variants::Single { index } => {
+ assert_eq!(index, variant_index);
+ }
+ Variants::Multiple { tag_encoding: TagEncoding::Direct, tag_field, .. } => {
+ let ptr = self.project_field(bx, tag_field);
+ let to =
+ self.layout.ty.discriminant_for_variant(bx.tcx(), variant_index).unwrap().val;
+ bx.store(
+ bx.cx().const_uint_big(bx.cx().backend_type(ptr.layout), to),
+ ptr.llval,
+ ptr.align,
+ );
+ }
+ Variants::Multiple {
+ tag_encoding:
+ TagEncoding::Niche { dataful_variant, ref niche_variants, niche_start },
+ tag_field,
+ ..
+ } => {
+ if variant_index != dataful_variant {
+ if bx.cx().sess().target.arch == "arm"
+ || bx.cx().sess().target.arch == "aarch64"
+ {
+ // FIXME(#34427): as a workaround for an LLVM bug on ARM,
+ // use a memset of 0 before assigning the niche value.
+ let fill_byte = bx.cx().const_u8(0);
+ let size = bx.cx().const_usize(self.layout.size.bytes());
+ bx.memset(self.llval, fill_byte, size, self.align, MemFlags::empty());
+ }
+
+ let niche = self.project_field(bx, tag_field);
+ let niche_llty = bx.cx().immediate_backend_type(niche.layout);
+ let niche_value = variant_index.as_u32() - niche_variants.start().as_u32();
+ let niche_value = (niche_value as u128).wrapping_add(niche_start);
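+ // E.g. encoding variant 4 with `niche_variants = 3..=5` and `niche_start = 251`
+ // stores the tag value (4 - 3) + 251 = 252, matching the decoding in
+ // `codegen_get_discr` above.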
+ // FIXME(eddyb): check the actual primitive type here.
+ let niche_llval = if niche_value == 0 {
+ // HACK(eddyb): using `c_null` as it works on all types.
+ bx.cx().const_null(niche_llty)
+ } else {
+ bx.cx().const_uint_big(niche_llty, niche_value)
+ };
+ OperandValue::Immediate(niche_llval).store(bx, niche);
+ }
+ }
+ }
+ }
+
+ pub fn project_index<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
+ &self,
+ bx: &mut Bx,
+ llindex: V,
+ ) -> Self {
+ // Statically compute the offset if we can, otherwise just use the element size,
+ // as this will yield the lowest alignment.
+ let layout = self.layout.field(bx, 0);
+ let offset = if let Some(llindex) = bx.const_to_opt_uint(llindex) {
+ layout.size.checked_mul(llindex, bx).unwrap_or(layout.size)
+ } else {
+ layout.size
+ };
+
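+ // E.g. for an array of `u16` with an unknown index, the known alignment of the
+ // resulting place is at most 2 bytes (the element size), regardless of the
+ // array's own alignment.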
+ PlaceRef {
+ llval: bx.inbounds_gep(
+ bx.cx().backend_type(self.layout),
+ self.llval,
+ &[bx.cx().const_usize(0), llindex],
+ ),
+ llextra: None,
+ layout,
+ align: self.align.restrict_for_offset(offset),
+ }
+ }
+
+ pub fn project_downcast<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
+ &self,
+ bx: &mut Bx,
+ variant_index: VariantIdx,
+ ) -> Self {
+ let mut downcast = *self;
+ downcast.layout = self.layout.for_variant(bx.cx(), variant_index);
+
+ // Cast to the appropriate variant struct type.
+ let variant_ty = bx.cx().backend_type(downcast.layout);
+ downcast.llval = bx.pointercast(downcast.llval, bx.cx().type_ptr_to(variant_ty));
+
+ downcast
+ }
+
+ pub fn storage_live<Bx: BuilderMethods<'a, 'tcx, Value = V>>(&self, bx: &mut Bx) {
+ bx.lifetime_start(self.llval, self.layout.size);
+ }
+
+ pub fn storage_dead<Bx: BuilderMethods<'a, 'tcx, Value = V>>(&self, bx: &mut Bx) {
+ bx.lifetime_end(self.llval, self.layout.size);
+ }
+}
+
+impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
+ #[instrument(level = "trace", skip(self, bx))]
+ pub fn codegen_place(
+ &mut self,
+ bx: &mut Bx,
+ place_ref: mir::PlaceRef<'tcx>,
+ ) -> PlaceRef<'tcx, Bx::Value> {
+ let cx = self.cx;
+ let tcx = self.cx.tcx();
+
+ let mut base = 0;
+ let mut cg_base = match self.locals[place_ref.local] {
+ LocalRef::Place(place) => place,
+ LocalRef::UnsizedPlace(place) => bx.load_operand(place).deref(cx),
+ LocalRef::Operand(..) => {
+ if place_ref.has_deref() {
+ base = 1;
+ let cg_base = self.codegen_consume(
+ bx,
+ mir::PlaceRef { projection: &place_ref.projection[..0], ..place_ref },
+ );
+ cg_base.deref(bx.cx())
+ } else {
+ bug!("using operand local {:?} as place", place_ref);
+ }
+ }
+ };
+ for elem in place_ref.projection[base..].iter() {
+ cg_base = match *elem {
+ mir::ProjectionElem::Deref => bx.load_operand(cg_base).deref(bx.cx()),
+ mir::ProjectionElem::Field(ref field, _) => {
+ cg_base.project_field(bx, field.index())
+ }
+ mir::ProjectionElem::Index(index) => {
+ let index = &mir::Operand::Copy(mir::Place::from(index));
+ let index = self.codegen_operand(bx, index);
+ let llindex = index.immediate();
+ cg_base.project_index(bx, llindex)
+ }
+ mir::ProjectionElem::ConstantIndex { offset, from_end: false, min_length: _ } => {
+ let lloffset = bx.cx().const_usize(offset as u64);
+ cg_base.project_index(bx, lloffset)
+ }
+ mir::ProjectionElem::ConstantIndex { offset, from_end: true, min_length: _ } => {
+ let lloffset = bx.cx().const_usize(offset as u64);
+ let lllen = cg_base.len(bx.cx());
+ let llindex = bx.sub(lllen, lloffset);
+ cg_base.project_index(bx, llindex)
+ }
+ mir::ProjectionElem::Subslice { from, to, from_end } => {
+ let mut subslice = cg_base.project_index(bx, bx.cx().const_usize(from as u64));
+ let projected_ty =
+ PlaceTy::from_ty(cg_base.layout.ty).projection_ty(tcx, *elem).ty;
+ subslice.layout = bx.cx().layout_of(self.monomorphize(projected_ty));
+
+ if subslice.layout.is_unsized() {
+ assert!(from_end, "slice subslices should be `from_end`");
+ subslice.llextra = Some(bx.sub(
+ cg_base.llextra.unwrap(),
+ bx.cx().const_usize((from as u64) + (to as u64)),
+ ));
+ }
+
+ // Cast the place pointer type to the new
+ // array or slice type (`*[%_; new_len]`).
+ subslice.llval = bx.pointercast(
+ subslice.llval,
+ bx.cx().type_ptr_to(bx.cx().backend_type(subslice.layout)),
+ );
+
+ subslice
+ }
+ mir::ProjectionElem::Downcast(_, v) => cg_base.project_downcast(bx, v),
+ };
+ }
+ debug!("codegen_place(place={:?}) => {:?}", place_ref, cg_base);
+ cg_base
+ }
+
+ pub fn monomorphized_place_ty(&self, place_ref: mir::PlaceRef<'tcx>) -> Ty<'tcx> {
+ let tcx = self.cx.tcx();
+ let place_ty = place_ref.ty(self.mir, tcx);
+ self.monomorphize(place_ty.ty)
+ }
+}
+
+fn round_up_const_value_to_alignment<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
+ bx: &mut Bx,
+ value: Bx::Value,
+ align: Bx::Value,
+) -> Bx::Value {
+ // In pseudo code:
+ //
+ // if value & (align - 1) == 0 {
+ // value
+ // } else {
+ // (value & !(align - 1)) + align
+ // }
+ //
+ // Usually this is written without branches as
+ //
+ // (value + align - 1) & !(align - 1)
+ //
+ // But this formula cannot take advantage of constant `value`. E.g. if `value` is known
+ // at compile time to be `1`, this expression should be optimized to `align`. However,
+ // optimization only holds if `align` is a power of two. Since the optimizer doesn't know
+ // that `align` is a power of two, it cannot perform this optimization.
+ //
+ // Instead we use
+ //
+ // value + (-value & (align - 1))
+ //
+ // Since `align` is used only once, the expression can be optimized. For `value = 0`
+ // it is optimized to `0` even in debug mode.
+ //
+ // NB: The previous version of this code used
+ //
+ // (value + align - 1) & -align
+ //
+ // Even though `-align == !(align - 1)`, LLVM failed to optimize this even for
+ // `value = 0`. Bug report: https://bugs.llvm.org/show_bug.cgi?id=48559
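+ // Worked example: `value = 5`, `align = 4`: `-5 & 3 == 3`, so the result is
+ // `5 + 3 == 8`; for `value = 8` the masked term is `0` and the value is unchanged.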
+ let one = bx.const_usize(1);
+ let align_minus_1 = bx.sub(align, one);
+ let neg_value = bx.neg(value);
+ let offset = bx.and(neg_value, align_minus_1);
+ bx.add(value, offset)
+}
diff --git a/compiler/rustc_codegen_ssa/src/mir/rvalue.rs b/compiler/rustc_codegen_ssa/src/mir/rvalue.rs
new file mode 100644
index 000000000..26b9fbf44
--- /dev/null
+++ b/compiler/rustc_codegen_ssa/src/mir/rvalue.rs
@@ -0,0 +1,729 @@
+use super::operand::{OperandRef, OperandValue};
+use super::place::PlaceRef;
+use super::{FunctionCx, LocalRef};
+
+use crate::base;
+use crate::common::{self, IntPredicate};
+use crate::traits::*;
+use crate::MemFlags;
+
+use rustc_middle::mir;
+use rustc_middle::mir::Operand;
+use rustc_middle::ty::cast::{CastTy, IntTy};
+use rustc_middle::ty::layout::{HasTyCtxt, LayoutOf};
+use rustc_middle::ty::{self, adjustment::PointerCast, Instance, Ty, TyCtxt};
+use rustc_span::source_map::{Span, DUMMY_SP};
+
+impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
+ #[instrument(level = "trace", skip(self, bx))]
+ pub fn codegen_rvalue(
+ &mut self,
+ mut bx: Bx,
+ dest: PlaceRef<'tcx, Bx::Value>,
+ rvalue: &mir::Rvalue<'tcx>,
+ ) -> Bx {
+ match *rvalue {
+ mir::Rvalue::Use(ref operand) => {
+ let cg_operand = self.codegen_operand(&mut bx, operand);
+ // FIXME: consider not copying constants through stack. (Fixable by codegen'ing
+ // constants into `OperandValue::Ref`; why don’t we do that yet if we don’t?)
+ cg_operand.val.store(&mut bx, dest);
+ bx
+ }
+
+ mir::Rvalue::Cast(mir::CastKind::Pointer(PointerCast::Unsize), ref source, _) => {
+ // The destination necessarily contains a fat pointer, so if
+ // it's a scalar pair, it's a fat pointer or newtype thereof.
+ if bx.cx().is_backend_scalar_pair(dest.layout) {
+ // Into-coerce of a thin pointer to a fat pointer -- just
+ // use the operand path.
+ let (mut bx, temp) = self.codegen_rvalue_operand(bx, rvalue);
+ temp.val.store(&mut bx, dest);
+ return bx;
+ }
+
+ // Unsize of a nontrivial struct. I would prefer for
+ // this to be eliminated by MIR building, but
+ // `CoerceUnsized` can be passed by a where-clause,
+ // so the (generic) MIR may not be able to expand it.
+ let operand = self.codegen_operand(&mut bx, source);
+ match operand.val {
+ OperandValue::Pair(..) | OperandValue::Immediate(_) => {
+ // Unsize from an immediate structure. We don't
+ // really need a temporary alloca here, but
+ // avoiding it would require us to have
+ // `coerce_unsized_into` use `extractvalue` to
+ // index into the struct, and this case isn't
+ // important enough for it.
+ debug!("codegen_rvalue: creating ugly alloca");
+ let scratch = PlaceRef::alloca(&mut bx, operand.layout);
+ scratch.storage_live(&mut bx);
+ operand.val.store(&mut bx, scratch);
+ base::coerce_unsized_into(&mut bx, scratch, dest);
+ scratch.storage_dead(&mut bx);
+ }
+ OperandValue::Ref(llref, None, align) => {
+ let source = PlaceRef::new_sized_aligned(llref, operand.layout, align);
+ base::coerce_unsized_into(&mut bx, source, dest);
+ }
+ OperandValue::Ref(_, Some(_), _) => {
+ bug!("unsized coercion on an unsized rvalue");
+ }
+ }
+ bx
+ }
+
+ mir::Rvalue::Repeat(ref elem, count) => {
+ let cg_elem = self.codegen_operand(&mut bx, elem);
+
+ // Do not generate the loop for zero-sized elements or empty arrays.
+ if dest.layout.is_zst() {
+ return bx;
+ }
+
+ if let OperandValue::Immediate(v) = cg_elem.val {
+ let zero = bx.const_usize(0);
+ let start = dest.project_index(&mut bx, zero).llval;
+ let size = bx.const_usize(dest.layout.size.bytes());
+
+ // Use llvm.memset.p0i8.* to initialize all zero arrays
+ if bx.cx().const_to_opt_uint(v) == Some(0) {
+ let fill = bx.cx().const_u8(0);
+ bx.memset(start, fill, size, dest.align, MemFlags::empty());
+ return bx;
+ }
+
+ // Use llvm.memset.p0i8.* to initialize byte arrays
+ let v = bx.from_immediate(v);
+ if bx.cx().val_ty(v) == bx.cx().type_i8() {
+ bx.memset(start, v, size, dest.align, MemFlags::empty());
+ return bx;
+ }
+ }
+
+ let count =
+ self.monomorphize(count).eval_usize(bx.cx().tcx(), ty::ParamEnv::reveal_all());
+
+ bx.write_operand_repeatedly(cg_elem, count, dest)
+ }
+
+ mir::Rvalue::Aggregate(ref kind, ref operands) => {
+ let (dest, active_field_index) = match **kind {
+ mir::AggregateKind::Adt(adt_did, variant_index, _, _, active_field_index) => {
+ dest.codegen_set_discr(&mut bx, variant_index);
+ if bx.tcx().adt_def(adt_did).is_enum() {
+ (dest.project_downcast(&mut bx, variant_index), active_field_index)
+ } else {
+ (dest, active_field_index)
+ }
+ }
+ _ => (dest, None),
+ };
+ for (i, operand) in operands.iter().enumerate() {
+ let op = self.codegen_operand(&mut bx, operand);
+ // Do not generate stores and GEPs for zero-sized fields.
+ if !op.layout.is_zst() {
+ let field_index = active_field_index.unwrap_or(i);
+ let field = if let mir::AggregateKind::Array(_) = **kind {
+ let llindex = bx.cx().const_usize(field_index as u64);
+ dest.project_index(&mut bx, llindex)
+ } else {
+ dest.project_field(&mut bx, field_index)
+ };
+ op.val.store(&mut bx, field);
+ }
+ }
+ bx
+ }
+
+ _ => {
+ assert!(self.rvalue_creates_operand(rvalue, DUMMY_SP));
+ let (mut bx, temp) = self.codegen_rvalue_operand(bx, rvalue);
+ temp.val.store(&mut bx, dest);
+ bx
+ }
+ }
+ }
+
+ pub fn codegen_rvalue_unsized(
+ &mut self,
+ mut bx: Bx,
+ indirect_dest: PlaceRef<'tcx, Bx::Value>,
+ rvalue: &mir::Rvalue<'tcx>,
+ ) -> Bx {
+ debug!(
+ "codegen_rvalue_unsized(indirect_dest.llval={:?}, rvalue={:?})",
+ indirect_dest.llval, rvalue
+ );
+
+ match *rvalue {
+ mir::Rvalue::Use(ref operand) => {
+ let cg_operand = self.codegen_operand(&mut bx, operand);
+ cg_operand.val.store_unsized(&mut bx, indirect_dest);
+ bx
+ }
+
+ _ => bug!("unsized assignment other than `Rvalue::Use`"),
+ }
+ }
+
+ pub fn codegen_rvalue_operand(
+ &mut self,
+ mut bx: Bx,
+ rvalue: &mir::Rvalue<'tcx>,
+ ) -> (Bx, OperandRef<'tcx, Bx::Value>) {
+ assert!(
+ self.rvalue_creates_operand(rvalue, DUMMY_SP),
+ "cannot codegen {:?} to operand",
+ rvalue,
+ );
+
+ match *rvalue {
+ mir::Rvalue::Cast(ref kind, ref source, mir_cast_ty) => {
+ let operand = self.codegen_operand(&mut bx, source);
+ debug!("cast operand is {:?}", operand);
+ let cast = bx.cx().layout_of(self.monomorphize(mir_cast_ty));
+
+ let val = match *kind {
+ mir::CastKind::PointerExposeAddress => {
+ assert!(bx.cx().is_backend_immediate(cast));
+ let llptr = operand.immediate();
+ let llcast_ty = bx.cx().immediate_backend_type(cast);
+ let lladdr = bx.ptrtoint(llptr, llcast_ty);
+ OperandValue::Immediate(lladdr)
+ }
+ mir::CastKind::Pointer(PointerCast::ReifyFnPointer) => {
+ match *operand.layout.ty.kind() {
+ ty::FnDef(def_id, substs) => {
+ let instance = ty::Instance::resolve_for_fn_ptr(
+ bx.tcx(),
+ ty::ParamEnv::reveal_all(),
+ def_id,
+ substs,
+ )
+ .unwrap()
+ .polymorphize(bx.cx().tcx());
+ OperandValue::Immediate(bx.get_fn_addr(instance))
+ }
+ _ => bug!("{} cannot be reified to a fn ptr", operand.layout.ty),
+ }
+ }
+ mir::CastKind::Pointer(PointerCast::ClosureFnPointer(_)) => {
+ match *operand.layout.ty.kind() {
+ ty::Closure(def_id, substs) => {
+ let instance = Instance::resolve_closure(
+ bx.cx().tcx(),
+ def_id,
+ substs,
+ ty::ClosureKind::FnOnce,
+ )
+ .expect("failed to normalize and resolve closure during codegen")
+ .polymorphize(bx.cx().tcx());
+ OperandValue::Immediate(bx.cx().get_fn_addr(instance))
+ }
+ _ => bug!("{} cannot be cast to a fn ptr", operand.layout.ty),
+ }
+ }
+ mir::CastKind::Pointer(PointerCast::UnsafeFnPointer) => {
+ // This is a no-op at the LLVM level.
+ operand.val
+ }
+ mir::CastKind::Pointer(PointerCast::Unsize) => {
+ assert!(bx.cx().is_backend_scalar_pair(cast));
+ let (lldata, llextra) = match operand.val {
+ OperandValue::Pair(lldata, llextra) => {
+ // unsize from a fat pointer -- this is a
+ // "trait-object-to-supertrait" coercion.
+ (lldata, Some(llextra))
+ }
+ OperandValue::Immediate(lldata) => {
+ // "standard" unsize
+ (lldata, None)
+ }
+ OperandValue::Ref(..) => {
+ bug!("by-ref operand {:?} in `codegen_rvalue_operand`", operand);
+ }
+ };
+ let (lldata, llextra) =
+ base::unsize_ptr(&mut bx, lldata, operand.layout.ty, cast.ty, llextra);
+ OperandValue::Pair(lldata, llextra)
+ }
+ mir::CastKind::Pointer(PointerCast::MutToConstPointer)
+ | mir::CastKind::Misc
+ if bx.cx().is_backend_scalar_pair(operand.layout) =>
+ {
+ if let OperandValue::Pair(data_ptr, meta) = operand.val {
+ if bx.cx().is_backend_scalar_pair(cast) {
+ let data_cast = bx.pointercast(
+ data_ptr,
+ bx.cx().scalar_pair_element_backend_type(cast, 0, true),
+ );
+ OperandValue::Pair(data_cast, meta)
+ } else {
+ // Cast to thin-ptr: a fat-ptr to thin-ptr cast extracts the data pointer
+ // and pointer-casts it to the desired pointer type.
+ let llcast_ty = bx.cx().immediate_backend_type(cast);
+ let llval = bx.pointercast(data_ptr, llcast_ty);
+ OperandValue::Immediate(llval)
+ }
+ } else {
+ bug!("unexpected non-pair operand");
+ }
+ }
+ mir::CastKind::Pointer(
+ PointerCast::MutToConstPointer | PointerCast::ArrayToPointer,
+ )
+ | mir::CastKind::Misc
+ // Since int2ptr can have arbitrary integer types as input (so we have to do
+ // sign extension and all that), it is currently best handled in the same code
+ // path as the other integer-to-X casts.
+ | mir::CastKind::PointerFromExposedAddress => {
+ assert!(bx.cx().is_backend_immediate(cast));
+ let ll_t_out = bx.cx().immediate_backend_type(cast);
+ if operand.layout.abi.is_uninhabited() {
+ let val = OperandValue::Immediate(bx.cx().const_undef(ll_t_out));
+ return (bx, OperandRef { val, layout: cast });
+ }
+ let r_t_in =
+ CastTy::from_ty(operand.layout.ty).expect("bad input type for cast");
+ let r_t_out = CastTy::from_ty(cast.ty).expect("bad output type for cast");
+ let ll_t_in = bx.cx().immediate_backend_type(operand.layout);
+ let llval = operand.immediate();
+
+ let newval = match (r_t_in, r_t_out) {
+ (CastTy::Int(i), CastTy::Int(_)) => {
+ bx.intcast(llval, ll_t_out, i.is_signed())
+ }
+ (CastTy::Float, CastTy::Float) => {
+ let srcsz = bx.cx().float_width(ll_t_in);
+ let dstsz = bx.cx().float_width(ll_t_out);
+ if dstsz > srcsz {
+ bx.fpext(llval, ll_t_out)
+ } else if srcsz > dstsz {
+ bx.fptrunc(llval, ll_t_out)
+ } else {
+ llval
+ }
+ }
+ (CastTy::Int(i), CastTy::Float) => {
+ if i.is_signed() {
+ bx.sitofp(llval, ll_t_out)
+ } else {
+ bx.uitofp(llval, ll_t_out)
+ }
+ }
+ (CastTy::Ptr(_) | CastTy::FnPtr, CastTy::Ptr(_)) => {
+ bx.pointercast(llval, ll_t_out)
+ }
+ (CastTy::Int(i), CastTy::Ptr(_)) => {
+ let usize_llval =
+ bx.intcast(llval, bx.cx().type_isize(), i.is_signed());
+ bx.inttoptr(usize_llval, ll_t_out)
+ }
+ (CastTy::Float, CastTy::Int(IntTy::I)) => {
+ bx.cast_float_to_int(true, llval, ll_t_out)
+ }
+ (CastTy::Float, CastTy::Int(_)) => {
+ bx.cast_float_to_int(false, llval, ll_t_out)
+ }
+ _ => bug!("unsupported cast: {:?} to {:?}", operand.layout.ty, cast.ty),
+ };
+ OperandValue::Immediate(newval)
+ }
+ };
+ (bx, OperandRef { val, layout: cast })
+ }
+
+ mir::Rvalue::Ref(_, bk, place) => {
+ let mk_ref = move |tcx: TyCtxt<'tcx>, ty: Ty<'tcx>| {
+ tcx.mk_ref(
+ tcx.lifetimes.re_erased,
+ ty::TypeAndMut { ty, mutbl: bk.to_mutbl_lossy() },
+ )
+ };
+ self.codegen_place_to_pointer(bx, place, mk_ref)
+ }
+
+ mir::Rvalue::CopyForDeref(place) => {
+ let operand = self.codegen_operand(&mut bx, &Operand::Copy(place));
+ (bx, operand)
+ }
+ mir::Rvalue::AddressOf(mutability, place) => {
+ let mk_ptr = move |tcx: TyCtxt<'tcx>, ty: Ty<'tcx>| {
+ tcx.mk_ptr(ty::TypeAndMut { ty, mutbl: mutability })
+ };
+ self.codegen_place_to_pointer(bx, place, mk_ptr)
+ }
+
+ mir::Rvalue::Len(place) => {
+ let size = self.evaluate_array_len(&mut bx, place);
+ let operand = OperandRef {
+ val: OperandValue::Immediate(size),
+ layout: bx.cx().layout_of(bx.tcx().types.usize),
+ };
+ (bx, operand)
+ }
+
+ mir::Rvalue::BinaryOp(op, box (ref lhs, ref rhs)) => {
+ let lhs = self.codegen_operand(&mut bx, lhs);
+ let rhs = self.codegen_operand(&mut bx, rhs);
+ let llresult = match (lhs.val, rhs.val) {
+ (
+ OperandValue::Pair(lhs_addr, lhs_extra),
+ OperandValue::Pair(rhs_addr, rhs_extra),
+ ) => self.codegen_fat_ptr_binop(
+ &mut bx,
+ op,
+ lhs_addr,
+ lhs_extra,
+ rhs_addr,
+ rhs_extra,
+ lhs.layout.ty,
+ ),
+
+ (OperandValue::Immediate(lhs_val), OperandValue::Immediate(rhs_val)) => {
+ self.codegen_scalar_binop(&mut bx, op, lhs_val, rhs_val, lhs.layout.ty)
+ }
+
+ _ => bug!(),
+ };
+ let operand = OperandRef {
+ val: OperandValue::Immediate(llresult),
+ layout: bx.cx().layout_of(op.ty(bx.tcx(), lhs.layout.ty, rhs.layout.ty)),
+ };
+ (bx, operand)
+ }
+ mir::Rvalue::CheckedBinaryOp(op, box (ref lhs, ref rhs)) => {
+ let lhs = self.codegen_operand(&mut bx, lhs);
+ let rhs = self.codegen_operand(&mut bx, rhs);
+ let result = self.codegen_scalar_checked_binop(
+ &mut bx,
+ op,
+ lhs.immediate(),
+ rhs.immediate(),
+ lhs.layout.ty,
+ );
+ let val_ty = op.ty(bx.tcx(), lhs.layout.ty, rhs.layout.ty);
+ let operand_ty = bx.tcx().intern_tup(&[val_ty, bx.tcx().types.bool]);
+ let operand = OperandRef { val: result, layout: bx.cx().layout_of(operand_ty) };
+
+ (bx, operand)
+ }
+
+ mir::Rvalue::UnaryOp(op, ref operand) => {
+ let operand = self.codegen_operand(&mut bx, operand);
+ let lloperand = operand.immediate();
+ let is_float = operand.layout.ty.is_floating_point();
+ let llval = match op {
+ mir::UnOp::Not => bx.not(lloperand),
+ mir::UnOp::Neg => {
+ if is_float {
+ bx.fneg(lloperand)
+ } else {
+ bx.neg(lloperand)
+ }
+ }
+ };
+ (bx, OperandRef { val: OperandValue::Immediate(llval), layout: operand.layout })
+ }
+
+ mir::Rvalue::Discriminant(ref place) => {
+ let discr_ty = rvalue.ty(self.mir, bx.tcx());
+ let discr_ty = self.monomorphize(discr_ty);
+ let discr = self
+ .codegen_place(&mut bx, place.as_ref())
+ .codegen_get_discr(&mut bx, discr_ty);
+ (
+ bx,
+ OperandRef {
+ val: OperandValue::Immediate(discr),
+ layout: self.cx.layout_of(discr_ty),
+ },
+ )
+ }
+
+ mir::Rvalue::NullaryOp(null_op, ty) => {
+ let ty = self.monomorphize(ty);
+ assert!(bx.cx().type_is_sized(ty));
+ let layout = bx.cx().layout_of(ty);
+ let val = match null_op {
+ mir::NullOp::SizeOf => layout.size.bytes(),
+ mir::NullOp::AlignOf => layout.align.abi.bytes(),
+ };
+ let val = bx.cx().const_usize(val);
+ let tcx = self.cx.tcx();
+ (
+ bx,
+ OperandRef {
+ val: OperandValue::Immediate(val),
+ layout: self.cx.layout_of(tcx.types.usize),
+ },
+ )
+ }
+
+ mir::Rvalue::ThreadLocalRef(def_id) => {
+ assert!(bx.cx().tcx().is_static(def_id));
+ let static_ = bx.get_static(def_id);
+ let layout = bx.layout_of(bx.cx().tcx().static_ptr_ty(def_id));
+ let operand = OperandRef::from_immediate_or_packed_pair(&mut bx, static_, layout);
+ (bx, operand)
+ }
+ mir::Rvalue::Use(ref operand) => {
+ let operand = self.codegen_operand(&mut bx, operand);
+ (bx, operand)
+ }
+ mir::Rvalue::Repeat(..) | mir::Rvalue::Aggregate(..) => {
+ // According to `rvalue_creates_operand`, only ZST
+ // aggregate rvalues are allowed to be operands.
+ let ty = rvalue.ty(self.mir, self.cx.tcx());
+ let operand =
+ OperandRef::new_zst(&mut bx, self.cx.layout_of(self.monomorphize(ty)));
+ (bx, operand)
+ }
+ mir::Rvalue::ShallowInitBox(ref operand, content_ty) => {
+ let operand = self.codegen_operand(&mut bx, operand);
+ let lloperand = operand.immediate();
+
+ let content_ty = self.monomorphize(content_ty);
+ let box_layout = bx.cx().layout_of(bx.tcx().mk_box(content_ty));
+ let llty_ptr = bx.cx().backend_type(box_layout);
+
+ let val = bx.pointercast(lloperand, llty_ptr);
+ let operand = OperandRef { val: OperandValue::Immediate(val), layout: box_layout };
+ (bx, operand)
+ }
+ }
+ }
+
+ fn evaluate_array_len(&mut self, bx: &mut Bx, place: mir::Place<'tcx>) -> Bx::Value {
+ // ZSTs are passed as operands and require special handling
+ // because codegen_place() panics if the Local is an operand.
+ if let Some(index) = place.as_local() {
+ if let LocalRef::Operand(Some(op)) = self.locals[index] {
+ if let ty::Array(_, n) = op.layout.ty.kind() {
+ let n = n.eval_usize(bx.cx().tcx(), ty::ParamEnv::reveal_all());
+ return bx.cx().const_usize(n);
+ }
+ }
+ }
+ // Use the common size calculation for non-zero-sized types.
+ let cg_value = self.codegen_place(bx, place.as_ref());
+ cg_value.len(bx.cx())
+ }
+
+ /// Codegen an `Rvalue::AddressOf` or `Rvalue::Ref`
+ fn codegen_place_to_pointer(
+ &mut self,
+ mut bx: Bx,
+ place: mir::Place<'tcx>,
+ mk_ptr_ty: impl FnOnce(TyCtxt<'tcx>, Ty<'tcx>) -> Ty<'tcx>,
+ ) -> (Bx, OperandRef<'tcx, Bx::Value>) {
+ let cg_place = self.codegen_place(&mut bx, place.as_ref());
+
+ let ty = cg_place.layout.ty;
+
+ // Note: places are indirect, so storing the `llval` into the
+ // destination effectively creates a reference.
+ let val = if !bx.cx().type_has_metadata(ty) {
+ OperandValue::Immediate(cg_place.llval)
+ } else {
+ OperandValue::Pair(cg_place.llval, cg_place.llextra.unwrap())
+ };
+ (bx, OperandRef { val, layout: self.cx.layout_of(mk_ptr_ty(self.cx.tcx(), ty)) })
+ }
+
+ pub fn codegen_scalar_binop(
+ &mut self,
+ bx: &mut Bx,
+ op: mir::BinOp,
+ lhs: Bx::Value,
+ rhs: Bx::Value,
+ input_ty: Ty<'tcx>,
+ ) -> Bx::Value {
+ let is_float = input_ty.is_floating_point();
+ let is_signed = input_ty.is_signed();
+ match op {
+ mir::BinOp::Add => {
+ if is_float {
+ bx.fadd(lhs, rhs)
+ } else {
+ bx.add(lhs, rhs)
+ }
+ }
+ mir::BinOp::Sub => {
+ if is_float {
+ bx.fsub(lhs, rhs)
+ } else {
+ bx.sub(lhs, rhs)
+ }
+ }
+ mir::BinOp::Mul => {
+ if is_float {
+ bx.fmul(lhs, rhs)
+ } else {
+ bx.mul(lhs, rhs)
+ }
+ }
+ mir::BinOp::Div => {
+ if is_float {
+ bx.fdiv(lhs, rhs)
+ } else if is_signed {
+ bx.sdiv(lhs, rhs)
+ } else {
+ bx.udiv(lhs, rhs)
+ }
+ }
+ mir::BinOp::Rem => {
+ if is_float {
+ bx.frem(lhs, rhs)
+ } else if is_signed {
+ bx.srem(lhs, rhs)
+ } else {
+ bx.urem(lhs, rhs)
+ }
+ }
+ mir::BinOp::BitOr => bx.or(lhs, rhs),
+ mir::BinOp::BitAnd => bx.and(lhs, rhs),
+ mir::BinOp::BitXor => bx.xor(lhs, rhs),
+ mir::BinOp::Offset => {
+ let pointee_type = input_ty
+ .builtin_deref(true)
+ .unwrap_or_else(|| bug!("deref of non-pointer {:?}", input_ty))
+ .ty;
+ let llty = bx.cx().backend_type(bx.cx().layout_of(pointee_type));
+ bx.inbounds_gep(llty, lhs, &[rhs])
+ }
+ mir::BinOp::Shl => common::build_unchecked_lshift(bx, lhs, rhs),
+ mir::BinOp::Shr => common::build_unchecked_rshift(bx, input_ty, lhs, rhs),
+ mir::BinOp::Ne
+ | mir::BinOp::Lt
+ | mir::BinOp::Gt
+ | mir::BinOp::Eq
+ | mir::BinOp::Le
+ | mir::BinOp::Ge => {
+ if is_float {
+ bx.fcmp(base::bin_op_to_fcmp_predicate(op.to_hir_binop()), lhs, rhs)
+ } else {
+ bx.icmp(base::bin_op_to_icmp_predicate(op.to_hir_binop(), is_signed), lhs, rhs)
+ }
+ }
+ }
+ }
+
+ pub fn codegen_fat_ptr_binop(
+ &mut self,
+ bx: &mut Bx,
+ op: mir::BinOp,
+ lhs_addr: Bx::Value,
+ lhs_extra: Bx::Value,
+ rhs_addr: Bx::Value,
+ rhs_extra: Bx::Value,
+ _input_ty: Ty<'tcx>,
+ ) -> Bx::Value {
+ match op {
+ mir::BinOp::Eq => {
+ let lhs = bx.icmp(IntPredicate::IntEQ, lhs_addr, rhs_addr);
+ let rhs = bx.icmp(IntPredicate::IntEQ, lhs_extra, rhs_extra);
+ bx.and(lhs, rhs)
+ }
+ mir::BinOp::Ne => {
+ let lhs = bx.icmp(IntPredicate::IntNE, lhs_addr, rhs_addr);
+ let rhs = bx.icmp(IntPredicate::IntNE, lhs_extra, rhs_extra);
+ bx.or(lhs, rhs)
+ }
+ mir::BinOp::Le | mir::BinOp::Lt | mir::BinOp::Ge | mir::BinOp::Gt => {
+ // a OP b ~ a.0 STRICT(OP) b.0 | (a.0 == b.0 && a.1 OP b.1)
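+ // I.e. fat pointers compare lexicographically: first by address, then by the
+ // extra field (length or vtable pointer) when the addresses are equal.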
+ let (op, strict_op) = match op {
+ mir::BinOp::Lt => (IntPredicate::IntULT, IntPredicate::IntULT),
+ mir::BinOp::Le => (IntPredicate::IntULE, IntPredicate::IntULT),
+ mir::BinOp::Gt => (IntPredicate::IntUGT, IntPredicate::IntUGT),
+ mir::BinOp::Ge => (IntPredicate::IntUGE, IntPredicate::IntUGT),
+ _ => bug!(),
+ };
+ let lhs = bx.icmp(strict_op, lhs_addr, rhs_addr);
+ let and_lhs = bx.icmp(IntPredicate::IntEQ, lhs_addr, rhs_addr);
+ let and_rhs = bx.icmp(op, lhs_extra, rhs_extra);
+ let rhs = bx.and(and_lhs, and_rhs);
+ bx.or(lhs, rhs)
+ }
+ _ => {
+ bug!("unexpected fat ptr binop");
+ }
+ }
+ }
+
+ pub fn codegen_scalar_checked_binop(
+ &mut self,
+ bx: &mut Bx,
+ op: mir::BinOp,
+ lhs: Bx::Value,
+ rhs: Bx::Value,
+ input_ty: Ty<'tcx>,
+ ) -> OperandValue<Bx::Value> {
+ // This case can currently arise only from functions marked
+ // with #[rustc_inherit_overflow_checks] and inlined from
+ // another crate (mostly core::num generic/#[inline] fns),
+ // while the current crate doesn't use overflow checks.
+ if !bx.cx().check_overflow() {
+ let val = self.codegen_scalar_binop(bx, op, lhs, rhs, input_ty);
+ return OperandValue::Pair(val, bx.cx().const_bool(false));
+ }
+
+ let (val, of) = match op {
+ // These are checked using intrinsics
+ mir::BinOp::Add | mir::BinOp::Sub | mir::BinOp::Mul => {
+ let oop = match op {
+ mir::BinOp::Add => OverflowOp::Add,
+ mir::BinOp::Sub => OverflowOp::Sub,
+ mir::BinOp::Mul => OverflowOp::Mul,
+ _ => unreachable!(),
+ };
+ bx.checked_binop(oop, input_ty, lhs, rhs)
+ }
+ mir::BinOp::Shl | mir::BinOp::Shr => {
+ let lhs_llty = bx.cx().val_ty(lhs);
+ let rhs_llty = bx.cx().val_ty(rhs);
+ let invert_mask = common::shift_mask_val(bx, lhs_llty, rhs_llty, true);
+ let outer_bits = bx.and(rhs, invert_mask);
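+ // `invert_mask` keeps only the bits of `rhs` outside the valid shift range
+ // (e.g. anything above the low 5 bits for a 32-bit `lhs`); a nonzero result
+ // means the shift amount overflows.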
+
+ let of = bx.icmp(IntPredicate::IntNE, outer_bits, bx.cx().const_null(rhs_llty));
+ let val = self.codegen_scalar_binop(bx, op, lhs, rhs, input_ty);
+
+ (val, of)
+ }
+ _ => bug!("Operator `{:?}` is not a checkable operator", op),
+ };
+
+ OperandValue::Pair(val, of)
+ }
+}
+
+impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
+ pub fn rvalue_creates_operand(&self, rvalue: &mir::Rvalue<'tcx>, span: Span) -> bool {
+ match *rvalue {
+ mir::Rvalue::Ref(..) |
+ mir::Rvalue::CopyForDeref(..) |
+ mir::Rvalue::AddressOf(..) |
+ mir::Rvalue::Len(..) |
+ mir::Rvalue::Cast(..) | // (*)
+ mir::Rvalue::ShallowInitBox(..) | // (*)
+ mir::Rvalue::BinaryOp(..) |
+ mir::Rvalue::CheckedBinaryOp(..) |
+ mir::Rvalue::UnaryOp(..) |
+ mir::Rvalue::Discriminant(..) |
+ mir::Rvalue::NullaryOp(..) |
+ mir::Rvalue::ThreadLocalRef(_) |
+ mir::Rvalue::Use(..) => // (*)
+ true,
+ mir::Rvalue::Repeat(..) |
+ mir::Rvalue::Aggregate(..) => {
+ let ty = rvalue.ty(self.mir, self.cx.tcx());
+ let ty = self.monomorphize(ty);
+ self.cx.spanned_layout_of(ty, span).is_zst()
+ }
+ }
+
+ // (*) this is only true if the type is suitable
+ }
+}
diff --git a/compiler/rustc_codegen_ssa/src/mir/statement.rs b/compiler/rustc_codegen_ssa/src/mir/statement.rs
new file mode 100644
index 000000000..f452f2988
--- /dev/null
+++ b/compiler/rustc_codegen_ssa/src/mir/statement.rs
@@ -0,0 +1,102 @@
+use rustc_middle::mir;
+
+use super::FunctionCx;
+use super::LocalRef;
+use crate::traits::BuilderMethods;
+use crate::traits::*;
+
+impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
+ #[instrument(level = "debug", skip(self, bx))]
+ pub fn codegen_statement(&mut self, mut bx: Bx, statement: &mir::Statement<'tcx>) -> Bx {
+ self.set_debug_loc(&mut bx, statement.source_info);
+ match statement.kind {
+ mir::StatementKind::Assign(box (ref place, ref rvalue)) => {
+ if let Some(index) = place.as_local() {
+ match self.locals[index] {
+ LocalRef::Place(cg_dest) => self.codegen_rvalue(bx, cg_dest, rvalue),
+ LocalRef::UnsizedPlace(cg_indirect_dest) => {
+ self.codegen_rvalue_unsized(bx, cg_indirect_dest, rvalue)
+ }
+ LocalRef::Operand(None) => {
+ let (mut bx, operand) = self.codegen_rvalue_operand(bx, rvalue);
+ self.locals[index] = LocalRef::Operand(Some(operand));
+ self.debug_introduce_local(&mut bx, index);
+ bx
+ }
+ LocalRef::Operand(Some(op)) => {
+ if !op.layout.is_zst() {
+ span_bug!(
+ statement.source_info.span,
+ "operand {:?} already assigned",
+ rvalue
+ );
+ }
+
+ // If the type is zero-sized, it's already been set here,
+ // but we still need to make sure we codegen the operand
+ self.codegen_rvalue_operand(bx, rvalue).0
+ }
+ }
+ } else {
+ let cg_dest = self.codegen_place(&mut bx, place.as_ref());
+ self.codegen_rvalue(bx, cg_dest, rvalue)
+ }
+ }
+ mir::StatementKind::SetDiscriminant { box ref place, variant_index } => {
+ self.codegen_place(&mut bx, place.as_ref())
+ .codegen_set_discr(&mut bx, variant_index);
+ bx
+ }
+ mir::StatementKind::Deinit(..) => {
+ // For now, don't codegen this to anything. In the future it may be worth
+ // experimenting with what kind of information we can emit to LLVM without hurting
+ // perf here.
+ bx
+ }
+ mir::StatementKind::StorageLive(local) => {
+ if let LocalRef::Place(cg_place) = self.locals[local] {
+ cg_place.storage_live(&mut bx);
+ } else if let LocalRef::UnsizedPlace(cg_indirect_place) = self.locals[local] {
+ cg_indirect_place.storage_live(&mut bx);
+ }
+ bx
+ }
+ mir::StatementKind::StorageDead(local) => {
+ if let LocalRef::Place(cg_place) = self.locals[local] {
+ cg_place.storage_dead(&mut bx);
+ } else if let LocalRef::UnsizedPlace(cg_indirect_place) = self.locals[local] {
+ cg_indirect_place.storage_dead(&mut bx);
+ }
+ bx
+ }
+ mir::StatementKind::Coverage(box ref coverage) => {
+ self.codegen_coverage(&mut bx, coverage.clone(), statement.source_info.scope);
+ bx
+ }
+ mir::StatementKind::CopyNonOverlapping(box mir::CopyNonOverlapping {
+ ref src,
+ ref dst,
+ ref count,
+ }) => {
+ let dst_val = self.codegen_operand(&mut bx, dst);
+ let src_val = self.codegen_operand(&mut bx, src);
+ let count = self.codegen_operand(&mut bx, count).immediate();
+ let pointee_layout = dst_val
+ .layout
+ .pointee_info_at(&bx, rustc_target::abi::Size::ZERO)
+ .expect("Expected pointer");
+ let bytes = bx.mul(count, bx.const_usize(pointee_layout.size.bytes()));
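+ // `count` is an element count, so e.g. copying 10 `u32` elements (4 bytes each)
+ // becomes a 40-byte memcpy below.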
+
+ let align = pointee_layout.align;
+ let dst = dst_val.immediate();
+ let src = src_val.immediate();
+ bx.memcpy(dst, align, src, align, bytes, crate::MemFlags::empty());
+ bx
+ }
+ mir::StatementKind::FakeRead(..)
+ | mir::StatementKind::Retag { .. }
+ | mir::StatementKind::AscribeUserType(..)
+ | mir::StatementKind::Nop => bx,
+ }
+ }
+}