author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-18 02:49:50 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-18 02:49:50 +0000
commit     9835e2ae736235810b4ea1c162ca5e65c547e770 (patch)
tree       3fcebf40ed70e581d776a8a4c65923e8ec20e026 /compiler/rustc_codegen_ssa/src/mir
parent     Releasing progress-linux version 1.70.0+dfsg2-1~progress7.99u1. (diff)
Merging upstream version 1.71.1+dfsg1.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'compiler/rustc_codegen_ssa/src/mir')
-rw-r--r--  compiler/rustc_codegen_ssa/src/mir/analyze.rs    |    8
-rw-r--r--  compiler/rustc_codegen_ssa/src/mir/block.rs      |   71
-rw-r--r--  compiler/rustc_codegen_ssa/src/mir/debuginfo.rs  |  153
-rw-r--r--  compiler/rustc_codegen_ssa/src/mir/intrinsic.rs  |    7
-rw-r--r--  compiler/rustc_codegen_ssa/src/mir/mod.rs        |   18
-rw-r--r--  compiler/rustc_codegen_ssa/src/mir/operand.rs    |   63
-rw-r--r--  compiler/rustc_codegen_ssa/src/mir/rvalue.rs     |   95
7 files changed, 274 insertions(+), 141 deletions(-)
diff --git a/compiler/rustc_codegen_ssa/src/mir/analyze.rs b/compiler/rustc_codegen_ssa/src/mir/analyze.rs
index f43f1d64a..22c1f0597 100644
--- a/compiler/rustc_codegen_ssa/src/mir/analyze.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/analyze.rs
@@ -5,7 +5,7 @@ use super::FunctionCx;
use crate::traits::*;
use rustc_data_structures::graph::dominators::Dominators;
use rustc_index::bit_set::BitSet;
-use rustc_index::vec::{IndexSlice, IndexVec};
+use rustc_index::{IndexSlice, IndexVec};
use rustc_middle::mir::traversal;
use rustc_middle::mir::visit::{MutatingUseContext, NonMutatingUseContext, PlaceContext, Visitor};
use rustc_middle::mir::{self, Location, TerminatorKind};
@@ -84,7 +84,7 @@ impl DefLocation {
struct LocalAnalyzer<'mir, 'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> {
fx: &'mir FunctionCx<'a, 'tcx, Bx>,
- dominators: Dominators<mir::BasicBlock>,
+ dominators: &'mir Dominators<mir::BasicBlock>,
locals: IndexVec<mir::Local, LocalKind>,
}
@@ -203,7 +203,9 @@ impl<'mir, 'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> Visitor<'tcx>
self.assign(local, DefLocation::Body(location));
}
- PlaceContext::NonUse(_) | PlaceContext::MutatingUse(MutatingUseContext::Retag) => {}
+ PlaceContext::NonUse(_)
+ | PlaceContext::NonMutatingUse(NonMutatingUseContext::PlaceMention)
+ | PlaceContext::MutatingUse(MutatingUseContext::Retag) => {}
PlaceContext::NonMutatingUse(
NonMutatingUseContext::Copy | NonMutatingUseContext::Move,
diff --git a/compiler/rustc_codegen_ssa/src/mir/block.rs b/compiler/rustc_codegen_ssa/src/mir/block.rs
index dd8697781..3f0b64b11 100644
--- a/compiler/rustc_codegen_ssa/src/mir/block.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/block.rs
@@ -12,7 +12,6 @@ use crate::MemFlags;
use rustc_ast as ast;
use rustc_ast::{InlineAsmOptions, InlineAsmTemplatePiece};
use rustc_hir::lang_items::LangItem;
-use rustc_index::vec::Idx;
use rustc_middle::mir::{self, AssertKind, SwitchTargets};
use rustc_middle::ty::layout::{HasTyCtxt, LayoutOf, ValidityRequirement};
use rustc_middle::ty::print::{with_no_trimmed_paths, with_no_visible_paths};
@@ -20,7 +19,6 @@ use rustc_middle::ty::{self, Instance, Ty};
use rustc_session::config::OptLevel;
use rustc_span::source_map::Span;
use rustc_span::{sym, Symbol};
-use rustc_symbol_mangling::typeid::typeid_for_fnabi;
use rustc_target::abi::call::{ArgAbi, FnAbi, PassMode, Reg};
use rustc_target::abi::{self, HasDataLayout, WrappingRange};
use rustc_target::spec::abi::Abi;
@@ -164,6 +162,12 @@ impl<'a, 'tcx> TerminatorCodegenHelper<'tcx> {
// do an invoke, otherwise do a call.
let fn_ty = bx.fn_decl_backend_type(&fn_abi);
+ let fn_attrs = if bx.tcx().def_kind(fx.instance.def_id()).has_codegen_attrs() {
+ Some(bx.tcx().codegen_fn_attrs(fx.instance.def_id()))
+ } else {
+ None
+ };
+
if !fn_abi.can_unwind {
unwind = mir::UnwindAction::Unreachable;
}
@@ -191,6 +195,7 @@ impl<'a, 'tcx> TerminatorCodegenHelper<'tcx> {
};
let invokeret = bx.invoke(
fn_ty,
+ fn_attrs,
Some(&fn_abi),
fn_ptr,
&llargs,
@@ -212,7 +217,7 @@ impl<'a, 'tcx> TerminatorCodegenHelper<'tcx> {
}
MergingSucc::False
} else {
- let llret = bx.call(fn_ty, Some(&fn_abi), fn_ptr, &llargs, self.funclet(fx));
+ let llret = bx.call(fn_ty, fn_attrs, Some(&fn_abi), fn_ptr, &llargs, self.funclet(fx));
if fx.mir[self.bb].is_cleanup {
// Cleanup is always the cold path. Don't inline
// drop glue. Also, when there is a deeply-nested
@@ -369,7 +374,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
if self.fn_abi.c_variadic {
// The `VaList` "spoofed" argument is just after all the real arguments.
let va_list_arg_idx = self.fn_abi.args.len();
- match self.locals[mir::Local::new(1 + va_list_arg_idx)] {
+ match self.locals[mir::Local::from_usize(1 + va_list_arg_idx)] {
LocalRef::Place(va_list) => {
bx.va_end(va_list.llval);
}
@@ -1026,7 +1031,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
});
let needs_location =
- instance.map_or(false, |i| i.def.requires_caller_location(self.cx.tcx()));
+ instance.is_some_and(|i| i.def.requires_caller_location(self.cx.tcx()));
if needs_location {
let mir_args = if let Some(num_untupled) = num_untupled {
first_args.len() + num_untupled
@@ -1052,48 +1057,12 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
self.codegen_argument(bx, location, &mut llargs, last_arg);
}
- let (is_indirect_call, fn_ptr) = match (llfn, instance) {
- (Some(llfn), _) => (true, llfn),
- (None, Some(instance)) => (false, bx.get_fn_addr(instance)),
- _ => span_bug!(span, "no llfn for call"),
+ let fn_ptr = match (instance, llfn) {
+ (Some(instance), None) => bx.get_fn_addr(instance),
+ (_, Some(llfn)) => llfn,
+ _ => span_bug!(span, "no instance or llfn for call"),
};
- // For backends that support CFI using type membership (i.e., testing whether a given
- // pointer is associated with a type identifier).
- if bx.tcx().sess.is_sanitizer_cfi_enabled() && is_indirect_call {
- // Emit type metadata and checks.
- // FIXME(rcvalle): Add support for generalized identifiers.
- // FIXME(rcvalle): Create distinct unnamed MDNodes for internal identifiers.
- let typeid = typeid_for_fnabi(bx.tcx(), fn_abi);
- let typeid_metadata = self.cx.typeid_metadata(typeid);
-
- // Test whether the function pointer is associated with the type identifier.
- let cond = bx.type_test(fn_ptr, typeid_metadata);
- let bb_pass = bx.append_sibling_block("type_test.pass");
- let bb_fail = bx.append_sibling_block("type_test.fail");
- bx.cond_br(cond, bb_pass, bb_fail);
-
- bx.switch_to_block(bb_pass);
- let merging_succ = helper.do_call(
- self,
- bx,
- fn_abi,
- fn_ptr,
- &llargs,
- target.as_ref().map(|&target| (ret_dest, target)),
- unwind,
- &copied_constant_arguments,
- false,
- );
- assert_eq!(merging_succ, MergingSucc::False);
-
- bx.switch_to_block(bb_fail);
- bx.abort();
- bx.unreachable();
-
- return MergingSucc::False;
- }
-
helper.do_call(
self,
bx,
@@ -1287,7 +1256,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
MergingSucc::False
}
- mir::TerminatorKind::Drop { place, target, unwind } => {
+ mir::TerminatorKind::Drop { place, target, unwind, replace: _ } => {
self.codegen_drop_terminator(helper, bx, place, target, unwind, mergeable_succ())
}
@@ -1481,11 +1450,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
) -> OperandRef<'tcx, Bx::Value> {
let tcx = bx.tcx();
- let mut span_to_caller_location = |mut span: Span| {
- // Remove `Inlined` marks as they pollute `expansion_cause`.
- while span.is_inlined() {
- span.remove_mark();
- }
+ let mut span_to_caller_location = |span: Span| {
let topmost = span.ctxt().outer_expn().expansion_cause().unwrap_or(span);
let caller = tcx.sess.source_map().lookup_char_pos(topmost.lo());
let const_loc = tcx.const_caller_location((
@@ -1631,7 +1596,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
bx = Bx::build(self.cx, llbb);
let llpersonality = self.cx.eh_personality();
- bx.cleanup_landing_pad(llpersonality);
+ bx.filter_landing_pad(llpersonality);
funclet = None;
}
@@ -1641,7 +1606,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
let (fn_abi, fn_ptr) = common::build_langcall(&bx, None, LangItem::PanicCannotUnwind);
let fn_ty = bx.fn_decl_backend_type(&fn_abi);
- let llret = bx.call(fn_ty, Some(&fn_abi), fn_ptr, &[], funclet.as_ref());
+ let llret = bx.call(fn_ty, None, Some(&fn_abi), fn_ptr, &[], funclet.as_ref());
bx.do_not_inline(llret);
bx.unreachable();
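
Note: the caller-location plumbing touched above (`requires_caller_location`, `const_caller_location`) is the codegen side of `#[track_caller]`: when the callee requires a location, `codegen_call_terminator` appends the caller's span as a hidden trailing argument. A minimal standalone illustration of the surface feature this supports:

    use std::panic::Location;

    #[track_caller]
    fn where_am_i() -> &'static Location<'static> {
        // The compiler threads the caller's source location through a hidden
        // argument, which is what the `needs_location` path above appends.
        Location::caller()
    }

    fn main() {
        let loc = where_am_i();
        println!("called from {}:{}:{}", loc.file(), loc.line(), loc.column());
    }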
diff --git a/compiler/rustc_codegen_ssa/src/mir/debuginfo.rs b/compiler/rustc_codegen_ssa/src/mir/debuginfo.rs
index d049bafb8..bba2800fb 100644
--- a/compiler/rustc_codegen_ssa/src/mir/debuginfo.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/debuginfo.rs
@@ -1,5 +1,5 @@
use crate::traits::*;
-use rustc_index::vec::IndexVec;
+use rustc_index::IndexVec;
use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrFlags;
use rustc_middle::mir;
use rustc_middle::ty;
@@ -8,7 +8,7 @@ use rustc_middle::ty::layout::{HasTyCtxt, LayoutOf};
use rustc_session::config::DebugInfo;
use rustc_span::symbol::{kw, Symbol};
use rustc_span::{BytePos, Span};
-use rustc_target::abi::{Abi, FieldIdx, Size, VariantIdx};
+use rustc_target::abi::{Abi, FieldIdx, FieldsShape, Size, VariantIdx};
use super::operand::{OperandRef, OperandValue};
use super::place::PlaceRef;
@@ -41,6 +41,9 @@ pub struct PerLocalVarDebugInfo<'tcx, D> {
/// `.place.projection` from `mir::VarDebugInfo`.
pub projection: &'tcx ty::List<mir::PlaceElem<'tcx>>,
+
+ /// `references` from `mir::VarDebugInfo`.
+ pub references: u8,
}
#[derive(Clone, Copy, Debug)]
@@ -80,6 +83,7 @@ trait DebugInfoOffsetLocation<'tcx, Bx> {
fn deref(&self, bx: &mut Bx) -> Self;
fn layout(&self) -> TyAndLayout<'tcx>;
fn project_field(&self, bx: &mut Bx, field: FieldIdx) -> Self;
+ fn project_constant_index(&self, bx: &mut Bx, offset: u64) -> Self;
fn downcast(&self, bx: &mut Bx, variant: VariantIdx) -> Self;
}
@@ -98,6 +102,11 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> DebugInfoOffsetLocation<'tcx, Bx>
PlaceRef::project_field(*self, bx, field.index())
}
+ fn project_constant_index(&self, bx: &mut Bx, offset: u64) -> Self {
+ let lloffset = bx.cx().const_usize(offset);
+ self.project_index(bx, lloffset)
+ }
+
fn downcast(&self, bx: &mut Bx, variant: VariantIdx) -> Self {
self.project_downcast(bx, variant)
}
@@ -120,6 +129,10 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> DebugInfoOffsetLocation<'tcx, Bx>
self.field(bx.cx(), field.index())
}
+ fn project_constant_index(&self, bx: &mut Bx, index: u64) -> Self {
+ self.field(bx.cx(), index as usize)
+ }
+
fn downcast(&self, bx: &mut Bx, variant: VariantIdx) -> Self {
self.for_variant(bx.cx(), variant)
}
@@ -165,6 +178,18 @@ fn calculate_debuginfo_offset<
mir::ProjectionElem::Downcast(_, variant) => {
place = place.downcast(bx, variant);
}
+ mir::ProjectionElem::ConstantIndex {
+ offset: index,
+ min_length: _,
+ from_end: false,
+ } => {
+ let offset = indirect_offsets.last_mut().unwrap_or(&mut direct_offset);
+ let FieldsShape::Array { stride, count: _ } = place.layout().fields else {
+ span_bug!(var.source_info.span, "ConstantIndex on non-array type {:?}", place.layout())
+ };
+ *offset += stride * index;
+ place = place.project_constant_index(bx, index);
+ }
_ => {
// Sanity check for `can_use_in_debuginfo`.
debug_assert!(!elem.can_use_in_debuginfo());
@@ -293,6 +318,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
dbg_var,
fragment: None,
projection: ty::List::empty(),
+ references: 0,
})
}
} else {
@@ -358,55 +384,74 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
let vars = vars.iter().cloned().chain(fallback_var);
for var in vars {
- let Some(dbg_var) = var.dbg_var else { continue };
- let Some(dbg_loc) = self.dbg_loc(var.source_info) else { continue };
-
- let DebugInfoOffset { direct_offset, indirect_offsets, result: _ } =
- calculate_debuginfo_offset(bx, local, &var, base.layout);
-
- // When targeting MSVC, create extra allocas for arguments instead of pointing multiple
- // dbg_var_addr() calls into the same alloca with offsets. MSVC uses CodeView records
- // not DWARF and LLVM doesn't support translating the resulting
- // [DW_OP_deref, DW_OP_plus_uconst, offset, DW_OP_deref] debug info to CodeView.
- // Creating extra allocas on the stack makes the resulting debug info simple enough
- // that LLVM can generate correct CodeView records and thus the values appear in the
- // debugger. (#83709)
- let should_create_individual_allocas = bx.cx().sess().target.is_like_msvc
- && self.mir.local_kind(local) == mir::LocalKind::Arg
- // LLVM can handle simple things but anything more complex than just a direct
- // offset or one indirect offset of 0 is too complex for it to generate CV records
- // correctly.
- && (direct_offset != Size::ZERO
- || !matches!(&indirect_offsets[..], [Size::ZERO] | []));
-
- if should_create_individual_allocas {
- let DebugInfoOffset { direct_offset: _, indirect_offsets: _, result: place } =
- calculate_debuginfo_offset(bx, local, &var, base);
-
- // Create a variable which will be a pointer to the actual value
- let ptr_ty = bx
- .tcx()
- .mk_ptr(ty::TypeAndMut { mutbl: mir::Mutability::Mut, ty: place.layout.ty });
- let ptr_layout = bx.layout_of(ptr_ty);
- let alloca = PlaceRef::alloca(bx, ptr_layout);
- bx.set_var_name(alloca.llval, &(var.name.to_string() + ".dbg.spill"));
-
- // Write the pointer to the variable
- bx.store(place.llval, alloca.llval, alloca.align);
-
- // Point the debug info to `*alloca` for the current variable
- bx.dbg_var_addr(dbg_var, dbg_loc, alloca.llval, Size::ZERO, &[Size::ZERO], None);
- } else {
- bx.dbg_var_addr(
- dbg_var,
- dbg_loc,
- base.llval,
- direct_offset,
- &indirect_offsets,
- None,
- );
+ self.debug_introduce_local_as_var(bx, local, base, var);
+ }
+ }
+
+ fn debug_introduce_local_as_var(
+ &self,
+ bx: &mut Bx,
+ local: mir::Local,
+ mut base: PlaceRef<'tcx, Bx::Value>,
+ var: PerLocalVarDebugInfo<'tcx, Bx::DIVariable>,
+ ) {
+ let Some(dbg_var) = var.dbg_var else { return };
+ let Some(dbg_loc) = self.dbg_loc(var.source_info) else { return };
+
+ let DebugInfoOffset { mut direct_offset, indirect_offsets, result: _ } =
+ calculate_debuginfo_offset(bx, local, &var, base.layout);
+ let mut indirect_offsets = &indirect_offsets[..];
+
+ // When targeting MSVC, create extra allocas for arguments instead of pointing multiple
+ // dbg_var_addr() calls into the same alloca with offsets. MSVC uses CodeView records
+ // not DWARF and LLVM doesn't support translating the resulting
+ // [DW_OP_deref, DW_OP_plus_uconst, offset, DW_OP_deref] debug info to CodeView.
+ // Creating extra allocas on the stack makes the resulting debug info simple enough
+ // that LLVM can generate correct CodeView records and thus the values appear in the
+ // debugger. (#83709)
+ let should_create_individual_allocas = bx.cx().sess().target.is_like_msvc
+ && self.mir.local_kind(local) == mir::LocalKind::Arg
+ // LLVM can handle simple things but anything more complex than just a direct
+ // offset or one indirect offset of 0 is too complex for it to generate CV records
+ // correctly.
+ && (direct_offset != Size::ZERO || !matches!(indirect_offsets, [Size::ZERO] | []));
+
+ let create_alloca = |bx: &mut Bx, place: PlaceRef<'tcx, Bx::Value>, refcount| {
+ // Create a variable which will be a pointer to the actual value
+ let ptr_ty = bx
+ .tcx()
+ .mk_ptr(ty::TypeAndMut { mutbl: mir::Mutability::Mut, ty: place.layout.ty });
+ let ptr_layout = bx.layout_of(ptr_ty);
+ let alloca = PlaceRef::alloca(bx, ptr_layout);
+ bx.set_var_name(alloca.llval, &format!("{}.ref{}.dbg.spill", var.name, refcount));
+
+ // Write the pointer to the variable
+ bx.store(place.llval, alloca.llval, alloca.align);
+
+ // Point the debug info to `*alloca` for the current variable
+ alloca
+ };
+
+ if var.references > 0 {
+ base = calculate_debuginfo_offset(bx, local, &var, base).result;
+
+ // Point the debug info to `&...&base == alloca` for the current variable
+ for refcount in 0..var.references {
+ base = create_alloca(bx, base, refcount);
}
+
+ direct_offset = Size::ZERO;
+ indirect_offsets = &[];
+ } else if should_create_individual_allocas {
+ let place = calculate_debuginfo_offset(bx, local, &var, base).result;
+
+ // Point the debug info to `*alloca` for the current variable
+ base = create_alloca(bx, place, 0);
+ direct_offset = Size::ZERO;
+ indirect_offsets = &[Size::ZERO];
}
+
+ bx.dbg_var_addr(dbg_var, dbg_loc, base.llval, direct_offset, indirect_offsets, None);
}
pub fn debug_introduce_locals(&self, bx: &mut Bx) {
@@ -439,7 +484,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
};
let dbg_var = dbg_scope_and_span.map(|(dbg_scope, _, span)| {
- let (var_ty, var_kind) = match var.value {
+ let (mut var_ty, var_kind) = match var.value {
mir::VarDebugInfoContents::Place(place) => {
let var_ty = self.monomorphized_place_ty(place.as_ref());
let var_kind = if let Some(arg_index) = var.argument_index
@@ -476,6 +521,11 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
}
};
+ for _ in 0..var.references {
+ var_ty =
+ bx.tcx().mk_ptr(ty::TypeAndMut { mutbl: mir::Mutability::Mut, ty: var_ty });
+ }
+
self.cx.create_dbg_var(var.name, var_ty, dbg_scope, var_kind, span)
});
@@ -487,6 +537,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
dbg_var,
fragment: None,
projection: place.projection,
+ references: var.references,
});
}
mir::VarDebugInfoContents::Const(c) => {
@@ -494,6 +545,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
let Some(dbg_loc) = self.dbg_loc(var.source_info) else { continue };
if let Ok(operand) = self.eval_mir_constant_to_operand(bx, &c) {
+ self.set_debug_loc(bx, var.source_info);
let base = Self::spill_operand_to_stack(
&operand,
Some(var.name.to_string()),
@@ -539,6 +591,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
Some(fragment_start..fragment_start + fragment_layout.size)
},
projection: place.projection,
+ references: var.references,
});
}
}
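
Note: the new `ConstantIndex` arm computes the debuginfo offset as `stride * index` over a `FieldsShape::Array` layout. A small sketch of that arithmetic for a concrete array type (illustrative only, not part of the diff):

    use std::mem::size_of;

    fn main() {
        // For `[i32; 4]`, the `FieldsShape::Array` stride is the element
        // size, so a `ConstantIndex { offset: 2, .. }` projection adds
        // `stride * 2` bytes to the accumulated debuginfo offset.
        let stride = size_of::<i32>();
        let index = 2;
        assert_eq!(stride * index, 8); // arr[2] starts 8 bytes in
    }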
diff --git a/compiler/rustc_codegen_ssa/src/mir/intrinsic.rs b/compiler/rustc_codegen_ssa/src/mir/intrinsic.rs
index 7af7fc92d..1479242f2 100644
--- a/compiler/rustc_codegen_ssa/src/mir/intrinsic.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/intrinsic.rs
@@ -135,13 +135,6 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
.unwrap();
OperandRef::from_const(bx, value, ret_ty).immediate_or_packed_pair(bx)
}
- sym::offset => {
- let ty = substs.type_at(0);
- let layout = bx.layout_of(ty);
- let ptr = args[0].immediate();
- let offset = args[1].immediate();
- bx.inbounds_gep(bx.backend_type(layout), ptr, &[offset])
- }
sym::arith_offset => {
let ty = substs.type_at(0);
let layout = bx.layout_of(ty);
diff --git a/compiler/rustc_codegen_ssa/src/mir/mod.rs b/compiler/rustc_codegen_ssa/src/mir/mod.rs
index 3dadb33c9..1204c99e5 100644
--- a/compiler/rustc_codegen_ssa/src/mir/mod.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/mod.rs
@@ -9,7 +9,7 @@ use rustc_target::abi::call::{FnAbi, PassMode};
use std::iter;
use rustc_index::bit_set::BitSet;
-use rustc_index::vec::IndexVec;
+use rustc_index::IndexVec;
use self::debuginfo::{FunctionDebugContext, PerLocalVarDebugInfo};
use self::place::PlaceRef;
@@ -111,7 +111,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
self.instance.subst_mir_and_normalize_erasing_regions(
self.cx.tcx(),
ty::ParamEnv::reveal_all(),
- value,
+ ty::EarlyBinder(value),
)
}
}
@@ -152,7 +152,7 @@ pub fn codegen_mir<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
cx: &'a Bx::CodegenCx,
instance: Instance<'tcx>,
) {
- assert!(!instance.substs.needs_infer());
+ assert!(!instance.substs.has_infer());
let llfn = cx.get_fn(instance);
@@ -304,7 +304,17 @@ fn arg_local_refs<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
bug!("spread argument isn't a tuple?!");
};
- let place = PlaceRef::alloca(bx, bx.layout_of(arg_ty));
+ let layout = bx.layout_of(arg_ty);
+
+ // FIXME: support unsized params in "rust-call" ABI
+ if layout.is_unsized() {
+ span_bug!(
+ arg_decl.source_info.span,
+ "\"rust-call\" ABI does not support unsized params",
+ );
+ }
+
+ let place = PlaceRef::alloca(bx, layout);
for i in 0..tupled_arg_tys.len() {
let arg = &fx.fn_abi.args[idx];
idx += 1;
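
Note: the spread-argument path above untuples the final argument of `extern "rust-call"` functions. A nightly-only sketch of the calling convention this supports (the feature gates are unstable and not part of this diff):

    #![feature(unboxed_closures, fn_traits)]

    struct Adder;

    impl FnOnce<(i32, i32)> for Adder {
        type Output = i32;
        // "rust-call" receives its arguments packed into one tuple; codegen
        // re-expands ("spreads") it into the individual ABI arguments.
        extern "rust-call" fn call_once(self, args: (i32, i32)) -> i32 {
            args.0 + args.1
        }
    }

    fn main() {
        assert_eq!(Adder.call_once((2, 3)), 5);
    }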
diff --git a/compiler/rustc_codegen_ssa/src/mir/operand.rs b/compiler/rustc_codegen_ssa/src/mir/operand.rs
index b37797fef..2301c3ef1 100644
--- a/compiler/rustc_codegen_ssa/src/mir/operand.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/operand.rs
@@ -2,6 +2,7 @@ use super::place::PlaceRef;
use super::{FunctionCx, LocalRef};
use crate::base;
+use crate::common::TypeKind;
use crate::glue;
use crate::traits::*;
use crate::MemFlags;
@@ -236,19 +237,47 @@ impl<'a, 'tcx, V: CodegenObject> OperandRef<'tcx, V> {
};
match (&mut val, field.abi) {
- (OperandValue::Immediate(llval), _) => {
+ (
+ OperandValue::Immediate(llval),
+ Abi::Scalar(_) | Abi::ScalarPair(..) | Abi::Vector { .. },
+ ) => {
// Bools in union fields need to be truncated.
*llval = bx.to_immediate(*llval, field);
// HACK(eddyb) have to bitcast pointers until LLVM removes pointee types.
- *llval = bx.bitcast(*llval, bx.cx().immediate_backend_type(field));
+ let ty = bx.cx().immediate_backend_type(field);
+ if bx.type_kind(ty) == TypeKind::Pointer {
+ *llval = bx.pointercast(*llval, ty);
+ }
}
(OperandValue::Pair(a, b), Abi::ScalarPair(a_abi, b_abi)) => {
// Bools in union fields need to be truncated.
*a = bx.to_immediate_scalar(*a, a_abi);
*b = bx.to_immediate_scalar(*b, b_abi);
// HACK(eddyb) have to bitcast pointers until LLVM removes pointee types.
- *a = bx.bitcast(*a, bx.cx().scalar_pair_element_backend_type(field, 0, true));
- *b = bx.bitcast(*b, bx.cx().scalar_pair_element_backend_type(field, 1, true));
+ let a_ty = bx.cx().scalar_pair_element_backend_type(field, 0, true);
+ let b_ty = bx.cx().scalar_pair_element_backend_type(field, 1, true);
+ if bx.type_kind(a_ty) == TypeKind::Pointer {
+ *a = bx.pointercast(*a, a_ty);
+ }
+ if bx.type_kind(b_ty) == TypeKind::Pointer {
+ *b = bx.pointercast(*b, b_ty);
+ }
+ }
+ // Newtype vector of array, e.g. #[repr(simd)] struct S([i32; 4]);
+ (OperandValue::Immediate(llval), Abi::Aggregate { sized: true }) => {
+ assert!(matches!(self.layout.abi, Abi::Vector { .. }));
+
+ let llty = bx.cx().backend_type(self.layout);
+ let llfield_ty = bx.cx().backend_type(field);
+
+ // Can't bitcast an aggregate, so round trip through memory.
+ let lltemp = bx.alloca(llfield_ty, field.align.abi);
+ let llptr = bx.pointercast(lltemp, bx.cx().type_ptr_to(llty));
+ bx.store(*llval, llptr, field.align.abi);
+ *llval = bx.load(llfield_ty, lltemp, field.align.abi);
+ }
+ (OperandValue::Immediate(_), Abi::Uninhabited | Abi::Aggregate { sized: false }) => {
+ bug!()
}
(OperandValue::Pair(..), _) => bug!(),
(OperandValue::Ref(..), _) => bug!(),
@@ -373,8 +402,6 @@ impl<'a, 'tcx, V: CodegenObject> OperandValue<V> {
indirect_dest: PlaceRef<'tcx, V>,
) {
debug!("OperandRef::store_unsized: operand={:?}, indirect_dest={:?}", self, indirect_dest);
- let flags = MemFlags::empty();
-
// `indirect_dest` must have `*mut T` type. We extract `T` out of it.
let unsized_ty = indirect_dest
.layout
@@ -387,17 +414,23 @@ impl<'a, 'tcx, V: CodegenObject> OperandValue<V> {
bug!("store_unsized called with a sized value")
};
- // FIXME: choose an appropriate alignment, or use dynamic align somehow
- let max_align = Align::from_bits(128).unwrap();
- let min_align = Align::from_bits(8).unwrap();
-
- // Allocate an appropriate region on the stack, and copy the value into it
- let (llsize, _) = glue::size_and_align_of_dst(bx, unsized_ty, Some(llextra));
- let lldst = bx.byte_array_alloca(llsize, max_align);
- bx.memcpy(lldst, max_align, llptr, min_align, llsize, flags);
+ // Allocate an appropriate region on the stack, and copy the value into it. Since alloca
+ // doesn't support dynamic alignment, we allocate an extra align - 1 bytes, and align the
+ // pointer manually.
+ let (size, align) = glue::size_and_align_of_dst(bx, unsized_ty, Some(llextra));
+ let one = bx.const_usize(1);
+ let align_minus_1 = bx.sub(align, one);
+ let size_extra = bx.add(size, align_minus_1);
+ let min_align = Align::ONE;
+ let alloca = bx.byte_array_alloca(size_extra, min_align);
+ let address = bx.ptrtoint(alloca, bx.type_isize());
+ let neg_address = bx.neg(address);
+ let offset = bx.and(neg_address, align_minus_1);
+ let dst = bx.inbounds_gep(bx.type_i8(), alloca, &[offset]);
+ bx.memcpy(dst, min_align, llptr, min_align, size, MemFlags::empty());
// Store the allocated region and the extra to the indirect place.
- let indirect_operand = OperandValue::Pair(lldst, llextra);
+ let indirect_operand = OperandValue::Pair(dst, llextra);
indirect_operand.store(bx, indirect_dest);
}
}
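
Note: the replacement for the fixed `max_align` hack aligns the over-allocated buffer by hand: `offset = (-addr) & (align - 1)` rounds `addr` up to the next multiple of a power-of-two `align`, which is exactly the neg/and sequence emitted above. A standalone check of that identity (hypothetical helper name):

    fn align_up_offset(addr: usize, align: usize) -> usize {
        debug_assert!(align.is_power_of_two());
        // (-addr) mod align: how many bytes to advance so that
        // addr + offset is align-aligned.
        addr.wrapping_neg() & (align - 1)
    }

    fn main() {
        let addr = 0x1003;
        let off = align_up_offset(addr, 16);
        assert_eq!(off, 13);
        assert_eq!((addr + off) % 16, 0); // 0x1003 + 13 = 0x1010
    }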
diff --git a/compiler/rustc_codegen_ssa/src/mir/rvalue.rs b/compiler/rustc_codegen_ssa/src/mir/rvalue.rs
index d88226f5d..6e7065713 100644
--- a/compiler/rustc_codegen_ssa/src/mir/rvalue.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/rvalue.rs
@@ -12,6 +12,7 @@ use rustc_middle::mir::Operand;
use rustc_middle::ty::cast::{CastTy, IntTy};
use rustc_middle::ty::layout::{HasTyCtxt, LayoutOf, TyAndLayout};
use rustc_middle::ty::{self, adjustment::PointerCast, Instance, Ty, TyCtxt};
+use rustc_session::config::OptLevel;
use rustc_span::source_map::{Span, DUMMY_SP};
use rustc_target::abi::{self, FIRST_VARIANT};
@@ -231,10 +232,16 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
(ScalarOrZst::Scalar(in_scalar), ScalarOrZst::Scalar(out_scalar))
if in_scalar.size(self.cx) == out_scalar.size(self.cx) =>
{
+ let operand_bty = bx.backend_type(operand.layout);
let cast_bty = bx.backend_type(cast);
- Some(OperandValue::Immediate(
- self.transmute_immediate(bx, imm, in_scalar, out_scalar, cast_bty),
- ))
+ Some(OperandValue::Immediate(self.transmute_immediate(
+ bx,
+ imm,
+ in_scalar,
+ operand_bty,
+ out_scalar,
+ cast_bty,
+ )))
}
_ => None,
}
@@ -250,11 +257,13 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
&& in_a.size(self.cx) == out_a.size(self.cx)
&& in_b.size(self.cx) == out_b.size(self.cx)
{
+ let in_a_ibty = bx.scalar_pair_element_backend_type(operand.layout, 0, false);
+ let in_b_ibty = bx.scalar_pair_element_backend_type(operand.layout, 1, false);
let out_a_ibty = bx.scalar_pair_element_backend_type(cast, 0, false);
let out_b_ibty = bx.scalar_pair_element_backend_type(cast, 1, false);
Some(OperandValue::Pair(
- self.transmute_immediate(bx, imm_a, in_a, out_a, out_a_ibty),
- self.transmute_immediate(bx, imm_b, in_b, out_b, out_b_ibty),
+ self.transmute_immediate(bx, imm_a, in_a, in_a_ibty, out_a, out_a_ibty),
+ self.transmute_immediate(bx, imm_b, in_b, in_b_ibty, out_b, out_b_ibty),
))
} else {
None
@@ -273,6 +282,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
bx: &mut Bx,
mut imm: Bx::Value,
from_scalar: abi::Scalar,
+ from_backend_ty: Bx::Type,
to_scalar: abi::Scalar,
to_backend_ty: Bx::Type,
) -> Bx::Value {
@@ -280,6 +290,13 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
use abi::Primitive::*;
imm = bx.from_immediate(imm);
+
+ // When scalars are passed by value, there's no metadata recording their
+ // valid ranges. For example, `char`s are passed as just `i32`, with no
+ // way for LLVM to know that they're 0x10FFFF at most. Thus we assume
+ // the range of the input value too, not just the output range.
+ self.assume_scalar_range(bx, imm, from_scalar, from_backend_ty);
+
imm = match (from_scalar.primitive(), to_scalar.primitive()) {
(Int(..) | F32 | F64, Int(..) | F32 | F64) => bx.bitcast(imm, to_backend_ty),
(Pointer(..), Pointer(..)) => bx.pointercast(imm, to_backend_ty),
@@ -294,10 +311,55 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
bx.bitcast(int_imm, to_backend_ty)
}
};
+ self.assume_scalar_range(bx, imm, to_scalar, to_backend_ty);
imm = bx.to_immediate_scalar(imm, to_scalar);
imm
}
+ fn assume_scalar_range(
+ &self,
+ bx: &mut Bx,
+ imm: Bx::Value,
+ scalar: abi::Scalar,
+ backend_ty: Bx::Type,
+ ) {
+ if matches!(self.cx.sess().opts.optimize, OptLevel::No | OptLevel::Less)
+ // For now, the critical niches are all over `Int`eger values.
+ // Should floating-point values or pointers ever get more complex
+ // niches, then this code will probably want to handle them too.
+ || !matches!(scalar.primitive(), abi::Primitive::Int(..))
+ || scalar.is_always_valid(self.cx)
+ {
+ return;
+ }
+
+ let abi::WrappingRange { start, end } = scalar.valid_range(self.cx);
+
+ if start <= end {
+ if start > 0 {
+ let low = bx.const_uint_big(backend_ty, start);
+ let cmp = bx.icmp(IntPredicate::IntUGE, imm, low);
+ bx.assume(cmp);
+ }
+
+ let type_max = scalar.size(self.cx).unsigned_int_max();
+ if end < type_max {
+ let high = bx.const_uint_big(backend_ty, end);
+ let cmp = bx.icmp(IntPredicate::IntULE, imm, high);
+ bx.assume(cmp);
+ }
+ } else {
+ let low = bx.const_uint_big(backend_ty, start);
+ let cmp_low = bx.icmp(IntPredicate::IntUGE, imm, low);
+
+ let high = bx.const_uint_big(backend_ty, end);
+ let cmp_high = bx.icmp(IntPredicate::IntULE, imm, high);
+
+ let or = bx.or(cmp_low, cmp_high);
+ bx.assume(or);
+ }
+ }
+
pub fn codegen_rvalue_unsized(
&mut self,
bx: &mut Bx,
@@ -604,13 +666,16 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
}
}
- mir::Rvalue::NullaryOp(null_op, ty) => {
+ mir::Rvalue::NullaryOp(ref null_op, ty) => {
let ty = self.monomorphize(ty);
assert!(bx.cx().type_is_sized(ty));
let layout = bx.cx().layout_of(ty);
let val = match null_op {
mir::NullOp::SizeOf => layout.size.bytes(),
mir::NullOp::AlignOf => layout.align.abi.bytes(),
+ mir::NullOp::OffsetOf(fields) => {
+ layout.offset_of_subfield(bx.cx(), fields.iter().map(|f| f.index())).bytes()
+ }
};
let val = bx.cx().const_usize(val);
let tcx = self.cx.tcx();
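
Note: the new `NullOp::OffsetOf` arm resolves a field path to a byte offset from the monomorphized layout; it is the codegen side of the `offset_of!` macro (unstable when this change landed, later stabilized in `core::mem`). A sketch of the observable behavior:

    use core::mem::offset_of;

    #[repr(C)]
    struct Header {
        tag: u8,
        len: u32,
    }

    fn main() {
        // `offset_of!` lowers to the OffsetOf nullary op handled above: the
        // layout query walks the field path and yields a byte offset.
        assert_eq!(offset_of!(Header, tag), 0);
        assert_eq!(offset_of!(Header, len), 4); // u32 alignment pads 3 bytes
    }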
@@ -632,7 +697,12 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
let fn_ptr = bx.get_fn_addr(instance);
let fn_abi = bx.fn_abi_of_instance(instance, ty::List::empty());
let fn_ty = bx.fn_decl_backend_type(&fn_abi);
- bx.call(fn_ty, Some(fn_abi), fn_ptr, &[], None)
+ let fn_attrs = if bx.tcx().def_kind(instance.def_id()).has_codegen_attrs() {
+ Some(bx.tcx().codegen_fn_attrs(instance.def_id()))
+ } else {
+ None
+ };
+ bx.call(fn_ty, fn_attrs, Some(fn_abi), fn_ptr, &[], None)
} else {
bx.get_static(def_id)
};
@@ -754,8 +824,15 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
.builtin_deref(true)
.unwrap_or_else(|| bug!("deref of non-pointer {:?}", input_ty))
.ty;
- let llty = bx.cx().backend_type(bx.cx().layout_of(pointee_type));
- bx.inbounds_gep(llty, lhs, &[rhs])
+ let pointee_layout = bx.cx().layout_of(pointee_type);
+ if pointee_layout.is_zst() {
+ // `Offset` works in terms of the size of pointee,
+ // so offsetting a pointer to ZST is a noop.
+ lhs
+ } else {
+ let llty = bx.cx().backend_type(pointee_layout);
+ bx.inbounds_gep(llty, lhs, &[rhs])
+ }
}
mir::BinOp::Shl => common::build_unchecked_lshift(bx, lhs, rhs),
mir::BinOp::Shr => common::build_unchecked_rshift(bx, input_ty, lhs, rhs),
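
Note: the `assume_scalar_range` helper added above encodes a scalar's valid range as LLVM `assume`s, covering both ordinary intervals and wrap-around niches where `start > end`. The membership test it encodes, written out in plain Rust (hypothetical function, illustration only):

    /// Mirrors the range test `assume_scalar_range` emits: an ordinary
    /// interval when `start <= end`, otherwise a wrap-around range whose
    /// valid values are `>= start` OR `<= end`.
    fn in_wrapping_range(v: u128, start: u128, end: u128) -> bool {
        if start <= end {
            v >= start && v <= end
        } else {
            v >= start || v <= end
        }
    }

    fn main() {
        // `char` is an Int scalar valid in 0..=0x10FFFF, so transmuting one
        // to `u32` lets LLVM assume the value never exceeds 0x10FFFF.
        assert!(in_wrapping_range('A' as u128, 0, 0x10FFFF));
        // A wrap-around niche: everything except 3..=9 is valid.
        assert!(in_wrapping_range(1, 10, 2));
        assert!(!in_wrapping_range(5, 10, 2));
    }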