Diffstat (limited to 'compiler/rustc_codegen_cranelift/src')
 compiler/rustc_codegen_cranelift/src/abi/comments.rs            |   9
 compiler/rustc_codegen_cranelift/src/abi/mod.rs                 |  29
 compiler/rustc_codegen_cranelift/src/allocator.rs               |   4
 compiler/rustc_codegen_cranelift/src/base.rs                    |  30
 compiler/rustc_codegen_cranelift/src/common.rs                  |   2
 compiler/rustc_codegen_cranelift/src/constant.rs                |   2
 compiler/rustc_codegen_cranelift/src/debuginfo/line_info.rs     |   2
 compiler/rustc_codegen_cranelift/src/debuginfo/mod.rs           |   2
 compiler/rustc_codegen_cranelift/src/driver/jit.rs              |   6
 compiler/rustc_codegen_cranelift/src/driver/mod.rs              |  10
 compiler/rustc_codegen_cranelift/src/global_asm.rs              |   2
 compiler/rustc_codegen_cranelift/src/inline_asm.rs              |   4
 compiler/rustc_codegen_cranelift/src/intrinsics/llvm.rs         |  20
 compiler/rustc_codegen_cranelift/src/intrinsics/llvm_aarch64.rs |   4
 compiler/rustc_codegen_cranelift/src/intrinsics/llvm_x86.rs     | 278
 compiler/rustc_codegen_cranelift/src/intrinsics/mod.rs          |  45
 compiler/rustc_codegen_cranelift/src/intrinsics/simd.rs         |  58
 compiler/rustc_codegen_cranelift/src/lib.rs                     |   9
 compiler/rustc_codegen_cranelift/src/main_shim.rs               |   6
 compiler/rustc_codegen_cranelift/src/pretty_clif.rs             |   6
 compiler/rustc_codegen_cranelift/src/value_and_place.rs         | 115
 21 files changed, 471 insertions(+), 172 deletions(-)
diff --git a/compiler/rustc_codegen_cranelift/src/abi/comments.rs b/compiler/rustc_codegen_cranelift/src/abi/comments.rs
index 364503fd3..ade6968de 100644
--- a/compiler/rustc_codegen_cranelift/src/abi/comments.rs
+++ b/compiler/rustc_codegen_cranelift/src/abi/comments.rs
@@ -80,14 +80,7 @@ pub(super) fn add_local_place_comments<'tcx>(
return;
}
let TyAndLayout { ty, layout } = place.layout();
- let rustc_target::abi::LayoutS {
- size,
- align,
- abi: _,
- variants: _,
- fields: _,
- largest_niche: _,
- } = layout.0.0;
+ let rustc_target::abi::LayoutS { size, align, .. } = layout.0.0;
let (kind, extra) = place.debug_comment();
diff --git a/compiler/rustc_codegen_cranelift/src/abi/mod.rs b/compiler/rustc_codegen_cranelift/src/abi/mod.rs
index 199fa6861..b7f56a298 100644
--- a/compiler/rustc_codegen_cranelift/src/abi/mod.rs
+++ b/compiler/rustc_codegen_cranelift/src/abi/mod.rs
@@ -48,7 +48,9 @@ pub(crate) fn conv_to_call_conv(sess: &Session, c: Conv, default_call_conv: Call
default_call_conv
}
- Conv::X86Intr => sess.fatal("x86-interrupt call conv not yet implemented"),
+ Conv::X86Intr | Conv::RiscvInterrupt { .. } => {
+ sess.fatal(format!("interrupt call conv {c:?} not yet implemented"))
+ }
Conv::ArmAapcs => sess.fatal("aapcs call conv not yet implemented"),
Conv::CCmseNonSecureCall => {
@@ -70,7 +72,7 @@ pub(crate) fn get_function_sig<'tcx>(
default_call_conv: CallConv,
inst: Instance<'tcx>,
) -> Signature {
- assert!(!inst.substs.has_infer());
+ assert!(!inst.args.has_infer());
clif_sig_from_fn_abi(
tcx,
default_call_conv,
@@ -377,16 +379,16 @@ pub(crate) fn codegen_terminator_call<'tcx>(
let ret_place = codegen_place(fx, destination);
// Handle special calls like intrinsics and empty drop glue.
- let instance = if let ty::FnDef(def_id, substs) = *func.layout().ty.kind() {
+ let instance = if let ty::FnDef(def_id, fn_args) = *func.layout().ty.kind() {
let instance =
- ty::Instance::expect_resolve(fx.tcx, ty::ParamEnv::reveal_all(), def_id, substs)
+ ty::Instance::expect_resolve(fx.tcx, ty::ParamEnv::reveal_all(), def_id, fn_args)
.polymorphize(fx.tcx);
if fx.tcx.symbol_name(instance).name.starts_with("llvm.") {
crate::intrinsics::codegen_llvm_intrinsic_call(
fx,
&fx.tcx.symbol_name(instance).name,
- substs,
+ fn_args,
args,
ret_place,
target,
@@ -445,9 +447,14 @@ pub(crate) fn codegen_terminator_call<'tcx>(
// Unpack arguments tuple for closures
let mut args = if fn_sig.abi() == Abi::RustCall {
- assert_eq!(args.len(), 2, "rust-call abi requires two arguments");
- let self_arg = codegen_call_argument_operand(fx, &args[0]);
- let pack_arg = codegen_call_argument_operand(fx, &args[1]);
+ let (self_arg, pack_arg) = match args {
+ [pack_arg] => (None, codegen_call_argument_operand(fx, pack_arg)),
+ [self_arg, pack_arg] => (
+ Some(codegen_call_argument_operand(fx, self_arg)),
+ codegen_call_argument_operand(fx, pack_arg),
+ ),
+ _ => panic!("rust-call abi requires one or two arguments"),
+ };
let tupled_arguments = match pack_arg.value.layout().ty.kind() {
ty::Tuple(ref tupled_arguments) => tupled_arguments,
@@ -455,7 +462,7 @@ pub(crate) fn codegen_terminator_call<'tcx>(
};
let mut args = Vec::with_capacity(1 + tupled_arguments.len());
- args.push(self_arg);
+ args.extend(self_arg);
for i in 0..tupled_arguments.len() {
args.push(CallArgument {
value: pack_arg.value.value_field(fx, FieldIdx::new(i)),
@@ -611,7 +618,7 @@ pub(crate) fn codegen_drop<'tcx>(
// `Instance::resolve_drop_in_place`?
let virtual_drop = Instance {
def: ty::InstanceDef::Virtual(drop_instance.def_id(), 0),
- substs: drop_instance.substs,
+ args: drop_instance.args,
};
let fn_abi =
RevealAllLayoutCx(fx.tcx).fn_abi_of_instance(virtual_drop, ty::List::empty());
@@ -648,7 +655,7 @@ pub(crate) fn codegen_drop<'tcx>(
let virtual_drop = Instance {
def: ty::InstanceDef::Virtual(drop_instance.def_id(), 0),
- substs: drop_instance.substs,
+ args: drop_instance.args,
};
let fn_abi =
RevealAllLayoutCx(fx.tcx).fn_abi_of_instance(virtual_drop, ty::List::empty());
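
// A hedged aside on the rust-call change above, not part of the patch: the new
// one-argument case arises for free `extern "rust-call"` functions, which take
// only the packed argument tuple and have no self argument. A minimal
// nightly-only sketch (the function name `add` is illustrative):

#![feature(unboxed_closures)]

extern "rust-call" fn add(args: (i32, i32)) -> i32 {
    args.0 + args.1
}

fn main() {
    assert_eq!(add((1, 2)), 3);
}
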
diff --git a/compiler/rustc_codegen_cranelift/src/allocator.rs b/compiler/rustc_codegen_cranelift/src/allocator.rs
index e92280b26..4e4c595de 100644
--- a/compiler/rustc_codegen_cranelift/src/allocator.rs
+++ b/compiler/rustc_codegen_cranelift/src/allocator.rs
@@ -39,8 +39,8 @@ fn codegen_inner(
if kind == AllocatorKind::Default {
for method in ALLOCATOR_METHODS {
let mut arg_tys = Vec::with_capacity(method.inputs.len());
- for ty in method.inputs.iter() {
- match *ty {
+ for input in method.inputs.iter() {
+ match input.ty {
AllocatorTy::Layout => {
arg_tys.push(usize_ty); // size
arg_tys.push(usize_ty); // align
diff --git a/compiler/rustc_codegen_cranelift/src/base.rs b/compiler/rustc_codegen_cranelift/src/base.rs
index 334b2780b..522dd7189 100644
--- a/compiler/rustc_codegen_cranelift/src/base.rs
+++ b/compiler/rustc_codegen_cranelift/src/base.rs
@@ -7,6 +7,8 @@ use rustc_middle::ty::layout::FnAbiOf;
use rustc_middle::ty::print::with_no_trimmed_paths;
use cranelift_codegen::ir::UserFuncName;
+use cranelift_codegen::CodegenError;
+use cranelift_module::ModuleError;
use crate::constant::ConstantCx;
use crate::debuginfo::FunctionDebugContext;
@@ -28,7 +30,7 @@ pub(crate) fn codegen_fn<'tcx>(
module: &mut dyn Module,
instance: Instance<'tcx>,
) -> CodegenedFunction {
- debug_assert!(!instance.substs.has_infer());
+ debug_assert!(!instance.args.has_infer());
let symbol_name = tcx.symbol_name(instance).name.to_string();
let _timer = tcx.prof.generic_activity_with_arg("codegen fn", &*symbol_name);
@@ -172,7 +174,21 @@ pub(crate) fn compile_fn(
// Define function
cx.profiler.generic_activity("define function").run(|| {
context.want_disasm = cx.should_write_ir;
- module.define_function(codegened_func.func_id, context).unwrap();
+ match module.define_function(codegened_func.func_id, context) {
+ Ok(()) => {}
+ Err(ModuleError::Compilation(CodegenError::ImplLimitExceeded)) => {
+ let handler = rustc_session::EarlyErrorHandler::new(
+ rustc_session::config::ErrorOutputType::default(),
+ );
+ handler.early_error(format!(
+ "backend implementation limit exceeded while compiling {name}",
+ name = codegened_func.symbol_name
+ ));
+ }
+ Err(err) => {
+ panic!("Error while defining {name}: {err:?}", name = codegened_func.symbol_name);
+ }
+ }
});
if cx.should_write_ir {
@@ -356,7 +372,7 @@ fn codegen_fn_body(fx: &mut FunctionCx<'_, '_, '_>, start_block: Block) {
codegen_panic_inner(
fx,
- rustc_hir::LangItem::PanicBoundsCheck,
+ rustc_hir::LangItem::PanicMisalignedPointerDereference,
&[required, found, location],
source_info.span,
);
@@ -578,13 +594,13 @@ fn codegen_stmt<'tcx>(
let from_ty = fx.monomorphize(operand.ty(&fx.mir.local_decls, fx.tcx));
let to_layout = fx.layout_of(fx.monomorphize(to_ty));
match *from_ty.kind() {
- ty::FnDef(def_id, substs) => {
+ ty::FnDef(def_id, args) => {
let func_ref = fx.get_function_ref(
Instance::resolve_for_fn_ptr(
fx.tcx,
ParamEnv::reveal_all(),
def_id,
- substs,
+ args,
)
.unwrap()
.polymorphize(fx.tcx),
@@ -668,11 +684,11 @@ fn codegen_stmt<'tcx>(
) => {
let operand = codegen_operand(fx, operand);
match *operand.layout().ty.kind() {
- ty::Closure(def_id, substs) => {
+ ty::Closure(def_id, args) => {
let instance = Instance::resolve_closure(
fx.tcx,
def_id,
- substs,
+ args,
ty::ClosureKind::FnOnce,
)
.expect("failed to normalize and resolve closure during codegen")
diff --git a/compiler/rustc_codegen_cranelift/src/common.rs b/compiler/rustc_codegen_cranelift/src/common.rs
index 67ea20112..3081dcfa2 100644
--- a/compiler/rustc_codegen_cranelift/src/common.rs
+++ b/compiler/rustc_codegen_cranelift/src/common.rs
@@ -477,7 +477,7 @@ impl<'tcx> LayoutOfHelpers<'tcx> for RevealAllLayoutCx<'tcx> {
#[inline]
fn handle_layout_err(&self, err: LayoutError<'tcx>, span: Span, ty: Ty<'tcx>) -> ! {
- if let layout::LayoutError::SizeOverflow(_) = err {
+ if let LayoutError::SizeOverflow(_) | LayoutError::ReferencesError(_) = err {
self.0.sess.span_fatal(span, err.to_string())
} else {
span_bug!(span, "failed to get layout for `{}`: {}", ty, err)
diff --git a/compiler/rustc_codegen_cranelift/src/constant.rs b/compiler/rustc_codegen_cranelift/src/constant.rs
index 427340c33..c31535742 100644
--- a/compiler/rustc_codegen_cranelift/src/constant.rs
+++ b/compiler/rustc_codegen_cranelift/src/constant.rs
@@ -57,7 +57,7 @@ pub(crate) fn codegen_tls_ref<'tcx>(
let tls_ptr = if !def_id.is_local() && fx.tcx.needs_thread_local_shim(def_id) {
let instance = ty::Instance {
def: ty::InstanceDef::ThreadLocalShim(def_id),
- substs: ty::InternalSubsts::empty(),
+ args: ty::GenericArgs::empty(),
};
let func_ref = fx.get_function_ref(instance);
let call = fx.bcx.ins().call(func_ref, &[]);
diff --git a/compiler/rustc_codegen_cranelift/src/debuginfo/line_info.rs b/compiler/rustc_codegen_cranelift/src/debuginfo/line_info.rs
index 1b454b666..50bc7a127 100644
--- a/compiler/rustc_codegen_cranelift/src/debuginfo/line_info.rs
+++ b/compiler/rustc_codegen_cranelift/src/debuginfo/line_info.rs
@@ -165,7 +165,7 @@ impl FunctionDebugContext {
for &MachSrcLoc { start, end, loc } in mcr.buffer.get_srclocs_sorted() {
debug_context.dwarf.unit.line_program.row().address_offset = u64::from(start);
if !loc.is_default() {
- let source_loc = *self.source_loc_set.get_index(loc.bits() as usize).unwrap();
+ let source_loc = self.source_loc_set[loc.bits() as usize];
create_row_for_span(debug_context, source_loc);
} else {
create_row_for_span(debug_context, self.function_source_loc);
diff --git a/compiler/rustc_codegen_cranelift/src/debuginfo/mod.rs b/compiler/rustc_codegen_cranelift/src/debuginfo/mod.rs
index 3a7421d8b..8a4b1cccf 100644
--- a/compiler/rustc_codegen_cranelift/src/debuginfo/mod.rs
+++ b/compiler/rustc_codegen_cranelift/src/debuginfo/mod.rs
@@ -38,7 +38,7 @@ pub(crate) struct DebugContext {
pub(crate) struct FunctionDebugContext {
entry_id: UnitEntryId,
function_source_loc: (FileId, u64, u64),
- source_loc_set: indexmap::IndexSet<(FileId, u64, u64)>,
+ source_loc_set: IndexSet<(FileId, u64, u64)>,
}
impl DebugContext {
diff --git a/compiler/rustc_codegen_cranelift/src/driver/jit.rs b/compiler/rustc_codegen_cranelift/src/driver/jit.rs
index 41e24acef..1c606494f 100644
--- a/compiler/rustc_codegen_cranelift/src/driver/jit.rs
+++ b/compiler/rustc_codegen_cranelift/src/driver/jit.rs
@@ -98,7 +98,7 @@ pub(crate) fn run_jit(tcx: TyCtxt<'_>, backend_config: BackendConfig) -> ! {
tcx.sess.fatal("JIT mode doesn't work with `cargo check`");
}
- if !tcx.sess.crate_types().contains(&rustc_session::config::CrateType::Executable) {
+ if !tcx.crate_types().contains(&rustc_session::config::CrateType::Executable) {
tcx.sess.fatal("can't jit non-executable crate");
}
@@ -114,9 +114,9 @@ pub(crate) fn run_jit(tcx: TyCtxt<'_>, backend_config: BackendConfig) -> ! {
.iter()
.map(|cgu| cgu.items_in_deterministic_order(tcx).into_iter())
.flatten()
- .collect::<FxHashMap<_, (_, _)>>()
+ .collect::<FxHashMap<_, _>>()
.into_iter()
- .collect::<Vec<(_, (_, _))>>();
+ .collect::<Vec<(_, _)>>();
tcx.sess.time("codegen mono items", || {
super::predefine_mono_items(tcx, &mut jit_module, &mono_items);
diff --git a/compiler/rustc_codegen_cranelift/src/driver/mod.rs b/compiler/rustc_codegen_cranelift/src/driver/mod.rs
index 5c52c9c18..12e90b584 100644
--- a/compiler/rustc_codegen_cranelift/src/driver/mod.rs
+++ b/compiler/rustc_codegen_cranelift/src/driver/mod.rs
@@ -5,7 +5,7 @@
//! [`codegen_static`]: crate::constant::codegen_static
use rustc_data_structures::profiling::SelfProfilerRef;
-use rustc_middle::mir::mono::{Linkage as RLinkage, MonoItem, Visibility};
+use rustc_middle::mir::mono::{MonoItem, MonoItemData};
use crate::prelude::*;
@@ -16,11 +16,11 @@ pub(crate) mod jit;
fn predefine_mono_items<'tcx>(
tcx: TyCtxt<'tcx>,
module: &mut dyn Module,
- mono_items: &[(MonoItem<'tcx>, (RLinkage, Visibility))],
+ mono_items: &[(MonoItem<'tcx>, MonoItemData)],
) {
tcx.prof.generic_activity("predefine functions").run(|| {
let is_compiler_builtins = tcx.is_compiler_builtins(LOCAL_CRATE);
- for &(mono_item, (linkage, visibility)) in mono_items {
+ for &(mono_item, data) in mono_items {
match mono_item {
MonoItem::Fn(instance) => {
let name = tcx.symbol_name(instance).name;
@@ -29,8 +29,8 @@ fn predefine_mono_items<'tcx>(
get_function_sig(tcx, module.target_config().default_call_conv, instance);
let linkage = crate::linkage::get_clif_linkage(
mono_item,
- linkage,
- visibility,
+ data.linkage,
+ data.visibility,
is_compiler_builtins,
);
module.declare_function(name, linkage, &sig).unwrap();
diff --git a/compiler/rustc_codegen_cranelift/src/global_asm.rs b/compiler/rustc_codegen_cranelift/src/global_asm.rs
index 63a1f6959..baadd7a9e 100644
--- a/compiler/rustc_codegen_cranelift/src/global_asm.rs
+++ b/compiler/rustc_codegen_cranelift/src/global_asm.rs
@@ -42,7 +42,7 @@ pub(crate) fn codegen_global_asm_item(tcx: TyCtxt<'_>, global_asm: &mut String,
InlineAsmOperand::SymFn { anon_const } => {
let ty = tcx.typeck_body(anon_const.body).node_type(anon_const.hir_id);
let instance = match ty.kind() {
- &ty::FnDef(def_id, substs) => Instance::new(def_id, substs),
+ &ty::FnDef(def_id, args) => Instance::new(def_id, args),
_ => span_bug!(op_sp, "asm sym is not a function"),
};
let symbol = tcx.symbol_name(instance);
diff --git a/compiler/rustc_codegen_cranelift/src/inline_asm.rs b/compiler/rustc_codegen_cranelift/src/inline_asm.rs
index 3ba530c04..518e3da07 100644
--- a/compiler/rustc_codegen_cranelift/src/inline_asm.rs
+++ b/compiler/rustc_codegen_cranelift/src/inline_asm.rs
@@ -254,12 +254,12 @@ pub(crate) fn codegen_inline_asm<'tcx>(
}
InlineAsmOperand::SymFn { ref value } => {
let literal = fx.monomorphize(value.literal);
- if let ty::FnDef(def_id, substs) = *literal.ty().kind() {
+ if let ty::FnDef(def_id, args) = *literal.ty().kind() {
let instance = ty::Instance::resolve_for_fn_ptr(
fx.tcx,
ty::ParamEnv::reveal_all(),
def_id,
- substs,
+ args,
)
.unwrap();
let symbol = fx.tcx.symbol_name(instance);
diff --git a/compiler/rustc_codegen_cranelift/src/intrinsics/llvm.rs b/compiler/rustc_codegen_cranelift/src/intrinsics/llvm.rs
index f67fdb592..63b5402f2 100644
--- a/compiler/rustc_codegen_cranelift/src/intrinsics/llvm.rs
+++ b/compiler/rustc_codegen_cranelift/src/intrinsics/llvm.rs
@@ -3,23 +3,35 @@
use crate::intrinsics::*;
use crate::prelude::*;
-use rustc_middle::ty::subst::SubstsRef;
+use rustc_middle::ty::GenericArgsRef;
pub(crate) fn codegen_llvm_intrinsic_call<'tcx>(
fx: &mut FunctionCx<'_, '_, 'tcx>,
intrinsic: &str,
- substs: SubstsRef<'tcx>,
+ generic_args: GenericArgsRef<'tcx>,
args: &[mir::Operand<'tcx>],
ret: CPlace<'tcx>,
target: Option<BasicBlock>,
) {
if intrinsic.starts_with("llvm.aarch64") {
return llvm_aarch64::codegen_aarch64_llvm_intrinsic_call(
- fx, intrinsic, substs, args, ret, target,
+ fx,
+ intrinsic,
+ generic_args,
+ args,
+ ret,
+ target,
);
}
if intrinsic.starts_with("llvm.x86") {
- return llvm_x86::codegen_x86_llvm_intrinsic_call(fx, intrinsic, substs, args, ret, target);
+ return llvm_x86::codegen_x86_llvm_intrinsic_call(
+ fx,
+ intrinsic,
+ generic_args,
+ args,
+ ret,
+ target,
+ );
}
match intrinsic {
diff --git a/compiler/rustc_codegen_cranelift/src/intrinsics/llvm_aarch64.rs b/compiler/rustc_codegen_cranelift/src/intrinsics/llvm_aarch64.rs
index 33b2f4702..c20a99159 100644
--- a/compiler/rustc_codegen_cranelift/src/intrinsics/llvm_aarch64.rs
+++ b/compiler/rustc_codegen_cranelift/src/intrinsics/llvm_aarch64.rs
@@ -3,12 +3,12 @@
use crate::intrinsics::*;
use crate::prelude::*;
-use rustc_middle::ty::subst::SubstsRef;
+use rustc_middle::ty::GenericArgsRef;
pub(crate) fn codegen_aarch64_llvm_intrinsic_call<'tcx>(
fx: &mut FunctionCx<'_, '_, 'tcx>,
intrinsic: &str,
- _substs: SubstsRef<'tcx>,
+ _args: GenericArgsRef<'tcx>,
args: &[mir::Operand<'tcx>],
ret: CPlace<'tcx>,
target: Option<BasicBlock>,
diff --git a/compiler/rustc_codegen_cranelift/src/intrinsics/llvm_x86.rs b/compiler/rustc_codegen_cranelift/src/intrinsics/llvm_x86.rs
index 24ad0083a..fdd27a454 100644
--- a/compiler/rustc_codegen_cranelift/src/intrinsics/llvm_x86.rs
+++ b/compiler/rustc_codegen_cranelift/src/intrinsics/llvm_x86.rs
@@ -3,12 +3,12 @@
use crate::intrinsics::*;
use crate::prelude::*;
-use rustc_middle::ty::subst::SubstsRef;
+use rustc_middle::ty::GenericArgsRef;
pub(crate) fn codegen_x86_llvm_intrinsic_call<'tcx>(
fx: &mut FunctionCx<'_, '_, 'tcx>,
intrinsic: &str,
- _substs: SubstsRef<'tcx>,
+ _args: GenericArgsRef<'tcx>,
args: &[mir::Operand<'tcx>],
ret: CPlace<'tcx>,
target: Option<BasicBlock>,
@@ -18,6 +18,20 @@ pub(crate) fn codegen_x86_llvm_intrinsic_call<'tcx>(
// Spin loop hint
}
+ // Used by is_x86_feature_detected!();
+ "llvm.x86.xgetbv" => {
+ // FIXME use the actual xgetbv instruction
+ intrinsic_args!(fx, args => (v); intrinsic);
+
+ let v = v.load_scalar(fx);
+
+ // As of writing only XCR0 exists
+ fx.bcx.ins().trapnz(v, TrapCode::UnreachableCodeReached);
+
+ let res = fx.bcx.ins().iconst(types::I64, 1 /* bit 0 must be set */);
+ ret.write_cvalue(fx, CValue::by_val(res, fx.layout_of(fx.tcx.types.i64)));
+ }
+
// Used by `_mm_movemask_epi8` and `_mm256_movemask_epi8`
"llvm.x86.sse2.pmovmskb.128"
| "llvm.x86.avx2.pmovmskb"
@@ -53,7 +67,7 @@ pub(crate) fn codegen_x86_llvm_intrinsic_call<'tcx>(
let res = CValue::by_val(res, fx.layout_of(fx.tcx.types.i32));
ret.write_cvalue(fx, res);
}
- "llvm.x86.sse2.cmp.ps" | "llvm.x86.sse2.cmp.pd" => {
+ "llvm.x86.sse.cmp.ps" | "llvm.x86.sse2.cmp.pd" => {
let (x, y, kind) = match args {
[x, y, kind] => (x, y, kind),
_ => bug!("wrong number of args for intrinsic {intrinsic}"),
@@ -66,18 +80,95 @@ pub(crate) fn codegen_x86_llvm_intrinsic_call<'tcx>(
let flt_cc = match kind
.try_to_bits(Size::from_bytes(1))
.unwrap_or_else(|| panic!("kind not scalar: {:?}", kind))
+ .try_into()
+ .unwrap()
{
- 0 => FloatCC::Equal,
- 1 => FloatCC::LessThan,
- 2 => FloatCC::LessThanOrEqual,
- 7 => FloatCC::Ordered,
- 3 => FloatCC::Unordered,
- 4 => FloatCC::NotEqual,
- 5 => FloatCC::UnorderedOrGreaterThanOrEqual,
- 6 => FloatCC::UnorderedOrGreaterThan,
+ _CMP_EQ_OQ | _CMP_EQ_OS => FloatCC::Equal,
+ _CMP_LT_OS | _CMP_LT_OQ => FloatCC::LessThan,
+ _CMP_LE_OS | _CMP_LE_OQ => FloatCC::LessThanOrEqual,
+ _CMP_UNORD_Q | _CMP_UNORD_S => FloatCC::Unordered,
+ _CMP_NEQ_UQ | _CMP_NEQ_US => FloatCC::NotEqual,
+ _CMP_NLT_US | _CMP_NLT_UQ => FloatCC::UnorderedOrGreaterThanOrEqual,
+ _CMP_NLE_US | _CMP_NLE_UQ => FloatCC::UnorderedOrGreaterThan,
+ _CMP_ORD_Q | _CMP_ORD_S => FloatCC::Ordered,
+ _CMP_EQ_UQ | _CMP_EQ_US => FloatCC::UnorderedOrEqual,
+ _CMP_NGE_US | _CMP_NGE_UQ => FloatCC::UnorderedOrLessThan,
+ _CMP_NGT_US | _CMP_NGT_UQ => FloatCC::UnorderedOrLessThanOrEqual,
+ _CMP_FALSE_OQ | _CMP_FALSE_OS => todo!(),
+ _CMP_NEQ_OQ | _CMP_NEQ_OS => FloatCC::OrderedNotEqual,
+ _CMP_GE_OS | _CMP_GE_OQ => FloatCC::GreaterThanOrEqual,
+ _CMP_GT_OS | _CMP_GT_OQ => FloatCC::GreaterThan,
+ _CMP_TRUE_UQ | _CMP_TRUE_US => todo!(),
+
kind => unreachable!("kind {:?}", kind),
};
+ // Copied from stdarch
+ /// Equal (ordered, non-signaling)
+ const _CMP_EQ_OQ: i32 = 0x00;
+ /// Less-than (ordered, signaling)
+ const _CMP_LT_OS: i32 = 0x01;
+ /// Less-than-or-equal (ordered, signaling)
+ const _CMP_LE_OS: i32 = 0x02;
+ /// Unordered (non-signaling)
+ const _CMP_UNORD_Q: i32 = 0x03;
+ /// Not-equal (unordered, non-signaling)
+ const _CMP_NEQ_UQ: i32 = 0x04;
+ /// Not-less-than (unordered, signaling)
+ const _CMP_NLT_US: i32 = 0x05;
+ /// Not-less-than-or-equal (unordered, signaling)
+ const _CMP_NLE_US: i32 = 0x06;
+ /// Ordered (non-signaling)
+ const _CMP_ORD_Q: i32 = 0x07;
+ /// Equal (unordered, non-signaling)
+ const _CMP_EQ_UQ: i32 = 0x08;
+ /// Not-greater-than-or-equal (unordered, signaling)
+ const _CMP_NGE_US: i32 = 0x09;
+ /// Not-greater-than (unordered, signaling)
+ const _CMP_NGT_US: i32 = 0x0a;
+ /// False (ordered, non-signaling)
+ const _CMP_FALSE_OQ: i32 = 0x0b;
+ /// Not-equal (ordered, non-signaling)
+ const _CMP_NEQ_OQ: i32 = 0x0c;
+ /// Greater-than-or-equal (ordered, signaling)
+ const _CMP_GE_OS: i32 = 0x0d;
+ /// Greater-than (ordered, signaling)
+ const _CMP_GT_OS: i32 = 0x0e;
+ /// True (unordered, non-signaling)
+ const _CMP_TRUE_UQ: i32 = 0x0f;
+ /// Equal (ordered, signaling)
+ const _CMP_EQ_OS: i32 = 0x10;
+ /// Less-than (ordered, non-signaling)
+ const _CMP_LT_OQ: i32 = 0x11;
+ /// Less-than-or-equal (ordered, non-signaling)
+ const _CMP_LE_OQ: i32 = 0x12;
+ /// Unordered (signaling)
+ const _CMP_UNORD_S: i32 = 0x13;
+ /// Not-equal (unordered, signaling)
+ const _CMP_NEQ_US: i32 = 0x14;
+ /// Not-less-than (unordered, non-signaling)
+ const _CMP_NLT_UQ: i32 = 0x15;
+ /// Not-less-than-or-equal (unordered, non-signaling)
+ const _CMP_NLE_UQ: i32 = 0x16;
+ /// Ordered (signaling)
+ const _CMP_ORD_S: i32 = 0x17;
+ /// Equal (unordered, signaling)
+ const _CMP_EQ_US: i32 = 0x18;
+ /// Not-greater-than-or-equal (unordered, non-signaling)
+ const _CMP_NGE_UQ: i32 = 0x19;
+ /// Not-greater-than (unordered, non-signaling)
+ const _CMP_NGT_UQ: i32 = 0x1a;
+ /// False (ordered, signaling)
+ const _CMP_FALSE_OS: i32 = 0x1b;
+ /// Not-equal (ordered, signaling)
+ const _CMP_NEQ_OS: i32 = 0x1c;
+ /// Greater-than-or-equal (ordered, non-signaling)
+ const _CMP_GE_OQ: i32 = 0x1d;
+ /// Greater-than (ordered, non-signaling)
+ const _CMP_GT_OQ: i32 = 0x1e;
+ /// True (unordered, signaling)
+ const _CMP_TRUE_US: i32 = 0x1f;
+
simd_pair_for_each_lane(fx, x, y, ret, &|fx, lane_ty, res_lane_ty, x_lane, y_lane| {
let res_lane = match lane_ty.kind() {
ty::Float(_) => fx.bcx.ins().fcmp(flt_cc, x_lane, y_lane),
@@ -103,6 +194,23 @@ pub(crate) fn codegen_x86_llvm_intrinsic_call<'tcx>(
_ => fx.bcx.ins().iconst(types::I32, 0),
});
}
+ "llvm.x86.sse2.psrai.d" => {
+ let (a, imm8) = match args {
+ [a, imm8] => (a, imm8),
+ _ => bug!("wrong number of args for intrinsic {intrinsic}"),
+ };
+ let a = codegen_operand(fx, a);
+ let imm8 = crate::constant::mir_operand_get_const_val(fx, imm8)
+ .expect("llvm.x86.sse2.psrai.d imm8 not const");
+
+ simd_for_each_lane(fx, a, ret, &|fx, _lane_ty, _res_lane_ty, lane| match imm8
+ .try_to_bits(Size::from_bytes(4))
+ .unwrap_or_else(|| panic!("imm8 not scalar: {:?}", imm8))
+ {
+ imm8 if imm8 < 32 => fx.bcx.ins().sshr_imm(lane, i64::from(imm8 as u8)),
+ _ => fx.bcx.ins().iconst(types::I32, 0),
+ });
+ }
"llvm.x86.sse2.pslli.d" => {
let (a, imm8) = match args {
[a, imm8] => (a, imm8),
@@ -137,6 +245,23 @@ pub(crate) fn codegen_x86_llvm_intrinsic_call<'tcx>(
_ => fx.bcx.ins().iconst(types::I32, 0),
});
}
+ "llvm.x86.sse2.psrai.w" => {
+ let (a, imm8) = match args {
+ [a, imm8] => (a, imm8),
+ _ => bug!("wrong number of args for intrinsic {intrinsic}"),
+ };
+ let a = codegen_operand(fx, a);
+ let imm8 = crate::constant::mir_operand_get_const_val(fx, imm8)
+ .expect("llvm.x86.sse2.psrai.d imm8 not const");
+
+ simd_for_each_lane(fx, a, ret, &|fx, _lane_ty, _res_lane_ty, lane| match imm8
+ .try_to_bits(Size::from_bytes(4))
+ .unwrap_or_else(|| panic!("imm8 not scalar: {:?}", imm8))
+ {
+ imm8 if imm8 < 16 => fx.bcx.ins().sshr_imm(lane, i64::from(imm8 as u8)),
+ _ => fx.bcx.ins().iconst(types::I32, 0),
+ });
+ }
"llvm.x86.sse2.pslli.w" => {
let (a, imm8) = match args {
[a, imm8] => (a, imm8),
@@ -171,6 +296,57 @@ pub(crate) fn codegen_x86_llvm_intrinsic_call<'tcx>(
_ => fx.bcx.ins().iconst(types::I32, 0),
});
}
+ "llvm.x86.avx.psrai.d" => {
+ let (a, imm8) = match args {
+ [a, imm8] => (a, imm8),
+ _ => bug!("wrong number of args for intrinsic {intrinsic}"),
+ };
+ let a = codegen_operand(fx, a);
+ let imm8 = crate::constant::mir_operand_get_const_val(fx, imm8)
+ .expect("llvm.x86.avx.psrai.d imm8 not const");
+
+ simd_for_each_lane(fx, a, ret, &|fx, _lane_ty, _res_lane_ty, lane| match imm8
+ .try_to_bits(Size::from_bytes(4))
+ .unwrap_or_else(|| panic!("imm8 not scalar: {:?}", imm8))
+ {
+ imm8 if imm8 < 32 => fx.bcx.ins().sshr_imm(lane, i64::from(imm8 as u8)),
+ _ => fx.bcx.ins().iconst(types::I32, 0),
+ });
+ }
+ "llvm.x86.sse2.psrli.q" => {
+ let (a, imm8) = match args {
+ [a, imm8] => (a, imm8),
+ _ => bug!("wrong number of args for intrinsic {intrinsic}"),
+ };
+ let a = codegen_operand(fx, a);
+ let imm8 = crate::constant::mir_operand_get_const_val(fx, imm8)
+ .expect("llvm.x86.avx.psrli.q imm8 not const");
+
+ simd_for_each_lane(fx, a, ret, &|fx, _lane_ty, _res_lane_ty, lane| match imm8
+ .try_to_bits(Size::from_bytes(4))
+ .unwrap_or_else(|| panic!("imm8 not scalar: {:?}", imm8))
+ {
+ imm8 if imm8 < 64 => fx.bcx.ins().ushr_imm(lane, i64::from(imm8 as u8)),
+ _ => fx.bcx.ins().iconst(types::I32, 0),
+ });
+ }
+ "llvm.x86.sse2.pslli.q" => {
+ let (a, imm8) = match args {
+ [a, imm8] => (a, imm8),
+ _ => bug!("wrong number of args for intrinsic {intrinsic}"),
+ };
+ let a = codegen_operand(fx, a);
+ let imm8 = crate::constant::mir_operand_get_const_val(fx, imm8)
+ .expect("llvm.x86.avx.pslli.q imm8 not const");
+
+ simd_for_each_lane(fx, a, ret, &|fx, _lane_ty, _res_lane_ty, lane| match imm8
+ .try_to_bits(Size::from_bytes(4))
+ .unwrap_or_else(|| panic!("imm8 not scalar: {:?}", imm8))
+ {
+ imm8 if imm8 < 64 => fx.bcx.ins().ishl_imm(lane, i64::from(imm8 as u8)),
+ _ => fx.bcx.ins().iconst(types::I32, 0),
+ });
+ }
"llvm.x86.avx.pslli.d" => {
let (a, imm8) = match args {
[a, imm8] => (a, imm8),
@@ -205,6 +381,23 @@ pub(crate) fn codegen_x86_llvm_intrinsic_call<'tcx>(
_ => fx.bcx.ins().iconst(types::I32, 0),
});
}
+ "llvm.x86.avx2.psrai.w" => {
+ let (a, imm8) = match args {
+ [a, imm8] => (a, imm8),
+ _ => bug!("wrong number of args for intrinsic {intrinsic}"),
+ };
+ let a = codegen_operand(fx, a);
+ let imm8 = crate::constant::mir_operand_get_const_val(fx, imm8)
+ .expect("llvm.x86.avx.psrai.w imm8 not const");
+
+ simd_for_each_lane(fx, a, ret, &|fx, _lane_ty, _res_lane_ty, lane| match imm8
+ .try_to_bits(Size::from_bytes(4))
+ .unwrap_or_else(|| panic!("imm8 not scalar: {:?}", imm8))
+ {
+ imm8 if imm8 < 16 => fx.bcx.ins().sshr_imm(lane, i64::from(imm8 as u8)),
+ _ => fx.bcx.ins().iconst(types::I32, 0),
+ });
+ }
"llvm.x86.avx2.pslli.w" => {
let (a, imm8) = match args {
[a, imm8] => (a, imm8),
@@ -313,7 +506,7 @@ pub(crate) fn codegen_x86_llvm_intrinsic_call<'tcx>(
ret.place_lane(fx, 2).to_ptr().store(fx, res_2, MemFlags::trusted());
ret.place_lane(fx, 3).to_ptr().store(fx, res_3, MemFlags::trusted());
}
- "llvm.x86.sse2.storeu.dq" => {
+ "llvm.x86.sse2.storeu.dq" | "llvm.x86.sse2.storeu.pd" => {
intrinsic_args!(fx, args => (mem_addr, a); intrinsic);
let mem_addr = mem_addr.load_scalar(fx);
@@ -321,17 +514,45 @@ pub(crate) fn codegen_x86_llvm_intrinsic_call<'tcx>(
let dest = CPlace::for_ptr(Pointer::new(mem_addr), a.layout());
dest.write_cvalue(fx, a);
}
- "llvm.x86.addcarry.64" => {
+ "llvm.x86.ssse3.pabs.b.128" | "llvm.x86.ssse3.pabs.w.128" | "llvm.x86.ssse3.pabs.d.128" => {
+ let a = match args {
+ [a] => a,
+ _ => bug!("wrong number of args for intrinsic {intrinsic}"),
+ };
+ let a = codegen_operand(fx, a);
+
+ simd_for_each_lane(fx, a, ret, &|fx, _lane_ty, _res_lane_ty, lane| {
+ fx.bcx.ins().iabs(lane)
+ });
+ }
+ "llvm.x86.addcarry.32" | "llvm.x86.addcarry.64" => {
intrinsic_args!(fx, args => (c_in, a, b); intrinsic);
let c_in = c_in.load_scalar(fx);
- llvm_add_sub(fx, BinOp::Add, ret, c_in, a, b);
+ let (cb_out, c) = llvm_add_sub(fx, BinOp::Add, c_in, a, b);
+
+ let layout = fx.layout_of(Ty::new_tup(fx.tcx, &[fx.tcx.types.u8, a.layout().ty]));
+ let val = CValue::by_val_pair(cb_out, c, layout);
+ ret.write_cvalue(fx, val);
}
- "llvm.x86.subborrow.64" => {
+ "llvm.x86.addcarryx.u32" | "llvm.x86.addcarryx.u64" => {
+ intrinsic_args!(fx, args => (c_in, a, b, out); intrinsic);
+ let c_in = c_in.load_scalar(fx);
+
+ let (cb_out, c) = llvm_add_sub(fx, BinOp::Add, c_in, a, b);
+
+ Pointer::new(out.load_scalar(fx)).store(fx, c, MemFlags::trusted());
+ ret.write_cvalue(fx, CValue::by_val(cb_out, fx.layout_of(fx.tcx.types.u8)));
+ }
+ "llvm.x86.subborrow.32" | "llvm.x86.subborrow.64" => {
intrinsic_args!(fx, args => (b_in, a, b); intrinsic);
let b_in = b_in.load_scalar(fx);
- llvm_add_sub(fx, BinOp::Sub, ret, b_in, a, b);
+ let (cb_out, c) = llvm_add_sub(fx, BinOp::Sub, b_in, a, b);
+
+ let layout = fx.layout_of(Ty::new_tup(fx.tcx, &[fx.tcx.types.u8, a.layout().ty]));
+ let val = CValue::by_val_pair(cb_out, c, layout);
+ ret.write_cvalue(fx, val);
}
_ => {
fx.tcx
@@ -356,21 +577,11 @@ pub(crate) fn codegen_x86_llvm_intrinsic_call<'tcx>(
fn llvm_add_sub<'tcx>(
fx: &mut FunctionCx<'_, '_, 'tcx>,
bin_op: BinOp,
- ret: CPlace<'tcx>,
cb_in: Value,
a: CValue<'tcx>,
b: CValue<'tcx>,
-) {
- assert_eq!(
- a.layout().ty,
- fx.tcx.types.u64,
- "llvm.x86.addcarry.64/llvm.x86.subborrow.64 second operand must be u64"
- );
- assert_eq!(
- b.layout().ty,
- fx.tcx.types.u64,
- "llvm.x86.addcarry.64/llvm.x86.subborrow.64 third operand must be u64"
- );
+) -> (Value, Value) {
+ assert_eq!(a.layout().ty, b.layout().ty);
// c + carry -> c + first intermediate carry or borrow respectively
let int0 = crate::num::codegen_checked_int_binop(fx, bin_op, a, b);
@@ -378,15 +589,14 @@ fn llvm_add_sub<'tcx>(
let cb0 = int0.value_field(fx, FieldIdx::new(1)).load_scalar(fx);
// c + carry -> c + second intermediate carry or borrow respectively
- let cb_in_as_u64 = fx.bcx.ins().uextend(types::I64, cb_in);
- let cb_in_as_u64 = CValue::by_val(cb_in_as_u64, fx.layout_of(fx.tcx.types.u64));
- let int1 = crate::num::codegen_checked_int_binop(fx, bin_op, c, cb_in_as_u64);
+ let clif_ty = fx.clif_type(a.layout().ty).unwrap();
+ let cb_in_as_int = fx.bcx.ins().uextend(clif_ty, cb_in);
+ let cb_in_as_int = CValue::by_val(cb_in_as_int, fx.layout_of(a.layout().ty));
+ let int1 = crate::num::codegen_checked_int_binop(fx, bin_op, c, cb_in_as_int);
let (c, cb1) = int1.load_scalar_pair(fx);
// carry0 | carry1 -> carry or borrow respectively
let cb_out = fx.bcx.ins().bor(cb0, cb1);
- let layout = fx.layout_of(Ty::new_tup(fx.tcx, &[fx.tcx.types.u8, fx.tcx.types.u64]));
- let val = CValue::by_val_pair(cb_out, c, layout);
- ret.write_cvalue(fx, val);
+ (cb_out, c)
}
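
// A hedged usage sketch for the expanded `llvm.x86.sse.cmp.ps` table above,
// not part of the patch: stdarch's AVX `_mm_cmp_ps` passes the predicate as a
// const immediate to this intrinsic, so predicates above 7 (here _CMP_NGE_US =
// 0x09) previously hit the unreachable!() arm. Assumes x86_64 with AVX.

#[cfg(target_arch = "x86_64")]
fn nge_mask() -> i32 {
    use std::arch::x86_64::*;
    assert!(is_x86_feature_detected!("avx"));
    unsafe {
        let a = _mm_set1_ps(f32::NAN);
        let b = _mm_set1_ps(1.0);
        // NAN is unordered with everything, so "not greater-or-equal
        // (unordered)" holds in all four lanes: movemask returns 0b1111.
        let mask = _mm_cmp_ps::<_CMP_NGE_US>(a, b);
        _mm_movemask_ps(mask)
    }
}
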
diff --git a/compiler/rustc_codegen_cranelift/src/intrinsics/mod.rs b/compiler/rustc_codegen_cranelift/src/intrinsics/mod.rs
index 5862f1829..36e9ba9c7 100644
--- a/compiler/rustc_codegen_cranelift/src/intrinsics/mod.rs
+++ b/compiler/rustc_codegen_cranelift/src/intrinsics/mod.rs
@@ -24,7 +24,7 @@ pub(crate) use llvm::codegen_llvm_intrinsic_call;
use rustc_middle::ty;
use rustc_middle::ty::layout::{HasParamEnv, ValidityRequirement};
use rustc_middle::ty::print::{with_no_trimmed_paths, with_no_visible_paths};
-use rustc_middle::ty::subst::SubstsRef;
+use rustc_middle::ty::GenericArgsRef;
use rustc_span::symbol::{kw, sym, Symbol};
use crate::prelude::*;
@@ -213,13 +213,13 @@ pub(crate) fn codegen_intrinsic_call<'tcx>(
source_info: mir::SourceInfo,
) {
let intrinsic = fx.tcx.item_name(instance.def_id());
- let substs = instance.substs;
+ let instance_args = instance.args;
if intrinsic.as_str().starts_with("simd_") {
self::simd::codegen_simd_intrinsic_call(
fx,
intrinsic,
- substs,
+ instance_args,
args,
destination,
target.expect("target for simd intrinsic"),
@@ -233,7 +233,7 @@ pub(crate) fn codegen_intrinsic_call<'tcx>(
fx,
instance,
intrinsic,
- substs,
+ instance_args,
args,
destination,
target,
@@ -365,7 +365,7 @@ fn codegen_regular_intrinsic_call<'tcx>(
fx: &mut FunctionCx<'_, '_, 'tcx>,
instance: Instance<'tcx>,
intrinsic: Symbol,
- substs: SubstsRef<'tcx>,
+ generic_args: GenericArgsRef<'tcx>,
args: &[mir::Operand<'tcx>],
ret: CPlace<'tcx>,
destination: Option<BasicBlock>,
@@ -394,7 +394,7 @@ fn codegen_regular_intrinsic_call<'tcx>(
let dst = dst.load_scalar(fx);
let count = count.load_scalar(fx);
- let elem_ty = substs.type_at(0);
+ let elem_ty = generic_args.type_at(0);
let elem_size: u64 = fx.layout_of(elem_ty).size.bytes();
assert_eq!(args.len(), 3);
let byte_amount =
@@ -410,7 +410,7 @@ fn codegen_regular_intrinsic_call<'tcx>(
let src = src.load_scalar(fx);
let count = count.load_scalar(fx);
- let elem_ty = substs.type_at(0);
+ let elem_ty = generic_args.type_at(0);
let elem_size: u64 = fx.layout_of(elem_ty).size.bytes();
assert_eq!(args.len(), 3);
let byte_amount =
@@ -428,7 +428,7 @@ fn codegen_regular_intrinsic_call<'tcx>(
sym::size_of_val => {
intrinsic_args!(fx, args => (ptr); intrinsic);
- let layout = fx.layout_of(substs.type_at(0));
+ let layout = fx.layout_of(generic_args.type_at(0));
// Note: Can't use is_unsized here as truly unsized types need to take the fixed size
// branch
let size = if let Abi::ScalarPair(_, _) = ptr.layout().abi {
@@ -443,7 +443,7 @@ fn codegen_regular_intrinsic_call<'tcx>(
sym::min_align_of_val => {
intrinsic_args!(fx, args => (ptr); intrinsic);
- let layout = fx.layout_of(substs.type_at(0));
+ let layout = fx.layout_of(generic_args.type_at(0));
// Note: Can't use is_unsized here as truly unsized types need to take the fixed size
// branch
let align = if let Abi::ScalarPair(_, _) = ptr.layout().abi {
@@ -602,7 +602,7 @@ fn codegen_regular_intrinsic_call<'tcx>(
sym::assert_inhabited | sym::assert_zero_valid | sym::assert_mem_uninitialized_valid => {
intrinsic_args!(fx, args => (); intrinsic);
- let ty = substs.type_at(0);
+ let ty = generic_args.type_at(0);
let requirement = ValidityRequirement::from_intrinsic(intrinsic);
@@ -647,12 +647,13 @@ fn codegen_regular_intrinsic_call<'tcx>(
let val = CValue::by_ref(Pointer::new(ptr.load_scalar(fx)), inner_layout);
ret.write_cvalue(fx, val);
}
- sym::volatile_store | sym::unaligned_volatile_store => {
+ sym::volatile_store | sym::unaligned_volatile_store | sym::nontemporal_store => {
intrinsic_args!(fx, args => (ptr, val); intrinsic);
let ptr = ptr.load_scalar(fx);
// Cranelift treats stores as volatile by default
// FIXME correctly handle unaligned_volatile_store
+ // FIXME actually do nontemporal stores if requested
let dest = CPlace::for_ptr(Pointer::new(ptr), val.layout());
dest.write_cvalue(fx, val);
}
@@ -674,7 +675,7 @@ fn codegen_regular_intrinsic_call<'tcx>(
intrinsic_args!(fx, args => (ptr, base); intrinsic);
let ptr = ptr.load_scalar(fx);
let base = base.load_scalar(fx);
- let ty = substs.type_at(0);
+ let ty = generic_args.type_at(0);
let pointee_size: u64 = fx.layout_of(ty).size.bytes();
let diff_bytes = fx.bcx.ins().isub(ptr, base);
@@ -720,7 +721,7 @@ fn codegen_regular_intrinsic_call<'tcx>(
intrinsic_args!(fx, args => (ptr); intrinsic);
let ptr = ptr.load_scalar(fx);
- let ty = substs.type_at(0);
+ let ty = generic_args.type_at(0);
match ty.kind() {
ty::Uint(UintTy::U128) | ty::Int(IntTy::I128) => {
// FIXME implement 128bit atomics
@@ -751,7 +752,7 @@ fn codegen_regular_intrinsic_call<'tcx>(
intrinsic_args!(fx, args => (ptr, val); intrinsic);
let ptr = ptr.load_scalar(fx);
- let ty = substs.type_at(0);
+ let ty = generic_args.type_at(0);
match ty.kind() {
ty::Uint(UintTy::U128) | ty::Int(IntTy::I128) => {
// FIXME implement 128bit atomics
@@ -1128,7 +1129,7 @@ fn codegen_regular_intrinsic_call<'tcx>(
let lhs_ref = lhs_ref.load_scalar(fx);
let rhs_ref = rhs_ref.load_scalar(fx);
- let size = fx.layout_of(substs.type_at(0)).layout.size();
+ let size = fx.layout_of(generic_args.type_at(0)).layout.size();
// FIXME add and use emit_small_memcmp
let is_eq_value = if size == Size::ZERO {
// No bytes means they're trivially equal
@@ -1154,6 +1155,20 @@ fn codegen_regular_intrinsic_call<'tcx>(
ret.write_cvalue(fx, CValue::by_val(is_eq_value, ret.layout()));
}
+ sym::compare_bytes => {
+ intrinsic_args!(fx, args => (lhs_ptr, rhs_ptr, bytes_val); intrinsic);
+ let lhs_ptr = lhs_ptr.load_scalar(fx);
+ let rhs_ptr = rhs_ptr.load_scalar(fx);
+ let bytes_val = bytes_val.load_scalar(fx);
+
+ let params = vec![AbiParam::new(fx.pointer_type); 3];
+ let returns = vec![AbiParam::new(types::I32)];
+ let args = &[lhs_ptr, rhs_ptr, bytes_val];
+ // Here we assume that the `memcmp` provided by the target is a NOP for size 0.
+ let cmp = fx.lib_call("memcmp", params, returns, args)[0];
+ ret.write_cvalue(fx, CValue::by_val(cmp, ret.layout()));
+ }
+
sym::const_allocate => {
intrinsic_args!(fx, args => (_size, _align); intrinsic);
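
// A hedged note on the new `compare_bytes` lowering above, not part of the
// patch: the intrinsic has memcmp semantics (negative/zero/positive i32 keyed
// on the first differing byte), which is why a plain `memcmp` lib call
// suffices. User code typically reaches it through byte-slice ordering:

fn order(a: &[u8], b: &[u8]) -> std::cmp::Ordering {
    // Lexicographic comparison; for the common prefix this is a single
    // memcmp-style byte compare in the standard library.
    a.cmp(b)
}

fn main() {
    assert_eq!(order(b"abc", b"abd"), std::cmp::Ordering::Less);
}
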
diff --git a/compiler/rustc_codegen_cranelift/src/intrinsics/simd.rs b/compiler/rustc_codegen_cranelift/src/intrinsics/simd.rs
index 6741362e8..9863e40b5 100644
--- a/compiler/rustc_codegen_cranelift/src/intrinsics/simd.rs
+++ b/compiler/rustc_codegen_cranelift/src/intrinsics/simd.rs
@@ -1,6 +1,6 @@
//! Codegen `extern "platform-intrinsic"` intrinsics.
-use rustc_middle::ty::subst::SubstsRef;
+use rustc_middle::ty::GenericArgsRef;
use rustc_span::Symbol;
use rustc_target::abi::Endian;
@@ -21,7 +21,7 @@ fn report_simd_type_validation_error(
pub(super) fn codegen_simd_intrinsic_call<'tcx>(
fx: &mut FunctionCx<'_, '_, 'tcx>,
intrinsic: Symbol,
- _substs: SubstsRef<'tcx>,
+ _args: GenericArgsRef<'tcx>,
args: &[mir::Operand<'tcx>],
ret: CPlace<'tcx>,
target: BasicBlock,
@@ -117,8 +117,8 @@ pub(super) fn codegen_simd_intrinsic_call<'tcx>(
});
}
- // simd_shuffle32<T, U>(x: T, y: T, idx: [u32; 32]) -> U
- _ if intrinsic.as_str().starts_with("simd_shuffle") => {
+ // simd_shuffle<T, I, U>(x: T, y: T, idx: I) -> U
+ sym::simd_shuffle => {
let (x, y, idx) = match args {
[x, y, idx] => (x, y, idx),
_ => {
@@ -133,36 +133,26 @@ pub(super) fn codegen_simd_intrinsic_call<'tcx>(
return;
}
- // If this intrinsic is the older "simd_shuffleN" form, simply parse the integer.
- // If there is no suffix, use the index array length.
- let n: u16 = if intrinsic == sym::simd_shuffle {
- // Make sure this is actually an array, since typeck only checks the length-suffixed
- // version of this intrinsic.
- let idx_ty = fx.monomorphize(idx.ty(fx.mir, fx.tcx));
- match idx_ty.kind() {
- ty::Array(ty, len) if matches!(ty.kind(), ty::Uint(ty::UintTy::U32)) => len
- .try_eval_target_usize(fx.tcx, ty::ParamEnv::reveal_all())
- .unwrap_or_else(|| {
- span_bug!(span, "could not evaluate shuffle index array length")
- })
- .try_into()
- .unwrap(),
- _ => {
- fx.tcx.sess.span_err(
- span,
- format!(
- "simd_shuffle index must be an array of `u32`, got `{}`",
- idx_ty,
- ),
- );
- // Prevent verifier error
- fx.bcx.ins().trap(TrapCode::UnreachableCodeReached);
- return;
- }
+ // Make sure this is actually an array, since typeck only checks the length-suffixed
+ // version of this intrinsic.
+ let idx_ty = fx.monomorphize(idx.ty(fx.mir, fx.tcx));
+ let n: u16 = match idx_ty.kind() {
+ ty::Array(ty, len) if matches!(ty.kind(), ty::Uint(ty::UintTy::U32)) => len
+ .try_eval_target_usize(fx.tcx, ty::ParamEnv::reveal_all())
+ .unwrap_or_else(|| {
+ span_bug!(span, "could not evaluate shuffle index array length")
+ })
+ .try_into()
+ .unwrap(),
+ _ => {
+ fx.tcx.sess.span_err(
+ span,
+ format!("simd_shuffle index must be an array of `u32`, got `{}`", idx_ty),
+ );
+ // Prevent verifier error
+ fx.bcx.ins().trap(TrapCode::UnreachableCodeReached);
+ return;
}
- } else {
- // FIXME remove this case
- intrinsic.as_str()["simd_shuffle".len()..].parse().unwrap()
};
assert_eq!(x.layout(), y.layout());
@@ -179,7 +169,7 @@ pub(super) fn codegen_simd_intrinsic_call<'tcx>(
let indexes = {
use rustc_middle::mir::interpret::*;
let idx_const = crate::constant::mir_operand_get_const_val(fx, idx)
- .expect("simd_shuffle* idx not const");
+ .expect("simd_shuffle idx not const");
let idx_bytes = match idx_const {
ConstValue::ByRef { alloc, offset } => {
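
// A hedged sketch of what now feeds the single remaining `simd_shuffle` path
// above, not part of the patch: on nightly, portable SIMD's swizzle macro
// bottoms out in `simd_shuffle` with a const `u32` index array, the only form
// the rewritten match accepts.

#![feature(portable_simd)]
use std::simd::{simd_swizzle, u32x4};

fn main() {
    let v = u32x4::from_array([10, 20, 30, 40]);
    // The index array is a compile-time constant, as required above.
    let r = simd_swizzle!(v, [3, 2, 1, 0]);
    assert_eq!(r.to_array(), [40, 30, 20, 10]);
}
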
diff --git a/compiler/rustc_codegen_cranelift/src/lib.rs b/compiler/rustc_codegen_cranelift/src/lib.rs
index 0de2dccda..d01ded8ab 100644
--- a/compiler/rustc_codegen_cranelift/src/lib.rs
+++ b/compiler/rustc_codegen_cranelift/src/lib.rs
@@ -260,6 +260,13 @@ fn build_isa(sess: &Session, backend_config: &BackendConfig) -> Arc<dyn isa::Tar
flags_builder.set("enable_verifier", enable_verifier).unwrap();
flags_builder.set("regalloc_checker", enable_verifier).unwrap();
+ let preserve_frame_pointer = sess.target.options.frame_pointer
+ != rustc_target::spec::FramePointer::MayOmit
+ || matches!(sess.opts.cg.force_frame_pointers, Some(true));
+ if preserve_frame_pointer {
+ flags_builder.set("preserve_frame_pointers", "true").unwrap();
+ }
+
let tls_model = match target_triple.binary_format {
BinaryFormat::Elf => "elf_gd",
BinaryFormat::Macho => "macho",
@@ -268,8 +275,6 @@ fn build_isa(sess: &Session, backend_config: &BackendConfig) -> Arc<dyn isa::Tar
};
flags_builder.set("tls_model", tls_model).unwrap();
- flags_builder.set("enable_simd", "true").unwrap();
-
flags_builder.set("enable_llvm_abi_extensions", "true").unwrap();
use rustc_session::config::OptLevel;
diff --git a/compiler/rustc_codegen_cranelift/src/main_shim.rs b/compiler/rustc_codegen_cranelift/src/main_shim.rs
index 20ba73f38..b5efe44d8 100644
--- a/compiler/rustc_codegen_cranelift/src/main_shim.rs
+++ b/compiler/rustc_codegen_cranelift/src/main_shim.rs
@@ -1,6 +1,6 @@
use rustc_hir::LangItem;
-use rustc_middle::ty::subst::GenericArg;
use rustc_middle::ty::AssocKind;
+use rustc_middle::ty::GenericArg;
use rustc_session::config::{sigpipe, EntryFnType};
use rustc_span::symbol::Ident;
@@ -119,7 +119,7 @@ pub(crate) fn maybe_create_entry_wrapper(
tcx,
ParamEnv::reveal_all(),
report.def_id,
- tcx.mk_substs(&[GenericArg::from(main_ret_ty)]),
+ tcx.mk_args(&[GenericArg::from(main_ret_ty)]),
)
.unwrap()
.unwrap()
@@ -146,7 +146,7 @@ pub(crate) fn maybe_create_entry_wrapper(
tcx,
ParamEnv::reveal_all(),
start_def_id,
- tcx.mk_substs(&[main_ret_ty.into()]),
+ tcx.mk_args(&[main_ret_ty.into()]),
)
.unwrap()
.unwrap()
diff --git a/compiler/rustc_codegen_cranelift/src/pretty_clif.rs b/compiler/rustc_codegen_cranelift/src/pretty_clif.rs
index 5a4f9e804..0ead50c34 100644
--- a/compiler/rustc_codegen_cranelift/src/pretty_clif.rs
+++ b/compiler/rustc_codegen_cranelift/src/pretty_clif.rs
@@ -9,7 +9,7 @@
//!
//! function u0:22(i64) -> i8, i8 system_v {
//! ; symbol _ZN97_$LT$example..IsNotEmpty$u20$as$u20$mini_core..FnOnce$LT$$LP$$RF$$RF$$u5b$u16$u5d$$C$$RP$$GT$$GT$9call_once17hd517c453d67c0915E
-//! ; instance Instance { def: Item(WithOptConstParam { did: DefId(0:42 ~ example[4e51]::{impl#0}::call_once), const_param_did: None }), substs: [ReErased, ReErased] }
+//! ; instance Instance { def: Item(WithOptConstParam { did: DefId(0:42 ~ example[4e51]::{impl#0}::call_once), const_param_did: None }), args: [ReErased, ReErased] }
//! ; abi FnAbi { args: [ArgAbi { layout: TyAndLayout { ty: IsNotEmpty, layout: Layout { size: Size(0 bytes), align: AbiAndPrefAlign { abi: Align(1 bytes), pref: Align(8 bytes) }, abi: Aggregate { sized: true }, fields: Arbitrary { offsets: [], memory_index: [] }, largest_niche: None, variants: Single { index: 0 } } }, mode: Ignore }, ArgAbi { layout: TyAndLayout { ty: &&[u16], layout: Layout { size: Size(8 bytes), align: AbiAndPrefAlign { abi: Align(8 bytes), pref: Align(8 bytes) }, abi: Scalar(Initialized { value: Pointer(AddressSpace(0)), valid_range: 1..=18446744073709551615 }), fields: Primitive, largest_niche: Some(Niche { offset: Size(0 bytes), value: Pointer(AddressSpace(0)), valid_range: 1..=18446744073709551615 }), variants: Single { index: 0 } } }, mode: Direct(ArgAttributes { regular: NonNull | NoUndef, arg_ext: None, pointee_size: Size(0 bytes), pointee_align: Some(Align(8 bytes)) }) }], ret: ArgAbi { layout: TyAndLayout { ty: (u8, u8), layout: Layout { size: Size(2 bytes), align: AbiAndPrefAlign { abi: Align(1 bytes), pref: Align(8 bytes) }, abi: ScalarPair(Initialized { value: Int(I8, false), valid_range: 0..=255 }, Initialized { value: Int(I8, false), valid_range: 0..=255 }), fields: Arbitrary { offsets: [Size(0 bytes), Size(1 bytes)], memory_index: [0, 1] }, largest_niche: None, variants: Single { index: 0 } } }, mode: Pair(ArgAttributes { regular: NoUndef, arg_ext: None, pointee_size: Size(0 bytes), pointee_align: None }, ArgAttributes { regular: NoUndef, arg_ext: None, pointee_size: Size(0 bytes), pointee_align: None }) }, c_variadic: false, fixed_count: 1, conv: Rust, can_unwind: false }
//!
//! ; kind loc.idx param pass mode ty
@@ -25,7 +25,7 @@
//!
//! ss0 = explicit_slot 16
//! sig0 = (i64, i64) -> i8, i8 system_v
-//! fn0 = colocated u0:23 sig0 ; Instance { def: Item(WithOptConstParam { did: DefId(0:46 ~ example[4e51]::{impl#1}::call_mut), const_param_did: None }), substs: [ReErased, ReErased] }
+//! fn0 = colocated u0:23 sig0 ; Instance { def: Item(WithOptConstParam { did: DefId(0:46 ~ example[4e51]::{impl#1}::call_mut), const_param_did: None }), args: [ReErased, ReErased] }
//!
//! block0(v0: i64):
//! nop
@@ -261,7 +261,7 @@ pub(crate) fn write_clif_file(
impl fmt::Debug for FunctionCx<'_, '_, '_> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- writeln!(f, "{:?}", self.instance.substs)?;
+ writeln!(f, "{:?}", self.instance.args)?;
writeln!(f, "{:?}", self.local_map)?;
let mut clif = String::new();
diff --git a/compiler/rustc_codegen_cranelift/src/value_and_place.rs b/compiler/rustc_codegen_cranelift/src/value_and_place.rs
index 133c989b6..ff95141ce 100644
--- a/compiler/rustc_codegen_cranelift/src/value_and_place.rs
+++ b/compiler/rustc_codegen_cranelift/src/value_and_place.rs
@@ -2,6 +2,8 @@
use crate::prelude::*;
+use rustc_middle::ty::FnSig;
+
use cranelift_codegen::entity::EntityRef;
use cranelift_codegen::ir::immediates::Offset32;
@@ -160,6 +162,7 @@ impl<'tcx> CValue<'tcx> {
}
/// Load a value with layout.abi of scalar
+ #[track_caller]
pub(crate) fn load_scalar(self, fx: &mut FunctionCx<'_, '_, 'tcx>) -> Value {
let layout = self.1;
match self.0 {
@@ -182,6 +185,7 @@ impl<'tcx> CValue<'tcx> {
}
/// Load a value pair with layout.abi of scalar pair
+ #[track_caller]
pub(crate) fn load_scalar_pair(self, fx: &mut FunctionCx<'_, '_, 'tcx>) -> (Value, Value) {
let layout = self.1;
match self.0 {
@@ -583,17 +587,25 @@ impl<'tcx> CPlace<'tcx> {
let dst_layout = self.layout();
match self.inner {
CPlaceInner::Var(_local, var) => {
- let data = CValue(from.0, dst_layout).load_scalar(fx);
+ let data = match from.1.abi {
+ Abi::Scalar(_) => CValue(from.0, dst_layout).load_scalar(fx),
+ _ => {
+ let (ptr, meta) = from.force_stack(fx);
+ assert!(meta.is_none());
+ CValue(CValueInner::ByRef(ptr, None), dst_layout).load_scalar(fx)
+ }
+ };
let dst_ty = fx.clif_type(self.layout().ty).unwrap();
transmute_scalar(fx, var, data, dst_ty);
}
CPlaceInner::VarPair(_local, var1, var2) => {
- let (data1, data2) = if from.layout().ty == dst_layout.ty {
- CValue(from.0, dst_layout).load_scalar_pair(fx)
- } else {
- let (ptr, meta) = from.force_stack(fx);
- assert!(meta.is_none());
- CValue(CValueInner::ByRef(ptr, None), dst_layout).load_scalar_pair(fx)
+ let (data1, data2) = match from.1.abi {
+ Abi::ScalarPair(_, _) => CValue(from.0, dst_layout).load_scalar_pair(fx),
+ _ => {
+ let (ptr, meta) = from.force_stack(fx);
+ assert!(meta.is_none());
+ CValue(CValueInner::ByRef(ptr, None), dst_layout).load_scalar_pair(fx)
+ }
};
let (dst_ty1, dst_ty2) = fx.clif_pair_type(self.layout().ty).unwrap();
transmute_scalar(fx, var1, data1, dst_ty1);
@@ -607,30 +619,38 @@ impl<'tcx> CPlace<'tcx> {
let mut flags = MemFlags::new();
flags.set_notrap();
- match from.layout().abi {
- Abi::Scalar(_) => {
- let val = from.load_scalar(fx);
- to_ptr.store(fx, val, flags);
- return;
- }
- Abi::ScalarPair(a_scalar, b_scalar) => {
- let (value, extra) = from.load_scalar_pair(fx);
- let b_offset = scalar_pair_calculate_b_offset(fx.tcx, a_scalar, b_scalar);
- to_ptr.store(fx, value, flags);
- to_ptr.offset(fx, b_offset).store(fx, extra, flags);
- return;
- }
- _ => {}
- }
match from.0 {
CValueInner::ByVal(val) => {
to_ptr.store(fx, val, flags);
}
- CValueInner::ByValPair(_, _) => {
- bug!("Non ScalarPair abi {:?} for ByValPair CValue", dst_layout.abi);
- }
+ CValueInner::ByValPair(val1, val2) => match from.layout().abi {
+ Abi::ScalarPair(a_scalar, b_scalar) => {
+ let b_offset =
+ scalar_pair_calculate_b_offset(fx.tcx, a_scalar, b_scalar);
+ to_ptr.store(fx, val1, flags);
+ to_ptr.offset(fx, b_offset).store(fx, val2, flags);
+ }
+ _ => bug!("Non ScalarPair abi {:?} for ByValPair CValue", dst_layout.abi),
+ },
CValueInner::ByRef(from_ptr, None) => {
+ match from.layout().abi {
+ Abi::Scalar(_) => {
+ let val = from.load_scalar(fx);
+ to_ptr.store(fx, val, flags);
+ return;
+ }
+ Abi::ScalarPair(a_scalar, b_scalar) => {
+ let b_offset =
+ scalar_pair_calculate_b_offset(fx.tcx, a_scalar, b_scalar);
+ let (val1, val2) = from.load_scalar_pair(fx);
+ to_ptr.store(fx, val1, flags);
+ to_ptr.offset(fx, b_offset).store(fx, val2, flags);
+ return;
+ }
+ _ => {}
+ }
+
let from_addr = from_ptr.get_addr(fx);
let to_addr = to_ptr.get_addr(fx);
let src_layout = from.1;
@@ -815,11 +835,42 @@ pub(crate) fn assert_assignable<'tcx>(
ParamEnv::reveal_all(),
from_ty.fn_sig(fx.tcx),
);
+ let FnSig {
+ inputs_and_output: types_from,
+ c_variadic: c_variadic_from,
+ unsafety: unsafety_from,
+ abi: abi_from,
+ } = from_sig;
let to_sig = fx
.tcx
.normalize_erasing_late_bound_regions(ParamEnv::reveal_all(), to_ty.fn_sig(fx.tcx));
+ let FnSig {
+ inputs_and_output: types_to,
+ c_variadic: c_variadic_to,
+ unsafety: unsafety_to,
+ abi: abi_to,
+ } = to_sig;
+ let mut types_from = types_from.iter();
+ let mut types_to = types_to.iter();
+ loop {
+ match (types_from.next(), types_to.next()) {
+ (Some(a), Some(b)) => assert_assignable(fx, a, b, limit - 1),
+ (None, None) => break,
+ (Some(_), None) | (None, Some(_)) => panic!("{:#?}/{:#?}", from_ty, to_ty),
+ }
+ }
+ assert_eq!(
+ c_variadic_from, c_variadic_to,
+ "Can't write fn ptr with incompatible sig {:?} to place with sig {:?}\n\n{:#?}",
+ from_sig, to_sig, fx,
+ );
+ assert_eq!(
+ unsafety_from, unsafety_to,
+ "Can't write fn ptr with incompatible sig {:?} to place with sig {:?}\n\n{:#?}",
+ from_sig, to_sig, fx,
+ );
assert_eq!(
- from_sig, to_sig,
+ abi_from, abi_to,
"Can't write fn ptr with incompatible sig {:?} to place with sig {:?}\n\n{:#?}",
from_sig, to_sig, fx,
);
@@ -850,11 +901,11 @@ pub(crate) fn assert_assignable<'tcx>(
}
}
}
- (&ty::Adt(adt_def_a, substs_a), &ty::Adt(adt_def_b, substs_b))
+ (&ty::Adt(adt_def_a, args_a), &ty::Adt(adt_def_b, args_b))
if adt_def_a.did() == adt_def_b.did() =>
{
- let mut types_a = substs_a.types();
- let mut types_b = substs_b.types();
+ let mut types_a = args_a.types();
+ let mut types_b = args_b.types();
loop {
match (types_a.next(), types_b.next()) {
(Some(a), Some(b)) => assert_assignable(fx, a, b, limit - 1),
@@ -864,11 +915,11 @@ pub(crate) fn assert_assignable<'tcx>(
}
}
(ty::Array(a, _), ty::Array(b, _)) => assert_assignable(fx, *a, *b, limit - 1),
- (&ty::Closure(def_id_a, substs_a), &ty::Closure(def_id_b, substs_b))
+ (&ty::Closure(def_id_a, args_a), &ty::Closure(def_id_b, args_b))
if def_id_a == def_id_b =>
{
- let mut types_a = substs_a.types();
- let mut types_b = substs_b.types();
+ let mut types_a = args_a.types();
+ let mut types_b = args_b.types();
loop {
match (types_a.next(), types_b.next()) {
(Some(a), Some(b)) => assert_assignable(fx, a, b, limit - 1),