summaryrefslogtreecommitdiffstats
path: root/compiler/rustc_codegen_cranelift/src
diff options
context:
space:
mode:
Diffstat (limited to 'compiler/rustc_codegen_cranelift/src')
-rw-r--r--compiler/rustc_codegen_cranelift/src/abi/mod.rs102
-rw-r--r--compiler/rustc_codegen_cranelift/src/abi/returning.rs3
-rw-r--r--compiler/rustc_codegen_cranelift/src/allocator.rs28
-rw-r--r--compiler/rustc_codegen_cranelift/src/analyze.rs30
-rw-r--r--compiler/rustc_codegen_cranelift/src/base.rs68
-rw-r--r--compiler/rustc_codegen_cranelift/src/cast.rs50
-rw-r--r--compiler/rustc_codegen_cranelift/src/codegen_i128.rs85
-rw-r--r--compiler/rustc_codegen_cranelift/src/common.rs2
-rw-r--r--compiler/rustc_codegen_cranelift/src/compiler_builtins.rs1
-rw-r--r--compiler/rustc_codegen_cranelift/src/concurrency_limiter.rs2
-rw-r--r--compiler/rustc_codegen_cranelift/src/constant.rs32
-rw-r--r--compiler/rustc_codegen_cranelift/src/cranelift_native.rs248
-rw-r--r--compiler/rustc_codegen_cranelift/src/debuginfo/emit.rs2
-rw-r--r--compiler/rustc_codegen_cranelift/src/discriminant.rs95
-rw-r--r--compiler/rustc_codegen_cranelift/src/driver/aot.rs4
-rw-r--r--compiler/rustc_codegen_cranelift/src/driver/jit.rs6
-rw-r--r--compiler/rustc_codegen_cranelift/src/global_asm.rs2
-rw-r--r--compiler/rustc_codegen_cranelift/src/inline_asm.rs6
-rw-r--r--compiler/rustc_codegen_cranelift/src/intrinsics/llvm_x86.rs4
-rw-r--r--compiler/rustc_codegen_cranelift/src/intrinsics/mod.rs161
-rw-r--r--compiler/rustc_codegen_cranelift/src/intrinsics/simd.rs11
-rw-r--r--compiler/rustc_codegen_cranelift/src/lib.rs17
-rw-r--r--compiler/rustc_codegen_cranelift/src/main_shim.rs2
-rw-r--r--compiler/rustc_codegen_cranelift/src/num.rs39
-rw-r--r--compiler/rustc_codegen_cranelift/src/pointer.rs5
-rw-r--r--compiler/rustc_codegen_cranelift/src/pretty_clif.rs2
-rw-r--r--compiler/rustc_codegen_cranelift/src/unsize.rs12
-rw-r--r--compiler/rustc_codegen_cranelift/src/value_and_place.rs43
-rw-r--r--compiler/rustc_codegen_cranelift/src/vtable.rs23
29 files changed, 384 insertions, 701 deletions
diff --git a/compiler/rustc_codegen_cranelift/src/abi/mod.rs b/compiler/rustc_codegen_cranelift/src/abi/mod.rs
index 74396a66f..91c085d3d 100644
--- a/compiler/rustc_codegen_cranelift/src/abi/mod.rs
+++ b/compiler/rustc_codegen_cranelift/src/abi/mod.rs
@@ -4,6 +4,8 @@ mod comments;
mod pass_mode;
mod returning;
+use std::borrow::Cow;
+
use cranelift_module::ModuleError;
use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrFlags;
use rustc_middle::ty::layout::FnAbiOf;
@@ -25,7 +27,7 @@ fn clif_sig_from_fn_abi<'tcx>(
) -> Signature {
let call_conv = conv_to_call_conv(tcx.sess, fn_abi.conv, default_call_conv);
- let inputs = fn_abi.args.iter().map(|arg_abi| arg_abi.get_abi_param(tcx).into_iter()).flatten();
+ let inputs = fn_abi.args.iter().flat_map(|arg_abi| arg_abi.get_abi_param(tcx).into_iter());
let (return_ptr, returns) = fn_abi.ret.get_abi_return(tcx);
// Sometimes the first param is an pointer to the place where the return value needs to be stored.
@@ -116,7 +118,52 @@ impl<'tcx> FunctionCx<'_, '_, 'tcx> {
params: Vec<AbiParam>,
returns: Vec<AbiParam>,
args: &[Value],
- ) -> &[Value] {
+ ) -> Cow<'_, [Value]> {
+ if self.tcx.sess.target.is_like_windows {
+ let (mut params, mut args): (Vec<_>, Vec<_>) =
+ params
+ .into_iter()
+ .zip(args)
+ .map(|(param, &arg)| {
+ if param.value_type == types::I128 {
+ let arg_ptr = Pointer::stack_slot(self.bcx.create_sized_stack_slot(
+ StackSlotData { kind: StackSlotKind::ExplicitSlot, size: 16 },
+ ));
+ arg_ptr.store(self, arg, MemFlags::trusted());
+ (AbiParam::new(self.pointer_type), arg_ptr.get_addr(self))
+ } else {
+ (param, arg)
+ }
+ })
+ .unzip();
+
+ let indirect_ret_val = returns.len() == 1 && returns[0].value_type == types::I128;
+
+ if indirect_ret_val {
+ params.insert(0, AbiParam::new(self.pointer_type));
+ let ret_ptr =
+ Pointer::stack_slot(self.bcx.create_sized_stack_slot(StackSlotData {
+ kind: StackSlotKind::ExplicitSlot,
+ size: 16,
+ }));
+ args.insert(0, ret_ptr.get_addr(self));
+ self.lib_call_unadjusted(name, params, vec![], &args);
+ return Cow::Owned(vec![ret_ptr.load(self, types::I128, MemFlags::trusted())]);
+ } else {
+ return self.lib_call_unadjusted(name, params, returns, &args);
+ }
+ }
+
+ self.lib_call_unadjusted(name, params, returns, args)
+ }
+
+ pub(crate) fn lib_call_unadjusted(
+ &mut self,
+ name: &str,
+ params: Vec<AbiParam>,
+ returns: Vec<AbiParam>,
+ args: &[Value],
+ ) -> Cow<'_, [Value]> {
let sig = Signature { params, returns, call_conv: self.target_config.default_call_conv };
let func_id = self.module.declare_function(name, Linkage::Import, &sig).unwrap();
let func_ref = self.module.declare_func_in_func(func_id, &mut self.bcx.func);
@@ -125,41 +172,11 @@ impl<'tcx> FunctionCx<'_, '_, 'tcx> {
}
let call_inst = self.bcx.ins().call(func_ref, args);
if self.clif_comments.enabled() {
- self.add_comment(call_inst, format!("easy_call {}", name));
+ self.add_comment(call_inst, format!("lib_call {}", name));
}
let results = self.bcx.inst_results(call_inst);
assert!(results.len() <= 2, "{}", results.len());
- results
- }
-
- pub(crate) fn easy_call(
- &mut self,
- name: &str,
- args: &[CValue<'tcx>],
- return_ty: Ty<'tcx>,
- ) -> CValue<'tcx> {
- let (input_tys, args): (Vec<_>, Vec<_>) = args
- .iter()
- .map(|arg| {
- (AbiParam::new(self.clif_type(arg.layout().ty).unwrap()), arg.load_scalar(self))
- })
- .unzip();
- let return_layout = self.layout_of(return_ty);
- let return_tys = if let ty::Tuple(tup) = return_ty.kind() {
- tup.iter().map(|ty| AbiParam::new(self.clif_type(ty).unwrap())).collect()
- } else {
- vec![AbiParam::new(self.clif_type(return_ty).unwrap())]
- };
- let ret_vals = self.lib_call(name, input_tys, return_tys, &args);
- match *ret_vals {
- [] => CValue::by_ref(
- Pointer::const_addr(self, i64::from(self.pointer_type.bytes())),
- return_layout,
- ),
- [val] => CValue::by_val(val, return_layout),
- [val, extra] => CValue::by_val_pair(val, extra, return_layout),
- _ => unreachable!(),
- }
+ Cow::Borrowed(results)
}
}
@@ -275,10 +292,6 @@ pub(crate) fn codegen_fn_prelude<'tcx>(fx: &mut FunctionCx<'_, '_, 'tcx>, start_
self::comments::add_locals_header_comment(fx);
for (local, arg_kind, ty) in func_params {
- let layout = fx.layout_of(ty);
-
- let is_ssa = ssa_analyzed[local] == crate::analyze::SsaKind::Ssa;
-
// While this is normally an optimization to prevent an unnecessary copy when an argument is
// not mutated by the current function, this is necessary to support unsized arguments.
if let ArgKind::Normal(Some(val)) = arg_kind {
@@ -300,6 +313,8 @@ pub(crate) fn codegen_fn_prelude<'tcx>(fx: &mut FunctionCx<'_, '_, 'tcx>, start_
}
}
+ let layout = fx.layout_of(ty);
+ let is_ssa = ssa_analyzed[local].is_ssa(fx, ty);
let place = make_local_place(fx, local, layout, is_ssa);
assert_eq!(fx.local_map.push(place), local);
@@ -312,7 +327,7 @@ pub(crate) fn codegen_fn_prelude<'tcx>(fx: &mut FunctionCx<'_, '_, 'tcx>, start_
ArgKind::Spread(params) => {
for (i, param) in params.into_iter().enumerate() {
if let Some(param) = param {
- place.place_field(fx, mir::Field::new(i)).write_cvalue(fx, param);
+ place.place_field(fx, FieldIdx::new(i)).write_cvalue(fx, param);
}
}
}
@@ -323,7 +338,7 @@ pub(crate) fn codegen_fn_prelude<'tcx>(fx: &mut FunctionCx<'_, '_, 'tcx>, start_
let ty = fx.monomorphize(fx.mir.local_decls[local].ty);
let layout = fx.layout_of(ty);
- let is_ssa = ssa_analyzed[local] == crate::analyze::SsaKind::Ssa;
+ let is_ssa = ssa_analyzed[local].is_ssa(fx, ty);
let place = make_local_place(fx, local, layout, is_ssa);
assert_eq!(fx.local_map.push(place), local);
@@ -445,7 +460,7 @@ pub(crate) fn codegen_terminator_call<'tcx>(
args.push(self_arg);
for i in 0..tupled_arguments.len() {
args.push(CallArgument {
- value: pack_arg.value.value_field(fx, mir::Field::new(i)),
+ value: pack_arg.value.value_field(fx, FieldIdx::new(i)),
is_owned: pack_arg.is_owned,
});
}
@@ -515,10 +530,9 @@ pub(crate) fn codegen_terminator_call<'tcx>(
args.into_iter()
.enumerate()
.skip(if first_arg_override.is_some() { 1 } else { 0 })
- .map(|(i, arg)| {
+ .flat_map(|(i, arg)| {
adjust_arg_for_abi(fx, arg.value, &fn_abi.args[i], arg.is_owned).into_iter()
- })
- .flatten(),
+ }),
)
.collect::<Vec<Value>>();
diff --git a/compiler/rustc_codegen_cranelift/src/abi/returning.rs b/compiler/rustc_codegen_cranelift/src/abi/returning.rs
index aaa141876..6d3e8eda2 100644
--- a/compiler/rustc_codegen_cranelift/src/abi/returning.rs
+++ b/compiler/rustc_codegen_cranelift/src/abi/returning.rs
@@ -14,7 +14,8 @@ pub(super) fn codegen_return_param<'tcx>(
) -> CPlace<'tcx> {
let (ret_place, ret_param): (_, SmallVec<[_; 2]>) = match fx.fn_abi.as_ref().unwrap().ret.mode {
PassMode::Ignore | PassMode::Direct(_) | PassMode::Pair(_, _) | PassMode::Cast(..) => {
- let is_ssa = ssa_analyzed[RETURN_PLACE] == crate::analyze::SsaKind::Ssa;
+ let is_ssa =
+ ssa_analyzed[RETURN_PLACE].is_ssa(fx, fx.fn_abi.as_ref().unwrap().ret.layout.ty);
(
super::make_local_place(
fx,
diff --git a/compiler/rustc_codegen_cranelift/src/allocator.rs b/compiler/rustc_codegen_cranelift/src/allocator.rs
index 1c73957ca..2c246ceb3 100644
--- a/compiler/rustc_codegen_cranelift/src/allocator.rs
+++ b/compiler/rustc_codegen_cranelift/src/allocator.rs
@@ -4,6 +4,7 @@
use crate::prelude::*;
use rustc_ast::expand::allocator::{AllocatorKind, AllocatorTy, ALLOCATOR_METHODS};
+use rustc_codegen_ssa::base::allocator_kind_for_codegen;
use rustc_session::config::OomStrategy;
use rustc_span::symbol::sym;
@@ -13,24 +14,15 @@ pub(crate) fn codegen(
module: &mut impl Module,
unwind_context: &mut UnwindContext,
) -> bool {
- let any_dynamic_crate = tcx.dependency_formats(()).iter().any(|(_, list)| {
- use rustc_middle::middle::dependency_format::Linkage;
- list.iter().any(|&linkage| linkage == Linkage::Dynamic)
- });
- if any_dynamic_crate {
- false
- } else if let Some(kind) = tcx.allocator_kind(()) {
- codegen_inner(
- module,
- unwind_context,
- kind,
- tcx.alloc_error_handler_kind(()).unwrap(),
- tcx.sess.opts.unstable_opts.oom,
- );
- true
- } else {
- false
- }
+ let Some(kind) = allocator_kind_for_codegen(tcx) else { return false };
+ codegen_inner(
+ module,
+ unwind_context,
+ kind,
+ tcx.alloc_error_handler_kind(()).unwrap(),
+ tcx.sess.opts.unstable_opts.oom,
+ );
+ true
}
fn codegen_inner(
diff --git a/compiler/rustc_codegen_cranelift/src/analyze.rs b/compiler/rustc_codegen_cranelift/src/analyze.rs
index 0cbb9f3ec..54d5c1c2a 100644
--- a/compiler/rustc_codegen_cranelift/src/analyze.rs
+++ b/compiler/rustc_codegen_cranelift/src/analyze.rs
@@ -4,34 +4,30 @@ use crate::prelude::*;
use rustc_index::vec::IndexVec;
use rustc_middle::mir::StatementKind::*;
+use rustc_middle::ty::Ty;
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
pub(crate) enum SsaKind {
NotSsa,
- Ssa,
+ MaybeSsa,
+}
+
+impl SsaKind {
+ pub(crate) fn is_ssa<'tcx>(self, fx: &FunctionCx<'_, '_, 'tcx>, ty: Ty<'tcx>) -> bool {
+ self == SsaKind::MaybeSsa && (fx.clif_type(ty).is_some() || fx.clif_pair_type(ty).is_some())
+ }
}
pub(crate) fn analyze(fx: &FunctionCx<'_, '_, '_>) -> IndexVec<Local, SsaKind> {
- let mut flag_map = fx
- .mir
- .local_decls
- .iter()
- .map(|local_decl| {
- let ty = fx.monomorphize(local_decl.ty);
- if fx.clif_type(ty).is_some() || fx.clif_pair_type(ty).is_some() {
- SsaKind::Ssa
- } else {
- SsaKind::NotSsa
- }
- })
- .collect::<IndexVec<Local, SsaKind>>();
+ let mut flag_map =
+ fx.mir.local_decls.iter().map(|_| SsaKind::MaybeSsa).collect::<IndexVec<Local, SsaKind>>();
for bb in fx.mir.basic_blocks.iter() {
for stmt in bb.statements.iter() {
match &stmt.kind {
Assign(place_and_rval) => match &place_and_rval.1 {
Rvalue::Ref(_, _, place) | Rvalue::AddressOf(_, place) => {
- not_ssa(&mut flag_map, place.local)
+ flag_map[place.local] = SsaKind::NotSsa;
}
_ => {}
},
@@ -42,7 +38,3 @@ pub(crate) fn analyze(fx: &FunctionCx<'_, '_, '_>) -> IndexVec<Local, SsaKind> {
flag_map
}
-
-fn not_ssa(flag_map: &mut IndexVec<Local, SsaKind>, local: Local) {
- flag_map[local] = SsaKind::NotSsa;
-}
diff --git a/compiler/rustc_codegen_cranelift/src/base.rs b/compiler/rustc_codegen_cranelift/src/base.rs
index 7f857528c..f5301f9f7 100644
--- a/compiler/rustc_codegen_cranelift/src/base.rs
+++ b/compiler/rustc_codegen_cranelift/src/base.rs
@@ -192,7 +192,7 @@ pub(crate) fn compile_fn(
let pass_times = cranelift_codegen::timing::take_current();
// Replace newlines with | as measureme doesn't allow control characters like
// newlines inside strings.
- recorder.record_arg(format!("{}", pass_times).replace("\n", " | "));
+ recorder.record_arg(format!("{}", pass_times).replace('\n', " | "));
recording_args = true;
},
)
@@ -345,18 +345,11 @@ fn codegen_fn_body(fx: &mut FunctionCx<'_, '_, '_>, start_block: Block) {
TerminatorKind::Return => {
crate::abi::codegen_return(fx);
}
- TerminatorKind::Assert { cond, expected, msg, target, cleanup: _ } => {
- if !fx.tcx.sess.overflow_checks() {
- let overflow_not_to_check = match msg {
- AssertKind::OverflowNeg(..) => true,
- AssertKind::Overflow(op, ..) => op.is_checkable(),
- _ => false,
- };
- if overflow_not_to_check {
- let target = fx.get_block(*target);
- fx.bcx.ins().jump(target, &[]);
- continue;
- }
+ TerminatorKind::Assert { cond, expected, msg, target, unwind: _ } => {
+ if !fx.tcx.sess.overflow_checks() && msg.is_optional_overflow_check() {
+ let target = fx.get_block(*target);
+ fx.bcx.ins().jump(target, &[]);
+ continue;
}
let cond = codegen_operand(fx, cond).load_scalar(fx);
@@ -365,11 +358,10 @@ fn codegen_fn_body(fx: &mut FunctionCx<'_, '_, '_>, start_block: Block) {
fx.bcx.set_cold_block(failure);
if *expected {
- fx.bcx.ins().brz(cond, failure, &[]);
+ fx.bcx.ins().brif(cond, target, &[], failure, &[]);
} else {
- fx.bcx.ins().brnz(cond, failure, &[]);
+ fx.bcx.ins().brif(cond, failure, &[], target, &[]);
};
- fx.bcx.ins().jump(target, &[]);
fx.bcx.switch_to_block(failure);
fx.bcx.ins().nop();
@@ -387,6 +379,18 @@ fn codegen_fn_body(fx: &mut FunctionCx<'_, '_, '_>, start_block: Block) {
source_info.span,
);
}
+ AssertKind::MisalignedPointerDereference { ref required, ref found } => {
+ let required = codegen_operand(fx, required).load_scalar(fx);
+ let found = codegen_operand(fx, found).load_scalar(fx);
+ let location = fx.get_caller_location(source_info).load_scalar(fx);
+
+ codegen_panic_inner(
+ fx,
+ rustc_hir::LangItem::PanicBoundsCheck,
+ &[required, found, location],
+ source_info.span,
+ );
+ }
_ => {
let msg_str = msg.description();
codegen_panic(fx, msg_str, source_info);
@@ -425,11 +429,9 @@ fn codegen_fn_body(fx: &mut FunctionCx<'_, '_, '_>, start_block: Block) {
}
} else {
if test_zero {
- fx.bcx.ins().brz(discr, then_block, &[]);
- fx.bcx.ins().jump(else_block, &[]);
+ fx.bcx.ins().brif(discr, else_block, &[], then_block, &[]);
} else {
- fx.bcx.ins().brnz(discr, then_block, &[]);
- fx.bcx.ins().jump(else_block, &[]);
+ fx.bcx.ins().brif(discr, then_block, &[], else_block, &[]);
}
}
} else {
@@ -448,7 +450,7 @@ fn codegen_fn_body(fx: &mut FunctionCx<'_, '_, '_>, start_block: Block) {
destination,
target,
fn_span,
- cleanup: _,
+ unwind: _,
from_hir_call: _,
} => {
fx.tcx.prof.generic_activity("codegen call").run(|| {
@@ -468,7 +470,7 @@ fn codegen_fn_body(fx: &mut FunctionCx<'_, '_, '_>, start_block: Block) {
options,
destination,
line_spans: _,
- cleanup: _,
+ unwind: _,
} => {
if options.contains(InlineAsmOptions::MAY_UNWIND) {
fx.tcx.sess.span_fatal(
@@ -486,7 +488,7 @@ fn codegen_fn_body(fx: &mut FunctionCx<'_, '_, '_>, start_block: Block) {
*destination,
);
}
- TerminatorKind::Abort => {
+ TerminatorKind::Terminate => {
codegen_panic_cannot_unwind(fx, source_info);
}
TerminatorKind::Resume => {
@@ -499,7 +501,6 @@ fn codegen_fn_body(fx: &mut FunctionCx<'_, '_, '_>, start_block: Block) {
TerminatorKind::Yield { .. }
| TerminatorKind::FalseEdge { .. }
| TerminatorKind::FalseUnwind { .. }
- | TerminatorKind::DropAndReplace { .. }
| TerminatorKind::GeneratorDrop => {
bug!("shouldn't exist at codegen {:?}", bb_data.terminator());
}
@@ -720,6 +721,10 @@ fn codegen_stmt<'tcx>(
let operand = codegen_operand(fx, operand);
operand.coerce_dyn_star(fx, lval);
}
+ Rvalue::Cast(CastKind::Transmute, ref operand, _to_ty) => {
+ let operand = codegen_operand(fx, operand);
+ lval.write_cvalue_transmute(fx, operand);
+ }
Rvalue::Discriminant(place) => {
let place = codegen_place(fx, place);
let value = place.to_cvalue(fx);
@@ -751,8 +756,7 @@ fn codegen_stmt<'tcx>(
fx.bcx.switch_to_block(loop_block);
let done = fx.bcx.ins().icmp_imm(IntCC::Equal, index, times as i64);
- fx.bcx.ins().brnz(done, done_block, &[]);
- fx.bcx.ins().jump(loop_block2, &[]);
+ fx.bcx.ins().brif(done, done_block, &[], loop_block2, &[]);
fx.bcx.switch_to_block(loop_block2);
let to = lval.place_index(fx, index);
@@ -793,19 +797,20 @@ fn codegen_stmt<'tcx>(
let variant_dest = lval.downcast_variant(fx, variant_index);
(variant_index, variant_dest, active_field_index)
}
- _ => (VariantIdx::from_u32(0), lval, None),
+ _ => (FIRST_VARIANT, lval, None),
};
if active_field_index.is_some() {
assert_eq!(operands.len(), 1);
}
- for (i, operand) in operands.iter().enumerate() {
+ for (i, operand) in operands.iter_enumerated() {
let operand = codegen_operand(fx, operand);
let field_index = active_field_index.unwrap_or(i);
let to = if let mir::AggregateKind::Array(_) = **kind {
- let index = fx.bcx.ins().iconst(fx.pointer_type, field_index as i64);
+ let array_index = i64::from(field_index.as_u32());
+ let index = fx.bcx.ins().iconst(fx.pointer_type, array_index);
variant_dest.place_index(fx, index)
} else {
- variant_dest.place_field(fx, mir::Field::new(field_index))
+ variant_dest.place_field(fx, field_index)
};
to.write_cvalue(fx, operand);
}
@@ -820,6 +825,7 @@ fn codegen_stmt<'tcx>(
| StatementKind::Nop
| StatementKind::FakeRead(..)
| StatementKind::Retag { .. }
+ | StatementKind::PlaceMention(..)
| StatementKind::AscribeUserType(..) => {}
StatementKind::Coverage { .. } => fx.tcx.sess.fatal("-Zcoverage is unimplemented"),
@@ -997,7 +1003,7 @@ fn codegen_panic_inner<'tcx>(
let symbol_name = fx.tcx.symbol_name(instance).name;
fx.lib_call(
- &*symbol_name,
+ symbol_name,
args.iter().map(|&arg| AbiParam::new(fx.bcx.func.dfg.value_type(arg))).collect(),
vec![],
args,
diff --git a/compiler/rustc_codegen_cranelift/src/cast.rs b/compiler/rustc_codegen_cranelift/src/cast.rs
index 5091c5a9f..032d11510 100644
--- a/compiler/rustc_codegen_cranelift/src/cast.rs
+++ b/compiler/rustc_codegen_cranelift/src/cast.rs
@@ -64,17 +64,12 @@ pub(crate) fn clif_int_or_float_cast(
},
);
- let from_rust_ty = if from_signed { fx.tcx.types.i128 } else { fx.tcx.types.u128 };
-
- let to_rust_ty = match to_ty {
- types::F32 => fx.tcx.types.f32,
- types::F64 => fx.tcx.types.f64,
- _ => unreachable!(),
- };
-
- return fx
- .easy_call(&name, &[CValue::by_val(from, fx.layout_of(from_rust_ty))], to_rust_ty)
- .load_scalar(fx);
+ return fx.lib_call(
+ &name,
+ vec![AbiParam::new(types::I128)],
+ vec![AbiParam::new(to_ty)],
+ &[from],
+ )[0];
}
// int-like -> float
@@ -101,16 +96,29 @@ pub(crate) fn clif_int_or_float_cast(
},
);
- let from_rust_ty = match from_ty {
- types::F32 => fx.tcx.types.f32,
- types::F64 => fx.tcx.types.f64,
- _ => unreachable!(),
- };
-
- let to_rust_ty = if to_signed { fx.tcx.types.i128 } else { fx.tcx.types.u128 };
-
- fx.easy_call(&name, &[CValue::by_val(from, fx.layout_of(from_rust_ty))], to_rust_ty)
- .load_scalar(fx)
+ if fx.tcx.sess.target.is_like_windows {
+ let ret = fx.lib_call(
+ &name,
+ vec![AbiParam::new(from_ty)],
+ vec![AbiParam::new(types::I64X2)],
+ &[from],
+ )[0];
+ // FIXME use bitcast instead of store to get from i64x2 to i128
+ let stack_slot = fx.bcx.create_sized_stack_slot(StackSlotData {
+ kind: StackSlotKind::ExplicitSlot,
+ size: 16,
+ });
+ let ret_ptr = Pointer::stack_slot(stack_slot);
+ ret_ptr.store(fx, ret, MemFlags::trusted());
+ ret_ptr.load(fx, types::I128, MemFlags::trusted())
+ } else {
+ fx.lib_call(
+ &name,
+ vec![AbiParam::new(from_ty)],
+ vec![AbiParam::new(types::I128)],
+ &[from],
+ )[0]
+ }
} else if to_ty == types::I8 || to_ty == types::I16 {
// FIXME implement fcvt_to_*int_sat.i8/i16
let val = if to_signed {
diff --git a/compiler/rustc_codegen_cranelift/src/codegen_i128.rs b/compiler/rustc_codegen_cranelift/src/codegen_i128.rs
index 40bfe7077..f674ce776 100644
--- a/compiler/rustc_codegen_cranelift/src/codegen_i128.rs
+++ b/compiler/rustc_codegen_cranelift/src/codegen_i128.rs
@@ -29,39 +29,24 @@ pub(crate) fn maybe_codegen<'tcx>(
BinOp::Add | BinOp::Sub if !checked => None,
BinOp::Mul if !checked || is_signed => {
if !checked {
- let val_ty = if is_signed { fx.tcx.types.i128 } else { fx.tcx.types.u128 };
- if fx.tcx.sess.target.is_like_windows {
- let ret_place = CPlace::new_stack_slot(fx, lhs.layout());
- let (lhs_ptr, lhs_extra) = lhs.force_stack(fx);
- let (rhs_ptr, rhs_extra) = rhs.force_stack(fx);
- assert!(lhs_extra.is_none());
- assert!(rhs_extra.is_none());
- let args = [
- ret_place.to_ptr().get_addr(fx),
- lhs_ptr.get_addr(fx),
- rhs_ptr.get_addr(fx),
- ];
- fx.lib_call(
- "__multi3",
- vec![
- AbiParam::special(fx.pointer_type, ArgumentPurpose::StructReturn),
- AbiParam::new(fx.pointer_type),
- AbiParam::new(fx.pointer_type),
- ],
- vec![],
- &args,
- );
- Some(ret_place.to_cvalue(fx))
- } else {
- Some(fx.easy_call("__multi3", &[lhs, rhs], val_ty))
- }
+ let args = [lhs.load_scalar(fx), rhs.load_scalar(fx)];
+ let ret_val = fx.lib_call(
+ "__multi3",
+ vec![AbiParam::new(types::I128), AbiParam::new(types::I128)],
+ vec![AbiParam::new(types::I128)],
+ &args,
+ )[0];
+ Some(CValue::by_val(
+ ret_val,
+ fx.layout_of(if is_signed { fx.tcx.types.i128 } else { fx.tcx.types.u128 }),
+ ))
} else {
let out_ty = fx.tcx.mk_tup(&[lhs.layout().ty, fx.tcx.types.bool]);
let oflow = CPlace::new_stack_slot(fx, fx.layout_of(fx.tcx.types.i32));
let lhs = lhs.load_scalar(fx);
let rhs = rhs.load_scalar(fx);
let oflow_ptr = oflow.to_ptr().get_addr(fx);
- let res = fx.lib_call(
+ let res = fx.lib_call_unadjusted(
"__muloti4",
vec![
AbiParam::new(types::I128),
@@ -80,29 +65,12 @@ pub(crate) fn maybe_codegen<'tcx>(
assert!(checked);
let out_ty = fx.tcx.mk_tup(&[lhs.layout().ty, fx.tcx.types.bool]);
let out_place = CPlace::new_stack_slot(fx, fx.layout_of(out_ty));
- let (param_types, args) = if fx.tcx.sess.target.is_like_windows {
- let (lhs_ptr, lhs_extra) = lhs.force_stack(fx);
- let (rhs_ptr, rhs_extra) = rhs.force_stack(fx);
- assert!(lhs_extra.is_none());
- assert!(rhs_extra.is_none());
- (
- vec![
- AbiParam::special(fx.pointer_type, ArgumentPurpose::StructReturn),
- AbiParam::new(fx.pointer_type),
- AbiParam::new(fx.pointer_type),
- ],
- [out_place.to_ptr().get_addr(fx), lhs_ptr.get_addr(fx), rhs_ptr.get_addr(fx)],
- )
- } else {
- (
- vec![
- AbiParam::special(fx.pointer_type, ArgumentPurpose::StructReturn),
- AbiParam::new(types::I128),
- AbiParam::new(types::I128),
- ],
- [out_place.to_ptr().get_addr(fx), lhs.load_scalar(fx), rhs.load_scalar(fx)],
- )
- };
+ let param_types = vec![
+ AbiParam::special(fx.pointer_type, ArgumentPurpose::StructReturn),
+ AbiParam::new(types::I128),
+ AbiParam::new(types::I128),
+ ];
+ let args = [out_place.to_ptr().get_addr(fx), lhs.load_scalar(fx), rhs.load_scalar(fx)];
let name = match (bin_op, is_signed) {
(BinOp::Add, false) => "__rust_u128_addo",
(BinOp::Add, true) => "__rust_i128_addo",
@@ -125,14 +93,10 @@ pub(crate) fn maybe_codegen<'tcx>(
_ => unreachable!(),
};
if fx.tcx.sess.target.is_like_windows {
- let (lhs_ptr, lhs_extra) = lhs.force_stack(fx);
- let (rhs_ptr, rhs_extra) = rhs.force_stack(fx);
- assert!(lhs_extra.is_none());
- assert!(rhs_extra.is_none());
- let args = [lhs_ptr.get_addr(fx), rhs_ptr.get_addr(fx)];
+ let args = [lhs.load_scalar(fx), rhs.load_scalar(fx)];
let ret = fx.lib_call(
name,
- vec![AbiParam::new(fx.pointer_type), AbiParam::new(fx.pointer_type)],
+ vec![AbiParam::new(types::I128), AbiParam::new(types::I128)],
vec![AbiParam::new(types::I64X2)],
&args,
)[0];
@@ -141,7 +105,14 @@ pub(crate) fn maybe_codegen<'tcx>(
ret_place.to_ptr().store(fx, ret, MemFlags::trusted());
Some(ret_place.to_cvalue(fx))
} else {
- Some(fx.easy_call(name, &[lhs, rhs], lhs.layout().ty))
+ let args = [lhs.load_scalar(fx), rhs.load_scalar(fx)];
+ let ret_val = fx.lib_call(
+ name,
+ vec![AbiParam::new(types::I128), AbiParam::new(types::I128)],
+ vec![AbiParam::new(types::I128)],
+ &args,
+ )[0];
+ Some(CValue::by_val(ret_val, lhs.layout()))
}
}
BinOp::Lt | BinOp::Le | BinOp::Eq | BinOp::Ge | BinOp::Gt | BinOp::Ne => {
diff --git a/compiler/rustc_codegen_cranelift/src/common.rs b/compiler/rustc_codegen_cranelift/src/common.rs
index 722e2754e..d39bf7000 100644
--- a/compiler/rustc_codegen_cranelift/src/common.rs
+++ b/compiler/rustc_codegen_cranelift/src/common.rs
@@ -75,7 +75,7 @@ fn clif_type_from_ty<'tcx>(tcx: TyCtxt<'tcx>, ty: Ty<'tcx>) -> Option<types::Typ
ty::Adt(adt_def, _) if adt_def.repr().simd() => {
let (element, count) = match &tcx.layout_of(ParamEnv::reveal_all().and(ty)).unwrap().abi
{
- Abi::Vector { element, count } => (element.clone(), *count),
+ Abi::Vector { element, count } => (*element, *count),
_ => unreachable!(),
};
diff --git a/compiler/rustc_codegen_cranelift/src/compiler_builtins.rs b/compiler/rustc_codegen_cranelift/src/compiler_builtins.rs
index 8a53baa76..f3b963200 100644
--- a/compiler/rustc_codegen_cranelift/src/compiler_builtins.rs
+++ b/compiler/rustc_codegen_cranelift/src/compiler_builtins.rs
@@ -39,6 +39,7 @@ builtin_functions! {
// integers
fn __multi3(a: i128, b: i128) -> i128;
+ fn __muloti4(n: i128, d: i128, oflow: &mut i32) -> i128;
fn __udivti3(n: u128, d: u128) -> u128;
fn __divti3(n: i128, d: i128) -> i128;
fn __umodti3(n: u128, d: u128) -> u128;
diff --git a/compiler/rustc_codegen_cranelift/src/concurrency_limiter.rs b/compiler/rustc_codegen_cranelift/src/concurrency_limiter.rs
index f855e20e0..203219a8a 100644
--- a/compiler/rustc_codegen_cranelift/src/concurrency_limiter.rs
+++ b/compiler/rustc_codegen_cranelift/src/concurrency_limiter.rs
@@ -32,7 +32,7 @@ impl ConcurrencyLimiter {
ConcurrencyLimiter {
helper_thread: Some(helper_thread),
state,
- available_token_condvar: Arc::new(Condvar::new()),
+ available_token_condvar,
finished: false,
}
}
diff --git a/compiler/rustc_codegen_cranelift/src/constant.rs b/compiler/rustc_codegen_cranelift/src/constant.rs
index 49c4f1aaa..e87f4e258 100644
--- a/compiler/rustc_codegen_cranelift/src/constant.rs
+++ b/compiler/rustc_codegen_cranelift/src/constant.rs
@@ -54,12 +54,22 @@ pub(crate) fn codegen_tls_ref<'tcx>(
def_id: DefId,
layout: TyAndLayout<'tcx>,
) -> CValue<'tcx> {
- let data_id = data_id_for_static(fx.tcx, fx.module, def_id, false);
- let local_data_id = fx.module.declare_data_in_func(data_id, &mut fx.bcx.func);
- if fx.clif_comments.enabled() {
- fx.add_comment(local_data_id, format!("tls {:?}", def_id));
- }
- let tls_ptr = fx.bcx.ins().tls_value(fx.pointer_type, local_data_id);
+ let tls_ptr = if !def_id.is_local() && fx.tcx.needs_thread_local_shim(def_id) {
+ let instance = ty::Instance {
+ def: ty::InstanceDef::ThreadLocalShim(def_id),
+ substs: ty::InternalSubsts::empty(),
+ };
+ let func_ref = fx.get_function_ref(instance);
+ let call = fx.bcx.ins().call(func_ref, &[]);
+ fx.bcx.func.dfg.first_result(call)
+ } else {
+ let data_id = data_id_for_static(fx.tcx, fx.module, def_id, false);
+ let local_data_id = fx.module.declare_data_in_func(data_id, &mut fx.bcx.func);
+ if fx.clif_comments.enabled() {
+ fx.add_comment(local_data_id, format!("tls {:?}", def_id));
+ }
+ fx.bcx.ins().tls_value(fx.pointer_type, local_data_id)
+ };
CValue::by_val(tls_ptr, layout)
}
@@ -290,7 +300,7 @@ fn data_id_for_static(
};
let data_id = match module.declare_data(
- &*symbol_name,
+ symbol_name,
linkage,
is_mutable,
attrs.flags.contains(CodegenFnAttrFlags::THREAD_LOCAL),
@@ -338,7 +348,7 @@ fn data_id_for_static(
};
let data_id = match module.declare_data(
- &*symbol_name,
+ symbol_name,
linkage,
is_mutable,
attrs.flags.contains(CodegenFnAttrFlags::THREAD_LOCAL),
@@ -529,6 +539,7 @@ pub(crate) fn mir_operand_get_const_val<'tcx>(
| StatementKind::StorageDead(_)
| StatementKind::Retag(_, _)
| StatementKind::AscribeUserType(_, _)
+ | StatementKind::PlaceMention(..)
| StatementKind::Coverage(_)
| StatementKind::ConstEvalCounter
| StatementKind::Nop => {}
@@ -538,13 +549,12 @@ pub(crate) fn mir_operand_get_const_val<'tcx>(
TerminatorKind::Goto { .. }
| TerminatorKind::SwitchInt { .. }
| TerminatorKind::Resume
- | TerminatorKind::Abort
+ | TerminatorKind::Terminate
| TerminatorKind::Return
| TerminatorKind::Unreachable
| TerminatorKind::Drop { .. }
| TerminatorKind::Assert { .. } => {}
- TerminatorKind::DropAndReplace { .. }
- | TerminatorKind::Yield { .. }
+ TerminatorKind::Yield { .. }
| TerminatorKind::GeneratorDrop
| TerminatorKind::FalseEdge { .. }
| TerminatorKind::FalseUnwind { .. } => unreachable!(),
diff --git a/compiler/rustc_codegen_cranelift/src/cranelift_native.rs b/compiler/rustc_codegen_cranelift/src/cranelift_native.rs
deleted file mode 100644
index 6c4efca44..000000000
--- a/compiler/rustc_codegen_cranelift/src/cranelift_native.rs
+++ /dev/null
@@ -1,248 +0,0 @@
-// Vendored from https://github.com/bytecodealliance/wasmtime/blob/b58a197d33f044193c3d608010f5e6ec394ac07e/cranelift/native/src/lib.rs
-// which is licensed as
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-// unlike rustc_codegen_cranelift itself. Also applies a small change to remove #![cfg_attr] that
-// rust's CI complains about and to fix formatting to match rustc.
-// FIXME revert back to the external crate with Cranelift 0.93
-#![allow(warnings)]
-
-//! Performs autodetection of the host for the purposes of running
-//! Cranelift to generate code to run on the same machine.
-
-#![deny(missing_docs, trivial_numeric_casts, unused_extern_crates, unstable_features)]
-#![warn(unused_import_braces)]
-
-use cranelift_codegen::isa;
-use target_lexicon::Triple;
-
-/// Return an `isa` builder configured for the current host
-/// machine, or `Err(())` if the host machine is not supported
-/// in the current configuration.
-pub fn builder() -> Result<isa::Builder, &'static str> {
- builder_with_options(true)
-}
-
-/// Return an `isa` builder configured for the current host
-/// machine, or `Err(())` if the host machine is not supported
-/// in the current configuration.
-///
-/// Selects the given backend variant specifically; this is
-/// useful when more than oen backend exists for a given target
-/// (e.g., on x86-64).
-pub fn builder_with_options(infer_native_flags: bool) -> Result<isa::Builder, &'static str> {
- let mut isa_builder = isa::lookup(Triple::host()).map_err(|err| match err {
- isa::LookupError::SupportDisabled => "support for architecture disabled at compile time",
- isa::LookupError::Unsupported => "unsupported architecture",
- })?;
-
- #[cfg(target_arch = "x86_64")]
- {
- use cranelift_codegen::settings::Configurable;
-
- if !std::is_x86_feature_detected!("sse2") {
- return Err("x86 support requires SSE2");
- }
-
- if !infer_native_flags {
- return Ok(isa_builder);
- }
-
- // These are temporarily enabled by default (see #3810 for
- // more) so that a default-constructed `Flags` can work with
- // default Wasmtime features. Otherwise, the user must
- // explicitly use native flags or turn these on when on x86-64
- // platforms to avoid a configuration panic. In order for the
- // "enable if detected" logic below to work, we must turn them
- // *off* (differing from the default) and then re-enable below
- // if present.
- isa_builder.set("has_sse3", "false").unwrap();
- isa_builder.set("has_ssse3", "false").unwrap();
- isa_builder.set("has_sse41", "false").unwrap();
- isa_builder.set("has_sse42", "false").unwrap();
-
- if std::is_x86_feature_detected!("sse3") {
- isa_builder.enable("has_sse3").unwrap();
- }
- if std::is_x86_feature_detected!("ssse3") {
- isa_builder.enable("has_ssse3").unwrap();
- }
- if std::is_x86_feature_detected!("sse4.1") {
- isa_builder.enable("has_sse41").unwrap();
- }
- if std::is_x86_feature_detected!("sse4.2") {
- isa_builder.enable("has_sse42").unwrap();
- }
- if std::is_x86_feature_detected!("popcnt") {
- isa_builder.enable("has_popcnt").unwrap();
- }
- if std::is_x86_feature_detected!("avx") {
- isa_builder.enable("has_avx").unwrap();
- }
- if std::is_x86_feature_detected!("avx2") {
- isa_builder.enable("has_avx2").unwrap();
- }
- if std::is_x86_feature_detected!("fma") {
- isa_builder.enable("has_fma").unwrap();
- }
- if std::is_x86_feature_detected!("bmi1") {
- isa_builder.enable("has_bmi1").unwrap();
- }
- if std::is_x86_feature_detected!("bmi2") {
- isa_builder.enable("has_bmi2").unwrap();
- }
- if std::is_x86_feature_detected!("avx512bitalg") {
- isa_builder.enable("has_avx512bitalg").unwrap();
- }
- if std::is_x86_feature_detected!("avx512dq") {
- isa_builder.enable("has_avx512dq").unwrap();
- }
- if std::is_x86_feature_detected!("avx512f") {
- isa_builder.enable("has_avx512f").unwrap();
- }
- if std::is_x86_feature_detected!("avx512vl") {
- isa_builder.enable("has_avx512vl").unwrap();
- }
- if std::is_x86_feature_detected!("avx512vbmi") {
- isa_builder.enable("has_avx512vbmi").unwrap();
- }
- if std::is_x86_feature_detected!("lzcnt") {
- isa_builder.enable("has_lzcnt").unwrap();
- }
- }
-
- #[cfg(target_arch = "aarch64")]
- {
- use cranelift_codegen::settings::Configurable;
-
- if !infer_native_flags {
- return Ok(isa_builder);
- }
-
- if std::arch::is_aarch64_feature_detected!("lse") {
- isa_builder.enable("has_lse").unwrap();
- }
-
- if std::arch::is_aarch64_feature_detected!("paca") {
- isa_builder.enable("has_pauth").unwrap();
- }
-
- if cfg!(target_os = "macos") {
- // Pointer authentication is always available on Apple Silicon.
- isa_builder.enable("sign_return_address").unwrap();
- // macOS enforces the use of the B key for return addresses.
- isa_builder.enable("sign_return_address_with_bkey").unwrap();
- }
- }
-
- // There is no is_s390x_feature_detected macro yet, so for now
- // we use getauxval from the libc crate directly.
- #[cfg(all(target_arch = "s390x", target_os = "linux"))]
- {
- use cranelift_codegen::settings::Configurable;
-
- if !infer_native_flags {
- return Ok(isa_builder);
- }
-
- let v = unsafe { libc::getauxval(libc::AT_HWCAP) };
- const HWCAP_S390X_VXRS_EXT2: libc::c_ulong = 32768;
- if (v & HWCAP_S390X_VXRS_EXT2) != 0 {
- isa_builder.enable("has_vxrs_ext2").unwrap();
- // There is no separate HWCAP bit for mie2, so assume
- // that any machine with vxrs_ext2 also has mie2.
- isa_builder.enable("has_mie2").unwrap();
- }
- }
-
- // `is_riscv_feature_detected` is nightly only for now, use
- // getauxval from the libc crate directly as a temporary measure.
- #[cfg(all(target_arch = "riscv64", target_os = "linux"))]
- {
- use cranelift_codegen::settings::Configurable;
-
- if !infer_native_flags {
- return Ok(isa_builder);
- }
-
- let v = unsafe { libc::getauxval(libc::AT_HWCAP) };
-
- const HWCAP_RISCV_EXT_A: libc::c_ulong = 1 << (b'a' - b'a');
- const HWCAP_RISCV_EXT_C: libc::c_ulong = 1 << (b'c' - b'a');
- const HWCAP_RISCV_EXT_D: libc::c_ulong = 1 << (b'd' - b'a');
- const HWCAP_RISCV_EXT_F: libc::c_ulong = 1 << (b'f' - b'a');
- const HWCAP_RISCV_EXT_M: libc::c_ulong = 1 << (b'm' - b'a');
- const HWCAP_RISCV_EXT_V: libc::c_ulong = 1 << (b'v' - b'a');
-
- if (v & HWCAP_RISCV_EXT_A) != 0 {
- isa_builder.enable("has_a").unwrap();
- }
-
- if (v & HWCAP_RISCV_EXT_C) != 0 {
- isa_builder.enable("has_c").unwrap();
- }
-
- if (v & HWCAP_RISCV_EXT_D) != 0 {
- isa_builder.enable("has_d").unwrap();
- }
-
- if (v & HWCAP_RISCV_EXT_F) != 0 {
- isa_builder.enable("has_f").unwrap();
-
- // TODO: There doesn't seem to be a bit associated with this extension
- // rust enables it with the `f` extension:
- // https://github.com/rust-lang/stdarch/blob/790411f93c4b5eada3c23abb4c9a063fb0b24d99/crates/std_detect/src/detect/os/linux/riscv.rs#L43
- isa_builder.enable("has_zicsr").unwrap();
- }
-
- if (v & HWCAP_RISCV_EXT_M) != 0 {
- isa_builder.enable("has_m").unwrap();
- }
-
- if (v & HWCAP_RISCV_EXT_V) != 0 {
- isa_builder.enable("has_v").unwrap();
- }
-
- // TODO: ZiFencei does not have a bit associated with it
- // TODO: Zbkb does not have a bit associated with it
- }
-
- // squelch warnings about unused mut/variables on some platforms.
- drop(&mut isa_builder);
- drop(infer_native_flags);
-
- Ok(isa_builder)
-}
-
-#[cfg(test)]
-mod tests {
- use super::builder;
- use cranelift_codegen::isa::CallConv;
- use cranelift_codegen::settings;
-
- #[test]
- fn test() {
- if let Ok(isa_builder) = builder() {
- let flag_builder = settings::builder();
- let isa = isa_builder.finish(settings::Flags::new(flag_builder)).unwrap();
-
- if cfg!(all(target_os = "macos", target_arch = "aarch64")) {
- assert_eq!(isa.default_call_conv(), CallConv::AppleAarch64);
- } else if cfg!(any(unix, target_os = "nebulet")) {
- assert_eq!(isa.default_call_conv(), CallConv::SystemV);
- } else if cfg!(windows) {
- assert_eq!(isa.default_call_conv(), CallConv::WindowsFastcall);
- }
-
- if cfg!(target_pointer_width = "64") {
- assert_eq!(isa.pointer_bits(), 64);
- } else if cfg!(target_pointer_width = "32") {
- assert_eq!(isa.pointer_bits(), 32);
- } else if cfg!(target_pointer_width = "16") {
- assert_eq!(isa.pointer_bits(), 16);
- }
- }
- }
-}
-
-/// Version number of this crate.
-pub const VERSION: &str = env!("CARGO_PKG_VERSION");
diff --git a/compiler/rustc_codegen_cranelift/src/debuginfo/emit.rs b/compiler/rustc_codegen_cranelift/src/debuginfo/emit.rs
index 9583cd2ec..c4a5627e6 100644
--- a/compiler/rustc_codegen_cranelift/src/debuginfo/emit.rs
+++ b/compiler/rustc_codegen_cranelift/src/debuginfo/emit.rs
@@ -113,7 +113,7 @@ impl Writer for WriterRelocate {
offset: offset as u32,
size,
name: DebugRelocName::Symbol(symbol),
- addend: addend as i64,
+ addend,
kind: object::RelocationKind::Absolute,
});
self.write_udata(0, size)
diff --git a/compiler/rustc_codegen_cranelift/src/discriminant.rs b/compiler/rustc_codegen_cranelift/src/discriminant.rs
index 3cbf313ad..670384663 100644
--- a/compiler/rustc_codegen_cranelift/src/discriminant.rs
+++ b/compiler/rustc_codegen_cranelift/src/discriminant.rs
@@ -26,7 +26,7 @@ pub(crate) fn codegen_set_discriminant<'tcx>(
tag_encoding: TagEncoding::Direct,
variants: _,
} => {
- let ptr = place.place_field(fx, mir::Field::new(tag_field));
+ let ptr = place.place_field(fx, FieldIdx::new(tag_field));
let to = layout.ty.discriminant_for_variant(fx.tcx, variant_index).unwrap().val;
let to = if ptr.layout().abi.is_signed() {
ty::ScalarInt::try_from_int(
@@ -47,7 +47,7 @@ pub(crate) fn codegen_set_discriminant<'tcx>(
variants: _,
} => {
if variant_index != untagged_variant {
- let niche = place.place_field(fx, mir::Field::new(tag_field));
+ let niche = place.place_field(fx, FieldIdx::new(tag_field));
let niche_type = fx.clif_type(niche.layout().ty).unwrap();
let niche_value = variant_index.as_u32() - niche_variants.start().as_u32();
let niche_value = (niche_value as u128).wrapping_add(niche_start);
@@ -103,11 +103,10 @@ pub(crate) fn codegen_get_discriminant<'tcx>(
}
};
- let cast_to_size = dest_layout.layout.size();
let cast_to = fx.clif_type(dest_layout.ty).unwrap();
// Read the tag/niche-encoded discriminant from memory.
- let tag = value.value_field(fx, mir::Field::new(tag_field));
+ let tag = value.value_field(fx, FieldIdx::new(tag_field));
let tag = tag.load_scalar(fx);
// Decode the discriminant (specifically if it's niche-encoded).
@@ -122,21 +121,7 @@ pub(crate) fn codegen_get_discriminant<'tcx>(
dest.write_cvalue(fx, res);
}
TagEncoding::Niche { untagged_variant, ref niche_variants, niche_start } => {
- let tag_size = tag_scalar.size(fx);
- let max_unsigned = tag_size.unsigned_int_max();
- let max_signed = tag_size.signed_int_max() as u128;
- let min_signed = max_signed + 1;
let relative_max = niche_variants.end().as_u32() - niche_variants.start().as_u32();
- let niche_end = niche_start.wrapping_add(relative_max as u128) & max_unsigned;
- let range = tag_scalar.valid_range(fx);
-
- let sle = |lhs: u128, rhs: u128| -> bool {
- // Signed and unsigned comparisons give the same results,
- // except that in signed comparisons an integer with the
- // sign bit set is less than one with the sign bit clear.
- // Toggle the sign bit to do a signed comparison.
- (lhs ^ min_signed) <= (rhs ^ min_signed)
- };
// We have a subrange `niche_start..=niche_end` inside `range`.
// If the value of the tag is inside this subrange, it's a
@@ -153,45 +138,6 @@ pub(crate) fn codegen_get_discriminant<'tcx>(
// }
// However, we will likely be able to emit simpler code.
- // Find the least and greatest values in `range`, considered
- // both as signed and unsigned.
- let (low_unsigned, high_unsigned) =
- if range.start <= range.end { (range.start, range.end) } else { (0, max_unsigned) };
- let (low_signed, high_signed) = if sle(range.start, range.end) {
- (range.start, range.end)
- } else {
- (min_signed, max_signed)
- };
-
- let niches_ule = niche_start <= niche_end;
- let niches_sle = sle(niche_start, niche_end);
- let cast_smaller = cast_to_size <= tag_size;
-
- // In the algorithm above, we can change
- // cast(relative_tag) + niche_variants.start()
- // into
- // cast(tag + (niche_variants.start() - niche_start))
- // if either the casted type is no larger than the original
- // type, or if the niche values are contiguous (in either the
- // signed or unsigned sense).
- let can_incr = cast_smaller || niches_ule || niches_sle;
-
- let data_for_boundary_niche = || -> Option<(IntCC, u128)> {
- if !can_incr {
- None
- } else if niche_start == low_unsigned {
- Some((IntCC::UnsignedLessThanOrEqual, niche_end))
- } else if niche_end == high_unsigned {
- Some((IntCC::UnsignedGreaterThanOrEqual, niche_start))
- } else if niche_start == low_signed {
- Some((IntCC::SignedLessThanOrEqual, niche_end))
- } else if niche_end == high_signed {
- Some((IntCC::SignedGreaterThanOrEqual, niche_start))
- } else {
- None
- }
- };
-
let (is_niche, tagged_discr, delta) = if relative_max == 0 {
// Best case scenario: only one tagged variant. This will
// likely become just a comparison and a jump.
@@ -206,41 +152,6 @@ pub(crate) fn codegen_get_discriminant<'tcx>(
let tagged_discr =
fx.bcx.ins().iconst(cast_to, niche_variants.start().as_u32() as i64);
(is_niche, tagged_discr, 0)
- } else if let Some((predicate, constant)) = data_for_boundary_niche() {
- // The niche values are either the lowest or the highest in
- // `range`. We can avoid the first subtraction in the
- // algorithm.
- // The algorithm is now this:
- // is_niche = tag <= niche_end
- // discr = if is_niche {
- // cast(tag + (niche_variants.start() - niche_start))
- // } else {
- // untagged_variant
- // }
- // (the first line may instead be tag >= niche_start,
- // and may be a signed or unsigned comparison)
- // The arithmetic must be done before the cast, so we can
- // have the correct wrapping behavior. See issue #104519 for
- // the consequences of getting this wrong.
- let is_niche = codegen_icmp_imm(fx, predicate, tag, constant as i128);
- let delta = (niche_variants.start().as_u32() as u128).wrapping_sub(niche_start);
- let incr_tag = if delta == 0 {
- tag
- } else {
- let delta = match fx.bcx.func.dfg.value_type(tag) {
- types::I128 => {
- let lsb = fx.bcx.ins().iconst(types::I64, delta as u64 as i64);
- let msb = fx.bcx.ins().iconst(types::I64, (delta >> 64) as u64 as i64);
- fx.bcx.ins().iconcat(lsb, msb)
- }
- ty => fx.bcx.ins().iconst(ty, delta as i64),
- };
- fx.bcx.ins().iadd(tag, delta)
- };
-
- let cast_tag = clif_intcast(fx, incr_tag, cast_to, !niches_ule);
-
- (is_niche, cast_tag, 0)
} else {
// The special cases don't apply, so we'll have to go with
// the general algorithm.
diff --git a/compiler/rustc_codegen_cranelift/src/driver/aot.rs b/compiler/rustc_codegen_cranelift/src/driver/aot.rs
index 7c6fd9f6f..3e2e2af96 100644
--- a/compiler/rustc_codegen_cranelift/src/driver/aot.rs
+++ b/compiler/rustc_codegen_cranelift/src/driver/aot.rs
@@ -377,7 +377,7 @@ pub(crate) fn run_aot(
};
if tcx.dep_graph.is_fully_enabled() {
- for cgu in &*cgus {
+ for cgu in cgus {
tcx.ensure().codegen_unit(cgu.name());
}
}
@@ -417,7 +417,7 @@ pub(crate) fn run_aot(
CguReuse::PreLto => unreachable!(),
CguReuse::PostLto => {
concurrency_limiter.job_already_done();
- OngoingModuleCodegen::Sync(reuse_workproduct_for_cgu(tcx, &*cgu))
+ OngoingModuleCodegen::Sync(reuse_workproduct_for_cgu(tcx, cgu))
}
}
})
diff --git a/compiler/rustc_codegen_cranelift/src/driver/jit.rs b/compiler/rustc_codegen_cranelift/src/driver/jit.rs
index 8b5a2da2c..f6a48e325 100644
--- a/compiler/rustc_codegen_cranelift/src/driver/jit.rs
+++ b/compiler/rustc_codegen_cranelift/src/driver/jit.rs
@@ -311,7 +311,11 @@ fn dep_symbol_lookup_fn(
.find(|(crate_type, _data)| *crate_type == rustc_session::config::CrateType::Executable)
.unwrap()
.1;
- for &cnum in &crate_info.used_crates {
+ // `used_crates` is in reverse postorder in terms of dependencies. Reverse the order here to
+ // get a postorder which ensures that all dependencies of a dylib are loaded before the dylib
+ // itself. This helps the dynamic linker to find dylibs not in the regular dynamic library
+ // search path.
+ for &cnum in crate_info.used_crates.iter().rev() {
let src = &crate_info.used_crate_source[&cnum];
match data[cnum.as_usize() - 1] {
Linkage::NotLinked | Linkage::IncludedFromDylib => {}
diff --git a/compiler/rustc_codegen_cranelift/src/global_asm.rs b/compiler/rustc_codegen_cranelift/src/global_asm.rs
index 46c78ce6a..a74f8ffa2 100644
--- a/compiler/rustc_codegen_cranelift/src/global_asm.rs
+++ b/compiler/rustc_codegen_cranelift/src/global_asm.rs
@@ -125,7 +125,7 @@ pub(crate) fn compile_global_asm(
let output_object_file = config.output_filenames.temp_path(OutputType::Object, Some(cgu_name));
// Assemble `global_asm`
- let global_asm_object_file = add_file_stem_postfix(output_object_file.clone(), ".asm");
+ let global_asm_object_file = add_file_stem_postfix(output_object_file, ".asm");
let mut child = Command::new(&config.assembler)
.arg("-o")
.arg(&global_asm_object_file)
diff --git a/compiler/rustc_codegen_cranelift/src/inline_asm.rs b/compiler/rustc_codegen_cranelift/src/inline_asm.rs
index 6206fbf7d..3ba530c04 100644
--- a/compiler/rustc_codegen_cranelift/src/inline_asm.rs
+++ b/compiler/rustc_codegen_cranelift/src/inline_asm.rs
@@ -242,7 +242,7 @@ pub(crate) fn codegen_inline_asm<'tcx>(
}
}
InlineAsmOperand::Const { ref value } => {
- let (const_value, ty) = crate::constant::eval_mir_constant(fx, &*value)
+ let (const_value, ty) = crate::constant::eval_mir_constant(fx, value)
.unwrap_or_else(|| span_bug!(span, "asm const cannot be resolved"));
let value = rustc_codegen_ssa::common::asm_const_to_str(
fx.tcx,
@@ -334,13 +334,13 @@ pub(crate) fn codegen_inline_asm<'tcx>(
}
CInlineAsmOperand::Out { reg: _, late: _, place } => {
if let Some(place) = place {
- outputs.push((asm_gen.stack_slots_output[i].unwrap(), place.clone()));
+ outputs.push((asm_gen.stack_slots_output[i].unwrap(), *place));
}
}
CInlineAsmOperand::InOut { reg: _, _late: _, in_value, out_place } => {
inputs.push((asm_gen.stack_slots_input[i].unwrap(), in_value.load_scalar(fx)));
if let Some(out_place) = out_place {
- outputs.push((asm_gen.stack_slots_output[i].unwrap(), out_place.clone()));
+ outputs.push((asm_gen.stack_slots_output[i].unwrap(), *out_place));
}
}
CInlineAsmOperand::Const { value: _ } | CInlineAsmOperand::Symbol { symbol: _ } => {}
diff --git a/compiler/rustc_codegen_cranelift/src/intrinsics/llvm_x86.rs b/compiler/rustc_codegen_cranelift/src/intrinsics/llvm_x86.rs
index e5c4b244a..0f32d1a25 100644
--- a/compiler/rustc_codegen_cranelift/src/intrinsics/llvm_x86.rs
+++ b/compiler/rustc_codegen_cranelift/src/intrinsics/llvm_x86.rs
@@ -179,8 +179,8 @@ fn llvm_add_sub<'tcx>(
// c + carry -> c + first intermediate carry or borrow respectively
let int0 = crate::num::codegen_checked_int_binop(fx, bin_op, a, b);
- let c = int0.value_field(fx, mir::Field::new(0));
- let cb0 = int0.value_field(fx, mir::Field::new(1)).load_scalar(fx);
+ let c = int0.value_field(fx, FieldIdx::new(0));
+ let cb0 = int0.value_field(fx, FieldIdx::new(1)).load_scalar(fx);
// c + carry -> c + second intermediate carry or borrow respectively
let cb_in_as_u64 = fx.bcx.ins().uextend(types::I64, cb_in);
diff --git a/compiler/rustc_codegen_cranelift/src/intrinsics/mod.rs b/compiler/rustc_codegen_cranelift/src/intrinsics/mod.rs
index e74aabf2f..03f2a65fc 100644
--- a/compiler/rustc_codegen_cranelift/src/intrinsics/mod.rs
+++ b/compiler/rustc_codegen_cranelift/src/intrinsics/mod.rs
@@ -23,7 +23,7 @@ pub(crate) use llvm::codegen_llvm_intrinsic_call;
use rustc_middle::ty;
use rustc_middle::ty::layout::{HasParamEnv, ValidityRequirement};
-use rustc_middle::ty::print::with_no_trimmed_paths;
+use rustc_middle::ty::print::{with_no_trimmed_paths, with_no_visible_paths};
use rustc_middle::ty::subst::SubstsRef;
use rustc_span::symbol::{kw, sym, Symbol};
@@ -252,41 +252,45 @@ fn codegen_float_intrinsic_call<'tcx>(
args: &[mir::Operand<'tcx>],
ret: CPlace<'tcx>,
) -> bool {
- let (name, arg_count, ty) = match intrinsic {
- sym::expf32 => ("expf", 1, fx.tcx.types.f32),
- sym::expf64 => ("exp", 1, fx.tcx.types.f64),
- sym::exp2f32 => ("exp2f", 1, fx.tcx.types.f32),
- sym::exp2f64 => ("exp2", 1, fx.tcx.types.f64),
- sym::sqrtf32 => ("sqrtf", 1, fx.tcx.types.f32),
- sym::sqrtf64 => ("sqrt", 1, fx.tcx.types.f64),
- sym::powif32 => ("__powisf2", 2, fx.tcx.types.f32), // compiler-builtins
- sym::powif64 => ("__powidf2", 2, fx.tcx.types.f64), // compiler-builtins
- sym::powf32 => ("powf", 2, fx.tcx.types.f32),
- sym::powf64 => ("pow", 2, fx.tcx.types.f64),
- sym::logf32 => ("logf", 1, fx.tcx.types.f32),
- sym::logf64 => ("log", 1, fx.tcx.types.f64),
- sym::log2f32 => ("log2f", 1, fx.tcx.types.f32),
- sym::log2f64 => ("log2", 1, fx.tcx.types.f64),
- sym::log10f32 => ("log10f", 1, fx.tcx.types.f32),
- sym::log10f64 => ("log10", 1, fx.tcx.types.f64),
- sym::fabsf32 => ("fabsf", 1, fx.tcx.types.f32),
- sym::fabsf64 => ("fabs", 1, fx.tcx.types.f64),
- sym::fmaf32 => ("fmaf", 3, fx.tcx.types.f32),
- sym::fmaf64 => ("fma", 3, fx.tcx.types.f64),
- sym::copysignf32 => ("copysignf", 2, fx.tcx.types.f32),
- sym::copysignf64 => ("copysign", 2, fx.tcx.types.f64),
- sym::floorf32 => ("floorf", 1, fx.tcx.types.f32),
- sym::floorf64 => ("floor", 1, fx.tcx.types.f64),
- sym::ceilf32 => ("ceilf", 1, fx.tcx.types.f32),
- sym::ceilf64 => ("ceil", 1, fx.tcx.types.f64),
- sym::truncf32 => ("truncf", 1, fx.tcx.types.f32),
- sym::truncf64 => ("trunc", 1, fx.tcx.types.f64),
- sym::roundf32 => ("roundf", 1, fx.tcx.types.f32),
- sym::roundf64 => ("round", 1, fx.tcx.types.f64),
- sym::sinf32 => ("sinf", 1, fx.tcx.types.f32),
- sym::sinf64 => ("sin", 1, fx.tcx.types.f64),
- sym::cosf32 => ("cosf", 1, fx.tcx.types.f32),
- sym::cosf64 => ("cos", 1, fx.tcx.types.f64),
+ let (name, arg_count, ty, clif_ty) = match intrinsic {
+ sym::expf32 => ("expf", 1, fx.tcx.types.f32, types::F32),
+ sym::expf64 => ("exp", 1, fx.tcx.types.f64, types::F64),
+ sym::exp2f32 => ("exp2f", 1, fx.tcx.types.f32, types::F32),
+ sym::exp2f64 => ("exp2", 1, fx.tcx.types.f64, types::F64),
+ sym::sqrtf32 => ("sqrtf", 1, fx.tcx.types.f32, types::F32),
+ sym::sqrtf64 => ("sqrt", 1, fx.tcx.types.f64, types::F64),
+ sym::powif32 => ("__powisf2", 2, fx.tcx.types.f32, types::F32), // compiler-builtins
+ sym::powif64 => ("__powidf2", 2, fx.tcx.types.f64, types::F64), // compiler-builtins
+ sym::powf32 => ("powf", 2, fx.tcx.types.f32, types::F32),
+ sym::powf64 => ("pow", 2, fx.tcx.types.f64, types::F64),
+ sym::logf32 => ("logf", 1, fx.tcx.types.f32, types::F32),
+ sym::logf64 => ("log", 1, fx.tcx.types.f64, types::F64),
+ sym::log2f32 => ("log2f", 1, fx.tcx.types.f32, types::F32),
+ sym::log2f64 => ("log2", 1, fx.tcx.types.f64, types::F64),
+ sym::log10f32 => ("log10f", 1, fx.tcx.types.f32, types::F32),
+ sym::log10f64 => ("log10", 1, fx.tcx.types.f64, types::F64),
+ sym::fabsf32 => ("fabsf", 1, fx.tcx.types.f32, types::F32),
+ sym::fabsf64 => ("fabs", 1, fx.tcx.types.f64, types::F64),
+ sym::fmaf32 => ("fmaf", 3, fx.tcx.types.f32, types::F32),
+ sym::fmaf64 => ("fma", 3, fx.tcx.types.f64, types::F64),
+ sym::copysignf32 => ("copysignf", 2, fx.tcx.types.f32, types::F32),
+ sym::copysignf64 => ("copysign", 2, fx.tcx.types.f64, types::F64),
+ sym::floorf32 => ("floorf", 1, fx.tcx.types.f32, types::F32),
+ sym::floorf64 => ("floor", 1, fx.tcx.types.f64, types::F64),
+ sym::ceilf32 => ("ceilf", 1, fx.tcx.types.f32, types::F32),
+ sym::ceilf64 => ("ceil", 1, fx.tcx.types.f64, types::F64),
+ sym::truncf32 => ("truncf", 1, fx.tcx.types.f32, types::F32),
+ sym::truncf64 => ("trunc", 1, fx.tcx.types.f64, types::F64),
+ sym::rintf32 => ("rintf", 1, fx.tcx.types.f32, types::F32),
+ sym::rintf64 => ("rint", 1, fx.tcx.types.f64, types::F64),
+ sym::roundf32 => ("roundf", 1, fx.tcx.types.f32, types::F32),
+ sym::roundf64 => ("round", 1, fx.tcx.types.f64, types::F64),
+ sym::roundevenf32 => ("roundevenf", 1, fx.tcx.types.f32, types::F32),
+ sym::roundevenf64 => ("roundeven", 1, fx.tcx.types.f64, types::F64),
+ sym::sinf32 => ("sinf", 1, fx.tcx.types.f32, types::F32),
+ sym::sinf64 => ("sin", 1, fx.tcx.types.f64, types::F64),
+ sym::cosf32 => ("cosf", 1, fx.tcx.types.f32, types::F32),
+ sym::cosf64 => ("cos", 1, fx.tcx.types.f64, types::F64),
_ => return false,
};
@@ -297,15 +301,19 @@ fn codegen_float_intrinsic_call<'tcx>(
let (a, b, c);
let args = match args {
[x] => {
- a = [codegen_operand(fx, x)];
+ a = [codegen_operand(fx, x).load_scalar(fx)];
&a as &[_]
}
[x, y] => {
- b = [codegen_operand(fx, x), codegen_operand(fx, y)];
+ b = [codegen_operand(fx, x).load_scalar(fx), codegen_operand(fx, y).load_scalar(fx)];
&b
}
[x, y, z] => {
- c = [codegen_operand(fx, x), codegen_operand(fx, y), codegen_operand(fx, z)];
+ c = [
+ codegen_operand(fx, x).load_scalar(fx),
+ codegen_operand(fx, y).load_scalar(fx),
+ codegen_operand(fx, z).load_scalar(fx),
+ ];
&c
}
_ => unreachable!(),
@@ -314,15 +322,10 @@ fn codegen_float_intrinsic_call<'tcx>(
let layout = fx.layout_of(ty);
let res = match intrinsic {
sym::fmaf32 | sym::fmaf64 => {
- let a = args[0].load_scalar(fx);
- let b = args[1].load_scalar(fx);
- let c = args[2].load_scalar(fx);
- CValue::by_val(fx.bcx.ins().fma(a, b, c), layout)
+ CValue::by_val(fx.bcx.ins().fma(args[0], args[1], args[2]), layout)
}
sym::copysignf32 | sym::copysignf64 => {
- let a = args[0].load_scalar(fx);
- let b = args[1].load_scalar(fx);
- CValue::by_val(fx.bcx.ins().fcopysign(a, b), layout)
+ CValue::by_val(fx.bcx.ins().fcopysign(args[0], args[1]), layout)
}
sym::fabsf32
| sym::fabsf64
@@ -332,21 +335,29 @@ fn codegen_float_intrinsic_call<'tcx>(
| sym::ceilf64
| sym::truncf32
| sym::truncf64 => {
- let a = args[0].load_scalar(fx);
-
let val = match intrinsic {
- sym::fabsf32 | sym::fabsf64 => fx.bcx.ins().fabs(a),
- sym::floorf32 | sym::floorf64 => fx.bcx.ins().floor(a),
- sym::ceilf32 | sym::ceilf64 => fx.bcx.ins().ceil(a),
- sym::truncf32 | sym::truncf64 => fx.bcx.ins().trunc(a),
+ sym::fabsf32 | sym::fabsf64 => fx.bcx.ins().fabs(args[0]),
+ sym::floorf32 | sym::floorf64 => fx.bcx.ins().floor(args[0]),
+ sym::ceilf32 | sym::ceilf64 => fx.bcx.ins().ceil(args[0]),
+ sym::truncf32 | sym::truncf64 => fx.bcx.ins().trunc(args[0]),
_ => unreachable!(),
};
CValue::by_val(val, layout)
}
+
// These intrinsics aren't supported natively by Cranelift.
// Lower them to a libcall.
- _ => fx.easy_call(name, &args, ty),
+ sym::powif32 | sym::powif64 => {
+ let input_tys: Vec<_> = vec![AbiParam::new(clif_ty), AbiParam::new(types::I32)];
+ let ret_val = fx.lib_call(name, input_tys, vec![AbiParam::new(clif_ty)], &args)[0];
+ CValue::by_val(ret_val, fx.layout_of(ty))
+ }
+ _ => {
+ let input_tys: Vec<_> = args.iter().map(|_| AbiParam::new(clif_ty)).collect();
+ let ret_val = fx.lib_call(name, input_tys, vec![AbiParam::new(clif_ty)], &args)[0];
+ CValue::by_val(ret_val, fx.layout_of(ty))
+ }
};
ret.write_cvalue(fx, res);
@@ -381,7 +392,7 @@ fn codegen_regular_intrinsic_call<'tcx>(
fx.bcx.ins().debugtrap();
}
- sym::copy | sym::copy_nonoverlapping => {
+ sym::copy => {
intrinsic_args!(fx, args => (src, dst, count); intrinsic);
let src = src.load_scalar(fx);
let dst = dst.load_scalar(fx);
@@ -393,13 +404,8 @@ fn codegen_regular_intrinsic_call<'tcx>(
let byte_amount =
if elem_size != 1 { fx.bcx.ins().imul_imm(count, elem_size as i64) } else { count };
- if intrinsic == sym::copy_nonoverlapping {
- // FIXME emit_small_memcpy
- fx.bcx.call_memcpy(fx.target_config, dst, src, byte_amount);
- } else {
- // FIXME emit_small_memmove
- fx.bcx.call_memmove(fx.target_config, dst, src, byte_amount);
- }
+ // FIXME emit_small_memmove
+ fx.bcx.call_memmove(fx.target_config, dst, src, byte_amount);
}
sym::volatile_copy_memory | sym::volatile_copy_nonoverlapping_memory => {
// NOTE: the volatile variants have src and dst swapped
@@ -551,16 +557,6 @@ fn codegen_regular_intrinsic_call<'tcx>(
fx.bcx.ins().band(ptr, mask);
}
- sym::transmute => {
- intrinsic_args!(fx, args => (from); intrinsic);
-
- if ret.layout().abi.is_uninhabited() {
- crate::base::codegen_panic(fx, "Transmuting to uninhabited type.", source_info);
- return;
- }
-
- ret.write_cvalue_transmute(fx, from);
- }
sym::write_bytes | sym::volatile_set_memory => {
intrinsic_args!(fx, args => (dst, val, count); intrinsic);
let val = val.load_scalar(fx);
@@ -639,26 +635,25 @@ fn codegen_regular_intrinsic_call<'tcx>(
if do_panic {
let layout = fx.layout_of(ty);
-
- with_no_trimmed_paths!({
- crate::base::codegen_panic_nounwind(
- fx,
- &if layout.abi.is_uninhabited() {
- format!("attempted to instantiate uninhabited type `{}`", layout.ty)
- } else if requirement == ValidityRequirement::Zero {
+ let msg_str = with_no_visible_paths!({
+ with_no_trimmed_paths!({
+ if layout.abi.is_uninhabited() {
+ // Use this error even for the other intrinsics as it is more precise.
+ format!("attempted to instantiate uninhabited type `{}`", ty)
+ } else if intrinsic == sym::assert_zero_valid {
format!(
"attempted to zero-initialize type `{}`, which is invalid",
- layout.ty
+ ty
)
} else {
format!(
"attempted to leave type `{}` uninitialized, which is invalid",
- layout.ty
+ ty
)
- },
- source_info,
- )
+ }
+ })
});
+ crate::base::codegen_panic_nounwind(fx, &msg_str, source_info);
return;
}
}
diff --git a/compiler/rustc_codegen_cranelift/src/intrinsics/simd.rs b/compiler/rustc_codegen_cranelift/src/intrinsics/simd.rs
index a1d63acfb..6f54a8d49 100644
--- a/compiler/rustc_codegen_cranelift/src/intrinsics/simd.rs
+++ b/compiler/rustc_codegen_cranelift/src/intrinsics/simd.rs
@@ -253,7 +253,7 @@ pub(super) fn codegen_simd_intrinsic_call<'tcx>(
}
ret.write_cvalue(fx, base);
- let ret_lane = ret.place_field(fx, mir::Field::new(idx.try_into().unwrap()));
+ let ret_lane = ret.place_field(fx, FieldIdx::new(idx.try_into().unwrap()));
ret_lane.write_cvalue(fx, val);
}
@@ -279,9 +279,8 @@ pub(super) fn codegen_simd_intrinsic_call<'tcx>(
fx.tcx.sess.span_warn(span, "Index argument for `simd_extract` is not a constant");
let trap_block = fx.bcx.create_block();
let true_ = fx.bcx.ins().iconst(types::I8, 1);
- fx.bcx.ins().brnz(true_, trap_block, &[]);
let ret_block = fx.get_block(target);
- fx.bcx.ins().jump(ret_block, &[]);
+ fx.bcx.ins().brif(true_, trap_block, &[], ret_block, &[]);
fx.bcx.switch_to_block(trap_block);
crate::trap::trap_unimplemented(
fx,
@@ -825,8 +824,7 @@ pub(super) fn codegen_simd_intrinsic_call<'tcx>(
let next = fx.bcx.create_block();
let res_lane = fx.bcx.append_block_param(next, lane_clif_ty);
- fx.bcx.ins().brnz(mask_lane, if_enabled, &[]);
- fx.bcx.ins().jump(if_disabled, &[]);
+ fx.bcx.ins().brif(mask_lane, if_enabled, &[], if_disabled, &[]);
fx.bcx.seal_block(if_enabled);
fx.bcx.seal_block(if_disabled);
@@ -864,8 +862,7 @@ pub(super) fn codegen_simd_intrinsic_call<'tcx>(
let if_enabled = fx.bcx.create_block();
let next = fx.bcx.create_block();
- fx.bcx.ins().brnz(mask_lane, if_enabled, &[]);
- fx.bcx.ins().jump(next, &[]);
+ fx.bcx.ins().brif(mask_lane, if_enabled, &[], next, &[]);
fx.bcx.seal_block(if_enabled);
fx.bcx.switch_to_block(if_enabled);
diff --git a/compiler/rustc_codegen_cranelift/src/lib.rs b/compiler/rustc_codegen_cranelift/src/lib.rs
index 80ce3dc93..8cc7f6c34 100644
--- a/compiler/rustc_codegen_cranelift/src/lib.rs
+++ b/compiler/rustc_codegen_cranelift/src/lib.rs
@@ -57,8 +57,6 @@ mod compiler_builtins;
mod concurrency_limiter;
mod config;
mod constant;
-// FIXME revert back to the external crate with Cranelift 0.93
-mod cranelift_native;
mod debuginfo;
mod discriminant;
mod driver;
@@ -88,7 +86,7 @@ mod prelude {
self, FloatTy, Instance, InstanceDef, IntTy, ParamEnv, Ty, TyCtxt, TypeAndMut,
TypeFoldable, TypeVisitableExt, UintTy,
};
- pub(crate) use rustc_target::abi::{Abi, Scalar, Size, VariantIdx};
+ pub(crate) use rustc_target::abi::{Abi, FieldIdx, Scalar, Size, VariantIdx, FIRST_VARIANT};
pub(crate) use rustc_data_structures::fx::FxHashMap;
@@ -251,7 +249,7 @@ fn target_triple(sess: &Session) -> target_lexicon::Triple {
}
}
-fn build_isa(sess: &Session, backend_config: &BackendConfig) -> Box<dyn isa::TargetIsa + 'static> {
+fn build_isa(sess: &Session, backend_config: &BackendConfig) -> Arc<dyn isa::TargetIsa + 'static> {
use target_lexicon::BinaryFormat;
let target_triple = crate::target_triple(sess);
@@ -285,14 +283,17 @@ fn build_isa(sess: &Session, backend_config: &BackendConfig) -> Box<dyn isa::Tar
}
}
- if let target_lexicon::Architecture::Aarch64(_) | target_lexicon::Architecture::X86_64 =
- target_triple.architecture
+ if let target_lexicon::Architecture::Aarch64(_)
+ | target_lexicon::Architecture::Riscv64(_)
+ | target_lexicon::Architecture::X86_64 = target_triple.architecture
{
- // Windows depends on stack probes to grow the committed part of the stack
+ // Windows depends on stack probes to grow the committed part of the stack.
+ // On other platforms it helps prevent stack smashing.
flags_builder.enable("enable_probestack").unwrap();
flags_builder.set("probestack_strategy", "inline").unwrap();
} else {
- // __cranelift_probestack is not provided and inline stack probes are only supported on AArch64 and x86_64
+ // __cranelift_probestack is not provided and inline stack probes are only supported on
+ // AArch64, Riscv64 and x86_64.
flags_builder.set("enable_probestack", "false").unwrap();
}
diff --git a/compiler/rustc_codegen_cranelift/src/main_shim.rs b/compiler/rustc_codegen_cranelift/src/main_shim.rs
index be908df83..205411e8c 100644
--- a/compiler/rustc_codegen_cranelift/src/main_shim.rs
+++ b/compiler/rustc_codegen_cranelift/src/main_shim.rs
@@ -28,7 +28,7 @@ pub(crate) fn maybe_create_entry_wrapper(
if main_def_id.is_local() {
let instance = Instance::mono(tcx, main_def_id).polymorphize(tcx);
- if !is_jit && module.get_name(&*tcx.symbol_name(instance).name).is_none() {
+ if !is_jit && module.get_name(tcx.symbol_name(instance).name).is_none() {
return;
}
} else if !is_primary_cgu {
diff --git a/compiler/rustc_codegen_cranelift/src/num.rs b/compiler/rustc_codegen_cranelift/src/num.rs
index c058ece96..1357b7be1 100644
--- a/compiler/rustc_codegen_cranelift/src/num.rs
+++ b/compiler/rustc_codegen_cranelift/src/num.rs
@@ -170,14 +170,6 @@ pub(crate) fn codegen_checked_int_binop<'tcx>(
in_lhs: CValue<'tcx>,
in_rhs: CValue<'tcx>,
) -> CValue<'tcx> {
- if bin_op != BinOp::Shl && bin_op != BinOp::Shr {
- assert_eq!(
- in_lhs.layout().ty,
- in_rhs.layout().ty,
- "checked int binop requires lhs and rhs of same type"
- );
- }
-
let lhs = in_lhs.load_scalar(fx);
let rhs = in_rhs.load_scalar(fx);
@@ -271,21 +263,6 @@ pub(crate) fn codegen_checked_int_binop<'tcx>(
_ => unreachable!("invalid non-integer type {}", ty),
}
}
- BinOp::Shl => {
- let val = fx.bcx.ins().ishl(lhs, rhs);
- let ty = fx.bcx.func.dfg.value_type(val);
- let max_shift = i64::from(ty.bits()) - 1;
- let has_overflow = fx.bcx.ins().icmp_imm(IntCC::UnsignedGreaterThan, rhs, max_shift);
- (val, has_overflow)
- }
- BinOp::Shr => {
- let val =
- if !signed { fx.bcx.ins().ushr(lhs, rhs) } else { fx.bcx.ins().sshr(lhs, rhs) };
- let ty = fx.bcx.func.dfg.value_type(val);
- let max_shift = i64::from(ty.bits()) - 1;
- let has_overflow = fx.bcx.ins().icmp_imm(IntCC::UnsignedGreaterThan, rhs, max_shift);
- (val, has_overflow)
- }
_ => bug!("binop {:?} on checked int/uint lhs: {:?} rhs: {:?}", bin_op, in_lhs, in_rhs),
};
@@ -347,12 +324,20 @@ pub(crate) fn codegen_float_binop<'tcx>(
BinOp::Mul => b.fmul(lhs, rhs),
BinOp::Div => b.fdiv(lhs, rhs),
BinOp::Rem => {
- let name = match in_lhs.layout().ty.kind() {
- ty::Float(FloatTy::F32) => "fmodf",
- ty::Float(FloatTy::F64) => "fmod",
+ let (name, ty) = match in_lhs.layout().ty.kind() {
+ ty::Float(FloatTy::F32) => ("fmodf", types::F32),
+ ty::Float(FloatTy::F64) => ("fmod", types::F64),
_ => bug!(),
};
- return fx.easy_call(name, &[in_lhs, in_rhs], in_lhs.layout().ty);
+
+ let ret_val = fx.lib_call(
+ name,
+ vec![AbiParam::new(ty), AbiParam::new(ty)],
+ vec![AbiParam::new(ty)],
+ &[lhs, rhs],
+ )[0];
+
+ return CValue::by_val(ret_val, in_lhs.layout());
}
BinOp::Eq | BinOp::Lt | BinOp::Le | BinOp::Ne | BinOp::Ge | BinOp::Gt => {
let fltcc = match bin_op {
diff --git a/compiler/rustc_codegen_cranelift/src/pointer.rs b/compiler/rustc_codegen_cranelift/src/pointer.rs
index 31d827f83..b60e56720 100644
--- a/compiler/rustc_codegen_cranelift/src/pointer.rs
+++ b/compiler/rustc_codegen_cranelift/src/pointer.rs
@@ -30,11 +30,6 @@ impl Pointer {
Pointer { base: PointerBase::Stack(stack_slot), offset: Offset32::new(0) }
}
- pub(crate) fn const_addr(fx: &mut FunctionCx<'_, '_, '_>, addr: i64) -> Self {
- let addr = fx.bcx.ins().iconst(fx.pointer_type, addr);
- Pointer { base: PointerBase::Addr(addr), offset: Offset32::new(0) }
- }
-
pub(crate) fn dangling(align: Align) -> Self {
Pointer { base: PointerBase::Dangling(align), offset: Offset32::new(0) }
}
diff --git a/compiler/rustc_codegen_cranelift/src/pretty_clif.rs b/compiler/rustc_codegen_cranelift/src/pretty_clif.rs
index a7af16268..e0a081c9d 100644
--- a/compiler/rustc_codegen_cranelift/src/pretty_clif.rs
+++ b/compiler/rustc_codegen_cranelift/src/pretty_clif.rs
@@ -245,7 +245,7 @@ pub(crate) fn write_clif_file(
for flag in isa.flags().iter() {
writeln!(file, "set {}", flag)?;
}
- write!(file, "target {}", isa.triple().architecture.to_string())?;
+ write!(file, "target {}", isa.triple().architecture)?;
for isa_flag in isa.isa_flags().iter() {
write!(file, " {}", isa_flag)?;
}
diff --git a/compiler/rustc_codegen_cranelift/src/unsize.rs b/compiler/rustc_codegen_cranelift/src/unsize.rs
index a0745582d..ff0e12410 100644
--- a/compiler/rustc_codegen_cranelift/src/unsize.rs
+++ b/compiler/rustc_codegen_cranelift/src/unsize.rs
@@ -28,9 +28,7 @@ pub(crate) fn unsized_info<'tcx>(
(
&ty::Dynamic(ref data_a, _, src_dyn_kind),
&ty::Dynamic(ref data_b, _, target_dyn_kind),
- ) => {
- assert_eq!(src_dyn_kind, target_dyn_kind);
-
+ ) if src_dyn_kind == target_dyn_kind => {
let old_info =
old_info.expect("unsized_info: missing old info for trait upcasting coercion");
if data_a.principal_def_id() == data_b.principal_def_id() {
@@ -55,7 +53,7 @@ pub(crate) fn unsized_info<'tcx>(
old_info
}
}
- (_, &ty::Dynamic(ref data, ..)) => crate::vtable::get_vtable(fx, source, data.principal()),
+ (_, ty::Dynamic(data, ..)) => crate::vtable::get_vtable(fx, source, data.principal()),
_ => bug!("unsized_info: invalid unsizing {:?} -> {:?}", source, target),
}
}
@@ -148,9 +146,9 @@ pub(crate) fn coerce_unsized_into<'tcx>(
(&ty::Adt(def_a, _), &ty::Adt(def_b, _)) => {
assert_eq!(def_a, def_b);
- for i in 0..def_a.variant(VariantIdx::new(0)).fields.len() {
- let src_f = src.value_field(fx, mir::Field::new(i));
- let dst_f = dst.place_field(fx, mir::Field::new(i));
+ for i in 0..def_a.variant(FIRST_VARIANT).fields.len() {
+ let src_f = src.value_field(fx, FieldIdx::new(i));
+ let dst_f = dst.place_field(fx, FieldIdx::new(i));
if dst_f.layout().is_zst() {
continue;
diff --git a/compiler/rustc_codegen_cranelift/src/value_and_place.rs b/compiler/rustc_codegen_cranelift/src/value_and_place.rs
index cc1edaa97..1b69862ce 100644
--- a/compiler/rustc_codegen_cranelift/src/value_and_place.rs
+++ b/compiler/rustc_codegen_cranelift/src/value_and_place.rs
@@ -3,13 +3,14 @@
use crate::prelude::*;
use cranelift_codegen::ir::immediates::Offset32;
+use cranelift_codegen::ir::{InstructionData, Opcode};
fn codegen_field<'tcx>(
fx: &mut FunctionCx<'_, '_, 'tcx>,
base: Pointer,
extra: Option<Value>,
layout: TyAndLayout<'tcx>,
- field: mir::Field,
+ field: FieldIdx,
) -> (Pointer, TyAndLayout<'tcx>) {
let field_offset = layout.fields.offset(field.index());
let field_layout = layout.field(&*fx, field.index());
@@ -209,7 +210,7 @@ impl<'tcx> CValue<'tcx> {
pub(crate) fn value_field(
self,
fx: &mut FunctionCx<'_, '_, 'tcx>,
- field: mir::Field,
+ field: FieldIdx,
) -> CValue<'tcx> {
let layout = self.1;
match self.0 {
@@ -457,6 +458,7 @@ impl<'tcx> CPlace<'tcx> {
}
}
+ #[track_caller]
pub(crate) fn to_ptr(self) -> Pointer {
match self.to_ptr_maybe_unsized() {
(ptr, None) => ptr,
@@ -464,6 +466,7 @@ impl<'tcx> CPlace<'tcx> {
}
}
+ #[track_caller]
pub(crate) fn to_ptr_maybe_unsized(self) -> (Pointer, Option<Value>) {
match self.inner {
CPlaceInner::Addr(ptr, extra) => (ptr, extra),
@@ -684,7 +687,7 @@ impl<'tcx> CPlace<'tcx> {
pub(crate) fn place_field(
self,
fx: &mut FunctionCx<'_, '_, 'tcx>,
- field: mir::Field,
+ field: FieldIdx,
) -> CPlace<'tcx> {
let layout = self.layout();
@@ -698,7 +701,8 @@ impl<'tcx> CPlace<'tcx> {
};
}
ty::Adt(adt_def, substs) if layout.ty.is_simd() => {
- let f0_ty = adt_def.non_enum_variant().fields[0].ty(fx.tcx, substs);
+ let f0 = &adt_def.non_enum_variant().fields[FieldIdx::from_u32(0)];
+ let f0_ty = f0.ty(fx.tcx, substs);
match f0_ty.kind() {
ty::Array(_, _) => {
@@ -787,7 +791,36 @@ impl<'tcx> CPlace<'tcx> {
index: Value,
) -> CPlace<'tcx> {
let (elem_layout, ptr) = match self.layout().ty.kind() {
- ty::Array(elem_ty, _) => (fx.layout_of(*elem_ty), self.to_ptr()),
+ ty::Array(elem_ty, _) => {
+ let elem_layout = fx.layout_of(*elem_ty);
+ match self.inner {
+ CPlaceInner::Var(local, var) => {
+ // This is a hack to handle `vector_val.0[1]`. It doesn't allow dynamic
+ // indexing.
+ let lane_idx = match fx.bcx.func.dfg.insts
+ [fx.bcx.func.dfg.value_def(index).unwrap_inst()]
+ {
+ InstructionData::UnaryImm { opcode: Opcode::Iconst, imm } => imm,
+ _ => bug!(
+ "Dynamic indexing into a vector type is not supported: {self:?}[{index}]"
+ ),
+ };
+ return CPlace {
+ inner: CPlaceInner::VarLane(
+ local,
+ var,
+ lane_idx.bits().try_into().unwrap(),
+ ),
+ layout: elem_layout,
+ };
+ }
+ CPlaceInner::Addr(addr, None) => (elem_layout, addr),
+ CPlaceInner::Addr(_, Some(_))
+ | CPlaceInner::VarPair(_, _, _)
+ | CPlaceInner::VarLane(_, _, _) => bug!("Can't index into {self:?}"),
+ }
+ // FIXME use VarLane in case of Var with simd type
+ }
ty::Slice(elem_ty) => (fx.layout_of(*elem_ty), self.to_ptr_maybe_unsized().0),
_ => bug!("place_index({:?})", self.layout().ty),
};
diff --git a/compiler/rustc_codegen_cranelift/src/vtable.rs b/compiler/rustc_codegen_cranelift/src/vtable.rs
index f04fb82de..b309695c1 100644
--- a/compiler/rustc_codegen_cranelift/src/vtable.rs
+++ b/compiler/rustc_codegen_cranelift/src/vtable.rs
@@ -43,17 +43,34 @@ pub(crate) fn min_align_of_obj(fx: &mut FunctionCx<'_, '_, '_>, vtable: Value) -
pub(crate) fn get_ptr_and_method_ref<'tcx>(
fx: &mut FunctionCx<'_, '_, 'tcx>,
- arg: CValue<'tcx>,
+ mut arg: CValue<'tcx>,
idx: usize,
) -> (Pointer, Value) {
let (ptr, vtable) = 'block: {
+ if let Abi::Scalar(_) = arg.layout().abi {
+ 'descend_newtypes: while !arg.layout().ty.is_unsafe_ptr() && !arg.layout().ty.is_ref() {
+ for i in 0..arg.layout().fields.count() {
+ let field = arg.value_field(fx, FieldIdx::new(i));
+ if !field.layout().is_zst() {
+ // we found the one non-zero-sized field that is allowed
+ // now find *its* non-zero-sized field, or stop if it's a
+ // pointer
+ arg = field;
+ continue 'descend_newtypes;
+ }
+ }
+
+ bug!("receiver has no non-zero-sized fields {:?}", arg);
+ }
+ }
+
if let ty::Ref(_, ty, _) = arg.layout().ty.kind() {
if ty.is_dyn_star() {
let inner_layout = fx.layout_of(arg.layout().ty.builtin_deref(true).unwrap().ty);
let dyn_star = CPlace::for_ptr(Pointer::new(arg.load_scalar(fx)), inner_layout);
- let ptr = dyn_star.place_field(fx, mir::Field::new(0)).to_ptr();
+ let ptr = dyn_star.place_field(fx, FieldIdx::new(0)).to_ptr();
let vtable =
- dyn_star.place_field(fx, mir::Field::new(1)).to_cvalue(fx).load_scalar(fx);
+ dyn_star.place_field(fx, FieldIdx::new(1)).to_cvalue(fx).load_scalar(fx);
break 'block (ptr, vtable);
}
}