author     Daniel Baumann <daniel.baumann@progress-linux.org>    2024-05-30 03:59:35 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>    2024-05-30 03:59:35 +0000
commit     d1b2d29528b7794b41e66fc2136e395a02f8529b
tree       a4a17504b260206dec3cf55b2dca82929a348ac2 /compiler/rustc_codegen_cranelift/src/intrinsics
parent     Releasing progress-linux version 1.72.1+dfsg1-1~progress7.99u1.
Merging upstream version 1.73.0+dfsg1.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'compiler/rustc_codegen_cranelift/src/intrinsics')
-rw-r--r--  compiler/rustc_codegen_cranelift/src/intrinsics/llvm.rs          |  20
-rw-r--r--  compiler/rustc_codegen_cranelift/src/intrinsics/llvm_aarch64.rs  |   4
-rw-r--r--  compiler/rustc_codegen_cranelift/src/intrinsics/llvm_x86.rs      | 278
-rw-r--r--  compiler/rustc_codegen_cranelift/src/intrinsics/mod.rs           |  45
-rw-r--r--  compiler/rustc_codegen_cranelift/src/intrinsics/simd.rs          |  58
5 files changed, 316 insertions, 89 deletions
diff --git a/compiler/rustc_codegen_cranelift/src/intrinsics/llvm.rs b/compiler/rustc_codegen_cranelift/src/intrinsics/llvm.rs
index f67fdb592..63b5402f2 100644
--- a/compiler/rustc_codegen_cranelift/src/intrinsics/llvm.rs
+++ b/compiler/rustc_codegen_cranelift/src/intrinsics/llvm.rs
@@ -3,23 +3,35 @@
use crate::intrinsics::*;
use crate::prelude::*;
-use rustc_middle::ty::subst::SubstsRef;
+use rustc_middle::ty::GenericArgsRef;
pub(crate) fn codegen_llvm_intrinsic_call<'tcx>(
fx: &mut FunctionCx<'_, '_, 'tcx>,
intrinsic: &str,
- substs: SubstsRef<'tcx>,
+ generic_args: GenericArgsRef<'tcx>,
args: &[mir::Operand<'tcx>],
ret: CPlace<'tcx>,
target: Option<BasicBlock>,
) {
if intrinsic.starts_with("llvm.aarch64") {
return llvm_aarch64::codegen_aarch64_llvm_intrinsic_call(
- fx, intrinsic, substs, args, ret, target,
+ fx,
+ intrinsic,
+ generic_args,
+ args,
+ ret,
+ target,
);
}
if intrinsic.starts_with("llvm.x86") {
- return llvm_x86::codegen_x86_llvm_intrinsic_call(fx, intrinsic, substs, args, ret, target);
+ return llvm_x86::codegen_x86_llvm_intrinsic_call(
+ fx,
+ intrinsic,
+ generic_args,
+ args,
+ ret,
+ target,
+ );
}
match intrinsic {
diff --git a/compiler/rustc_codegen_cranelift/src/intrinsics/llvm_aarch64.rs b/compiler/rustc_codegen_cranelift/src/intrinsics/llvm_aarch64.rs
index 33b2f4702..c20a99159 100644
--- a/compiler/rustc_codegen_cranelift/src/intrinsics/llvm_aarch64.rs
+++ b/compiler/rustc_codegen_cranelift/src/intrinsics/llvm_aarch64.rs
@@ -3,12 +3,12 @@
use crate::intrinsics::*;
use crate::prelude::*;
-use rustc_middle::ty::subst::SubstsRef;
+use rustc_middle::ty::GenericArgsRef;
pub(crate) fn codegen_aarch64_llvm_intrinsic_call<'tcx>(
fx: &mut FunctionCx<'_, '_, 'tcx>,
intrinsic: &str,
- _substs: SubstsRef<'tcx>,
+ _args: GenericArgsRef<'tcx>,
args: &[mir::Operand<'tcx>],
ret: CPlace<'tcx>,
target: Option<BasicBlock>,
diff --git a/compiler/rustc_codegen_cranelift/src/intrinsics/llvm_x86.rs b/compiler/rustc_codegen_cranelift/src/intrinsics/llvm_x86.rs
index 24ad0083a..fdd27a454 100644
--- a/compiler/rustc_codegen_cranelift/src/intrinsics/llvm_x86.rs
+++ b/compiler/rustc_codegen_cranelift/src/intrinsics/llvm_x86.rs
@@ -3,12 +3,12 @@
use crate::intrinsics::*;
use crate::prelude::*;
-use rustc_middle::ty::subst::SubstsRef;
+use rustc_middle::ty::GenericArgsRef;
pub(crate) fn codegen_x86_llvm_intrinsic_call<'tcx>(
fx: &mut FunctionCx<'_, '_, 'tcx>,
intrinsic: &str,
- _substs: SubstsRef<'tcx>,
+ _args: GenericArgsRef<'tcx>,
args: &[mir::Operand<'tcx>],
ret: CPlace<'tcx>,
target: Option<BasicBlock>,
@@ -18,6 +18,20 @@ pub(crate) fn codegen_x86_llvm_intrinsic_call<'tcx>(
// Spin loop hint
}
+ // Used by is_x86_feature_detected!();
+ "llvm.x86.xgetbv" => {
+ // FIXME use the actual xgetbv instruction
+ intrinsic_args!(fx, args => (v); intrinsic);
+
+ let v = v.load_scalar(fx);
+
+ // As of writing only XCR0 exists
+ fx.bcx.ins().trapnz(v, TrapCode::UnreachableCodeReached);
+
+ let res = fx.bcx.ins().iconst(types::I64, 1 /* bit 0 must be set */);
+ ret.write_cvalue(fx, CValue::by_val(res, fx.layout_of(fx.tcx.types.i64)));
+ }
+
// Used by `_mm_movemask_epi8` and `_mm256_movemask_epi8`
"llvm.x86.sse2.pmovmskb.128"
| "llvm.x86.avx2.pmovmskb"
@@ -53,7 +67,7 @@ pub(crate) fn codegen_x86_llvm_intrinsic_call<'tcx>(
let res = CValue::by_val(res, fx.layout_of(fx.tcx.types.i32));
ret.write_cvalue(fx, res);
}
- "llvm.x86.sse2.cmp.ps" | "llvm.x86.sse2.cmp.pd" => {
+ "llvm.x86.sse.cmp.ps" | "llvm.x86.sse2.cmp.pd" => {
let (x, y, kind) = match args {
[x, y, kind] => (x, y, kind),
_ => bug!("wrong number of args for intrinsic {intrinsic}"),
@@ -66,18 +80,95 @@ pub(crate) fn codegen_x86_llvm_intrinsic_call<'tcx>(
let flt_cc = match kind
.try_to_bits(Size::from_bytes(1))
.unwrap_or_else(|| panic!("kind not scalar: {:?}", kind))
+ .try_into()
+ .unwrap()
{
- 0 => FloatCC::Equal,
- 1 => FloatCC::LessThan,
- 2 => FloatCC::LessThanOrEqual,
- 7 => FloatCC::Ordered,
- 3 => FloatCC::Unordered,
- 4 => FloatCC::NotEqual,
- 5 => FloatCC::UnorderedOrGreaterThanOrEqual,
- 6 => FloatCC::UnorderedOrGreaterThan,
+ _CMP_EQ_OQ | _CMP_EQ_OS => FloatCC::Equal,
+ _CMP_LT_OS | _CMP_LT_OQ => FloatCC::LessThan,
+ _CMP_LE_OS | _CMP_LE_OQ => FloatCC::LessThanOrEqual,
+ _CMP_UNORD_Q | _CMP_UNORD_S => FloatCC::Unordered,
+ _CMP_NEQ_UQ | _CMP_NEQ_US => FloatCC::NotEqual,
+ _CMP_NLT_US | _CMP_NLT_UQ => FloatCC::UnorderedOrGreaterThanOrEqual,
+ _CMP_NLE_US | _CMP_NLE_UQ => FloatCC::UnorderedOrGreaterThan,
+ _CMP_ORD_Q | _CMP_ORD_S => FloatCC::Ordered,
+ _CMP_EQ_UQ | _CMP_EQ_US => FloatCC::UnorderedOrEqual,
+ _CMP_NGE_US | _CMP_NGE_UQ => FloatCC::UnorderedOrLessThan,
+ _CMP_NGT_US | _CMP_NGT_UQ => FloatCC::UnorderedOrLessThanOrEqual,
+ _CMP_FALSE_OQ | _CMP_FALSE_OS => todo!(),
+ _CMP_NEQ_OQ | _CMP_NEQ_OS => FloatCC::OrderedNotEqual,
+ _CMP_GE_OS | _CMP_GE_OQ => FloatCC::GreaterThanOrEqual,
+ _CMP_GT_OS | _CMP_GT_OQ => FloatCC::GreaterThan,
+ _CMP_TRUE_UQ | _CMP_TRUE_US => todo!(),
+
kind => unreachable!("kind {:?}", kind),
};
+ // Copied from stdarch
+ /// Equal (ordered, non-signaling)
+ const _CMP_EQ_OQ: i32 = 0x00;
+ /// Less-than (ordered, signaling)
+ const _CMP_LT_OS: i32 = 0x01;
+ /// Less-than-or-equal (ordered, signaling)
+ const _CMP_LE_OS: i32 = 0x02;
+ /// Unordered (non-signaling)
+ const _CMP_UNORD_Q: i32 = 0x03;
+ /// Not-equal (unordered, non-signaling)
+ const _CMP_NEQ_UQ: i32 = 0x04;
+ /// Not-less-than (unordered, signaling)
+ const _CMP_NLT_US: i32 = 0x05;
+ /// Not-less-than-or-equal (unordered, signaling)
+ const _CMP_NLE_US: i32 = 0x06;
+ /// Ordered (non-signaling)
+ const _CMP_ORD_Q: i32 = 0x07;
+ /// Equal (unordered, non-signaling)
+ const _CMP_EQ_UQ: i32 = 0x08;
+ /// Not-greater-than-or-equal (unordered, signaling)
+ const _CMP_NGE_US: i32 = 0x09;
+ /// Not-greater-than (unordered, signaling)
+ const _CMP_NGT_US: i32 = 0x0a;
+ /// False (ordered, non-signaling)
+ const _CMP_FALSE_OQ: i32 = 0x0b;
+ /// Not-equal (ordered, non-signaling)
+ const _CMP_NEQ_OQ: i32 = 0x0c;
+ /// Greater-than-or-equal (ordered, signaling)
+ const _CMP_GE_OS: i32 = 0x0d;
+ /// Greater-than (ordered, signaling)
+ const _CMP_GT_OS: i32 = 0x0e;
+ /// True (unordered, non-signaling)
+ const _CMP_TRUE_UQ: i32 = 0x0f;
+ /// Equal (ordered, signaling)
+ const _CMP_EQ_OS: i32 = 0x10;
+ /// Less-than (ordered, non-signaling)
+ const _CMP_LT_OQ: i32 = 0x11;
+ /// Less-than-or-equal (ordered, non-signaling)
+ const _CMP_LE_OQ: i32 = 0x12;
+ /// Unordered (signaling)
+ const _CMP_UNORD_S: i32 = 0x13;
+ /// Not-equal (unordered, signaling)
+ const _CMP_NEQ_US: i32 = 0x14;
+ /// Not-less-than (unordered, non-signaling)
+ const _CMP_NLT_UQ: i32 = 0x15;
+ /// Not-less-than-or-equal (unordered, non-signaling)
+ const _CMP_NLE_UQ: i32 = 0x16;
+ /// Ordered (signaling)
+ const _CMP_ORD_S: i32 = 0x17;
+ /// Equal (unordered, signaling)
+ const _CMP_EQ_US: i32 = 0x18;
+ /// Not-greater-than-or-equal (unordered, non-signaling)
+ const _CMP_NGE_UQ: i32 = 0x19;
+ /// Not-greater-than (unordered, non-signaling)
+ const _CMP_NGT_UQ: i32 = 0x1a;
+ /// False (ordered, signaling)
+ const _CMP_FALSE_OS: i32 = 0x1b;
+ /// Not-equal (ordered, signaling)
+ const _CMP_NEQ_OS: i32 = 0x1c;
+ /// Greater-than-or-equal (ordered, non-signaling)
+ const _CMP_GE_OQ: i32 = 0x1d;
+ /// Greater-than (ordered, non-signaling)
+ const _CMP_GT_OQ: i32 = 0x1e;
+ /// True (unordered, signaling)
+ const _CMP_TRUE_US: i32 = 0x1f;
+
simd_pair_for_each_lane(fx, x, y, ret, &|fx, lane_ty, res_lane_ty, x_lane, y_lane| {
let res_lane = match lane_ty.kind() {
ty::Float(_) => fx.bcx.ins().fcmp(flt_cc, x_lane, y_lane),
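The rewritten predicate match above covers the full 0x00..=0x1f immediate range used by the AVX compare intrinsics rather than only values 0 through 7, naming each value with the constants copied from stdarch. A minimal sketch of one predicate this arm lowers; lanes_lt is an illustrative wrapper, while _mm_cmp_ps and _CMP_LT_OS are the real std::arch items:

    #[cfg(target_arch = "x86_64")]
    #[target_feature(enable = "avx")]
    unsafe fn lanes_lt(
        a: std::arch::x86_64::__m128,
        b: std::arch::x86_64::__m128,
    ) -> std::arch::x86_64::__m128 {
        use std::arch::x86_64::{_mm_cmp_ps, _CMP_LT_OS};
        // kind = _CMP_LT_OS (0x01), which the match above maps to FloatCC::LessThan.
        _mm_cmp_ps::<{ _CMP_LT_OS }>(a, b)
    }
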
@@ -103,6 +194,23 @@ pub(crate) fn codegen_x86_llvm_intrinsic_call<'tcx>(
_ => fx.bcx.ins().iconst(types::I32, 0),
});
}
+ "llvm.x86.sse2.psrai.d" => {
+ let (a, imm8) = match args {
+ [a, imm8] => (a, imm8),
+ _ => bug!("wrong number of args for intrinsic {intrinsic}"),
+ };
+ let a = codegen_operand(fx, a);
+ let imm8 = crate::constant::mir_operand_get_const_val(fx, imm8)
+ .expect("llvm.x86.sse2.psrai.d imm8 not const");
+
+ simd_for_each_lane(fx, a, ret, &|fx, _lane_ty, _res_lane_ty, lane| match imm8
+ .try_to_bits(Size::from_bytes(4))
+ .unwrap_or_else(|| panic!("imm8 not scalar: {:?}", imm8))
+ {
+ imm8 if imm8 < 32 => fx.bcx.ins().sshr_imm(lane, i64::from(imm8 as u8)),
+ _ => fx.bcx.ins().iconst(types::I32, 0),
+ });
+ }
"llvm.x86.sse2.pslli.d" => {
let (a, imm8) = match args {
[a, imm8] => (a, imm8),
@@ -137,6 +245,23 @@ pub(crate) fn codegen_x86_llvm_intrinsic_call<'tcx>(
_ => fx.bcx.ins().iconst(types::I32, 0),
});
}
+ "llvm.x86.sse2.psrai.w" => {
+ let (a, imm8) = match args {
+ [a, imm8] => (a, imm8),
+ _ => bug!("wrong number of args for intrinsic {intrinsic}"),
+ };
+ let a = codegen_operand(fx, a);
+ let imm8 = crate::constant::mir_operand_get_const_val(fx, imm8)
+ .expect("llvm.x86.sse2.psrai.d imm8 not const");
+
+ simd_for_each_lane(fx, a, ret, &|fx, _lane_ty, _res_lane_ty, lane| match imm8
+ .try_to_bits(Size::from_bytes(4))
+ .unwrap_or_else(|| panic!("imm8 not scalar: {:?}", imm8))
+ {
+ imm8 if imm8 < 16 => fx.bcx.ins().sshr_imm(lane, i64::from(imm8 as u8)),
+ _ => fx.bcx.ins().iconst(types::I32, 0),
+ });
+ }
"llvm.x86.sse2.pslli.w" => {
let (a, imm8) = match args {
[a, imm8] => (a, imm8),
@@ -171,6 +296,57 @@ pub(crate) fn codegen_x86_llvm_intrinsic_call<'tcx>(
_ => fx.bcx.ins().iconst(types::I32, 0),
});
}
+ "llvm.x86.avx.psrai.d" => {
+ let (a, imm8) = match args {
+ [a, imm8] => (a, imm8),
+ _ => bug!("wrong number of args for intrinsic {intrinsic}"),
+ };
+ let a = codegen_operand(fx, a);
+ let imm8 = crate::constant::mir_operand_get_const_val(fx, imm8)
+ .expect("llvm.x86.avx.psrai.d imm8 not const");
+
+ simd_for_each_lane(fx, a, ret, &|fx, _lane_ty, _res_lane_ty, lane| match imm8
+ .try_to_bits(Size::from_bytes(4))
+ .unwrap_or_else(|| panic!("imm8 not scalar: {:?}", imm8))
+ {
+ imm8 if imm8 < 32 => fx.bcx.ins().sshr_imm(lane, i64::from(imm8 as u8)),
+ _ => fx.bcx.ins().iconst(types::I32, 0),
+ });
+ }
+ "llvm.x86.sse2.psrli.q" => {
+ let (a, imm8) = match args {
+ [a, imm8] => (a, imm8),
+ _ => bug!("wrong number of args for intrinsic {intrinsic}"),
+ };
+ let a = codegen_operand(fx, a);
+ let imm8 = crate::constant::mir_operand_get_const_val(fx, imm8)
+ .expect("llvm.x86.avx.psrli.q imm8 not const");
+
+ simd_for_each_lane(fx, a, ret, &|fx, _lane_ty, _res_lane_ty, lane| match imm8
+ .try_to_bits(Size::from_bytes(4))
+ .unwrap_or_else(|| panic!("imm8 not scalar: {:?}", imm8))
+ {
+ imm8 if imm8 < 64 => fx.bcx.ins().ushr_imm(lane, i64::from(imm8 as u8)),
+ _ => fx.bcx.ins().iconst(types::I32, 0),
+ });
+ }
+ "llvm.x86.sse2.pslli.q" => {
+ let (a, imm8) = match args {
+ [a, imm8] => (a, imm8),
+ _ => bug!("wrong number of args for intrinsic {intrinsic}"),
+ };
+ let a = codegen_operand(fx, a);
+ let imm8 = crate::constant::mir_operand_get_const_val(fx, imm8)
+ .expect("llvm.x86.avx.pslli.q imm8 not const");
+
+ simd_for_each_lane(fx, a, ret, &|fx, _lane_ty, _res_lane_ty, lane| match imm8
+ .try_to_bits(Size::from_bytes(4))
+ .unwrap_or_else(|| panic!("imm8 not scalar: {:?}", imm8))
+ {
+ imm8 if imm8 < 64 => fx.bcx.ins().ishl_imm(lane, i64::from(imm8 as u8)),
+ _ => fx.bcx.ins().iconst(types::I32, 0),
+ });
+ }
"llvm.x86.avx.pslli.d" => {
let (a, imm8) = match args {
[a, imm8] => (a, imm8),
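The added shift-by-immediate arms (psrai, psrli, pslli) all follow the same pattern: read the constant immediate, apply the shift per lane, and produce zero once the immediate reaches the lane width. A usage-side sketch for one of them; srai_by_7 is an illustrative name, while _mm_srai_epi32 is the real SSE2 intrinsic:

    #[cfg(target_arch = "x86_64")]
    #[target_feature(enable = "sse2")]
    unsafe fn srai_by_7(v: std::arch::x86_64::__m128i) -> std::arch::x86_64::__m128i {
        // Reaches the backend as `llvm.x86.sse2.psrai.d`: each i32 lane is
        // shifted right arithmetically by the constant 7.
        std::arch::x86_64::_mm_srai_epi32::<7>(v)
    }
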
@@ -205,6 +381,23 @@ pub(crate) fn codegen_x86_llvm_intrinsic_call<'tcx>(
_ => fx.bcx.ins().iconst(types::I32, 0),
});
}
+ "llvm.x86.avx2.psrai.w" => {
+ let (a, imm8) = match args {
+ [a, imm8] => (a, imm8),
+ _ => bug!("wrong number of args for intrinsic {intrinsic}"),
+ };
+ let a = codegen_operand(fx, a);
+ let imm8 = crate::constant::mir_operand_get_const_val(fx, imm8)
+ .expect("llvm.x86.avx.psrai.w imm8 not const");
+
+ simd_for_each_lane(fx, a, ret, &|fx, _lane_ty, _res_lane_ty, lane| match imm8
+ .try_to_bits(Size::from_bytes(4))
+ .unwrap_or_else(|| panic!("imm8 not scalar: {:?}", imm8))
+ {
+ imm8 if imm8 < 16 => fx.bcx.ins().sshr_imm(lane, i64::from(imm8 as u8)),
+ _ => fx.bcx.ins().iconst(types::I32, 0),
+ });
+ }
"llvm.x86.avx2.pslli.w" => {
let (a, imm8) = match args {
[a, imm8] => (a, imm8),
@@ -313,7 +506,7 @@ pub(crate) fn codegen_x86_llvm_intrinsic_call<'tcx>(
ret.place_lane(fx, 2).to_ptr().store(fx, res_2, MemFlags::trusted());
ret.place_lane(fx, 3).to_ptr().store(fx, res_3, MemFlags::trusted());
}
- "llvm.x86.sse2.storeu.dq" => {
+ "llvm.x86.sse2.storeu.dq" | "llvm.x86.sse2.storeu.pd" => {
intrinsic_args!(fx, args => (mem_addr, a); intrinsic);
let mem_addr = mem_addr.load_scalar(fx);
@@ -321,17 +514,45 @@ pub(crate) fn codegen_x86_llvm_intrinsic_call<'tcx>(
let dest = CPlace::for_ptr(Pointer::new(mem_addr), a.layout());
dest.write_cvalue(fx, a);
}
- "llvm.x86.addcarry.64" => {
+ "llvm.x86.ssse3.pabs.b.128" | "llvm.x86.ssse3.pabs.w.128" | "llvm.x86.ssse3.pabs.d.128" => {
+ let a = match args {
+ [a] => a,
+ _ => bug!("wrong number of args for intrinsic {intrinsic}"),
+ };
+ let a = codegen_operand(fx, a);
+
+ simd_for_each_lane(fx, a, ret, &|fx, _lane_ty, _res_lane_ty, lane| {
+ fx.bcx.ins().iabs(lane)
+ });
+ }
+ "llvm.x86.addcarry.32" | "llvm.x86.addcarry.64" => {
intrinsic_args!(fx, args => (c_in, a, b); intrinsic);
let c_in = c_in.load_scalar(fx);
- llvm_add_sub(fx, BinOp::Add, ret, c_in, a, b);
+ let (cb_out, c) = llvm_add_sub(fx, BinOp::Add, c_in, a, b);
+
+ let layout = fx.layout_of(Ty::new_tup(fx.tcx, &[fx.tcx.types.u8, a.layout().ty]));
+ let val = CValue::by_val_pair(cb_out, c, layout);
+ ret.write_cvalue(fx, val);
}
- "llvm.x86.subborrow.64" => {
+ "llvm.x86.addcarryx.u32" | "llvm.x86.addcarryx.u64" => {
+ intrinsic_args!(fx, args => (c_in, a, b, out); intrinsic);
+ let c_in = c_in.load_scalar(fx);
+
+ let (cb_out, c) = llvm_add_sub(fx, BinOp::Add, c_in, a, b);
+
+ Pointer::new(out.load_scalar(fx)).store(fx, c, MemFlags::trusted());
+ ret.write_cvalue(fx, CValue::by_val(cb_out, fx.layout_of(fx.tcx.types.u8)));
+ }
+ "llvm.x86.subborrow.32" | "llvm.x86.subborrow.64" => {
intrinsic_args!(fx, args => (b_in, a, b); intrinsic);
let b_in = b_in.load_scalar(fx);
- llvm_add_sub(fx, BinOp::Sub, ret, b_in, a, b);
+ let (cb_out, c) = llvm_add_sub(fx, BinOp::Sub, b_in, a, b);
+
+ let layout = fx.layout_of(Ty::new_tup(fx.tcx, &[fx.tcx.types.u8, a.layout().ty]));
+ let val = CValue::by_val_pair(cb_out, c, layout);
+ ret.write_cvalue(fx, val);
}
_ => {
fx.tcx
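The carry/borrow arms above now also cover the 32-bit variants and the addcarryx forms, which write the sum through an out pointer instead of returning a pair. A usage-side sketch; add_u64_with_carry is an illustrative wrapper, while _addcarry_u64 is the real std::arch intrinsic:

    #[cfg(target_arch = "x86_64")]
    unsafe fn add_u64_with_carry(c_in: u8, a: u64, b: u64) -> (u8, u64) {
        let mut sum = 0u64;
        // Lowers to `llvm.x86.addcarry.64`: returns the carry-out, writes the sum.
        let carry = std::arch::x86_64::_addcarry_u64(c_in, a, b, &mut sum);
        (carry, sum)
    }
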
@@ -356,21 +577,11 @@ pub(crate) fn codegen_x86_llvm_intrinsic_call<'tcx>(
fn llvm_add_sub<'tcx>(
fx: &mut FunctionCx<'_, '_, 'tcx>,
bin_op: BinOp,
- ret: CPlace<'tcx>,
cb_in: Value,
a: CValue<'tcx>,
b: CValue<'tcx>,
-) {
- assert_eq!(
- a.layout().ty,
- fx.tcx.types.u64,
- "llvm.x86.addcarry.64/llvm.x86.subborrow.64 second operand must be u64"
- );
- assert_eq!(
- b.layout().ty,
- fx.tcx.types.u64,
- "llvm.x86.addcarry.64/llvm.x86.subborrow.64 third operand must be u64"
- );
+) -> (Value, Value) {
+ assert_eq!(a.layout().ty, b.layout().ty);
// c + carry -> c + first intermediate carry or borrow respectively
let int0 = crate::num::codegen_checked_int_binop(fx, bin_op, a, b);
@@ -378,15 +589,14 @@ fn llvm_add_sub<'tcx>(
let cb0 = int0.value_field(fx, FieldIdx::new(1)).load_scalar(fx);
// c + carry -> c + second intermediate carry or borrow respectively
- let cb_in_as_u64 = fx.bcx.ins().uextend(types::I64, cb_in);
- let cb_in_as_u64 = CValue::by_val(cb_in_as_u64, fx.layout_of(fx.tcx.types.u64));
- let int1 = crate::num::codegen_checked_int_binop(fx, bin_op, c, cb_in_as_u64);
+ let clif_ty = fx.clif_type(a.layout().ty).unwrap();
+ let cb_in_as_int = fx.bcx.ins().uextend(clif_ty, cb_in);
+ let cb_in_as_int = CValue::by_val(cb_in_as_int, fx.layout_of(a.layout().ty));
+ let int1 = crate::num::codegen_checked_int_binop(fx, bin_op, c, cb_in_as_int);
let (c, cb1) = int1.load_scalar_pair(fx);
// carry0 | carry1 -> carry or borrow respectively
let cb_out = fx.bcx.ins().bor(cb0, cb1);
- let layout = fx.layout_of(Ty::new_tup(fx.tcx, &[fx.tcx.types.u8, fx.tcx.types.u64]));
- let val = CValue::by_val_pair(cb_out, c, layout);
- ret.write_cvalue(fx, val);
+ (cb_out, c)
}
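A plain-Rust model of what the reworked llvm_add_sub computes for one width, shown here for addition on u64; this sketches the semantics only, not the codegen, and add_with_carry_u64 is a made-up name:

    fn add_with_carry_u64(c_in: u8, a: u64, b: u64) -> (u8, u64) {
        let (sum0, carry0) = a.overflowing_add(b);                   // first intermediate carry
        let (sum1, carry1) = sum0.overflowing_add(u64::from(c_in));  // second intermediate carry
        (u8::from(carry0 | carry1), sum1)                            // carry0 | carry1 -> carry out
    }
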
diff --git a/compiler/rustc_codegen_cranelift/src/intrinsics/mod.rs b/compiler/rustc_codegen_cranelift/src/intrinsics/mod.rs
index 5862f1829..36e9ba9c7 100644
--- a/compiler/rustc_codegen_cranelift/src/intrinsics/mod.rs
+++ b/compiler/rustc_codegen_cranelift/src/intrinsics/mod.rs
@@ -24,7 +24,7 @@ pub(crate) use llvm::codegen_llvm_intrinsic_call;
use rustc_middle::ty;
use rustc_middle::ty::layout::{HasParamEnv, ValidityRequirement};
use rustc_middle::ty::print::{with_no_trimmed_paths, with_no_visible_paths};
-use rustc_middle::ty::subst::SubstsRef;
+use rustc_middle::ty::GenericArgsRef;
use rustc_span::symbol::{kw, sym, Symbol};
use crate::prelude::*;
@@ -213,13 +213,13 @@ pub(crate) fn codegen_intrinsic_call<'tcx>(
source_info: mir::SourceInfo,
) {
let intrinsic = fx.tcx.item_name(instance.def_id());
- let substs = instance.substs;
+ let instance_args = instance.args;
if intrinsic.as_str().starts_with("simd_") {
self::simd::codegen_simd_intrinsic_call(
fx,
intrinsic,
- substs,
+ instance_args,
args,
destination,
target.expect("target for simd intrinsic"),
@@ -233,7 +233,7 @@ pub(crate) fn codegen_intrinsic_call<'tcx>(
fx,
instance,
intrinsic,
- substs,
+ instance_args,
args,
destination,
target,
@@ -365,7 +365,7 @@ fn codegen_regular_intrinsic_call<'tcx>(
fx: &mut FunctionCx<'_, '_, 'tcx>,
instance: Instance<'tcx>,
intrinsic: Symbol,
- substs: SubstsRef<'tcx>,
+ generic_args: GenericArgsRef<'tcx>,
args: &[mir::Operand<'tcx>],
ret: CPlace<'tcx>,
destination: Option<BasicBlock>,
@@ -394,7 +394,7 @@ fn codegen_regular_intrinsic_call<'tcx>(
let dst = dst.load_scalar(fx);
let count = count.load_scalar(fx);
- let elem_ty = substs.type_at(0);
+ let elem_ty = generic_args.type_at(0);
let elem_size: u64 = fx.layout_of(elem_ty).size.bytes();
assert_eq!(args.len(), 3);
let byte_amount =
@@ -410,7 +410,7 @@ fn codegen_regular_intrinsic_call<'tcx>(
let src = src.load_scalar(fx);
let count = count.load_scalar(fx);
- let elem_ty = substs.type_at(0);
+ let elem_ty = generic_args.type_at(0);
let elem_size: u64 = fx.layout_of(elem_ty).size.bytes();
assert_eq!(args.len(), 3);
let byte_amount =
@@ -428,7 +428,7 @@ fn codegen_regular_intrinsic_call<'tcx>(
sym::size_of_val => {
intrinsic_args!(fx, args => (ptr); intrinsic);
- let layout = fx.layout_of(substs.type_at(0));
+ let layout = fx.layout_of(generic_args.type_at(0));
// Note: Can't use is_unsized here as truly unsized types need to take the fixed size
// branch
let size = if let Abi::ScalarPair(_, _) = ptr.layout().abi {
@@ -443,7 +443,7 @@ fn codegen_regular_intrinsic_call<'tcx>(
sym::min_align_of_val => {
intrinsic_args!(fx, args => (ptr); intrinsic);
- let layout = fx.layout_of(substs.type_at(0));
+ let layout = fx.layout_of(generic_args.type_at(0));
// Note: Can't use is_unsized here as truly unsized types need to take the fixed size
// branch
let align = if let Abi::ScalarPair(_, _) = ptr.layout().abi {
@@ -602,7 +602,7 @@ fn codegen_regular_intrinsic_call<'tcx>(
sym::assert_inhabited | sym::assert_zero_valid | sym::assert_mem_uninitialized_valid => {
intrinsic_args!(fx, args => (); intrinsic);
- let ty = substs.type_at(0);
+ let ty = generic_args.type_at(0);
let requirement = ValidityRequirement::from_intrinsic(intrinsic);
@@ -647,12 +647,13 @@ fn codegen_regular_intrinsic_call<'tcx>(
let val = CValue::by_ref(Pointer::new(ptr.load_scalar(fx)), inner_layout);
ret.write_cvalue(fx, val);
}
- sym::volatile_store | sym::unaligned_volatile_store => {
+ sym::volatile_store | sym::unaligned_volatile_store | sym::nontemporal_store => {
intrinsic_args!(fx, args => (ptr, val); intrinsic);
let ptr = ptr.load_scalar(fx);
// Cranelift treats stores as volatile by default
// FIXME correctly handle unaligned_volatile_store
+ // FIXME actually do nontemporal stores if requested
let dest = CPlace::for_ptr(Pointer::new(ptr), val.layout());
dest.write_cvalue(fx, val);
}
@@ -674,7 +675,7 @@ fn codegen_regular_intrinsic_call<'tcx>(
intrinsic_args!(fx, args => (ptr, base); intrinsic);
let ptr = ptr.load_scalar(fx);
let base = base.load_scalar(fx);
- let ty = substs.type_at(0);
+ let ty = generic_args.type_at(0);
let pointee_size: u64 = fx.layout_of(ty).size.bytes();
let diff_bytes = fx.bcx.ins().isub(ptr, base);
@@ -720,7 +721,7 @@ fn codegen_regular_intrinsic_call<'tcx>(
intrinsic_args!(fx, args => (ptr); intrinsic);
let ptr = ptr.load_scalar(fx);
- let ty = substs.type_at(0);
+ let ty = generic_args.type_at(0);
match ty.kind() {
ty::Uint(UintTy::U128) | ty::Int(IntTy::I128) => {
// FIXME implement 128bit atomics
@@ -751,7 +752,7 @@ fn codegen_regular_intrinsic_call<'tcx>(
intrinsic_args!(fx, args => (ptr, val); intrinsic);
let ptr = ptr.load_scalar(fx);
- let ty = substs.type_at(0);
+ let ty = generic_args.type_at(0);
match ty.kind() {
ty::Uint(UintTy::U128) | ty::Int(IntTy::I128) => {
// FIXME implement 128bit atomics
@@ -1128,7 +1129,7 @@ fn codegen_regular_intrinsic_call<'tcx>(
let lhs_ref = lhs_ref.load_scalar(fx);
let rhs_ref = rhs_ref.load_scalar(fx);
- let size = fx.layout_of(substs.type_at(0)).layout.size();
+ let size = fx.layout_of(generic_args.type_at(0)).layout.size();
// FIXME add and use emit_small_memcmp
let is_eq_value = if size == Size::ZERO {
// No bytes means they're trivially equal
@@ -1154,6 +1155,20 @@ fn codegen_regular_intrinsic_call<'tcx>(
ret.write_cvalue(fx, CValue::by_val(is_eq_value, ret.layout()));
}
+ sym::compare_bytes => {
+ intrinsic_args!(fx, args => (lhs_ptr, rhs_ptr, bytes_val); intrinsic);
+ let lhs_ptr = lhs_ptr.load_scalar(fx);
+ let rhs_ptr = rhs_ptr.load_scalar(fx);
+ let bytes_val = bytes_val.load_scalar(fx);
+
+ let params = vec![AbiParam::new(fx.pointer_type); 3];
+ let returns = vec![AbiParam::new(types::I32)];
+ let args = &[lhs_ptr, rhs_ptr, bytes_val];
+ // Here we assume that the `memcmp` provided by the target is a NOP for size 0.
+ let cmp = fx.lib_call("memcmp", params, returns, args)[0];
+ ret.write_cvalue(fx, CValue::by_val(cmp, ret.layout()));
+ }
+
sym::const_allocate => {
intrinsic_args!(fx, args => (_size, _align); intrinsic);
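The new compare_bytes arm lowers the intrinsic to a libcall to memcmp and relies on the target's memcmp tolerating a zero length. A rough model of the intrinsic's contract in plain Rust; compare_bytes_model is an illustrative name, not the compiler's implementation:

    fn compare_bytes_model(a: &[u8], b: &[u8]) -> i32 {
        debug_assert_eq!(a.len(), b.len());
        // memcmp-style three-way comparison: negative, zero, or positive.
        match a.cmp(b) {
            std::cmp::Ordering::Less => -1,
            std::cmp::Ordering::Equal => 0,
            std::cmp::Ordering::Greater => 1,
        }
    }
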
diff --git a/compiler/rustc_codegen_cranelift/src/intrinsics/simd.rs b/compiler/rustc_codegen_cranelift/src/intrinsics/simd.rs
index 6741362e8..9863e40b5 100644
--- a/compiler/rustc_codegen_cranelift/src/intrinsics/simd.rs
+++ b/compiler/rustc_codegen_cranelift/src/intrinsics/simd.rs
@@ -1,6 +1,6 @@
//! Codegen `extern "platform-intrinsic"` intrinsics.
-use rustc_middle::ty::subst::SubstsRef;
+use rustc_middle::ty::GenericArgsRef;
use rustc_span::Symbol;
use rustc_target::abi::Endian;
@@ -21,7 +21,7 @@ fn report_simd_type_validation_error(
pub(super) fn codegen_simd_intrinsic_call<'tcx>(
fx: &mut FunctionCx<'_, '_, 'tcx>,
intrinsic: Symbol,
- _substs: SubstsRef<'tcx>,
+ _args: GenericArgsRef<'tcx>,
args: &[mir::Operand<'tcx>],
ret: CPlace<'tcx>,
target: BasicBlock,
@@ -117,8 +117,8 @@ pub(super) fn codegen_simd_intrinsic_call<'tcx>(
});
}
- // simd_shuffle32<T, U>(x: T, y: T, idx: [u32; 32]) -> U
- _ if intrinsic.as_str().starts_with("simd_shuffle") => {
+ // simd_shuffle<T, I, U>(x: T, y: T, idx: I) -> U
+ sym::simd_shuffle => {
let (x, y, idx) = match args {
[x, y, idx] => (x, y, idx),
_ => {
@@ -133,36 +133,26 @@ pub(super) fn codegen_simd_intrinsic_call<'tcx>(
return;
}
- // If this intrinsic is the older "simd_shuffleN" form, simply parse the integer.
- // If there is no suffix, use the index array length.
- let n: u16 = if intrinsic == sym::simd_shuffle {
- // Make sure this is actually an array, since typeck only checks the length-suffixed
- // version of this intrinsic.
- let idx_ty = fx.monomorphize(idx.ty(fx.mir, fx.tcx));
- match idx_ty.kind() {
- ty::Array(ty, len) if matches!(ty.kind(), ty::Uint(ty::UintTy::U32)) => len
- .try_eval_target_usize(fx.tcx, ty::ParamEnv::reveal_all())
- .unwrap_or_else(|| {
- span_bug!(span, "could not evaluate shuffle index array length")
- })
- .try_into()
- .unwrap(),
- _ => {
- fx.tcx.sess.span_err(
- span,
- format!(
- "simd_shuffle index must be an array of `u32`, got `{}`",
- idx_ty,
- ),
- );
- // Prevent verifier error
- fx.bcx.ins().trap(TrapCode::UnreachableCodeReached);
- return;
- }
+ // Make sure this is actually an array, since typeck only checks the length-suffixed
+ // version of this intrinsic.
+ let idx_ty = fx.monomorphize(idx.ty(fx.mir, fx.tcx));
+ let n: u16 = match idx_ty.kind() {
+ ty::Array(ty, len) if matches!(ty.kind(), ty::Uint(ty::UintTy::U32)) => len
+ .try_eval_target_usize(fx.tcx, ty::ParamEnv::reveal_all())
+ .unwrap_or_else(|| {
+ span_bug!(span, "could not evaluate shuffle index array length")
+ })
+ .try_into()
+ .unwrap(),
+ _ => {
+ fx.tcx.sess.span_err(
+ span,
+ format!("simd_shuffle index must be an array of `u32`, got `{}`", idx_ty),
+ );
+ // Prevent verifier error
+ fx.bcx.ins().trap(TrapCode::UnreachableCodeReached);
+ return;
}
- } else {
- // FIXME remove this case
- intrinsic.as_str()["simd_shuffle".len()..].parse().unwrap()
};
assert_eq!(x.layout(), y.layout());
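With the length-suffixed simd_shuffleN spelling removed, only the generic simd_shuffle form remains, and the index operand's array type alone fixes the output width. A declaration-side sketch of the shape this hunk now expects, written against the 1.73-era nightly platform-intrinsic ABI; the F32x4 type is made up for illustration:

    #![feature(platform_intrinsics, repr_simd)]

    #[repr(simd)]
    #[derive(Copy, Clone)]
    struct F32x4(f32, f32, f32, f32);

    extern "platform-intrinsic" {
        // `idx` must be a const array of u32; its length determines the lanes of `U`.
        fn simd_shuffle<T, I, U>(x: T, y: T, idx: I) -> U;
    }
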
@@ -179,7 +169,7 @@ pub(super) fn codegen_simd_intrinsic_call<'tcx>(
let indexes = {
use rustc_middle::mir::interpret::*;
let idx_const = crate::constant::mir_operand_get_const_val(fx, idx)
- .expect("simd_shuffle* idx not const");
+ .expect("simd_shuffle idx not const");
let idx_bytes = match idx_const {
ConstValue::ByRef { alloc, offset } => {