| author | Daniel Baumann <daniel.baumann@progress-linux.org> | 2024-04-17 12:19:13 +0000 |
|---|---|---|
| committer | Daniel Baumann <daniel.baumann@progress-linux.org> | 2024-04-17 12:19:13 +0000 |
| commit | 218caa410aa38c29984be31a5229b9fa717560ee (patch) | |
| tree | c54bd55eeb6e4c508940a30e94c0032fbd45d677 /compiler/rustc_codegen_cranelift/src/intrinsics | |
| parent | Releasing progress-linux version 1.67.1+dfsg1-1~progress7.99u1. (diff) | |
| download | rustc-218caa410aa38c29984be31a5229b9fa717560ee.tar.xz, rustc-218caa410aa38c29984be31a5229b9fa717560ee.zip | |
Merging upstream version 1.68.2+dfsg1.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'compiler/rustc_codegen_cranelift/src/intrinsics')

- compiler/rustc_codegen_cranelift/src/intrinsics/llvm.rs
- compiler/rustc_codegen_cranelift/src/intrinsics/llvm_aarch64.rs (new)
- compiler/rustc_codegen_cranelift/src/intrinsics/llvm_x86.rs (new)
- compiler/rustc_codegen_cranelift/src/intrinsics/mod.rs
- compiler/rustc_codegen_cranelift/src/intrinsics/simd.rs

5 files changed, 454 insertions, 250 deletions
diff --git a/compiler/rustc_codegen_cranelift/src/intrinsics/llvm.rs b/compiler/rustc_codegen_cranelift/src/intrinsics/llvm.rs
index 783d426c3..f722e5228 100644
--- a/compiler/rustc_codegen_cranelift/src/intrinsics/llvm.rs
+++ b/compiler/rustc_codegen_cranelift/src/intrinsics/llvm.rs
@@ -8,135 +8,37 @@ use rustc_middle::ty::subst::SubstsRef;
 pub(crate) fn codegen_llvm_intrinsic_call<'tcx>(
     fx: &mut FunctionCx<'_, '_, 'tcx>,
     intrinsic: &str,
-    _substs: SubstsRef<'tcx>,
+    substs: SubstsRef<'tcx>,
     args: &[mir::Operand<'tcx>],
     ret: CPlace<'tcx>,
     target: Option<BasicBlock>,
 ) {
-    match intrinsic {
-        "llvm.x86.sse2.pause" | "llvm.aarch64.isb" => {
-            // Spin loop hint
-        }
+    if intrinsic.starts_with("llvm.aarch64") {
+        return llvm_aarch64::codegen_aarch64_llvm_intrinsic_call(
+            fx, intrinsic, substs, args, ret, target,
+        );
+    }
+    if intrinsic.starts_with("llvm.x86") {
+        return llvm_x86::codegen_x86_llvm_intrinsic_call(fx, intrinsic, substs, args, ret, target);
+    }

-        // Used by `_mm_movemask_epi8` and `_mm256_movemask_epi8`
-        "llvm.x86.sse2.pmovmskb.128" | "llvm.x86.avx2.pmovmskb" | "llvm.x86.sse2.movmsk.pd" => {
+    match intrinsic {
+        _ if intrinsic.starts_with("llvm.ctlz.v") => {
             intrinsic_args!(fx, args => (a); intrinsic);

-            let (lane_count, lane_ty) = a.layout().ty.simd_size_and_type(fx.tcx);
-            let lane_ty = fx.clif_type(lane_ty).unwrap();
-            assert!(lane_count <= 32);
-
-            let mut res = fx.bcx.ins().iconst(types::I32, 0);
-
-            for lane in (0..lane_count).rev() {
-                let a_lane = a.value_lane(fx, lane).load_scalar(fx);
-
-                // cast float to int
-                let a_lane = match lane_ty {
-                    types::F32 => fx.bcx.ins().bitcast(types::I32, a_lane),
-                    types::F64 => fx.bcx.ins().bitcast(types::I64, a_lane),
-                    _ => a_lane,
-                };
-
-                // extract sign bit of an int
-                let a_lane_sign = fx.bcx.ins().ushr_imm(a_lane, i64::from(lane_ty.bits() - 1));
-
-                // shift sign bit into result
-                let a_lane_sign = clif_intcast(fx, a_lane_sign, types::I32, false);
-                res = fx.bcx.ins().ishl_imm(res, 1);
-                res = fx.bcx.ins().bor(res, a_lane_sign);
-            }
-
-            let res = CValue::by_val(res, fx.layout_of(fx.tcx.types.i32));
-            ret.write_cvalue(fx, res);
-        }
-        "llvm.x86.sse2.cmp.ps" | "llvm.x86.sse2.cmp.pd" => {
-            let (x, y, kind) = match args {
-                [x, y, kind] => (x, y, kind),
-                _ => bug!("wrong number of args for intrinsic {intrinsic}"),
-            };
-            let x = codegen_operand(fx, x);
-            let y = codegen_operand(fx, y);
-            let kind = crate::constant::mir_operand_get_const_val(fx, kind)
-                .expect("llvm.x86.sse2.cmp.* kind not const");
-
-            let flt_cc = match kind
-                .try_to_bits(Size::from_bytes(1))
-                .unwrap_or_else(|| panic!("kind not scalar: {:?}", kind))
-            {
-                0 => FloatCC::Equal,
-                1 => FloatCC::LessThan,
-                2 => FloatCC::LessThanOrEqual,
-                7 => FloatCC::Ordered,
-                3 => FloatCC::Unordered,
-                4 => FloatCC::NotEqual,
-                5 => FloatCC::UnorderedOrGreaterThanOrEqual,
-                6 => FloatCC::UnorderedOrGreaterThan,
-                kind => unreachable!("kind {:?}", kind),
-            };
-
-            simd_pair_for_each_lane(fx, x, y, ret, &|fx, lane_ty, res_lane_ty, x_lane, y_lane| {
-                let res_lane = match lane_ty.kind() {
-                    ty::Float(_) => fx.bcx.ins().fcmp(flt_cc, x_lane, y_lane),
-                    _ => unreachable!("{:?}", lane_ty),
-                };
-                bool_to_zero_or_max_uint(fx, res_lane_ty, res_lane)
+            simd_for_each_lane(fx, a, ret, &|fx, _lane_ty, _res_lane_ty, lane| {
+                fx.bcx.ins().clz(lane)
             });
         }
-        "llvm.x86.sse2.psrli.d" => {
-            let (a, imm8) = match args {
-                [a, imm8] => (a, imm8),
-                _ => bug!("wrong number of args for intrinsic {intrinsic}"),
-            };
-            let a = codegen_operand(fx, a);
-            let imm8 = crate::constant::mir_operand_get_const_val(fx, imm8)
-                .expect("llvm.x86.sse2.psrli.d imm8 not const");
-
-            simd_for_each_lane(fx, a, ret, &|fx, _lane_ty, _res_lane_ty, lane| match imm8
-                .try_to_bits(Size::from_bytes(4))
-                .unwrap_or_else(|| panic!("imm8 not scalar: {:?}", imm8))
-            {
-                imm8 if imm8 < 32 => fx.bcx.ins().ushr_imm(lane, i64::from(imm8 as u8)),
-                _ => fx.bcx.ins().iconst(types::I32, 0),
-            });
-        }
-        "llvm.x86.sse2.pslli.d" => {
-            let (a, imm8) = match args {
-                [a, imm8] => (a, imm8),
-                _ => bug!("wrong number of args for intrinsic {intrinsic}"),
-            };
-            let a = codegen_operand(fx, a);
-            let imm8 = crate::constant::mir_operand_get_const_val(fx, imm8)
-                .expect("llvm.x86.sse2.psrli.d imm8 not const");
+        _ if intrinsic.starts_with("llvm.ctpop.v") => {
             intrinsic_args!(fx, args => (a); intrinsic);

-            simd_for_each_lane(fx, a, ret, &|fx, _lane_ty, _res_lane_ty, lane| match imm8
-                .try_to_bits(Size::from_bytes(4))
-                .unwrap_or_else(|| panic!("imm8 not scalar: {:?}", imm8))
-            {
-                imm8 if imm8 < 32 => fx.bcx.ins().ishl_imm(lane, i64::from(imm8 as u8)),
-                _ => fx.bcx.ins().iconst(types::I32, 0),
+            simd_for_each_lane(fx, a, ret, &|fx, _lane_ty, _res_lane_ty, lane| {
+                fx.bcx.ins().popcnt(lane)
             });
         }
-        "llvm.x86.sse2.storeu.dq" => {
-            intrinsic_args!(fx, args => (mem_addr, a); intrinsic);
-            let mem_addr = mem_addr.load_scalar(fx);
-
-            // FIXME correctly handle the unalignment
-            let dest = CPlace::for_ptr(Pointer::new(mem_addr), a.layout());
-            dest.write_cvalue(fx, a);
-        }
-        "llvm.x86.addcarry.64" => {
-            intrinsic_args!(fx, args => (c_in, a, b); intrinsic);
-            let c_in = c_in.load_scalar(fx);
-
-            llvm_add_sub(fx, BinOp::Add, ret, c_in, a, b);
-        }
-        "llvm.x86.subborrow.64" => {
-            intrinsic_args!(fx, args => (b_in, a, b); intrinsic);
-            let b_in = b_in.load_scalar(fx);
-
-            llvm_add_sub(fx, BinOp::Sub, ret, b_in, a, b);
-        }
         _ => {
             fx.tcx
                 .sess
@@ -150,47 +52,3 @@ pub(crate) fn codegen_llvm_intrinsic_call<'tcx>(
     let ret_block = fx.get_block(dest);
     fx.bcx.ins().jump(ret_block, &[]);
 }
-
-// llvm.x86.avx2.vperm2i128
-// llvm.x86.ssse3.pshuf.b.128
-// llvm.x86.avx2.pshuf.b
-// llvm.x86.avx2.psrli.w
-// llvm.x86.sse2.psrli.w
-
-fn llvm_add_sub<'tcx>(
-    fx: &mut FunctionCx<'_, '_, 'tcx>,
-    bin_op: BinOp,
-    ret: CPlace<'tcx>,
-    cb_in: Value,
-    a: CValue<'tcx>,
-    b: CValue<'tcx>,
-) {
-    assert_eq!(
-        a.layout().ty,
-        fx.tcx.types.u64,
-        "llvm.x86.addcarry.64/llvm.x86.subborrow.64 second operand must be u64"
-    );
-    assert_eq!(
-        b.layout().ty,
-        fx.tcx.types.u64,
-        "llvm.x86.addcarry.64/llvm.x86.subborrow.64 third operand must be u64"
-    );
-
-    // c + carry -> c + first intermediate carry or borrow respectively
-    let int0 = crate::num::codegen_checked_int_binop(fx, bin_op, a, b);
-    let c = int0.value_field(fx, mir::Field::new(0));
-    let cb0 = int0.value_field(fx, mir::Field::new(1)).load_scalar(fx);
-
-    // c + carry -> c + second intermediate carry or borrow respectively
-    let cb_in_as_u64 = fx.bcx.ins().uextend(types::I64, cb_in);
-    let cb_in_as_u64 = CValue::by_val(cb_in_as_u64, fx.layout_of(fx.tcx.types.u64));
-    let int1 = crate::num::codegen_checked_int_binop(fx, bin_op, c, cb_in_as_u64);
-    let (c, cb1) = int1.load_scalar_pair(fx);
-
-    // carry0 | carry1 -> carry or borrow respectively
-    let cb_out = fx.bcx.ins().bor(cb0, cb1);
-
-    let layout = fx.layout_of(fx.tcx.mk_tup([fx.tcx.types.u8, fx.tcx.types.u64].iter()));
-    let val = CValue::by_val_pair(cb_out, c, layout);
-    ret.write_cvalue(fx, val);
-}
diff --git a/compiler/rustc_codegen_cranelift/src/intrinsics/llvm_aarch64.rs b/compiler/rustc_codegen_cranelift/src/intrinsics/llvm_aarch64.rs
new file mode 100644
index 000000000..b431158d2
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/src/intrinsics/llvm_aarch64.rs
@@ -0,0 +1,222 @@
+//! Emulate AArch64 LLVM intrinsics
+
+use crate::intrinsics::*;
+use crate::prelude::*;
+
+use rustc_middle::ty::subst::SubstsRef;
+
+pub(crate) fn codegen_aarch64_llvm_intrinsic_call<'tcx>(
+    fx: &mut FunctionCx<'_, '_, 'tcx>,
+    intrinsic: &str,
+    _substs: SubstsRef<'tcx>,
+    args: &[mir::Operand<'tcx>],
+    ret: CPlace<'tcx>,
+    target: Option<BasicBlock>,
+) {
+    // llvm.aarch64.neon.sqshl.v*i*
+
+    match intrinsic {
+        "llvm.aarch64.isb" => {
+            fx.bcx.ins().fence();
+        }
+
+        _ if intrinsic.starts_with("llvm.aarch64.neon.abs.v") => {
+            intrinsic_args!(fx, args => (a); intrinsic);
+
+            simd_for_each_lane(fx, a, ret, &|fx, _lane_ty, _res_lane_ty, lane| {
+                fx.bcx.ins().iabs(lane)
+            });
+        }
+
+        _ if intrinsic.starts_with("llvm.aarch64.neon.cls.v") => {
+            intrinsic_args!(fx, args => (a); intrinsic);
+
+            simd_for_each_lane(fx, a, ret, &|fx, _lane_ty, _res_lane_ty, lane| {
+                fx.bcx.ins().cls(lane)
+            });
+        }
+
+        _ if intrinsic.starts_with("llvm.aarch64.neon.rbit.v") => {
+            intrinsic_args!(fx, args => (a); intrinsic);
+
+            simd_for_each_lane(fx, a, ret, &|fx, _lane_ty, _res_lane_ty, lane| {
+                fx.bcx.ins().bitrev(lane)
+            });
+        }
+
+        _ if intrinsic.starts_with("llvm.aarch64.neon.sqadd.v") => {
+            intrinsic_args!(fx, args => (x, y); intrinsic);
+
+            simd_pair_for_each_lane_typed(fx, x, y, ret, &|fx, x_lane, y_lane| {
+                crate::num::codegen_saturating_int_binop(fx, BinOp::Add, x_lane, y_lane)
+            });
+        }
+
+        _ if intrinsic.starts_with("llvm.aarch64.neon.sqsub.v") => {
+            intrinsic_args!(fx, args => (x, y); intrinsic);
+
+            simd_pair_for_each_lane_typed(fx, x, y, ret, &|fx, x_lane, y_lane| {
+                crate::num::codegen_saturating_int_binop(fx, BinOp::Sub, x_lane, y_lane)
+            });
+        }
+
+        _ if intrinsic.starts_with("llvm.aarch64.neon.smax.v") => {
+            intrinsic_args!(fx, args => (x, y); intrinsic);
+
+            simd_pair_for_each_lane(
+                fx,
+                x,
+                y,
+                ret,
+                &|fx, _lane_ty, _res_lane_ty, x_lane, y_lane| {
+                    let gt = fx.bcx.ins().icmp(IntCC::SignedGreaterThan, x_lane, y_lane);
+                    fx.bcx.ins().select(gt, x_lane, y_lane)
+                },
+            );
+        }
+
+        _ if intrinsic.starts_with("llvm.aarch64.neon.umax.v") => {
+            intrinsic_args!(fx, args => (x, y); intrinsic);
+
+            simd_pair_for_each_lane(
+                fx,
+                x,
+                y,
+                ret,
+                &|fx, _lane_ty, _res_lane_ty, x_lane, y_lane| {
+                    let gt = fx.bcx.ins().icmp(IntCC::UnsignedGreaterThan, x_lane, y_lane);
+                    fx.bcx.ins().select(gt, x_lane, y_lane)
+                },
+            );
+        }
+
+        _ if intrinsic.starts_with("llvm.aarch64.neon.smaxv.i") => {
+            intrinsic_args!(fx, args => (v); intrinsic);
+
+            simd_reduce(fx, v, None, ret, &|fx, _ty, a, b| {
+                let gt = fx.bcx.ins().icmp(IntCC::SignedGreaterThan, a, b);
+                fx.bcx.ins().select(gt, a, b)
+            });
+        }
+
+        _ if intrinsic.starts_with("llvm.aarch64.neon.umaxv.i") => {
+            intrinsic_args!(fx, args => (v); intrinsic);
+
+            simd_reduce(fx, v, None, ret, &|fx, _ty, a, b| {
+                let gt = fx.bcx.ins().icmp(IntCC::UnsignedGreaterThan, a, b);
+                fx.bcx.ins().select(gt, a, b)
+            });
+        }
+
+        _ if intrinsic.starts_with("llvm.aarch64.neon.smin.v") => {
+            intrinsic_args!(fx, args => (x, y); intrinsic);
+
+            simd_pair_for_each_lane(
+                fx,
+                x,
+                y,
+                ret,
+                &|fx, _lane_ty, _res_lane_ty, x_lane, y_lane| {
+                    let gt = fx.bcx.ins().icmp(IntCC::SignedLessThan, x_lane, y_lane);
+                    fx.bcx.ins().select(gt, x_lane, y_lane)
+                },
+            );
+        }
+
+        _ if intrinsic.starts_with("llvm.aarch64.neon.umin.v") => {
+            intrinsic_args!(fx, args => (x, y); intrinsic);
+
+            simd_pair_for_each_lane(
+                fx,
+                x,
+                y,
+                ret,
+                &|fx, _lane_ty, _res_lane_ty, x_lane, y_lane| {
+                    let gt = fx.bcx.ins().icmp(IntCC::UnsignedLessThan, x_lane, y_lane);
+                    fx.bcx.ins().select(gt, x_lane, y_lane)
+                },
+            );
+        }
+
+        _ if intrinsic.starts_with("llvm.aarch64.neon.sminv.i") => {
+            intrinsic_args!(fx, args => (v); intrinsic);
+
+            simd_reduce(fx, v, None, ret, &|fx, _ty, a, b| {
+                let gt = fx.bcx.ins().icmp(IntCC::SignedLessThan, a, b);
+                fx.bcx.ins().select(gt, a, b)
+            });
+        }
+
+        _ if intrinsic.starts_with("llvm.aarch64.neon.uminv.i") => {
+            intrinsic_args!(fx, args => (v); intrinsic);
+
+            simd_reduce(fx, v, None, ret, &|fx, _ty, a, b| {
+                let gt = fx.bcx.ins().icmp(IntCC::UnsignedLessThan, a, b);
+                fx.bcx.ins().select(gt, a, b)
+            });
+        }
+
+        /*
+        _ if intrinsic.starts_with("llvm.aarch64.neon.sshl.v")
+            || intrinsic.starts_with("llvm.aarch64.neon.sqshl.v")
+            // FIXME split this one out once saturating is implemented
+            || intrinsic.starts_with("llvm.aarch64.neon.sqshlu.v") =>
+        {
+            intrinsic_args!(fx, args => (a, b); intrinsic);
+
+            simd_pair_for_each_lane(fx, a, b, ret, &|fx, _lane_ty, _res_lane_ty, a, b| {
+                // FIXME saturate?
+                fx.bcx.ins().ishl(a, b)
+            });
+        }
+
+        _ if intrinsic.starts_with("llvm.aarch64.neon.sqshrn.v") => {
+            let (a, imm32) = match args {
+                [a, imm32] => (a, imm32),
+                _ => bug!("wrong number of args for intrinsic {intrinsic}"),
+            };
+            let a = codegen_operand(fx, a);
+            let imm32 = crate::constant::mir_operand_get_const_val(fx, imm32)
+                .expect("llvm.aarch64.neon.sqshrn.v* imm32 not const");
+
+            simd_for_each_lane(fx, a, ret, &|fx, _lane_ty, _res_lane_ty, lane| match imm32
+                .try_to_bits(Size::from_bytes(4))
+                .unwrap_or_else(|| panic!("imm32 not scalar: {:?}", imm32))
+            {
+                imm32 if imm32 < 32 => fx.bcx.ins().sshr_imm(lane, i64::from(imm32 as u8)),
+                _ => fx.bcx.ins().iconst(types::I32, 0),
+            });
+        }
+
+        _ if intrinsic.starts_with("llvm.aarch64.neon.sqshrun.v") => {
+            let (a, imm32) = match args {
+                [a, imm32] => (a, imm32),
+                _ => bug!("wrong number of args for intrinsic {intrinsic}"),
+            };
+            let a = codegen_operand(fx, a);
+            let imm32 = crate::constant::mir_operand_get_const_val(fx, imm32)
+                .expect("llvm.aarch64.neon.sqshrn.v* imm32 not const");
+
+            simd_for_each_lane(fx, a, ret, &|fx, _lane_ty, _res_lane_ty, lane| match imm32
+                .try_to_bits(Size::from_bytes(4))
+                .unwrap_or_else(|| panic!("imm32 not scalar: {:?}", imm32))
+            {
+                imm32 if imm32 < 32 => fx.bcx.ins().ushr_imm(lane, i64::from(imm32 as u8)),
+                _ => fx.bcx.ins().iconst(types::I32, 0),
+            });
+        }
+        */
+        _ => {
+            fx.tcx.sess.warn(&format!(
+                "unsupported AArch64 llvm intrinsic {}; replacing with trap",
+                intrinsic
+            ));
+            crate::trap::trap_unimplemented(fx, intrinsic);
+            return;
+        }
+    }
+
+    let dest = target.expect("all llvm intrinsics used by stdlib should return");
+    let ret_block = fx.get_block(dest);
+    fx.bcx.ins().jump(ret_block, &[]);
+}
diff --git a/compiler/rustc_codegen_cranelift/src/intrinsics/llvm_x86.rs b/compiler/rustc_codegen_cranelift/src/intrinsics/llvm_x86.rs
new file mode 100644
index 000000000..7bc161fbe
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/src/intrinsics/llvm_x86.rs
@@ -0,0 +1,197 @@
+//! Emulate x86 LLVM intrinsics
+
+use crate::intrinsics::*;
+use crate::prelude::*;
+
+use rustc_middle::ty::subst::SubstsRef;
+
+pub(crate) fn codegen_x86_llvm_intrinsic_call<'tcx>(
+    fx: &mut FunctionCx<'_, '_, 'tcx>,
+    intrinsic: &str,
+    _substs: SubstsRef<'tcx>,
+    args: &[mir::Operand<'tcx>],
+    ret: CPlace<'tcx>,
+    target: Option<BasicBlock>,
+) {
+    match intrinsic {
+        "llvm.x86.sse2.pause" | "llvm.aarch64.isb" => {
+            // Spin loop hint
+        }
+
+        // Used by `_mm_movemask_epi8` and `_mm256_movemask_epi8`
+        "llvm.x86.sse2.pmovmskb.128" | "llvm.x86.avx2.pmovmskb" | "llvm.x86.sse2.movmsk.pd" => {
+            intrinsic_args!(fx, args => (a); intrinsic);
+
+            let (lane_count, lane_ty) = a.layout().ty.simd_size_and_type(fx.tcx);
+            let lane_ty = fx.clif_type(lane_ty).unwrap();
+            assert!(lane_count <= 32);
+
+            let mut res = fx.bcx.ins().iconst(types::I32, 0);
+
+            for lane in (0..lane_count).rev() {
+                let a_lane = a.value_lane(fx, lane).load_scalar(fx);
+
+                // cast float to int
+                let a_lane = match lane_ty {
+                    types::F32 => fx.bcx.ins().bitcast(types::I32, a_lane),
+                    types::F64 => fx.bcx.ins().bitcast(types::I64, a_lane),
+                    _ => a_lane,
+                };
+
+                // extract sign bit of an int
+                let a_lane_sign = fx.bcx.ins().ushr_imm(a_lane, i64::from(lane_ty.bits() - 1));
+
+                // shift sign bit into result
+                let a_lane_sign = clif_intcast(fx, a_lane_sign, types::I32, false);
+                res = fx.bcx.ins().ishl_imm(res, 1);
+                res = fx.bcx.ins().bor(res, a_lane_sign);
+            }
+
+            let res = CValue::by_val(res, fx.layout_of(fx.tcx.types.i32));
+            ret.write_cvalue(fx, res);
+        }
+        "llvm.x86.sse2.cmp.ps" | "llvm.x86.sse2.cmp.pd" => {
+            let (x, y, kind) = match args {
+                [x, y, kind] => (x, y, kind),
+                _ => bug!("wrong number of args for intrinsic {intrinsic}"),
+            };
+            let x = codegen_operand(fx, x);
+            let y = codegen_operand(fx, y);
+            let kind = crate::constant::mir_operand_get_const_val(fx, kind)
+                .expect("llvm.x86.sse2.cmp.* kind not const");
+
+            let flt_cc = match kind
+                .try_to_bits(Size::from_bytes(1))
+                .unwrap_or_else(|| panic!("kind not scalar: {:?}", kind))
+            {
+                0 => FloatCC::Equal,
+                1 => FloatCC::LessThan,
+                2 => FloatCC::LessThanOrEqual,
+                7 => FloatCC::Ordered,
+                3 => FloatCC::Unordered,
+                4 => FloatCC::NotEqual,
+                5 => FloatCC::UnorderedOrGreaterThanOrEqual,
+                6 => FloatCC::UnorderedOrGreaterThan,
+                kind => unreachable!("kind {:?}", kind),
+            };
+
+            simd_pair_for_each_lane(fx, x, y, ret, &|fx, lane_ty, res_lane_ty, x_lane, y_lane| {
+                let res_lane = match lane_ty.kind() {
+                    ty::Float(_) => fx.bcx.ins().fcmp(flt_cc, x_lane, y_lane),
+                    _ => unreachable!("{:?}", lane_ty),
+                };
+                bool_to_zero_or_max_uint(fx, res_lane_ty, res_lane)
+            });
+        }
+        "llvm.x86.sse2.psrli.d" => {
+            let (a, imm8) = match args {
+                [a, imm8] => (a, imm8),
+                _ => bug!("wrong number of args for intrinsic {intrinsic}"),
+            };
+            let a = codegen_operand(fx, a);
+            let imm8 = crate::constant::mir_operand_get_const_val(fx, imm8)
+                .expect("llvm.x86.sse2.psrli.d imm8 not const");
+
+            simd_for_each_lane(fx, a, ret, &|fx, _lane_ty, _res_lane_ty, lane| match imm8
+                .try_to_bits(Size::from_bytes(4))
+                .unwrap_or_else(|| panic!("imm8 not scalar: {:?}", imm8))
+            {
+                imm8 if imm8 < 32 => fx.bcx.ins().ushr_imm(lane, i64::from(imm8 as u8)),
+                _ => fx.bcx.ins().iconst(types::I32, 0),
+            });
+        }
+        "llvm.x86.sse2.pslli.d" => {
+            let (a, imm8) = match args {
+                [a, imm8] => (a, imm8),
+                _ => bug!("wrong number of args for intrinsic {intrinsic}"),
+            };
+            let a = codegen_operand(fx, a);
+            let imm8 = crate::constant::mir_operand_get_const_val(fx, imm8)
+                .expect("llvm.x86.sse2.psrli.d imm8 not const");
+
+            simd_for_each_lane(fx, a, ret, &|fx, _lane_ty, _res_lane_ty, lane| match imm8
+                .try_to_bits(Size::from_bytes(4))
+                .unwrap_or_else(|| panic!("imm8 not scalar: {:?}", imm8))
+            {
+                imm8 if imm8 < 32 => fx.bcx.ins().ishl_imm(lane, i64::from(imm8 as u8)),
+                _ => fx.bcx.ins().iconst(types::I32, 0),
+            });
+        }
+        "llvm.x86.sse2.storeu.dq" => {
+            intrinsic_args!(fx, args => (mem_addr, a); intrinsic);
+            let mem_addr = mem_addr.load_scalar(fx);
+
+            // FIXME correctly handle the unalignment
+            let dest = CPlace::for_ptr(Pointer::new(mem_addr), a.layout());
+            dest.write_cvalue(fx, a);
+        }
+        "llvm.x86.addcarry.64" => {
+            intrinsic_args!(fx, args => (c_in, a, b); intrinsic);
+            let c_in = c_in.load_scalar(fx);
+
+            llvm_add_sub(fx, BinOp::Add, ret, c_in, a, b);
+        }
+        "llvm.x86.subborrow.64" => {
+            intrinsic_args!(fx, args => (b_in, a, b); intrinsic);
+            let b_in = b_in.load_scalar(fx);
+
+            llvm_add_sub(fx, BinOp::Sub, ret, b_in, a, b);
+        }
+        _ => {
+            fx.tcx.sess.warn(&format!(
+                "unsupported x86 llvm intrinsic {}; replacing with trap",
+                intrinsic
+            ));
+            crate::trap::trap_unimplemented(fx, intrinsic);
+            return;
+        }
+    }
+
+    let dest = target.expect("all llvm intrinsics used by stdlib should return");
+    let ret_block = fx.get_block(dest);
+    fx.bcx.ins().jump(ret_block, &[]);
+}
+
+// llvm.x86.avx2.vperm2i128
+// llvm.x86.ssse3.pshuf.b.128
+// llvm.x86.avx2.pshuf.b
+// llvm.x86.avx2.psrli.w
+// llvm.x86.sse2.psrli.w
+
+fn llvm_add_sub<'tcx>(
+    fx: &mut FunctionCx<'_, '_, 'tcx>,
+    bin_op: BinOp,
+    ret: CPlace<'tcx>,
+    cb_in: Value,
+    a: CValue<'tcx>,
+    b: CValue<'tcx>,
+) {
+    assert_eq!(
+        a.layout().ty,
+        fx.tcx.types.u64,
+        "llvm.x86.addcarry.64/llvm.x86.subborrow.64 second operand must be u64"
+    );
+    assert_eq!(
+        b.layout().ty,
+        fx.tcx.types.u64,
+        "llvm.x86.addcarry.64/llvm.x86.subborrow.64 third operand must be u64"
+    );
+
+    // c + carry -> c + first intermediate carry or borrow respectively
+    let int0 = crate::num::codegen_checked_int_binop(fx, bin_op, a, b);
+    let c = int0.value_field(fx, mir::Field::new(0));
+    let cb0 = int0.value_field(fx, mir::Field::new(1)).load_scalar(fx);
+
+    // c + carry -> c + second intermediate carry or borrow respectively
+    let cb_in_as_u64 = fx.bcx.ins().uextend(types::I64, cb_in);
+    let cb_in_as_u64 = CValue::by_val(cb_in_as_u64, fx.layout_of(fx.tcx.types.u64));
+    let int1 = crate::num::codegen_checked_int_binop(fx, bin_op, c, cb_in_as_u64);
+    let (c, cb1) = int1.load_scalar_pair(fx);
+
+    // carry0 | carry1 -> carry or borrow respectively
+    let cb_out = fx.bcx.ins().bor(cb0, cb1);
+
+    let layout = fx.layout_of(fx.tcx.mk_tup([fx.tcx.types.u8, fx.tcx.types.u64].iter()));
+    let val = CValue::by_val_pair(cb_out, c, layout);
+    ret.write_cvalue(fx, val);
+}
diff --git a/compiler/rustc_codegen_cranelift/src/intrinsics/mod.rs b/compiler/rustc_codegen_cranelift/src/intrinsics/mod.rs
index 0302b843a..e4ac89a7b 100644
--- a/compiler/rustc_codegen_cranelift/src/intrinsics/mod.rs
+++ b/compiler/rustc_codegen_cranelift/src/intrinsics/mod.rs
@@ -14,6 +14,8 @@ macro_rules! intrinsic_args {

 mod cpuid;
 mod llvm;
+mod llvm_aarch64;
+mod llvm_x86;
 mod simd;

 pub(crate) use cpuid::codegen_cpuid_call;
@@ -195,8 +197,7 @@ fn bool_to_zero_or_max_uint<'tcx>(
         ty => ty,
     };

-    let val = fx.bcx.ins().bint(int_ty, val);
-    let mut res = fx.bcx.ins().ineg(val);
+    let mut res = fx.bcx.ins().bmask(int_ty, val);

     if ty.is_float() {
         res = fx.bcx.ins().bitcast(ty, res);
@@ -632,88 +633,18 @@ fn codegen_regular_intrinsic_call<'tcx>(
             ret.write_cvalue(fx, res);
         }
         sym::bswap => {
-            // FIXME(CraneStation/cranelift#794) add bswap instruction to cranelift
-            fn swap(bcx: &mut FunctionBuilder<'_>, v: Value) -> Value {
-                match bcx.func.dfg.value_type(v) {
-                    types::I8 => v,
-
-                    // https://code.woboq.org/gcc/include/bits/byteswap.h.html
-                    types::I16 => {
-                        let tmp1 = bcx.ins().ishl_imm(v, 8);
-                        let n1 = bcx.ins().band_imm(tmp1, 0xFF00);
-
-                        let tmp2 = bcx.ins().ushr_imm(v, 8);
-                        let n2 = bcx.ins().band_imm(tmp2, 0x00FF);
-
-                        bcx.ins().bor(n1, n2)
-                    }
-                    types::I32 => {
-                        let tmp1 = bcx.ins().ishl_imm(v, 24);
-                        let n1 = bcx.ins().band_imm(tmp1, 0xFF00_0000);
-
-                        let tmp2 = bcx.ins().ishl_imm(v, 8);
-                        let n2 = bcx.ins().band_imm(tmp2, 0x00FF_0000);
-
-                        let tmp3 = bcx.ins().ushr_imm(v, 8);
-                        let n3 = bcx.ins().band_imm(tmp3, 0x0000_FF00);
-
-                        let tmp4 = bcx.ins().ushr_imm(v, 24);
-                        let n4 = bcx.ins().band_imm(tmp4, 0x0000_00FF);
-
-                        let or_tmp1 = bcx.ins().bor(n1, n2);
-                        let or_tmp2 = bcx.ins().bor(n3, n4);
-                        bcx.ins().bor(or_tmp1, or_tmp2)
-                    }
-                    types::I64 => {
-                        let tmp1 = bcx.ins().ishl_imm(v, 56);
-                        let n1 = bcx.ins().band_imm(tmp1, 0xFF00_0000_0000_0000u64 as i64);
-
-                        let tmp2 = bcx.ins().ishl_imm(v, 40);
-                        let n2 = bcx.ins().band_imm(tmp2, 0x00FF_0000_0000_0000u64 as i64);
-
-                        let tmp3 = bcx.ins().ishl_imm(v, 24);
-                        let n3 = bcx.ins().band_imm(tmp3, 0x0000_FF00_0000_0000u64 as i64);
-
-                        let tmp4 = bcx.ins().ishl_imm(v, 8);
-                        let n4 = bcx.ins().band_imm(tmp4, 0x0000_00FF_0000_0000u64 as i64);
-
-                        let tmp5 = bcx.ins().ushr_imm(v, 8);
-                        let n5 = bcx.ins().band_imm(tmp5, 0x0000_0000_FF00_0000u64 as i64);
-
-                        let tmp6 = bcx.ins().ushr_imm(v, 24);
-                        let n6 = bcx.ins().band_imm(tmp6, 0x0000_0000_00FF_0000u64 as i64);
-
-                        let tmp7 = bcx.ins().ushr_imm(v, 40);
-                        let n7 = bcx.ins().band_imm(tmp7, 0x0000_0000_0000_FF00u64 as i64);
-
-                        let tmp8 = bcx.ins().ushr_imm(v, 56);
-                        let n8 = bcx.ins().band_imm(tmp8, 0x0000_0000_0000_00FFu64 as i64);
-
-                        let or_tmp1 = bcx.ins().bor(n1, n2);
-                        let or_tmp2 = bcx.ins().bor(n3, n4);
-                        let or_tmp3 = bcx.ins().bor(n5, n6);
-                        let or_tmp4 = bcx.ins().bor(n7, n8);
-
-                        let or_tmp5 = bcx.ins().bor(or_tmp1, or_tmp2);
-                        let or_tmp6 = bcx.ins().bor(or_tmp3, or_tmp4);
-                        bcx.ins().bor(or_tmp5, or_tmp6)
-                    }
-                    types::I128 => {
-                        let (lo, hi) = bcx.ins().isplit(v);
-                        let lo = swap(bcx, lo);
-                        let hi = swap(bcx, hi);
-                        bcx.ins().iconcat(hi, lo)
-                    }
-                    ty => unreachable!("bswap {}", ty),
-                }
-            }
             intrinsic_args!(fx, args => (arg); intrinsic);
             let val = arg.load_scalar(fx);

-            let res = CValue::by_val(swap(&mut fx.bcx, val), arg.layout());
+            let res = if fx.bcx.func.dfg.value_type(val) == types::I8 {
+                val
+            } else {
+                fx.bcx.ins().bswap(val)
+            };
+            let res = CValue::by_val(res, arg.layout());
             ret.write_cvalue(fx, res);
         }
-        sym::assert_inhabited | sym::assert_zero_valid | sym::assert_uninit_valid => {
+        sym::assert_inhabited | sym::assert_zero_valid | sym::assert_mem_uninitialized_valid => {
             intrinsic_args!(fx, args => (); intrinsic);

             let layout = fx.layout_of(substs.type_at(0));
@@ -742,7 +673,9 @@ fn codegen_regular_intrinsic_call<'tcx>(
                 return;
             }

-            if intrinsic == sym::assert_uninit_valid && !fx.tcx.permits_uninit_init(layout) {
+            if intrinsic == sym::assert_mem_uninitialized_valid
+                && !fx.tcx.permits_uninit_init(layout)
+            {
                 with_no_trimmed_paths!({
                     crate::base::codegen_panic(
                         fx,
@@ -936,8 +869,7 @@ fn codegen_regular_intrinsic_call<'tcx>(

             let old = fx.bcx.ins().atomic_cas(MemFlags::trusted(), ptr, test_old, new);
             let is_eq = fx.bcx.ins().icmp(IntCC::Equal, old, test_old);
-            let ret_val =
-                CValue::by_val_pair(old, fx.bcx.ins().bint(types::I8, is_eq), ret.layout());
+            let ret_val = CValue::by_val_pair(old, is_eq, ret.layout());

             ret.write_cvalue(fx, ret_val)
         }
@@ -1259,8 +1191,7 @@ fn codegen_regular_intrinsic_call<'tcx>(
                 flags.set_notrap();
                 let lhs_val = fx.bcx.ins().load(clty, flags, lhs_ref, 0);
                 let rhs_val = fx.bcx.ins().load(clty, flags, rhs_ref, 0);
-                let eq = fx.bcx.ins().icmp(IntCC::Equal, lhs_val, rhs_val);
-                fx.bcx.ins().bint(types::I8, eq)
+                fx.bcx.ins().icmp(IntCC::Equal, lhs_val, rhs_val)
             } else {
                 // Just call `memcmp` (like slices do in core) when the
                 // size is too large or it's not a power-of-two.
@@ -1270,8 +1201,7 @@ fn codegen_regular_intrinsic_call<'tcx>(
                 let returns = vec![AbiParam::new(types::I32)];
                 let args = &[lhs_ref, rhs_ref, bytes_val];
                 let cmp = fx.lib_call("memcmp", params, returns, args)[0];
-                let eq = fx.bcx.ins().icmp_imm(IntCC::Equal, cmp, 0);
-                fx.bcx.ins().bint(types::I8, eq)
+                fx.bcx.ins().icmp_imm(IntCC::Equal, cmp, 0)
             };
             ret.write_cvalue(fx, CValue::by_val(is_eq_value, ret.layout()));
         }
diff --git a/compiler/rustc_codegen_cranelift/src/intrinsics/simd.rs b/compiler/rustc_codegen_cranelift/src/intrinsics/simd.rs
index 51fce8c85..14f5e9187 100644
--- a/compiler/rustc_codegen_cranelift/src/intrinsics/simd.rs
+++ b/compiler/rustc_codegen_cranelift/src/intrinsics/simd.rs
@@ -112,10 +112,7 @@ pub(super) fn codegen_simd_intrinsic_call<'tcx>(
                 _ => unreachable!(),
             };

-            let ty = fx.clif_type(res_lane_ty).unwrap();
-
-            let res_lane = fx.bcx.ins().bint(ty, res_lane);
-            fx.bcx.ins().ineg(res_lane)
+            bool_to_zero_or_max_uint(fx, res_lane_ty, res_lane)
         });
     }

@@ -716,7 +713,7 @@ pub(super) fn codegen_simd_intrinsic_call<'tcx>(
            let res_type =
                Type::int_with_byte_size(u16::try_from(expected_bytes).unwrap()).unwrap();
-            let mut res = fx.bcx.ins().iconst(res_type, 0);
+            let mut res = type_zero_value(&mut fx.bcx, res_type);

            let lanes = match fx.tcx.sess.target.endian {
                Endian::Big => Box::new(0..lane_count) as Box<dyn Iterator<Item = u64>>,