 compiler/rustc_codegen_cranelift/src/intrinsics/mod.rs | 130 ++++++++++--------
 1 file changed, 77 insertions(+), 53 deletions(-)
diff --git a/compiler/rustc_codegen_cranelift/src/intrinsics/mod.rs b/compiler/rustc_codegen_cranelift/src/intrinsics/mod.rs
index b2a83e1d4..0302b843a 100644
--- a/compiler/rustc_codegen_cranelift/src/intrinsics/mod.rs
+++ b/compiler/rustc_codegen_cranelift/src/intrinsics/mod.rs
@@ -44,7 +44,7 @@ fn report_atomic_type_validation_error<'tcx>(
),
);
// Prevent verifier error
- crate::trap::trap_unreachable(fx, "compilation should not have succeeded");
+ fx.bcx.ins().trap(TrapCode::UnreachableCodeReached);
}
pub(crate) fn clif_vector_type<'tcx>(tcx: TyCtxt<'tcx>, layout: TyAndLayout<'tcx>) -> Option<Type> {
@@ -53,7 +53,7 @@ pub(crate) fn clif_vector_type<'tcx>(tcx: TyCtxt<'tcx>, layout: TyAndLayout<'tcx
_ => unreachable!(),
};
- match scalar_to_clif_type(tcx, element).by(u16::try_from(count).unwrap()) {
+ match scalar_to_clif_type(tcx, element).by(u32::try_from(count).unwrap()) {
// Cranelift currently only implements icmp for 128bit vectors.
Some(vector_ty) if vector_ty.bits() == 128 => Some(vector_ty),
_ => None,
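Note: the `u16` -> `u32` switch tracks Cranelift's `Type::by`, which now takes a `u32` lane count. A minimal sketch of the call against cranelift-codegen (the version bump itself is not shown in this diff):

    use cranelift_codegen::ir::types;

    // F32.by(4) yields Some(F32X4); lane counts that do not form a valid
    // vector type yield None, which clif_vector_type propagates as None.
    let v4f32: Option<types::Type> = types::F32.by(4);
    assert_eq!(v4f32, Some(types::F32X4));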
@@ -84,6 +84,30 @@ fn simd_for_each_lane<'tcx>(
}
}
+fn simd_pair_for_each_lane_typed<'tcx>(
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ x: CValue<'tcx>,
+ y: CValue<'tcx>,
+ ret: CPlace<'tcx>,
+ f: &dyn Fn(&mut FunctionCx<'_, '_, 'tcx>, CValue<'tcx>, CValue<'tcx>) -> CValue<'tcx>,
+) {
+ assert_eq!(x.layout(), y.layout());
+ let layout = x.layout();
+
+ let (lane_count, _lane_ty) = layout.ty.simd_size_and_type(fx.tcx);
+ let (ret_lane_count, _ret_lane_ty) = ret.layout().ty.simd_size_and_type(fx.tcx);
+ assert_eq!(lane_count, ret_lane_count);
+
+ for lane_idx in 0..lane_count {
+ let x_lane = x.value_lane(fx, lane_idx);
+ let y_lane = y.value_lane(fx, lane_idx);
+
+ let res_lane = f(fx, x_lane, y_lane);
+
+ ret.place_lane(fx, lane_idx).write_cvalue(fx, res_lane);
+ }
+}
+
fn simd_pair_for_each_lane<'tcx>(
fx: &mut FunctionCx<'_, '_, 'tcx>,
x: CValue<'tcx>,
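The new `_typed` variant passes each lane to the closure as a `CValue` rather than a raw Cranelift `Value`, so per-lane lowering can reuse higher-level helpers. A hypothetical caller (a sketch, not part of this hunk) lowering a lane-wise saturating add:

    simd_pair_for_each_lane_typed(fx, x, y, ret, &|fx, x_lane, y_lane| {
        // Each lane arrives as a full CValue, so the scalar helper
        // introduced later in this patch can be reused per lane.
        crate::num::codegen_saturating_int_binop(fx, BinOp::Add, x_lane, y_lane)
    });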
@@ -203,7 +227,7 @@ pub(crate) fn codegen_intrinsic_call<'tcx>(
sym::transmute => {
crate::base::codegen_panic(fx, "Transmuting to uninhabited type.", source_info);
}
- _ => unimplemented!("unsupported instrinsic {}", intrinsic),
+ _ => unimplemented!("unsupported intrinsic {}", intrinsic),
}
return;
};
@@ -301,7 +325,44 @@ fn codegen_float_intrinsic_call<'tcx>(
_ => unreachable!(),
};
- let res = fx.easy_call(name, &args, ty);
+ let layout = fx.layout_of(ty);
+ let res = match intrinsic {
+ sym::fmaf32 | sym::fmaf64 => {
+ let a = args[0].load_scalar(fx);
+ let b = args[1].load_scalar(fx);
+ let c = args[2].load_scalar(fx);
+ CValue::by_val(fx.bcx.ins().fma(a, b, c), layout)
+ }
+ sym::copysignf32 | sym::copysignf64 => {
+ let a = args[0].load_scalar(fx);
+ let b = args[1].load_scalar(fx);
+ CValue::by_val(fx.bcx.ins().fcopysign(a, b), layout)
+ }
+ sym::fabsf32
+ | sym::fabsf64
+ | sym::floorf32
+ | sym::floorf64
+ | sym::ceilf32
+ | sym::ceilf64
+ | sym::truncf32
+ | sym::truncf64 => {
+ let a = args[0].load_scalar(fx);
+
+ let val = match intrinsic {
+ sym::fabsf32 | sym::fabsf64 => fx.bcx.ins().fabs(a),
+ sym::floorf32 | sym::floorf64 => fx.bcx.ins().floor(a),
+ sym::ceilf32 | sym::ceilf64 => fx.bcx.ins().ceil(a),
+ sym::truncf32 | sym::truncf64 => fx.bcx.ins().trunc(a),
+ _ => unreachable!(),
+ };
+
+ CValue::by_val(val, layout)
+ }
+ // These intrinsics aren't supported natively by Cranelift.
+ // Lower them to a libcall.
+ _ => fx.easy_call(name, &args, ty),
+ };
+
ret.write_cvalue(fx, res);
true
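These native lowerings must match the semantics of the corresponding std float methods; as a plain-Rust reference (not codegen code):

    // fmaf32 -> fma: fused multiply-add with a single rounding step.
    assert_eq!(f32::mul_add(2.0, 3.0, 4.0), 10.0);
    // copysignf32 -> fcopysign: magnitude of the first operand, sign of the second.
    assert_eq!(f32::copysign(3.0, -0.5), -3.0);
    // fabs/floor/ceil/trunc map one-to-one.
    assert_eq!((-1.5f32).abs(), 1.5);
    assert_eq!(1.7f32.floor(), 1.0);
    assert_eq!((-1.7f32).trunc(), -1.0);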
@@ -320,9 +381,6 @@ fn codegen_regular_intrinsic_call<'tcx>(
let usize_layout = fx.layout_of(fx.tcx.types.usize);
match intrinsic {
- sym::assume => {
- intrinsic_args!(fx, args => (_a); intrinsic);
- }
sym::likely | sym::unlikely => {
intrinsic_args!(fx, args => (a); intrinsic);
@@ -470,37 +528,7 @@ fn codegen_regular_intrinsic_call<'tcx>(
_ => unreachable!(),
};
- let signed = type_sign(lhs.layout().ty);
-
- let checked_res = crate::num::codegen_checked_int_binop(fx, bin_op, lhs, rhs);
-
- let (val, has_overflow) = checked_res.load_scalar_pair(fx);
- let clif_ty = fx.clif_type(lhs.layout().ty).unwrap();
-
- let (min, max) = type_min_max_value(&mut fx.bcx, clif_ty, signed);
-
- let val = match (intrinsic, signed) {
- (sym::saturating_add, false) => fx.bcx.ins().select(has_overflow, max, val),
- (sym::saturating_sub, false) => fx.bcx.ins().select(has_overflow, min, val),
- (sym::saturating_add, true) => {
- let rhs = rhs.load_scalar(fx);
- let rhs_ge_zero =
- fx.bcx.ins().icmp_imm(IntCC::SignedGreaterThanOrEqual, rhs, 0);
- let sat_val = fx.bcx.ins().select(rhs_ge_zero, max, min);
- fx.bcx.ins().select(has_overflow, sat_val, val)
- }
- (sym::saturating_sub, true) => {
- let rhs = rhs.load_scalar(fx);
- let rhs_ge_zero =
- fx.bcx.ins().icmp_imm(IntCC::SignedGreaterThanOrEqual, rhs, 0);
- let sat_val = fx.bcx.ins().select(rhs_ge_zero, min, max);
- fx.bcx.ins().select(has_overflow, sat_val, val)
- }
- _ => unreachable!(),
- };
-
- let res = CValue::by_val(val, lhs.layout());
-
+ let res = crate::num::codegen_saturating_int_binop(fx, bin_op, lhs, rhs);
ret.write_cvalue(fx, res);
}
sym::rotate_left => {
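The select-chain deleted above moves behind `crate::num::codegen_saturating_int_binop`; the behavior the helper must preserve, as a plain-Rust reference sketch:

    fn saturating_add_i8(a: i8, b: i8) -> i8 {
        match a.checked_add(b) {
            Some(v) => v,
            // On signed overflow, saturate toward the sign of rhs,
            // mirroring the rhs_ge_zero select removed above.
            None => if b >= 0 { i8::MAX } else { i8::MIN },
        }
    }

    assert_eq!(saturating_add_i8(120, 10), i8::MAX);
    assert_eq!(saturating_add_i8(-120, -10), i8::MIN);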
@@ -540,6 +568,17 @@ fn codegen_regular_intrinsic_call<'tcx>(
ret.write_cvalue(fx, CValue::by_val(res, base.layout()));
}
+ sym::ptr_mask => {
+ intrinsic_args!(fx, args => (ptr, mask); intrinsic);
+ let ptr_layout = ptr.layout();
+ let ptr = ptr.load_scalar(fx);
+ let mask = mask.load_scalar(fx);
+ // Mask the address bits and write the result back; without the
+ // write_cvalue the band result is discarded and ret stays uninitialized.
+ let res = fx.bcx.ins().band(ptr, mask);
+ ret.write_cvalue(fx, CValue::by_val(res, ptr_layout));
+ }
+
sym::transmute => {
intrinsic_args!(fx, args => (from); intrinsic);
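`ptr_mask` clears address bits while preserving the pointer's provenance. In terms of the strict-provenance API its observable effect is (reference sketch, not backend code):

    let x = 0u8;
    let p: *const u8 = &x;
    // ptr_mask(p, !0xf) computes the same address as:
    let aligned = p.map_addr(|addr| addr & !0xf);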
@@ -775,18 +810,11 @@ fn codegen_regular_intrinsic_call<'tcx>(
ret.write_cvalue(fx, val);
}
- sym::ptr_guaranteed_eq => {
+ sym::ptr_guaranteed_cmp => {
intrinsic_args!(fx, args => (a, b); intrinsic);
- let val = crate::num::codegen_ptr_binop(fx, BinOp::Eq, a, b);
- ret.write_cvalue(fx, val);
- }
-
- sym::ptr_guaranteed_ne => {
- intrinsic_args!(fx, args => (a, b); intrinsic);
-
- let val = crate::num::codegen_ptr_binop(fx, BinOp::Ne, a, b);
- ret.write_cvalue(fx, val);
+ let val = crate::num::codegen_ptr_binop(fx, BinOp::Eq, a, b).load_scalar(fx);
+ ret.write_cvalue(fx, CValue::by_val(val, fx.layout_of(fx.tcx.types.u8)));
}
sym::caller_location => {
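`ptr_guaranteed_eq`/`ptr_guaranteed_ne` are folded into a single `ptr_guaranteed_cmp` that returns a `u8`: 0 for not equal, 1 for equal, and 2 ("unknown") only during const evaluation. At runtime the comparison is always decidable, so the lowering is just the `Eq` result widened to `u8`; as plain Rust:

    fn runtime_ptr_guaranteed_cmp(a: *const u8, b: *const u8) -> u8 {
        // Runtime code always gets a definite answer (0 or 1); the
        // 2 = "unknown" result can only come from const-eval.
        (a == b) as u8
    }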
@@ -818,8 +846,6 @@ fn codegen_regular_intrinsic_call<'tcx>(
if fx.tcx.is_compiler_builtins(LOCAL_CRATE) {
// special case for compiler-builtins to avoid having to patch it
crate::trap::trap_unimplemented(fx, "128bit atomics not yet supported");
- let ret_block = fx.get_block(destination.unwrap());
- fx.bcx.ins().jump(ret_block, &[]);
return;
} else {
fx.tcx
@@ -851,8 +877,6 @@ fn codegen_regular_intrinsic_call<'tcx>(
if fx.tcx.is_compiler_builtins(LOCAL_CRATE) {
// special case for compiler-builtins to avoid having to patch it
crate::trap::trap_unimplemented(fx, "128bit atomics not yet supported");
- let ret_block = fx.get_block(destination.unwrap());
- fx.bcx.ins().jump(ret_block, &[]);
return;
} else {
fx.tcx
@@ -1176,7 +1200,7 @@ fn codegen_regular_intrinsic_call<'tcx>(
// FIXME once unwinding is supported, change this to actually catch panics
let f_sig = fx.bcx.func.import_signature(Signature {
call_conv: fx.target_config.default_call_conv,
- params: vec![AbiParam::new(fx.bcx.func.dfg.value_type(data))],
+ params: vec![AbiParam::new(pointer_ty(fx.tcx))],
returns: vec![],
});