From 246f239d9f40f633160f0c18f87a20922d4e77bb Mon Sep 17 00:00:00 2001
From: Daniel Baumann
Date: Wed, 17 Apr 2024 14:06:37 +0200
Subject: Merging debian version 1.65.0+dfsg1-2.

Signed-off-by: Daniel Baumann
---
 .../src/intrinsics/cpuid.rs                        |  2 +-
 .../rustc_codegen_cranelift/src/intrinsics/llvm.rs |  1 +
 .../rustc_codegen_cranelift/src/intrinsics/mod.rs  | 68 ++++++++++++++++------
 .../rustc_codegen_cranelift/src/intrinsics/simd.rs | 38 ++++++------
 4 files changed, 71 insertions(+), 38 deletions(-)

(limited to 'compiler/rustc_codegen_cranelift/src/intrinsics')

diff --git a/compiler/rustc_codegen_cranelift/src/intrinsics/cpuid.rs b/compiler/rustc_codegen_cranelift/src/intrinsics/cpuid.rs
index d02dfd93c..5120b89c4 100644
--- a/compiler/rustc_codegen_cranelift/src/intrinsics/cpuid.rs
+++ b/compiler/rustc_codegen_cranelift/src/intrinsics/cpuid.rs
@@ -62,7 +62,7 @@ pub(crate) fn codegen_cpuid_call<'tcx>(
     fx.bcx.ins().jump(dest, &[zero, zero, proc_info_ecx, proc_info_edx]);

     fx.bcx.switch_to_block(unsupported_leaf);
-    crate::trap::trap_unreachable(
+    crate::trap::trap_unimplemented(
         fx,
         "__cpuid_count arch intrinsic doesn't yet support specified leaf",
     );
diff --git a/compiler/rustc_codegen_cranelift/src/intrinsics/llvm.rs b/compiler/rustc_codegen_cranelift/src/intrinsics/llvm.rs
index 869670c8c..a799dca93 100644
--- a/compiler/rustc_codegen_cranelift/src/intrinsics/llvm.rs
+++ b/compiler/rustc_codegen_cranelift/src/intrinsics/llvm.rs
@@ -139,6 +139,7 @@ pub(crate) fn codegen_llvm_intrinsic_call<'tcx>(
             fx.tcx
                 .sess
                 .warn(&format!("unsupported llvm intrinsic {}; replacing with trap", intrinsic));
             crate::trap::trap_unimplemented(fx, intrinsic);
+            return;
         }
     }
diff --git a/compiler/rustc_codegen_cranelift/src/intrinsics/mod.rs b/compiler/rustc_codegen_cranelift/src/intrinsics/mod.rs
index b2a83e1d4..2e4ca594f 100644
--- a/compiler/rustc_codegen_cranelift/src/intrinsics/mod.rs
+++ b/compiler/rustc_codegen_cranelift/src/intrinsics/mod.rs
@@ -44,7 +44,7 @@ fn report_atomic_type_validation_error<'tcx>(
         ),
     );
     // Prevent verifier error
-    crate::trap::trap_unreachable(fx, "compilation should not have succeeded");
+    fx.bcx.ins().trap(TrapCode::UnreachableCodeReached);
 }

 pub(crate) fn clif_vector_type<'tcx>(tcx: TyCtxt<'tcx>, layout: TyAndLayout<'tcx>) -> Option<Type> {
@@ -53,7 +53,7 @@ pub(crate) fn clif_vector_type<'tcx>(tcx: TyCtxt<'tcx>, layout: TyAndLayout<'tcx
         _ => unreachable!(),
     };
-    match scalar_to_clif_type(tcx, element).by(u16::try_from(count).unwrap()) {
+    match scalar_to_clif_type(tcx, element).by(u32::try_from(count).unwrap()) {
         // Cranelift currently only implements icmp for 128bit vectors.
         Some(vector_ty) if vector_ty.bits() == 128 => Some(vector_ty),
         _ => None,
     }
@@ -203,7 +203,7 @@ pub(crate) fn codegen_intrinsic_call<'tcx>(
             sym::transmute => {
                 crate::base::codegen_panic(fx, "Transmuting to uninhabited type.", source_info);
             }
-            _ => unimplemented!("unsupported instrinsic {}", intrinsic),
+            _ => unimplemented!("unsupported intrinsic {}", intrinsic),
         }
         return;
     };
@@ -301,7 +301,44 @@ fn codegen_float_intrinsic_call<'tcx>(
         _ => unreachable!(),
     };

-    let res = fx.easy_call(name, &args, ty);
+    let layout = fx.layout_of(ty);
+    let res = match intrinsic {
+        sym::fmaf32 | sym::fmaf64 => {
+            let a = args[0].load_scalar(fx);
+            let b = args[1].load_scalar(fx);
+            let c = args[2].load_scalar(fx);
+            CValue::by_val(fx.bcx.ins().fma(a, b, c), layout)
+        }
+        sym::copysignf32 | sym::copysignf64 => {
+            let a = args[0].load_scalar(fx);
+            let b = args[1].load_scalar(fx);
+            CValue::by_val(fx.bcx.ins().fcopysign(a, b), layout)
+        }
+        sym::fabsf32
+        | sym::fabsf64
+        | sym::floorf32
+        | sym::floorf64
+        | sym::ceilf32
+        | sym::ceilf64
+        | sym::truncf32
+        | sym::truncf64 => {
+            let a = args[0].load_scalar(fx);
+
+            let val = match intrinsic {
+                sym::fabsf32 | sym::fabsf64 => fx.bcx.ins().fabs(a),
+                sym::floorf32 | sym::floorf64 => fx.bcx.ins().floor(a),
+                sym::ceilf32 | sym::ceilf64 => fx.bcx.ins().ceil(a),
+                sym::truncf32 | sym::truncf64 => fx.bcx.ins().trunc(a),
+                _ => unreachable!(),
+            };
+
+            CValue::by_val(val, layout)
+        }
+        // These intrinsics aren't supported natively by Cranelift.
+        // Lower them to a libcall.
+        _ => fx.easy_call(name, &args, ty),
+    };
+
     ret.write_cvalue(fx, res);

     true
@@ -320,9 +357,6 @@ fn codegen_regular_intrinsic_call<'tcx>(
     let usize_layout = fx.layout_of(fx.tcx.types.usize);

     match intrinsic {
-        sym::assume => {
-            intrinsic_args!(fx, args => (_a); intrinsic);
-        }
         sym::likely | sym::unlikely => {
             intrinsic_args!(fx, args => (a); intrinsic);

@@ -540,6 +574,13 @@ fn codegen_regular_intrinsic_call<'tcx>(
             ret.write_cvalue(fx, CValue::by_val(res, base.layout()));
         }

+        sym::ptr_mask => {
+            intrinsic_args!(fx, args => (ptr, mask); intrinsic);
+            let ptr = ptr.load_scalar(fx);
+            let mask = mask.load_scalar(fx);
+            fx.bcx.ins().band(ptr, mask);
+        }
+
         sym::transmute => {
             intrinsic_args!(fx, args => (from); intrinsic);

@@ -775,20 +816,13 @@ fn codegen_regular_intrinsic_call<'tcx>(
             ret.write_cvalue(fx, val);
         }

-        sym::ptr_guaranteed_eq => {
+        sym::ptr_guaranteed_cmp => {
             intrinsic_args!(fx, args => (a, b); intrinsic);

             let val = crate::num::codegen_ptr_binop(fx, BinOp::Eq, a, b);
             ret.write_cvalue(fx, val);
         }

-        sym::ptr_guaranteed_ne => {
-            intrinsic_args!(fx, args => (a, b); intrinsic);
-
-            let val = crate::num::codegen_ptr_binop(fx, BinOp::Ne, a, b);
-            ret.write_cvalue(fx, val);
-        }
-
         sym::caller_location => {
             intrinsic_args!(fx, args => (); intrinsic);

@@ -818,8 +852,6 @@ fn codegen_regular_intrinsic_call<'tcx>(
                     if fx.tcx.is_compiler_builtins(LOCAL_CRATE) {
                         // special case for compiler-builtins to avoid having to patch it
                         crate::trap::trap_unimplemented(fx, "128bit atomics not yet supported");
-                        let ret_block = fx.get_block(destination.unwrap());
-                        fx.bcx.ins().jump(ret_block, &[]);
                         return;
                     } else {
                         fx.tcx
@@ -851,8 +883,6 @@ fn codegen_regular_intrinsic_call<'tcx>(
                     if fx.tcx.is_compiler_builtins(LOCAL_CRATE) {
                         // special case for compiler-builtins to avoid having to patch it
                         crate::trap::trap_unimplemented(fx, "128bit atomics not yet supported");
-                        let ret_block = fx.get_block(destination.unwrap());
-                        fx.bcx.ins().jump(ret_block, &[]);
                         return;
                     } else {
                         fx.tcx
diff --git a/compiler/rustc_codegen_cranelift/src/intrinsics/simd.rs b/compiler/rustc_codegen_cranelift/src/intrinsics/simd.rs
index 30e3d1125..1f358b1bb 100644
--- a/compiler/rustc_codegen_cranelift/src/intrinsics/simd.rs
+++ b/compiler/rustc_codegen_cranelift/src/intrinsics/simd.rs
@@ -14,7 +14,7 @@ fn report_simd_type_validation_error(
 ) {
     fx.tcx.sess.span_err(span, &format!("invalid monomorphization of `{}` intrinsic: expected SIMD input type, found non-SIMD `{}`", intrinsic, ty));
     // Prevent verifier error
-    crate::trap::trap_unreachable(fx, "compilation should not have succeeded");
+    fx.bcx.ins().trap(TrapCode::UnreachableCodeReached);
 }

 pub(super) fn codegen_simd_intrinsic_call<'tcx>(
@@ -157,7 +157,7 @@ pub(super) fn codegen_simd_intrinsic_call<'tcx>(
                         ),
                     );
                     // Prevent verifier error
-                    crate::trap::trap_unreachable(fx, "compilation should not have succeeded");
+                    fx.bcx.ins().trap(TrapCode::UnreachableCodeReached);
                     return;
                 }
             }
@@ -186,7 +186,10 @@ pub(super) fn codegen_simd_intrinsic_call<'tcx>(
                 let size = Size::from_bytes(
                     4 * ret_lane_count, /* size_of([u32; ret_lane_count]) */
                 );
-                alloc.inner().get_bytes(fx, alloc_range(offset, size)).unwrap()
+                alloc
+                    .inner()
+                    .get_bytes_strip_provenance(fx, alloc_range(offset, size))
+                    .unwrap()
             }
             _ => unreachable!("{:?}", idx_const),
         };
@@ -274,12 +277,17 @@ pub(super) fn codegen_simd_intrinsic_call<'tcx>(
                idx_const
            } else {
                fx.tcx.sess.span_warn(span, "Index argument for `simd_extract` is not a constant");
-                let res = crate::trap::trap_unimplemented_ret_value(
+                let trap_block = fx.bcx.create_block();
+                let dummy_block = fx.bcx.create_block();
+                let true_ = fx.bcx.ins().iconst(types::I8, 1);
+                fx.bcx.ins().brnz(true_, trap_block, &[]);
+                fx.bcx.ins().jump(dummy_block, &[]);
+                fx.bcx.switch_to_block(trap_block);
+                crate::trap::trap_unimplemented(
                    fx,
-                    ret.layout(),
                    "Index argument for `simd_extract` is not a constant",
                );
-                ret.write_cvalue(fx, res);
+                fx.bcx.switch_to_block(dummy_block);
                return;
            };

@@ -392,21 +400,15 @@ pub(super) fn codegen_simd_intrinsic_call<'tcx>(
             let layout = a.layout();
             let (lane_count, lane_ty) = layout.ty.simd_size_and_type(fx.tcx);
+            let res_lane_layout = fx.layout_of(lane_ty);

             for lane in 0..lane_count {
-                let a_lane = a.value_lane(fx, lane);
-                let b_lane = b.value_lane(fx, lane);
-                let c_lane = c.value_lane(fx, lane);
+                let a_lane = a.value_lane(fx, lane).load_scalar(fx);
+                let b_lane = b.value_lane(fx, lane).load_scalar(fx);
+                let c_lane = c.value_lane(fx, lane).load_scalar(fx);

-                let res_lane = match lane_ty.kind() {
-                    ty::Float(FloatTy::F32) => {
-                        fx.easy_call("fmaf", &[a_lane, b_lane, c_lane], lane_ty)
-                    }
-                    ty::Float(FloatTy::F64) => {
-                        fx.easy_call("fma", &[a_lane, b_lane, c_lane], lane_ty)
-                    }
-                    _ => unreachable!(),
-                };
+                let res_lane = fx.bcx.ins().fma(a_lane, b_lane, c_lane);
+                let res_lane = CValue::by_val(res_lane, res_lane_layout);
                 ret.place_lane(fx, lane).write_cvalue(fx, res_lane);
             }
         }

--
cgit v1.2.3
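
Illustrative note (not part of the patch): the mod.rs hunk at @@ -301,7 +301,44 @@ switches fma, copysign, fabs, floor, ceil and trunc from libcalls to native Cranelift instructions via calls such as fx.bcx.ins().fma(a, b, c). The standalone sketch below shows the same instruction being emitted directly with cranelift-frontend, outside of rustc. The crate names (cranelift-codegen, cranelift-frontend) and the exact builder API are assumptions based on recent Cranelift releases and may drift between versions.

// Sketch: build `fn(f32, f32, f32) -> f32` that computes fma(a, b, c) as one
// native Cranelift instruction instead of a call to the `fmaf` libcall.
// Assumes the cranelift-codegen and cranelift-frontend crates are available.
use cranelift_codegen::ir::{types, AbiParam, Function, InstBuilder, Signature};
use cranelift_codegen::isa::CallConv;
use cranelift_frontend::{FunctionBuilder, FunctionBuilderContext};

fn main() {
    // Signature: three f32 parameters, one f32 return value.
    let mut sig = Signature::new(CallConv::SystemV);
    for _ in 0..3 {
        sig.params.push(AbiParam::new(types::F32));
    }
    sig.returns.push(AbiParam::new(types::F32));

    let mut func = Function::new();
    func.signature = sig;

    let mut builder_ctx = FunctionBuilderContext::new();
    {
        let mut bcx = FunctionBuilder::new(&mut func, &mut builder_ctx);
        let block = bcx.create_block();
        bcx.append_block_params_for_function_params(block);
        bcx.switch_to_block(block);

        let params = bcx.block_params(block).to_vec();
        let (a, b, c) = (params[0], params[1], params[2]);

        // The point of the patch: a single fma instruction, no libcall.
        let res = bcx.ins().fma(a, b, c);
        bcx.ins().return_(&[res]);

        bcx.seal_all_blocks();
        bcx.finalize();
    }

    // Print the generated CLIF for inspection.
    println!("{}", func.display());
}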