| field | value | date |
|---|---|---|
| author | Daniel Baumann <daniel.baumann@progress-linux.org> | 2024-04-17 12:19:50 +0000 |
| committer | Daniel Baumann <daniel.baumann@progress-linux.org> | 2024-04-17 12:19:50 +0000 |
| commit | 2e00214b3efbdfeefaa0fe9e8b8fd519de7adc35 (patch) | |
| tree | d325add32978dbdc1db975a438b3a77d571b1ab8 /compiler/rustc_codegen_cranelift/src/intrinsics | |
| parent | Releasing progress-linux version 1.68.2+dfsg1-1~progress7.99u1. (diff) | |
| download | rustc-2e00214b3efbdfeefaa0fe9e8b8fd519de7adc35.tar.xz, rustc-2e00214b3efbdfeefaa0fe9e8b8fd519de7adc35.zip | |
Merging upstream version 1.69.0+dfsg1.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'compiler/rustc_codegen_cranelift/src/intrinsics')
3 files changed, 168 insertions, 86 deletions
diff --git a/compiler/rustc_codegen_cranelift/src/intrinsics/llvm_x86.rs b/compiler/rustc_codegen_cranelift/src/intrinsics/llvm_x86.rs
index 7bc161fbe..e5c4b244a 100644
--- a/compiler/rustc_codegen_cranelift/src/intrinsics/llvm_x86.rs
+++ b/compiler/rustc_codegen_cranelift/src/intrinsics/llvm_x86.rs
@@ -33,8 +33,8 @@ pub(crate) fn codegen_x86_llvm_intrinsic_call<'tcx>(
                 // cast float to int
                 let a_lane = match lane_ty {
-                    types::F32 => fx.bcx.ins().bitcast(types::I32, a_lane),
-                    types::F64 => fx.bcx.ins().bitcast(types::I64, a_lane),
+                    types::F32 => codegen_bitcast(fx, types::I32, a_lane),
+                    types::F64 => codegen_bitcast(fx, types::I64, a_lane),
                     _ => a_lane,
                 };
@@ -191,7 +191,7 @@ fn llvm_add_sub<'tcx>(
     // carry0 | carry1 -> carry or borrow respectively
     let cb_out = fx.bcx.ins().bor(cb0, cb1);
 
-    let layout = fx.layout_of(fx.tcx.mk_tup([fx.tcx.types.u8, fx.tcx.types.u64].iter()));
+    let layout = fx.layout_of(fx.tcx.mk_tup(&[fx.tcx.types.u8, fx.tcx.types.u64]));
     let val = CValue::by_val_pair(cb_out, c, layout);
     ret.write_cvalue(fx, val);
 }
diff --git a/compiler/rustc_codegen_cranelift/src/intrinsics/mod.rs b/compiler/rustc_codegen_cranelift/src/intrinsics/mod.rs
index e4ac89a7b..e74aabf2f 100644
--- a/compiler/rustc_codegen_cranelift/src/intrinsics/mod.rs
+++ b/compiler/rustc_codegen_cranelift/src/intrinsics/mod.rs
@@ -21,6 +21,8 @@ mod simd;
 pub(crate) use cpuid::codegen_cpuid_call;
 pub(crate) use llvm::codegen_llvm_intrinsic_call;
 
+use rustc_middle::ty;
+use rustc_middle::ty::layout::{HasParamEnv, ValidityRequirement};
 use rustc_middle::ty::print::with_no_trimmed_paths;
 use rustc_middle::ty::subst::SubstsRef;
 use rustc_span::symbol::{kw, sym, Symbol};
@@ -200,7 +202,7 @@ fn bool_to_zero_or_max_uint<'tcx>(
     let mut res = fx.bcx.ins().bmask(int_ty, val);
 
     if ty.is_float() {
-        res = fx.bcx.ins().bitcast(ty, res);
+        res = codegen_bitcast(fx, ty, res);
     }
 
     res
@@ -217,22 +219,6 @@ pub(crate) fn codegen_intrinsic_call<'tcx>(
     let intrinsic = fx.tcx.item_name(instance.def_id());
     let substs = instance.substs;
 
-    let target = if let Some(target) = target {
-        target
-    } else {
-        // Insert non returning intrinsics here
-        match intrinsic {
-            sym::abort => {
-                fx.bcx.ins().trap(TrapCode::User(0));
-            }
-            sym::transmute => {
-                crate::base::codegen_panic(fx, "Transmuting to uninhabited type.", source_info);
-            }
-            _ => unimplemented!("unsupported intrinsic {}", intrinsic),
-        }
-        return;
-    };
-
     if intrinsic.as_str().starts_with("simd_") {
         self::simd::codegen_simd_intrinsic_call(
             fx,
@@ -240,12 +226,11 @@ pub(crate) fn codegen_intrinsic_call<'tcx>(
             substs,
             args,
             destination,
+            target.expect("target for simd intrinsic"),
             source_info.span,
         );
-        let ret_block = fx.get_block(target);
-        fx.bcx.ins().jump(ret_block, &[]);
     } else if codegen_float_intrinsic_call(fx, intrinsic, args, destination) {
-        let ret_block = fx.get_block(target);
+        let ret_block = fx.get_block(target.expect("target for float intrinsic"));
         fx.bcx.ins().jump(ret_block, &[]);
     } else {
         codegen_regular_intrinsic_call(
@@ -255,7 +240,7 @@ pub(crate) fn codegen_intrinsic_call<'tcx>(
             substs,
             args,
             destination,
-            Some(target),
+            target,
             source_info,
         );
     }
@@ -382,6 +367,10 @@ fn codegen_regular_intrinsic_call<'tcx>(
     let usize_layout = fx.layout_of(fx.tcx.types.usize);
 
     match intrinsic {
+        sym::abort => {
+            fx.bcx.ins().trap(TrapCode::User(0));
+            return;
+        }
         sym::likely | sym::unlikely => {
             intrinsic_args!(fx, args => (a); intrinsic);
@@ -505,20 +494,6 @@ fn codegen_regular_intrinsic_call<'tcx>(
             let res = crate::num::codegen_int_binop(fx, bin_op, x, y);
             ret.write_cvalue(fx, res);
         }
-        sym::add_with_overflow | sym::sub_with_overflow | sym::mul_with_overflow => {
-            intrinsic_args!(fx, args => (x, y); intrinsic);
-
-            assert_eq!(x.layout().ty, y.layout().ty);
-            let bin_op = match intrinsic {
-                sym::add_with_overflow => BinOp::Add,
-                sym::sub_with_overflow => BinOp::Sub,
-                sym::mul_with_overflow => BinOp::Mul,
-                _ => unreachable!(),
-            };
-
-            let res = crate::num::codegen_checked_int_binop(fx, bin_op, x, y);
-            ret.write_cvalue(fx, res);
-        }
         sym::saturating_add | sym::saturating_sub => {
             intrinsic_args!(fx, args => (lhs, rhs); intrinsic);
@@ -579,6 +554,11 @@ fn codegen_regular_intrinsic_call<'tcx>(
         sym::transmute => {
             intrinsic_args!(fx, args => (from); intrinsic);
 
+            if ret.layout().abi.is_uninhabited() {
+                crate::base::codegen_panic(fx, "Transmuting to uninhabited type.", source_info);
+                return;
+            }
+
             ret.write_cvalue_transmute(fx, from);
         }
         sym::write_bytes | sym::volatile_set_memory => {
@@ -647,46 +627,40 @@ fn codegen_regular_intrinsic_call<'tcx>(
         sym::assert_inhabited | sym::assert_zero_valid | sym::assert_mem_uninitialized_valid => {
             intrinsic_args!(fx, args => (); intrinsic);
 
-            let layout = fx.layout_of(substs.type_at(0));
-            if layout.abi.is_uninhabited() {
-                with_no_trimmed_paths!({
-                    crate::base::codegen_panic(
-                        fx,
-                        &format!("attempted to instantiate uninhabited type `{}`", layout.ty),
-                        source_info,
-                    )
-                });
-                return;
-            }
-
-            if intrinsic == sym::assert_zero_valid && !fx.tcx.permits_zero_init(layout) {
-                with_no_trimmed_paths!({
-                    crate::base::codegen_panic(
-                        fx,
-                        &format!(
-                            "attempted to zero-initialize type `{}`, which is invalid",
-                            layout.ty
-                        ),
-                        source_info,
-                    );
-                });
-                return;
-            }
+            let ty = substs.type_at(0);
 
-            if intrinsic == sym::assert_mem_uninitialized_valid
-                && !fx.tcx.permits_uninit_init(layout)
-            {
-                with_no_trimmed_paths!({
-                    crate::base::codegen_panic(
-                        fx,
-                        &format!(
-                            "attempted to leave type `{}` uninitialized, which is invalid",
-                            layout.ty
-                        ),
-                        source_info,
-                    )
-                });
-                return;
+            let requirement = ValidityRequirement::from_intrinsic(intrinsic);
+
+            if let Some(requirement) = requirement {
+                let do_panic = !fx
+                    .tcx
+                    .check_validity_requirement((requirement, fx.param_env().and(ty)))
+                    .expect("expect to have layout during codegen");
+
+                if do_panic {
+                    let layout = fx.layout_of(ty);
+
+                    with_no_trimmed_paths!({
+                        crate::base::codegen_panic_nounwind(
+                            fx,
+                            &if layout.abi.is_uninhabited() {
+                                format!("attempted to instantiate uninhabited type `{}`", layout.ty)
+                            } else if requirement == ValidityRequirement::Zero {
+                                format!(
+                                    "attempted to zero-initialize type `{}`, which is invalid",
+                                    layout.ty
+                                )
+                            } else {
+                                format!(
+                                    "attempted to leave type `{}` uninitialized, which is invalid",
+                                    layout.ty
+                                )
+                            },
+                            source_info,
+                        )
+                    });
+                    return;
+                }
             }
         }
diff --git a/compiler/rustc_codegen_cranelift/src/intrinsics/simd.rs b/compiler/rustc_codegen_cranelift/src/intrinsics/simd.rs
index 14f5e9187..a1d63acfb 100644
--- a/compiler/rustc_codegen_cranelift/src/intrinsics/simd.rs
+++ b/compiler/rustc_codegen_cranelift/src/intrinsics/simd.rs
@@ -24,6 +24,7 @@ pub(super) fn codegen_simd_intrinsic_call<'tcx>(
     _substs: SubstsRef<'tcx>,
     args: &[mir::Operand<'tcx>],
     ret: CPlace<'tcx>,
+    target: BasicBlock,
     span: Span,
 ) {
     match intrinsic {
@@ -140,7 +141,7 @@ pub(super) fn codegen_simd_intrinsic_call<'tcx>(
             let idx_ty = fx.monomorphize(idx.ty(fx.mir, fx.tcx));
             match idx_ty.kind() {
                 ty::Array(ty, len) if matches!(ty.kind(), ty::Uint(ty::UintTy::U32)) => len
-                    .try_eval_usize(fx.tcx, ty::ParamEnv::reveal_all())
+                    .try_eval_target_usize(fx.tcx, ty::ParamEnv::reveal_all())
                     .unwrap_or_else(|| {
                         span_bug!(span, "could not evaluate shuffle index array length")
                     })
@@ -277,16 +278,15 @@ pub(super) fn codegen_simd_intrinsic_call<'tcx>(
             } else {
                 fx.tcx.sess.span_warn(span, "Index argument for `simd_extract` is not a constant");
                 let trap_block = fx.bcx.create_block();
-                let dummy_block = fx.bcx.create_block();
                 let true_ = fx.bcx.ins().iconst(types::I8, 1);
                 fx.bcx.ins().brnz(true_, trap_block, &[]);
-                fx.bcx.ins().jump(dummy_block, &[]);
+                let ret_block = fx.get_block(target);
+                fx.bcx.ins().jump(ret_block, &[]);
                 fx.bcx.switch_to_block(trap_block);
                 crate::trap::trap_unimplemented(
                     fx,
                     "Index argument for `simd_extract` is not a constant",
                 );
-                fx.bcx.switch_to_block(dummy_block);
                 return;
             };
@@ -735,7 +735,7 @@ pub(super) fn codegen_simd_intrinsic_call<'tcx>(
                 ty::Uint(i) if i.bit_width() == Some(expected_int_bits) => {}
                 ty::Array(elem, len)
                     if matches!(elem.kind(), ty::Uint(ty::UintTy::U8))
-                        && len.try_eval_usize(fx.tcx, ty::ParamEnv::reveal_all())
+                        && len.try_eval_target_usize(fx.tcx, ty::ParamEnv::reveal_all())
                             == Some(expected_bytes) => {}
                 _ => {
                     fx.tcx.sess.span_fatal(
@@ -770,11 +770,119 @@ pub(super) fn codegen_simd_intrinsic_call<'tcx>(
             });
         }
 
-        // simd_arith_offset
-        // simd_scatter
-        // simd_gather
+        sym::simd_expose_addr | sym::simd_from_exposed_addr | sym::simd_cast_ptr => {
+            intrinsic_args!(fx, args => (arg); intrinsic);
+            ret.write_cvalue_transmute(fx, arg);
+        }
+
+        sym::simd_arith_offset => {
+            intrinsic_args!(fx, args => (ptr, offset); intrinsic);
+
+            let (lane_count, ptr_lane_ty) = ptr.layout().ty.simd_size_and_type(fx.tcx);
+            let pointee_ty = ptr_lane_ty.builtin_deref(true).unwrap().ty;
+            let pointee_size = fx.layout_of(pointee_ty).size.bytes();
+            let (ret_lane_count, ret_lane_ty) = ret.layout().ty.simd_size_and_type(fx.tcx);
+            let ret_lane_layout = fx.layout_of(ret_lane_ty);
+            assert_eq!(lane_count, ret_lane_count);
+
+            for lane_idx in 0..lane_count {
+                let ptr_lane = ptr.value_lane(fx, lane_idx).load_scalar(fx);
+                let offset_lane = offset.value_lane(fx, lane_idx).load_scalar(fx);
+
+                let ptr_diff = if pointee_size != 1 {
+                    fx.bcx.ins().imul_imm(offset_lane, pointee_size as i64)
+                } else {
+                    offset_lane
+                };
+                let res_lane = fx.bcx.ins().iadd(ptr_lane, ptr_diff);
+                let res_lane = CValue::by_val(res_lane, ret_lane_layout);
+
+                ret.place_lane(fx, lane_idx).write_cvalue(fx, res_lane);
+            }
+        }
+
+        sym::simd_gather => {
+            intrinsic_args!(fx, args => (val, ptr, mask); intrinsic);
+
+            let (val_lane_count, val_lane_ty) = val.layout().ty.simd_size_and_type(fx.tcx);
+            let (ptr_lane_count, _ptr_lane_ty) = ptr.layout().ty.simd_size_and_type(fx.tcx);
+            let (mask_lane_count, _mask_lane_ty) = mask.layout().ty.simd_size_and_type(fx.tcx);
+            let (ret_lane_count, ret_lane_ty) = ret.layout().ty.simd_size_and_type(fx.tcx);
+            assert_eq!(val_lane_count, ptr_lane_count);
+            assert_eq!(val_lane_count, mask_lane_count);
+            assert_eq!(val_lane_count, ret_lane_count);
+
+            let lane_clif_ty = fx.clif_type(val_lane_ty).unwrap();
+            let ret_lane_layout = fx.layout_of(ret_lane_ty);
+
+            for lane_idx in 0..ptr_lane_count {
+                let val_lane = val.value_lane(fx, lane_idx).load_scalar(fx);
+                let ptr_lane = ptr.value_lane(fx, lane_idx).load_scalar(fx);
+                let mask_lane = mask.value_lane(fx, lane_idx).load_scalar(fx);
+
+                let if_enabled = fx.bcx.create_block();
+                let if_disabled = fx.bcx.create_block();
+                let next = fx.bcx.create_block();
+                let res_lane = fx.bcx.append_block_param(next, lane_clif_ty);
+
+                fx.bcx.ins().brnz(mask_lane, if_enabled, &[]);
+                fx.bcx.ins().jump(if_disabled, &[]);
+                fx.bcx.seal_block(if_enabled);
+                fx.bcx.seal_block(if_disabled);
+
+                fx.bcx.switch_to_block(if_enabled);
+                let res = fx.bcx.ins().load(lane_clif_ty, MemFlags::trusted(), ptr_lane, 0);
+                fx.bcx.ins().jump(next, &[res]);
+
+                fx.bcx.switch_to_block(if_disabled);
+                fx.bcx.ins().jump(next, &[val_lane]);
+
+                fx.bcx.seal_block(next);
+                fx.bcx.switch_to_block(next);
+
+                fx.bcx.ins().nop();
+
+                ret.place_lane(fx, lane_idx)
+                    .write_cvalue(fx, CValue::by_val(res_lane, ret_lane_layout));
+            }
+        }
+
+        sym::simd_scatter => {
+            intrinsic_args!(fx, args => (val, ptr, mask); intrinsic);
+
+            let (val_lane_count, _val_lane_ty) = val.layout().ty.simd_size_and_type(fx.tcx);
+            let (ptr_lane_count, _ptr_lane_ty) = ptr.layout().ty.simd_size_and_type(fx.tcx);
+            let (mask_lane_count, _mask_lane_ty) = mask.layout().ty.simd_size_and_type(fx.tcx);
+            assert_eq!(val_lane_count, ptr_lane_count);
+            assert_eq!(val_lane_count, mask_lane_count);
+
+            for lane_idx in 0..ptr_lane_count {
+                let val_lane = val.value_lane(fx, lane_idx).load_scalar(fx);
+                let ptr_lane = ptr.value_lane(fx, lane_idx).load_scalar(fx);
+                let mask_lane = mask.value_lane(fx, lane_idx).load_scalar(fx);
+
+                let if_enabled = fx.bcx.create_block();
+                let next = fx.bcx.create_block();
+
+                fx.bcx.ins().brnz(mask_lane, if_enabled, &[]);
+                fx.bcx.ins().jump(next, &[]);
+                fx.bcx.seal_block(if_enabled);
+
+                fx.bcx.switch_to_block(if_enabled);
+                fx.bcx.ins().store(MemFlags::trusted(), val_lane, ptr_lane, 0);
+                fx.bcx.ins().jump(next, &[]);
+
+                fx.bcx.seal_block(next);
+                fx.bcx.switch_to_block(next);
+            }
+        }
+
         _ => {
-            fx.tcx.sess.span_fatal(span, &format!("Unknown SIMD intrinsic {}", intrinsic));
+            fx.tcx.sess.span_err(span, &format!("Unknown SIMD intrinsic {}", intrinsic));
+            // Prevent verifier error
+            fx.bcx.ins().trap(TrapCode::UnreachableCodeReached);
         }
     }
+    let ret_block = fx.get_block(target);
+    fx.bcx.ins().jump(ret_block, &[]);
 }
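For reference, the new `simd_gather` and `simd_scatter` arms in the diff above lower each vector lane to a `brnz` on its mask lane, a load or store through that lane's pointer in an `if_enabled` block, and a jump to a merge block. Below is a minimal plain-Rust sketch of that per-lane masked semantics, assuming hypothetical fixed-width helpers `gather4` and `scatter4` (illustrative names only, not rustc or Cranelift APIs):

```rust
/// Lane-wise gather: where `mask` is set, load through that lane's pointer;
/// otherwise keep the pass-through value from `val`.
unsafe fn gather4(val: [u32; 4], ptrs: [*const u32; 4], mask: [bool; 4]) -> [u32; 4] {
    let mut out = val;
    for lane in 0..4 {
        if mask[lane] {
            // Enabled lane: load from the lane pointer (the `if_enabled` path).
            out[lane] = *ptrs[lane];
        }
        // Masked-off lane: keep the pass-through value (the `if_disabled` path).
    }
    out
}

/// Lane-wise scatter: where `mask` is set, store that lane's value through its
/// pointer; masked-off lanes touch no memory.
unsafe fn scatter4(val: [u32; 4], ptrs: [*mut u32; 4], mask: [bool; 4]) {
    for lane in 0..4 {
        if mask[lane] {
            *ptrs[lane] = val[lane];
        }
    }
}

fn main() {
    let src = [10u32, 20, 30, 40];
    let src_ptrs: [*const u32; 4] =
        [&src[0], &src[1], &src[2], &src[3]].map(|r| r as *const u32);
    // Lanes 0 and 2 are enabled; lanes 1 and 3 keep the pass-through value 0.
    let gathered = unsafe { gather4([0; 4], src_ptrs, [true, false, true, false]) };
    assert_eq!(gathered, [10, 0, 30, 0]);

    let mut dst = [0u32; 4];
    let base = dst.as_mut_ptr();
    let dst_ptrs: [*mut u32; 4] = core::array::from_fn(|i| unsafe { base.add(i) });
    // Only lanes 0 and 1 are stored; lanes 2 and 3 are left untouched.
    unsafe { scatter4(gathered, dst_ptrs, [true, true, false, false]) };
    assert_eq!(dst, [10, 0, 0, 0]);
}
```

In the actual lowering, the gathered lane value reaches the merge block as a Cranelift block parameter (`res_lane`), with the pass-through lane from `val` supplied on the `if_disabled` path.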