author:    Daniel Baumann <daniel.baumann@progress-linux.org>    2024-05-30 03:57:31 +0000
committer: Daniel Baumann <daniel.baumann@progress-linux.org>    2024-05-30 03:57:31 +0000
commit:    dc0db358abe19481e475e10c32149b53370f1a1c (patch)
tree:      ab8ce99c4b255ce46f99ef402c27916055b899ee /compiler/rustc_codegen_gcc/src/intrinsic
parent:    Releasing progress-linux version 1.71.1+dfsg1-2~progress7.99u1. (diff)
Merging upstream version 1.72.1+dfsg1.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'compiler/rustc_codegen_gcc/src/intrinsic')
-rw-r--r--  compiler/rustc_codegen_gcc/src/intrinsic/archs.rs  |  46
-rw-r--r--  compiler/rustc_codegen_gcc/src/intrinsic/llvm.rs   |   7
-rw-r--r--  compiler/rustc_codegen_gcc/src/intrinsic/mod.rs    | 169
-rw-r--r--  compiler/rustc_codegen_gcc/src/intrinsic/simd.rs   |  18
4 files changed, 64 insertions, 176 deletions
diff --git a/compiler/rustc_codegen_gcc/src/intrinsic/archs.rs b/compiler/rustc_codegen_gcc/src/intrinsic/archs.rs
index 8a4559355..438eab789 100644
--- a/compiler/rustc_codegen_gcc/src/intrinsic/archs.rs
+++ b/compiler/rustc_codegen_gcc/src/intrinsic/archs.rs
@@ -2967,10 +2967,6 @@ match name {
     "llvm.nvvm.clz.ll" => "__nvvm_clz_ll",
     "llvm.nvvm.cos.approx.f" => "__nvvm_cos_approx_f",
     "llvm.nvvm.cos.approx.ftz.f" => "__nvvm_cos_approx_ftz_f",
-    "llvm.nvvm.cp.async.ca.shared.global.16" => "__nvvm_cp_async_ca_shared_global_16",
-    "llvm.nvvm.cp.async.ca.shared.global.4" => "__nvvm_cp_async_ca_shared_global_4",
-    "llvm.nvvm.cp.async.ca.shared.global.8" => "__nvvm_cp_async_ca_shared_global_8",
-    "llvm.nvvm.cp.async.cg.shared.global.16" => "__nvvm_cp_async_cg_shared_global_16",
     "llvm.nvvm.cp.async.commit.group" => "__nvvm_cp_async_commit_group",
     "llvm.nvvm.cp.async.mbarrier.arrive" => "__nvvm_cp_async_mbarrier_arrive",
     "llvm.nvvm.cp.async.mbarrier.arrive.noinc" => "__nvvm_cp_async_mbarrier_arrive_noinc",
@@ -3086,18 +3082,8 @@ match name {
     "llvm.nvvm.fma.rn.f16" => "__nvvm_fma_rn_f16",
     "llvm.nvvm.fma.rn.f16x2" => "__nvvm_fma_rn_f16x2",
     "llvm.nvvm.fma.rn.ftz.f" => "__nvvm_fma_rn_ftz_f",
-    "llvm.nvvm.fma.rn.ftz.f16" => "__nvvm_fma_rn_ftz_f16",
-    "llvm.nvvm.fma.rn.ftz.f16x2" => "__nvvm_fma_rn_ftz_f16x2",
-    "llvm.nvvm.fma.rn.ftz.relu.f16" => "__nvvm_fma_rn_ftz_relu_f16",
-    "llvm.nvvm.fma.rn.ftz.relu.f16x2" => "__nvvm_fma_rn_ftz_relu_f16x2",
-    "llvm.nvvm.fma.rn.ftz.sat.f16" => "__nvvm_fma_rn_ftz_sat_f16",
-    "llvm.nvvm.fma.rn.ftz.sat.f16x2" => "__nvvm_fma_rn_ftz_sat_f16x2",
     "llvm.nvvm.fma.rn.relu.bf16" => "__nvvm_fma_rn_relu_bf16",
     "llvm.nvvm.fma.rn.relu.bf16x2" => "__nvvm_fma_rn_relu_bf16x2",
-    "llvm.nvvm.fma.rn.relu.f16" => "__nvvm_fma_rn_relu_f16",
-    "llvm.nvvm.fma.rn.relu.f16x2" => "__nvvm_fma_rn_relu_f16x2",
-    "llvm.nvvm.fma.rn.sat.f16" => "__nvvm_fma_rn_sat_f16",
-    "llvm.nvvm.fma.rn.sat.f16x2" => "__nvvm_fma_rn_sat_f16x2",
     "llvm.nvvm.fma.rp.d" => "__nvvm_fma_rp_d",
     "llvm.nvvm.fma.rp.f" => "__nvvm_fma_rp_f",
     "llvm.nvvm.fma.rp.ftz.f" => "__nvvm_fma_rp_ftz_f",
@@ -3111,32 +3097,18 @@ match name {
     "llvm.nvvm.fmax.f16" => "__nvvm_fmax_f16",
     "llvm.nvvm.fmax.f16x2" => "__nvvm_fmax_f16x2",
     "llvm.nvvm.fmax.ftz.f" => "__nvvm_fmax_ftz_f",
-    "llvm.nvvm.fmax.ftz.f16" => "__nvvm_fmax_ftz_f16",
-    "llvm.nvvm.fmax.ftz.f16x2" => "__nvvm_fmax_ftz_f16x2",
     "llvm.nvvm.fmax.ftz.nan.f" => "__nvvm_fmax_ftz_nan_f",
-    "llvm.nvvm.fmax.ftz.nan.f16" => "__nvvm_fmax_ftz_nan_f16",
-    "llvm.nvvm.fmax.ftz.nan.f16x2" => "__nvvm_fmax_ftz_nan_f16x2",
     "llvm.nvvm.fmax.ftz.nan.xorsign.abs.f" => "__nvvm_fmax_ftz_nan_xorsign_abs_f",
-    "llvm.nvvm.fmax.ftz.nan.xorsign.abs.f16" => "__nvvm_fmax_ftz_nan_xorsign_abs_f16",
-    "llvm.nvvm.fmax.ftz.nan.xorsign.abs.f16x2" => "__nvvm_fmax_ftz_nan_xorsign_abs_f16x2",
     "llvm.nvvm.fmax.ftz.xorsign.abs.f" => "__nvvm_fmax_ftz_xorsign_abs_f",
-    "llvm.nvvm.fmax.ftz.xorsign.abs.f16" => "__nvvm_fmax_ftz_xorsign_abs_f16",
-    "llvm.nvvm.fmax.ftz.xorsign.abs.f16x2" => "__nvvm_fmax_ftz_xorsign_abs_f16x2",
     "llvm.nvvm.fmax.nan.bf16" => "__nvvm_fmax_nan_bf16",
     "llvm.nvvm.fmax.nan.bf16x2" => "__nvvm_fmax_nan_bf16x2",
     "llvm.nvvm.fmax.nan.f" => "__nvvm_fmax_nan_f",
-    "llvm.nvvm.fmax.nan.f16" => "__nvvm_fmax_nan_f16",
-    "llvm.nvvm.fmax.nan.f16x2" => "__nvvm_fmax_nan_f16x2",
     "llvm.nvvm.fmax.nan.xorsign.abs.bf16" => "__nvvm_fmax_nan_xorsign_abs_bf16",
     "llvm.nvvm.fmax.nan.xorsign.abs.bf16x2" => "__nvvm_fmax_nan_xorsign_abs_bf16x2",
     "llvm.nvvm.fmax.nan.xorsign.abs.f" => "__nvvm_fmax_nan_xorsign_abs_f",
-    "llvm.nvvm.fmax.nan.xorsign.abs.f16" => "__nvvm_fmax_nan_xorsign_abs_f16",
-    "llvm.nvvm.fmax.nan.xorsign.abs.f16x2" => "__nvvm_fmax_nan_xorsign_abs_f16x2",
     "llvm.nvvm.fmax.xorsign.abs.bf16" => "__nvvm_fmax_xorsign_abs_bf16",
     "llvm.nvvm.fmax.xorsign.abs.bf16x2" => "__nvvm_fmax_xorsign_abs_bf16x2",
     "llvm.nvvm.fmax.xorsign.abs.f" => "__nvvm_fmax_xorsign_abs_f",
-    "llvm.nvvm.fmax.xorsign.abs.f16" => "__nvvm_fmax_xorsign_abs_f16",
-    "llvm.nvvm.fmax.xorsign.abs.f16x2" => "__nvvm_fmax_xorsign_abs_f16x2",
     "llvm.nvvm.fmin.bf16" => "__nvvm_fmin_bf16",
     "llvm.nvvm.fmin.bf16x2" => "__nvvm_fmin_bf16x2",
     "llvm.nvvm.fmin.d" => "__nvvm_fmin_d",
@@ -3144,32 +3116,18 @@ match name {
     "llvm.nvvm.fmin.f16" => "__nvvm_fmin_f16",
     "llvm.nvvm.fmin.f16x2" => "__nvvm_fmin_f16x2",
     "llvm.nvvm.fmin.ftz.f" => "__nvvm_fmin_ftz_f",
-    "llvm.nvvm.fmin.ftz.f16" => "__nvvm_fmin_ftz_f16",
-    "llvm.nvvm.fmin.ftz.f16x2" => "__nvvm_fmin_ftz_f16x2",
     "llvm.nvvm.fmin.ftz.nan.f" => "__nvvm_fmin_ftz_nan_f",
-    "llvm.nvvm.fmin.ftz.nan.f16" => "__nvvm_fmin_ftz_nan_f16",
-    "llvm.nvvm.fmin.ftz.nan.f16x2" => "__nvvm_fmin_ftz_nan_f16x2",
     "llvm.nvvm.fmin.ftz.nan.xorsign.abs.f" => "__nvvm_fmin_ftz_nan_xorsign_abs_f",
-    "llvm.nvvm.fmin.ftz.nan.xorsign.abs.f16" => "__nvvm_fmin_ftz_nan_xorsign_abs_f16",
-    "llvm.nvvm.fmin.ftz.nan.xorsign.abs.f16x2" => "__nvvm_fmin_ftz_nan_xorsign_abs_f16x2",
     "llvm.nvvm.fmin.ftz.xorsign.abs.f" => "__nvvm_fmin_ftz_xorsign_abs_f",
-    "llvm.nvvm.fmin.ftz.xorsign.abs.f16" => "__nvvm_fmin_ftz_xorsign_abs_f16",
-    "llvm.nvvm.fmin.ftz.xorsign.abs.f16x2" => "__nvvm_fmin_ftz_xorsign_abs_f16x2",
     "llvm.nvvm.fmin.nan.bf16" => "__nvvm_fmin_nan_bf16",
     "llvm.nvvm.fmin.nan.bf16x2" => "__nvvm_fmin_nan_bf16x2",
     "llvm.nvvm.fmin.nan.f" => "__nvvm_fmin_nan_f",
-    "llvm.nvvm.fmin.nan.f16" => "__nvvm_fmin_nan_f16",
-    "llvm.nvvm.fmin.nan.f16x2" => "__nvvm_fmin_nan_f16x2",
     "llvm.nvvm.fmin.nan.xorsign.abs.bf16" => "__nvvm_fmin_nan_xorsign_abs_bf16",
     "llvm.nvvm.fmin.nan.xorsign.abs.bf16x2" => "__nvvm_fmin_nan_xorsign_abs_bf16x2",
     "llvm.nvvm.fmin.nan.xorsign.abs.f" => "__nvvm_fmin_nan_xorsign_abs_f",
-    "llvm.nvvm.fmin.nan.xorsign.abs.f16" => "__nvvm_fmin_nan_xorsign_abs_f16",
-    "llvm.nvvm.fmin.nan.xorsign.abs.f16x2" => "__nvvm_fmin_nan_xorsign_abs_f16x2",
     "llvm.nvvm.fmin.xorsign.abs.bf16" => "__nvvm_fmin_xorsign_abs_bf16",
     "llvm.nvvm.fmin.xorsign.abs.bf16x2" => "__nvvm_fmin_xorsign_abs_bf16x2",
     "llvm.nvvm.fmin.xorsign.abs.f" => "__nvvm_fmin_xorsign_abs_f",
-    "llvm.nvvm.fmin.xorsign.abs.f16" => "__nvvm_fmin_xorsign_abs_f16",
-    "llvm.nvvm.fmin.xorsign.abs.f16x2" => "__nvvm_fmin_xorsign_abs_f16x2",
     "llvm.nvvm.fns" => "__nvvm_fns",
     "llvm.nvvm.h2f" => "__nvvm_h2f",
     "llvm.nvvm.i2d.rm" => "__nvvm_i2d_rm",
@@ -7895,6 +7853,10 @@ match name {
     "llvm.x86.subborrow.u64" => "__builtin_ia32_subborrow_u64",
     "llvm.x86.tbm.bextri.u32" => "__builtin_ia32_bextri_u32",
     "llvm.x86.tbm.bextri.u64" => "__builtin_ia32_bextri_u64",
+    "llvm.x86.tcmmimfp16ps" => "__builtin_ia32_tcmmimfp16ps",
+    "llvm.x86.tcmmimfp16ps.internal" => "__builtin_ia32_tcmmimfp16ps_internal",
+    "llvm.x86.tcmmrlfp16ps" => "__builtin_ia32_tcmmrlfp16ps",
+    "llvm.x86.tcmmrlfp16ps.internal" => "__builtin_ia32_tcmmrlfp16ps_internal",
     "llvm.x86.tdpbf16ps" => "__builtin_ia32_tdpbf16ps",
     "llvm.x86.tdpbf16ps.internal" => "__builtin_ia32_tdpbf16ps_internal",
     "llvm.x86.tdpbssd" => "__builtin_ia32_tdpbssd",
diff --git a/compiler/rustc_codegen_gcc/src/intrinsic/llvm.rs b/compiler/rustc_codegen_gcc/src/intrinsic/llvm.rs
index 0edec566b..f28348380 100644
--- a/compiler/rustc_codegen_gcc/src/intrinsic/llvm.rs
+++ b/compiler/rustc_codegen_gcc/src/intrinsic/llvm.rs
@@ -313,6 +313,13 @@ pub fn adjust_intrinsic_arguments<'a, 'b, 'gcc, 'tcx>(builder: &Builder<'a, 'gcc
             let new_args = args.to_vec();
             args = vec![new_args[1], new_args[0], new_args[2], new_args[3], new_args[4]].into();
         },
+        "__builtin_ia32_vpshrdv_v8di" | "__builtin_ia32_vpshrdv_v4di" | "__builtin_ia32_vpshrdv_v2di" |
+        "__builtin_ia32_vpshrdv_v16si" | "__builtin_ia32_vpshrdv_v8si" | "__builtin_ia32_vpshrdv_v4si" |
+        "__builtin_ia32_vpshrdv_v32hi" | "__builtin_ia32_vpshrdv_v16hi" | "__builtin_ia32_vpshrdv_v8hi" => {
+            // The first two arguments are reversed, compared to LLVM.
+            let new_args = args.to_vec();
+            args = vec![new_args[1], new_args[0], new_args[2]].into();
+        },
         _ => (),
     }
 }
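Aside (not part of the patch): the new `__builtin_ia32_vpshrdv_*` arm above follows the same pattern as the existing arms, rebuilding the argument list with the first two operands swapped because the GCC builtin and the LLVM intrinsic disagree on operand order. A minimal standalone sketch of that reordering step, with a hypothetical `args` slice standing in for the builder's argument list:

```rust
// Illustrative sketch only: swap the first two operands of an argument list,
// mirroring the `vec![new_args[1], new_args[0], new_args[2]]` pattern above.
fn swap_first_two<T: Clone>(args: &[T]) -> Vec<T> {
    assert!(args.len() >= 3, "the vpshrdv builtins take three operands");
    let mut new_args = args.to_vec();
    new_args.swap(0, 1);
    new_args
}

fn main() {
    let args = ["a", "b", "count"];
    assert_eq!(swap_first_two(&args), vec!["b", "a", "count"]);
}
```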
diff --git a/compiler/rustc_codegen_gcc/src/intrinsic/mod.rs b/compiler/rustc_codegen_gcc/src/intrinsic/mod.rs
index 601768747..0b208be4e 100644
--- a/compiler/rustc_codegen_gcc/src/intrinsic/mod.rs
+++ b/compiler/rustc_codegen_gcc/src/intrinsic/mod.rs
@@ -551,141 +551,52 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
         let context = &self.cx.context;
         let result = match width {
-            8 => {
-                // First step.
-                let left = self.and(value, context.new_rvalue_from_int(typ, 0xF0));
-                let left = self.lshr(left, context.new_rvalue_from_int(typ, 4));
-                let right = self.and(value, context.new_rvalue_from_int(typ, 0x0F));
-                let right = self.shl(right, context.new_rvalue_from_int(typ, 4));
-                let step1 = self.or(left, right);
-
-                // Second step.
-                let left = self.and(step1, context.new_rvalue_from_int(typ, 0xCC));
-                let left = self.lshr(left, context.new_rvalue_from_int(typ, 2));
-                let right = self.and(step1, context.new_rvalue_from_int(typ, 0x33));
-                let right = self.shl(right, context.new_rvalue_from_int(typ, 2));
-                let step2 = self.or(left, right);
-
-                // Third step.
-                let left = self.and(step2, context.new_rvalue_from_int(typ, 0xAA));
-                let left = self.lshr(left, context.new_rvalue_from_int(typ, 1));
-                let right = self.and(step2, context.new_rvalue_from_int(typ, 0x55));
-                let right = self.shl(right, context.new_rvalue_from_int(typ, 1));
-                let step3 = self.or(left, right);
-
-                step3
-            },
-            16 => {
-                // First step.
-                let left = self.and(value, context.new_rvalue_from_int(typ, 0x5555));
-                let left = self.shl(left, context.new_rvalue_from_int(typ, 1));
-                let right = self.and(value, context.new_rvalue_from_int(typ, 0xAAAA));
-                let right = self.lshr(right, context.new_rvalue_from_int(typ, 1));
-                let step1 = self.or(left, right);
-
-                // Second step.
-                let left = self.and(step1, context.new_rvalue_from_int(typ, 0x3333));
-                let left = self.shl(left, context.new_rvalue_from_int(typ, 2));
-                let right = self.and(step1, context.new_rvalue_from_int(typ, 0xCCCC));
-                let right = self.lshr(right, context.new_rvalue_from_int(typ, 2));
-                let step2 = self.or(left, right);
-
-                // Third step.
-                let left = self.and(step2, context.new_rvalue_from_int(typ, 0x0F0F));
-                let left = self.shl(left, context.new_rvalue_from_int(typ, 4));
-                let right = self.and(step2, context.new_rvalue_from_int(typ, 0xF0F0));
-                let right = self.lshr(right, context.new_rvalue_from_int(typ, 4));
-                let step3 = self.or(left, right);
-
-                // Fourth step.
-                let left = self.and(step3, context.new_rvalue_from_int(typ, 0x00FF));
-                let left = self.shl(left, context.new_rvalue_from_int(typ, 8));
-                let right = self.and(step3, context.new_rvalue_from_int(typ, 0xFF00));
-                let right = self.lshr(right, context.new_rvalue_from_int(typ, 8));
-                let step4 = self.or(left, right);
+            8 | 16 | 32 | 64 => {
+                let mask = ((1u128 << width) - 1) as u64;
+                let (m0, m1, m2) = if width > 16 {
+                    (
+                        context.new_rvalue_from_long(typ, (0x5555555555555555u64 & mask) as i64),
+                        context.new_rvalue_from_long(typ, (0x3333333333333333u64 & mask) as i64),
+                        context.new_rvalue_from_long(typ, (0x0f0f0f0f0f0f0f0fu64 & mask) as i64),
+                    )
+                } else {
+                    (
+                        context.new_rvalue_from_int(typ, (0x5555u64 & mask) as i32),
+                        context.new_rvalue_from_int(typ, (0x3333u64 & mask) as i32),
+                        context.new_rvalue_from_int(typ, (0x0f0fu64 & mask) as i32),
+                    )
+                };
+                let one = context.new_rvalue_from_int(typ, 1);
+                let two = context.new_rvalue_from_int(typ, 2);
+                let four = context.new_rvalue_from_int(typ, 4);
-
-                step4
-            },
-            32 => {
-                // TODO(antoyo): Refactor with other implementations.
                 // First step.
-                let left = self.and(value, context.new_rvalue_from_long(typ, 0x55555555));
-                let left = self.shl(left, context.new_rvalue_from_long(typ, 1));
-                let right = self.and(value, context.new_rvalue_from_long(typ, 0xAAAAAAAA));
-                let right = self.lshr(right, context.new_rvalue_from_long(typ, 1));
+                let left = self.lshr(value, one);
+                let left = self.and(left, m0);
+                let right = self.and(value, m0);
+                let right = self.shl(right, one);
                 let step1 = self.or(left, right);

                 // Second step.
-                let left = self.and(step1, context.new_rvalue_from_long(typ, 0x33333333));
-                let left = self.shl(left, context.new_rvalue_from_long(typ, 2));
-                let right = self.and(step1, context.new_rvalue_from_long(typ, 0xCCCCCCCC));
-                let right = self.lshr(right, context.new_rvalue_from_long(typ, 2));
+                let left = self.lshr(step1, two);
+                let left = self.and(left, m1);
+                let right = self.and(step1, m1);
+                let right = self.shl(right, two);
                 let step2 = self.or(left, right);

                 // Third step.
-                let left = self.and(step2, context.new_rvalue_from_long(typ, 0x0F0F0F0F));
-                let left = self.shl(left, context.new_rvalue_from_long(typ, 4));
-                let right = self.and(step2, context.new_rvalue_from_long(typ, 0xF0F0F0F0));
-                let right = self.lshr(right, context.new_rvalue_from_long(typ, 4));
+                let left = self.lshr(step2, four);
+                let left = self.and(left, m2);
+                let right = self.and(step2, m2);
+                let right = self.shl(right, four);
                 let step3 = self.or(left, right);

                 // Fourth step.
-                let left = self.and(step3, context.new_rvalue_from_long(typ, 0x00FF00FF));
-                let left = self.shl(left, context.new_rvalue_from_long(typ, 8));
-                let right = self.and(step3, context.new_rvalue_from_long(typ, 0xFF00FF00));
-                let right = self.lshr(right, context.new_rvalue_from_long(typ, 8));
-                let step4 = self.or(left, right);
-
-                // Fifth step.
-                let left = self.and(step4, context.new_rvalue_from_long(typ, 0x0000FFFF));
-                let left = self.shl(left, context.new_rvalue_from_long(typ, 16));
-                let right = self.and(step4, context.new_rvalue_from_long(typ, 0xFFFF0000));
-                let right = self.lshr(right, context.new_rvalue_from_long(typ, 16));
-                let step5 = self.or(left, right);
-
-                step5
-            },
-            64 => {
-                // First step.
-                let left = self.shl(value, context.new_rvalue_from_long(typ, 32));
-                let right = self.lshr(value, context.new_rvalue_from_long(typ, 32));
-                let step1 = self.or(left, right);
-
-                // Second step.
-                let left = self.and(step1, context.new_rvalue_from_long(typ, 0x0001FFFF0001FFFF));
-                let left = self.shl(left, context.new_rvalue_from_long(typ, 15));
-                let right = self.and(step1, context.new_rvalue_from_long(typ, 0xFFFE0000FFFE0000u64 as i64)); // TODO(antoyo): transmute the number instead?
-                let right = self.lshr(right, context.new_rvalue_from_long(typ, 17));
-                let step2 = self.or(left, right);
-
-                // Third step.
-                let left = self.lshr(step2, context.new_rvalue_from_long(typ, 10));
-                let left = self.xor(step2, left);
-                let temp = self.and(left, context.new_rvalue_from_long(typ, 0x003F801F003F801F));
-
-                let left = self.shl(temp, context.new_rvalue_from_long(typ, 10));
-                let left = self.or(temp, left);
-                let step3 = self.xor(left, step2);
-
-                // Fourth step.
-                let left = self.lshr(step3, context.new_rvalue_from_long(typ, 4));
-                let left = self.xor(step3, left);
-                let temp = self.and(left, context.new_rvalue_from_long(typ, 0x0E0384210E038421));
-
-                let left = self.shl(temp, context.new_rvalue_from_long(typ, 4));
-                let left = self.or(temp, left);
-                let step4 = self.xor(left, step3);
-
-                // Fifth step.
-                let left = self.lshr(step4, context.new_rvalue_from_long(typ, 2));
-                let left = self.xor(step4, left);
-                let temp = self.and(left, context.new_rvalue_from_long(typ, 0x2248884222488842));
-
-                let left = self.shl(temp, context.new_rvalue_from_long(typ, 2));
-                let left = self.or(temp, left);
-                let step5 = self.xor(left, step4);
-
-                step5
+                if width == 8 {
+                    step3
+                } else {
+                    self.gcc_bswap(step3, width)
+                }
             },
             128 => {
                 // TODO(antoyo): find a more efficient implementation?
@@ -1236,19 +1147,19 @@ fn get_rust_try_fn<'a, 'gcc, 'tcx>(cx: &'a CodegenCx<'gcc, 'tcx>, codegen: &mut
     // Define the type up front for the signature of the rust_try function.
     let tcx = cx.tcx;
-    let i8p = tcx.mk_mut_ptr(tcx.types.i8);
+    let i8p = Ty::new_mut_ptr(tcx,tcx.types.i8);
     // `unsafe fn(*mut i8) -> ()`
-    let try_fn_ty = tcx.mk_fn_ptr(ty::Binder::dummy(tcx.mk_fn_sig(
+    let try_fn_ty = Ty::new_fn_ptr(tcx,ty::Binder::dummy(tcx.mk_fn_sig(
         iter::once(i8p),
-        tcx.mk_unit(),
+        Ty::new_unit(tcx,),
         false,
         rustc_hir::Unsafety::Unsafe,
         Abi::Rust,
     )));
     // `unsafe fn(*mut i8, *mut i8) -> ()`
-    let catch_fn_ty = tcx.mk_fn_ptr(ty::Binder::dummy(tcx.mk_fn_sig(
+    let catch_fn_ty = Ty::new_fn_ptr(tcx,ty::Binder::dummy(tcx.mk_fn_sig(
         [i8p, i8p].iter().cloned(),
-        tcx.mk_unit(),
+        Ty::new_unit(tcx,),
         false,
         rustc_hir::Unsafety::Unsafe,
         Abi::Rust,
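Aside (not part of the patch): the rewritten match on `width` above collapses the separate 8/16/32/64-bit bit-reversal cases into one mask-and-swap sequence: three swap rounds (adjacent bits, 2-bit pairs, nibbles) reverse the bits within each byte, and a final byte swap handles every width above 8. A standalone sketch of the same idea on a plain integer (the function name and the `u64` carrier are mine; the compiler builds these operations through libgccjit rather than like this):

```rust
// Illustrative sketch of the mask-and-bswap bit reversal used above,
// for a `width`-bit value (width in {8, 16, 32, 64}) stored in a u64.
fn bit_reverse(value: u64, width: u32) -> u64 {
    let mask = if width == 64 { u64::MAX } else { (1u64 << width) - 1 };
    let m0 = 0x5555_5555_5555_5555u64 & mask;
    let m1 = 0x3333_3333_3333_3333u64 & mask;
    let m2 = 0x0f0f_0f0f_0f0f_0f0fu64 & mask;

    // First step: swap adjacent bits.
    let step1 = ((value >> 1) & m0) | ((value & m0) << 1);
    // Second step: swap 2-bit pairs.
    let step2 = ((step1 >> 2) & m1) | ((step1 & m1) << 2);
    // Third step: swap nibbles; each byte is now bit-reversed.
    let step3 = ((step2 >> 4) & m2) | ((step2 & m2) << 4);

    // Fourth step: reverse the byte order for widths above 8 (the codegen
    // does this with a bswap on the width-sized type; here we emulate it).
    if width == 8 {
        step3
    } else {
        (step3 << (64 - width)).swap_bytes()
    }
}

fn main() {
    assert_eq!(bit_reverse(0b0000_0001, 8), 0b1000_0000);
    assert_eq!(bit_reverse(1, 16), 1u64 << 15);
    assert_eq!(bit_reverse(1, 64), 0x8000_0000_0000_0000);
    // Cross-check against the standard library on a full-width value.
    assert_eq!(bit_reverse(0x0123_4567_89ab_cdef, 64), 0x0123_4567_89ab_cdefu64.reverse_bits());
}
```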
diff --git a/compiler/rustc_codegen_gcc/src/intrinsic/simd.rs b/compiler/rustc_codegen_gcc/src/intrinsic/simd.rs
index b59c3a64f..9115cf971 100644
--- a/compiler/rustc_codegen_gcc/src/intrinsic/simd.rs
+++ b/compiler/rustc_codegen_gcc/src/intrinsic/simd.rs
@@ -165,10 +165,15 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(
         InvalidMonomorphizationReturnIntegerType { span, name, ret_ty, out_ty }
     );

+    let arg1 = args[0].immediate();
+    // NOTE: we get different vector types for the same vector type and libgccjit doesn't
+    // compare them as equal, so bitcast.
+    // FIXME(antoyo): allow comparing vector types as equal in libgccjit.
+    let arg2 = bx.context.new_bitcast(None, args[1].immediate(), arg1.get_type());
     return Ok(compare_simd_types(
         bx,
-        args[0].immediate(),
-        args[1].immediate(),
+        arg1,
+        arg2,
         in_elem,
         llret_ty,
         cmp_op,
@@ -341,7 +346,8 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(
     // endian and MSB-first for big endian.
     let vector = args[0].immediate();
-    let vector_type = vector.get_type().dyncast_vector().expect("vector type");
+    // TODO(antoyo): dyncast_vector should not require a call to unqualified.
+    let vector_type = vector.get_type().unqualified().dyncast_vector().expect("vector type");
     let elem_type = vector_type.get_element_type();

     let expected_int_bits = in_len.max(8);
@@ -848,7 +854,8 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(
         (true, true) => {
             // Algorithm from: https://codereview.stackexchange.com/questions/115869/saturated-signed-addition
             // TODO(antoyo): improve using conditional operators if possible.
-            let arg_type = lhs.get_type();
+            // TODO(antoyo): dyncast_vector should not require a call to unqualified.
+            let arg_type = lhs.get_type().unqualified();
             // TODO(antoyo): convert lhs and rhs to unsigned.
             let sum = lhs + rhs;
             let vector_type = arg_type.dyncast_vector().expect("vector type");
@@ -878,7 +885,8 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(
             res & cmp
         },
         (true, false) => {
-            let arg_type = lhs.get_type();
+            // TODO(antoyo): dyncast_vector should not require a call to unqualified.
+            let arg_type = lhs.get_type().unqualified();
             // TODO(antoyo): this uses the same algorithm from saturating add, but add the
             // negative of the right operand. Find a proper subtraction algorithm.
             let rhs = bx.context.new_unary_op(None, UnaryOp::Minus, arg_type, rhs);
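Aside (not part of the patch): the `(true, true)` arm touched above implements saturating signed addition for vectors (per the linked codereview.stackexchange algorithm); the hunk itself only changes how the operand type is obtained (`unqualified()` before `dyncast_vector()`). For reference, a scalar sketch of the overflow-detection idea behind that arm, in my own simplified formulation rather than the vectorized libgccjit code:

```rust
// Illustrative scalar sketch of saturating signed addition: overflow occurred
// iff both operands have the same sign and the wrapped sum has the other sign.
fn saturating_add_i32(lhs: i32, rhs: i32) -> i32 {
    let sum = lhs.wrapping_add(rhs);
    let overflow = ((lhs ^ sum) & (rhs ^ sum)) < 0;
    if overflow {
        // Saturate toward the sign of the operands.
        if lhs < 0 { i32::MIN } else { i32::MAX }
    } else {
        sum
    }
}

fn main() {
    assert_eq!(saturating_add_i32(i32::MAX, 1), i32::MAX);
    assert_eq!(saturating_add_i32(i32::MIN, -1), i32::MIN);
    assert_eq!(saturating_add_i32(40, 2), 42);
    // Cross-check against the standard library.
    assert_eq!(saturating_add_i32(i32::MAX - 3, 10), (i32::MAX - 3).saturating_add(10));
}
```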