| author | Daniel Baumann <daniel.baumann@progress-linux.org> | 2024-05-30 03:59:24 +0000 |
|---|---|---|
| committer | Daniel Baumann <daniel.baumann@progress-linux.org> | 2024-05-30 03:59:24 +0000 |
| commit | 023939b627b7dc93b01471f7d41fb8553ddb4ffa (patch) | |
| tree | 60fc59477c605c72b0a1051409062ddecc43f877 /tests/codegen/simd-intrinsic | |
| parent | Adding debian version 1.72.1+dfsg1-1. (diff) | |
Merging upstream version 1.73.0+dfsg1.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'tests/codegen/simd-intrinsic')
5 files changed, 95 insertions, 96 deletions
diff --git a/tests/codegen/simd-intrinsic/simd-intrinsic-generic-arithmetic-saturating.rs b/tests/codegen/simd-intrinsic/simd-intrinsic-generic-arithmetic-saturating.rs
index faac7566a..0bcfacec6 100644
--- a/tests/codegen/simd-intrinsic/simd-intrinsic-generic-arithmetic-saturating.rs
+++ b/tests/codegen/simd-intrinsic/simd-intrinsic-generic-arithmetic-saturating.rs
@@ -116,150 +116,150 @@ extern "platform-intrinsic" {
     fn simd_saturating_sub<T>(x: T, y: T) -> T;
 }

-// NOTE(eddyb) `%{{x|1}}` is used because on some targets (e.g. WASM)
+// NOTE(eddyb) `%{{x|0}}` is used because on some targets (e.g. WASM)
 // SIMD vectors are passed directly, resulting in `%x` being a vector,
 // while on others they're passed indirectly, resulting in `%x` being
-// a pointer to a vector, and `%1` a vector loaded from that pointer.
+// a pointer to a vector, and `%0` a vector loaded from that pointer.
 // This is controlled by the target spec option `simd_types_indirect`.
-// The same applies to `%{{y|2}}` as well.
+// The same applies to `%{{y|1}}` as well.

 // CHECK-LABEL: @sadd_i8x2
 #[no_mangle]
 pub unsafe fn sadd_i8x2(x: i8x2, y: i8x2) -> i8x2 {
-    // CHECK: %{{[0-9]+}} = call <2 x i8> @llvm.sadd.sat.v2i8(<2 x i8> %{{x|1}}, <2 x i8> %{{y|2}})
+    // CHECK: %{{[0-9]+}} = call <2 x i8> @llvm.sadd.sat.v2i8(<2 x i8> %{{x|0}}, <2 x i8> %{{y|1}})
     simd_saturating_add(x, y)
 }

 // CHECK-LABEL: @sadd_i8x4
 #[no_mangle]
 pub unsafe fn sadd_i8x4(x: i8x4, y: i8x4) -> i8x4 {
-    // CHECK: %{{[0-9]+}} = call <4 x i8> @llvm.sadd.sat.v4i8(<4 x i8> %{{x|1}}, <4 x i8> %{{y|2}})
+    // CHECK: %{{[0-9]+}} = call <4 x i8> @llvm.sadd.sat.v4i8(<4 x i8> %{{x|0}}, <4 x i8> %{{y|1}})
     simd_saturating_add(x, y)
 }

 // CHECK-LABEL: @sadd_i8x8
 #[no_mangle]
 pub unsafe fn sadd_i8x8(x: i8x8, y: i8x8) -> i8x8 {
-    // CHECK: %{{[0-9]+}} = call <8 x i8> @llvm.sadd.sat.v8i8(<8 x i8> %{{x|1}}, <8 x i8> %{{y|2}})
+    // CHECK: %{{[0-9]+}} = call <8 x i8> @llvm.sadd.sat.v8i8(<8 x i8> %{{x|0}}, <8 x i8> %{{y|1}})
     simd_saturating_add(x, y)
 }

 // CHECK-LABEL: @sadd_i8x16
 #[no_mangle]
 pub unsafe fn sadd_i8x16(x: i8x16, y: i8x16) -> i8x16 {
-    // CHECK: %{{[0-9]+}} = call <16 x i8> @llvm.sadd.sat.v16i8(<16 x i8> %{{x|1}}, <16 x i8> %{{y|2}})
+    // CHECK: %{{[0-9]+}} = call <16 x i8> @llvm.sadd.sat.v16i8(<16 x i8> %{{x|0}}, <16 x i8> %{{y|1}})
     simd_saturating_add(x, y)
 }

 // CHECK-LABEL: @sadd_i8x32
 #[no_mangle]
 pub unsafe fn sadd_i8x32(x: i8x32, y: i8x32) -> i8x32 {
-    // CHECK: %{{[0-9]+}} = call <32 x i8> @llvm.sadd.sat.v32i8(<32 x i8> %{{x|1}}, <32 x i8> %{{y|2}})
+    // CHECK: %{{[0-9]+}} = call <32 x i8> @llvm.sadd.sat.v32i8(<32 x i8> %{{x|0}}, <32 x i8> %{{y|1}})
     simd_saturating_add(x, y)
 }

 // CHECK-LABEL: @sadd_i8x64
 #[no_mangle]
 pub unsafe fn sadd_i8x64(x: i8x64, y: i8x64) -> i8x64 {
-    // CHECK: %{{[0-9]+}} = call <64 x i8> @llvm.sadd.sat.v64i8(<64 x i8> %{{x|1}}, <64 x i8> %{{y|2}})
+    // CHECK: %{{[0-9]+}} = call <64 x i8> @llvm.sadd.sat.v64i8(<64 x i8> %{{x|0}}, <64 x i8> %{{y|1}})
     simd_saturating_add(x, y)
 }

 // CHECK-LABEL: @sadd_i16x2
 #[no_mangle]
 pub unsafe fn sadd_i16x2(x: i16x2, y: i16x2) -> i16x2 {
-    // CHECK: %{{[0-9]+}} = call <2 x i16> @llvm.sadd.sat.v2i16(<2 x i16> %{{x|1}}, <2 x i16> %{{y|2}})
+    // CHECK: %{{[0-9]+}} = call <2 x i16> @llvm.sadd.sat.v2i16(<2 x i16> %{{x|0}}, <2 x i16> %{{y|1}})
     simd_saturating_add(x, y)
 }

 // CHECK-LABEL: @sadd_i16x4
 #[no_mangle]
 pub unsafe fn sadd_i16x4(x: i16x4, y: i16x4) -> i16x4 {
-    // CHECK: %{{[0-9]+}} = call <4 x i16> @llvm.sadd.sat.v4i16(<4 x i16> %{{x|1}}, <4 x i16> %{{y|2}})
+    // CHECK: %{{[0-9]+}} = call <4 x i16> @llvm.sadd.sat.v4i16(<4 x i16> %{{x|0}}, <4 x i16> %{{y|1}})
     simd_saturating_add(x, y)
 }

 // CHECK-LABEL: @sadd_i16x8
 #[no_mangle]
 pub unsafe fn sadd_i16x8(x: i16x8, y: i16x8) -> i16x8 {
-    // CHECK: %{{[0-9]+}} = call <8 x i16> @llvm.sadd.sat.v8i16(<8 x i16> %{{x|1}}, <8 x i16> %{{y|2}})
+    // CHECK: %{{[0-9]+}} = call <8 x i16> @llvm.sadd.sat.v8i16(<8 x i16> %{{x|0}}, <8 x i16> %{{y|1}})
     simd_saturating_add(x, y)
 }

 // CHECK-LABEL: @sadd_i16x16
 #[no_mangle]
 pub unsafe fn sadd_i16x16(x: i16x16, y: i16x16) -> i16x16 {
-    // CHECK: %{{[0-9]+}} = call <16 x i16> @llvm.sadd.sat.v16i16(<16 x i16> %{{x|1}}, <16 x i16> %{{y|2}})
+    // CHECK: %{{[0-9]+}} = call <16 x i16> @llvm.sadd.sat.v16i16(<16 x i16> %{{x|0}}, <16 x i16> %{{y|1}})
     simd_saturating_add(x, y)
 }

 // CHECK-LABEL: @sadd_i16x32
 #[no_mangle]
 pub unsafe fn sadd_i16x32(x: i16x32, y: i16x32) -> i16x32 {
-    // CHECK: %{{[0-9]+}} = call <32 x i16> @llvm.sadd.sat.v32i16(<32 x i16> %{{x|1}}, <32 x i16> %{{y|2}})
+    // CHECK: %{{[0-9]+}} = call <32 x i16> @llvm.sadd.sat.v32i16(<32 x i16> %{{x|0}}, <32 x i16> %{{y|1}})
     simd_saturating_add(x, y)
 }

 // CHECK-LABEL: @sadd_i32x2
 #[no_mangle]
 pub unsafe fn sadd_i32x2(x: i32x2, y: i32x2) -> i32x2 {
-    // CHECK: %{{[0-9]+}} = call <2 x i32> @llvm.sadd.sat.v2i32(<2 x i32> %{{x|1}}, <2 x i32> %{{y|2}})
+    // CHECK: %{{[0-9]+}} = call <2 x i32> @llvm.sadd.sat.v2i32(<2 x i32> %{{x|0}}, <2 x i32> %{{y|1}})
     simd_saturating_add(x, y)
 }

 // CHECK-LABEL: @sadd_i32x4
 #[no_mangle]
 pub unsafe fn sadd_i32x4(x: i32x4, y: i32x4) -> i32x4 {
-    // CHECK: %{{[0-9]+}} = call <4 x i32> @llvm.sadd.sat.v4i32(<4 x i32> %{{x|1}}, <4 x i32> %{{y|2}})
+    // CHECK: %{{[0-9]+}} = call <4 x i32> @llvm.sadd.sat.v4i32(<4 x i32> %{{x|0}}, <4 x i32> %{{y|1}})
     simd_saturating_add(x, y)
 }

 // CHECK-LABEL: @sadd_i32x8
 #[no_mangle]
 pub unsafe fn sadd_i32x8(x: i32x8, y: i32x8) -> i32x8 {
-    // CHECK: %{{[0-9]+}} = call <8 x i32> @llvm.sadd.sat.v8i32(<8 x i32> %{{x|1}}, <8 x i32> %{{y|2}})
+    // CHECK: %{{[0-9]+}} = call <8 x i32> @llvm.sadd.sat.v8i32(<8 x i32> %{{x|0}}, <8 x i32> %{{y|1}})
     simd_saturating_add(x, y)
 }

 // CHECK-LABEL: @sadd_i32x16
 #[no_mangle]
 pub unsafe fn sadd_i32x16(x: i32x16, y: i32x16) -> i32x16 {
-    // CHECK: %{{[0-9]+}} = call <16 x i32> @llvm.sadd.sat.v16i32(<16 x i32> %{{x|1}}, <16 x i32> %{{y|2}})
+    // CHECK: %{{[0-9]+}} = call <16 x i32> @llvm.sadd.sat.v16i32(<16 x i32> %{{x|0}}, <16 x i32> %{{y|1}})
     simd_saturating_add(x, y)
 }

 // CHECK-LABEL: @sadd_i64x2
 #[no_mangle]
 pub unsafe fn sadd_i64x2(x: i64x2, y: i64x2) -> i64x2 {
-    // CHECK: %{{[0-9]+}} = call <2 x i64> @llvm.sadd.sat.v2i64(<2 x i64> %{{x|1}}, <2 x i64> %{{y|2}})
+    // CHECK: %{{[0-9]+}} = call <2 x i64> @llvm.sadd.sat.v2i64(<2 x i64> %{{x|0}}, <2 x i64> %{{y|1}})
     simd_saturating_add(x, y)
 }

 // CHECK-LABEL: @sadd_i64x4
 #[no_mangle]
 pub unsafe fn sadd_i64x4(x: i64x4, y: i64x4) -> i64x4 {
-    // CHECK: %{{[0-9]+}} = call <4 x i64> @llvm.sadd.sat.v4i64(<4 x i64> %{{x|1}}, <4 x i64> %{{y|2}})
+    // CHECK: %{{[0-9]+}} = call <4 x i64> @llvm.sadd.sat.v4i64(<4 x i64> %{{x|0}}, <4 x i64> %{{y|1}})
     simd_saturating_add(x, y)
 }

 // CHECK-LABEL: @sadd_i64x8
 #[no_mangle]
 pub unsafe fn sadd_i64x8(x: i64x8, y: i64x8) -> i64x8 {
-    // CHECK: %{{[0-9]+}} = call <8 x i64> @llvm.sadd.sat.v8i64(<8 x i64> %{{x|1}}, <8 x i64> %{{y|2}})
+    // CHECK: %{{[0-9]+}} = call <8 x i64> @llvm.sadd.sat.v8i64(<8 x i64> %{{x|0}}, <8 x i64> %{{y|1}})
     simd_saturating_add(x, y)
 }

 // CHECK-LABEL: @sadd_i128x2
 #[no_mangle]
 pub unsafe fn sadd_i128x2(x: i128x2, y: i128x2) -> i128x2 {
-    // CHECK: %{{[0-9]+}} = call <2 x i128> @llvm.sadd.sat.v2i128(<2 x i128> %{{x|1}}, <2 x i128> %{{y|2}})
+    // CHECK: %{{[0-9]+}} = call <2 x i128> @llvm.sadd.sat.v2i128(<2 x i128> %{{x|0}}, <2 x i128> %{{y|1}})
     simd_saturating_add(x, y)
 }

 // CHECK-LABEL: @sadd_i128x4
 #[no_mangle]
 pub unsafe fn sadd_i128x4(x: i128x4, y: i128x4) -> i128x4 {
-    // CHECK: %{{[0-9]+}} = call <4 x i128> @llvm.sadd.sat.v4i128(<4 x i128> %{{x|1}}, <4 x i128> %{{y|2}})
+    // CHECK: %{{[0-9]+}} = call <4 x i128> @llvm.sadd.sat.v4i128(<4 x i128> %{{x|0}}, <4 x i128> %{{y|1}})
     simd_saturating_add(x, y)
 }

@@ -268,140 +268,140 @@ pub unsafe fn sadd_i128x4(x: i128x4, y: i128x4) -> i128x4 {
 // CHECK-LABEL: @uadd_u8x2
 #[no_mangle]
 pub unsafe fn uadd_u8x2(x: u8x2, y: u8x2) -> u8x2 {
-    // CHECK: %{{[0-9]+}} = call <2 x i8> @llvm.uadd.sat.v2i8(<2 x i8> %{{x|1}}, <2 x i8> %{{y|2}})
+    // CHECK: %{{[0-9]+}} = call <2 x i8> @llvm.uadd.sat.v2i8(<2 x i8> %{{x|0}}, <2 x i8> %{{y|1}})
     simd_saturating_add(x, y)
 }

 // CHECK-LABEL: @uadd_u8x4
 #[no_mangle]
 pub unsafe fn uadd_u8x4(x: u8x4, y: u8x4) -> u8x4 {
-    // CHECK: %{{[0-9]+}} = call <4 x i8> @llvm.uadd.sat.v4i8(<4 x i8> %{{x|1}}, <4 x i8> %{{y|2}})
+    // CHECK: %{{[0-9]+}} = call <4 x i8> @llvm.uadd.sat.v4i8(<4 x i8> %{{x|0}}, <4 x i8> %{{y|1}})
     simd_saturating_add(x, y)
 }

 // CHECK-LABEL: @uadd_u8x8
 #[no_mangle]
 pub unsafe fn uadd_u8x8(x: u8x8, y: u8x8) -> u8x8 {
-    // CHECK: %{{[0-9]+}} = call <8 x i8> @llvm.uadd.sat.v8i8(<8 x i8> %{{x|1}}, <8 x i8> %{{y|2}})
+    // CHECK: %{{[0-9]+}} = call <8 x i8> @llvm.uadd.sat.v8i8(<8 x i8> %{{x|0}}, <8 x i8> %{{y|1}})
     simd_saturating_add(x, y)
 }

 // CHECK-LABEL: @uadd_u8x16
 #[no_mangle]
 pub unsafe fn uadd_u8x16(x: u8x16, y: u8x16) -> u8x16 {
-    // CHECK: %{{[0-9]+}} = call <16 x i8> @llvm.uadd.sat.v16i8(<16 x i8> %{{x|1}}, <16 x i8> %{{y|2}})
+    // CHECK: %{{[0-9]+}} = call <16 x i8> @llvm.uadd.sat.v16i8(<16 x i8> %{{x|0}}, <16 x i8> %{{y|1}})
     simd_saturating_add(x, y)
 }

 // CHECK-LABEL: @uadd_u8x32
 #[no_mangle]
 pub unsafe fn uadd_u8x32(x: u8x32, y: u8x32) -> u8x32 {
-    // CHECK: %{{[0-9]+}} = call <32 x i8> @llvm.uadd.sat.v32i8(<32 x i8> %{{x|1}}, <32 x i8> %{{y|2}})
+    // CHECK: %{{[0-9]+}} = call <32 x i8> @llvm.uadd.sat.v32i8(<32 x i8> %{{x|0}}, <32 x i8> %{{y|1}})
     simd_saturating_add(x, y)
 }

 // CHECK-LABEL: @uadd_u8x64
 #[no_mangle]
 pub unsafe fn uadd_u8x64(x: u8x64, y: u8x64) -> u8x64 {
-    // CHECK: %{{[0-9]+}} = call <64 x i8> @llvm.uadd.sat.v64i8(<64 x i8> %{{x|1}}, <64 x i8> %{{y|2}})
+    // CHECK: %{{[0-9]+}} = call <64 x i8> @llvm.uadd.sat.v64i8(<64 x i8> %{{x|0}}, <64 x i8> %{{y|1}})
     simd_saturating_add(x, y)
 }

 // CHECK-LABEL: @uadd_u16x2
 #[no_mangle]
 pub unsafe fn uadd_u16x2(x: u16x2, y: u16x2) -> u16x2 {
-    // CHECK: %{{[0-9]+}} = call <2 x i16> @llvm.uadd.sat.v2i16(<2 x i16> %{{x|1}}, <2 x i16> %{{y|2}})
+    // CHECK: %{{[0-9]+}} = call <2 x i16> @llvm.uadd.sat.v2i16(<2 x i16> %{{x|0}}, <2 x i16> %{{y|1}})
     simd_saturating_add(x, y)
 }

 // CHECK-LABEL: @uadd_u16x4
 #[no_mangle]
 pub unsafe fn uadd_u16x4(x: u16x4, y: u16x4) -> u16x4 {
-    // CHECK: %{{[0-9]+}} = call <4 x i16> @llvm.uadd.sat.v4i16(<4 x i16> %{{x|1}}, <4 x i16> %{{y|2}})
+    // CHECK: %{{[0-9]+}} = call <4 x i16> @llvm.uadd.sat.v4i16(<4 x i16> %{{x|0}}, <4 x i16> %{{y|1}})
     simd_saturating_add(x, y)
 }

 // CHECK-LABEL: @uadd_u16x8
 #[no_mangle]
 pub unsafe fn uadd_u16x8(x: u16x8, y: u16x8) -> u16x8 {
-    // CHECK: %{{[0-9]+}} = call <8 x i16> @llvm.uadd.sat.v8i16(<8 x i16> %{{x|1}}, <8 x i16> %{{y|2}})
+    // CHECK: %{{[0-9]+}} = call <8 x i16> @llvm.uadd.sat.v8i16(<8 x i16> %{{x|0}}, <8 x i16> %{{y|1}})
     simd_saturating_add(x, y)
 }

 // CHECK-LABEL: @uadd_u16x16
 #[no_mangle]
 pub unsafe fn uadd_u16x16(x: u16x16, y: u16x16) -> u16x16 {
-    // CHECK: %{{[0-9]+}} = call <16 x i16> @llvm.uadd.sat.v16i16(<16 x i16> %{{x|1}}, <16 x i16> %{{y|2}})
+    // CHECK: %{{[0-9]+}} = call <16 x i16> @llvm.uadd.sat.v16i16(<16 x i16> %{{x|0}}, <16 x i16> %{{y|1}})
     simd_saturating_add(x, y)
 }

 // CHECK-LABEL: @uadd_u16x32
 #[no_mangle]
 pub unsafe fn uadd_u16x32(x: u16x32, y: u16x32) -> u16x32 {
-    // CHECK: %{{[0-9]+}} = call <32 x i16> @llvm.uadd.sat.v32i16(<32 x i16> %{{x|1}}, <32 x i16> %{{y|2}})
+    // CHECK: %{{[0-9]+}} = call <32 x i16> @llvm.uadd.sat.v32i16(<32 x i16> %{{x|0}}, <32 x i16> %{{y|1}})
     simd_saturating_add(x, y)
 }

 // CHECK-LABEL: @uadd_u32x2
 #[no_mangle]
 pub unsafe fn uadd_u32x2(x: u32x2, y: u32x2) -> u32x2 {
-    // CHECK: %{{[0-9]+}} = call <2 x i32> @llvm.uadd.sat.v2i32(<2 x i32> %{{x|1}}, <2 x i32> %{{y|2}})
+    // CHECK: %{{[0-9]+}} = call <2 x i32> @llvm.uadd.sat.v2i32(<2 x i32> %{{x|0}}, <2 x i32> %{{y|1}})
     simd_saturating_add(x, y)
 }

 // CHECK-LABEL: @uadd_u32x4
 #[no_mangle]
 pub unsafe fn uadd_u32x4(x: u32x4, y: u32x4) -> u32x4 {
-    // CHECK: %{{[0-9]+}} = call <4 x i32> @llvm.uadd.sat.v4i32(<4 x i32> %{{x|1}}, <4 x i32> %{{y|2}})
+    // CHECK: %{{[0-9]+}} = call <4 x i32> @llvm.uadd.sat.v4i32(<4 x i32> %{{x|0}}, <4 x i32> %{{y|1}})
     simd_saturating_add(x, y)
 }

 // CHECK-LABEL: @uadd_u32x8
 #[no_mangle]
 pub unsafe fn uadd_u32x8(x: u32x8, y: u32x8) -> u32x8 {
-    // CHECK: %{{[0-9]+}} = call <8 x i32> @llvm.uadd.sat.v8i32(<8 x i32> %{{x|1}}, <8 x i32> %{{y|2}})
+    // CHECK: %{{[0-9]+}} = call <8 x i32> @llvm.uadd.sat.v8i32(<8 x i32> %{{x|0}}, <8 x i32> %{{y|1}})
     simd_saturating_add(x, y)
 }

 // CHECK-LABEL: @uadd_u32x16
 #[no_mangle]
 pub unsafe fn uadd_u32x16(x: u32x16, y: u32x16) -> u32x16 {
-    // CHECK: %{{[0-9]+}} = call <16 x i32> @llvm.uadd.sat.v16i32(<16 x i32> %{{x|1}}, <16 x i32> %{{y|2}})
+    // CHECK: %{{[0-9]+}} = call <16 x i32> @llvm.uadd.sat.v16i32(<16 x i32> %{{x|0}}, <16 x i32> %{{y|1}})
     simd_saturating_add(x, y)
 }

 // CHECK-LABEL: @uadd_u64x2
 #[no_mangle]
 pub unsafe fn uadd_u64x2(x: u64x2, y: u64x2) -> u64x2 {
-    // CHECK: %{{[0-9]+}} = call <2 x i64> @llvm.uadd.sat.v2i64(<2 x i64> %{{x|1}}, <2 x i64> %{{y|2}})
+    // CHECK: %{{[0-9]+}} = call <2 x i64> @llvm.uadd.sat.v2i64(<2 x i64> %{{x|0}}, <2 x i64> %{{y|1}})
     simd_saturating_add(x, y)
 }

 // CHECK-LABEL: @uadd_u64x4
 #[no_mangle]
 pub unsafe fn uadd_u64x4(x: u64x4, y: u64x4) -> u64x4 {
-    // CHECK: %{{[0-9]+}} = call <4 x i64> @llvm.uadd.sat.v4i64(<4 x i64> %{{x|1}}, <4 x i64> %{{y|2}})
+    // CHECK: %{{[0-9]+}} = call <4 x i64> @llvm.uadd.sat.v4i64(<4 x i64> %{{x|0}}, <4 x i64> %{{y|1}})
     simd_saturating_add(x, y)
 }

 // CHECK-LABEL: @uadd_u64x8
 #[no_mangle]
 pub unsafe fn uadd_u64x8(x: u64x8, y: u64x8) -> u64x8 {
-    // CHECK: %{{[0-9]+}} = call <8 x i64> @llvm.uadd.sat.v8i64(<8 x i64> %{{x|1}}, <8 x i64> %{{y|2}})
+    // CHECK: %{{[0-9]+}} = call <8 x i64> @llvm.uadd.sat.v8i64(<8 x i64> %{{x|0}}, <8 x i64> %{{y|1}})
     simd_saturating_add(x, y)
 }

 // CHECK-LABEL: @uadd_u128x2
 #[no_mangle]
 pub unsafe fn uadd_u128x2(x: u128x2, y: u128x2) -> u128x2 {
-    // CHECK: %{{[0-9]+}} = call <2 x i128> @llvm.uadd.sat.v2i128(<2 x i128> %{{x|1}}, <2 x i128> %{{y|2}})
+    // CHECK: %{{[0-9]+}} = call <2 x i128> @llvm.uadd.sat.v2i128(<2 x i128> %{{x|0}}, <2 x i128> %{{y|1}})
     simd_saturating_add(x, y)
 }

 // CHECK-LABEL: @uadd_u128x4
 #[no_mangle]
 pub unsafe fn uadd_u128x4(x: u128x4, y: u128x4) -> u128x4 {
-    // CHECK: %{{[0-9]+}} = call <4 x i128> @llvm.uadd.sat.v4i128(<4 x i128> %{{x|1}}, <4 x i128> %{{y|2}})
+    // CHECK: %{{[0-9]+}} = call <4 x i128> @llvm.uadd.sat.v4i128(<4 x i128> %{{x|0}}, <4 x i128> %{{y|1}})
     simd_saturating_add(x, y)
 }

@@ -412,140 +412,140 @@ pub unsafe fn uadd_u128x4(x: u128x4, y: u128x4) -> u128x4 {
 // CHECK-LABEL: @ssub_i8x2
 #[no_mangle]
 pub unsafe fn ssub_i8x2(x: i8x2, y: i8x2) -> i8x2 {
-    // CHECK: %{{[0-9]+}} = call <2 x i8> @llvm.ssub.sat.v2i8(<2 x i8> %{{x|1}}, <2 x i8> %{{y|2}})
+    // CHECK: %{{[0-9]+}} = call <2 x i8> @llvm.ssub.sat.v2i8(<2 x i8> %{{x|0}}, <2 x i8> %{{y|1}})
     simd_saturating_sub(x, y)
 }

 // CHECK-LABEL: @ssub_i8x4
 #[no_mangle]
 pub unsafe fn ssub_i8x4(x: i8x4, y: i8x4) -> i8x4 {
-    // CHECK: %{{[0-9]+}} = call <4 x i8> @llvm.ssub.sat.v4i8(<4 x i8> %{{x|1}}, <4 x i8> %{{y|2}})
+    // CHECK: %{{[0-9]+}} = call <4 x i8> @llvm.ssub.sat.v4i8(<4 x i8> %{{x|0}}, <4 x i8> %{{y|1}})
     simd_saturating_sub(x, y)
 }

 // CHECK-LABEL: @ssub_i8x8
 #[no_mangle]
 pub unsafe fn ssub_i8x8(x: i8x8, y: i8x8) -> i8x8 {
-    // CHECK: %{{[0-9]+}} = call <8 x i8> @llvm.ssub.sat.v8i8(<8 x i8> %{{x|1}}, <8 x i8> %{{y|2}})
+    // CHECK: %{{[0-9]+}} = call <8 x i8> @llvm.ssub.sat.v8i8(<8 x i8> %{{x|0}}, <8 x i8> %{{y|1}})
     simd_saturating_sub(x, y)
 }

 // CHECK-LABEL: @ssub_i8x16
 #[no_mangle]
 pub unsafe fn ssub_i8x16(x: i8x16, y: i8x16) -> i8x16 {
-    // CHECK: %{{[0-9]+}} = call <16 x i8> @llvm.ssub.sat.v16i8(<16 x i8> %{{x|1}}, <16 x i8> %{{y|2}})
+    // CHECK: %{{[0-9]+}} = call <16 x i8> @llvm.ssub.sat.v16i8(<16 x i8> %{{x|0}}, <16 x i8> %{{y|1}})
     simd_saturating_sub(x, y)
 }

 // CHECK-LABEL: @ssub_i8x32
 #[no_mangle]
 pub unsafe fn ssub_i8x32(x: i8x32, y: i8x32) -> i8x32 {
-    // CHECK: %{{[0-9]+}} = call <32 x i8> @llvm.ssub.sat.v32i8(<32 x i8> %{{x|1}}, <32 x i8> %{{y|2}})
+    // CHECK: %{{[0-9]+}} = call <32 x i8> @llvm.ssub.sat.v32i8(<32 x i8> %{{x|0}}, <32 x i8> %{{y|1}})
     simd_saturating_sub(x, y)
 }

 // CHECK-LABEL: @ssub_i8x64
 #[no_mangle]
 pub unsafe fn ssub_i8x64(x: i8x64, y: i8x64) -> i8x64 {
-    // CHECK: %{{[0-9]+}} = call <64 x i8> @llvm.ssub.sat.v64i8(<64 x i8> %{{x|1}}, <64 x i8> %{{y|2}})
+    // CHECK: %{{[0-9]+}} = call <64 x i8> @llvm.ssub.sat.v64i8(<64 x i8> %{{x|0}}, <64 x i8> %{{y|1}})
     simd_saturating_sub(x, y)
 }

 // CHECK-LABEL: @ssub_i16x2
 #[no_mangle]
 pub unsafe fn ssub_i16x2(x: i16x2, y: i16x2) -> i16x2 {
-    // CHECK: %{{[0-9]+}} = call <2 x i16> @llvm.ssub.sat.v2i16(<2 x i16> %{{x|1}}, <2 x i16> %{{y|2}})
+    // CHECK: %{{[0-9]+}} = call <2 x i16> @llvm.ssub.sat.v2i16(<2 x i16> %{{x|0}}, <2 x i16> %{{y|1}})
     simd_saturating_sub(x, y)
 }

 // CHECK-LABEL: @ssub_i16x4
 #[no_mangle]
 pub unsafe fn ssub_i16x4(x: i16x4, y: i16x4) -> i16x4 {
-    // CHECK: %{{[0-9]+}} = call <4 x i16> @llvm.ssub.sat.v4i16(<4 x i16> %{{x|1}}, <4 x i16> %{{y|2}})
+    // CHECK: %{{[0-9]+}} = call <4 x i16> @llvm.ssub.sat.v4i16(<4 x i16> %{{x|0}}, <4 x i16> %{{y|1}})
     simd_saturating_sub(x, y)
 }

 // CHECK-LABEL: @ssub_i16x8
 #[no_mangle]
 pub unsafe fn ssub_i16x8(x: i16x8, y: i16x8) -> i16x8 {
-    // CHECK: %{{[0-9]+}} = call <8 x i16> @llvm.ssub.sat.v8i16(<8 x i16> %{{x|1}}, <8 x i16> %{{y|2}})
+    // CHECK: %{{[0-9]+}} = call <8 x i16> @llvm.ssub.sat.v8i16(<8 x i16> %{{x|0}}, <8 x i16> %{{y|1}})
     simd_saturating_sub(x, y)
 }

 // CHECK-LABEL: @ssub_i16x16
 #[no_mangle]
 pub unsafe fn ssub_i16x16(x: i16x16, y: i16x16) -> i16x16 {
-    // CHECK: %{{[0-9]+}} = call <16 x i16> @llvm.ssub.sat.v16i16(<16 x i16> %{{x|1}}, <16 x i16> %{{y|2}})
+    // CHECK: %{{[0-9]+}} = call <16 x i16> @llvm.ssub.sat.v16i16(<16 x i16> %{{x|0}}, <16 x i16> %{{y|1}})
     simd_saturating_sub(x, y)
 }

 // CHECK-LABEL: @ssub_i16x32
 #[no_mangle]
 pub unsafe fn ssub_i16x32(x: i16x32, y: i16x32) -> i16x32 {
-    // CHECK: %{{[0-9]+}} = call <32 x i16> @llvm.ssub.sat.v32i16(<32 x i16> %{{x|1}}, <32 x i16> %{{y|2}})
+    // CHECK: %{{[0-9]+}} = call <32 x i16> @llvm.ssub.sat.v32i16(<32 x i16> %{{x|0}}, <32 x i16> %{{y|1}})
     simd_saturating_sub(x, y)
 }

 // CHECK-LABEL: @ssub_i32x2
 #[no_mangle]
 pub unsafe fn ssub_i32x2(x: i32x2, y: i32x2) -> i32x2 {
-    // CHECK: %{{[0-9]+}} = call <2 x i32> @llvm.ssub.sat.v2i32(<2 x i32> %{{x|1}}, <2 x i32> %{{y|2}})
+    // CHECK: %{{[0-9]+}} = call <2 x i32> @llvm.ssub.sat.v2i32(<2 x i32> %{{x|0}}, <2 x i32> %{{y|1}})
     simd_saturating_sub(x, y)
 }

 // CHECK-LABEL: @ssub_i32x4
 #[no_mangle]
 pub unsafe fn ssub_i32x4(x: i32x4, y: i32x4) -> i32x4 {
-    // CHECK: %{{[0-9]+}} = call <4 x i32> @llvm.ssub.sat.v4i32(<4 x i32> %{{x|1}}, <4 x i32> %{{y|2}})
+    // CHECK: %{{[0-9]+}} = call <4 x i32> @llvm.ssub.sat.v4i32(<4 x i32> %{{x|0}}, <4 x i32> %{{y|1}})
     simd_saturating_sub(x, y)
 }

 // CHECK-LABEL: @ssub_i32x8
 #[no_mangle]
 pub unsafe fn ssub_i32x8(x: i32x8, y: i32x8) -> i32x8 {
-    // CHECK: %{{[0-9]+}} = call <8 x i32> @llvm.ssub.sat.v8i32(<8 x i32> %{{x|1}}, <8 x i32> %{{y|2}})
+    // CHECK: %{{[0-9]+}} = call <8 x i32> @llvm.ssub.sat.v8i32(<8 x i32> %{{x|0}}, <8 x i32> %{{y|1}})
     simd_saturating_sub(x, y)
 }

 // CHECK-LABEL: @ssub_i32x16
 #[no_mangle]
 pub unsafe fn ssub_i32x16(x: i32x16, y: i32x16) -> i32x16 {
-    // CHECK: %{{[0-9]+}} = call <16 x i32> @llvm.ssub.sat.v16i32(<16 x i32> %{{x|1}}, <16 x i32> %{{y|2}})
+    // CHECK: %{{[0-9]+}} = call <16 x i32> @llvm.ssub.sat.v16i32(<16 x i32> %{{x|0}}, <16 x i32> %{{y|1}})
     simd_saturating_sub(x, y)
 }

 // CHECK-LABEL: @ssub_i64x2
 #[no_mangle]
 pub unsafe fn ssub_i64x2(x: i64x2, y: i64x2) -> i64x2 {
-    // CHECK: %{{[0-9]+}} = call <2 x i64> @llvm.ssub.sat.v2i64(<2 x i64> %{{x|1}}, <2 x i64> %{{y|2}})
+    // CHECK: %{{[0-9]+}} = call <2 x i64> @llvm.ssub.sat.v2i64(<2 x i64> %{{x|0}}, <2 x i64> %{{y|1}})
     simd_saturating_sub(x, y)
 }

 // CHECK-LABEL: @ssub_i64x4
 #[no_mangle]
 pub unsafe fn ssub_i64x4(x: i64x4, y: i64x4) -> i64x4 {
-    // CHECK: %{{[0-9]+}} = call <4 x i64> @llvm.ssub.sat.v4i64(<4 x i64> %{{x|1}}, <4 x i64> %{{y|2}})
+    // CHECK: %{{[0-9]+}} = call <4 x i64> @llvm.ssub.sat.v4i64(<4 x i64> %{{x|0}}, <4 x i64> %{{y|1}})
     simd_saturating_sub(x, y)
 }

 // CHECK-LABEL: @ssub_i64x8
 #[no_mangle]
 pub unsafe fn ssub_i64x8(x: i64x8, y: i64x8) -> i64x8 {
-    // CHECK: %{{[0-9]+}} = call <8 x i64> @llvm.ssub.sat.v8i64(<8 x i64> %{{x|1}}, <8 x i64> %{{y|2}})
+    // CHECK: %{{[0-9]+}} = call <8 x i64> @llvm.ssub.sat.v8i64(<8 x i64> %{{x|0}}, <8 x i64> %{{y|1}})
     simd_saturating_sub(x, y)
 }

 // CHECK-LABEL: @ssub_i128x2
 #[no_mangle]
 pub unsafe fn ssub_i128x2(x: i128x2, y: i128x2) -> i128x2 {
-    // CHECK: %{{[0-9]+}} = call <2 x i128> @llvm.ssub.sat.v2i128(<2 x i128> %{{x|1}}, <2 x i128> %{{y|2}})
+    // CHECK: %{{[0-9]+}} = call <2 x i128> @llvm.ssub.sat.v2i128(<2 x i128> %{{x|0}}, <2 x i128> %{{y|1}})
     simd_saturating_sub(x, y)
 }

 // CHECK-LABEL: @ssub_i128x4
 #[no_mangle]
 pub unsafe fn ssub_i128x4(x: i128x4, y: i128x4) -> i128x4 {
-    // CHECK: %{{[0-9]+}} = call <4 x i128> @llvm.ssub.sat.v4i128(<4 x i128> %{{x|1}}, <4 x i128> %{{y|2}})
+    // CHECK: %{{[0-9]+}} = call <4 x i128> @llvm.ssub.sat.v4i128(<4 x i128> %{{x|0}}, <4 x i128> %{{y|1}})
     simd_saturating_sub(x, y)
 }

@@ -554,139 +554,139 @@ pub unsafe fn ssub_i128x4(x: i128x4, y: i128x4) -> i128x4 {
 // CHECK-LABEL: @usub_u8x2
 #[no_mangle]
 pub unsafe fn usub_u8x2(x: u8x2, y: u8x2) -> u8x2 {
-    // CHECK: %{{[0-9]+}} = call <2 x i8> @llvm.usub.sat.v2i8(<2 x i8> %{{x|1}}, <2 x i8> %{{y|2}})
+    // CHECK: %{{[0-9]+}} = call <2 x i8> @llvm.usub.sat.v2i8(<2 x i8> %{{x|0}}, <2 x i8> %{{y|1}})
     simd_saturating_sub(x, y)
 }

 // CHECK-LABEL: @usub_u8x4
 #[no_mangle]
 pub unsafe fn usub_u8x4(x: u8x4, y: u8x4) -> u8x4 {
-    // CHECK: %{{[0-9]+}} = call <4 x i8> @llvm.usub.sat.v4i8(<4 x i8> %{{x|1}}, <4 x i8> %{{y|2}})
+    // CHECK: %{{[0-9]+}} = call <4 x i8> @llvm.usub.sat.v4i8(<4 x i8> %{{x|0}}, <4 x i8> %{{y|1}})
     simd_saturating_sub(x, y)
 }

 // CHECK-LABEL: @usub_u8x8
 #[no_mangle]
 pub unsafe fn usub_u8x8(x: u8x8, y: u8x8) -> u8x8 {
-    // CHECK: %{{[0-9]+}} = call <8 x i8> @llvm.usub.sat.v8i8(<8 x i8> %{{x|1}}, <8 x i8> %{{y|2}})
+    // CHECK: %{{[0-9]+}} = call <8 x i8> @llvm.usub.sat.v8i8(<8 x i8> %{{x|0}}, <8 x i8> %{{y|1}})
     simd_saturating_sub(x, y)
 }

 // CHECK-LABEL: @usub_u8x16
 #[no_mangle]
 pub unsafe fn usub_u8x16(x: u8x16, y: u8x16) -> u8x16 {
-    // CHECK: %{{[0-9]+}} = call <16 x i8> @llvm.usub.sat.v16i8(<16 x i8> %{{x|1}}, <16 x i8> %{{y|2}})
+    // CHECK: %{{[0-9]+}} = call <16 x i8> @llvm.usub.sat.v16i8(<16 x i8> %{{x|0}}, <16 x i8> %{{y|1}})
     simd_saturating_sub(x, y)
 }

 // CHECK-LABEL: @usub_u8x32
 #[no_mangle]
 pub unsafe fn usub_u8x32(x: u8x32, y: u8x32) -> u8x32 {
-    // CHECK: %{{[0-9]+}} = call <32 x i8> @llvm.usub.sat.v32i8(<32 x i8> %{{x|1}}, <32 x i8> %{{y|2}})
+    // CHECK: %{{[0-9]+}} = call <32 x i8> @llvm.usub.sat.v32i8(<32 x i8> %{{x|0}}, <32 x i8> %{{y|1}})
     simd_saturating_sub(x, y)
 }

 // CHECK-LABEL: @usub_u8x64
 #[no_mangle]
 pub unsafe fn usub_u8x64(x: u8x64, y: u8x64) -> u8x64 {
-    // CHECK: %{{[0-9]+}} = call <64 x i8> @llvm.usub.sat.v64i8(<64 x i8> %{{x|1}}, <64 x i8> %{{y|2}})
+    // CHECK: %{{[0-9]+}} = call <64 x i8> @llvm.usub.sat.v64i8(<64 x i8> %{{x|0}}, <64 x i8> %{{y|1}})
     simd_saturating_sub(x, y)
 }

 // CHECK-LABEL: @usub_u16x2
 #[no_mangle]
 pub unsafe fn usub_u16x2(x: u16x2, y: u16x2) -> u16x2 {
-    // CHECK: %{{[0-9]+}} = call <2 x i16> @llvm.usub.sat.v2i16(<2 x i16> %{{x|1}}, <2 x i16> %{{y|2}})
+    // CHECK: %{{[0-9]+}} = call <2 x i16> @llvm.usub.sat.v2i16(<2 x i16> %{{x|0}}, <2 x i16> %{{y|1}})
     simd_saturating_sub(x, y)
 }

 // CHECK-LABEL: @usub_u16x4
 #[no_mangle]
 pub unsafe fn usub_u16x4(x: u16x4, y: u16x4) -> u16x4 {
-    // CHECK: %{{[0-9]+}} = call <4 x i16> @llvm.usub.sat.v4i16(<4 x i16> %{{x|1}}, <4 x i16> %{{y|2}})
+    // CHECK: %{{[0-9]+}} = call <4 x i16> @llvm.usub.sat.v4i16(<4 x i16> %{{x|0}}, <4 x i16> %{{y|1}})
     simd_saturating_sub(x, y)
 }

 // CHECK-LABEL: @usub_u16x8
 #[no_mangle]
 pub unsafe fn usub_u16x8(x: u16x8, y: u16x8) -> u16x8 {
-    // CHECK: %{{[0-9]+}} = call <8 x i16> @llvm.usub.sat.v8i16(<8 x i16> %{{x|1}}, <8 x i16> %{{y|2}})
+    // CHECK: %{{[0-9]+}} = call <8 x i16> @llvm.usub.sat.v8i16(<8 x i16> %{{x|0}}, <8 x i16> %{{y|1}})
     simd_saturating_sub(x, y)
 }

 // CHECK-LABEL: @usub_u16x16
 #[no_mangle]
 pub unsafe fn usub_u16x16(x: u16x16, y: u16x16) -> u16x16 {
-    // CHECK: %{{[0-9]+}} = call <16 x i16> @llvm.usub.sat.v16i16(<16 x i16> %{{x|1}}, <16 x i16> %{{y|2}})
+    // CHECK: %{{[0-9]+}} = call <16 x i16> @llvm.usub.sat.v16i16(<16 x i16> %{{x|0}}, <16 x i16> %{{y|1}})
     simd_saturating_sub(x, y)
 }

 // CHECK-LABEL: @usub_u16x32
 #[no_mangle]
 pub unsafe fn usub_u16x32(x: u16x32, y: u16x32) -> u16x32 {
-    // CHECK: %{{[0-9]+}} = call <32 x i16> @llvm.usub.sat.v32i16(<32 x i16> %{{x|1}}, <32 x i16> %{{y|2}})
+    // CHECK: %{{[0-9]+}} = call <32 x i16> @llvm.usub.sat.v32i16(<32 x i16> %{{x|0}}, <32 x i16> %{{y|1}})
     simd_saturating_sub(x, y)
 }

 // CHECK-LABEL: @usub_u32x2
 #[no_mangle]
 pub unsafe fn usub_u32x2(x: u32x2, y: u32x2) -> u32x2 {
-    // CHECK: %{{[0-9]+}} = call <2 x i32> @llvm.usub.sat.v2i32(<2 x i32> %{{x|1}}, <2 x i32> %{{y|2}})
+    // CHECK: %{{[0-9]+}} = call <2 x i32> @llvm.usub.sat.v2i32(<2 x i32> %{{x|0}}, <2 x i32> %{{y|1}})
     simd_saturating_sub(x, y)
 }

 // CHECK-LABEL: @usub_u32x4
 #[no_mangle]
 pub unsafe fn usub_u32x4(x: u32x4, y: u32x4) -> u32x4 {
-    // CHECK: %{{[0-9]+}} = call <4 x i32> @llvm.usub.sat.v4i32(<4 x i32> %{{x|1}}, <4 x i32> %{{y|2}})
+    // CHECK: %{{[0-9]+}} = call <4 x i32> @llvm.usub.sat.v4i32(<4 x i32> %{{x|0}}, <4 x i32> %{{y|1}})
     simd_saturating_sub(x, y)
 }

 // CHECK-LABEL: @usub_u32x8
 #[no_mangle]
 pub unsafe fn usub_u32x8(x: u32x8, y: u32x8) -> u32x8 {
-    // CHECK: %{{[0-9]+}} = call <8 x i32> @llvm.usub.sat.v8i32(<8 x i32> %{{x|1}}, <8 x i32> %{{y|2}})
+    // CHECK: %{{[0-9]+}} = call <8 x i32> @llvm.usub.sat.v8i32(<8 x i32> %{{x|0}}, <8 x i32> %{{y|1}})
     simd_saturating_sub(x, y)
 }

 // CHECK-LABEL: @usub_u32x16
 #[no_mangle]
 pub unsafe fn usub_u32x16(x: u32x16, y: u32x16) -> u32x16 {
-    // CHECK: %{{[0-9]+}} = call <16 x i32> @llvm.usub.sat.v16i32(<16 x i32> %{{x|1}}, <16 x i32> %{{y|2}})
+    // CHECK: %{{[0-9]+}} = call <16 x i32> @llvm.usub.sat.v16i32(<16 x i32> %{{x|0}}, <16 x i32> %{{y|1}})
     simd_saturating_sub(x, y)
 }

 // CHECK-LABEL: @usub_u64x2
 #[no_mangle]
 pub unsafe fn usub_u64x2(x: u64x2, y: u64x2) -> u64x2 {
-    // CHECK: %{{[0-9]+}} = call <2 x i64> @llvm.usub.sat.v2i64(<2 x i64> %{{x|1}}, <2 x i64> %{{y|2}})
+    // CHECK: %{{[0-9]+}} = call <2 x i64> @llvm.usub.sat.v2i64(<2 x i64> %{{x|0}}, <2 x i64> %{{y|1}})
     simd_saturating_sub(x, y)
 }

 // CHECK-LABEL: @usub_u64x4
 #[no_mangle]
 pub unsafe fn usub_u64x4(x: u64x4, y: u64x4) -> u64x4 {
-    // CHECK: %{{[0-9]+}} = call <4 x i64> @llvm.usub.sat.v4i64(<4 x i64> %{{x|1}}, <4 x i64> %{{y|2}})
+    // CHECK: %{{[0-9]+}} = call <4 x i64> @llvm.usub.sat.v4i64(<4 x i64> %{{x|0}}, <4 x i64> %{{y|1}})
     simd_saturating_sub(x, y)
 }

 // CHECK-LABEL: @usub_u64x8
 #[no_mangle]
 pub unsafe fn usub_u64x8(x: u64x8, y: u64x8) -> u64x8 {
-    // CHECK: %{{[0-9]+}} = call <8 x i64> @llvm.usub.sat.v8i64(<8 x i64> %{{x|1}}, <8 x i64> %{{y|2}})
+    // CHECK: %{{[0-9]+}} = call <8 x i64> @llvm.usub.sat.v8i64(<8 x i64> %{{x|0}}, <8 x i64> %{{y|1}})
     simd_saturating_sub(x, y)
 }

 // CHECK-LABEL: @usub_u128x2
 #[no_mangle]
 pub unsafe fn usub_u128x2(x: u128x2, y: u128x2) -> u128x2 {
-    // CHECK: %{{[0-9]+}} = call <2 x i128> @llvm.usub.sat.v2i128(<2 x i128> %{{x|1}}, <2 x i128> %{{y|2}})
+    // CHECK: %{{[0-9]+}} = call <2 x i128> @llvm.usub.sat.v2i128(<2 x i128> %{{x|0}}, <2 x i128> %{{y|1}})
     simd_saturating_sub(x, y)
 }

 // CHECK-LABEL: @usub_u128x4
 #[no_mangle]
 pub unsafe fn usub_u128x4(x: u128x4, y: u128x4) -> u128x4 {
-    // CHECK: %{{[0-9]+}} = call <4 x i128> @llvm.usub.sat.v4i128(<4 x i128> %{{x|1}}, <4 x i128> %{{y|2}})
+    // CHECK: %{{[0-9]+}} = call <4 x i128> @llvm.usub.sat.v4i128(<4 x i128> %{{x|0}}, <4 x i128> %{{y|1}})
     simd_saturating_sub(x, y)
 }
diff --git a/tests/codegen/simd-intrinsic/simd-intrinsic-generic-extract-insert.rs b/tests/codegen/simd-intrinsic/simd-intrinsic-generic-extract-insert.rs
index 80583dec1..a5d2509d0 100644
--- a/tests/codegen/simd-intrinsic/simd-intrinsic-generic-extract-insert.rs
+++ b/tests/codegen/simd-intrinsic/simd-intrinsic-generic-extract-insert.rs
@@ -35,13 +35,13 @@ pub unsafe fn extract_s(v: S<4>, i: u32) -> f32 {
 // CHECK-LABEL: @insert_m
 #[no_mangle]
 pub unsafe fn insert_m(v: M, i: u32, j: f32) -> M {
-    // CHECK: insertelement <4 x float> %{{v|1|2}}, float %j, i32 %i
+    // CHECK: insertelement <4 x float> %{{v|0|1}}, float %j, i32 %i
     simd_insert(v, i, j)
 }

 // CHECK-LABEL: @insert_s
 #[no_mangle]
 pub unsafe fn insert_s(v: S<4>, i: u32, j: f32) -> S<4> {
-    // CHECK: insertelement <4 x float> %{{v|1|2}}, float %j, i32 %i
+    // CHECK: insertelement <4 x float> %{{v|0|1}}, float %j, i32 %i
     simd_insert(v, i, j)
 }
diff --git a/tests/codegen/simd-intrinsic/simd-intrinsic-generic-gather.rs b/tests/codegen/simd-intrinsic/simd-intrinsic-generic-gather.rs
index cacc32f2f..0bb210196 100644
--- a/tests/codegen/simd-intrinsic/simd-intrinsic-generic-gather.rs
+++ b/tests/codegen/simd-intrinsic/simd-intrinsic-generic-gather.rs
@@ -23,7 +23,7 @@ extern "platform-intrinsic" {
 #[no_mangle]
 pub unsafe fn gather_f32x2(pointers: Vec2<*const f32>, mask: Vec2<i32>,
                            values: Vec2<f32>) -> Vec2<f32> {
-    // CHECK: call <2 x float> @llvm.masked.gather.v2f32.{{.+}}(<2 x {{float\*|ptr}}> {{.*}}, i32 {{.*}}, <2 x i1> {{.*}}, <2 x float> {{.*}})
+    // CHECK: call <2 x float> @llvm.masked.gather.v2f32.v2p0(<2 x ptr> {{.*}}, i32 {{.*}}, <2 x i1> {{.*}}, <2 x float> {{.*}})
     simd_gather(values, pointers, mask)
 }

@@ -31,6 +31,6 @@ pub unsafe fn gather_f32x2(pointers: Vec2<*const f32>, mask: Vec2<i32>,
 #[no_mangle]
 pub unsafe fn gather_pf32x2(pointers: Vec2<*const *const f32>, mask: Vec2<i32>,
                             values: Vec2<*const f32>) -> Vec2<*const f32> {
-    // CHECK: call <2 x {{float\*|ptr}}> @llvm.masked.gather.{{.+}}(<2 x {{float\*\*|ptr}}> {{.*}}, i32 {{.*}}, <2 x i1> {{.*}}, <2 x {{float\*|ptr}}> {{.*}})
+    // CHECK: call <2 x ptr> @llvm.masked.gather.v2p0.v2p0(<2 x ptr> {{.*}}, i32 {{.*}}, <2 x i1> {{.*}}, <2 x ptr> {{.*}})
     simd_gather(values, pointers, mask)
 }
diff --git a/tests/codegen/simd-intrinsic/simd-intrinsic-generic-scatter.rs b/tests/codegen/simd-intrinsic/simd-intrinsic-generic-scatter.rs
index 94ecaf609..51953560b 100644
--- a/tests/codegen/simd-intrinsic/simd-intrinsic-generic-scatter.rs
+++ b/tests/codegen/simd-intrinsic/simd-intrinsic-generic-scatter.rs
@@ -23,7 +23,7 @@ extern "platform-intrinsic" {
 #[no_mangle]
 pub unsafe fn scatter_f32x2(pointers: Vec2<*mut f32>, mask: Vec2<i32>,
                             values: Vec2<f32>) {
-    // CHECK: call void @llvm.masked.scatter.v2f32.v2p0{{.*}}(<2 x float> {{.*}}, <2 x {{float\*|ptr}}> {{.*}}, i32 {{.*}}, <2 x i1> {{.*}})
+    // CHECK: call void @llvm.masked.scatter.v2f32.v2p0(<2 x float> {{.*}}, <2 x ptr> {{.*}}, i32 {{.*}}, <2 x i1> {{.*}})
     simd_scatter(values, pointers, mask)
 }

@@ -32,6 +32,6 @@ pub unsafe fn scatter_f32x2(pointers: Vec2<*mut f32>, mask: Vec2<i32>,
 #[no_mangle]
 pub unsafe fn scatter_pf32x2(pointers: Vec2<*mut *const f32>, mask: Vec2<i32>,
                              values: Vec2<*const f32>) {
-    // CHECK: call void @llvm.masked.scatter.v2p0{{.*}}.v2p0{{.*}}(<2 x {{float\*|ptr}}> {{.*}}, <2 x {{float\*\*|ptr}}> {{.*}}, i32 {{.*}}, <2 x i1> {{.*}})
+    // CHECK: call void @llvm.masked.scatter.v2p0.v2p0(<2 x ptr> {{.*}}, <2 x ptr> {{.*}}, i32 {{.*}}, <2 x i1> {{.*}})
     simd_scatter(values, pointers, mask)
 }
diff --git a/tests/codegen/simd-intrinsic/simd-intrinsic-transmute-array.rs b/tests/codegen/simd-intrinsic/simd-intrinsic-transmute-array.rs
index fd488a14b..eb4ce307e 100644
--- a/tests/codegen/simd-intrinsic/simd-intrinsic-transmute-array.rs
+++ b/tests/codegen/simd-intrinsic/simd-intrinsic-transmute-array.rs
@@ -2,7 +2,6 @@
 // compile-flags: -C no-prepopulate-passes

 #![crate_type = "lib"]
-
 #![allow(non_camel_case_types)]
 #![feature(repr_simd, platform_intrinsics)]
 #![feature(inline_const)]
@@ -43,8 +42,8 @@ pub fn build_array_s(x: [f32; 4]) -> S<4> {
 // CHECK-LABEL: @build_array_transmute_s
 #[no_mangle]
 pub fn build_array_transmute_s(x: [f32; 4]) -> S<4> {
-    // CHECK: %[[VAL:.+]] = load <4 x float>, {{ptr %x|.+>\* %.+}}, align [[ARRAY_ALIGN]]
-    // CHECK: store <4 x float> %[[VAL:.+]], {{ptr %0|.+>\* %.+}}, align [[VECTOR_ALIGN]]
+    // CHECK: %[[VAL:.+]] = load <4 x float>, ptr %x, align [[ARRAY_ALIGN]]
+    // CHECK: store <4 x float> %[[VAL:.+]], ptr %_0, align [[VECTOR_ALIGN]]
     unsafe { std::mem::transmute(x) }
 }

@@ -58,8 +57,8 @@ pub fn build_array_t(x: [f32; 4]) -> T {
 // CHECK-LABEL: @build_array_transmute_t
 #[no_mangle]
 pub fn build_array_transmute_t(x: [f32; 4]) -> T {
-    // CHECK: %[[VAL:.+]] = load <4 x float>, {{ptr %x|.+>\* %.+}}, align [[ARRAY_ALIGN]]
-    // CHECK: store <4 x float> %[[VAL:.+]], {{ptr %0|.+>\* %.+}}, align [[VECTOR_ALIGN]]
+    // CHECK: %[[VAL:.+]] = load <4 x float>, ptr %x, align [[ARRAY_ALIGN]]
+    // CHECK: store <4 x float> %[[VAL:.+]], ptr %_0, align [[VECTOR_ALIGN]]
     unsafe { std::mem::transmute(x) }
 }

@@ -77,7 +76,7 @@ pub fn build_array_u(x: [f32; 4]) -> U {
 // CHECK-LABEL: @build_array_transmute_u
 #[no_mangle]
 pub fn build_array_transmute_u(x: [f32; 4]) -> U {
-    // CHECK: %[[VAL:.+]] = load <4 x float>, {{ptr %x|.+>\* %.+}}, align [[ARRAY_ALIGN]]
-    // CHECK: store <4 x float> %[[VAL:.+]], {{ptr %0|.+>\* %.+}}, align [[VECTOR_ALIGN]]
+    // CHECK: %[[VAL:.+]] = load <4 x float>, ptr %x, align [[ARRAY_ALIGN]]
+    // CHECK: store <4 x float> %[[VAL:.+]], ptr %_0, align [[VECTOR_ALIGN]]
     unsafe { std::mem::transmute(x) }
 }
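For readers skimming the diff, these files are rustc's FileCheck-based codegen tests: the `// CHECK` comments are matched against the LLVM IR emitted for the file, and the updated pattern `%{{x|0}}` accepts either `%x` (the vector passed directly) or `%0` (the vector reloaded from a pointer when the target sets `simd_types_indirect`), as the NOTE(eddyb) comment above explains. The following is a minimal, self-contained sketch of one such test, not an excerpt of the actual file; the `i8x2` definition here is an assumption for illustration, since the real type definitions sit earlier in the test file, outside the hunks shown above. The extern declaration, attributes, and function body mirror the diff.

```rust
// Sketch of the codegen-test pattern updated in this diff (nightly-only features).
// ASSUMPTION: `i8x2` is defined here only for illustration; the real test file
// defines its vector types elsewhere, outside the hunks shown in the diff.
#![crate_type = "lib"]
#![allow(non_camel_case_types)]
#![feature(repr_simd, platform_intrinsics)]

#[repr(simd)]
#[derive(Copy, Clone)]
pub struct i8x2(i8, i8);

extern "platform-intrinsic" {
    fn simd_saturating_add<T>(x: T, y: T) -> T;
}

// The CHECK pattern matches `%x` when the vector is passed directly, or `%0`
// when it is passed indirectly and loaded from a pointer first.
// CHECK-LABEL: @sadd_i8x2
#[no_mangle]
pub unsafe fn sadd_i8x2(x: i8x2, y: i8x2) -> i8x2 {
    // CHECK: call <2 x i8> @llvm.sadd.sat.v2i8(<2 x i8> %{{x|0}}, <2 x i8> %{{y|1}})
    simd_saturating_add(x, y)
}
```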