author     Daniel Baumann <daniel.baumann@progress-linux.org>   2024-04-17 12:19:43 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>   2024-04-17 12:19:43 +0000
commit     3e3e70d529d8c7d7c4d7bc4fefc9f109393b9245
tree       daf049b282ab10e8c3d03e409b3cd84ff3f7690c /tests/codegen
parent     Adding debian version 1.68.2+dfsg1-1.
Merging upstream version 1.69.0+dfsg1.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'tests/codegen')
29 files changed, 446 insertions, 112 deletions
diff --git a/tests/codegen/array-equality.rs b/tests/codegen/array-equality.rs index cd5e82a92..abfe295f8 100644 --- a/tests/codegen/array-equality.rs +++ b/tests/codegen/array-equality.rs @@ -1,4 +1,4 @@ -// compile-flags: -O +// compile-flags: -O -Z merge-functions=disabled // only-x86_64 #![crate_type = "lib"] @@ -43,6 +43,15 @@ pub fn array_eq_long(a: &[u16; 1234], b: &[u16; 1234]) -> bool { a == b } +// CHECK-LABEL: @array_char_eq +#[no_mangle] +pub fn array_char_eq(a: [char; 2], b: [char; 2]) -> bool { + // CHECK-NEXT: start: + // CHECK-NEXT: %[[EQ:.+]] = icmp eq i64 %0, %1 + // CHECK-NEXT: ret i1 %[[EQ]] + a == b +} + // CHECK-LABEL: @array_eq_zero_short(i48 #[no_mangle] pub fn array_eq_zero_short(x: [u16; 3]) -> bool { @@ -52,6 +61,25 @@ pub fn array_eq_zero_short(x: [u16; 3]) -> bool { x == [0; 3] } +// CHECK-LABEL: @array_eq_none_short(i40 +#[no_mangle] +pub fn array_eq_none_short(x: [Option<std::num::NonZeroU8>; 5]) -> bool { + // CHECK-NEXT: start: + // CHECK-NEXT: %[[EQ:.+]] = icmp eq i40 %0, 0 + // CHECK-NEXT: ret i1 %[[EQ]] + x == [None; 5] +} + +// CHECK-LABEL: @array_eq_zero_nested( +#[no_mangle] +pub fn array_eq_zero_nested(x: [[u8; 3]; 3]) -> bool { + // CHECK: %[[VAL:.+]] = load i72 + // CHECK-SAME: align 1 + // CHECK: %[[EQ:.+]] = icmp eq i72 %[[VAL]], 0 + // CHECK: ret i1 %[[EQ]] + x == [[0; 3]; 3] +} + // CHECK-LABEL: @array_eq_zero_mid( #[no_mangle] pub fn array_eq_zero_mid(x: [u16; 8]) -> bool { diff --git a/tests/codegen/array-map.rs b/tests/codegen/array-map.rs new file mode 100644 index 000000000..9298e89e3 --- /dev/null +++ b/tests/codegen/array-map.rs @@ -0,0 +1,49 @@ +// compile-flags: -C opt-level=3 -C target-cpu=x86-64-v3 +// no-system-llvm +// only-x86_64 +// ignore-debug (the extra assertions get in the way) + +#![crate_type = "lib"] +#![feature(array_zip)] + +// CHECK-LABEL: @short_integer_map +#[no_mangle] +pub fn short_integer_map(x: [u32; 8]) -> [u32; 8] { + // CHECK: load <8 x i32> + // CHECK: shl <8 x i32> + // CHECK: or <8 x i32> + // CHECK: store <8 x i32> + x.map(|x| 2 * x + 1) +} + +// CHECK-LABEL: @short_integer_zip_map +#[no_mangle] +pub fn short_integer_zip_map(x: [u32; 8], y: [u32; 8]) -> [u32; 8] { + // CHECK: %[[A:.+]] = load <8 x i32> + // CHECK: %[[B:.+]] = load <8 x i32> + // CHECK: sub <8 x i32> %[[A]], %[[B]] + // CHECK: store <8 x i32> + x.zip(y).map(|(x, y)| x - y) +} + +// This test is checking that LLVM can SRoA away a bunch of the overhead, +// like fully moving the iterators to registers. Notably, previous implementations +// of `map` ended up `alloca`ing the whole `array::IntoIterator`, meaning both a +// hard-to-eliminate `memcpy` and that the iteration counts needed to be written +// out to stack every iteration, even for infallible operations on `Copy` types. +// +// This is still imperfect, as there's more copies than would be ideal, +// but hopefully work like #103830 will improve that in future, +// and update this test to be stricter. 
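The `zip`-then-`map` pattern above relies on the unstable `array_zip` feature enabled at the top of the test. As a rough stable-Rust sketch of the same element-wise operation (the helper name is illustrative, not from the test), the pair can be combined with `std::array::from_fn` instead:

// Element-wise subtraction of two fixed-size arrays without `array_zip`,
// using the stable `std::array::from_fn` constructor to index both inputs.
fn zip_sub(x: [u32; 8], y: [u32; 8]) -> [u32; 8] {
    std::array::from_fn(|i| x[i] - y[i])
}

fn main() {
    assert_eq!(zip_sub([10; 8], [3; 8]), [7; 8]);
}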
+// +// CHECK-LABEL: @long_integer_map +#[no_mangle] +pub fn long_integer_map(x: [u32; 64]) -> [u32; 64] { + // CHECK: start: + // CHECK-NEXT: alloca [64 x i32] + // CHECK-NEXT: alloca %"core::mem::manually_drop::ManuallyDrop<[u32; 64]>" + // CHECK-NOT: alloca + // CHECK: mul <{{[0-9]+}} x i32> + // CHECK: add <{{[0-9]+}} x i32> + x.map(|x| 13 * x + 7) +} diff --git a/tests/codegen/autovectorize-f32x4.rs b/tests/codegen/autovectorize-f32x4.rs index 6b09c8fc9..9ecea53f1 100644 --- a/tests/codegen/autovectorize-f32x4.rs +++ b/tests/codegen/autovectorize-f32x4.rs @@ -1,6 +1,7 @@ -// compile-flags: -C opt-level=3 +// compile-flags: -C opt-level=3 -Z merge-functions=disabled // only-x86_64 #![crate_type = "lib"] +#![feature(array_zip)] // CHECK-LABEL: @auto_vectorize_direct #[no_mangle] @@ -30,3 +31,13 @@ pub fn auto_vectorize_loop(a: [f32; 4], b: [f32; 4]) -> [f32; 4] { } c } + +// CHECK-LABEL: @auto_vectorize_array_zip_map +#[no_mangle] +pub fn auto_vectorize_array_zip_map(a: [f32; 4], b: [f32; 4]) -> [f32; 4] { +// CHECK: load <4 x float> +// CHECK: load <4 x float> +// CHECK: fadd <4 x float> +// CHECK: store <4 x float> + a.zip(b).map(|(a, b)| a + b) +} diff --git a/tests/codegen/avr/avr-func-addrspace.rs b/tests/codegen/avr/avr-func-addrspace.rs index e9740e30d..bc11e1081 100644 --- a/tests/codegen/avr/avr-func-addrspace.rs +++ b/tests/codegen/avr/avr-func-addrspace.rs @@ -109,3 +109,28 @@ pub unsafe fn transmute_fn_ptr_to_data(x: fn()) -> *const () { // as long as it doesn't cause a verifier error by using `bitcast`. transmute(x) } + +pub enum Either<T, U> { A(T), B(U) } + +// Previously, we would codegen this as passing/returning a scalar pair of `{ i8, ptr }`, +// with the `ptr` field representing both `&i32` and `fn()` depending on the variant. +// This is incorrect, because `fn()` should be `ptr addrspace(1)`, not `ptr`. + +// CHECK: define{{.+}}void @should_not_combine_addrspace({{.+\*|ptr}}{{.+}}sret{{.+}}%0, {{.+\*|ptr}}{{.+}}%x) +#[no_mangle] +#[inline(never)] +pub fn should_not_combine_addrspace(x: Either<&i32, fn()>) -> Either<&i32, fn()> { + x +} + +// The incorrectness described above would result in us producing (after optimizations) +// a `ptrtoint`/`inttoptr` roundtrip to convert from `ptr` to `ptr addrspace(1)`. 
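For context, the shape being guarded here is just an enum that can hold either a data reference or a function pointer; a minimal host-side sketch follows (the address-space distinction only appears in the AVR-generated IR, not in the Rust source):

// The enum from the test: one variant holds a data reference, the other a
// function pointer. On AVR the two pointer kinds live in different LLVM
// address spaces, so they must not be collapsed into a single pointer field.
pub enum Either<T, U> { A(T), B(U) }

fn noop() {}

fn main() {
    let value = 42;
    let a: Either<&i32, fn()> = Either::A(&value);
    let b: Either<&i32, fn()> = Either::B(noop);
    if let Either::A(r) = a { assert_eq!(*r, 42); }
    if let Either::B(f) = b { f(); }
}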
+ +// CHECK-LABEL: @call_with_fn_ptr +#[no_mangle] +pub fn call_with_fn_ptr<'a>(f: fn()) -> Either<&'a i32, fn()> { + // CHECK-NOT: ptrtoint + // CHECK-NOT: inttoptr + // CHECK: call addrspace(1) void @should_not_combine_addrspace + should_not_combine_addrspace(Either::B(f)) +} diff --git a/tests/codegen/consts.rs b/tests/codegen/consts.rs index 260d9de86..dd22fd0f7 100644 --- a/tests/codegen/consts.rs +++ b/tests/codegen/consts.rs @@ -1,5 +1,4 @@ // compile-flags: -C no-prepopulate-passes -// min-llvm-version: 14.0 #![crate_type = "lib"] @@ -10,11 +9,11 @@ // CHECK: @STATIC = {{.*}}, align 4 // This checks the constants from inline_enum_const -// CHECK: @alloc12 = {{.*}}, align 2 +// CHECK: @alloc_76bfe2f13a3e3b01074971d122eac57e = {{.*}}, align 2 // This checks the constants from {low,high}_align_const, they share the same // constant, but the alignment differs, so the higher one should be used -// CHECK: [[LOW_HIGH:@alloc[0-9]+]] = {{.*}}, align 4 +// CHECK: [[LOW_HIGH:@alloc_[a-f0-9]+]] = {{.*}}, align 4 #[derive(Copy, Clone)] // repr(i16) is required for the {low,high}_align_const test diff --git a/tests/codegen/debug-vtable.rs b/tests/codegen/debug-vtable.rs index bdd312878..d82b737de 100644 --- a/tests/codegen/debug-vtable.rs +++ b/tests/codegen/debug-vtable.rs @@ -9,6 +9,14 @@ // compile-flags: -Cdebuginfo=2 -Copt-level=0 -Csymbol-mangling-version=v0 // ignore-tidy-linelength +// Make sure that vtables don't have the unnamed_addr attribute when debuginfo is enabled. +// This helps debuggers more reliably map from dyn pointer to concrete type. +// CHECK: @vtable.0 = private constant <{ +// CHECK: @vtable.1 = private constant <{ +// CHECK: @vtable.2 = private constant <{ +// CHECK: @vtable.3 = private constant <{ +// CHECK: @vtable.4 = private constant <{ + // NONMSVC: ![[USIZE:[0-9]+]] = !DIBasicType(name: "usize" // MSVC: ![[USIZE:[0-9]+]] = !DIDerivedType(tag: DW_TAG_typedef, name: "usize" // NONMSVC: ![[PTR:[0-9]+]] = !DIDerivedType(tag: DW_TAG_pointer_type, name: "*const ()" diff --git a/tests/codegen/function-arguments-noopt.rs b/tests/codegen/function-arguments-noopt.rs index ff76405a4..0c62e0d35 100644 --- a/tests/codegen/function-arguments-noopt.rs +++ b/tests/codegen/function-arguments-noopt.rs @@ -29,6 +29,12 @@ pub fn borrow(x: &i32) -> &i32 { x } +// CHECK: align 4 {{i32\*|ptr}} @borrow_mut({{i32\*|ptr}} align 4 %x) +#[no_mangle] +pub fn borrow_mut(x: &mut i32) -> &mut i32 { + x +} + // CHECK-LABEL: @borrow_call #[no_mangle] pub fn borrow_call(x: &i32, f: fn(&i32) -> &i32) -> &i32 { diff --git a/tests/codegen/function-arguments.rs b/tests/codegen/function-arguments.rs index 1f979d7b9..d6f019016 100644 --- a/tests/codegen/function-arguments.rs +++ b/tests/codegen/function-arguments.rs @@ -1,6 +1,7 @@ // compile-flags: -O -C no-prepopulate-passes #![crate_type = "lib"] +#![feature(dyn_star)] use std::mem::MaybeUninit; use std::num::NonZeroU64; @@ -85,6 +86,12 @@ pub fn option_nonzero_int(x: Option<NonZeroU64>) -> Option<NonZeroU64> { pub fn readonly_borrow(_: &i32) { } +// CHECK: noundef align 4 dereferenceable(4) {{i32\*|ptr}} @readonly_borrow_ret() +#[no_mangle] +pub fn readonly_borrow_ret() -> &'static i32 { + loop {} +} + // CHECK: @static_borrow({{i32\*|ptr}} noalias noundef readonly align 4 dereferenceable(4) %_1) // static borrow may be captured #[no_mangle] @@ -115,9 +122,17 @@ pub fn mutable_unsafe_borrow(_: &mut UnsafeInner) { pub fn mutable_borrow(_: &mut i32) { } +// CHECK: noundef align 4 dereferenceable(4) {{i32\*|ptr}} @mutable_borrow_ret() #[no_mangle] -// 
CHECK: @mutable_notunpin_borrow({{i32\*|ptr}} noundef align 4 dereferenceable(4) %_1) +pub fn mutable_borrow_ret() -> &'static mut i32 { + loop {} +} + +#[no_mangle] +// CHECK: @mutable_notunpin_borrow({{i32\*|ptr}} noundef nonnull align 4 %_1) // This one is *not* `noalias` because it might be self-referential. +// It is also not `dereferenceable` due to +// <https://github.com/rust-lang/unsafe-code-guidelines/issues/381>. pub fn mutable_notunpin_borrow(_: &mut NotUnpin) { } @@ -167,6 +182,12 @@ pub fn _box(x: Box<i32>) -> Box<i32> { x } +// CHECK: noundef nonnull align 4 {{i32\*|ptr}} @notunpin_box({{i32\*|ptr}} noundef nonnull align 4 %x) +#[no_mangle] +pub fn notunpin_box(x: Box<NotUnpin>) -> Box<NotUnpin> { + x +} + // CHECK: @struct_return({{%S\*|ptr}} noalias nocapture noundef sret(%S) dereferenceable(32){{( %0)?}}) #[no_mangle] pub fn struct_return() -> S { @@ -233,12 +254,12 @@ pub fn trait_raw(_: *const dyn Drop) { // CHECK: @trait_box({{\{\}\*|ptr}} noalias noundef nonnull align 1{{( %0)?}}, {{.+}} noalias noundef readonly align {{.*}} dereferenceable({{.*}}){{( %1)?}}) #[no_mangle] -pub fn trait_box(_: Box<dyn Drop>) { +pub fn trait_box(_: Box<dyn Drop + Unpin>) { } // CHECK: { {{i8\*|ptr}}, {{i8\*|ptr}} } @trait_option({{i8\*|ptr}} noalias noundef align 1 %x.0, {{i8\*|ptr}} %x.1) #[no_mangle] -pub fn trait_option(x: Option<Box<dyn Drop>>) -> Option<Box<dyn Drop>> { +pub fn trait_option(x: Option<Box<dyn Drop + Unpin>>) -> Option<Box<dyn Drop + Unpin>> { x } @@ -259,3 +280,11 @@ pub fn enum_id_1(x: Option<Result<u16, u16>>) -> Option<Result<u16, u16>> { pub fn enum_id_2(x: Option<u8>) -> Option<u8> { x } + +// CHECK: { {{\{\}\*|ptr}}, {{.+}} } @dyn_star({{\{\}\*|ptr}} noundef %x.0, {{.+}} noalias noundef readonly align {{.*}} dereferenceable({{.*}}) %x.1) +// Expect an ABI something like `{ {}*, [3 x i64]* }`, but that's hard to match on generically, +// so do like the `trait_box` test and just match on `{{.+}}` for the vtable. +#[no_mangle] +pub fn dyn_star(x: dyn* Drop) -> dyn* Drop { + x +} diff --git a/tests/codegen/inherit_overflow.rs b/tests/codegen/inherit_overflow.rs new file mode 100644 index 000000000..0b0b890b2 --- /dev/null +++ b/tests/codegen/inherit_overflow.rs @@ -0,0 +1,14 @@ +// compile-flags: -Zmir-enable-passes=+Inline,+ConstProp --crate-type lib +// revisions: ASSERT NOASSERT +//[ASSERT] compile-flags: -Coverflow-checks=on +//[NOASSERT] compile-flags: -Coverflow-checks=off + +// CHECK-LABEL: define{{.*}} @assertion +// ASSERT: call void @_ZN4core9panicking5panic17h +// NOASSERT: ret i8 0 +#[no_mangle] +pub fn assertion() -> u8 { + // Optimized MIR will replace this `CheckedBinaryOp` by `const (0, true)`. + // Verify that codegen does or does not emit the panic. + <u8 as std::ops::Add>::add(255, 1) +} diff --git a/tests/codegen/instrument-xray/basic.rs b/tests/codegen/instrument-xray/basic.rs new file mode 100644 index 000000000..d3e49d531 --- /dev/null +++ b/tests/codegen/instrument-xray/basic.rs @@ -0,0 +1,9 @@ +// Checks that `-Z instrument-xray` produces expected instrumentation. 
+// +// needs-xray +// compile-flags: -Z instrument-xray=always + +#![crate_type = "lib"] + +// CHECK: attributes #{{.*}} "function-instrument"="xray-always" +pub fn function() {} diff --git a/tests/codegen/instrument-xray/options-combine.rs b/tests/codegen/instrument-xray/options-combine.rs new file mode 100644 index 000000000..f7e500b65 --- /dev/null +++ b/tests/codegen/instrument-xray/options-combine.rs @@ -0,0 +1,12 @@ +// Checks that `-Z instrument-xray` options can be specified multiple times. +// +// needs-xray +// compile-flags: -Z instrument-xray=skip-exit +// compile-flags: -Z instrument-xray=instruction-threshold=123 +// compile-flags: -Z instrument-xray=instruction-threshold=456 + +#![crate_type = "lib"] + +// CHECK: attributes #{{.*}} "xray-instruction-threshold"="456" "xray-skip-exit" +// CHECK-NOT: attributes #{{.*}} "xray-instruction-threshold"="123" +pub fn function() {} diff --git a/tests/codegen/instrument-xray/options-override.rs b/tests/codegen/instrument-xray/options-override.rs new file mode 100644 index 000000000..00f818379 --- /dev/null +++ b/tests/codegen/instrument-xray/options-override.rs @@ -0,0 +1,11 @@ +// Checks that the last `-Z instrument-xray` option wins. +// +// needs-xray +// compile-flags: -Z instrument-xray=always +// compile-flags: -Z instrument-xray=never + +#![crate_type = "lib"] + +// CHECK: attributes #{{.*}} "function-instrument"="xray-never" +// CHECK-NOT: attributes #{{.*}} "function-instrument"="xray-always" +pub fn function() {} diff --git a/tests/codegen/issue-75659.rs b/tests/codegen/issue-75659.rs index 6bcb59aff..9394868c0 100644 --- a/tests/codegen/issue-75659.rs +++ b/tests/codegen/issue-75659.rs @@ -1,7 +1,7 @@ // This test checks that the call to memchr/slice_contains is optimized away // when searching in small slices. 
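The pattern this test guards is ordinary `contains` on a short, fixed-size slice; a small sketch of the kind of code involved (the helper name is made up for illustration):

// `contains` on a tiny fixed-size slice: the optimizer is expected to turn
// this into a few direct comparisons rather than a call to memchr.
fn has_byte(haystack: &[u8; 3], needle: u8) -> bool {
    haystack.contains(&needle)
}

fn main() {
    assert!(has_byte(&[1, 2, 3], 2));
    assert!(!has_byte(&[1, 2, 3], 9));
}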
-// compile-flags: -O -Zinline-mir=no +// compile-flags: -O -Zinline-mir=false // only-x86_64 #![crate_type = "lib"] diff --git a/tests/codegen/merge-functions.rs b/tests/codegen/merge-functions.rs index 8e8fe5c96..d6caeeee8 100644 --- a/tests/codegen/merge-functions.rs +++ b/tests/codegen/merge-functions.rs @@ -1,4 +1,3 @@ -// min-llvm-version: 14.0 // revisions: O Os //[Os] compile-flags: -Copt-level=s //[O] compile-flags: -O diff --git a/tests/codegen/move-operands.rs b/tests/codegen/move-operands.rs index 6c51324a3..1d8209e8e 100644 --- a/tests/codegen/move-operands.rs +++ b/tests/codegen/move-operands.rs @@ -1,4 +1,4 @@ -// compile-flags: -C no-prepopulate-passes -Zmir-enable-passes=+DestinationPropagation +// compile-flags: -C no-prepopulate-passes -Zmir-enable-passes=+DestinationPropagation,-CopyProp #![crate_type = "lib"] diff --git a/tests/codegen/option-as-slice.rs b/tests/codegen/option-as-slice.rs new file mode 100644 index 000000000..d5077dbf6 --- /dev/null +++ b/tests/codegen/option-as-slice.rs @@ -0,0 +1,28 @@ +// compile-flags: -O +// only-x86_64 + +#![crate_type = "lib"] +#![feature(option_as_slice)] + +extern crate core; + +use core::num::NonZeroU64; +use core::option::Option; + +// CHECK-LABEL: @u64_opt_as_slice +#[no_mangle] +pub fn u64_opt_as_slice(o: &Option<u64>) -> &[u64] { + // CHECK: start: + // CHECK-NOT: select + // CHECK: ret + o.as_slice() +} + +// CHECK-LABEL: @nonzero_u64_opt_as_slice +#[no_mangle] +pub fn nonzero_u64_opt_as_slice(o: &Option<NonZeroU64>) -> &[NonZeroU64] { + // CHECK: start: + // CHECK-NOT: select + // CHECK: ret + o.as_slice() +} diff --git a/tests/codegen/option-nonzero-eq.rs b/tests/codegen/option-nonzero-eq.rs index 598dcc19b..835decd3e 100644 --- a/tests/codegen/option-nonzero-eq.rs +++ b/tests/codegen/option-nonzero-eq.rs @@ -3,6 +3,7 @@ #![crate_type = "lib"] extern crate core; +use core::cmp::Ordering; use core::num::{NonZeroU32, NonZeroI64}; use core::ptr::NonNull; @@ -32,3 +33,12 @@ pub fn non_null_eq(l: Option<NonNull<u8>>, r: Option<NonNull<u8>>) -> bool { // CHECK-NEXT: ret i1 l == r } + +// CHECK-lABEL: @ordering_eq +#[no_mangle] +pub fn ordering_eq(l: Option<Ordering>, r: Option<Ordering>) -> bool { + // CHECK: start: + // CHECK-NEXT: icmp eq i8 + // CHECK-NEXT: ret i1 + l == r +} diff --git a/tests/codegen/remap_path_prefix/main.rs b/tests/codegen/remap_path_prefix/main.rs index 78ebbccfc..6c0cd6997 100644 --- a/tests/codegen/remap_path_prefix/main.rs +++ b/tests/codegen/remap_path_prefix/main.rs @@ -12,7 +12,7 @@ mod aux_mod; include!("aux_mod.rs"); // Here we check that the expansion of the file!() macro is mapped. -// CHECK: @alloc2 = private unnamed_addr constant <{ [34 x i8] }> <{ [34 x i8] c"/the/src/remap_path_prefix/main.rs" }> +// CHECK: @alloc_92a59126a55aa3c0019b6c8a007fe001 = private unnamed_addr constant <{ [34 x i8] }> <{ [34 x i8] c"/the/src/remap_path_prefix/main.rs" }> pub static FILE_PATH: &'static str = file!(); fn main() { diff --git a/tests/codegen/sanitizer-kasan-emits-instrumentation.rs b/tests/codegen/sanitizer-kasan-emits-instrumentation.rs new file mode 100644 index 000000000..d6e3f2719 --- /dev/null +++ b/tests/codegen/sanitizer-kasan-emits-instrumentation.rs @@ -0,0 +1,47 @@ +// Verifies that `-Zsanitizer=kernel-address` emits sanitizer instrumentation. 
+ +// compile-flags: -Zsanitizer=kernel-address +// revisions: aarch64 riscv64imac riscv64gc x86_64 +//[aarch64] compile-flags: --target aarch64-unknown-none +//[aarch64] needs-llvm-components: aarch64 +//[riscv64imac] compile-flags: --target riscv64imac-unknown-none-elf +//[riscv64imac] needs-llvm-components: riscv +//[riscv64imac] min-llvm-version: 16 +//[riscv64gc] compile-flags: --target riscv64gc-unknown-none-elf +//[riscv64gc] needs-llvm-components: riscv +//[riscv64gc] min-llvm-version: 16 +//[x86_64] compile-flags: --target x86_64-unknown-none +//[x86_64] needs-llvm-components: x86 + +#![crate_type = "rlib"] +#![feature(no_core, no_sanitize, lang_items)] +#![no_core] + +#[lang = "sized"] +trait Sized {} + +#[lang = "copy"] +trait Copy {} + +impl Copy for u8 {} + +// CHECK-LABEL: ; sanitizer_kasan_emits_instrumentation::unsanitized +// CHECK-NEXT: ; Function Attrs: +// CHECK-NOT: sanitize_address +// CHECK: start: +// CHECK-NOT: call void @__asan_report_load +// CHECK: } +#[no_sanitize(address)] +pub fn unsanitized(b: &mut u8) -> u8 { + *b +} + +// CHECK-LABEL: ; sanitizer_kasan_emits_instrumentation::sanitized +// CHECK-NEXT: ; Function Attrs: +// CHECK: sanitize_address +// CHECK: start: +// CHECK: call void @__asan_report_load +// CHECK: } +pub fn sanitized(b: &mut u8) -> u8 { + *b +} diff --git a/tests/codegen/simd-intrinsic/simd-intrinsic-generic-arithmetic-saturating.rs b/tests/codegen/simd-intrinsic/simd-intrinsic-generic-arithmetic-saturating.rs index 6fb0ceb40..faac7566a 100644 --- a/tests/codegen/simd-intrinsic/simd-intrinsic-generic-arithmetic-saturating.rs +++ b/tests/codegen/simd-intrinsic/simd-intrinsic-generic-arithmetic-saturating.rs @@ -116,150 +116,150 @@ extern "platform-intrinsic" { fn simd_saturating_sub<T>(x: T, y: T) -> T; } -// NOTE(eddyb) `%{{x|_3}}` is used because on some targets (e.g. WASM) +// NOTE(eddyb) `%{{x|1}}` is used because on some targets (e.g. WASM) // SIMD vectors are passed directly, resulting in `%x` being a vector, // while on others they're passed indirectly, resulting in `%x` being -// a pointer to a vector, and `%_3` a vector loaded from that pointer. +// a pointer to a vector, and `%1` a vector loaded from that pointer. // This is controlled by the target spec option `simd_types_indirect`. -// The same applies to `%{{y|_4}}` as well. +// The same applies to `%{{y|2}}` as well. 
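The CHECK lines that follow all match lane-wise saturating adds and subtracts (`llvm.sadd.sat`, `llvm.uadd.sat`, and friends). As a scalar point of reference for what saturation means per lane, the stable integer methods behave the same way:

// Scalar analogue of the lane-wise vector intrinsics checked below:
// saturating arithmetic clamps at the type's bounds instead of wrapping.
fn main() {
    assert_eq!(i8::MAX.saturating_add(1), i8::MAX); // signed add clamps at 127
    assert_eq!(250u8.saturating_add(10), u8::MAX);  // unsigned add clamps at 255
    assert_eq!(5u8.saturating_sub(9), 0);           // unsigned sub clamps at 0
}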
// CHECK-LABEL: @sadd_i8x2 #[no_mangle] pub unsafe fn sadd_i8x2(x: i8x2, y: i8x2) -> i8x2 { - // CHECK: %{{[0-9]+}} = call <2 x i8> @llvm.sadd.sat.v2i8(<2 x i8> %{{x|_3}}, <2 x i8> %{{y|_4}}) + // CHECK: %{{[0-9]+}} = call <2 x i8> @llvm.sadd.sat.v2i8(<2 x i8> %{{x|1}}, <2 x i8> %{{y|2}}) simd_saturating_add(x, y) } // CHECK-LABEL: @sadd_i8x4 #[no_mangle] pub unsafe fn sadd_i8x4(x: i8x4, y: i8x4) -> i8x4 { - // CHECK: %{{[0-9]+}} = call <4 x i8> @llvm.sadd.sat.v4i8(<4 x i8> %{{x|_3}}, <4 x i8> %{{y|_4}}) + // CHECK: %{{[0-9]+}} = call <4 x i8> @llvm.sadd.sat.v4i8(<4 x i8> %{{x|1}}, <4 x i8> %{{y|2}}) simd_saturating_add(x, y) } // CHECK-LABEL: @sadd_i8x8 #[no_mangle] pub unsafe fn sadd_i8x8(x: i8x8, y: i8x8) -> i8x8 { - // CHECK: %{{[0-9]+}} = call <8 x i8> @llvm.sadd.sat.v8i8(<8 x i8> %{{x|_3}}, <8 x i8> %{{y|_4}}) + // CHECK: %{{[0-9]+}} = call <8 x i8> @llvm.sadd.sat.v8i8(<8 x i8> %{{x|1}}, <8 x i8> %{{y|2}}) simd_saturating_add(x, y) } // CHECK-LABEL: @sadd_i8x16 #[no_mangle] pub unsafe fn sadd_i8x16(x: i8x16, y: i8x16) -> i8x16 { - // CHECK: %{{[0-9]+}} = call <16 x i8> @llvm.sadd.sat.v16i8(<16 x i8> %{{x|_3}}, <16 x i8> %{{y|_4}}) + // CHECK: %{{[0-9]+}} = call <16 x i8> @llvm.sadd.sat.v16i8(<16 x i8> %{{x|1}}, <16 x i8> %{{y|2}}) simd_saturating_add(x, y) } // CHECK-LABEL: @sadd_i8x32 #[no_mangle] pub unsafe fn sadd_i8x32(x: i8x32, y: i8x32) -> i8x32 { - // CHECK: %{{[0-9]+}} = call <32 x i8> @llvm.sadd.sat.v32i8(<32 x i8> %{{x|_3}}, <32 x i8> %{{y|_4}}) + // CHECK: %{{[0-9]+}} = call <32 x i8> @llvm.sadd.sat.v32i8(<32 x i8> %{{x|1}}, <32 x i8> %{{y|2}}) simd_saturating_add(x, y) } // CHECK-LABEL: @sadd_i8x64 #[no_mangle] pub unsafe fn sadd_i8x64(x: i8x64, y: i8x64) -> i8x64 { - // CHECK: %{{[0-9]+}} = call <64 x i8> @llvm.sadd.sat.v64i8(<64 x i8> %{{x|_3}}, <64 x i8> %{{y|_4}}) + // CHECK: %{{[0-9]+}} = call <64 x i8> @llvm.sadd.sat.v64i8(<64 x i8> %{{x|1}}, <64 x i8> %{{y|2}}) simd_saturating_add(x, y) } // CHECK-LABEL: @sadd_i16x2 #[no_mangle] pub unsafe fn sadd_i16x2(x: i16x2, y: i16x2) -> i16x2 { - // CHECK: %{{[0-9]+}} = call <2 x i16> @llvm.sadd.sat.v2i16(<2 x i16> %{{x|_3}}, <2 x i16> %{{y|_4}}) + // CHECK: %{{[0-9]+}} = call <2 x i16> @llvm.sadd.sat.v2i16(<2 x i16> %{{x|1}}, <2 x i16> %{{y|2}}) simd_saturating_add(x, y) } // CHECK-LABEL: @sadd_i16x4 #[no_mangle] pub unsafe fn sadd_i16x4(x: i16x4, y: i16x4) -> i16x4 { - // CHECK: %{{[0-9]+}} = call <4 x i16> @llvm.sadd.sat.v4i16(<4 x i16> %{{x|_3}}, <4 x i16> %{{y|_4}}) + // CHECK: %{{[0-9]+}} = call <4 x i16> @llvm.sadd.sat.v4i16(<4 x i16> %{{x|1}}, <4 x i16> %{{y|2}}) simd_saturating_add(x, y) } // CHECK-LABEL: @sadd_i16x8 #[no_mangle] pub unsafe fn sadd_i16x8(x: i16x8, y: i16x8) -> i16x8 { - // CHECK: %{{[0-9]+}} = call <8 x i16> @llvm.sadd.sat.v8i16(<8 x i16> %{{x|_3}}, <8 x i16> %{{y|_4}}) + // CHECK: %{{[0-9]+}} = call <8 x i16> @llvm.sadd.sat.v8i16(<8 x i16> %{{x|1}}, <8 x i16> %{{y|2}}) simd_saturating_add(x, y) } // CHECK-LABEL: @sadd_i16x16 #[no_mangle] pub unsafe fn sadd_i16x16(x: i16x16, y: i16x16) -> i16x16 { - // CHECK: %{{[0-9]+}} = call <16 x i16> @llvm.sadd.sat.v16i16(<16 x i16> %{{x|_3}}, <16 x i16> %{{y|_4}}) + // CHECK: %{{[0-9]+}} = call <16 x i16> @llvm.sadd.sat.v16i16(<16 x i16> %{{x|1}}, <16 x i16> %{{y|2}}) simd_saturating_add(x, y) } // CHECK-LABEL: @sadd_i16x32 #[no_mangle] pub unsafe fn sadd_i16x32(x: i16x32, y: i16x32) -> i16x32 { - // CHECK: %{{[0-9]+}} = call <32 x i16> @llvm.sadd.sat.v32i16(<32 x i16> %{{x|_3}}, <32 x i16> %{{y|_4}}) + // CHECK: %{{[0-9]+}} = call <32 x i16> 
@llvm.sadd.sat.v32i16(<32 x i16> %{{x|1}}, <32 x i16> %{{y|2}}) simd_saturating_add(x, y) } // CHECK-LABEL: @sadd_i32x2 #[no_mangle] pub unsafe fn sadd_i32x2(x: i32x2, y: i32x2) -> i32x2 { - // CHECK: %{{[0-9]+}} = call <2 x i32> @llvm.sadd.sat.v2i32(<2 x i32> %{{x|_3}}, <2 x i32> %{{y|_4}}) + // CHECK: %{{[0-9]+}} = call <2 x i32> @llvm.sadd.sat.v2i32(<2 x i32> %{{x|1}}, <2 x i32> %{{y|2}}) simd_saturating_add(x, y) } // CHECK-LABEL: @sadd_i32x4 #[no_mangle] pub unsafe fn sadd_i32x4(x: i32x4, y: i32x4) -> i32x4 { - // CHECK: %{{[0-9]+}} = call <4 x i32> @llvm.sadd.sat.v4i32(<4 x i32> %{{x|_3}}, <4 x i32> %{{y|_4}}) + // CHECK: %{{[0-9]+}} = call <4 x i32> @llvm.sadd.sat.v4i32(<4 x i32> %{{x|1}}, <4 x i32> %{{y|2}}) simd_saturating_add(x, y) } // CHECK-LABEL: @sadd_i32x8 #[no_mangle] pub unsafe fn sadd_i32x8(x: i32x8, y: i32x8) -> i32x8 { - // CHECK: %{{[0-9]+}} = call <8 x i32> @llvm.sadd.sat.v8i32(<8 x i32> %{{x|_3}}, <8 x i32> %{{y|_4}}) + // CHECK: %{{[0-9]+}} = call <8 x i32> @llvm.sadd.sat.v8i32(<8 x i32> %{{x|1}}, <8 x i32> %{{y|2}}) simd_saturating_add(x, y) } // CHECK-LABEL: @sadd_i32x16 #[no_mangle] pub unsafe fn sadd_i32x16(x: i32x16, y: i32x16) -> i32x16 { - // CHECK: %{{[0-9]+}} = call <16 x i32> @llvm.sadd.sat.v16i32(<16 x i32> %{{x|_3}}, <16 x i32> %{{y|_4}}) + // CHECK: %{{[0-9]+}} = call <16 x i32> @llvm.sadd.sat.v16i32(<16 x i32> %{{x|1}}, <16 x i32> %{{y|2}}) simd_saturating_add(x, y) } // CHECK-LABEL: @sadd_i64x2 #[no_mangle] pub unsafe fn sadd_i64x2(x: i64x2, y: i64x2) -> i64x2 { - // CHECK: %{{[0-9]+}} = call <2 x i64> @llvm.sadd.sat.v2i64(<2 x i64> %{{x|_3}}, <2 x i64> %{{y|_4}}) + // CHECK: %{{[0-9]+}} = call <2 x i64> @llvm.sadd.sat.v2i64(<2 x i64> %{{x|1}}, <2 x i64> %{{y|2}}) simd_saturating_add(x, y) } // CHECK-LABEL: @sadd_i64x4 #[no_mangle] pub unsafe fn sadd_i64x4(x: i64x4, y: i64x4) -> i64x4 { - // CHECK: %{{[0-9]+}} = call <4 x i64> @llvm.sadd.sat.v4i64(<4 x i64> %{{x|_3}}, <4 x i64> %{{y|_4}}) + // CHECK: %{{[0-9]+}} = call <4 x i64> @llvm.sadd.sat.v4i64(<4 x i64> %{{x|1}}, <4 x i64> %{{y|2}}) simd_saturating_add(x, y) } // CHECK-LABEL: @sadd_i64x8 #[no_mangle] pub unsafe fn sadd_i64x8(x: i64x8, y: i64x8) -> i64x8 { - // CHECK: %{{[0-9]+}} = call <8 x i64> @llvm.sadd.sat.v8i64(<8 x i64> %{{x|_3}}, <8 x i64> %{{y|_4}}) + // CHECK: %{{[0-9]+}} = call <8 x i64> @llvm.sadd.sat.v8i64(<8 x i64> %{{x|1}}, <8 x i64> %{{y|2}}) simd_saturating_add(x, y) } // CHECK-LABEL: @sadd_i128x2 #[no_mangle] pub unsafe fn sadd_i128x2(x: i128x2, y: i128x2) -> i128x2 { - // CHECK: %{{[0-9]+}} = call <2 x i128> @llvm.sadd.sat.v2i128(<2 x i128> %{{x|_3}}, <2 x i128> %{{y|_4}}) + // CHECK: %{{[0-9]+}} = call <2 x i128> @llvm.sadd.sat.v2i128(<2 x i128> %{{x|1}}, <2 x i128> %{{y|2}}) simd_saturating_add(x, y) } // CHECK-LABEL: @sadd_i128x4 #[no_mangle] pub unsafe fn sadd_i128x4(x: i128x4, y: i128x4) -> i128x4 { - // CHECK: %{{[0-9]+}} = call <4 x i128> @llvm.sadd.sat.v4i128(<4 x i128> %{{x|_3}}, <4 x i128> %{{y|_4}}) + // CHECK: %{{[0-9]+}} = call <4 x i128> @llvm.sadd.sat.v4i128(<4 x i128> %{{x|1}}, <4 x i128> %{{y|2}}) simd_saturating_add(x, y) } @@ -268,140 +268,140 @@ pub unsafe fn sadd_i128x4(x: i128x4, y: i128x4) -> i128x4 { // CHECK-LABEL: @uadd_u8x2 #[no_mangle] pub unsafe fn uadd_u8x2(x: u8x2, y: u8x2) -> u8x2 { - // CHECK: %{{[0-9]+}} = call <2 x i8> @llvm.uadd.sat.v2i8(<2 x i8> %{{x|_3}}, <2 x i8> %{{y|_4}}) + // CHECK: %{{[0-9]+}} = call <2 x i8> @llvm.uadd.sat.v2i8(<2 x i8> %{{x|1}}, <2 x i8> %{{y|2}}) simd_saturating_add(x, y) } // CHECK-LABEL: @uadd_u8x4 #[no_mangle] 
pub unsafe fn uadd_u8x4(x: u8x4, y: u8x4) -> u8x4 { - // CHECK: %{{[0-9]+}} = call <4 x i8> @llvm.uadd.sat.v4i8(<4 x i8> %{{x|_3}}, <4 x i8> %{{y|_4}}) + // CHECK: %{{[0-9]+}} = call <4 x i8> @llvm.uadd.sat.v4i8(<4 x i8> %{{x|1}}, <4 x i8> %{{y|2}}) simd_saturating_add(x, y) } // CHECK-LABEL: @uadd_u8x8 #[no_mangle] pub unsafe fn uadd_u8x8(x: u8x8, y: u8x8) -> u8x8 { - // CHECK: %{{[0-9]+}} = call <8 x i8> @llvm.uadd.sat.v8i8(<8 x i8> %{{x|_3}}, <8 x i8> %{{y|_4}}) + // CHECK: %{{[0-9]+}} = call <8 x i8> @llvm.uadd.sat.v8i8(<8 x i8> %{{x|1}}, <8 x i8> %{{y|2}}) simd_saturating_add(x, y) } // CHECK-LABEL: @uadd_u8x16 #[no_mangle] pub unsafe fn uadd_u8x16(x: u8x16, y: u8x16) -> u8x16 { - // CHECK: %{{[0-9]+}} = call <16 x i8> @llvm.uadd.sat.v16i8(<16 x i8> %{{x|_3}}, <16 x i8> %{{y|_4}}) + // CHECK: %{{[0-9]+}} = call <16 x i8> @llvm.uadd.sat.v16i8(<16 x i8> %{{x|1}}, <16 x i8> %{{y|2}}) simd_saturating_add(x, y) } // CHECK-LABEL: @uadd_u8x32 #[no_mangle] pub unsafe fn uadd_u8x32(x: u8x32, y: u8x32) -> u8x32 { - // CHECK: %{{[0-9]+}} = call <32 x i8> @llvm.uadd.sat.v32i8(<32 x i8> %{{x|_3}}, <32 x i8> %{{y|_4}}) + // CHECK: %{{[0-9]+}} = call <32 x i8> @llvm.uadd.sat.v32i8(<32 x i8> %{{x|1}}, <32 x i8> %{{y|2}}) simd_saturating_add(x, y) } // CHECK-LABEL: @uadd_u8x64 #[no_mangle] pub unsafe fn uadd_u8x64(x: u8x64, y: u8x64) -> u8x64 { - // CHECK: %{{[0-9]+}} = call <64 x i8> @llvm.uadd.sat.v64i8(<64 x i8> %{{x|_3}}, <64 x i8> %{{y|_4}}) + // CHECK: %{{[0-9]+}} = call <64 x i8> @llvm.uadd.sat.v64i8(<64 x i8> %{{x|1}}, <64 x i8> %{{y|2}}) simd_saturating_add(x, y) } // CHECK-LABEL: @uadd_u16x2 #[no_mangle] pub unsafe fn uadd_u16x2(x: u16x2, y: u16x2) -> u16x2 { - // CHECK: %{{[0-9]+}} = call <2 x i16> @llvm.uadd.sat.v2i16(<2 x i16> %{{x|_3}}, <2 x i16> %{{y|_4}}) + // CHECK: %{{[0-9]+}} = call <2 x i16> @llvm.uadd.sat.v2i16(<2 x i16> %{{x|1}}, <2 x i16> %{{y|2}}) simd_saturating_add(x, y) } // CHECK-LABEL: @uadd_u16x4 #[no_mangle] pub unsafe fn uadd_u16x4(x: u16x4, y: u16x4) -> u16x4 { - // CHECK: %{{[0-9]+}} = call <4 x i16> @llvm.uadd.sat.v4i16(<4 x i16> %{{x|_3}}, <4 x i16> %{{y|_4}}) + // CHECK: %{{[0-9]+}} = call <4 x i16> @llvm.uadd.sat.v4i16(<4 x i16> %{{x|1}}, <4 x i16> %{{y|2}}) simd_saturating_add(x, y) } // CHECK-LABEL: @uadd_u16x8 #[no_mangle] pub unsafe fn uadd_u16x8(x: u16x8, y: u16x8) -> u16x8 { - // CHECK: %{{[0-9]+}} = call <8 x i16> @llvm.uadd.sat.v8i16(<8 x i16> %{{x|_3}}, <8 x i16> %{{y|_4}}) + // CHECK: %{{[0-9]+}} = call <8 x i16> @llvm.uadd.sat.v8i16(<8 x i16> %{{x|1}}, <8 x i16> %{{y|2}}) simd_saturating_add(x, y) } // CHECK-LABEL: @uadd_u16x16 #[no_mangle] pub unsafe fn uadd_u16x16(x: u16x16, y: u16x16) -> u16x16 { - // CHECK: %{{[0-9]+}} = call <16 x i16> @llvm.uadd.sat.v16i16(<16 x i16> %{{x|_3}}, <16 x i16> %{{y|_4}}) + // CHECK: %{{[0-9]+}} = call <16 x i16> @llvm.uadd.sat.v16i16(<16 x i16> %{{x|1}}, <16 x i16> %{{y|2}}) simd_saturating_add(x, y) } // CHECK-LABEL: @uadd_u16x32 #[no_mangle] pub unsafe fn uadd_u16x32(x: u16x32, y: u16x32) -> u16x32 { - // CHECK: %{{[0-9]+}} = call <32 x i16> @llvm.uadd.sat.v32i16(<32 x i16> %{{x|_3}}, <32 x i16> %{{y|_4}}) + // CHECK: %{{[0-9]+}} = call <32 x i16> @llvm.uadd.sat.v32i16(<32 x i16> %{{x|1}}, <32 x i16> %{{y|2}}) simd_saturating_add(x, y) } // CHECK-LABEL: @uadd_u32x2 #[no_mangle] pub unsafe fn uadd_u32x2(x: u32x2, y: u32x2) -> u32x2 { - // CHECK: %{{[0-9]+}} = call <2 x i32> @llvm.uadd.sat.v2i32(<2 x i32> %{{x|_3}}, <2 x i32> %{{y|_4}}) + // CHECK: %{{[0-9]+}} = call <2 x i32> @llvm.uadd.sat.v2i32(<2 x i32> %{{x|1}}, 
<2 x i32> %{{y|2}}) simd_saturating_add(x, y) } // CHECK-LABEL: @uadd_u32x4 #[no_mangle] pub unsafe fn uadd_u32x4(x: u32x4, y: u32x4) -> u32x4 { - // CHECK: %{{[0-9]+}} = call <4 x i32> @llvm.uadd.sat.v4i32(<4 x i32> %{{x|_3}}, <4 x i32> %{{y|_4}}) + // CHECK: %{{[0-9]+}} = call <4 x i32> @llvm.uadd.sat.v4i32(<4 x i32> %{{x|1}}, <4 x i32> %{{y|2}}) simd_saturating_add(x, y) } // CHECK-LABEL: @uadd_u32x8 #[no_mangle] pub unsafe fn uadd_u32x8(x: u32x8, y: u32x8) -> u32x8 { - // CHECK: %{{[0-9]+}} = call <8 x i32> @llvm.uadd.sat.v8i32(<8 x i32> %{{x|_3}}, <8 x i32> %{{y|_4}}) + // CHECK: %{{[0-9]+}} = call <8 x i32> @llvm.uadd.sat.v8i32(<8 x i32> %{{x|1}}, <8 x i32> %{{y|2}}) simd_saturating_add(x, y) } // CHECK-LABEL: @uadd_u32x16 #[no_mangle] pub unsafe fn uadd_u32x16(x: u32x16, y: u32x16) -> u32x16 { - // CHECK: %{{[0-9]+}} = call <16 x i32> @llvm.uadd.sat.v16i32(<16 x i32> %{{x|_3}}, <16 x i32> %{{y|_4}}) + // CHECK: %{{[0-9]+}} = call <16 x i32> @llvm.uadd.sat.v16i32(<16 x i32> %{{x|1}}, <16 x i32> %{{y|2}}) simd_saturating_add(x, y) } // CHECK-LABEL: @uadd_u64x2 #[no_mangle] pub unsafe fn uadd_u64x2(x: u64x2, y: u64x2) -> u64x2 { - // CHECK: %{{[0-9]+}} = call <2 x i64> @llvm.uadd.sat.v2i64(<2 x i64> %{{x|_3}}, <2 x i64> %{{y|_4}}) + // CHECK: %{{[0-9]+}} = call <2 x i64> @llvm.uadd.sat.v2i64(<2 x i64> %{{x|1}}, <2 x i64> %{{y|2}}) simd_saturating_add(x, y) } // CHECK-LABEL: @uadd_u64x4 #[no_mangle] pub unsafe fn uadd_u64x4(x: u64x4, y: u64x4) -> u64x4 { - // CHECK: %{{[0-9]+}} = call <4 x i64> @llvm.uadd.sat.v4i64(<4 x i64> %{{x|_3}}, <4 x i64> %{{y|_4}}) + // CHECK: %{{[0-9]+}} = call <4 x i64> @llvm.uadd.sat.v4i64(<4 x i64> %{{x|1}}, <4 x i64> %{{y|2}}) simd_saturating_add(x, y) } // CHECK-LABEL: @uadd_u64x8 #[no_mangle] pub unsafe fn uadd_u64x8(x: u64x8, y: u64x8) -> u64x8 { - // CHECK: %{{[0-9]+}} = call <8 x i64> @llvm.uadd.sat.v8i64(<8 x i64> %{{x|_3}}, <8 x i64> %{{y|_4}}) + // CHECK: %{{[0-9]+}} = call <8 x i64> @llvm.uadd.sat.v8i64(<8 x i64> %{{x|1}}, <8 x i64> %{{y|2}}) simd_saturating_add(x, y) } // CHECK-LABEL: @uadd_u128x2 #[no_mangle] pub unsafe fn uadd_u128x2(x: u128x2, y: u128x2) -> u128x2 { - // CHECK: %{{[0-9]+}} = call <2 x i128> @llvm.uadd.sat.v2i128(<2 x i128> %{{x|_3}}, <2 x i128> %{{y|_4}}) + // CHECK: %{{[0-9]+}} = call <2 x i128> @llvm.uadd.sat.v2i128(<2 x i128> %{{x|1}}, <2 x i128> %{{y|2}}) simd_saturating_add(x, y) } // CHECK-LABEL: @uadd_u128x4 #[no_mangle] pub unsafe fn uadd_u128x4(x: u128x4, y: u128x4) -> u128x4 { - // CHECK: %{{[0-9]+}} = call <4 x i128> @llvm.uadd.sat.v4i128(<4 x i128> %{{x|_3}}, <4 x i128> %{{y|_4}}) + // CHECK: %{{[0-9]+}} = call <4 x i128> @llvm.uadd.sat.v4i128(<4 x i128> %{{x|1}}, <4 x i128> %{{y|2}}) simd_saturating_add(x, y) } @@ -412,140 +412,140 @@ pub unsafe fn uadd_u128x4(x: u128x4, y: u128x4) -> u128x4 { // CHECK-LABEL: @ssub_i8x2 #[no_mangle] pub unsafe fn ssub_i8x2(x: i8x2, y: i8x2) -> i8x2 { - // CHECK: %{{[0-9]+}} = call <2 x i8> @llvm.ssub.sat.v2i8(<2 x i8> %{{x|_3}}, <2 x i8> %{{y|_4}}) + // CHECK: %{{[0-9]+}} = call <2 x i8> @llvm.ssub.sat.v2i8(<2 x i8> %{{x|1}}, <2 x i8> %{{y|2}}) simd_saturating_sub(x, y) } // CHECK-LABEL: @ssub_i8x4 #[no_mangle] pub unsafe fn ssub_i8x4(x: i8x4, y: i8x4) -> i8x4 { - // CHECK: %{{[0-9]+}} = call <4 x i8> @llvm.ssub.sat.v4i8(<4 x i8> %{{x|_3}}, <4 x i8> %{{y|_4}}) + // CHECK: %{{[0-9]+}} = call <4 x i8> @llvm.ssub.sat.v4i8(<4 x i8> %{{x|1}}, <4 x i8> %{{y|2}}) simd_saturating_sub(x, y) } // CHECK-LABEL: @ssub_i8x8 #[no_mangle] pub unsafe fn ssub_i8x8(x: i8x8, y: i8x8) -> i8x8 { - // 
CHECK: %{{[0-9]+}} = call <8 x i8> @llvm.ssub.sat.v8i8(<8 x i8> %{{x|_3}}, <8 x i8> %{{y|_4}}) + // CHECK: %{{[0-9]+}} = call <8 x i8> @llvm.ssub.sat.v8i8(<8 x i8> %{{x|1}}, <8 x i8> %{{y|2}}) simd_saturating_sub(x, y) } // CHECK-LABEL: @ssub_i8x16 #[no_mangle] pub unsafe fn ssub_i8x16(x: i8x16, y: i8x16) -> i8x16 { - // CHECK: %{{[0-9]+}} = call <16 x i8> @llvm.ssub.sat.v16i8(<16 x i8> %{{x|_3}}, <16 x i8> %{{y|_4}}) + // CHECK: %{{[0-9]+}} = call <16 x i8> @llvm.ssub.sat.v16i8(<16 x i8> %{{x|1}}, <16 x i8> %{{y|2}}) simd_saturating_sub(x, y) } // CHECK-LABEL: @ssub_i8x32 #[no_mangle] pub unsafe fn ssub_i8x32(x: i8x32, y: i8x32) -> i8x32 { - // CHECK: %{{[0-9]+}} = call <32 x i8> @llvm.ssub.sat.v32i8(<32 x i8> %{{x|_3}}, <32 x i8> %{{y|_4}}) + // CHECK: %{{[0-9]+}} = call <32 x i8> @llvm.ssub.sat.v32i8(<32 x i8> %{{x|1}}, <32 x i8> %{{y|2}}) simd_saturating_sub(x, y) } // CHECK-LABEL: @ssub_i8x64 #[no_mangle] pub unsafe fn ssub_i8x64(x: i8x64, y: i8x64) -> i8x64 { - // CHECK: %{{[0-9]+}} = call <64 x i8> @llvm.ssub.sat.v64i8(<64 x i8> %{{x|_3}}, <64 x i8> %{{y|_4}}) + // CHECK: %{{[0-9]+}} = call <64 x i8> @llvm.ssub.sat.v64i8(<64 x i8> %{{x|1}}, <64 x i8> %{{y|2}}) simd_saturating_sub(x, y) } // CHECK-LABEL: @ssub_i16x2 #[no_mangle] pub unsafe fn ssub_i16x2(x: i16x2, y: i16x2) -> i16x2 { - // CHECK: %{{[0-9]+}} = call <2 x i16> @llvm.ssub.sat.v2i16(<2 x i16> %{{x|_3}}, <2 x i16> %{{y|_4}}) + // CHECK: %{{[0-9]+}} = call <2 x i16> @llvm.ssub.sat.v2i16(<2 x i16> %{{x|1}}, <2 x i16> %{{y|2}}) simd_saturating_sub(x, y) } // CHECK-LABEL: @ssub_i16x4 #[no_mangle] pub unsafe fn ssub_i16x4(x: i16x4, y: i16x4) -> i16x4 { - // CHECK: %{{[0-9]+}} = call <4 x i16> @llvm.ssub.sat.v4i16(<4 x i16> %{{x|_3}}, <4 x i16> %{{y|_4}}) + // CHECK: %{{[0-9]+}} = call <4 x i16> @llvm.ssub.sat.v4i16(<4 x i16> %{{x|1}}, <4 x i16> %{{y|2}}) simd_saturating_sub(x, y) } // CHECK-LABEL: @ssub_i16x8 #[no_mangle] pub unsafe fn ssub_i16x8(x: i16x8, y: i16x8) -> i16x8 { - // CHECK: %{{[0-9]+}} = call <8 x i16> @llvm.ssub.sat.v8i16(<8 x i16> %{{x|_3}}, <8 x i16> %{{y|_4}}) + // CHECK: %{{[0-9]+}} = call <8 x i16> @llvm.ssub.sat.v8i16(<8 x i16> %{{x|1}}, <8 x i16> %{{y|2}}) simd_saturating_sub(x, y) } // CHECK-LABEL: @ssub_i16x16 #[no_mangle] pub unsafe fn ssub_i16x16(x: i16x16, y: i16x16) -> i16x16 { - // CHECK: %{{[0-9]+}} = call <16 x i16> @llvm.ssub.sat.v16i16(<16 x i16> %{{x|_3}}, <16 x i16> %{{y|_4}}) + // CHECK: %{{[0-9]+}} = call <16 x i16> @llvm.ssub.sat.v16i16(<16 x i16> %{{x|1}}, <16 x i16> %{{y|2}}) simd_saturating_sub(x, y) } // CHECK-LABEL: @ssub_i16x32 #[no_mangle] pub unsafe fn ssub_i16x32(x: i16x32, y: i16x32) -> i16x32 { - // CHECK: %{{[0-9]+}} = call <32 x i16> @llvm.ssub.sat.v32i16(<32 x i16> %{{x|_3}}, <32 x i16> %{{y|_4}}) + // CHECK: %{{[0-9]+}} = call <32 x i16> @llvm.ssub.sat.v32i16(<32 x i16> %{{x|1}}, <32 x i16> %{{y|2}}) simd_saturating_sub(x, y) } // CHECK-LABEL: @ssub_i32x2 #[no_mangle] pub unsafe fn ssub_i32x2(x: i32x2, y: i32x2) -> i32x2 { - // CHECK: %{{[0-9]+}} = call <2 x i32> @llvm.ssub.sat.v2i32(<2 x i32> %{{x|_3}}, <2 x i32> %{{y|_4}}) + // CHECK: %{{[0-9]+}} = call <2 x i32> @llvm.ssub.sat.v2i32(<2 x i32> %{{x|1}}, <2 x i32> %{{y|2}}) simd_saturating_sub(x, y) } // CHECK-LABEL: @ssub_i32x4 #[no_mangle] pub unsafe fn ssub_i32x4(x: i32x4, y: i32x4) -> i32x4 { - // CHECK: %{{[0-9]+}} = call <4 x i32> @llvm.ssub.sat.v4i32(<4 x i32> %{{x|_3}}, <4 x i32> %{{y|_4}}) + // CHECK: %{{[0-9]+}} = call <4 x i32> @llvm.ssub.sat.v4i32(<4 x i32> %{{x|1}}, <4 x i32> %{{y|2}}) simd_saturating_sub(x, y) 
} // CHECK-LABEL: @ssub_i32x8 #[no_mangle] pub unsafe fn ssub_i32x8(x: i32x8, y: i32x8) -> i32x8 { - // CHECK: %{{[0-9]+}} = call <8 x i32> @llvm.ssub.sat.v8i32(<8 x i32> %{{x|_3}}, <8 x i32> %{{y|_4}}) + // CHECK: %{{[0-9]+}} = call <8 x i32> @llvm.ssub.sat.v8i32(<8 x i32> %{{x|1}}, <8 x i32> %{{y|2}}) simd_saturating_sub(x, y) } // CHECK-LABEL: @ssub_i32x16 #[no_mangle] pub unsafe fn ssub_i32x16(x: i32x16, y: i32x16) -> i32x16 { - // CHECK: %{{[0-9]+}} = call <16 x i32> @llvm.ssub.sat.v16i32(<16 x i32> %{{x|_3}}, <16 x i32> %{{y|_4}}) + // CHECK: %{{[0-9]+}} = call <16 x i32> @llvm.ssub.sat.v16i32(<16 x i32> %{{x|1}}, <16 x i32> %{{y|2}}) simd_saturating_sub(x, y) } // CHECK-LABEL: @ssub_i64x2 #[no_mangle] pub unsafe fn ssub_i64x2(x: i64x2, y: i64x2) -> i64x2 { - // CHECK: %{{[0-9]+}} = call <2 x i64> @llvm.ssub.sat.v2i64(<2 x i64> %{{x|_3}}, <2 x i64> %{{y|_4}}) + // CHECK: %{{[0-9]+}} = call <2 x i64> @llvm.ssub.sat.v2i64(<2 x i64> %{{x|1}}, <2 x i64> %{{y|2}}) simd_saturating_sub(x, y) } // CHECK-LABEL: @ssub_i64x4 #[no_mangle] pub unsafe fn ssub_i64x4(x: i64x4, y: i64x4) -> i64x4 { - // CHECK: %{{[0-9]+}} = call <4 x i64> @llvm.ssub.sat.v4i64(<4 x i64> %{{x|_3}}, <4 x i64> %{{y|_4}}) + // CHECK: %{{[0-9]+}} = call <4 x i64> @llvm.ssub.sat.v4i64(<4 x i64> %{{x|1}}, <4 x i64> %{{y|2}}) simd_saturating_sub(x, y) } // CHECK-LABEL: @ssub_i64x8 #[no_mangle] pub unsafe fn ssub_i64x8(x: i64x8, y: i64x8) -> i64x8 { - // CHECK: %{{[0-9]+}} = call <8 x i64> @llvm.ssub.sat.v8i64(<8 x i64> %{{x|_3}}, <8 x i64> %{{y|_4}}) + // CHECK: %{{[0-9]+}} = call <8 x i64> @llvm.ssub.sat.v8i64(<8 x i64> %{{x|1}}, <8 x i64> %{{y|2}}) simd_saturating_sub(x, y) } // CHECK-LABEL: @ssub_i128x2 #[no_mangle] pub unsafe fn ssub_i128x2(x: i128x2, y: i128x2) -> i128x2 { - // CHECK: %{{[0-9]+}} = call <2 x i128> @llvm.ssub.sat.v2i128(<2 x i128> %{{x|_3}}, <2 x i128> %{{y|_4}}) + // CHECK: %{{[0-9]+}} = call <2 x i128> @llvm.ssub.sat.v2i128(<2 x i128> %{{x|1}}, <2 x i128> %{{y|2}}) simd_saturating_sub(x, y) } // CHECK-LABEL: @ssub_i128x4 #[no_mangle] pub unsafe fn ssub_i128x4(x: i128x4, y: i128x4) -> i128x4 { - // CHECK: %{{[0-9]+}} = call <4 x i128> @llvm.ssub.sat.v4i128(<4 x i128> %{{x|_3}}, <4 x i128> %{{y|_4}}) + // CHECK: %{{[0-9]+}} = call <4 x i128> @llvm.ssub.sat.v4i128(<4 x i128> %{{x|1}}, <4 x i128> %{{y|2}}) simd_saturating_sub(x, y) } @@ -554,139 +554,139 @@ pub unsafe fn ssub_i128x4(x: i128x4, y: i128x4) -> i128x4 { // CHECK-LABEL: @usub_u8x2 #[no_mangle] pub unsafe fn usub_u8x2(x: u8x2, y: u8x2) -> u8x2 { - // CHECK: %{{[0-9]+}} = call <2 x i8> @llvm.usub.sat.v2i8(<2 x i8> %{{x|_3}}, <2 x i8> %{{y|_4}}) + // CHECK: %{{[0-9]+}} = call <2 x i8> @llvm.usub.sat.v2i8(<2 x i8> %{{x|1}}, <2 x i8> %{{y|2}}) simd_saturating_sub(x, y) } // CHECK-LABEL: @usub_u8x4 #[no_mangle] pub unsafe fn usub_u8x4(x: u8x4, y: u8x4) -> u8x4 { - // CHECK: %{{[0-9]+}} = call <4 x i8> @llvm.usub.sat.v4i8(<4 x i8> %{{x|_3}}, <4 x i8> %{{y|_4}}) + // CHECK: %{{[0-9]+}} = call <4 x i8> @llvm.usub.sat.v4i8(<4 x i8> %{{x|1}}, <4 x i8> %{{y|2}}) simd_saturating_sub(x, y) } // CHECK-LABEL: @usub_u8x8 #[no_mangle] pub unsafe fn usub_u8x8(x: u8x8, y: u8x8) -> u8x8 { - // CHECK: %{{[0-9]+}} = call <8 x i8> @llvm.usub.sat.v8i8(<8 x i8> %{{x|_3}}, <8 x i8> %{{y|_4}}) + // CHECK: %{{[0-9]+}} = call <8 x i8> @llvm.usub.sat.v8i8(<8 x i8> %{{x|1}}, <8 x i8> %{{y|2}}) simd_saturating_sub(x, y) } // CHECK-LABEL: @usub_u8x16 #[no_mangle] pub unsafe fn usub_u8x16(x: u8x16, y: u8x16) -> u8x16 { - // CHECK: %{{[0-9]+}} = call <16 x i8> 
@llvm.usub.sat.v16i8(<16 x i8> %{{x|_3}}, <16 x i8> %{{y|_4}}) + // CHECK: %{{[0-9]+}} = call <16 x i8> @llvm.usub.sat.v16i8(<16 x i8> %{{x|1}}, <16 x i8> %{{y|2}}) simd_saturating_sub(x, y) } // CHECK-LABEL: @usub_u8x32 #[no_mangle] pub unsafe fn usub_u8x32(x: u8x32, y: u8x32) -> u8x32 { - // CHECK: %{{[0-9]+}} = call <32 x i8> @llvm.usub.sat.v32i8(<32 x i8> %{{x|_3}}, <32 x i8> %{{y|_4}}) + // CHECK: %{{[0-9]+}} = call <32 x i8> @llvm.usub.sat.v32i8(<32 x i8> %{{x|1}}, <32 x i8> %{{y|2}}) simd_saturating_sub(x, y) } // CHECK-LABEL: @usub_u8x64 #[no_mangle] pub unsafe fn usub_u8x64(x: u8x64, y: u8x64) -> u8x64 { - // CHECK: %{{[0-9]+}} = call <64 x i8> @llvm.usub.sat.v64i8(<64 x i8> %{{x|_3}}, <64 x i8> %{{y|_4}}) + // CHECK: %{{[0-9]+}} = call <64 x i8> @llvm.usub.sat.v64i8(<64 x i8> %{{x|1}}, <64 x i8> %{{y|2}}) simd_saturating_sub(x, y) } // CHECK-LABEL: @usub_u16x2 #[no_mangle] pub unsafe fn usub_u16x2(x: u16x2, y: u16x2) -> u16x2 { - // CHECK: %{{[0-9]+}} = call <2 x i16> @llvm.usub.sat.v2i16(<2 x i16> %{{x|_3}}, <2 x i16> %{{y|_4}}) + // CHECK: %{{[0-9]+}} = call <2 x i16> @llvm.usub.sat.v2i16(<2 x i16> %{{x|1}}, <2 x i16> %{{y|2}}) simd_saturating_sub(x, y) } // CHECK-LABEL: @usub_u16x4 #[no_mangle] pub unsafe fn usub_u16x4(x: u16x4, y: u16x4) -> u16x4 { - // CHECK: %{{[0-9]+}} = call <4 x i16> @llvm.usub.sat.v4i16(<4 x i16> %{{x|_3}}, <4 x i16> %{{y|_4}}) + // CHECK: %{{[0-9]+}} = call <4 x i16> @llvm.usub.sat.v4i16(<4 x i16> %{{x|1}}, <4 x i16> %{{y|2}}) simd_saturating_sub(x, y) } // CHECK-LABEL: @usub_u16x8 #[no_mangle] pub unsafe fn usub_u16x8(x: u16x8, y: u16x8) -> u16x8 { - // CHECK: %{{[0-9]+}} = call <8 x i16> @llvm.usub.sat.v8i16(<8 x i16> %{{x|_3}}, <8 x i16> %{{y|_4}}) + // CHECK: %{{[0-9]+}} = call <8 x i16> @llvm.usub.sat.v8i16(<8 x i16> %{{x|1}}, <8 x i16> %{{y|2}}) simd_saturating_sub(x, y) } // CHECK-LABEL: @usub_u16x16 #[no_mangle] pub unsafe fn usub_u16x16(x: u16x16, y: u16x16) -> u16x16 { - // CHECK: %{{[0-9]+}} = call <16 x i16> @llvm.usub.sat.v16i16(<16 x i16> %{{x|_3}}, <16 x i16> %{{y|_4}}) + // CHECK: %{{[0-9]+}} = call <16 x i16> @llvm.usub.sat.v16i16(<16 x i16> %{{x|1}}, <16 x i16> %{{y|2}}) simd_saturating_sub(x, y) } // CHECK-LABEL: @usub_u16x32 #[no_mangle] pub unsafe fn usub_u16x32(x: u16x32, y: u16x32) -> u16x32 { - // CHECK: %{{[0-9]+}} = call <32 x i16> @llvm.usub.sat.v32i16(<32 x i16> %{{x|_3}}, <32 x i16> %{{y|_4}}) + // CHECK: %{{[0-9]+}} = call <32 x i16> @llvm.usub.sat.v32i16(<32 x i16> %{{x|1}}, <32 x i16> %{{y|2}}) simd_saturating_sub(x, y) } // CHECK-LABEL: @usub_u32x2 #[no_mangle] pub unsafe fn usub_u32x2(x: u32x2, y: u32x2) -> u32x2 { - // CHECK: %{{[0-9]+}} = call <2 x i32> @llvm.usub.sat.v2i32(<2 x i32> %{{x|_3}}, <2 x i32> %{{y|_4}}) + // CHECK: %{{[0-9]+}} = call <2 x i32> @llvm.usub.sat.v2i32(<2 x i32> %{{x|1}}, <2 x i32> %{{y|2}}) simd_saturating_sub(x, y) } // CHECK-LABEL: @usub_u32x4 #[no_mangle] pub unsafe fn usub_u32x4(x: u32x4, y: u32x4) -> u32x4 { - // CHECK: %{{[0-9]+}} = call <4 x i32> @llvm.usub.sat.v4i32(<4 x i32> %{{x|_3}}, <4 x i32> %{{y|_4}}) + // CHECK: %{{[0-9]+}} = call <4 x i32> @llvm.usub.sat.v4i32(<4 x i32> %{{x|1}}, <4 x i32> %{{y|2}}) simd_saturating_sub(x, y) } // CHECK-LABEL: @usub_u32x8 #[no_mangle] pub unsafe fn usub_u32x8(x: u32x8, y: u32x8) -> u32x8 { - // CHECK: %{{[0-9]+}} = call <8 x i32> @llvm.usub.sat.v8i32(<8 x i32> %{{x|_3}}, <8 x i32> %{{y|_4}}) + // CHECK: %{{[0-9]+}} = call <8 x i32> @llvm.usub.sat.v8i32(<8 x i32> %{{x|1}}, <8 x i32> %{{y|2}}) simd_saturating_sub(x, y) } // CHECK-LABEL: 
@usub_u32x16 #[no_mangle] pub unsafe fn usub_u32x16(x: u32x16, y: u32x16) -> u32x16 { - // CHECK: %{{[0-9]+}} = call <16 x i32> @llvm.usub.sat.v16i32(<16 x i32> %{{x|_3}}, <16 x i32> %{{y|_4}}) + // CHECK: %{{[0-9]+}} = call <16 x i32> @llvm.usub.sat.v16i32(<16 x i32> %{{x|1}}, <16 x i32> %{{y|2}}) simd_saturating_sub(x, y) } // CHECK-LABEL: @usub_u64x2 #[no_mangle] pub unsafe fn usub_u64x2(x: u64x2, y: u64x2) -> u64x2 { - // CHECK: %{{[0-9]+}} = call <2 x i64> @llvm.usub.sat.v2i64(<2 x i64> %{{x|_3}}, <2 x i64> %{{y|_4}}) + // CHECK: %{{[0-9]+}} = call <2 x i64> @llvm.usub.sat.v2i64(<2 x i64> %{{x|1}}, <2 x i64> %{{y|2}}) simd_saturating_sub(x, y) } // CHECK-LABEL: @usub_u64x4 #[no_mangle] pub unsafe fn usub_u64x4(x: u64x4, y: u64x4) -> u64x4 { - // CHECK: %{{[0-9]+}} = call <4 x i64> @llvm.usub.sat.v4i64(<4 x i64> %{{x|_3}}, <4 x i64> %{{y|_4}}) + // CHECK: %{{[0-9]+}} = call <4 x i64> @llvm.usub.sat.v4i64(<4 x i64> %{{x|1}}, <4 x i64> %{{y|2}}) simd_saturating_sub(x, y) } // CHECK-LABEL: @usub_u64x8 #[no_mangle] pub unsafe fn usub_u64x8(x: u64x8, y: u64x8) -> u64x8 { - // CHECK: %{{[0-9]+}} = call <8 x i64> @llvm.usub.sat.v8i64(<8 x i64> %{{x|_3}}, <8 x i64> %{{y|_4}}) + // CHECK: %{{[0-9]+}} = call <8 x i64> @llvm.usub.sat.v8i64(<8 x i64> %{{x|1}}, <8 x i64> %{{y|2}}) simd_saturating_sub(x, y) } // CHECK-LABEL: @usub_u128x2 #[no_mangle] pub unsafe fn usub_u128x2(x: u128x2, y: u128x2) -> u128x2 { - // CHECK: %{{[0-9]+}} = call <2 x i128> @llvm.usub.sat.v2i128(<2 x i128> %{{x|_3}}, <2 x i128> %{{y|_4}}) + // CHECK: %{{[0-9]+}} = call <2 x i128> @llvm.usub.sat.v2i128(<2 x i128> %{{x|1}}, <2 x i128> %{{y|2}}) simd_saturating_sub(x, y) } // CHECK-LABEL: @usub_u128x4 #[no_mangle] pub unsafe fn usub_u128x4(x: u128x4, y: u128x4) -> u128x4 { - // CHECK: %{{[0-9]+}} = call <4 x i128> @llvm.usub.sat.v4i128(<4 x i128> %{{x|_3}}, <4 x i128> %{{y|_4}}) + // CHECK: %{{[0-9]+}} = call <4 x i128> @llvm.usub.sat.v4i128(<4 x i128> %{{x|1}}, <4 x i128> %{{y|2}}) simd_saturating_sub(x, y) } diff --git a/tests/codegen/simd-intrinsic/simd-intrinsic-generic-bitmask.rs b/tests/codegen/simd-intrinsic/simd-intrinsic-generic-bitmask.rs index 4a98d797b..ea24569bd 100644 --- a/tests/codegen/simd-intrinsic/simd-intrinsic-generic-bitmask.rs +++ b/tests/codegen/simd-intrinsic/simd-intrinsic-generic-bitmask.rs @@ -26,16 +26,16 @@ extern "platform-intrinsic" { fn simd_bitmask<T, U>(x: T) -> U; } -// NOTE(eddyb) `%{{x|_2}}` is used because on some targets (e.g. WASM) +// NOTE(eddyb) `%{{x|1}}` is used because on some targets (e.g. WASM) // SIMD vectors are passed directly, resulting in `%x` being a vector, // while on others they're passed indirectly, resulting in `%x` being -// a pointer to a vector, and `%_2` a vector loaded from that pointer. +// a pointer to a vector, and `%1` a vector loaded from that pointer. // This is controlled by the target spec option `simd_types_indirect`. 
// CHECK-LABEL: @bitmask_int #[no_mangle] pub unsafe fn bitmask_int(x: i32x2) -> u8 { - // CHECK: [[A:%[0-9]+]] = lshr <2 x i32> %{{x|_2}}, <i32 31, i32 31> + // CHECK: [[A:%[0-9]+]] = lshr <2 x i32> %{{x|1}}, <i32 31, i32 31> // CHECK: [[B:%[0-9]+]] = trunc <2 x i32> [[A]] to <2 x i1> // CHECK: [[C:%[0-9]+]] = bitcast <2 x i1> [[B]] to i2 // CHECK: %{{[0-9]+}} = zext i2 [[C]] to i8 @@ -45,7 +45,7 @@ pub unsafe fn bitmask_int(x: i32x2) -> u8 { // CHECK-LABEL: @bitmask_uint #[no_mangle] pub unsafe fn bitmask_uint(x: u32x2) -> u8 { - // CHECK: [[A:%[0-9]+]] = lshr <2 x i32> %{{x|_2}}, <i32 31, i32 31> + // CHECK: [[A:%[0-9]+]] = lshr <2 x i32> %{{x|1}}, <i32 31, i32 31> // CHECK: [[B:%[0-9]+]] = trunc <2 x i32> [[A]] to <2 x i1> // CHECK: [[C:%[0-9]+]] = bitcast <2 x i1> [[B]] to i2 // CHECK: %{{[0-9]+}} = zext i2 [[C]] to i8 @@ -55,7 +55,7 @@ pub unsafe fn bitmask_uint(x: u32x2) -> u8 { // CHECK-LABEL: @bitmask_int16 #[no_mangle] pub unsafe fn bitmask_int16(x: i8x16) -> u16 { - // CHECK: [[A:%[0-9]+]] = lshr <16 x i8> %{{x|_2}}, <i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7> + // CHECK: [[A:%[0-9]+]] = lshr <16 x i8> %{{x|1|2}}, <i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7> // CHECK: [[B:%[0-9]+]] = trunc <16 x i8> [[A]] to <16 x i1> // CHECK: %{{[0-9]+}} = bitcast <16 x i1> [[B]] to i16 // CHECK-NOT: zext diff --git a/tests/codegen/simd-intrinsic/simd-intrinsic-generic-extract-insert.rs b/tests/codegen/simd-intrinsic/simd-intrinsic-generic-extract-insert.rs index b5b0b1330..80583dec1 100644 --- a/tests/codegen/simd-intrinsic/simd-intrinsic-generic-extract-insert.rs +++ b/tests/codegen/simd-intrinsic/simd-intrinsic-generic-extract-insert.rs @@ -21,27 +21,27 @@ extern "platform-intrinsic" { // CHECK-LABEL: @extract_m #[no_mangle] pub unsafe fn extract_m(v: M, i: u32) -> f32 { - // CHECK: extractelement <4 x float> %{{v|_3}}, i32 %i + // CHECK: extractelement <4 x float> %{{v|1|2}}, i32 %i simd_extract(v, i) } // CHECK-LABEL: @extract_s #[no_mangle] pub unsafe fn extract_s(v: S<4>, i: u32) -> f32 { - // CHECK: extractelement <4 x float> %{{v|_3}}, i32 %i + // CHECK: extractelement <4 x float> %{{v|1|2}}, i32 %i simd_extract(v, i) } // CHECK-LABEL: @insert_m #[no_mangle] pub unsafe fn insert_m(v: M, i: u32, j: f32) -> M { - // CHECK: insertelement <4 x float> %{{v|_4}}, float %j, i32 %i + // CHECK: insertelement <4 x float> %{{v|1|2}}, float %j, i32 %i simd_insert(v, i, j) } // CHECK-LABEL: @insert_s #[no_mangle] pub unsafe fn insert_s(v: S<4>, i: u32, j: f32) -> S<4> { - // CHECK: insertelement <4 x float> %{{v|_4}}, float %j, i32 %i + // CHECK: insertelement <4 x float> %{{v|1|2}}, float %j, i32 %i simd_insert(v, i, j) } diff --git a/tests/codegen/simd-intrinsic/simd-intrinsic-transmute-array.rs b/tests/codegen/simd-intrinsic/simd-intrinsic-transmute-array.rs index db5b60567..7c77398df 100644 --- a/tests/codegen/simd-intrinsic/simd-intrinsic-transmute-array.rs +++ b/tests/codegen/simd-intrinsic/simd-intrinsic-transmute-array.rs @@ -22,7 +22,6 @@ pub struct U(f32, f32, f32, f32); #[no_mangle] pub fn build_array_s(x: [f32; 4]) -> S<4> { // CHECK: call void @llvm.memcpy.{{.+}}({{.*}}, i{{[0-9]+}} 16, i1 false) - // CHECK: call void @llvm.memcpy.{{.+}}({{.*}}, i{{[0-9]+}} 16, i1 false) S::<4>(x) } @@ -30,7 +29,6 @@ pub fn build_array_s(x: [f32; 4]) -> S<4> { #[no_mangle] pub fn build_array_t(x: [f32; 4]) -> T { // CHECK: call void @llvm.memcpy.{{.+}}({{.*}}, i{{[0-9]+}} 16, i1 false) - // CHECK: 
call void @llvm.memcpy.{{.+}}({{.*}}, i{{[0-9]+}} 16, i1 false) T(x) } @@ -38,6 +36,5 @@ pub fn build_array_t(x: [f32; 4]) -> T { #[no_mangle] pub fn build_array_u(x: [f32; 4]) -> U { // CHECK: call void @llvm.memcpy.{{.+}}({{.*}}, i{{[0-9]+}} 16, i1 false) - // CHECK: call void @llvm.memcpy.{{.+}}({{.*}}, i{{[0-9]+}} 16, i1 false) unsafe { std::mem::transmute(x) } } diff --git a/tests/codegen/simd_arith_offset.rs b/tests/codegen/simd_arith_offset.rs index 7b623a22a..f23337773 100644 --- a/tests/codegen/simd_arith_offset.rs +++ b/tests/codegen/simd_arith_offset.rs @@ -21,6 +21,6 @@ pub struct Simd<T, const LANES: usize>([T; LANES]); // CHECK-LABEL: smoke #[no_mangle] pub fn smoke(ptrs: SimdConstPtr<u8, 8>, offsets: Simd<usize, 8>) -> SimdConstPtr<u8, 8> { - // CHECK: getelementptr i8, <8 x {{i8\*|ptr}}> %_3, <8 x i64> %_4 + // CHECK: getelementptr i8, <8 x {{i8\*|ptr}}> %1, <8 x i64> %2 unsafe { simd_arith_offset(ptrs, offsets) } } diff --git a/tests/codegen/slice-ref-equality.rs b/tests/codegen/slice-ref-equality.rs index 47fde12bf..8f0adab35 100644 --- a/tests/codegen/slice-ref-equality.rs +++ b/tests/codegen/slice-ref-equality.rs @@ -1,7 +1,10 @@ -// compile-flags: -C opt-level=3 -Zmerge-functions=disabled +// compile-flags: -O -Zmerge-functions=disabled +// ignore-debug (the extra assertions get in the way) #![crate_type = "lib"] +use std::num::{NonZeroI16, NonZeroU32}; + // #71602 reported a simple array comparison just generating a loop. // This was originally fixed by ensuring it generates a single bcmp, // but we now generate it as a load+icmp instead. `is_zero_slice` was @@ -36,3 +39,54 @@ pub fn is_zero_array(data: &[u8; 4]) -> bool { // CHECK-NEXT: ret i1 %[[EQ]] *data == [0; 4] } + +// The following test the extra specializations to make sure that slice +// equality for non-byte types also just emit a `bcmp`, not a loop. 
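These specializations cover ordinary slice comparisons over non-byte element types, as in the sketch below; per the comment above, on success each comparison should lower to a length check plus a single `bcmp`/`memcmp` rather than a per-element loop:

// Slice equality over non-byte element types covered by the specializations:
// plain integers and niche-optimized Option<NonZero> layouts.
use std::num::NonZeroU32;

fn main() {
    let a: &[i32] = &[1, 2, 3];
    let b: &[i32] = &[1, 2, 3];
    assert!(a == b);

    let x: Vec<Option<NonZeroU32>> = vec![NonZeroU32::new(5), NonZeroU32::new(0)];
    let y = x.clone();
    assert!(x == y);
}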
+ +// CHECK-LABEL: @eq_slice_of_nested_u8( +// CHECK-SAME: [[USIZE:i16|i32|i64]] noundef %1 +// CHECK-SAME: [[USIZE]] noundef %3 +#[no_mangle] +fn eq_slice_of_nested_u8(x: &[[u8; 3]], y: &[[u8; 3]]) -> bool { + // CHECK: icmp eq [[USIZE]] %1, %3 + // CHECK: %[[BYTES:.+]] = mul nsw [[USIZE]] %1, 3 + // CHECK: tail call{{( noundef)?}} i32 @{{bcmp|memcmp}}({{i8\*|ptr}} + // CHECK-SAME: , [[USIZE]]{{( noundef)?}} %[[BYTES]]) + x == y +} + +// CHECK-LABEL: @eq_slice_of_i32( +// CHECK-SAME: [[USIZE:i16|i32|i64]] noundef %1 +// CHECK-SAME: [[USIZE]] noundef %3 +#[no_mangle] +fn eq_slice_of_i32(x: &[i32], y: &[i32]) -> bool { + // CHECK: icmp eq [[USIZE]] %1, %3 + // CHECK: %[[BYTES:.+]] = shl nsw [[USIZE]] %1, 2 + // CHECK: tail call{{( noundef)?}} i32 @{{bcmp|memcmp}}({{i32\*|ptr}} + // CHECK-SAME: , [[USIZE]]{{( noundef)?}} %[[BYTES]]) + x == y +} + +// CHECK-LABEL: @eq_slice_of_nonzero( +// CHECK-SAME: [[USIZE:i16|i32|i64]] noundef %1 +// CHECK-SAME: [[USIZE]] noundef %3 +#[no_mangle] +fn eq_slice_of_nonzero(x: &[NonZeroU32], y: &[NonZeroU32]) -> bool { + // CHECK: icmp eq [[USIZE]] %1, %3 + // CHECK: %[[BYTES:.+]] = shl nsw [[USIZE]] %1, 2 + // CHECK: tail call{{( noundef)?}} i32 @{{bcmp|memcmp}}({{i32\*|ptr}} + // CHECK-SAME: , [[USIZE]]{{( noundef)?}} %[[BYTES]]) + x == y +} + +// CHECK-LABEL: @eq_slice_of_option_of_nonzero( +// CHECK-SAME: [[USIZE:i16|i32|i64]] noundef %1 +// CHECK-SAME: [[USIZE]] noundef %3 +#[no_mangle] +fn eq_slice_of_option_of_nonzero(x: &[Option<NonZeroI16>], y: &[Option<NonZeroI16>]) -> bool { + // CHECK: icmp eq [[USIZE]] %1, %3 + // CHECK: %[[BYTES:.+]] = shl nsw [[USIZE]] %1, 1 + // CHECK: tail call{{( noundef)?}} i32 @{{bcmp|memcmp}}({{i16\*|ptr}} + // CHECK-SAME: , [[USIZE]]{{( noundef)?}} %[[BYTES]]) + x == y +} diff --git a/tests/codegen/sse42-implies-crc32.rs b/tests/codegen/sse42-implies-crc32.rs index 47b1a8993..56079d32a 100644 --- a/tests/codegen/sse42-implies-crc32.rs +++ b/tests/codegen/sse42-implies-crc32.rs @@ -1,5 +1,4 @@ // only-x86_64 -// min-llvm-version: 14.0 // compile-flags: -Copt-level=3 #![crate_type = "lib"] diff --git a/tests/codegen/thread-local.rs b/tests/codegen/thread-local.rs index 0f1b29ca7..aa7fab7fb 100644 --- a/tests/codegen/thread-local.rs +++ b/tests/codegen/thread-local.rs @@ -4,6 +4,7 @@ // ignore-wasm globals are used instead of thread locals // ignore-emscripten globals are used instead of thread locals // ignore-android does not use #[thread_local] +// ignore-nto does not use #[thread_local] #![crate_type = "lib"] diff --git a/tests/codegen/uninit-consts.rs b/tests/codegen/uninit-consts.rs index 98a6761f8..54e9a9e9b 100644 --- a/tests/codegen/uninit-consts.rs +++ b/tests/codegen/uninit-consts.rs @@ -1,5 +1,4 @@ // compile-flags: -C no-prepopulate-passes -// min-llvm-version: 14.0 // Check that we use undef (and not zero) for uninitialized bytes in constants. diff --git a/tests/codegen/vec-in-place.rs b/tests/codegen/vec-in-place.rs index 5df366905..999260422 100644 --- a/tests/codegen/vec-in-place.rs +++ b/tests/codegen/vec-in-place.rs @@ -1,4 +1,3 @@ -// min-llvm-version: 14.0 // ignore-debug: the debug assertions get in the way // compile-flags: -O -Z merge-functions=disabled #![crate_type = "lib"] |