// compile-flags: -C opt-level=3 --edition=2021
// only-x86_64
// ignore-debug: the debug assertions get in the way
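
// Check that reducing a `Simd<u8, 8>` into a wider `u16` sum compiles down to
// a single `zext` of the vector to `<8 x i16>` plus one
// `llvm.vector.reduce.add.v8i16` call, regardless of how the reduction is
// written.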

#![crate_type = "lib"]
#![feature(portable_simd)]
use std::simd::{Simd, SimdUint};

const N: usize = 8;

#[no_mangle]
// CHECK-LABEL: @wider_reduce_simd
pub fn wider_reduce_simd(x: Simd<u8, N>) -> u16 {
    // CHECK: zext <8 x i8>
    // CHECK-SAME: to <8 x i16>
    // CHECK: call i16 @llvm.vector.reduce.add.v8i16(<8 x i16>
    let x: Simd<u16, N> = x.cast();
    x.reduce_sum()
}

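// The same widening sum written as an indexed scalar loop; it should
// auto-vectorize to the same widened vector reduce.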
#[no_mangle]
// CHECK-LABEL: @wider_reduce_loop
pub fn wider_reduce_loop(x: Simd<u8, N>) -> u16 {
    // CHECK: zext <8 x i8>
    // CHECK-SAME: to <8 x i16>
    // CHECK: call i16 @llvm.vector.reduce.add.v8i16(<8 x i16>
    let mut sum = 0_u16;
    for i in 0..N {
        sum += u16::from(x[i]);
    }
    sum
}

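// The same sum written with iterator adapters over the borrowed array.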
#[no_mangle]
// CHECK-LABEL: @wider_reduce_iter
pub fn wider_reduce_iter(x: Simd<u8, N>) -> u16 {
    // CHECK: zext <8 x i8>
    // CHECK-SAME: to <8 x i16>
    // CHECK: call i16 @llvm.vector.reduce.add.v8i16(<8 x i16>
    x.as_array().iter().copied().map(u16::from).sum()
}

// This `into_iter` version is the most interesting one, as it used to not
// auto-vectorize due to a suboptimality in the
// `<array::IntoIter as Iterator>::fold` implementation.
#[no_mangle]
// CHECK-LABEL: @wider_reduce_into_iter
pub fn wider_reduce_into_iter(x: Simd<u8, N>) -> u16 {
    // FIXME: MIR inlining messes up LLVM optimizations.
    // WOULD-CHECK: zext <8 x i8>
    // WOULD-CHECK-SAME: to <8 x i16>
    // WOULD-CHECK: call i16 @llvm.vector.reduce.add.v8i16(<8 x i16>
    x.to_array().into_iter().map(u16::from).sum()
}